Dataset columns (type, observed value range or string length):

    author              int64     658 to 755k
    date                string    length 19
    timezone            int64     -46,800 to 43.2k
    hash                string    length 40
    message             string    length 5 to 490
    mods                list      (file-level changes)
    language            string    20 distinct values
    license             string    3 distinct values
    repo                string    length 5 to 68
    original_message    string    length 12 to 491
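The `mods` column in the rows below is a JSON list of file-level changes. As a minimal sketch of how one might model and decode a single entry, the following Go program mirrors the keys visible in the data (`change_type`, `old_path`, `new_path`, `diff`); the struct and helper names are illustrative only, since the dataset ships no official Go bindings.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// FileMod mirrors the keys seen in the "mods" column of the rows below.
type FileMod struct {
	ChangeType string  `json:"change_type"` // e.g. "MODIFY" or "ADD"
	OldPath    *string `json:"old_path"`    // null for added files
	NewPath    *string `json:"new_path"`    // null for deleted files
	Diff       string  `json:"diff"`        // unified diff body
}

func pathOrNone(p *string) string {
	if p == nil {
		return "<none>"
	}
	return *p
}

func main() {
	// A shortened entry in the same shape as the rows below.
	raw := `[{"change_type": "ADD", "old_path": null,
	          "new_path": "pkg/usermem/marshal.go", "diff": "+// Copyright 2021 ..."}]`

	var mods []FileMod
	if err := json.Unmarshal([]byte(raw), &mods); err != nil {
		panic(err)
	}
	for _, m := range mods {
		fmt.Printf("%s %s -> %s (%d bytes of diff)\n",
			m.ChangeType, pathOrNone(m.OldPath), pathOrNone(m.NewPath), len(m.Diff))
	}
}
```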
259,885
10.06.2021 18:22:18
25,200
0892420c9796358da06ea3ba375ee3e0fa8595ac
Minor VFS2 xattr changes. Allow the gofer client to use most xattr namespaces. As documented by the updated comment, this is consistent with e.g. Linux's FUSE client, and allows gofers to provide extended attributes from FUSE filesystems. Make tmpfs' listxattr omit xattrs in the "trusted" namespace for non-privileged users.
[ { "change_type": "MODIFY", "old_path": "pkg/abi/linux/xattr.go", "new_path": "pkg/abi/linux/xattr.go", "diff": "@@ -23,6 +23,12 @@ const (\nXATTR_CREATE = 1\nXATTR_REPLACE = 2\n+ XATTR_SECURITY_PREFIX = \"security.\"\n+ XATTR_SECURITY_PREFIX_LEN = len(XATTR_SECURITY_PREFIX)\n+\n+ XATTR_SYSTEM_PREFIX = \"system.\"\n+ XATTR_SYSTEM_PREFIX_LEN = len(XATTR_SYSTEM_PREFIX)\n+\nXATTR_TRUSTED_PREFIX = \"trusted.\"\nXATTR_TRUSTED_PREFIX_LEN = len(XATTR_TRUSTED_PREFIX)\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/gofer/gofer.go", "new_path": "pkg/sentry/fsimpl/gofer/gofer.go", "diff": "@@ -1282,9 +1282,12 @@ func (d *dentry) checkPermissions(creds *auth.Credentials, ats vfs.AccessTypes)\n}\nfunc (d *dentry) checkXattrPermissions(creds *auth.Credentials, name string, ats vfs.AccessTypes) error {\n- // We only support xattrs prefixed with \"user.\" (see b/148380782). Currently,\n- // there is no need to expose any other xattrs through a gofer.\n- if !strings.HasPrefix(name, linux.XATTR_USER_PREFIX) {\n+ // Deny access to the \"security\" and \"system\" namespaces since applications\n+ // may expect these to affect kernel behavior in unimplemented ways\n+ // (b/148380782). Allow all other extended attributes to be passed through\n+ // to the remote filesystem. This is inconsistent with Linux's 9p client,\n+ // but consistent with other filesystems (e.g. FUSE).\n+ if strings.HasPrefix(name, linux.XATTR_SECURITY_PREFIX) || strings.HasPrefix(name, linux.XATTR_SYSTEM_PREFIX) {\nreturn syserror.EOPNOTSUPP\n}\nmode := linux.FileMode(atomic.LoadUint32(&d.mode))\n@@ -1684,7 +1687,7 @@ func (d *dentry) setDeleted() {\n}\nfunc (d *dentry) listXattr(ctx context.Context, creds *auth.Credentials, size uint64) ([]string, error) {\n- if d.file.isNil() || !d.userXattrSupported() {\n+ if d.file.isNil() {\nreturn nil, nil\n}\nxattrMap, err := d.file.listXattr(ctx, size)\n@@ -1693,11 +1696,8 @@ func (d *dentry) listXattr(ctx context.Context, creds *auth.Credentials, size ui\n}\nxattrs := make([]string, 0, len(xattrMap))\nfor x := range xattrMap {\n- // We only support xattrs in the user.* namespace.\n- if strings.HasPrefix(x, linux.XATTR_USER_PREFIX) {\nxattrs = append(xattrs, x)\n}\n- }\nreturn xattrs, nil\n}\n@@ -1731,13 +1731,6 @@ func (d *dentry) removeXattr(ctx context.Context, creds *auth.Credentials, name\nreturn d.file.removeXattr(ctx, name)\n}\n-// Extended attributes in the user.* namespace are only supported for regular\n-// files and directories.\n-func (d *dentry) userXattrSupported() bool {\n- filetype := linux.FileMode(atomic.LoadUint32(&d.mode)).FileType()\n- return filetype == linux.ModeRegular || filetype == linux.ModeDirectory\n-}\n-\n// Preconditions:\n// * !d.isSynthetic().\n// * d.isRegularFile() || d.isDir().\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/tmpfs/filesystem.go", "new_path": "pkg/sentry/fsimpl/tmpfs/filesystem.go", "diff": "@@ -822,7 +822,7 @@ func (fs *filesystem) ListXattrAt(ctx context.Context, rp *vfs.ResolvingPath, si\nif err != nil {\nreturn nil, err\n}\n- return d.inode.listXattr(size)\n+ return d.inode.listXattr(rp.Credentials(), size)\n}\n// GetXattrAt implements vfs.FilesystemImpl.GetXattrAt.\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/tmpfs/tmpfs.go", "new_path": "pkg/sentry/fsimpl/tmpfs/tmpfs.go", "diff": "@@ -717,44 +717,63 @@ func (i *inode) touchCMtimeLocked() {\natomic.StoreInt64(&i.ctime, now)\n}\n-func (i *inode) listXattr(size uint64) ([]string, error) {\n- return i.xattrs.ListXattr(size)\n+func 
checkXattrName(name string) error {\n+ // Linux's tmpfs supports \"security\" and \"trusted\" xattr namespaces, and\n+ // (depending on build configuration) POSIX ACL xattr namespaces\n+ // (\"system.posix_acl_access\" and \"system.posix_acl_default\"). We don't\n+ // support POSIX ACLs or the \"security\" namespace (b/148380782).\n+ if strings.HasPrefix(name, linux.XATTR_TRUSTED_PREFIX) {\n+ return nil\n+ }\n+ // We support the \"user\" namespace because we have tests that depend on\n+ // this feature.\n+ if strings.HasPrefix(name, linux.XATTR_USER_PREFIX) {\n+ return nil\n+ }\n+ return syserror.EOPNOTSUPP\n+}\n+\n+func (i *inode) listXattr(creds *auth.Credentials, size uint64) ([]string, error) {\n+ return i.xattrs.ListXattr(creds, size)\n}\nfunc (i *inode) getXattr(creds *auth.Credentials, opts *vfs.GetXattrOptions) (string, error) {\n- if err := i.checkXattrPermissions(creds, opts.Name, vfs.MayRead); err != nil {\n+ if err := checkXattrName(opts.Name); err != nil {\nreturn \"\", err\n}\n- return i.xattrs.GetXattr(opts)\n+ mode := linux.FileMode(atomic.LoadUint32(&i.mode))\n+ kuid := auth.KUID(atomic.LoadUint32(&i.uid))\n+ kgid := auth.KGID(atomic.LoadUint32(&i.gid))\n+ if err := vfs.GenericCheckPermissions(creds, vfs.MayRead, mode, kuid, kgid); err != nil {\n+ return \"\", err\n+ }\n+ return i.xattrs.GetXattr(creds, mode, kuid, opts)\n}\nfunc (i *inode) setXattr(creds *auth.Credentials, opts *vfs.SetXattrOptions) error {\n- if err := i.checkXattrPermissions(creds, opts.Name, vfs.MayWrite); err != nil {\n+ if err := checkXattrName(opts.Name); err != nil {\nreturn err\n}\n- return i.xattrs.SetXattr(opts)\n-}\n-\n-func (i *inode) removeXattr(creds *auth.Credentials, name string) error {\n- if err := i.checkXattrPermissions(creds, name, vfs.MayWrite); err != nil {\n+ mode := linux.FileMode(atomic.LoadUint32(&i.mode))\n+ kuid := auth.KUID(atomic.LoadUint32(&i.uid))\n+ kgid := auth.KGID(atomic.LoadUint32(&i.gid))\n+ if err := vfs.GenericCheckPermissions(creds, vfs.MayWrite, mode, kuid, kgid); err != nil {\nreturn err\n}\n- return i.xattrs.RemoveXattr(name)\n+ return i.xattrs.SetXattr(creds, mode, kuid, opts)\n}\n-func (i *inode) checkXattrPermissions(creds *auth.Credentials, name string, ats vfs.AccessTypes) error {\n- // We currently only support extended attributes in the user.* and\n- // trusted.* namespaces. 
See b/148380782.\n- if !strings.HasPrefix(name, linux.XATTR_USER_PREFIX) && !strings.HasPrefix(name, linux.XATTR_TRUSTED_PREFIX) {\n- return syserror.EOPNOTSUPP\n+func (i *inode) removeXattr(creds *auth.Credentials, name string) error {\n+ if err := checkXattrName(name); err != nil {\n+ return err\n}\nmode := linux.FileMode(atomic.LoadUint32(&i.mode))\nkuid := auth.KUID(atomic.LoadUint32(&i.uid))\nkgid := auth.KGID(atomic.LoadUint32(&i.gid))\n- if err := vfs.GenericCheckPermissions(creds, ats, mode, kuid, kgid); err != nil {\n+ if err := vfs.GenericCheckPermissions(creds, vfs.MayWrite, mode, kuid, kgid); err != nil {\nreturn err\n}\n- return vfs.CheckXattrPermissions(creds, ats, mode, kuid, name)\n+ return i.xattrs.RemoveXattr(creds, mode, kuid, name)\n}\n// fileDescription is embedded by tmpfs implementations of\n@@ -807,7 +826,7 @@ func (fd *fileDescription) StatFS(ctx context.Context) (linux.Statfs, error) {\n// ListXattr implements vfs.FileDescriptionImpl.ListXattr.\nfunc (fd *fileDescription) ListXattr(ctx context.Context, size uint64) ([]string, error) {\n- return fd.inode().listXattr(size)\n+ return fd.inode().listXattr(auth.CredentialsFromContext(ctx), size)\n}\n// GetXattr implements vfs.FileDescriptionImpl.GetXattr.\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/vfs/memxattr/BUILD", "new_path": "pkg/sentry/vfs/memxattr/BUILD", "diff": "@@ -8,6 +8,7 @@ go_library(\nvisibility = [\"//pkg/sentry:internal\"],\ndeps = [\n\"//pkg/abi/linux\",\n+ \"//pkg/sentry/kernel/auth\",\n\"//pkg/sentry/vfs\",\n\"//pkg/sync\",\n\"//pkg/syserror\",\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/vfs/memxattr/xattr.go", "new_path": "pkg/sentry/vfs/memxattr/xattr.go", "diff": "package memxattr\nimport (\n+ \"strings\"\n+\n\"gvisor.dev/gvisor/pkg/abi/linux\"\n+ \"gvisor.dev/gvisor/pkg/sentry/kernel/auth\"\n\"gvisor.dev/gvisor/pkg/sentry/vfs\"\n\"gvisor.dev/gvisor/pkg/sync\"\n\"gvisor.dev/gvisor/pkg/syserror\"\n@@ -26,6 +29,9 @@ import (\n// SimpleExtendedAttributes implements extended attributes using a map of\n// names to values.\n//\n+// SimpleExtendedAttributes calls vfs.CheckXattrPermissions, so callers are not\n+// required to do so.\n+//\n// +stateify savable\ntype SimpleExtendedAttributes struct {\n// mu protects the below fields.\n@@ -34,7 +40,11 @@ type SimpleExtendedAttributes struct {\n}\n// GetXattr returns the value at 'name'.\n-func (x *SimpleExtendedAttributes) GetXattr(opts *vfs.GetXattrOptions) (string, error) {\n+func (x *SimpleExtendedAttributes) GetXattr(creds *auth.Credentials, mode linux.FileMode, kuid auth.KUID, opts *vfs.GetXattrOptions) (string, error) {\n+ if err := vfs.CheckXattrPermissions(creds, vfs.MayRead, mode, kuid, opts.Name); err != nil {\n+ return \"\", err\n+ }\n+\nx.mu.RLock()\nvalue, ok := x.xattrs[opts.Name]\nx.mu.RUnlock()\n@@ -50,7 +60,11 @@ func (x *SimpleExtendedAttributes) GetXattr(opts *vfs.GetXattrOptions) (string,\n}\n// SetXattr sets 'value' at 'name'.\n-func (x *SimpleExtendedAttributes) SetXattr(opts *vfs.SetXattrOptions) error {\n+func (x *SimpleExtendedAttributes) SetXattr(creds *auth.Credentials, mode linux.FileMode, kuid auth.KUID, opts *vfs.SetXattrOptions) error {\n+ if err := vfs.CheckXattrPermissions(creds, vfs.MayWrite, mode, kuid, opts.Name); err != nil {\n+ return err\n+ }\n+\nx.mu.Lock()\ndefer x.mu.Unlock()\nif x.xattrs == nil {\n@@ -73,12 +87,19 @@ func (x *SimpleExtendedAttributes) SetXattr(opts *vfs.SetXattrOptions) error {\n}\n// ListXattr returns all names in xattrs.\n-func (x *SimpleExtendedAttributes) 
ListXattr(size uint64) ([]string, error) {\n+func (x *SimpleExtendedAttributes) ListXattr(creds *auth.Credentials, size uint64) ([]string, error) {\n// Keep track of the size of the buffer needed in listxattr(2) for the list.\nlistSize := 0\nx.mu.RLock()\nnames := make([]string, 0, len(x.xattrs))\n+ haveCap := creds.HasCapability(linux.CAP_SYS_ADMIN)\nfor n := range x.xattrs {\n+ // Hide extended attributes in the \"trusted\" namespace from\n+ // non-privileged users. This is consistent with Linux's\n+ // fs/xattr.c:simple_xattr_list().\n+ if !haveCap && strings.HasPrefix(n, linux.XATTR_TRUSTED_PREFIX) {\n+ continue\n+ }\nnames = append(names, n)\n// Add one byte per null terminator.\nlistSize += len(n) + 1\n@@ -91,7 +112,11 @@ func (x *SimpleExtendedAttributes) ListXattr(size uint64) ([]string, error) {\n}\n// RemoveXattr removes the xattr at 'name'.\n-func (x *SimpleExtendedAttributes) RemoveXattr(name string) error {\n+func (x *SimpleExtendedAttributes) RemoveXattr(creds *auth.Credentials, mode linux.FileMode, kuid auth.KUID, name string) error {\n+ if err := vfs.CheckXattrPermissions(creds, vfs.MayWrite, mode, kuid, name); err != nil {\n+ return err\n+ }\n+\nx.mu.Lock()\ndefer x.mu.Unlock()\nif _, ok := x.xattrs[name]; !ok {\n" } ]
Go
Apache License 2.0
google/gvisor
Minor VFS2 xattr changes. - Allow the gofer client to use most xattr namespaces. As documented by the updated comment, this is consistent with e.g. Linux's FUSE client, and allows gofers to provide extended attributes from FUSE filesystems. - Make tmpfs' listxattr omit xattrs in the "trusted" namespace for non-privileged users. PiperOrigin-RevId: 378778854
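The commit above changes which xattr namespaces the gofer client passes through and which names tmpfs reports from listxattr. As a rough illustration of the user-visible behavior it describes (this is not code from the commit), the Go sketch below sets a `user.*` attribute and lists attribute names via golang.org/x/sys/unix; the scratch path is an assumption, and the `trusted.*` filtering mentioned in the comments applies inside gVisor's tmpfs per the commit, not to the host filesystem generally.

```go
package main

import (
	"bytes"
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	// Hypothetical scratch file; on Linux /tmp is often (but not always) tmpfs.
	const path = "/tmp/xattr-demo"
	if err := os.WriteFile(path, []byte("hi"), 0o644); err != nil {
		fmt.Println("create:", err)
		return
	}
	defer os.Remove(path)

	// "user.*" attributes are accepted by both tmpfs and the gofer client.
	if err := unix.Setxattr(path, "user.demo", []byte("value"), 0); err != nil {
		fmt.Println("setxattr:", err)
		return
	}

	// Listxattr fills buf with NUL-separated attribute names.
	buf := make([]byte, 1024)
	n, err := unix.Listxattr(path, buf)
	if err != nil {
		fmt.Println("listxattr:", err)
		return
	}
	for _, name := range bytes.Split(buf[:n], []byte{0}) {
		if len(name) == 0 {
			continue
		}
		// Per the commit, names in the "trusted." namespace would be omitted
		// here on gVisor's tmpfs for callers without CAP_SYS_ADMIN.
		fmt.Println(string(name))
	}
}
```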
259,853
11.06.2021 16:44:55
25,200
3c63fce628ccee1f41c9c3a1ff693042ce094503
Temorary skip test cases that fail on Linux
[ { "change_type": "MODIFY", "old_path": "test/syscalls/linux/exec.cc", "new_path": "test/syscalls/linux/exec.cc", "diff": "@@ -278,6 +278,9 @@ TEST(ExecTest, InterpreterScriptArgNUL) {\n// Trailing whitespace following interpreter path is ignored.\nTEST(ExecTest, InterpreterScriptTrailingWhitespace) {\n+ // FIXME(b/190850365): This test case fails on Linux.\n+ SKIP_IF(!IsRunningOnGvisor());\n+\n// Symlink through /tmp to ensure the path is short enough.\nTempPath link = ASSERT_NO_ERRNO_AND_VALUE(\nTempPath::CreateSymlinkTo(\"/tmp\", RunfilePath(kBasicWorkload)));\n@@ -303,6 +306,9 @@ TEST(ExecTest, InterpreterScriptArgWhitespace) {\n}\nTEST(ExecTest, InterpreterScriptNoPath) {\n+ // FIXME(b/190850365): This test case fails on Linux.\n+ SKIP_IF(!IsRunningOnGvisor());\n+\nTempPath script = ASSERT_NO_ERRNO_AND_VALUE(\nTempPath::CreateFileWith(GetAbsoluteTestTmpdir(), \"#!\", 0755));\n" } ]
Go
Apache License 2.0
google/gvisor
Temorary skip test cases that fail on Linux PiperOrigin-RevId: 378974239
259,853
11.06.2021 18:41:13
25,200
ec6a7ebc75f714e9ac8ae1dc785304661f6e63b4
Rework the workaround of the XCR0 issue XCR0 has to be synchronized with the host. We can call xsave from the host context and then call xrstor from the guest context and vise versa. This means we need to support the same set of FPU features in both contexts.
[ { "change_type": "MODIFY", "old_path": "pkg/ring0/kernel_amd64.go", "new_path": "pkg/ring0/kernel_amd64.go", "diff": "@@ -254,6 +254,8 @@ func (c *CPU) SwitchToUser(switchOpts SwitchOpts) (vector Vector) {\nreturn\n}\n+var sentryXCR0 = xgetbv(0)\n+\n// start is the CPU entrypoint.\n//\n// This is called from the Start asm stub (see entry_amd64.go); on return the\n@@ -265,24 +267,10 @@ func start(c *CPU) {\nWriteGS(kernelAddr(c.kernelEntry))\nWriteFS(uintptr(c.registers.Fs_base))\n- // Initialize floating point.\n- //\n- // Note that on skylake, the valid XCR0 mask reported seems to be 0xff.\n- // This breaks down as:\n- //\n- // bit0 - x87\n- // bit1 - SSE\n- // bit2 - AVX\n- // bit3-4 - MPX\n- // bit5-7 - AVX512\n- //\n- // For some reason, enabled MPX & AVX512 on platforms that report them\n- // seems to be cause a general protection fault. (Maybe there are some\n- // virtualization issues and these aren't exported to the guest cpuid.)\n- // This needs further investigation, but we can limit the floating\n- // point operations to x87, SSE & AVX for now.\nfninit()\n- xsetbv(0, validXCR0Mask&0x7)\n+ // Need to sync XCR0 with the host, because xsave and xrstor can be\n+ // called from different contexts.\n+ xsetbv(0, sentryXCR0)\n// Set the syscall target.\nwrmsr(_MSR_LSTAR, kernelFunc(sysenter))\n" } ]
Go
Apache License 2.0
google/gvisor
Rework the workaround of the XCR0 issue XCR0 has to be synchronized with the host. We can call xsave from the host context and then call xrstor from the guest context and vise versa. This means we need to support the same set of FPU features in both contexts. PiperOrigin-RevId: 378988281
259,884
13.06.2021 19:21:46
25,200
5c9e84622305dc9fd4e9b81eeb7309b8a894f99e
Remove usermem dependency from marshal Both marshal and usermem are depended on by many packages and a dependency on marshal can often create circular dependencies. marshal should consider adding internal dependencies carefully moving forward. Fixes
[ { "change_type": "MODIFY", "old_path": "pkg/abi/linux/signal.go", "new_path": "pkg/abi/linux/signal.go", "diff": "@@ -293,8 +293,6 @@ type Sigevent struct {\nUnRemainder [44]byte\n}\n-// LINT.IfChange\n-\n// SigAction represents struct sigaction.\n//\n// +marshal\n@@ -306,8 +304,6 @@ type SigAction struct {\nMask SignalSet\n}\n-// LINT.ThenChange(../../safecopy/safecopy_unsafe.go)\n-\n// SignalStack represents information about a user stack, and is equivalent to\n// stack_t.\n//\n" }, { "change_type": "MODIFY", "old_path": "pkg/marshal/primitive/BUILD", "new_path": "pkg/marshal/primitive/BUILD", "diff": "@@ -12,9 +12,7 @@ go_library(\n\"//:sandbox\",\n],\ndeps = [\n- \"//pkg/context\",\n\"//pkg/hostarch\",\n\"//pkg/marshal\",\n- \"//pkg/usermem\",\n],\n)\n" }, { "change_type": "MODIFY", "old_path": "pkg/marshal/primitive/primitive.go", "new_path": "pkg/marshal/primitive/primitive.go", "diff": "@@ -19,10 +19,8 @@ package primitive\nimport (\n\"io\"\n- \"gvisor.dev/gvisor/pkg/context\"\n\"gvisor.dev/gvisor/pkg/hostarch\"\n\"gvisor.dev/gvisor/pkg/marshal\"\n- \"gvisor.dev/gvisor/pkg/usermem\"\n)\n// Int8 is a marshal.Marshallable implementation for int8.\n@@ -400,26 +398,3 @@ func CopyStringOut(cc marshal.CopyContext, addr hostarch.Addr, src string) (int,\nsrcP := ByteSlice(src)\nreturn srcP.CopyOut(cc, addr)\n}\n-\n-// IOCopyContext wraps an object implementing hostarch.IO to implement\n-// marshal.CopyContext.\n-type IOCopyContext struct {\n- Ctx context.Context\n- IO usermem.IO\n- Opts usermem.IOOpts\n-}\n-\n-// CopyScratchBuffer implements marshal.CopyContext.CopyScratchBuffer.\n-func (i *IOCopyContext) CopyScratchBuffer(size int) []byte {\n- return make([]byte, size)\n-}\n-\n-// CopyOutBytes implements marshal.CopyContext.CopyOutBytes.\n-func (i *IOCopyContext) CopyOutBytes(addr hostarch.Addr, b []byte) (int, error) {\n- return i.IO.CopyOut(i.Ctx, addr, b, i.Opts)\n-}\n-\n-// CopyInBytes implements marshal.CopyContext.CopyInBytes.\n-func (i *IOCopyContext) CopyInBytes(addr hostarch.Addr, b []byte) (int, error) {\n- return i.IO.CopyIn(i.Ctx, addr, b, i.Opts)\n-}\n" }, { "change_type": "MODIFY", "old_path": "pkg/safecopy/BUILD", "new_path": "pkg/safecopy/BUILD", "diff": "@@ -18,6 +18,7 @@ go_library(\n],\nvisibility = [\"//:sandbox\"],\ndeps = [\n+ \"//pkg/abi/linux\",\n\"//pkg/syserror\",\n\"@org_golang_x_sys//unix:go_default_library\",\n],\n" }, { "change_type": "MODIFY", "old_path": "pkg/safecopy/safecopy_unsafe.go", "new_path": "pkg/safecopy/safecopy_unsafe.go", "diff": "@@ -20,6 +20,7 @@ import (\n\"unsafe\"\n\"golang.org/x/sys/unix\"\n+ \"gvisor.dev/gvisor/pkg/abi/linux\"\n)\n// maxRegisterSize is the maximum register size used in memcpy and memclr. 
It\n@@ -342,15 +343,7 @@ func errorFromFaultSignal(addr uintptr, sig int32) error {\n// handler however, and if this is function is being used externally then the\n// same courtesy is expected.\nfunc ReplaceSignalHandler(sig unix.Signal, handler uintptr, previous *uintptr) error {\n- // TODO(gvisor.dev/issue/6160): This struct is the same as linux.SigAction.\n- // Once the usermem dependency is removed from primitive, delete this replica\n- // and remove IFTTT comments in abi/linux/signal.go.\n- var sa struct {\n- handler uintptr\n- flags uint64\n- restorer uintptr\n- mask uint64\n- }\n+ var sa linux.SigAction\nconst maskLen = 8\n// Get the existing signal handler information, and save the current\n@@ -361,14 +354,14 @@ func ReplaceSignalHandler(sig unix.Signal, handler uintptr, previous *uintptr) e\n}\n// Fail if there isn't a previous handler.\n- if sa.handler == 0 {\n+ if sa.Handler == 0 {\nreturn fmt.Errorf(\"previous handler for signal %x isn't set\", sig)\n}\n- *previous = sa.handler\n+ *previous = uintptr(sa.Handler)\n// Install our own handler.\n- sa.handler = handler\n+ sa.Handler = uint64(handler)\nif _, _, e := unix.RawSyscall6(unix.SYS_RT_SIGACTION, uintptr(sig), uintptr(unsafe.Pointer(&sa)), 0, maskLen, 0, 0); e != 0 {\nreturn e\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/kernel/pipe/pipe_util.go", "new_path": "pkg/sentry/kernel/pipe/pipe_util.go", "diff": "@@ -135,7 +135,7 @@ func (p *Pipe) Ioctl(ctx context.Context, io usermem.IO, args arch.SyscallArgume\nv = math.MaxInt32 // Silently truncate.\n}\n// Copy result to userspace.\n- iocc := primitive.IOCopyContext{\n+ iocc := usermem.IOCopyContext{\nIO: io,\nCtx: ctx,\nOpts: usermem.IOOpts{\n" }, { "change_type": "MODIFY", "old_path": "pkg/usermem/BUILD", "new_path": "pkg/usermem/BUILD", "diff": "@@ -7,6 +7,7 @@ go_library(\nsrcs = [\n\"bytes_io.go\",\n\"bytes_io_unsafe.go\",\n+ \"marshal.go\",\n\"usermem.go\",\n],\nvisibility = [\"//:sandbox\"],\n" }, { "change_type": "ADD", "old_path": null, "new_path": "pkg/usermem/marshal.go", "diff": "+// Copyright 2021 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package usermem\n+\n+import (\n+ \"gvisor.dev/gvisor/pkg/context\"\n+ \"gvisor.dev/gvisor/pkg/hostarch\"\n+)\n+\n+// IOCopyContext wraps an object implementing hostarch.IO to implement\n+// marshal.CopyContext.\n+type IOCopyContext struct {\n+ Ctx context.Context\n+ IO IO\n+ Opts IOOpts\n+}\n+\n+// CopyScratchBuffer implements marshal.CopyContext.CopyScratchBuffer.\n+func (i *IOCopyContext) CopyScratchBuffer(size int) []byte {\n+ return make([]byte, size)\n+}\n+\n+// CopyOutBytes implements marshal.CopyContext.CopyOutBytes.\n+func (i *IOCopyContext) CopyOutBytes(addr hostarch.Addr, b []byte) (int, error) {\n+ return i.IO.CopyOut(i.Ctx, addr, b, i.Opts)\n+}\n+\n+// CopyInBytes implements marshal.CopyContext.CopyInBytes.\n+func (i *IOCopyContext) CopyInBytes(addr hostarch.Addr, b []byte) (int, error) {\n+ return i.IO.CopyIn(i.Ctx, addr, b, i.Opts)\n+}\n" } ]
Go
Apache License 2.0
google/gvisor
Remove usermem dependency from marshal Both marshal and usermem are depended on by many packages and a dependency on marshal can often create circular dependencies. marshal should consider adding internal dependencies carefully moving forward. Fixes #6160 PiperOrigin-RevId: 379199882
259,975
14.06.2021 09:49:59
25,200
397a59fc956e9d8af05960d31afd4536b62c2399
Remove debug lines from exec.cc
[ { "change_type": "MODIFY", "old_path": "test/syscalls/linux/exec.cc", "new_path": "test/syscalls/linux/exec.cc", "diff": "@@ -306,9 +306,6 @@ TEST(ExecTest, InterpreterScriptNoPath) {\nTempPath script = ASSERT_NO_ERRNO_AND_VALUE(\nTempPath::CreateFileWith(GetAbsoluteTestTmpdir(), \"#!\\n\\n\", 0755));\n- std::cerr << \"path: \" << script.path() << std::endl;\n- std::cerr << system(absl::StrCat(\"cat \", script.path()).c_str()) << std::endl;\n-\nint execve_errno;\nASSERT_NO_ERRNO_AND_VALUE(\nForkAndExec(script.path(), {script.path()}, {}, nullptr, &execve_errno));\n" } ]
Go
Apache License 2.0
google/gvisor
Remove debug lines from exec.cc PiperOrigin-RevId: 379298590
260,004
14.06.2021 15:27:01
25,200
d4af8da36160643614cae2405c5d829bb7c1bf78
Rename DefaultRouter event to OffLinkRoute event This change prepares for a later change which supports the NDP Route Information option to discover more-specific routes, as per RFC 4191. Updates
[ { "change_type": "MODIFY", "old_path": "pkg/tcpip/network/ipv6/ndp.go", "new_path": "pkg/tcpip/network/ipv6/ndp.go", "diff": "@@ -214,19 +214,17 @@ type NDPDispatcher interface {\n// is also not permitted to call into the stack.\nOnDuplicateAddressDetectionResult(tcpip.NICID, tcpip.Address, stack.DADResult)\n- // OnDefaultRouterDiscovered is called when a new default router is\n- // discovered.\n+ // OnOffLinkRouteUpdated is called when an off-link route is updated.\n//\n// This function is not permitted to block indefinitely. This function\n// is also not permitted to call into the stack.\n- OnDefaultRouterDiscovered(tcpip.NICID, tcpip.Address)\n+ OnOffLinkRouteUpdated(tcpip.NICID, tcpip.Subnet, tcpip.Address)\n- // OnDefaultRouterInvalidated is called when a discovered default router that\n- // was remembered is invalidated.\n+ // OnOffLinkRouteInvalidated is called when an off-link route is invalidated.\n//\n// This function is not permitted to block indefinitely. This function\n// is also not permitted to call into the stack.\n- OnDefaultRouterInvalidated(tcpip.NICID, tcpip.Address)\n+ OnOffLinkRouteInvalidated(tcpip.NICID, tcpip.Subnet, tcpip.Address)\n// OnOnLinkPrefixDiscovered is called when a new on-link prefix is discovered.\n//\n@@ -826,7 +824,7 @@ func (ndp *ndpState) invalidateDefaultRouter(ip tcpip.Address) {\n// Let the integrator know a discovered default router is invalidated.\nif ndpDisp := ndp.ep.protocol.options.NDPDisp; ndpDisp != nil {\n- ndpDisp.OnDefaultRouterInvalidated(ndp.ep.nic.ID(), ip)\n+ ndpDisp.OnOffLinkRouteInvalidated(ndp.ep.nic.ID(), header.IPv6EmptySubnet, ip)\n}\n}\n@@ -843,7 +841,7 @@ func (ndp *ndpState) rememberDefaultRouter(ip tcpip.Address, rl time.Duration) {\n}\n// Inform the integrator when we discovered a default router.\n- ndpDisp.OnDefaultRouterDiscovered(ndp.ep.nic.ID(), ip)\n+ ndpDisp.OnOffLinkRouteUpdated(ndp.ep.nic.ID(), header.IPv6EmptySubnet, ip)\nstate := defaultRouterState{\ninvalidationJob: ndp.ep.protocol.stack.NewJob(&ndp.ep.mu, func() {\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/network/ipv6/ndp_test.go", "new_path": "pkg/tcpip/network/ipv6/ndp_test.go", "diff": "@@ -42,11 +42,11 @@ type testNDPDispatcher struct {\nfunc (*testNDPDispatcher) OnDuplicateAddressDetectionResult(tcpip.NICID, tcpip.Address, stack.DADResult) {\n}\n-func (t *testNDPDispatcher) OnDefaultRouterDiscovered(_ tcpip.NICID, addr tcpip.Address) {\n+func (t *testNDPDispatcher) OnOffLinkRouteUpdated(_ tcpip.NICID, _ tcpip.Subnet, addr tcpip.Address) {\nt.addr = addr\n}\n-func (t *testNDPDispatcher) OnDefaultRouterInvalidated(_ tcpip.NICID, addr tcpip.Address) {\n+func (t *testNDPDispatcher) OnOffLinkRouteInvalidated(_ tcpip.NICID, _ tcpip.Subnet, addr tcpip.Address) {\nt.addr = addr\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/stack/ndp_test.go", "new_path": "pkg/tcpip/stack/ndp_test.go", "diff": "@@ -112,11 +112,12 @@ type ndpDADEvent struct {\nres stack.DADResult\n}\n-type ndpRouterEvent struct {\n+type ndpOffLinkRouteEvent struct {\nnicID tcpip.NICID\n- addr tcpip.Address\n- // true if router was discovered, false if invalidated.\n- discovered bool\n+ subnet tcpip.Subnet\n+ router tcpip.Address\n+ // true if route was updated, false if invalidated.\n+ updated bool\n}\ntype ndpPrefixEvent struct {\n@@ -167,7 +168,7 @@ var _ ipv6.NDPDispatcher = (*ndpDispatcher)(nil)\n// related events happen for test purposes.\ntype ndpDispatcher struct {\ndadC chan ndpDADEvent\n- routerC chan ndpRouterEvent\n+ offLinkRouteC chan 
ndpOffLinkRouteEvent\nprefixC chan ndpPrefixEvent\nautoGenAddrC chan ndpAutoGenAddrEvent\nrdnssC chan ndpRDNSSEvent\n@@ -187,23 +188,25 @@ func (n *ndpDispatcher) OnDuplicateAddressDetectionResult(nicID tcpip.NICID, add\n}\n}\n-// Implements ipv6.NDPDispatcher.OnDefaultRouterDiscovered.\n-func (n *ndpDispatcher) OnDefaultRouterDiscovered(nicID tcpip.NICID, addr tcpip.Address) {\n- if c := n.routerC; c != nil {\n- c <- ndpRouterEvent{\n+// Implements ipv6.NDPDispatcher.OnOffLinkRouteUpdated.\n+func (n *ndpDispatcher) OnOffLinkRouteUpdated(nicID tcpip.NICID, subnet tcpip.Subnet, router tcpip.Address) {\n+ if c := n.offLinkRouteC; c != nil {\n+ c <- ndpOffLinkRouteEvent{\nnicID,\n- addr,\n+ subnet,\n+ router,\ntrue,\n}\n}\n}\n-// Implements ipv6.NDPDispatcher.OnDefaultRouterInvalidated.\n-func (n *ndpDispatcher) OnDefaultRouterInvalidated(nicID tcpip.NICID, addr tcpip.Address) {\n- if c := n.routerC; c != nil {\n- c <- ndpRouterEvent{\n+// Implements ipv6.NDPDispatcher.OnOffLinkRouteInvalidated.\n+func (n *ndpDispatcher) OnOffLinkRouteInvalidated(nicID tcpip.NICID, subnet tcpip.Subnet, router tcpip.Address) {\n+ if c := n.offLinkRouteC; c != nil {\n+ c <- ndpOffLinkRouteEvent{\nnicID,\n- addr,\n+ subnet,\n+ router,\nfalse,\n}\n}\n@@ -1198,7 +1201,7 @@ func TestDynamicConfigurationsDisabled(t *testing.T) {\nt.Run(fmt.Sprintf(\"HandleRAs(%s), Forwarding(%t), Enabled(%t)\", handle, forwarding, enable), func(t *testing.T) {\nndpDisp := ndpDispatcher{\n- routerC: make(chan ndpRouterEvent, 1),\n+ offLinkRouteC: make(chan ndpOffLinkRouteEvent, 1),\nprefixC: make(chan ndpPrefixEvent, 1),\nautoGenAddrC: make(chan ndpAutoGenAddrEvent, 1),\n}\n@@ -1270,8 +1273,8 @@ func TestDynamicConfigurationsDisabled(t *testing.T) {\nt.Errorf(\"got v6Stats.UnhandledRouterAdvertisements.Value() = %d, want = %d\", got, want)\n}\nselect {\n- case e := <-ndpDisp.routerC:\n- t.Errorf(\"unexpectedly discovered a router when configured not to: %#v\", e)\n+ case e := <-ndpDisp.offLinkRouteC:\n+ t.Errorf(\"unexpectedly updated an off-link route when configured not to: %#v\", e)\ndefault:\n}\nselect {\n@@ -1298,9 +1301,9 @@ func boolToUint64(v bool) uint64 {\n}\n// Check e to make sure that the event is for addr on nic with ID 1, and the\n-// discovered flag set to discovered.\n-func checkRouterEvent(e ndpRouterEvent, addr tcpip.Address, discovered bool) string {\n- return cmp.Diff(ndpRouterEvent{nicID: 1, addr: addr, discovered: discovered}, e, cmp.AllowUnexported(e))\n+// update flag set to updated.\n+func checkOffLinkRouteEvent(e ndpOffLinkRouteEvent, router tcpip.Address, updated bool) string {\n+ return cmp.Diff(ndpOffLinkRouteEvent{nicID: 1, subnet: header.IPv6EmptySubnet, router: router, updated: updated}, e, cmp.AllowUnexported(e))\n}\nfunc testWithRAs(t *testing.T, f func(*testing.T, ipv6.HandleRAsConfiguration, bool)) {\n@@ -1336,7 +1339,7 @@ func testWithRAs(t *testing.T, f func(*testing.T, ipv6.HandleRAsConfiguration, b\nfunc TestRouterDiscovery(t *testing.T) {\ntestWithRAs(t, func(t *testing.T, handleRAs ipv6.HandleRAsConfiguration, forwarding bool) {\nndpDisp := ndpDispatcher{\n- routerC: make(chan ndpRouterEvent, 1),\n+ offLinkRouteC: make(chan ndpOffLinkRouteEvent, 1),\n}\ne := channel.New(0, 1280, linkAddr1)\nclock := faketime.NewManualClock()\n@@ -1351,27 +1354,27 @@ func TestRouterDiscovery(t *testing.T) {\nClock: clock,\n})\n- expectRouterEvent := func(addr tcpip.Address, discovered bool) {\n+ expectOffLinkRouteEvent := func(addr tcpip.Address, updated bool) {\nt.Helper()\nselect {\n- case e := 
<-ndpDisp.routerC:\n- if diff := checkRouterEvent(e, addr, discovered); diff != \"\" {\n- t.Errorf(\"router event mismatch (-want +got):\\n%s\", diff)\n+ case e := <-ndpDisp.offLinkRouteC:\n+ if diff := checkOffLinkRouteEvent(e, addr, updated); diff != \"\" {\n+ t.Errorf(\"off-link route event mismatch (-want +got):\\n%s\", diff)\n}\ndefault:\nt.Fatal(\"expected router discovery event\")\n}\n}\n- expectAsyncRouterInvalidationEvent := func(addr tcpip.Address, timeout time.Duration) {\n+ expectAsyncOffLinkRouteInvalidationEvent := func(addr tcpip.Address, timeout time.Duration) {\nt.Helper()\nclock.Advance(timeout)\nselect {\n- case e := <-ndpDisp.routerC:\n- if diff := checkRouterEvent(e, addr, false); diff != \"\" {\n- t.Errorf(\"router event mismatch (-want +got):\\n%s\", diff)\n+ case e := <-ndpDisp.offLinkRouteC:\n+ if diff := checkOffLinkRouteEvent(e, addr, false); diff != \"\" {\n+ t.Errorf(\"off-link route event mismatch (-want +got):\\n%s\", diff)\n}\ndefault:\nt.Fatal(\"timed out waiting for router discovery event\")\n@@ -1390,26 +1393,26 @@ func TestRouterDiscovery(t *testing.T) {\n// remembered.\ne.InjectInbound(header.IPv6ProtocolNumber, raBuf(llAddr2, 0))\nselect {\n- case <-ndpDisp.routerC:\n- t.Fatal(\"unexpectedly discovered a router with 0 lifetime\")\n+ case <-ndpDisp.offLinkRouteC:\n+ t.Fatal(\"unexpectedly updated an off-link route with 0 lifetime\")\ndefault:\n}\n// Rx an RA from lladdr2 with a huge lifetime.\ne.InjectInbound(header.IPv6ProtocolNumber, raBuf(llAddr2, 1000))\n- expectRouterEvent(llAddr2, true)\n+ expectOffLinkRouteEvent(llAddr2, true)\n// Rx an RA from another router (lladdr3) with non-zero lifetime.\nconst l3LifetimeSeconds = 6\ne.InjectInbound(header.IPv6ProtocolNumber, raBuf(llAddr3, l3LifetimeSeconds))\n- expectRouterEvent(llAddr3, true)\n+ expectOffLinkRouteEvent(llAddr3, true)\n// Rx an RA from lladdr2 with lesser lifetime.\nconst l2LifetimeSeconds = 2\ne.InjectInbound(header.IPv6ProtocolNumber, raBuf(llAddr2, l2LifetimeSeconds))\nselect {\n- case <-ndpDisp.routerC:\n- t.Fatal(\"Should not receive a router event when updating lifetimes for known routers\")\n+ case <-ndpDisp.offLinkRouteC:\n+ t.Fatal(\"should not receive a off-link route event when updating lifetimes for known routers\")\ndefault:\n}\n@@ -1420,15 +1423,15 @@ func TestRouterDiscovery(t *testing.T) {\n// Wait for the normal lifetime plus an extra bit for the\n// router to get invalidated. If we don't get an invalidation\n// event after this time, then something is wrong.\n- expectAsyncRouterInvalidationEvent(llAddr2, l2LifetimeSeconds*time.Second)\n+ expectAsyncOffLinkRouteInvalidationEvent(llAddr2, l2LifetimeSeconds*time.Second)\n// Rx an RA from lladdr2 with huge lifetime.\ne.InjectInbound(header.IPv6ProtocolNumber, raBuf(llAddr2, 1000))\n- expectRouterEvent(llAddr2, true)\n+ expectOffLinkRouteEvent(llAddr2, true)\n// Rx an RA from lladdr2 with zero lifetime. It should be invalidated.\ne.InjectInbound(header.IPv6ProtocolNumber, raBuf(llAddr2, 0))\n- expectRouterEvent(llAddr2, false)\n+ expectOffLinkRouteEvent(llAddr2, false)\n// Wait for lladdr3's router invalidation job to execute. The lifetime\n// of the router should have been updated to the most recent (smaller)\n@@ -1437,7 +1440,7 @@ func TestRouterDiscovery(t *testing.T) {\n// Wait for the normal lifetime plus an extra bit for the\n// router to get invalidated. 
If we don't get an invalidation\n// event after this time, then something is wrong.\n- expectAsyncRouterInvalidationEvent(llAddr3, l3LifetimeSeconds*time.Second)\n+ expectAsyncOffLinkRouteInvalidationEvent(llAddr3, l3LifetimeSeconds*time.Second)\n})\n}\n@@ -1445,7 +1448,7 @@ func TestRouterDiscovery(t *testing.T) {\n// ipv6.MaxDiscoveredDefaultRouters discovered routers are remembered.\nfunc TestRouterDiscoveryMaxRouters(t *testing.T) {\nndpDisp := ndpDispatcher{\n- routerC: make(chan ndpRouterEvent, 1),\n+ offLinkRouteC: make(chan ndpOffLinkRouteEvent, 1),\n}\ne := channel.New(0, 1280, linkAddr1)\ns := stack.New(stack.Options{\n@@ -1472,9 +1475,9 @@ func TestRouterDiscoveryMaxRouters(t *testing.T) {\nif i <= ipv6.MaxDiscoveredDefaultRouters {\nselect {\n- case e := <-ndpDisp.routerC:\n- if diff := checkRouterEvent(e, llAddr, true); diff != \"\" {\n- t.Errorf(\"router event mismatch (-want +got):\\n%s\", diff)\n+ case e := <-ndpDisp.offLinkRouteC:\n+ if diff := checkOffLinkRouteEvent(e, llAddr, true); diff != \"\" {\n+ t.Errorf(\"off-link route event mismatch (-want +got):\\n%s\", diff)\n}\ndefault:\nt.Fatal(\"expected router discovery event\")\n@@ -1482,7 +1485,7 @@ func TestRouterDiscoveryMaxRouters(t *testing.T) {\n} else {\nselect {\n- case <-ndpDisp.routerC:\n+ case <-ndpDisp.offLinkRouteC:\nt.Fatal(\"should not have discovered a new router after we already discovered the max number of routers\")\ndefault:\n}\n@@ -4612,7 +4615,7 @@ func TestNoCleanupNDPStateWhenForwardingEnabled(t *testing.T) {\n)\nndpDisp := ndpDispatcher{\n- routerC: make(chan ndpRouterEvent, 1),\n+ offLinkRouteC: make(chan ndpOffLinkRouteEvent, 1),\nprefixC: make(chan ndpPrefixEvent, 1),\nautoGenAddrC: make(chan ndpAutoGenAddrEvent, 1),\n}\n@@ -4657,17 +4660,17 @@ func TestNoCleanupNDPStateWhenForwardingEnabled(t *testing.T) {\n),\n)\nselect {\n- case e := <-ndpDisp.routerC:\n- if diff := checkRouterEvent(e, llAddr3, true /* discovered */); diff != \"\" {\n- t.Errorf(\"router event mismatch (-want +got):\\n%s\", diff)\n+ case e := <-ndpDisp.offLinkRouteC:\n+ if diff := checkOffLinkRouteEvent(e, llAddr3, true /* discovered */); diff != \"\" {\n+ t.Errorf(\"off-link route event mismatch (-want +got):\\n%s\", diff)\n}\ndefault:\n- t.Errorf(\"expected router event for %s on NIC(%d)\", llAddr3, nicID)\n+ t.Errorf(\"expected off-link route event for %s on NIC(%d)\", llAddr3, nicID)\n}\nselect {\ncase e := <-ndpDisp.prefixC:\nif diff := checkPrefixEvent(e, subnet, true /* discovered */); diff != \"\" {\n- t.Errorf(\"router event mismatch (-want +got):\\n%s\", diff)\n+ t.Errorf(\"off-link route event mismatch (-want +got):\\n%s\", diff)\n}\ndefault:\nt.Errorf(\"expected prefix event for %s on NIC(%d)\", prefix, nicID)\n@@ -4689,8 +4692,8 @@ func TestNoCleanupNDPStateWhenForwardingEnabled(t *testing.T) {\nt.Fatalf(\"SetForwardingDefaultAndAllNICs(%d, %t): %s\", ipv6.ProtocolNumber, forwarding, err)\n}\nselect {\n- case e := <-ndpDisp.routerC:\n- t.Errorf(\"unexpected router event = %#v\", e)\n+ case e := <-ndpDisp.offLinkRouteC:\n+ t.Errorf(\"unexpected off-link route event = %#v\", e)\ndefault:\n}\nselect {\n@@ -4776,7 +4779,7 @@ func TestCleanupNDPState(t *testing.T) {\nfor _, test := range tests {\nt.Run(test.name, func(t *testing.T) {\nndpDisp := ndpDispatcher{\n- routerC: make(chan ndpRouterEvent, maxRouterAndPrefixEvents),\n+ offLinkRouteC: make(chan ndpOffLinkRouteEvent, maxRouterAndPrefixEvents),\nprefixC: make(chan ndpPrefixEvent, maxRouterAndPrefixEvents),\nautoGenAddrC: make(chan ndpAutoGenAddrEvent, 
test.maxAutoGenAddrEvents),\n}\n@@ -4795,14 +4798,14 @@ func TestCleanupNDPState(t *testing.T) {\nClock: clock,\n})\n- expectRouterEvent := func() (bool, ndpRouterEvent) {\n+ expectOffLinkRouteEvent := func() (bool, ndpOffLinkRouteEvent) {\nselect {\n- case e := <-ndpDisp.routerC:\n+ case e := <-ndpDisp.offLinkRouteC:\nreturn true, e\ndefault:\n}\n- return false, ndpRouterEvent{}\n+ return false, ndpOffLinkRouteEvent{}\n}\nexpectPrefixEvent := func() (bool, ndpPrefixEvent) {\n@@ -4847,8 +4850,8 @@ func TestCleanupNDPState(t *testing.T) {\n// multiple addresses.\ne1.InjectInbound(header.IPv6ProtocolNumber, raBufWithPI(llAddr3, lifetimeSeconds, prefix1, true, true, lifetimeSeconds, lifetimeSeconds))\n- if ok, _ := expectRouterEvent(); !ok {\n- t.Errorf(\"expected router event for %s on NIC(%d)\", llAddr3, nicID1)\n+ if ok, _ := expectOffLinkRouteEvent(); !ok {\n+ t.Errorf(\"expected off-link route event for %s on NIC(%d)\", llAddr3, nicID1)\n}\nif ok, _ := expectPrefixEvent(); !ok {\nt.Errorf(\"expected prefix event for %s on NIC(%d)\", prefix1, nicID1)\n@@ -4858,8 +4861,8 @@ func TestCleanupNDPState(t *testing.T) {\n}\ne1.InjectInbound(header.IPv6ProtocolNumber, raBufWithPI(llAddr4, lifetimeSeconds, prefix2, true, true, lifetimeSeconds, lifetimeSeconds))\n- if ok, _ := expectRouterEvent(); !ok {\n- t.Errorf(\"expected router event for %s on NIC(%d)\", llAddr4, nicID1)\n+ if ok, _ := expectOffLinkRouteEvent(); !ok {\n+ t.Errorf(\"expected off-link route event for %s on NIC(%d)\", llAddr4, nicID1)\n}\nif ok, _ := expectPrefixEvent(); !ok {\nt.Errorf(\"expected prefix event for %s on NIC(%d)\", prefix2, nicID1)\n@@ -4869,8 +4872,8 @@ func TestCleanupNDPState(t *testing.T) {\n}\ne2.InjectInbound(header.IPv6ProtocolNumber, raBufWithPI(llAddr3, lifetimeSeconds, prefix1, true, true, lifetimeSeconds, lifetimeSeconds))\n- if ok, _ := expectRouterEvent(); !ok {\n- t.Errorf(\"expected router event for %s on NIC(%d)\", llAddr3, nicID2)\n+ if ok, _ := expectOffLinkRouteEvent(); !ok {\n+ t.Errorf(\"expected off-link route event for %s on NIC(%d)\", llAddr3, nicID2)\n}\nif ok, _ := expectPrefixEvent(); !ok {\nt.Errorf(\"expected prefix event for %s on NIC(%d)\", prefix1, nicID2)\n@@ -4880,8 +4883,8 @@ func TestCleanupNDPState(t *testing.T) {\n}\ne2.InjectInbound(header.IPv6ProtocolNumber, raBufWithPI(llAddr4, lifetimeSeconds, prefix2, true, true, lifetimeSeconds, lifetimeSeconds))\n- if ok, _ := expectRouterEvent(); !ok {\n- t.Errorf(\"expected router event for %s on NIC(%d)\", llAddr4, nicID2)\n+ if ok, _ := expectOffLinkRouteEvent(); !ok {\n+ t.Errorf(\"expected off-link route event for %s on NIC(%d)\", llAddr4, nicID2)\n}\nif ok, _ := expectPrefixEvent(); !ok {\nt.Errorf(\"expected prefix event for %s on NIC(%d)\", prefix2, nicID2)\n@@ -4922,14 +4925,14 @@ func TestCleanupNDPState(t *testing.T) {\ntest.cleanupFn(t, s)\n// Collect invalidation events after having NDP state cleaned up.\n- gotRouterEvents := make(map[ndpRouterEvent]int)\n+ gotOffLinkRouteEvents := make(map[ndpOffLinkRouteEvent]int)\nfor i := 0; i < maxRouterAndPrefixEvents; i++ {\n- ok, e := expectRouterEvent()\n+ ok, e := expectOffLinkRouteEvent()\nif !ok {\n- t.Errorf(\"expected %d router events after becoming a router; got = %d\", maxRouterAndPrefixEvents, i)\n+ t.Errorf(\"expected %d off-link route events after becoming a router; got = %d\", maxRouterAndPrefixEvents, i)\nbreak\n}\n- gotRouterEvents[e]++\n+ gotOffLinkRouteEvents[e]++\n}\ngotPrefixEvents := make(map[ndpPrefixEvent]int)\nfor i := 0; i < maxRouterAndPrefixEvents; i++ 
{\n@@ -4956,14 +4959,14 @@ func TestCleanupNDPState(t *testing.T) {\nt.FailNow()\n}\n- expectedRouterEvents := map[ndpRouterEvent]int{\n- {nicID: nicID1, addr: llAddr3, discovered: false}: 1,\n- {nicID: nicID1, addr: llAddr4, discovered: false}: 1,\n- {nicID: nicID2, addr: llAddr3, discovered: false}: 1,\n- {nicID: nicID2, addr: llAddr4, discovered: false}: 1,\n+ expectedOffLinkRouteEvents := map[ndpOffLinkRouteEvent]int{\n+ {nicID: nicID1, subnet: header.IPv6EmptySubnet, router: llAddr3, updated: false}: 1,\n+ {nicID: nicID1, subnet: header.IPv6EmptySubnet, router: llAddr4, updated: false}: 1,\n+ {nicID: nicID2, subnet: header.IPv6EmptySubnet, router: llAddr3, updated: false}: 1,\n+ {nicID: nicID2, subnet: header.IPv6EmptySubnet, router: llAddr4, updated: false}: 1,\n}\n- if diff := cmp.Diff(expectedRouterEvents, gotRouterEvents); diff != \"\" {\n- t.Errorf(\"router events mismatch (-want +got):\\n%s\", diff)\n+ if diff := cmp.Diff(expectedOffLinkRouteEvents, gotOffLinkRouteEvents); diff != \"\" {\n+ t.Errorf(\"off-link route events mismatch (-want +got):\\n%s\", diff)\n}\nexpectedPrefixEvents := map[ndpPrefixEvent]int{\n{nicID: nicID1, prefix: subnet1, discovered: false}: 1,\n@@ -5027,8 +5030,8 @@ func TestCleanupNDPState(t *testing.T) {\n// cancelled when the NDP state was cleaned up).\nclock.Advance(lifetimeSeconds * time.Second)\nselect {\n- case <-ndpDisp.routerC:\n- t.Error(\"unexpected router event\")\n+ case <-ndpDisp.offLinkRouteC:\n+ t.Error(\"unexpected off-link route event\")\ndefault:\n}\nselect {\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/tests/integration/loopback_test.go", "new_path": "pkg/tcpip/tests/integration/loopback_test.go", "diff": "@@ -44,10 +44,10 @@ type ndpDispatcher struct{}\nfunc (*ndpDispatcher) OnDuplicateAddressDetectionResult(tcpip.NICID, tcpip.Address, stack.DADResult) {\n}\n-func (*ndpDispatcher) OnDefaultRouterDiscovered(tcpip.NICID, tcpip.Address) {\n+func (*ndpDispatcher) OnOffLinkRouteUpdated(tcpip.NICID, tcpip.Subnet, tcpip.Address) {\n}\n-func (*ndpDispatcher) OnDefaultRouterInvalidated(tcpip.NICID, tcpip.Address) {}\n+func (*ndpDispatcher) OnOffLinkRouteInvalidated(tcpip.NICID, tcpip.Subnet, tcpip.Address) {}\nfunc (*ndpDispatcher) OnOnLinkPrefixDiscovered(tcpip.NICID, tcpip.Subnet) {\n}\n" } ]
Go
Apache License 2.0
google/gvisor
Rename DefaultRouter event to OffLinkRoute event This change prepares for a later change which supports the NDP Route Information option to discover more-specific routes, as per RFC 4191. Updates #6172. PiperOrigin-RevId: 379361330
259,891
14.06.2021 17:14:09
25,200
20c68160459a5f2393facc30f24be5770e628428
Cleanup lint messages
[ { "change_type": "MODIFY", "old_path": "test/syscalls/linux/accept_bind.cc", "new_path": "test/syscalls/linux/accept_bind.cc", "diff": "@@ -37,7 +37,8 @@ TEST_P(AllSocketPairTest, Listen) {\nsockets->first_addr_size()),\nSyscallSucceeds());\n- ASSERT_THAT(listen(sockets->first_fd(), /* backlog = */ 5),\n+ ASSERT_THAT(listen(sockets->first_fd(),\n+ /* backlog = */ 5), // NOLINT(bugprone-argument-comment)\nSyscallSucceeds());\n}\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/poll.cc", "new_path": "test/syscalls/linux/poll.cc", "diff": "@@ -116,7 +116,7 @@ void BlockingReadableTest(int16_t mask) {\n});\nnotify.WaitForNotification();\n- absl::SleepFor(absl::Seconds(1.0));\n+ absl::SleepFor(absl::Seconds(1));\n// Write some data to the pipe.\nchar s[] = \"foo\\n\";\n@@ -221,7 +221,7 @@ TEST_F(PollTest, BlockingEventPOLLHUP) {\n});\nnotify.WaitForNotification();\n- absl::SleepFor(absl::Seconds(1.0));\n+ absl::SleepFor(absl::Seconds(1));\n// Write some data and close the writer fd.\nfd1.reset();\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/prctl.cc", "new_path": "test/syscalls/linux/prctl.cc", "diff": "@@ -101,11 +101,11 @@ TEST(PrctlTest, NoNewPrivsPreservedAcrossCloneForkAndExecve) {\nint no_new_privs;\nASSERT_THAT(no_new_privs = prctl(PR_GET_NO_NEW_PRIVS, 0, 0, 0, 0),\nSyscallSucceeds());\n- ScopedThread([] {\n+ ScopedThread thread = ScopedThread([] {\nASSERT_THAT(prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0), SyscallSucceeds());\nEXPECT_THAT(prctl(PR_GET_NO_NEW_PRIVS, 0, 0, 0, 0),\nSyscallSucceedsWithValue(1));\n- ScopedThread([] {\n+ ScopedThread threadInner = ScopedThread([] {\nEXPECT_THAT(prctl(PR_GET_NO_NEW_PRIVS, 0, 0, 0, 0),\nSyscallSucceedsWithValue(1));\n// Note that these ASSERT_*s failing will only return from this thread,\n@@ -129,9 +129,11 @@ TEST(PrctlTest, NoNewPrivsPreservedAcrossCloneForkAndExecve) {\nEXPECT_THAT(prctl(PR_GET_NO_NEW_PRIVS, 0, 0, 0, 0),\nSyscallSucceedsWithValue(1));\n});\n+ threadInner.Join();\nEXPECT_THAT(prctl(PR_GET_NO_NEW_PRIVS, 0, 0, 0, 0),\nSyscallSucceedsWithValue(1));\n});\n+ thread.Join();\nEXPECT_THAT(prctl(PR_GET_NO_NEW_PRIVS, 0, 0, 0, 0),\nSyscallSucceedsWithValue(no_new_privs));\n}\n@@ -141,7 +143,7 @@ TEST(PrctlTest, PDeathSig) {\n// Make the new process' parent a separate thread since the parent death\n// signal fires when the parent *thread* exits.\n- ScopedThread([&] {\n+ ScopedThread thread = ScopedThread([&] {\nchild_pid = fork();\nTEST_CHECK(child_pid >= 0);\nif (child_pid == 0) {\n@@ -172,6 +174,7 @@ TEST(PrctlTest, PDeathSig) {\n// Suppress the SIGSTOP and detach from the child.\nASSERT_THAT(ptrace(PTRACE_DETACH, child_pid, 0, 0), SyscallSucceeds());\n});\n+ thread.Join();\n// The child should have been killed by its parent death SIGKILL.\nint status;\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/priority.cc", "new_path": "test/syscalls/linux/priority.cc", "diff": "@@ -72,7 +72,8 @@ TEST(SetpriorityTest, Implemented) {\n// No need to clear errno for setpriority():\n// \"The setpriority() call returns 0 if there is no error, or -1 if there is\"\n- EXPECT_THAT(setpriority(PRIO_PROCESS, /*who=*/0, /*nice=*/16),\n+ EXPECT_THAT(setpriority(PRIO_PROCESS, /*who=*/0,\n+ /*nice=*/16), // NOLINT(bugprone-argument-comment)\nSyscallSucceeds());\n}\n@@ -80,7 +81,8 @@ TEST(SetpriorityTest, Implemented) {\nTEST(Setpriority, InvalidWhich) {\nSKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_NICE)));\n- EXPECT_THAT(setpriority(/*which=*/3, /*who=*/0, /*nice=*/16),\n+ 
EXPECT_THAT(setpriority(/*which=*/3, /*who=*/0,\n+ /*nice=*/16), // NOLINT(bugprone-argument-comment)\nSyscallFailsWithErrno(EINVAL));\n}\n@@ -88,7 +90,8 @@ TEST(Setpriority, InvalidWhich) {\nTEST(SetpriorityTest, ValidWho) {\nSKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_NICE)));\n- EXPECT_THAT(setpriority(PRIO_PROCESS, getpid(), /*nice=*/16),\n+ EXPECT_THAT(setpriority(PRIO_PROCESS, getpid(),\n+ /*nice=*/16), // NOLINT(bugprone-argument-comment)\nSyscallSucceeds());\n}\n@@ -142,22 +145,26 @@ TEST(SetpriorityTest, OutsideRange) {\nSKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_NICE)));\n// Set niceval > 19\n- EXPECT_THAT(setpriority(PRIO_PROCESS, getpid(), /*nice=*/100),\n+ EXPECT_THAT(setpriority(PRIO_PROCESS, getpid(),\n+ /*nice=*/100), // NOLINT(bugprone-argument-comment)\nSyscallSucceeds());\nerrno = 0;\n// Test niceval truncated to 19\nEXPECT_THAT(getpriority(PRIO_PROCESS, getpid()),\n- SyscallSucceedsWithValue(/*maxnice=*/19));\n+ SyscallSucceedsWithValue(\n+ /*maxnice=*/19)); // NOLINT(bugprone-argument-comment)\n// Set niceval < -20\n- EXPECT_THAT(setpriority(PRIO_PROCESS, getpid(), /*nice=*/-100),\n+ EXPECT_THAT(setpriority(PRIO_PROCESS, getpid(),\n+ /*nice=*/-100), // NOLINT(bugprone-argument-comment)\nSyscallSucceeds());\nerrno = 0;\n// Test niceval truncated to -20\nEXPECT_THAT(getpriority(PRIO_PROCESS, getpid()),\n- SyscallSucceedsWithValue(/*minnice=*/-20));\n+ SyscallSucceedsWithValue(\n+ /*minnice=*/-20)); // NOLINT(bugprone-argument-comment)\n}\n// Process is not found when which=PRIO_PROCESS\n@@ -167,7 +174,7 @@ TEST(SetpriorityTest, InvalidWho) {\n// Flaky, but it's tough to avoid a race condition when finding an unused pid\nEXPECT_THAT(setpriority(PRIO_PROCESS,\n/*who=*/INT_MAX - 1,\n- /*nice=*/16),\n+ /*nice=*/16), // NOLINT(bugprone-argument-comment)\nSyscallFailsWithErrno(ESRCH));\n}\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/socket_test_util.cc", "new_path": "test/syscalls/linux/socket_test_util.cc", "diff": "@@ -85,7 +85,8 @@ Creator<SocketPair> AcceptBindSocketPairCreator(bool abstract, int domain,\nRETURN_ERROR_IF_SYSCALL_FAIL(\nbind(bound, AsSockAddr(&bind_addr), sizeof(bind_addr)));\nMaybeSave(); // Successful bind.\n- RETURN_ERROR_IF_SYSCALL_FAIL(listen(bound, /* backlog = */ 5));\n+ RETURN_ERROR_IF_SYSCALL_FAIL(\n+ listen(bound, /* backlog = */ 5)); // NOLINT(bugprone-argument-comment)\nMaybeSave(); // Successful listen.\nint connected;\n@@ -316,7 +317,8 @@ PosixErrorOr<T> BindIP(int fd, bool dual_stack) {\ntemplate <typename T>\nPosixErrorOr<T> TCPBindAndListen(int fd, bool dual_stack) {\nASSIGN_OR_RETURN_ERRNO(T addr, BindIP<T>(fd, dual_stack));\n- RETURN_ERROR_IF_SYSCALL_FAIL(listen(fd, /* backlog = */ 5));\n+ RETURN_ERROR_IF_SYSCALL_FAIL(\n+ listen(fd, /* backlog = */ 5)); // NOLINT(bugprone-argument-comment)\nreturn addr;\n}\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/uidgid.cc", "new_path": "test/syscalls/linux/uidgid.cc", "diff": "@@ -170,7 +170,9 @@ TEST(UidGidRootTest, SetgidNotFromThreadGroupLeader) {\nconst gid_t gid = absl::GetFlag(FLAGS_scratch_gid1);\n// NOTE(b/64676707): Do setgid in a separate thread so that we can test if\n// info.si_pid is set correctly.\n+ ScopedThread thread =\nScopedThread([gid] { ASSERT_THAT(setgid(gid), SyscallSucceeds()); });\n+ thread.Join();\nEXPECT_NO_ERRNO(CheckGIDs(gid, gid, gid));\n#pragma pop_macro(\"allow_setgid\")\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/uname.cc", "new_path": "test/syscalls/linux/uname.cc", "diff": 
"@@ -88,7 +88,7 @@ TEST(UnameTest, UnshareUTS) {\nstruct utsname init;\nASSERT_THAT(uname(&init), SyscallSucceeds());\n- ScopedThread([&]() {\n+ ScopedThread thread = ScopedThread([&]() {\nEXPECT_THAT(unshare(CLONE_NEWUTS), SyscallSucceeds());\nconstexpr char kHostname[] = \"wubbalubba\";\n@@ -97,6 +97,7 @@ TEST(UnameTest, UnshareUTS) {\nchar hostname[65];\nEXPECT_THAT(gethostname(hostname, sizeof(hostname)), SyscallSucceeds());\n});\n+ thread.Join();\nstruct utsname after;\nEXPECT_THAT(uname(&after), SyscallSucceeds());\n" } ]
Go
Apache License 2.0
google/gvisor
Cleanup lint messages PiperOrigin-RevId: 379380041
259,884
14.06.2021 18:47:16
25,200
bccc4461545655f4f1b5b03e83e6a1ededde48d8
Update debugging example Update the debugging example to use make to make sure the debuggable `runsc` binary is installed as a docker runtime before attempting to start a container. Also use nginx as an example container and Accept as an example break point since it's easy to trigger.
[ { "change_type": "MODIFY", "old_path": "g3doc/user_guide/debugging.md", "new_path": "g3doc/user_guide/debugging.md", "diff": "@@ -61,24 +61,39 @@ You can debug gVisor like any other Golang program. If you're running with\nDocker, you'll need to find the sandbox PID and attach the debugger as root.\nHere is an example:\n+Install a runsc with debug symbols (you can also use the\n+[nightly release](../install/#nightly)):\n+\n```bash\n-# Get a runsc with debug symbols (download nightly or build with symbols).\n-bazel build -c dbg //runsc:runsc\n+make dev BAZEL_OPTIONS=\"-c dbg\"\n+```\n-# Start the container you want to debug.\n-docker run --runtime=runsc --rm --name=test -d alpine sleep 1000\n+Start the container you want to debug using the runsc runtime with debug\n+options:\n-# Find the sandbox PID.\n-docker inspect test | grep Pid | head -n 1\n+```bash\n+docker run --runtime=$(git branch --show-current)-d --rm --name=test -p 8080:80 -d nginx\n+```\n-# Attach your favorite debugger.\n-sudo dlv attach <PID>\n+Find the PID and attach your favorite debugger:\n+\n+```bash\n+sudo dlv attach $(docker inspect test | grep Pid | head -n 1 | grep -oe \"[0-9]*\")\n+```\n-# Set a breakpoint and resume.\n-break mm.MemoryManager.MMap\n+Set a breakpoint for accept:\n+\n+```bash\n+break gvisor.dev/gvisor/pkg/sentry/socket/netstack.(*SocketOperations).Accept\ncontinue\n```\n+In a different window connect to nginx to trigger the breakpoint:\n+\n+```bash\n+curl http://localhost:8080/\n+```\n+\n## Profiling\n`runsc` integrates with Go profiling tools and gives you easy commands to\n" } ]
Go
Apache License 2.0
google/gvisor
Update debugging example Update the debugging example to use make to make sure the debuggable `runsc` binary is installed as a docker runtime before attempting to start a container. Also use nginx as an example container and Accept as an example break point since it's easy to trigger. PiperOrigin-RevId: 379393892
260,004
14.06.2021 23:06:34
25,200
3a8ba8ed9d222ac5084f14219dbbd3d3cddbdb8e
Support parsing Prf field in RAs This change prepares for a later change which actually handles the Prf field in RAs to discover default routers with preference values, as per RFC 4191. Updates Test: header_test.TestNDPRouterAdvert
[ { "change_type": "MODIFY", "old_path": "pkg/tcpip/header/ndp_router_advert.go", "new_path": "pkg/tcpip/header/ndp_router_advert.go", "diff": "@@ -19,12 +19,72 @@ import (\n\"time\"\n)\n+// NDPRoutePreference is the preference values for default routers or\n+// more-specific routes.\n+//\n+// As per RFC 4191 section 2.1,\n+//\n+// Default router preferences and preferences for more-specific routes\n+// are encoded the same way.\n+//\n+// Preference values are encoded as a two-bit signed integer, as\n+// follows:\n+//\n+// 01 High\n+// 00 Medium (default)\n+// 11 Low\n+// 10 Reserved - MUST NOT be sent\n+//\n+// Note that implementations can treat the value as a two-bit signed\n+// integer.\n+//\n+// Having just three values reinforces that they are not metrics and\n+// more values do not appear to be necessary for reasonable scenarios.\n+type NDPRoutePreference uint8\n+\n+const (\n+ // HighRoutePreference indicates a high preference, as per\n+ // RFC 4191 section 2.1.\n+ HighRoutePreference NDPRoutePreference = 0b01\n+\n+ // MediumRoutePreference indicates a medium preference, as per\n+ // RFC 4191 section 2.1.\n+ //\n+ // This is the default preference value.\n+ MediumRoutePreference = 0b00\n+\n+ // LowRoutePreference indicates a low preference, as per\n+ // RFC 4191 section 2.1.\n+ LowRoutePreference = 0b11\n+\n+ // ReservedRoutePreference is a reserved preference value, as per\n+ // RFC 4191 section 2.1.\n+ //\n+ // It MUST NOT be sent.\n+ ReservedRoutePreference = 0b10\n+)\n+\n// NDPRouterAdvert is an NDP Router Advertisement message. It will only contain\n// the body of an ICMPv6 packet.\n//\n-// See RFC 4861 section 4.2 for more details.\n+// See RFC 4861 section 4.2 and RFC 4191 section 2.2 for more details.\ntype NDPRouterAdvert []byte\n+// As per RFC 4191 section 2.2,\n+//\n+// 0 1 2 3\n+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1\n+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+\n+// | Type | Code | Checksum |\n+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+\n+// | Cur Hop Limit |M|O|H|Prf|Resvd| Router Lifetime |\n+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+\n+// | Reachable Time |\n+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+\n+// | Retrans Timer |\n+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+\n+// | Options ...\n+// +-+-+-+-+-+-+-+-+-+-+-+-\nconst (\n// NDPRAMinimumSize is the minimum size of a valid NDP Router\n// Advertisement message (body of an ICMPv6 packet).\n@@ -47,6 +107,14 @@ const (\n// within the bit-field/flags byte of an NDPRouterAdvert.\nndpRAOtherConfFlagMask = (1 << 6)\n+ // ndpDefaultRouterPreferenceShift is the shift of the Prf (Default Router\n+ // Preference) field within the flags byte of an NDPRouterAdvert.\n+ ndpDefaultRouterPreferenceShift = 3\n+\n+ // ndpDefaultRouterPreferenceMask is the mask of the Prf (Default Router\n+ // Preference) field within the flags byte of an NDPRouterAdvert.\n+ ndpDefaultRouterPreferenceMask = (0b11 << ndpDefaultRouterPreferenceShift)\n+\n// ndpRARouterLifetimeOffset is the start of the 2-byte Router Lifetime\n// field within an NDPRouterAdvert.\nndpRARouterLifetimeOffset = 2\n@@ -80,6 +148,11 @@ func (b NDPRouterAdvert) OtherConfFlag() bool {\nreturn b[ndpRAFlagsOffset]&ndpRAOtherConfFlagMask != 0\n}\n+// DefaultRouterPreference returns the Default Router Preference field.\n+func (b NDPRouterAdvert) DefaultRouterPreference() NDPRoutePreference {\n+ return 
NDPRoutePreference((b[ndpRAFlagsOffset] & ndpDefaultRouterPreferenceMask) >> ndpDefaultRouterPreferenceShift)\n+}\n+\n// RouterLifetime returns the lifetime associated with the default router. A\n// value of 0 means the source of the Router Advertisement is not a default\n// router and SHOULD NOT appear on the default router list. Note, a value of 0\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/header/ndp_test.go", "new_path": "pkg/tcpip/header/ndp_test.go", "diff": "@@ -126,36 +126,83 @@ func TestNDPNeighborAdvert(t *testing.T) {\n}\nfunc TestNDPRouterAdvert(t *testing.T) {\n+ tests := []struct {\n+ hopLimit uint8\n+ managedFlag, otherConfFlag bool\n+ prf NDPRoutePreference\n+ routerLifetimeS uint16\n+ reachableTimeMS, retransTimerMS uint32\n+ }{\n+ {\n+ hopLimit: 1,\n+ managedFlag: false,\n+ otherConfFlag: true,\n+ prf: HighRoutePreference,\n+ routerLifetimeS: 2,\n+ reachableTimeMS: 3,\n+ retransTimerMS: 4,\n+ },\n+ {\n+ hopLimit: 64,\n+ managedFlag: true,\n+ otherConfFlag: false,\n+ prf: LowRoutePreference,\n+ routerLifetimeS: 258,\n+ reachableTimeMS: 78492,\n+ retransTimerMS: 13213,\n+ },\n+ }\n+\n+ for i, test := range tests {\n+ t.Run(fmt.Sprintf(\"%d\", i), func(t *testing.T) {\n+ flags := uint8(0)\n+ if test.managedFlag {\n+ flags |= 1 << 7\n+ }\n+ if test.otherConfFlag {\n+ flags |= 1 << 6\n+ }\n+ flags |= uint8(test.prf) << 3\n+\nb := []byte{\n- 64, 128, 1, 2,\n+ test.hopLimit, flags, 1, 2,\n3, 4, 5, 6,\n7, 8, 9, 10,\n}\n+ binary.BigEndian.PutUint16(b[2:], test.routerLifetimeS)\n+ binary.BigEndian.PutUint32(b[4:], test.reachableTimeMS)\n+ binary.BigEndian.PutUint32(b[8:], test.retransTimerMS)\nra := NDPRouterAdvert(b)\n- if got := ra.CurrHopLimit(); got != 64 {\n- t.Errorf(\"got ra.CurrHopLimit = %d, want = 64\", got)\n+ if got := ra.CurrHopLimit(); got != test.hopLimit {\n+ t.Errorf(\"got ra.CurrHopLimit() = %d, want = %d\", got, test.hopLimit)\n}\n- if got := ra.ManagedAddrConfFlag(); !got {\n- t.Errorf(\"got ManagedAddrConfFlag = false, want = true\")\n+ if got := ra.ManagedAddrConfFlag(); got != test.managedFlag {\n+ t.Errorf(\"got ManagedAddrConfFlag() = %t, want = %t\", got, test.managedFlag)\n}\n- if got := ra.OtherConfFlag(); got {\n- t.Errorf(\"got OtherConfFlag = true, want = false\")\n+ if got := ra.OtherConfFlag(); got != test.otherConfFlag {\n+ t.Errorf(\"got OtherConfFlag() = %t, want = %t\", got, test.otherConfFlag)\n}\n- if got, want := ra.RouterLifetime(), time.Second*258; got != want {\n- t.Errorf(\"got ra.RouterLifetime = %d, want = %d\", got, want)\n+ if got := ra.DefaultRouterPreference(); got != test.prf {\n+ t.Errorf(\"got DefaultRouterPreference() = %d, want = %d\", got, test.prf)\n}\n- if got, want := ra.ReachableTime(), time.Millisecond*50595078; got != want {\n- t.Errorf(\"got ra.ReachableTime = %d, want = %d\", got, want)\n+ if got, want := ra.RouterLifetime(), time.Second*time.Duration(test.routerLifetimeS); got != want {\n+ t.Errorf(\"got ra.RouterLifetime() = %d, want = %d\", got, want)\n}\n- if got, want := ra.RetransTimer(), time.Millisecond*117967114; got != want {\n- t.Errorf(\"got ra.RetransTimer = %d, want = %d\", got, want)\n+ if got, want := ra.ReachableTime(), time.Millisecond*time.Duration(test.reachableTimeMS); got != want {\n+ t.Errorf(\"got ra.ReachableTime() = %d, want = %d\", got, want)\n+ }\n+\n+ if got, want := ra.RetransTimer(), time.Millisecond*time.Duration(test.retransTimerMS); got != want {\n+ t.Errorf(\"got ra.RetransTimer() = %d, want = %d\", got, want)\n+ }\n+ })\n}\n}\n" } ]
Go
Apache License 2.0
google/gvisor
Support parsing Prf field in RAs This change prepares for a later change which actually handles the Prf field in RAs to discover default routers with preference values, as per RFC 4191. Updates #6172. Test: header_test.TestNDPRouterAdvert PiperOrigin-RevId: 379421710
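The Prf field above occupies two bits of the RA flags byte, just below the M (bit 7) and O (bit 6) flags, which is why the diff uses a shift of 3 and a two-bit mask. Below is a minimal standalone sketch of the same shift-and-mask extraction; it does not use the gvisor header package, and every name in it is illustrative.

```go
package main

import "fmt"

// Route preference values from RFC 4191 section 2.1.
const (
	highPrf     uint8 = 0b01
	mediumPrf   uint8 = 0b00 // default
	lowPrf      uint8 = 0b11
	reservedPrf uint8 = 0b10 // reserved, must not be sent
)

// prfFromFlags pulls the two-bit Prf field out of the RA flags byte. The field
// sits just below the M (bit 7) and O (bit 6) flags, in bits 4..3, hence the
// shift of 3 and the two-bit mask.
func prfFromFlags(flags uint8) uint8 {
	const shift = 3
	const mask = uint8(0b11) << shift
	return (flags & mask) >> shift
}

func main() {
	// Flags byte with the M flag set and Prf = High.
	flags := uint8(1<<7) | highPrf<<3
	fmt.Println(prfFromFlags(flags) == highPrf) // true
}
```

Per the RFC text quoted in the diff, the reserved value 0b10 must not be sent by an advertising router.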
259,891
15.06.2021 11:29:47
25,200
488ba4176ed3b255660ca9b39982033101bfc2a4
Deflake SocketInetLoopbackTest.TCPBacklog The value can be off by one depending on the kernel we're running. Tested with --runs_per_test=1000.
[ { "change_type": "MODIFY", "old_path": "test/syscalls/linux/socket_inet_loopback.cc", "new_path": "test/syscalls/linux/socket_inet_loopback.cc", "diff": "@@ -368,7 +368,8 @@ TEST_P(SocketInetLoopbackTest, TCPListenShutdown) {\nTestAddress const& connector = param.connector;\nconstexpr int kBacklog = 2;\n- constexpr int kFDs = kBacklog + 1;\n+ // See the comment in TCPBacklog for why this isn't kBacklog + 1.\n+ constexpr int kFDs = kBacklog;\n// Create the listening socket.\nFileDescriptor listen_fd = ASSERT_NO_ERRNO_AND_VALUE(\n@@ -454,6 +455,8 @@ TEST_P(SocketInetLoopbackTest, TCPListenClose) {\nuint16_t const port =\nASSERT_NO_ERRNO_AND_VALUE(AddrPort(listener.family(), listen_addr));\n+ // Connect repeatedly, keeping each connection open. After kBacklog\n+ // connections, we'll start getting EINPROGRESS.\nsockaddr_storage conn_addr = connector.addr;\nASSERT_NO_ERRNO(SetAddrPort(connector.family(), &conn_addr, port));\nstd::vector<FileDescriptor> clients;\n@@ -608,6 +611,8 @@ TEST_P(SocketInetLoopbackTest, TCPListenShutdownDuringConnect) {\nvoid TestListenHangupConnectingRead(const TestParam& param,\nvoid (*hangup)(FileDescriptor&)) {\n+ constexpr int kTimeout = 10000;\n+\nTestAddress const& listener = param.listener;\nTestAddress const& connector = param.connector;\n@@ -637,14 +642,33 @@ void TestListenHangupConnectingRead(const TestParam& param,\nsockaddr_storage conn_addr = connector.addr;\nASSERT_NO_ERRNO(SetAddrPort(connector.family(), &conn_addr, port));\nFileDescriptor established_client = ASSERT_NO_ERRNO_AND_VALUE(\n- Socket(connector.family(), SOCK_STREAM, IPPROTO_TCP));\n- ASSERT_THAT(connect(established_client.get(), AsSockAddr(&conn_addr),\n+ Socket(connector.family(), SOCK_STREAM | SOCK_NONBLOCK, IPPROTO_TCP));\n+ int ret = connect(established_client.get(), AsSockAddr(&conn_addr),\n+ connector.addr_len);\n+ if (ret != 0) {\n+ EXPECT_THAT(ret, SyscallFailsWithErrno(EINPROGRESS));\n+ }\n+\n+ // On some kernels a backlog of 0 means no backlog, while on others it means a\n+ // backlog of 1. See commit c609e6aae4efcf383fe86b195d1b060befcb3666 for more\n+ // explanation.\n+ //\n+ // If we timeout connecting to loopback, we're on a kernel with no backlog.\n+ pollfd pfd = {\n+ .fd = established_client.get(),\n+ .events = POLLIN | POLLOUT,\n+ };\n+ if (!poll(&pfd, 1, kTimeout)) {\n+ // We're on one of those kernels. 
It should be impossible to establish the\n+ // connection, so connect will always return EALREADY.\n+ EXPECT_THAT(connect(established_client.get(), AsSockAddr(&conn_addr),\nconnector.addr_len),\n- SyscallSucceeds());\n+ SyscallFailsWithErrno(EALREADY));\n+ return;\n+ }\n// Ensure that the accept queue has the completed connection.\n- constexpr int kTimeout = 10000;\n- pollfd pfd = {\n+ pfd = {\n.fd = listen_fd.get(),\n.events = POLLIN,\n};\n@@ -654,7 +678,7 @@ void TestListenHangupConnectingRead(const TestParam& param,\nFileDescriptor connecting_client = ASSERT_NO_ERRNO_AND_VALUE(\nSocket(connector.family(), SOCK_STREAM | SOCK_NONBLOCK, IPPROTO_TCP));\n// Keep the last client in connecting state.\n- int ret = connect(connecting_client.get(), AsSockAddr(&conn_addr),\n+ ret = connect(connecting_client.get(), AsSockAddr(&conn_addr),\nconnector.addr_len);\nif (ret != 0) {\nEXPECT_THAT(ret, SyscallFailsWithErrno(EINPROGRESS));\n@@ -795,7 +819,8 @@ TEST_P(SocketInetLoopbackTest, TCPAcceptBacklogSizes) {\nif (backlog < 0) {\nexpected_accepts = 1024;\n} else {\n- expected_accepts = backlog + 1;\n+ // See the comment in TCPBacklog for why this isn't backlog + 1.\n+ expected_accepts = backlog;\n}\nfor (int i = 0; i < expected_accepts; i++) {\nSCOPED_TRACE(absl::StrCat(\"i=\", i));\n@@ -896,7 +921,11 @@ TEST_P(SocketInetLoopbackTest, TCPBacklog) {\n// enqueuing established connections to the accept queue, newer SYNs could\n// still be replied to causing those client connections would be accepted as\n// we start dequeuing the queue.\n- ASSERT_GE(accepted_conns, kBacklogSize + 1);\n+ //\n+ // On some kernels this can value can be off by one, so we don't add 1 to\n+ // kBacklogSize. See commit c609e6aae4efcf383fe86b195d1b060befcb3666 for more\n+ // explanation.\n+ ASSERT_GE(accepted_conns, kBacklogSize);\nASSERT_GE(client_conns, accepted_conns);\n}\n@@ -931,7 +960,9 @@ TEST_P(SocketInetLoopbackTest, TCPBacklogAcceptAll) {\n// Fill up the accept queue and trigger more client connections which would be\n// waiting to be accepted.\n- std::array<FileDescriptor, kBacklog + 1> established_clients;\n+ //\n+ // See the comment in TCPBacklog for why this isn't backlog + 1.\n+ std::array<FileDescriptor, kBacklog> established_clients;\nfor (auto& fd : established_clients) {\nfd = ASSERT_NO_ERRNO_AND_VALUE(\nSocket(connector.family(), SOCK_STREAM, IPPROTO_TCP));\n" } ]
Go
Apache License 2.0
google/gvisor
Deflake SocketInetLoopbackTest.TCPBacklog The value can be off by one depending on the kernel we're running. Tested with --runs_per_test=1000. PiperOrigin-RevId: 379535390
259,853
16.06.2021 12:55:27
25,200
47149b7c4275ddd4404d86eddab6feab4f059ed3
kvm: mark UpperHalf PTE-s as global UpperHalf is shared with all address spaces.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/platform/kvm/machine_amd64.go", "new_path": "pkg/sentry/platform/kvm/machine_amd64.go", "diff": "@@ -485,7 +485,7 @@ func (m *machine) mapUpperHalf(pageTable *pagetables.PageTables) {\npageTable.Map(\nhostarch.Addr(ring0.KernelStartAddress|r.virtual),\nr.length,\n- pagetables.MapOpts{AccessType: hostarch.Execute},\n+ pagetables.MapOpts{AccessType: hostarch.Execute, Global: true},\nphysical)\n}\n})\n@@ -498,7 +498,7 @@ func (m *machine) mapUpperHalf(pageTable *pagetables.PageTables) {\npageTable.Map(\nhostarch.Addr(ring0.KernelStartAddress|start),\nregionLen,\n- pagetables.MapOpts{AccessType: hostarch.ReadWrite},\n+ pagetables.MapOpts{AccessType: hostarch.ReadWrite, Global: true},\nphysical)\n}\n}\n" } ]
Go
Apache License 2.0
google/gvisor
kvm: mark UpperHalf PTE-s as global UpperHalf is shared with all address spaces. PiperOrigin-RevId: 379790539
259,891
16.06.2021 14:40:21
25,200
34152da7e5c3c81253ffe4a433c95789d3002a8e
Fix broken hdrincl test Fixes
[ { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/raw/endpoint.go", "new_path": "pkg/tcpip/transport/raw/endpoint.go", "diff": "@@ -286,26 +286,6 @@ func (e *endpoint) write(p tcpip.Payloader, opts tcpip.WriteOptions) (int64, tcp\nreturn nil, nil, nil, &tcpip.ErrBadBuffer{}\n}\n- // If this is an unassociated socket and callee provided a nonzero\n- // destination address, route using that address.\n- if e.ops.GetHeaderIncluded() {\n- ip := header.IPv4(payloadBytes)\n- if !ip.IsValid(len(payloadBytes)) {\n- return nil, nil, nil, &tcpip.ErrInvalidOptionValue{}\n- }\n- dstAddr := ip.DestinationAddress()\n- // Update dstAddr with the address in the IP header, unless\n- // opts.To is set (e.g. if sendto specifies a specific\n- // address).\n- if dstAddr != tcpip.Address([]byte{0, 0, 0, 0}) && opts.To == nil {\n- opts.To = &tcpip.FullAddress{\n- NIC: 0, // NIC is unset.\n- Addr: dstAddr, // The address from the payload.\n- Port: 0, // There are no ports here.\n- }\n- }\n- }\n-\n// Did the user caller provide a destination? If not, use the connected\n// destination.\nif opts.To == nil {\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/raw_socket_hdrincl.cc", "new_path": "test/syscalls/linux/raw_socket_hdrincl.cc", "diff": "@@ -177,11 +177,10 @@ TEST_F(RawHDRINCL, ConnectToLoopback) {\nSyscallSucceeds());\n}\n-// FIXME(gvisor.dev/issue/3159): Test currently flaky.\n-TEST_F(RawHDRINCL, DISABLED_SendWithoutConnectSucceeds) {\n+TEST_F(RawHDRINCL, SendWithoutConnectFails) {\nstruct iphdr hdr = LoopbackHeader();\nASSERT_THAT(send(socket_, &hdr, sizeof(hdr), 0),\n- SyscallSucceedsWithValue(sizeof(hdr)));\n+ SyscallFailsWithErrno(EDESTADDRREQ));\n}\n// HDRINCL implies write-only. Verify that we can't read a packet sent to\n" } ]
Go
Apache License 2.0
google/gvisor
Fix broken hdrincl test Fixes #3159. PiperOrigin-RevId: 379814096
259,891
17.06.2021 13:33:12
25,200
0f5c1f5eafb2cc67a9148bdf346b6083e5a8480c
raw sockets: don't overwrite destination address Also makes the behavior of raw sockets WRT fragmentation clearer, and makes the ICMPv4 header-length check explicit. Fixes
[ { "change_type": "MODIFY", "old_path": "pkg/tcpip/network/ip_test.go", "new_path": "pkg/tcpip/network/ip_test.go", "diff": "@@ -88,6 +88,7 @@ type testObject struct {\ndataCalls int\ncontrolCalls int\n+ rawCalls int\n}\n// checkValues verifies that the transport protocol, data contents, src & dst\n@@ -148,6 +149,10 @@ func (t *testObject) DeliverTransportError(local, remote tcpip.Address, net tcpi\nt.controlCalls++\n}\n+func (t *testObject) DeliverRawPacket(tcpip.TransportProtocolNumber, *stack.PacketBuffer) {\n+ t.rawCalls++\n+}\n+\n// Attach is only implemented to satisfy the LinkEndpoint interface.\nfunc (*testObject) Attach(stack.NetworkDispatcher) {}\n@@ -717,7 +722,10 @@ func TestReceive(t *testing.T) {\n}\ntest.handlePacket(t, ep, &nic)\nif nic.testObject.dataCalls != 1 {\n- t.Errorf(\"Bad number of data calls: got %x, want 1\", nic.testObject.dataCalls)\n+ t.Errorf(\"Bad number of data calls: got %d, want 1\", nic.testObject.dataCalls)\n+ }\n+ if nic.testObject.rawCalls != 1 {\n+ t.Errorf(\"Bad number of raw calls: got %d, want 1\", nic.testObject.rawCalls)\n}\nif got := stat.Value(); got != 1 {\nt.Errorf(\"got s.Stats().IP.PacketsReceived.Value() = %d, want = 1\", got)\n@@ -968,7 +976,10 @@ func TestIPv4FragmentationReceive(t *testing.T) {\nep.HandlePacket(pkt)\nif nic.testObject.dataCalls != 0 {\n- t.Fatalf(\"Bad number of data calls: got %x, want 0\", nic.testObject.dataCalls)\n+ t.Fatalf(\"Bad number of data calls: got %d, want 0\", nic.testObject.dataCalls)\n+ }\n+ if nic.testObject.rawCalls != 0 {\n+ t.Errorf(\"Bad number of raw calls: got %d, want 0\", nic.testObject.rawCalls)\n}\n// Send second segment.\n@@ -977,7 +988,10 @@ func TestIPv4FragmentationReceive(t *testing.T) {\n})\nep.HandlePacket(pkt)\nif nic.testObject.dataCalls != 1 {\n- t.Fatalf(\"Bad number of data calls: got %x, want 1\", nic.testObject.dataCalls)\n+ t.Fatalf(\"Bad number of data calls: got %d, want 1\", nic.testObject.dataCalls)\n+ }\n+ if nic.testObject.rawCalls != 1 {\n+ t.Errorf(\"Bad number of raw calls: got %d, want 1\", nic.testObject.rawCalls)\n}\n}\n@@ -1310,7 +1324,7 @@ func TestWriteHeaderIncludedPacket(t *testing.T) {\nProtocol: transportProto,\nTTL: ipv4.DefaultTTL,\nSrcAddr: src,\n- DstAddr: header.IPv4Any,\n+ DstAddr: remoteIPv4Addr,\n})\nreturn hdr.View().ToVectorisedView()\n},\n@@ -1351,7 +1365,7 @@ func TestWriteHeaderIncludedPacket(t *testing.T) {\nProtocol: transportProto,\nTTL: ipv4.DefaultTTL,\nSrcAddr: src,\n- DstAddr: header.IPv4Any,\n+ DstAddr: remoteIPv4Addr,\n})\nip.SetHeaderLength(header.IPv4MinimumSize - 1)\nreturn hdr.View().ToVectorisedView()\n@@ -1370,7 +1384,7 @@ func TestWriteHeaderIncludedPacket(t *testing.T) {\nProtocol: transportProto,\nTTL: ipv4.DefaultTTL,\nSrcAddr: src,\n- DstAddr: header.IPv4Any,\n+ DstAddr: remoteIPv4Addr,\n})\nreturn buffer.View(ip[:len(ip)-1]).ToVectorisedView()\n},\n@@ -1388,7 +1402,7 @@ func TestWriteHeaderIncludedPacket(t *testing.T) {\nProtocol: transportProto,\nTTL: ipv4.DefaultTTL,\nSrcAddr: src,\n- DstAddr: header.IPv4Any,\n+ DstAddr: remoteIPv4Addr,\n})\nreturn buffer.View(ip).ToVectorisedView()\n},\n@@ -1430,7 +1444,7 @@ func TestWriteHeaderIncludedPacket(t *testing.T) {\nProtocol: transportProto,\nTTL: ipv4.DefaultTTL,\nSrcAddr: src,\n- DstAddr: header.IPv4Any,\n+ DstAddr: remoteIPv4Addr,\nOptions: ipv4Options,\n})\nreturn hdr.View().ToVectorisedView()\n@@ -1469,7 +1483,7 @@ func TestWriteHeaderIncludedPacket(t *testing.T) {\nProtocol: transportProto,\nTTL: ipv4.DefaultTTL,\nSrcAddr: src,\n- DstAddr: header.IPv4Any,\n+ DstAddr: 
remoteIPv4Addr,\nOptions: ipv4Options,\n})\nvv := buffer.View(ip).ToVectorisedView()\n@@ -1515,7 +1529,7 @@ func TestWriteHeaderIncludedPacket(t *testing.T) {\nTransportProtocol: transportProto,\nHopLimit: ipv6.DefaultTTL,\nSrcAddr: src,\n- DstAddr: header.IPv4Any,\n+ DstAddr: remoteIPv6Addr,\n})\nreturn hdr.View().ToVectorisedView()\n},\n@@ -1560,7 +1574,7 @@ func TestWriteHeaderIncludedPacket(t *testing.T) {\nTransportProtocol: tcpip.TransportProtocolNumber(header.IPv6FragmentExtHdrIdentifier),\nHopLimit: ipv6.DefaultTTL,\nSrcAddr: src,\n- DstAddr: header.IPv4Any,\n+ DstAddr: remoteIPv6Addr,\n})\nreturn hdr.View().ToVectorisedView()\n},\n@@ -1595,7 +1609,7 @@ func TestWriteHeaderIncludedPacket(t *testing.T) {\nTransportProtocol: transportProto,\nHopLimit: ipv6.DefaultTTL,\nSrcAddr: src,\n- DstAddr: header.IPv4Any,\n+ DstAddr: remoteIPv6Addr,\n})\nreturn buffer.View(ip).ToVectorisedView()\n},\n@@ -1630,7 +1644,7 @@ func TestWriteHeaderIncludedPacket(t *testing.T) {\nTransportProtocol: transportProto,\nHopLimit: ipv6.DefaultTTL,\nSrcAddr: src,\n- DstAddr: header.IPv4Any,\n+ DstAddr: remoteIPv4Addr,\n})\nreturn buffer.View(ip[:len(ip)-1]).ToVectorisedView()\n},\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/network/ipv4/ipv4.go", "new_path": "pkg/tcpip/network/ipv4/ipv4.go", "diff": "@@ -614,10 +614,6 @@ func (e *endpoint) WriteHeaderIncludedPacket(r *stack.Route, pkt *stack.PacketBu\nipH.SetSourceAddress(r.LocalAddress())\n}\n- // Set the destination. If the packet already included a destination, it will\n- // be part of the route anyways.\n- ipH.SetDestinationAddress(r.RemoteAddress())\n-\n// Set the packet ID when zero.\nif ipH.ID() == 0 {\n// RFC 6864 section 4.3 mandates uniqueness of ID values for\n@@ -860,6 +856,13 @@ func (e *endpoint) handleLocalPacket(pkt *stack.PacketBuffer, canSkipRXChecksum\n}\nfunc (e *endpoint) handleValidatedPacket(h header.IPv4, pkt *stack.PacketBuffer, inNICName string) {\n+ // Raw socket packets are delivered based solely on the transport protocol\n+ // number. We only require that the packet be valid IPv4, and that they not\n+ // be fragmented.\n+ if !h.More() && h.FragmentOffset() == 0 {\n+ e.dispatcher.DeliverRawPacket(h.TransportProtocol(), pkt)\n+ }\n+\npkt.NICID = e.nic.ID()\nstats := e.stats\nstats.ip.ValidPacketsReceived.Increment()\n@@ -995,6 +998,9 @@ func (e *endpoint) handleValidatedPacket(h header.IPv4, pkt *stack.PacketBuffer,\n// to do it here.\nh.SetTotalLength(uint16(pkt.Data().Size() + len(h)))\nh.SetFlagsFragmentOffset(0, 0)\n+\n+ // Now that the packet is reassembled, it can be sent to raw sockets.\n+ e.dispatcher.DeliverRawPacket(h.TransportProtocol(), pkt)\n}\nstats.ip.PacketsDelivered.Increment()\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/network/ipv6/icmp_test.go", "new_path": "pkg/tcpip/network/ipv6/icmp_test.go", "diff": "@@ -90,6 +90,10 @@ func (*stubDispatcher) DeliverTransportPacket(tcpip.TransportProtocolNumber, *st\nreturn stack.TransportPacketHandled\n}\n+func (*stubDispatcher) DeliverRawPacket(tcpip.TransportProtocolNumber, *stack.PacketBuffer) {\n+ // No-op.\n+}\n+\nvar _ stack.NetworkInterface = (*testInterface)(nil)\ntype testInterface struct {\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/network/ipv6/ipv6.go", "new_path": "pkg/tcpip/network/ipv6/ipv6.go", "diff": "@@ -928,10 +928,6 @@ func (e *endpoint) WriteHeaderIncludedPacket(r *stack.Route, pkt *stack.PacketBu\nipH.SetSourceAddress(r.LocalAddress())\n}\n- // Set the destination. 
If the packet already included a destination, it will\n- // be part of the route anyways.\n- ipH.SetDestinationAddress(r.RemoteAddress())\n-\n// Populate the packet buffer's network header and don't allow an invalid\n// packet to be sent.\n//\n@@ -1128,6 +1124,10 @@ func (e *endpoint) handleLocalPacket(pkt *stack.PacketBuffer, canSkipRXChecksum\n}\nfunc (e *endpoint) handleValidatedPacket(h header.IPv6, pkt *stack.PacketBuffer, inNICName string) {\n+ // Raw socket packets are delivered based solely on the transport protocol\n+ // number. We only require that the packet be valid IPv6.\n+ e.dispatcher.DeliverRawPacket(h.TransportProtocol(), pkt)\n+\npkt.NICID = e.nic.ID()\nstats := e.stats.ip\nstats.ValidPacketsReceived.Increment()\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/stack/nic.go", "new_path": "pkg/tcpip/stack/nic.go", "diff": "@@ -779,11 +779,6 @@ func (n *nic) DeliverTransportPacket(protocol tcpip.TransportProtocolNumber, pkt\ntransProto := state.proto\n- // Raw socket packets are delivered based solely on the transport\n- // protocol number. We do not inspect the payload to ensure it's\n- // validly formed.\n- n.stack.demux.deliverRawPacket(protocol, pkt)\n-\n// TransportHeader is empty only when pkt is an ICMP packet or was reassembled\n// from fragments.\nif pkt.TransportHeader().View().IsEmpty() {\n@@ -877,6 +872,17 @@ func (n *nic) DeliverTransportError(local, remote tcpip.Address, net tcpip.Netwo\n}\n}\n+// DeliverRawPacket implements TransportDispatcher.\n+func (n *nic) DeliverRawPacket(protocol tcpip.TransportProtocolNumber, pkt *PacketBuffer) {\n+ // For ICMPv4 only we validate the header length for compatibility with\n+ // raw(7) ICMP_FILTER. The same check is made in Linux here:\n+ // https://github.com/torvalds/linux/blob/70585216/net/ipv4/raw.c#L189.\n+ if protocol == header.ICMPv4ProtocolNumber && pkt.TransportHeader().View().Size()+pkt.Data().Size() < header.ICMPv4MinimumSize {\n+ return\n+ }\n+ n.stack.demux.deliverRawPacket(protocol, pkt)\n+}\n+\n// ID implements NetworkInterface.\nfunc (n *nic) ID() tcpip.NICID {\nreturn n.id\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/stack/registration.go", "new_path": "pkg/tcpip/stack/registration.go", "diff": "@@ -265,6 +265,11 @@ type TransportDispatcher interface {\n//\n// DeliverTransportError takes ownership of the packet buffer.\nDeliverTransportError(local, remote tcpip.Address, _ tcpip.NetworkProtocolNumber, _ tcpip.TransportProtocolNumber, _ TransportError, _ *PacketBuffer)\n+\n+ // DeliverRawPacket delivers a packet to any subscribed raw sockets.\n+ //\n+ // DeliverRawPacket does NOT take ownership of the packet buffer.\n+ DeliverRawPacket(tcpip.TransportProtocolNumber, *PacketBuffer)\n}\n// PacketLooping specifies where an outbound packet should be sent.\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/stack/transport_demuxer.go", "new_path": "pkg/tcpip/stack/transport_demuxer.go", "diff": "@@ -16,6 +16,7 @@ package stack\nimport (\n\"fmt\"\n+\n\"gvisor.dev/gvisor/pkg/sync\"\n\"gvisor.dev/gvisor/pkg/tcpip\"\n\"gvisor.dev/gvisor/pkg/tcpip/hash/jenkins\"\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/packet/endpoint.go", "new_path": "pkg/tcpip/transport/packet/endpoint.go", "diff": "@@ -424,7 +424,6 @@ func (ep *endpoint) HandlePacket(nicID tcpip.NICID, localAddr tcpip.LinkAddress,\ndefault:\npanic(fmt.Sprintf(\"unexpected PktType in pkt: %+v\", pkt))\n}\n-\n} else {\n// Raw packets need their ethernet headers prepended before\n// queueing.\n" }, { "change_type": 
"MODIFY", "old_path": "test/syscalls/linux/raw_socket_hdrincl.cc", "new_path": "test/syscalls/linux/raw_socket_hdrincl.cc", "diff": "@@ -281,9 +281,6 @@ TEST_F(RawHDRINCL, SendAndReceive) {\n// Send and receive a packet where the sendto address is not the same as the\n// provided destination.\nTEST_F(RawHDRINCL, SendAndReceiveDifferentAddress) {\n- // FIXME(gvisor.dev/issue/3160): Test currently flaky.\n- SKIP_IF(true);\n-\nint port = 40000;\nif (!IsRunningOnGvisor()) {\nport = static_cast<short>(ASSERT_NO_ERRNO_AND_VALUE(\n@@ -301,18 +298,20 @@ TEST_F(RawHDRINCL, SendAndReceiveDifferentAddress) {\nASSERT_TRUE(\nFillPacket(packet, sizeof(packet), port, kPayload, sizeof(kPayload)));\n// Overwrite the IP destination address with an IP we can't get to.\n+ constexpr int32_t kUnreachable = 42;\nstruct iphdr iphdr = {};\nmemcpy(&iphdr, packet, sizeof(iphdr));\n- iphdr.daddr = 42;\n+ iphdr.daddr = kUnreachable;\nmemcpy(packet, &iphdr, sizeof(iphdr));\n+ // Send to localhost via loopback.\nsocklen_t addrlen = sizeof(addr_);\nASSERT_NO_FATAL_FAILURE(sendto(socket_, &packet, sizeof(packet), 0,\nreinterpret_cast<struct sockaddr*>(&addr_),\naddrlen));\n- // Receive the payload, since sendto should replace the bad destination with\n- // localhost.\n+ // Receive the payload. Despite an unreachable destination address, sendto\n+ // should have sent the packet through loopback.\nchar recv_buf[sizeof(packet)];\nstruct sockaddr_in src;\nsocklen_t src_size = sizeof(src);\n@@ -330,9 +329,8 @@ TEST_F(RawHDRINCL, SendAndReceiveDifferentAddress) {\nstruct iphdr recv_iphdr = {};\nmemcpy(&recv_iphdr, recv_buf, sizeof(recv_iphdr));\nEXPECT_NE(recv_iphdr.id, 0);\n- // The destination address should be localhost, not the bad IP we set\n- // initially.\n- EXPECT_EQ(absl::gbswap_32(recv_iphdr.daddr), INADDR_LOOPBACK);\n+ // The destination address is kUnreachable despite arriving via loopback.\n+ EXPECT_EQ(recv_iphdr.daddr, kUnreachable);\n}\n// Send and receive a packet w/ the IP_HDRINCL option set.\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/raw_socket_icmp.cc", "new_path": "test/syscalls/linux/raw_socket_icmp.cc", "diff": "@@ -262,8 +262,8 @@ TEST_F(RawSocketICMPTest, RawAndPingSockets) {\n}\n// A raw ICMP socket should be able to send a malformed short ICMP Echo Request,\n-// while ping socket should not.\n-// Neither should be able to receieve a short malformed packet.\n+// while a ping socket should not. Neither should be able to receieve a short\n+// malformed packet.\nTEST_F(RawSocketICMPTest, ShortEchoRawAndPingSockets) {\nSKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));\n" } ]
Go
Apache License 2.0
google/gvisor
raw sockets: don't overwrite destination address Also makes the behavior of raw sockets WRT fragmentation clearer, and makes the ICMPv4 header-length check explicit. Fixes #3160. PiperOrigin-RevId: 380033450
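The core of the change above is that raw delivery is keyed only by the transport protocol number, with one extra guard: ICMPv4 packets shorter than the minimum ICMP header are dropped, matching the Linux raw(7) ICMP_FILTER check cited in the diff. Here is a toy dispatcher with the same shape; the types are hypothetical stand-ins rather than the real stack.PacketBuffer or endpoint types.

```go
package main

import "fmt"

// Hypothetical stand-ins for the stack's packet and raw-endpoint types.
type packet struct {
	transportHeaderLen int
	dataLen            int
}

type rawEndpoint func(packet)

// demuxer keeps raw endpoints registered per transport protocol number.
type demuxer struct {
	rawEndpoints map[uint8][]rawEndpoint
}

const (
	icmpv4Proto       = 1
	icmpv4MinimumSize = 8
)

// deliverRaw delivers a packet to all raw endpoints for its protocol. As in
// the change above, the only validation is a minimum-length check for ICMPv4,
// for compatibility with raw(7) ICMP_FILTER.
func (d *demuxer) deliverRaw(proto uint8, pkt packet) {
	if proto == icmpv4Proto && pkt.transportHeaderLen+pkt.dataLen < icmpv4MinimumSize {
		return
	}
	for _, ep := range d.rawEndpoints[proto] {
		ep(pkt)
	}
}

func main() {
	d := &demuxer{rawEndpoints: map[uint8][]rawEndpoint{
		icmpv4Proto: {func(packet) { fmt.Println("delivered to raw ICMPv4 endpoint") }},
	}}
	d.deliverRaw(icmpv4Proto, packet{transportHeaderLen: 4, dataLen: 2})  // too short: dropped
	d.deliverRaw(icmpv4Proto, packet{transportHeaderLen: 8, dataLen: 56}) // delivered
}
```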
259,891
17.06.2021 15:28:51
25,200
bc27a991851fdffa59f028eecfc22bdd17ccaa55
remove outdated ip6tables TODOs IPv6 SO_ORIGINAL_DST is supported, and the flag check as-written will detect when other flags are needed. Fixes
[ { "change_type": "MODIFY", "old_path": "pkg/abi/linux/netfilter_ipv6.go", "new_path": "pkg/abi/linux/netfilter_ipv6.go", "diff": "@@ -41,7 +41,6 @@ const (\n// IP6T_ORIGINAL_DST is the ip6tables SOL_IPV6 socket option. Corresponds to\n// the value in include/uapi/linux/netfilter_ipv6/ip6_tables.h.\n-// TODO(gvisor.dev/issue/3549): Support IPv6 original destination.\nconst IP6T_ORIGINAL_DST = 80\n// IP6TReplace is the argument for the IP6T_SO_SET_REPLACE sockopt. It\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/socket/netfilter/targets.go", "new_path": "pkg/sentry/socket/netfilter/targets.go", "diff": "@@ -418,7 +418,6 @@ func (*nfNATTargetMaker) unmarshal(buf []byte, filter stack.IPHeaderFilter) (tar\nreturn nil, syserr.ErrInvalidArgument\n}\n- // TODO(gvisor.dev/issue/3549): Check for other flags.\n// For now, redirect target only supports destination change.\nif natRange.Flags != linux.NF_NAT_RANGE_PROTO_SPECIFIED {\nnflog(\"nfNATTargetMaker: invalid range flags %d\", natRange.Flags)\n" } ]
Go
Apache License 2.0
google/gvisor
remove outdated ip6tables TODOs IPv6 SO_ORIGINAL_DST is supported, and the flag check as-written will detect when other flags are needed. Fixes #3549. PiperOrigin-RevId: 380059115
259,975
17.06.2021 17:51:36
25,200
1f2ce9f46100d6ff958f7c257d3c0a949ede4f4c
[syserror] Change p9 server to use linuxerr. Change the p9 server to use *errors.Error defined in pkg linuxerr. Done separately from the client so that we ensure different p9 server/client versions work with each other.
[ { "change_type": "MODIFY", "old_path": "pkg/errors/linuxerr/linuxerr.go", "new_path": "pkg/errors/linuxerr/linuxerr.go", "diff": "@@ -27,6 +27,7 @@ import (\nconst maxErrno uint32 = errno.EHWPOISON + 1\nvar (\n+ NOERROR = errors.New(errno.NOERRNO, \"not an error\")\nEPERM = errors.New(errno.EPERM, \"operation not permitted\")\nENOENT = errors.New(errno.ENOENT, \"no such file or directory\")\nESRCH = errors.New(errno.ESRCH, \"no such process\")\n@@ -176,7 +177,7 @@ var errNotValidError = errors.New(errno.Errno(maxErrno), \"not a valid error\")\n// errnos (especially uint32(sycall.Errno)) and *Error.\nvar errorSlice = []*errors.Error{\n// Errno values from include/uapi/asm-generic/errno-base.h.\n- errno.NOERRNO: nil,\n+ errno.NOERRNO: NOERROR,\nerrno.EPERM: EPERM,\nerrno.ENOENT: ENOENT,\nerrno.ESRCH: ESRCH,\n" }, { "change_type": "MODIFY", "old_path": "pkg/p9/BUILD", "new_path": "pkg/p9/BUILD", "diff": "@@ -22,6 +22,9 @@ go_library(\n\"version.go\",\n],\ndeps = [\n+ \"//pkg/abi/linux/errno\",\n+ \"//pkg/errors\",\n+ \"//pkg/errors/linuxerr\",\n\"//pkg/fd\",\n\"//pkg/fdchannel\",\n\"//pkg/flipcall\",\n" }, { "change_type": "MODIFY", "old_path": "pkg/p9/handlers.go", "new_path": "pkg/p9/handlers.go", "diff": "@@ -23,6 +23,9 @@ import (\n\"sync/atomic\"\n\"golang.org/x/sys/unix\"\n+ \"gvisor.dev/gvisor/pkg/abi/linux/errno\"\n+ \"gvisor.dev/gvisor/pkg/errors\"\n+ \"gvisor.dev/gvisor/pkg/errors/linuxerr\"\n\"gvisor.dev/gvisor/pkg/fd\"\n\"gvisor.dev/gvisor/pkg/log\"\n)\n@@ -62,6 +65,45 @@ func newErr(err error) *Rlerror {\nreturn &Rlerror{Error: uint32(ExtractErrno(err))}\n}\n+// ExtractLinuxerrErrno extracts a *errors.Error from a error, best effort.\n+// TODO(b/34162363): Merge this with ExtractErrno.\n+func ExtractLinuxerrErrno(err error) *errors.Error {\n+ switch err {\n+ case os.ErrNotExist:\n+ return linuxerr.ENOENT\n+ case os.ErrExist:\n+ return linuxerr.EEXIST\n+ case os.ErrPermission:\n+ return linuxerr.EACCES\n+ case os.ErrInvalid:\n+ return linuxerr.EINVAL\n+ }\n+\n+ // Attempt to unwrap.\n+ switch e := err.(type) {\n+ case *errors.Error:\n+ return e\n+ case unix.Errno:\n+ return linuxerr.ErrorFromErrno(errno.Errno(e))\n+ case *os.PathError:\n+ return ExtractLinuxerrErrno(e.Err)\n+ case *os.SyscallError:\n+ return ExtractLinuxerrErrno(e.Err)\n+ case *os.LinkError:\n+ return ExtractLinuxerrErrno(e.Err)\n+ }\n+\n+ // Default case.\n+ log.Warningf(\"unknown error: %v\", err)\n+ return linuxerr.EIO\n+}\n+\n+// newErrFromLinuxerr returns an Rlerror from the linuxerr list.\n+// TODO(b/34162363): Merge this with newErr.\n+func newErrFromLinuxerr(err error) *Rlerror {\n+ return &Rlerror{Error: uint32(ExtractLinuxerrErrno(err).Errno())}\n+}\n+\n// handler is implemented for server-handled messages.\n//\n// See server.go for call information.\n" }, { "change_type": "MODIFY", "old_path": "pkg/p9/server.go", "new_path": "pkg/p9/server.go", "diff": "@@ -19,7 +19,8 @@ import (\n\"runtime/debug\"\n\"sync/atomic\"\n- \"golang.org/x/sys/unix\"\n+ \"gvisor.dev/gvisor/pkg/abi/linux/errno\"\n+ \"gvisor.dev/gvisor/pkg/errors/linuxerr\"\n\"gvisor.dev/gvisor/pkg/fd\"\n\"gvisor.dev/gvisor/pkg/fdchannel\"\n\"gvisor.dev/gvisor/pkg/flipcall\"\n@@ -483,7 +484,7 @@ func (cs *connState) lookupChannel(id uint32) *channel {\nfunc (cs *connState) handle(m message) (r message) {\nif !cs.reqGate.Enter() {\n// connState.stop() has been called; the connection is shutting down.\n- r = newErr(unix.ECONNRESET)\n+ r = newErrFromLinuxerr(linuxerr.ECONNRESET)\nreturn\n}\ndefer func() {\n@@ -498,15 +499,23 @@ func (cs 
*connState) handle(m message) (r message) {\n// Wrap in an EFAULT error; we don't really have a\n// better way to describe this kind of error. It will\n// usually manifest as a result of the test framework.\n- r = newErr(unix.EFAULT)\n+ r = newErrFromLinuxerr(linuxerr.EFAULT)\n}\n}()\nif handler, ok := m.(handler); ok {\n// Call the message handler.\nr = handler.handle(cs)\n+ // TODO(b/34162363):This is only here to make sure the server works with\n+ // only linuxerr Errors, as the handlers work with both client and server.\n+ // It will be removed a followup, when all the unix.Errno errors are\n+ // replaced with linuxerr.\n+ if rlError, ok := r.(*Rlerror); ok {\n+ e := linuxerr.ErrorFromErrno(errno.Errno(rlError.Error))\n+ r = newErrFromLinuxerr(e)\n+ }\n} else {\n// Produce an ENOSYS error.\n- r = newErr(unix.ENOSYS)\n+ r = newErrFromLinuxerr(linuxerr.ENOSYS)\n}\nreturn\n}\n@@ -553,7 +562,7 @@ func (cs *connState) handleRequest() bool {\n// If it's not a connection error, but some other protocol error,\n// we can send a response immediately.\ncs.sendMu.Lock()\n- err := send(cs.conn, tag, newErr(err))\n+ err := send(cs.conn, tag, newErrFromLinuxerr(err))\ncs.sendMu.Unlock()\nif err != nil {\nlog.Debugf(\"p9.send: %v\", err)\n" } ]
Go
Apache License 2.0
google/gvisor
[syserror] Change p9 server to use linuxerr. Change the p9 server to use *errors.Error defined in pkg linuxerr. Done separate from the client so that we ensure different p9 server/client versions work with each other. PiperOrigin-RevId: 380084491
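The new ExtractLinuxerrErrno above is a best-effort mapping from an arbitrary Go error to an errno: recognize well-known sentinels, unwrap the os wrapper types, and fall back to EIO. A stripped-down sketch of that pattern using only the standard library follows; it is not the p9 or linuxerr code, and syscall errnos stand in for linuxerr values.

```go
package main

import (
	"errors"
	"fmt"
	"os"
	"syscall"
)

// extractErrno translates an arbitrary error into an errno, best effort:
// recognize well-known sentinels, unwrap the os wrapper types, and fall back
// to EIO. The syscall errnos here stand in for the linuxerr values used by
// the real server.
func extractErrno(err error) syscall.Errno {
	switch {
	case errors.Is(err, os.ErrNotExist):
		return syscall.ENOENT
	case errors.Is(err, os.ErrExist):
		return syscall.EEXIST
	case errors.Is(err, os.ErrPermission):
		return syscall.EACCES
	case errors.Is(err, os.ErrInvalid):
		return syscall.EINVAL
	}

	// Attempt to unwrap common wrapper types.
	switch e := err.(type) {
	case syscall.Errno:
		return e
	case *os.PathError:
		return extractErrno(e.Err)
	case *os.SyscallError:
		return extractErrno(e.Err)
	case *os.LinkError:
		return extractErrno(e.Err)
	}

	// Default: report a generic I/O error.
	return syscall.EIO
}

func main() {
	_, err := os.Open("/definitely/not/here")
	fmt.Println(extractErrno(err) == syscall.ENOENT) // true
}
```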
259,992
21.06.2021 12:21:42
25,200
d823b7bd975dd2926324971a08f97522b41dd6e7
Add gcore to list of supported tools
[ { "change_type": "MODIFY", "old_path": "g3doc/user_guide/compatibility.md", "new_path": "g3doc/user_guide/compatibility.md", "diff": "@@ -61,6 +61,7 @@ Most common utilities work. Note that:\n| drill | Working. |\n| env | Working. |\n| find | Working. |\n+| gcore | Working. |\n| gdb | Working. |\n| gosu | Working. |\n| grep | Working (unless stdin is a pipe and stdout is /dev/null). |\n" } ]
Go
Apache License 2.0
google/gvisor
Add gcore to list of supported tools PiperOrigin-RevId: 380636877
259,891
21.06.2021 22:13:24
25,200
c6da1b0022484561cf1a37b240b670120936393b
clean up tcpdump TODOs tcpdump is largely supported. We've also chosen not to implement writeable AF_PACKET sockets, and there's a bug specifically for promiscuous mode Fixes
[ { "change_type": "MODIFY", "old_path": "g3doc/user_guide/compatibility.md", "new_path": "g3doc/user_guide/compatibility.md", "diff": "@@ -82,7 +82,7 @@ Most common utilities work. Note that:\n| sshd | Partially working. Job control [in progress](https://gvisor.dev/issue/154). |\n| strace | Working. |\n| tar | Working. |\n-| tcpdump | [In progress](https://gvisor.dev/issue/173). |\n+| tcpdump | Working. [Promiscuous mode in progress](https://gvisor.dev/issue/3333). |\n| top | Working. |\n| uptime | Working. |\n| vim | Working. |\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/socket/socket.go", "new_path": "pkg/sentry/socket/socket.go", "diff": "@@ -659,7 +659,6 @@ func ConvertAddress(family int, addr tcpip.FullAddress) (linux.SockAddr, uint32)\nreturn &out, uint32(sockAddrInet6Size)\ncase linux.AF_PACKET:\n- // TODO(gvisor.dev/issue/173): Return protocol too.\nvar out linux.SockAddrLink\nout.Family = linux.AF_PACKET\nout.InterfaceIndex = int32(addr.NIC)\n@@ -749,7 +748,6 @@ func AddressAndFamily(addr []byte) (tcpip.FullAddress, uint16, *syserr.Error) {\nreturn tcpip.FullAddress{}, family, syserr.ErrInvalidArgument\n}\n- // TODO(gvisor.dev/issue/173): Return protocol too.\nreturn tcpip.FullAddress{\nNIC: tcpip.NICID(a.InterfaceIndex),\nAddr: tcpip.Address(a.HardwareAddr[:header.EthernetAddressSize]),\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/packet/endpoint.go", "new_path": "pkg/tcpip/transport/packet/endpoint.go", "diff": "@@ -208,7 +208,6 @@ func (ep *endpoint) Read(dst io.Writer, opts tcpip.ReadOptions) (tcpip.ReadResul\n}\nfunc (*endpoint) Write(tcpip.Payloader, tcpip.WriteOptions) (int64, tcpip.Error) {\n- // TODO(gvisor.dev/issue/173): Implement.\nreturn 0, &tcpip.ErrInvalidOptionValue{}\n}\n@@ -244,8 +243,6 @@ func (*endpoint) Accept(*tcpip.FullAddress) (tcpip.Endpoint, *waiter.Queue, tcpi\n// Bind implements tcpip.Endpoint.Bind.\nfunc (ep *endpoint) Bind(addr tcpip.FullAddress) tcpip.Error {\n- // TODO(gvisor.dev/issue/173): Add Bind support.\n-\n// \"By default, all packets of the specified protocol type are passed\n// to a packet socket. 
To get packets only from a specific interface\n// use bind(2) specifying an address in a struct sockaddr_ll to bind\n@@ -385,7 +382,6 @@ func (ep *endpoint) HandlePacket(nicID tcpip.NICID, localAddr tcpip.LinkAddress,\n// Push new packet into receive list and increment the buffer size.\nvar packet packet\n- // TODO(gvisor.dev/issue/173): Return network protocol.\nif !pkt.LinkHeader().View().IsEmpty() {\n// Get info directly from the ethernet header.\nhdr := header.Ethernet(pkt.LinkHeader().View())\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/packet_socket.cc", "new_path": "test/syscalls/linux/packet_socket.cc", "diff": "//\n// These tests require CAP_NET_RAW to run.\n-// TODO(gvisor.dev/issue/173): gVisor support.\n-\nnamespace gvisor {\nnamespace testing {\n@@ -188,7 +186,6 @@ void ReceiveMessage(int sock, int ifindex) {\n// sizeof(sockaddr_ll).\nASSERT_THAT(src_len, AnyOf(Eq(sizeof(src)), Eq(sizeof(src) - 2)));\n- // TODO(gvisor.dev/issue/173): Verify protocol once we return it.\n// Verify the source address.\nEXPECT_EQ(src.sll_family, AF_PACKET);\nEXPECT_EQ(src.sll_ifindex, ifindex);\n@@ -234,7 +231,7 @@ TEST_P(CookedPacketTest, Receive) {\n// Send via a packet socket.\nTEST_P(CookedPacketTest, Send) {\n- // TODO(gvisor.dev/issue/173): Remove once we support packet socket writing.\n+ // We don't implement writing to packet sockets on gVisor.\nSKIP_IF(IsRunningOnGvisor());\n// Let's send a UDP packet and receive it using a regular UDP socket.\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/packet_socket_raw.cc", "new_path": "test/syscalls/linux/packet_socket_raw.cc", "diff": "//\n// These tests require CAP_NET_RAW to run.\n-// TODO(gvisor.dev/issue/173): gVisor support.\n-\nnamespace gvisor {\nnamespace testing {\n@@ -193,7 +191,6 @@ TEST_P(RawPacketTest, Receive) {\n// sizeof(sockaddr_ll).\nASSERT_THAT(src_len, AnyOf(Eq(sizeof(src)), Eq(sizeof(src) - 2)));\n- // TODO(gvisor.dev/issue/173): Verify protocol once we return it.\n// Verify the source address.\nEXPECT_EQ(src.sll_family, AF_PACKET);\nEXPECT_EQ(src.sll_ifindex, GetLoopbackIndex());\n@@ -238,7 +235,7 @@ TEST_P(RawPacketTest, Receive) {\n// Send via a packet socket.\nTEST_P(RawPacketTest, Send) {\n- // TODO(gvisor.dev/issue/173): Remove once we support packet socket writing.\n+ // We don't implement writing to packet sockets on gVisor.\nSKIP_IF(IsRunningOnGvisor());\n// Let's send a UDP packet and receive it using a regular UDP socket.\n" } ]
Go
Apache License 2.0
google/gvisor
clean up tcpdump TODOs tcpdump is largely supported. We've also chose not to implement writeable AF_PACKET sockets, and there's a bug specifically for promiscuous mode (#3333). Fixes #173. PiperOrigin-RevId: 380733686
259,884
22.06.2021 01:06:02
25,200
04a81bc33664b7f7b3da0666b9296e5aaf0f63e7
Trigger poll/epoll events on zero-length hostinet sendmsg Fixes
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/socket/hostinet/socket.go", "new_path": "pkg/sentry/socket/hostinet/socket.go", "diff": "@@ -67,23 +67,6 @@ type socketOperations struct {\nsocketOpsCommon\n}\n-// socketOpsCommon contains the socket operations common to VFS1 and VFS2.\n-//\n-// +stateify savable\n-type socketOpsCommon struct {\n- socket.SendReceiveTimeout\n-\n- family int // Read-only.\n- stype linux.SockType // Read-only.\n- protocol int // Read-only.\n- queue waiter.Queue\n-\n- // fd is the host socket fd. It must have O_NONBLOCK, so that operations\n- // will return EWOULDBLOCK instead of blocking on the host. This allows us to\n- // handle blocking behavior independently in the sentry.\n- fd int\n-}\n-\nvar _ = socket.Socket(&socketOperations{})\nfunc newSocketFile(ctx context.Context, family int, stype linux.SockType, protocol int, fd int, nonblock bool) (*fs.File, *syserr.Error) {\n@@ -103,29 +86,6 @@ func newSocketFile(ctx context.Context, family int, stype linux.SockType, protoc\nreturn fs.NewFile(ctx, dirent, fs.FileFlags{NonBlocking: nonblock, Read: true, Write: true, NonSeekable: true}, s), nil\n}\n-// Release implements fs.FileOperations.Release.\n-func (s *socketOpsCommon) Release(context.Context) {\n- fdnotifier.RemoveFD(int32(s.fd))\n- unix.Close(s.fd)\n-}\n-\n-// Readiness implements waiter.Waitable.Readiness.\n-func (s *socketOpsCommon) Readiness(mask waiter.EventMask) waiter.EventMask {\n- return fdnotifier.NonBlockingPoll(int32(s.fd), mask)\n-}\n-\n-// EventRegister implements waiter.Waitable.EventRegister.\n-func (s *socketOpsCommon) EventRegister(e *waiter.Entry, mask waiter.EventMask) {\n- s.queue.EventRegister(e, mask)\n- fdnotifier.UpdateFD(int32(s.fd))\n-}\n-\n-// EventUnregister implements waiter.Waitable.EventUnregister.\n-func (s *socketOpsCommon) EventUnregister(e *waiter.Entry) {\n- s.queue.EventUnregister(e)\n- fdnotifier.UpdateFD(int32(s.fd))\n-}\n-\n// Ioctl implements fs.FileOperations.Ioctl.\nfunc (s *socketOperations) Ioctl(ctx context.Context, _ *fs.File, io usermem.IO, args arch.SyscallArguments) (uintptr, error) {\nreturn ioctl(ctx, s.fd, io, args)\n@@ -177,6 +137,96 @@ func (s *socketOperations) Write(ctx context.Context, _ *fs.File, src usermem.IO\nreturn int64(n), err\n}\n+// Socket implements socket.Provider.Socket.\n+func (p *socketProvider) Socket(t *kernel.Task, stypeflags linux.SockType, protocol int) (*fs.File, *syserr.Error) {\n+ // Check that we are using the host network stack.\n+ stack := t.NetworkContext()\n+ if stack == nil {\n+ return nil, nil\n+ }\n+ if _, ok := stack.(*Stack); !ok {\n+ return nil, nil\n+ }\n+\n+ // Only accept TCP and UDP.\n+ stype := stypeflags & linux.SOCK_TYPE_MASK\n+ switch stype {\n+ case unix.SOCK_STREAM:\n+ switch protocol {\n+ case 0, unix.IPPROTO_TCP:\n+ // ok\n+ default:\n+ return nil, nil\n+ }\n+ case unix.SOCK_DGRAM:\n+ switch protocol {\n+ case 0, unix.IPPROTO_UDP:\n+ // ok\n+ default:\n+ return nil, nil\n+ }\n+ default:\n+ return nil, nil\n+ }\n+\n+ // Conservatively ignore all flags specified by the application and add\n+ // SOCK_NONBLOCK since socketOperations requires it. 
Pass a protocol of 0\n+ // to simplify the syscall filters, since 0 and IPPROTO_* are equivalent.\n+ fd, err := unix.Socket(p.family, int(stype)|unix.SOCK_NONBLOCK|unix.SOCK_CLOEXEC, 0)\n+ if err != nil {\n+ return nil, syserr.FromError(err)\n+ }\n+ return newSocketFile(t, p.family, stype, protocol, fd, stypeflags&unix.SOCK_NONBLOCK != 0)\n+}\n+\n+// Pair implements socket.Provider.Pair.\n+func (p *socketProvider) Pair(t *kernel.Task, stype linux.SockType, protocol int) (*fs.File, *fs.File, *syserr.Error) {\n+ // Not supported by AF_INET/AF_INET6.\n+ return nil, nil, nil\n+}\n+\n+// LINT.ThenChange(./socket_vfs2.go)\n+\n+// socketOpsCommon contains the socket operations common to VFS1 and VFS2.\n+//\n+// +stateify savable\n+type socketOpsCommon struct {\n+ socket.SendReceiveTimeout\n+\n+ family int // Read-only.\n+ stype linux.SockType // Read-only.\n+ protocol int // Read-only.\n+ queue waiter.Queue\n+\n+ // fd is the host socket fd. It must have O_NONBLOCK, so that operations\n+ // will return EWOULDBLOCK instead of blocking on the host. This allows us to\n+ // handle blocking behavior independently in the sentry.\n+ fd int\n+}\n+\n+// Release implements fs.FileOperations.Release.\n+func (s *socketOpsCommon) Release(context.Context) {\n+ fdnotifier.RemoveFD(int32(s.fd))\n+ unix.Close(s.fd)\n+}\n+\n+// Readiness implements waiter.Waitable.Readiness.\n+func (s *socketOpsCommon) Readiness(mask waiter.EventMask) waiter.EventMask {\n+ return fdnotifier.NonBlockingPoll(int32(s.fd), mask)\n+}\n+\n+// EventRegister implements waiter.Waitable.EventRegister.\n+func (s *socketOpsCommon) EventRegister(e *waiter.Entry, mask waiter.EventMask) {\n+ s.queue.EventRegister(e, mask)\n+ fdnotifier.UpdateFD(int32(s.fd))\n+}\n+\n+// EventUnregister implements waiter.Waitable.EventUnregister.\n+func (s *socketOpsCommon) EventUnregister(e *waiter.Entry) {\n+ s.queue.EventUnregister(e)\n+ fdnotifier.UpdateFD(int32(s.fd))\n+}\n+\n// Connect implements socket.Socket.Connect.\nfunc (s *socketOpsCommon) Connect(t *kernel.Task, sockaddr []byte, blocking bool) *syserr.Error {\nif len(sockaddr) > sizeofSockaddr {\n@@ -596,6 +646,17 @@ func (s *socketOpsCommon) SendMsg(t *kernel.Task, src usermem.IOSequence, to []b\nreturn 0, syserr.ErrInvalidArgument\n}\n+ // If the src is zero-length, call SENDTO directly with a null buffer in\n+ // order to generate poll/epoll notifications.\n+ if src.NumBytes() == 0 {\n+ sysflags := flags | unix.MSG_DONTWAIT\n+ n, _, errno := unix.Syscall6(unix.SYS_SENDTO, uintptr(s.fd), 0, 0, uintptr(sysflags), uintptr(firstBytePtr(to)), uintptr(len(to)))\n+ if errno != 0 {\n+ return 0, syserr.FromError(errno)\n+ }\n+ return int(n), nil\n+ }\n+\nspace := uint64(control.CmsgsSpace(t, controlMessages))\nif space > maxControlLen {\nspace = maxControlLen\n@@ -709,56 +770,6 @@ type socketProvider struct {\nfamily int\n}\n-// Socket implements socket.Provider.Socket.\n-func (p *socketProvider) Socket(t *kernel.Task, stypeflags linux.SockType, protocol int) (*fs.File, *syserr.Error) {\n- // Check that we are using the host network stack.\n- stack := t.NetworkContext()\n- if stack == nil {\n- return nil, nil\n- }\n- if _, ok := stack.(*Stack); !ok {\n- return nil, nil\n- }\n-\n- // Only accept TCP and UDP.\n- stype := stypeflags & linux.SOCK_TYPE_MASK\n- switch stype {\n- case unix.SOCK_STREAM:\n- switch protocol {\n- case 0, unix.IPPROTO_TCP:\n- // ok\n- default:\n- return nil, nil\n- }\n- case unix.SOCK_DGRAM:\n- switch protocol {\n- case 0, unix.IPPROTO_UDP:\n- // ok\n- default:\n- return nil, nil\n- 
}\n- default:\n- return nil, nil\n- }\n-\n- // Conservatively ignore all flags specified by the application and add\n- // SOCK_NONBLOCK since socketOperations requires it. Pass a protocol of 0\n- // to simplify the syscall filters, since 0 and IPPROTO_* are equivalent.\n- fd, err := unix.Socket(p.family, int(stype)|unix.SOCK_NONBLOCK|unix.SOCK_CLOEXEC, 0)\n- if err != nil {\n- return nil, syserr.FromError(err)\n- }\n- return newSocketFile(t, p.family, stype, protocol, fd, stypeflags&unix.SOCK_NONBLOCK != 0)\n-}\n-\n-// Pair implements socket.Provider.Pair.\n-func (p *socketProvider) Pair(t *kernel.Task, stype linux.SockType, protocol int) (*fs.File, *fs.File, *syserr.Error) {\n- // Not supported by AF_INET/AF_INET6.\n- return nil, nil, nil\n-}\n-\n-// LINT.ThenChange(./socket_vfs2.go)\n-\nfunc init() {\nfor _, family := range []int{unix.AF_INET, unix.AF_INET6} {\nsocket.RegisterProvider(family, &socketProvider{family})\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/udp_socket.cc", "new_path": "test/syscalls/linux/udp_socket.cc", "diff": "@@ -1429,12 +1429,8 @@ TEST_P(UdpSocketTest, FIONREADZeroLengthPacket) {\nsendto(sock_.get(), buf + i * psize, 0, 0, bind_addr_, addrlen_),\nSyscallSucceedsWithValue(0));\n- // TODO(gvisor.dev/issue/2726): sending a zero-length message to a hostinet\n- // socket does not cause a poll event to be triggered.\n- if (!IsRunningWithHostinet()) {\nASSERT_THAT(RetryEINTR(poll)(&pfd, 1, /*timeout=*/1000),\nSyscallSucceedsWithValue(1));\n- }\n// Check that regardless of how many packets are in the queue, the size\n// reported is that of a single packet.\n" } ]
Go
Apache License 2.0
google/gvisor
Trigger poll/epoll events on zero-length hostinet sendmsg Fixes #2726 PiperOrigin-RevId: 380753516
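The hostinet fix above special-cases an empty source buffer by issuing SENDTO directly, so the host still emits the datagram and the peer's poll/epoll wakes up. The host behavior it relies on can be checked from plain Go; this sketch assumes Linux UDP semantics and uses only the standard net package.

```go
package main

import (
	"fmt"
	"net"
	"time"
)

// A zero-length UDP send still has to reach the peer and make it readable,
// which is what the change preserves by calling SENDTO even for empty writes.
func main() {
	srv, err := net.ListenUDP("udp", &net.UDPAddr{IP: net.IPv4(127, 0, 0, 1)})
	if err != nil {
		panic(err)
	}
	defer srv.Close()

	cli, err := net.ListenUDP("udp", &net.UDPAddr{IP: net.IPv4(127, 0, 0, 1)})
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	// Send an empty datagram (sendto with a zero-length buffer).
	if _, err := cli.WriteToUDP(nil, srv.LocalAddr().(*net.UDPAddr)); err != nil {
		panic(err)
	}

	// The empty datagram is delivered: ReadFromUDP returns n == 0 rather than
	// timing out, i.e. the socket became readable.
	srv.SetReadDeadline(time.Now().Add(time.Second))
	buf := make([]byte, 16)
	n, _, err := srv.ReadFromUDP(buf)
	fmt.Println(n, err) // 0 <nil>
}
```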
259,891
22.06.2021 18:33:22
25,200
179ed309f4eaf424c078dba4688eef2731e6649c
netstack: further deflake tcp_test There are unnecessarily short timeouts in several places. Note: a later change will switch tcp_test to fake clocks instead of the built-in `time` package.
[ { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/tcp/tcp_test.go", "new_path": "pkg/tcpip/transport/tcp/tcp_test.go", "diff": "@@ -3451,17 +3451,13 @@ loop:\nfor {\nswitch _, err := c.EP.Read(ioutil.Discard, tcpip.ReadOptions{}); err.(type) {\ncase *tcpip.ErrWouldBlock:\n- select {\n- case <-ch:\n+ <-ch\n// Expect the state to be StateError and subsequent Reads to fail with HardError.\n_, err := c.EP.Read(ioutil.Discard, tcpip.ReadOptions{})\nif d := cmp.Diff(&tcpip.ErrConnectionReset{}, err); d != \"\" {\nt.Fatalf(\"c.EP.Read() mismatch (-want +got):\\n%s\", d)\n}\nbreak loop\n- case <-time.After(1 * time.Second):\n- t.Fatalf(\"Timed out waiting for reset to arrive\")\n- }\ncase *tcpip.ErrConnectionReset:\nbreak loop\ndefault:\n@@ -3472,14 +3468,27 @@ loop:\nif tcp.EndpointState(c.EP.State()) != tcp.StateError {\nt.Fatalf(\"got EP state is not StateError\")\n}\n+\n+ checkValid := func() []error {\n+ var errors []error\nif got := c.Stack().Stats().TCP.EstablishedResets.Value(); got != 1 {\n- t.Errorf(\"got stats.TCP.EstablishedResets.Value() = %d, want = 1\", got)\n+ errors = append(errors, fmt.Errorf(\"got stats.TCP.EstablishedResets.Value() = %d, want = 1\", got))\n}\nif got := c.Stack().Stats().TCP.CurrentEstablished.Value(); got != 0 {\n- t.Errorf(\"got stats.TCP.CurrentEstablished.Value() = %d, want = 0\", got)\n+ errors = append(errors, fmt.Errorf(\"got stats.TCP.CurrentEstablished.Value() = %d, want = 0\", got))\n}\nif got := c.Stack().Stats().TCP.CurrentConnected.Value(); got != 0 {\n- t.Errorf(\"got stats.TCP.CurrentConnected.Value() = %d, want = 0\", got)\n+ errors = append(errors, fmt.Errorf(\"got stats.TCP.CurrentConnected.Value() = %d, want = 0\", got))\n+ }\n+ return errors\n+ }\n+\n+ start := time.Now()\n+ for time.Since(start) < time.Minute && len(checkValid()) > 0 {\n+ time.Sleep(50 * time.Millisecond)\n+ }\n+ for _, err := range checkValid() {\n+ t.Error(err)\n}\n}\n@@ -6092,16 +6101,11 @@ func TestSynRcvdBadSeqNumber(t *testing.T) {\ndefer c.WQ.EventUnregister(&we)\n// Wait for connection to be established.\n- select {\n- case <-ch:\n+ <-ch\nnewEP, _, err = c.EP.Accept(nil)\nif err != nil {\nt.Fatalf(\"Accept failed: %s\", err)\n}\n-\n- case <-time.After(1 * time.Second):\n- t.Fatalf(\"Timed out waiting for accept\")\n- }\n}\n// Now verify that the TCP socket is usable and in a connected state.\n@@ -6209,12 +6213,26 @@ func TestPassiveFailedConnectionAttemptIncrement(t *testing.T) {\nRcvWnd: 30000,\n})\n- time.Sleep(50 * time.Millisecond)\n+ checkValid := func() []error {\n+ var errors []error\nif got := stats.TCP.ListenOverflowSynDrop.Value(); got != want {\n- t.Errorf(\"got stats.TCP.ListenOverflowSynDrop.Value() = %d, want = %d\", got, want)\n+ errors = append(errors, fmt.Errorf(\"got stats.TCP.ListenOverflowSynDrop.Value() = %d, want = %d\", got, want))\n}\nif got := c.EP.Stats().(*tcp.Stats).ReceiveErrors.ListenOverflowSynDrop.Value(); got != want {\n- t.Errorf(\"got EP stats Stats.ReceiveErrors.ListenOverflowSynDrop = %d, want = %d\", got, want)\n+ errors = append(errors, fmt.Errorf(\"got EP stats Stats.ReceiveErrors.ListenOverflowSynDrop = %d, want = %d\", got, want))\n+ }\n+ return errors\n+ }\n+\n+ start := time.Now()\n+ for time.Since(start) < time.Minute && len(checkValid()) > 0 {\n+ time.Sleep(50 * time.Millisecond)\n+ }\n+ for _, err := range checkValid() {\n+ t.Error(err)\n+ }\n+ if t.Failed() {\n+ t.FailNow()\n}\nwe, ch := waiter.NewChannelEntry(nil)\n@@ -6225,16 +6243,11 @@ func TestPassiveFailedConnectionAttemptIncrement(t *testing.T) {\n_, 
_, err = c.EP.Accept(nil)\nif cmp.Equal(&tcpip.ErrWouldBlock{}, err) {\n// Wait for connection to be established.\n- select {\n- case <-ch:\n+ <-ch\n_, _, err = c.EP.Accept(nil)\nif err != nil {\nt.Fatalf(\"Accept failed: %s\", err)\n}\n-\n- case <-time.After(1 * time.Second):\n- t.Fatalf(\"Timed out waiting for accept\")\n- }\n}\n}\n@@ -7483,7 +7496,7 @@ func TestTCPUserTimeout(t *testing.T) {\nselect {\ncase <-notifyCh:\ncase <-time.After(2 * initRTO):\n- t.Fatalf(\"connection still alive after %s, should have been closed after :%s\", 2*initRTO, userTimeout)\n+ t.Fatalf(\"connection still alive after %s, should have been closed after %s\", 2*initRTO, userTimeout)\n}\n// No packet should be received as the connection should be silently\n" } ]
Go
Apache License 2.0
google/gvisor
netstack: further deflake tcp_test There are unnecessarily short timeouts in several places. Note: a later change will switch tcp_test to fake clocks intead of the built-in `time` package. PiperOrigin-RevId: 380935400
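The deflaking pattern in this change replaces a fixed short sleep with a bounded poll loop that keeps re-evaluating the stat checks until they pass or a generous deadline expires. A small generic sketch of that helper follows (illustrative only, not the tcp_test code).

```go
package main

import (
	"fmt"
	"time"
)

// waitFor re-runs check until it reports no errors or the deadline passes,
// then returns whatever errors remain. This is the shape of the deflaking
// loops added in the test change above.
func waitFor(timeout time.Duration, check func() []error) []error {
	start := time.Now()
	for time.Since(start) < timeout {
		if errs := check(); len(errs) == 0 {
			return nil
		}
		time.Sleep(50 * time.Millisecond)
	}
	return check()
}

func main() {
	ready := time.Now().Add(200 * time.Millisecond)
	errs := waitFor(time.Second, func() []error {
		if time.Now().Before(ready) {
			return []error{fmt.Errorf("stat has not converged yet")}
		}
		return nil
	})
	fmt.Println("remaining errors:", len(errs)) // remaining errors: 0
}
```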
259,962
22.06.2021 23:38:37
25,200
e5fe488b22734e798df760d9646c6b1c5f25c207
Wake up Writers when tcp socket is shutdown for writes.
[ { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/tcp/endpoint.go", "new_path": "pkg/tcpip/transport/tcp/endpoint.go", "diff": "@@ -2372,6 +2372,9 @@ func (e *endpoint) shutdownLocked(flags tcpip.ShutdownFlags) tcpip.Error {\ne.notifyProtocolGoroutine(notifyTickleWorker)\nreturn nil\n}\n+ // Wake up any readers that maybe waiting for the stream to become\n+ // readable.\n+ e.waiterQueue.Notify(waiter.ReadableEvents)\n}\n// Close for write.\n@@ -2394,6 +2397,9 @@ func (e *endpoint) shutdownLocked(flags tcpip.ShutdownFlags) tcpip.Error {\ne.sndQueueInfo.SndClosed = true\ne.sndQueueInfo.sndQueueMu.Unlock()\ne.handleClose()\n+ // Wake up any writers that maybe waiting for the stream to become\n+ // writable.\n+ e.waiterQueue.Notify(waiter.WritableEvents)\n}\nreturn nil\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/tcp_socket.cc", "new_path": "test/syscalls/linux/tcp_socket.cc", "diff": "@@ -1182,6 +1182,62 @@ TEST_P(SimpleTcpSocketTest, SelfConnectSend) {\nEXPECT_THAT(shutdown(s.get(), SHUT_WR), SyscallSucceedsWithValue(0));\n}\n+TEST_P(SimpleTcpSocketTest, SelfConnectSendShutdownWrite) {\n+ // Initialize address to the loopback one.\n+ sockaddr_storage addr =\n+ ASSERT_NO_ERRNO_AND_VALUE(InetLoopbackAddr(GetParam()));\n+ socklen_t addrlen = sizeof(addr);\n+\n+ const FileDescriptor s =\n+ ASSERT_NO_ERRNO_AND_VALUE(Socket(GetParam(), SOCK_STREAM, IPPROTO_TCP));\n+\n+ ASSERT_THAT(bind(s.get(), AsSockAddr(&addr), addrlen), SyscallSucceeds());\n+ // Get the bound port.\n+ ASSERT_THAT(getsockname(s.get(), AsSockAddr(&addr), &addrlen),\n+ SyscallSucceeds());\n+ ASSERT_THAT(RetryEINTR(connect)(s.get(), AsSockAddr(&addr), addrlen),\n+ SyscallSucceeds());\n+\n+ // Write enough data to fill send and receive buffers.\n+ size_t write_size = 24 << 20; // 24 MiB.\n+ std::vector<char> writebuf(write_size);\n+\n+ ScopedThread t([&s]() {\n+ absl::SleepFor(absl::Milliseconds(250));\n+ ASSERT_THAT(shutdown(s.get(), SHUT_WR), SyscallSucceeds());\n+ });\n+\n+ // Try to send the whole thing.\n+ int n;\n+ ASSERT_THAT(n = SendFd(s.get(), writebuf.data(), writebuf.size(), 0),\n+ SyscallFailsWithErrno(EPIPE));\n+}\n+\n+TEST_P(SimpleTcpSocketTest, SelfConnectRecvShutdownRead) {\n+ // Initialize address to the loopback one.\n+ sockaddr_storage addr =\n+ ASSERT_NO_ERRNO_AND_VALUE(InetLoopbackAddr(GetParam()));\n+ socklen_t addrlen = sizeof(addr);\n+\n+ const FileDescriptor s =\n+ ASSERT_NO_ERRNO_AND_VALUE(Socket(GetParam(), SOCK_STREAM, IPPROTO_TCP));\n+\n+ ASSERT_THAT(bind(s.get(), AsSockAddr(&addr), addrlen), SyscallSucceeds());\n+ // Get the bound port.\n+ ASSERT_THAT(getsockname(s.get(), AsSockAddr(&addr), &addrlen),\n+ SyscallSucceeds());\n+ ASSERT_THAT(RetryEINTR(connect)(s.get(), AsSockAddr(&addr), addrlen),\n+ SyscallSucceeds());\n+\n+ ScopedThread t([&s]() {\n+ absl::SleepFor(absl::Milliseconds(250));\n+ ASSERT_THAT(shutdown(s.get(), SHUT_RD), SyscallSucceeds());\n+ });\n+\n+ char buf[1];\n+ EXPECT_THAT(recv(s.get(), buf, 0, 0), SyscallSucceedsWithValue(0));\n+}\n+\nvoid NonBlockingConnect(int family, int16_t pollMask) {\nconst FileDescriptor listener =\nASSERT_NO_ERRNO_AND_VALUE(Socket(family, SOCK_STREAM, IPPROTO_TCP));\n" } ]
Go
Apache License 2.0
google/gvisor
Wake up Writers when tcp socket is shutdown for writes. PiperOrigin-RevId: 380967023
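The endpoint change above notifies waiters with WritableEvents (and ReadableEvents for the read side) so that a blocked writer or reader wakes up once the stream is shut down. Below is a toy model of the writer side that uses sync.Cond in place of waiter.Queue; the names and structure are illustrative, not the real endpoint.

```go
package main

import (
	"fmt"
	"sync"
)

// A toy endpoint: a writer blocks until the stream is writable or closed for
// writes. Without the shutdown notification added in the change, the waiting
// goroutine below would sleep forever.
type endpoint struct {
	mu        sync.Mutex
	cond      *sync.Cond
	writable  bool
	sndClosed bool
}

func newEndpoint() *endpoint {
	e := &endpoint{}
	e.cond = sync.NewCond(&e.mu)
	return e
}

// waitWritable blocks until the endpoint is writable or shut down for writes.
// It reports whether writing is still possible.
func (e *endpoint) waitWritable() bool {
	e.mu.Lock()
	defer e.mu.Unlock()
	for !e.writable && !e.sndClosed {
		e.cond.Wait()
	}
	return !e.sndClosed
}

// shutdownWrite closes the write side and wakes any blocked writers.
func (e *endpoint) shutdownWrite() {
	e.mu.Lock()
	e.sndClosed = true
	e.mu.Unlock()
	e.cond.Broadcast() // wake up blocked writers
}

func main() {
	e := newEndpoint()
	done := make(chan bool)
	go func() { done <- e.waitWritable() }()
	e.shutdownWrite()
	fmt.Println("still writable:", <-done) // still writable: false
}
```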
259,987
22.06.2021 14:17:26
25,200
99f9230e3fbd3b3ede5628eeeb8458175bc9c1a0
Ensure shim propagates errors over gRPC correctly This change wraps containerd's errdefs.ToGRPC function with one that understands Go 1.13-style error wrapping, which is used pervasively throughout the shim. With this change, errors that have been marked with, e.g., `errdefs.ErrNotFound`, will be correctly propagated back to the containerd server.
[ { "change_type": "MODIFY", "old_path": "pkg/shim/BUILD", "new_path": "pkg/shim/BUILD", "diff": "@@ -8,6 +8,7 @@ go_library(\n\"api.go\",\n\"debug.go\",\n\"epoll.go\",\n+ \"errors.go\",\n\"options.go\",\n\"service.go\",\n\"service_linux.go\",\n@@ -44,6 +45,8 @@ go_library(\n\"@com_github_gogo_protobuf//types:go_default_library\",\n\"@com_github_opencontainers_runtime_spec//specs-go:go_default_library\",\n\"@com_github_sirupsen_logrus//:go_default_library\",\n+ \"@org_golang_google_grpc//codes:go_default_library\",\n+ \"@org_golang_google_grpc//status:go_default_library\",\n\"@org_golang_x_sys//unix:go_default_library\",\n],\n)\n@@ -51,7 +54,10 @@ go_library(\ngo_test(\nname = \"shim_test\",\nsize = \"small\",\n- srcs = [\"service_test.go\"],\n+ srcs = [\n+ \"errors_test.go\",\n+ \"service_test.go\",\n+ ],\nlibrary = \":shim\",\ndeps = [\n\"//pkg/shim/utils\",\n" }, { "change_type": "ADD", "old_path": null, "new_path": "pkg/shim/errors.go", "diff": "+// Copyright 2021 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// https://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package shim\n+\n+import (\n+ \"context\"\n+ \"errors\"\n+\n+ \"github.com/containerd/containerd/errdefs\"\n+ \"google.golang.org/grpc/codes\"\n+ \"google.golang.org/grpc/status\"\n+)\n+\n+// errToGRPC wraps containerd's ToGRPC error mapper which depends on\n+// github.com/pkg/errors to work correctly. 
Once we upgrade to containerd v1.4,\n+// this function can go away and we can use errdefs.ToGRPC directly instead.\n+//\n+// TODO(gvisor.dev/issue/6232): Remove after upgrading to containerd v1.4\n+func errToGRPC(err error) error {\n+ if _, ok := status.FromError(err); ok {\n+ return err\n+ }\n+\n+ switch {\n+ case errors.Is(err, errdefs.ErrInvalidArgument):\n+ return status.Errorf(codes.InvalidArgument, err.Error())\n+ case errors.Is(err, errdefs.ErrNotFound):\n+ return status.Errorf(codes.NotFound, err.Error())\n+ case errors.Is(err, errdefs.ErrAlreadyExists):\n+ return status.Errorf(codes.AlreadyExists, err.Error())\n+ case errors.Is(err, errdefs.ErrFailedPrecondition):\n+ return status.Errorf(codes.FailedPrecondition, err.Error())\n+ case errors.Is(err, errdefs.ErrUnavailable):\n+ return status.Errorf(codes.Unavailable, err.Error())\n+ case errors.Is(err, errdefs.ErrNotImplemented):\n+ return status.Errorf(codes.Unimplemented, err.Error())\n+ case errors.Is(err, context.Canceled):\n+ return status.Errorf(codes.Canceled, err.Error())\n+ case errors.Is(err, context.DeadlineExceeded):\n+ return status.Errorf(codes.DeadlineExceeded, err.Error())\n+ }\n+\n+ return errdefs.ToGRPC(err)\n+}\n" }, { "change_type": "ADD", "old_path": null, "new_path": "pkg/shim/errors_test.go", "diff": "+// Copyright 2021 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// https://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package shim\n+\n+import (\n+ \"fmt\"\n+ \"testing\"\n+\n+ \"github.com/containerd/containerd/errdefs\"\n+)\n+\n+func TestGRPCRoundTripsErrors(t *testing.T) {\n+ for _, tc := range []struct {\n+ name string\n+ err error\n+ test func(err error) bool\n+ }{\n+ {\n+ name: \"passthrough\",\n+ err: errdefs.ErrNotFound,\n+ test: errdefs.IsNotFound,\n+ },\n+ {\n+ name: \"wrapped\",\n+ err: fmt.Errorf(\"oh no: %w\", errdefs.ErrNotFound),\n+ test: errdefs.IsNotFound,\n+ },\n+ } {\n+ t.Run(tc.name, func(t *testing.T) {\n+ if err := errdefs.FromGRPC(errToGRPC(tc.err)); !tc.test(err) {\n+ t.Errorf(\"got %+v\", err)\n+ }\n+ })\n+ }\n+}\n" }, { "change_type": "MODIFY", "old_path": "pkg/shim/service.go", "new_path": "pkg/shim/service.go", "diff": "@@ -452,10 +452,10 @@ func (s *service) Create(ctx context.Context, r *taskAPI.CreateTaskRequest) (*ta\n}\nprocess, err := newInit(r.Bundle, filepath.Join(r.Bundle, \"work\"), ns, s.platform, config, &s.opts, st.Rootfs)\nif err != nil {\n- return nil, errdefs.ToGRPC(err)\n+ return nil, errToGRPC(err)\n}\nif err := process.Create(ctx, config); err != nil {\n- return nil, errdefs.ToGRPC(err)\n+ return nil, errToGRPC(err)\n}\n// Set up OOM notification on the sandbox's cgroup. 
This is done on\n@@ -544,7 +544,7 @@ func (s *service) Exec(ctx context.Context, r *taskAPI.ExecProcessRequest) (*typ\nSpec: r.Spec,\n})\nif err != nil {\n- return nil, errdefs.ToGRPC(err)\n+ return nil, errToGRPC(err)\n}\ns.mu.Lock()\ns.processes[r.ExecID] = process\n@@ -565,7 +565,7 @@ func (s *service) ResizePty(ctx context.Context, r *taskAPI.ResizePtyRequest) (*\nHeight: uint16(r.Height),\n}\nif err := p.Resize(ws); err != nil {\n- return nil, errdefs.ToGRPC(err)\n+ return nil, errToGRPC(err)\n}\nreturn empty, nil\n}\n@@ -648,7 +648,7 @@ func (s *service) Kill(ctx context.Context, r *taskAPI.KillRequest) (*types.Empt\n}\nif err := p.Kill(ctx, r.Signal, r.All); err != nil {\nlog.L.Debugf(\"Kill failed: %v\", err)\n- return nil, errdefs.ToGRPC(err)\n+ return nil, errToGRPC(err)\n}\nlog.L.Debugf(\"Kill succeeded\")\nreturn empty, nil\n@@ -660,7 +660,7 @@ func (s *service) Pids(ctx context.Context, r *taskAPI.PidsRequest) (*taskAPI.Pi\npids, err := s.getContainerPids(ctx, r.ID)\nif err != nil {\n- return nil, errdefs.ToGRPC(err)\n+ return nil, errToGRPC(err)\n}\nvar processes []*task.ProcessInfo\nfor _, pid := range pids {\n" } ]
Go
Apache License 2.0
google/gvisor
Ensure shim propagates errors over gRPC correctly This change wraps containerd's errdefs.ToGRPC function with one that understands Go 1.13-style error wrapping style, which is used pervasively throughout the shim. With this change, errors that have been marked with, e.g., `errdefs.ErrNotFound`, will be correctly propagated back to the containerd server.
259,987
22.06.2021 14:20:21
25,200
b8430201f0046d78ee5ac6229718fa88c5246c96
Do not attempt to kill stopped exec processes While a previous change addressed the stopped state for handling signals in the main process, it did not update exec processes in the same way. This change mirrors that adjustment for exec processes.
[ { "change_type": "MODIFY", "old_path": "pkg/shim/proc/exec_state.go", "new_path": "pkg/shim/proc/exec_state.go", "diff": "@@ -151,8 +151,8 @@ func (s *execStoppedState) Delete(ctx context.Context) error {\nreturn nil\n}\n-func (s *execStoppedState) Kill(ctx context.Context, sig uint32, all bool) error {\n- return s.p.kill(ctx, sig, all)\n+func (s *execStoppedState) Kill(_ context.Context, sig uint32, _ bool) error {\n+ return handleStoppedKill(sig)\n}\nfunc (s *execStoppedState) SetExited(int) {\n" } ]
Go
Apache License 2.0
google/gvisor
Do not attempt to kill stopped exec processes While #6204 addressed the stopped state for handling signals in the main process, it did not update exec processes in the same way. This change mirrors that adjustment for exec processes.
259,885
23.06.2021 11:13:45
25,200
6b23d2a08e71ce7d93b1d52e545f792f549225dc
Fix PR_SET_PTRACER applicability to non-leader threads. Compare if (!thread_group_leader(tracee)) tracee = rcu_dereference(tracee->group_leader); in security/yama/yama_lsm.c:ptracer_exception_found().
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/kernel/ptrace.go", "new_path": "pkg/sentry/kernel/ptrace.go", "diff": "@@ -294,7 +294,7 @@ func (t *Task) isYAMADescendantOfLocked(ancestor *Task) bool {\n// Precondition: the TaskSet mutex must be locked (for reading or writing).\nfunc (t *Task) hasYAMAExceptionForLocked(tracer *Task) bool {\n- allowed, ok := t.k.ptraceExceptions[t]\n+ allowed, ok := t.k.ptraceExceptions[t.tg.leader]\nif !ok {\nreturn false\n}\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/BUILD", "new_path": "test/syscalls/linux/BUILD", "diff": "@@ -1885,6 +1885,7 @@ cc_binary(\nlinkstatic = 1,\ndeps = [\n\"@com_google_absl//absl/flags:flag\",\n+ \"@com_google_absl//absl/strings\",\n\"@com_google_absl//absl/time\",\ngtest,\n\"//test/util:capability_util\",\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/ptrace.cc", "new_path": "test/syscalls/linux/ptrace.cc", "diff": "#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"absl/flags/flag.h\"\n+#include \"absl/strings/string_view.h\"\n#include \"absl/time/clock.h\"\n#include \"absl/time/time.h\"\n#include \"test/util/capability_util.h\"\n@@ -51,17 +52,10 @@ ABSL_FLAG(bool, ptrace_test_execve_child, false,\nABSL_FLAG(bool, ptrace_test_trace_descendants_allowed, false,\n\"If set, run the child workload for \"\n\"PtraceTest_TraceDescendantsAllowed.\");\n-ABSL_FLAG(bool, ptrace_test_prctl_set_ptracer_pid, false,\n- \"If set, run the child workload for PtraceTest_PrctlSetPtracerPID.\");\n-ABSL_FLAG(bool, ptrace_test_prctl_set_ptracer_any, false,\n- \"If set, run the child workload for PtraceTest_PrctlSetPtracerAny.\");\n-ABSL_FLAG(bool, ptrace_test_prctl_clear_ptracer, false,\n- \"If set, run the child workload for PtraceTest_PrctlClearPtracer.\");\n-ABSL_FLAG(bool, ptrace_test_prctl_replace_ptracer, false,\n- \"If set, run the child workload for PtraceTest_PrctlReplacePtracer.\");\n-ABSL_FLAG(int, ptrace_test_prctl_replace_ptracer_tid, -1,\n- \"Specifies the replacement tracer tid in the child workload for \"\n- \"PtraceTest_PrctlReplacePtracer.\");\n+ABSL_FLAG(bool, ptrace_test_ptrace_attacher, false,\n+ \"If set, run the child workload for PtraceAttacherSubprocess.\");\n+ABSL_FLAG(bool, ptrace_test_prctl_set_ptracer, false,\n+ \"If set, run the child workload for PrctlSetPtracerSubprocess.\");\nABSL_FLAG(bool, ptrace_test_prctl_set_ptracer_and_exit_tracee_thread, false,\n\"If set, run the child workload for \"\n\"PtraceTest_PrctlSetPtracerPersistsPastTraceeThreadExit.\");\n@@ -161,6 +155,86 @@ int CheckPtraceAttach(pid_t pid) {\nreturn 0;\n}\n+class SimpleSubprocess {\n+ public:\n+ explicit SimpleSubprocess(absl::string_view child_flag) {\n+ int sockets[2];\n+ TEST_PCHECK(socketpair(AF_UNIX, SOCK_STREAM, 0, sockets) == 0);\n+\n+ // Allocate vector before forking (not async-signal-safe).\n+ ExecveArray const owned_child_argv = {\"/proc/self/exe\", child_flag,\n+ \"--ptrace_test_fd\",\n+ std::to_string(sockets[0])};\n+ char* const* const child_argv = owned_child_argv.get();\n+\n+ pid_ = fork();\n+ if (pid_ == 0) {\n+ TEST_PCHECK(close(sockets[1]) == 0);\n+ execve(child_argv[0], child_argv, /* envp = */ nullptr);\n+ TEST_PCHECK_MSG(false, \"Survived execve to test child\");\n+ }\n+ TEST_PCHECK(pid_ > 0);\n+ TEST_PCHECK(close(sockets[0]) == 0);\n+ sockfd_ = sockets[1];\n+ }\n+\n+ SimpleSubprocess(SimpleSubprocess&& orig)\n+ : pid_(orig.pid_), sockfd_(orig.sockfd_) {\n+ orig.pid_ = -1;\n+ orig.sockfd_ = -1;\n+ }\n+\n+ SimpleSubprocess& operator=(SimpleSubprocess&& orig) {\n+ if 
(this != &orig) {\n+ this->~SimpleSubprocess();\n+ pid_ = orig.pid_;\n+ sockfd_ = orig.sockfd_;\n+ orig.pid_ = -1;\n+ orig.sockfd_ = -1;\n+ }\n+ return *this;\n+ }\n+\n+ SimpleSubprocess(SimpleSubprocess const&) = delete;\n+ SimpleSubprocess& operator=(SimpleSubprocess const&) = delete;\n+\n+ ~SimpleSubprocess() {\n+ if (pid_ < 0) {\n+ return;\n+ }\n+ EXPECT_THAT(shutdown(sockfd_, SHUT_RDWR), SyscallSucceeds());\n+ EXPECT_THAT(close(sockfd_), SyscallSucceeds());\n+ int status;\n+ EXPECT_THAT(waitpid(pid_, &status, 0), SyscallSucceedsWithValue(pid_));\n+ EXPECT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) == 0)\n+ << \" status \" << status;\n+ }\n+\n+ pid_t pid() const { return pid_; }\n+\n+ // Sends the child process the given value, receives an errno in response, and\n+ // returns a PosixError corresponding to the received errno.\n+ template <typename T>\n+ PosixError Cmd(T val) {\n+ if (WriteFd(sockfd_, &val, sizeof(val)) < 0) {\n+ return PosixError(errno, \"write failed\");\n+ }\n+ return RecvErrno();\n+ }\n+\n+ private:\n+ PosixError RecvErrno() {\n+ int resp_errno;\n+ if (ReadFd(sockfd_, &resp_errno, sizeof(resp_errno)) < 0) {\n+ return PosixError(errno, \"read failed\");\n+ }\n+ return PosixError(resp_errno);\n+ }\n+\n+ pid_t pid_ = -1;\n+ int sockfd_ = -1;\n+};\n+\nTEST(PtraceTest, AttachSelf) {\nEXPECT_THAT(ptrace(PTRACE_ATTACH, gettid(), 0, 0),\nSyscallFailsWithErrno(EPERM));\n@@ -343,289 +417,128 @@ TEST(PtraceTest, PrctlSetPtracerInvalidPID) {\nEXPECT_THAT(prctl(PR_SET_PTRACER, 123456789), SyscallFailsWithErrno(EINVAL));\n}\n-TEST(PtraceTest, PrctlSetPtracerPID) {\n- SKIP_IF(ASSERT_NO_ERRNO_AND_VALUE(YamaPtraceScope()) != 1);\n-\n- AutoCapability cap(CAP_SYS_PTRACE, false);\n-\n- // Use sockets to synchronize between tracer and tracee.\n- int sockets[2];\n- ASSERT_THAT(socketpair(AF_UNIX, SOCK_STREAM, 0, sockets), SyscallSucceeds());\n-\n- // Allocate vector before forking (not async-signal-safe).\n- ExecveArray const owned_child_argv = {\n- \"/proc/self/exe\", \"--ptrace_test_prctl_set_ptracer_pid\",\n- \"--ptrace_test_fd\", std::to_string(sockets[0])};\n- char* const* const child_argv = owned_child_argv.get();\n-\n- pid_t const tracee_pid = fork();\n- if (tracee_pid == 0) {\n- TEST_PCHECK(close(sockets[1]) == 0);\n- // This test will create a new thread in the child process.\n- // pthread_create(2) isn't async-signal-safe, so we execve() first.\n- execve(child_argv[0], child_argv, /* envp = */ nullptr);\n- TEST_PCHECK_MSG(false, \"Survived execve to test child\");\n+SimpleSubprocess CreatePtraceAttacherSubprocess() {\n+ return SimpleSubprocess(\"--ptrace_test_ptrace_attacher\");\n}\n- ASSERT_THAT(tracee_pid, SyscallSucceeds());\n- ASSERT_THAT(close(sockets[0]), SyscallSucceeds());\n-\n- pid_t const tracer_pid = fork();\n- if (tracer_pid == 0) {\n- // Wait until tracee has called prctl.\n- char done;\n- TEST_PCHECK(read(sockets[1], &done, 1) == 1);\n- MaybeSave();\n- TEST_PCHECK(CheckPtraceAttach(tracee_pid) == 0);\n- _exit(0);\n- }\n- ASSERT_THAT(tracer_pid, SyscallSucceeds());\n-\n- // Clean up tracer.\n- int status;\n- ASSERT_THAT(waitpid(tracer_pid, &status, 0), SyscallSucceeds());\n- EXPECT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) == 0);\n-\n- // Clean up tracee.\n- ASSERT_THAT(kill(tracee_pid, SIGKILL), SyscallSucceeds());\n- ASSERT_THAT(waitpid(tracee_pid, &status, 0),\n- SyscallSucceedsWithValue(tracee_pid));\n- EXPECT_TRUE(WIFSIGNALED(status) && WTERMSIG(status) == SIGKILL)\n- << \" status \" << status;\n-}\n-\n-[[noreturn]] void RunPrctlSetPtracerPID(int fd) 
{\n- ScopedThread t([fd] {\n- // Perform prctl in a separate thread to verify that it is process-wide.\n- TEST_PCHECK(prctl(PR_SET_PTRACER, getppid()) == 0);\n- MaybeSave();\n- // Indicate that the prctl has been set.\n- TEST_PCHECK(write(fd, \"x\", 1) == 1);\n- MaybeSave();\n- });\n+[[noreturn]] static void RunPtraceAttacher(int sockfd) {\n+ // execve() may have restored CAP_SYS_PTRACE if we had real UID 0.\n+ TEST_CHECK(SetCapability(CAP_SYS_PTRACE, false).ok());\n+ // Perform PTRACE_ATTACH in a separate thread to verify that permissions\n+ // apply process-wide.\n+ ScopedThread t([&] {\nwhile (true) {\n- SleepSafe(absl::Seconds(1));\n- }\n+ pid_t pid;\n+ int rv = read(sockfd, &pid, sizeof(pid));\n+ if (rv == 0) {\n+ _exit(0);\n}\n-\n-TEST(PtraceTest, PrctlSetPtracerAny) {\n- SKIP_IF(ASSERT_NO_ERRNO_AND_VALUE(YamaPtraceScope()) != 1);\n- AutoCapability cap(CAP_SYS_PTRACE, false);\n-\n- // Use sockets to synchronize between tracer and tracee.\n- int sockets[2];\n- ASSERT_THAT(socketpair(AF_UNIX, SOCK_STREAM, 0, sockets), SyscallSucceeds());\n-\n- // Allocate vector before forking (not async-signal-safe).\n- ExecveArray const owned_child_argv = {\n- \"/proc/self/exe\", \"--ptrace_test_prctl_set_ptracer_any\",\n- \"--ptrace_test_fd\", std::to_string(sockets[0])};\n- char* const* const child_argv = owned_child_argv.get();\n-\n- pid_t const tracee_pid = fork();\n- if (tracee_pid == 0) {\n- // This test will create a new thread in the child process.\n- // pthread_create(2) isn't async-signal-safe, so we execve() first.\n- TEST_PCHECK(close(sockets[1]) == 0);\n- execve(child_argv[0], child_argv, /* envp = */ nullptr);\n- TEST_PCHECK_MSG(false, \"Survived execve to test child\");\n+ if (rv < 0) {\n+ _exit(1);\n}\n- ASSERT_THAT(tracee_pid, SyscallSucceeds());\n- ASSERT_THAT(close(sockets[0]), SyscallSucceeds());\n-\n- pid_t const tracer_pid = fork();\n- if (tracer_pid == 0) {\n- // Wait until tracee has called prctl.\n- char done;\n- TEST_PCHECK(read(sockets[1], &done, 1) == 1);\n- MaybeSave();\n-\n- TEST_PCHECK(CheckPtraceAttach(tracee_pid) == 0);\n- _exit(0);\n+ int resp_errno = 0;\n+ if (CheckPtraceAttach(pid) < 0) {\n+ resp_errno = errno;\n}\n- ASSERT_THAT(tracer_pid, SyscallSucceeds());\n-\n- // Clean up tracer.\n- int status;\n- ASSERT_THAT(waitpid(tracer_pid, &status, 0), SyscallSucceeds());\n- EXPECT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) == 0)\n- << \" status \" << status;\n-\n- // Clean up tracee.\n- ASSERT_THAT(kill(tracee_pid, SIGKILL), SyscallSucceeds());\n- ASSERT_THAT(waitpid(tracee_pid, &status, 0),\n- SyscallSucceedsWithValue(tracee_pid));\n- EXPECT_TRUE(WIFSIGNALED(status) && WTERMSIG(status) == SIGKILL)\n- << \" status \" << status;\n+ TEST_PCHECK(write(sockfd, &resp_errno, sizeof(resp_errno)) ==\n+ sizeof(resp_errno));\n}\n-\n-[[noreturn]] void RunPrctlSetPtracerAny(int fd) {\n- ScopedThread t([fd] {\n- // Perform prctl in a separate thread to verify that it is process-wide.\n- TEST_PCHECK(prctl(PR_SET_PTRACER, PR_SET_PTRACER_ANY) == 0);\n- MaybeSave();\n- // Indicate that the prctl has been set.\n- TEST_PCHECK(write(fd, \"x\", 1) == 1);\n- MaybeSave();\n});\nwhile (true) {\nSleepSafe(absl::Seconds(1));\n}\n}\n-TEST(PtraceTest, PrctlClearPtracer) {\n- SKIP_IF(ASSERT_NO_ERRNO_AND_VALUE(YamaPtraceScope()) != 1);\n- AutoCapability cap(CAP_SYS_PTRACE, false);\n-\n- // Use sockets to synchronize between tracer and tracee.\n- int sockets[2];\n- ASSERT_THAT(socketpair(AF_UNIX, SOCK_STREAM, 0, sockets), SyscallSucceeds());\n-\n- // Allocate vector before forking (not 
async-signal-safe).\n- ExecveArray const owned_child_argv = {\n- \"/proc/self/exe\", \"--ptrace_test_prctl_clear_ptracer\", \"--ptrace_test_fd\",\n- std::to_string(sockets[0])};\n- char* const* const child_argv = owned_child_argv.get();\n-\n- pid_t const tracee_pid = fork();\n- if (tracee_pid == 0) {\n- // This test will create a new thread in the child process.\n- // pthread_create(2) isn't async-signal-safe, so we execve() first.\n- TEST_PCHECK(close(sockets[1]) == 0);\n- execve(child_argv[0], child_argv, /* envp = */ nullptr);\n- TEST_PCHECK_MSG(false, \"Survived execve to test child\");\n+SimpleSubprocess CreatePrctlSetPtracerSubprocess() {\n+ return SimpleSubprocess(\"--ptrace_test_prctl_set_ptracer\");\n}\n- ASSERT_THAT(tracee_pid, SyscallSucceeds());\n- ASSERT_THAT(close(sockets[0]), SyscallSucceeds());\n- pid_t const tracer_pid = fork();\n- if (tracer_pid == 0) {\n- // Wait until tracee has called prctl.\n- char done;\n- TEST_PCHECK(read(sockets[1], &done, 1) == 1);\n- MaybeSave();\n-\n- TEST_CHECK(CheckPtraceAttach(tracee_pid) == -1);\n- TEST_PCHECK(errno == EPERM);\n+[[noreturn]] static void RunPrctlSetPtracer(int sockfd) {\n+ // Perform prctl in a separate thread to verify that it applies\n+ // process-wide.\n+ ScopedThread t([&] {\n+ while (true) {\n+ pid_t pid;\n+ int rv = read(sockfd, &pid, sizeof(pid));\n+ if (rv == 0) {\n_exit(0);\n}\n- ASSERT_THAT(tracer_pid, SyscallSucceeds());\n-\n- // Clean up tracer.\n- int status;\n- ASSERT_THAT(waitpid(tracer_pid, &status, 0), SyscallSucceeds());\n- EXPECT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) == 0)\n- << \" status \" << status;\n-\n- // Clean up tracee.\n- ASSERT_THAT(kill(tracee_pid, SIGKILL), SyscallSucceeds());\n- ASSERT_THAT(waitpid(tracee_pid, &status, 0),\n- SyscallSucceedsWithValue(tracee_pid));\n- EXPECT_TRUE(WIFSIGNALED(status) && WTERMSIG(status) == SIGKILL)\n- << \" status \" << status;\n+ if (rv < 0) {\n+ _exit(1);\n+ }\n+ int resp_errno = 0;\n+ if (prctl(PR_SET_PTRACER, pid) < 0) {\n+ resp_errno = errno;\n+ }\n+ TEST_PCHECK(write(sockfd, &resp_errno, sizeof(resp_errno)) ==\n+ sizeof(resp_errno));\n}\n-\n-[[noreturn]] void RunPrctlClearPtracer(int fd) {\n- ScopedThread t([fd] {\n- // Perform prctl in a separate thread to verify that it is process-wide.\n- TEST_PCHECK(prctl(PR_SET_PTRACER, PR_SET_PTRACER_ANY) == 0);\n- MaybeSave();\n- TEST_PCHECK(prctl(PR_SET_PTRACER, 0) == 0);\n- MaybeSave();\n- // Indicate that the prctl has been set/cleared.\n- TEST_PCHECK(write(fd, \"x\", 1) == 1);\n- MaybeSave();\n});\nwhile (true) {\nSleepSafe(absl::Seconds(1));\n}\n}\n-TEST(PtraceTest, PrctlReplacePtracer) {\n+TEST(PtraceTest, PrctlSetPtracer) {\nSKIP_IF(ASSERT_NO_ERRNO_AND_VALUE(YamaPtraceScope()) != 1);\n+\nAutoCapability cap(CAP_SYS_PTRACE, false);\n- pid_t const unused_pid = fork();\n- if (unused_pid == 0) {\n- while (true) {\n- SleepSafe(absl::Seconds(1));\n- }\n- }\n- ASSERT_THAT(unused_pid, SyscallSucceeds());\n+ // Ensure that initially, no tracer exception is set.\n+ ASSERT_THAT(prctl(PR_SET_PTRACER, 0), SyscallSucceeds());\n- // Use sockets to synchronize between tracer and tracee.\n- int sockets[2];\n- ASSERT_THAT(socketpair(AF_UNIX, SOCK_STREAM, 0, sockets), SyscallSucceeds());\n+ SimpleSubprocess tracee = CreatePrctlSetPtracerSubprocess();\n+ SimpleSubprocess tracer = CreatePtraceAttacherSubprocess();\n- // Allocate vector before forking (not async-signal-safe).\n- ExecveArray const owned_child_argv = {\n- \"/proc/self/exe\",\n- \"--ptrace_test_prctl_replace_ptracer\",\n- 
\"--ptrace_test_prctl_replace_ptracer_tid\",\n- std::to_string(unused_pid),\n- \"--ptrace_test_fd\",\n- std::to_string(sockets[0])};\n- char* const* const child_argv = owned_child_argv.get();\n+ // By default, Yama should prevent tracer from tracing its parent (this\n+ // process) or siblings (tracee).\n+ EXPECT_THAT(tracer.Cmd(gettid()), PosixErrorIs(EPERM));\n+ EXPECT_THAT(tracer.Cmd(tracee.pid()), PosixErrorIs(EPERM));\n- pid_t const tracee_pid = fork();\n- if (tracee_pid == 0) {\n- TEST_PCHECK(close(sockets[1]) == 0);\n- // This test will create a new thread in the child process.\n- // pthread_create(2) isn't async-signal-safe, so we execve() first.\n- execve(child_argv[0], child_argv, /* envp = */ nullptr);\n- TEST_PCHECK_MSG(false, \"Survived execve to test child\");\n- }\n- ASSERT_THAT(tracee_pid, SyscallSucceeds());\n- ASSERT_THAT(close(sockets[0]), SyscallSucceeds());\n+ // If tracee invokes PR_SET_PTRACER on either tracer's pid, the pid of any of\n+ // its ancestors (i.e. us), or PR_SET_PTRACER_ANY, then tracer can trace it\n+ // (but not us).\n- pid_t const tracer_pid = fork();\n- if (tracer_pid == 0) {\n- // Wait until tracee has called prctl.\n- char done;\n- TEST_PCHECK(read(sockets[1], &done, 1) == 1);\n- MaybeSave();\n+ ASSERT_THAT(tracee.Cmd(tracer.pid()), PosixErrorIs(0));\n+ EXPECT_THAT(tracer.Cmd(tracee.pid()), PosixErrorIs(0));\n+ EXPECT_THAT(tracer.Cmd(gettid()), PosixErrorIs(EPERM));\n- TEST_CHECK(CheckPtraceAttach(tracee_pid) == -1);\n- TEST_PCHECK(errno == EPERM);\n- _exit(0);\n- }\n- ASSERT_THAT(tracer_pid, SyscallSucceeds());\n+ ASSERT_THAT(tracee.Cmd(gettid()), PosixErrorIs(0));\n+ EXPECT_THAT(tracer.Cmd(tracee.pid()), PosixErrorIs(0));\n+ EXPECT_THAT(tracer.Cmd(gettid()), PosixErrorIs(EPERM));\n- // Clean up tracer.\n- int status;\n- ASSERT_THAT(waitpid(tracer_pid, &status, 0), SyscallSucceeds());\n- EXPECT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) == 0)\n- << \" status \" << status;\n+ ASSERT_THAT(tracee.Cmd(static_cast<pid_t>(PR_SET_PTRACER_ANY)),\n+ PosixErrorIs(0));\n+ EXPECT_THAT(tracer.Cmd(tracee.pid()), PosixErrorIs(0));\n+ EXPECT_THAT(tracer.Cmd(gettid()), PosixErrorIs(EPERM));\n- // Clean up tracee.\n- ASSERT_THAT(kill(tracee_pid, SIGKILL), SyscallSucceeds());\n- ASSERT_THAT(waitpid(tracee_pid, &status, 0),\n- SyscallSucceedsWithValue(tracee_pid));\n- EXPECT_TRUE(WIFSIGNALED(status) && WTERMSIG(status) == SIGKILL)\n- << \" status \" << status;\n+ // If tracee invokes PR_SET_PTRACER with pid 0, then tracer can no longer\n+ // trace it.\n+ ASSERT_THAT(tracee.Cmd(0), PosixErrorIs(0));\n+ EXPECT_THAT(tracer.Cmd(tracee.pid()), PosixErrorIs(EPERM));\n- // Clean up unused.\n- ASSERT_THAT(kill(unused_pid, SIGKILL), SyscallSucceeds());\n- ASSERT_THAT(waitpid(unused_pid, &status, 0),\n- SyscallSucceedsWithValue(unused_pid));\n- EXPECT_TRUE(WIFSIGNALED(status) && WTERMSIG(status) == SIGKILL)\n- << \" status \" << status;\n-}\n+ // If we invoke PR_SET_PTRACER with tracer's pid, then it can trace us (but\n+ // not our descendants).\n+ ASSERT_THAT(prctl(PR_SET_PTRACER, tracer.pid()), SyscallSucceeds());\n+ EXPECT_THAT(tracer.Cmd(gettid()), PosixErrorIs(0));\n+ EXPECT_THAT(tracer.Cmd(tracee.pid()), PosixErrorIs(EPERM));\n-[[noreturn]] void RunPrctlReplacePtracer(int new_tracer_pid, int fd) {\n- TEST_PCHECK(prctl(PR_SET_PTRACER, getppid()) == 0);\n- MaybeSave();\n+ // If we invoke PR_SET_PTRACER with pid 0, then tracer can no longer trace us.\n+ ASSERT_THAT(prctl(PR_SET_PTRACER, 0), SyscallSucceeds());\n+ EXPECT_THAT(tracer.Cmd(gettid()), PosixErrorIs(EPERM));\n- 
ScopedThread t([new_tracer_pid, fd] {\n- TEST_PCHECK(prctl(PR_SET_PTRACER, new_tracer_pid) == 0);\n- MaybeSave();\n- // Indicate that the prctl has been set.\n- TEST_PCHECK(write(fd, \"x\", 1) == 1);\n- MaybeSave();\n- });\n- while (true) {\n- SleepSafe(absl::Seconds(1));\n- }\n+ // Another thread in our thread group can invoke PR_SET_PTRACER instead; its\n+ // effect applies to the whole thread group.\n+ pid_t const our_tid = gettid();\n+ ScopedThread([&] {\n+ ASSERT_THAT(prctl(PR_SET_PTRACER, tracer.pid()), SyscallSucceeds());\n+ EXPECT_THAT(tracer.Cmd(gettid()), PosixErrorIs(0));\n+ EXPECT_THAT(tracer.Cmd(our_tid), PosixErrorIs(0));\n+\n+ ASSERT_THAT(prctl(PR_SET_PTRACER, 0), SyscallSucceeds());\n+ EXPECT_THAT(tracer.Cmd(gettid()), PosixErrorIs(EPERM));\n+ EXPECT_THAT(tracer.Cmd(our_tid), PosixErrorIs(EPERM));\n+ }).Join();\n}\n// Tests that YAMA exceptions store tracees by thread group leader. Exceptions\n@@ -2342,21 +2255,12 @@ int main(int argc, char** argv) {\ngvisor::testing::RunTraceDescendantsAllowed(fd);\n}\n- if (absl::GetFlag(FLAGS_ptrace_test_prctl_set_ptracer_pid)) {\n- gvisor::testing::RunPrctlSetPtracerPID(fd);\n- }\n-\n- if (absl::GetFlag(FLAGS_ptrace_test_prctl_set_ptracer_any)) {\n- gvisor::testing::RunPrctlSetPtracerAny(fd);\n- }\n-\n- if (absl::GetFlag(FLAGS_ptrace_test_prctl_clear_ptracer)) {\n- gvisor::testing::RunPrctlClearPtracer(fd);\n+ if (absl::GetFlag(FLAGS_ptrace_test_ptrace_attacher)) {\n+ gvisor::testing::RunPtraceAttacher(fd);\n}\n- if (absl::GetFlag(FLAGS_ptrace_test_prctl_replace_ptracer)) {\n- gvisor::testing::RunPrctlReplacePtracer(\n- absl::GetFlag(FLAGS_ptrace_test_prctl_replace_ptracer_tid), fd);\n+ if (absl::GetFlag(FLAGS_ptrace_test_prctl_set_ptracer)) {\n+ gvisor::testing::RunPrctlSetPtracer(fd);\n}\nif (absl::GetFlag(\n" }, { "change_type": "MODIFY", "old_path": "test/util/posix_error.h", "new_path": "test/util/posix_error.h", "diff": "@@ -385,7 +385,7 @@ class PosixErrorIsMatcher {\n};\n// Returns a gMock matcher that matches a PosixError or PosixErrorOr<> whose\n-// whose error code matches code_matcher, and whose error message matches\n+// error code matches code_matcher, and whose error message matches\n// message_matcher.\ntemplate <typename ErrorCodeMatcher>\nPosixErrorIsMatcher PosixErrorIs(\n@@ -395,6 +395,14 @@ PosixErrorIsMatcher PosixErrorIs(\nstd::move(message_matcher));\n}\n+// Returns a gMock matcher that matches a PosixError or PosixErrorOr<> whose\n+// error code matches code_matcher.\n+template <typename ErrorCodeMatcher>\n+PosixErrorIsMatcher PosixErrorIs(ErrorCodeMatcher&& code_matcher) {\n+ return PosixErrorIsMatcher(std::forward<ErrorCodeMatcher>(code_matcher),\n+ ::testing::_);\n+}\n+\n// Returns a gMock matcher that matches a PosixErrorOr<> which is ok() and\n// value matches the inner matcher.\ntemplate <typename InnerMatcher>\n" } ]
Go
Apache License 2.0
google/gvisor
Fix PR_SET_PTRACER applicability to non-leader threads. Compare if (!thread_group_leader(tracee)) tracee = rcu_dereference(tracee->group_leader); in security/yama/yama_lsm.c:ptracer_exception_found(). PiperOrigin-RevId: 381074242
259,885
23.06.2021 13:15:44
25,200
dfa4b3b90861a61653d0bef8f144bb4e82d21d78
Move flipcall.packetWindowMmap to memutil.
[ { "change_type": "MODIFY", "old_path": "pkg/flipcall/BUILD", "new_path": "pkg/flipcall/BUILD", "diff": "@@ -10,9 +10,7 @@ go_library(\n\"flipcall_unsafe.go\",\n\"futex_linux.go\",\n\"io.go\",\n- \"packet_window_allocator.go\",\n- \"packet_window_mmap_amd64.go\",\n- \"packet_window_mmap_arm64.go\",\n+ \"packet_window.go\",\n],\nvisibility = [\"//visibility:public\"],\ndeps = [\n" }, { "change_type": "MODIFY", "old_path": "pkg/flipcall/flipcall.go", "new_path": "pkg/flipcall/flipcall.go", "diff": "@@ -22,6 +22,7 @@ import (\n\"sync/atomic\"\n\"golang.org/x/sys/unix\"\n+ \"gvisor.dev/gvisor/pkg/memutil\"\n)\n// An Endpoint provides the ability to synchronously transfer data and control\n@@ -96,9 +97,9 @@ func (ep *Endpoint) Init(side EndpointSide, pwd PacketWindowDescriptor, opts ...\nif pwd.Length > math.MaxUint32 {\nreturn fmt.Errorf(\"packet window size (%d) exceeds maximum (%d)\", pwd.Length, math.MaxUint32)\n}\n- m, e := packetWindowMmap(pwd)\n- if e != 0 {\n- return fmt.Errorf(\"failed to mmap packet window: %v\", e)\n+ m, err := memutil.MapFile(0, uintptr(pwd.Length), unix.PROT_READ|unix.PROT_WRITE, unix.MAP_SHARED, uintptr(pwd.FD), uintptr(pwd.Offset))\n+ if err != nil {\n+ return fmt.Errorf(\"failed to mmap packet window: %v\", err)\n}\nep.packet = m\nep.dataCap = uint32(pwd.Length) - uint32(PacketHeaderBytes)\n" }, { "change_type": "RENAME", "old_path": "pkg/flipcall/packet_window_allocator.go", "new_path": "pkg/flipcall/packet_window.go", "diff": "" }, { "change_type": "MODIFY", "old_path": "pkg/memutil/BUILD", "new_path": "pkg/memutil/BUILD", "diff": "@@ -4,7 +4,11 @@ package(licenses = [\"notice\"])\ngo_library(\nname = \"memutil\",\n- srcs = [\"memutil_unsafe.go\"],\n+ srcs = [\n+ \"memfd_linux_unsafe.go\",\n+ \"memutil.go\",\n+ \"mmap.go\",\n+ ],\nvisibility = [\"//visibility:public\"],\ndeps = [\"@org_golang_x_sys//unix:go_default_library\"],\n)\n" }, { "change_type": "RENAME", "old_path": "pkg/memutil/memutil_unsafe.go", "new_path": "pkg/memutil/memfd_linux_unsafe.go", "diff": "// +build linux\n-// Package memutil provides a wrapper for the memfd_create() system call.\npackage memutil\nimport (\n" }, { "change_type": "RENAME", "old_path": "pkg/flipcall/packet_window_mmap_arm64.go", "new_path": "pkg/memutil/memutil.go", "diff": "-// Copyright 2020 The gVisor Authors.\n+// Copyright 2018 The gVisor Authors.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n-// +build arm64\n-\n-package flipcall\n-\n-import \"golang.org/x/sys/unix\"\n-\n-// Return a memory mapping of the pwd in memory that can be shared outside the sandbox.\n-func packetWindowMmap(pwd PacketWindowDescriptor) (uintptr, unix.Errno) {\n- m, _, err := unix.RawSyscall6(unix.SYS_MMAP, 0, uintptr(pwd.Length), unix.PROT_READ|unix.PROT_WRITE, unix.MAP_SHARED, uintptr(pwd.FD), uintptr(pwd.Offset))\n- return m, err\n-}\n+// Package memutil provides utilities for working with shared memory files.\n+package memutil\n" }, { "change_type": "RENAME", "old_path": "pkg/flipcall/packet_window_mmap_amd64.go", "new_path": "pkg/memutil/mmap.go", "diff": "// See the License for the specific language governing permissions and\n// limitations under the License.\n-package flipcall\n+package memutil\n-import \"golang.org/x/sys/unix\"\n+import (\n+ \"golang.org/x/sys/unix\"\n+)\n-// Return a memory mapping of the pwd in memory that can be 
shared outside the sandbox.\n-func packetWindowMmap(pwd PacketWindowDescriptor) (uintptr, unix.Errno) {\n- m, _, err := unix.RawSyscall6(unix.SYS_MMAP, 0, uintptr(pwd.Length), unix.PROT_READ|unix.PROT_WRITE, unix.MAP_SHARED, uintptr(pwd.FD), uintptr(pwd.Offset))\n- return m, err\n+// MapFile returns a memory mapping configured by the given options as per\n+// mmap(2).\n+func MapFile(addr, len, prot, flags, fd, offset uintptr) (uintptr, error) {\n+ m, _, e := unix.RawSyscall6(unix.SYS_MMAP, addr, len, prot, flags, fd, offset)\n+ if e != 0 {\n+ return 0, e\n+ }\n+ return m, nil\n}\n" } ]
Go
Apache License 2.0
google/gvisor
Move flipcall.packetWindowMmap to memutil. PiperOrigin-RevId: 381100861
259,885
23.06.2021 17:00:58
25,200
7e0c1d9f1eae5620d38a6434c27442a350828876
Use memutil.MapFile for the memory accounting page.
[ { "change_type": "MODIFY", "old_path": "nogo.yaml", "new_path": "nogo.yaml", "diff": "@@ -177,6 +177,7 @@ analyzers:\n- pkg/sentry/platform/kvm/bluepill_unsafe.go # Special case.\n- pkg/sentry/platform/kvm/machine_unsafe.go # Special case.\n- pkg/sentry/platform/safecopy/safecopy_unsafe.go # Special case.\n+ - pkg/sentry/usage/memory_unsafe.go # Special case.\n- pkg/sentry/vfs/mount_unsafe.go # Special case.\n- pkg/state/decode_unsafe.go # Special case.\nunusedresult:\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/usage/memory.go", "new_path": "pkg/sentry/usage/memory.go", "diff": "@@ -132,7 +132,7 @@ func Init() error {\n// always be the case for a newly mapped page from /dev/shm. If we obtain\n// the shared memory through some other means in the future, we may have to\n// explicitly zero the page.\n- mmap, err := unix.Mmap(int(file.Fd()), 0, int(RTMemoryStatsSize), unix.PROT_READ|unix.PROT_WRITE, unix.MAP_SHARED)\n+ mmap, err := memutil.MapFile(0, RTMemoryStatsSize, unix.PROT_READ|unix.PROT_WRITE, unix.MAP_SHARED, file.Fd(), 0)\nif err != nil {\nreturn fmt.Errorf(\"error mapping usage file: %v\", err)\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/usage/memory_unsafe.go", "new_path": "pkg/sentry/usage/memory_unsafe.go", "diff": "@@ -21,7 +21,7 @@ import (\n// RTMemoryStatsSize is the size of the RTMemoryStats struct.\nvar RTMemoryStatsSize = unsafe.Sizeof(RTMemoryStats{})\n-// RTMemoryStatsPointer casts the address of the byte slice into a RTMemoryStats pointer.\n-func RTMemoryStatsPointer(b []byte) *RTMemoryStats {\n- return (*RTMemoryStats)(unsafe.Pointer(&b[0]))\n+// RTMemoryStatsPointer casts addr to a RTMemoryStats pointer.\n+func RTMemoryStatsPointer(addr uintptr) *RTMemoryStats {\n+ return (*RTMemoryStats)(unsafe.Pointer(addr))\n}\n" } ]
Go
Apache License 2.0
google/gvisor
Use memutil.MapFile for the memory accounting page. PiperOrigin-RevId: 381145216
259,853
24.06.2021 15:33:26
25,200
2e6195ffe0ad452b2137c32d635d890c9d872e8d
CreateProcessGroup has to check whether a target process still exists or not A caller of CreateProcessGroup looks up a thread group without locks, so the target process can exit before CreateProcessGroup will be called. Reported-by:
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/kernel/sessions.go", "new_path": "pkg/sentry/kernel/sessions.go", "diff": "@@ -369,6 +369,11 @@ func (tg *ThreadGroup) CreateProcessGroup() error {\n// Get the ID for this thread in the current namespace.\nid := tg.pidns.tgids[tg]\n+ // Check whether a process still exists or not.\n+ if id == 0 {\n+ return syserror.ESRCH\n+ }\n+\n// Per above, check for a Session leader or existing group.\nfor s := tg.pidns.owner.sessions.Front(); s != nil; s = s.Next() {\nif s.leader.pidns != tg.pidns {\n" } ]
Go
Apache License 2.0
google/gvisor
CreateProcessGroup has to check whether a target process still exists or not A caller of CreateProcessGroup looks up a thread group without locks, so the target process can exit before CreateProcessGroup will be called. Reported-by: [email protected] PiperOrigin-RevId: 381351069
259,896
24.06.2021 16:52:41
25,200
3e46b660b97ab3fc995ac3f838fc7ddf1bd96a1b
Delete sentry metrics /watchdog/{stuck_startup_detected, stuck_tasks_detected} These metrics are replaced with WeirdnessMetric with fields watchdog_stuck_startup and watchdog_stuck_tasks.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/watchdog/watchdog.go", "new_path": "pkg/sentry/watchdog/watchdog.go", "diff": "@@ -77,11 +77,6 @@ var DefaultOpts = Opts{\n// trigger it.\nconst descheduleThreshold = 1 * time.Second\n-var (\n- stuckStartup = metric.MustCreateNewUint64Metric(\"/watchdog/stuck_startup_detected\", true /* sync */, \"Incremented once on startup watchdog timeout\")\n- stuckTasks = metric.MustCreateNewUint64Metric(\"/watchdog/stuck_tasks_detected\", true /* sync */, \"Cumulative count of stuck tasks detected\")\n-)\n-\n// Amount of time to wait before dumping the stack to the log again when the same task(s) remains stuck.\nvar stackDumpSameTaskPeriod = time.Minute\n@@ -242,7 +237,6 @@ func (w *Watchdog) waitForStart() {\nreturn\n}\n- stuckStartup.Increment()\nmetric.WeirdnessMetric.Increment(\"watchdog_stuck_startup\")\nvar buf bytes.Buffer\n@@ -316,7 +310,6 @@ func (w *Watchdog) runTurn() {\n// unless they are surrounded by\n// Task.UninterruptibleSleepStart/Finish.\ntc = &offender{lastUpdateTime: lastUpdateTime}\n- stuckTasks.Increment()\nmetric.WeirdnessMetric.Increment(\"watchdog_stuck_tasks\")\nnewTaskFound = true\n}\n" } ]
Go
Apache License 2.0
google/gvisor
Delete sentry metrics /watchdog/{stuck_startup_detected, stuck_tasks_detected} - These metrics are replaced with WeirdnessMetric with fields watchdog_stuck_startup and watchdog_stuck_tasks. PiperOrigin-RevId: 381365617
259,868
24.06.2021 17:45:51
25,200
4470caec4e2fea10f5d116894ca6b3fc9d78789b
Run `:socket_inet_loopback_isolated_test_linux` tests in a container. This creates new user and network namespaces for all tests in `:socket_inet_loopback_isolated_test_linux`.
[ { "change_type": "MODIFY", "old_path": "test/runner/BUILD", "new_path": "test/runner/BUILD", "diff": "@@ -8,6 +8,7 @@ go_binary(\nsrcs = [\"main.go\"],\ndata = [\n\"//runsc\",\n+ \"//test/runner/setup_container\",\n],\nvisibility = [\"//:sandbox\"],\ndeps = [\n" }, { "change_type": "MODIFY", "old_path": "test/runner/defs.bzl", "new_path": "test/runner/defs.bzl", "diff": "@@ -103,6 +103,8 @@ def _syscall_test(\nif platform == \"native\":\ntags.append(\"nogotsan\")\n+ container = \"container\" in tags\n+\nrunner_args = [\n# Arguments are passed directly to runner binary.\n\"--platform=\" + platform,\n@@ -115,6 +117,7 @@ def _syscall_test(\n\"--fuse=\" + str(fuse),\n\"--strace=\" + str(debug),\n\"--debug=\" + str(debug),\n+ \"--container=\" + str(container),\n]\n# Call the rule above.\n" }, { "change_type": "MODIFY", "old_path": "test/runner/main.go", "new_path": "test/runner/main.go", "diff": "@@ -49,6 +49,8 @@ var (\noverlay = flag.Bool(\"overlay\", false, \"wrap filesystem mounts with writable tmpfs overlay\")\nvfs2 = flag.Bool(\"vfs2\", false, \"enable VFS2\")\nfuse = flag.Bool(\"fuse\", false, \"enable FUSE\")\n+ container = flag.Bool(\"container\", false, \"run tests in their own namespaces (user ns, network ns, etc), pretending to be root\")\n+ setupContainerPath = flag.String(\"setup-container\", \"\", \"path to setup_container binary (for use with --container)\")\nrunscPath = flag.String(\"runsc\", \"\", \"path to runsc binary\")\naddUDSTree = flag.Bool(\"add-uds-tree\", false, \"expose a tree of UDS utilities for use in tests\")\n@@ -105,6 +107,27 @@ func runTestCaseNative(testBin string, tc gtest.TestCase, t *testing.T) {\ncmd.Stderr = os.Stderr\ncmd.SysProcAttr = &unix.SysProcAttr{}\n+ if *container {\n+ // setup_container takes in its target argv as positional arguments.\n+ cmd.Path = *setupContainerPath\n+ cmd.Args = append([]string{cmd.Path}, cmd.Args...)\n+ cmd.SysProcAttr = &unix.SysProcAttr{\n+ Cloneflags: unix.CLONE_NEWUSER | unix.CLONE_NEWNET | unix.CLONE_NEWIPC | unix.CLONE_NEWUTS,\n+ // Set current user/group as root inside the namespace.\n+ UidMappings: []syscall.SysProcIDMap{\n+ {ContainerID: 0, HostID: os.Getuid(), Size: 1},\n+ },\n+ GidMappings: []syscall.SysProcIDMap{\n+ {ContainerID: 0, HostID: os.Getgid(), Size: 1},\n+ },\n+ GidMappingsEnableSetgroups: false,\n+ Credential: &syscall.Credential{\n+ Uid: 0,\n+ Gid: 0,\n+ },\n+ }\n+ }\n+\nif specutils.HasCapabilities(capability.CAP_SYS_ADMIN) {\ncmd.SysProcAttr.Cloneflags |= unix.CLONE_NEWUTS\n}\n@@ -454,6 +477,13 @@ func main() {\n}\n*runscPath = specutils.ExePath\n}\n+ if *container && *setupContainerPath == \"\" {\n+ setupContainer, err := testutil.FindFile(\"test/runner/setup_container/setup_container\")\n+ if err != nil {\n+ fatalf(\"cannot find setup_container: %v\", err)\n+ }\n+ *setupContainerPath = setupContainer\n+ }\n// Make sure stdout and stderr are opened with O_APPEND, otherwise logs\n// from outside the sandbox can (and will) stomp on logs from inside\n" }, { "change_type": "ADD", "old_path": null, "new_path": "test/runner/setup_container/BUILD", "diff": "+# setup_container contains a shim binary that runs within the test container\n+# for syscall tests with container=True.\n+\n+load(\"//tools:defs.bzl\", \"cc_binary\")\n+\n+package(licenses = [\"notice\"])\n+\n+cc_binary(\n+ name = \"setup_container\",\n+ testonly = 1,\n+ srcs = [\"setup_container.cc\"],\n+ visibility = [\"//test/runner:__subpackages__\"],\n+ deps = [\n+ \"//test/syscalls/linux:socket_netlink_util\",\n+ 
\"//test/syscalls/linux:socket_test_util\",\n+ \"//test/util:capability_util\",\n+ \"//test/util:posix_error\",\n+ ],\n+)\n" }, { "change_type": "ADD", "old_path": null, "new_path": "test/runner/setup_container/setup_container.cc", "diff": "+// Copyright 2021 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+#include <linux/capability.h>\n+#include <sys/ioctl.h>\n+#include <unistd.h>\n+\n+#include \"test/syscalls/linux/socket_netlink_util.h\"\n+#include \"test/syscalls/linux/socket_test_util.h\"\n+#include \"test/util/capability_util.h\"\n+#include \"test/util/posix_error.h\"\n+\n+namespace gvisor {\n+namespace testing {\n+\n+// SetupContainer sets up the networking settings in the current container.\n+PosixError SetupContainer() {\n+ const PosixErrorOr<bool> have_net_admin = HaveCapability(CAP_NET_ADMIN);\n+ if (!have_net_admin.ok()) {\n+ std::cerr << \"Cannot determine if we have CAP_NET_ADMIN.\" << std::endl;\n+ return have_net_admin.error();\n+ }\n+ if (have_net_admin.ValueOrDie() && !IsRunningOnGvisor()) {\n+ PosixErrorOr<FileDescriptor> sockfd = Socket(AF_INET, SOCK_DGRAM, 0);\n+ if (!sockfd.ok()) {\n+ std::cerr << \"Cannot open socket.\" << std::endl;\n+ return sockfd.error();\n+ }\n+ int sock = sockfd.ValueOrDie().get();\n+ struct ifreq ifr = {};\n+ strncpy(ifr.ifr_name, \"lo\", IFNAMSIZ);\n+ if (ioctl(sock, SIOCGIFFLAGS, &ifr) == -1) {\n+ std::cerr << \"Cannot get 'lo' flags: \" << strerror(errno) << std::endl;\n+ return PosixError(errno);\n+ }\n+ if ((ifr.ifr_flags & IFF_UP) == 0) {\n+ ifr.ifr_flags |= IFF_UP;\n+ if (ioctl(sock, SIOCSIFFLAGS, &ifr) == -1) {\n+ std::cerr << \"Cannot set 'lo' as UP: \" << strerror(errno) << std::endl;\n+ return PosixError(errno);\n+ }\n+ }\n+ }\n+ return NoError();\n+}\n+\n+} // namespace testing\n+} // namespace gvisor\n+\n+using ::gvisor::testing::SetupContainer;\n+\n+// Binary setup_container initializes the container environment in which tests\n+// with container=True will run, then execs the actual test binary.\n+// Usage:\n+// ./setup_container test_binary [arguments forwarded to test_binary...]\n+int main(int argc, char *argv[], char *envp[]) {\n+ if (!SetupContainer().ok()) {\n+ return 1;\n+ }\n+ if (argc < 2) {\n+ std::cerr << \"Must provide arguments to exec.\" << std::endl;\n+ return 2;\n+ }\n+ if (execve(argv[1], &argv[1], envp) == -1) {\n+ std::cerr << \"execv returned errno \" << errno << std::endl;\n+ return 1;\n+ }\n+}\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/BUILD", "new_path": "test/syscalls/BUILD", "diff": "@@ -648,6 +648,7 @@ syscall_test(\nsyscall_test(\nsize = \"large\",\nshard_count = most_shards,\n+ tags = [\"container\"],\ntest = \"//test/syscalls/linux:socket_inet_loopback_isolated_test\",\n)\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/BUILD", "new_path": "test/syscalls/linux/BUILD", "diff": "@@ -3196,9 +3196,11 @@ cc_binary(\nlinkstatic = 1,\ndeps = [\n\":socket_inet_loopback_test_params\",\n+ 
\":socket_netlink_util\",\n\":socket_test_util\",\ngtest,\n\"//test/util:test_main\",\n+ \"//test/util:test_util\",\n\"@com_google_absl//absl/time\",\n],\n)\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/socket_inet_loopback_isolated.cc", "new_path": "test/syscalls/linux/socket_inet_loopback_isolated.cc", "diff": "#include \"absl/time/time.h\"\n#include \"test/syscalls/linux/socket_inet_loopback_test_params.h\"\n#include \"test/syscalls/linux/socket_test_util.h\"\n+#include \"test/util/test_util.h\"\n// Unit tests in this file will run in their own network namespace.\n" } ]
Go
Apache License 2.0
google/gvisor
Run `:socket_inet_loopback_isolated_test_linux` tests in a container. This creates new user and network namespaces for all tests in `:socket_inet_loopback_isolated_test_linux`. PiperOrigin-RevId: 381374120
260,004
24.06.2021 22:38:14
25,200
1f113b96e68fed452e40855db0cf3efa24b2b9b6
Incrementally update checksum when NAT-ing ...instead of calculating a fresh checksum to avoid re-calculating a checksum on unchanged bytes. Fixes
[ { "change_type": "MODIFY", "old_path": "pkg/tcpip/header/checksum.go", "new_path": "pkg/tcpip/header/checksum.go", "diff": "@@ -18,6 +18,7 @@ package header\nimport (\n\"encoding/binary\"\n+ \"fmt\"\n\"gvisor.dev/gvisor/pkg/tcpip\"\n\"gvisor.dev/gvisor/pkg/tcpip/buffer\"\n@@ -234,3 +235,64 @@ func PseudoHeaderChecksum(protocol tcpip.TransportProtocolNumber, srcAddr tcpip.\nreturn Checksum([]byte{0, uint8(protocol)}, xsum)\n}\n+\n+// checksumUpdate2ByteAlignedUint16 updates a uint16 value in a calculated\n+// checksum.\n+//\n+// The value MUST begin at a 2-byte boundary in the original buffer.\n+func checksumUpdate2ByteAlignedUint16(xsum, old, new uint16) uint16 {\n+ // As per RFC 1071 page 4,\n+ // (4) Incremental Update\n+ //\n+ // ...\n+ //\n+ // To update the checksum, simply add the differences of the\n+ // sixteen bit integers that have been changed. To see why this\n+ // works, observe that every 16-bit integer has an additive inverse\n+ // and that addition is associative. From this it follows that\n+ // given the original value m, the new value m', and the old\n+ // checksum C, the new checksum C' is:\n+ //\n+ // C' = C + (-m) + m' = C + (m' - m)\n+ return ChecksumCombine(xsum, ChecksumCombine(new, ^old))\n+}\n+\n+// checksumUpdate2ByteAlignedAddress updates an address in a calculated\n+// checksum.\n+//\n+// The addresses must have the same length and must contain an even number\n+// of bytes. The address MUST begin at a 2-byte boundary in the original buffer.\n+func checksumUpdate2ByteAlignedAddress(xsum uint16, old, new tcpip.Address) uint16 {\n+ const uint16Bytes = 2\n+\n+ if len(old) != len(new) {\n+ panic(fmt.Sprintf(\"buffer lengths are different; old = %d, new = %d\", len(old), len(new)))\n+ }\n+\n+ if len(old)%uint16Bytes != 0 {\n+ panic(fmt.Sprintf(\"buffer has an odd number of bytes; got = %d\", len(old)))\n+ }\n+\n+ // As per RFC 1071 page 4,\n+ // (4) Incremental Update\n+ //\n+ // ...\n+ //\n+ // To update the checksum, simply add the differences of the\n+ // sixteen bit integers that have been changed. To see why this\n+ // works, observe that every 16-bit integer has an additive inverse\n+ // and that addition is associative. 
From this it follows that\n+ // given the original value m, the new value m', and the old\n+ // checksum C, the new checksum C' is:\n+ //\n+ // C' = C + (-m) + m' = C + (m' - m)\n+ for len(old) != 0 {\n+ // Convert the 2 byte sequences to uint16 values then apply the increment\n+ // update.\n+ xsum = checksumUpdate2ByteAlignedUint16(xsum, (uint16(old[0])<<8)+uint16(old[1]), (uint16(new[0])<<8)+uint16(new[1]))\n+ old = old[uint16Bytes:]\n+ new = new[uint16Bytes:]\n+ }\n+\n+ return xsum\n+}\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/header/checksum_test.go", "new_path": "pkg/tcpip/header/checksum_test.go", "diff": "@@ -23,6 +23,7 @@ import (\n\"sync\"\n\"testing\"\n+ \"gvisor.dev/gvisor/pkg/tcpip\"\n\"gvisor.dev/gvisor/pkg/tcpip/buffer\"\n\"gvisor.dev/gvisor/pkg/tcpip/header\"\n)\n@@ -256,3 +257,205 @@ func TestICMPv6Checksum(t *testing.T) {\n})\n}, want, fmt.Sprintf(\"header: {% x} data {% x}\", h, vv.ToView()))\n}\n+\n+func randomAddress(size int) tcpip.Address {\n+ s := make([]byte, size)\n+ for i := 0; i < size; i++ {\n+ s[i] = byte(rand.Uint32())\n+ }\n+ return tcpip.Address(s)\n+}\n+\n+func TestChecksummableNetworkUpdateAddress(t *testing.T) {\n+ tests := []struct {\n+ name string\n+ update func(header.IPv4, tcpip.Address)\n+ }{\n+ {\n+ name: \"SetSourceAddressWithChecksumUpdate\",\n+ update: header.IPv4.SetSourceAddressWithChecksumUpdate,\n+ },\n+ {\n+ name: \"SetDestinationAddressWithChecksumUpdate\",\n+ update: header.IPv4.SetDestinationAddressWithChecksumUpdate,\n+ },\n+ }\n+\n+ for _, test := range tests {\n+ t.Run(test.name, func(t *testing.T) {\n+ for i := 0; i < 1000; i++ {\n+ var origBytes [header.IPv4MinimumSize]byte\n+ header.IPv4(origBytes[:]).Encode(&header.IPv4Fields{\n+ TOS: 1,\n+ TotalLength: header.IPv4MinimumSize,\n+ ID: 2,\n+ Flags: 3,\n+ FragmentOffset: 4,\n+ TTL: 5,\n+ Protocol: 6,\n+ Checksum: 0,\n+ SrcAddr: randomAddress(header.IPv4AddressSize),\n+ DstAddr: randomAddress(header.IPv4AddressSize),\n+ })\n+\n+ addr := randomAddress(header.IPv4AddressSize)\n+\n+ bytesCopy := origBytes\n+ h := header.IPv4(bytesCopy[:])\n+ origXSum := h.CalculateChecksum()\n+ h.SetChecksum(^origXSum)\n+\n+ test.update(h, addr)\n+ got := ^h.Checksum()\n+ h.SetChecksum(0)\n+ want := h.CalculateChecksum()\n+ if got != want {\n+ t.Errorf(\"got h.Checksum() = 0x%x, want = 0x%x; originalBytes = 0x%x, new addr = %s\", got, want, origBytes, addr)\n+ }\n+ }\n+ })\n+ }\n+}\n+\n+func TestChecksummableTransportUpdatePort(t *testing.T) {\n+ // The fields in the pseudo header is not tested here so we just use 0.\n+ const pseudoHeaderXSum = 0\n+\n+ tests := []struct {\n+ name string\n+ transportHdr func(_, _ uint16) (header.ChecksummableTransport, func(uint16) uint16)\n+ proto tcpip.TransportProtocolNumber\n+ }{\n+ {\n+ name: \"TCP\",\n+ transportHdr: func(src, dst uint16) (header.ChecksummableTransport, func(uint16) uint16) {\n+ h := header.TCP(make([]byte, header.TCPMinimumSize))\n+ h.Encode(&header.TCPFields{\n+ SrcPort: src,\n+ DstPort: dst,\n+ SeqNum: 1,\n+ AckNum: 2,\n+ DataOffset: header.TCPMinimumSize,\n+ Flags: 3,\n+ WindowSize: 4,\n+ Checksum: 0,\n+ UrgentPointer: 5,\n+ })\n+ h.SetChecksum(^h.CalculateChecksum(pseudoHeaderXSum))\n+ return h, h.CalculateChecksum\n+ },\n+ proto: header.TCPProtocolNumber,\n+ },\n+ {\n+ name: \"UDP\",\n+ transportHdr: func(src, dst uint16) (header.ChecksummableTransport, func(uint16) uint16) {\n+ h := header.UDP(make([]byte, header.UDPMinimumSize))\n+ h.Encode(&header.UDPFields{\n+ SrcPort: src,\n+ DstPort: dst,\n+ Length: 0,\n+ Checksum: 0,\n+ 
})\n+ h.SetChecksum(^h.CalculateChecksum(pseudoHeaderXSum))\n+ return h, h.CalculateChecksum\n+ },\n+ proto: header.UDPProtocolNumber,\n+ },\n+ }\n+\n+ for i := 0; i < 1000; i++ {\n+ origSrcPort := uint16(rand.Uint32())\n+ origDstPort := uint16(rand.Uint32())\n+ newPort := uint16(rand.Uint32())\n+\n+ t.Run(fmt.Sprintf(\"OrigSrcPort=%d,OrigDstPort=%d,NewPort=%d\", origSrcPort, origDstPort, newPort), func(*testing.T) {\n+ for _, test := range tests {\n+ t.Run(test.name, func(t *testing.T) {\n+ for _, subTest := range []struct {\n+ name string\n+ update func(header.ChecksummableTransport)\n+ }{\n+ {\n+ name: \"Source port\",\n+ update: func(h header.ChecksummableTransport) { h.SetSourcePortWithChecksumUpdate(newPort) },\n+ },\n+ {\n+ name: \"Destination port\",\n+ update: func(h header.ChecksummableTransport) { h.SetDestinationPortWithChecksumUpdate(newPort) },\n+ },\n+ } {\n+ t.Run(subTest.name, func(t *testing.T) {\n+ h, calcXSum := test.transportHdr(origSrcPort, origDstPort)\n+ subTest.update(h)\n+ // TCP and UDP hold the 1s complement of the fully calculated\n+ // checksum.\n+ got := ^h.Checksum()\n+ h.SetChecksum(0)\n+\n+ if want := calcXSum(pseudoHeaderXSum); got != want {\n+ h, _ := test.transportHdr(origSrcPort, origDstPort)\n+ t.Errorf(\"got Checksum() = 0x%x, want = 0x%x; originalBytes = %#v, new port = %d\", got, want, h, newPort)\n+ }\n+ })\n+ }\n+ })\n+ }\n+ })\n+ }\n+}\n+\n+func TestChecksummableTransportUpdatePseudoHeaderAddress(t *testing.T) {\n+ const addressSize = 6\n+\n+ tests := []struct {\n+ name string\n+ transportHdr func() header.ChecksummableTransport\n+ proto tcpip.TransportProtocolNumber\n+ }{\n+ {\n+ name: \"TCP\",\n+ transportHdr: func() header.ChecksummableTransport { return header.TCP(make([]byte, header.TCPMinimumSize)) },\n+ proto: header.TCPProtocolNumber,\n+ },\n+ {\n+ name: \"UDP\",\n+ transportHdr: func() header.ChecksummableTransport { return header.UDP(make([]byte, header.UDPMinimumSize)) },\n+ proto: header.UDPProtocolNumber,\n+ },\n+ }\n+\n+ for i := 0; i < 1000; i++ {\n+ permanent := randomAddress(addressSize)\n+ old := randomAddress(addressSize)\n+ new := randomAddress(addressSize)\n+\n+ t.Run(fmt.Sprintf(\"Permanent=%q,Old=%q,New=%q\", permanent, old, new), func(t *testing.T) {\n+ for _, test := range tests {\n+ t.Run(test.name, func(t *testing.T) {\n+ for _, fullChecksum := range []bool{true, false} {\n+ t.Run(fmt.Sprintf(\"FullChecksum=%t\", fullChecksum), func(t *testing.T) {\n+ initialXSum := header.PseudoHeaderChecksum(test.proto, permanent, old, 0)\n+ if fullChecksum {\n+ // TCP and UDP hold the 1s complement of the fully calculated\n+ // checksum.\n+ initialXSum = ^initialXSum\n+ }\n+\n+ h := test.transportHdr()\n+ h.SetChecksum(initialXSum)\n+ h.UpdateChecksumPseudoHeaderAddress(old, new, fullChecksum)\n+\n+ got := h.Checksum()\n+ if fullChecksum {\n+ got = ^got\n+ }\n+ if want := header.PseudoHeaderChecksum(test.proto, permanent, new, 0); got != want {\n+ t.Errorf(\"got Checksum() = 0x%x, want = 0x%x; h = %#v\", got, want, h)\n+ }\n+ })\n+ }\n+ })\n+ }\n+ })\n+ }\n+}\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/header/interfaces.go", "new_path": "pkg/tcpip/header/interfaces.go", "diff": "@@ -53,6 +53,31 @@ type Transport interface {\nPayload() []byte\n}\n+// ChecksummableTransport is a Transport that supports checksumming.\n+type ChecksummableTransport interface {\n+ Transport\n+\n+ // SetSourcePortWithChecksumUpdate sets the source port and updates\n+ // the checksum.\n+ //\n+ // The receiver's checksum must be a fully 
calculated checksum.\n+ SetSourcePortWithChecksumUpdate(port uint16)\n+\n+ // SetDestinationPortWithChecksumUpdate sets the destination port and updates\n+ // the checksum.\n+ //\n+ // The receiver's checksum must be a fully calculated checksum.\n+ SetDestinationPortWithChecksumUpdate(port uint16)\n+\n+ // UpdateChecksumPseudoHeaderAddress updates the checksum to reflect an\n+ // updated address in the pseudo header.\n+ //\n+ // If fullChecksum is true, the receiver's checksum field is assumed to hold a\n+ // fully calculated checksum. Otherwise, it is assumed to hold a partially\n+ // calculated checksum which only reflects the pseudo header.\n+ UpdateChecksumPseudoHeaderAddress(old, new tcpip.Address, fullChecksum bool)\n+}\n+\n// Network offers generic methods to query and/or update the fields of the\n// header of a network protocol buffer.\ntype Network interface {\n@@ -90,3 +115,16 @@ type Network interface {\n// SetTOS sets the values of the \"type of service\" and \"flow label\" fields.\nSetTOS(t uint8, l uint32)\n}\n+\n+// ChecksummableNetwork is a Network that supports checksumming.\n+type ChecksummableNetwork interface {\n+ Network\n+\n+ // SetSourceAddressAndChecksum sets the source address and updates the\n+ // checksum to reflect the new address.\n+ SetSourceAddressWithChecksumUpdate(tcpip.Address)\n+\n+ // SetDestinationAddressAndChecksum sets the destination address and\n+ // updates the checksum to reflect the new address.\n+ SetDestinationAddressWithChecksumUpdate(tcpip.Address)\n+}\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/header/ipv4.go", "new_path": "pkg/tcpip/header/ipv4.go", "diff": "@@ -305,6 +305,18 @@ func (b IPv4) DestinationAddress() tcpip.Address {\nreturn tcpip.Address(b[dstAddr : dstAddr+IPv4AddressSize])\n}\n+// SetSourceAddressWithChecksumUpdate implements ChecksummableNetwork.\n+func (b IPv4) SetSourceAddressWithChecksumUpdate(new tcpip.Address) {\n+ b.SetChecksum(^checksumUpdate2ByteAlignedAddress(^b.Checksum(), b.SourceAddress(), new))\n+ b.SetSourceAddress(new)\n+}\n+\n+// SetDestinationAddressWithChecksumUpdate implements ChecksummableNetwork.\n+func (b IPv4) SetDestinationAddressWithChecksumUpdate(new tcpip.Address) {\n+ b.SetChecksum(^checksumUpdate2ByteAlignedAddress(^b.Checksum(), b.DestinationAddress(), new))\n+ b.SetDestinationAddress(new)\n+}\n+\n// padIPv4OptionsLength returns the total length for IPv4 options of length l\n// after applying padding according to RFC 791:\n// The internet header padding is used to ensure that the internet\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/header/tcp.go", "new_path": "pkg/tcpip/header/tcp.go", "diff": "@@ -390,6 +390,35 @@ func (b TCP) EncodePartial(partialChecksum, length uint16, seqnum, acknum uint32\nb.SetChecksum(^checksum)\n}\n+// SetSourcePortWithChecksumUpdate implements ChecksummableTransport.\n+func (b TCP) SetSourcePortWithChecksumUpdate(new uint16) {\n+ old := b.SourcePort()\n+ b.SetSourcePort(new)\n+ b.SetChecksum(^checksumUpdate2ByteAlignedUint16(^b.Checksum(), old, new))\n+}\n+\n+// SetDestinationPortWithChecksumUpdate implements ChecksummableTransport.\n+func (b TCP) SetDestinationPortWithChecksumUpdate(new uint16) {\n+ old := b.DestinationPort()\n+ b.SetDestinationPort(new)\n+ b.SetChecksum(^checksumUpdate2ByteAlignedUint16(^b.Checksum(), old, new))\n+}\n+\n+// UpdateChecksumPseudoHeaderAddress implements ChecksummableTransport.\n+func (b TCP) UpdateChecksumPseudoHeaderAddress(old, new tcpip.Address, fullChecksum bool) {\n+ xsum := b.Checksum()\n+ if fullChecksum 
{\n+ xsum = ^xsum\n+ }\n+\n+ xsum = checksumUpdate2ByteAlignedAddress(xsum, old, new)\n+ if fullChecksum {\n+ xsum = ^xsum\n+ }\n+\n+ b.SetChecksum(xsum)\n+}\n+\n// ParseSynOptions parses the options received in a SYN segment and returns the\n// relevant ones. opts should point to the option part of the TCP header.\nfunc ParseSynOptions(opts []byte, isAck bool) TCPSynOptions {\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/header/udp.go", "new_path": "pkg/tcpip/header/udp.go", "diff": "@@ -130,3 +130,32 @@ func (b UDP) Encode(u *UDPFields) {\nbinary.BigEndian.PutUint16(b[udpLength:], u.Length)\nbinary.BigEndian.PutUint16(b[udpChecksum:], u.Checksum)\n}\n+\n+// SetSourcePortWithChecksumUpdate implements ChecksummableTransport.\n+func (b UDP) SetSourcePortWithChecksumUpdate(new uint16) {\n+ old := b.SourcePort()\n+ b.SetSourcePort(new)\n+ b.SetChecksum(^checksumUpdate2ByteAlignedUint16(^b.Checksum(), old, new))\n+}\n+\n+// SetDestinationPortWithChecksumUpdate implements ChecksummableTransport.\n+func (b UDP) SetDestinationPortWithChecksumUpdate(new uint16) {\n+ old := b.DestinationPort()\n+ b.SetDestinationPort(new)\n+ b.SetChecksum(^checksumUpdate2ByteAlignedUint16(^b.Checksum(), old, new))\n+}\n+\n+// UpdateChecksumPseudoHeaderAddress implements ChecksummableTransport.\n+func (b UDP) UpdateChecksumPseudoHeaderAddress(old, new tcpip.Address, fullChecksum bool) {\n+ xsum := b.Checksum()\n+ if fullChecksum {\n+ xsum = ^xsum\n+ }\n+\n+ xsum = checksumUpdate2ByteAlignedAddress(xsum, old, new)\n+ if fullChecksum {\n+ xsum = ^xsum\n+ }\n+\n+ b.SetChecksum(xsum)\n+}\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/stack/conntrack.go", "new_path": "pkg/tcpip/stack/conntrack.go", "diff": "@@ -405,16 +405,23 @@ func (ct *ConnTrack) handlePacket(pkt *PacketBuffer, hook Hook, r *Route) bool {\n// validated if checksum offloading is off. 
It may require IP defrag if the\n// packets are fragmented.\n+ var newAddr tcpip.Address\n+ var newPort uint16\n+\n+ updateSRCFields := false\n+\nswitch hook {\ncase Prerouting, Output:\nif conn.manip == manipDestination {\nswitch dir {\ncase dirOriginal:\n- tcpHeader.SetDestinationPort(conn.reply.srcPort)\n- netHeader.SetDestinationAddress(conn.reply.srcAddr)\n+ newPort = conn.reply.srcPort\n+ newAddr = conn.reply.srcAddr\ncase dirReply:\n- tcpHeader.SetSourcePort(conn.original.dstPort)\n- netHeader.SetSourceAddress(conn.original.dstAddr)\n+ newPort = conn.original.dstPort\n+ newAddr = conn.original.dstAddr\n+\n+ updateSRCFields = true\n}\npkt.NatDone = true\n}\n@@ -422,11 +429,13 @@ func (ct *ConnTrack) handlePacket(pkt *PacketBuffer, hook Hook, r *Route) bool {\nif conn.manip == manipSource {\nswitch dir {\ncase dirOriginal:\n- tcpHeader.SetSourcePort(conn.reply.dstPort)\n- netHeader.SetSourceAddress(conn.reply.dstAddr)\n+ newPort = conn.reply.dstPort\n+ newAddr = conn.reply.dstAddr\n+\n+ updateSRCFields = true\ncase dirReply:\n- tcpHeader.SetDestinationPort(conn.original.srcPort)\n- netHeader.SetDestinationAddress(conn.original.srcAddr)\n+ newPort = conn.original.srcPort\n+ newAddr = conn.original.srcAddr\n}\npkt.NatDone = true\n}\n@@ -437,29 +446,31 @@ func (ct *ConnTrack) handlePacket(pkt *PacketBuffer, hook Hook, r *Route) bool {\nreturn false\n}\n+ fullChecksum := false\n+ updatePseudoHeader := false\nswitch hook {\ncase Prerouting, Input:\ncase Output, Postrouting:\n// Calculate the TCP checksum and set it.\n- tcpHeader.SetChecksum(0)\n- length := uint16(len(tcpHeader) + pkt.Data().Size())\n- xsum := header.PseudoHeaderChecksum(header.TCPProtocolNumber, netHeader.SourceAddress(), netHeader.DestinationAddress(), length)\nif pkt.GSOOptions.Type != GSONone && pkt.GSOOptions.NeedsCsum {\n- tcpHeader.SetChecksum(xsum)\n+ updatePseudoHeader = true\n} else if r.RequiresTXTransportChecksum() {\n- xsum = header.ChecksumCombine(xsum, pkt.Data().AsRange().Checksum())\n- tcpHeader.SetChecksum(^tcpHeader.CalculateChecksum(xsum))\n+ fullChecksum = true\n+ updatePseudoHeader = true\n}\ndefault:\npanic(fmt.Sprintf(\"unrecognized hook = %s\", hook))\n}\n- // After modification, IPv4 packets need a valid checksum.\n- if pkt.NetworkProtocolNumber == header.IPv4ProtocolNumber {\n- netHeader := header.IPv4(pkt.NetworkHeader().View())\n- netHeader.SetChecksum(0)\n- netHeader.SetChecksum(^netHeader.CalculateChecksum())\n- }\n+ rewritePacket(\n+ netHeader,\n+ tcpHeader,\n+ updateSRCFields,\n+ fullChecksum,\n+ updatePseudoHeader,\n+ newPort,\n+ newAddr,\n+ )\n// Update the state of tcb.\nconn.mu.Lock()\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/stack/iptables_targets.go", "new_path": "pkg/tcpip/stack/iptables_targets.go", "diff": "@@ -133,29 +133,23 @@ func (rt *RedirectTarget) Action(pkt *PacketBuffer, ct *ConnTrack, hook Hook, r\nswitch protocol := pkt.TransportProtocolNumber; protocol {\ncase header.UDPProtocolNumber:\nudpHeader := header.UDP(pkt.TransportHeader().View())\n- udpHeader.SetDestinationPort(rt.Port)\n- // Calculate UDP checksum and set it.\nif hook == Output {\n- udpHeader.SetChecksum(0)\n- netHeader := pkt.Network()\n- netHeader.SetDestinationAddress(address)\n-\n// Only calculate the checksum if offloading isn't supported.\n- if r.RequiresTXTransportChecksum() {\n- length := uint16(pkt.Size()) - uint16(len(pkt.NetworkHeader().View()))\n- xsum := header.PseudoHeaderChecksum(protocol, netHeader.SourceAddress(), netHeader.DestinationAddress(), length)\n- xsum = 
header.ChecksumCombine(xsum, pkt.Data().AsRange().Checksum())\n- udpHeader.SetChecksum(^udpHeader.CalculateChecksum(xsum))\n- }\n+ requiresChecksum := r.RequiresTXTransportChecksum()\n+ rewritePacket(\n+ pkt.Network(),\n+ udpHeader,\n+ false, /* updateSRCFields */\n+ requiresChecksum,\n+ requiresChecksum,\n+ rt.Port,\n+ address,\n+ )\n+ } else {\n+ udpHeader.SetDestinationPort(rt.Port)\n}\n- // After modification, IPv4 packets need a valid checksum.\n- if pkt.NetworkProtocolNumber == header.IPv4ProtocolNumber {\n- netHeader := header.IPv4(pkt.NetworkHeader().View())\n- netHeader.SetChecksum(0)\n- netHeader.SetChecksum(^netHeader.CalculateChecksum())\n- }\npkt.NatDone = true\ncase header.TCPProtocolNumber:\nif ct == nil {\n@@ -214,26 +208,18 @@ func (st *SNATTarget) Action(pkt *PacketBuffer, ct *ConnTrack, hook Hook, r *Rou\nswitch protocol := pkt.TransportProtocolNumber; protocol {\ncase header.UDPProtocolNumber:\n- udpHeader := header.UDP(pkt.TransportHeader().View())\n- udpHeader.SetChecksum(0)\n- udpHeader.SetSourcePort(st.Port)\n- netHeader := pkt.Network()\n- netHeader.SetSourceAddress(st.Addr)\n-\n// Only calculate the checksum if offloading isn't supported.\n- if r.RequiresTXTransportChecksum() {\n- length := uint16(pkt.Size()) - uint16(len(pkt.NetworkHeader().View()))\n- xsum := header.PseudoHeaderChecksum(protocol, netHeader.SourceAddress(), netHeader.DestinationAddress(), length)\n- xsum = header.ChecksumCombine(xsum, pkt.Data().AsRange().Checksum())\n- udpHeader.SetChecksum(^udpHeader.CalculateChecksum(xsum))\n- }\n+ requiresChecksum := r.RequiresTXTransportChecksum()\n+ rewritePacket(\n+ pkt.Network(),\n+ header.UDP(pkt.TransportHeader().View()),\n+ true, /* updateSRCFields */\n+ requiresChecksum,\n+ requiresChecksum,\n+ st.Port,\n+ st.Addr,\n+ )\n- // After modification, IPv4 packets need a valid checksum.\n- if pkt.NetworkProtocolNumber == header.IPv4ProtocolNumber {\n- netHeader := header.IPv4(pkt.NetworkHeader().View())\n- netHeader.SetChecksum(0)\n- netHeader.SetChecksum(^netHeader.CalculateChecksum())\n- }\npkt.NatDone = true\ncase header.TCPProtocolNumber:\nif ct == nil {\n@@ -252,3 +238,42 @@ func (st *SNATTarget) Action(pkt *PacketBuffer, ct *ConnTrack, hook Hook, r *Rou\nreturn RuleAccept, 0\n}\n+\n+func rewritePacket(n header.Network, t header.ChecksummableTransport, updateSRCFields, fullChecksum, updatePseudoHeader bool, newPort uint16, newAddr tcpip.Address) {\n+ if updateSRCFields {\n+ if fullChecksum {\n+ t.SetSourcePortWithChecksumUpdate(newPort)\n+ } else {\n+ t.SetSourcePort(newPort)\n+ }\n+ } else {\n+ if fullChecksum {\n+ t.SetDestinationPortWithChecksumUpdate(newPort)\n+ } else {\n+ t.SetDestinationPort(newPort)\n+ }\n+ }\n+\n+ if updatePseudoHeader {\n+ var oldAddr tcpip.Address\n+ if updateSRCFields {\n+ oldAddr = n.SourceAddress()\n+ } else {\n+ oldAddr = n.DestinationAddress()\n+ }\n+\n+ t.UpdateChecksumPseudoHeaderAddress(oldAddr, newAddr, fullChecksum)\n+ }\n+\n+ if checksummableNetHeader, ok := n.(header.ChecksummableNetwork); ok {\n+ if updateSRCFields {\n+ checksummableNetHeader.SetSourceAddressWithChecksumUpdate(newAddr)\n+ } else {\n+ checksummableNetHeader.SetDestinationAddressWithChecksumUpdate(newAddr)\n+ }\n+ } else if updateSRCFields {\n+ n.SetSourceAddress(newAddr)\n+ } else {\n+ n.SetDestinationAddress(newAddr)\n+ }\n+}\n" } ]
Go
Apache License 2.0
google/gvisor
Incrementally update checksum when NAT-ing ...instead of calculating a fresh checksum to avoid re-calculating a checksum on unchanged bytes. Fixes #5340. PiperOrigin-RevId: 381403888
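The change above relies on incremental, RFC 1624-style one's-complement checksum updates: when a 16-bit word covered by the checksum changes, the stored checksum can be adjusted from the old and new values alone instead of re-summing the whole packet. The sketch below illustrates only that arithmetic; the helper names (onesComplementAdd, checksumUpdate) are invented for the example and are not the gVisor functions referenced in the diff.

package main

import "fmt"

// onesComplementAdd adds two 16-bit values, folding any carry back into the
// low 16 bits (one's-complement addition).
func onesComplementAdd(a, b uint16) uint16 {
	sum := uint32(a) + uint32(b)
	sum = (sum & 0xffff) + (sum >> 16)
	return uint16(sum)
}

// checksumUpdate adjusts an existing, fully calculated checksum field when
// one aligned 16-bit word in the covered data changes from old to new:
// HC' = ~(~HC + ~m + m'), per RFC 1624.
func checksumUpdate(checksum, old, new uint16) uint16 {
	sum := ^checksum                   // recover the one's-complement sum
	sum = onesComplementAdd(sum, ^old) // remove the old word
	sum = onesComplementAdd(sum, new)  // add the new word
	return ^sum                        // re-complement into checksum form
}

func main() {
	// Example: adjust a checksum after rewriting a port from 53 (0x0035)
	// to 9091 (0x2383) without touching any other bytes.
	fmt.Printf("updated checksum: %#04x\n", checksumUpdate(0x1c46, 0x0035, 0x2383))
}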
259,962
25.06.2021 12:23:01
25,200
f00077e8d8d59ee6db93a3ece24c5f3f5156eda5
Remove sndQueue as it's pointless now. sndQueue made sense when the worker goroutine and the syscall context held different locks. Now both lock the endpoint lock before doing anything, which means adding to sndQueue is pointless, as we move it to writeList immediately after that in endpoint.Write() by calling e.drainSendQueue.
[ { "change_type": "MODIFY", "old_path": "pkg/tcpip/stack/tcp.go", "new_path": "pkg/tcpip/stack/tcp.go", "diff": "@@ -380,9 +380,6 @@ type TCPSndBufState struct {\n// SndClosed indicates that the endpoint has been closed for sends.\nSndClosed bool\n- // SndBufInQueue is the number of bytes in the send queue.\n- SndBufInQueue seqnum.Size\n-\n// PacketTooBigCount is used to notify the main protocol routine how\n// many times a \"packet too big\" control packet is received.\nPacketTooBigCount int\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/tcp/connect.go", "new_path": "pkg/tcpip/transport/tcp/connect.go", "diff": "@@ -909,30 +909,13 @@ func (e *endpoint) sendRaw(data buffer.VectorisedView, flags header.TCPFlags, se\nreturn err\n}\n-func (e *endpoint) handleWrite() {\n- e.sndQueueInfo.sndQueueMu.Lock()\n- next := e.drainSendQueueLocked()\n- e.sndQueueInfo.sndQueueMu.Unlock()\n-\n- e.sendData(next)\n-}\n-\n-// Move packets from send queue to send list.\n-//\n-// Precondition: e.sndBufMu must be locked.\n-func (e *endpoint) drainSendQueueLocked() *segment {\n- first := e.sndQueueInfo.sndQueue.Front()\n- if first != nil {\n- e.snd.writeList.PushBackList(&e.sndQueueInfo.sndQueue)\n- e.sndQueueInfo.SndBufInQueue = 0\n- }\n- return first\n-}\n-\n// Precondition: e.mu must be locked.\nfunc (e *endpoint) sendData(next *segment) {\n// Initialize the next segment to write if it's currently nil.\nif e.snd.writeNext == nil {\n+ if next == nil {\n+ return\n+ }\ne.snd.writeNext = next\n}\n@@ -940,17 +923,6 @@ func (e *endpoint) sendData(next *segment) {\ne.snd.sendData()\n}\n-func (e *endpoint) handleClose() {\n- if !e.EndpointState().connected() {\n- return\n- }\n- // Drain the send queue.\n- e.handleWrite()\n-\n- // Mark send side as closed.\n- e.snd.Closed = true\n-}\n-\n// resetConnectionLocked puts the endpoint in an error state with the given\n// error code and sends a RST if and only if the error is not ErrConnectionReset\n// indicating that the connection is being reset due to receiving a RST. 
This\n@@ -1402,14 +1374,7 @@ func (e *endpoint) protocolMainLoop(handshake bool, wakerInitDone chan<- struct{\n{\nw: &e.sndQueueInfo.sndWaker,\nf: func() tcpip.Error {\n- e.handleWrite()\n- return nil\n- },\n- },\n- {\n- w: &e.sndQueueInfo.sndCloseWaker,\n- f: func() tcpip.Error {\n- e.handleClose()\n+ e.sendData(nil /* next */)\nreturn nil\n},\n},\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/tcp/endpoint.go", "new_path": "pkg/tcpip/transport/tcp/endpoint.go", "diff": "@@ -293,16 +293,9 @@ type sndQueueInfo struct {\nsndQueueMu sync.Mutex `state:\"nosave\"`\nstack.TCPSndBufState\n- // sndQueue holds segments that are ready to be sent.\n- sndQueue segmentList `state:\"wait\"`\n-\n- // sndWaker is used to signal the protocol goroutine when segments are\n- // added to the `sndQueue`.\n+ // sndWaker is used to signal the protocol goroutine when there may be\n+ // segments that need to be sent.\nsndWaker sleep.Waker `state:\"manual\"`\n-\n- // sndCloseWaker is used to notify the protocol goroutine when the send\n- // side is closed.\n- sndCloseWaker sleep.Waker `state:\"manual\"`\n}\n// rcvQueueInfo contains the endpoint's rcvQueue and associated metadata.\n@@ -1558,10 +1551,9 @@ func (e *endpoint) Write(p tcpip.Payloader, opts tcpip.WriteOptions) (int64, tcp\n// Add data to the send queue.\ns := newOutgoingSegment(e.TransportEndpointInfo.ID, e.stack.Clock(), v)\ne.sndQueueInfo.SndBufUsed += len(v)\n- e.sndQueueInfo.SndBufInQueue += seqnum.Size(len(v))\n- e.sndQueueInfo.sndQueue.PushBack(s)\n+ e.snd.writeList.PushBack(s)\n- return e.drainSendQueueLocked(), len(v), nil\n+ return s, len(v), nil\n}()\n// Return if either we didn't queue anything or if an error occurred while\n// attempting to queue data.\n@@ -2314,7 +2306,7 @@ func (e *endpoint) connect(addr tcpip.FullAddress, handshake bool, run bool) tcp\n// connection setting here.\nif !handshake {\ne.segmentQueue.mu.Lock()\n- for _, l := range []segmentList{e.segmentQueue.list, e.sndQueueInfo.sndQueue, e.snd.writeList} {\n+ for _, l := range []segmentList{e.segmentQueue.list, e.snd.writeList} {\nfor s := l.Front(); s != nil; s = s.Next() {\ns.id = e.TransportEndpointInfo.ID\ne.sndQueueInfo.sndWaker.Assert()\n@@ -2391,12 +2383,17 @@ func (e *endpoint) shutdownLocked(flags tcpip.ShutdownFlags) tcpip.Error {\n// Queue fin segment.\ns := newOutgoingSegment(e.TransportEndpointInfo.ID, e.stack.Clock(), nil)\n- e.sndQueueInfo.sndQueue.PushBack(s)\n- e.sndQueueInfo.SndBufInQueue++\n+ e.snd.writeList.PushBack(s)\n// Mark endpoint as closed.\ne.sndQueueInfo.SndClosed = true\ne.sndQueueInfo.sndQueueMu.Unlock()\n- e.handleClose()\n+\n+ // Drain the send queue.\n+ e.sendData(s)\n+\n+ // Mark send side as closed.\n+ e.snd.Closed = true\n+\n// Wake up any writers that maybe waiting for the stream to become\n// writable.\ne.waiterQueue.Notify(waiter.WritableEvents)\n" } ]
Go
Apache License 2.0
google/gvisor
Remove sndQueue as it's pointless now. sndQueue made sense when the worker goroutine and the syscall context held different locks. Now both lock the endpoint lock before doing anything, which means adding to sndQueue is pointless, as we move it to writeList immediately after that in endpoint.Write() by calling e.drainSendQueue. PiperOrigin-RevId: 381523177
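A toy version of the reasoning in the message above: once the writer and the sender serialize on a single endpoint lock, an intermediate staging queue buys nothing, and segments can be appended straight to the final write list. The endpoint type here is invented for the illustration and is unrelated to the real tcp.endpoint fields.

package main

import (
	"container/list"
	"fmt"
	"sync"
)

type endpoint struct {
	mu        sync.Mutex
	writeList list.List // segments waiting to be sent
}

// write appends directly to writeList; with one lock guarding both paths,
// nobody can ever observe a separate "pending" queue, so keeping one around
// is pure overhead.
func (e *endpoint) write(seg string) {
	e.mu.Lock()
	defer e.mu.Unlock()
	e.writeList.PushBack(seg)
}

// sendAll drains writeList under the same lock.
func (e *endpoint) sendAll() {
	e.mu.Lock()
	defer e.mu.Unlock()
	for el := e.writeList.Front(); el != nil; el = e.writeList.Front() {
		e.writeList.Remove(el)
		fmt.Println("sending", el.Value)
	}
}

func main() {
	var e endpoint
	e.write("seg-1")
	e.write("seg-2")
	e.sendAll()
}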
259,891
28.06.2021 10:38:34
25,200
27cc5a883a3cc077da884c0d058b69b607ec02f2
check explicitly that AF_PACKET sockets can't write in gVisor
[ { "change_type": "MODIFY", "old_path": "test/syscalls/linux/packet_socket.cc", "new_path": "test/syscalls/linux/packet_socket.cc", "diff": "@@ -231,9 +231,6 @@ TEST_P(CookedPacketTest, Receive) {\n// Send via a packet socket.\nTEST_P(CookedPacketTest, Send) {\n- // We don't implement writing to packet sockets on gVisor.\n- SKIP_IF(IsRunningOnGvisor());\n-\n// Let's send a UDP packet and receive it using a regular UDP socket.\nFileDescriptor udp_sock =\nASSERT_NO_ERRNO_AND_VALUE(Socket(AF_INET, SOCK_DGRAM, 0));\n@@ -287,6 +284,14 @@ TEST_P(CookedPacketTest, Send) {\nmemcpy(send_buf + sizeof(iphdr), &udphdr, sizeof(udphdr));\nmemcpy(send_buf + sizeof(iphdr) + sizeof(udphdr), kMessage, sizeof(kMessage));\n+ // We don't implement writing to packet sockets on gVisor.\n+ if (IsRunningOnGvisor()) {\n+ ASSERT_THAT(sendto(socket_, send_buf, sizeof(send_buf), 0,\n+ reinterpret_cast<struct sockaddr*>(&dest), sizeof(dest)),\n+ SyscallFailsWithErrno(EINVAL));\n+ GTEST_SKIP();\n+ }\n+\n// Send it.\nASSERT_THAT(sendto(socket_, send_buf, sizeof(send_buf), 0,\nreinterpret_cast<struct sockaddr*>(&dest), sizeof(dest)),\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/packet_socket_raw.cc", "new_path": "test/syscalls/linux/packet_socket_raw.cc", "diff": "@@ -235,9 +235,6 @@ TEST_P(RawPacketTest, Receive) {\n// Send via a packet socket.\nTEST_P(RawPacketTest, Send) {\n- // We don't implement writing to packet sockets on gVisor.\n- SKIP_IF(IsRunningOnGvisor());\n-\n// Let's send a UDP packet and receive it using a regular UDP socket.\nFileDescriptor udp_sock =\nASSERT_NO_ERRNO_AND_VALUE(Socket(AF_INET, SOCK_DGRAM, 0));\n@@ -299,6 +296,14 @@ TEST_P(RawPacketTest, Send) {\nmemcpy(send_buf + sizeof(ethhdr) + sizeof(iphdr) + sizeof(udphdr), kMessage,\nsizeof(kMessage));\n+ // We don't implement writing to packet sockets on gVisor.\n+ if (IsRunningOnGvisor()) {\n+ ASSERT_THAT(sendto(s_, send_buf, sizeof(send_buf), 0,\n+ reinterpret_cast<struct sockaddr*>(&dest), sizeof(dest)),\n+ SyscallFailsWithErrno(EINVAL));\n+ GTEST_SKIP();\n+ }\n+\n// Send it.\nASSERT_THAT(sendto(s_, send_buf, sizeof(send_buf), 0,\nreinterpret_cast<struct sockaddr*>(&dest), sizeof(dest)),\n" } ]
Go
Apache License 2.0
google/gvisor
check explicitly that AF_PACKET sockets can't write in gVisor PiperOrigin-RevId: 381896875
259,891
28.06.2021 11:48:13
25,200
71e1bf1ec737873b14ea6348c973d9ff73b6eaf5
netstack: deflake TestSynRcvdBadSeqNumber There was a race wherein Accept() could fail, then the handshake would complete, and then a waiter would be created to listen for the handshake. In such cases, no notification was ever sent and the test timed out.
[ { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/tcp/tcp_test.go", "new_path": "pkg/tcpip/transport/tcp/tcp_test.go", "diff": "@@ -6077,6 +6077,11 @@ func TestSynRcvdBadSeqNumber(t *testing.T) {\n// complete the connection to test that the large SEQ num\n// did not change the state from SYN-RCVD.\n+ // Get setup to be notified about connection establishment.\n+ we, ch := waiter.NewChannelEntry(nil)\n+ c.WQ.EventRegister(&we, waiter.ReadableEvents)\n+ defer c.WQ.EventUnregister(&we)\n+\n// Send ACK to move to ESTABLISHED state.\nc.SendPacket(nil, &context.Headers{\nSrcPort: context.TestPort,\n@@ -6087,26 +6092,11 @@ func TestSynRcvdBadSeqNumber(t *testing.T) {\nRcvWnd: 30000,\n})\n- newEP, _, err := c.EP.Accept(nil)\n- switch err.(type) {\n- case nil, *tcpip.ErrWouldBlock:\n- default:\n- t.Fatalf(\"Accept failed: %s\", err)\n- }\n-\n- if cmp.Equal(&tcpip.ErrWouldBlock{}, err) {\n- // Try to accept the connections in the backlog.\n- we, ch := waiter.NewChannelEntry(nil)\n- c.WQ.EventRegister(&we, waiter.ReadableEvents)\n- defer c.WQ.EventUnregister(&we)\n-\n- // Wait for connection to be established.\n<-ch\n- newEP, _, err = c.EP.Accept(nil)\n+ newEP, _, err := c.EP.Accept(nil)\nif err != nil {\nt.Fatalf(\"Accept failed: %s\", err)\n}\n- }\n// Now verify that the TCP socket is usable and in a connected state.\ndata := \"Don't panic\"\n" } ]
Go
Apache License 2.0
google/gvisor
netstack: deflake TestSynRcvdBadSeqNumber There was a race wherein Accept() could fail, then the handshake would complete, and then a waiter would be created to listen for the handshake. In such cases, no notification was ever sent and the test timed out. PiperOrigin-RevId: 381913041
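The fix above is an instance of the register-before-trigger pattern: subscribe for the completion notification before sending the packet that finishes the handshake, so the wakeup cannot be missed. The sketch below shows the pattern with a plain channel; it stands in for the waiter/EventRegister machinery and is not the netstack test code.

package main

import (
	"fmt"
	"time"
)

func main() {
	// Register interest first (the equivalent of EventRegister).
	done := make(chan struct{}, 1)

	// notify delivers at most one wakeup without ever blocking the sender.
	notify := func() {
		select {
		case done <- struct{}{}:
		default:
		}
	}

	// Only then trigger the event that will eventually call notify
	// (the equivalent of sending the final ACK of the handshake).
	go func() {
		time.Sleep(10 * time.Millisecond) // stand-in for the peer's ACK arriving
		notify()
	}()

	// Wait without racing: the notification cannot have fired before we
	// registered, because registration happened first.
	select {
	case <-done:
		fmt.Println("connection observed")
	case <-time.After(time.Second):
		fmt.Println("timed out")
	}
}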
259,891
28.06.2021 14:40:27
25,200
2cbd82c0d6ff397a301def56d4a25eda07b53a9b
code quality: cleanup lint messages dns
[ { "change_type": "MODIFY", "old_path": "test/syscalls/linux/accept_bind.cc", "new_path": "test/syscalls/linux/accept_bind.cc", "diff": "@@ -37,9 +37,7 @@ TEST_P(AllSocketPairTest, Listen) {\nsockets->first_addr_size()),\nSyscallSucceeds());\n- ASSERT_THAT(listen(sockets->first_fd(),\n- /* backlog = */ 5), // NOLINT(bugprone-argument-comment)\n- SyscallSucceeds());\n+ ASSERT_THAT(listen(sockets->first_fd(), 5), SyscallSucceeds());\n}\nTEST_P(AllSocketPairTest, ListenIncreaseBacklog) {\n@@ -49,10 +47,8 @@ TEST_P(AllSocketPairTest, ListenIncreaseBacklog) {\nsockets->first_addr_size()),\nSyscallSucceeds());\n- ASSERT_THAT(listen(sockets->first_fd(), /* backlog = */ 5),\n- SyscallSucceeds());\n- ASSERT_THAT(listen(sockets->first_fd(), /* backlog = */ 10),\n- SyscallSucceeds());\n+ ASSERT_THAT(listen(sockets->first_fd(), 5), SyscallSucceeds());\n+ ASSERT_THAT(listen(sockets->first_fd(), 10), SyscallSucceeds());\n}\nTEST_P(AllSocketPairTest, ListenDecreaseBacklog) {\n@@ -62,10 +58,8 @@ TEST_P(AllSocketPairTest, ListenDecreaseBacklog) {\nsockets->first_addr_size()),\nSyscallSucceeds());\n- ASSERT_THAT(listen(sockets->first_fd(), /* backlog = */ 5),\n- SyscallSucceeds());\n- ASSERT_THAT(listen(sockets->first_fd(), /* backlog = */ 1),\n- SyscallSucceeds());\n+ ASSERT_THAT(listen(sockets->first_fd(), 5), SyscallSucceeds());\n+ ASSERT_THAT(listen(sockets->first_fd(), 1), SyscallSucceeds());\n}\nTEST_P(AllSocketPairTest, ListenBacklogSizes) {\n" } ]
Go
Apache License 2.0
google/gvisor
code quality: cleanup lint messages dns PiperOrigin-RevId: 381949375
259,884
28.06.2021 15:59:54
25,200
2d899a843b7b36799474bbb811a0bd40bc04efce
Exit early with error message on checkpoint/pause w/ hostinet.
[ { "change_type": "MODIFY", "old_path": "runsc/boot/controller.go", "new_path": "runsc/boot/controller.go", "diff": "@@ -334,6 +334,11 @@ func (cm *containerManager) ExecuteAsync(args *control.ExecArgs, pid *int32) err\n// Checkpoint pauses a sandbox and saves its state.\nfunc (cm *containerManager) Checkpoint(o *control.SaveOpts, _ *struct{}) error {\nlog.Debugf(\"containerManager.Checkpoint\")\n+ // TODO(gvisor.dev/issues/6243): save/restore not supported w/ hostinet\n+ if cm.l.root.conf.Network == config.NetworkHost {\n+ return errors.New(\"checkpoint not supported when using hostinet\")\n+ }\n+\nstate := control.State{\nKernel: cm.l.k,\nWatchdog: cm.l.watchdog,\n@@ -344,6 +349,10 @@ func (cm *containerManager) Checkpoint(o *control.SaveOpts, _ *struct{}) error {\n// Pause suspends a container.\nfunc (cm *containerManager) Pause(_, _ *struct{}) error {\nlog.Debugf(\"containerManager.Pause\")\n+ // TODO(gvisor.dev/issues/6243): save/restore not supported w/ hostinet\n+ if cm.l.root.conf.Network == config.NetworkHost {\n+ return errors.New(\"pause not supported when using hostinet\")\n+ }\ncm.l.k.Pause()\nreturn nil\n}\n" } ]
Go
Apache License 2.0
google/gvisor
Exit early with error message on checkpoint/pause w/ hostinet. PiperOrigin-RevId: 381964660
259,885
28.06.2021 17:40:33
25,200
5b2afd24a7ed6d626ede2d06d04378f95c3b62f8
Allow VFS2 gofer client to mmap from sentry page cache when forced.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/gofer/regular_file.go", "new_path": "pkg/sentry/fsimpl/gofer/regular_file.go", "diff": "@@ -678,29 +678,29 @@ func (fd *regularFileFD) Sync(ctx context.Context) error {\n// ConfigureMMap implements vfs.FileDescriptionImpl.ConfigureMMap.\nfunc (fd *regularFileFD) ConfigureMMap(ctx context.Context, opts *memmap.MMapOpts) error {\nd := fd.dentry()\n+ // Force sentry page caching at your own risk.\n+ if !d.fs.opts.forcePageCache {\nswitch d.fs.opts.interop {\ncase InteropModeExclusive:\n// Any mapping is fine.\ncase InteropModeWritethrough:\n- // Shared writable mappings require a host FD, since otherwise we can't\n- // synchronously flush memory-mapped writes to the remote file.\n+ // Shared writable mappings require a host FD, since otherwise we\n+ // can't synchronously flush memory-mapped writes to the remote\n+ // file.\nif opts.Private || !opts.MaxPerms.Write {\nbreak\n}\nfallthrough\ncase InteropModeShared:\n- // All mappings require a host FD to be coherent with other filesystem\n- // users.\n- if d.fs.opts.forcePageCache {\n- // Whether or not we have a host FD, we're not allowed to use it.\n- return syserror.ENODEV\n- }\n+ // All mappings require a host FD to be coherent with other\n+ // filesystem users.\nif atomic.LoadInt32(&d.mmapFD) < 0 {\nreturn syserror.ENODEV\n}\ndefault:\npanic(fmt.Sprintf(\"unknown InteropMode %v\", d.fs.opts.interop))\n}\n+ }\n// After this point, d may be used as a memmap.Mappable.\nd.pf.hostFileMapperInitOnce.Do(d.pf.hostFileMapper.Init)\nopts.SentryOwnedContent = d.fs.opts.forcePageCache\n@@ -708,12 +708,12 @@ func (fd *regularFileFD) ConfigureMMap(ctx context.Context, opts *memmap.MMapOpt\n}\nfunc (d *dentry) mayCachePages() bool {\n- if d.fs.opts.interop == InteropModeShared {\n- return false\n- }\nif d.fs.opts.forcePageCache {\nreturn true\n}\n+ if d.fs.opts.interop == InteropModeShared {\n+ return false\n+ }\nreturn atomic.LoadInt32(&d.mmapFD) >= 0\n}\n" } ]
Go
Apache License 2.0
google/gvisor
Allow VFS2 gofer client to mmap from sentry page cache when forced. PiperOrigin-RevId: 381982257
259,963
27.05.2021 17:03:44
-10,800
ddbc27365978a7c634354000094f86022d3ecd2f
Fix TUN IFF_NO_PI bug When a TUN device is created with the IFF_NO_PI flag, there will be no Ethernet header and no packet info; therefore, both read and write will fail. This commit fixes this bug.
[ { "change_type": "MODIFY", "old_path": "pkg/tcpip/link/tun/device.go", "new_path": "pkg/tcpip/link/tun/device.go", "diff": "@@ -207,6 +207,15 @@ func (d *Device) Write(data []byte) (int64, error) {\nprotocol = pktInfoHdr.Protocol()\ncase ethHdr != nil:\nprotocol = ethHdr.Type()\n+ case d.flags.TUN:\n+ // TUN interface with IFF_NO_PI enabled, thus\n+ // we need to determine protocol from version field\n+ version := data[0] >> 4\n+ if version == 4 {\n+ protocol = header.IPv4ProtocolNumber\n+ } else if version == 6 {\n+ protocol = header.IPv6ProtocolNumber\n+ }\n}\n// Try to determine remote link address, default zero.\n@@ -264,13 +273,6 @@ func (d *Device) encodePkt(info *channel.PacketInfo) (buffer.View, bool) {\nvv.AppendView(buffer.View(hdr))\n}\n- // If the packet does not already have link layer header, and the route\n- // does not exist, we can't compute it. This is possibly a raw packet, tun\n- // device doesn't support this at the moment.\n- if info.Pkt.LinkHeader().View().IsEmpty() && len(info.Route.RemoteLinkAddress) == 0 {\n- return nil, false\n- }\n-\n// Ethernet header (TAP only).\nif d.flags.TAP {\n// Add ethernet header if not provided.\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/tuntap.cc", "new_path": "test/syscalls/linux/tuntap.cc", "diff": "#include <sys/ioctl.h>\n#include <sys/socket.h>\n#include <sys/types.h>\n+#include <cstddef>\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n@@ -44,6 +45,7 @@ constexpr int kIPLen = 4;\nconstexpr const char kDevNetTun[] = \"/dev/net/tun\";\nconstexpr const char kTapName[] = \"tap0\";\n+constexpr const char kTunName[] = \"tun0\";\n#define kTapIPAddr htonl(0x0a000001) /* Inet 10.0.0.1 */\n#define kTapPeerIPAddr htonl(0x0a000002) /* Inet 10.0.0.2 */\n@@ -413,6 +415,43 @@ TEST_F(TuntapTest, SendUdpTriggersArpResolution) {\n}\n}\n+TEST_F(TuntapTest, TUNNoPacketInfo) {\n+ SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_ADMIN)));\n+\n+ // Interface creation.\n+ FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(Open(kDevNetTun, O_RDWR));\n+\n+ struct ifreq ifr_set = {};\n+ ifr_set.ifr_flags = IFF_TUN | IFF_NO_PI;\n+ strncpy(ifr_set.ifr_name, kTunName, IFNAMSIZ);\n+ EXPECT_THAT(ioctl(fd.get(), TUNSETIFF, &ifr_set), SyscallSucceeds());\n+\n+ // Interface setup.\n+ auto link = ASSERT_NO_ERRNO_AND_VALUE(GetLinkByName(kTunName));\n+ const struct in_addr dev_ipv4_addr = {.s_addr = kTapIPAddr};\n+ EXPECT_NO_ERRNO(LinkAddLocalAddr(link.index, AF_INET, 24, &dev_ipv4_addr, sizeof(dev_ipv4_addr)));\n+\n+ ping_pkt ping_req = CreatePingPacket(kMacB, kTapPeerIPAddr, kMacA, kTapIPAddr);\n+ size_t packet_size = sizeof(ping_req) - offsetof(ping_pkt, ip);\n+\n+ // Send ICMP query\n+ EXPECT_THAT(write(fd.get(), &ping_req.ip, packet_size), SyscallSucceedsWithValue(packet_size));\n+\n+ // Receive loop to process inbound packets.\n+ while (1) {\n+ ping_pkt ping_resp = {};\n+ EXPECT_THAT(read(fd.get(), &ping_resp.ip, packet_size), SyscallSucceedsWithValue(packet_size));\n+\n+ // Process ping response packet.\n+ if (!memcmp(&ping_resp.ip.saddr, &ping_req.ip.daddr, kIPLen) &&\n+ !memcmp(&ping_resp.ip.daddr, &ping_req.ip.saddr, kIPLen) &&\n+ ping_resp.icmp.type == 0 && ping_resp.icmp.code == 0) {\n+ // Ends and passes the test.\n+ break;\n+ }\n+ }\n+}\n+\n// TCPBlockingConnectFailsArpResolution tests for TCP connect to fail on link\n// address resolution failure to a routable, but non existent peer.\nTEST_F(TuntapTest, TCPBlockingConnectFailsArpResolution) {\n" } ]
Go
Apache License 2.0
google/gvisor
Fix TUN IFF_NO_PI bug When a TUN device is created with the IFF_NO_PI flag, there will be no Ethernet header and no packet info; therefore, both read and write will fail. This commit fixes this bug.
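With IFF_NO_PI there is no packet-info header carrying an EtherType, so the write path in the diff infers the protocol from the IP version nibble in the first byte of the packet. The standalone sketch below shows that inference; the constants and helper are illustrative and are not the tun.Device implementation.

package main

import "fmt"

const (
	ipv4EtherType = 0x0800
	ipv6EtherType = 0x86dd
)

// protocolFromPacket guesses the network protocol of a raw IP packet from
// the version field in the top nibble of its first byte.
func protocolFromPacket(data []byte) (uint16, bool) {
	if len(data) == 0 {
		return 0, false
	}
	switch data[0] >> 4 {
	case 4:
		return ipv4EtherType, true
	case 6:
		return ipv6EtherType, true
	default:
		return 0, false
	}
}

func main() {
	pkt := []byte{0x45, 0x00, 0x00, 0x54} // first bytes of a typical IPv4 header
	if proto, ok := protocolFromPacket(pkt); ok {
		fmt.Printf("ethertype: %#04x\n", proto)
	}
}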
259,963
29.06.2021 15:09:44
-10,800
e8bc632d07378f4aa0824b5002a51c55b4796d35
Fix iptables List entries Input interface field In Linux, the list entries command returns the name of the input interface assigned to the iptables rule. iptables -S > -A FORWARD -i docker0 -o docker0 -j ACCEPT Meanwhile, in gVisor this interface name is ignored. iptables -S > -A FORWARD -o docker0 -j ACCEPT
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/socket/netfilter/ipv4.go", "new_path": "pkg/sentry/socket/netfilter/ipv4.go", "diff": "@@ -80,6 +80,8 @@ func getEntries4(table stack.Table, tablename linux.TableName) (linux.KernelIPTG\ncopy(entry.Entry.IP.SrcMask[:], rule.Filter.SrcMask)\ncopy(entry.Entry.IP.OutputInterface[:], rule.Filter.OutputInterface)\ncopy(entry.Entry.IP.OutputInterfaceMask[:], rule.Filter.OutputInterfaceMask)\n+ copy(entry.Entry.IP.InputInterface[:], rule.Filter.InputInterface)\n+ copy(entry.Entry.IP.InputInterfaceMask[:], rule.Filter.InputInterfaceMask)\nif rule.Filter.DstInvert {\nentry.Entry.IP.InverseFlags |= linux.IPT_INV_DSTIP\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/socket/netfilter/ipv6.go", "new_path": "pkg/sentry/socket/netfilter/ipv6.go", "diff": "@@ -80,6 +80,8 @@ func getEntries6(table stack.Table, tablename linux.TableName) (linux.KernelIP6T\ncopy(entry.Entry.IPv6.SrcMask[:], rule.Filter.SrcMask)\ncopy(entry.Entry.IPv6.OutputInterface[:], rule.Filter.OutputInterface)\ncopy(entry.Entry.IPv6.OutputInterfaceMask[:], rule.Filter.OutputInterfaceMask)\n+ copy(entry.Entry.IPv6.InputInterface[:], rule.Filter.InputInterface)\n+ copy(entry.Entry.IPv6.InputInterfaceMask[:], rule.Filter.InputInterfaceMask)\nif rule.Filter.DstInvert {\nentry.Entry.IPv6.InverseFlags |= linux.IP6T_INV_DSTIP\n}\n" } ]
Go
Apache License 2.0
google/gvisor
Fix iptables List entries Input interface field In Linux, the list entries command returns the name of the input interface assigned to the iptables rule. iptables -S > -A FORWARD -i docker0 -o docker0 -j ACCEPT Meanwhile, in gVisor this interface name is ignored. iptables -S > -A FORWARD -o docker0 -j ACCEPT
259,992
29.06.2021 10:53:10
25,200
5f2b3728fc1d71d32912c57d948ba4b15c886f2a
Redirect all calls from `errdefs.ToGRPC` to `utils.ErrToGRPC` This is to ensure that Go 1.13 error wrapping is correctly translated to gRPC errors before returning from the shim. Updates
[ { "change_type": "MODIFY", "old_path": "pkg/shim/BUILD", "new_path": "pkg/shim/BUILD", "diff": "@@ -8,7 +8,6 @@ go_library(\n\"api.go\",\n\"debug.go\",\n\"epoll.go\",\n- \"errors.go\",\n\"options.go\",\n\"service.go\",\n\"service_linux.go\",\n@@ -45,8 +44,6 @@ go_library(\n\"@com_github_gogo_protobuf//types:go_default_library\",\n\"@com_github_opencontainers_runtime_spec//specs-go:go_default_library\",\n\"@com_github_sirupsen_logrus//:go_default_library\",\n- \"@org_golang_google_grpc//codes:go_default_library\",\n- \"@org_golang_google_grpc//status:go_default_library\",\n\"@org_golang_x_sys//unix:go_default_library\",\n],\n)\n@@ -54,14 +51,10 @@ go_library(\ngo_test(\nname = \"shim_test\",\nsize = \"small\",\n- srcs = [\n- \"errors_test.go\",\n- \"service_test.go\",\n- ],\n+ srcs = [\"service_test.go\"],\nlibrary = \":shim\",\ndeps = [\n\"//pkg/shim/utils\",\n- \"@com_github_containerd_containerd//errdefs:go_default_library\",\n\"@com_github_opencontainers_runtime_spec//specs-go:go_default_library\",\n],\n)\n" }, { "change_type": "MODIFY", "old_path": "pkg/shim/proc/BUILD", "new_path": "pkg/shim/proc/BUILD", "diff": "@@ -21,6 +21,7 @@ go_library(\n],\ndeps = [\n\"//pkg/shim/runsc\",\n+ \"//pkg/shim/utils\",\n\"@com_github_containerd_console//:go_default_library\",\n\"@com_github_containerd_containerd//errdefs:go_default_library\",\n\"@com_github_containerd_containerd//log:go_default_library\",\n" }, { "change_type": "MODIFY", "old_path": "pkg/shim/proc/init_state.go", "new_path": "pkg/shim/proc/init_state.go", "diff": "@@ -23,6 +23,7 @@ import (\n\"github.com/containerd/containerd/pkg/process\"\nrunc \"github.com/containerd/go-runc\"\n\"golang.org/x/sys/unix\"\n+ \"gvisor.dev/gvisor/pkg/shim/utils\"\n)\ntype stateTransition int\n@@ -235,6 +236,6 @@ func handleStoppedKill(signal uint32) error {\n// already been killed.\nreturn nil\ndefault:\n- return errdefs.ToGRPCf(errdefs.ErrNotFound, \"process not found\")\n+ return utils.ErrToGRPCf(errdefs.ErrNotFound, \"process not found\")\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/shim/service.go", "new_path": "pkg/shim/service.go", "diff": "@@ -452,10 +452,10 @@ func (s *service) Create(ctx context.Context, r *taskAPI.CreateTaskRequest) (*ta\n}\nprocess, err := newInit(r.Bundle, filepath.Join(r.Bundle, \"work\"), ns, s.platform, config, &s.opts, st.Rootfs)\nif err != nil {\n- return nil, errToGRPC(err)\n+ return nil, utils.ErrToGRPC(err)\n}\nif err := process.Create(ctx, config); err != nil {\n- return nil, errToGRPC(err)\n+ return nil, utils.ErrToGRPC(err)\n}\n// Set up OOM notification on the sandbox's cgroup. 
This is done on\n@@ -530,10 +530,10 @@ func (s *service) Exec(ctx context.Context, r *taskAPI.ExecProcessRequest) (*typ\np := s.processes[r.ExecID]\ns.mu.Unlock()\nif p != nil {\n- return nil, errdefs.ToGRPCf(errdefs.ErrAlreadyExists, \"id %s\", r.ExecID)\n+ return nil, utils.ErrToGRPCf(errdefs.ErrAlreadyExists, \"id %s\", r.ExecID)\n}\nif s.task == nil {\n- return nil, errdefs.ToGRPCf(errdefs.ErrFailedPrecondition, \"container must be created\")\n+ return nil, utils.ErrToGRPCf(errdefs.ErrFailedPrecondition, \"container must be created\")\n}\nprocess, err := s.task.Exec(ctx, s.bundle, &proc.ExecConfig{\nID: r.ExecID,\n@@ -544,7 +544,7 @@ func (s *service) Exec(ctx context.Context, r *taskAPI.ExecProcessRequest) (*typ\nSpec: r.Spec,\n})\nif err != nil {\n- return nil, errToGRPC(err)\n+ return nil, utils.ErrToGRPC(err)\n}\ns.mu.Lock()\ns.processes[r.ExecID] = process\n@@ -565,7 +565,7 @@ func (s *service) ResizePty(ctx context.Context, r *taskAPI.ResizePtyRequest) (*\nHeight: uint16(r.Height),\n}\nif err := p.Resize(ws); err != nil {\n- return nil, errToGRPC(err)\n+ return nil, utils.ErrToGRPC(err)\n}\nreturn empty, nil\n}\n@@ -615,7 +615,7 @@ func (s *service) Pause(ctx context.Context, r *taskAPI.PauseRequest) (*types.Em\nlog.L.Debugf(\"Pause, id: %s\", r.ID)\nif s.task == nil {\nlog.L.Debugf(\"Pause error, id: %s: container not created\", r.ID)\n- return nil, errdefs.ToGRPCf(errdefs.ErrFailedPrecondition, \"container must be created\")\n+ return nil, utils.ErrToGRPCf(errdefs.ErrFailedPrecondition, \"container must be created\")\n}\nerr := s.task.Runtime().Pause(ctx, r.ID)\nif err != nil {\n@@ -629,7 +629,7 @@ func (s *service) Resume(ctx context.Context, r *taskAPI.ResumeRequest) (*types.\nlog.L.Debugf(\"Resume, id: %s\", r.ID)\nif s.task == nil {\nlog.L.Debugf(\"Resume error, id: %s: container not created\", r.ID)\n- return nil, errdefs.ToGRPCf(errdefs.ErrFailedPrecondition, \"container must be created\")\n+ return nil, utils.ErrToGRPCf(errdefs.ErrFailedPrecondition, \"container must be created\")\n}\nerr := s.task.Runtime().Resume(ctx, r.ID)\nif err != nil {\n@@ -648,7 +648,7 @@ func (s *service) Kill(ctx context.Context, r *taskAPI.KillRequest) (*types.Empt\n}\nif err := p.Kill(ctx, r.Signal, r.All); err != nil {\nlog.L.Debugf(\"Kill failed: %v\", err)\n- return nil, errToGRPC(err)\n+ return nil, utils.ErrToGRPC(err)\n}\nlog.L.Debugf(\"Kill succeeded\")\nreturn empty, nil\n@@ -660,7 +660,7 @@ func (s *service) Pids(ctx context.Context, r *taskAPI.PidsRequest) (*taskAPI.Pi\npids, err := s.getContainerPids(ctx, r.ID)\nif err != nil {\n- return nil, errToGRPC(err)\n+ return nil, utils.ErrToGRPC(err)\n}\nvar processes []*task.ProcessInfo\nfor _, pid := range pids {\n@@ -706,7 +706,7 @@ func (s *service) CloseIO(ctx context.Context, r *taskAPI.CloseIORequest) (*type\n// Checkpoint checkpoints the container.\nfunc (s *service) Checkpoint(ctx context.Context, r *taskAPI.CheckpointTaskRequest) (*types.Empty, error) {\nlog.L.Debugf(\"Checkpoint, id: %s\", r.ID)\n- return empty, errdefs.ToGRPC(errdefs.ErrNotImplemented)\n+ return empty, utils.ErrToGRPC(errdefs.ErrNotImplemented)\n}\n// Connect returns shim information such as the shim's pid.\n@@ -737,7 +737,7 @@ func (s *service) Stats(ctx context.Context, r *taskAPI.StatsRequest) (*taskAPI.\nlog.L.Debugf(\"Stats, id: %s\", r.ID)\nif s.task == nil {\nlog.L.Debugf(\"Stats error, id: %s: container not created\", r.ID)\n- return nil, errdefs.ToGRPCf(errdefs.ErrFailedPrecondition, \"container must be created\")\n+ return nil, 
utils.ErrToGRPCf(errdefs.ErrFailedPrecondition, \"container must be created\")\n}\nstats, err := s.task.Stats(ctx, s.id)\nif err != nil {\n@@ -811,7 +811,7 @@ func (s *service) Stats(ctx context.Context, r *taskAPI.StatsRequest) (*taskAPI.\n// Update updates a running container.\nfunc (s *service) Update(ctx context.Context, r *taskAPI.UpdateTaskRequest) (*types.Empty, error) {\n- return empty, errdefs.ToGRPC(errdefs.ErrNotImplemented)\n+ return empty, utils.ErrToGRPC(errdefs.ErrNotImplemented)\n}\n// Wait waits for a process to exit.\n@@ -908,14 +908,14 @@ func (s *service) getProcess(execID string) (process.Process, error) {\nif execID == \"\" {\nif s.task == nil {\n- return nil, errdefs.ToGRPCf(errdefs.ErrFailedPrecondition, \"container must be created\")\n+ return nil, utils.ErrToGRPCf(errdefs.ErrFailedPrecondition, \"container must be created\")\n}\nreturn s.task, nil\n}\np := s.processes[execID]\nif p == nil {\n- return nil, errdefs.ToGRPCf(errdefs.ErrNotFound, \"process does not exist %s\", execID)\n+ return nil, utils.ErrToGRPCf(errdefs.ErrNotFound, \"process does not exist %s\", execID)\n}\nreturn p, nil\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/shim/utils/BUILD", "new_path": "pkg/shim/utils/BUILD", "diff": "@@ -6,6 +6,7 @@ go_library(\nname = \"utils\",\nsrcs = [\n\"annotations.go\",\n+ \"errors.go\",\n\"utils.go\",\n\"volumes.go\",\n],\n@@ -14,14 +15,23 @@ go_library(\n\"//shim:__subpackages__\",\n],\ndeps = [\n+ \"@com_github_containerd_containerd//errdefs:go_default_library\",\n\"@com_github_opencontainers_runtime_spec//specs-go:go_default_library\",\n+ \"@org_golang_google_grpc//codes:go_default_library\",\n+ \"@org_golang_google_grpc//status:go_default_library\",\n],\n)\ngo_test(\nname = \"utils_test\",\nsize = \"small\",\n- srcs = [\"volumes_test.go\"],\n+ srcs = [\n+ \"errors_test.go\",\n+ \"volumes_test.go\",\n+ ],\nlibrary = \":utils\",\n- deps = [\"@com_github_opencontainers_runtime_spec//specs-go:go_default_library\"],\n+ deps = [\n+ \"@com_github_containerd_containerd//errdefs:go_default_library\",\n+ \"@com_github_opencontainers_runtime_spec//specs-go:go_default_library\",\n+ ],\n)\n" }, { "change_type": "RENAME", "old_path": "pkg/shim/errors.go", "new_path": "pkg/shim/utils/errors.go", "diff": "// See the License for the specific language governing permissions and\n// limitations under the License.\n-package shim\n+package utils\nimport (\n\"context\"\n\"errors\"\n+ \"fmt\"\n\"github.com/containerd/containerd/errdefs\"\n\"google.golang.org/grpc/codes\"\n\"google.golang.org/grpc/status\"\n)\n-// errToGRPC wraps containerd's ToGRPC error mapper which depends on\n+// ErrToGRPC wraps containerd's ToGRPC error mapper which depends on\n// github.com/pkg/errors to work correctly. 
Once we upgrade to containerd v1.4,\n// this function can go away and we can use errdefs.ToGRPC directly instead.\n//\n// TODO(gvisor.dev/issue/6232): Remove after upgrading to containerd v1.4\n-func errToGRPC(err error) error {\n+func ErrToGRPC(err error) error {\n+ return errToGRPCMsg(err, err.Error())\n+}\n+\n+// ErrToGRPCf maps the error to grpc error codes, assembling the formatting\n+// string and combining it with the target error string.\n+//\n+// TODO(gvisor.dev/issue/6232): Remove after upgrading to containerd v1.4\n+func ErrToGRPCf(err error, format string, args ...interface{}) error {\n+ formatted := fmt.Sprintf(format, args...)\n+ msg := fmt.Sprintf(\"%s: %s\", formatted, err.Error())\n+ return errToGRPCMsg(err, msg)\n+}\n+\n+func errToGRPCMsg(err error, msg string) error {\nif err == nil {\nreturn nil\n}\n@@ -38,21 +53,21 @@ func errToGRPC(err error) error {\nswitch {\ncase errors.Is(err, errdefs.ErrInvalidArgument):\n- return status.Errorf(codes.InvalidArgument, err.Error())\n+ return status.Errorf(codes.InvalidArgument, msg)\ncase errors.Is(err, errdefs.ErrNotFound):\n- return status.Errorf(codes.NotFound, err.Error())\n+ return status.Errorf(codes.NotFound, msg)\ncase errors.Is(err, errdefs.ErrAlreadyExists):\n- return status.Errorf(codes.AlreadyExists, err.Error())\n+ return status.Errorf(codes.AlreadyExists, msg)\ncase errors.Is(err, errdefs.ErrFailedPrecondition):\n- return status.Errorf(codes.FailedPrecondition, err.Error())\n+ return status.Errorf(codes.FailedPrecondition, msg)\ncase errors.Is(err, errdefs.ErrUnavailable):\n- return status.Errorf(codes.Unavailable, err.Error())\n+ return status.Errorf(codes.Unavailable, msg)\ncase errors.Is(err, errdefs.ErrNotImplemented):\n- return status.Errorf(codes.Unimplemented, err.Error())\n+ return status.Errorf(codes.Unimplemented, msg)\ncase errors.Is(err, context.Canceled):\n- return status.Errorf(codes.Canceled, err.Error())\n+ return status.Errorf(codes.Canceled, msg)\ncase errors.Is(err, context.DeadlineExceeded):\n- return status.Errorf(codes.DeadlineExceeded, err.Error())\n+ return status.Errorf(codes.DeadlineExceeded, msg)\n}\nreturn errdefs.ToGRPC(err)\n" }, { "change_type": "RENAME", "old_path": "pkg/shim/errors_test.go", "new_path": "pkg/shim/utils/errors_test.go", "diff": "// See the License for the specific language governing permissions and\n// limitations under the License.\n-package shim\n+package utils\nimport (\n\"fmt\"\n@@ -39,8 +39,11 @@ func TestGRPCRoundTripsErrors(t *testing.T) {\n},\n} {\nt.Run(tc.name, func(t *testing.T) {\n- if err := errdefs.FromGRPC(errToGRPC(tc.err)); !tc.test(err) {\n- t.Errorf(\"got %+v\", err)\n+ if err := errdefs.FromGRPC(ErrToGRPC(tc.err)); !tc.test(err) {\n+ t.Errorf(\"errToGRPC got %+v\", err)\n+ }\n+ if err := errdefs.FromGRPC(ErrToGRPCf(tc.err, \"testing %s\", \"123\")); !tc.test(err) {\n+ t.Errorf(\"errToGRPCf got %+v\", err)\n}\n})\n}\n" } ]
Go
Apache License 2.0
google/gvisor
Redirect all calls from `errdefs.ToGRPC` to `utils.ErrToGRPC` This is to ensure that Go 1.13 error wrapping is correctly translated to gRPC errors before returning from the shim. Updates #6225 PiperOrigin-RevId: 382120441
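The point of funneling everything through utils.ErrToGRPC is that it matches errors with errors.Is, so sentinels wrapped via fmt.Errorf's %w verb still translate to the intended gRPC code. A minimal sketch of that pattern, using an invented sentinel and mapper rather than the containerd errdefs API:

package main

import (
	"errors"
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

var errNotFound = errors.New("not found")

// toGRPC maps a (possibly wrapped) error onto a gRPC status error.
func toGRPC(err error) error {
	switch {
	case err == nil:
		return nil
	case errors.Is(err, errNotFound):
		return status.Error(codes.NotFound, err.Error())
	default:
		return status.Error(codes.Unknown, err.Error())
	}
}

func main() {
	// Wrapping with %w preserves the sentinel for errors.Is, so the mapping
	// survives the extra context added by the caller.
	wrapped := fmt.Errorf("process %q: %w", "exec-1", errNotFound)
	fmt.Println(status.Code(toGRPC(wrapped))) // prints NotFound
}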
259,992
29.06.2021 14:46:51
25,200
d205926f235258468bfe206388fa1b55cb1ea7fa
Delete PID files right after they are read The PID files are not used after they are read, so there is no point in keeping them around until the shim is deleted. Updates
[ { "change_type": "MODIFY", "old_path": "pkg/shim/proc/BUILD", "new_path": "pkg/shim/proc/BUILD", "diff": "@@ -20,6 +20,7 @@ go_library(\n\"//shim:__subpackages__\",\n],\ndeps = [\n+ \"//pkg/cleanup\",\n\"//pkg/shim/runsc\",\n\"//pkg/shim/utils\",\n\"@com_github_containerd_console//:go_default_library\",\n" }, { "change_type": "MODIFY", "old_path": "pkg/shim/proc/exec.go", "new_path": "pkg/shim/proc/exec.go", "diff": "@@ -26,11 +26,13 @@ import (\n\"github.com/containerd/console\"\n\"github.com/containerd/containerd/errdefs\"\n+ \"github.com/containerd/containerd/log\"\n\"github.com/containerd/containerd/pkg/stdio\"\n\"github.com/containerd/fifo\"\nrunc \"github.com/containerd/go-runc\"\nspecs \"github.com/opencontainers/runtime-spec/specs-go\"\n\"golang.org/x/sys/unix\"\n+ \"gvisor.dev/gvisor/pkg/cleanup\"\n\"gvisor.dev/gvisor/pkg/shim/runsc\"\n)\n@@ -92,6 +94,12 @@ func (e *execProcess) SetExited(status int) {\n}\nfunc (e *execProcess) setExited(status int) {\n+ if !e.exited.IsZero() {\n+ log.L.Debugf(\"Exec: status already set to %d, ignoring status: %d\", e.status, status)\n+ return\n+ }\n+\n+ log.L.Debugf(\"Exec: setting status: %d\", status)\ne.status = status\ne.exited = time.Now()\ne.parent.Platform.ShutdownConsole(context.Background(), e.console)\n@@ -105,7 +113,7 @@ func (e *execProcess) Delete(ctx context.Context) error {\nreturn e.execState.Delete(ctx)\n}\n-func (e *execProcess) delete(ctx context.Context) error {\n+func (e *execProcess) delete() error {\ne.wg.Wait()\nif e.io != nil {\nfor _, c := range e.closers {\n@@ -113,12 +121,6 @@ func (e *execProcess) delete(ctx context.Context) error {\n}\ne.io.Close()\n}\n- pidfile := filepath.Join(e.path, fmt.Sprintf(\"%s.pid\", e.id))\n- // silently ignore error\n- os.Remove(pidfile)\n- internalPidfile := filepath.Join(e.path, fmt.Sprintf(\"%s-internal.pid\", e.id))\n- // silently ignore error\n- os.Remove(internalPidfile)\nreturn nil\n}\n@@ -171,42 +173,53 @@ func (e *execProcess) Start(ctx context.Context) error {\nreturn e.execState.Start(ctx)\n}\n-func (e *execProcess) start(ctx context.Context) (err error) {\n- var (\n- socket *runc.Socket\n- pidfile = filepath.Join(e.path, fmt.Sprintf(\"%s.pid\", e.id))\n- internalPidfile = filepath.Join(e.path, fmt.Sprintf(\"%s-internal.pid\", e.id))\n- )\n- if e.stdio.Terminal {\n- if socket, err = runc.NewTempConsoleSocket(); err != nil {\n+func (e *execProcess) start(ctx context.Context) error {\n+ var socket *runc.Socket\n+\n+ switch {\n+ case e.stdio.Terminal:\n+ s, err := runc.NewTempConsoleSocket()\n+ if err != nil {\nreturn fmt.Errorf(\"failed to create runc console socket: %w\", err)\n}\n- defer socket.Close()\n- } else if e.stdio.IsNull() {\n- if e.io, err = runc.NewNullIO(); err != nil {\n+ defer s.Close()\n+ socket = s\n+\n+ case e.stdio.IsNull():\n+ io, err := runc.NewNullIO()\n+ if err != nil {\nreturn fmt.Errorf(\"creating new NULL IO: %w\", err)\n}\n- } else {\n- if e.io, err = runc.NewPipeIO(e.parent.IoUID, e.parent.IoGID, withConditionalIO(e.stdio)); err != nil {\n+ e.io = io\n+\n+ default:\n+ io, err := runc.NewPipeIO(e.parent.IoUID, e.parent.IoGID, withConditionalIO(e.stdio))\n+ if err != nil {\nreturn fmt.Errorf(\"failed to create runc io pipes: %w\", err)\n}\n+ e.io = io\n}\n+\nopts := &runsc.ExecOpts{\n- PidFile: pidfile,\n- InternalPidFile: internalPidfile,\n+ PidFile: filepath.Join(e.path, fmt.Sprintf(\"%s.pid\", e.id)),\n+ InternalPidFile: filepath.Join(e.path, fmt.Sprintf(\"%s-internal.pid\", e.id)),\nIO: e.io,\nDetach: true,\n}\n+ defer func() {\n+ _ = 
os.Remove(opts.PidFile)\n+ _ = os.Remove(opts.InternalPidFile)\n+ }()\nif socket != nil {\nopts.ConsoleSocket = socket\n}\n+\neventCh := e.parent.Monitor.Subscribe()\n- defer func() {\n- // Unsubscribe if an error is returned.\n- if err != nil {\n+ cu := cleanup.Make(func() {\ne.parent.Monitor.Unsubscribe(eventCh)\n- }\n- }()\n+ })\n+ defer cu.Clean()\n+\nif err := e.parent.runtime.Exec(ctx, e.parent.id, e.spec, opts); err != nil {\nclose(e.waitBlock)\nreturn e.parent.runtimeError(err, \"OCI runtime exec failed\")\n@@ -234,6 +247,7 @@ func (e *execProcess) start(ctx context.Context) (err error) {\nreturn fmt.Errorf(\"failed to start io pipe copy: %w\", err)\n}\n}\n+\npid, err := runc.ReadPidFile(opts.PidFile)\nif err != nil {\nreturn fmt.Errorf(\"failed to retrieve OCI runtime exec pid: %w\", err)\n@@ -244,6 +258,7 @@ func (e *execProcess) start(ctx context.Context) (err error) {\nreturn fmt.Errorf(\"failed to retrieve OCI runtime exec internal pid: %w\", err)\n}\ne.internalPid = internalPid\n+\ngo func() {\ndefer e.parent.Monitor.Unsubscribe(eventCh)\nfor event := range eventCh {\n@@ -257,21 +272,25 @@ func (e *execProcess) start(ctx context.Context) (err error) {\n}\n}\n}()\n+\n+ cu.Release() // cancel cleanup on success.\nreturn nil\n}\n-func (e *execProcess) Status(ctx context.Context) (string, error) {\n+func (e *execProcess) Status(context.Context) (string, error) {\ne.mu.Lock()\ndefer e.mu.Unlock()\n// if we don't have a pid then the exec process has just been created\nif e.pid == 0 {\nreturn \"created\", nil\n}\n- // if we have a pid and it can be signaled, the process is running\n- // TODO(random-liu): Use `runsc kill --pid`.\n- if err := unix.Kill(e.pid, 0); err == nil {\n- return \"running\", nil\n- }\n- // else if we have a pid but it can nolonger be signaled, it has stopped\n+ // This checks that `runsc exec` process is still running. This process has\n+ // the same lifetime as the process executing inside the container. So instead\n+ // of calling `runsc kill --pid`, just do a quick check that `runsc exec` is\n+ // still running.\n+ if err := unix.Kill(e.pid, 0); err != nil {\n+ // Can't signal the process, it must have exited.\nreturn \"stopped\", nil\n}\n+ return \"running\", nil\n+}\n" }, { "change_type": "MODIFY", "old_path": "pkg/shim/proc/exec_state.go", "new_path": "pkg/shim/proc/exec_state.go", "diff": "@@ -63,8 +63,8 @@ func (s *execCreatedState) Start(ctx context.Context) error {\nreturn nil\n}\n-func (s *execCreatedState) Delete(ctx context.Context) error {\n- if err := s.p.delete(ctx); err != nil {\n+func (s *execCreatedState) Delete(context.Context) error {\n+ if err := s.p.delete(); err != nil {\nreturn err\n}\ns.transition(deleted)\n@@ -143,8 +143,8 @@ func (s *execStoppedState) Start(context.Context) error {\nreturn fmt.Errorf(\"cannot start a stopped process\")\n}\n-func (s *execStoppedState) Delete(ctx context.Context) error {\n- if err := s.p.delete(ctx); err != nil {\n+func (s *execStoppedState) Delete(context.Context) error {\n+ if err := s.p.delete(); err != nil {\nreturn err\n}\ns.transition(deleted)\n" } ]
Go
Apache License 2.0
google/gvisor
Delete PID files right after they are read The PID files are not used after they are read, so there is no point in keeping them around until the shim is deleted. Updates #6225 PiperOrigin-RevId: 382169916
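A small sketch of the read-then-delete idea: the pid file exists only to hand a pid back from the runtime, so it can be removed as soon as it has been parsed (the diff does this with deferred os.Remove calls on the exec options' pid files). The path handling and helper below are made up for the example.

package main

import (
	"fmt"
	"os"
	"strconv"
	"strings"
)

// readAndRemovePidFile parses a pid file and deletes it on the way out;
// once read, the file has served its purpose.
func readAndRemovePidFile(path string) (int, error) {
	defer os.Remove(path) // best effort, errors are deliberately ignored

	data, err := os.ReadFile(path)
	if err != nil {
		return 0, err
	}
	return strconv.Atoi(strings.TrimSpace(string(data)))
}

func main() {
	f, err := os.CreateTemp("", "demo-*.pid")
	if err != nil {
		panic(err)
	}
	fmt.Fprintln(f, 12345)
	f.Close()

	pid, err := readAndRemovePidFile(f.Name())
	fmt.Println(pid, err) // 12345 <nil>, and the file is gone
}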
259,909
29.06.2021 16:58:12
25,200
90dbb4b0c7e594ba67fec26c2cdb1dfd7d7454de
Add SIOCGIFFLAGS ioctl support to hostinet.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/socket/hostinet/socket_unsafe.go", "new_path": "pkg/sentry/socket/hostinet/socket_unsafe.go", "diff": "@@ -67,7 +67,23 @@ func ioctl(ctx context.Context, fd int, io usermem.IO, args arch.SyscallArgument\nAddressSpaceActive: true,\n})\nreturn 0, err\n-\n+ case unix.SIOCGIFFLAGS:\n+ cc := &usermem.IOCopyContext{\n+ Ctx: ctx,\n+ IO: io,\n+ Opts: usermem.IOOpts{\n+ AddressSpaceActive: true,\n+ },\n+ }\n+ var ifr linux.IFReq\n+ if _, err := ifr.CopyIn(cc, args[2].Pointer()); err != nil {\n+ return 0, err\n+ }\n+ if _, _, errno := unix.Syscall(unix.SYS_IOCTL, uintptr(fd), cmd, uintptr(unsafe.Pointer(&ifr))); errno != 0 {\n+ return 0, translateIOSyscallError(errno)\n+ }\n+ _, err := ifr.CopyOut(cc, args[2].Pointer())\n+ return 0, err\ndefault:\nreturn 0, syserror.ENOTTY\n}\n" }, { "change_type": "MODIFY", "old_path": "runsc/boot/filter/config.go", "new_path": "runsc/boot/filter/config.go", "diff": "@@ -459,6 +459,10 @@ func hostInetFilters() seccomp.SyscallRules {\nseccomp.MatchAny{},\nseccomp.EqualTo(unix.TIOCINQ),\n},\n+ {\n+ seccomp.MatchAny{},\n+ seccomp.EqualTo(unix.SIOCGIFFLAGS),\n+ },\n},\nunix.SYS_LISTEN: {},\nunix.SYS_READV: {},\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/BUILD", "new_path": "test/syscalls/BUILD", "diff": "@@ -738,6 +738,7 @@ syscall_test(\n)\nsyscall_test(\n+ add_hostinet = True,\ntest = \"//test/syscalls/linux:socket_netdevice_test\",\n)\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/socket_netdevice.cc", "new_path": "test/syscalls/linux/socket_netdevice.cc", "diff": "@@ -37,6 +37,7 @@ using ::testing::AnyOf;\nusing ::testing::Eq;\nTEST(NetdeviceTest, Loopback) {\n+ SKIP_IF(IsRunningWithHostinet());\nFileDescriptor sock =\nASSERT_NO_ERRNO_AND_VALUE(Socket(AF_INET, SOCK_DGRAM, 0));\n@@ -60,6 +61,7 @@ TEST(NetdeviceTest, Loopback) {\n}\nTEST(NetdeviceTest, Netmask) {\n+ SKIP_IF(IsRunningWithHostinet());\n// We need an interface index to identify the loopback device.\nFileDescriptor sock =\nASSERT_NO_ERRNO_AND_VALUE(Socket(AF_INET, SOCK_DGRAM, 0));\n@@ -135,6 +137,7 @@ TEST(NetdeviceTest, Netmask) {\n}\nTEST(NetdeviceTest, InterfaceName) {\n+ SKIP_IF(IsRunningWithHostinet());\nFileDescriptor sock =\nASSERT_NO_ERRNO_AND_VALUE(Socket(AF_INET, SOCK_DGRAM, 0));\n@@ -168,6 +171,7 @@ TEST(NetdeviceTest, InterfaceFlags) {\n}\nTEST(NetdeviceTest, InterfaceMTU) {\n+ SKIP_IF(IsRunningWithHostinet());\nFileDescriptor sock =\nASSERT_NO_ERRNO_AND_VALUE(Socket(AF_INET, SOCK_DGRAM, 0));\n@@ -181,6 +185,7 @@ TEST(NetdeviceTest, InterfaceMTU) {\n}\nTEST(NetdeviceTest, EthtoolGetTSInfo) {\n+ SKIP_IF(IsRunningWithHostinet());\nFileDescriptor sock =\nASSERT_NO_ERRNO_AND_VALUE(Socket(AF_INET, SOCK_DGRAM, 0));\n" } ]
Go
Apache License 2.0
google/gvisor
Add SIOCGIFFLAGS ioctl support to hostinet. PiperOrigin-RevId: 382194711
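For context, SIOCGIFFLAGS is the classic ioctl for reading an interface's flag bits through any AF_INET socket, which is what the sentry now passes through to the host. The sketch below issues it directly with a hand-rolled ifreq layout (Linux amd64 assumptions baked in); it is only an illustration of the ioctl, not the hostinet code above.

package main

import (
	"fmt"
	"unsafe"

	"golang.org/x/sys/unix"
)

// ifreq mirrors the flags variant of struct ifreq: 16 bytes of interface
// name followed by a 16-bit flags field, padded out to the kernel's
// 40-byte layout.
type ifreq struct {
	Name  [unix.IFNAMSIZ]byte
	Flags uint16
	_     [22]byte
}

// interfaceFlags returns the IFF_* bits for the named interface.
func interfaceFlags(name string) (uint16, error) {
	fd, err := unix.Socket(unix.AF_INET, unix.SOCK_DGRAM, 0)
	if err != nil {
		return 0, err
	}
	defer unix.Close(fd)

	var req ifreq
	copy(req.Name[:], name)
	if _, _, errno := unix.Syscall(unix.SYS_IOCTL, uintptr(fd), unix.SIOCGIFFLAGS, uintptr(unsafe.Pointer(&req))); errno != 0 {
		return 0, errno
	}
	return req.Flags, nil
}

func main() {
	flags, err := interfaceFlags("lo")
	if err != nil {
		fmt.Println("ioctl failed:", err)
		return
	}
	fmt.Printf("lo flags: %#x (IFF_UP set: %t)\n", flags, flags&unix.IFF_UP != 0)
}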
260,004
29.06.2021 21:29:22
25,200
66a79461a23e5e98c53a809eda442393cd6925b3
Support parsing NDP Route Information option This change prepares for a later change which supports the NDP Route Information option to discover more-specific routes, as per RFC 4191. Updates
[ { "change_type": "MODIFY", "old_path": "pkg/tcpip/header/ndp_options.go", "new_path": "pkg/tcpip/header/ndp_options.go", "diff": "@@ -233,6 +233,17 @@ func (i *NDPOptionIterator) Next() (NDPOption, bool, error) {\ncase ndpNonceOptionType:\nreturn NDPNonceOption(body), false, nil\n+ case ndpRouteInformationType:\n+ if numBodyBytes > ndpRouteInformationMaxLength {\n+ return nil, true, fmt.Errorf(\"got %d bytes for NDP Route Information option's body, expected at max %d bytes: %w\", numBodyBytes, ndpRouteInformationMaxLength, ErrNDPOptMalformedBody)\n+ }\n+ opt := NDPRouteInformation(body)\n+ if err := opt.hasError(); err != nil {\n+ return nil, true, err\n+ }\n+\n+ return opt, false, nil\n+\ncase ndpPrefixInformationType:\n// Make sure the length of a Prefix Information option\n// body is ndpPrefixInformationLength, as per RFC 4861\n@@ -930,3 +941,137 @@ func isUpperLetter(b byte) bool {\nfunc isDigit(b byte) bool {\nreturn b >= '0' && b <= '9'\n}\n+\n+// As per RFC 4191 section 2.3,\n+//\n+// 2.3. Route Information Option\n+//\n+// 0 1 2 3\n+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1\n+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+\n+// | Type | Length | Prefix Length |Resvd|Prf|Resvd|\n+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+\n+// | Route Lifetime |\n+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+\n+// | Prefix (Variable Length) |\n+// . .\n+// . .\n+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+\n+//\n+// Fields:\n+//\n+// Type 24\n+//\n+//\n+// Length 8-bit unsigned integer. The length of the option\n+// (including the Type and Length fields) in units of 8\n+// octets. The Length field is 1, 2, or 3 depending on the\n+// Prefix Length. If Prefix Length is greater than 64, then\n+// Length must be 3. If Prefix Length is greater than 0,\n+// then Length must be 2 or 3. 
If Prefix Length is zero,\n+// then Length must be 1, 2, or 3.\n+const (\n+ ndpRouteInformationType = ndpOptionIdentifier(24)\n+ ndpRouteInformationMaxLength = 22\n+\n+ ndpRouteInformationPrefixLengthIdx = 0\n+ ndpRouteInformationFlagsIdx = 1\n+ ndpRouteInformationPrfShift = 3\n+ ndpRouteInformationPrfMask = 3 << ndpRouteInformationPrfShift\n+ ndpRouteInformationRouteLifetimeIdx = 2\n+ ndpRouteInformationRoutePrefixIdx = 6\n+)\n+\n+// NDPRouteInformation is the NDP Router Information option, as defined by\n+// RFC 4191 section 2.3.\n+type NDPRouteInformation []byte\n+\n+func (NDPRouteInformation) kind() ndpOptionIdentifier {\n+ return ndpRouteInformationType\n+}\n+\n+func (o NDPRouteInformation) length() int {\n+ return len(o)\n+}\n+\n+func (o NDPRouteInformation) serializeInto(b []byte) int {\n+ return copy(b, o)\n+}\n+\n+// String implements fmt.Stringer.\n+func (o NDPRouteInformation) String() string {\n+ return fmt.Sprintf(\"%T\", o)\n+}\n+\n+// PrefixLength returns the length of the prefix.\n+func (o NDPRouteInformation) PrefixLength() uint8 {\n+ return o[ndpRouteInformationPrefixLengthIdx]\n+}\n+\n+// RoutePreference returns the preference of the route over other routes to the\n+// same destination but through a different router.\n+func (o NDPRouteInformation) RoutePreference() NDPRoutePreference {\n+ return NDPRoutePreference((o[ndpRouteInformationFlagsIdx] & ndpRouteInformationPrfMask) >> ndpRouteInformationPrfShift)\n+}\n+\n+// RouteLifetime returns the lifetime of the route.\n+//\n+// Note, a value of 0 implies the route is now invalid and a value of\n+// infinity/forever is represented by NDPInfiniteLifetime.\n+func (o NDPRouteInformation) RouteLifetime() time.Duration {\n+ return time.Second * time.Duration(binary.BigEndian.Uint32(o[ndpRouteInformationRouteLifetimeIdx:]))\n+}\n+\n+// Prefix returns the prefix of the destination subnet this route is for.\n+func (o NDPRouteInformation) Prefix() (tcpip.Subnet, error) {\n+ prefixLength := int(o.PrefixLength())\n+ if max := IPv6AddressSize * 8; prefixLength > max {\n+ return tcpip.Subnet{}, fmt.Errorf(\"got prefix length = %d, want <= %d\", prefixLength, max)\n+ }\n+\n+ prefix := o[ndpRouteInformationRoutePrefixIdx:]\n+ var addrBytes [IPv6AddressSize]byte\n+ if n := copy(addrBytes[:], prefix); n != len(prefix) {\n+ panic(fmt.Sprintf(\"got copy(addrBytes, prefix) = %d, want = %d\", n, len(prefix)))\n+ }\n+\n+ return tcpip.AddressWithPrefix{\n+ Address: tcpip.Address(addrBytes[:]),\n+ PrefixLen: prefixLength,\n+ }.Subnet(), nil\n+}\n+\n+func (o NDPRouteInformation) hasError() error {\n+ l := len(o)\n+ if l < ndpRouteInformationRoutePrefixIdx {\n+ return fmt.Errorf(\"%T too small, got = %d bytes: %w\", o, l, ErrNDPOptMalformedBody)\n+ }\n+\n+ prefixLength := int(o.PrefixLength())\n+ if max := IPv6AddressSize * 8; prefixLength > max {\n+ return fmt.Errorf(\"got prefix length = %d, want <= %d: %w\", prefixLength, max, ErrNDPOptMalformedBody)\n+ }\n+\n+ // Length 8-bit unsigned integer. The length of the option\n+ // (including the Type and Length fields) in units of 8\n+ // octets. The Length field is 1, 2, or 3 depending on the\n+ // Prefix Length. If Prefix Length is greater than 64, then\n+ // Length must be 3. If Prefix Length is greater than 0,\n+ // then Length must be 2 or 3. 
If Prefix Length is zero,\n+ // then Length must be 1, 2, or 3.\n+ l += 2 // Add 2 bytes for the type and length bytes.\n+ lengthField := l / lengthByteUnits\n+ if prefixLength > 64 {\n+ if lengthField != 3 {\n+ return fmt.Errorf(\"Length field must be 3 when Prefix Length (%d) is > 64 (got = %d): %w\", prefixLength, lengthField, ErrNDPOptMalformedBody)\n+ }\n+ } else if prefixLength > 0 {\n+ if lengthField != 2 && lengthField != 3 {\n+ return fmt.Errorf(\"Length field must be 2 or 3 when Prefix Length (%d) is between 0 and 64 (got = %d): %w\", prefixLength, lengthField, ErrNDPOptMalformedBody)\n+ }\n+ } else if lengthField == 0 || lengthField > 3 {\n+ return fmt.Errorf(\"Length field must be 1, 2, or 3 when Prefix Length is zero (got = %d): %w\", lengthField, ErrNDPOptMalformedBody)\n+ }\n+\n+ return nil\n+}\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/header/ndp_test.go", "new_path": "pkg/tcpip/header/ndp_test.go", "diff": "@@ -21,6 +21,7 @@ import (\n\"fmt\"\n\"io\"\n\"regexp\"\n+ \"strings\"\n\"testing\"\n\"time\"\n@@ -58,6 +59,224 @@ func TestNDPNeighborSolicit(t *testing.T) {\n}\n}\n+func TestNDPRouteInformationOption(t *testing.T) {\n+ tests := []struct {\n+ name string\n+\n+ length uint8\n+ prefixLength uint8\n+ prf NDPRoutePreference\n+ lifetimeS uint32\n+ prefixBytes []byte\n+ expectedPrefix tcpip.Subnet\n+\n+ expectedErr error\n+ }{\n+ {\n+ name: \"Length=1 with Prefix Length = 0\",\n+ length: 1,\n+ prefixLength: 0,\n+ prf: MediumRoutePreference,\n+ lifetimeS: 1,\n+ prefixBytes: nil,\n+ expectedPrefix: IPv6EmptySubnet,\n+ },\n+ {\n+ name: \"Length=1 but Prefix Length > 0\",\n+ length: 1,\n+ prefixLength: 1,\n+ prf: MediumRoutePreference,\n+ lifetimeS: 1,\n+ prefixBytes: nil,\n+ expectedErr: ErrNDPOptMalformedBody,\n+ },\n+ {\n+ name: \"Length=2 with Prefix Length = 0\",\n+ length: 2,\n+ prefixLength: 0,\n+ prf: MediumRoutePreference,\n+ lifetimeS: 1,\n+ prefixBytes: nil,\n+ expectedPrefix: IPv6EmptySubnet,\n+ },\n+ {\n+ name: \"Length=2 with Prefix Length in [1, 64] (1)\",\n+ length: 2,\n+ prefixLength: 1,\n+ prf: LowRoutePreference,\n+ lifetimeS: 1,\n+ prefixBytes: nil,\n+ expectedPrefix: tcpip.AddressWithPrefix{\n+ Address: tcpip.Address(strings.Repeat(\"\\x00\", IPv6AddressSize)),\n+ PrefixLen: 1,\n+ }.Subnet(),\n+ },\n+ {\n+ name: \"Length=2 with Prefix Length in [1, 64] (64)\",\n+ length: 2,\n+ prefixLength: 64,\n+ prf: HighRoutePreference,\n+ lifetimeS: 1,\n+ prefixBytes: nil,\n+ expectedPrefix: tcpip.AddressWithPrefix{\n+ Address: tcpip.Address(strings.Repeat(\"\\x00\", IPv6AddressSize)),\n+ PrefixLen: 64,\n+ }.Subnet(),\n+ },\n+ {\n+ name: \"Length=2 with Prefix Length > 64\",\n+ length: 2,\n+ prefixLength: 65,\n+ prf: HighRoutePreference,\n+ lifetimeS: 1,\n+ prefixBytes: nil,\n+ expectedErr: ErrNDPOptMalformedBody,\n+ },\n+ {\n+ name: \"Length=3 with Prefix Length = 0\",\n+ length: 3,\n+ prefixLength: 0,\n+ prf: MediumRoutePreference,\n+ lifetimeS: 1,\n+ prefixBytes: nil,\n+ expectedPrefix: IPv6EmptySubnet,\n+ },\n+ {\n+ name: \"Length=3 with Prefix Length in [1, 64] (1)\",\n+ length: 3,\n+ prefixLength: 1,\n+ prf: LowRoutePreference,\n+ lifetimeS: 1,\n+ prefixBytes: nil,\n+ expectedPrefix: tcpip.AddressWithPrefix{\n+ Address: tcpip.Address(strings.Repeat(\"\\x00\", IPv6AddressSize)),\n+ PrefixLen: 1,\n+ }.Subnet(),\n+ },\n+ {\n+ name: \"Length=3 with Prefix Length in [1, 64] (64)\",\n+ length: 3,\n+ prefixLength: 64,\n+ prf: HighRoutePreference,\n+ lifetimeS: 1,\n+ prefixBytes: nil,\n+ expectedPrefix: tcpip.AddressWithPrefix{\n+ Address: 
tcpip.Address(strings.Repeat(\"\\x00\", IPv6AddressSize)),\n+ PrefixLen: 64,\n+ }.Subnet(),\n+ },\n+ {\n+ name: \"Length=3 with Prefix Length in [65, 128] (65)\",\n+ length: 3,\n+ prefixLength: 65,\n+ prf: HighRoutePreference,\n+ lifetimeS: 1,\n+ prefixBytes: nil,\n+ expectedPrefix: tcpip.AddressWithPrefix{\n+ Address: tcpip.Address(strings.Repeat(\"\\x00\", IPv6AddressSize)),\n+ PrefixLen: 65,\n+ }.Subnet(),\n+ },\n+ {\n+ name: \"Length=3 with Prefix Length in [65, 128] (128)\",\n+ length: 3,\n+ prefixLength: 128,\n+ prf: HighRoutePreference,\n+ lifetimeS: 1,\n+ prefixBytes: nil,\n+ expectedPrefix: tcpip.AddressWithPrefix{\n+ Address: tcpip.Address(strings.Repeat(\"\\x00\", IPv6AddressSize)),\n+ PrefixLen: 128,\n+ }.Subnet(),\n+ },\n+ {\n+ name: \"Length=3 with (invalid) Prefix Length > 128\",\n+ length: 3,\n+ prefixLength: 129,\n+ prf: HighRoutePreference,\n+ lifetimeS: 1,\n+ prefixBytes: nil,\n+ expectedErr: ErrNDPOptMalformedBody,\n+ },\n+ }\n+\n+ for _, test := range tests {\n+ t.Run(test.name, func(t *testing.T) {\n+ expectedRouteInformationBytes := [...]byte{\n+ // Type, Length\n+ 24, test.length,\n+\n+ // Prefix Length, Prf\n+ uint8(test.prefixLength), uint8(test.prf) << 3,\n+\n+ // Route Lifetime\n+ 0, 0, 0, 0,\n+\n+ 0, 0, 0, 0,\n+ 0, 0, 0, 0,\n+ 0, 0, 0, 0,\n+ 0, 0, 0, 0,\n+ }\n+ binary.BigEndian.PutUint32(expectedRouteInformationBytes[4:], test.lifetimeS)\n+ _ = copy(expectedRouteInformationBytes[8:], test.prefixBytes)\n+\n+ opts := NDPOptions(expectedRouteInformationBytes[:test.length*lengthByteUnits])\n+ it, err := opts.Iter(false)\n+ if err != nil {\n+ t.Fatalf(\"got Iter(false) = (_, %s), want = (_, nil)\", err)\n+ }\n+ opt, done, err := it.Next()\n+ if !errors.Is(err, test.expectedErr) {\n+ t.Fatalf(\"got Next() = (_, _, %s), want = (_, _, %s)\", err, test.expectedErr)\n+ }\n+ if want := test.expectedErr != nil; done != want {\n+ t.Fatalf(\"got Next() = (_, %t, _), want = (_, %t, _)\", done, want)\n+ }\n+ if test.expectedErr != nil {\n+ return\n+ }\n+\n+ if got := opt.kind(); got != ndpRouteInformationType {\n+ t.Errorf(\"got kind() = %d, want = %d\", got, ndpRouteInformationType)\n+ }\n+\n+ ri, ok := opt.(NDPRouteInformation)\n+ if !ok {\n+ t.Fatalf(\"got opt = %T, want = NDPRouteInformation\", opt)\n+ }\n+\n+ if got := ri.PrefixLength(); got != test.prefixLength {\n+ t.Errorf(\"got PrefixLength() = %d, want = %d\", got, test.prefixLength)\n+ }\n+ if got := ri.RoutePreference(); got != test.prf {\n+ t.Errorf(\"got RoutePreference() = %d, want = %d\", got, test.prf)\n+ }\n+ if got, want := ri.RouteLifetime(), time.Duration(test.lifetimeS)*time.Second; got != want {\n+ t.Errorf(\"got RouteLifetime() = %s, want = %s\", got, want)\n+ }\n+ if got, err := ri.Prefix(); err != nil {\n+ t.Errorf(\"Prefix(): %s\", err)\n+ } else if got != test.expectedPrefix {\n+ t.Errorf(\"got Prefix() = %s, want = %s\", got, test.expectedPrefix)\n+ }\n+\n+ // Iterator should not return anything else.\n+ {\n+ next, done, err := it.Next()\n+ if err != nil {\n+ t.Errorf(\"got Next() = (_, _, %s), want = (_, _, nil)\", err)\n+ }\n+ if !done {\n+ t.Error(\"got Next() = (_, false, _), want = (_, true, _)\")\n+ }\n+ if next != nil {\n+ t.Errorf(\"got Next() = (%x, _, _), want = (nil, _, _)\", next)\n+ }\n+ }\n+ })\n+ }\n+}\n+\n// TestNDPNeighborAdvert tests the functions of NDPNeighborAdvert.\nfunc TestNDPNeighborAdvert(t *testing.T) {\nb := []byte{\n" } ]
Go
Apache License 2.0
google/gvisor
Support parsing NDP Route Information option This change prepares for a later change which supports the NDP Route Information option to discover more-specific routes, as per RFC 4191. Updates #6172. PiperOrigin-RevId: 382225812
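The diff above pins down the Route Information option's body layout: Prefix Length at byte 0, the 2-bit Prf field at bits 3-4 (mask 0x18) of byte 1, a big-endian 32-bit Route Lifetime in seconds at bytes 2-5, and the (possibly truncated) prefix from byte 6 onward, with a body of at most 22 bytes. The sketch below parses that layout in isolation; routeInfo and parseRouteInfo are illustrative names rather than the gVisor header package API, and the Length-field consistency checks performed by hasError are omitted.

```go
// Standalone sketch of decoding an RFC 4191 Route Information option body
// (the option without its Type and Length bytes), mirroring the field
// offsets introduced in ndp_options.go.
package main

import (
	"encoding/binary"
	"fmt"
	"time"
)

type routeInfo struct {
	prefixLen uint8         // number of significant prefix bits
	prf       uint8         // 2-bit route preference from the Resvd|Prf|Resvd byte
	lifetime  time.Duration // zero means the route is invalidated
	prefix    [16]byte      // zero-padded IPv6 prefix
}

// parseRouteInfo accepts bodies of 6, 14, or 22 bytes (Length = 1, 2, or 3).
func parseRouteInfo(body []byte) (routeInfo, error) {
	if len(body) < 6 || len(body) > 22 {
		return routeInfo{}, fmt.Errorf("body must be 6..22 bytes, got %d", len(body))
	}
	ri := routeInfo{
		prefixLen: body[0],
		prf:       (body[1] >> 3) & 0x3,
		lifetime:  time.Duration(binary.BigEndian.Uint32(body[2:6])) * time.Second,
	}
	if ri.prefixLen > 128 {
		return routeInfo{}, fmt.Errorf("prefix length %d > 128", ri.prefixLen)
	}
	copy(ri.prefix[:], body[6:]) // remaining bytes are the prefix
	return ri, nil
}

func main() {
	// Length=3 body: 2001:db8::/64, Prf=01 (high), 1000 second lifetime.
	body := []byte{
		64, 0x01 << 3,
		0, 0, 0x03, 0xe8,
		0x20, 0x01, 0x0d, 0xb8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	}
	ri, err := parseRouteInfo(body)
	if err != nil {
		panic(err)
	}
	fmt.Printf("prefixLen=%d prf=%d lifetime=%s\n", ri.prefixLen, ri.prf, ri.lifetime)
}
```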
260,004
30.06.2021 18:30:47
25,200
07ffecef83bd31e78786af901c49a7be93b20517
Implement fmt.Stringer for NDPRoutePreference
[ { "change_type": "MODIFY", "old_path": "pkg/tcpip/header/ndp_router_advert.go", "new_path": "pkg/tcpip/header/ndp_router_advert.go", "diff": "@@ -16,9 +16,12 @@ package header\nimport (\n\"encoding/binary\"\n+ \"fmt\"\n\"time\"\n)\n+var _ fmt.Stringer = NDPRoutePreference(0)\n+\n// NDPRoutePreference is the preference values for default routers or\n// more-specific routes.\n//\n@@ -64,6 +67,22 @@ const (\nReservedRoutePreference = 0b10\n)\n+// String implements fmt.Stringer.\n+func (p NDPRoutePreference) String() string {\n+ switch p {\n+ case HighRoutePreference:\n+ return \"HighRoutePreference\"\n+ case MediumRoutePreference:\n+ return \"MediumRoutePreference\"\n+ case LowRoutePreference:\n+ return \"LowRoutePreference\"\n+ case ReservedRoutePreference:\n+ return \"ReservedRoutePreference\"\n+ default:\n+ return fmt.Sprintf(\"NDPRoutePreference(%d)\", p)\n+ }\n+}\n+\n// NDPRouterAdvert is an NDP Router Advertisement message. It will only contain\n// the body of an ICMPv6 packet.\n//\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/header/ndp_test.go", "new_path": "pkg/tcpip/header/ndp_test.go", "diff": "@@ -1717,3 +1717,32 @@ func TestNDPOptionsIter(t *testing.T) {\nt.Errorf(\"got Next = (%x, _, _), want = (nil, _, _)\", next)\n}\n}\n+\n+func TestNDPRoutePreferenceStringer(t *testing.T) {\n+ p := NDPRoutePreference(0)\n+ for {\n+ var wantStr string\n+ switch p {\n+ case 0b01:\n+ wantStr = \"HighRoutePreference\"\n+ case 0b00:\n+ wantStr = \"MediumRoutePreference\"\n+ case 0b11:\n+ wantStr = \"LowRoutePreference\"\n+ case 0b10:\n+ wantStr = \"ReservedRoutePreference\"\n+ default:\n+ wantStr = fmt.Sprintf(\"NDPRoutePreference(%d)\", p)\n+ }\n+\n+ if gotStr := p.String(); gotStr != wantStr {\n+ t.Errorf(\"got NDPRoutePreference(%d).String() = %s, want = %s\", p, gotStr, wantStr)\n+ }\n+\n+ p++\n+ if p == 0 {\n+ // Overflowed, we hit all values.\n+ break\n+ }\n+ }\n+}\n" } ]
Go
Apache License 2.0
google/gvisor
Implement fmt.Stringer for NDPRoutePreference PiperOrigin-RevId: 382427879
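A short usage sketch of the new Stringer, assuming the gvisor module is available on the import path; the preference constants and the strings they map to come directly from the diff above.

```go
package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/tcpip/header"
)

func main() {
	prefs := []header.NDPRoutePreference{
		header.HighRoutePreference,
		header.MediumRoutePreference,
		header.LowRoutePreference,
		header.ReservedRoutePreference,
		7, // an unknown value falls through to the default case
	}
	for _, p := range prefs {
		// Prints HighRoutePreference, MediumRoutePreference, LowRoutePreference,
		// ReservedRoutePreference, and finally NDPRoutePreference(7).
		fmt.Println(p.String())
	}
}
```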
259,992
01.07.2021 14:20:16
25,200
3d4a8824f8e7aafdf6c1d18822b7593fa2e3e6bb
Strace: handle null paths
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/strace/strace.go", "new_path": "pkg/sentry/strace/strace.go", "diff": "@@ -133,6 +133,9 @@ func dump(t *kernel.Task, addr hostarch.Addr, size uint, maximumBlobSize uint) s\n}\nfunc path(t *kernel.Task, addr hostarch.Addr) string {\n+ if addr == 0 {\n+ return \"<null>\"\n+ }\npath, err := t.CopyInString(addr, linux.PATH_MAX)\nif err != nil {\nreturn fmt.Sprintf(\"%#x (error decoding path: %s)\", addr, err)\n" } ]
Go
Apache License 2.0
google/gvisor
Strace: handle null paths PiperOrigin-RevId: 382603592
260,004
02.07.2021 11:28:49
25,200
a51a4b872ebdc5b9d6a74bb92d932c9197514606
Discover more specific routes as per RFC 4191 More-specific route discovery allows hosts to pick a more appropriate router for off-link destinations. Fixes
[ { "change_type": "MODIFY", "old_path": "pkg/tcpip/network/ipv6/ndp.go", "new_path": "pkg/tcpip/network/ipv6/ndp.go", "diff": "@@ -54,6 +54,11 @@ const (\n// Advertisements, as a host.\ndefaultDiscoverDefaultRouters = true\n+ // defaultDiscoverMoreSpecificRoutes is the default configuration for\n+ // whether or not to discover more-specific routes from incoming Router\n+ // Advertisements, as a host.\n+ defaultDiscoverMoreSpecificRoutes = true\n+\n// defaultDiscoverOnLinkPrefixes is the default configuration for\n// whether or not to discover on-link prefixes from incoming Router\n// Advertisements' Prefix Information option, as a host.\n@@ -352,12 +357,18 @@ type NDPConfigurations struct {\n// DiscoverDefaultRouters determines whether or not default routers are\n// discovered from Router Advertisements, as per RFC 4861 section 6. This\n- // configuration is ignored if HandleRAs is false.\n+ // configuration is ignored if RAs will not be processed (see HandleRAs).\nDiscoverDefaultRouters bool\n+ // DiscoverMoreSpecificRoutes determines whether or not more specific routes\n+ // are discovered from Router Advertisements, as per RFC 4191. This\n+ // configuration is ignored if RAs will not be processed (see HandleRAs).\n+ DiscoverMoreSpecificRoutes bool\n+\n// DiscoverOnLinkPrefixes determines whether or not on-link prefixes are\n// discovered from Router Advertisements' Prefix Information option, as per\n- // RFC 4861 section 6. This configuration is ignored if HandleRAs is false.\n+ // RFC 4861 section 6. This configuration is ignored if RAs will not be\n+ // processed (see HandleRAs).\nDiscoverOnLinkPrefixes bool\n// AutoGenGlobalAddresses determines whether or not an IPv6 endpoint performs\n@@ -408,6 +419,7 @@ func DefaultNDPConfigurations() NDPConfigurations {\nMaxRtrSolicitationDelay: defaultMaxRtrSolicitationDelay,\nHandleRAs: defaultHandleRAs,\nDiscoverDefaultRouters: defaultDiscoverDefaultRouters,\n+ DiscoverMoreSpecificRoutes: defaultDiscoverMoreSpecificRoutes,\nDiscoverOnLinkPrefixes: defaultDiscoverOnLinkPrefixes,\nAutoGenGlobalAddresses: defaultAutoGenGlobalAddresses,\nAutoGenTempGlobalAddresses: defaultAutoGenTempGlobalAddresses,\n@@ -786,6 +798,32 @@ func (ndp *ndpState) handleRA(ip tcpip.Address, ra header.NDPRouterAdvert) {\nif opt.AutonomousAddressConfigurationFlag() {\nndp.handleAutonomousPrefixInformation(opt)\n}\n+\n+ case header.NDPRouteInformation:\n+ if !ndp.configs.DiscoverMoreSpecificRoutes {\n+ continue\n+ }\n+\n+ dest, err := opt.Prefix()\n+ if err != nil {\n+ panic(fmt.Sprintf(\"%T.Prefix(): %s\", opt, err))\n+ }\n+\n+ prf := opt.RoutePreference()\n+ if prf == header.ReservedRoutePreference {\n+ // As per RFC 4191 section 2.3,\n+ //\n+ // Prf (Route Preference)\n+ // 2-bit signed integer. The Route Preference indicates\n+ // whether to prefer the router associated with this prefix\n+ // over others, when multiple identical prefixes (for\n+ // different routers) have been received. 
If the Reserved\n+ // (10) value is received, the Route Information Option MUST\n+ // be ignored.\n+ continue\n+ }\n+\n+ ndp.handleOffLinkRouteDiscovery(offLinkRoute{dest: dest, router: ip}, opt.RouteLifetime(), prf)\n}\n// TODO(b/141556115): Do (MTU) Parameter Discovery.\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/stack/ndp_test.go", "new_path": "pkg/tcpip/stack/ndp_test.go", "diff": "@@ -1152,6 +1152,39 @@ func raBufWithPI(ip tcpip.Address, rl uint16, prefix tcpip.AddressWithPrefix, on\n})\n}\n+// raBufWithRIO returns a valid NDP Router Advertisement with a single Route\n+// Information option.\n+//\n+// All fields in the RA will be zero except the RIO option.\n+func raBufWithRIO(t *testing.T, ip tcpip.Address, prefix tcpip.AddressWithPrefix, lifetimeSeconds uint32, prf header.NDPRoutePreference) *stack.PacketBuffer {\n+ // buf will hold the route information option after the Type and Length\n+ // fields.\n+ //\n+ // 2.3. Route Information Option\n+ //\n+ // 0 1 2 3\n+ // 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1\n+ // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+\n+ // | Type | Length | Prefix Length |Resvd|Prf|Resvd|\n+ // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+\n+ // | Route Lifetime |\n+ // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+\n+ // | Prefix (Variable Length) |\n+ // . .\n+ // . .\n+ // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+\n+ var buf [22]byte\n+ buf[0] = uint8(prefix.PrefixLen)\n+ buf[1] = byte(prf) << 3\n+ binary.BigEndian.PutUint32(buf[2:], lifetimeSeconds)\n+ if n := copy(buf[6:], prefix.Address); n != len(prefix.Address) {\n+ t.Fatalf(\"got copy(...) = %d, want = %d\", n, len(prefix.Address))\n+ }\n+ return raBufWithOpts(ip, 0 /* router lifetime */, header.NDPOptionsSerializer{\n+ header.NDPRouteInformation(buf[:]),\n+ })\n+}\n+\nfunc TestDynamicConfigurationsDisabled(t *testing.T) {\nconst (\nnicID = 1\n@@ -1308,8 +1341,8 @@ func boolToUint64(v bool) uint64 {\nreturn 0\n}\n-func checkOffLinkRouteEvent(e ndpOffLinkRouteEvent, nicID tcpip.NICID, router tcpip.Address, prf header.NDPRoutePreference, updated bool) string {\n- return cmp.Diff(ndpOffLinkRouteEvent{nicID: nicID, subnet: header.IPv6EmptySubnet, router: router, prf: prf, updated: updated}, e, cmp.AllowUnexported(e))\n+func checkOffLinkRouteEvent(e ndpOffLinkRouteEvent, nicID tcpip.NICID, subnet tcpip.Subnet, router tcpip.Address, prf header.NDPRoutePreference, updated bool) string {\n+ return cmp.Diff(ndpOffLinkRouteEvent{nicID: nicID, subnet: subnet, router: router, prf: prf, updated: updated}, e, cmp.AllowUnexported(e))\n}\nfunc testWithRAs(t *testing.T, f func(*testing.T, ipv6.HandleRAsConfiguration, bool)) {\n@@ -1342,9 +1375,41 @@ func testWithRAs(t *testing.T, f func(*testing.T, ipv6.HandleRAsConfiguration, b\n}\n}\n-func TestRouterDiscovery(t *testing.T) {\n+func TestOffLinkRouteDiscovery(t *testing.T) {\nconst nicID = 1\n+ moreSpecificPrefix := tcpip.AddressWithPrefix{Address: testutil.MustParse6(\"a00::\"), PrefixLen: 16}\n+ tests := []struct {\n+ name string\n+\n+ discoverDefaultRouters bool\n+ discoverMoreSpecificRoutes bool\n+\n+ dest tcpip.Subnet\n+ ra func(*testing.T, tcpip.Address, uint16, header.NDPRoutePreference) *stack.PacketBuffer\n+ }{\n+ {\n+ name: \"Default router discovery\",\n+ discoverDefaultRouters: true,\n+ discoverMoreSpecificRoutes: false,\n+ dest: header.IPv6EmptySubnet,\n+ ra: func(_ *testing.T, router tcpip.Address, lifetimeSeconds uint16, prf 
header.NDPRoutePreference) *stack.PacketBuffer {\n+ return raBufWithPrf(router, lifetimeSeconds, prf)\n+ },\n+ },\n+ {\n+ name: \"More-specific route discovery\",\n+ discoverDefaultRouters: false,\n+ discoverMoreSpecificRoutes: true,\n+ dest: moreSpecificPrefix.Subnet(),\n+ ra: func(t *testing.T, router tcpip.Address, lifetimeSeconds uint16, prf header.NDPRoutePreference) *stack.PacketBuffer {\n+ return raBufWithRIO(t, router, moreSpecificPrefix, uint32(lifetimeSeconds), prf)\n+ },\n+ },\n+ }\n+\n+ for _, test := range tests {\n+ t.Run(test.name, func(t *testing.T) {\ntestWithRAs(t, func(t *testing.T, handleRAs ipv6.HandleRAsConfiguration, forwarding bool) {\nndpDisp := ndpDispatcher{\noffLinkRouteC: make(chan ndpOffLinkRouteEvent, 1),\n@@ -1355,7 +1420,8 @@ func TestRouterDiscovery(t *testing.T) {\nNetworkProtocols: []stack.NetworkProtocolFactory{ipv6.NewProtocolWithOptions(ipv6.Options{\nNDPConfigs: ipv6.NDPConfigurations{\nHandleRAs: handleRAs,\n- DiscoverDefaultRouters: true,\n+ DiscoverDefaultRouters: test.discoverDefaultRouters,\n+ DiscoverMoreSpecificRoutes: test.discoverMoreSpecificRoutes,\n},\nNDPDisp: &ndpDisp,\n})},\n@@ -1367,7 +1433,7 @@ func TestRouterDiscovery(t *testing.T) {\nselect {\ncase e := <-ndpDisp.offLinkRouteC:\n- if diff := checkOffLinkRouteEvent(e, nicID, addr, prf, updated); diff != \"\" {\n+ if diff := checkOffLinkRouteEvent(e, nicID, test.dest, addr, prf, updated); diff != \"\" {\nt.Errorf(\"off-link route event mismatch (-want +got):\\n%s\", diff)\n}\ndefault:\n@@ -1382,7 +1448,7 @@ func TestRouterDiscovery(t *testing.T) {\nselect {\ncase e := <-ndpDisp.offLinkRouteC:\nvar prf header.NDPRoutePreference\n- if diff := checkOffLinkRouteEvent(e, nicID, addr, prf, false); diff != \"\" {\n+ if diff := checkOffLinkRouteEvent(e, nicID, test.dest, addr, prf, false); diff != \"\" {\nt.Errorf(\"off-link route event mismatch (-want +got):\\n%s\", diff)\n}\ndefault:\n@@ -1400,28 +1466,38 @@ func TestRouterDiscovery(t *testing.T) {\n// Rx an RA from lladdr2 with zero lifetime. 
It should not be\n// remembered.\n- e.InjectInbound(header.IPv6ProtocolNumber, raBufSimple(llAddr2, 0))\n+ e.InjectInbound(header.IPv6ProtocolNumber, test.ra(t, llAddr2, 0, header.MediumRoutePreference))\nselect {\ncase <-ndpDisp.offLinkRouteC:\nt.Fatal(\"unexpectedly updated an off-link route with 0 lifetime\")\ndefault:\n}\n- // Rx an RA from lladdr2 with a huge lifetime and reserved preference value\n- // (which should be interpreted as the default (medium) preference value).\n- e.InjectInbound(header.IPv6ProtocolNumber, raBufWithPrf(llAddr2, 1000, header.ReservedRoutePreference))\n+ // Discover an off-link route through llAddr2.\n+ e.InjectInbound(header.IPv6ProtocolNumber, test.ra(t, llAddr2, 1000, header.ReservedRoutePreference))\n+ if test.discoverMoreSpecificRoutes {\n+ // The reserved value is considered invalid with more-specific route\n+ // discovery so we inject the same packet but with the default\n+ // (medium) preference value.\n+ select {\n+ case <-ndpDisp.offLinkRouteC:\n+ t.Fatal(\"unexpectedly updated an off-link route with a reserved preference value\")\n+ default:\n+ }\n+ e.InjectInbound(header.IPv6ProtocolNumber, test.ra(t, llAddr2, 1000, header.MediumRoutePreference))\n+ }\nexpectOffLinkRouteEvent(llAddr2, header.MediumRoutePreference, true)\n// Rx an RA from another router (lladdr3) with non-zero lifetime and\n// non-default preference value.\nconst l3LifetimeSeconds = 6\n- e.InjectInbound(header.IPv6ProtocolNumber, raBufWithPrf(llAddr3, l3LifetimeSeconds, header.HighRoutePreference))\n+ e.InjectInbound(header.IPv6ProtocolNumber, test.ra(t, llAddr3, l3LifetimeSeconds, header.HighRoutePreference))\nexpectOffLinkRouteEvent(llAddr3, header.HighRoutePreference, true)\n// Rx an RA from lladdr2 with lesser lifetime and default (medium)\n// preference value.\nconst l2LifetimeSeconds = 2\n- e.InjectInbound(header.IPv6ProtocolNumber, raBufSimple(llAddr2, l2LifetimeSeconds))\n+ e.InjectInbound(header.IPv6ProtocolNumber, test.ra(t, llAddr2, l2LifetimeSeconds, header.MediumRoutePreference))\nselect {\ncase <-ndpDisp.offLinkRouteC:\nt.Fatal(\"should not receive a off-link route event when updating lifetimes for known routers\")\n@@ -1429,7 +1505,7 @@ func TestRouterDiscovery(t *testing.T) {\n}\n// Rx an RA from lladdr2 with a different preference.\n- e.InjectInbound(header.IPv6ProtocolNumber, raBufWithPrf(llAddr2, l2LifetimeSeconds, header.LowRoutePreference))\n+ e.InjectInbound(header.IPv6ProtocolNumber, test.ra(t, llAddr2, l2LifetimeSeconds, header.LowRoutePreference))\nexpectOffLinkRouteEvent(llAddr2, header.LowRoutePreference, true)\n// Wait for lladdr2's router invalidation job to execute. The lifetime\n@@ -1442,11 +1518,11 @@ func TestRouterDiscovery(t *testing.T) {\nexpectAsyncOffLinkRouteInvalidationEvent(llAddr2, l2LifetimeSeconds*time.Second)\n// Rx an RA from lladdr2 with huge lifetime.\n- e.InjectInbound(header.IPv6ProtocolNumber, raBufSimple(llAddr2, 1000))\n+ e.InjectInbound(header.IPv6ProtocolNumber, test.ra(t, llAddr2, 1000, header.MediumRoutePreference))\nexpectOffLinkRouteEvent(llAddr2, header.MediumRoutePreference, true)\n// Rx an RA from lladdr2 with zero lifetime. It should be invalidated.\n- e.InjectInbound(header.IPv6ProtocolNumber, raBufSimple(llAddr2, 0))\n+ e.InjectInbound(header.IPv6ProtocolNumber, test.ra(t, llAddr2, 0, header.MediumRoutePreference))\nexpectOffLinkRouteEvent(llAddr2, header.MediumRoutePreference, false)\n// Wait for lladdr3's router invalidation job to execute. 
The lifetime\n@@ -1458,6 +1534,8 @@ func TestRouterDiscovery(t *testing.T) {\n// event after this time, then something is wrong.\nexpectAsyncOffLinkRouteInvalidationEvent(llAddr3, l3LifetimeSeconds*time.Second)\n})\n+ })\n+ }\n}\n// TestRouterDiscoveryMaxRouters tests that only\n@@ -1494,7 +1572,7 @@ func TestRouterDiscoveryMaxRouters(t *testing.T) {\nif i <= ipv6.MaxDiscoveredOffLinkRoutes {\nselect {\ncase e := <-ndpDisp.offLinkRouteC:\n- if diff := checkOffLinkRouteEvent(e, nicID, llAddr, header.MediumRoutePreference, true); diff != \"\" {\n+ if diff := checkOffLinkRouteEvent(e, nicID, header.IPv6EmptySubnet, llAddr, header.MediumRoutePreference, true); diff != \"\" {\nt.Errorf(\"off-link route event mismatch (-want +got):\\n%s\", diff)\n}\ndefault:\n@@ -4583,7 +4661,7 @@ func TestNoCleanupNDPStateWhenForwardingEnabled(t *testing.T) {\n)\nselect {\ncase e := <-ndpDisp.offLinkRouteC:\n- if diff := checkOffLinkRouteEvent(e, nicID, llAddr3, header.MediumRoutePreference, true /* discovered */); diff != \"\" {\n+ if diff := checkOffLinkRouteEvent(e, nicID, header.IPv6EmptySubnet, llAddr3, header.MediumRoutePreference, true /* discovered */); diff != \"\" {\nt.Errorf(\"off-link route event mismatch (-want +got):\\n%s\", diff)\n}\ndefault:\n" } ]
Go
Apache License 2.0
google/gvisor
Discover more specific routes as per RFC 4191 More-specific route discovery allows hosts to pick a more appropriate router for off-link destinations. Fixes #6172. PiperOrigin-RevId: 382779880
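The sketch below shows how a netstack user would configure the new behavior when building a stack, based on the DiscoverMoreSpecificRoutes field and constructors that appear in the diff; it assumes the gvisor module is importable and leaves out NIC, address, and NDP dispatcher setup.

```go
package main

import (
	"gvisor.dev/gvisor/pkg/tcpip/network/ipv6"
	"gvisor.dev/gvisor/pkg/tcpip/stack"
)

func main() {
	// Start from the defaults, where more-specific route discovery is already
	// enabled, and set the new knob explicitly for clarity. It only has an
	// effect when Router Advertisements are processed (see HandleRAs).
	ndpConfigs := ipv6.DefaultNDPConfigurations()
	ndpConfigs.DiscoverMoreSpecificRoutes = true

	s := stack.New(stack.Options{
		NetworkProtocols: []stack.NetworkProtocolFactory{
			ipv6.NewProtocolWithOptions(ipv6.Options{NDPConfigs: ndpConfigs}),
		},
	})
	// NICs, addresses, and an NDP dispatcher would be registered here before
	// the stack can receive Router Advertisements.
	_ = s
}
```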
259,891
02.07.2021 18:13:24
25,200
3d32a05a35bde4cfed861c274c32bfc55acc19c9
runsc: validate mount targets
[ { "change_type": "MODIFY", "old_path": "runsc/cmd/chroot.go", "new_path": "runsc/cmd/chroot.go", "diff": "@@ -30,7 +30,7 @@ func mountInChroot(chroot, src, dst, typ string, flags uint32) error {\nchrootDst := filepath.Join(chroot, dst)\nlog.Infof(\"Mounting %q at %q\", src, chrootDst)\n- if err := specutils.Mount(src, chrootDst, typ, flags); err != nil {\n+ if err := specutils.Mount(src, chrootDst, typ, flags, \"/proc\"); err != nil {\nreturn fmt.Errorf(\"error mounting %q at %q: %v\", src, chrootDst, err)\n}\nreturn nil\n@@ -70,11 +70,11 @@ func setUpChroot(pidns bool) error {\n// Convert all shared mounts into slave to be sure that nothing will be\n// propagated outside of our namespace.\n- if err := unix.Mount(\"\", \"/\", \"\", unix.MS_SLAVE|unix.MS_REC, \"\"); err != nil {\n+ if err := specutils.SafeMount(\"\", \"/\", \"\", unix.MS_SLAVE|unix.MS_REC, \"\", \"/proc\"); err != nil {\nreturn fmt.Errorf(\"error converting mounts: %v\", err)\n}\n- if err := unix.Mount(\"runsc-root\", chroot, \"tmpfs\", unix.MS_NOSUID|unix.MS_NODEV|unix.MS_NOEXEC, \"\"); err != nil {\n+ if err := specutils.SafeMount(\"runsc-root\", chroot, \"tmpfs\", unix.MS_NOSUID|unix.MS_NODEV|unix.MS_NOEXEC, \"\", \"/proc\"); err != nil {\nreturn fmt.Errorf(\"error mounting tmpfs in choot: %v\", err)\n}\n@@ -89,7 +89,7 @@ func setUpChroot(pidns bool) error {\n}\n}\n- if err := unix.Mount(\"\", chroot, \"\", unix.MS_REMOUNT|unix.MS_RDONLY|unix.MS_BIND, \"\"); err != nil {\n+ if err := specutils.SafeMount(\"\", chroot, \"\", unix.MS_REMOUNT|unix.MS_RDONLY|unix.MS_BIND, \"\", \"/proc\"); err != nil {\nreturn fmt.Errorf(\"error remounting chroot in read-only: %v\", err)\n}\n" }, { "change_type": "MODIFY", "old_path": "runsc/cmd/gofer.go", "new_path": "runsc/cmd/gofer.go", "diff": "@@ -265,7 +265,8 @@ func isReadonlyMount(opts []string) bool {\nfunc setupRootFS(spec *specs.Spec, conf *config.Config) error {\n// Convert all shared mounts into slaves to be sure that nothing will be\n// propagated outside of our namespace.\n- if err := unix.Mount(\"\", \"/\", \"\", unix.MS_SLAVE|unix.MS_REC, \"\"); err != nil {\n+ procPath := \"/proc\"\n+ if err := specutils.SafeMount(\"\", \"/\", \"\", unix.MS_SLAVE|unix.MS_REC, \"\", procPath); err != nil {\nFatalf(\"error converting mounts: %v\", err)\n}\n@@ -278,21 +279,24 @@ func setupRootFS(spec *specs.Spec, conf *config.Config) error {\n// We need a directory to construct a new root and we know that\n// runsc can't start without /proc, so we can use it for this.\nflags := uintptr(unix.MS_NOSUID | unix.MS_NODEV | unix.MS_NOEXEC)\n- if err := unix.Mount(\"runsc-root\", \"/proc\", \"tmpfs\", flags, \"\"); err != nil {\n+ if err := specutils.SafeMount(\"runsc-root\", \"/proc\", \"tmpfs\", flags, \"\", procPath); err != nil {\nFatalf(\"error mounting tmpfs: %v\", err)\n}\n// Prepare tree structure for pivot_root(2).\nos.Mkdir(\"/proc/proc\", 0755)\nos.Mkdir(\"/proc/root\", 0755)\n+ // This cannot use SafeMount because there's no available procfs. 
But we\n+ // know that /proc is an empty tmpfs mount, so this is safe.\nif err := unix.Mount(\"runsc-proc\", \"/proc/proc\", \"proc\", flags|unix.MS_RDONLY, \"\"); err != nil {\nFatalf(\"error mounting proc: %v\", err)\n}\nroot = \"/proc/root\"\n+ procPath = \"/proc/proc\"\n}\n// Mount root path followed by submounts.\n- if err := unix.Mount(spec.Root.Path, root, \"bind\", unix.MS_BIND|unix.MS_REC, \"\"); err != nil {\n+ if err := specutils.SafeMount(spec.Root.Path, root, \"bind\", unix.MS_BIND|unix.MS_REC, \"\", procPath); err != nil {\nreturn fmt.Errorf(\"mounting root on root (%q) err: %v\", root, err)\n}\n@@ -300,12 +304,12 @@ func setupRootFS(spec *specs.Spec, conf *config.Config) error {\nif spec.Linux != nil && spec.Linux.RootfsPropagation != \"\" {\nflags = specutils.PropOptionsToFlags([]string{spec.Linux.RootfsPropagation})\n}\n- if err := unix.Mount(\"\", root, \"\", uintptr(flags), \"\"); err != nil {\n+ if err := specutils.SafeMount(\"\", root, \"\", uintptr(flags), \"\", procPath); err != nil {\nreturn fmt.Errorf(\"mounting root (%q) with flags: %#x, err: %v\", root, flags, err)\n}\n// Replace the current spec, with the clean spec with symlinks resolved.\n- if err := setupMounts(conf, spec.Mounts, root); err != nil {\n+ if err := setupMounts(conf, spec.Mounts, root, procPath); err != nil {\nFatalf(\"error setting up FS: %v\", err)\n}\n@@ -327,7 +331,7 @@ func setupRootFS(spec *specs.Spec, conf *config.Config) error {\n// to make it read-only for extra safety.\nlog.Infof(\"Remounting root as readonly: %q\", root)\nflags := uintptr(unix.MS_BIND | unix.MS_REMOUNT | unix.MS_RDONLY | unix.MS_REC)\n- if err := unix.Mount(root, root, \"bind\", flags, \"\"); err != nil {\n+ if err := specutils.SafeMount(root, root, \"bind\", flags, \"\", procPath); err != nil {\nreturn fmt.Errorf(\"remounting root as read-only with source: %q, target: %q, flags: %#x, err: %v\", root, root, flags, err)\n}\n}\n@@ -343,10 +347,10 @@ func setupRootFS(spec *specs.Spec, conf *config.Config) error {\nreturn nil\n}\n-// setupMounts binds mount all mounts specified in the spec in their correct\n+// setupMounts bind mounts all mounts specified in the spec in their correct\n// location inside root. It will resolve relative paths and symlinks. 
It also\n// creates directories as needed.\n-func setupMounts(conf *config.Config, mounts []specs.Mount, root string) error {\n+func setupMounts(conf *config.Config, mounts []specs.Mount, root, procPath string) error {\nfor _, m := range mounts {\nif !specutils.Is9PMount(m, conf.VFS2) {\ncontinue\n@@ -364,14 +368,14 @@ func setupMounts(conf *config.Config, mounts []specs.Mount, root string) error {\n}\nlog.Infof(\"Mounting src: %q, dst: %q, flags: %#x\", m.Source, dst, flags)\n- if err := specutils.Mount(m.Source, dst, m.Type, flags); err != nil {\n- return fmt.Errorf(\"mounting %v: %v\", m, err)\n+ if err := specutils.Mount(m.Source, dst, m.Type, flags, procPath); err != nil {\n+ return fmt.Errorf(\"mounting %+v: %v\", m, err)\n}\n// Set propagation options that cannot be set together with other options.\nflags = specutils.PropOptionsToFlags(m.Options)\nif flags != 0 {\n- if err := unix.Mount(\"\", dst, \"\", uintptr(flags), \"\"); err != nil {\n+ if err := specutils.SafeMount(\"\", dst, \"\", uintptr(flags), \"\", procPath); err != nil {\nreturn fmt.Errorf(\"mount dst: %q, flags: %#x, err: %v\", dst, flags, err)\n}\n}\n" }, { "change_type": "ADD", "old_path": null, "new_path": "runsc/specutils/safemount_test/BUILD", "diff": "+load(\"//tools:defs.bzl\", \"go_binary\", \"go_test\")\n+\n+package(licenses = [\"notice\"])\n+\n+go_test(\n+ name = \"safemount_test\",\n+ size = \"small\",\n+ srcs = [\"safemount_test.go\"],\n+ data = [\":safemount_runner\"],\n+ deps = [\n+ \"//pkg/test/testutil\",\n+ \"@org_golang_x_sys//unix:go_default_library\",\n+ ],\n+)\n+\n+go_binary(\n+ name = \"safemount_runner\",\n+ srcs = [\"safemount_runner.go\"],\n+ deps = [\n+ \"//runsc/specutils\",\n+ \"@org_golang_x_sys//unix:go_default_library\",\n+ ],\n+)\n" }, { "change_type": "ADD", "old_path": null, "new_path": "runsc/specutils/safemount_test/safemount_runner.go", "diff": "+// Copyright 2021 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+// safemount_runner is used to test the SafeMount function. 
Because use of\n+// unix.Mount requires privilege, tests must launch this process with\n+// CLONE_NEWNS and CLONE_NEWUSER.\n+package main\n+\n+import (\n+ \"errors\"\n+ \"fmt\"\n+ \"log\"\n+ \"os\"\n+ \"path/filepath\"\n+\n+ \"golang.org/x/sys/unix\"\n+ \"gvisor.dev/gvisor/runsc/specutils\"\n+)\n+\n+func main() {\n+ // The test temporary directory is the first argument.\n+ tempdir := os.Args[1]\n+\n+ tcs := []struct {\n+ name string\n+ testfunc func() error\n+ }{{\n+ name: \"unix.Mount to folder succeeds\",\n+ testfunc: func() error {\n+ dir2Path := filepath.Join(tempdir, \"subdir2\")\n+ if err := unix.Mount(filepath.Join(tempdir, \"subdir\"), dir2Path, \"bind\", unix.MS_BIND, \"\"); err != nil {\n+ return fmt.Errorf(\"mount: %v\", err)\n+ }\n+ return unix.Unmount(dir2Path, unix.MNT_DETACH)\n+ },\n+ }, {\n+ // unix.Mount doesn't care whether the target is a symlink.\n+ name: \"unix.Mount to symlink succeeds\",\n+ testfunc: func() error {\n+ symlinkPath := filepath.Join(tempdir, \"symlink\")\n+ if err := unix.Mount(filepath.Join(tempdir, \"subdir\"), symlinkPath, \"bind\", unix.MS_BIND, \"\"); err != nil {\n+ return fmt.Errorf(\"mount: %v\", err)\n+ }\n+ return unix.Unmount(symlinkPath, unix.MNT_DETACH)\n+ },\n+ }, {\n+ name: \"SafeMount to folder succeeds\",\n+ testfunc: func() error {\n+ dir2Path := filepath.Join(tempdir, \"subdir2\")\n+ if err := specutils.SafeMount(filepath.Join(tempdir, \"subdir\"), dir2Path, \"bind\", unix.MS_BIND, \"\", \"/proc\"); err != nil {\n+ return fmt.Errorf(\"SafeMount: %v\", err)\n+ }\n+ return unix.Unmount(dir2Path, unix.MNT_DETACH)\n+ },\n+ }, {\n+ name: \"SafeMount to symlink fails\",\n+ testfunc: func() error {\n+ err := specutils.SafeMount(filepath.Join(tempdir, \"subdir\"), filepath.Join(tempdir, \"symlink\"), \"bind\", unix.MS_BIND, \"\", \"/proc\")\n+ if err == nil {\n+ return fmt.Errorf(\"SafeMount didn't fail, but should have\")\n+ }\n+ var symErr *specutils.ErrSymlinkMount\n+ if !errors.As(err, &symErr) {\n+ return fmt.Errorf(\"expected SafeMount to fail with ErrSymlinkMount, but got: %v\", err)\n+ }\n+ return nil\n+ },\n+ }}\n+\n+ for _, tc := range tcs {\n+ if err := runTest(tempdir, tc.testfunc); err != nil {\n+ log.Fatalf(\"failed test %q: %v\", tc.name, err)\n+ }\n+ }\n+}\n+\n+// runTest runs testfunc with the following directory structure:\n+// tempdir/\n+// subdir/\n+// subdir2/\n+// symlink --> ./subdir2\n+func runTest(tempdir string, testfunc func() error) error {\n+ // Create tempdir/subdir/.\n+ dirPath := filepath.Join(tempdir, \"subdir\")\n+ if err := os.Mkdir(dirPath, 0777); err != nil {\n+ return fmt.Errorf(\"os.Mkdir(%s, 0777)\", dirPath)\n+ }\n+ defer os.Remove(dirPath)\n+\n+ // Create tempdir/subdir2/.\n+ dir2Path := filepath.Join(tempdir, \"subdir2\")\n+ if err := os.Mkdir(dir2Path, 0777); err != nil {\n+ return fmt.Errorf(\"os.Mkdir(%s, 0777)\", dir2Path)\n+ }\n+ defer os.Remove(dir2Path)\n+\n+ // Create tempdir/symlink, which points to ./subdir2.\n+ symlinkPath := filepath.Join(tempdir, \"symlink\")\n+ if err := os.Symlink(\"./subdir2\", symlinkPath); err != nil {\n+ return fmt.Errorf(\"failed to create symlink %s: %v\", symlinkPath, err)\n+ }\n+ defer os.Remove(symlinkPath)\n+\n+ // Run the actual test.\n+ return testfunc()\n+}\n" }, { "change_type": "ADD", "old_path": null, "new_path": "runsc/specutils/safemount_test/safemount_test.go", "diff": "+// Copyright 2021 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the 
License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package safemount_test\n+\n+import (\n+ \"os\"\n+ \"os/exec\"\n+ \"syscall\"\n+ \"testing\"\n+\n+ \"golang.org/x/sys/unix\"\n+ \"gvisor.dev/gvisor/pkg/test/testutil\"\n+)\n+\n+func TestSafeMount(t *testing.T) {\n+ // We run the actual tests in another process, as we need CAP_SYS_ADMIN to\n+ // call mount(2). The new process runs in its own user and mount namespaces.\n+ runner, err := testutil.FindFile(\"runsc/specutils/safemount_test/safemount_runner\")\n+ if err != nil {\n+ t.Fatalf(\"failed to find test runner binary: %v\", err)\n+ }\n+ cmd := exec.Command(runner, t.TempDir())\n+ cmd.SysProcAttr = &unix.SysProcAttr{\n+ Cloneflags: unix.CLONE_NEWNS | unix.CLONE_NEWUSER,\n+ UidMappings: []syscall.SysProcIDMap{\n+ {ContainerID: 0, HostID: os.Getuid(), Size: 1},\n+ },\n+ GidMappings: []syscall.SysProcIDMap{\n+ {ContainerID: 0, HostID: os.Getgid(), Size: 1},\n+ },\n+ GidMappingsEnableSetgroups: false,\n+ Credential: &syscall.Credential{\n+ Uid: 0,\n+ Gid: 0,\n+ },\n+ }\n+ output, err := cmd.CombinedOutput()\n+ if err != nil {\n+ t.Fatalf(\"failed running %s with error: %v\\ntest output:\\n%s\", cmd, err, output)\n+ }\n+}\n" }, { "change_type": "MODIFY", "old_path": "runsc/specutils/specutils.go", "new_path": "runsc/specutils/specutils.go", "diff": "@@ -217,7 +217,7 @@ func ReadMounts(f *os.File) ([]specs.Mount, error) {\n}\nvar mounts []specs.Mount\nif err := json.Unmarshal(bytes, &mounts); err != nil {\n- return nil, fmt.Errorf(\"error unmarshaling mounts: %v\\n %s\", err, string(bytes))\n+ return nil, fmt.Errorf(\"error unmarshaling mounts: %v\\nJSON bytes:\\n%s\", err, string(bytes))\n}\nreturn mounts, nil\n}\n@@ -434,8 +434,10 @@ func DebugLogFile(logPattern, command, test string) (*os.File, error) {\nreturn os.OpenFile(logPattern, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0664)\n}\n-// Mount creates the mount point and calls Mount with the given flags.\n-func Mount(src, dst, typ string, flags uint32) error {\n+// Mount creates the mount point and calls Mount with the given flags. procPath\n+// is the path to procfs. If it is \"\", procfs is assumed to be mounted at\n+// /proc.\n+func Mount(src, dst, typ string, flags uint32, procPath string) error {\n// Create the mount point inside. The type must be the same as the\n// source (file or directory).\nvar isDir bool\n@@ -468,12 +470,46 @@ func Mount(src, dst, typ string, flags uint32) error {\n}\n// Do the mount.\n- if err := unix.Mount(src, dst, typ, uintptr(flags), \"\"); err != nil {\n+ if err := SafeMount(src, dst, typ, uintptr(flags), \"\", procPath); err != nil {\nreturn fmt.Errorf(\"mount(%q, %q, %d) failed: %v\", src, dst, flags, err)\n}\nreturn nil\n}\n+// ErrSymlinkMount is returned by SafeMount when the mount destination is found\n+// to be a symlink.\n+type ErrSymlinkMount struct {\n+ error\n+}\n+\n+// SafeMount is like unix.Mount, but will fail if dst is a symlink. procPath is\n+// the path to procfs. 
If it is \"\", procfs is assumed to be mounted at /proc.\n+func SafeMount(src, dst, fstype string, flags uintptr, data, procPath string) error {\n+ // Open the destination.\n+ fd, err := unix.Open(dst, unix.O_PATH|unix.O_CLOEXEC, 0)\n+ if err != nil {\n+ return fmt.Errorf(\"failed to safely mount: Open(%s, _, _): %w\", dst, err)\n+ }\n+ defer unix.Close(fd)\n+\n+ // Use /proc/self/fd/ to verify that we opened the intended destination. This\n+ // guards against dst being a symlink, in which case we could accidentally\n+ // mount over the symlink's target.\n+ if procPath == \"\" {\n+ procPath = \"/proc\"\n+ }\n+ safePath := filepath.Join(procPath, \"self/fd\", strconv.Itoa(fd))\n+ target, err := os.Readlink(safePath)\n+ if err != nil {\n+ return fmt.Errorf(\"failed to safely mount: Readlink(%s): %w\", safePath, err)\n+ }\n+ if dst != target {\n+ return &ErrSymlinkMount{fmt.Errorf(\"failed to safely mount: expected to open %s, but found %s\", dst, target)}\n+ }\n+\n+ return unix.Mount(src, safePath, fstype, flags, data)\n+}\n+\n// ContainsStr returns true if 'str' is inside 'strs'.\nfunc ContainsStr(strs []string, str string) bool {\nfor _, s := range strs {\n" } ]
Go
Apache License 2.0
google/gvisor
runsc: validate mount targets PiperOrigin-RevId: 382845950
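The core of this change is SafeMount, which refuses to mount over a symlink by opening the target with O_PATH and then checking what /proc/self/fd says was actually opened. Below is a self-contained sketch of that pattern; safeMount here is an illustrative re-statement, not the runsc/specutils API, and running it needs mount privileges (e.g. a user and mount namespace, as in the added test).

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strconv"

	"golang.org/x/sys/unix"
)

// safeMount mounts src over dst but fails if dst is a symlink. dst is assumed
// to be an absolute, already-cleaned path so that it compares equal to the
// resolved /proc/self/fd link.
func safeMount(src, dst, fstype string, flags uintptr, data, procPath string) error {
	fd, err := unix.Open(dst, unix.O_PATH|unix.O_CLOEXEC, 0)
	if err != nil {
		return fmt.Errorf("open(%s): %w", dst, err)
	}
	defer unix.Close(fd)

	// If dst is a symlink, the magic link resolves to the symlink's target
	// rather than dst itself, so the comparison below fails.
	self := filepath.Join(procPath, "self/fd", strconv.Itoa(fd))
	target, err := os.Readlink(self)
	if err != nil {
		return fmt.Errorf("readlink(%s): %w", self, err)
	}
	if target != dst {
		return fmt.Errorf("refusing to mount: %s resolves to %s", dst, target)
	}

	// Mount through the fd alias so the object just verified is the one that
	// gets mounted over.
	return unix.Mount(src, self, fstype, flags, data)
}

func main() {
	// Needs CAP_SYS_ADMIN in the current mount namespace; expect a permission
	// error when run unprivileged.
	err := safeMount("/tmp", "/mnt", "bind", unix.MS_BIND, "", "/proc")
	fmt.Println("safeMount:", err)
}
```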
259,907
02.07.2021 18:45:05
25,200
add8bca5ba53b37096bc653900cb278e11681461
[op] Make TCPNonBlockingConnectClose more reasonable. This test single-handedly causes the syscalls:socket_inet_loopback_test test variants to take more than an hour to run on some of our testing environments. Reduce how aggressively this test tries to replicate a fixed flake. This is a regression test.
[ { "change_type": "MODIFY", "old_path": "test/syscalls/linux/socket_inet_loopback.cc", "new_path": "test/syscalls/linux/socket_inet_loopback.cc", "diff": "@@ -707,7 +707,7 @@ TEST_P(SocketInetLoopbackTest, TCPNonBlockingConnectClose) {\n// Try many iterations to catch a race with socket close and handshake\n// completion.\n- for (int i = 0; i < 1000; ++i) {\n+ for (int i = 0; i < 100; ++i) {\nFileDescriptor client = ASSERT_NO_ERRNO_AND_VALUE(\nSocket(connector.family(), SOCK_STREAM | SOCK_NONBLOCK, IPPROTO_TCP));\nASSERT_THAT(\n" } ]
Go
Apache License 2.0
google/gvisor
[op] Make TCPNonBlockingConnectClose more reasonable. This test single-handedly causes the syscalls:socket_inet_loopback_test test variants to take more than an hour to run on some of our testing environments. Reduce how aggressively this test tries to replicate a fixed flake. This is a regression test. PiperOrigin-RevId: 382849039
259,868
07.07.2021 12:45:39
25,200
cd558fcb05c30bc08bbb9ba47755c2768fa33316
Sentry: Measure the time it takes to initialize the Sentry.
[ { "change_type": "MODIFY", "old_path": "WORKSPACE", "new_path": "WORKSPACE", "diff": "@@ -1306,6 +1306,15 @@ http_archive(\n],\n)\n+http_archive(\n+ name = \"com_google_protobuf\",\n+ sha256 = \"528927e398f4e290001886894dac17c5c6a2e5548f3fb68004cfb01af901b53a\",\n+ strip_prefix = \"protobuf-3.17.3\",\n+ urls = [\"https://github.com/protocolbuffers/protobuf/archive/v3.17.3.zip\"],\n+)\n+load(\"@com_google_protobuf//:protobuf_deps.bzl\", \"protobuf_deps\")\n+protobuf_deps()\n+\n# Schemas for testing.\nhttp_file(\nname = \"buildkite_pipeline_schema\",\n" }, { "change_type": "MODIFY", "old_path": "pkg/metric/BUILD", "new_path": "pkg/metric/BUILD", "diff": "@@ -4,13 +4,16 @@ package(licenses = [\"notice\"])\ngo_library(\nname = \"metric\",\n- srcs = [\"metric.go\"],\n+ srcs = [\n+ \"metric.go\",\n+ ],\nvisibility = [\"//:sandbox\"],\ndeps = [\n\":metric_go_proto\",\n\"//pkg/eventchannel\",\n\"//pkg/log\",\n\"//pkg/sync\",\n+ \"@org_golang_google_protobuf//types/known/timestamppb\",\n],\n)\n@@ -18,6 +21,9 @@ proto_library(\nname = \"metric\",\nsrcs = [\"metric.proto\"],\nvisibility = [\"//:sandbox\"],\n+ deps = [\n+ \"@com_google_protobuf//:timestamp_proto\",\n+ ],\n)\ngo_test(\n" }, { "change_type": "MODIFY", "old_path": "pkg/metric/metric.go", "new_path": "pkg/metric/metric.go", "diff": "@@ -20,7 +20,9 @@ import (\n\"fmt\"\n\"sort\"\n\"sync/atomic\"\n+ \"time\"\n+ \"google.golang.org/protobuf/types/known/timestamppb\"\n\"gvisor.dev/gvisor/pkg/eventchannel\"\n\"gvisor.dev/gvisor/pkg/log\"\npb \"gvisor.dev/gvisor/pkg/metric/metric_go_proto\"\n@@ -54,6 +56,27 @@ var (\n})\n)\n+// InitStage is the name of a Sentry initialization stage.\n+type InitStage string\n+\n+// List of all Sentry initialization stages.\n+var (\n+ InitRestoreConfig InitStage = \"restore_config\"\n+ InitExecConfig InitStage = \"exec_config\"\n+ InitRestore InitStage = \"restore\"\n+ InitCreateProcess InitStage = \"create_process\"\n+ InitTaskStart InitStage = \"task_start\"\n+\n+ // allStages is the list of allowed stages.\n+ allStages = []InitStage{\n+ InitRestoreConfig,\n+ InitExecConfig,\n+ InitRestore,\n+ InitCreateProcess,\n+ InitTaskStart,\n+ }\n+)\n+\n// Uint64Metric encapsulates a uint64 that represents some kind of metric to be\n// monitored. We currently support metrics with at most one field.\n//\n@@ -98,6 +121,10 @@ func Initialize() error {\nfor _, v := range allMetrics.m {\nm.Metrics = append(m.Metrics, v.metadata)\n}\n+ m.Stages = make([]string, 0, len(allStages))\n+ for _, s := range allStages {\n+ m.Stages = append(m.Stages, string(s))\n+ }\nif err := eventchannel.Emit(&m); err != nil {\nreturn fmt.Errorf(\"unable to emit metric initialize event: %w\", err)\n}\n@@ -287,34 +314,66 @@ func (m *Uint64Metric) IncrementBy(v uint64, fieldValues ...string) {\n}\n}\n-// metricSet holds named metrics.\n+// stageTiming contains timing data for an initialization stage.\n+type stageTiming struct {\n+ stage InitStage\n+ started time.Time\n+ // ended is the zero time when the stage has not ended yet.\n+ ended time.Time\n+}\n+\n+// inProgress returns whether this stage hasn't ended yet.\n+func (s stageTiming) inProgress() bool {\n+ return !s.started.IsZero() && s.ended.IsZero()\n+}\n+\n+// metricSet holds metric data.\ntype metricSet struct {\n+ // Map of metrics.\nm map[string]customUint64Metric\n+\n+ // mu protects the fields below.\n+ mu sync.RWMutex\n+\n+ // Information about the stages reached by the Sentry. 
Only appended to, so\n+ // reading a shallow copy of the slice header concurrently is safe.\n+ finished []stageTiming\n+\n+ // The current stage in progress.\n+ currentStage stageTiming\n}\n// makeMetricSet returns a new metricSet.\nfunc makeMetricSet() metricSet {\nreturn metricSet{\nm: make(map[string]customUint64Metric),\n+ finished: make([]stageTiming, 0, len(allStages)),\n}\n}\n// Values returns a snapshot of all values in m.\nfunc (m *metricSet) Values() metricValues {\n- vals := make(metricValues)\n+ m.mu.Lock()\n+ stages := m.finished[:]\n+ m.mu.Unlock()\n+\n+ vals := metricValues{\n+ m: make(map[string]interface{}, len(m.m)),\n+ stages: stages,\n+ }\nfor k, v := range m.m {\nfields := v.metadata.GetFields()\nswitch len(fields) {\ncase 0:\n- vals[k] = v.value()\n+ vals.m[k] = v.value()\ncase 1:\nvalues := fields[0].GetAllowedValues()\nfieldsMap := make(map[string]uint64)\nfor _, fieldValue := range values {\nfieldsMap[fieldValue] = v.value(fieldValue)\n}\n- vals[k] = fieldsMap\n+ vals.m[k] = fieldsMap\ndefault:\npanic(fmt.Sprintf(\"Unsupported number of metric fields: %d\", len(fields)))\n}\n@@ -322,10 +381,16 @@ func (m *metricSet) Values() metricValues {\nreturn vals\n}\n-// metricValues contains a copy of the values of all metrics. It is a map\n-// with key as metric name and value can be either uint64 or map[string]uint64\n-// to support metrics with one field.\n-type metricValues map[string]interface{}\n+// metricValues contains a copy of the values of all metrics.\n+type metricValues struct {\n+ // m is a map with key as metric name and value can be either uint64 or\n+ // map[string]uint64 to support metrics with one field.\n+ m map[string]interface{}\n+\n+ // Information on when initialization stages were reached. Does not include\n+ // the currently-ongoing stage, if any.\n+ stages []stageTiming\n+}\nvar (\n// emitMu protects metricsAtLastEmit and ensures that all emitted\n@@ -354,8 +419,8 @@ func EmitMetricUpdate() {\nm := pb.MetricUpdate{}\n// On the first call metricsAtLastEmit will be empty. 
Include all\n// metrics then.\n- for k, v := range snapshot {\n- prev, ok := metricsAtLastEmit[k]\n+ for k, v := range snapshot.m {\n+ prev, ok := metricsAtLastEmit.m[k]\nswitch t := v.(type) {\ncase uint64:\n// Metric exists and value did not change.\n@@ -386,8 +451,23 @@ func EmitMetricUpdate() {\n}\n}\n+ for s := len(metricsAtLastEmit.stages); s < len(snapshot.stages); s++ {\n+ newStage := snapshot.stages[s]\n+ m.StageTiming = append(m.StageTiming, &pb.StageTiming{\n+ Stage: string(newStage.stage),\n+ Started: &timestamppb.Timestamp{\n+ Seconds: newStage.started.Unix(),\n+ Nanos: int32(newStage.started.Nanosecond()),\n+ },\n+ Ended: &timestamppb.Timestamp{\n+ Seconds: newStage.ended.Unix(),\n+ Nanos: int32(newStage.ended.Nanosecond()),\n+ },\n+ })\n+ }\n+\nmetricsAtLastEmit = snapshot\n- if len(m.Metrics) == 0 {\n+ if len(m.Metrics) == 0 && len(m.StageTiming) == 0 {\nreturn\n}\n@@ -399,9 +479,52 @@ func EmitMetricUpdate() {\nfor _, metric := range m.Metrics {\nlog.Debugf(\"%s: %+v\", metric.Name, metric.Value)\n}\n+ for _, stage := range m.StageTiming {\n+ duration := time.Duration(stage.Ended.Seconds-stage.Started.Seconds)*time.Second + time.Duration(stage.Ended.Nanos-stage.Started.Nanos)*time.Nanosecond\n+ log.Debugf(\"Stage %s took %v\", stage.GetStage(), duration)\n+ }\n}\nif err := eventchannel.Emit(&m); err != nil {\nlog.Warningf(\"Unable to emit metrics: %s\", err)\n}\n}\n+\n+// StartStage should be called when an initialization stage is started.\n+// It returns a function that must be called to indicate that the stage ended.\n+// Alternatively, future calls to StartStage will implicitly indicate that the\n+// previous stage ended.\n+// Stage information will be emitted in the next call to EmitMetricUpdate after\n+// a stage has ended.\n+//\n+// This function may (and is expected to) be called prior to final\n+// initialization of this metric library, as it has to capture early stages\n+// of Sentry initialization.\n+func StartStage(stage InitStage) func() {\n+ now := time.Now()\n+ allMetrics.mu.Lock()\n+ defer allMetrics.mu.Unlock()\n+ if allMetrics.currentStage.inProgress() {\n+ endStage(now)\n+ }\n+ allMetrics.currentStage.stage = stage\n+ allMetrics.currentStage.started = now\n+ return func() {\n+ now := time.Now()\n+ allMetrics.mu.Lock()\n+ defer allMetrics.mu.Unlock()\n+ // The current stage may have been ended by another call to StartStage, so\n+ // double-check prior to clearing the current stage.\n+ if allMetrics.currentStage.inProgress() && allMetrics.currentStage.stage == stage {\n+ endStage(now)\n+ }\n+ }\n+}\n+\n+// endStage marks allMetrics.currentStage as ended, adding it to the list of\n+// finished stages. 
It assumes allMetrics.mu is locked.\n+func endStage(when time.Time) {\n+ allMetrics.currentStage.ended = when\n+ allMetrics.finished = append(allMetrics.finished, allMetrics.currentStage)\n+ allMetrics.currentStage = stageTiming{}\n+}\n" }, { "change_type": "MODIFY", "old_path": "pkg/metric/metric.proto", "new_path": "pkg/metric/metric.proto", "diff": "@@ -16,6 +16,8 @@ syntax = \"proto3\";\npackage gvisor;\n+import \"google/protobuf/timestamp.proto\";\n+\n// MetricMetadata contains all of the metadata describing a single metric.\nmessage MetricMetadata {\n// name is the unique name of the metric, usually in a \"directory\" format\n@@ -63,6 +65,7 @@ message MetricMetadata {\n// future MetricUpdates.\nmessage MetricRegistration {\nrepeated MetricMetadata metrics = 1;\n+ repeated string stages = 2;\n}\n// MetricValue the value of a metric at a single point in time.\n@@ -79,9 +82,20 @@ message MetricValue {\nrepeated string field_values = 4;\n}\n+// StageTiming represents a new stage that's been reached by the Sentry.\n+message StageTiming {\n+ string stage = 1;\n+ google.protobuf.Timestamp started = 2;\n+ google.protobuf.Timestamp ended = 3;\n+}\n+\n// MetricUpdate contains new values for multiple distinct metrics.\n//\n// Metrics whose values have not changed are not included.\nmessage MetricUpdate {\nrepeated MetricValue metrics = 1;\n+ // Timing information of initialization stages reached since last update.\n+ // The first MetricUpdate will include multiple entries, since metric\n+ // initialization happens relatively late in the Sentry startup process.\n+ repeated StageTiming stage_timing = 2;\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/metric/metric_test.go", "new_path": "pkg/metric/metric_test.go", "diff": "@@ -16,6 +16,7 @@ package metric\nimport (\n\"testing\"\n+ \"time\"\n\"google.golang.org/protobuf/proto\"\n\"gvisor.dev/gvisor/pkg/eventchannel\"\n@@ -352,3 +353,147 @@ func TestEmitMetricUpdateWithFields(t *testing.T) {\nt.Errorf(\"Field value weird2 not found: %+v\", emitter)\n}\n}\n+\n+func TestMetricUpdateStageTiming(t *testing.T) {\n+ defer reset()\n+\n+ expectedTimings := map[InitStage]struct{ min, max time.Duration }{}\n+ measureStage := func(stage InitStage, body func()) {\n+ stageStarted := time.Now()\n+ endStage := StartStage(stage)\n+ bodyStarted := time.Now()\n+ body()\n+ bodyEnded := time.Now()\n+ endStage()\n+ stageEnded := time.Now()\n+\n+ expectedTimings[stage] = struct{ min, max time.Duration }{\n+ min: bodyEnded.Sub(bodyStarted),\n+ max: stageEnded.Sub(stageStarted),\n+ }\n+ }\n+ checkStage := func(got *pb.StageTiming, want InitStage) {\n+ if InitStage(got.GetStage()) != want {\n+ t.Errorf(\"%v: got stage %q expected %q\", got, got.GetStage(), want)\n+ }\n+ timingBounds, found := expectedTimings[want]\n+ if !found {\n+ t.Fatalf(\"invalid init stage name %q\", want)\n+ }\n+ started := got.Started.AsTime()\n+ ended := got.Ended.AsTime()\n+ duration := ended.Sub(started)\n+ if duration < timingBounds.min {\n+ t.Errorf(\"stage %v: lasted %v, expected at least %v\", want, duration, timingBounds.min)\n+ } else if duration > timingBounds.max {\n+ t.Errorf(\"stage %v: lasted %v, expected no more than %v\", want, duration, timingBounds.max)\n+ }\n+ }\n+\n+ // Test that it's legit to go through stages before metric registration.\n+ measureStage(\"before_first_update_1\", func() {\n+ time.Sleep(100 * time.Millisecond)\n+ })\n+ measureStage(\"before_first_update_2\", func() {\n+ time.Sleep(100 * time.Millisecond)\n+ })\n+\n+ fooMetric, err := 
NewUint64Metric(\"/foo\", false, pb.MetricMetadata_UNITS_NONE, fooDescription)\n+ if err != nil {\n+ t.Fatalf(\"Cannot register /foo: %v\", err)\n+ }\n+ emitter.Reset()\n+ Initialize()\n+ EmitMetricUpdate()\n+\n+ // We should have gotten the metric registration and the first MetricUpdate.\n+ if len(emitter) != 2 {\n+ t.Fatalf(\"emitter has %d messages (%v), expected %d\", len(emitter), emitter, 2)\n+ }\n+\n+ if registration, ok := emitter[0].(*pb.MetricRegistration); !ok {\n+ t.Errorf(\"first message is not MetricRegistration: %T / %v\", emitter[0], emitter[0])\n+ } else if len(registration.Stages) != len(allStages) {\n+ t.Errorf(\"MetricRegistration has %d stages (%v), expected %d (%v)\", len(registration.Stages), registration.Stages, len(allStages), allStages)\n+ } else {\n+ for i := 0; i < len(allStages); i++ {\n+ if InitStage(registration.Stages[i]) != allStages[i] {\n+ t.Errorf(\"MetricRegistration.Stages[%d]: got %q want %q\", i, registration.Stages[i], allStages[i])\n+ }\n+ }\n+ }\n+\n+ if firstUpdate, ok := emitter[1].(*pb.MetricUpdate); !ok {\n+ t.Errorf(\"second message is not MetricUpdate: %T / %v\", emitter[1], emitter[1])\n+ } else if len(firstUpdate.StageTiming) != 2 {\n+ t.Errorf(\"MetricUpdate has %d stage timings (%v), expected %d\", len(firstUpdate.StageTiming), firstUpdate.StageTiming, 2)\n+ } else {\n+ checkStage(firstUpdate.StageTiming[0], \"before_first_update_1\")\n+ checkStage(firstUpdate.StageTiming[1], \"before_first_update_2\")\n+ }\n+\n+ // Ensure re-emitting doesn't cause another event to be sent.\n+ emitter.Reset()\n+ EmitMetricUpdate()\n+ if len(emitter) != 0 {\n+ t.Fatalf(\"EmitMetricUpdate emitted %d events want %d\", len(emitter), 0)\n+ }\n+\n+ // Generate monitoring data, we should get an event with no stages.\n+ fooMetric.Increment()\n+ emitter.Reset()\n+ EmitMetricUpdate()\n+ if len(emitter) != 1 {\n+ t.Fatalf(\"EmitMetricUpdate emitted %d events want %d\", len(emitter), 1)\n+ } else if update, ok := emitter[0].(*pb.MetricUpdate); !ok {\n+ t.Errorf(\"message is not MetricUpdate: %T / %v\", emitter[1], emitter[1])\n+ } else if len(update.StageTiming) != 0 {\n+ t.Errorf(\"unexpected stage timing information: %v\", update.StageTiming)\n+ }\n+\n+ // Now generate new stages.\n+ measureStage(\"foo_stage_1\", func() {\n+ time.Sleep(100 * time.Millisecond)\n+ })\n+ measureStage(\"foo_stage_2\", func() {\n+ time.Sleep(100 * time.Millisecond)\n+ })\n+ emitter.Reset()\n+ EmitMetricUpdate()\n+ if len(emitter) != 1 {\n+ t.Fatalf(\"EmitMetricUpdate emitted %d events want %d\", len(emitter), 1)\n+ } else if update, ok := emitter[0].(*pb.MetricUpdate); !ok {\n+ t.Errorf(\"message is not MetricUpdate: %T / %v\", emitter[1], emitter[1])\n+ } else if len(update.Metrics) != 0 {\n+ t.Errorf(\"MetricUpdate has %d metric value changes (%v), expected %d\", len(update.Metrics), update.Metrics, 0)\n+ } else if len(update.StageTiming) != 2 {\n+ t.Errorf(\"MetricUpdate has %d stages (%v), expected %d\", len(update.StageTiming), update.StageTiming, 2)\n+ } else {\n+ checkStage(update.StageTiming[0], \"foo_stage_1\")\n+ checkStage(update.StageTiming[1], \"foo_stage_2\")\n+ }\n+\n+ // Now try generating data for both metrics and stages.\n+ fooMetric.Increment()\n+ measureStage(\"last_stage_1\", func() {\n+ time.Sleep(100 * time.Millisecond)\n+ })\n+ measureStage(\"last_stage_2\", func() {\n+ time.Sleep(100 * time.Millisecond)\n+ })\n+ fooMetric.Increment()\n+ emitter.Reset()\n+ EmitMetricUpdate()\n+ if len(emitter) != 1 {\n+ t.Fatalf(\"EmitMetricUpdate emitted %d events want %d\", 
len(emitter), 1)\n+ } else if update, ok := emitter[0].(*pb.MetricUpdate); !ok {\n+ t.Errorf(\"message is not MetricUpdate: %T / %v\", emitter[1], emitter[1])\n+ } else if len(update.Metrics) != 1 {\n+ t.Errorf(\"MetricUpdate has %d metric value changes (%v), expected %d\", len(update.Metrics), update.Metrics, 1)\n+ } else if len(update.StageTiming) != 2 {\n+ t.Errorf(\"MetricUpdate has %d stages (%v), expected %d\", len(update.StageTiming), update.StageTiming, 2)\n+ } else {\n+ checkStage(update.StageTiming[0], \"last_stage_1\")\n+ checkStage(update.StageTiming[1], \"last_stage_2\")\n+ }\n+}\n" }, { "change_type": "MODIFY", "old_path": "tools/bazeldefs/go.bzl", "new_path": "tools/bazeldefs/go.bzl", "diff": "@@ -6,8 +6,11 @@ load(\"@io_bazel_rules_go//proto:def.bzl\", _go_grpc_library = \"go_grpc_library\",\nload(\"//tools/bazeldefs:defs.bzl\", \"select_arch\", \"select_system\")\ngazelle = _gazelle\n+\ngo_embed_data = _go_embed_data\n+\ngo_path = _go_path\n+\nbazel_worker_proto = \"//tools/bazeldefs:worker_protocol_go_proto\"\ndef _go_proto_or_grpc_library(go_library_func, name, **kwargs):\n@@ -15,10 +18,19 @@ def _go_proto_or_grpc_library(go_library_func, name, **kwargs):\n# If importpath is explicit, pass straight through.\ngo_library_func(name = name, **kwargs)\nreturn\n- deps = [\n- dep.replace(\"_proto\", \"_go_proto\")\n- for dep in (kwargs.pop(\"deps\", []) or [])\n- ]\n+ deps = []\n+ for d in (kwargs.pop(\"deps\", []) or []):\n+ if d == \"@com_google_protobuf//:timestamp_proto\":\n+ # Special case: this proto has its Go definitions in a different\n+ # repository.\n+ deps.append(\"@org_golang_google_protobuf//\" +\n+ \"types/known/timestamppb\")\n+ continue\n+ if \"//\" in d:\n+ repo, path = d.split(\"//\", 1)\n+ deps.append(repo + \"//\" + path.replace(\"_proto\", \"_go_proto\"))\n+ else:\n+ deps.append(d.replace(\"_proto\", \"_go_proto\"))\ngo_library_func(\nname = name + \"_go_proto\",\nimportpath = \"gvisor.dev/gvisor/\" + native.package_name() + \"/\" + name + \"_go_proto\",\n@@ -130,18 +142,18 @@ def go_context(ctx, goos = None, goarch = None, std = False):\nelif goarch != go_ctx.sdk.goarch:\nfail(\"Internal GOARCH (%s) doesn't match GoSdk GOARCH (%s).\" % (goarch, go_ctx.sdk.goarch))\nreturn struct(\n- go = go_ctx.go,\nenv = go_ctx.env,\n- nogo_args = [],\n- stdlib_srcs = go_ctx.sdk.srcs,\n- runfiles = depset([go_ctx.go] + go_ctx.sdk.srcs + go_ctx.sdk.tools + go_ctx.stdlib.libs),\n- goos = go_ctx.sdk.goos,\n+ go = go_ctx.go,\ngoarch = go_ctx.sdk.goarch,\n+ goos = go_ctx.sdk.goos,\ngotags = go_ctx.tags,\n+ nogo_args = [],\n+ runfiles = depset([go_ctx.go] + go_ctx.sdk.srcs + go_ctx.sdk.tools + go_ctx.stdlib.libs),\n+ stdlib_srcs = go_ctx.sdk.srcs,\n)\ndef select_goarch():\n- return select_arch(arm64 = \"arm64\", amd64 = \"amd64\")\n+ return select_arch(amd64 = \"amd64\", arm64 = \"arm64\")\ndef select_goos():\nreturn select_system(linux = \"linux\")\n" } ]
Go
Apache License 2.0
google/gvisor
Sentry: Measure the time it takes to initialize the Sentry. PiperOrigin-RevId: 383472507
259,868
08.07.2021 12:01:47
25,200
07f2c8b56b5948759b3df6587a8fcea13fbcc82b
devpts: Notify of echo'd input queue bytes only after locks have been released.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/devpts/BUILD", "new_path": "pkg/sentry/fsimpl/devpts/BUILD", "diff": "@@ -60,5 +60,6 @@ go_test(\n\"//pkg/abi/linux\",\n\"//pkg/sentry/contexttest\",\n\"//pkg/usermem\",\n+ \"//pkg/waiter\",\n],\n)\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/devpts/devpts_test.go", "new_path": "pkg/sentry/fsimpl/devpts/devpts_test.go", "diff": "@@ -20,6 +20,7 @@ import (\n\"gvisor.dev/gvisor/pkg/abi/linux\"\n\"gvisor.dev/gvisor/pkg/sentry/contexttest\"\n\"gvisor.dev/gvisor/pkg/usermem\"\n+ \"gvisor.dev/gvisor/pkg/waiter\"\n)\nfunc TestSimpleMasterToReplica(t *testing.T) {\n@@ -54,3 +55,36 @@ func TestSimpleMasterToReplica(t *testing.T) {\nt.Fatalf(\"written and read strings do not match: got %q, want %q\", outStr, inStr)\n}\n}\n+\n+type callback func(*waiter.Entry, waiter.EventMask)\n+\n+func (cb callback) Callback(entry *waiter.Entry, mask waiter.EventMask) {\n+ cb(entry, mask)\n+}\n+\n+func TestEchoDeadlock(t *testing.T) {\n+ ctx := contexttest.Context(t)\n+ termios := linux.DefaultReplicaTermios\n+ termios.LocalFlags |= linux.ECHO\n+ ld := newLineDiscipline(termios)\n+ outBytes := make([]byte, 32)\n+ dst := usermem.BytesIOSequence(outBytes)\n+ entry := &waiter.Entry{Callback: callback(func(*waiter.Entry, waiter.EventMask) {\n+ ld.inputQueueRead(ctx, dst)\n+ })}\n+ ld.masterWaiter.EventRegister(entry, waiter.ReadableEvents)\n+ defer ld.masterWaiter.EventUnregister(entry)\n+ inBytes := []byte(\"hello, tty\\n\")\n+ n, err := ld.inputQueueWrite(ctx, usermem.BytesIOSequence(inBytes))\n+ if err != nil {\n+ t.Fatalf(\"inputQueueWrite: %v\", err)\n+ }\n+ if int(n) != len(inBytes) {\n+ t.Fatalf(\"read wrong length: got %d, want %d\", n, len(inBytes))\n+ }\n+ outStr := string(outBytes[:n])\n+ inStr := string(inBytes)\n+ if outStr != inStr {\n+ t.Fatalf(\"written and read strings do not match: got %q, want %q\", outStr, inStr)\n+ }\n+}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/devpts/line_discipline.go", "new_path": "pkg/sentry/fsimpl/devpts/line_discipline.go", "diff": "@@ -70,6 +70,10 @@ const (\n// +------------------------| output queue |<--------------------------+\n// (outputQueueRead) +--------------+ (outputQueueWrite)\n//\n+// There is special handling for the ECHO option, where bytes written to the\n+// input queue are also output back to the terminal by being written to\n+// l.outQueue by the input queue transformer.\n+//\n// Lock order:\n// termiosMu\n// inQueue.mu\n@@ -126,7 +130,6 @@ func (l *lineDiscipline) getTermios(task *kernel.Task, args arch.SyscallArgument\n// setTermios sets a linux.Termios for the tty.\nfunc (l *lineDiscipline) setTermios(task *kernel.Task, args arch.SyscallArguments) (uintptr, error) {\nl.termiosMu.Lock()\n- defer l.termiosMu.Unlock()\noldCanonEnabled := l.termios.LEnabled(linux.ICANON)\n// We must copy a Termios struct, not KernelTermios.\nvar t linux.Termios\n@@ -141,7 +144,10 @@ func (l *lineDiscipline) setTermios(task *kernel.Task, args arch.SyscallArgument\nl.inQueue.pushWaitBufLocked(l)\nl.inQueue.readable = true\nl.inQueue.mu.Unlock()\n+ l.termiosMu.Unlock()\nl.replicaWaiter.Notify(waiter.ReadableEvents)\n+ } else {\n+ l.termiosMu.Unlock()\n}\nreturn 0, err\n@@ -179,28 +185,37 @@ func (l *lineDiscipline) inputQueueReadSize(t *kernel.Task, io usermem.IO, args\nfunc (l *lineDiscipline) inputQueueRead(ctx context.Context, dst usermem.IOSequence) (int64, error) {\nl.termiosMu.RLock()\n- defer l.termiosMu.RUnlock()\n- n, pushed, err := l.inQueue.read(ctx, dst, l)\n+ n, 
pushed, notifyEcho, err := l.inQueue.read(ctx, dst, l)\n+ l.termiosMu.RUnlock()\nif err != nil {\nreturn 0, err\n}\nif n > 0 {\n+ if notifyEcho {\n+ l.masterWaiter.Notify(waiter.ReadableEvents | waiter.WritableEvents)\n+ } else {\nl.masterWaiter.Notify(waiter.WritableEvents)\n+ }\nif pushed {\nl.replicaWaiter.Notify(waiter.ReadableEvents)\n}\nreturn n, nil\n+ } else if notifyEcho {\n+ l.masterWaiter.Notify(waiter.ReadableEvents)\n}\nreturn 0, syserror.ErrWouldBlock\n}\nfunc (l *lineDiscipline) inputQueueWrite(ctx context.Context, src usermem.IOSequence) (int64, error) {\nl.termiosMu.RLock()\n- defer l.termiosMu.RUnlock()\n- n, err := l.inQueue.write(ctx, src, l)\n+ n, notifyEcho, err := l.inQueue.write(ctx, src, l)\n+ l.termiosMu.RUnlock()\nif err != nil {\nreturn 0, err\n}\n+ if notifyEcho {\n+ l.masterWaiter.Notify(waiter.ReadableEvents)\n+ }\nif n > 0 {\nl.replicaWaiter.Notify(waiter.ReadableEvents)\nreturn n, nil\n@@ -214,8 +229,9 @@ func (l *lineDiscipline) outputQueueReadSize(t *kernel.Task, io usermem.IO, args\nfunc (l *lineDiscipline) outputQueueRead(ctx context.Context, dst usermem.IOSequence) (int64, error) {\nl.termiosMu.RLock()\n- defer l.termiosMu.RUnlock()\n- n, pushed, err := l.outQueue.read(ctx, dst, l)\n+ // Ignore notifyEcho, as it cannot happen when reading from the output queue.\n+ n, pushed, _, err := l.outQueue.read(ctx, dst, l)\n+ l.termiosMu.RUnlock()\nif err != nil {\nreturn 0, err\n}\n@@ -231,8 +247,9 @@ func (l *lineDiscipline) outputQueueRead(ctx context.Context, dst usermem.IOSequ\nfunc (l *lineDiscipline) outputQueueWrite(ctx context.Context, src usermem.IOSequence) (int64, error) {\nl.termiosMu.RLock()\n- defer l.termiosMu.RUnlock()\n- n, err := l.outQueue.write(ctx, src, l)\n+ // Ignore notifyEcho, as it cannot happen when writing to the output queue.\n+ n, _, err := l.outQueue.write(ctx, src, l)\n+ l.termiosMu.RUnlock()\nif err != nil {\nreturn 0, err\n}\n@@ -246,7 +263,8 @@ func (l *lineDiscipline) outputQueueWrite(ctx context.Context, src usermem.IOSeq\n// transformer is a helper interface to make it easier to stateify queue.\ntype transformer interface {\n// transform functions require queue's mutex to be held.\n- transform(*lineDiscipline, *queue, []byte) int\n+ // The boolean indicates whether there was any echoed bytes.\n+ transform(*lineDiscipline, *queue, []byte) (int, bool)\n}\n// outputQueueTransformer implements transformer. It performs line discipline\n@@ -261,7 +279,7 @@ type outputQueueTransformer struct{}\n// Preconditions:\n// * l.termiosMu must be held for reading.\n// * q.mu must be held.\n-func (*outputQueueTransformer) transform(l *lineDiscipline, q *queue, buf []byte) int {\n+func (*outputQueueTransformer) transform(l *lineDiscipline, q *queue, buf []byte) (int, bool) {\n// transformOutput is effectively always in noncanonical mode, as the\n// master termios never has ICANON set.\n@@ -270,7 +288,7 @@ func (*outputQueueTransformer) transform(l *lineDiscipline, q *queue, buf []byte\nif len(q.readBuf) > 0 {\nq.readable = true\n}\n- return len(buf)\n+ return len(buf), false\n}\nvar ret int\n@@ -321,7 +339,7 @@ func (*outputQueueTransformer) transform(l *lineDiscipline, q *queue, buf []byte\nif len(q.readBuf) > 0 {\nq.readable = true\n}\n- return ret\n+ return ret, false\n}\n// inputQueueTransformer implements transformer. It performs line discipline\n@@ -334,15 +352,17 @@ type inputQueueTransformer struct{}\n// transformed according to flags set in the termios struct. 
See\n// drivers/tty/n_tty.c:n_tty_receive_char_special for an analogous kernel\n// function.\n+// It returns an extra boolean indicating whether any characters need to be\n+// echoed, in which case we need to notify readers.\n//\n// Preconditions:\n// * l.termiosMu must be held for reading.\n// * q.mu must be held.\n-func (*inputQueueTransformer) transform(l *lineDiscipline, q *queue, buf []byte) int {\n+func (*inputQueueTransformer) transform(l *lineDiscipline, q *queue, buf []byte) (int, bool) {\n// If there's a line waiting to be read in canonical mode, don't write\n// anything else to the read buffer.\nif l.termios.LEnabled(linux.ICANON) && q.readable {\n- return 0\n+ return 0, false\n}\nmaxBytes := nonCanonMaxBytes\n@@ -351,6 +371,7 @@ func (*inputQueueTransformer) transform(l *lineDiscipline, q *queue, buf []byte)\n}\nvar ret int\n+ var notifyEcho bool\nfor len(buf) > 0 && len(q.readBuf) < canonMaxBytes {\nsize := l.peek(buf)\ncBytes := append([]byte{}, buf[:size]...)\n@@ -397,7 +418,7 @@ func (*inputQueueTransformer) transform(l *lineDiscipline, q *queue, buf []byte)\n// Anything written to the readBuf will have to be echoed.\nif l.termios.LEnabled(linux.ECHO) {\nl.outQueue.writeBytes(cBytes, l)\n- l.masterWaiter.Notify(waiter.ReadableEvents)\n+ notifyEcho = true\n}\n// If we finish a line, make it available for reading.\n@@ -412,7 +433,7 @@ func (*inputQueueTransformer) transform(l *lineDiscipline, q *queue, buf []byte)\nq.readable = true\n}\n- return ret\n+ return ret, notifyEcho\n}\n// shouldDiscard returns whether c should be discarded. In canonical mode, if\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/devpts/queue.go", "new_path": "pkg/sentry/fsimpl/devpts/queue.go", "diff": "@@ -98,17 +98,19 @@ func (q *queue) readableSize(t *kernel.Task, io usermem.IO, args arch.SyscallArg\n}\n-// read reads from q to userspace. It returns the number of bytes read as well\n-// as whether the read caused more readable data to become available (whether\n+// read reads from q to userspace. 
It returns:\n+// - The number of bytes read\n+// - Whether the read caused more readable data to become available (whether\n// data was pushed from the wait buffer to the read buffer).\n+// - Whether any data was echoed back (need to notify readers).\n//\n// Preconditions: l.termiosMu must be held for reading.\n-func (q *queue) read(ctx context.Context, dst usermem.IOSequence, l *lineDiscipline) (int64, bool, error) {\n+func (q *queue) read(ctx context.Context, dst usermem.IOSequence, l *lineDiscipline) (int64, bool, bool, error) {\nq.mu.Lock()\ndefer q.mu.Unlock()\nif !q.readable {\n- return 0, false, syserror.ErrWouldBlock\n+ return 0, false, false, syserror.ErrWouldBlock\n}\nif dst.NumBytes() > canonMaxBytes {\n@@ -131,19 +133,20 @@ func (q *queue) read(ctx context.Context, dst usermem.IOSequence, l *lineDiscipl\nreturn n, nil\n}))\nif err != nil {\n- return 0, false, err\n+ return 0, false, false, err\n}\n// Move data from the queue's wait buffer to its read buffer.\n- nPushed := q.pushWaitBufLocked(l)\n+ nPushed, notifyEcho := q.pushWaitBufLocked(l)\n- return int64(n), nPushed > 0, nil\n+ return int64(n), nPushed > 0, notifyEcho, nil\n}\n// write writes to q from userspace.\n+// The returned boolean indicates whether any data was echoed back.\n//\n// Preconditions: l.termiosMu must be held for reading.\n-func (q *queue) write(ctx context.Context, src usermem.IOSequence, l *lineDiscipline) (int64, error) {\n+func (q *queue) write(ctx context.Context, src usermem.IOSequence, l *lineDiscipline) (int64, bool, error) {\nq.mu.Lock()\ndefer q.mu.Unlock()\n@@ -173,44 +176,49 @@ func (q *queue) write(ctx context.Context, src usermem.IOSequence, l *lineDiscip\nreturn n, nil\n}))\nif err != nil {\n- return 0, err\n+ return 0, false, err\n}\n// Push data from the wait to the read buffer.\n- q.pushWaitBufLocked(l)\n+ _, notifyEcho := q.pushWaitBufLocked(l)\n- return n, nil\n+ return n, notifyEcho, nil\n}\n// writeBytes writes to q from b.\n+// The returned boolean indicates whether any data was echoed back.\n//\n// Preconditions: l.termiosMu must be held for reading.\n-func (q *queue) writeBytes(b []byte, l *lineDiscipline) {\n+func (q *queue) writeBytes(b []byte, l *lineDiscipline) bool {\nq.mu.Lock()\ndefer q.mu.Unlock()\n// Write to the wait buffer.\nq.waitBufAppend(b)\n- q.pushWaitBufLocked(l)\n+ _, notifyEcho := q.pushWaitBufLocked(l)\n+ return notifyEcho\n}\n// pushWaitBufLocked fills the queue's read buffer with data from the wait\n// buffer.\n+// The returned boolean indicates whether any data was echoed back.\n//\n// Preconditions:\n// * l.termiosMu must be held for reading.\n// * q.mu must be locked.\n-func (q *queue) pushWaitBufLocked(l *lineDiscipline) int {\n+func (q *queue) pushWaitBufLocked(l *lineDiscipline) (int, bool) {\nif q.waitBufLen == 0 {\n- return 0\n+ return 0, false\n}\n// Move data from the wait to the read buffer.\nvar total int\nvar i int\n+ var notifyEcho bool\nfor i = 0; i < len(q.waitBuf); i++ {\n- n := q.transform(l, q, q.waitBuf[i])\n+ n, echo := q.transform(l, q, q.waitBuf[i])\ntotal += n\n+ notifyEcho = notifyEcho || echo\nif n != len(q.waitBuf[i]) {\n// The read buffer filled up without consuming the\n// entire buffer.\n@@ -223,7 +231,7 @@ func (q *queue) pushWaitBufLocked(l *lineDiscipline) int {\nq.waitBuf = q.waitBuf[i:]\nq.waitBufLen -= uint64(total)\n- return total\n+ return total, notifyEcho\n}\n// Precondition: q.mu must be locked.\n" } ]
Go
Apache License 2.0
google/gvisor
devpts: Notify of echo'd input queue bytes only after locks have been released. PiperOrigin-RevId: 383684320
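The deadlock exercised by TestEchoDeadlock in the record above comes from notifying the master waiter for echoed bytes while termiosMu and the queue lock are still held, so a waiter callback that immediately re-reads the input queue re-enters the locked line discipline. Below is a minimal, generic Go sketch of the pattern the fix adopts: record the decision to notify while holding the lock, then fire the notification only after unlocking. The queue and callback types are illustrative stand-ins, not gVisor's devpts types.

package main

import (
	"fmt"
	"sync"
)

// queue is a toy stand-in for the line discipline's input queue.
type queue struct {
	mu     sync.Mutex
	data   []byte
	echoed []byte
	onEcho func() // registered callback; may call back into the queue
}

// Write appends p and, when echo is enabled, records the bytes to echo.
// The notification decision is made under mu, but the callback runs only
// after mu has been released, so re-entrant callers cannot deadlock.
func (q *queue) Write(p []byte, echo bool) {
	q.mu.Lock()
	q.data = append(q.data, p...)
	notifyEcho := false
	if echo {
		q.echoed = append(q.echoed, p...)
		notifyEcho = true
	}
	q.mu.Unlock()
	if notifyEcho && q.onEcho != nil {
		q.onEcho()
	}
}

func main() {
	q := &queue{}
	q.onEcho = func() {
		// Safe to re-enter: Write no longer holds q.mu at this point.
		q.mu.Lock()
		defer q.mu.Unlock()
		fmt.Printf("echoed %q\n", q.echoed)
	}
	q.Write([]byte("hello, tty\n"), true)
}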
259,885
08.07.2021 12:23:43
25,200
fbd4ccf33339a261812521fbc54554850a70676c
Fix async-signal-unsafety in socket test.
[ { "change_type": "MODIFY", "old_path": "test/syscalls/linux/socket.cc", "new_path": "test/syscalls/linux/socket.cc", "diff": "@@ -119,6 +119,9 @@ TEST(SocketTest, UnixSCMRightsOnlyPassedOnce) {\n// Send more than what will fit inside the send/receive buffers, so that it is\n// split into multiple messages.\nconstexpr int kBufSize = 0x100000;\n+ // Heap allocation is async-signal-unsafe and thus cannot occur between fork()\n+ // and execve().\n+ std::vector<char> buf(kBufSize);\npid_t pid = fork();\nif (pid == 0) {\n@@ -127,7 +130,6 @@ TEST(SocketTest, UnixSCMRightsOnlyPassedOnce) {\n// Construct a message with some control message.\nstruct msghdr msg = {};\nchar control[CMSG_SPACE(sizeof(int))] = {};\n- std::vector<char> buf(kBufSize);\nstruct iovec iov = {};\nmsg.msg_control = control;\nmsg.msg_controllen = sizeof(control);\n@@ -154,7 +156,6 @@ TEST(SocketTest, UnixSCMRightsOnlyPassedOnce) {\nstruct msghdr msg = {};\nchar control[CMSG_SPACE(sizeof(int))] = {};\n- std::vector<char> buf(kBufSize);\nstruct iovec iov = {};\nmsg.msg_control = &control;\nmsg.msg_controllen = sizeof(control);\n" } ]
Go
Apache License 2.0
google/gvisor
Fix async-signal-unsafety in socket test. PiperOrigin-RevId: 383689096
259,885
08.07.2021 13:36:49
25,200
052eb90dc15e04dfd8397ca305c507399360dd0e
Replace kernel.ExitStatus with linux.WaitStatus.
[ { "change_type": "MODIFY", "old_path": "pkg/abi/linux/wait.go", "new_path": "pkg/abi/linux/wait.go", "diff": "package linux\n+import (\n+ \"fmt\"\n+)\n+\n// Options for waitpid(2), wait4(2), and/or waitid(2), from\n// include/uapi/linux/wait.h.\nconst (\n@@ -34,3 +38,124 @@ const (\nP_PID = 0x1\nP_PGID = 0x2\n)\n+\n+// WaitStatus represents a thread status, as returned by the wait* family of\n+// syscalls.\n+type WaitStatus uint32\n+\n+// WaitStatusExit returns a WaitStatus representing the given exit status.\n+func WaitStatusExit(status int32) WaitStatus {\n+ return WaitStatus(uint32(status) << 8)\n+}\n+\n+// WaitStatusTerminationSignal returns a WaitStatus representing termination by\n+// the given signal.\n+func WaitStatusTerminationSignal(sig Signal) WaitStatus {\n+ return WaitStatus(uint32(sig))\n+}\n+\n+// WaitStatusStopped returns a WaitStatus representing stoppage by the given\n+// signal or ptrace trap code.\n+func WaitStatusStopped(code uint32) WaitStatus {\n+ return WaitStatus(code<<8 | 0x7f)\n+}\n+\n+// WaitStatusContinued returns a WaitStatus representing continuation by\n+// SIGCONT.\n+func WaitStatusContinued() WaitStatus {\n+ return WaitStatus(0xffff)\n+}\n+\n+// WithCoreDump returns a copy of ws that indicates that a core dump was\n+// generated.\n+//\n+// Preconditions: ws.Signaled().\n+func (ws WaitStatus) WithCoreDump() WaitStatus {\n+ return ws | 0x80\n+}\n+\n+// Exited returns true if ws represents an exit status, consistent with\n+// WIFEXITED.\n+func (ws WaitStatus) Exited() bool {\n+ return ws&0x7f == 0\n+}\n+\n+// Signaled returns true if ws represents a termination by signal, consistent\n+// with WIFSIGNALED.\n+func (ws WaitStatus) Signaled() bool {\n+ // ws&0x7f != 0 (exited) and ws&0x7f != 0x7f (stopped or continued)\n+ return ((ws&0x7f)+1)>>1 != 0\n+}\n+\n+// CoreDumped returns true if ws indicates that a core dump was produced,\n+// consistent with WCOREDUMP.\n+//\n+// Preconditions: ws.Signaled().\n+func (ws WaitStatus) CoreDumped() bool {\n+ return ws&0x80 != 0\n+}\n+\n+// Stopped returns true if ws represents a stoppage, consistent with\n+// WIFSTOPPED.\n+func (ws WaitStatus) Stopped() bool {\n+ return ws&0xff == 0x7f\n+}\n+\n+// Continued returns true if ws represents a continuation by SIGCONT,\n+// consistent with WIFCONTINUED.\n+func (ws WaitStatus) Continued() bool {\n+ return ws == 0xffff\n+}\n+\n+// ExitStatus returns the lower 8 bits of the exit status represented by ws,\n+// consistent with WEXITSTATUS.\n+//\n+// Preconditions: ws.Exited().\n+func (ws WaitStatus) ExitStatus() uint32 {\n+ return uint32((ws & 0xff00) >> 8)\n+}\n+\n+// TerminationSignal returns the termination signal represented by ws,\n+// consistent with WTERMSIG.\n+//\n+// Preconditions: ws.Signaled().\n+func (ws WaitStatus) TerminationSignal() Signal {\n+ return Signal(ws & 0x7f)\n+}\n+\n+// StopSignal returns the stop signal represented by ws, consistent with\n+// WSTOPSIG.\n+//\n+// Preconditions: ws.Stopped().\n+func (ws WaitStatus) StopSignal() Signal {\n+ return Signal((ws & 0xff00) >> 8)\n+}\n+\n+// PtraceEvent returns the PTRACE_EVENT_* field in ws.\n+//\n+// Preconditions: ws.Stopped().\n+func (ws WaitStatus) PtraceEvent() uint32 {\n+ return uint32(ws >> 16)\n+}\n+\n+// String implements fmt.Stringer.String.\n+func (ws WaitStatus) String() string {\n+ switch {\n+ case ws.Exited():\n+ return fmt.Sprintf(\"exit status %d\", ws.ExitStatus())\n+ case ws.Signaled():\n+ if ws.CoreDumped() {\n+ return fmt.Sprintf(\"killed by signal %d (core dumped)\", ws.TerminationSignal())\n+ 
}\n+ return fmt.Sprintf(\"killed by signal %d\", ws.TerminationSignal())\n+ case ws.Stopped():\n+ if ev := ws.PtraceEvent(); ev != 0 {\n+ return fmt.Sprintf(\"stopped by signal %d (PTRACE_EVENT %d)\", ws.StopSignal(), ev)\n+ }\n+ return fmt.Sprintf(\"stopped by signal %d\", ws.StopSignal())\n+ case ws.Continued():\n+ return \"continued\"\n+ default:\n+ return fmt.Sprintf(\"unknown status %#x\", uint32(ws))\n+ }\n+}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/control/proc.go", "new_path": "pkg/sentry/control/proc.go", "diff": "@@ -126,7 +126,7 @@ func (proc *Proc) Exec(args *ExecArgs, waitStatus *uint32) error {\n// Wait for completion.\nnewTG.WaitExited()\n- *waitStatus = newTG.ExitStatus().Status()\n+ *waitStatus = uint32(newTG.ExitStatus())\nreturn nil\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/control/state.go", "new_path": "pkg/sentry/control/state.go", "diff": "@@ -17,6 +17,7 @@ package control\nimport (\n\"errors\"\n+ \"gvisor.dev/gvisor/pkg/abi/linux\"\n\"gvisor.dev/gvisor/pkg/log\"\n\"gvisor.dev/gvisor/pkg/sentry/kernel\"\n\"gvisor.dev/gvisor/pkg/sentry/state\"\n@@ -67,7 +68,7 @@ func (s *State) Save(o *SaveOpts, _ *struct{}) error {\nlog.Warningf(\"Save failed: exiting...\")\ns.Kernel.SetSaveError(err)\n}\n- s.Kernel.Kill(kernel.ExitStatus{})\n+ s.Kernel.Kill(linux.WaitStatusExit(0))\n},\n}\nreturn saveOpts.Save(s.Kernel.SupervisorContext(), s.Kernel, s.Watchdog)\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/kernel/kernel.go", "new_path": "pkg/sentry/kernel/kernel.go", "diff": "@@ -1299,11 +1299,11 @@ func (k *Kernel) WaitExited() {\n}\n// Kill requests that all tasks in k immediately exit as if group exiting with\n-// status es. Kill does not wait for tasks to exit.\n-func (k *Kernel) Kill(es ExitStatus) {\n+// status ws. Kill does not wait for tasks to exit.\n+func (k *Kernel) Kill(ws linux.WaitStatus) {\nk.extMu.Lock()\ndefer k.extMu.Unlock()\n- k.tasks.Kill(es)\n+ k.tasks.Kill(ws)\n}\n// Pause requests that all tasks in k temporarily stop executing, and blocks\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/kernel/ptrace.go", "new_path": "pkg/sentry/kernel/ptrace.go", "diff": "@@ -912,7 +912,7 @@ func (t *Task) ptraceExit() {\nreturn\n}\nt.tg.signalHandlers.mu.Lock()\n- status := t.exitStatus.Status()\n+ status := t.exitStatus\nt.tg.signalHandlers.mu.Unlock()\nt.Debugf(\"Entering PTRACE_EVENT_EXIT stop\")\nt.ptraceEventLocked(linux.PTRACE_EVENT_EXIT, uint64(status))\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/kernel/task.go", "new_path": "pkg/sentry/kernel/task.go", "diff": "@@ -232,7 +232,7 @@ type Task struct {\n// exitStatus is the task's exit status.\n//\n// exitStatus is protected by the signal mutex.\n- exitStatus ExitStatus\n+ exitStatus linux.WaitStatus\n// syscallRestartBlock represents a custom restart function to run in\n// restart_syscall(2) to resume an interrupted syscall.\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/kernel/task_exit.go", "new_path": "pkg/sentry/kernel/task_exit.go", "diff": "@@ -28,7 +28,6 @@ import (\n\"errors\"\n\"fmt\"\n\"strconv\"\n- \"strings\"\n\"gvisor.dev/gvisor/pkg/abi/linux\"\n\"gvisor.dev/gvisor/pkg/errors/linuxerr\"\n@@ -37,58 +36,6 @@ import (\n\"gvisor.dev/gvisor/pkg/waiter\"\n)\n-// An ExitStatus is a value communicated from an exiting task or thread group\n-// to the party that reaps it.\n-//\n-// +stateify savable\n-type ExitStatus struct {\n- // Code is the numeric value passed to the call to exit or exit_group that\n- // caused the exit. 
If the exit was not caused by such a call, Code is 0.\n- Code int\n-\n- // Signo is the signal that caused the exit. If the exit was not caused by\n- // a signal, Signo is 0.\n- Signo int\n-}\n-\n-func (es ExitStatus) String() string {\n- var b strings.Builder\n- if code := es.Code; code != 0 {\n- if b.Len() != 0 {\n- b.WriteByte(' ')\n- }\n- _, _ = fmt.Fprintf(&b, \"Code=%d\", code)\n- }\n- if signal := es.Signo; signal != 0 {\n- if b.Len() != 0 {\n- b.WriteByte(' ')\n- }\n- _, _ = fmt.Fprintf(&b, \"Signal=%d\", signal)\n- }\n- return b.String()\n-}\n-\n-// Signaled returns true if the ExitStatus indicates that the exiting task or\n-// thread group was killed by a signal.\n-func (es ExitStatus) Signaled() bool {\n- return es.Signo != 0\n-}\n-\n-// Status returns the numeric representation of the ExitStatus returned by e.g.\n-// the wait4() system call.\n-func (es ExitStatus) Status() uint32 {\n- return ((uint32(es.Code) & 0xff) << 8) | (uint32(es.Signo) & 0xff)\n-}\n-\n-// ShellExitCode returns the numeric exit code that Bash would return for an\n-// exit status of es.\n-func (es ExitStatus) ShellExitCode() int {\n- if es.Signaled() {\n- return 128 + es.Signo\n- }\n- return es.Code\n-}\n-\n// TaskExitState represents a step in the task exit path.\n//\n// \"Exiting\" and \"exited\" are often ambiguous; prefer to name specific states.\n@@ -164,13 +111,13 @@ func (t *Task) killedLocked() bool {\nreturn t.pendingSignals.pendingSet&linux.SignalSetOf(linux.SIGKILL) != 0\n}\n-// PrepareExit indicates an exit with status es.\n+// PrepareExit indicates an exit with the given status.\n//\n// Preconditions: The caller must be running on the task goroutine.\n-func (t *Task) PrepareExit(es ExitStatus) {\n+func (t *Task) PrepareExit(ws linux.WaitStatus) {\nt.tg.signalHandlers.mu.Lock()\ndefer t.tg.signalHandlers.mu.Unlock()\n- t.exitStatus = es\n+ t.exitStatus = ws\n}\n// PrepareGroupExit indicates a group exit with status es to t's thread group.\n@@ -181,7 +128,7 @@ func (t *Task) PrepareExit(es ExitStatus) {\n// ptrace.)\n//\n// Preconditions: The caller must be running on the task goroutine.\n-func (t *Task) PrepareGroupExit(es ExitStatus) {\n+func (t *Task) PrepareGroupExit(ws linux.WaitStatus) {\nt.tg.signalHandlers.mu.Lock()\ndefer t.tg.signalHandlers.mu.Unlock()\nif t.tg.exiting || t.tg.execing != nil {\n@@ -199,8 +146,8 @@ func (t *Task) PrepareGroupExit(es ExitStatus) {\nreturn\n}\nt.tg.exiting = true\n- t.tg.exitStatus = es\n- t.exitStatus = es\n+ t.tg.exitStatus = ws\n+ t.exitStatus = ws\nfor sibling := t.tg.tasks.Front(); sibling != nil; sibling = sibling.Next() {\nif sibling != t {\nsibling.killLocked()\n@@ -208,11 +155,11 @@ func (t *Task) PrepareGroupExit(es ExitStatus) {\n}\n}\n-// Kill requests that all tasks in ts exit as if group exiting with status es.\n+// Kill requests that all tasks in ts exit as if group exiting with status ws.\n// Kill does not wait for tasks to exit.\n//\n// Kill has no analogue in Linux; it's provided for save/restore only.\n-func (ts *TaskSet) Kill(es ExitStatus) {\n+func (ts *TaskSet) Kill(ws linux.WaitStatus) {\nts.mu.Lock()\ndefer ts.mu.Unlock()\nts.Root.exiting = true\n@@ -220,7 +167,7 @@ func (ts *TaskSet) Kill(es ExitStatus) {\nt.tg.signalHandlers.mu.Lock()\nif !t.tg.exiting {\nt.tg.exiting = true\n- t.tg.exitStatus = es\n+ t.tg.exitStatus = ws\n}\nt.killLocked()\nt.tg.signalHandlers.mu.Unlock()\n@@ -731,10 +678,10 @@ func (t *Task) exitNotificationSignal(sig linux.Signal, receiver *Task) 
*linux.S\ninfo.SetUID(int32(t.Credentials().RealKUID.In(receiver.UserNamespace()).OrOverflow()))\nif t.exitStatus.Signaled() {\ninfo.Code = linux.CLD_KILLED\n- info.SetStatus(int32(t.exitStatus.Signo))\n+ info.SetStatus(int32(t.exitStatus.TerminationSignal()))\n} else {\ninfo.Code = linux.CLD_EXITED\n- info.SetStatus(int32(t.exitStatus.Code))\n+ info.SetStatus(int32(t.exitStatus.ExitStatus()))\n}\n// TODO(b/72102453): Set utime, stime.\nreturn info\n@@ -742,7 +689,7 @@ func (t *Task) exitNotificationSignal(sig linux.Signal, receiver *Task) *linux.S\n// ExitStatus returns t's exit status, which is only guaranteed to be\n// meaningful if t.ExitState() != TaskExitNone.\n-func (t *Task) ExitStatus() ExitStatus {\n+func (t *Task) ExitStatus() linux.WaitStatus {\nt.tg.pidns.owner.mu.RLock()\ndefer t.tg.pidns.owner.mu.RUnlock()\nt.tg.signalHandlers.mu.Lock()\n@@ -752,7 +699,7 @@ func (t *Task) ExitStatus() ExitStatus {\n// ExitStatus returns the exit status that would be returned by a consuming\n// wait*() on tg.\n-func (tg *ThreadGroup) ExitStatus() ExitStatus {\n+func (tg *ThreadGroup) ExitStatus() linux.WaitStatus {\ntg.pidns.owner.mu.RLock()\ndefer tg.pidns.owner.mu.RUnlock()\ntg.signalHandlers.mu.Lock()\n@@ -763,7 +710,9 @@ func (tg *ThreadGroup) ExitStatus() ExitStatus {\nreturn tg.leader.exitStatus\n}\n-// TerminationSignal returns the thread group's termination signal.\n+// TerminationSignal returns the thread group's termination signal, which is\n+// the signal that will be sent to its leader's parent when all threads have\n+// exited.\nfunc (tg *ThreadGroup) TerminationSignal() linux.Signal {\ntg.pidns.owner.mu.RLock()\ndefer tg.pidns.owner.mu.RUnlock()\n@@ -889,8 +838,8 @@ type WaitResult struct {\n// Event is exactly one of the events defined above.\nEvent waiter.EventMask\n- // Status is the numeric status associated with the event.\n- Status uint32\n+ // Status is the wait status associated with the event.\n+ Status linux.WaitStatus\n}\n// Wait waits for an event from a thread group that is a child of t's thread\n@@ -1043,7 +992,7 @@ func (t *Task) waitCollectZombieLocked(target *Task, opts *WaitOptions, asPtrace\n}\npid := t.tg.pidns.tids[target]\nuid := target.Credentials().RealKUID.In(t.UserNamespace()).OrOverflow()\n- status := target.exitStatus.Status()\n+ status := target.exitStatus\nif !opts.ConsumeEvent {\nreturn &WaitResult{\nTask: target,\n@@ -1057,7 +1006,7 @@ func (t *Task) waitCollectZombieLocked(target *Task, opts *WaitOptions, asPtrace\n// differ from that reported by a consuming wait; the latter will return\n// the group exit code if one is available.\nif target.tg.exiting {\n- status = target.tg.exitStatus.Status()\n+ status = target.tg.exitStatus\n}\n// t may be (in the thread group of) target's parent, tracer, or both. 
We\n// don't need to check for !exitTracerAcked because tracees are detached\n@@ -1127,8 +1076,7 @@ func (t *Task) waitCollectChildGroupStopLocked(target *Task, opts *WaitOptions)\nTID: pid,\nUID: uid,\nEvent: EventChildGroupStop,\n- // There is no name for these status constants.\n- Status: (uint32(sig)&0xff)<<8 | 0x7f,\n+ Status: linux.WaitStatusStopped(uint32(sig)),\n}\n}\n@@ -1149,7 +1097,7 @@ func (t *Task) waitCollectGroupContinueLocked(target *Task, opts *WaitOptions) *\nTID: pid,\nUID: uid,\nEvent: EventGroupContinue,\n- Status: 0xffff,\n+ Status: linux.WaitStatusContinued(),\n}\n}\n@@ -1177,7 +1125,7 @@ func (t *Task) waitCollectTraceeStopLocked(target *Task, opts *WaitOptions) *Wai\nTID: pid,\nUID: uid,\nEvent: EventTraceeStop,\n- Status: uint32(code)<<8 | 0x7f,\n+ Status: linux.WaitStatusStopped(uint32(code)),\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/kernel/task_log.go", "new_path": "pkg/sentry/kernel/task_log.go", "diff": "@@ -235,7 +235,7 @@ func (t *Task) traceExitEvent() {\nif !trace.IsEnabled() {\nreturn\n}\n- trace.Logf(t.traceContext, traceCategory, \"exit status: 0x%x\", t.exitStatus.Status())\n+ trace.Logf(t.traceContext, traceCategory, \"exit status: %s\", t.exitStatus)\n}\n// traceExecEvent is called when a task calls exec.\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/kernel/task_run.go", "new_path": "pkg/sentry/kernel/task_run.go", "diff": "@@ -377,7 +377,7 @@ func (app *runApp) execute(t *Task) taskRunState {\ndefault:\n// What happened? Can't continue.\nt.Warningf(\"Unexpected SwitchToApp error: %v\", err)\n- t.PrepareExit(ExitStatus{Code: ExtractErrno(err, -1)})\n+ t.PrepareExit(linux.WaitStatusExit(int32(ExtractErrno(err, -1))))\nreturn (*runExit)(nil)\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/kernel/task_signals.go", "new_path": "pkg/sentry/kernel/task_signals.go", "diff": "@@ -157,7 +157,8 @@ func (t *Task) PendingSignals() linux.SignalSet {\n// deliverSignal delivers the given signal and returns the following run state.\nfunc (t *Task) deliverSignal(info *linux.SignalInfo, act linux.SigAction) taskRunState {\n- sigact := computeAction(linux.Signal(info.Signo), act)\n+ sig := linux.Signal(info.Signo)\n+ sigact := computeAction(sig, act)\nif t.haveSyscallReturn {\nif sre, ok := syserror.SyscallRestartErrnoFromReturn(t.Arch().Return()); ok {\n@@ -198,14 +199,14 @@ func (t *Task) deliverSignal(info *linux.SignalInfo, act linux.SigAction) taskRu\n}\n// Attach an fault address if appropriate.\n- switch linux.Signal(info.Signo) {\n+ switch sig {\ncase linux.SIGSEGV, linux.SIGFPE, linux.SIGILL, linux.SIGTRAP, linux.SIGBUS:\nucs.FaultAddr = info.Addr()\n}\neventchannel.Emit(ucs)\n- t.PrepareGroupExit(ExitStatus{Signo: int(info.Signo)})\n+ t.PrepareGroupExit(linux.WaitStatusTerminationSignal(sig))\nreturn (*runExit)(nil)\ncase SignalActionStop:\n@@ -225,12 +226,12 @@ func (t *Task) deliverSignal(info *linux.SignalInfo, act linux.SigAction) taskRu\n// Send a forced SIGSEGV. 
If the signal that couldn't be delivered\n// was a SIGSEGV, force the handler to SIG_DFL.\n- t.forceSignal(linux.SIGSEGV, linux.Signal(info.Signo) == linux.SIGSEGV /* unconditional */)\n+ t.forceSignal(linux.SIGSEGV, sig == linux.SIGSEGV /* unconditional */)\nt.SendSignal(SignalInfoPriv(linux.SIGSEGV))\n}\ndefault:\n- panic(fmt.Sprintf(\"Unknown signal action %+v, %d?\", info, computeAction(linux.Signal(info.Signo), act)))\n+ panic(fmt.Sprintf(\"Unknown signal action %+v, %d?\", info, computeAction(sig, act)))\n}\nreturn (*runInterrupt)(nil)\n}\n@@ -506,7 +507,7 @@ func (tg *ThreadGroup) applySignalSideEffectsLocked(sig linux.Signal) {\n// ignores tg.execing.\nif !tg.exiting {\ntg.exiting = true\n- tg.exitStatus = ExitStatus{Signo: int(linux.SIGKILL)}\n+ tg.exitStatus = linux.WaitStatusTerminationSignal(linux.SIGKILL)\n}\nfor t := tg.tasks.Front(); t != nil; t = t.Next() {\nt.killLocked()\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/kernel/task_syscall.go", "new_path": "pkg/sentry/kernel/task_syscall.go", "diff": "@@ -161,7 +161,7 @@ func (t *Task) doSyscall() taskRunState {\n// ok\ncase linux.SECCOMP_RET_KILL_THREAD:\nt.Debugf(\"Syscall %d: killed by seccomp\", sysno)\n- t.PrepareExit(ExitStatus{Signo: int(linux.SIGSYS)})\n+ t.PrepareExit(linux.WaitStatusTerminationSignal(linux.SIGSYS))\nreturn (*runExit)(nil)\ncase linux.SECCOMP_RET_TRACE:\nt.Debugf(\"Syscall %d: stopping for PTRACE_EVENT_SECCOMP\", sysno)\n@@ -311,7 +311,7 @@ func (t *Task) doVsyscall(addr hostarch.Addr, sysno uintptr) taskRunState {\nreturn &runVsyscallAfterPtraceEventSeccomp{addr, sysno, caller}\ncase linux.SECCOMP_RET_KILL_THREAD:\nt.Debugf(\"vsyscall %d: killed by seccomp\", sysno)\n- t.PrepareExit(ExitStatus{Signo: int(linux.SIGSYS)})\n+ t.PrepareExit(linux.WaitStatusTerminationSignal(linux.SIGSYS))\nreturn (*runExit)(nil)\ndefault:\npanic(fmt.Sprintf(\"Unknown seccomp result %d\", r))\n@@ -338,7 +338,7 @@ func (r *runVsyscallAfterPtraceEventSeccomp) execute(t *Task) taskRunState {\n// Documentation/prctl/seccomp_filter.txt. 
On Linux, changing orig_ax or ip\n// causes do_exit(SIGSYS), and changing sp is ignored.\nif (sysno != ^uintptr(0) && sysno != r.sysno) || hostarch.Addr(t.Arch().IP()) != r.addr {\n- t.PrepareExit(ExitStatus{Signo: int(linux.SIGSYS)})\n+ t.PrepareExit(linux.WaitStatusTerminationSignal(linux.SIGSYS))\nreturn (*runExit)(nil)\n}\nif sysno == ^uintptr(0) {\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/kernel/thread_group.go", "new_path": "pkg/sentry/kernel/thread_group.go", "diff": "@@ -144,7 +144,7 @@ type ThreadGroup struct {\n//\n// While exiting is false, exitStatus is protected by the signal mutex.\n// When exiting becomes true, exitStatus becomes immutable.\n- exitStatus ExitStatus\n+ exitStatus linux.WaitStatus\n// terminationSignal is the signal that this thread group's leader will\n// send to its parent when it exits.\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/syscalls/linux/sys_thread.go", "new_path": "pkg/sentry/syscalls/linux/sys_thread.go", "diff": "@@ -17,7 +17,6 @@ package linux\nimport (\n\"path\"\n- \"golang.org/x/sys/unix\"\n\"gvisor.dev/gvisor/pkg/abi/linux\"\n\"gvisor.dev/gvisor/pkg/errors/linuxerr\"\n\"gvisor.dev/gvisor/pkg/hostarch\"\n@@ -188,15 +187,15 @@ func execveat(t *kernel.Task, dirFD int32, pathnameAddr, argvAddr, envvAddr host\n// Exit implements linux syscall exit(2).\nfunc Exit(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {\n- status := int(args[0].Int())\n- t.PrepareExit(kernel.ExitStatus{Code: status})\n+ status := args[0].Int()\n+ t.PrepareExit(linux.WaitStatusExit(status & 0xff))\nreturn 0, kernel.CtrlDoExit, nil\n}\n// ExitGroup implements linux syscall exit_group(2).\nfunc ExitGroup(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {\n- status := int(args[0].Int())\n- t.PrepareGroupExit(kernel.ExitStatus{Code: status})\n+ status := args[0].Int()\n+ t.PrepareGroupExit(linux.WaitStatusExit(status & 0xff))\nreturn 0, kernel.CtrlDoExit, nil\n}\n@@ -316,7 +315,7 @@ func wait4(t *kernel.Task, pid int, statusAddr hostarch.Addr, options int, rusag\nreturn 0, err\n}\nif statusAddr != 0 {\n- if _, err := primitive.CopyUint32Out(t, statusAddr, wr.Status); err != nil {\n+ if _, err := primitive.CopyUint32Out(t, statusAddr, uint32(wr.Status)); err != nil {\nreturn 0, err\n}\n}\n@@ -419,23 +418,22 @@ func Waitid(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscal\n}\nsi.SetPID(int32(wr.TID))\nsi.SetUID(int32(wr.UID))\n- // TODO(b/73541790): convert kernel.ExitStatus to functions and make\n- // WaitResult.Status a linux.WaitStatus.\n- s := unix.WaitStatus(wr.Status)\n+ s := wr.Status\nswitch {\ncase s.Exited():\nsi.Code = linux.CLD_EXITED\nsi.SetStatus(int32(s.ExitStatus()))\ncase s.Signaled():\n- si.Code = linux.CLD_KILLED\n- si.SetStatus(int32(s.Signal()))\n- case s.CoreDump():\n+ if s.CoreDumped() {\nsi.Code = linux.CLD_DUMPED\n- si.SetStatus(int32(s.Signal()))\n+ } else {\n+ si.Code = linux.CLD_KILLED\n+ }\n+ si.SetStatus(int32(s.TerminationSignal()))\ncase s.Stopped():\nif wr.Event == kernel.EventTraceeStop {\nsi.Code = linux.CLD_TRAPPED\n- si.SetStatus(int32(s.TrapCause()))\n+ si.SetStatus(int32(s.PtraceEvent()))\n} else {\nsi.Code = linux.CLD_STOPPED\nsi.SetStatus(int32(s.StopSignal()))\n" }, { "change_type": "MODIFY", "old_path": "runsc/boot/loader.go", "new_path": "runsc/boot/loader.go", "diff": "@@ -1051,7 +1051,7 @@ func (l *Loader) waitPID(tgid kernel.ThreadID, cid string, waitStatus *uint32) e\n// to exit.\nfunc (l *Loader) wait(tg 
*kernel.ThreadGroup) uint32 {\ntg.WaitExited()\n- return tg.ExitStatus().Status()\n+ return uint32(tg.ExitStatus())\n}\n// WaitForStartSignal waits for a start signal from the control server.\n@@ -1060,7 +1060,7 @@ func (l *Loader) WaitForStartSignal() {\n}\n// WaitExit waits for the root container to exit, and returns its exit status.\n-func (l *Loader) WaitExit() kernel.ExitStatus {\n+func (l *Loader) WaitExit() linux.WaitStatus {\n// Wait for container.\nl.k.WaitExited()\n" }, { "change_type": "MODIFY", "old_path": "runsc/boot/loader_test.go", "new_path": "runsc/boot/loader_test.go", "diff": "@@ -188,8 +188,8 @@ func doRun(t *testing.T, vfsEnabled bool) {\n}\n// Wait for the application to exit. It should succeed.\n- if status := l.WaitExit(); status.Code != 0 || status.Signo != 0 {\n- t.Errorf(\"application exited with status %+v, want 0\", status)\n+ if status := l.WaitExit(); !status.Exited() || status.ExitStatus() != 0 {\n+ t.Errorf(\"application exited with %s, want exit status 0\", status)\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "runsc/cmd/boot.go", "new_path": "runsc/cmd/boot.go", "diff": "@@ -255,7 +255,7 @@ func (b *Boot) Execute(_ context.Context, f *flag.FlagSet, args ...interface{})\nws := l.WaitExit()\nlog.Infof(\"application exiting with %+v\", ws)\nwaitStatus := args[1].(*unix.WaitStatus)\n- *waitStatus = unix.WaitStatus(ws.Status())\n+ *waitStatus = unix.WaitStatus(ws)\nl.Destroy()\nreturn subcommands.ExitSuccess\n}\n" } ]
Go
Apache License 2.0
google/gvisor
Replace kernel.ExitStatus with linux.WaitStatus. PiperOrigin-RevId: 383705129
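The linux.WaitStatus helpers added above encode the classic wait(2) status layout: exit code in bits 8..15, termination signal in the low 7 bits (with 0x80 marking a core dump), stops as signal<<8|0x7f, and 0xffff for a SIGCONT continuation. The self-contained sketch below re-derives that layout for illustration; it mirrors the arithmetic rather than importing the gVisor package.

package main

import "fmt"

type waitStatus uint32

func exitWith(code uint32) waitStatus { return waitStatus(code << 8) }
func killedBy(sig uint32) waitStatus  { return waitStatus(sig & 0x7f) }
func stoppedBy(sig uint32) waitStatus { return waitStatus(sig<<8 | 0x7f) }

func (ws waitStatus) exited() bool   { return ws&0x7f == 0 }
func (ws waitStatus) stopped() bool  { return ws&0xff == 0x7f }
func (ws waitStatus) signaled() bool { return ws&0x7f != 0 && ws&0x7f != 0x7f }

// code returns the exit status for exits and the stop signal for stops,
// both of which live in bits 8..15.
func (ws waitStatus) code() uint32    { return uint32(ws&0xff00) >> 8 }
func (ws waitStatus) termSig() uint32 { return uint32(ws & 0x7f) }

func main() {
	fmt.Println(exitWith(3).exited(), exitWith(3).code())      // true 3
	fmt.Println(killedBy(9).signaled(), killedBy(9).termSig()) // true 9
	fmt.Println(stoppedBy(19).stopped(), stoppedBy(19).code()) // true 19
	// A "continued" status (0xffff) is neither an exit nor a kill.
	fmt.Println(waitStatus(0xffff).exited(), waitStatus(0xffff).signaled()) // false false
}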
259,891
08.07.2021 17:53:49
25,200
f8207a823351055a2aaad633b428fe7c1f0585f0
clarify safemount behavior
[ { "change_type": "MODIFY", "old_path": "runsc/cmd/chroot.go", "new_path": "runsc/cmd/chroot.go", "diff": "@@ -30,7 +30,7 @@ func mountInChroot(chroot, src, dst, typ string, flags uint32) error {\nchrootDst := filepath.Join(chroot, dst)\nlog.Infof(\"Mounting %q at %q\", src, chrootDst)\n- if err := specutils.Mount(src, chrootDst, typ, flags, \"/proc\"); err != nil {\n+ if err := specutils.SafeSetupAndMount(src, chrootDst, typ, flags, \"/proc\"); err != nil {\nreturn fmt.Errorf(\"error mounting %q at %q: %v\", src, chrootDst, err)\n}\nreturn nil\n" }, { "change_type": "MODIFY", "old_path": "runsc/cmd/gofer.go", "new_path": "runsc/cmd/gofer.go", "diff": "@@ -368,7 +368,7 @@ func setupMounts(conf *config.Config, mounts []specs.Mount, root, procPath strin\n}\nlog.Infof(\"Mounting src: %q, dst: %q, flags: %#x\", m.Source, dst, flags)\n- if err := specutils.Mount(m.Source, dst, m.Type, flags, procPath); err != nil {\n+ if err := specutils.SafeSetupAndMount(m.Source, dst, m.Type, flags, procPath); err != nil {\nreturn fmt.Errorf(\"mounting %+v: %v\", m, err)\n}\n" }, { "change_type": "MODIFY", "old_path": "runsc/config/config.go", "new_path": "runsc/config/config.go", "diff": "@@ -142,7 +142,8 @@ type Config struct {\n// Rootless allows the sandbox to be started with a user that is not root.\n// Defense in depth measures are weaker in rootless mode. Specifically, the\n// sandbox and Gofer process run as root inside a user namespace with root\n- // mapped to the caller's user.\n+ // mapped to the caller's user. When using rootless, the container root path\n+ // should not have a symlink.\nRootless bool `flag:\"rootless\"`\n// AlsoLogToStderr allows to send log messages to stderr.\n@@ -175,7 +176,8 @@ type Config struct {\n// TestOnlyAllowRunAsCurrentUserWithoutChroot should only be used in\n// tests. It allows runsc to start the sandbox process as the current\n// user, and without chrooting the sandbox process. This can be\n- // necessary in test environments that have limited capabilities.\n+ // necessary in test environments that have limited capabilities. When\n+ // disabling chroot, the container root path should not have a symlink.\nTestOnlyAllowRunAsCurrentUserWithoutChroot bool `flag:\"TESTONLY-unsafe-nonroot\"`\n// TestOnlyTestNameEnv should only be used in tests. It looks up for the\n" }, { "change_type": "MODIFY", "old_path": "runsc/specutils/specutils.go", "new_path": "runsc/specutils/specutils.go", "diff": "@@ -434,12 +434,12 @@ func DebugLogFile(logPattern, command, test string) (*os.File, error) {\nreturn os.OpenFile(logPattern, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0664)\n}\n-// Mount creates the mount point and calls Mount with the given flags. procPath\n-// is the path to procfs. If it is \"\", procfs is assumed to be mounted at\n-// /proc.\n-func Mount(src, dst, typ string, flags uint32, procPath string) error {\n- // Create the mount point inside. The type must be the same as the\n- // source (file or directory).\n+// SafeSetupAndMount creates the mount point and calls Mount with the given\n+// flags. procPath is the path to procfs. If it is \"\", procfs is assumed to be\n+// mounted at /proc.\n+func SafeSetupAndMount(src, dst, typ string, flags uint32, procPath string) error {\n+ // Create the mount point inside. 
The type must be the same as the source\n+ // (file or directory).\nvar isDir bool\nif typ == \"proc\" {\n// Special case, as there is no source directory for proc mounts.\n@@ -484,6 +484,10 @@ type ErrSymlinkMount struct {\n// SafeMount is like unix.Mount, but will fail if dst is a symlink. procPath is\n// the path to procfs. If it is \"\", procfs is assumed to be mounted at /proc.\n+//\n+// SafeMount can fail when dst contains a symlink. However, it is called in the\n+// normal case with a destination consisting of a known root (/proc/root) and\n+// symlink-free path (from resolveSymlink).\nfunc SafeMount(src, dst, fstype string, flags uintptr, data, procPath string) error {\n// Open the destination.\nfd, err := unix.Open(dst, unix.O_PATH|unix.O_CLOEXEC, 0)\n" } ]
Go
Apache License 2.0
google/gvisor
clarify safemount behavior PiperOrigin-RevId: 383750666
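SafeMount in the record above refuses to mount when the destination is a symlink, so a crafted link cannot redirect the mount outside the intended tree. The standalone sketch below illustrates one way to get that property with golang.org/x/sys/unix: open the destination with O_PATH|O_NOFOLLOW, verify the opened object is not a symlink, and mount through the descriptor's /proc/self/fd path so the destination is not re-resolved. It is an illustration of the idea, not the exact gVisor implementation, and needs root (CAP_SYS_ADMIN) to actually mount; the /mnt target in main is just an example path.

package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/unix"
)

// safeMount mounts src onto dst, failing if dst itself is a symlink.
func safeMount(src, dst, fstype string, flags uintptr, data string) error {
	// O_PATH|O_NOFOLLOW opens the final path component without following a
	// symlink; if dst is a symlink, the fd refers to the link itself.
	fd, err := unix.Open(dst, unix.O_PATH|unix.O_NOFOLLOW|unix.O_CLOEXEC, 0)
	if err != nil {
		return fmt.Errorf("open %q: %w", dst, err)
	}
	defer unix.Close(fd)

	var st unix.Stat_t
	if err := unix.Fstat(fd, &st); err != nil {
		return fmt.Errorf("fstat %q: %w", dst, err)
	}
	if st.Mode&unix.S_IFMT == unix.S_IFLNK {
		return fmt.Errorf("%q is a symlink; refusing to mount", dst)
	}

	// Mount through the descriptor's proc path so the kernel does not
	// re-resolve dst between the check and the mount.
	fdDst := fmt.Sprintf("/proc/self/fd/%d", fd)
	if err := unix.Mount(src, fdDst, fstype, flags, data); err != nil {
		return fmt.Errorf("mount %q at %q: %w", src, dst, err)
	}
	return nil
}

func main() {
	if err := safeMount("tmpfs", "/mnt", "tmpfs", 0, "mode=0700"); err != nil {
		log.Fatal(err)
	}
}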
259,905
09.07.2021 10:02:39
-28,800
c4c5f4d92a13aa5357002fe5ddf116433ec4e9a7
runsc: check the error when preparing tree for pivot_root
[ { "change_type": "MODIFY", "old_path": "runsc/cmd/gofer.go", "new_path": "runsc/cmd/gofer.go", "diff": "@@ -284,8 +284,12 @@ func setupRootFS(spec *specs.Spec, conf *config.Config) error {\n}\n// Prepare tree structure for pivot_root(2).\n- os.Mkdir(\"/proc/proc\", 0755)\n- os.Mkdir(\"/proc/root\", 0755)\n+ if err := os.Mkdir(\"/proc/proc\", 0755); err != nil {\n+ Fatalf(\"error creating /proc/proc: %v\", err)\n+ }\n+ if err := os.Mkdir(\"/proc/root\", 0755); err != nil {\n+ Fatalf(\"error creating /proc/root: %v\", err)\n+ }\n// This cannot use SafeMount because there's no available procfs. But we\n// know that /proc is an empty tmpfs mount, so this is safe.\nif err := unix.Mount(\"runsc-proc\", \"/proc/proc\", \"proc\", flags|unix.MS_RDONLY, \"\"); err != nil {\n" } ]
Go
Apache License 2.0
google/gvisor
runsc: check the error when preparing tree for pivot_root Signed-off-by: Tiwei Bie <[email protected]>
259,858
09.07.2021 15:57:05
25,200
d78713e2da5331a22fc51fb9a9ad33cc1873879c
Drop unnecessary checklocksignore.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/kernel/task_cgroup.go", "new_path": "pkg/sentry/kernel/task_cgroup.go", "diff": "@@ -27,8 +27,6 @@ import (\n// EnterInitialCgroups moves t into an initial set of cgroups.\n//\n// Precondition: t isn't in any cgroups yet, t.cgs is empty.\n-//\n-// +checklocksignore parent.mu is conditionally acquired.\nfunc (t *Task) EnterInitialCgroups(parent *Task) {\nvar inherit map[Cgroup]struct{}\nif parent != nil {\n" } ]
Go
Apache License 2.0
google/gvisor
Drop unnecessary checklocksignore. PiperOrigin-RevId: 383940663
259,858
12.07.2021 09:59:40
25,200
1f396d8c16e179aa5b7b9ea1da6b16fb0b167016
Prevent the cleanup script from destroying any "bootstrap" containers.
[ { "change_type": "MODIFY", "old_path": ".buildkite/hooks/post-command", "new_path": ".buildkite/hooks/post-command", "diff": "@@ -56,8 +56,10 @@ if test \"${BUILDKITE_COMMAND_EXIT_STATUS}\" -ne \"0\"; then\nsudo rm -rf \"${HOME}/go\"\nfi\n-# Kill any running containers (clear state).\n-CONTAINERS=\"$(docker ps -q)\"\n-if ! test -z \"${CONTAINERS}\"; then\n- docker container kill ${CONTAINERS} 2>/dev/null || true\n+# Kill any running containers (clear state), except for \"bootstrap\".\n+for container in $(docker ps -q); do\n+ maybe_kill=\"$(docker inspect -f '{{if ne \"/bootstrap\" .Name}}true{{ end }}' \"${container}\")\"\n+ if test -n \"${maybe_kill}\"; then\n+ docker container kill \"${container}\"\nfi\n+done\n\\ No newline at end of file\n" } ]
Go
Apache License 2.0
google/gvisor
Prevent the cleanup script from destroying any "bootstrap" containers. PiperOrigin-RevId: 384257460
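The -f argument to docker inspect in the hook above is a Go text/template: it prints "true" only when the container's name is not "/bootstrap", and the shell loop kills containers that produce non-empty output. The small sketch below evaluates the same template against two made-up container records (the names other than "/bootstrap" are hypothetical) to show that behaviour.

package main

import (
	"fmt"
	"log"
	"strings"
	"text/template"
)

// container mimics the one field of `docker inspect` output the template
// reads; Docker reports container names with a leading slash.
type container struct {
	Name string
}

func main() {
	tmpl := template.Must(template.New("keep").Parse(
		`{{if ne "/bootstrap" .Name}}true{{ end }}`))
	for _, c := range []container{{Name: "/bootstrap"}, {Name: "/test_runner"}} {
		var b strings.Builder
		if err := tmpl.Execute(&b, c); err != nil {
			log.Fatal(err)
		}
		// Prints: /bootstrap -> "" and /test_runner -> "true".
		fmt.Printf("%s -> %q\n", c.Name, b.String())
	}
}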
259,885
12.07.2021 12:47:08
25,200
9c09db654e3304ce57a2757b33c87e28df7153dc
Fix async-signal-unsafety in chroot test.
[ { "change_type": "MODIFY", "old_path": "test/syscalls/linux/BUILD", "new_path": "test/syscalls/linux/BUILD", "diff": "@@ -479,6 +479,7 @@ cc_binary(\n\"//test/util:cleanup\",\n\"//test/util:file_descriptor\",\n\"//test/util:fs_util\",\n+ \"@com_google_absl//absl/cleanup\",\n\"@com_google_absl//absl/strings\",\ngtest,\n\"//test/util:logging\",\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/chroot.cc", "new_path": "test/syscalls/linux/chroot.cc", "diff": "#include <syscall.h>\n#include <unistd.h>\n+#include <algorithm>\n#include <string>\n#include <vector>\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n+#include \"absl/cleanup/cleanup.h\"\n#include \"absl/strings/str_cat.h\"\n#include \"absl/strings/str_split.h\"\n#include \"absl/strings/string_view.h\"\n#include \"test/util/capability_util.h\"\n-#include \"test/util/cleanup.h\"\n#include \"test/util/file_descriptor.h\"\n#include \"test/util/fs_util.h\"\n#include \"test/util/logging.h\"\n@@ -46,13 +47,52 @@ namespace testing {\nnamespace {\n+// Async-signal-safe conversion from integer to string, appending the string\n+// (including a terminating NUL) to buf, which is a buffer of size len bytes.\n+// Returns the number of bytes written, or 0 if the buffer is too small.\n+//\n+// Preconditions: 2 <= radix <= 16.\n+template <typename T>\n+size_t SafeItoa(T val, char* buf, size_t len, int radix) {\n+ size_t n = 0;\n+#define _WRITE_OR_FAIL(c) \\\n+ do { \\\n+ if (len == 0) { \\\n+ return 0; \\\n+ } \\\n+ buf[n] = (c); \\\n+ n++; \\\n+ len--; \\\n+ } while (false)\n+ if (val == 0) {\n+ _WRITE_OR_FAIL('0');\n+ } else {\n+ // Write digits in reverse order, then reverse them at the end.\n+ bool neg = val < 0;\n+ while (val != 0) {\n+ // C/C++ define modulo such that the result is negative if exactly one of\n+ // the dividend or divisor is negative, so this handles both positive and\n+ // negative values.\n+ char c = \"fedcba9876543210123456789abcdef\"[val % radix + 15];\n+ _WRITE_OR_FAIL(c);\n+ val /= 10;\n+ }\n+ if (neg) {\n+ _WRITE_OR_FAIL('-');\n+ }\n+ std::reverse(buf, buf + n);\n+ }\n+ _WRITE_OR_FAIL('\\0');\n+ return n;\n+#undef _WRITE_OR_FAIL\n+}\n+\nTEST(ChrootTest, Success) {\nSKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_CHROOT)));\n+ auto temp_dir = TempPath::CreateDir().ValueOrDie();\n+ const std::string temp_dir_path = temp_dir.path();\n- const auto rest = [] {\n- auto temp_dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());\n- TEST_CHECK_SUCCESS(chroot(temp_dir.path().c_str()));\n- };\n+ const auto rest = [&] { TEST_CHECK_SUCCESS(chroot(temp_dir_path.c_str())); };\nEXPECT_THAT(InForkedProcess(rest), IsPosixErrorOkAndHolds(0));\n}\n@@ -101,28 +141,34 @@ TEST(ChrootTest, CreatesNewRoot) {\nSyscallSucceeds());\nauto new_root = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());\n+ const std::string new_root_path = new_root.path();\nauto file_in_new_root =\nASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileIn(new_root.path()));\n+ const std::string file_in_new_root_path = file_in_new_root.path();\nconst auto rest = [&] {\n// chroot into new_root.\n- TEST_CHECK_SUCCESS(chroot(new_root.path().c_str()));\n+ TEST_CHECK_SUCCESS(chroot(new_root_path.c_str()));\n// getcwd should return \"(unreachable)\" followed by the initial_cwd.\n- char cwd[1024];\n- TEST_CHECK_SUCCESS(syscall(__NR_getcwd, cwd, sizeof(cwd)));\n- std::string expected_cwd = \"(unreachable)\";\n- expected_cwd += initial_cwd;\n- TEST_CHECK(strcmp(cwd, expected_cwd.c_str()) == 0);\n+ char buf[1024];\n+ TEST_CHECK_SUCCESS(syscall(__NR_getcwd, 
buf, sizeof(buf)));\n+ constexpr char kUnreachablePrefix[] = \"(unreachable)\";\n+ TEST_CHECK(\n+ strncmp(buf, kUnreachablePrefix, sizeof(kUnreachablePrefix) - 1) == 0);\n+ TEST_CHECK(strcmp(buf + sizeof(kUnreachablePrefix) - 1, initial_cwd) == 0);\n// Should not be able to stat file by its full path.\nstruct stat statbuf;\n- TEST_CHECK_ERRNO(stat(file_in_new_root.path().c_str(), &statbuf), ENOENT);\n+ TEST_CHECK_ERRNO(stat(file_in_new_root_path.c_str(), &statbuf), ENOENT);\n// Should be able to stat file at new rooted path.\n- auto basename = std::string(Basename(file_in_new_root.path()));\n- auto rootedFile = \"/\" + basename;\n- TEST_CHECK_SUCCESS(stat(rootedFile.c_str(), &statbuf));\n+ buf[0] = '/';\n+ absl::string_view basename = Basename(file_in_new_root_path);\n+ TEST_CHECK(basename.length() < (sizeof(buf) - 2));\n+ memcpy(buf + 1, basename.data(), basename.length());\n+ buf[basename.length() + 1] = '\\0';\n+ TEST_CHECK_SUCCESS(stat(buf, &statbuf));\n// Should be able to stat cwd at '.' even though it's outside root.\nTEST_CHECK_SUCCESS(stat(\".\", &statbuf));\n@@ -131,8 +177,8 @@ TEST(ChrootTest, CreatesNewRoot) {\nTEST_CHECK_SUCCESS(chdir(\"/\"));\n// getcwd should return \"/\".\n- TEST_CHECK_SUCCESS(syscall(__NR_getcwd, cwd, sizeof(cwd)));\n- TEST_CHECK_SUCCESS(strcmp(cwd, \"/\") == 0);\n+ TEST_CHECK_SUCCESS(syscall(__NR_getcwd, buf, sizeof(buf)));\n+ TEST_CHECK_SUCCESS(strcmp(buf, \"/\") == 0);\n// Statting '.', '..', '/', and '/..' all return the same dev and inode.\nstruct stat statbuf_dot;\n@@ -160,10 +206,11 @@ TEST(ChrootTest, DotDotFromOpenFD) {\nauto fd = ASSERT_NO_ERRNO_AND_VALUE(\nOpen(dir_outside_root.path(), O_RDONLY | O_DIRECTORY));\nauto new_root = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());\n+ const std::string new_root_path = new_root.path();\nconst auto rest = [&] {\n// chroot into new_root.\n- TEST_CHECK_SUCCESS(chroot(new_root.path().c_str()));\n+ TEST_CHECK_SUCCESS(chroot(new_root_path.c_str()));\n// openat on fd with path .. will succeed.\nint other_fd;\n@@ -184,15 +231,18 @@ TEST(ChrootTest, ProcFdLinkResolutionInChroot) {\nconst TempPath file_outside_chroot =\nASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());\n+ const std::string file_outside_chroot_path = file_outside_chroot.path();\nconst FileDescriptor fd =\nASSERT_NO_ERRNO_AND_VALUE(Open(file_outside_chroot.path(), O_RDONLY));\nconst FileDescriptor proc_fd = ASSERT_NO_ERRNO_AND_VALUE(\nOpen(\"/proc\", O_DIRECTORY | O_RDONLY | O_CLOEXEC));\n- const auto rest = [&] {\nauto temp_dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());\n- TEST_CHECK_SUCCESS(chroot(temp_dir.path().c_str()));\n+ const std::string temp_dir_path = temp_dir.path();\n+\n+ const auto rest = [&] {\n+ TEST_CHECK_SUCCESS(chroot(temp_dir_path.c_str()));\n// Opening relative to an already open fd to a node outside the chroot\n// works.\n@@ -201,9 +251,10 @@ TEST(ChrootTest, ProcFdLinkResolutionInChroot) {\n// Proc fd symlinks can escape the chroot if the fd the symlink refers to\n// refers to an object outside the chroot.\n+ char fd_buf[11];\n+ TEST_CHECK(SafeItoa(fd.get(), fd_buf, sizeof(fd_buf), 10));\nstruct stat s = {};\n- TEST_CHECK_SUCCESS(\n- fstatat(proc_self_fd.get(), absl::StrCat(fd.get()).c_str(), &s, 0));\n+ TEST_CHECK_SUCCESS(fstatat(proc_self_fd.get(), fd_buf, &s, 0));\n// Try to stat the stdin fd. 
Internally, this is handled differently from a\n// proc fd entry pointing to a file, since stdin is backed by a host fd, and\n@@ -223,10 +274,12 @@ TEST(ChrootTest, ProcMemSelfFdsNoEscapeProcOpen) {\nconst FileDescriptor proc =\nASSERT_NO_ERRNO_AND_VALUE(Open(\"/proc\", O_RDONLY));\n- const auto rest = [&] {\n- // Create and enter a chroot directory.\nconst auto temp_dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());\n- TEST_CHECK_SUCCESS(chroot(temp_dir.path().c_str()));\n+ const std::string temp_dir_path = temp_dir.path();\n+\n+ const auto rest = [&] {\n+ // Enter the chroot directory.\n+ TEST_CHECK_SUCCESS(chroot(temp_dir_path.c_str()));\n// Open a file inside the chroot at /foo.\nconst FileDescriptor foo =\n@@ -234,11 +287,15 @@ TEST(ChrootTest, ProcMemSelfFdsNoEscapeProcOpen) {\n// Examine /proc/self/fd/{foo_fd} to see if it exposes the fact that we're\n// inside a chroot, the path should be /foo and NOT {chroot_dir}/foo.\n- const std::string fd_path = absl::StrCat(\"self/fd/\", foo.get());\n+ constexpr char kSelfFdRelpath[] = \"self/fd/\";\n+ char path_buf[20];\n+ strcpy(path_buf, kSelfFdRelpath); // NOLINT: need async-signal-safety\n+ TEST_CHECK(SafeItoa(foo.get(), path_buf + sizeof(kSelfFdRelpath) - 1,\n+ sizeof(path_buf) - (sizeof(kSelfFdRelpath) - 1), 10));\nchar buf[1024] = {};\nsize_t bytes_read = 0;\n- TEST_CHECK_SUCCESS(bytes_read = readlinkat(proc.get(), fd_path.c_str(), buf,\n- sizeof(buf) - 1));\n+ TEST_CHECK_SUCCESS(\n+ bytes_read = readlinkat(proc.get(), path_buf, buf, sizeof(buf) - 1));\n// The link should resolve to something.\nTEST_CHECK(bytes_read > 0);\n@@ -258,10 +315,12 @@ TEST(ChrootTest, ProcMemSelfMapsNoEscapeProcOpen) {\nconst FileDescriptor proc =\nASSERT_NO_ERRNO_AND_VALUE(Open(\"/proc\", O_RDONLY));\n- const auto rest = [&] {\n- // Create and enter a chroot directory.\nconst auto temp_dir = TEST_CHECK_NO_ERRNO_AND_VALUE(TempPath::CreateDir());\n- TEST_CHECK_SUCCESS(chroot(temp_dir.path().c_str()));\n+ const std::string temp_dir_path = temp_dir.path();\n+\n+ const auto rest = [&] {\n+ // Enter the chroot directory.\n+ TEST_CHECK_SUCCESS(chroot(temp_dir_path.c_str()));\n// Open a file inside the chroot at /foo.\nconst FileDescriptor foo =\n@@ -272,9 +331,12 @@ TEST(ChrootTest, ProcMemSelfMapsNoEscapeProcOpen) {\nMAP_PRIVATE, foo.get(), 0);\nTEST_CHECK_SUCCESS(reinterpret_cast<int64_t>(foo_map));\n- // Always unmap.\n- auto cleanup_map =\n- Cleanup([&] { TEST_CHECK_SUCCESS(munmap(foo_map, kPageSize)); });\n+ // Always unmap. 
Since this function is called between fork() and execve(),\n+ // we can't use gvisor::testing::Cleanup, which uses std::function\n+ // and thus may heap-allocate (which is async-signal-unsafe); instead, use\n+ // absl::Cleanup, which is templated on the callback type.\n+ auto cleanup_map = absl::MakeCleanup(\n+ [&] { TEST_CHECK_SUCCESS(munmap(foo_map, kPageSize)); });\n// Examine /proc/self/maps to be sure that /foo doesn't appear to be\n// mapped with the full chroot path.\n@@ -289,8 +351,8 @@ TEST(ChrootTest, ProcMemSelfMapsNoEscapeProcOpen) {\nTEST_CHECK(bytes_read > 0);\n// Finally we want to make sure the maps don't contain the chroot path\n- TEST_CHECK(std::string(buf, bytes_read).find(temp_dir.path()) ==\n- std::string::npos);\n+ TEST_CHECK(\n+ !absl::StrContains(absl::string_view(buf, bytes_read), temp_dir_path));\n};\nEXPECT_THAT(InForkedProcess(rest), IsPosixErrorOkAndHolds(0));\n}\n@@ -302,72 +364,72 @@ TEST(ChrootTest, ProcMountsMountinfoNoEscape) {\nSKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_CHROOT)));\n// Create nested tmpfs mounts.\n- auto const outer_dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());\n- auto const outer_mount = ASSERT_NO_ERRNO_AND_VALUE(\n- Mount(\"none\", outer_dir.path(), \"tmpfs\", 0, \"mode=0700\", 0));\n+ const auto outer_dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());\n+ const std::string outer_dir_path = outer_dir.path();\n+ const auto outer_mount = ASSERT_NO_ERRNO_AND_VALUE(\n+ Mount(\"none\", outer_dir_path, \"tmpfs\", 0, \"mode=0700\", 0));\n+\n+ const auto inner_dir =\n+ ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDirIn(outer_dir_path));\n+ const std::string inner_dir_path = inner_dir.path();\n+ const auto inner_mount = ASSERT_NO_ERRNO_AND_VALUE(\n+ Mount(\"none\", inner_dir_path, \"tmpfs\", 0, \"mode=0700\", 0));\n+ const std::string inner_dir_in_outer_chroot_path =\n+ absl::StrCat(\"/\", Basename(inner_dir_path));\n- auto const inner_dir =\n- ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDirIn(outer_dir.path()));\n- auto const inner_mount = ASSERT_NO_ERRNO_AND_VALUE(\n- Mount(\"none\", inner_dir.path(), \"tmpfs\", 0, \"mode=0700\", 0));\n-\n- const auto rest = [&outer_dir, &inner_dir] {\n// Filenames that will be checked for mounts, all relative to /proc dir.\nstd::string paths[3] = {\"mounts\", \"self/mounts\", \"self/mountinfo\"};\nfor (const std::string& path : paths) {\n// We should have both inner and outer mounts.\nconst std::string contents =\n- TEST_CHECK_NO_ERRNO_AND_VALUE(GetContents(JoinPath(\"/proc\", path)));\n- EXPECT_THAT(contents, AllOf(HasSubstr(outer_dir.path()),\n- HasSubstr(inner_dir.path())));\n+ ASSERT_NO_ERRNO_AND_VALUE(GetContents(JoinPath(\"/proc\", path)));\n+ EXPECT_THAT(contents,\n+ AllOf(HasSubstr(outer_dir_path), HasSubstr(inner_dir_path)));\n// We better have at least two mounts: the mounts we created plus the\n// root.\nstd::vector<absl::string_view> submounts =\nabsl::StrSplit(contents, '\\n', absl::SkipWhitespace());\n- TEST_CHECK(submounts.size() > 2);\n+ ASSERT_GT(submounts.size(), 2);\n}\n// Get a FD to /proc before we enter the chroot.\nconst FileDescriptor proc =\n- TEST_CHECK_NO_ERRNO_AND_VALUE(Open(\"/proc\", O_RDONLY));\n+ ASSERT_NO_ERRNO_AND_VALUE(Open(\"/proc\", O_RDONLY));\n+ const auto rest = [&] {\n// Chroot to outer mount.\n- TEST_CHECK_SUCCESS(chroot(outer_dir.path().c_str()));\n+ TEST_CHECK_SUCCESS(chroot(outer_dir_path.c_str()));\n+ char buf[8 * 1024];\nfor (const std::string& path : paths) {\nconst FileDescriptor proc_file 
=\nTEST_CHECK_NO_ERRNO_AND_VALUE(OpenAt(proc.get(), path, O_RDONLY));\n// Only two mounts visible from this chroot: the inner and outer. Both\n// paths should be relative to the new chroot.\n- const std::string contents =\n- TEST_CHECK_NO_ERRNO_AND_VALUE(GetContentsFD(proc_file.get()));\n- EXPECT_THAT(contents,\n- AllOf(HasSubstr(absl::StrCat(Basename(inner_dir.path()))),\n- Not(HasSubstr(outer_dir.path())),\n- Not(HasSubstr(inner_dir.path()))));\n- std::vector<absl::string_view> submounts =\n- absl::StrSplit(contents, '\\n', absl::SkipWhitespace());\n- TEST_CHECK(submounts.size() == 2);\n+ ssize_t n = ReadFd(proc_file.get(), buf, sizeof(buf));\n+ TEST_PCHECK(n >= 0);\n+ buf[n] = '\\0';\n+ TEST_CHECK(absl::StrContains(buf, Basename(inner_dir_path)));\n+ TEST_CHECK(!absl::StrContains(buf, outer_dir_path));\n+ TEST_CHECK(!absl::StrContains(buf, inner_dir_path));\n+ TEST_CHECK(std::count(buf, buf + n, '\\n') == 2);\n}\n// Chroot to inner mount. We must use an absolute path accessible to our\n// chroot.\n- const std::string inner_dir_basename =\n- absl::StrCat(\"/\", Basename(inner_dir.path()));\n- TEST_CHECK_SUCCESS(chroot(inner_dir_basename.c_str()));\n+ TEST_CHECK_SUCCESS(chroot(inner_dir_in_outer_chroot_path.c_str()));\nfor (const std::string& path : paths) {\nconst FileDescriptor proc_file =\nTEST_CHECK_NO_ERRNO_AND_VALUE(OpenAt(proc.get(), path, O_RDONLY));\n- const std::string contents =\n- TEST_CHECK_NO_ERRNO_AND_VALUE(GetContentsFD(proc_file.get()));\n// Only the inner mount visible from this chroot.\n- std::vector<absl::string_view> submounts =\n- absl::StrSplit(contents, '\\n', absl::SkipWhitespace());\n- TEST_CHECK(submounts.size() == 1);\n+ ssize_t n = ReadFd(proc_file.get(), buf, sizeof(buf));\n+ TEST_PCHECK(n >= 0);\n+ buf[n] = '\\0';\n+ TEST_CHECK(std::count(buf, buf + n, '\\n') == 1);\n}\n};\nEXPECT_THAT(InForkedProcess(rest), IsPosixErrorOkAndHolds(0));\n" } ]
Go
Apache License 2.0
google/gvisor
Fix async-signal-unsafety in chroot test. PiperOrigin-RevId: 384295543
259,853
12.07.2021 13:31:34
25,200
ebe99977a47d93ee769121f9463650cfb924e243
Mark all functions that are called from a forked child with go:norace
[ { "change_type": "MODIFY", "old_path": "pkg/seccomp/seccomp_unsafe.go", "new_path": "pkg/seccomp/seccomp_unsafe.go", "diff": "@@ -68,6 +68,10 @@ func SetFilter(instrs []linux.BPFInstruction) error {\n// - Since fork()ed child processes cannot perform heap allocation, it returns\n// a unix.Errno rather than an error.\n//\n+// - The race instrumentation has to be disabled for all functions that are\n+// called in a forked child.\n+//\n+//go:norace\n//go:nosplit\nfunc SetFilterInChild(instrs []linux.BPFInstruction) unix.Errno {\nif _, _, errno := unix.RawSyscall6(unix.SYS_PRCTL, linux.PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0, 0); errno != 0 {\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/platform/ptrace/subprocess_amd64.go", "new_path": "pkg/sentry/platform/ptrace/subprocess_amd64.go", "diff": "@@ -176,6 +176,7 @@ func patchSignalInfo(regs *arch.Registers, signalInfo *linux.SignalInfo) {\n//\n// This is safe to call in an afterFork context.\n//\n+//go:norace\n//go:nosplit\nfunc enableCpuidFault() {\nunix.RawSyscall6(unix.SYS_ARCH_PRCTL, linux.ARCH_SET_CPUID, 0, 0, 0, 0, 0)\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/platform/ptrace/subprocess_linux.go", "new_path": "pkg/sentry/platform/ptrace/subprocess_linux.go", "diff": "@@ -120,6 +120,17 @@ func attachedThread(flags uintptr, defaultAction linux.BPFAction) (*thread, erro\nreturn nil, err\n}\n+ return forkStub(flags, instrs)\n+}\n+\n+// In the child, this function must not acquire any locks, because they might\n+// have been locked at the time of the fork. This means no rescheduling, no\n+// malloc calls, and no new stack segments. For the same reason compiler does\n+// not race instrument it.\n+//\n+//\n+//go:norace\n+func forkStub(flags uintptr, instrs []linux.BPFInstruction) (*thread, error) {\n// Declare all variables up front in order to ensure that there's no\n// need for allocations between beforeFork & afterFork.\nvar (\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/platform/ptrace/subprocess_linux_unsafe.go", "new_path": "pkg/sentry/platform/ptrace/subprocess_linux_unsafe.go", "diff": "@@ -26,6 +26,7 @@ import (\n// unmaskAllSignals unmasks all signals on the current thread.\n//\n+//go:norace\n//go:nosplit\nfunc unmaskAllSignals() unix.Errno {\nvar set linux.SignalSet\n" } ]
Go
Apache License 2.0
google/gvisor
Mark all functions that are called from a forked child with go:norace PiperOrigin-RevId: 384305599
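As an illustration of the pattern this commit enforces, here is a minimal, hypothetical Go sketch (the package `forksafe` and function `writeInChild` are invented for illustration; only the `//go:norace`/`//go:nosplit` directives and `unix.RawSyscall` are real). It shows the shape of a helper that is safe to call in a fork()ed child: no allocation, no stack growth, no race instrumentation, raw syscalls only.
```
// Hypothetical sketch; not part of the commit above.
package forksafe

import (
	"unsafe"

	"golang.org/x/sys/unix"
)

// writeInChild writes msg to fd using only a raw syscall. It is intended to
// be callable from a fork()ed child before execve(), so it must not allocate,
// grow the stack, or acquire runtime locks; go:norace keeps the race detector
// from injecting instrumentation that could do any of those.
//
//go:norace
//go:nosplit
func writeInChild(fd uintptr, msg []byte) unix.Errno {
	if len(msg) == 0 {
		return 0
	}
	_, _, errno := unix.RawSyscall(unix.SYS_WRITE, fd, uintptr(unsafe.Pointer(&msg[0])), uintptr(len(msg)))
	return errno
}
```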
259,858
12.07.2021 16:58:30
25,200
275932bf0852431b6f307ba9c45f26073d20ac62
Drop dedicated benchmark lifecycle. Instead, roll the output scraping into the main runner. Pass a perf flag to the runner in order to control leak checking, apply tags via the macro and appropriately disable logging. This may be removed in the future.
[ { "change_type": "MODIFY", "old_path": "test/util/test_util_impl.cc", "new_path": "test/util/test_util_impl.cc", "diff": "#include \"benchmark/benchmark.h\"\n#include \"test/util/logging.h\"\n+extern bool FLAGS_gtest_list_tests;\nextern bool FLAGS_benchmark_list_tests;\nextern std::string FLAGS_benchmark_filter;\n@@ -40,12 +41,18 @@ void TestInit(int* argc, char*** argv) {\n}\nint RunAllTests() {\n- if (FLAGS_benchmark_list_tests || FLAGS_benchmark_filter != \".\") {\n+ if (::testing::FLAGS_gtest_list_tests) {\n+ return RUN_ALL_TESTS();\n+ }\n+ if (FLAGS_benchmark_list_tests) {\nbenchmark::RunSpecifiedBenchmarks();\nreturn 0;\n- } else {\n- return RUN_ALL_TESTS();\n}\n+\n+ // Run selected tests & benchmarks.\n+ int rc = RUN_ALL_TESTS();\n+ benchmark::RunSpecifiedBenchmarks();\n+ return rc;\n}\n} // namespace testing\n" } ]
Go
Apache License 2.0
google/gvisor
Drop dedicated benchmark lifecycle. Instead, roll the output scraping into the main runner. Pass a perf flag to the runner in order to control leak checking, apply tags via the macro and appropriately disable logging. This may be removed in the future. PiperOrigin-RevId: 384348035
259,891
12.07.2021 22:34:44
25,200
e35d20f79c4604c41a3b912b41aae322adf96bc7
netstack: move SO_SNDBUF/RCVBUF clamping logic out of //pkg/tcpip Keeps Linux-specific behavior out of //pkg/tcpip Makes it clearer that clamping is done only for setsockopt calls from users Removes code duplication
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/socket/netstack/netstack.go", "new_path": "pkg/sentry/socket/netstack/netstack.go", "diff": "@@ -1682,6 +1682,26 @@ func SetSockOpt(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, level int\nreturn nil\n}\n+func clampBufSize(newSz, min, max int64) int64 {\n+ // packetOverheadFactor is used to multiply the value provided by the user on\n+ // a setsockopt(2) for setting the send/receive buffer sizes sockets.\n+ const packetOverheadFactor = 2\n+\n+ if newSz > max {\n+ newSz = max\n+ }\n+\n+ if newSz < math.MaxInt32/packetOverheadFactor {\n+ newSz *= packetOverheadFactor\n+ if newSz < min {\n+ newSz = min\n+ }\n+ } else {\n+ newSz = math.MaxInt32\n+ }\n+ return newSz\n+}\n+\n// setSockOptSocket implements SetSockOpt when level is SOL_SOCKET.\nfunc setSockOptSocket(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, name int, optVal []byte) *syserr.Error {\nswitch name {\n@@ -1691,7 +1711,9 @@ func setSockOptSocket(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, nam\n}\nv := hostarch.ByteOrder.Uint32(optVal)\n- ep.SocketOptions().SetSendBufferSize(int64(v), true /* notify */)\n+ min, max := ep.SocketOptions().SendBufferLimits()\n+ clamped := clampBufSize(int64(v), min, max)\n+ ep.SocketOptions().SetSendBufferSize(clamped, true /* notify */)\nreturn nil\ncase linux.SO_RCVBUF:\n@@ -1700,7 +1722,9 @@ func setSockOptSocket(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, nam\n}\nv := hostarch.ByteOrder.Uint32(optVal)\n- ep.SocketOptions().SetReceiveBufferSize(int64(v), true /* notify */)\n+ min, max := ep.SocketOptions().ReceiveBufferLimits()\n+ clamped := clampBufSize(int64(v), min, max)\n+ ep.SocketOptions().SetReceiveBufferSize(clamped, true /* notify */)\nreturn nil\ncase linux.SO_REUSEADDR:\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/socketops.go", "new_path": "pkg/tcpip/socketops.go", "diff": "package tcpip\nimport (\n- \"math\"\n\"sync/atomic\"\n\"gvisor.dev/gvisor/pkg/atomicbitops\"\n\"gvisor.dev/gvisor/pkg/sync\"\n)\n-// PacketOverheadFactor is used to multiply the value provided by the user on a\n-// SetSockOpt for setting the send/receive buffer sizes sockets.\n-const PacketOverheadFactor = 2\n-\n// SocketOptionsHandler holds methods that help define endpoint specific\n// behavior for socket level socket options. These must be implemented by\n// endpoints to get notified when socket level options are set.\n@@ -617,39 +612,20 @@ func (so *SocketOptions) GetSendBufferSize() int64 {\nreturn so.sendBufferSize.Load()\n}\n+// SendBufferLimits returns the [min, max) range of allowable send buffer\n+// sizes.\n+func (so *SocketOptions) SendBufferLimits() (min, max int64) {\n+ limits := so.getSendBufferLimits(so.stackHandler)\n+ return int64(limits.Min), int64(limits.Max)\n+}\n+\n// SetSendBufferSize sets value for SO_SNDBUF option. 
notify indicates if the\n// stack handler should be invoked to set the send buffer size.\nfunc (so *SocketOptions) SetSendBufferSize(sendBufferSize int64, notify bool) {\n- v := sendBufferSize\n-\n- if !notify {\n- so.sendBufferSize.Store(v)\n- return\n- }\n-\n- // Make sure the send buffer size is within the min and max\n- // allowed.\n- ss := so.getSendBufferLimits(so.stackHandler)\n- min := int64(ss.Min)\n- max := int64(ss.Max)\n- // Validate the send buffer size with min and max values.\n- // Multiply it by factor of 2.\n- if v > max {\n- v = max\n- }\n-\n- if v < math.MaxInt32/PacketOverheadFactor {\n- v *= PacketOverheadFactor\n- if v < min {\n- v = min\n+ if notify {\n+ sendBufferSize = so.handler.OnSetSendBufferSize(sendBufferSize)\n}\n- } else {\n- v = math.MaxInt32\n- }\n-\n- // Notify endpoint about change in buffer size.\n- newSz := so.handler.OnSetSendBufferSize(v)\n- so.sendBufferSize.Store(newSz)\n+ so.sendBufferSize.Store(sendBufferSize)\n}\n// GetReceiveBufferSize gets value for SO_RCVBUF option.\n@@ -657,36 +633,19 @@ func (so *SocketOptions) GetReceiveBufferSize() int64 {\nreturn so.receiveBufferSize.Load()\n}\n-// SetReceiveBufferSize sets value for SO_RCVBUF option.\n-func (so *SocketOptions) SetReceiveBufferSize(receiveBufferSize int64, notify bool) {\n- if !notify {\n- so.receiveBufferSize.Store(receiveBufferSize)\n- return\n- }\n-\n- // Make sure the send buffer size is within the min and max\n- // allowed.\n- v := receiveBufferSize\n- ss := so.getReceiveBufferLimits(so.stackHandler)\n- min := int64(ss.Min)\n- max := int64(ss.Max)\n- // Validate the send buffer size with min and max values.\n- if v > max {\n- v = max\n- }\n-\n- // Multiply it by factor of 2.\n- if v < math.MaxInt32/PacketOverheadFactor {\n- v *= PacketOverheadFactor\n- if v < min {\n- v = min\n- }\n- } else {\n- v = math.MaxInt32\n+// ReceiveBufferLimits returns the [min, max) range of allowable receive buffer\n+// sizes.\n+func (so *SocketOptions) ReceiveBufferLimits() (min, max int64) {\n+ limits := so.getReceiveBufferLimits(so.stackHandler)\n+ return int64(limits.Min), int64(limits.Max)\n}\n+// SetReceiveBufferSize sets the value of the SO_RCVBUF option, optionally\n+// notifying the owning endpoint.\n+func (so *SocketOptions) SetReceiveBufferSize(receiveBufferSize int64, notify bool) {\n+ if notify {\noldSz := so.receiveBufferSize.Load()\n- // Notify endpoint about change in buffer size.\n- newSz := so.handler.OnSetReceiveBufferSize(v, oldSz)\n- so.receiveBufferSize.Store(newSz)\n+ receiveBufferSize = so.handler.OnSetReceiveBufferSize(receiveBufferSize, oldSz)\n+ }\n+ so.receiveBufferSize.Store(receiveBufferSize)\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/raw/endpoint.go", "new_path": "pkg/tcpip/transport/raw/endpoint.go", "diff": "@@ -132,7 +132,7 @@ func newEndpoint(s *stack.Stack, netProto tcpip.NetworkProtocolNumber, transProt\n// headers included. 
Because they're write-only, We don't need to\n// register with the stack.\nif !associated {\n- e.ops.SetReceiveBufferSize(0, false)\n+ e.ops.SetReceiveBufferSize(0, false /* notify */)\ne.waiterQueue = nil\nreturn e, nil\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/tcp/tcp_test.go", "new_path": "pkg/tcpip/transport/tcp/tcp_test.go", "diff": "@@ -2147,7 +2147,7 @@ func TestSmallSegReceiveWindowAdvertisement(t *testing.T) {\n// Bump up the receive buffer size such that, when the receive window grows,\n// the scaled window exceeds maxUint16.\n- c.EP.SocketOptions().SetReceiveBufferSize(int64(opt.Max), true)\n+ c.EP.SocketOptions().SetReceiveBufferSize(int64(opt.Max)*2, true /* notify */)\n// Keep the payload size < segment overhead and such that it is a multiple\n// of the window scaled value. This enables the test to perform equality\n@@ -2267,7 +2267,7 @@ func TestNoWindowShrinking(t *testing.T) {\ninitialWnd := header.TCP(header.IPv4(pkt).Payload()).WindowSize() << c.RcvdWindowScale\ninitialLastAcceptableSeq := iss.Add(seqnum.Size(initialWnd))\n// Now shrink the receive buffer to half its original size.\n- c.EP.SocketOptions().SetReceiveBufferSize(int64(rcvBufSize/2), true)\n+ c.EP.SocketOptions().SetReceiveBufferSize(int64(rcvBufSize), true /* notify */)\ndata := generateRandomPayload(t, rcvBufSize)\n// Send a payload of half the size of rcvBufSize.\n@@ -2523,7 +2523,7 @@ func TestScaledWindowAccept(t *testing.T) {\ndefer ep.Close()\n// Set the window size greater than the maximum non-scaled window.\n- ep.SocketOptions().SetReceiveBufferSize(65535*3, true)\n+ ep.SocketOptions().SetReceiveBufferSize(65535*6, true /* notify */)\nif err := ep.Bind(tcpip.FullAddress{Port: context.StackPort}); err != nil {\nt.Fatalf(\"Bind failed: %s\", err)\n@@ -2595,7 +2595,7 @@ func TestNonScaledWindowAccept(t *testing.T) {\ndefer ep.Close()\n// Set the window size greater than the maximum non-scaled window.\n- ep.SocketOptions().SetReceiveBufferSize(65535*3, true)\n+ ep.SocketOptions().SetReceiveBufferSize(65535*6, true /* notify */)\nif err := ep.Bind(tcpip.FullAddress{Port: context.StackPort}); err != nil {\nt.Fatalf(\"Bind failed: %s\", err)\n@@ -3188,7 +3188,7 @@ func TestPassiveSendMSSLessThanMTU(t *testing.T) {\n// Set the buffer size to a deterministic size so that we can check the\n// window scaling option.\nconst rcvBufferSize = 0x20000\n- ep.SocketOptions().SetReceiveBufferSize(rcvBufferSize, true)\n+ ep.SocketOptions().SetReceiveBufferSize(rcvBufferSize*2, true /* notify */)\nif err := ep.Bind(tcpip.FullAddress{Port: context.StackPort}); err != nil {\nt.Fatalf(\"Bind failed: %s\", err)\n@@ -3327,7 +3327,7 @@ func TestSynOptionsOnActiveConnect(t *testing.T) {\n// window scaling option.\nconst rcvBufferSize = 0x20000\nconst wndScale = 3\n- c.EP.SocketOptions().SetReceiveBufferSize(rcvBufferSize, true)\n+ c.EP.SocketOptions().SetReceiveBufferSize(rcvBufferSize*2, true /* notify */)\n// Start connection attempt.\nwe, ch := waiter.NewChannelEntry(nil)\n@@ -4669,52 +4669,6 @@ func TestDefaultBufferSizes(t *testing.T) {\ncheckRecvBufferSize(t, ep, tcp.DefaultReceiveBufferSize*3)\n}\n-func TestMinMaxBufferSizes(t *testing.T) {\n- s := stack.New(stack.Options{\n- NetworkProtocols: []stack.NetworkProtocolFactory{ipv4.NewProtocol},\n- TransportProtocols: []stack.TransportProtocolFactory{tcp.NewProtocol},\n- })\n-\n- // Check the default values.\n- ep, err := s.NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, &waiter.Queue{})\n- if err != nil {\n- t.Fatalf(\"NewEndpoint failed; 
%s\", err)\n- }\n- defer ep.Close()\n-\n- // Change the min/max values for send/receive\n- {\n- opt := tcpip.TCPReceiveBufferSizeRangeOption{Min: 200, Default: tcp.DefaultReceiveBufferSize * 2, Max: tcp.DefaultReceiveBufferSize * 20}\n- if err := s.SetTransportProtocolOption(tcp.ProtocolNumber, &opt); err != nil {\n- t.Fatalf(\"SetTransportProtocolOption(%d, &%#v): %s\", tcp.ProtocolNumber, opt, err)\n- }\n- }\n-\n- {\n- opt := tcpip.TCPSendBufferSizeRangeOption{Min: 300, Default: tcp.DefaultSendBufferSize * 3, Max: tcp.DefaultSendBufferSize * 30}\n- if err := s.SetTransportProtocolOption(tcp.ProtocolNumber, &opt); err != nil {\n- t.Fatalf(\"SetTransportProtocolOption(%d, &%#v): %s\", tcp.ProtocolNumber, opt, err)\n- }\n- }\n-\n- // Set values below the min/2.\n- ep.SocketOptions().SetReceiveBufferSize(99, true)\n- checkRecvBufferSize(t, ep, 200)\n-\n- ep.SocketOptions().SetSendBufferSize(149, true)\n-\n- checkSendBufferSize(t, ep, 300)\n-\n- // Set values above the max.\n- ep.SocketOptions().SetReceiveBufferSize(1+tcp.DefaultReceiveBufferSize*20, true)\n- // Values above max are capped at max and then doubled.\n- checkRecvBufferSize(t, ep, tcp.DefaultReceiveBufferSize*20*2)\n-\n- ep.SocketOptions().SetSendBufferSize(1+tcp.DefaultSendBufferSize*30, true)\n- // Values above max are capped at max and then doubled.\n- checkSendBufferSize(t, ep, tcp.DefaultSendBufferSize*30*2)\n-}\n-\nfunc TestBindToDeviceOption(t *testing.T) {\ns := stack.New(stack.Options{\nNetworkProtocols: []stack.NetworkProtocolFactory{ipv4.NewProtocol},\n@@ -7752,7 +7706,7 @@ func TestIncreaseWindowOnBufferResize(t *testing.T) {\n// Increasing the buffer from should generate an ACK,\n// since window grew from small value to larger equal MSS\n- c.EP.SocketOptions().SetReceiveBufferSize(rcvBuf*2, true)\n+ c.EP.SocketOptions().SetReceiveBufferSize(rcvBuf*4, true /* notify */)\nchecker.IPv4(t, c.GetPacket(),\nchecker.PayloadLen(header.TCPMinimumSize),\nchecker.TCP(\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/tcp/testing/context/context.go", "new_path": "pkg/tcpip/transport/tcp/testing/context/context.go", "diff": "@@ -757,7 +757,7 @@ func (c *Context) Create(epRcvBuf int) {\n}\nif epRcvBuf != -1 {\n- c.EP.SocketOptions().SetReceiveBufferSize(int64(epRcvBuf), true /* notify */)\n+ c.EP.SocketOptions().SetReceiveBufferSize(int64(epRcvBuf)*2, true /* notify */)\n}\n}\n" } ]
Go
Apache License 2.0
google/gvisor
netstack: move SO_SNDBUF/RCVBUF clamping logic out of //pkg/tcpip - Keeps Linux-specific behavior out of //pkg/tcpip - Makes it clearer that clamping is done only for setsockopt calls from users - Removes code duplication PiperOrigin-RevId: 384389809
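The clamping helper moved into the netstack socket layer is small enough to show standalone. The sketch below restates `clampBufSize` and `packetOverheadFactor` from the diff above; the `main` function and the [4096, 1<<20) limits are illustrative values, not taken from the code.
```
package main

import (
	"fmt"
	"math"
)

// packetOverheadFactor doubles user-requested sizes to account for packet
// metadata overhead, mirroring Linux's SO_SNDBUF/SO_RCVBUF behavior.
const packetOverheadFactor = 2

// clampBufSize caps newSz at max, doubles it for overhead (watching for
// int32 overflow), and raises it to min. It now runs only on the
// setsockopt(2) path, keeping //pkg/tcpip free of this Linux-specific rule.
func clampBufSize(newSz, min, max int64) int64 {
	if newSz > max {
		newSz = max
	}
	if newSz < math.MaxInt32/packetOverheadFactor {
		newSz *= packetOverheadFactor
		if newSz < min {
			newSz = min
		}
	} else {
		newSz = math.MaxInt32
	}
	return newSz
}

func main() {
	// A request for 8 KiB is doubled to 16 KiB, staying inside the limits.
	fmt.Println(clampBufSize(8192, 4096, 1<<20)) // 16384
}
```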
260,004
13.07.2021 10:19:29
25,200
b4caeaf78f533037d191b61fc83919a3ecd379d1
Deflake TestRouterSolicitation Before this change, transmission of the first router solicitation races with the adding of an IPv6 link-local address. This change creates the NIC in the disabled state; the NIC is only enabled after the address is added (if required) to avoid this race.
[ { "change_type": "MODIFY", "old_path": "pkg/tcpip/stack/ndp_test.go", "new_path": "pkg/tcpip/stack/ndp_test.go", "diff": "@@ -5356,8 +5356,9 @@ func TestRouterSolicitation(t *testing.T) {\nRandSource: &randSource,\n})\n- if err := s.CreateNIC(nicID, &e); err != nil {\n- t.Fatalf(\"CreateNIC(%d, _) = %s\", nicID, err)\n+ opts := stack.NICOptions{Disabled: true}\n+ if err := s.CreateNICWithOptions(nicID, &e, opts); err != nil {\n+ t.Fatalf(\"CreateNICWithOptions(%d, _, %#v) = %s\", nicID, opts, err)\n}\nif addr := test.nicAddr; addr != \"\" {\n@@ -5366,6 +5367,10 @@ func TestRouterSolicitation(t *testing.T) {\n}\n}\n+ if err := s.EnableNIC(nicID); err != nil {\n+ t.Fatalf(\"EnableNIC(%d): %s\", nicID, err)\n+ }\n+\n// Make sure each RS is sent at the right time.\nremaining := test.maxRtrSolicit\nif remaining != 0 {\n" } ]
Go
Apache License 2.0
google/gvisor
Deflake TestRouterSolicitation Before this change, transmission of the first router solicitation races with the adding of an IPv6 link-local address. This change creates the NIC in the disabled state; the NIC is only enabled after the address is added (if required) to avoid this race. PiperOrigin-RevId: 384493553
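To make the fix concrete, here is a hypothetical, self-contained Go sketch of the create-disabled-then-enable pattern. `CreateNICWithOptions`, `stack.NICOptions{Disabled: true}` and `EnableNIC` come from the diff above; the channel link endpoint, link address, MTU and NIC ID are illustrative choices, and the address-adding step is left as a comment rather than a concrete API call.
```
package main

import (
	"log"

	"gvisor.dev/gvisor/pkg/tcpip"
	"gvisor.dev/gvisor/pkg/tcpip/link/channel"
	"gvisor.dev/gvisor/pkg/tcpip/network/ipv6"
	"gvisor.dev/gvisor/pkg/tcpip/stack"
)

func main() {
	const nicID = 1
	s := stack.New(stack.Options{
		NetworkProtocols: []stack.NetworkProtocolFactory{ipv6.NewProtocol},
	})
	e := channel.New(1 /* size */, 1280 /* mtu */, tcpip.LinkAddress("\x02\x00\x00\x00\x00\x01"))

	// Create the NIC disabled so it cannot transmit router solicitations yet.
	if err := s.CreateNICWithOptions(nicID, e, stack.NICOptions{Disabled: true}); err != nil {
		log.Fatalf("CreateNICWithOptions(%d): %s", nicID, err)
	}

	// ... add any addresses or other per-NIC state here; nothing can race
	// with the first solicitation because the NIC is still disabled ...

	// Only now let the NIC start soliciting routers.
	if err := s.EnableNIC(nicID); err != nil {
		log.Fatalf("EnableNIC(%d): %s", nicID, err)
	}
}
```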
259,891
13.07.2021 11:18:15
25,200
1fe6db8c542431d3d6e229f563fefbd2f962fc81
netstack: atomically update buffer sizes Previously, two calls to set the send or receive buffer size could have raced and left state wherein: The actual size depended on one call The value returned by getsockopt() depended on the other
[ { "change_type": "MODIFY", "old_path": "pkg/tcpip/socketops.go", "new_path": "pkg/tcpip/socketops.go", "diff": "@@ -17,7 +17,6 @@ package tcpip\nimport (\n\"sync/atomic\"\n- \"gvisor.dev/gvisor/pkg/atomicbitops\"\n\"gvisor.dev/gvisor/pkg/sync\"\n)\n@@ -208,16 +207,24 @@ type SocketOptions struct {\n// will not change.\ngetSendBufferLimits GetSendBufferLimits `state:\"manual\"`\n+ // sendBufSizeMu protects sendBufferSize and calls to\n+ // handler.OnSetSendBufferSize.\n+ sendBufSizeMu sync.Mutex `state:\"nosave\"`\n+\n// sendBufferSize determines the send buffer size for this socket.\n- sendBufferSize atomicbitops.AlignedAtomicInt64\n+ sendBufferSize int64\n// getReceiveBufferLimits provides the handler to get the min, default and\n// max size for receive buffer. It is initialized at the creation time and\n// will not change.\ngetReceiveBufferLimits GetReceiveBufferLimits `state:\"manual\"`\n+ // receiveBufSizeMu protects receiveBufferSize and calls to\n+ // handler.OnSetReceiveBufferSize.\n+ receiveBufSizeMu sync.Mutex `state:\"nosave\"`\n+\n// receiveBufferSize determines the receive buffer size for this socket.\n- receiveBufferSize atomicbitops.AlignedAtomicInt64\n+ receiveBufferSize int64\n// mu protects the access to the below fields.\nmu sync.Mutex `state:\"nosave\"`\n@@ -607,11 +614,6 @@ func (so *SocketOptions) SetBindToDevice(bindToDevice int32) Error {\nreturn nil\n}\n-// GetSendBufferSize gets value for SO_SNDBUF option.\n-func (so *SocketOptions) GetSendBufferSize() int64 {\n- return so.sendBufferSize.Load()\n-}\n-\n// SendBufferLimits returns the [min, max) range of allowable send buffer\n// sizes.\nfunc (so *SocketOptions) SendBufferLimits() (min, max int64) {\n@@ -619,18 +621,22 @@ func (so *SocketOptions) SendBufferLimits() (min, max int64) {\nreturn int64(limits.Min), int64(limits.Max)\n}\n+// GetSendBufferSize gets value for SO_SNDBUF option.\n+func (so *SocketOptions) GetSendBufferSize() int64 {\n+ so.sendBufSizeMu.Lock()\n+ defer so.sendBufSizeMu.Unlock()\n+ return so.sendBufferSize\n+}\n+\n// SetSendBufferSize sets value for SO_SNDBUF option. 
notify indicates if the\n// stack handler should be invoked to set the send buffer size.\nfunc (so *SocketOptions) SetSendBufferSize(sendBufferSize int64, notify bool) {\n+ so.sendBufSizeMu.Lock()\n+ defer so.sendBufSizeMu.Unlock()\nif notify {\nsendBufferSize = so.handler.OnSetSendBufferSize(sendBufferSize)\n}\n- so.sendBufferSize.Store(sendBufferSize)\n-}\n-\n-// GetReceiveBufferSize gets value for SO_RCVBUF option.\n-func (so *SocketOptions) GetReceiveBufferSize() int64 {\n- return so.receiveBufferSize.Load()\n+ so.sendBufferSize = sendBufferSize\n}\n// ReceiveBufferLimits returns the [min, max) range of allowable receive buffer\n@@ -640,12 +646,20 @@ func (so *SocketOptions) ReceiveBufferLimits() (min, max int64) {\nreturn int64(limits.Min), int64(limits.Max)\n}\n+// GetReceiveBufferSize gets value for SO_RCVBUF option.\n+func (so *SocketOptions) GetReceiveBufferSize() int64 {\n+ so.receiveBufSizeMu.Lock()\n+ defer so.receiveBufSizeMu.Unlock()\n+ return so.receiveBufferSize\n+}\n+\n// SetReceiveBufferSize sets the value of the SO_RCVBUF option, optionally\n// notifying the owning endpoint.\nfunc (so *SocketOptions) SetReceiveBufferSize(receiveBufferSize int64, notify bool) {\n+ so.receiveBufSizeMu.Lock()\n+ defer so.receiveBufSizeMu.Unlock()\nif notify {\n- oldSz := so.receiveBufferSize.Load()\n- receiveBufferSize = so.handler.OnSetReceiveBufferSize(receiveBufferSize, oldSz)\n+ receiveBufferSize = so.handler.OnSetReceiveBufferSize(receiveBufferSize, so.receiveBufferSize)\n}\n- so.receiveBufferSize.Store(receiveBufferSize)\n+ so.receiveBufferSize = receiveBufferSize\n}\n" } ]
Go
Apache License 2.0
google/gvisor
netstack: atomically update buffer sizes Previously, two calls to set the send or receive buffer size could have raced and left state wherein: - The actual size depended on one call - The value returned by getsockopt() depended on the other PiperOrigin-RevId: 384508720
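The race being fixed is between the handler's read-modify-write of the requested size and the final store. A minimal, hypothetical sketch of the resulting pattern (types `bufferSize` and `Handler` are invented; they stand in for `SocketOptions` and its `OnSetSendBufferSize`/`OnSetReceiveBufferSize` hooks) is shown below; holding one mutex across both the callback and the store is what keeps the getsockopt() view consistent with the size actually applied.
```
package sockopt

import "sync"

// Handler adjusts a requested buffer size (for example by clamping it) and
// returns the value that should actually be stored.
type Handler func(requested int64) int64

type bufferSize struct {
	mu      sync.Mutex
	size    int64
	handler Handler
}

// Get returns the last stored size.
func (b *bufferSize) Get() int64 {
	b.mu.Lock()
	defer b.mu.Unlock()
	return b.size
}

// Set stores a new size. When notify is true the handler runs inside the same
// critical section, so two concurrent Set calls can no longer interleave
// between "handler computes value" and "value is published".
func (b *bufferSize) Set(requested int64, notify bool) {
	b.mu.Lock()
	defer b.mu.Unlock()
	if notify && b.handler != nil {
		requested = b.handler(requested)
	}
	b.size = requested
}
```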
259,992
13.07.2021 11:33:49
25,200
c16e69a9d5ec3422b648a6d32842442925285a29
Use consistent naming for subcontainers It was confusing to find functions relating to root and non-root containers. Replace "non-root" with "subcontainer" and make naming consistent in Sandbox and controller.
[ { "change_type": "MODIFY", "old_path": "pkg/shim/proc/exec.go", "new_path": "pkg/shim/proc/exec.go", "diff": "@@ -113,7 +113,7 @@ func (e *execProcess) Delete(ctx context.Context) error {\nreturn e.execState.Delete(ctx)\n}\n-func (e *execProcess) delete() error {\n+func (e *execProcess) delete() {\ne.wg.Wait()\nif e.io != nil {\nfor _, c := range e.closers {\n@@ -121,7 +121,6 @@ func (e *execProcess) delete() error {\n}\ne.io.Close()\n}\n- return nil\n}\nfunc (e *execProcess) Resize(ws console.WinSize) error {\n" }, { "change_type": "MODIFY", "old_path": "pkg/shim/proc/exec_state.go", "new_path": "pkg/shim/proc/exec_state.go", "diff": "@@ -64,9 +64,7 @@ func (s *execCreatedState) Start(ctx context.Context) error {\n}\nfunc (s *execCreatedState) Delete(context.Context) error {\n- if err := s.p.delete(); err != nil {\n- return err\n- }\n+ s.p.delete()\ns.transition(deleted)\nreturn nil\n}\n@@ -144,9 +142,7 @@ func (s *execStoppedState) Start(context.Context) error {\n}\nfunc (s *execStoppedState) Delete(context.Context) error {\n- if err := s.p.delete(); err != nil {\n- return err\n- }\n+ s.p.delete()\ns.transition(deleted)\nreturn nil\n}\n" }, { "change_type": "MODIFY", "old_path": "runsc/boot/controller.go", "new_path": "runsc/boot/controller.go", "diff": "@@ -41,80 +41,74 @@ import (\n)\nconst (\n- // ContainerCheckpoint checkpoints a container.\n- ContainerCheckpoint = \"containerManager.Checkpoint\"\n+ // ContMgrCheckpoint checkpoints a container.\n+ ContMgrCheckpoint = \"containerManager.Checkpoint\"\n- // ContainerCreate creates a container.\n- ContainerCreate = \"containerManager.Create\"\n+ // ContMgrCreateSubcontainer creates a sub-container.\n+ ContMgrCreateSubcontainer = \"containerManager.CreateSubcontainer\"\n- // ContainerDestroy is used to stop a non-root container and free all\n+ // ContMgrDestroySubcontainer is used to stop a sub-container and free all\n// associated resources in the sandbox.\n- ContainerDestroy = \"containerManager.Destroy\"\n+ ContMgrDestroySubcontainer = \"containerManager.DestroySubcontainer\"\n- // ContainerEvent is the URPC endpoint for getting stats about the\n- // container used by \"runsc events\".\n- ContainerEvent = \"containerManager.Event\"\n+ // ContMgrEvent gets stats about the container used by \"runsc events\".\n+ ContMgrEvent = \"containerManager.Event\"\n- // ContainerExecuteAsync is the URPC endpoint for executing a command in a\n- // container.\n- ContainerExecuteAsync = \"containerManager.ExecuteAsync\"\n+ // ContMgrExecuteAsync executes a command in a container.\n+ ContMgrExecuteAsync = \"containerManager.ExecuteAsync\"\n- // ContainerPause pauses the container.\n- ContainerPause = \"containerManager.Pause\"\n+ // ContMgrPause pauses the sandbox (note that individual containers cannot be\n+ // paused).\n+ ContMgrPause = \"containerManager.Pause\"\n- // ContainerProcesses is the URPC endpoint for getting the list of\n- // processes running in a container.\n- ContainerProcesses = \"containerManager.Processes\"\n+ // ContMgrProcesses lists processes running in a container.\n+ ContMgrProcesses = \"containerManager.Processes\"\n- // ContainerRestore restores a container from a statefile.\n- ContainerRestore = \"containerManager.Restore\"\n+ // ContMgrRestore restores a container from a statefile.\n+ ContMgrRestore = \"containerManager.Restore\"\n- // ContainerResume unpauses the paused container.\n- ContainerResume = \"containerManager.Resume\"\n+ // ContMgrResume unpauses the paused sandbox (note that individual containers\n+ // cannot 
be resumed).\n+ ContMgrResume = \"containerManager.Resume\"\n- // ContainerSignal is used to send a signal to a container.\n- ContainerSignal = \"containerManager.Signal\"\n+ // ContMgrSignal sends a signal to a container.\n+ ContMgrSignal = \"containerManager.Signal\"\n- // ContainerSignalProcess is used to send a signal to a particular\n- // process in a container.\n- ContainerSignalProcess = \"containerManager.SignalProcess\"\n+ // ContMgrStartSubcontainer starts a sub-container inside a running sandbox.\n+ ContMgrStartSubcontainer = \"containerManager.StartSubcontainer\"\n- // ContainerStart is the URPC endpoint for running a non-root container\n- // within a sandbox.\n- ContainerStart = \"containerManager.Start\"\n+ // ContMgrWait waits on the init process of the container and returns its\n+ // ExitStatus.\n+ ContMgrWait = \"containerManager.Wait\"\n- // ContainerWait is used to wait on the init process of the container\n- // and return its ExitStatus.\n- ContainerWait = \"containerManager.Wait\"\n+ // ContMgrWaitPID waits on a process with a certain PID in the sandbox and\n+ // return its ExitStatus.\n+ ContMgrWaitPID = \"containerManager.WaitPID\"\n- // ContainerWaitPID is used to wait on a process with a certain PID in\n- // the sandbox and return its ExitStatus.\n- ContainerWaitPID = \"containerManager.WaitPID\"\n+ // ContMgrRootContainerStart starts a new sandbox with a root container.\n+ ContMgrRootContainerStart = \"containerManager.StartRoot\"\n+)\n- // NetworkCreateLinksAndRoutes is the URPC endpoint for creating links\n- // and routes in a network stack.\n+const (\n+ // NetworkCreateLinksAndRoutes creates links and routes in a network stack.\nNetworkCreateLinksAndRoutes = \"Network.CreateLinksAndRoutes\"\n- // RootContainerStart is the URPC endpoint for starting a new sandbox\n- // with root container.\n- RootContainerStart = \"containerManager.StartRoot\"\n-\n- // SandboxStacks collects sandbox stacks for debugging.\n- SandboxStacks = \"debug.Stacks\"\n+ // DebugStacks collects sandbox stacks for debugging.\n+ DebugStacks = \"debug.Stacks\"\n)\n// Profiling related commands (see pprof.go for more details).\nconst (\n- CPUProfile = \"Profile.CPU\"\n- HeapProfile = \"Profile.Heap\"\n- BlockProfile = \"Profile.Block\"\n- MutexProfile = \"Profile.Mutex\"\n- Trace = \"Profile.Trace\"\n+ ProfileCPU = \"Profile.CPU\"\n+ ProfileHeap = \"Profile.Heap\"\n+ ProfileBlock = \"Profile.Block\"\n+ ProfileMutex = \"Profile.Mutex\"\n+ ProfileTrace = \"Profile.Trace\"\n)\n// Logging related commands (see logging.go for more details).\nconst (\n- ChangeLogging = \"Logging.Change\"\n+ LoggingChange = \"Logging.Change\"\n)\n// ControlSocketAddr generates an abstract unix socket name for the given ID.\n@@ -214,9 +208,9 @@ type CreateArgs struct {\nurpc.FilePayload\n}\n-// Create creates a container within a sandbox.\n-func (cm *containerManager) Create(args *CreateArgs, _ *struct{}) error {\n- log.Debugf(\"containerManager.Create: %s\", args.CID)\n+// CreateSubcontainer creates a container within a sandbox.\n+func (cm *containerManager) CreateSubcontainer(args *CreateArgs, _ *struct{}) error {\n+ log.Debugf(\"containerManager.CreateSubcontainer: %s\", args.CID)\nif len(args.Files) > 1 {\nreturn fmt.Errorf(\"start arguments must have at most 1 files for TTY\")\n@@ -229,7 +223,7 @@ func (cm *containerManager) Create(args *CreateArgs, _ *struct{}) error {\nreturn fmt.Errorf(\"error dup'ing TTY file: %w\", err)\n}\n}\n- return cm.l.createContainer(args.CID, tty)\n+ return 
cm.l.createSubcontainer(args.CID, tty)\n}\n// StartArgs contains arguments to the Start method.\n@@ -249,13 +243,13 @@ type StartArgs struct {\nurpc.FilePayload\n}\n-// Start runs a created container within a sandbox.\n-func (cm *containerManager) Start(args *StartArgs, _ *struct{}) error {\n+// StartSubcontainer runs a created container within a sandbox.\n+func (cm *containerManager) StartSubcontainer(args *StartArgs, _ *struct{}) error {\n// Validate arguments.\nif args == nil {\nreturn errors.New(\"start missing arguments\")\n}\n- log.Debugf(\"containerManager.Start, cid: %s, args: %+v\", args.CID, args)\n+ log.Debugf(\"containerManager.StartSubcontainer, cid: %s, args: %+v\", args.CID, args)\nif args.Spec == nil {\nreturn errors.New(\"start arguments missing spec\")\n}\n@@ -303,19 +297,19 @@ func (cm *containerManager) Start(args *StartArgs, _ *struct{}) error {\n}\n}()\n- if err := cm.l.startContainer(args.Spec, args.Conf, args.CID, stdios, goferFDs); err != nil {\n- log.Debugf(\"containerManager.Start failed, cid: %s, args: %+v, err: %v\", args.CID, args, err)\n+ if err := cm.l.startSubcontainer(args.Spec, args.Conf, args.CID, stdios, goferFDs); err != nil {\n+ log.Debugf(\"containerManager.StartSubcontainer failed, cid: %s, args: %+v, err: %v\", args.CID, args, err)\nreturn err\n}\nlog.Debugf(\"Container started, cid: %s\", args.CID)\nreturn nil\n}\n-// Destroy stops a container if it is still running and cleans up its\n-// filesystem.\n-func (cm *containerManager) Destroy(cid *string, _ *struct{}) error {\n- log.Debugf(\"containerManager.destroy, cid: %s\", *cid)\n- return cm.l.destroyContainer(*cid)\n+// DestroySubcontainer stops a container if it is still running and cleans up\n+// its filesystem.\n+func (cm *containerManager) DestroySubcontainer(cid *string, _ *struct{}) error {\n+ log.Debugf(\"containerManager.DestroySubcontainer, cid: %s\", *cid)\n+ return cm.l.destroySubcontainer(*cid)\n}\n// ExecuteAsync starts running a command on a created or running sandbox. It\n@@ -346,7 +340,7 @@ func (cm *containerManager) Checkpoint(o *control.SaveOpts, _ *struct{}) error {\nreturn state.Save(o, nil)\n}\n-// Pause suspends a container.\n+// Pause suspends a sandbox.\nfunc (cm *containerManager) Pause(_, _ *struct{}) error {\nlog.Debugf(\"containerManager.Pause\")\n// TODO(gvisor.dev/issues/6243): save/restore not supported w/ hostinet\n@@ -488,7 +482,7 @@ func (cm *containerManager) Restore(o *RestoreOpts, _ *struct{}) error {\nreturn nil\n}\n-// Resume unpauses a container.\n+// Resume unpauses a sandbox.\nfunc (cm *containerManager) Resume(_, _ *struct{}) error {\nlog.Debugf(\"containerManager.Resume\")\ncm.l.k.Unpause()\n" }, { "change_type": "MODIFY", "old_path": "runsc/boot/loader.go", "new_path": "runsc/boot/loader.go", "diff": "@@ -633,8 +633,8 @@ func (l *Loader) run() error {\nreturn l.k.Start()\n}\n-// createContainer creates a new container inside the sandbox.\n-func (l *Loader) createContainer(cid string, tty *fd.FD) error {\n+// createSubcontainer creates a new container inside the sandbox.\n+func (l *Loader) createSubcontainer(cid string, tty *fd.FD) error {\nl.mu.Lock()\ndefer l.mu.Unlock()\n@@ -646,10 +646,10 @@ func (l *Loader) createContainer(cid string, tty *fd.FD) error {\nreturn nil\n}\n-// startContainer starts a child container. It returns the thread group ID of\n+// startSubcontainer starts a child container. It returns the thread group ID of\n// the newly created process. Used FDs are either closed or released. 
It's safe\n// for the caller to close any remaining files upon return.\n-func (l *Loader) startContainer(spec *specs.Spec, conf *config.Config, cid string, stdioFDs, goferFDs []*fd.FD) error {\n+func (l *Loader) startSubcontainer(spec *specs.Spec, conf *config.Config, cid string, stdioFDs, goferFDs []*fd.FD) error {\n// Create capabilities.\ncaps, err := specutils.Capabilities(conf.EnableRaw, spec.Process.Capabilities)\nif err != nil {\n@@ -851,9 +851,9 @@ func (l *Loader) startGoferMonitor(cid string, goferFDs []*fd.FD) {\n}()\n}\n-// destroyContainer stops a container if it is still running and cleans up its\n-// filesystem.\n-func (l *Loader) destroyContainer(cid string) error {\n+// destroySubcontainer stops a container if it is still running and cleans up\n+// its filesystem.\n+func (l *Loader) destroySubcontainer(cid string) error {\nl.mu.Lock()\ndefer l.mu.Unlock()\n@@ -1001,7 +1001,7 @@ func (l *Loader) waitContainer(cid string, waitStatus *uint32) error {\n// Check for leaks and write coverage report after the root container has\n// exited. This guarantees that the report is written in cases where the\n- // sandbox is killed by a signal after the ContainerWait request is completed.\n+ // sandbox is killed by a signal after the ContMgrWait request is completed.\nif l.root.procArgs.ContainerID == cid {\n// All sentry-created resources should have been released at this point.\nrefsvfs2.DoLeakCheck()\n" }, { "change_type": "MODIFY", "old_path": "runsc/container/container.go", "new_path": "runsc/container/container.go", "diff": "@@ -310,7 +310,7 @@ func New(conf *config.Config, args Args) (*Container, error) {\ndefer tty.Close()\n}\n- if err := c.Sandbox.CreateContainer(conf, c.ID, tty); err != nil {\n+ if err := c.Sandbox.CreateSubcontainer(conf, c.ID, tty); err != nil {\nreturn nil, err\n}\n}\n@@ -388,7 +388,7 @@ func (c *Container) Start(conf *config.Config) error {\nstdios = []*os.File{os.Stdin, os.Stdout, os.Stderr}\n}\n- return c.Sandbox.StartContainer(c.Spec, conf, c.ID, stdios, goferFiles)\n+ return c.Sandbox.StartSubcontainer(c.Spec, conf, c.ID, stdios, goferFiles)\n}); err != nil {\nreturn err\n}\n" }, { "change_type": "MODIFY", "old_path": "runsc/sandbox/sandbox.go", "new_path": "runsc/sandbox/sandbox.go", "diff": "@@ -180,9 +180,9 @@ func New(conf *config.Config, args *Args) (*Sandbox, error) {\nreturn s, nil\n}\n-// CreateContainer creates a non-root container inside the sandbox.\n-func (s *Sandbox) CreateContainer(conf *config.Config, cid string, tty *os.File) error {\n- log.Debugf(\"Create non-root container %q in sandbox %q, PID: %d\", cid, s.ID, s.Pid)\n+// CreateSubcontainer creates a container inside the sandbox.\n+func (s *Sandbox) CreateSubcontainer(conf *config.Config, cid string, tty *os.File) error {\n+ log.Debugf(\"Create sub-container %q in sandbox %q, PID: %d\", cid, s.ID, s.Pid)\nvar files []*os.File\nif tty != nil {\n@@ -202,8 +202,8 @@ func (s *Sandbox) CreateContainer(conf *config.Config, cid string, tty *os.File)\nCID: cid,\nFilePayload: urpc.FilePayload{Files: files},\n}\n- if err := sandboxConn.Call(boot.ContainerCreate, &args, nil); err != nil {\n- return fmt.Errorf(\"creating non-root container %q: %v\", cid, err)\n+ if err := sandboxConn.Call(boot.ContMgrCreateSubcontainer, &args, nil); err != nil {\n+ return fmt.Errorf(\"creating sub-container %q: %v\", cid, err)\n}\nreturn nil\n}\n@@ -224,16 +224,16 @@ func (s *Sandbox) StartRoot(spec *specs.Spec, conf *config.Config) error {\n// Send a message to the sandbox control server to start the root\n// 
container.\n- if err := conn.Call(boot.RootContainerStart, &s.ID, nil); err != nil {\n+ if err := conn.Call(boot.ContMgrRootContainerStart, &s.ID, nil); err != nil {\nreturn fmt.Errorf(\"starting root container: %v\", err)\n}\nreturn nil\n}\n-// StartContainer starts running a non-root container inside the sandbox.\n-func (s *Sandbox) StartContainer(spec *specs.Spec, conf *config.Config, cid string, stdios, goferFiles []*os.File) error {\n- log.Debugf(\"Start non-root container %q in sandbox %q, PID: %d\", cid, s.ID, s.Pid)\n+// StartSubcontainer starts running a sub-container inside the sandbox.\n+func (s *Sandbox) StartSubcontainer(spec *specs.Spec, conf *config.Config, cid string, stdios, goferFiles []*os.File) error {\n+ log.Debugf(\"Start sub-container %q in sandbox %q, PID: %d\", cid, s.ID, s.Pid)\nif err := s.configureStdios(conf, stdios); err != nil {\nreturn err\n@@ -258,8 +258,8 @@ func (s *Sandbox) StartContainer(spec *specs.Spec, conf *config.Config, cid stri\nCID: cid,\nFilePayload: payload,\n}\n- if err := sandboxConn.Call(boot.ContainerStart, &args, nil); err != nil {\n- return fmt.Errorf(\"starting non-root container %v: %v\", spec.Process.Args, err)\n+ if err := sandboxConn.Call(boot.ContMgrStartSubcontainer, &args, nil); err != nil {\n+ return fmt.Errorf(\"starting sub-container %v: %v\", spec.Process.Args, err)\n}\nreturn nil\n}\n@@ -301,7 +301,7 @@ func (s *Sandbox) Restore(cid string, spec *specs.Spec, conf *config.Config, fil\n}\n// Restore the container and start the root container.\n- if err := conn.Call(boot.ContainerRestore, &opt, nil); err != nil {\n+ if err := conn.Call(boot.ContMgrRestore, &opt, nil); err != nil {\nreturn fmt.Errorf(\"restoring container %q: %v\", cid, err)\n}\n@@ -319,7 +319,7 @@ func (s *Sandbox) Processes(cid string) ([]*control.Process, error) {\ndefer conn.Close()\nvar pl []*control.Process\n- if err := conn.Call(boot.ContainerProcesses, &cid, &pl); err != nil {\n+ if err := conn.Call(boot.ContMgrProcesses, &cid, &pl); err != nil {\nreturn nil, fmt.Errorf(\"retrieving process data from sandbox: %v\", err)\n}\nreturn pl, nil\n@@ -347,7 +347,7 @@ func (s *Sandbox) Execute(conf *config.Config, args *control.ExecArgs) (int32, e\n// Send a message to the sandbox control server to start the container.\nvar pid int32\n- if err := conn.Call(boot.ContainerExecuteAsync, args, &pid); err != nil {\n+ if err := conn.Call(boot.ContMgrExecuteAsync, args, &pid); err != nil {\nreturn 0, fmt.Errorf(\"executing command %q in sandbox: %v\", args, err)\n}\nreturn pid, nil\n@@ -365,7 +365,7 @@ func (s *Sandbox) Event(cid string) (*boot.EventOut, error) {\nvar e boot.EventOut\n// TODO(b/129292330): Pass in the container id (cid) here. 
The sandbox\n// should return events only for that container.\n- if err := conn.Call(boot.ContainerEvent, nil, &e); err != nil {\n+ if err := conn.Call(boot.ContMgrEvent, nil, &e); err != nil {\nreturn nil, fmt.Errorf(\"retrieving event data from sandbox: %v\", err)\n}\ne.Event.ID = cid\n@@ -814,7 +814,7 @@ func (s *Sandbox) Wait(cid string) (unix.WaitStatus, error) {\n// Try the Wait RPC to the sandbox.\nvar ws unix.WaitStatus\n- err = conn.Call(boot.ContainerWait, &cid, &ws)\n+ err = conn.Call(boot.ContMgrWait, &cid, &ws)\nconn.Close()\nif err == nil {\nif s.IsRootContainer(cid) {\n@@ -865,7 +865,7 @@ func (s *Sandbox) WaitPID(cid string, pid int32) (unix.WaitStatus, error) {\nPID: pid,\nCID: cid,\n}\n- if err := conn.Call(boot.ContainerWaitPID, args, &ws); err != nil {\n+ if err := conn.Call(boot.ContMgrWaitPID, args, &ws); err != nil {\nreturn ws, fmt.Errorf(\"waiting on PID %d in sandbox %q: %v\", pid, s.ID, err)\n}\nreturn ws, nil\n@@ -915,7 +915,7 @@ func (s *Sandbox) SignalContainer(cid string, sig unix.Signal, all bool) error {\nSigno: int32(sig),\nMode: mode,\n}\n- if err := conn.Call(boot.ContainerSignal, &args, nil); err != nil {\n+ if err := conn.Call(boot.ContMgrSignal, &args, nil); err != nil {\nreturn fmt.Errorf(\"signaling container %q: %v\", cid, err)\n}\nreturn nil\n@@ -944,7 +944,7 @@ func (s *Sandbox) SignalProcess(cid string, pid int32, sig unix.Signal, fgProces\nPID: pid,\nMode: mode,\n}\n- if err := conn.Call(boot.ContainerSignal, &args, nil); err != nil {\n+ if err := conn.Call(boot.ContMgrSignal, &args, nil); err != nil {\nreturn fmt.Errorf(\"signaling container %q PID %d: %v\", cid, pid, err)\n}\nreturn nil\n@@ -966,7 +966,7 @@ func (s *Sandbox) Checkpoint(cid string, f *os.File) error {\n},\n}\n- if err := conn.Call(boot.ContainerCheckpoint, &opt, nil); err != nil {\n+ if err := conn.Call(boot.ContMgrCheckpoint, &opt, nil); err != nil {\nreturn fmt.Errorf(\"checkpointing container %q: %v\", cid, err)\n}\nreturn nil\n@@ -981,7 +981,7 @@ func (s *Sandbox) Pause(cid string) error {\n}\ndefer conn.Close()\n- if err := conn.Call(boot.ContainerPause, nil, nil); err != nil {\n+ if err := conn.Call(boot.ContMgrPause, nil, nil); err != nil {\nreturn fmt.Errorf(\"pausing container %q: %v\", cid, err)\n}\nreturn nil\n@@ -996,7 +996,7 @@ func (s *Sandbox) Resume(cid string) error {\n}\ndefer conn.Close()\n- if err := conn.Call(boot.ContainerResume, nil, nil); err != nil {\n+ if err := conn.Call(boot.ContMgrResume, nil, nil); err != nil {\nreturn fmt.Errorf(\"resuming container %q: %v\", cid, err)\n}\nreturn nil\n@@ -1024,7 +1024,7 @@ func (s *Sandbox) Stacks() (string, error) {\ndefer conn.Close()\nvar stacks string\n- if err := conn.Call(boot.SandboxStacks, nil, &stacks); err != nil {\n+ if err := conn.Call(boot.DebugStacks, nil, &stacks); err != nil {\nreturn \"\", fmt.Errorf(\"getting sandbox %q stacks: %v\", s.ID, err)\n}\nreturn stacks, nil\n@@ -1043,7 +1043,7 @@ func (s *Sandbox) HeapProfile(f *os.File, delay time.Duration) error {\nFilePayload: urpc.FilePayload{Files: []*os.File{f}},\nDelay: delay,\n}\n- return conn.Call(boot.HeapProfile, &opts, nil)\n+ return conn.Call(boot.ProfileHeap, &opts, nil)\n}\n// CPUProfile collects a CPU profile.\n@@ -1059,7 +1059,7 @@ func (s *Sandbox) CPUProfile(f *os.File, duration time.Duration) error {\nFilePayload: urpc.FilePayload{Files: []*os.File{f}},\nDuration: duration,\n}\n- return conn.Call(boot.CPUProfile, &opts, nil)\n+ return conn.Call(boot.ProfileCPU, &opts, nil)\n}\n// BlockProfile writes a block profile to the given 
file.\n@@ -1075,7 +1075,7 @@ func (s *Sandbox) BlockProfile(f *os.File, duration time.Duration) error {\nFilePayload: urpc.FilePayload{Files: []*os.File{f}},\nDuration: duration,\n}\n- return conn.Call(boot.BlockProfile, &opts, nil)\n+ return conn.Call(boot.ProfileBlock, &opts, nil)\n}\n// MutexProfile writes a mutex profile to the given file.\n@@ -1091,7 +1091,7 @@ func (s *Sandbox) MutexProfile(f *os.File, duration time.Duration) error {\nFilePayload: urpc.FilePayload{Files: []*os.File{f}},\nDuration: duration,\n}\n- return conn.Call(boot.MutexProfile, &opts, nil)\n+ return conn.Call(boot.ProfileMutex, &opts, nil)\n}\n// Trace collects an execution trace.\n@@ -1107,7 +1107,7 @@ func (s *Sandbox) Trace(f *os.File, duration time.Duration) error {\nFilePayload: urpc.FilePayload{Files: []*os.File{f}},\nDuration: duration,\n}\n- return conn.Call(boot.Trace, &opts, nil)\n+ return conn.Call(boot.ProfileTrace, &opts, nil)\n}\n// ChangeLogging changes logging options.\n@@ -1119,7 +1119,7 @@ func (s *Sandbox) ChangeLogging(args control.LoggingArgs) error {\n}\ndefer conn.Close()\n- if err := conn.Call(boot.ChangeLogging, &args, nil); err != nil {\n+ if err := conn.Call(boot.LoggingChange, &args, nil); err != nil {\nreturn fmt.Errorf(\"changing sandbox %q logging: %v\", s.ID, err)\n}\nreturn nil\n@@ -1150,7 +1150,7 @@ func (s *Sandbox) destroyContainer(cid string) error {\nreturn err\n}\ndefer conn.Close()\n- if err := conn.Call(boot.ContainerDestroy, &cid, nil); err != nil {\n+ if err := conn.Call(boot.ContMgrDestroySubcontainer, &cid, nil); err != nil {\nreturn fmt.Errorf(\"destroying container %q: %v\", cid, err)\n}\nreturn nil\n" } ]
Go
Apache License 2.0
google/gvisor
Use consistent naming for subcontainers It was confusing to find functions relating to root and non-root containers. Replace "non-root" with "subcontainer" and make naming consistent in Sandbox and controller. PiperOrigin-RevId: 384512518
259,992
13.07.2021 17:18:26
25,200
85a0a353ad185946d39463fddb3ec2cb37876371
Replace whitelist with allowlist
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/control/logging.go", "new_path": "pkg/sentry/control/logging.go", "diff": "@@ -50,20 +50,20 @@ type LoggingArgs struct {\n// enable strace at all. If this flag is false then a completely\n// pristine copy of the syscall table will be swapped in. This\n// approach is used to remain consistent with an empty strace\n- // whitelist meaning trace all system calls.\n+ // allowlist meaning trace all system calls.\nEnableStrace bool\n- // Strace is the whitelist of syscalls to trace to log. If this\n- // and StraceEventWhitelist are empty trace all system calls.\n- StraceWhitelist []string\n+ // Strace is the allowlist of syscalls to trace to log. If this\n+ // and StraceEventAllowlist are empty trace all system calls.\n+ StraceAllowlist []string\n// SetEventStrace is a flag used to indicate that event strace\n// related arguments were passed in.\nSetEventStrace bool\n- // StraceEventWhitelist is the whitelist of syscalls to trace\n+ // StraceEventAllowlist is the allowlist of syscalls to trace\n// to event log.\n- StraceEventWhitelist []string\n+ StraceEventAllowlist []string\n}\n// Logging provides functions related to logging.\n@@ -107,13 +107,13 @@ func (l *Logging) Change(args *LoggingArgs, code *int) error {\nfunc (l *Logging) configureStrace(args *LoggingArgs) error {\nif args.EnableStrace {\n- // Install the whitelist specified.\n- if len(args.StraceWhitelist) > 0 {\n- if err := strace.Enable(args.StraceWhitelist, strace.SinkTypeLog); err != nil {\n+ // Install the allowlist specified.\n+ if len(args.StraceAllowlist) > 0 {\n+ if err := strace.Enable(args.StraceAllowlist, strace.SinkTypeLog); err != nil {\nreturn err\n}\n} else {\n- // For convenience, if strace is enabled but whitelist\n+ // For convenience, if strace is enabled but allowlist\n// is empty, enable everything to log.\nstrace.EnableAll(strace.SinkTypeLog)\n}\n@@ -125,8 +125,8 @@ func (l *Logging) configureStrace(args *LoggingArgs) error {\n}\nfunc (l *Logging) configureEventStrace(args *LoggingArgs) error {\n- if len(args.StraceEventWhitelist) > 0 {\n- if err := strace.Enable(args.StraceEventWhitelist, strace.SinkTypeEvent); err != nil {\n+ if len(args.StraceEventAllowlist) > 0 {\n+ if err := strace.Enable(args.StraceEventAllowlist, strace.SinkTypeEvent); err != nil {\nreturn err\n}\n} else {\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/strace/strace.go", "new_path": "pkg/sentry/strace/strace.go", "diff": "@@ -819,10 +819,10 @@ func convertToSyscallFlag(sinks SinkType) uint32 {\nreturn ret\n}\n-// Enable enables the syscalls in whitelist in all syscall tables.\n+// Enable enables the syscalls in allowlist in all syscall tables.\n//\n// Preconditions: Initialize has been called.\n-func Enable(whitelist []string, sinks SinkType) error {\n+func Enable(allowlist []string, sinks SinkType) error {\nflags := convertToSyscallFlag(sinks)\nfor _, table := range kernel.SyscallTables() {\n// Is this known?\n@@ -832,7 +832,7 @@ func Enable(whitelist []string, sinks SinkType) error {\n}\n// Convert to a set of system calls numbers.\n- wl, err := sys.ConvertToSysnoMap(whitelist)\n+ wl, err := sys.ConvertToSysnoMap(allowlist)\nif err != nil {\nreturn err\n}\n" }, { "change_type": "MODIFY", "old_path": "runsc/cmd/debug.go", "new_path": "runsc/cmd/debug.go", "diff": "@@ -166,7 +166,7 @@ func (d *Debug) Execute(_ context.Context, f *flag.FlagSet, args ...interface{})\nlog.Infof(\"Enabling strace for syscalls: %s\", d.strace)\nargs.SetStrace = true\nargs.EnableStrace = true\n- 
args.StraceWhitelist = strings.Split(d.strace, \",\")\n+ args.StraceAllowlist = strings.Split(d.strace, \",\")\n}\nif len(d.logLevel) != 0 {\n" }, { "change_type": "MODIFY", "old_path": "test/runtimes/runner/lib/lib.go", "new_path": "test/runtimes/runner/lib/lib.go", "diff": "@@ -152,7 +152,7 @@ func getTests(ctx context.Context, d *dockerutil.Container, lang, image string,\nreturn itests, nil\n}\n-// getBlacklist reads the exclude file and returns a set of test names to\n+// getExcludes reads the exclude file and returns a set of test names to\n// exclude.\nfunc getExcludes(excludeFile string) (map[string]struct{}, error) {\nexcludes := make(map[string]struct{})\n" }, { "change_type": "MODIFY", "old_path": "website/cmd/server/main.go", "new_path": "website/cmd/server/main.go", "diff": "@@ -258,7 +258,7 @@ const pprofFixedPrefix = \"https://storage.googleapis.com/\"\n// allowedBuckets enforces constraints on the pprof target.\n//\n// If the continuous integration system is changed in the future to use\n-// additional buckets, they may be whitelisted here. See registerProfile.\n+// additional buckets, they may be allowed here. See registerProfile.\nvar allowedBuckets = map[string]bool{\n\"gvisor-buildkite\": true,\n}\n" } ]
Go
Apache License 2.0
google/gvisor
Replace whitelist with allowlist PiperOrigin-RevId: 384586164
259,891
14.07.2021 13:40:00
25,200
a1044cb88192ad3891500c867c22f5cb388054e5
testing: shrink exhaustion test size to avoid timeouts Tested via: ``` bazel test \ //test/syscalls:socket_ipv4_udp_unbound_loopback_nogotsan_test_runsc_ptrace --runs_per_test=2000 ```
[ { "change_type": "MODIFY", "old_path": "test/syscalls/linux/socket_ipv4_udp_unbound_loopback_nogotsan.cc", "new_path": "test/syscalls/linux/socket_ipv4_udp_unbound_loopback_nogotsan.cc", "diff": "@@ -31,7 +31,7 @@ using IPv4UDPUnboundSocketNogotsanTest = SimpleSocketTest;\n// We disable S/R because this test creates a large number of sockets.\nTEST_P(IPv4UDPUnboundSocketNogotsanTest, UDPConnectPortExhaustion) {\nauto receiver1 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());\n- constexpr int kClients = 65536;\n+ const int kClients = ASSERT_NO_ERRNO_AND_VALUE(MaybeLimitEphemeralPorts());\n// Bind the first socket to the loopback and take note of the selected port.\nauto addr = V4Loopback();\nASSERT_THAT(bind(receiver1->get(), AsSockAddr(&addr.addr), addr.addr_len),\n@@ -61,7 +61,7 @@ TEST_P(IPv4UDPUnboundSocketNogotsanTest, UDPConnectPortExhaustion) {\n// We disable S/R because this test creates a large number of sockets.\nTEST_P(IPv4UDPUnboundSocketNogotsanTest, UDPBindPortExhaustion) {\nauto receiver1 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());\n- constexpr int kClients = 65536;\n+ const int kClients = ASSERT_NO_ERRNO_AND_VALUE(MaybeLimitEphemeralPorts());\nauto addr = V4Loopback();\n// Disable cooperative S/R as we are making too many syscalls.\nDisableSave ds;\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/socket_test_util.cc", "new_path": "test/syscalls/linux/socket_test_util.cc", "diff": "@@ -1093,15 +1093,22 @@ PosixErrorOr<int> MaybeLimitEphemeralPorts() {\nif (!access(kRangeFile, W_OK)) {\nASSIGN_OR_RETURN_ERRNO(FileDescriptor fd,\nOpen(kRangeFile, O_WRONLY | O_TRUNC, 0));\n- max = min + 50;\n- const std::string small_range = absl::StrFormat(\"%d %d\", min, max);\n+ int newMax = min + 50;\n+ const std::string small_range = absl::StrFormat(\"%d %d\", min, newMax);\nint n = write(fd.get(), small_range.c_str(), small_range.size());\nif (n < 0) {\n+ // Hostinet doesn't allow modifying the host port range. And if we're root\n+ // (as we are in some tests), access and open will succeed even if the\n+ // file mode is readonly.\n+ if (errno != EACCES) {\nreturn PosixError(\nerrno,\nabsl::StrFormat(\"write(%d [%s], \\\"%s\\\", %d)\", fd.get(), kRangeFile,\nsmall_range.c_str(), small_range.size()));\n}\n+ } else {\n+ max = newMax;\n+ }\n}\nreturn max - min;\n}\n" } ]
Go
Apache License 2.0
google/gvisor
testing: shrink exhaustion test size to avoid timeouts Tested via: ``` bazel test \ //test/syscalls:socket_ipv4_udp_unbound_loopback_nogotsan_test_runsc_ptrace --runs_per_test=2000 ``` PiperOrigin-RevId: 384773477
259,992
14.07.2021 15:17:40
25,200
d02be7858ac5d185751df9a15a27340e349d19e5
Replace whitelist with allowlist from docs
[ { "change_type": "MODIFY", "old_path": "website/blog/2019-11-18-security-basics.md", "new_path": "website/blog/2019-11-18-security-basics.md", "diff": "@@ -188,11 +188,11 @@ for direct access to some files. And most files will be remotely accessed\nthrough the Gofers, in which case no FDs are donated to the Sentry.\nThe Sentry itself is only allowed access to specific\n-[whitelisted syscalls](https://github.com/google/gvisor/blob/master/runsc/config/config.go).\n+[allowlisted syscalls](https://github.com/google/gvisor/blob/master/runsc/config/config.go).\nWithout networking, the Sentry needs 53 host syscalls in order to function, and\n-with networking, it uses an additional 15[^8]. By limiting the whitelist to only\n+with networking, it uses an additional 15[^8]. By limiting the allowlist to only\nthese needed syscalls, we radically reduce the amount of host OS attack surface.\n-If any attempts are made to call something outside the whitelist, it is\n+If any attempts are made to call something outside the allowlist, it is\nimmediately blocked and the sandbox is killed by the Host OS.\n### Sentry/Gofer Interface:\n@@ -281,6 +281,8 @@ other ways the community can contribute to help make gVisor safe, fast and\nstable.\n<br>\n<br>\n+**Updated (2021-07-14):** this post was updated to use more inclusive language.\n+<br>\n--------------------------------------------------------------------------------\n" } ]
Go
Apache License 2.0
google/gvisor
Replace whitelist with allowlist from docs PiperOrigin-RevId: 384796852
260,001
14.07.2021 17:43:50
25,200
5c20fd3bbd30b0a9ecb32995b98cf194dc9600d7
Add verity symlink tests
[ { "change_type": "MODIFY", "old_path": "test/syscalls/BUILD", "new_path": "test/syscalls/BUILD", "diff": "@@ -883,6 +883,10 @@ syscall_test(\ntest = \"//test/syscalls/linux:symlink_test\",\n)\n+syscall_test(\n+ test = \"//test/syscalls/linux:verity_symlink_test\",\n+)\n+\nsyscall_test(\nadd_overlay = True,\ntest = \"//test/syscalls/linux:sync_test\",\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/BUILD", "new_path": "test/syscalls/linux/BUILD", "diff": "@@ -3722,6 +3722,23 @@ cc_binary(\n],\n)\n+cc_binary(\n+ name = \"verity_symlink_test\",\n+ testonly = 1,\n+ srcs = [\"verity_symlink.cc\"],\n+ linkstatic = 1,\n+ deps = [\n+ \"//test/util:capability_util\",\n+ gtest,\n+ \"//test/util:fs_util\",\n+ \"//test/util:mount_util\",\n+ \"//test/util:temp_path\",\n+ \"//test/util:test_main\",\n+ \"//test/util:test_util\",\n+ \"//test/util:verity_util\",\n+ ],\n+)\n+\ncc_binary(\nname = \"sync_test\",\ntestonly = 1,\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/verity_getdents.cc", "new_path": "test/syscalls/linux/verity_getdents.cc", "diff": "@@ -58,16 +58,16 @@ class GetDentsTest : public ::testing::Test {\n};\nTEST_F(GetDentsTest, GetDents) {\n- std::string verity_dir =\n- ASSERT_NO_ERRNO_AND_VALUE(MountVerity(tmpfs_dir_.path(), filename_));\n+ std::string verity_dir = ASSERT_NO_ERRNO_AND_VALUE(\n+ MountVerity(tmpfs_dir_.path(), filename_, /*targets=*/{}));\nstd::vector<std::string> expect = {\".\", \"..\", filename_};\nEXPECT_NO_ERRNO(DirContains(verity_dir, expect, /*exclude=*/{}));\n}\nTEST_F(GetDentsTest, Deleted) {\n- std::string verity_dir =\n- ASSERT_NO_ERRNO_AND_VALUE(MountVerity(tmpfs_dir_.path(), filename_));\n+ std::string verity_dir = ASSERT_NO_ERRNO_AND_VALUE(\n+ MountVerity(tmpfs_dir_.path(), filename_, /*targets=*/{}));\nEXPECT_THAT(unlink(JoinPath(tmpfs_dir_.path(), filename_).c_str()),\nSyscallSucceeds());\n@@ -77,8 +77,8 @@ TEST_F(GetDentsTest, Deleted) {\n}\nTEST_F(GetDentsTest, Renamed) {\n- std::string verity_dir =\n- ASSERT_NO_ERRNO_AND_VALUE(MountVerity(tmpfs_dir_.path(), filename_));\n+ std::string verity_dir = ASSERT_NO_ERRNO_AND_VALUE(\n+ MountVerity(tmpfs_dir_.path(), filename_, /*targets=*/{}));\nstd::string new_file_name = \"renamed-\" + filename_;\nEXPECT_THAT(rename(JoinPath(tmpfs_dir_.path(), filename_).c_str(),\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/verity_ioctl.cc", "new_path": "test/syscalls/linux/verity_ioctl.cc", "diff": "@@ -105,8 +105,8 @@ TEST_F(IoctlTest, Measure) {\n}\nTEST_F(IoctlTest, Mount) {\n- std::string verity_dir =\n- ASSERT_NO_ERRNO_AND_VALUE(MountVerity(tmpfs_dir_.path(), filename_));\n+ std::string verity_dir = ASSERT_NO_ERRNO_AND_VALUE(\n+ MountVerity(tmpfs_dir_.path(), filename_, /*targets=*/{}));\n// Make sure the file can be open and read in the mounted verity fs.\nauto const verity_fd = ASSERT_NO_ERRNO_AND_VALUE(\n@@ -117,8 +117,8 @@ TEST_F(IoctlTest, Mount) {\n}\nTEST_F(IoctlTest, NonExistingFile) {\n- std::string verity_dir =\n- ASSERT_NO_ERRNO_AND_VALUE(MountVerity(tmpfs_dir_.path(), filename_));\n+ std::string verity_dir = ASSERT_NO_ERRNO_AND_VALUE(\n+ MountVerity(tmpfs_dir_.path(), filename_, /*targets=*/{}));\n// Confirm that opening a non-existing file in the verity-enabled directory\n// triggers the expected error instead of verification failure.\n@@ -128,8 +128,8 @@ TEST_F(IoctlTest, NonExistingFile) {\n}\nTEST_F(IoctlTest, ModifiedFile) {\n- std::string verity_dir =\n- ASSERT_NO_ERRNO_AND_VALUE(MountVerity(tmpfs_dir_.path(), filename_));\n+ std::string verity_dir 
= ASSERT_NO_ERRNO_AND_VALUE(\n+ MountVerity(tmpfs_dir_.path(), filename_, /*targets=*/{}));\n// Modify the file and check verification failure upon reading from it.\nauto const fd = ASSERT_NO_ERRNO_AND_VALUE(\n@@ -143,8 +143,8 @@ TEST_F(IoctlTest, ModifiedFile) {\n}\nTEST_F(IoctlTest, ModifiedMerkle) {\n- std::string verity_dir =\n- ASSERT_NO_ERRNO_AND_VALUE(MountVerity(tmpfs_dir_.path(), filename_));\n+ std::string verity_dir = ASSERT_NO_ERRNO_AND_VALUE(\n+ MountVerity(tmpfs_dir_.path(), filename_, /*targets=*/{}));\n// Modify the Merkle file and check verification failure upon opening the\n// corresponding file.\n@@ -158,8 +158,8 @@ TEST_F(IoctlTest, ModifiedMerkle) {\n}\nTEST_F(IoctlTest, ModifiedDirMerkle) {\n- std::string verity_dir =\n- ASSERT_NO_ERRNO_AND_VALUE(MountVerity(tmpfs_dir_.path(), filename_));\n+ std::string verity_dir = ASSERT_NO_ERRNO_AND_VALUE(\n+ MountVerity(tmpfs_dir_.path(), filename_, /*targets=*/{}));\n// Modify the Merkle file for the parent directory and check verification\n// failure upon opening the corresponding file.\n@@ -173,8 +173,8 @@ TEST_F(IoctlTest, ModifiedDirMerkle) {\n}\nTEST_F(IoctlTest, Stat) {\n- std::string verity_dir =\n- ASSERT_NO_ERRNO_AND_VALUE(MountVerity(tmpfs_dir_.path(), filename_));\n+ std::string verity_dir = ASSERT_NO_ERRNO_AND_VALUE(\n+ MountVerity(tmpfs_dir_.path(), filename_, /*targets=*/{}));\nstruct stat st;\nEXPECT_THAT(stat(JoinPath(verity_dir, filename_).c_str(), &st),\n@@ -182,8 +182,8 @@ TEST_F(IoctlTest, Stat) {\n}\nTEST_F(IoctlTest, ModifiedStat) {\n- std::string verity_dir =\n- ASSERT_NO_ERRNO_AND_VALUE(MountVerity(tmpfs_dir_.path(), filename_));\n+ std::string verity_dir = ASSERT_NO_ERRNO_AND_VALUE(\n+ MountVerity(tmpfs_dir_.path(), filename_, /*targets=*/{}));\nEXPECT_THAT(chmod(JoinPath(tmpfs_dir_.path(), filename_).c_str(), 0644),\nSyscallSucceeds());\n@@ -193,8 +193,8 @@ TEST_F(IoctlTest, ModifiedStat) {\n}\nTEST_F(IoctlTest, DeleteFile) {\n- std::string verity_dir =\n- ASSERT_NO_ERRNO_AND_VALUE(MountVerity(tmpfs_dir_.path(), filename_));\n+ std::string verity_dir = ASSERT_NO_ERRNO_AND_VALUE(\n+ MountVerity(tmpfs_dir_.path(), filename_, /*targets=*/{}));\nEXPECT_THAT(unlink(JoinPath(tmpfs_dir_.path(), filename_).c_str()),\nSyscallSucceeds());\n@@ -203,8 +203,8 @@ TEST_F(IoctlTest, DeleteFile) {\n}\nTEST_F(IoctlTest, DeleteMerkle) {\n- std::string verity_dir =\n- ASSERT_NO_ERRNO_AND_VALUE(MountVerity(tmpfs_dir_.path(), filename_));\n+ std::string verity_dir = ASSERT_NO_ERRNO_AND_VALUE(\n+ MountVerity(tmpfs_dir_.path(), filename_, /*targets=*/{}));\nEXPECT_THAT(\nunlink(MerklePath(JoinPath(tmpfs_dir_.path(), filename_)).c_str()),\n@@ -214,8 +214,8 @@ TEST_F(IoctlTest, DeleteMerkle) {\n}\nTEST_F(IoctlTest, RenameFile) {\n- std::string verity_dir =\n- ASSERT_NO_ERRNO_AND_VALUE(MountVerity(tmpfs_dir_.path(), filename_));\n+ std::string verity_dir = ASSERT_NO_ERRNO_AND_VALUE(\n+ MountVerity(tmpfs_dir_.path(), filename_, /*targets=*/{}));\nstd::string new_file_name = \"renamed-\" + filename_;\nEXPECT_THAT(rename(JoinPath(tmpfs_dir_.path(), filename_).c_str(),\n@@ -226,8 +226,8 @@ TEST_F(IoctlTest, RenameFile) {\n}\nTEST_F(IoctlTest, RenameMerkle) {\n- std::string verity_dir =\n- ASSERT_NO_ERRNO_AND_VALUE(MountVerity(tmpfs_dir_.path(), filename_));\n+ std::string verity_dir = ASSERT_NO_ERRNO_AND_VALUE(\n+ MountVerity(tmpfs_dir_.path(), filename_, /*targets=*/{}));\nstd::string new_file_name = \"renamed-\" + filename_;\nEXPECT_THAT(\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/verity_mmap.cc", "new_path": 
"test/syscalls/linux/verity_mmap.cc", "diff": "@@ -57,8 +57,8 @@ class MmapTest : public ::testing::Test {\n};\nTEST_F(MmapTest, MmapRead) {\n- std::string verity_dir =\n- ASSERT_NO_ERRNO_AND_VALUE(MountVerity(tmpfs_dir_.path(), filename_));\n+ std::string verity_dir = ASSERT_NO_ERRNO_AND_VALUE(\n+ MountVerity(tmpfs_dir_.path(), filename_, /*targets=*/{}));\n// Make sure the file can be open and mmapped in the mounted verity fs.\nauto const verity_fd = ASSERT_NO_ERRNO_AND_VALUE(\n@@ -71,8 +71,8 @@ TEST_F(MmapTest, MmapRead) {\n}\nTEST_F(MmapTest, ModifiedBeforeMmap) {\n- std::string verity_dir =\n- ASSERT_NO_ERRNO_AND_VALUE(MountVerity(tmpfs_dir_.path(), filename_));\n+ std::string verity_dir = ASSERT_NO_ERRNO_AND_VALUE(\n+ MountVerity(tmpfs_dir_.path(), filename_, /*targets=*/{}));\n// Modify the file and check verification failure upon mmapping.\nauto const fd = ASSERT_NO_ERRNO_AND_VALUE(\n@@ -90,8 +90,8 @@ TEST_F(MmapTest, ModifiedBeforeMmap) {\n}\nTEST_F(MmapTest, ModifiedAfterMmap) {\n- std::string verity_dir =\n- ASSERT_NO_ERRNO_AND_VALUE(MountVerity(tmpfs_dir_.path(), filename_));\n+ std::string verity_dir = ASSERT_NO_ERRNO_AND_VALUE(\n+ MountVerity(tmpfs_dir_.path(), filename_, /*targets=*/{}));\nauto const verity_fd = ASSERT_NO_ERRNO_AND_VALUE(\nOpen(JoinPath(verity_dir, filename_), O_RDONLY, 0777));\n@@ -126,8 +126,8 @@ INSTANTIATE_TEST_SUITE_P(\n::testing::ValuesIn({MAP_SHARED, MAP_PRIVATE})));\nTEST_P(MmapParamTest, Mmap) {\n- std::string verity_dir =\n- ASSERT_NO_ERRNO_AND_VALUE(MountVerity(tmpfs_dir_.path(), filename_));\n+ std::string verity_dir = ASSERT_NO_ERRNO_AND_VALUE(\n+ MountVerity(tmpfs_dir_.path(), filename_, /*targets=*/{}));\n// Make sure the file can be open and mmapped in the mounted verity fs.\nauto const verity_fd = ASSERT_NO_ERRNO_AND_VALUE(\n" }, { "change_type": "ADD", "old_path": null, "new_path": "test/syscalls/linux/verity_symlink.cc", "diff": "+// Copyright 2021 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+#include <stdint.h>\n+#include <stdlib.h>\n+#include <sys/mount.h>\n+#include <sys/stat.h>\n+\n+#include \"gmock/gmock.h\"\n+#include \"gtest/gtest.h\"\n+#include \"test/util/capability_util.h\"\n+#include \"test/util/fs_util.h\"\n+#include \"test/util/mount_util.h\"\n+#include \"test/util/temp_path.h\"\n+#include \"test/util/test_util.h\"\n+#include \"test/util/verity_util.h\"\n+\n+namespace gvisor {\n+namespace testing {\n+\n+namespace {\n+\n+const char kSymlink[] = \"verity_symlink\";\n+\n+class SymlinkTest : public ::testing::Test {\n+ protected:\n+ void SetUp() override {\n+ // Verity is implemented in VFS2.\n+ SKIP_IF(IsRunningWithVFS1());\n+\n+ SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_ADMIN)));\n+ // Mount a tmpfs file system, to be wrapped by a verity fs.\n+ tmpfs_dir_ = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());\n+ ASSERT_THAT(mount(\"\", tmpfs_dir_.path().c_str(), \"tmpfs\", 0, \"\"),\n+ SyscallSucceeds());\n+\n+ // Create a new file in the tmpfs mount.\n+ file_ = 
ASSERT_NO_ERRNO_AND_VALUE(\n+ TempPath::CreateFileWith(tmpfs_dir_.path(), kContents, 0777));\n+ filename_ = Basename(file_.path());\n+\n+ // Create a symlink to the file.\n+ ASSERT_THAT(symlink(file_.path().c_str(),\n+ JoinPath(tmpfs_dir_.path(), kSymlink).c_str()),\n+ SyscallSucceeds());\n+ }\n+\n+ TempPath tmpfs_dir_;\n+ TempPath file_;\n+ std::string filename_;\n+};\n+\n+TEST_F(SymlinkTest, Success) {\n+ std::string verity_dir = ASSERT_NO_ERRNO_AND_VALUE(\n+ MountVerity(tmpfs_dir_.path(), filename_,\n+ {EnableTarget(kSymlink, O_RDONLY | O_NOFOLLOW)}));\n+\n+ char buf[256];\n+ EXPECT_THAT(\n+ readlink(JoinPath(verity_dir, kSymlink).c_str(), buf, sizeof(buf)),\n+ SyscallSucceeds());\n+ auto const verity_fd = ASSERT_NO_ERRNO_AND_VALUE(\n+ Open(JoinPath(verity_dir, kSymlink).c_str(), O_RDONLY, 0777));\n+ EXPECT_THAT(ReadFd(verity_fd.get(), buf, sizeof(kContents)),\n+ SyscallSucceeds());\n+}\n+\n+TEST_F(SymlinkTest, DeleteLink) {\n+ std::string verity_dir = ASSERT_NO_ERRNO_AND_VALUE(\n+ MountVerity(tmpfs_dir_.path(), filename_,\n+ {EnableTarget(kSymlink, O_RDONLY | O_NOFOLLOW)}));\n+\n+ ASSERT_THAT(unlink(JoinPath(tmpfs_dir_.path(), kSymlink).c_str()),\n+ SyscallSucceeds());\n+ char buf[256];\n+ EXPECT_THAT(\n+ readlink(JoinPath(verity_dir, kSymlink).c_str(), buf, sizeof(buf)),\n+ SyscallFailsWithErrno(EIO));\n+ EXPECT_THAT(open(JoinPath(verity_dir, kSymlink).c_str(), O_RDONLY, 0777),\n+ SyscallFailsWithErrno(EIO));\n+}\n+\n+TEST_F(SymlinkTest, ModifyLink) {\n+ std::string verity_dir = ASSERT_NO_ERRNO_AND_VALUE(\n+ MountVerity(tmpfs_dir_.path(), filename_,\n+ {EnableTarget(kSymlink, O_RDONLY | O_NOFOLLOW)}));\n+\n+ ASSERT_THAT(unlink(JoinPath(tmpfs_dir_.path(), kSymlink).c_str()),\n+ SyscallSucceeds());\n+\n+ std::string newlink = \"newlink\";\n+ ASSERT_THAT(symlink(JoinPath(tmpfs_dir_.path(), newlink).c_str(),\n+ JoinPath(tmpfs_dir_.path(), kSymlink).c_str()),\n+ SyscallSucceeds());\n+ char buf[256];\n+ EXPECT_THAT(\n+ readlink(JoinPath(verity_dir, kSymlink).c_str(), buf, sizeof(buf)),\n+ SyscallFailsWithErrno(EIO));\n+ EXPECT_THAT(open(JoinPath(verity_dir, kSymlink).c_str(), O_RDONLY, 0777),\n+ SyscallFailsWithErrno(EIO));\n+}\n+\n+} // namespace\n+\n+} // namespace testing\n+} // namespace gvisor\n" }, { "change_type": "MODIFY", "old_path": "test/util/verity_util.cc", "new_path": "test/util/verity_util.cc", "diff": "@@ -55,17 +55,26 @@ PosixError FlipRandomBit(int fd, int size) {\n}\nPosixErrorOr<std::string> MountVerity(std::string tmpfs_dir,\n- std::string filename) {\n+ std::string filename,\n+ std::vector<EnableTarget> targets) {\n// Mount a verity fs on the existing tmpfs mount.\nstd::string mount_opts = \"lower_path=\" + tmpfs_dir;\nASSIGN_OR_RETURN_ERRNO(TempPath verity_dir, TempPath::CreateDir());\nRETURN_ERROR_IF_SYSCALL_FAIL(\nmount(\"\", verity_dir.path().c_str(), \"verity\", 0, mount_opts.c_str()));\n- // Enable both the file and the directory.\n+ // Enable the file, symlink(if provided) and the directory.\nASSIGN_OR_RETURN_ERRNO(\nauto fd, Open(JoinPath(verity_dir.path(), filename), O_RDONLY, 0777));\nRETURN_ERROR_IF_SYSCALL_FAIL(ioctl(fd.get(), FS_IOC_ENABLE_VERITY));\n+\n+ for (const EnableTarget& target : targets) {\n+ ASSIGN_OR_RETURN_ERRNO(\n+ auto target_fd,\n+ Open(JoinPath(verity_dir.path(), target.path), target.flags, 0777));\n+ RETURN_ERROR_IF_SYSCALL_FAIL(ioctl(target_fd.get(), FS_IOC_ENABLE_VERITY));\n+ }\n+\nASSIGN_OR_RETURN_ERRNO(auto dir_fd, Open(verity_dir.path(), O_RDONLY, 0777));\nRETURN_ERROR_IF_SYSCALL_FAIL(ioctl(dir_fd.get(), FS_IOC_ENABLE_VERITY));\n" }, 
{ "change_type": "MODIFY", "old_path": "test/util/verity_util.h", "new_path": "test/util/verity_util.h", "diff": "#include <stdint.h>\n+#include <vector>\n+\n#include \"test/util/posix_error.h\"\nnamespace gvisor {\n@@ -44,6 +46,13 @@ struct fsverity_digest {\nunsigned char digest[];\n};\n+struct EnableTarget {\n+ std::string path;\n+ int flags;\n+\n+ EnableTarget(std::string path, int flags) : path(path), flags(flags) {}\n+};\n+\nconstexpr int kMaxDigestSize = 64;\nconstexpr int kDefaultDigestSize = 32;\nconstexpr char kContents[] = \"foobarbaz\";\n@@ -67,7 +76,8 @@ PosixError FlipRandomBit(int fd, int size);\n// Mount a verity on the tmpfs and enable both the file and the direcotry. Then\n// mount a new verity with measured root hash.\nPosixErrorOr<std::string> MountVerity(std::string tmpfs_dir,\n- std::string filename);\n+ std::string filename,\n+ std::vector<EnableTarget> targets);\n} // namespace testing\n} // namespace gvisor\n" } ]
Go
Apache License 2.0
google/gvisor
Add verity symlink tests PiperOrigin-RevId: 384823097
259,891
15.07.2021 15:32:22
25,200
cd45d7b6c893aa763cdc3ef2f4ac86444b622927
netstack: support SO_RCVBUFFORCE TCP is fully supported. As with SO_RCVBUF, other transport protocols perform no-ops per DefaultSocketOptionsHandler.OnSetReceiveBufferSize.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/socket/netstack/netstack.go", "new_path": "pkg/sentry/socket/netstack/netstack.go", "diff": "@@ -49,6 +49,7 @@ import (\n\"gvisor.dev/gvisor/pkg/sentry/fs/fsutil\"\n\"gvisor.dev/gvisor/pkg/sentry/inet\"\n\"gvisor.dev/gvisor/pkg/sentry/kernel\"\n+ \"gvisor.dev/gvisor/pkg/sentry/kernel/auth\"\nktime \"gvisor.dev/gvisor/pkg/sentry/kernel/time\"\n\"gvisor.dev/gvisor/pkg/sentry/socket\"\n\"gvisor.dev/gvisor/pkg/sentry/socket/netfilter\"\n@@ -1682,12 +1683,12 @@ func SetSockOpt(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, level int\nreturn nil\n}\n-func clampBufSize(newSz, min, max int64) int64 {\n+func clampBufSize(newSz, min, max int64, ignoreMax bool) int64 {\n// packetOverheadFactor is used to multiply the value provided by the user on\n// a setsockopt(2) for setting the send/receive buffer sizes sockets.\nconst packetOverheadFactor = 2\n- if newSz > max {\n+ if !ignoreMax && newSz > max {\nnewSz = max\n}\n@@ -1712,7 +1713,7 @@ func setSockOptSocket(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, nam\nv := hostarch.ByteOrder.Uint32(optVal)\nmin, max := ep.SocketOptions().SendBufferLimits()\n- clamped := clampBufSize(int64(v), min, max)\n+ clamped := clampBufSize(int64(v), min, max, false /* ignoreMax */)\nep.SocketOptions().SetSendBufferSize(clamped, true /* notify */)\nreturn nil\n@@ -1723,7 +1724,22 @@ func setSockOptSocket(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, nam\nv := hostarch.ByteOrder.Uint32(optVal)\nmin, max := ep.SocketOptions().ReceiveBufferLimits()\n- clamped := clampBufSize(int64(v), min, max)\n+ clamped := clampBufSize(int64(v), min, max, false /* ignoreMax */)\n+ ep.SocketOptions().SetReceiveBufferSize(clamped, true /* notify */)\n+ return nil\n+\n+ case linux.SO_RCVBUFFORCE:\n+ if len(optVal) < sizeOfInt32 {\n+ return syserr.ErrInvalidArgument\n+ }\n+\n+ if creds := auth.CredentialsFromContext(t); !creds.HasCapability(linux.CAP_NET_ADMIN) {\n+ return syserr.ErrNotPermitted\n+ }\n+\n+ v := hostarch.ByteOrder.Uint32(optVal)\n+ min, max := ep.SocketOptions().ReceiveBufferLimits()\n+ clamped := clampBufSize(int64(v), min, max, true /* ignoreMax */)\nep.SocketOptions().SetReceiveBufferSize(clamped, true /* notify */)\nreturn nil\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/socket/socket.go", "new_path": "pkg/sentry/socket/socket.go", "diff": "@@ -509,7 +509,6 @@ func SetSockOptEmitUnimplementedEvent(t *kernel.Task, name int) {\nlinux.SO_ATTACH_REUSEPORT_EBPF,\nlinux.SO_CNX_ADVICE,\nlinux.SO_DETACH_FILTER,\n- linux.SO_RCVBUFFORCE,\nlinux.SO_SNDBUFFORCE:\nt.Kernel().EmitUnimplementedEvent(t)\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/socketops.go", "new_path": "pkg/tcpip/socketops.go", "diff": "@@ -54,7 +54,7 @@ type SocketOptionsHandler interface {\n// buffer size. 
It also returns the newly set value.\nOnSetSendBufferSize(v int64) (newSz int64)\n- // OnSetReceiveBufferSize is invoked to set the SO_RCVBUFSIZE.\n+ // OnSetReceiveBufferSize is invoked by SO_RCVBUF and SO_RCVBUFFORCE.\nOnSetReceiveBufferSize(v, oldSz int64) (newSz int64)\n}\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/BUILD", "new_path": "test/syscalls/linux/BUILD", "diff": "@@ -2395,6 +2395,7 @@ cc_library(\n\"@com_google_absl//absl/strings\",\n\"@com_google_absl//absl/strings:str_format\",\ngtest,\n+ \"//test/util:capability_util\",\n\"//test/util:test_util\",\n],\nalwayslink = 1,\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/socket_generic_test_cases.cc", "new_path": "test/syscalls/linux/socket_generic_test_cases.cc", "diff": "#include \"test/syscalls/linux/socket_generic.h\"\n+#ifdef __linux__\n+#include <linux/capability.h>\n+#endif // __linux__\n#include <stdio.h>\n#include <sys/ioctl.h>\n#include <sys/socket.h>\n#include \"absl/strings/string_view.h\"\n#include \"test/syscalls/linux/socket_test_util.h\"\n#include \"test/syscalls/linux/unix_domain_socket_test_util.h\"\n+#include \"test/util/capability_util.h\"\n#include \"test/util/test_util.h\"\n// This file is a generic socket test file. It must be built with another file\n@@ -400,6 +404,46 @@ TEST_P(AllSocketPairTest, RcvBufSucceeds) {\nEXPECT_GT(size, 0);\n}\n+#ifdef __linux__\n+\n+// Check that setting SO_RCVBUFFORCE above max is not clamped to the maximum\n+// receive buffer size.\n+TEST_P(AllSocketPairTest, SetSocketRecvBufForceAboveMax) {\n+ std::unique_ptr<SocketPair> sockets =\n+ ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());\n+\n+ // Discover maxmimum buffer size by setting to a really large value.\n+ constexpr int kRcvBufSz = 0xffffffff;\n+ ASSERT_THAT(setsockopt(sockets->first_fd(), SOL_SOCKET, SO_RCVBUF, &kRcvBufSz,\n+ sizeof(kRcvBufSz)),\n+ SyscallSucceeds());\n+\n+ int max = 0;\n+ socklen_t max_len = sizeof(max);\n+ ASSERT_THAT(\n+ getsockopt(sockets->first_fd(), SOL_SOCKET, SO_RCVBUF, &max, &max_len),\n+ SyscallSucceeds());\n+\n+ int above_max = max + 1;\n+ int sso = setsockopt(sockets->first_fd(), SOL_SOCKET, SO_RCVBUFFORCE,\n+ &above_max, sizeof(above_max));\n+ if (!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_ADMIN))) {\n+ ASSERT_THAT(sso, SyscallFailsWithErrno(EPERM));\n+ return;\n+ }\n+ ASSERT_THAT(sso, SyscallSucceeds());\n+\n+ int val = 0;\n+ socklen_t val_len = sizeof(val);\n+ ASSERT_THAT(\n+ getsockopt(sockets->first_fd(), SOL_SOCKET, SO_RCVBUF, &val, &val_len),\n+ SyscallSucceeds());\n+ // The system doubles the passed-in maximum.\n+ ASSERT_EQ(above_max * 2, val);\n+}\n+\n+#endif // __linux__\n+\nTEST_P(AllSocketPairTest, GetSndBufSucceeds) {\nauto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());\nint size = 0;\n" }, { "change_type": "MODIFY", "old_path": "test/util/capability_util.cc", "new_path": "test/util/capability_util.cc", "diff": "// See the License for the specific language governing permissions and\n// limitations under the License.\n+#ifdef __linux__\n+\n#include \"test/util/capability_util.h\"\n#include <linux/capability.h>\n@@ -79,3 +81,5 @@ PosixErrorOr<bool> CanCreateUserNamespace() {\n} // namespace testing\n} // namespace gvisor\n+\n+#endif // __linux__\n" }, { "change_type": "MODIFY", "old_path": "test/util/capability_util.h", "new_path": "test/util/capability_util.h", "diff": "#ifndef GVISOR_TEST_UTIL_CAPABILITY_UTIL_H_\n#define GVISOR_TEST_UTIL_CAPABILITY_UTIL_H_\n+#ifdef __linux__\n+\n#include <errno.h>\n#include 
<linux/capability.h>\n#include <sys/syscall.h>\n@@ -120,4 +122,7 @@ class AutoCapability {\n} // namespace testing\n} // namespace gvisor\n+\n+#endif // __linux__\n+\n#endif // GVISOR_TEST_UTIL_CAPABILITY_UTIL_H_\n" } ]
Go
Apache License 2.0
google/gvisor
netstack: support SO_RCVBUFFORCE TCP is fully supported. As with SO_RCVBUF, other transport protocols perform no-ops per DefaultSocketOptionsHandler.OnSetReceiveBufferSize. PiperOrigin-RevId: 385023239
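As a quick illustrative aside to the commit above (not part of the recorded change): the behavior its new C++ test exercises can also be sketched from Go using standard golang.org/x/sys/unix wrappers (unix.Socket, unix.GetsockoptInt, unix.SetsockoptInt and the SO_RCVBUF/SO_RCVBUFFORCE constants). The 8 MiB request below is an arbitrary example value; per the commit's description, the SO_RCVBUFFORCE call is expected to fail with EPERM unless the caller holds CAP_NET_ADMIN.

```go
package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	fd, err := unix.Socket(unix.AF_INET, unix.SOCK_STREAM, 0)
	if err != nil {
		panic(err)
	}
	defer unix.Close(fd)

	// Current receive buffer size, for reference.
	cur, err := unix.GetsockoptInt(fd, unix.SOL_SOCKET, unix.SO_RCVBUF)
	if err != nil {
		panic(err)
	}

	// SO_RCVBUF silently clamps large values to the configured maximum;
	// SO_RCVBUFFORCE skips the clamp for CAP_NET_ADMIN callers and
	// otherwise fails with EPERM (the test in the commit also notes the
	// kernel doubles the stored value).
	want := 8 << 20 // arbitrary example: 8 MiB
	err = unix.SetsockoptInt(fd, unix.SOL_SOCKET, unix.SO_RCVBUFFORCE, want)
	fmt.Printf("SO_RCVBUF before: %d; SetsockoptInt(SO_RCVBUFFORCE, %d) => %v\n", cur, want, err)
}
```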
259,891
15.07.2021 16:06:18
25,200
6415efa514db436e33375f67f38c9f783491b692
buildkite: bump Go version to 1.16 We're currently on 1.13, which can cause build issues with code targeting later versions.
[ { "change_type": "MODIFY", "old_path": ".buildkite/hooks/pre-command", "new_path": ".buildkite/hooks/pre-command", "diff": "@@ -8,9 +8,16 @@ function install_pkgs() {\ndone\n}\ninstall_pkgs make \"linux-headers-$(uname -r)\" linux-libc-dev \\\n- graphviz jq curl binutils gnupg gnupg-agent golang-go \\\n+ graphviz jq curl binutils gnupg gnupg-agent gcc \\\napt-transport-https ca-certificates software-properties-common\n+# Install Go 1.16, as only 1.13 is available via apt.\n+declare -r go_archive=go1.16.6.linux-amd64.tar.gz\n+wget \"https://golang.org/dl/${go_archive}\"\n+sudo tar -xzf \"${go_archive}\" -C /usr/local\n+sudo ln -s /usr/local/go/bin/go /usr/bin/go\n+rm \"${go_archive}\"\n+\n# Setup for parallelization with PARTITION and TOTAL_PARTITIONS.\nexport PARTITION=${BUILDKITE_PARALLEL_JOB:-0}\nPARTITION=$((${PARTITION}+1)) # 1-indexed, but PARALLEL_JOB is 0-indexed.\n" }, { "change_type": "MODIFY", "old_path": "tools/installers/containerd.sh", "new_path": "tools/installers/containerd.sh", "diff": "@@ -36,9 +36,10 @@ install_helper() {\nmkdir -p \"${GOPATH}\"/src/$(dirname \"${PACKAGE}\") && \\\ngit clone https://\"${PACKAGE}\" \"${GOPATH}\"/src/\"${PACKAGE}\"\n- # Checkout and build the repository.\n+ # Checkout and build the repository. We use a pre-GO111MODULE containerd.\n(cd \"${GOPATH}\"/src/\"${PACKAGE}\" && \\\ngit checkout \"${TAG}\" && \\\n+ export GO111MODULE=off && \\\nmake && \\\nmake install)\n}\n" } ]
Go
Apache License 2.0
google/gvisor
buildkite: bump Go version to 1.16 We're currently on 1.13, which can cause build issues with code targeting later versions. PiperOrigin-RevId: 385029528
259,868
15.07.2021 18:26:01
25,200
b6baa377d85db823c9d1c15658e843d6683835a3
Update gVisor release signing key to a version that does not expire.
[ { "change_type": "MODIFY", "old_path": "g3doc/user_guide/install.md", "new_path": "g3doc/user_guide/install.md", "diff": "@@ -55,7 +55,10 @@ sudo apt-get install -y \\\nsoftware-properties-common\n```\n-Next, the configure the key used to sign archives and the repository:\n+Next, configure the key used to sign archives and the repository.\n+\n+NOTE: The key was updated on 2021-07-13 to replace the expired key. If you get\n+errors about the key being expired, run the `apt-key add` command below again.\n```bash\ncurl -fsSL https://gvisor.dev/archive.key | sudo apt-key add -\n" }, { "change_type": "MODIFY", "old_path": "website/archive.key", "new_path": "website/archive.key", "diff": "@@ -11,19 +11,19 @@ lzqkT3VSMXieImTASosK5L5Q8rryvgCeI9tQLn9EpYFCtU3LXvVgTreGNEEjMOnL\ndR7yOU+Fs775stn6ucqmdYarx7CvKUrNAhgEeHMonLe1cjYScF7NfLO1GIrQKJR2\nDE0f+uJZ52inOkO8ufh3WVQJSYszuS3HCY7w5oj1aP38k/y9zZdZvVvwAWZaiqBQ\niwjVs6Kub76VVZZhRDf4iYs8k1Zh64nXdfQt250d8U5yMPF3wIJ+c1yhxwARAQAB\n-tCpUaGUgZ1Zpc29yIEF1dGhvcnMgPGd2aXNvci1ib3RAZ29vZ2xlLmNvbT6JAlQE\n-EwEKAD4WIQRvHfheOnHCSRjnJ9VvxtVU4yvZQwUCXSZ4BgIbAwUJA8JnAAULCQgH\n-AgYVCgkICwIEFgIDAQIeAQIXgAAKCRBvxtVU4yvZQ5WFD/9VZXMW5I2rKV+2gTHT\n-CsW74kZVi1VFdAVYiUJZXw2jJNtcg3xdgBcscYPyecyka/6TS2q7q2fOGAzCZkcR\n-e3lLzkGAngMlZ7PdHAE0PDMNFaeMZW0dxNH68vn7AiA1y2XwENnxVec7iXQH6aX5\n-xUNg2OCiv5f6DJItHc/Q4SvFUi8QK7TT/GYE1RJXVJlLqfO6y4V8SeqfM+FHpHZM\n-gzrwdTgsNiEm4lMjWcgb2Ib4i2JUVAjIRPfcpysiV5E7c3SPXyu4bOovKKlbhiJ1\n-Q1M9M0zHik34Kjf4YNO1EW936j7Msd181CJt5Bl9XvlhPb8gey/ygpIvcicLx6M5\n-lRJTy4z1TtkmtZ7E8EbJZWoPTaHlA6hoMtGeE35j3vMZN1qZYaYt26eFOxxhh7PA\n-J0h1lS7T2O8u1c2JKhKvajtdmbqbJgI8FRhVsMoVBnqDK5aE9MOAso36OibfweEL\n-8iV2z8JnBpWtbbUEaWro4knPtbLJbQFvXVietm3cFsbGg+DMIwI6x6HcU91IEFYI\n-Sv4orK7xgLuM+f6dxo/Wel3ht18dg3x3krBLALTYBidRfnQYYR3sTfLquB8b5WaY\n-o829L2Bop9GBygdLevkHHN5It6q8CVpn0H5HEJMNaDOX1LcPbf0CKwkkAVCBd9YZ\n-eAX38ds9LliK7XPXdC4c+zEkGA==\n-=x8TG\n+tCpUaGUgZ1Zpc29yIEF1dGhvcnMgPGd2aXNvci1ib3RAZ29vZ2xlLmNvbT6JAk4E\n+EwEKADgCGwMFCwkIBwIGFQoJCAsCBBYCAwECHgECF4AWIQRvHfheOnHCSRjnJ9Vv\n+xtVU4yvZQwUCYO4TxQAKCRBvxtVU4yvZQ9UoEACLPV7CnEA2bjCPi0NCWB/Mo1WL\n+evqv7Wv7vmXzI1K9DrqOhxuamQW75SVXg1df0hTJWbKFmDAip6NEC2Rg5P+A8hHj\n+nW/VG+q4ZFT662jDhnXQiO9L7EZzjyqNF4yWYzzgnqEu/SmGkDLDYiUCcGBqS2oE\n+EQfk7RHJSLMJXAnNDH7OUDgrirSssg/dlQ5uAHA9Au80VvC5fsTKza8b3Aydw3SV\n+iB8/Yuikbl8wKbpSGiXtR4viElXjNips0+mBqaUk2xpqSBrsfN+FezcInVXaXFeq\n+xtpq2/3M3DYbqCRjqeyd9wNi92FHdOusNrK4MYe0pAYbGjc65BwH+F0T4oJ8ZSJV\n+lIt+FZ0MqM1T97XadybYFsJh8qvajQpZEPL+zzNncc4f1d80e7+lwIZV/al0FZWW\n+Zlp7TpbeO/uW+lHs5W14YKwaQVh1whapKXTrATipNOOSCw2hnfrT8V7Hy55QWaGZ\n+f4/kfy929EeCP16d/LqOClv0j0RBr6NhRBQ0l/BE/mXjJwIk6nKwi+Yi4ek1ARi6\n+AlCMLn9AZF7aTGpvCiftzIrlyDfVZT5IX03TayxRHZ4b1Rj8eyJaHcjI49u83gkr\n+4LGX08lEawn9nxFSx4RCg2swGiYw5F436wwwAIozqJuDASeTa3QND3au5v0oYWnl\n+umDySUl5wPaAaALgzA==\n+=5/8T\n-----END PGP PUBLIC KEY BLOCK-----\n" } ]
Go
Apache License 2.0
google/gvisor
Update gVisor release signing key to a version that does not expire. PiperOrigin-RevId: 385051420
259,992
15.07.2021 18:51:10
25,200
628d7d3a4662a14625895df6ff1ca7a9dbf3a5d7
Fix refcount increments in gofer.filesystem.Sync. fs.renameMu is released and reacquired in `dentry.destroyLocked()` allowing a dentry to be in `fs.syncableDentries` with a negative reference count. Fixes
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/gofer/filesystem.go", "new_path": "pkg/sentry/fsimpl/gofer/filesystem.go", "diff": "@@ -39,27 +39,15 @@ import (\n// Sync implements vfs.FilesystemImpl.Sync.\nfunc (fs *filesystem) Sync(ctx context.Context) error {\n// Snapshot current syncable dentries and special file FDs.\n- fs.renameMu.RLock()\nfs.syncMu.Lock()\nds := make([]*dentry, 0, len(fs.syncableDentries))\nfor d := range fs.syncableDentries {\n- // It's safe to use IncRef here even though fs.syncableDentries doesn't\n- // hold references since we hold fs.renameMu. Note that we can't use\n- // TryIncRef since cached dentries at zero references should still be\n- // synced.\n- d.IncRef()\nds = append(ds, d)\n}\n- fs.renameMu.RUnlock()\nsffds := make([]*specialFileFD, 0, len(fs.specialFileFDs))\nfor sffd := range fs.specialFileFDs {\n- // As above, fs.specialFileFDs doesn't hold references. However, unlike\n- // dentries, an FD that has reached zero references can't be\n- // resurrected, so we can use TryIncRef.\n- if sffd.vfsfd.TryIncRef() {\nsffds = append(sffds, sffd)\n}\n- }\nfs.syncMu.Unlock()\n// Return the first error we encounter, but sync everything we can\n@@ -68,9 +56,7 @@ func (fs *filesystem) Sync(ctx context.Context) error {\n// Sync syncable dentries.\nfor _, d := range ds {\n- err := d.syncCachedFile(ctx, true /* forFilesystemSync */)\n- d.DecRef(ctx)\n- if err != nil {\n+ if err := d.syncCachedFile(ctx, true /* forFilesystemSync */); err != nil {\nctx.Infof(\"gofer.filesystem.Sync: dentry.syncCachedFile failed: %v\", err)\nif retErr == nil {\nretErr = err\n@@ -81,9 +67,7 @@ func (fs *filesystem) Sync(ctx context.Context) error {\n// Sync special files, which may be writable but do not use dentry shared\n// handles (so they won't be synced by the above).\nfor _, sffd := range sffds {\n- err := sffd.sync(ctx, true /* forFilesystemSync */)\n- sffd.vfsfd.DecRef(ctx)\n- if err != nil {\n+ if err := sffd.sync(ctx, true /* forFilesystemSync */); err != nil {\nctx.Infof(\"gofer.filesystem.Sync: specialFileFD.sync failed: %v\", err)\nif retErr == nil {\nretErr = err\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/gofer/gofer.go", "new_path": "pkg/sentry/fsimpl/gofer/gofer.go", "diff": "@@ -582,10 +582,10 @@ func (fs *filesystem) Release(ctx context.Context) {\nd.dataMu.Unlock()\n// Close host FDs if they exist.\nif d.readFD >= 0 {\n- unix.Close(int(d.readFD))\n+ _ = unix.Close(int(d.readFD))\n}\nif d.writeFD >= 0 && d.readFD != d.writeFD {\n- unix.Close(int(d.writeFD))\n+ _ = unix.Close(int(d.writeFD))\n}\nd.readFD = -1\nd.writeFD = -1\n@@ -1637,18 +1637,18 @@ func (d *dentry) destroyLocked(ctx context.Context) {\nd.dataMu.Unlock()\n// Clunk open fids and close open host FDs.\nif !d.readFile.isNil() {\n- d.readFile.close(ctx)\n+ _ = d.readFile.close(ctx)\n}\nif !d.writeFile.isNil() && d.readFile != d.writeFile {\n- d.writeFile.close(ctx)\n+ _ = d.writeFile.close(ctx)\n}\nd.readFile = p9file{}\nd.writeFile = p9file{}\nif d.readFD >= 0 {\n- unix.Close(int(d.readFD))\n+ _ = unix.Close(int(d.readFD))\n}\nif d.writeFD >= 0 && d.readFD != d.writeFD {\n- unix.Close(int(d.writeFD))\n+ _ = unix.Close(int(d.writeFD))\n}\nd.readFD = -1\nd.writeFD = -1\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/gofer/special_file.go", "new_path": "pkg/sentry/fsimpl/gofer/special_file.go", "diff": "@@ -42,6 +42,11 @@ import (\ntype specialFileFD struct {\nfileDescription\n+ // releaseMu synchronizes the closing of fd.handle with fd.sync(). 
It's safe\n+ // to access fd.handle without locking for operations that require a ref to\n+ // be held by the caller, e.g. vfs.FileDescriptionImpl implementations.\n+ releaseMu sync.RWMutex `state:\"nosave\"`\n+\n// handle is used for file I/O. handle is immutable.\nhandle handle `state:\"nosave\"`\n@@ -117,7 +122,10 @@ func (fd *specialFileFD) Release(ctx context.Context) {\nif fd.haveQueue {\nfdnotifier.RemoveFD(fd.handle.fd)\n}\n+ fd.releaseMu.Lock()\nfd.handle.close(ctx)\n+ fd.releaseMu.Unlock()\n+\nfs := fd.vfsfd.Mount().Filesystem().Impl().(*filesystem)\nfs.syncMu.Lock()\ndelete(fs.specialFileFDs, fd)\n@@ -373,6 +381,13 @@ func (fd *specialFileFD) Sync(ctx context.Context) error {\n}\nfunc (fd *specialFileFD) sync(ctx context.Context, forFilesystemSync bool) error {\n+ // Locks to ensure it didn't race with fd.Release().\n+ fd.releaseMu.RLock()\n+ defer fd.releaseMu.RUnlock()\n+\n+ if !fd.handle.isOpen() {\n+ return nil\n+ }\nerr := func() error {\n// If we have a host FD, fsyncing it is likely to be faster than an fsync\n// RPC.\n" } ]
Go
Apache License 2.0
google/gvisor
Fix refcount increments in gofer.filesystem.Sync. fs.renameMu is released and reacquired in `dentry.destroyLocked()` allowing a dentry to be in `fs.syncableDentries` with a negative reference count. Fixes #5263 PiperOrigin-RevId: 385054337
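As an illustrative aside to the commit above (not part of the recorded change), the `releaseMu` idea it introduces is a small pattern on its own: the release path takes the write lock around closing a handle, while sync takes the read lock and becomes a no-op once the handle is gone. A minimal sketch of that shape — the `fileLike` type and its fields are invented names for illustration only:

```go
package main

import (
	"fmt"
	"sync"
)

// fileLike mimics the releaseMu pattern: Release closes under the write lock,
// Sync checks the handle under the read lock, so the two can race safely.
type fileLike struct {
	releaseMu sync.RWMutex
	open      bool
}

func (f *fileLike) Release() {
	f.releaseMu.Lock()
	defer f.releaseMu.Unlock()
	f.open = false // stands in for closing the host FD / remote handle
}

func (f *fileLike) Sync() error {
	f.releaseMu.RLock()
	defer f.releaseMu.RUnlock()
	if !f.open {
		return nil // raced with Release; nothing left to flush
	}
	// ... flush the handle here ...
	return nil
}

func main() {
	f := &fileLike{open: true}
	var wg sync.WaitGroup
	wg.Add(2)
	go func() { defer wg.Done(); f.Release() }()
	go func() { defer wg.Done(); fmt.Println("sync:", f.Sync()) }()
	wg.Wait()
}
```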
259,853
20.07.2021 12:04:47
25,200
7ced03b3831983e35cb25ad0a9d5632ea322f9a4
ring0: Initialize sentryXCR0 from Kernel.init() Fixes
[ { "change_type": "MODIFY", "old_path": "pkg/ring0/kernel_amd64.go", "new_path": "pkg/ring0/kernel_amd64.go", "diff": "@@ -19,6 +19,7 @@ package ring0\nimport (\n\"encoding/binary\"\n\"reflect\"\n+ \"sync\"\n\"gvisor.dev/gvisor/pkg/hostarch\"\n\"gvisor.dev/gvisor/pkg/sentry/arch\"\n@@ -30,6 +31,8 @@ func HaltAndWriteFSBase(regs *arch.Registers)\n// init initializes architecture-specific state.\nfunc (k *Kernel) init(maxCPUs int) {\n+ initSentryXCR0()\n+\nentrySize := reflect.TypeOf(kernelEntry{}).Size()\nvar (\nentries []kernelEntry\n@@ -257,7 +260,16 @@ func (c *CPU) SwitchToUser(switchOpts SwitchOpts) (vector Vector) {\nreturn\n}\n-var sentryXCR0 = xgetbv(0)\n+var (\n+ sentryXCR0 uintptr\n+ sentryXCR0Once sync.Once\n+)\n+\n+// initSentryXCR0 saves a value of XCR0 in the host mode. It is used to\n+// initialize XCR0 of guest vCPU-s.\n+func initSentryXCR0() {\n+ sentryXCR0Once.Do(func() { sentryXCR0 = xgetbv(0) })\n+}\n// startGo is the CPU entrypoint.\n//\n" } ]
Go
Apache License 2.0
google/gvisor
ring0: Initialize sentryXCR0 from Kernel.init() Fixes #6300 PiperOrigin-RevId: 385840917
259,992
20.07.2021 20:53:33
25,200
990cd1a950955e25dc8935a6aca61906307a0851
Don't kill container when volume is unmounted The gofer session is killed when a gofer backed volume is unmounted. The gofer monitor catches the disconnect and kills the container. This changes the gofer monitor to only care about the rootfs connections, which cannot be unmounted. Fixes
[ { "change_type": "MODIFY", "old_path": "runsc/boot/loader.go", "new_path": "runsc/boot/loader.go", "diff": "@@ -742,8 +742,11 @@ func (l *Loader) createContainerProcess(root bool, cid string, info *containerIn\n// ours either way.\ninfo.procArgs.FDTable = fdTable\n- // Setup the child container file system.\n- l.startGoferMonitor(cid, info.goferFDs)\n+ // Gofer FDs must be ordered and the first FD is always the rootfs.\n+ if len(info.goferFDs) < 1 {\n+ return nil, nil, nil, fmt.Errorf(\"rootfs gofer FD not found\")\n+ }\n+ l.startGoferMonitor(cid, int32(info.goferFDs[0].FD()))\nmntr := newContainerMounter(info, l.k, l.mountHints, kernel.VFS2Enabled)\nif root {\n@@ -816,17 +819,21 @@ func (l *Loader) createContainerProcess(root bool, cid string, info *containerIn\n}\n// startGoferMonitor runs a goroutine to monitor gofer's health. It polls on\n-// the gofer FDs looking for disconnects, and kills the container processes if a\n-// disconnect occurs in any of the gofer FDs.\n-func (l *Loader) startGoferMonitor(cid string, goferFDs []*fd.FD) {\n+// the gofer FD looking for disconnects, and kills the container processes if\n+// the rootfs FD disconnects.\n+//\n+// Note that other gofer mounts are allowed to be unmounted and disconnected.\n+func (l *Loader) startGoferMonitor(cid string, rootfsGoferFD int32) {\n+ if rootfsGoferFD < 0 {\n+ panic(fmt.Sprintf(\"invalid FD: %d\", rootfsGoferFD))\n+ }\ngo func() {\nlog.Debugf(\"Monitoring gofer health for container %q\", cid)\n- var events []unix.PollFd\n- for _, goferFD := range goferFDs {\n- events = append(events, unix.PollFd{\n- Fd: int32(goferFD.FD()),\n+ events := []unix.PollFd{\n+ {\n+ Fd: rootfsGoferFD,\nEvents: unix.POLLHUP | unix.POLLRDHUP,\n- })\n+ },\n}\n_, _, err := specutils.RetryEintr(func() (uintptr, uintptr, error) {\n// Use ppoll instead of poll because it's already whilelisted in seccomp.\n" }, { "change_type": "MODIFY", "old_path": "test/e2e/integration_test.go", "new_path": "test/e2e/integration_test.go", "diff": "@@ -44,6 +44,12 @@ import (\n// defaultWait is the default wait time used for tests.\nconst defaultWait = time.Minute\n+func TestMain(m *testing.M) {\n+ dockerutil.EnsureSupportedDockerVersion()\n+ flag.Parse()\n+ os.Exit(m.Run())\n+}\n+\n// httpRequestSucceeds sends a request to a given url and checks that the status is OK.\nfunc httpRequestSucceeds(client http.Client, server string, port int) error {\nurl := fmt.Sprintf(\"http://%s:%d\", server, port)\n@@ -712,8 +718,27 @@ func TestStdiosChown(t *testing.T) {\n}\n}\n-func TestMain(m *testing.M) {\n- dockerutil.EnsureSupportedDockerVersion()\n- flag.Parse()\n- os.Exit(m.Run())\n+func TestUnmount(t *testing.T) {\n+ ctx := context.Background()\n+ d := dockerutil.MakeContainer(ctx, t)\n+ defer d.CleanUp(ctx)\n+\n+ dir, err := ioutil.TempDir(testutil.TmpDir(), \"sub-mount\")\n+ if err != nil {\n+ t.Fatalf(\"TempDir(): %v\", err)\n+ }\n+ opts := dockerutil.RunOpts{\n+ Image: \"basic/alpine\",\n+ Privileged: true, // Required for umount\n+ Mounts: []mount.Mount{\n+ {\n+ Type: mount.TypeBind,\n+ Source: dir,\n+ Target: \"/foo\",\n+ },\n+ },\n+ }\n+ if _, err := d.Run(ctx, opts, \"umount\", \"/foo\"); err != nil {\n+ t.Fatalf(\"docker run failed: %v\", err)\n+ }\n}\n" } ]
Go
Apache License 2.0
google/gvisor
Don't kill container when volume is unmounted The gofer session is killed when a gofer backed volume is unmounted. The gofer monitor catches the disconnect and kills the container. This changes the gofer monitor to only care about the rootfs connections, which cannot be unmounted. Fixes #6259 PiperOrigin-RevId: 385929039
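As an illustrative aside to the commit above (not part of the recorded change), the rootfs gofer monitor it describes boils down to blocking in ppoll(2) on a single FD and treating POLLHUP/POLLRDHUP as a disconnect. A minimal Go sketch of that shape, using only standard golang.org/x/sys/unix APIs (unix.Ppoll, unix.PollFd, unix.Pipe and the poll constants); the pipe here is purely a stand-in for the gofer connection:

```go
package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

// waitForHangup blocks until fd reports a hangup, retrying on EINTR.
func waitForHangup(fd int32) error {
	events := []unix.PollFd{{
		Fd:     fd,
		Events: unix.POLLHUP | unix.POLLRDHUP,
	}}
	for {
		_, err := unix.Ppoll(events, nil, nil)
		if err == unix.EINTR {
			continue // interrupted by a signal; poll again
		}
		return err // nil once an event (e.g. hangup) fired
	}
}

func main() {
	// Closing the write end of a pipe makes the read end report POLLHUP.
	fds := make([]int, 2)
	if err := unix.Pipe(fds); err != nil {
		panic(err)
	}
	go unix.Close(fds[1])
	fmt.Println("monitor returned:", waitForHangup(int32(fds[0])))
}
```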
259,992
20.07.2021 22:18:47
25,200
0184f1a662b893a1634e9b2cf3adff57971b668c
Add fsstress test to goferfs
[ { "change_type": "MODIFY", "old_path": "test/fsstress/BUILD", "new_path": "test/fsstress/BUILD", "diff": "@@ -14,7 +14,11 @@ go_test(\n\"manual\",\n\"local\",\n],\n- deps = [\"//pkg/test/dockerutil\"],\n+ deps = [\n+ \"//pkg/test/dockerutil\",\n+ \"//pkg/test/testutil\",\n+ \"@com_github_docker_docker//api/types/mount:go_default_library\",\n+ ],\n)\ngo_library(\n" }, { "change_type": "MODIFY", "old_path": "test/fsstress/fsstress_test.go", "new_path": "test/fsstress/fsstress_test.go", "diff": "@@ -18,6 +18,8 @@ package fsstress\nimport (\n\"context\"\n\"flag\"\n+ \"fmt\"\n+ \"io/ioutil\"\n\"math/rand\"\n\"os\"\n\"strconv\"\n@@ -25,7 +27,9 @@ import (\n\"testing\"\n\"time\"\n+ \"github.com/docker/docker/api/types/mount\"\n\"gvisor.dev/gvisor/pkg/test/dockerutil\"\n+ \"gvisor.dev/gvisor/pkg/test/testutil\"\n)\nfunc init() {\n@@ -42,6 +46,7 @@ type config struct {\noperations string\nprocesses string\ntarget string\n+ mounts []mount.Mount\n}\nfunc fsstress(t *testing.T, conf config) {\n@@ -52,8 +57,19 @@ func fsstress(t *testing.T, conf config) {\nconst image = \"basic/fsstress\"\nseed := strconv.FormatUint(uint64(rand.Uint32()), 10)\nargs := []string{\"-d\", conf.target, \"-n\", conf.operations, \"-p\", conf.processes, \"-s\", seed, \"-X\"}\n- t.Logf(\"Repro: docker run --rm --runtime=%s gvisor.dev/images/%s %s\", dockerutil.Runtime(), image, strings.Join(args, \" \"))\n- out, err := d.Run(ctx, dockerutil.RunOpts{Image: image}, args...)\n+ opts := dockerutil.RunOpts{\n+ Image: image,\n+ Mounts: conf.mounts,\n+ }\n+ var mounts string\n+ if len(conf.mounts) > 0 {\n+ mounts = \" -v \"\n+ for _, m := range conf.mounts {\n+ mounts += fmt.Sprintf(\"-v <any_dir>:%s\", m.Target)\n+ }\n+ }\n+ t.Logf(\"Repro: docker run --rm --runtime=%s%s gvisor.dev/images/%s %s\", dockerutil.Runtime(), mounts, image, strings.Join(args, \" \"))\n+ out, err := d.Run(ctx, opts, args...)\nif err != nil {\nt.Fatalf(\"docker run failed: %v\\noutput: %s\", err, out)\n}\n@@ -64,6 +80,39 @@ func fsstress(t *testing.T, conf config) {\n}\n}\n+func TestFsstressGofer(t *testing.T) {\n+ // This takes between 30-60s to run on my machine. Adjust as needed.\n+ cfg := config{\n+ operations: \"500\",\n+ processes: \"20\",\n+ target: \"/test\",\n+ }\n+ fsstress(t, cfg)\n+}\n+\n+func TestFsstressGoferShared(t *testing.T) {\n+ dir, err := ioutil.TempDir(testutil.TmpDir(), \"fsstress\")\n+ if err != nil {\n+ t.Fatalf(\"ioutil.TempDir() failed: %v\", err)\n+ }\n+ defer os.RemoveAll(dir)\n+\n+ // This takes between 30-60s to run on my machine. Adjust as needed.\n+ cfg := config{\n+ operations: \"500\",\n+ processes: \"20\",\n+ target: \"/test\",\n+ mounts: []mount.Mount{\n+ {\n+ Source: dir,\n+ Target: \"/test\",\n+ Type: \"bind\",\n+ },\n+ },\n+ }\n+ fsstress(t, cfg)\n+}\n+\nfunc TestFsstressTmpfs(t *testing.T) {\n// This takes between 10s to run on my machine. Adjust as needed.\ncfg := config{\n" } ]
Go
Apache License 2.0
google/gvisor
Add fsstress test to goferfs PiperOrigin-RevId: 385937353
260,004
20.07.2021 22:47:34
25,200
9e805ce937ef2f2934e72f873ea4ae8451801c82
Expose local address from raw sockets
[ { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/raw/endpoint.go", "new_path": "pkg/tcpip/transport/raw/endpoint.go", "diff": "@@ -455,8 +455,21 @@ func (e *endpoint) Bind(addr tcpip.FullAddress) tcpip.Error {\n}\n// GetLocalAddress implements tcpip.Endpoint.GetLocalAddress.\n-func (*endpoint) GetLocalAddress() (tcpip.FullAddress, tcpip.Error) {\n- return tcpip.FullAddress{}, &tcpip.ErrNotSupported{}\n+func (e *endpoint) GetLocalAddress() (tcpip.FullAddress, tcpip.Error) {\n+ e.mu.RLock()\n+ defer e.mu.RUnlock()\n+\n+ addr := e.BindAddr\n+ if e.connected {\n+ addr = e.route.LocalAddress()\n+ }\n+\n+ return tcpip.FullAddress{\n+ NIC: e.RegisterNICID,\n+ Addr: addr,\n+ // Linux returns the protocol in the port field.\n+ Port: uint16(e.TransProto),\n+ }, nil\n}\n// GetRemoteAddress implements tcpip.Endpoint.GetRemoteAddress.\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/raw_socket.cc", "new_path": "test/syscalls/linux/raw_socket.cc", "diff": "// See the License for the specific language governing permissions and\n// limitations under the License.\n+#include <arpa/inet.h>\n#include <linux/capability.h>\n#include <linux/filter.h>\n#include <netinet/in.h>\n@@ -76,6 +77,20 @@ class RawSocketTest : public ::testing::TestWithParam<std::tuple<int, int>> {\nreturn 0;\n}\n+ uint16_t Port(struct sockaddr* s) {\n+ if (Family() == AF_INET) {\n+ return ntohs(reinterpret_cast<struct sockaddr_in*>(s)->sin_port);\n+ }\n+ return ntohs(reinterpret_cast<struct sockaddr_in6*>(s)->sin6_port);\n+ }\n+\n+ void* Addr(struct sockaddr* s) {\n+ if (Family() == AF_INET) {\n+ return &(reinterpret_cast<struct sockaddr_in*>(s)->sin_addr);\n+ }\n+ return &(reinterpret_cast<struct sockaddr_in6*>(s)->sin6_addr);\n+ }\n+\n// The socket used for both reading and writing.\nint s_;\n@@ -181,6 +196,54 @@ TEST_P(RawSocketTest, FailAccept) {\nASSERT_THAT(accept(s_, &saddr, &addrlen), SyscallFailsWithErrno(ENOTSUP));\n}\n+TEST_P(RawSocketTest, BindThenGetSockName) {\n+ SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));\n+\n+ struct sockaddr* addr = reinterpret_cast<struct sockaddr*>(&addr_);\n+ ASSERT_THAT(bind(s_, addr, AddrLen()), SyscallSucceeds());\n+ struct sockaddr_storage saddr_storage;\n+ struct sockaddr* saddr = reinterpret_cast<struct sockaddr*>(&saddr_storage);\n+ socklen_t saddrlen = AddrLen();\n+ ASSERT_THAT(getsockname(s_, saddr, &saddrlen), SyscallSucceeds());\n+ ASSERT_EQ(saddrlen, AddrLen());\n+\n+ // The port is expected to hold the protocol number.\n+ EXPECT_EQ(Port(saddr), Protocol());\n+\n+ char addrbuf[INET6_ADDRSTRLEN], saddrbuf[INET6_ADDRSTRLEN];\n+ const char* addrstr =\n+ inet_ntop(addr->sa_family, Addr(addr), addrbuf, sizeof(addrbuf));\n+ ASSERT_NE(addrstr, nullptr);\n+ const char* saddrstr =\n+ inet_ntop(saddr->sa_family, Addr(saddr), saddrbuf, sizeof(saddrbuf));\n+ ASSERT_NE(saddrstr, nullptr);\n+ EXPECT_STREQ(saddrstr, addrstr);\n+}\n+\n+TEST_P(RawSocketTest, ConnectThenGetSockName) {\n+ SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));\n+\n+ struct sockaddr* addr = reinterpret_cast<struct sockaddr*>(&addr_);\n+ ASSERT_THAT(connect(s_, addr, AddrLen()), SyscallSucceeds());\n+ struct sockaddr_storage saddr_storage;\n+ struct sockaddr* saddr = reinterpret_cast<struct sockaddr*>(&saddr_storage);\n+ socklen_t saddrlen = AddrLen();\n+ ASSERT_THAT(getsockname(s_, saddr, &saddrlen), SyscallSucceeds());\n+ ASSERT_EQ(saddrlen, AddrLen());\n+\n+ // The port is expected to hold the protocol number.\n+ EXPECT_EQ(Port(saddr), Protocol());\n+\n+ char 
addrbuf[INET6_ADDRSTRLEN], saddrbuf[INET6_ADDRSTRLEN];\n+ const char* addrstr =\n+ inet_ntop(addr->sa_family, Addr(addr), addrbuf, sizeof(addrbuf));\n+ ASSERT_NE(addrstr, nullptr);\n+ const char* saddrstr =\n+ inet_ntop(saddr->sa_family, Addr(saddr), saddrbuf, sizeof(saddrbuf));\n+ ASSERT_NE(saddrstr, nullptr);\n+ EXPECT_STREQ(saddrstr, addrstr);\n+}\n+\n// Test that getpeername() returns nothing before connect().\nTEST_P(RawSocketTest, FailGetPeerNameBeforeConnect) {\nSKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));\n" } ]
Go
Apache License 2.0
google/gvisor
Expose local address from raw sockets PiperOrigin-RevId: 385940836
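As an illustrative aside to the commit above (not part of the recorded change): the new behavior can be observed with a plain getsockname on a raw socket, where the port field carries the protocol number, as the added tests assert. A minimal Go sketch using standard golang.org/x/sys/unix calls (unix.Socket, unix.Bind, unix.Getsockname); it needs CAP_NET_RAW to create the socket, and ICMP plus 127.0.0.1 are arbitrary example choices:

```go
package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// Needs CAP_NET_RAW; ICMP is just an example protocol.
	fd, err := unix.Socket(unix.AF_INET, unix.SOCK_RAW, unix.IPPROTO_ICMP)
	if err != nil {
		panic(err)
	}
	defer unix.Close(fd)

	if err := unix.Bind(fd, &unix.SockaddrInet4{Addr: [4]byte{127, 0, 0, 1}}); err != nil {
		panic(err)
	}

	local, err := unix.Getsockname(fd)
	if err != nil {
		panic(err)
	}
	in4, ok := local.(*unix.SockaddrInet4)
	if !ok {
		panic("unexpected sockaddr type")
	}
	// Per the tests in the commit, the port field holds the protocol
	// number (ICMP == 1) rather than a real port.
	fmt.Printf("local addr: %v, protocol (port field): %d\n", in4.Addr, in4.Port)
}
```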
259,896
20.07.2021 23:11:14
25,200
a4d743db59bc8c05d646ca3d5cd94498cf411d02
Enable RACK by default in netstack.
[ { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/tcp/endpoint.go", "new_path": "pkg/tcpip/transport/tcp/endpoint.go", "diff": "@@ -478,7 +478,7 @@ type endpoint struct {\n// shutdownFlags represent the current shutdown state of the endpoint.\nshutdownFlags tcpip.ShutdownFlags\n- // tcpRecovery is the loss deteoction algorithm used by TCP.\n+ // tcpRecovery is the loss recovery algorithm used by TCP.\ntcpRecovery tcpip.TCPRecovery\n// sack holds TCP SACK related information for this endpoint.\n@@ -869,8 +869,6 @@ func newEndpoint(s *stack.Stack, netProto tcpip.NetworkProtocolNumber, waiterQue\ne.maxSynRetries = uint8(synRetries)\n}\n- s.TransportProtocolOption(ProtocolNumber, &e.tcpRecovery)\n-\nif p := s.GetTCPProbe(); p != nil {\ne.probe = p\n}\n@@ -2922,6 +2920,7 @@ func (e *endpoint) maybeEnableSACKPermitted(synOpts *header.TCPSynOptions) {\n}\nif bool(v) && synOpts.SACKPermitted {\ne.SACKPermitted = true\n+ e.stack.TransportProtocolOption(ProtocolNumber, &e.tcpRecovery)\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/tcp/protocol.go", "new_path": "pkg/tcpip/transport/tcp/protocol.go", "diff": "@@ -478,8 +478,7 @@ func NewProtocol(s *stack.Stack) stack.TransportProtocol {\nminRTO: MinRTO,\nmaxRTO: MaxRTO,\nmaxRetries: MaxRetries,\n- // TODO(gvisor.dev/issue/5243): Set recovery to tcpip.TCPRACKLossDetection.\n- recovery: 0,\n+ recovery: tcpip.TCPRACKLossDetection,\n}\np.dispatcher.init(s.Rand(), runtime.GOMAXPROCS(0))\nreturn &p\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/tcp/tcp_rack_test.go", "new_path": "pkg/tcpip/transport/tcp/tcp_rack_test.go", "diff": "@@ -36,9 +36,9 @@ const (\nlatency = 5 * time.Millisecond\n)\n-func setStackRACKPermitted(t *testing.T, c *context.Context) {\n+func setStackTCPRecovery(t *testing.T, c *context.Context, recovery int) {\nt.Helper()\n- opt := tcpip.TCPRACKLossDetection\n+ opt := tcpip.TCPRecovery(recovery)\nif err := c.Stack().SetTransportProtocolOption(header.TCPProtocolNumber, &opt); err != nil {\nt.Fatalf(\"c.s.SetTransportProtocolOption(%d, &%v(%v)): %s\", header.TCPProtocolNumber, opt, opt, err)\n}\n@@ -70,7 +70,6 @@ func TestRACKUpdate(t *testing.T) {\nclose(probeDone)\n})\nsetStackSACKPermitted(t, c, true)\n- setStackRACKPermitted(t, c)\ncreateConnectedWithSACKAndTS(c)\ndata := make([]byte, maxPayload)\n@@ -129,7 +128,6 @@ func TestRACKDetectReorder(t *testing.T) {\nclose(probeDone)\n})\nsetStackSACKPermitted(t, c, true)\n- setStackRACKPermitted(t, c)\ncreateConnectedWithSACKAndTS(c)\ndata := make([]byte, ackNumToVerify*maxPayload)\nfor i := range data {\n@@ -162,8 +160,8 @@ func TestRACKDetectReorder(t *testing.T) {\nfunc sendAndReceiveWithSACK(t *testing.T, c *context.Context, numPackets int, enableRACK bool) []byte {\nsetStackSACKPermitted(t, c, true)\n- if enableRACK {\n- setStackRACKPermitted(t, c)\n+ if !enableRACK {\n+ setStackTCPRecovery(t, c, 0)\n}\ncreateConnectedWithSACKAndTS(c)\n@@ -998,7 +996,6 @@ func TestRACKWithWindowFull(t *testing.T) {\ndefer c.Cleanup()\nsetStackSACKPermitted(t, c, true)\n- setStackRACKPermitted(t, c)\ncreateConnectedWithSACKAndTS(c)\nseq := seqnum.Value(context.TestInitialSequenceNumber).Add(1)\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/tcp/tcp_sack_test.go", "new_path": "pkg/tcpip/transport/tcp/tcp_sack_test.go", "diff": "@@ -61,6 +61,7 @@ func TestSackPermittedConnect(t *testing.T) {\ndefer c.Cleanup()\nsetStackSACKPermitted(t, c, sackEnabled)\n+ setStackTCPRecovery(t, c, 0)\nrep := 
createConnectedWithSACKPermittedOption(c)\ndata := []byte{1, 2, 3}\n@@ -105,6 +106,7 @@ func TestSackDisabledConnect(t *testing.T) {\ndefer c.Cleanup()\nsetStackSACKPermitted(t, c, sackEnabled)\n+ setStackTCPRecovery(t, c, 0)\nrep := c.CreateConnectedWithOptions(header.TCPSynOptions{})\n@@ -166,6 +168,7 @@ func TestSackPermittedAccept(t *testing.T) {\n}\n}\nsetStackSACKPermitted(t, c, sackEnabled)\n+ setStackTCPRecovery(t, c, 0)\nrep := c.AcceptWithOptions(tc.wndScale, header.TCPSynOptions{MSS: defaultIPv4MSS, SACKPermitted: tc.sackPermitted})\n// Now verify no SACK blocks are\n@@ -239,6 +242,7 @@ func TestSackDisabledAccept(t *testing.T) {\n}\nsetStackSACKPermitted(t, c, sackEnabled)\n+ setStackTCPRecovery(t, c, 0)\nrep := c.AcceptWithOptions(tc.wndScale, header.TCPSynOptions{MSS: defaultIPv4MSS})\n@@ -386,6 +390,7 @@ func TestSACKRecovery(t *testing.T) {\nlog.Printf(\"state: %+v\\n\", s)\n})\nsetStackSACKPermitted(t, c, true)\n+ setStackTCPRecovery(t, c, 0)\ncreateConnectedWithSACKAndTS(c)\nconst iterations = 3\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/proc_net.cc", "new_path": "test/syscalls/linux/proc_net.cc", "diff": "@@ -498,13 +498,7 @@ TEST(ProcSysNetIpv4Recovery, CanReadAndWrite) {\n// Check initial value is set to 1.\nEXPECT_THAT(PreadFd(fd.get(), &buf, sizeof(buf), 0),\nSyscallSucceedsWithValue(sizeof(to_write) + 1));\n- if (IsRunningOnGvisor()) {\n- // TODO(gvisor.dev/issue/5243): TCPRACKLossDetection = 1 should be turned on\n- // by default.\n- EXPECT_EQ(strcmp(buf, \"0\\n\"), 0);\n- } else {\nEXPECT_EQ(strcmp(buf, \"1\\n\"), 0);\n- }\n// Set tcp_recovery to one of the allowed constants.\nEXPECT_THAT(PwriteFd(fd.get(), &to_write, sizeof(to_write), 0),\n" } ]
Go
Apache License 2.0
google/gvisor
Enable RACK by default in netstack. PiperOrigin-RevId: 385944428
259,885
21.07.2021 12:51:41
25,200
a89b2f005b714dbe472ebda702020bb6fb1d9c0a
Use atomics when checking for parent setgid in VFS2 tmpfs file creation. Reported-by:
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/tmpfs/tmpfs.go", "new_path": "pkg/sentry/fsimpl/tmpfs/tmpfs.go", "diff": "@@ -397,8 +397,8 @@ func (i *inode) init(impl interface{}, fs *filesystem, kuid auth.KUID, kgid auth\n}\n// Inherit the group and setgid bit as in fs/inode.c:inode_init_owner().\n- if parentDir != nil && parentDir.inode.mode&linux.S_ISGID == linux.S_ISGID {\n- kgid = auth.KGID(parentDir.inode.gid)\n+ if parentDir != nil && atomic.LoadUint32(&parentDir.inode.mode)&linux.S_ISGID == linux.S_ISGID {\n+ kgid = auth.KGID(atomic.LoadUint32(&parentDir.inode.gid))\nif mode&linux.S_IFDIR == linux.S_IFDIR {\nmode |= linux.S_ISGID\n}\n" } ]
Go
Apache License 2.0
google/gvisor
Use atomics when checking for parent setgid in VFS2 tmpfs file creation. Reported-by: [email protected] PiperOrigin-RevId: 386075453
259,884
21.07.2021 14:17:13
25,200
c259978dbd793fedd8d1068b875c763ffba09d26
Fix required Linux version in networking docs.
[ { "change_type": "MODIFY", "old_path": "g3doc/user_guide/networking.md", "new_path": "g3doc/user_guide/networking.md", "diff": "@@ -61,7 +61,7 @@ Add the following `runtimeArgs` to your Docker configuration\n### Disable GSO {#gso}\n-If your Linux is older than 4.14.17, you can disable Generic Segmentation\n+If your Linux is older than 4.14.77, you can disable Generic Segmentation\nOffload (GSO) to run with a kernel that is newer than 3.17. Add the\n`--gso=false` flag to your Docker runtime configuration\n(`/etc/docker/daemon.json`) and restart the Docker daemon:\n" } ]
Go
Apache License 2.0
google/gvisor
Fix required Linux version in networking docs. PiperOrigin-RevId: 386093826
259,896
21.07.2021 18:03:48
25,200
f1f746dddcc0eb97c04b7d4a521962edb30cbea8
Add metric to count number of segments acknowledged by DSACK. Creates new metric "/tcp/segments_acked_with_dsack" to count the number of segments acked with DSACK. Added check to verify the metric is getting incremented when a DSACK is sent in the unit tests.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/socket/netstack/netstack.go", "new_path": "pkg/sentry/socket/netstack/netstack.go", "diff": "@@ -274,6 +274,7 @@ var Metrics = tcpip.Stats{\nTimeouts: mustCreateMetric(\"/netstack/tcp/timeouts\", \"Number of times RTO expired.\"),\nChecksumErrors: mustCreateMetric(\"/netstack/tcp/checksum_errors\", \"Number of segments dropped due to bad checksums.\"),\nFailedPortReservations: mustCreateMetric(\"/netstack/tcp/failed_port_reservations\", \"Number of time TCP failed to reserve a port.\"),\n+ SegmentsAckedWithDSACK: mustCreateMetric(\"/netstack/tcp/segments_acked_with_dsack\", \"Number of segments for which DSACK was received.\"),\n},\nUDP: tcpip.UDPStats{\nPacketsReceived: mustCreateMetric(\"/netstack/udp/packets_received\", \"Number of UDP datagrams received via HandlePacket.\"),\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/tcpip.go", "new_path": "pkg/tcpip/tcpip.go", "diff": "@@ -1845,6 +1845,10 @@ type TCPStats struct {\n// FailedPortReservations is the number of times TCP failed to reserve\n// a port.\nFailedPortReservations *StatCounter\n+\n+ // SegmentsAckedWithDSACK is the number of segments acknowledged with\n+ // DSACK.\n+ SegmentsAckedWithDSACK *StatCounter\n}\n// UDPStats collects UDP-specific stats.\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/tcp/snd.go", "new_path": "pkg/tcpip/transport/tcp/snd.go", "diff": "@@ -1154,6 +1154,13 @@ func (s *sender) walkSACK(rcvdSeg *segment) {\nidx := 0\nn := len(rcvdSeg.parsedOptions.SACKBlocks)\nif checkDSACK(rcvdSeg) {\n+ dsackBlock := rcvdSeg.parsedOptions.SACKBlocks[0]\n+ numDSACK := uint64(dsackBlock.End-dsackBlock.Start) / uint64(s.MaxPayloadSize)\n+ // numDSACK can be zero when DSACK is sent for subsegments.\n+ if numDSACK < 1 {\n+ numDSACK = 1\n+ }\n+ s.ep.stack.Stats().TCP.SegmentsAckedWithDSACK.IncrementBy(numDSACK)\ns.rc.setDSACKSeen(true)\nidx = 1\nn--\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/tcp/tcp_rack_test.go", "new_path": "pkg/tcpip/transport/tcp/tcp_rack_test.go", "diff": "@@ -540,6 +540,28 @@ func TestRACKDetectDSACK(t *testing.T) {\ncase invalidDSACKDetected:\nt.Fatalf(\"RACK DSACK detected when there is no duplicate SACK\")\n}\n+\n+ metricPollFn := func() error {\n+ tcpStats := c.Stack().Stats().TCP\n+ stats := []struct {\n+ stat *tcpip.StatCounter\n+ name string\n+ want uint64\n+ }{\n+ // Check DSACK was received for one segment.\n+ {tcpStats.SegmentsAckedWithDSACK, \"stats.TCP.SegmentsAckedWithDSACK\", 1},\n+ }\n+ for _, s := range stats {\n+ if got, want := s.stat.Value(), s.want; got != want {\n+ return fmt.Errorf(\"got %s.Value() = %d, want = %d\", s.name, got, want)\n+ }\n+ }\n+ return nil\n+ }\n+\n+ if err := testutil.Poll(metricPollFn, 1*time.Second); err != nil {\n+ t.Error(err)\n+ }\n}\n// TestRACKDetectDSACKWithOutOfOrder tests that RACK detects DSACK with out of\n@@ -680,6 +702,28 @@ func TestRACKDetectDSACKSingleDup(t *testing.T) {\ncase invalidDSACKDetected:\nt.Fatalf(\"RACK DSACK detected when there is no duplicate SACK\")\n}\n+\n+ metricPollFn := func() error {\n+ tcpStats := c.Stack().Stats().TCP\n+ stats := []struct {\n+ stat *tcpip.StatCounter\n+ name string\n+ want uint64\n+ }{\n+ // Check DSACK was received for a subsegment.\n+ {tcpStats.SegmentsAckedWithDSACK, \"stats.TCP.SegmentsAckedWithDSACK\", 1},\n+ }\n+ for _, s := range stats {\n+ if got, want := s.stat.Value(), s.want; got != want {\n+ return fmt.Errorf(\"got %s.Value() = %d, want = %d\", s.name, got, want)\n+ }\n+ }\n+ return nil\n+ 
}\n+\n+ if err := testutil.Poll(metricPollFn, 1*time.Second); err != nil {\n+ t.Error(err)\n+ }\n}\n// TestRACKDetectDSACKDupWithCumulativeACK tests DSACK for two non-contiguous\n" } ]
Go
Apache License 2.0
google/gvisor
Add metric to count number of segments acknowledged by DSACK. - Creates new metric "/tcp/segments_acked_with_dsack" to count the number of segments acked with DSACK. - Added check to verify the metric is getting incremented when a DSACK is sent in the unit tests. PiperOrigin-RevId: 386135949
259,885
22.07.2021 13:39:08
25,200
d5fb4623ea75571f9a04e5694c18f397ba204ad6
Replace kernel package types for clone and unshare with linux package types.
[ { "change_type": "MODIFY", "old_path": "pkg/abi/linux/clone.go", "new_path": "pkg/abi/linux/clone.go", "diff": "@@ -16,13 +16,16 @@ package linux\n// Clone constants per clone(2).\nconst (\n+ CSIGNAL = 0xff\n+\nCLONE_VM = 0x100\nCLONE_FS = 0x200\nCLONE_FILES = 0x400\nCLONE_SIGHAND = 0x800\n- CLONE_PARENT = 0x8000\n+ CLONE_PIDFD = 0x1000\nCLONE_PTRACE = 0x2000\nCLONE_VFORK = 0x4000\n+ CLONE_PARENT = 0x8000\nCLONE_THREAD = 0x10000\nCLONE_NEWNS = 0x20000\nCLONE_SYSVSEM = 0x40000\n@@ -32,10 +35,30 @@ const (\nCLONE_DETACHED = 0x400000\nCLONE_UNTRACED = 0x800000\nCLONE_CHILD_SETTID = 0x1000000\n+ CLONE_NEWCGROUP = 0x2000000\nCLONE_NEWUTS = 0x4000000\nCLONE_NEWIPC = 0x8000000\nCLONE_NEWUSER = 0x10000000\nCLONE_NEWPID = 0x20000000\nCLONE_NEWNET = 0x40000000\nCLONE_IO = 0x80000000\n+\n+ // Only passable via clone3(2).\n+ CLONE_CLEAR_SIGHAND = 0x100000000\n+ CLONE_INTO_CGROUP = 0x200000000\n)\n+\n+// CloneArgs is struct clone_args, from include/uapi/linux/sched.h.\n+type CloneArgs struct {\n+ Flags uint64\n+ Pidfd uint64\n+ ChildTID uint64\n+ ParentTID uint64\n+ ExitSignal uint64\n+ Stack uint64\n+ StackSize uint64\n+ TLS uint64\n+ SetTID uint64\n+ SetTIDSize uint64\n+ Cgroup uint64\n+}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/kernel/ptrace.go", "new_path": "pkg/sentry/kernel/ptrace.go", "diff": "@@ -768,14 +768,14 @@ const (\n// ptraceClone is called at the end of a clone or fork syscall to check if t\n// should enter PTRACE_EVENT_CLONE, PTRACE_EVENT_FORK, or PTRACE_EVENT_VFORK\n// stop. child is the new task.\n-func (t *Task) ptraceClone(kind ptraceCloneKind, child *Task, opts *CloneOptions) bool {\n+func (t *Task) ptraceClone(kind ptraceCloneKind, child *Task, args *linux.CloneArgs) bool {\nif !t.hasTracer() {\nreturn false\n}\nt.tg.pidns.owner.mu.Lock()\ndefer t.tg.pidns.owner.mu.Unlock()\nevent := false\n- if !opts.Untraced {\n+ if args.Flags&linux.CLONE_UNTRACED == 0 {\nswitch kind {\ncase ptraceCloneKindClone:\nif t.ptraceOpts.TraceClone {\n@@ -810,7 +810,7 @@ func (t *Task) ptraceClone(kind ptraceCloneKind, child *Task, opts *CloneOptions\n// clone(2)'s documentation of CLONE_UNTRACED and CLONE_PTRACE is\n// confusingly wrong; see kernel/fork.c:_do_fork() => copy_process() =>\n// include/linux/ptrace.h:ptrace_init_task().\n- if event || opts.InheritTracer {\n+ if event || args.Flags&linux.CLONE_PTRACE != 0 {\ntracer := t.Tracer()\nif tracer != nil {\nchild.ptraceTracer.Store(tracer)\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/kernel/task_clone.go", "new_path": "pkg/sentry/kernel/task_clone.go", "diff": "@@ -26,140 +26,39 @@ import (\n\"gvisor.dev/gvisor/pkg/usermem\"\n)\n-// SharingOptions controls what resources are shared by a new task created by\n-// Task.Clone, or an existing task affected by Task.Unshare.\n-type SharingOptions struct {\n- // If NewAddressSpace is true, the task should have an independent virtual\n- // address space.\n- NewAddressSpace bool\n-\n- // If NewSignalHandlers is true, the task should use an independent set of\n- // signal handlers.\n- NewSignalHandlers bool\n-\n- // If NewThreadGroup is true, the task should be the leader of its own\n- // thread group. TerminationSignal is the signal that the thread group\n- // will send to its parent when it exits. 
If NewThreadGroup is false,\n- // TerminationSignal is ignored.\n- NewThreadGroup bool\n- TerminationSignal linux.Signal\n-\n- // If NewPIDNamespace is true:\n- //\n- // - In the context of Task.Clone, the new task should be the init task\n- // (TID 1) in a new PID namespace.\n- //\n- // - In the context of Task.Unshare, the task should create a new PID\n- // namespace, and all subsequent clones of the task should be members of\n- // the new PID namespace.\n- NewPIDNamespace bool\n-\n- // If NewUserNamespace is true, the task should have an independent user\n- // namespace.\n- NewUserNamespace bool\n-\n- // If NewNetworkNamespace is true, the task should have an independent\n- // network namespace.\n- NewNetworkNamespace bool\n-\n- // If NewFiles is true, the task should use an independent file descriptor\n- // table.\n- NewFiles bool\n-\n- // If NewFSContext is true, the task should have an independent FSContext.\n- NewFSContext bool\n-\n- // If NewUTSNamespace is true, the task should have an independent UTS\n- // namespace.\n- NewUTSNamespace bool\n-\n- // If NewIPCNamespace is true, the task should have an independent IPC\n- // namespace.\n- NewIPCNamespace bool\n-}\n-\n-// CloneOptions controls the behavior of Task.Clone.\n-type CloneOptions struct {\n- // SharingOptions defines the set of resources that the new task will share\n- // with its parent.\n- SharingOptions\n-\n- // Stack is the initial stack pointer of the new task. If Stack is 0, the\n- // new task will start with the same stack pointer as its parent.\n- Stack hostarch.Addr\n-\n- // If SetTLS is true, set the new task's TLS (thread-local storage)\n- // descriptor to TLS. If SetTLS is false, TLS is ignored.\n- SetTLS bool\n- TLS hostarch.Addr\n-\n- // If ChildClearTID is true, when the child exits, 0 is written to the\n- // address ChildTID in the child's memory, and if the write is successful a\n- // futex wake on the same address is performed.\n- //\n- // If ChildSetTID is true, the child's thread ID (in the child's PID\n- // namespace) is written to address ChildTID in the child's memory. (As in\n- // Linux, failed writes are silently ignored.)\n- ChildClearTID bool\n- ChildSetTID bool\n- ChildTID hostarch.Addr\n-\n- // If ParentSetTID is true, the child's thread ID (in the parent's PID\n- // namespace) is written to address ParentTID in the parent's memory. (As\n- // in Linux, failed writes are silently ignored.)\n- //\n- // Older versions of the clone(2) man page state that CLONE_PARENT_SETTID\n- // causes the child's thread ID to be written to ptid in both the parent\n- // and child's memory, but this is a documentation error fixed by\n- // 87ab04792ced (\"clone.2: Fix description of CLONE_PARENT_SETTID\").\n- ParentSetTID bool\n- ParentTID hostarch.Addr\n-\n- // If Vfork is true, place the parent in vforkStop until the cloned task\n- // releases its TaskImage.\n- Vfork bool\n-\n- // If Untraced is true, do not report PTRACE_EVENT_CLONE/FORK/VFORK for\n- // this clone(), and do not ptrace-attach the caller's tracer to the new\n- // task. (PTRACE_EVENT_VFORK_DONE will still be reported if appropriate).\n- Untraced bool\n-\n- // If InheritTracer is true, ptrace-attach the caller's tracer to the new\n- // task, even if no PTRACE_EVENT_CLONE/FORK/VFORK event would be reported\n- // for it. 
If both Untraced and InheritTracer are true, no event will be\n- // reported, but tracer inheritance will still occur.\n- InheritTracer bool\n-}\n-\n// Clone implements the clone(2) syscall and returns the thread ID of the new\n// task in t's PID namespace. Clone may return both a non-zero thread ID and a\n// non-nil error.\n//\n// Preconditions: The caller must be running Task.doSyscallInvoke on the task\n// goroutine.\n-func (t *Task) Clone(opts *CloneOptions) (ThreadID, *SyscallControl, error) {\n+func (t *Task) Clone(args *linux.CloneArgs) (ThreadID, *SyscallControl, error) {\n// Since signal actions may refer to application signal handlers by virtual\n// address, any set of signal handlers must refer to the same address\n// space.\n- if !opts.NewSignalHandlers && opts.NewAddressSpace {\n+ if args.Flags&(linux.CLONE_SIGHAND|linux.CLONE_VM) == linux.CLONE_SIGHAND {\nreturn 0, nil, linuxerr.EINVAL\n}\n// In order for the behavior of thread-group-directed signals to be sane,\n// all tasks in a thread group must share signal handlers.\n- if !opts.NewThreadGroup && opts.NewSignalHandlers {\n+ if args.Flags&(linux.CLONE_THREAD|linux.CLONE_SIGHAND) == linux.CLONE_THREAD {\nreturn 0, nil, linuxerr.EINVAL\n}\n// All tasks in a thread group must be in the same PID namespace.\n- if !opts.NewThreadGroup && (opts.NewPIDNamespace || t.childPIDNamespace != nil) {\n+ if (args.Flags&linux.CLONE_THREAD != 0) && (args.Flags&linux.CLONE_NEWPID != 0 || t.childPIDNamespace != nil) {\nreturn 0, nil, linuxerr.EINVAL\n}\n// The two different ways of specifying a new PID namespace are\n// incompatible.\n- if opts.NewPIDNamespace && t.childPIDNamespace != nil {\n+ if args.Flags&linux.CLONE_NEWPID != 0 && t.childPIDNamespace != nil {\nreturn 0, nil, linuxerr.EINVAL\n}\n// Thread groups and FS contexts cannot span user namespaces.\n- if opts.NewUserNamespace && (!opts.NewThreadGroup || !opts.NewFSContext) {\n+ if args.Flags&linux.CLONE_NEWUSER != 0 && args.Flags&(linux.CLONE_THREAD|linux.CLONE_FS) != 0 {\n+ return 0, nil, linuxerr.EINVAL\n+ }\n+ // args.ExitSignal must be a valid signal.\n+ if args.ExitSignal != 0 && !linux.Signal(args.ExitSignal).IsValid() {\nreturn 0, nil, linuxerr.EINVAL\n}\n@@ -174,7 +73,7 @@ func (t *Task) Clone(opts *CloneOptions) (ThreadID, *SyscallControl, error) {\n// user_namespaces(7)\ncreds := t.Credentials()\nuserns := creds.UserNamespace\n- if opts.NewUserNamespace {\n+ if args.Flags&linux.CLONE_NEWUSER != 0 {\nvar err error\n// \"EPERM (since Linux 3.9): CLONE_NEWUSER was specified in flags and\n// the caller is in a chroot environment (i.e., the caller's root\n@@ -189,21 +88,19 @@ func (t *Task) Clone(opts *CloneOptions) (ThreadID, *SyscallControl, error) {\nreturn 0, nil, err\n}\n}\n- if (opts.NewPIDNamespace || opts.NewNetworkNamespace || opts.NewUTSNamespace) && !creds.HasCapabilityIn(linux.CAP_SYS_ADMIN, userns) {\n+ if args.Flags&(linux.CLONE_NEWPID|linux.CLONE_NEWNET|linux.CLONE_NEWUTS|linux.CLONE_NEWIPC) != 0 && !creds.HasCapabilityIn(linux.CAP_SYS_ADMIN, userns) {\nreturn 0, nil, linuxerr.EPERM\n}\nutsns := t.UTSNamespace()\n- if opts.NewUTSNamespace {\n+ if args.Flags&linux.CLONE_NEWUTS != 0 {\n// Note that this must happen after NewUserNamespace so we get\n// the new userns if there is one.\nutsns = t.UTSNamespace().Clone(userns)\n}\nipcns := t.IPCNamespace()\n- if opts.NewIPCNamespace {\n- // Note that \"If CLONE_NEWIPC is set, then create the process in a new IPC\n- // namespace\"\n+ if args.Flags&linux.CLONE_NEWIPC != 0 {\nipcns = NewIPCNamespace(userns)\n} else 
{\nipcns.IncRef()\n@@ -214,7 +111,7 @@ func (t *Task) Clone(opts *CloneOptions) (ThreadID, *SyscallControl, error) {\ndefer cu.Clean()\nnetns := t.NetworkNamespace()\n- if opts.NewNetworkNamespace {\n+ if args.Flags&linux.CLONE_NEWNET != 0 {\nnetns = inet.NewNamespace(netns)\n}\n@@ -227,7 +124,7 @@ func (t *Task) Clone(opts *CloneOptions) (ThreadID, *SyscallControl, error) {\n})\n}\n- image, err := t.image.Fork(t, t.k, !opts.NewAddressSpace)\n+ image, err := t.image.Fork(t, t.k, args.Flags&linux.CLONE_VM != 0)\nif err != nil {\nreturn 0, nil, err\n}\n@@ -236,17 +133,17 @@ func (t *Task) Clone(opts *CloneOptions) (ThreadID, *SyscallControl, error) {\n})\n// clone() returns 0 in the child.\nimage.Arch.SetReturn(0)\n- if opts.Stack != 0 {\n- image.Arch.SetStack(uintptr(opts.Stack))\n+ if args.Stack != 0 {\n+ image.Arch.SetStack(uintptr(args.Stack))\n}\n- if opts.SetTLS {\n- if !image.Arch.SetTLS(uintptr(opts.TLS)) {\n+ if args.Flags&linux.CLONE_SETTLS != 0 {\n+ if !image.Arch.SetTLS(uintptr(args.TLS)) {\nreturn 0, nil, linuxerr.EPERM\n}\n}\nvar fsContext *FSContext\n- if opts.NewFSContext {\n+ if args.Flags&linux.CLONE_FS == 0 {\nfsContext = t.fsContext.Fork()\n} else {\nfsContext = t.fsContext\n@@ -254,7 +151,7 @@ func (t *Task) Clone(opts *CloneOptions) (ThreadID, *SyscallControl, error) {\n}\nvar fdTable *FDTable\n- if opts.NewFiles {\n+ if args.Flags&linux.CLONE_FILES == 0 {\nfdTable = t.fdTable.Fork(t)\n} else {\nfdTable = t.fdTable\n@@ -264,22 +161,22 @@ func (t *Task) Clone(opts *CloneOptions) (ThreadID, *SyscallControl, error) {\npidns := t.tg.pidns\nif t.childPIDNamespace != nil {\npidns = t.childPIDNamespace\n- } else if opts.NewPIDNamespace {\n+ } else if args.Flags&linux.CLONE_NEWPID != 0 {\npidns = pidns.NewChild(userns)\n}\ntg := t.tg\nrseqAddr := hostarch.Addr(0)\nrseqSignature := uint32(0)\n- if opts.NewThreadGroup {\n+ if args.Flags&linux.CLONE_THREAD == 0 {\nif tg.mounts != nil {\ntg.mounts.IncRef()\n}\nsh := t.tg.signalHandlers\n- if opts.NewSignalHandlers {\n+ if args.Flags&linux.CLONE_SIGHAND == 0 {\nsh = sh.Fork()\n}\n- tg = t.k.NewThreadGroup(tg.mounts, pidns, sh, opts.TerminationSignal, tg.limits.GetCopy())\n+ tg = t.k.NewThreadGroup(tg.mounts, pidns, sh, linux.Signal(args.ExitSignal), tg.limits.GetCopy())\ntg.oomScoreAdj = atomic.LoadInt32(&t.tg.oomScoreAdj)\nrseqAddr = t.rseqAddr\nrseqSignature = t.rseqSignature\n@@ -304,7 +201,7 @@ func (t *Task) Clone(opts *CloneOptions) (ThreadID, *SyscallControl, error) {\nRSeqSignature: rseqSignature,\nContainerID: t.ContainerID(),\n}\n- if opts.NewThreadGroup {\n+ if args.Flags&linux.CLONE_THREAD == 0 {\ncfg.Parent = t\n} else {\ncfg.InheritParent = t\n@@ -322,7 +219,7 @@ func (t *Task) Clone(opts *CloneOptions) (ThreadID, *SyscallControl, error) {\n//\n// However kernel/fork.c:copy_process() adds a limitation to this:\n// \"sigaltstack should be cleared when sharing the same VM\".\n- if opts.NewAddressSpace || opts.Vfork {\n+ if args.Flags&linux.CLONE_VM == 0 || args.Flags&linux.CLONE_VFORK != 0 {\nnt.SetSignalStack(t.SignalStack())\n}\n@@ -347,35 +244,35 @@ func (t *Task) Clone(opts *CloneOptions) (ThreadID, *SyscallControl, error) {\ncopiedFilters := append([]bpf.Program(nil), f.([]bpf.Program)...)\nnt.syscallFilters.Store(copiedFilters)\n}\n- if opts.Vfork {\n+ if args.Flags&linux.CLONE_VFORK != 0 {\nnt.vforkParent = t\n}\n- if opts.ChildClearTID {\n- nt.SetClearTID(opts.ChildTID)\n+ if args.Flags&linux.CLONE_CHILD_CLEARTID != 0 {\n+ nt.SetClearTID(hostarch.Addr(args.ChildTID))\n}\n- if opts.ChildSetTID {\n+ if 
args.Flags&linux.CLONE_CHILD_SETTID != 0 {\nctid := nt.ThreadID()\n- ctid.CopyOut(nt.CopyContext(t, usermem.IOOpts{AddressSpaceActive: false}), opts.ChildTID)\n+ ctid.CopyOut(nt.CopyContext(t, usermem.IOOpts{AddressSpaceActive: false}), hostarch.Addr(args.ChildTID))\n}\nntid := t.tg.pidns.IDOfTask(nt)\n- if opts.ParentSetTID {\n- ntid.CopyOut(t, opts.ParentTID)\n+ if args.Flags&linux.CLONE_PARENT_SETTID != 0 {\n+ ntid.CopyOut(t, hostarch.Addr(args.ParentTID))\n}\nkind := ptraceCloneKindClone\n- if opts.Vfork {\n+ if args.Flags&linux.CLONE_VFORK != 0 {\nkind = ptraceCloneKindVfork\n- } else if opts.TerminationSignal == linux.SIGCHLD {\n+ } else if linux.Signal(args.ExitSignal) == linux.SIGCHLD {\nkind = ptraceCloneKindFork\n}\n- if t.ptraceClone(kind, nt, opts) {\n- if opts.Vfork {\n+ if t.ptraceClone(kind, nt, args) {\n+ if args.Flags&linux.CLONE_VFORK != 0 {\nreturn ntid, &SyscallControl{next: &runSyscallAfterPtraceEventClone{vforkChild: nt, vforkChildTID: ntid}}, nil\n}\nreturn ntid, &SyscallControl{next: &runSyscallAfterPtraceEventClone{}}, nil\n}\n- if opts.Vfork {\n+ if args.Flags&linux.CLONE_VFORK != 0 {\nt.maybeBeginVforkStop(nt)\nreturn ntid, &SyscallControl{next: &runSyscallAfterVforkStop{childTID: ntid}}, nil\n}\n@@ -446,27 +343,35 @@ func (r *runSyscallAfterVforkStop) execute(t *Task) taskRunState {\n}\n// Unshare changes the set of resources t shares with other tasks, as specified\n-// by opts.\n+// by flags.\n//\n// Preconditions: The caller must be running on the task goroutine.\n-func (t *Task) Unshare(opts *SharingOptions) error {\n- // In Linux unshare(2), NewThreadGroup implies NewSignalHandlers and\n- // NewSignalHandlers implies NewAddressSpace. All three flags are no-ops if\n- // t is the only task using its MM, which due to clone(2)'s rules imply\n- // that it is also the only task using its signal handlers / in its thread\n- // group, and cause EINVAL to be returned otherwise.\n+func (t *Task) Unshare(flags int32) error {\n+ // \"CLONE_THREAD, CLONE_SIGHAND, and CLONE_VM can be specified in flags if\n+ // the caller is single threaded (i.e., it is not sharing its address space\n+ // with another process or thread). In this case, these flags have no\n+ // effect. (Note also that specifying CLONE_THREAD automatically implies\n+ // CLONE_VM, and specifying CLONE_VM automatically implies CLONE_SIGHAND.)\n+ // If the process is multithreaded, then the use of these flags results in\n+ // an error.\" - unshare(2). This is incorrect (cf.\n+ // kernel/fork.c:ksys_unshare()):\n+ //\n+ // - CLONE_THREAD does not imply CLONE_VM.\n+ //\n+ // - CLONE_SIGHAND implies CLONE_THREAD.\n+ //\n+ // - Only CLONE_VM requires that the caller is not sharing its address\n+ // space with another thread. CLONE_SIGHAND requires that the caller is not\n+ // sharing its signal handlers, and CLONE_THREAD requires that the caller\n+ // is the only thread in its thread group.\n//\n// Since we don't count the number of tasks using each address space or set\n- // of signal handlers, we reject NewSignalHandlers and NewAddressSpace\n- // altogether, and interpret NewThreadGroup as requiring that t be the only\n- // member of its thread group. 
This seems to be logically coherent, in the\n- // sense that clone(2) allows a task to share signal handlers and address\n- // spaces with tasks in other thread groups.\n- if opts.NewAddressSpace || opts.NewSignalHandlers {\n+ // of signal handlers, we reject CLONE_VM and CLONE_SIGHAND altogether.\n+ if flags&(linux.CLONE_VM|linux.CLONE_SIGHAND) != 0 {\nreturn linuxerr.EINVAL\n}\ncreds := t.Credentials()\n- if opts.NewThreadGroup {\n+ if flags&linux.CLONE_THREAD != 0 {\nt.tg.signalHandlers.mu.Lock()\nif t.tg.tasksCount != 1 {\nt.tg.signalHandlers.mu.Unlock()\n@@ -476,7 +381,7 @@ func (t *Task) Unshare(opts *SharingOptions) error {\n// This isn't racy because we're the only living task, and therefore\n// the only task capable of creating new ones, in our thread group.\n}\n- if opts.NewUserNamespace {\n+ if flags&linux.CLONE_NEWUSER != 0 {\nif t.IsChrooted() {\nreturn linuxerr.EPERM\n}\n@@ -492,7 +397,7 @@ func (t *Task) Unshare(opts *SharingOptions) error {\ncreds = t.Credentials()\n}\nhaveCapSysAdmin := t.HasCapability(linux.CAP_SYS_ADMIN)\n- if opts.NewPIDNamespace {\n+ if flags&linux.CLONE_NEWPID != 0 {\nif !haveCapSysAdmin {\nreturn linuxerr.EPERM\n}\n@@ -500,14 +405,14 @@ func (t *Task) Unshare(opts *SharingOptions) error {\n}\nt.mu.Lock()\n// Can't defer unlock: DecRefs must occur without holding t.mu.\n- if opts.NewNetworkNamespace {\n+ if flags&linux.CLONE_NEWNET != 0 {\nif !haveCapSysAdmin {\nt.mu.Unlock()\nreturn linuxerr.EPERM\n}\nt.netns = inet.NewNamespace(t.netns)\n}\n- if opts.NewUTSNamespace {\n+ if flags&linux.CLONE_NEWUTS != 0 {\nif !haveCapSysAdmin {\nt.mu.Unlock()\nreturn linuxerr.EPERM\n@@ -516,7 +421,7 @@ func (t *Task) Unshare(opts *SharingOptions) error {\n// new user namespace is used if there is one.\nt.utsns = t.utsns.Clone(creds.UserNamespace)\n}\n- if opts.NewIPCNamespace {\n+ if flags&linux.CLONE_NEWIPC != 0 {\nif !haveCapSysAdmin {\nt.mu.Unlock()\nreturn linuxerr.EPERM\n@@ -527,12 +432,12 @@ func (t *Task) Unshare(opts *SharingOptions) error {\nt.ipcns = NewIPCNamespace(creds.UserNamespace)\n}\nvar oldFDTable *FDTable\n- if opts.NewFiles {\n+ if flags&linux.CLONE_FILES != 0 {\noldFDTable = t.fdTable\nt.fdTable = oldFDTable.Fork(t)\n}\nvar oldFSContext *FSContext\n- if opts.NewFSContext {\n+ if flags&linux.CLONE_FS != 0 {\noldFSContext = t.fsContext\nt.fsContext = oldFSContext.Fork()\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/syscalls/linux/sys_thread.go", "new_path": "pkg/sentry/syscalls/linux/sys_thread.go", "diff": "@@ -31,11 +31,6 @@ import (\n\"gvisor.dev/gvisor/pkg/usermem\"\n)\n-const (\n- // exitSignalMask is the signal mask to be sent at exit. 
Same as CSIGNAL in linux.\n- exitSignalMask = 0xff\n-)\n-\nvar (\n// ExecMaxTotalSize is the maximum length of all argv and envv entries.\n//\n@@ -201,33 +196,16 @@ func ExitGroup(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sys\n// clone is used by Clone, Fork, and VFork.\nfunc clone(t *kernel.Task, flags int, stack hostarch.Addr, parentTID hostarch.Addr, childTID hostarch.Addr, tls hostarch.Addr) (uintptr, *kernel.SyscallControl, error) {\n- opts := kernel.CloneOptions{\n- SharingOptions: kernel.SharingOptions{\n- NewAddressSpace: flags&linux.CLONE_VM == 0,\n- NewSignalHandlers: flags&linux.CLONE_SIGHAND == 0,\n- NewThreadGroup: flags&linux.CLONE_THREAD == 0,\n- TerminationSignal: linux.Signal(flags & exitSignalMask),\n- NewPIDNamespace: flags&linux.CLONE_NEWPID == linux.CLONE_NEWPID,\n- NewUserNamespace: flags&linux.CLONE_NEWUSER == linux.CLONE_NEWUSER,\n- NewNetworkNamespace: flags&linux.CLONE_NEWNET == linux.CLONE_NEWNET,\n- NewFiles: flags&linux.CLONE_FILES == 0,\n- NewFSContext: flags&linux.CLONE_FS == 0,\n- NewUTSNamespace: flags&linux.CLONE_NEWUTS == linux.CLONE_NEWUTS,\n- NewIPCNamespace: flags&linux.CLONE_NEWIPC == linux.CLONE_NEWIPC,\n- },\n- Stack: stack,\n- SetTLS: flags&linux.CLONE_SETTLS == linux.CLONE_SETTLS,\n- TLS: tls,\n- ChildClearTID: flags&linux.CLONE_CHILD_CLEARTID == linux.CLONE_CHILD_CLEARTID,\n- ChildSetTID: flags&linux.CLONE_CHILD_SETTID == linux.CLONE_CHILD_SETTID,\n- ChildTID: childTID,\n- ParentSetTID: flags&linux.CLONE_PARENT_SETTID == linux.CLONE_PARENT_SETTID,\n- ParentTID: parentTID,\n- Vfork: flags&linux.CLONE_VFORK == linux.CLONE_VFORK,\n- Untraced: flags&linux.CLONE_UNTRACED == linux.CLONE_UNTRACED,\n- InheritTracer: flags&linux.CLONE_PTRACE == linux.CLONE_PTRACE,\n- }\n- ntid, ctrl, err := t.Clone(&opts)\n+ args := linux.CloneArgs{\n+ Flags: uint64(uint32(flags) &^ linux.CSIGNAL),\n+ Pidfd: uint64(parentTID),\n+ ChildTID: uint64(childTID),\n+ ParentTID: uint64(parentTID),\n+ ExitSignal: uint64(flags & linux.CSIGNAL),\n+ Stack: uint64(stack),\n+ TLS: uint64(tls),\n+ }\n+ ntid, ctrl, err := t.Clone(&args)\nreturn uintptr(ntid), ctrl, err\n}\n@@ -460,29 +438,16 @@ func SetTidAddress(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel\n// Unshare implements linux syscall unshare(2).\nfunc Unshare(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {\nflags := args[0].Int()\n- opts := kernel.SharingOptions{\n- NewAddressSpace: flags&linux.CLONE_VM == linux.CLONE_VM,\n- NewSignalHandlers: flags&linux.CLONE_SIGHAND == linux.CLONE_SIGHAND,\n- NewThreadGroup: flags&linux.CLONE_THREAD == linux.CLONE_THREAD,\n- NewPIDNamespace: flags&linux.CLONE_NEWPID == linux.CLONE_NEWPID,\n- NewUserNamespace: flags&linux.CLONE_NEWUSER == linux.CLONE_NEWUSER,\n- NewNetworkNamespace: flags&linux.CLONE_NEWNET == linux.CLONE_NEWNET,\n- NewFiles: flags&linux.CLONE_FILES == linux.CLONE_FILES,\n- NewFSContext: flags&linux.CLONE_FS == linux.CLONE_FS,\n- NewUTSNamespace: flags&linux.CLONE_NEWUTS == linux.CLONE_NEWUTS,\n- NewIPCNamespace: flags&linux.CLONE_NEWIPC == linux.CLONE_NEWIPC,\n- }\n// \"CLONE_NEWPID automatically implies CLONE_THREAD as well.\" - unshare(2)\n- if opts.NewPIDNamespace {\n- opts.NewThreadGroup = true\n+ if flags&linux.CLONE_NEWPID != 0 {\n+ flags |= linux.CLONE_THREAD\n}\n// \"... specifying CLONE_NEWUSER automatically implies CLONE_THREAD. 
Since\n// Linux 3.9, CLONE_NEWUSER also automatically implies CLONE_FS.\"\n- if opts.NewUserNamespace {\n- opts.NewThreadGroup = true\n- opts.NewFSContext = true\n+ if flags&linux.CLONE_NEWUSER != 0 {\n+ flags |= linux.CLONE_THREAD | linux.CLONE_FS\n}\n- return 0, nil, t.Unshare(&opts)\n+ return 0, nil, t.Unshare(flags)\n}\n// SchedYield implements linux syscall sched_yield(2).\n" } ]
Go
Apache License 2.0
google/gvisor
Replace kernel package types for clone and unshare with linux package types. PiperOrigin-RevId: 386312456
259,891
22.07.2021 14:15:00
25,200
70626b3da217373eb74420e2c3426cc9283265fa
buildkite: move golang installation to VM boot Rather than re-downloading and installing Go for each pipeline run, we should just do it at boot.
[ { "change_type": "MODIFY", "old_path": ".buildkite/hooks/pre-command", "new_path": ".buildkite/hooks/pre-command", "diff": "@@ -11,14 +11,6 @@ install_pkgs make \"linux-headers-$(uname -r)\" linux-libc-dev \\\ngraphviz jq curl binutils gnupg gnupg-agent gcc pkg-config \\\napt-transport-https ca-certificates software-properties-common\n-# Install Go 1.16, as only 1.13 is available via apt. If it's installed via apt,\n-# remove it.\n-sudo apt-get autoremove -y golang-go\n-declare -r go_archive=go1.16.6.linux-amd64.tar.gz\n-wget -c \"https://golang.org/dl/${go_archive}\"\n-sudo tar -xzf \"${go_archive}\" -C /usr/local\n-sudo ln -s /usr/local/go/bin/go /usr/bin/go\n-\n# Setup for parallelization with PARTITION and TOTAL_PARTITIONS.\nexport PARTITION=${BUILDKITE_PARALLEL_JOB:-0}\nPARTITION=$((${PARTITION}+1)) # 1-indexed, but PARALLEL_JOB is 0-indexed.\n" } ]
Go
Apache License 2.0
google/gvisor
buildkite: move golang installation to VM boot Rather than re-downloading and installing Go for each pipeline run, we should just do it at boot. PiperOrigin-RevId: 386320005
259,853
22.07.2021 15:37:37
25,200
47f025461e6fdf8da99c780b17c2227696e83845
runsc: Wait for child processes without timeouts * First, we don't need to poll child processes. * Second, the 5-second timeout is too small if a host is overloaded. * Third, this can hide bugs in the code when we wait for a process that isn't going to exit.
[ { "change_type": "MODIFY", "old_path": "runsc/container/container.go", "new_path": "runsc/container/container.go", "diff": "@@ -789,30 +789,31 @@ func (c *Container) stop() error {\n}\nfunc (c *Container) waitForStopped() error {\n- ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)\n- defer cancel()\n- b := backoff.WithContext(backoff.NewConstantBackOff(100*time.Millisecond), ctx)\n- op := func() error {\n+ if c.GoferPid == 0 {\n+ return nil\n+ }\n+\nif c.IsSandboxRunning() {\nif err := c.SignalContainer(unix.Signal(0), false); err == nil {\nreturn fmt.Errorf(\"container is still running\")\n}\n}\n- if c.GoferPid == 0 {\n- return nil\n- }\n+\nif c.goferIsChild {\n// The gofer process is a child of the current process,\n// so we can wait it and collect its zombie.\n- wpid, err := unix.Wait4(int(c.GoferPid), nil, unix.WNOHANG, nil)\n- if err != nil {\n+ if _, err := unix.Wait4(int(c.GoferPid), nil, 0, nil); err != nil {\nreturn fmt.Errorf(\"error waiting the gofer process: %v\", err)\n}\n- if wpid == 0 {\n- return fmt.Errorf(\"gofer is still running\")\n+ c.GoferPid = 0\n+ return nil\n}\n- } else if err := unix.Kill(c.GoferPid, 0); err == nil {\n+ ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)\n+ defer cancel()\n+ b := backoff.WithContext(backoff.NewConstantBackOff(100*time.Millisecond), ctx)\n+ op := func() error {\n+ if err := unix.Kill(c.GoferPid, 0); err == nil {\nreturn fmt.Errorf(\"gofer is still running\")\n}\nc.GoferPid = 0\n" }, { "change_type": "MODIFY", "old_path": "runsc/sandbox/sandbox.go", "new_path": "runsc/sandbox/sandbox.go", "diff": "@@ -1157,10 +1157,6 @@ func (s *Sandbox) destroyContainer(cid string) error {\n}\nfunc (s *Sandbox) waitForStopped() error {\n- ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)\n- defer cancel()\n- b := backoff.WithContext(backoff.NewConstantBackOff(100*time.Millisecond), ctx)\n- op := func() error {\nif s.child {\ns.statusMu.Lock()\ndefer s.statusMu.Unlock()\n@@ -1169,15 +1165,18 @@ func (s *Sandbox) waitForStopped() error {\n}\n// The sandbox process is a child of the current process,\n// so we can wait it and collect its zombie.\n- wpid, err := unix.Wait4(int(s.Pid), &s.status, unix.WNOHANG, nil)\n- if err != nil {\n+ if _, err := unix.Wait4(int(s.Pid), &s.status, 0, nil); err != nil {\nreturn fmt.Errorf(\"error waiting the sandbox process: %v\", err)\n}\n- if wpid == 0 {\n- return fmt.Errorf(\"sandbox is still running\")\n- }\ns.Pid = 0\n- } else if s.IsRunning() {\n+ return nil\n+ }\n+\n+ ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)\n+ defer cancel()\n+ b := backoff.WithContext(backoff.NewConstantBackOff(100*time.Millisecond), ctx)\n+ op := func() error {\n+ if s.IsRunning() {\nreturn fmt.Errorf(\"sandbox is still running\")\n}\nreturn nil\n" } ]
Go
Apache License 2.0
google/gvisor
runsc: Wait for child processes without timeouts * First, we don't need to poll child processes. * Second, the 5-second timeout is too small if a host is overloaded. * Third, this can hide bugs in the code when we wait for a process that isn't going to exit. PiperOrigin-RevId: 386337586
259,853
22.07.2021 15:53:06
25,200
0690c25e062ad1b8dc162b4b4d70ab59879b9bff
kvm: set CR0.NE = 1 CR0.NE enables internal x87 floating point error reporting when set, else enables PC style x87 error detection. On AMD, the #MF exception isn't generated if CR0.NE isn't set.
[ { "change_type": "MODIFY", "old_path": "pkg/ring0/kernel_amd64.go", "new_path": "pkg/ring0/kernel_amd64.go", "diff": "@@ -177,7 +177,7 @@ func (c *CPU) TSS() (uint64, uint16, *SegmentDescriptor) {\n//\n//go:nosplit\nfunc (c *CPU) CR0() uint64 {\n- return _CR0_PE | _CR0_PG | _CR0_AM | _CR0_ET\n+ return _CR0_PE | _CR0_PG | _CR0_AM | _CR0_ET | _CR0_NE\n}\n// CR4 returns the CPU's CR4 value.\n" }, { "change_type": "MODIFY", "old_path": "pkg/ring0/x86.go", "new_path": "pkg/ring0/x86.go", "diff": "@@ -25,6 +25,7 @@ import (\nconst (\n_CR0_PE = 1 << 0\n_CR0_ET = 1 << 4\n+ _CR0_NE = 1 << 5\n_CR0_AM = 1 << 18\n_CR0_PG = 1 << 31\n" } ]
Go
Apache License 2.0
google/gvisor
kvm: set CR0.NE = 1 CR0.NE enables internal x87 floating point error reporting when set, else enables PC style x87 error detection. On AMD, the #MF exception isn't generated if CR0.NE isn't set. PiperOrigin-RevId: 386340269
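As context for the CR0 change above: the fix only ORs one more architectural bit into the mask that ring0 loads into CR0 for guest vCPUs. The standalone Go sketch below is not part of the commit; it merely mirrors the bit positions defined in pkg/ring0/x86.go (the lowercase constant names are invented for the example) and prints the mask with and without CR0.NE, so the effect of the one-line change is visible.

package main

import "fmt"

// Architectural CR0 bit positions, mirroring pkg/ring0/x86.go.
const (
	cr0PE = 1 << 0  // protection enable
	cr0ET = 1 << 4  // extension type
	cr0NE = 1 << 5  // numeric error: report x87 errors internally via #MF
	cr0AM = 1 << 18 // alignment mask
	cr0PG = 1 << 31 // paging
)

func main() {
	before := uint64(cr0PE | cr0PG | cr0AM | cr0ET) // value before the change
	after := before | cr0NE                         // value after the change
	fmt.Printf("CR0 without NE: %#x\n", before)
	fmt.Printf("CR0 with NE:    %#x\n", after)
}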
259,985
23.07.2021 10:21:31
25,200
c3c5c55d134dcc22480984e3882072b936bae899
Handle EINTR from socket syscalls in send/recv benchmark. The benchmark check fails if any of the socket syscalls fail with EINTR. We see this manifest in S/R lifecycles since S/R has a high probability of aborting these syscalls with EINTR.
[ { "change_type": "MODIFY", "old_path": "test/perf/linux/send_recv_benchmark.cc", "new_path": "test/perf/linux/send_recv_benchmark.cc", "diff": "@@ -80,6 +80,9 @@ void BM_Recvmsg(benchmark::State& state) {\nint64_t bytes_received = 0;\nfor (auto ignored : state) {\nint n = recvmsg(recv_socket.get(), recv_msg.header(), 0);\n+ if (n == -1 && errno == EINTR) {\n+ continue;\n+ }\nTEST_CHECK(n > 0);\nbytes_received += n;\n}\n@@ -108,6 +111,9 @@ void BM_Sendmsg(benchmark::State& state) {\nint64_t bytes_sent = 0;\nfor (auto ignored : state) {\nint n = sendmsg(send_socket.get(), send_msg.header(), 0);\n+ if (n == -1 && errno == EINTR) {\n+ continue;\n+ }\nTEST_CHECK(n > 0);\nbytes_sent += n;\n}\n@@ -137,6 +143,9 @@ void BM_Recvfrom(benchmark::State& state) {\nfor (auto ignored : state) {\nint n = recvfrom(recv_socket.get(), recv_buffer, kMessageSize, 0, nullptr,\nnullptr);\n+ if (n == -1 && errno == EINTR) {\n+ continue;\n+ }\nTEST_CHECK(n > 0);\nbytes_received += n;\n}\n@@ -166,6 +175,9 @@ void BM_Sendto(benchmark::State& state) {\nint64_t bytes_sent = 0;\nfor (auto ignored : state) {\nint n = sendto(send_socket.get(), send_buffer, kMessageSize, 0, nullptr, 0);\n+ if (n == -1 && errno == EINTR) {\n+ continue;\n+ }\nTEST_CHECK(n > 0);\nbytes_sent += n;\n}\n@@ -247,6 +259,9 @@ void BM_RecvmsgWithControlBuf(benchmark::State& state) {\nint64_t bytes_received = 0;\nfor (auto ignored : state) {\nint n = recvmsg(recv_socket.get(), recv_msg.header(), 0);\n+ if (n == -1 && errno == EINTR) {\n+ continue;\n+ }\nTEST_CHECK(n > 0);\nbytes_received += n;\n}\n@@ -316,7 +331,11 @@ void BM_SendmsgTCP(benchmark::State& state) {\nScopedThread t([&recv_msg, &recv_socket, &notification] {\nwhile (!notification.HasBeenNotified()) {\n- TEST_CHECK(recvmsg(recv_socket.get(), recv_msg.header(), 0) >= 0);\n+ int rc = recvmsg(recv_socket.get(), recv_msg.header(), 0);\n+ if (rc == -1 && errno == EINTR) {\n+ continue;\n+ }\n+ TEST_CHECK(rc >= 0);\n}\n});\n" } ]
Go
Apache License 2.0
google/gvisor
Handle EINTR from socket syscalls in send/recv benchmark. The benchmark check fails if any of the socket syscalls fail with EINTR. We see this manifest in S/R lifecycles since S/R has a high probability of aborting these syscalls with EINTR. PiperOrigin-RevId: 386480365
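The C++ changes above all apply the same retry-on-EINTR pattern around blocking socket calls. As a rough sketch of that pattern in Go (the repository's primary language; the helper name retryEINTR and the socketpair demo are invented for illustration and do not appear in the commit), the idea looks like this:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

// retryEINTR re-issues a syscall wrapper until it no longer fails with EINTR,
// analogous to the benchmark's handling of sendmsg/recvmsg/sendto/recvfrom.
func retryEINTR(op func() (int, error)) (int, error) {
	for {
		n, err := op()
		if err == unix.EINTR {
			continue // interrupted (e.g. by a save/restore signal); retry
		}
		return n, err
	}
}

func main() {
	fds, err := unix.Socketpair(unix.AF_UNIX, unix.SOCK_STREAM, 0)
	if err != nil {
		panic(err)
	}
	defer unix.Close(fds[0])
	defer unix.Close(fds[1])

	msg := []byte("ping")
	if _, err := retryEINTR(func() (int, error) { return unix.Write(fds[1], msg) }); err != nil {
		panic(err)
	}
	buf := make([]byte, len(msg))
	n, err := retryEINTR(func() (int, error) { return unix.Read(fds[0], buf) })
	if err != nil {
		panic(err)
	}
	fmt.Printf("read %q\n", buf[:n])
}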
259,891
23.07.2021 12:04:49
25,200
ea0d9a8f48f13bd15f9450848bd515a337750192
buildkite: deflake itimer test The self-admittedly arbitrary threshold of at most 20% of alarms being delivered to worker threads was being breached. I saw a run of ~20-23% [1]. We should allow for a little more breathing room. 1
[ { "change_type": "MODIFY", "old_path": "test/syscalls/linux/itimer.cc", "new_path": "test/syscalls/linux/itimer.cc", "diff": "@@ -197,9 +197,9 @@ int TestSIGALRMToMainThread() {\n// (but don't guarantee it), so we expect to see most samples on the main\n// thread.\n//\n- // The number of SIGALRMs delivered to a worker should not exceed 20%\n+ // The number of SIGALRMs delivered to a worker should not exceed 40%\n// of the number of total signals expected (this is somewhat arbitrary).\n- const int worker_threshold = result.expected_total / 5;\n+ const int worker_threshold = result.expected_total / 5 * 2;\n//\n// Linux only guarantees timers will never expire before the requested time.\n@@ -230,7 +230,8 @@ TEST(ItimerTest, DeliversSIGALRMToMainThread) {\n// Not required anymore.\nkill.Release();\n- EXPECT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) == 0) << status;\n+ EXPECT_EQ(WIFEXITED(status) && WEXITSTATUS(status), 0)\n+ << WIFEXITED(status) << \" \" << WEXITSTATUS(status);\n}\n// Signals are delivered to threads fairly.\n" } ]
Go
Apache License 2.0
google/gvisor
buildkite: deflake itimer test The self-admittedly arbitrary threshold of at most 20% of alarms being delivered to worker threads was being breached. I saw a run of ~20-23% [1]. We should allow for a little more breathing room. 1 - https://buildkite.com/gvisor/pipeline/builds/7400 PiperOrigin-RevId: 386503482
259,891
23.07.2021 12:44:10
25,200
dc000e7b7a951dc96b5a2eab46c0b3478cb355ed
update bug number in loopback tests
[ { "change_type": "MODIFY", "old_path": "test/syscalls/linux/socket_inet_loopback.cc", "new_path": "test/syscalls/linux/socket_inet_loopback.cc", "diff": "@@ -308,7 +308,7 @@ TEST_P(SocketInetLoopbackTest, TCPListenShutdownListen) {\nsockaddr_storage conn_addr = connector.addr;\nASSERT_NO_ERRNO(SetAddrPort(connector.family(), &conn_addr, port));\n- // TODO(b/157236388): Remove Disable save after bug is fixed. S/R test can\n+ // TODO(b/153489135): Remove Disable save after bug is fixed. S/R test can\n// fail because the last socket may not be delivered to the accept queue\n// by the time connect returns.\nDisableSave ds;\n@@ -751,7 +751,7 @@ TEST_P(SocketInetLoopbackTest, TCPNonBlockingConnectClose) {\n}\n}\n-// TODO(b/157236388): Remove once bug is fixed. Test fails w/\n+// TODO(b/153489135): Remove once bug is fixed. Test fails w/\n// random save as established connections which can't be delivered to the accept\n// queue because the queue is full are not correctly delivered after restore\n// causing the last accept to timeout on the restore.\n@@ -801,7 +801,7 @@ TEST_P(SocketInetLoopbackTest, TCPAcceptBacklogSizes) {\n}\n}\n-// TODO(b/157236388): Remove once bug is fixed. Test fails w/\n+// TODO(b/153489135): Remove once bug is fixed. Test fails w/\n// random save as established connections which can't be delivered to the accept\n// queue because the queue is full are not correctly delivered after restore\n// causing the last accept to timeout on the restore.\n@@ -892,7 +892,7 @@ TEST_P(SocketInetLoopbackTest, TCPBacklog) {\nASSERT_GE(client_conns, accepted_conns);\n}\n-// TODO(b/157236388): Remove once bug is fixed. Test fails w/\n+// TODO(b/153489135): Remove once bug is fixed. Test fails w/\n// random save as established connections which can't be delivered to the accept\n// queue because the queue is full are not correctly delivered after restore\n// causing the last accept to timeout on the restore.\n@@ -1136,7 +1136,7 @@ TEST_P(SocketInetLoopbackTest, TCPAcceptAfterReset) {\nsockaddr_storage conn_addr = connector.addr;\nASSERT_NO_ERRNO(SetAddrPort(connector.family(), &conn_addr, port));\n- // TODO(b/157236388): Reenable Cooperative S/R once bug is fixed.\n+ // TODO(b/153489135): Reenable Cooperative S/R once bug is fixed.\nDisableSave ds;\nASSERT_THAT(RetryEINTR(connect)(conn_fd.get(), AsSockAddr(&conn_addr),\nconnector.addr_len),\n" } ]
Go
Apache License 2.0
google/gvisor
update bug number in loopback tests PiperOrigin-RevId: 386511209
259,909
23.07.2021 12:47:08
25,200
0eea96057a8559ae542a0cccfd61ceddc26ceb35
Add support for SIOCGIFCONF ioctl in hostinet.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/socket/hostinet/socket_unsafe.go", "new_path": "pkg/sentry/socket/hostinet/socket_unsafe.go", "diff": "@@ -67,7 +67,7 @@ func ioctl(ctx context.Context, fd int, io usermem.IO, args arch.SyscallArgument\nAddressSpaceActive: true,\n})\nreturn 0, err\n- case unix.SIOCGIFFLAGS:\n+ case unix.SIOCGIFFLAGS, unix.SIOCGIFCONF:\ncc := &usermem.IOCopyContext{\nCtx: ctx,\nIO: io,\n" }, { "change_type": "MODIFY", "old_path": "runsc/boot/filter/config.go", "new_path": "runsc/boot/filter/config.go", "diff": "@@ -463,6 +463,10 @@ func hostInetFilters() seccomp.SyscallRules {\nseccomp.MatchAny{},\nseccomp.EqualTo(unix.SIOCGIFFLAGS),\n},\n+ {\n+ seccomp.MatchAny{},\n+ seccomp.EqualTo(unix.SIOCGIFCONF),\n+ },\n},\nunix.SYS_LISTEN: {},\nunix.SYS_READV: {},\n" } ]
Go
Apache License 2.0
google/gvisor
Add support for SIOCGIFCONF ioctl in hostinet. PiperOrigin-RevId: 386511818
259,985
23.07.2021 13:34:24
25,200
3d0a9300050ad9a72d452ec862827e35e3f38dcc
Don't panic on user-controlled state in semaphore syscalls. Reported-by:
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/kernel/semaphore/semaphore.go", "new_path": "pkg/sentry/kernel/semaphore/semaphore.go", "diff": "@@ -214,15 +214,14 @@ func (r *Registry) Remove(id ipc.ID, creds *auth.Credentials) error {\nr.mu.Lock()\ndefer r.mu.Unlock()\n- r.reg.Remove(id, creds)\n-\nindex, found := r.findIndexByID(id)\nif !found {\n- // Inconsistent state.\n- panic(fmt.Sprintf(\"unable to find an index for ID: %d\", id))\n+ return linuxerr.EINVAL\n}\ndelete(r.indexes, index)\n+ r.reg.Remove(id, creds)\n+\nreturn nil\n}\n@@ -245,7 +244,8 @@ func (r *Registry) newSetLocked(ctx context.Context, key ipc.Key, creator fs.Fil\nindex, found := r.findFirstAvailableIndex()\nif !found {\n- panic(\"unable to find an available index\")\n+ // See linux, ipc/sem.c:newary().\n+ return nil, linuxerr.ENOSPC\n}\nr.indexes[index] = set.obj.ID\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/semaphore.cc", "new_path": "test/syscalls/linux/semaphore.cc", "diff": "@@ -1019,6 +1019,17 @@ TEST(SemaphoreTest, SemInfo) {\nEXPECT_EQ(info.semvmx, kSemVmx);\n}\n+TEST(SempahoreTest, RemoveNonExistentSemaphore) {\n+ EXPECT_THAT(semctl(-1, 0, IPC_RMID), SyscallFailsWithErrno(EINVAL));\n+}\n+\n+TEST(SempahoreTest, RemoveDeletedSemaphore) {\n+ int id;\n+ EXPECT_THAT(id = semget(IPC_PRIVATE, 1, 0), SyscallSucceeds());\n+ EXPECT_THAT(semctl(id, 0, IPC_RMID), SyscallSucceeds());\n+ EXPECT_THAT(semctl(id, 0, IPC_RMID), SyscallFailsWithErrno(EINVAL));\n+}\n+\n} // namespace\n} // namespace testing\n} // namespace gvisor\n" } ]
Go
Apache License 2.0
google/gvisor
Don't panic on user-controlled state in semaphore syscalls. Reported-by: [email protected] PiperOrigin-RevId: 386521361
260,001
23.07.2021 14:33:27
25,200
d2479383639ab15301c4535a91cbbbeb22635a7e
Add verity open benchmark test
[ { "change_type": "MODIFY", "old_path": "test/perf/BUILD", "new_path": "test/perf/BUILD", "diff": "@@ -139,3 +139,10 @@ syscall_test(\ndebug = False,\ntest = \"//test/perf/linux:write_benchmark\",\n)\n+\n+syscall_test(\n+ size = \"large\",\n+ debug = False,\n+ test = \"//test/perf/linux:verity_open_benchmark\",\n+ vfs1 = False,\n+)\n" }, { "change_type": "MODIFY", "old_path": "test/perf/linux/BUILD", "new_path": "test/perf/linux/BUILD", "diff": "@@ -370,3 +370,22 @@ cc_binary(\n\"//test/util:test_main\",\n],\n)\n+\n+cc_binary(\n+ name = \"verity_open_benchmark\",\n+ testonly = 1,\n+ srcs = [\n+ \"verity_open_benchmark.cc\",\n+ ],\n+ deps = [\n+ gbenchmark,\n+ gtest,\n+ \"//test/util:capability_util\",\n+ \"//test/util:fs_util\",\n+ \"//test/util:logging\",\n+ \"//test/util:temp_path\",\n+ \"//test/util:test_main\",\n+ \"//test/util:test_util\",\n+ \"//test/util:verity_util\",\n+ ],\n+)\n" }, { "change_type": "ADD", "old_path": null, "new_path": "test/perf/linux/verity_open_benchmark.cc", "diff": "+// Copyright 2021 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+#include <fcntl.h>\n+#include <stdlib.h>\n+#include <sys/mount.h>\n+#include <unistd.h>\n+\n+#include <memory>\n+#include <string>\n+#include <vector>\n+\n+#include \"gtest/gtest.h\"\n+#include \"benchmark/benchmark.h\"\n+#include \"test/util/capability_util.h\"\n+#include \"test/util/fs_util.h\"\n+#include \"test/util/logging.h\"\n+#include \"test/util/temp_path.h\"\n+#include \"test/util/test_util.h\"\n+#include \"test/util/verity_util.h\"\n+\n+namespace gvisor {\n+namespace testing {\n+\n+namespace {\n+\n+void BM_Open(benchmark::State& state) {\n+ SKIP_IF(IsRunningWithVFS1());\n+\n+ const int size = state.range(0);\n+ std::vector<TempPath> cache;\n+ std::vector<EnableTarget> targets;\n+\n+ // Mount a tmpfs file system to be wrapped by a verity fs.\n+ TempPath dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());\n+ TEST_CHECK(mount(\"\", dir.path().c_str(), \"tmpfs\", 0, \"\") == 0);\n+\n+ for (int i = 0; i < size; i++) {\n+ auto path = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileIn(dir.path()));\n+ targets.emplace_back(\n+ EnableTarget(std::string(Basename(path.path())), O_RDONLY));\n+ cache.emplace_back(std::move(path));\n+ }\n+\n+ std::string verity_dir =\n+ TEST_CHECK_NO_ERRNO_AND_VALUE(MountVerity(dir.path(), targets));\n+\n+ unsigned int seed = 1;\n+ for (auto _ : state) {\n+ const int chosen = rand_r(&seed) % size;\n+ int fd = open(JoinPath(verity_dir, targets[chosen].path).c_str(), O_RDONLY);\n+ TEST_CHECK(fd != -1);\n+ close(fd);\n+ }\n+}\n+\n+BENCHMARK(BM_Open)->Range(1, 128)->UseRealTime();\n+\n+} // namespace\n+\n+} // namespace testing\n+} // namespace gvisor\n" }, { "change_type": "MODIFY", "old_path": "test/runner/defs.bzl", "new_path": "test/runner/defs.bzl", "diff": "@@ -135,6 +135,7 @@ def syscall_test(\nadd_overlay = False,\nadd_uds_tree = False,\nadd_hostinet = False,\n+ vfs1 = True,\nvfs2 = True,\nfuse = False,\ndebug = True,\n@@ -148,6 +149,7 @@ def 
syscall_test(\nadd_overlay: add an overlay test.\nadd_uds_tree: add a UDS test.\nadd_hostinet: add a hostinet test.\n+ vfs1: enable VFS1 tests. Could be false only if vfs2 is true.\nvfs2: enable VFS2 support.\nfuse: enable FUSE support.\ndebug: enable debug output.\n@@ -157,7 +159,7 @@ def syscall_test(\nif not tags:\ntags = []\n- if vfs2 and not fuse:\n+ if vfs2 and vfs1 and not fuse:\n# Generate a vfs1 plain test. Most testing will now be\n# biased towards vfs2, with only a single vfs1 case.\n_syscall_test(\n@@ -171,7 +173,7 @@ def syscall_test(\n**kwargs\n)\n- if not fuse:\n+ if vfs1 and not fuse:\n# Generate a native test if fuse is not required.\n_syscall_test(\ntest = test,\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/verity_getdents.cc", "new_path": "test/syscalls/linux/verity_getdents.cc", "diff": "@@ -59,7 +59,7 @@ class GetDentsTest : public ::testing::Test {\nTEST_F(GetDentsTest, GetDents) {\nstd::string verity_dir = ASSERT_NO_ERRNO_AND_VALUE(\n- MountVerity(tmpfs_dir_.path(), filename_, /*targets=*/{}));\n+ MountVerity(tmpfs_dir_.path(), {EnableTarget(filename_, O_RDONLY)}));\nstd::vector<std::string> expect = {\".\", \"..\", filename_};\nEXPECT_NO_ERRNO(DirContains(verity_dir, expect, /*exclude=*/{}));\n@@ -67,7 +67,7 @@ TEST_F(GetDentsTest, GetDents) {\nTEST_F(GetDentsTest, Deleted) {\nstd::string verity_dir = ASSERT_NO_ERRNO_AND_VALUE(\n- MountVerity(tmpfs_dir_.path(), filename_, /*targets=*/{}));\n+ MountVerity(tmpfs_dir_.path(), {EnableTarget(filename_, O_RDONLY)}));\nEXPECT_THAT(unlink(JoinPath(tmpfs_dir_.path(), filename_).c_str()),\nSyscallSucceeds());\n@@ -78,7 +78,7 @@ TEST_F(GetDentsTest, Deleted) {\nTEST_F(GetDentsTest, Renamed) {\nstd::string verity_dir = ASSERT_NO_ERRNO_AND_VALUE(\n- MountVerity(tmpfs_dir_.path(), filename_, /*targets=*/{}));\n+ MountVerity(tmpfs_dir_.path(), {EnableTarget(filename_, O_RDONLY)}));\nstd::string new_file_name = \"renamed-\" + filename_;\nEXPECT_THAT(rename(JoinPath(tmpfs_dir_.path(), filename_).c_str(),\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/verity_ioctl.cc", "new_path": "test/syscalls/linux/verity_ioctl.cc", "diff": "@@ -106,7 +106,7 @@ TEST_F(IoctlTest, Measure) {\nTEST_F(IoctlTest, Mount) {\nstd::string verity_dir = ASSERT_NO_ERRNO_AND_VALUE(\n- MountVerity(tmpfs_dir_.path(), filename_, /*targets=*/{}));\n+ MountVerity(tmpfs_dir_.path(), {EnableTarget(filename_, O_RDONLY)}));\n// Make sure the file can be open and read in the mounted verity fs.\nauto const verity_fd = ASSERT_NO_ERRNO_AND_VALUE(\n@@ -118,7 +118,7 @@ TEST_F(IoctlTest, Mount) {\nTEST_F(IoctlTest, NonExistingFile) {\nstd::string verity_dir = ASSERT_NO_ERRNO_AND_VALUE(\n- MountVerity(tmpfs_dir_.path(), filename_, /*targets=*/{}));\n+ MountVerity(tmpfs_dir_.path(), {EnableTarget(filename_, O_RDONLY)}));\n// Confirm that opening a non-existing file in the verity-enabled directory\n// triggers the expected error instead of verification failure.\n@@ -129,7 +129,7 @@ TEST_F(IoctlTest, NonExistingFile) {\nTEST_F(IoctlTest, ModifiedFile) {\nstd::string verity_dir = ASSERT_NO_ERRNO_AND_VALUE(\n- MountVerity(tmpfs_dir_.path(), filename_, /*targets=*/{}));\n+ MountVerity(tmpfs_dir_.path(), {EnableTarget(filename_, O_RDONLY)}));\n// Modify the file and check verification failure upon reading from it.\nauto const fd = ASSERT_NO_ERRNO_AND_VALUE(\n@@ -144,7 +144,7 @@ TEST_F(IoctlTest, ModifiedFile) {\nTEST_F(IoctlTest, ModifiedMerkle) {\nstd::string verity_dir = ASSERT_NO_ERRNO_AND_VALUE(\n- MountVerity(tmpfs_dir_.path(), filename_, 
/*targets=*/{}));\n+ MountVerity(tmpfs_dir_.path(), {EnableTarget(filename_, O_RDONLY)}));\n// Modify the Merkle file and check verification failure upon opening the\n// corresponding file.\n@@ -159,7 +159,7 @@ TEST_F(IoctlTest, ModifiedMerkle) {\nTEST_F(IoctlTest, ModifiedDirMerkle) {\nstd::string verity_dir = ASSERT_NO_ERRNO_AND_VALUE(\n- MountVerity(tmpfs_dir_.path(), filename_, /*targets=*/{}));\n+ MountVerity(tmpfs_dir_.path(), {EnableTarget(filename_, O_RDONLY)}));\n// Modify the Merkle file for the parent directory and check verification\n// failure upon opening the corresponding file.\n@@ -174,7 +174,7 @@ TEST_F(IoctlTest, ModifiedDirMerkle) {\nTEST_F(IoctlTest, Stat) {\nstd::string verity_dir = ASSERT_NO_ERRNO_AND_VALUE(\n- MountVerity(tmpfs_dir_.path(), filename_, /*targets=*/{}));\n+ MountVerity(tmpfs_dir_.path(), {EnableTarget(filename_, O_RDONLY)}));\nstruct stat st;\nEXPECT_THAT(stat(JoinPath(verity_dir, filename_).c_str(), &st),\n@@ -183,7 +183,7 @@ TEST_F(IoctlTest, Stat) {\nTEST_F(IoctlTest, ModifiedStat) {\nstd::string verity_dir = ASSERT_NO_ERRNO_AND_VALUE(\n- MountVerity(tmpfs_dir_.path(), filename_, /*targets=*/{}));\n+ MountVerity(tmpfs_dir_.path(), {EnableTarget(filename_, O_RDONLY)}));\nEXPECT_THAT(chmod(JoinPath(tmpfs_dir_.path(), filename_).c_str(), 0644),\nSyscallSucceeds());\n@@ -194,7 +194,7 @@ TEST_F(IoctlTest, ModifiedStat) {\nTEST_F(IoctlTest, DeleteFile) {\nstd::string verity_dir = ASSERT_NO_ERRNO_AND_VALUE(\n- MountVerity(tmpfs_dir_.path(), filename_, /*targets=*/{}));\n+ MountVerity(tmpfs_dir_.path(), {EnableTarget(filename_, O_RDONLY)}));\nEXPECT_THAT(unlink(JoinPath(tmpfs_dir_.path(), filename_).c_str()),\nSyscallSucceeds());\n@@ -204,7 +204,7 @@ TEST_F(IoctlTest, DeleteFile) {\nTEST_F(IoctlTest, DeleteMerkle) {\nstd::string verity_dir = ASSERT_NO_ERRNO_AND_VALUE(\n- MountVerity(tmpfs_dir_.path(), filename_, /*targets=*/{}));\n+ MountVerity(tmpfs_dir_.path(), {EnableTarget(filename_, O_RDONLY)}));\nEXPECT_THAT(\nunlink(MerklePath(JoinPath(tmpfs_dir_.path(), filename_)).c_str()),\n@@ -215,7 +215,7 @@ TEST_F(IoctlTest, DeleteMerkle) {\nTEST_F(IoctlTest, RenameFile) {\nstd::string verity_dir = ASSERT_NO_ERRNO_AND_VALUE(\n- MountVerity(tmpfs_dir_.path(), filename_, /*targets=*/{}));\n+ MountVerity(tmpfs_dir_.path(), {EnableTarget(filename_, O_RDONLY)}));\nstd::string new_file_name = \"renamed-\" + filename_;\nEXPECT_THAT(rename(JoinPath(tmpfs_dir_.path(), filename_).c_str(),\n@@ -227,7 +227,7 @@ TEST_F(IoctlTest, RenameFile) {\nTEST_F(IoctlTest, RenameMerkle) {\nstd::string verity_dir = ASSERT_NO_ERRNO_AND_VALUE(\n- MountVerity(tmpfs_dir_.path(), filename_, /*targets=*/{}));\n+ MountVerity(tmpfs_dir_.path(), {EnableTarget(filename_, O_RDONLY)}));\nstd::string new_file_name = \"renamed-\" + filename_;\nEXPECT_THAT(\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/verity_mmap.cc", "new_path": "test/syscalls/linux/verity_mmap.cc", "diff": "@@ -58,7 +58,7 @@ class MmapTest : public ::testing::Test {\nTEST_F(MmapTest, MmapRead) {\nstd::string verity_dir = ASSERT_NO_ERRNO_AND_VALUE(\n- MountVerity(tmpfs_dir_.path(), filename_, /*targets=*/{}));\n+ MountVerity(tmpfs_dir_.path(), {EnableTarget(filename_, O_RDONLY)}));\n// Make sure the file can be open and mmapped in the mounted verity fs.\nauto const verity_fd = ASSERT_NO_ERRNO_AND_VALUE(\n@@ -72,7 +72,7 @@ TEST_F(MmapTest, MmapRead) {\nTEST_F(MmapTest, ModifiedBeforeMmap) {\nstd::string verity_dir = ASSERT_NO_ERRNO_AND_VALUE(\n- MountVerity(tmpfs_dir_.path(), filename_, /*targets=*/{}));\n+ 
MountVerity(tmpfs_dir_.path(), {EnableTarget(filename_, O_RDONLY)}));\n// Modify the file and check verification failure upon mmapping.\nauto const fd = ASSERT_NO_ERRNO_AND_VALUE(\n@@ -91,7 +91,7 @@ TEST_F(MmapTest, ModifiedBeforeMmap) {\nTEST_F(MmapTest, ModifiedAfterMmap) {\nstd::string verity_dir = ASSERT_NO_ERRNO_AND_VALUE(\n- MountVerity(tmpfs_dir_.path(), filename_, /*targets=*/{}));\n+ MountVerity(tmpfs_dir_.path(), {EnableTarget(filename_, O_RDONLY)}));\nauto const verity_fd = ASSERT_NO_ERRNO_AND_VALUE(\nOpen(JoinPath(verity_dir, filename_), O_RDONLY, 0777));\n@@ -127,7 +127,7 @@ INSTANTIATE_TEST_SUITE_P(\nTEST_P(MmapParamTest, Mmap) {\nstd::string verity_dir = ASSERT_NO_ERRNO_AND_VALUE(\n- MountVerity(tmpfs_dir_.path(), filename_, /*targets=*/{}));\n+ MountVerity(tmpfs_dir_.path(), {EnableTarget(filename_, O_RDONLY)}));\n// Make sure the file can be open and mmapped in the mounted verity fs.\nauto const verity_fd = ASSERT_NO_ERRNO_AND_VALUE(\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/verity_symlink.cc", "new_path": "test/syscalls/linux/verity_symlink.cc", "diff": "@@ -62,9 +62,9 @@ class SymlinkTest : public ::testing::Test {\n};\nTEST_F(SymlinkTest, Success) {\n- std::string verity_dir = ASSERT_NO_ERRNO_AND_VALUE(\n- MountVerity(tmpfs_dir_.path(), filename_,\n- {EnableTarget(kSymlink, O_RDONLY | O_NOFOLLOW)}));\n+ std::string verity_dir = ASSERT_NO_ERRNO_AND_VALUE(MountVerity(\n+ tmpfs_dir_.path(), {EnableTarget(filename_, O_RDONLY),\n+ EnableTarget(kSymlink, O_RDONLY | O_NOFOLLOW)}));\nchar buf[256];\nEXPECT_THAT(\n@@ -77,9 +77,9 @@ TEST_F(SymlinkTest, Success) {\n}\nTEST_F(SymlinkTest, DeleteLink) {\n- std::string verity_dir = ASSERT_NO_ERRNO_AND_VALUE(\n- MountVerity(tmpfs_dir_.path(), filename_,\n- {EnableTarget(kSymlink, O_RDONLY | O_NOFOLLOW)}));\n+ std::string verity_dir = ASSERT_NO_ERRNO_AND_VALUE(MountVerity(\n+ tmpfs_dir_.path(), {EnableTarget(filename_, O_RDONLY),\n+ EnableTarget(kSymlink, O_RDONLY | O_NOFOLLOW)}));\nASSERT_THAT(unlink(JoinPath(tmpfs_dir_.path(), kSymlink).c_str()),\nSyscallSucceeds());\n@@ -92,9 +92,9 @@ TEST_F(SymlinkTest, DeleteLink) {\n}\nTEST_F(SymlinkTest, ModifyLink) {\n- std::string verity_dir = ASSERT_NO_ERRNO_AND_VALUE(\n- MountVerity(tmpfs_dir_.path(), filename_,\n- {EnableTarget(kSymlink, O_RDONLY | O_NOFOLLOW)}));\n+ std::string verity_dir = ASSERT_NO_ERRNO_AND_VALUE(MountVerity(\n+ tmpfs_dir_.path(), {EnableTarget(filename_, O_RDONLY),\n+ EnableTarget(kSymlink, O_RDONLY | O_NOFOLLOW)}));\nASSERT_THAT(unlink(JoinPath(tmpfs_dir_.path(), kSymlink).c_str()),\nSyscallSucceeds());\n" }, { "change_type": "MODIFY", "old_path": "test/util/verity_util.cc", "new_path": "test/util/verity_util.cc", "diff": "@@ -54,20 +54,14 @@ PosixError FlipRandomBit(int fd, int size) {\nreturn NoError();\n}\n-PosixErrorOr<std::string> MountVerity(std::string tmpfs_dir,\n- std::string filename,\n+PosixErrorOr<std::string> MountVerity(std::string lower_dir,\nstd::vector<EnableTarget> targets) {\n- // Mount a verity fs on the existing tmpfs mount.\n- std::string mount_opts = \"lower_path=\" + tmpfs_dir;\n+ // Mount a verity fs on the existing mount.\n+ std::string mount_opts = \"lower_path=\" + lower_dir;\nASSIGN_OR_RETURN_ERRNO(TempPath verity_dir, TempPath::CreateDir());\nRETURN_ERROR_IF_SYSCALL_FAIL(\nmount(\"\", verity_dir.path().c_str(), \"verity\", 0, mount_opts.c_str()));\n- // Enable the file, symlink(if provided) and the directory.\n- ASSIGN_OR_RETURN_ERRNO(\n- auto fd, Open(JoinPath(verity_dir.path(), filename), O_RDONLY, 0777));\n- 
RETURN_ERROR_IF_SYSCALL_FAIL(ioctl(fd.get(), FS_IOC_ENABLE_VERITY));\n-\nfor (const EnableTarget& target : targets) {\nASSIGN_OR_RETURN_ERRNO(\nauto target_fd,\n@@ -92,6 +86,7 @@ PosixErrorOr<std::string> MountVerity(std::string tmpfs_dir,\nASSIGN_OR_RETURN_ERRNO(TempPath verity_with_hash_dir, TempPath::CreateDir());\nRETURN_ERROR_IF_SYSCALL_FAIL(mount(\"\", verity_with_hash_dir.path().c_str(),\n\"verity\", 0, mount_opts.c_str()));\n+\n// Verity directories should not be deleted. Release the TempPath objects to\n// prevent those directories from being deleted by the destructor.\nverity_dir.release();\n" }, { "change_type": "MODIFY", "old_path": "test/util/verity_util.h", "new_path": "test/util/verity_util.h", "diff": "@@ -76,7 +76,6 @@ PosixError FlipRandomBit(int fd, int size);\n// Mount a verity on the tmpfs and enable both the file and the direcotry. Then\n// mount a new verity with measured root hash.\nPosixErrorOr<std::string> MountVerity(std::string tmpfs_dir,\n- std::string filename,\nstd::vector<EnableTarget> targets);\n} // namespace testing\n" } ]
Go
Apache License 2.0
google/gvisor
Add verity open benchmark test PiperOrigin-RevId: 386533065
259,885
23.07.2021 19:51:11
25,200
9ba8c40a3a3c7fed40d9137fed8a87fa9d536a22
Clean up logic for when a VFS2 gofer regular file close causes a flushf.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/gofer/regular_file.go", "new_path": "pkg/sentry/fsimpl/gofer/regular_file.go", "diff": "@@ -79,18 +79,23 @@ func (fd *regularFileFD) OnClose(ctx context.Context) error {\nif !fd.vfsfd.IsWritable() {\nreturn nil\n}\n- // Skip flushing if there are client-buffered writes, since (as with the\n- // VFS1 client) we don't flush buffered writes on close anyway.\nd := fd.dentry()\n- if d.fs.opts.interop != InteropModeExclusive {\n- return nil\n- }\n+ if d.fs.opts.interop == InteropModeExclusive {\n+ // d may have dirty pages that we won't write back now (and wouldn't\n+ // have in VFS1), making a flushf RPC ineffective. If this is the case,\n+ // skip the flushf.\n+ //\n+ // Note that it's also possible to have dirty pages under other interop\n+ // modes if forcePageCache is in effect; we conservatively assume that\n+ // applications have some way of tolerating this and still want the\n+ // flushf.\nd.dataMu.RLock()\nhaveDirtyPages := !d.dirty.IsEmpty()\nd.dataMu.RUnlock()\nif haveDirtyPages {\nreturn nil\n}\n+ }\nd.handleMu.RLock()\ndefer d.handleMu.RUnlock()\nif d.writeFile.isNil() {\n@@ -707,14 +712,8 @@ func (fd *regularFileFD) ConfigureMMap(ctx context.Context, opts *memmap.MMapOpt\nreturn vfs.GenericConfigureMMap(&fd.vfsfd, d, opts)\n}\n-func (d *dentry) mayCachePages() bool {\n- if d.fs.opts.forcePageCache {\n- return true\n- }\n- if d.fs.opts.interop == InteropModeShared {\n- return false\n- }\n- return atomic.LoadInt32(&d.mmapFD) >= 0\n+func (fs *filesystem) mayCachePagesInMemoryFile() bool {\n+ return fs.opts.forcePageCache || fs.opts.interop != InteropModeShared\n}\n// AddMapping implements memmap.Mappable.AddMapping.\n@@ -726,7 +725,7 @@ func (d *dentry) AddMapping(ctx context.Context, ms memmap.MappingSpace, ar host\nfor _, r := range mapped {\nd.pf.hostFileMapper.IncRefOn(r)\n}\n- if d.mayCachePages() {\n+ if d.fs.mayCachePagesInMemoryFile() {\n// d.Evict() will refuse to evict memory-mapped pages, so tell the\n// MemoryFile to not bother trying.\nmf := d.fs.mfp.MemoryFile()\n@@ -745,7 +744,7 @@ func (d *dentry) RemoveMapping(ctx context.Context, ms memmap.MappingSpace, ar h\nfor _, r := range unmapped {\nd.pf.hostFileMapper.DecRefOn(r)\n}\n- if d.mayCachePages() {\n+ if d.fs.mayCachePagesInMemoryFile() {\n// Pages that are no longer referenced by any application memory\n// mappings are now considered unused; allow MemoryFile to evict them\n// when necessary.\n" } ]
Go
Apache License 2.0
google/gvisor
Clean up logic for when a VFS2 gofer regular file close causes a flushf. PiperOrigin-RevId: 386577891
259,885
27.07.2021 18:11:03
25,200
964fb3ca768756fbc58d1d9312c53886964ae608
Use go:build directives in generated files. Build constraints are now inferred from go:build directives rather than +build directives. +build directives are still emitted in generated files as required in Go 1.16 and earlier. Note that go/build/constraint was added in Go 1.16, so gVisor now requires Go 1.16.
[ { "change_type": "ADD", "old_path": null, "new_path": "tools/constraintutil/BUILD", "diff": "+load(\"//tools:defs.bzl\", \"go_library\", \"go_test\")\n+\n+package(licenses = [\"notice\"])\n+\n+go_library(\n+ name = \"constraintutil\",\n+ srcs = [\"constraintutil.go\"],\n+ marshal = False,\n+ stateify = False,\n+ visibility = [\"//tools:__subpackages__\"],\n+)\n+\n+go_test(\n+ name = \"constraintutil_test\",\n+ size = \"small\",\n+ srcs = [\"constraintutil_test.go\"],\n+ library = \":constraintutil\",\n+)\n" }, { "change_type": "ADD", "old_path": null, "new_path": "tools/constraintutil/constraintutil.go", "diff": "+// Copyright 2021 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+// Package constraintutil provides utilities for working with Go build\n+// constraints.\n+package constraintutil\n+\n+import (\n+ \"bufio\"\n+ \"bytes\"\n+ \"fmt\"\n+ \"go/build/constraint\"\n+ \"io\"\n+ \"os\"\n+ \"strings\"\n+)\n+\n+// FromReader extracts the build constraint from the Go source or assembly file\n+// whose contents are read by r.\n+func FromReader(r io.Reader) (constraint.Expr, error) {\n+ // See go/build.parseFileHeader() for the \"official\" logic that this is\n+ // derived from.\n+ const (\n+ slashStar = \"/*\"\n+ starSlash = \"*/\"\n+ gobuildPrefix = \"//go:build\"\n+ )\n+ s := bufio.NewScanner(r)\n+ var (\n+ inSlashStar = false // between /* and */\n+ haveGobuild = false\n+ e constraint.Expr\n+ )\n+Lines:\n+ for s.Scan() {\n+ line := bytes.TrimSpace(s.Bytes())\n+ if !inSlashStar && constraint.IsGoBuild(string(line)) {\n+ if haveGobuild {\n+ return nil, fmt.Errorf(\"multiple go:build directives\")\n+ }\n+ haveGobuild = true\n+ var err error\n+ e, err = constraint.Parse(string(line))\n+ if err != nil {\n+ return nil, err\n+ }\n+ }\n+ ThisLine:\n+ for len(line) > 0 {\n+ if inSlashStar {\n+ if i := bytes.Index(line, []byte(starSlash)); i >= 0 {\n+ inSlashStar = false\n+ line = bytes.TrimSpace(line[i+len(starSlash):])\n+ continue ThisLine\n+ }\n+ continue Lines\n+ }\n+ if bytes.HasPrefix(line, []byte(\"//\")) {\n+ continue Lines\n+ }\n+ // Note that if /* appears in the line, but not at the beginning,\n+ // then the line is still non-empty, so skipping this and\n+ // terminating below is correct.\n+ if bytes.HasPrefix(line, []byte(slashStar)) {\n+ inSlashStar = true\n+ line = bytes.TrimSpace(line[len(slashStar):])\n+ continue ThisLine\n+ }\n+ // A non-empty non-comment line terminates scanning for go:build.\n+ break Lines\n+ }\n+ }\n+ return e, s.Err()\n+}\n+\n+// FromString extracts the build constraint from the Go source or assembly file\n+// containing the given data. If no build constraint applies to the file, it\n+// returns nil.\n+func FromString(str string) (constraint.Expr, error) {\n+ return FromReader(strings.NewReader(str))\n+}\n+\n+// FromFile extracts the build constraint from the Go source or assembly file\n+// at the given path. 
If no build constraint applies to the file, it returns\n+// nil.\n+func FromFile(path string) (constraint.Expr, error) {\n+ f, err := os.Open(path)\n+ if err != nil {\n+ return nil, err\n+ }\n+ defer f.Close()\n+ return FromReader(f)\n+}\n+\n+// Combine returns a constraint.Expr that evaluates to true iff all expressions\n+// in es evaluate to true. If es is empty, Combine returns nil.\n+//\n+// Preconditions: All constraint.Exprs in es are non-nil.\n+func Combine(es []constraint.Expr) constraint.Expr {\n+ switch len(es) {\n+ case 0:\n+ return nil\n+ case 1:\n+ return es[0]\n+ default:\n+ a := &constraint.AndExpr{es[0], es[1]}\n+ for i := 2; i < len(es); i++ {\n+ a = &constraint.AndExpr{a, es[i]}\n+ }\n+ return a\n+ }\n+}\n+\n+// CombineFromFiles returns a build constraint expression that evaluates to\n+// true iff the build constraints from all of the given Go source or assembly\n+// files evaluate to true. If no build constraints apply to any of the given\n+// files, it returns nil.\n+func CombineFromFiles(paths []string) (constraint.Expr, error) {\n+ var es []constraint.Expr\n+ for _, path := range paths {\n+ e, err := FromFile(path)\n+ if err != nil {\n+ return nil, fmt.Errorf(\"failed to read build constraints from %q: %v\", path, err)\n+ }\n+ if e != nil {\n+ es = append(es, e)\n+ }\n+ }\n+ return Combine(es), nil\n+}\n+\n+// Lines returns a string containing build constraint directives for the given\n+// constraint.Expr, including two trailing newlines, as appropriate for a Go\n+// source or assembly file. At least a go:build directive will be emitted; if\n+// the constraint is expressible using +build directives as well, then +build\n+// directives will also be emitted.\n+//\n+// If e is nil, Lines returns the empty string.\n+func Lines(e constraint.Expr) string {\n+ if e == nil {\n+ return \"\"\n+ }\n+\n+ var b strings.Builder\n+ b.WriteString(\"//go:build \")\n+ b.WriteString(e.String())\n+ b.WriteByte('\\n')\n+\n+ if pblines, err := constraint.PlusBuildLines(e); err == nil {\n+ for _, line := range pblines {\n+ b.WriteString(line)\n+ b.WriteByte('\\n')\n+ }\n+ }\n+\n+ b.WriteByte('\\n')\n+ return b.String()\n+}\n" }, { "change_type": "ADD", "old_path": null, "new_path": "tools/constraintutil/constraintutil_test.go", "diff": "+// Copyright 2021 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package constraintutil\n+\n+import (\n+ \"go/build/constraint\"\n+ \"testing\"\n+)\n+\n+func TestFileParsing(t *testing.T) {\n+ for _, test := range []struct {\n+ name string\n+ data string\n+ expr string\n+ }{\n+ {\n+ name: \"Empty\",\n+ },\n+ {\n+ name: \"NoConstraint\",\n+ data: \"// copyright header\\n\\npackage main\",\n+ },\n+ {\n+ name: \"ConstraintOnFirstLine\",\n+ data: \"//go:build amd64\\n#include \\\"textflag.h\\\"\",\n+ expr: \"amd64\",\n+ },\n+ {\n+ name: \"ConstraintAfterSlashSlashComment\",\n+ data: \"// copyright header\\n\\n//go:build linux\\n\\npackage newlib\",\n+ expr: \"linux\",\n+ },\n+ {\n+ name: 
\"ConstraintAfterSlashStarComment\",\n+ data: \"/*\\ncopyright header\\n*/\\n\\n//go:build !race\\n\\npackage oldlib\",\n+ expr: \"!race\",\n+ },\n+ {\n+ name: \"ConstraintInSlashSlashComment\",\n+ data: \"// blah blah //go:build windows\",\n+ },\n+ {\n+ name: \"ConstraintInSlashStarComment\",\n+ data: \"/*\\n//go:build windows\\n*/\",\n+ },\n+ {\n+ name: \"ConstraintAfterPackageClause\",\n+ data: \"package oops\\n//go:build race\",\n+ },\n+ {\n+ name: \"ConstraintAfterCppInclude\",\n+ data: \"#include \\\"textflag.h\\\"\\n//go:build arm64\",\n+ },\n+ } {\n+ t.Run(test.name, func(t *testing.T) {\n+ e, err := FromString(test.data)\n+ if err != nil {\n+ t.Fatalf(\"FromString(%q) failed: %v\", test.data, err)\n+ }\n+ if e == nil {\n+ if len(test.expr) != 0 {\n+ t.Errorf(\"FromString(%q): got no constraint, wanted %q\", test.data, test.expr)\n+ }\n+ } else {\n+ got := e.String()\n+ if len(test.expr) == 0 {\n+ t.Errorf(\"FromString(%q): got %q, wanted no constraint\", test.data, got)\n+ } else if got != test.expr {\n+ t.Errorf(\"FromString(%q): got %q, wanted %q\", test.data, got, test.expr)\n+ }\n+ }\n+ })\n+ }\n+}\n+\n+func TestCombine(t *testing.T) {\n+ for _, test := range []struct {\n+ name string\n+ in []string\n+ out string\n+ }{\n+ {\n+ name: \"0\",\n+ },\n+ {\n+ name: \"1\",\n+ in: []string{\"amd64 || arm64\"},\n+ out: \"amd64 || arm64\",\n+ },\n+ {\n+ name: \"2\",\n+ in: []string{\"amd64\", \"amd64 && linux\"},\n+ out: \"amd64 && amd64 && linux\",\n+ },\n+ {\n+ name: \"3\",\n+ in: []string{\"amd64\", \"amd64 || arm64\", \"amd64 || riscv64\"},\n+ out: \"amd64 && (amd64 || arm64) && (amd64 || riscv64)\",\n+ },\n+ } {\n+ t.Run(test.name, func(t *testing.T) {\n+ inexprs := make([]constraint.Expr, 0, len(test.in))\n+ for _, estr := range test.in {\n+ line := \"//go:build \" + estr\n+ e, err := constraint.Parse(line)\n+ if err != nil {\n+ t.Fatalf(\"constraint.Parse(%q) failed: %v\", line, err)\n+ }\n+ inexprs = append(inexprs, e)\n+ }\n+ outexpr := Combine(inexprs)\n+ if outexpr == nil {\n+ if len(test.out) != 0 {\n+ t.Errorf(\"Combine(%v): got no constraint, wanted %q\", test.in, test.out)\n+ }\n+ } else {\n+ got := outexpr.String()\n+ if len(test.out) == 0 {\n+ t.Errorf(\"Combine(%v): got %q, wanted no constraint\", test.in, got)\n+ } else if got != test.out {\n+ t.Errorf(\"Combine(%v): got %q, wanted %q\", test.in, got, test.out)\n+ }\n+ }\n+ })\n+ }\n+}\n" }, { "change_type": "MODIFY", "old_path": "tools/go_generics/go_merge/BUILD", "new_path": "tools/go_generics/go_merge/BUILD", "diff": "@@ -7,6 +7,6 @@ go_binary(\nsrcs = [\"main.go\"],\nvisibility = [\"//:sandbox\"],\ndeps = [\n- \"//tools/tags\",\n+ \"//tools/constraintutil\",\n],\n)\n" }, { "change_type": "MODIFY", "old_path": "tools/go_generics/go_merge/main.go", "new_path": "tools/go_generics/go_merge/main.go", "diff": "@@ -25,9 +25,8 @@ import (\n\"os\"\n\"path/filepath\"\n\"strconv\"\n- \"strings\"\n- \"gvisor.dev/gvisor/tools/tags\"\n+ \"gvisor.dev/gvisor/tools/constraintutil\"\n)\nvar (\n@@ -131,6 +130,12 @@ func main() {\n}\nf.Decls = newDecls\n+ // Infer build constraints for the output file.\n+ bcexpr, err := constraintutil.CombineFromFiles(flag.Args())\n+ if err != nil {\n+ fatalf(\"Failed to read build constraints: %v\\n\", err)\n+ }\n+\n// Write the output file.\nvar buf bytes.Buffer\nif err := format.Node(&buf, fset, f); err != nil {\n@@ -141,9 +146,7 @@ func main() {\nfatalf(\"opening output: %v\\n\", err)\n}\ndefer outf.Close()\n- if t := tags.Aggregate(flag.Args()); len(t) > 0 {\n- fmt.Fprintf(outf, \"%s\\n\\n\", 
strings.Join(t.Lines(), \"\\n\"))\n- }\n+ outf.WriteString(constraintutil.Lines(bcexpr))\nif _, err := outf.Write(buf.Bytes()); err != nil {\nfatalf(\"write: %v\\n\", err)\n}\n" }, { "change_type": "MODIFY", "old_path": "tools/go_marshal/gomarshal/BUILD", "new_path": "tools/go_marshal/gomarshal/BUILD", "diff": "@@ -18,5 +18,5 @@ go_library(\nvisibility = [\n\"//:sandbox\",\n],\n- deps = [\"//tools/tags\"],\n+ deps = [\"//tools/constraintutil\"],\n)\n" }, { "change_type": "MODIFY", "old_path": "tools/go_marshal/gomarshal/generator.go", "new_path": "tools/go_marshal/gomarshal/generator.go", "diff": "@@ -25,7 +25,7 @@ import (\n\"sort\"\n\"strings\"\n- \"gvisor.dev/gvisor/tools/tags\"\n+ \"gvisor.dev/gvisor/tools/constraintutil\"\n)\n// List of identifiers we use in generated code that may conflict with a\n@@ -123,16 +123,18 @@ func (g *Generator) writeHeader() error {\nvar b sourceBuffer\nb.emit(\"// Automatically generated marshal implementation. See tools/go_marshal.\\n\\n\")\n- // Emit build tags.\n- b.emit(\"// If there are issues with build tag aggregation, see\\n\")\n- b.emit(\"// tools/go_marshal/gomarshal/generator.go:writeHeader(). The build tags here\\n\")\n+ bcexpr, err := constraintutil.CombineFromFiles(g.inputs)\n+ if err != nil {\n+ return err\n+ }\n+ if bcexpr != nil {\n+ // Emit build constraints.\n+ b.emit(\"// If there are issues with build constraint aggregation, see\\n\")\n+ b.emit(\"// tools/go_marshal/gomarshal/generator.go:writeHeader(). The constraints here\\n\")\nb.emit(\"// come from the input set of files used to generate this file. This input set\\n\")\n- b.emit(\"// is filtered based on pre-defined file suffixes related to build tags, see \\n\")\n- b.emit(\"// tools/defs.bzl:calculate_sets().\\n\\n\")\n-\n- if t := tags.Aggregate(g.inputs); len(t) > 0 {\n- b.emit(strings.Join(t.Lines(), \"\\n\"))\n- b.emit(\"\\n\\n\")\n+ b.emit(\"// is filtered based on pre-defined file suffixes related to build constraints,\\n\")\n+ b.emit(\"// see tools/defs.bzl:calculate_sets().\\n\\n\")\n+ b.emit(constraintutil.Lines(bcexpr))\n}\n// Package header.\n@@ -553,11 +555,12 @@ func (g *Generator) writeTests(ts []*testGenerator) error {\nb.reset()\nb.emit(\"// Automatically generated marshal tests. 
See tools/go_marshal.\\n\\n\")\n- // Emit build tags.\n- if t := tags.Aggregate(g.inputs); len(t) > 0 {\n- b.emit(strings.Join(t.Lines(), \"\\n\"))\n- b.emit(\"\\n\\n\")\n+ // Emit build constraints.\n+ bcexpr, err := constraintutil.CombineFromFiles(g.inputs)\n+ if err != nil {\n+ return err\n}\n+ b.emit(constraintutil.Lines(bcexpr))\nb.emit(\"package %s\\n\\n\", g.pkg)\nif err := b.write(g.outputTest); err != nil {\n" }, { "change_type": "MODIFY", "old_path": "tools/go_stateify/BUILD", "new_path": "tools/go_stateify/BUILD", "diff": "@@ -6,7 +6,7 @@ go_binary(\nname = \"stateify\",\nsrcs = [\"main.go\"],\nvisibility = [\"//:sandbox\"],\n- deps = [\"//tools/tags\"],\n+ deps = [\"//tools/constraintutil\"],\n)\nbzl_library(\n" }, { "change_type": "MODIFY", "old_path": "tools/go_stateify/main.go", "new_path": "tools/go_stateify/main.go", "diff": "@@ -28,7 +28,7 @@ import (\n\"strings\"\n\"sync\"\n- \"gvisor.dev/gvisor/tools/tags\"\n+ \"gvisor.dev/gvisor/tools/constraintutil\"\n)\nvar (\n@@ -214,10 +214,13 @@ func main() {\n// Automated warning.\nfmt.Fprint(outputFile, \"// automatically generated by stateify.\\n\\n\")\n- // Emit build tags.\n- if t := tags.Aggregate(flag.Args()); len(t) > 0 {\n- fmt.Fprintf(outputFile, \"%s\\n\\n\", strings.Join(t.Lines(), \"\\n\"))\n+ // Emit build constraints.\n+ bcexpr, err := constraintutil.CombineFromFiles(flag.Args())\n+ if err != nil {\n+ fmt.Fprintf(os.Stderr, \"Failed to infer build constraints: %v\", err)\n+ os.Exit(1)\n}\n+ outputFile.WriteString(constraintutil.Lines(bcexpr))\n// Emit the package name.\n_, pkg := filepath.Split(*fullPkg)\n" }, { "change_type": "DELETE", "old_path": "tools/tags/BUILD", "new_path": null, "diff": "-load(\"//tools:defs.bzl\", \"go_library\")\n-\n-package(licenses = [\"notice\"])\n-\n-go_library(\n- name = \"tags\",\n- srcs = [\"tags.go\"],\n- marshal = False,\n- stateify = False,\n- visibility = [\"//tools:__subpackages__\"],\n-)\n" }, { "change_type": "DELETE", "old_path": "tools/tags/tags.go", "new_path": null, "diff": "-// Copyright 2020 The gVisor Authors.\n-//\n-// Licensed under the Apache License, Version 2.0 (the \"License\");\n-// you may not use this file except in compliance with the License.\n-// You may obtain a copy of the License at\n-//\n-// http://www.apache.org/licenses/LICENSE-2.0\n-//\n-// Unless required by applicable law or agreed to in writing, software\n-// distributed under the License is distributed on an \"AS IS\" BASIS,\n-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n-// See the License for the specific language governing permissions and\n-// limitations under the License.\n-\n-// Package tags is a utility for parsing build tags.\n-package tags\n-\n-import (\n- \"fmt\"\n- \"io/ioutil\"\n- \"strings\"\n-)\n-\n-// OrSet is a set of tags on a single line.\n-//\n-// Note that tags may include \",\", and we don't distinguish this case in the\n-// logic below. 
Ideally, this constraints can be split into separate top-level\n-// build tags in order to resolve any issues.\n-type OrSet []string\n-\n-// Line returns the line for this or.\n-func (or OrSet) Line() string {\n- return fmt.Sprintf(\"// +build %s\", strings.Join([]string(or), \" \"))\n-}\n-\n-// AndSet is the set of all OrSets.\n-type AndSet []OrSet\n-\n-// Lines returns the lines to be printed.\n-func (and AndSet) Lines() (ls []string) {\n- for _, or := range and {\n- ls = append(ls, or.Line())\n- }\n- return\n-}\n-\n-// Join joins this AndSet with another.\n-func (and AndSet) Join(other AndSet) AndSet {\n- return append(and, other...)\n-}\n-\n-// Tags returns the unique set of +build tags.\n-//\n-// Derived form the runtime's canBuild.\n-func Tags(file string) (tags AndSet) {\n- data, err := ioutil.ReadFile(file)\n- if err != nil {\n- return nil\n- }\n- // Check file contents for // +build lines.\n- for _, p := range strings.Split(string(data), \"\\n\") {\n- p = strings.TrimSpace(p)\n- if p == \"\" {\n- continue\n- }\n- if !strings.HasPrefix(p, \"//\") {\n- break\n- }\n- if !strings.Contains(p, \"+build\") {\n- continue\n- }\n- fields := strings.Fields(p[2:])\n- if len(fields) < 1 || fields[0] != \"+build\" {\n- continue\n- }\n- tags = append(tags, OrSet(fields[1:]))\n- }\n- return tags\n-}\n-\n-// Aggregate aggregates all tags from a set of files.\n-//\n-// Note that these may be in conflict, in which case the build will fail.\n-func Aggregate(files []string) (tags AndSet) {\n- for _, file := range files {\n- tags = tags.Join(Tags(file))\n- }\n- return tags\n-}\n" } ]
Go
Apache License 2.0
google/gvisor
Use go:build directives in generated files. Build constraints are now inferred from go:build directives rather than +build directives. +build directives are still emitted in generated files as required in Go 1.16 and earlier. Note that go/build/constraint was added in Go 1.16, so gVisor now requires Go 1.16. PiperOrigin-RevId: 387240779
259,885
28.07.2021 13:47:14
25,200
44efc282feb0f02196bc775d25166b3f0bb30b7d
Lock gofer.dentry.dataMu before SetAttr RPC modifying file size.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/gofer/gofer.go", "new_path": "pkg/sentry/fsimpl/gofer/gofer.go", "diff": "@@ -1161,6 +1161,13 @@ func (d *dentry) setStat(ctx context.Context, creds *auth.Credentials, opts *vfs\nif !d.isSynthetic() {\nif stat.Mask != 0 {\n+ if stat.Mask&linux.STATX_SIZE != 0 {\n+ // d.dataMu must be held around the update to both the remote\n+ // file's size and d.size to serialize with writeback (which\n+ // might otherwise write data back up to the old d.size after\n+ // the remote file has been truncated).\n+ d.dataMu.Lock()\n+ }\nif err := d.file.setAttr(ctx, p9.SetAttrMask{\nPermissions: stat.Mask&linux.STATX_MODE != 0,\nUID: stat.Mask&linux.STATX_UID != 0,\n@@ -1180,13 +1187,16 @@ func (d *dentry) setStat(ctx context.Context, creds *auth.Credentials, opts *vfs\nMTimeSeconds: uint64(stat.Mtime.Sec),\nMTimeNanoSeconds: uint64(stat.Mtime.Nsec),\n}); err != nil {\n+ if stat.Mask&linux.STATX_SIZE != 0 {\n+ d.dataMu.Unlock() // +checklocksforce: locked conditionally above\n+ }\nreturn err\n}\nif stat.Mask&linux.STATX_SIZE != 0 {\n// d.size should be kept up to date, and privatized\n// copy-on-write mappings of truncated pages need to be\n// invalidated, even if InteropModeShared is in effect.\n- d.updateSizeLocked(stat.Size)\n+ d.updateSizeAndUnlockDataMuLocked(stat.Size) // +checklocksforce: locked conditionally above\n}\n}\nif d.fs.opts.interop == InteropModeShared {\n@@ -1249,6 +1259,14 @@ func (d *dentry) doAllocate(ctx context.Context, offset, length uint64, allocate\n// Preconditions: d.metadataMu must be locked.\nfunc (d *dentry) updateSizeLocked(newSize uint64) {\nd.dataMu.Lock()\n+ d.updateSizeAndUnlockDataMuLocked(newSize)\n+}\n+\n+// Preconditions: d.metadataMu and d.dataMu must be locked.\n+//\n+// Postconditions: d.dataMu is unlocked.\n+// +checklocksrelease:d.dataMu\n+func (d *dentry) updateSizeAndUnlockDataMuLocked(newSize uint64) {\noldSize := d.size\natomic.StoreUint64(&d.size, newSize)\n// d.dataMu must be unlocked to lock d.mapsMu and invalidate mappings\n@@ -1257,9 +1275,9 @@ func (d *dentry) updateSizeLocked(newSize uint64) {\n// contents beyond the new d.size. (We are still holding d.metadataMu,\n// so we can't race with Write or another truncate.)\nd.dataMu.Unlock()\n- if d.size < oldSize {\n+ if newSize < oldSize {\noldpgend, _ := hostarch.PageRoundUp(oldSize)\n- newpgend, _ := hostarch.PageRoundUp(d.size)\n+ newpgend, _ := hostarch.PageRoundUp(newSize)\nif oldpgend != newpgend {\nd.mapsMu.Lock()\nd.mappings.Invalidate(memmap.MappableRange{newpgend, oldpgend}, memmap.InvalidateOpts{\n@@ -1275,8 +1293,8 @@ func (d *dentry) updateSizeLocked(newSize uint64) {\n// truncated pages have been removed from the remote file, they\n// should be dropped without being written back.\nd.dataMu.Lock()\n- d.cache.Truncate(d.size, d.fs.mfp.MemoryFile())\n- d.dirty.KeepClean(memmap.MappableRange{d.size, oldpgend})\n+ d.cache.Truncate(newSize, d.fs.mfp.MemoryFile())\n+ d.dirty.KeepClean(memmap.MappableRange{newSize, oldpgend})\nd.dataMu.Unlock()\n}\n}\n" } ]
Go
Apache License 2.0
google/gvisor
Lock gofer.dentry.dataMu before SetAttr RPC modifying file size. PiperOrigin-RevId: 387427887
260,001
28.07.2021 14:01:12
25,200
6bf7d0514be98b6df51436d9a23943e3cc2fdf69
Add verity read benchmark tests
[ { "change_type": "MODIFY", "old_path": "test/perf/BUILD", "new_path": "test/perf/BUILD", "diff": "@@ -146,3 +146,10 @@ syscall_test(\ntest = \"//test/perf/linux:verity_open_benchmark\",\nvfs1 = False,\n)\n+\n+syscall_test(\n+ size = \"large\",\n+ debug = False,\n+ test = \"//test/perf/linux:verity_read_benchmark\",\n+ vfs1 = False,\n+)\n" }, { "change_type": "MODIFY", "old_path": "test/perf/linux/BUILD", "new_path": "test/perf/linux/BUILD", "diff": "@@ -389,3 +389,22 @@ cc_binary(\n\"//test/util:verity_util\",\n],\n)\n+\n+cc_binary(\n+ name = \"verity_read_benchmark\",\n+ testonly = 1,\n+ srcs = [\n+ \"verity_read_benchmark.cc\",\n+ ],\n+ deps = [\n+ gbenchmark,\n+ gtest,\n+ \"//test/util:capability_util\",\n+ \"//test/util:fs_util\",\n+ \"//test/util:logging\",\n+ \"//test/util:temp_path\",\n+ \"//test/util:test_main\",\n+ \"//test/util:test_util\",\n+ \"//test/util:verity_util\",\n+ ],\n+)\n" }, { "change_type": "MODIFY", "old_path": "test/perf/linux/verity_open_benchmark.cc", "new_path": "test/perf/linux/verity_open_benchmark.cc", "diff": "@@ -36,8 +36,6 @@ namespace testing {\nnamespace {\nvoid BM_Open(benchmark::State& state) {\n- SKIP_IF(IsRunningWithVFS1());\n-\nconst int size = state.range(0);\nstd::vector<TempPath> cache;\nstd::vector<EnableTarget> targets;\n" }, { "change_type": "ADD", "old_path": null, "new_path": "test/perf/linux/verity_read_benchmark.cc", "diff": "+// Copyright 2021 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+#include <fcntl.h>\n+#include <stdlib.h>\n+#include <sys/mount.h>\n+#include <unistd.h>\n+\n+#include <memory>\n+#include <string>\n+#include <vector>\n+\n+#include \"gtest/gtest.h\"\n+#include \"benchmark/benchmark.h\"\n+#include \"test/util/capability_util.h\"\n+#include \"test/util/fs_util.h\"\n+#include \"test/util/logging.h\"\n+#include \"test/util/temp_path.h\"\n+#include \"test/util/test_util.h\"\n+#include \"test/util/verity_util.h\"\n+\n+namespace gvisor {\n+namespace testing {\n+\n+namespace {\n+\n+void BM_VerityRead(benchmark::State& state) {\n+ const int size = state.range(0);\n+ const std::string contents(size, 0);\n+\n+ // Mount a tmpfs file system to be wrapped by a verity fs.\n+ TempPath dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());\n+ TEST_CHECK(mount(\"\", dir.path().c_str(), \"tmpfs\", 0, \"\") == 0);\n+\n+ auto path = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith(\n+ dir.path(), contents, TempPath::kDefaultFileMode));\n+ std::string filename = std::string(Basename(path.path()));\n+\n+ std::string verity_dir = TEST_CHECK_NO_ERRNO_AND_VALUE(\n+ MountVerity(dir.path(), {EnableTarget(filename, O_RDONLY)}));\n+\n+ FileDescriptor fd =\n+ ASSERT_NO_ERRNO_AND_VALUE(Open(JoinPath(verity_dir, filename), O_RDONLY));\n+ std::vector<char> buf(size);\n+ for (auto _ : state) {\n+ TEST_CHECK(PreadFd(fd.get(), buf.data(), buf.size(), 0) == size);\n+ }\n+\n+ state.SetBytesProcessed(static_cast<int64_t>(size) *\n+ 
static_cast<int64_t>(state.iterations()));\n+}\n+\n+BENCHMARK(BM_VerityRead)->Range(1, 1 << 26)->UseRealTime();\n+\n+} // namespace\n+\n+} // namespace testing\n+} // namespace gvisor\n" } ]
Go
Apache License 2.0
google/gvisor
Add verity read benchmark tests PiperOrigin-RevId: 387431049
259,910
02.08.2021 01:13:42
-28,800
7b300f556ca6415afb80d3dfbbff2ec2aa93d4ca
Add -y to install docker-ce when building the image
[ { "change_type": "MODIFY", "old_path": "images/default/Dockerfile", "new_path": "images/default/Dockerfile", "diff": "@@ -15,7 +15,7 @@ RUN add-apt-repository \\\n\"deb https://download.docker.com/linux/ubuntu \\\n$(lsb_release -cs) \\\nstable\"\n-RUN apt-get install docker-ce-cli\n+RUN apt-get -y install docker-ce-cli\n# Install gcloud.\nRUN curl https://dl.google.com/dl/cloudsdk/channels/rapid/downloads/google-cloud-sdk-289.0.0-linux-x86_64.tar.gz | \\\n" } ]
Go
Apache License 2.0
google/gvisor
Add -y to install docker-ce when build image
260,001
03.08.2021 10:54:20
25,200
ceab3327c5bf9b9962d776b85a8a99407ab172f4
Add verity open_read_close benchmark test
[ { "change_type": "MODIFY", "old_path": "test/perf/BUILD", "new_path": "test/perf/BUILD", "diff": "@@ -160,3 +160,10 @@ syscall_test(\ntest = \"//test/perf/linux:verity_read_benchmark\",\nvfs1 = False,\n)\n+\n+syscall_test(\n+ size = \"large\",\n+ debug = False,\n+ test = \"//test/perf/linux:verity_open_read_close_benchmark\",\n+ vfs1 = False,\n+)\n" }, { "change_type": "MODIFY", "old_path": "test/perf/linux/BUILD", "new_path": "test/perf/linux/BUILD", "diff": "@@ -424,3 +424,22 @@ cc_binary(\n\"//test/util:verity_util\",\n],\n)\n+\n+cc_binary(\n+ name = \"verity_open_read_close_benchmark\",\n+ testonly = 1,\n+ srcs = [\n+ \"verity_open_read_close_benchmark.cc\",\n+ ],\n+ deps = [\n+ gbenchmark,\n+ gtest,\n+ \"//test/util:capability_util\",\n+ \"//test/util:fs_util\",\n+ \"//test/util:logging\",\n+ \"//test/util:temp_path\",\n+ \"//test/util:test_main\",\n+ \"//test/util:test_util\",\n+ \"//test/util:verity_util\",\n+ ],\n+)\n" }, { "change_type": "ADD", "old_path": null, "new_path": "test/perf/linux/verity_open_read_close_benchmark.cc", "diff": "+// Copyright 2021 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+#include <fcntl.h>\n+#include <stdlib.h>\n+#include <sys/mount.h>\n+#include <unistd.h>\n+\n+#include <memory>\n+#include <string>\n+#include <vector>\n+\n+#include \"gtest/gtest.h\"\n+#include \"benchmark/benchmark.h\"\n+#include \"test/util/capability_util.h\"\n+#include \"test/util/fs_util.h\"\n+#include \"test/util/logging.h\"\n+#include \"test/util/temp_path.h\"\n+#include \"test/util/test_util.h\"\n+#include \"test/util/verity_util.h\"\n+\n+namespace gvisor {\n+namespace testing {\n+\n+namespace {\n+\n+void BM_VerityOpenReadClose(benchmark::State& state) {\n+ const int size = state.range(0);\n+\n+ // Mount a tmpfs file system to be wrapped by a verity fs.\n+ TempPath dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());\n+ TEST_CHECK(mount(\"\", dir.path().c_str(), \"tmpfs\", 0, \"\") == 0);\n+\n+ std::vector<TempPath> cache;\n+ std::vector<EnableTarget> targets;\n+\n+ for (int i = 0; i < size; i++) {\n+ auto file = ASSERT_NO_ERRNO_AND_VALUE(\n+ TempPath::CreateFileWith(dir.path(), \"some contents\", 0644));\n+ targets.emplace_back(\n+ EnableTarget(std::string(Basename(file.path())), O_RDONLY));\n+ cache.emplace_back(std::move(file));\n+ }\n+\n+ std::string verity_dir =\n+ TEST_CHECK_NO_ERRNO_AND_VALUE(MountVerity(dir.path(), targets));\n+\n+ char buf[1];\n+ unsigned int seed = 1;\n+ for (auto _ : state) {\n+ const int chosen = rand_r(&seed) % size;\n+ int fd = open(JoinPath(verity_dir, targets[chosen].path).c_str(), O_RDONLY);\n+ TEST_CHECK(fd != -1);\n+ TEST_CHECK(read(fd, buf, 1) == 1);\n+ close(fd);\n+ }\n+}\n+\n+BENCHMARK(BM_VerityOpenReadClose)->Range(1000, 16384)->UseRealTime();\n+\n+} // namespace\n+\n+} // namespace testing\n+} // namespace gvisor\n" } ]
Go
Apache License 2.0
google/gvisor
Add verity open_read_close benchmark test PiperOrigin-RevId: 388494554
260,001
03.08.2021 13:46:38
25,200
8caf231cb14128938a08208a0580e37e20be1fc1
Add Lifecycle controls. Also change runsc pause/resume cmd to access Lifecycle instead of containerManager.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/control/BUILD", "new_path": "pkg/sentry/control/BUILD", "diff": "@@ -6,6 +6,7 @@ go_library(\nname = \"control\",\nsrcs = [\n\"control.go\",\n+ \"lifecycle.go\",\n\"logging.go\",\n\"pprof.go\",\n\"proc.go\",\n" }, { "change_type": "ADD", "old_path": null, "new_path": "pkg/sentry/control/lifecycle.go", "diff": "+// Copyright 2021 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package control\n+\n+import (\n+ \"gvisor.dev/gvisor/pkg/sentry/kernel\"\n+)\n+\n+// Lifecycle provides functions related to starting and stopping tasks.\n+type Lifecycle struct {\n+ Kernel *kernel.Kernel\n+}\n+\n+// Pause pauses all tasks, blocking until they are stopped.\n+func (l *Lifecycle) Pause(_, _ *struct{}) error {\n+ l.Kernel.Pause()\n+ return nil\n+}\n+\n+// Resume resumes all tasks.\n+func (l *Lifecycle) Resume(_, _ *struct{}) error {\n+ l.Kernel.Unpause()\n+ return nil\n+}\n" }, { "change_type": "MODIFY", "old_path": "runsc/boot/controller.go", "new_path": "runsc/boot/controller.go", "diff": "@@ -57,20 +57,12 @@ const (\n// ContMgrExecuteAsync executes a command in a container.\nContMgrExecuteAsync = \"containerManager.ExecuteAsync\"\n- // ContMgrPause pauses the sandbox (note that individual containers cannot be\n- // paused).\n- ContMgrPause = \"containerManager.Pause\"\n-\n// ContMgrProcesses lists processes running in a container.\nContMgrProcesses = \"containerManager.Processes\"\n// ContMgrRestore restores a container from a statefile.\nContMgrRestore = \"containerManager.Restore\"\n- // ContMgrResume unpauses the paused sandbox (note that individual containers\n- // cannot be resumed).\n- ContMgrResume = \"containerManager.Resume\"\n-\n// ContMgrSignal sends a signal to a container.\nContMgrSignal = \"containerManager.Signal\"\n@@ -111,6 +103,12 @@ const (\nLoggingChange = \"Logging.Change\"\n)\n+// Lifecycle related commands (see lifecycle.go for more details).\n+const (\n+ LifecyclePause = \"Lifecycle.Pause\"\n+ LifecycleResume = \"Lifecycle.Resume\"\n+)\n+\n// ControlSocketAddr generates an abstract unix socket name for the given ID.\nfunc ControlSocketAddr(id string) string {\nreturn fmt.Sprintf(\"\\x00runsc-sandbox.%s\", id)\n@@ -152,6 +150,7 @@ func newController(fd int, l *Loader) (*controller, error) {\nctrl.srv.Register(&debug{})\nctrl.srv.Register(&control.Logging{})\n+ ctrl.srv.Register(&control.Lifecycle{l.k})\nif l.root.conf.ProfileEnable {\nctrl.srv.Register(control.NewProfile(l.k))\n@@ -340,17 +339,6 @@ func (cm *containerManager) Checkpoint(o *control.SaveOpts, _ *struct{}) error {\nreturn state.Save(o, nil)\n}\n-// Pause suspends a sandbox.\n-func (cm *containerManager) Pause(_, _ *struct{}) error {\n- log.Debugf(\"containerManager.Pause\")\n- // TODO(gvisor.dev/issues/6243): save/restore not supported w/ hostinet\n- if cm.l.root.conf.Network == config.NetworkHost {\n- return errors.New(\"pause not supported when using hostinet\")\n- }\n- cm.l.k.Pause()\n- return nil\n-}\n-\n// 
RestoreOpts contains options related to restoring a container's file system.\ntype RestoreOpts struct {\n// FilePayload contains the state file to be restored, followed by the\n@@ -482,13 +470,6 @@ func (cm *containerManager) Restore(o *RestoreOpts, _ *struct{}) error {\nreturn nil\n}\n-// Resume unpauses a sandbox.\n-func (cm *containerManager) Resume(_, _ *struct{}) error {\n- log.Debugf(\"containerManager.Resume\")\n- cm.l.k.Unpause()\n- return nil\n-}\n-\n// Wait waits for the init process in the given container.\nfunc (cm *containerManager) Wait(cid *string, waitStatus *uint32) error {\nlog.Debugf(\"containerManager.Wait, cid: %s\", *cid)\n" }, { "change_type": "MODIFY", "old_path": "runsc/sandbox/sandbox.go", "new_path": "runsc/sandbox/sandbox.go", "diff": "@@ -981,7 +981,7 @@ func (s *Sandbox) Pause(cid string) error {\n}\ndefer conn.Close()\n- if err := conn.Call(boot.ContMgrPause, nil, nil); err != nil {\n+ if err := conn.Call(boot.LifecyclePause, nil, nil); err != nil {\nreturn fmt.Errorf(\"pausing container %q: %v\", cid, err)\n}\nreturn nil\n@@ -996,7 +996,7 @@ func (s *Sandbox) Resume(cid string) error {\n}\ndefer conn.Close()\n- if err := conn.Call(boot.ContMgrResume, nil, nil); err != nil {\n+ if err := conn.Call(boot.LifecycleResume, nil, nil); err != nil {\nreturn fmt.Errorf(\"resuming container %q: %v\", cid, err)\n}\nreturn nil\n" } ]
Go
Apache License 2.0
google/gvisor
Add Lifecycle controls Also change runsc pause/resume cmd to access Lifecycle instead of containerManager. PiperOrigin-RevId: 388534928
260,001
04.08.2021 16:41:27
25,200
cbb99336cee7d37f4050875a95946ca88b7ac690
Add Fs controls. Add Fs controls and implement "cat" command.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/control/BUILD", "new_path": "pkg/sentry/control/BUILD", "diff": "@@ -6,6 +6,7 @@ go_library(\nname = \"control\",\nsrcs = [\n\"control.go\",\n+ \"fs.go\",\n\"lifecycle.go\",\n\"logging.go\",\n\"pprof.go\",\n@@ -17,6 +18,7 @@ go_library(\n],\ndeps = [\n\"//pkg/abi/linux\",\n+ \"//pkg/context\",\n\"//pkg/fd\",\n\"//pkg/log\",\n\"//pkg/sentry/fdimport\",\n@@ -36,6 +38,7 @@ go_library(\n\"//pkg/sync\",\n\"//pkg/tcpip/link/sniffer\",\n\"//pkg/urpc\",\n+ \"//pkg/usermem\",\n],\n)\n" }, { "change_type": "ADD", "old_path": null, "new_path": "pkg/sentry/control/fs.go", "diff": "+// Copyright 2021 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package control\n+\n+import (\n+ \"fmt\"\n+ \"io\"\n+ \"os\"\n+\n+ \"gvisor.dev/gvisor/pkg/context\"\n+ \"gvisor.dev/gvisor/pkg/sentry/fs\"\n+ \"gvisor.dev/gvisor/pkg/sentry/kernel\"\n+ \"gvisor.dev/gvisor/pkg/urpc\"\n+ \"gvisor.dev/gvisor/pkg/usermem\"\n+)\n+\n+// CatOpts contains options for the Cat RPC call.\n+type CatOpts struct {\n+ // Files are the filesystem paths for the files to cat.\n+ Files []string `json:\"files\"`\n+\n+ // FilePayload contains the destination for output.\n+ urpc.FilePayload\n+}\n+\n+// Fs includes fs-related functions.\n+type Fs struct {\n+ Kernel *kernel.Kernel\n+}\n+\n+// Cat is a RPC stub which prints out and returns the content of the files.\n+func (f *Fs) Cat(o *CatOpts, _ *struct{}) error {\n+ // Create an output stream.\n+ if len(o.FilePayload.Files) != 1 {\n+ return ErrInvalidFiles\n+ }\n+\n+ output := o.FilePayload.Files[0]\n+ for _, file := range o.Files {\n+ if err := cat(f.Kernel, file, output); err != nil {\n+ return fmt.Errorf(\"cannot read from file %s: %v\", file, err)\n+ }\n+ }\n+\n+ return nil\n+}\n+\n+// fileReader encapsulates a fs.File and provides an io.Reader interface.\n+type fileReader struct {\n+ ctx context.Context\n+ file *fs.File\n+}\n+\n+// Read implements io.Reader.Read.\n+func (f *fileReader) Read(p []byte) (int, error) {\n+ n, err := f.file.Readv(f.ctx, usermem.BytesIOSequence(p))\n+ return int(n), err\n+}\n+\n+func cat(k *kernel.Kernel, path string, output *os.File) error {\n+ ctx := k.SupervisorContext()\n+ mns := k.GlobalInit().Leader().MountNamespace()\n+ root := mns.Root()\n+ defer root.DecRef(ctx)\n+\n+ remainingTraversals := uint(fs.DefaultTraversalLimit)\n+ d, err := mns.FindInode(ctx, root, nil, path, &remainingTraversals)\n+ if err != nil {\n+ return fmt.Errorf(\"cannot find file %s: %v\", path, err)\n+ }\n+ defer d.DecRef(ctx)\n+\n+ file, err := d.Inode.GetFile(ctx, d, fs.FileFlags{Read: true})\n+ if err != nil {\n+ return fmt.Errorf(\"cannot get file for path %s: %v\", path, err)\n+ }\n+ defer file.DecRef(ctx)\n+\n+ _, err = io.Copy(output, &fileReader{ctx: ctx, file: file})\n+ return err\n+}\n" }, { "change_type": "MODIFY", "old_path": "runsc/boot/controller.go", "new_path": "runsc/boot/controller.go", "diff": "@@ -109,6 +109,11 @@ const (\nLifecycleResume = 
\"Lifecycle.Resume\"\n)\n+// Filesystem related commands (see fs.go for more details).\n+const (\n+ FsCat = \"Fs.Cat\"\n+)\n+\n// ControlSocketAddr generates an abstract unix socket name for the given ID.\nfunc ControlSocketAddr(id string) string {\nreturn fmt.Sprintf(\"\\x00runsc-sandbox.%s\", id)\n@@ -151,6 +156,7 @@ func newController(fd int, l *Loader) (*controller, error) {\nctrl.srv.Register(&debug{})\nctrl.srv.Register(&control.Logging{})\nctrl.srv.Register(&control.Lifecycle{l.k})\n+ ctrl.srv.Register(&control.Fs{l.k})\nif l.root.conf.ProfileEnable {\nctrl.srv.Register(control.NewProfile(l.k))\n" }, { "change_type": "MODIFY", "old_path": "runsc/cmd/debug.go", "new_path": "runsc/cmd/debug.go", "diff": "@@ -48,6 +48,7 @@ type Debug struct {\ndelay time.Duration\nduration time.Duration\nps bool\n+ cat stringSlice\n}\n// Name implements subcommands.Command.\n@@ -81,6 +82,7 @@ func (d *Debug) SetFlags(f *flag.FlagSet) {\nf.StringVar(&d.logLevel, \"log-level\", \"\", \"The log level to set: warning (0), info (1), or debug (2).\")\nf.StringVar(&d.logPackets, \"log-packets\", \"\", \"A boolean value to enable or disable packet logging: true or false.\")\nf.BoolVar(&d.ps, \"ps\", false, \"lists processes\")\n+ f.Var(&d.cat, \"cat\", \"reads files and print to standard output\")\n}\n// Execute implements subcommands.Command.Execute.\n@@ -367,5 +369,11 @@ func (d *Debug) Execute(_ context.Context, f *flag.FlagSet, args ...interface{})\nreturn subcommands.ExitFailure\n}\n+ if d.cat != nil {\n+ if err := c.Cat(d.cat, os.Stdout); err != nil {\n+ return Errorf(\"Cat failed: %v\", err)\n+ }\n+ }\n+\nreturn subcommands.ExitSuccess\n}\n" }, { "change_type": "MODIFY", "old_path": "runsc/container/container.go", "new_path": "runsc/container/container.go", "diff": "@@ -646,6 +646,12 @@ func (c *Container) Resume() error {\nreturn c.saveLocked()\n}\n+// Cat prints out the content of the files.\n+func (c *Container) Cat(files []string, out *os.File) error {\n+ log.Debugf(\"Cat in container, cid: %s, files: %+v\", c.ID, files)\n+ return c.Sandbox.Cat(c.ID, files, out)\n+}\n+\n// State returns the metadata of the container.\nfunc (c *Container) State() specs.State {\nreturn specs.State{\n" }, { "change_type": "MODIFY", "old_path": "runsc/container/container_test.go", "new_path": "runsc/container/container_test.go", "diff": "@@ -442,6 +442,11 @@ func configs(t *testing.T, opts ...configOption) map[string]*config.Config {\nreturn all\n}\n+// sleepSpec generates a spec with sleep 1000 and a conf.\n+func sleepSpecConf(t *testing.T) (*specs.Spec, *config.Config) {\n+ return testutil.NewSpecWithArgs(\"sleep\", \"1000\"), testutil.TestConfig(t)\n+}\n+\n// TestLifecycle tests the basic Create/Start/Signal/Destroy container lifecycle.\n// It verifies after each step that the container can be loaded from disk, and\n// has the correct status.\n@@ -455,7 +460,7 @@ func TestLifecycle(t *testing.T) {\nt.Run(name, func(t *testing.T) {\n// The container will just sleep for a long time. We will kill it before\n// it finishes sleeping.\n- spec := testutil.NewSpecWithArgs(\"sleep\", \"100\")\n+ spec, _ := sleepSpecConf(t)\nrootDir, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf)\nif err != nil {\n@@ -903,7 +908,7 @@ func TestExecProcList(t *testing.T) {\nfor name, conf := range configs(t, all...) 
{\nt.Run(name, func(t *testing.T) {\nconst uid = 343\n- spec := testutil.NewSpecWithArgs(\"sleep\", \"100\")\n+ spec, _ := sleepSpecConf(t)\n_, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf)\nif err != nil {\n@@ -1422,8 +1427,7 @@ func TestPauseResume(t *testing.T) {\n// with calls to pause and resume and that pausing and resuming only\n// occurs given the correct state.\nfunc TestPauseResumeStatus(t *testing.T) {\n- spec := testutil.NewSpecWithArgs(\"sleep\", \"20\")\n- conf := testutil.TestConfig(t)\n+ spec, conf := sleepSpecConf(t)\n_, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf)\nif err != nil {\nt.Fatalf(\"error setting up container: %v\", err)\n@@ -1490,7 +1494,7 @@ func TestCapabilities(t *testing.T) {\nfor name, conf := range configs(t, all...) {\nt.Run(name, func(t *testing.T) {\n- spec := testutil.NewSpecWithArgs(\"sleep\", \"100\")\n+ spec, _ := sleepSpecConf(t)\nrootDir, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf)\nif err != nil {\nt.Fatalf(\"error setting up container: %v\", err)\n@@ -1640,7 +1644,7 @@ func TestMountNewDir(t *testing.T) {\nfunc TestReadonlyRoot(t *testing.T) {\nfor name, conf := range configs(t, all...) {\nt.Run(name, func(t *testing.T) {\n- spec := testutil.NewSpecWithArgs(\"sleep\", \"100\")\n+ spec, _ := sleepSpecConf(t)\nspec.Root.Readonly = true\n_, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf)\n@@ -1692,7 +1696,7 @@ func TestReadonlyMount(t *testing.T) {\nif err != nil {\nt.Fatalf(\"ioutil.TempDir() failed: %v\", err)\n}\n- spec := testutil.NewSpecWithArgs(\"sleep\", \"100\")\n+ spec, _ := sleepSpecConf(t)\nspec.Mounts = append(spec.Mounts, specs.Mount{\nDestination: dir,\nSource: dir,\n@@ -1852,7 +1856,7 @@ func doAbbreviatedIDsTest(t *testing.T, vfs2 bool) {\n\"baz-\" + testutil.RandomContainerID(),\n}\nfor _, cid := range cids {\n- spec := testutil.NewSpecWithArgs(\"sleep\", \"100\")\n+ spec, _ := sleepSpecConf(t)\nbundleDir, cleanup, err := testutil.SetupBundleDir(spec)\nif err != nil {\nt.Fatalf(\"error setting up container: %v\", err)\n@@ -2229,7 +2233,7 @@ func TestMountPropagation(t *testing.T) {\nt.Fatalf(\"mount(%q, MS_SHARED): %v\", srcMnt, err)\n}\n- spec := testutil.NewSpecWithArgs(\"sleep\", \"1000\")\n+ spec, conf := sleepSpecConf(t)\npriv := filepath.Join(tmpDir, \"priv\")\nslave := filepath.Join(tmpDir, \"slave\")\n@@ -2248,7 +2252,6 @@ func TestMountPropagation(t *testing.T) {\n},\n}\n- conf := testutil.TestConfig(t)\n_, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf)\nif err != nil {\nt.Fatalf(\"error setting up container: %v\", err)\n@@ -2563,12 +2566,11 @@ func TestRlimits(t *testing.T) {\n// TestRlimitsExec sets limit to number of open files and checks that the limit\n// is propagated to exec'd processes.\nfunc TestRlimitsExec(t *testing.T) {\n- spec := testutil.NewSpecWithArgs(\"sleep\", \"100\")\n+ spec, conf := sleepSpecConf(t)\nspec.Process.Rlimits = []specs.POSIXRlimit{\n{Type: \"RLIMIT_NOFILE\", Hard: 1000, Soft: 100},\n}\n- conf := testutil.TestConfig(t)\n_, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf)\nif err != nil {\nt.Fatalf(\"error setting up container: %v\", err)\n@@ -2597,3 +2599,59 @@ func TestRlimitsExec(t *testing.T) {\nt.Errorf(\"ulimit result, got: %q, want: %q\", got, want)\n}\n}\n+\n+// TestCat creates a file and checks that cat generates the expected output.\n+func TestCat(t *testing.T) {\n+ f, err := ioutil.TempFile(testutil.TmpDir(), \"test-case\")\n+ if err != nil {\n+ t.Fatalf(\"ioutil.TempFile failed: 
%v\", err)\n+ }\n+ defer os.RemoveAll(f.Name())\n+\n+ content := \"test-cat\"\n+ if _, err := f.WriteString(content); err != nil {\n+ t.Fatalf(\"f.WriteString(): %v\", err)\n+ }\n+ f.Close()\n+\n+ spec, conf := sleepSpecConf(t)\n+\n+ _, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf)\n+ if err != nil {\n+ t.Fatalf(\"error setting up container: %v\", err)\n+ }\n+ defer cleanup()\n+\n+ args := Args{\n+ ID: testutil.RandomContainerID(),\n+ Spec: spec,\n+ BundleDir: bundleDir,\n+ }\n+\n+ cont, err := New(conf, args)\n+ if err != nil {\n+ t.Fatalf(\"Creating container: %v\", err)\n+ }\n+ defer cont.Destroy()\n+\n+ if err := cont.Start(conf); err != nil {\n+ t.Fatalf(\"starting container: %v\", err)\n+ }\n+\n+ r, w, err := os.Pipe()\n+ if err != nil {\n+ t.Fatalf(\"os.Create(): %v\", err)\n+ }\n+\n+ if err := cont.Cat([]string{f.Name()}, w); err != nil {\n+ t.Fatalf(\"error cat from container: %v\", err)\n+ }\n+\n+ buf := make([]byte, 1024)\n+ if _, err := r.Read(buf); err != nil {\n+ t.Fatalf(\"Read out: %v\", err)\n+ }\n+ if got, want := string(buf), content; !strings.Contains(got, want) {\n+ t.Errorf(\"out got %s, want include %s\", buf, want)\n+ }\n+}\n" }, { "change_type": "MODIFY", "old_path": "runsc/sandbox/sandbox.go", "new_path": "runsc/sandbox/sandbox.go", "diff": "@@ -1002,6 +1002,24 @@ func (s *Sandbox) Resume(cid string) error {\nreturn nil\n}\n+// Cat sends the cat call for a container in the sandbox.\n+func (s *Sandbox) Cat(cid string, files []string, out *os.File) error {\n+ log.Debugf(\"Cat sandbox %q\", s.ID)\n+ conn, err := s.sandboxConnect()\n+ if err != nil {\n+ return err\n+ }\n+ defer conn.Close()\n+\n+ if err := conn.Call(boot.FsCat, &control.CatOpts{\n+ Files: files,\n+ FilePayload: urpc.FilePayload{Files: []*os.File{out}},\n+ }, nil); err != nil {\n+ return fmt.Errorf(\"Cat container %q: %v\", cid, err)\n+ }\n+ return nil\n+}\n+\n// IsRunning returns true if the sandbox or gofer process is running.\nfunc (s *Sandbox) IsRunning() bool {\nif s.Pid != 0 {\n" } ]
Go
Apache License 2.0
google/gvisor
Add Fs controls Add Fs controls and implement "cat" command. PiperOrigin-RevId: 388812540
260,001
04.08.2021 17:15:09
25,200
b9780f96be58ba0cff8612eef4e909431421e936
Add verity_randread benchmark test
[ { "change_type": "MODIFY", "old_path": "test/perf/BUILD", "new_path": "test/perf/BUILD", "diff": "@@ -161,6 +161,13 @@ syscall_test(\nvfs1 = False,\n)\n+syscall_test(\n+ size = \"large\",\n+ debug = False,\n+ test = \"//test/perf/linux:verity_randread_benchmark\",\n+ vfs1 = False,\n+)\n+\nsyscall_test(\nsize = \"large\",\ndebug = False,\n" }, { "change_type": "MODIFY", "old_path": "test/perf/linux/BUILD", "new_path": "test/perf/linux/BUILD", "diff": "@@ -425,6 +425,25 @@ cc_binary(\n],\n)\n+cc_binary(\n+ name = \"verity_randread_benchmark\",\n+ testonly = 1,\n+ srcs = [\n+ \"verity_randread_benchmark.cc\",\n+ ],\n+ deps = [\n+ gbenchmark,\n+ gtest,\n+ \"//test/util:capability_util\",\n+ \"//test/util:fs_util\",\n+ \"//test/util:logging\",\n+ \"//test/util:temp_path\",\n+ \"//test/util:test_main\",\n+ \"//test/util:test_util\",\n+ \"//test/util:verity_util\",\n+ ],\n+)\n+\ncc_binary(\nname = \"verity_open_read_close_benchmark\",\ntestonly = 1,\n" }, { "change_type": "ADD", "old_path": null, "new_path": "test/perf/linux/verity_randread_benchmark.cc", "diff": "+// Copyright 2021 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+#include <fcntl.h>\n+#include <stdlib.h>\n+#include <sys/mount.h>\n+#include <sys/stat.h>\n+#include <sys/uio.h>\n+#include <unistd.h>\n+\n+#include \"gtest/gtest.h\"\n+#include \"benchmark/benchmark.h\"\n+#include \"test/util/logging.h\"\n+#include \"test/util/temp_path.h\"\n+#include \"test/util/test_util.h\"\n+#include \"test/util/verity_util.h\"\n+\n+namespace gvisor {\n+namespace testing {\n+\n+namespace {\n+\n+// Create a 1GB file that will be read from at random positions. 
This should\n+// invalid any performance gains from caching.\n+const uint64_t kFileSize = Megabytes(1024);\n+\n+// How many bytes to write at once to initialize the file used to read from.\n+const uint32_t kWriteSize = 65536;\n+\n+// Largest benchmarked read unit.\n+const uint32_t kMaxRead = Megabytes(64);\n+\n+// Global test state, initialized once per process lifetime.\n+struct GlobalState {\n+ explicit GlobalState() {\n+ // Mount a tmpfs file system to be wrapped by a verity fs.\n+ tmp_dir_ = TempPath::CreateDir().ValueOrDie();\n+ TEST_CHECK(mount(\"\", tmp_dir_.path().c_str(), \"tmpfs\", 0, \"\") == 0);\n+ file_ = TempPath::CreateFileIn(tmp_dir_.path()).ValueOrDie();\n+ filename_ = std::string(Basename(file_.path()));\n+\n+ FileDescriptor fd = Open(file_.path(), O_WRONLY).ValueOrDie();\n+\n+ // Try to minimize syscalls by using maximum size writev() requests.\n+ std::vector<char> buffer(kWriteSize);\n+ RandomizeBuffer(buffer.data(), buffer.size());\n+ const std::vector<std::vector<struct iovec>> iovecs_list =\n+ GenerateIovecs(kFileSize + kMaxRead, buffer.data(), buffer.size());\n+ for (const auto& iovecs : iovecs_list) {\n+ TEST_CHECK(writev(fd.get(), iovecs.data(), iovecs.size()) >= 0);\n+ }\n+ verity_dir_ =\n+ MountVerity(tmp_dir_.path(), {EnableTarget(filename_, O_RDONLY)})\n+ .ValueOrDie();\n+ }\n+ TempPath tmp_dir_;\n+ TempPath file_;\n+ std::string verity_dir_;\n+ std::string filename_;\n+};\n+\n+GlobalState& GetGlobalState() {\n+ // This gets created only once throughout the lifetime of the process.\n+ // Use a dynamically allocated object (that is never deleted) to avoid order\n+ // of destruction of static storage variables issues.\n+ static GlobalState* const state =\n+ // The actual file size is the maximum random seek range (kFileSize) + the\n+ // maximum read size so we can read that number of bytes at the end of the\n+ // file.\n+ new GlobalState();\n+ return *state;\n+}\n+\n+void BM_VerityRandRead(benchmark::State& state) {\n+ const int size = state.range(0);\n+\n+ GlobalState& global_state = GetGlobalState();\n+ FileDescriptor verity_fd = ASSERT_NO_ERRNO_AND_VALUE(Open(\n+ JoinPath(global_state.verity_dir_, global_state.filename_), O_RDONLY));\n+ std::vector<char> buf(size);\n+\n+ unsigned int seed = 1;\n+ for (auto _ : state) {\n+ TEST_CHECK(PreadFd(verity_fd.get(), buf.data(), buf.size(),\n+ rand_r(&seed) % kFileSize) == size);\n+ }\n+\n+ state.SetBytesProcessed(static_cast<int64_t>(size) *\n+ static_cast<int64_t>(state.iterations()));\n+}\n+\n+BENCHMARK(BM_VerityRandRead)->Range(1, kMaxRead)->UseRealTime();\n+\n+} // namespace\n+\n+} // namespace testing\n+} // namespace gvisor\n" } ]
Go
Apache License 2.0
google/gvisor
Add verity_randread benchmark test PiperOrigin-RevId: 388819374
259,858
05.08.2021 11:15:14
25,200
919a7da6d792dea17cf59d66fb303788b7e7dbab
Reload the configuration during postinst rather than restarting. Fixes
[ { "change_type": "MODIFY", "old_path": "debian/postinst.sh", "new_path": "debian/postinst.sh", "diff": "@@ -22,7 +22,7 @@ fi\nif [ -f /etc/docker/daemon.json ]; then\nrunsc install\nif systemctl is-active -q docker; then\n- systemctl restart docker || echo \"unable to restart docker; you must do so manually.\" >&2\n+ systemctl reload docker || echo \"unable to reload docker; you must do so manually.\" >&2\nfi\nfi\n" } ]
Go
Apache License 2.0
google/gvisor
Reload the configuration during postinst rather than restarting. Fixes #6408 PiperOrigin-RevId: 388978814
259,985
05.08.2021 16:37:32
25,200
a72efae969e6affc406efa9bccaa23b09e99b43c
Skip mmap test cases if underlying FS doesn't support maps. For file-based mmap tests, the underlying file system may not support mmaps depending on the sandbox configuration. This is the case when caching is disabled for goferfs.
[ { "change_type": "MODIFY", "old_path": "test/syscalls/linux/mmap.cc", "new_path": "test/syscalls/linux/mmap.cc", "diff": "@@ -793,6 +793,19 @@ class MMapFileTest : public MMapTest {\nASSERT_THAT(unlink(filename_.c_str()), SyscallSucceeds());\n}\n+ bool FSSupportsMap() const {\n+ bool supported = true;\n+ void* ret = mmap(nullptr, 1, PROT_NONE, 0, fd_.get(), 0);\n+ if (ret == MAP_FAILED && errno != ENODEV) {\n+ supported = false;\n+ }\n+ if (ret != MAP_FAILED) {\n+ munmap(ret, 1);\n+ }\n+\n+ return supported;\n+ }\n+\nssize_t Read(char* buf, size_t count) {\nssize_t len = 0;\ndo {\n@@ -840,12 +853,14 @@ class MMapFileParamTest\n// MAP_POPULATE allowed.\n// There isn't a good way to verify it actually did anything.\nTEST_P(MMapFileParamTest, MapPopulate) {\n+ SKIP_IF(!FSSupportsMap());\nASSERT_THAT(Map(0, kPageSize, prot(), flags() | MAP_POPULATE, fd_.get(), 0),\nSyscallSucceeds());\n}\n// MAP_POPULATE on a short file.\nTEST_P(MMapFileParamTest, MapPopulateShort) {\n+ SKIP_IF(!FSSupportsMap());\nASSERT_THAT(\nMap(0, 2 * kPageSize, prot(), flags() | MAP_POPULATE, fd_.get(), 0),\nSyscallSucceeds());\n@@ -853,6 +868,7 @@ TEST_P(MMapFileParamTest, MapPopulateShort) {\n// Read contents from mapped file.\nTEST_F(MMapFileTest, Read) {\n+ SKIP_IF(!FSSupportsMap());\nsize_t len = strlen(kFileContents);\nASSERT_EQ(len, Write(kFileContents, len));\n@@ -866,6 +882,7 @@ TEST_F(MMapFileTest, Read) {\n// Map at an offset.\nTEST_F(MMapFileTest, MapOffset) {\n+ SKIP_IF(!FSSupportsMap());\nASSERT_THAT(lseek(fd_.get(), kPageSize, SEEK_SET), SyscallSucceeds());\nsize_t len = strlen(kFileContents);\n@@ -881,6 +898,7 @@ TEST_F(MMapFileTest, MapOffset) {\n}\nTEST_F(MMapFileTest, MapOffsetBeyondEnd) {\n+ SKIP_IF(!FSSupportsMap());\nSetupGvisorDeathTest();\nuintptr_t addr;\n@@ -897,6 +915,7 @@ TEST_F(MMapFileTest, MapOffsetBeyondEnd) {\n// Verify mmap fails when sum of length and offset overflows.\nTEST_F(MMapFileTest, MapLengthPlusOffsetOverflows) {\n+ SKIP_IF(!FSSupportsMap());\nconst size_t length = static_cast<size_t>(-kPageSize);\nconst off_t offset = kPageSize;\nASSERT_THAT(Map(0, length, PROT_READ, MAP_PRIVATE, fd_.get(), offset),\n@@ -905,6 +924,7 @@ TEST_F(MMapFileTest, MapLengthPlusOffsetOverflows) {\n// MAP_PRIVATE PROT_WRITE is allowed on read-only FDs.\nTEST_F(MMapFileTest, WritePrivateOnReadOnlyFd) {\n+ SKIP_IF(!FSSupportsMap());\nconst FileDescriptor fd =\nASSERT_NO_ERRNO_AND_VALUE(Open(filename_, O_RDONLY));\n@@ -921,6 +941,7 @@ TEST_F(MMapFileTest, WritePrivateOnReadOnlyFd) {\n// MAP_SHARED PROT_WRITE not allowed on read-only FDs.\nTEST_F(MMapFileTest, WriteSharedOnReadOnlyFd) {\n+ SKIP_IF(!FSSupportsMap());\nconst FileDescriptor fd =\nASSERT_NO_ERRNO_AND_VALUE(Open(filename_, O_RDONLY));\n@@ -932,6 +953,7 @@ TEST_F(MMapFileTest, WriteSharedOnReadOnlyFd) {\n// Mmap not allowed on O_PATH FDs.\nTEST_F(MMapFileTest, MmapFileWithOpath) {\n+ SKIP_IF(!FSSupportsMap());\nSKIP_IF(IsRunningWithVFS1());\nconst TempPath file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());\nconst FileDescriptor fd =\n@@ -944,6 +966,7 @@ TEST_F(MMapFileTest, MmapFileWithOpath) {\n// The FD must be readable.\nTEST_P(MMapFileParamTest, WriteOnlyFd) {\n+ SKIP_IF(!FSSupportsMap());\nconst FileDescriptor fd =\nASSERT_NO_ERRNO_AND_VALUE(Open(filename_, O_WRONLY));\n@@ -955,6 +978,7 @@ TEST_P(MMapFileParamTest, WriteOnlyFd) {\n// Overwriting the contents of a file mapped MAP_SHARED PROT_READ\n// should cause the new data to be reflected in the mapping.\nTEST_F(MMapFileTest, ReadSharedConsistentWithOverwrite) {\n+ 
SKIP_IF(!FSSupportsMap());\n// Start from scratch.\nEXPECT_THAT(ftruncate(fd_.get(), 0), SyscallSucceeds());\n@@ -994,6 +1018,7 @@ TEST_F(MMapFileTest, ReadSharedConsistentWithOverwrite) {\n// Partially overwriting a file mapped MAP_SHARED PROT_READ should be reflected\n// in the mapping.\nTEST_F(MMapFileTest, ReadSharedConsistentWithPartialOverwrite) {\n+ SKIP_IF(!FSSupportsMap());\n// Start from scratch.\nEXPECT_THAT(ftruncate(fd_.get(), 0), SyscallSucceeds());\n@@ -1034,6 +1059,7 @@ TEST_F(MMapFileTest, ReadSharedConsistentWithPartialOverwrite) {\n// Overwriting a file mapped MAP_SHARED PROT_READ should be reflected in the\n// mapping and the file.\nTEST_F(MMapFileTest, ReadSharedConsistentWithWriteAndFile) {\n+ SKIP_IF(!FSSupportsMap());\n// Start from scratch.\nEXPECT_THAT(ftruncate(fd_.get(), 0), SyscallSucceeds());\n@@ -1077,6 +1103,7 @@ TEST_F(MMapFileTest, ReadSharedConsistentWithWriteAndFile) {\n// Write data to mapped file.\nTEST_F(MMapFileTest, WriteShared) {\n+ SKIP_IF(!FSSupportsMap());\nuintptr_t addr;\nASSERT_THAT(addr = Map(0, kPageSize, PROT_READ | PROT_WRITE, MAP_SHARED,\nfd_.get(), 0),\n@@ -1101,6 +1128,7 @@ TEST_F(MMapFileTest, WriteShared) {\n// Write data to portion of mapped page beyond the end of the file.\n// These writes are not reflected in the file.\nTEST_F(MMapFileTest, WriteSharedBeyondEnd) {\n+ SKIP_IF(!FSSupportsMap());\n// The file is only half of a page. We map an entire page. Writes to the\n// end of the mapping must not be reflected in the file.\nuintptr_t addr;\n@@ -1137,6 +1165,7 @@ TEST_F(MMapFileTest, WriteSharedBeyondEnd) {\n// The portion of a mapped page that becomes part of the file after a truncate\n// is reflected in the file.\nTEST_F(MMapFileTest, WriteSharedTruncateUp) {\n+ SKIP_IF(!FSSupportsMap());\n// The file is only half of a page. We map an entire page. Writes to the\n// end of the mapping must not be reflected in the file.\nuintptr_t addr;\n@@ -1174,6 +1203,7 @@ TEST_F(MMapFileTest, WriteSharedTruncateUp) {\n}\nTEST_F(MMapFileTest, ReadSharedTruncateDownThenUp) {\n+ SKIP_IF(!FSSupportsMap());\n// Start from scratch.\nEXPECT_THAT(ftruncate(fd_.get(), 0), SyscallSucceeds());\n@@ -1213,6 +1243,7 @@ TEST_F(MMapFileTest, ReadSharedTruncateDownThenUp) {\n}\nTEST_F(MMapFileTest, WriteSharedTruncateDownThenUp) {\n+ SKIP_IF(!FSSupportsMap());\n// The file is only half of a page. We map an entire page. 
Writes to the\n// end of the mapping must not be reflected in the file.\nuintptr_t addr;\n@@ -1247,6 +1278,7 @@ TEST_F(MMapFileTest, WriteSharedTruncateDownThenUp) {\n}\nTEST_F(MMapFileTest, ReadSharedTruncateSIGBUS) {\n+ SKIP_IF(!FSSupportsMap());\nSetupGvisorDeathTest();\n// Start from scratch.\n@@ -1277,6 +1309,7 @@ TEST_F(MMapFileTest, ReadSharedTruncateSIGBUS) {\n}\nTEST_F(MMapFileTest, WriteSharedTruncateSIGBUS) {\n+ SKIP_IF(!FSSupportsMap());\nSetupGvisorDeathTest();\nuintptr_t addr;\n@@ -1298,6 +1331,7 @@ TEST_F(MMapFileTest, WriteSharedTruncateSIGBUS) {\n}\nTEST_F(MMapFileTest, ReadSharedTruncatePartialPage) {\n+ SKIP_IF(!FSSupportsMap());\n// Start from scratch.\nEXPECT_THAT(ftruncate(fd_.get(), 0), SyscallSucceeds());\n@@ -1327,6 +1361,7 @@ TEST_F(MMapFileTest, ReadSharedTruncatePartialPage) {\n// Page can still be accessed and contents are intact after truncating a partial\n// page.\nTEST_F(MMapFileTest, WriteSharedTruncatePartialPage) {\n+ SKIP_IF(!FSSupportsMap());\n// Expand the file to a full page.\nEXPECT_THAT(ftruncate(fd_.get(), kPageSize), SyscallSucceeds());\n@@ -1354,6 +1389,7 @@ TEST_F(MMapFileTest, WriteSharedTruncatePartialPage) {\n// MAP_PRIVATE writes are not carried through to the underlying file.\nTEST_F(MMapFileTest, WritePrivate) {\n+ SKIP_IF(!FSSupportsMap());\nuintptr_t addr;\nASSERT_THAT(addr = Map(0, kPageSize, PROT_READ | PROT_WRITE, MAP_PRIVATE,\nfd_.get(), 0),\n@@ -1378,6 +1414,7 @@ TEST_F(MMapFileTest, WritePrivate) {\n// SIGBUS raised when reading or writing past end of a mapped file.\nTEST_P(MMapFileParamTest, SigBusDeath) {\n+ SKIP_IF(!FSSupportsMap());\nSetupGvisorDeathTest();\nuintptr_t addr;\n@@ -1406,6 +1443,7 @@ TEST_P(MMapFileParamTest, SigBusDeath) {\n//\n// See b/27877699.\nTEST_P(MMapFileParamTest, NoSigBusOnPagesBeforeEOF) {\n+ SKIP_IF(!FSSupportsMap());\nuintptr_t addr;\nASSERT_THAT(addr = Map(0, 2 * kPageSize, prot(), flags(), fd_.get(), 0),\nSyscallSucceeds());\n@@ -1424,6 +1462,7 @@ TEST_P(MMapFileParamTest, NoSigBusOnPagesBeforeEOF) {\n// Tests that SIGBUS is not raised when reading or writing from a file-mapped\n// page containing EOF, *after* the EOF.\nTEST_P(MMapFileParamTest, NoSigBusOnPageContainingEOF) {\n+ SKIP_IF(!FSSupportsMap());\nuintptr_t addr;\nASSERT_THAT(addr = Map(0, 2 * kPageSize, prot(), flags(), fd_.get(), 0),\nSyscallSucceeds());\n@@ -1446,6 +1485,7 @@ TEST_P(MMapFileParamTest, NoSigBusOnPageContainingEOF) {\n// page cache (which does not yet support writing to shared mappings), a bug\n// caused reads to fail unnecessarily on such mappings. See b/28913513.\nTEST_F(MMapFileTest, ReadingWritableSharedFilePageSucceeds) {\n+ SKIP_IF(!FSSupportsMap());\nuintptr_t addr;\nsize_t len = strlen(kFileContents);\n@@ -1463,6 +1503,7 @@ TEST_F(MMapFileTest, ReadingWritableSharedFilePageSucceeds) {\n// read past end of file (resulting in a fault in sentry context in the gVisor\n// case). 
See b/28913513.\nTEST_F(MMapFileTest, InternalSigBus) {\n+ SKIP_IF(!FSSupportsMap());\nuintptr_t addr;\nASSERT_THAT(addr = Map(0, 2 * kPageSize, PROT_READ | PROT_WRITE, MAP_PRIVATE,\nfd_.get(), 0),\n@@ -1483,6 +1524,7 @@ TEST_F(MMapFileTest, InternalSigBus) {\n// /dev/zero to a shared mapping (so that the SIGBUS isn't caught during\n// copy-on-write breaking).\nTEST_F(MMapFileTest, InternalSigBusZeroing) {\n+ SKIP_IF(!FSSupportsMap());\nuintptr_t addr;\nASSERT_THAT(addr = Map(0, 2 * kPageSize, PROT_READ | PROT_WRITE, MAP_SHARED,\nfd_.get(), 0),\n@@ -1578,6 +1620,7 @@ TEST_F(MMapTest, NoReserve) {\n// Map more than the gVisor page-cache map unit (64k) and ensure that\n// it is consistent with reading from the file.\nTEST_F(MMapFileTest, Bug38498194) {\n+ SKIP_IF(!FSSupportsMap());\n// Choose a sufficiently large map unit.\nconstexpr int kSize = 4 * 1024 * 1024;\nEXPECT_THAT(ftruncate(fd_.get(), kSize), SyscallSucceeds());\n@@ -1606,6 +1649,7 @@ TEST_F(MMapFileTest, Bug38498194) {\n// Tests that reading from a file to a memory mapping of the same file does not\n// deadlock. See b/34813270.\nTEST_F(MMapFileTest, SelfRead) {\n+ SKIP_IF(!FSSupportsMap());\nuintptr_t addr;\nASSERT_THAT(addr = Map(0, kPageSize, PROT_READ | PROT_WRITE, MAP_SHARED,\nfd_.get(), 0),\n@@ -1618,6 +1662,7 @@ TEST_F(MMapFileTest, SelfRead) {\n// Tests that writing to a file from a memory mapping of the same file does not\n// deadlock. Regression test for b/34813270.\nTEST_F(MMapFileTest, SelfWrite) {\n+ SKIP_IF(!FSSupportsMap());\nuintptr_t addr;\nASSERT_THAT(addr = Map(0, kPageSize, PROT_READ, MAP_SHARED, fd_.get(), 0),\nSyscallSucceeds());\n@@ -1633,8 +1678,12 @@ TEST(MMapDeathTest, TruncateAfterCOWBreak) {\nauto const temp_file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());\nauto const fd = ASSERT_NO_ERRNO_AND_VALUE(Open(temp_file.path(), O_RDWR));\nASSERT_THAT(ftruncate(fd.get(), kPageSize), SyscallSucceeds());\n- auto const mapping = ASSERT_NO_ERRNO_AND_VALUE(Mmap(\n- nullptr, kPageSize, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd.get(), 0));\n+\n+ auto maybe_mapping = Mmap(nullptr, kPageSize, PROT_READ | PROT_WRITE,\n+ MAP_PRIVATE, fd.get(), 0);\n+ // Does FS support mmap?\n+ SKIP_IF(maybe_mapping.error().errno_value() == ENODEV);\n+ auto const mapping = ASSERT_NO_ERRNO_AND_VALUE(std::move(maybe_mapping));\n// Write to this mapping, causing the page to be copied for write.\nmemset(mapping.ptr(), 'a', mapping.len());\n@@ -1661,8 +1710,12 @@ TEST(MMapNoFixtureTest, MapReadOnlyAfterCreateWriteOnly) {\nauto const wo_fd = ASSERT_NO_ERRNO_AND_VALUE(Open(filename, O_WRONLY));\nASSERT_THAT(ftruncate(wo_fd.get(), kPageSize), SyscallSucceeds());\n- auto const mapping = ASSERT_NO_ERRNO_AND_VALUE(\n- Mmap(nullptr, kPageSize, PROT_READ, MAP_SHARED, ro_fd.get(), 0));\n+ auto maybe_mapping =\n+ Mmap(nullptr, kPageSize, PROT_READ, MAP_SHARED, ro_fd.get(), 0);\n+ // Does FS support mmap?\n+ SKIP_IF(maybe_mapping.error().errno_value() == ENODEV);\n+ auto const mapping = ASSERT_NO_ERRNO_AND_VALUE(std::move(maybe_mapping));\n+\nstd::vector<char> buf(kPageSize);\n// The test passes if this survives.\nstd::copy(static_cast<char*>(mapping.ptr()),\n" } ]
Go
Apache License 2.0
google/gvisor
Skip mmap test cases if underlying FS doesn't support maps. For file-based mmap tests, the underlying file system may not support mmaps depending on the sandbox configuration. This is the case when caching is disabled for goferfs. PiperOrigin-RevId: 389052722
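The guard added in this record keys off mmap failing with ENODEV when the sandboxed filesystem cannot back file mappings. For readers who want the same probe outside the C++ test suite, here is a minimal Go sketch of the idea; the helper name fsSupportsMap, the probe path, and the use of the standard syscall package are illustrative assumptions, not part of the change itself.

```go
// Sketch: probe whether a filesystem supports file-backed mmap by
// attempting the mapping and treating ENODEV as "unsupported" -- the same
// condition the SKIP_IF(!FSSupportsMap()) guards check for.
package main

import (
	"fmt"
	"os"
	"syscall"
)

// fsSupportsMap reports whether the file at path can be mapped with
// MAP_SHARED. ENODEV means the backing filesystem does not support maps
// (e.g. goferfs with caching disabled); any other error is returned.
func fsSupportsMap(path string) (bool, error) {
	f, err := os.Open(path)
	if err != nil {
		return false, err
	}
	defer f.Close()

	const pageSize = 4096
	data, err := syscall.Mmap(int(f.Fd()), 0, pageSize,
		syscall.PROT_READ, syscall.MAP_SHARED)
	if err == syscall.ENODEV {
		return false, nil
	}
	if err != nil {
		return false, err
	}
	syscall.Munmap(data)
	return true, nil
}

func main() {
	// "/etc/hostname" is only an example path; substitute a file on the
	// filesystem under test.
	ok, err := fsSupportsMap("/etc/hostname")
	fmt.Printf("mmap supported: %v (err=%v)\n", ok, err)
}
```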
259,985
05.08.2021 20:13:54
25,200
569f605f438dd10e5ffa1d5eb129ba1d15bbf34c
Correctly handle interruptions in blocking msgqueue syscalls. Reported-by: Reported-by:
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/kernel/msgqueue/msgqueue.go", "new_path": "pkg/sentry/kernel/msgqueue/msgqueue.go", "diff": "@@ -208,11 +208,13 @@ func (r *Registry) FindByID(id ipc.ID) (*Queue, error) {\n// Send appends a message to the message queue, and returns an error if sending\n// fails. See msgsnd(2).\n-func (q *Queue) Send(ctx context.Context, m Message, b Blocker, wait bool, pid int32) (err error) {\n+func (q *Queue) Send(ctx context.Context, m Message, b Blocker, wait bool, pid int32) error {\n// Try to perform a non-blocking send using queue.append. If EWOULDBLOCK\n// is returned, start the blocking procedure. Otherwise, return normally.\ncreds := auth.CredentialsFromContext(ctx)\n- if err := q.append(ctx, m, creds, pid); err != linuxerr.EWOULDBLOCK {\n+\n+ // Fast path: first attempt a non-blocking push.\n+ if err := q.push(ctx, m, creds, pid); err != linuxerr.EWOULDBLOCK {\nreturn err\n}\n@@ -220,25 +222,30 @@ func (q *Queue) Send(ctx context.Context, m Message, b Blocker, wait bool, pid i\nreturn linuxerr.EAGAIN\n}\n+ // Slow path: at this point, the queue was found to be full, and we were\n+ // asked to block.\n+\ne, ch := waiter.NewChannelEntry(nil)\nq.senders.EventRegister(&e, waiter.EventOut)\n+ defer q.senders.EventUnregister(&e)\n+ // Note: we need to check again before blocking the first time since space\n+ // may have become available.\nfor {\n- if err = q.append(ctx, m, creds, pid); err != linuxerr.EWOULDBLOCK {\n- break\n- }\n- b.Block(ch)\n+ if err := q.push(ctx, m, creds, pid); err != linuxerr.EWOULDBLOCK {\n+ return err\n}\n-\n- q.senders.EventUnregister(&e)\n+ if err := b.Block(ch); err != nil {\nreturn err\n}\n+ }\n+}\n-// append appends a message to the queue's message list and notifies waiting\n+// push appends a message to the queue's message list and notifies waiting\n// receivers that a message has been inserted. It returns an error if adding\n// the message would cause the queue to exceed its maximum capacity, which can\n// be used as a signal to block the task. Other errors should be returned as is.\n-func (q *Queue) append(ctx context.Context, m Message, creds *auth.Credentials, pid int32) error {\n+func (q *Queue) push(ctx context.Context, m Message, creds *auth.Credentials, pid int32) error {\nif m.Type <= 0 {\nreturn linuxerr.EINVAL\n}\n@@ -295,15 +302,14 @@ func (q *Queue) append(ctx context.Context, m Message, creds *auth.Credentials,\n}\n// Receive removes a message from the queue and returns it. See msgrcv(2).\n-func (q *Queue) Receive(ctx context.Context, b Blocker, mType int64, maxSize int64, wait, truncate, except bool, pid int32) (msg *Message, err error) {\n+func (q *Queue) Receive(ctx context.Context, b Blocker, mType int64, maxSize int64, wait, truncate, except bool, pid int32) (*Message, error) {\nif maxSize < 0 || maxSize > maxMessageBytes {\nreturn nil, linuxerr.EINVAL\n}\nmax := uint64(maxSize)\n-\n- // Try to perform a non-blocking receive using queue.pop. If EWOULDBLOCK\n- // is returned, start the blocking procedure. 
Otherwise, return normally.\ncreds := auth.CredentialsFromContext(ctx)\n+\n+ // Fast path: first attempt a non-blocking pop.\nif msg, err := q.pop(ctx, creds, mType, max, truncate, except, pid); err != linuxerr.EWOULDBLOCK {\nreturn msg, err\n}\n@@ -312,24 +318,30 @@ func (q *Queue) Receive(ctx context.Context, b Blocker, mType int64, maxSize int\nreturn nil, linuxerr.ENOMSG\n}\n+ // Slow path: at this point, the queue was found to be empty, and we were\n+ // asked to block.\n+\ne, ch := waiter.NewChannelEntry(nil)\nq.receivers.EventRegister(&e, waiter.EventIn)\n+ defer q.receivers.EventUnregister(&e)\n+ // Note: we need to check again before blocking the first time since a\n+ // message may have become available.\nfor {\n- if msg, err = q.pop(ctx, creds, mType, max, truncate, except, pid); err != linuxerr.EWOULDBLOCK {\n- break\n+ if msg, err := q.pop(ctx, creds, mType, max, truncate, except, pid); err != linuxerr.EWOULDBLOCK {\n+ return msg, err\n+ }\n+ if err := b.Block(ch); err != nil {\n+ return nil, err\n}\n- b.Block(ch)\n}\n- q.receivers.EventUnregister(&e)\n- return msg, err\n}\n// pop pops the first message from the queue that matches the given type. It\n// returns an error for all the cases specified in msgrcv(2). If the queue is\n// empty or no message of the specified type is available, a EWOULDBLOCK error\n// is returned, which can then be used as a signal to block the process or fail.\n-func (q *Queue) pop(ctx context.Context, creds *auth.Credentials, mType int64, maxSize uint64, truncate, except bool, pid int32) (msg *Message, _ error) {\n+func (q *Queue) pop(ctx context.Context, creds *auth.Credentials, mType int64, maxSize uint64, truncate, except bool, pid int32) (*Message, error) {\nq.mu.Lock()\ndefer q.mu.Unlock()\n@@ -350,6 +362,7 @@ func (q *Queue) pop(ctx context.Context, creds *auth.Credentials, mType int64, m\n}\n// Get a message from the queue.\n+ var msg *Message\nswitch {\ncase mType == 0:\nmsg = q.messages.Front()\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/BUILD", "new_path": "test/syscalls/linux/BUILD", "diff": "@@ -4173,10 +4173,11 @@ cc_binary(\nlinkstatic = 1,\ndeps = [\n\"//test/util:capability_util\",\n+ \"//test/util:signal_util\",\n\"//test/util:temp_path\",\n- \"//test/util:test_main\",\n\"//test/util:test_util\",\n\"//test/util:thread_util\",\n+ \"@com_google_absl//absl/synchronization\",\n\"@com_google_absl//absl/time\",\n],\n)\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/msgqueue.cc", "new_path": "test/syscalls/linux/msgqueue.cc", "diff": "// limitations under the License.\n#include <errno.h>\n+#include <signal.h>\n#include <sys/ipc.h>\n#include <sys/msg.h>\n#include <sys/types.h>\n+#include \"absl/synchronization/notification.h\"\n#include \"absl/time/clock.h\"\n#include \"test/util/capability_util.h\"\n+#include \"test/util/signal_util.h\"\n#include \"test/util/temp_path.h\"\n#include \"test/util/test_util.h\"\n#include \"test/util/thread_util.h\"\n@@ -31,6 +34,8 @@ constexpr int msgMax = 8192; // Max size for message in bytes.\nconstexpr int msgMni = 32000; // Max number of identifiers.\nconstexpr int msgMnb = 16384; // Default max size of message queue in bytes.\n+constexpr int kInterruptSignal = SIGALRM;\n+\n// Queue is a RAII class used to automatically clean message queues.\nclass Queue {\npublic:\n@@ -73,6 +78,12 @@ bool operator==(msgbuf& a, msgbuf& b) {\nreturn a.mtype == b.mtype;\n}\n+// msgmax represents a buffer for the largest possible single message.\n+struct msgmax {\n+ int64_t mtype;\n+ 
char mtext[msgMax];\n+};\n+\n// Test simple creation and retrieval for msgget(2).\nTEST(MsgqueueTest, MsgGet) {\nconst TempPath keyfile = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());\n@@ -310,13 +321,6 @@ TEST(MsgqueueTest, MsgOpLimits) {\nSyscallFailsWithErrno(EINVAL));\n// Limit for queue.\n- // Use a buffer with the maximum mount of bytes that can be transformed to\n- // make it easier to exhaust the queue limit.\n- struct msgmax {\n- int64_t mtype;\n- char mtext[msgMax];\n- };\n-\nmsgmax limit{1, \"\"};\nfor (size_t i = 0, msgCount = msgMnb / msgMax; i < msgCount; i++) {\nEXPECT_THAT(msgsnd(queue.get(), &limit, sizeof(limit.mtext), 0),\n@@ -470,13 +474,6 @@ TEST(MsgqueueTest, MsgSndBlocking) {\nQueue queue(msgget(IPC_PRIVATE, 0600));\nASSERT_THAT(queue.get(), SyscallSucceeds());\n- // Use a buffer with the maximum mount of bytes that can be transformed to\n- // make it easier to exhaust the queue limit.\n- struct msgmax {\n- int64_t mtype;\n- char mtext[msgMax];\n- };\n-\nmsgmax buf{1, \"\"}; // Has max amount of bytes.\nconst size_t msgCount = msgMnb / msgMax; // Number of messages that can be\n@@ -494,6 +491,8 @@ TEST(MsgqueueTest, MsgSndBlocking) {\nSyscallSucceeds());\n});\n+ const DisableSave ds; // Too many syscalls.\n+\n// To increase the chance of the last msgsnd blocking before doing a msgrcv,\n// we use MSG_COPY option to copy the last index in the queue. As long as\n// MSG_COPY fails, the queue hasn't yet been filled. When MSG_COPY succeeds,\n@@ -516,15 +515,9 @@ TEST(MsgqueueTest, MsgSndRmWhileBlocking) {\nQueue queue(msgget(IPC_PRIVATE, 0600));\nASSERT_THAT(queue.get(), SyscallSucceeds());\n- // Use a buffer with the maximum mount of bytes that can be transformed to\n- // make it easier to exhaust the queue limit.\n- struct msgmax {\n- int64_t mtype;\n- char mtext[msgMax];\n- };\n+ // Number of messages that can be sent without blocking.\n+ const size_t msgCount = msgMnb / msgMax;\n- const size_t msgCount = msgMnb / msgMax; // Number of messages that can be\n- // sent without blocking.\nScopedThread t([&] {\n// Fill the queue.\nmsgmax buf{1, \"\"};\n@@ -540,6 +533,8 @@ TEST(MsgqueueTest, MsgSndRmWhileBlocking) {\nEXPECT_TRUE((errno == EIDRM || errno == EINVAL));\n});\n+ const DisableSave ds; // Too many syscalls.\n+\n// Similar to MsgSndBlocking, we do this to increase the chance of msgsnd\n// blocking before removing the queue.\nmsgmax rcv;\n@@ -627,6 +622,105 @@ TEST(MsgqueueTest, MsgOpGeneral) {\nScopedThread s10(sender(4));\n}\n+void empty_sighandler(int sig, siginfo_t* info, void* context) {}\n+\n+TEST(MsgqueueTest, InterruptRecv) {\n+ Queue queue(msgget(IPC_PRIVATE, 0600));\n+ char buf[64];\n+\n+ absl::Notification done, exit;\n+\n+ // Thread calling msgrcv with no corresponding send. It would block forever,\n+ // but we'll interrupt with a signal below.\n+ ScopedThread t([&] {\n+ struct sigaction sa = {};\n+ sa.sa_sigaction = empty_sighandler;\n+ sigfillset(&sa.sa_mask);\n+ sa.sa_flags = SA_SIGINFO;\n+ auto cleanup_sigaction =\n+ ASSERT_NO_ERRNO_AND_VALUE(ScopedSigaction(kInterruptSignal, sa));\n+ auto sa_cleanup = ASSERT_NO_ERRNO_AND_VALUE(\n+ ScopedSignalMask(SIG_UNBLOCK, kInterruptSignal));\n+\n+ EXPECT_THAT(msgrcv(queue.get(), &buf, sizeof(buf), 0, 0),\n+ SyscallFailsWithErrno(EINTR));\n+\n+ done.Notify();\n+ exit.WaitForNotification();\n+ });\n+\n+ const DisableSave ds; // Too many syscalls.\n+\n+ // We want the signal to arrive while msgrcv is blocking, but not after the\n+ // thread has exited. 
Signals that arrive before msgrcv are no-ops.\n+ do {\n+ EXPECT_THAT(kill(getpid(), kInterruptSignal), SyscallSucceeds());\n+ absl::SleepFor(absl::Milliseconds(100)); // Rate limit.\n+ } while (!done.HasBeenNotified());\n+\n+ exit.Notify();\n+ t.Join();\n+}\n+\n+TEST(MsgqueueTest, InterruptSend) {\n+ Queue queue(msgget(IPC_PRIVATE, 0600));\n+ msgmax buf{1, \"\"};\n+ // Number of messages that can be sent without blocking.\n+ const size_t msgCount = msgMnb / msgMax;\n+\n+ // Fill the queue.\n+ for (size_t i = 0; i < msgCount; i++) {\n+ ASSERT_THAT(msgsnd(queue.get(), &buf, sizeof(buf.mtext), 0),\n+ SyscallSucceeds());\n+ }\n+\n+ absl::Notification done, exit;\n+\n+ // Thread calling msgsnd on a full queue. It would block forever, but we'll\n+ // interrupt with a signal below.\n+ ScopedThread t([&] {\n+ struct sigaction sa = {};\n+ sa.sa_sigaction = empty_sighandler;\n+ sigfillset(&sa.sa_mask);\n+ sa.sa_flags = SA_SIGINFO;\n+ auto cleanup_sigaction =\n+ ASSERT_NO_ERRNO_AND_VALUE(ScopedSigaction(kInterruptSignal, sa));\n+ auto sa_cleanup = ASSERT_NO_ERRNO_AND_VALUE(\n+ ScopedSignalMask(SIG_UNBLOCK, kInterruptSignal));\n+\n+ EXPECT_THAT(msgsnd(queue.get(), &buf, sizeof(buf.mtext), 0),\n+ SyscallFailsWithErrno(EINTR));\n+\n+ done.Notify();\n+ exit.WaitForNotification();\n+ });\n+\n+ const DisableSave ds; // Too many syscalls.\n+\n+ // We want the signal to arrive while msgsnd is blocking, but not after the\n+ // thread has exited. Signals that arrive before msgsnd are no-ops.\n+ do {\n+ EXPECT_THAT(kill(getpid(), kInterruptSignal), SyscallSucceeds());\n+ absl::SleepFor(absl::Milliseconds(100)); // Rate limit.\n+ } while (!done.HasBeenNotified());\n+\n+ exit.Notify();\n+ t.Join();\n+}\n+\n} // namespace\n} // namespace testing\n} // namespace gvisor\n+\n+int main(int argc, char** argv) {\n+ // Some tests depend on delivering a signal to the main thread. Block the\n+ // target signal so that any other threads created by TestInit will also have\n+ // the signal blocked.\n+ sigset_t set;\n+ sigemptyset(&set);\n+ sigaddset(&set, gvisor::testing::kInterruptSignal);\n+ TEST_PCHECK(sigprocmask(SIG_BLOCK, &set, nullptr) == 0);\n+\n+ gvisor::testing::TestInit(&argc, &argv);\n+ return gvisor::testing::RunAllTests();\n+}\n" } ]
Go
Apache License 2.0
google/gvisor
Correctly handle interruptions in blocking msgqueue syscalls. Reported-by: [email protected] Reported-by: [email protected] PiperOrigin-RevId: 389084629
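The fix in this record reduces to one pattern: try the non-blocking operation first, register a waiter with a deferred unregister, re-check before blocking, and return immediately when Block reports an interruption instead of discarding that error. Below is a minimal Go sketch of that loop; the blocker interface, errWouldBlock sentinel, and the tryPush/register callbacks are stand-ins assumed for illustration, not gVisor's actual msgqueue types.

```go
// Sketch of the corrected blocking-send loop: fast-path attempt, waiter
// registration with deferred unregistration, a re-check before the first
// block, and propagation of interruption errors from Block.
package main

import (
	"errors"
	"fmt"
)

// errWouldBlock stands in for linuxerr.EWOULDBLOCK.
var errWouldBlock = errors.New("operation would block")

// blocker stands in for the task: Block waits on ch and returns a
// non-nil error if the wait is interrupted (e.g. by a signal).
type blocker interface {
	Block(ch <-chan struct{}) error
}

// sendBlocking retries tryPush until it returns anything other than
// errWouldBlock, blocking between attempts and surfacing interruptions.
func sendBlocking(b blocker, tryPush func() error,
	register func() (<-chan struct{}, func())) error {
	// Fast path: try once before arming a waiter.
	if err := tryPush(); !errors.Is(err, errWouldBlock) {
		return err
	}
	ch, unregister := register()
	defer unregister()
	for {
		// Re-check: space may have appeared before registration.
		if err := tryPush(); !errors.Is(err, errWouldBlock) {
			return err
		}
		// Interruptions must be returned so the syscall can fail
		// with EINTR instead of silently retrying.
		if err := b.Block(ch); err != nil {
			return err
		}
	}
}

// chanBlocker is a toy blocker that can be interrupted via a channel.
type chanBlocker struct{ interrupt <-chan struct{} }

func (b chanBlocker) Block(ch <-chan struct{}) error {
	select {
	case <-ch:
		return nil
	case <-b.interrupt:
		return errors.New("interrupted")
	}
}

func main() {
	// A "queue" of capacity 1 that starts full.
	queue := make(chan int, 1)
	queue <- 0

	notify := make(chan struct{}, 1)
	interrupt := make(chan struct{})

	tryPush := func() error {
		select {
		case queue <- 1:
			return nil
		default:
			return errWouldBlock
		}
	}
	register := func() (<-chan struct{}, func()) { return notify, func() {} }

	// A concurrent receiver frees a slot and notifies the sender.
	go func() {
		<-queue
		notify <- struct{}{}
	}()

	err := sendBlocking(chanBlocker{interrupt: interrupt}, tryPush, register)
	fmt.Println("send result:", err) // send result: <nil>
}
```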
260,004
09.08.2021 10:16:59
25,200
34ec00c5e775479b15ae8ad69456cef02f0a545a
Run raw IP socket syscall tests on Fuchsia + Do not check for CAP_NET_RAW on Fuchsia Fuchsia does not support capabilities the same way Linux does. Instead emulate the check for CAP_NET_RAW by checking if a raw IP socket may be created.
[ { "change_type": "MODIFY", "old_path": "test/syscalls/linux/BUILD", "new_path": "test/syscalls/linux/BUILD", "diff": "@@ -7,6 +7,9 @@ package(\nexports_files(\n[\n+ \"raw_socket.cc\",\n+ \"raw_socket_hdrincl.cc\",\n+ \"raw_socket_icmp.cc\",\n\"socket.cc\",\n\"socket_inet_loopback.cc\",\n\"socket_inet_loopback_isolated.cc\",\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/raw_socket.cc", "new_path": "test/syscalls/linux/raw_socket.cc", "diff": "// limitations under the License.\n#include <arpa/inet.h>\n-#include <linux/capability.h>\n-#include <linux/filter.h>\n#include <netinet/in.h>\n#include <netinet/ip.h>\n#include <netinet/ip6.h>\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/raw_socket_hdrincl.cc", "new_path": "test/syscalls/linux/raw_socket_hdrincl.cc", "diff": "// See the License for the specific language governing permissions and\n// limitations under the License.\n-#include <linux/capability.h>\n#include <netinet/in.h>\n#include <netinet/ip.h>\n#include <netinet/ip_icmp.h>\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/raw_socket_icmp.cc", "new_path": "test/syscalls/linux/raw_socket_icmp.cc", "diff": "// See the License for the specific language governing permissions and\n// limitations under the License.\n-#include <linux/capability.h>\n#include <netinet/in.h>\n#include <netinet/ip.h>\n#include <netinet/ip_icmp.h>\n" }, { "change_type": "MODIFY", "old_path": "test/util/BUILD", "new_path": "test/util/BUILD", "diff": "@@ -8,13 +8,21 @@ package(\ncc_library(\nname = \"capability_util\",\ntestonly = 1,\n- srcs = [\"capability_util.cc\"],\n- hdrs = [\"capability_util.h\"],\n+ srcs = [\n+ \"fuchsia_capability_util.cc\",\n+ \"linux_capability_util.cc\",\n+ ],\n+ hdrs = [\n+ \"capability_util.h\",\n+ \"fuchsia_capability_util.h\",\n+ \"linux_capability_util.h\",\n+ ],\ndeps = [\n\":cleanup\",\n\":memory_util\",\n\":posix_error\",\n\":save_util\",\n+ \":socket_util\",\n\":test_util\",\n\"@com_google_absl//absl/strings\",\n],\n" }, { "change_type": "MODIFY", "old_path": "test/util/capability_util.h", "new_path": "test/util/capability_util.h", "diff": "#ifndef GVISOR_TEST_UTIL_CAPABILITY_UTIL_H_\n#define GVISOR_TEST_UTIL_CAPABILITY_UTIL_H_\n-#ifdef __linux__\n-\n-#include <errno.h>\n-#include <linux/capability.h>\n-#include <sys/syscall.h>\n-#include <unistd.h>\n-\n-#include \"test/util/cleanup.h\"\n-#include \"test/util/posix_error.h\"\n-#include \"test/util/save_util.h\"\n-#include \"test/util/test_util.h\"\n-\n-#ifndef _LINUX_CAPABILITY_VERSION_3\n-#error Expecting _LINUX_CAPABILITY_VERSION_3 support\n+#if defined(__Fuchsia__)\n+#include \"test/util/fuchsia_capability_util.h\"\n+#elif defined(__linux__)\n+#include \"test/util/linux_capability_util.h\"\n+#else\n+#error \"Unhandled platform\"\n#endif\n-namespace gvisor {\n-namespace testing {\n-\n-// HaveCapability returns true if the process has the specified EFFECTIVE\n-// capability.\n-inline PosixErrorOr<bool> HaveCapability(int cap) {\n- if (!cap_valid(cap)) {\n- return PosixError(EINVAL, \"Invalid capability\");\n- }\n-\n- struct __user_cap_header_struct header = {_LINUX_CAPABILITY_VERSION_3, 0};\n- struct __user_cap_data_struct caps[_LINUX_CAPABILITY_U32S_3] = {};\n- RETURN_ERROR_IF_SYSCALL_FAIL(syscall(__NR_capget, &header, &caps));\n- MaybeSave();\n-\n- return (caps[CAP_TO_INDEX(cap)].effective & CAP_TO_MASK(cap)) != 0;\n-}\n-\n-// SetCapability sets the specified EFFECTIVE capability.\n-inline PosixError SetCapability(int cap, bool set) {\n- if (!cap_valid(cap)) {\n- 
return PosixError(EINVAL, \"Invalid capability\");\n- }\n-\n- struct __user_cap_header_struct header = {_LINUX_CAPABILITY_VERSION_3, 0};\n- struct __user_cap_data_struct caps[_LINUX_CAPABILITY_U32S_3] = {};\n- RETURN_ERROR_IF_SYSCALL_FAIL(syscall(__NR_capget, &header, &caps));\n- MaybeSave();\n-\n- if (set) {\n- caps[CAP_TO_INDEX(cap)].effective |= CAP_TO_MASK(cap);\n- } else {\n- caps[CAP_TO_INDEX(cap)].effective &= ~CAP_TO_MASK(cap);\n- }\n- header = {_LINUX_CAPABILITY_VERSION_3, 0};\n- RETURN_ERROR_IF_SYSCALL_FAIL(syscall(__NR_capset, &header, &caps));\n- MaybeSave();\n-\n- return NoError();\n-}\n-\n-// DropPermittedCapability drops the specified PERMITTED. The EFFECTIVE\n-// capabilities must be a subset of PERMITTED, so those are dropped as well.\n-inline PosixError DropPermittedCapability(int cap) {\n- if (!cap_valid(cap)) {\n- return PosixError(EINVAL, \"Invalid capability\");\n- }\n-\n- struct __user_cap_header_struct header = {_LINUX_CAPABILITY_VERSION_3, 0};\n- struct __user_cap_data_struct caps[_LINUX_CAPABILITY_U32S_3] = {};\n- RETURN_ERROR_IF_SYSCALL_FAIL(syscall(__NR_capget, &header, &caps));\n- MaybeSave();\n-\n- caps[CAP_TO_INDEX(cap)].effective &= ~CAP_TO_MASK(cap);\n- caps[CAP_TO_INDEX(cap)].permitted &= ~CAP_TO_MASK(cap);\n-\n- header = {_LINUX_CAPABILITY_VERSION_3, 0};\n- RETURN_ERROR_IF_SYSCALL_FAIL(syscall(__NR_capset, &header, &caps));\n- MaybeSave();\n-\n- return NoError();\n-}\n-\n-PosixErrorOr<bool> CanCreateUserNamespace();\n-\n-class AutoCapability {\n- public:\n- AutoCapability(int cap, bool set) : cap_(cap), set_(set) {\n- const bool has = EXPECT_NO_ERRNO_AND_VALUE(HaveCapability(cap));\n- if (set != has) {\n- EXPECT_NO_ERRNO(SetCapability(cap_, set_));\n- applied_ = true;\n- }\n- }\n-\n- ~AutoCapability() {\n- if (applied_) {\n- EXPECT_NO_ERRNO(SetCapability(cap_, !set_));\n- }\n- }\n-\n- private:\n- int cap_;\n- bool set_;\n- bool applied_ = false;\n-};\n-\n-} // namespace testing\n-} // namespace gvisor\n-\n-#endif // __linux__\n-\n#endif // GVISOR_TEST_UTIL_CAPABILITY_UTIL_H_\n" }, { "change_type": "ADD", "old_path": null, "new_path": "test/util/fuchsia_capability_util.cc", "diff": "+// Copyright 2021 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+#ifdef __Fuchsia__\n+\n+#include \"test/util/fuchsia_capability_util.h\"\n+\n+#include <netinet/in.h>\n+#include <sys/socket.h>\n+\n+#include \"test/util/socket_util.h\"\n+\n+namespace gvisor {\n+namespace testing {\n+\n+PosixErrorOr<bool> HaveCapability(int cap) {\n+ if (cap == CAP_NET_RAW) {\n+ auto s = Socket(AF_INET, SOCK_RAW, IPPROTO_UDP);\n+ if (s.ok()) {\n+ return true;\n+ }\n+ if (s.error().errno_value() == EPERM) {\n+ return false;\n+ }\n+ return s.error();\n+ }\n+\n+ return false;\n+}\n+\n+} // namespace testing\n+} // namespace gvisor\n+\n+#endif // __Fuchsia__\n" }, { "change_type": "ADD", "old_path": null, "new_path": "test/util/fuchsia_capability_util.h", "diff": "+// Copyright 2021 The gVisor Authors.\n+//\n+// Licensed under the Apache License, 
Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+// Utilities for testing capabilities on Fuchsia.\n+\n+#ifndef GVISOR_TEST_UTIL_FUCHSIA_CAPABILITY_UTIL_H_\n+#define GVISOR_TEST_UTIL_FUCHSIA_CAPABILITY_UTIL_H_\n+\n+#ifdef __Fuchsia__\n+\n+#include \"test/util/posix_error.h\"\n+\n+#ifdef CAP_NET_RAW\n+#error \"Fuchsia should not define CAP_NET_RAW\"\n+#endif // CAP_NET_RAW\n+#define CAP_NET_RAW 0\n+\n+namespace gvisor {\n+namespace testing {\n+\n+// HaveCapability returns true if the process has the specified EFFECTIVE\n+// capability.\n+PosixErrorOr<bool> HaveCapability(int cap);\n+\n+} // namespace testing\n+} // namespace gvisor\n+\n+#endif // __Fuchsia__\n+\n+#endif // GVISOR_TEST_UTIL_FUCHSIA_CAPABILITY_UTIL_H_\n" }, { "change_type": "RENAME", "old_path": "test/util/capability_util.cc", "new_path": "test/util/linux_capability_util.cc", "diff": "#ifdef __linux__\n-#include \"test/util/capability_util.h\"\n+#include \"test/util/linux_capability_util.h\"\n#include <linux/capability.h>\n#include <sched.h>\n" }, { "change_type": "ADD", "old_path": null, "new_path": "test/util/linux_capability_util.h", "diff": "+// Copyright 2018 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+// Utilities for testing capabilities on Linux.\n+\n+#ifndef GVISOR_TEST_UTIL_LINUX_CAPABILITY_UTIL_H_\n+#define GVISOR_TEST_UTIL_LINUX_CAPABILITY_UTIL_H_\n+\n+#ifdef __linux__\n+\n+#include <errno.h>\n+#include <linux/capability.h>\n+#include <sys/syscall.h>\n+#include <unistd.h>\n+\n+#include \"test/util/cleanup.h\"\n+#include \"test/util/posix_error.h\"\n+#include \"test/util/save_util.h\"\n+#include \"test/util/test_util.h\"\n+\n+#ifndef _LINUX_CAPABILITY_VERSION_3\n+#error Expecting _LINUX_CAPABILITY_VERSION_3 support\n+#endif\n+\n+namespace gvisor {\n+namespace testing {\n+\n+// HaveCapability returns true if the process has the specified EFFECTIVE\n+// capability.\n+inline PosixErrorOr<bool> HaveCapability(int cap) {\n+ if (!cap_valid(cap)) {\n+ return PosixError(EINVAL, \"Invalid capability\");\n+ }\n+\n+ struct __user_cap_header_struct header = {_LINUX_CAPABILITY_VERSION_3, 0};\n+ struct __user_cap_data_struct caps[_LINUX_CAPABILITY_U32S_3] = {};\n+ RETURN_ERROR_IF_SYSCALL_FAIL(syscall(__NR_capget, &header, &caps));\n+ MaybeSave();\n+\n+ return (caps[CAP_TO_INDEX(cap)].effective & CAP_TO_MASK(cap)) != 0;\n+}\n+\n+// SetCapability sets the specified EFFECTIVE capability.\n+inline PosixError SetCapability(int cap, bool set) {\n+ if (!cap_valid(cap)) {\n+ return PosixError(EINVAL, 
\"Invalid capability\");\n+ }\n+\n+ struct __user_cap_header_struct header = {_LINUX_CAPABILITY_VERSION_3, 0};\n+ struct __user_cap_data_struct caps[_LINUX_CAPABILITY_U32S_3] = {};\n+ RETURN_ERROR_IF_SYSCALL_FAIL(syscall(__NR_capget, &header, &caps));\n+ MaybeSave();\n+\n+ if (set) {\n+ caps[CAP_TO_INDEX(cap)].effective |= CAP_TO_MASK(cap);\n+ } else {\n+ caps[CAP_TO_INDEX(cap)].effective &= ~CAP_TO_MASK(cap);\n+ }\n+ header = {_LINUX_CAPABILITY_VERSION_3, 0};\n+ RETURN_ERROR_IF_SYSCALL_FAIL(syscall(__NR_capset, &header, &caps));\n+ MaybeSave();\n+\n+ return NoError();\n+}\n+\n+// DropPermittedCapability drops the specified PERMITTED. The EFFECTIVE\n+// capabilities must be a subset of PERMITTED, so those are dropped as well.\n+inline PosixError DropPermittedCapability(int cap) {\n+ if (!cap_valid(cap)) {\n+ return PosixError(EINVAL, \"Invalid capability\");\n+ }\n+\n+ struct __user_cap_header_struct header = {_LINUX_CAPABILITY_VERSION_3, 0};\n+ struct __user_cap_data_struct caps[_LINUX_CAPABILITY_U32S_3] = {};\n+ RETURN_ERROR_IF_SYSCALL_FAIL(syscall(__NR_capget, &header, &caps));\n+ MaybeSave();\n+\n+ caps[CAP_TO_INDEX(cap)].effective &= ~CAP_TO_MASK(cap);\n+ caps[CAP_TO_INDEX(cap)].permitted &= ~CAP_TO_MASK(cap);\n+\n+ header = {_LINUX_CAPABILITY_VERSION_3, 0};\n+ RETURN_ERROR_IF_SYSCALL_FAIL(syscall(__NR_capset, &header, &caps));\n+ MaybeSave();\n+\n+ return NoError();\n+}\n+\n+PosixErrorOr<bool> CanCreateUserNamespace();\n+\n+class AutoCapability {\n+ public:\n+ AutoCapability(int cap, bool set) : cap_(cap), set_(set) {\n+ const bool has = EXPECT_NO_ERRNO_AND_VALUE(HaveCapability(cap));\n+ if (set != has) {\n+ EXPECT_NO_ERRNO(SetCapability(cap_, set_));\n+ applied_ = true;\n+ }\n+ }\n+\n+ ~AutoCapability() {\n+ if (applied_) {\n+ EXPECT_NO_ERRNO(SetCapability(cap_, !set_));\n+ }\n+ }\n+\n+ private:\n+ int cap_;\n+ bool set_;\n+ bool applied_ = false;\n+};\n+\n+} // namespace testing\n+} // namespace gvisor\n+\n+#endif // __linux__\n+\n+#endif // GVISOR_TEST_UTIL_LINUX_CAPABILITY_UTIL_H_\n" } ]
Go
Apache License 2.0
google/gvisor
Run raw IP socket syscall tests on Fuchsia + Do not check for CAP_NET_RAW on Fuchsia Fuchsia does not support capabilities the same way Linux does. Instead emulate the check for CAP_NET_RAW by checking if a raw IP socket may be created. PiperOrigin-RevId: 389663218
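The shim in this record answers "does this process have CAP_NET_RAW?" by probing for the privilege rather than querying capability state. The Go sketch below applies the same probe on Linux, mapping EPERM from a raw-socket creation attempt to "capability missing"; the function name and the use of the standard syscall package are assumptions for illustration, not part of the change.

```go
// Sketch: emulate a CAP_NET_RAW check by probing whether a raw IP socket
// can be created, mirroring the Fuchsia HaveCapability shim.
package main

import (
	"fmt"
	"syscall"
)

// haveNetRaw returns true if the process can create an AF_INET raw
// socket. EPERM is interpreted as "the capability is missing"; any
// other error is reported to the caller.
func haveNetRaw() (bool, error) {
	fd, err := syscall.Socket(syscall.AF_INET, syscall.SOCK_RAW, syscall.IPPROTO_UDP)
	if err == nil {
		syscall.Close(fd)
		return true, nil
	}
	if err == syscall.EPERM {
		return false, nil
	}
	return false, err
}

func main() {
	ok, err := haveNetRaw()
	fmt.Printf("raw sockets allowed: %v (err=%v)\n", ok, err)
}
```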