author | date | timezone | hash | message | mods | language | license | repo | original_message |
---|---|---|---|---|---|---|---|---|---|
259,860 | 23.06.2020 20:04:15 | 25,200 | 399c52888db609296fd1341ed0daa994ad2d02b0 | Resolve remaining inotify TODOs.
Also refactor HandleDeletion().
Updates | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/kernfs/kernfs.go",
"new_path": "pkg/sentry/fsimpl/kernfs/kernfs.go",
"diff": "@@ -227,19 +227,17 @@ func (d *Dentry) destroy() {\n// InotifyWithParent implements vfs.DentryImpl.InotifyWithParent.\n//\n-// TODO(gvisor.dev/issue/1479): Implement inotify.\n+// Although Linux technically supports inotify on pseudo filesystems (inotify\n+// is implemented at the vfs layer), it is not particularly useful. It is left\n+// unimplemented until someone actually needs it.\nfunc (d *Dentry) InotifyWithParent(events, cookie uint32, et vfs.EventType) {}\n// Watches implements vfs.DentryImpl.Watches.\n-//\n-// TODO(gvisor.dev/issue/1479): Implement inotify.\nfunc (d *Dentry) Watches() *vfs.Watches {\nreturn nil\n}\n// OnZeroWatches implements vfs.Dentry.OnZeroWatches.\n-//\n-// TODO(gvisor.dev/issue/1479): Implement inotify.\nfunc (d *Dentry) OnZeroWatches() {}\n// InsertChild inserts child into the vfs dentry cache with the given name under\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/vfs/anonfs.go",
"new_path": "pkg/sentry/vfs/anonfs.go",
"diff": "@@ -300,17 +300,15 @@ func (d *anonDentry) DecRef() {\n// InotifyWithParent implements DentryImpl.InotifyWithParent.\n//\n-// TODO(gvisor.dev/issue/1479): Implement inotify.\n+// Although Linux technically supports inotify on pseudo filesystems (inotify\n+// is implemented at the vfs layer), it is not particularly useful. It is left\n+// unimplemented until someone actually needs it.\nfunc (d *anonDentry) InotifyWithParent(events, cookie uint32, et EventType) {}\n// Watches implements DentryImpl.Watches.\n-//\n-// TODO(gvisor.dev/issue/1479): Implement inotify.\nfunc (d *anonDentry) Watches() *Watches {\nreturn nil\n}\n// OnZeroWatches implements Dentry.OnZeroWatches.\n-//\n-// TODO(gvisor.dev/issue/1479): Implement inotify.\nfunc (d *anonDentry) OnZeroWatches() {}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/vfs/filesystem.go",
"new_path": "pkg/sentry/vfs/filesystem.go",
"diff": "@@ -524,8 +524,6 @@ type FilesystemImpl interface {\n//\n// Preconditions: vd.Mount().Filesystem().Impl() == this FilesystemImpl.\nPrependPath(ctx context.Context, vfsroot, vd VirtualDentry, b *fspath.Builder) error\n-\n- // TODO(gvisor.dev/issue/1479): inotify_add_watch()\n}\n// PrependPathAtVFSRootError is returned by implementations of\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/vfs/inotify.go",
"new_path": "pkg/sentry/vfs/inotify.go",
"diff": "@@ -120,6 +120,7 @@ func NewInotifyFD(ctx context.Context, vfsObj *VirtualFilesystem, flags uint32)\n// watches and frees all resources for an inotify instance.\nfunc (i *Inotify) Release() {\nvar ds []*Dentry\n+\n// We need to hold i.mu to avoid a race with concurrent calls to\n// Inotify.handleDeletion from Watches. There's no risk of Watches\n// accessing this Inotify after the destructor ends, because we remove all\n@@ -307,19 +308,6 @@ func (i *Inotify) nextWatchIDLocked() int32 {\nreturn i.nextWatchMinusOne\n}\n-// handleDeletion handles the deletion of the target of watch w. It removes w\n-// from i.watches and a watch removal event is generated.\n-func (i *Inotify) handleDeletion(w *Watch) {\n- i.mu.Lock()\n- _, found := i.watches[w.wd]\n- delete(i.watches, w.wd)\n- i.mu.Unlock()\n-\n- if found {\n- i.queueEvent(newEvent(w.wd, \"\", linux.IN_IGNORED, 0))\n- }\n-}\n-\n// AddWatch constructs a new inotify watch and adds it to the target. It\n// returns the watch descriptor returned by inotify_add_watch(2).\n//\n@@ -484,8 +472,9 @@ func (w *Watches) Notify(name string, events, cookie uint32, et EventType, unlin\nw.mu.RUnlock()\n}\n-// HandleDeletion is called when the watch target is destroyed to emit\n-// the appropriate events.\n+// HandleDeletion is called when the watch target is destroyed. Clear the\n+// watch set, detach watches from the inotify instances they belong to, and\n+// generate the appropriate events.\nfunc (w *Watches) HandleDeletion() {\nw.Notify(\"\", linux.IN_DELETE_SELF, 0, InodeEvent, true /* unlinked */)\n@@ -505,9 +494,23 @@ func (w *Watches) HandleDeletion() {\nw.ws = nil\nw.mu.Unlock()\n+ // Remove each watch from its owner's watch set, and generate a corresponding\n+ // watch removal event.\nfor _, watch := range ws {\n- // TODO(gvisor.dev/issue/1479): consider refactoring this.\n- watch.handleDeletion()\n+ i := watch.owner\n+ i.mu.Lock()\n+ _, found := i.watches[watch.wd]\n+ delete(i.watches, watch.wd)\n+\n+ // Release mutex before notifying waiters because we don't control what\n+ // they can do.\n+ i.mu.Unlock()\n+\n+ // If watch was not found, it was removed from the inotify instance before\n+ // we could get to it, in which case we should not generate an event.\n+ if found {\n+ i.queueEvent(newEvent(watch.wd, \"\", linux.IN_IGNORED, 0))\n+ }\n}\n}\n@@ -559,11 +562,6 @@ func (w *Watch) Notify(name string, events uint32, cookie uint32) {\nw.owner.queueEvent(newEvent(w.wd, name, matchedEvents, cookie))\n}\n-// handleDeletion handles the deletion of w's target.\n-func (w *Watch) handleDeletion() {\n- w.owner.handleDeletion(w)\n-}\n-\n// Event represents a struct inotify_event from linux.\n//\n// +stateify savable\n"
}
] | Go | Apache License 2.0 | google/gvisor | Resolve remaining inotify TODOs.
Also refactor HandleDeletion().
Updates #1479.
PiperOrigin-RevId: 317989000 |
259,860 | 24.06.2020 19:20:58 | 25,200 | b5e814445a4db5df7f4f58027422a5dba97ea766 | Fix procfs bugs in vfs2.
Support writing on proc/[pid]/{uid,gid}map
Return EIO for writing to static files.
Updates | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/kernfs/dynamic_bytes_file.go",
"new_path": "pkg/sentry/fsimpl/kernfs/dynamic_bytes_file.go",
"diff": "@@ -101,12 +101,12 @@ func (fd *DynamicBytesFD) Seek(ctx context.Context, offset int64, whence int32)\nreturn fd.DynamicBytesFileDescriptionImpl.Seek(ctx, offset, whence)\n}\n-// Read implmenets vfs.FileDescriptionImpl.Read.\n+// Read implements vfs.FileDescriptionImpl.Read.\nfunc (fd *DynamicBytesFD) Read(ctx context.Context, dst usermem.IOSequence, opts vfs.ReadOptions) (int64, error) {\nreturn fd.DynamicBytesFileDescriptionImpl.Read(ctx, dst, opts)\n}\n-// PRead implmenets vfs.FileDescriptionImpl.PRead.\n+// PRead implements vfs.FileDescriptionImpl.PRead.\nfunc (fd *DynamicBytesFD) PRead(ctx context.Context, dst usermem.IOSequence, offset int64, opts vfs.ReadOptions) (int64, error) {\nreturn fd.DynamicBytesFileDescriptionImpl.PRead(ctx, dst, offset, opts)\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/kernfs/inode_impl_util.go",
"new_path": "pkg/sentry/fsimpl/kernfs/inode_impl_util.go",
"diff": "@@ -293,6 +293,8 @@ func (a *InodeAttrs) SetStat(ctx context.Context, fs *vfs.Filesystem, creds *aut\n// inode numbers are immutable after node creation.\n// TODO(gvisor.dev/issue/1193): Implement other stat fields like timestamps.\n+ // Also, STATX_SIZE will need some special handling, because read-only static\n+ // files should return EIO for truncate operations.\nreturn nil\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/proc/task_files.go",
"new_path": "pkg/sentry/fsimpl/proc/task_files.go",
"diff": "@@ -35,6 +35,10 @@ import (\n\"gvisor.dev/gvisor/pkg/usermem\"\n)\n+// \"There is an (arbitrary) limit on the number of lines in the file. As at\n+// Linux 3.18, the limit is five lines.\" - user_namespaces(7)\n+const maxIDMapLines = 5\n+\n// mm gets the kernel task's MemoryManager. No additional reference is taken on\n// mm here. This is safe because MemoryManager.destroy is required to leave the\n// MemoryManager in a state where it's still usable as a DynamicBytesSource.\n@@ -283,7 +287,8 @@ func (d *commData) Generate(ctx context.Context, buf *bytes.Buffer) error {\nreturn nil\n}\n-// idMapData implements vfs.DynamicBytesSource for /proc/[pid]/{gid_map|uid_map}.\n+// idMapData implements vfs.WritableDynamicBytesSource for\n+// /proc/[pid]/{gid_map|uid_map}.\n//\n// +stateify savable\ntype idMapData struct {\n@@ -309,6 +314,59 @@ func (d *idMapData) Generate(ctx context.Context, buf *bytes.Buffer) error {\nreturn nil\n}\n+func (d *idMapData) Write(ctx context.Context, src usermem.IOSequence, offset int64) (int64, error) {\n+ // \"In addition, the number of bytes written to the file must be less than\n+ // the system page size, and the write must be performed at the start of\n+ // the file ...\" - user_namespaces(7)\n+ srclen := src.NumBytes()\n+ if srclen >= usermem.PageSize || offset != 0 {\n+ return 0, syserror.EINVAL\n+ }\n+ b := make([]byte, srclen)\n+ if _, err := src.CopyIn(ctx, b); err != nil {\n+ return 0, err\n+ }\n+\n+ // Truncate from the first NULL byte.\n+ var nul int64\n+ nul = int64(bytes.IndexByte(b, 0))\n+ if nul == -1 {\n+ nul = srclen\n+ }\n+ b = b[:nul]\n+ // Remove the last \\n.\n+ if nul >= 1 && b[nul-1] == '\\n' {\n+ b = b[:nul-1]\n+ }\n+ lines := bytes.SplitN(b, []byte(\"\\n\"), maxIDMapLines+1)\n+ if len(lines) > maxIDMapLines {\n+ return 0, syserror.EINVAL\n+ }\n+\n+ entries := make([]auth.IDMapEntry, len(lines))\n+ for i, l := range lines {\n+ var e auth.IDMapEntry\n+ _, err := fmt.Sscan(string(l), &e.FirstID, &e.FirstParentID, &e.Length)\n+ if err != nil {\n+ return 0, syserror.EINVAL\n+ }\n+ entries[i] = e\n+ }\n+ var err error\n+ if d.gids {\n+ err = d.task.UserNamespace().SetGIDMap(ctx, entries)\n+ } else {\n+ err = d.task.UserNamespace().SetUIDMap(ctx, entries)\n+ }\n+ if err != nil {\n+ return 0, err\n+ }\n+\n+ // On success, Linux's kernel/user_namespace.c:map_write() always returns\n+ // count, even if fewer bytes were used.\n+ return int64(srclen), nil\n+}\n+\n// mapsData implements vfs.DynamicBytesSource for /proc/[pid]/maps.\n//\n// +stateify savable\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/vfs/file_description_impl_util.go",
"new_path": "pkg/sentry/vfs/file_description_impl_util.go",
"diff": "@@ -327,7 +327,7 @@ func (fd *DynamicBytesFileDescriptionImpl) pwriteLocked(ctx context.Context, src\nwritable, ok := fd.data.(WritableDynamicBytesSource)\nif !ok {\n- return 0, syserror.EINVAL\n+ return 0, syserror.EIO\n}\nn, err := writable.Write(ctx, src, offset)\nif err != nil {\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/vfs/file_description_impl_util_test.go",
"new_path": "pkg/sentry/vfs/file_description_impl_util_test.go",
"diff": "@@ -155,11 +155,11 @@ func TestGenCountFD(t *testing.T) {\n}\n// Write and PWrite fails.\n- if _, err := fd.Write(ctx, ioseq, WriteOptions{}); err != syserror.EINVAL {\n- t.Errorf(\"Write: got err %v, wanted %v\", err, syserror.EINVAL)\n+ if _, err := fd.Write(ctx, ioseq, WriteOptions{}); err != syserror.EIO {\n+ t.Errorf(\"Write: got err %v, wanted %v\", err, syserror.EIO)\n}\n- if _, err := fd.PWrite(ctx, ioseq, 0, WriteOptions{}); err != syserror.EINVAL {\n- t.Errorf(\"Write: got err %v, wanted %v\", err, syserror.EINVAL)\n+ if _, err := fd.PWrite(ctx, ioseq, 0, WriteOptions{}); err != syserror.EIO {\n+ t.Errorf(\"Write: got err %v, wanted %v\", err, syserror.EIO)\n}\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/BUILD",
"new_path": "test/syscalls/BUILD",
"diff": "@@ -479,6 +479,7 @@ syscall_test(\nsyscall_test(\nsize = \"medium\",\ntest = \"//test/syscalls/linux:proc_test\",\n+ vfs2 = \"True\",\n)\nsyscall_test(\n@@ -498,6 +499,7 @@ syscall_test(\nsyscall_test(\ntest = \"//test/syscalls/linux:proc_pid_uid_gid_map_test\",\n+ vfs2 = \"True\",\n)\nsyscall_test(\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/proc.cc",
"new_path": "test/syscalls/linux/proc.cc",
"diff": "@@ -754,8 +754,53 @@ TEST(ProcCpuinfo, RequiredFieldsArePresent) {\n}\n}\n-TEST(ProcCpuinfo, DeniesWrite) {\n+TEST(ProcCpuinfo, DeniesWriteNonRoot) {\n+ SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_FOWNER)));\n+\n+ // Do setuid in a separate thread so that after finishing this test, the\n+ // process can still open files the test harness created before starting this\n+ // test. Otherwise, the files are created by root (UID before the test), but\n+ // cannot be opened by the `uid` set below after the test. After calling\n+ // setuid(non-zero-UID), there is no way to get root privileges back.\n+ ScopedThread([&] {\n+ // Use syscall instead of glibc setuid wrapper because we want this setuid\n+ // call to only apply to this task. POSIX threads, however, require that all\n+ // threads have the same UIDs, so using the setuid wrapper sets all threads'\n+ // real UID.\n+ // Also drops capabilities.\n+ constexpr int kNobody = 65534;\n+ EXPECT_THAT(syscall(SYS_setuid, kNobody), SyscallSucceeds());\nEXPECT_THAT(open(\"/proc/cpuinfo\", O_WRONLY), SyscallFailsWithErrno(EACCES));\n+ // TODO(gvisor.dev/issue/1193): Properly support setting size attributes in\n+ // kernfs.\n+ if (!IsRunningOnGvisor() || IsRunningWithVFS1()) {\n+ EXPECT_THAT(truncate(\"/proc/cpuinfo\", 123),\n+ SyscallFailsWithErrno(EACCES));\n+ }\n+ });\n+}\n+\n+// With root privileges, it is possible to open /proc/cpuinfo with write mode,\n+// but all write operations will return EIO.\n+TEST(ProcCpuinfo, DeniesWriteRoot) {\n+ // VFS1 does not behave differently for root/non-root.\n+ SKIP_IF(IsRunningWithVFS1());\n+ SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_FOWNER)));\n+\n+ int fd;\n+ EXPECT_THAT(fd = open(\"/proc/cpuinfo\", O_WRONLY), SyscallSucceeds());\n+ if (fd > 0) {\n+ EXPECT_THAT(write(fd, \"x\", 1), SyscallFailsWithErrno(EIO));\n+ EXPECT_THAT(pwrite(fd, \"x\", 1, 123), SyscallFailsWithErrno(EIO));\n+ }\n+ // TODO(gvisor.dev/issue/1193): Properly support setting size attributes in\n+ // kernfs.\n+ if (!IsRunningOnGvisor() || IsRunningWithVFS1()) {\n+ if (fd > 0) {\n+ EXPECT_THAT(ftruncate(fd, 123), SyscallFailsWithErrno(EIO));\n+ }\n+ EXPECT_THAT(truncate(\"/proc/cpuinfo\", 123), SyscallFailsWithErrno(EIO));\n+ }\n}\n// Sanity checks that uptime is present.\n"
}
] | Go | Apache License 2.0 | google/gvisor | Fix procfs bugs in vfs2.
- Support writing on proc/[pid]/{uid,gid}map
- Return EIO for writing to static files.
Updates #2923.
PiperOrigin-RevId: 318188503 |
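For reference, the map format that the Write() method in the diff above parses is the one documented in user_namespaces(7): each line written to /proc/[pid]/uid_map or gid_map is "firstID firstParentID length", at most five lines, written in one write at offset 0. Below is a standalone Go sketch of just that per-line parse step; the idMapEntry type and parseIDMap helper are illustrative names for this sketch, not part of the gVisor code above.

```go
package main

import (
	"fmt"
	"strings"
)

// idMapEntry mirrors the three fields scanned from each uid_map/gid_map
// line: the first ID inside the namespace, the first ID in the parent
// namespace, and the length of the mapped range.
type idMapEntry struct {
	FirstID, FirstParentID, Length uint32
}

// parseIDMap parses newline-separated "firstID firstParentID length" lines,
// the format accepted by writes to /proc/[pid]/{uid,gid}_map.
func parseIDMap(data string) ([]idMapEntry, error) {
	var entries []idMapEntry
	for _, line := range strings.Split(strings.TrimSpace(data), "\n") {
		var e idMapEntry
		// Same scan as in the diff above: three decimal fields per line.
		if _, err := fmt.Sscan(line, &e.FirstID, &e.FirstParentID, &e.Length); err != nil {
			return nil, fmt.Errorf("invalid map line %q: %v", line, err)
		}
		entries = append(entries, e)
	}
	return entries, nil
}

func main() {
	// Map UID 0 inside the namespace to UID 1000 in the parent, range length 1.
	entries, err := parseIDMap("0 1000 1\n")
	if err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", entries)
}
```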
259,858 | 25.06.2020 09:17:17 | 25,200 | 00ee5abaa70965b1baad996f53d3dc2a17805fcf | Drop unused markdown links. | [
{
"change_type": "MODIFY",
"old_path": "README.md",
"new_path": "README.md",
"diff": "@@ -113,7 +113,6 @@ See [SECURITY.md](SECURITY.md).\nSee [Contributing.md](CONTRIBUTING.md).\n[bazel]: https://bazel.build\n-[community]: https://gvisor.googlesource.com/community\n[docker]: https://www.docker.com\n[gvisor-users-list]: https://groups.google.com/forum/#!forum/gvisor-users\n[gvisor-dev]: https://gvisor.dev\n"
},
{
"change_type": "MODIFY",
"old_path": "SECURITY.md",
"new_path": "SECURITY.md",
"diff": "@@ -7,5 +7,4 @@ prompt response, typically within 48 hours.\nPolicies for security list access, vulnerability embargo, and vulnerability\ndisclosure are outlined in the [governance policy](GOVERNANCE.md).\n-[community]: https://gvisor.googlesource.com/community\n[gvisor-security-list]: https://groups.google.com/forum/#!forum/gvisor-security\n"
}
] | Go | Apache License 2.0 | google/gvisor | Drop unused markdown links.
PiperOrigin-RevId: 318284693 |
260,022 | 17.06.2020 16:23:27 | 14,400 | a63db7d90303280de9431f369e5a9c8db351a9e8 | Moved FUSE device under the fuse directory | [
{
"change_type": "DELETE",
"old_path": "pkg/sentry/devices/miscdev/miscdev.go",
"new_path": null,
"diff": "-// Copyright 2020 The gVisor Authors.\n-//\n-// Licensed under the Apache License, Version 2.0 (the \"License\");\n-// you may not use this file except in compliance with the License.\n-// You may obtain a copy of the License at\n-//\n-// http://www.apache.org/licenses/LICENSE-2.0\n-//\n-// Unless required by applicable law or agreed to in writing, software\n-// distributed under the License is distributed on an \"AS IS\" BASIS,\n-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n-// See the License for the specific language governing permissions and\n-// limitations under the License.\n-\n-// Package miscdev implements \"misc\" character devices, as implemented in Linux\n-// by drivers/char/misc.c and fs/fuse/dev.c.\n-package miscdev\n-\n-import (\n- \"gvisor.dev/gvisor/pkg/abi/linux\"\n- \"gvisor.dev/gvisor/pkg/context\"\n- \"gvisor.dev/gvisor/pkg/sentry/fsimpl/devtmpfs\"\n- \"gvisor.dev/gvisor/pkg/sentry/vfs\"\n-)\n-\n-// miscDevMajor is the major device number for devices defined in this package.\n-const miscDevMajor = linux.MISC_MAJOR\n-\n-// Register registers all devices implemented by this package in vfsObj.\n-func Register(vfsObj *vfs.VirtualFilesystem) error {\n- for minor, dev := range map[uint32]vfs.Device{\n- fuseDevMinor: fuseDevice{},\n- } {\n- if err := vfsObj.RegisterDevice(vfs.CharDevice, miscDevMajor, minor, dev, &vfs.RegisterDeviceOptions{\n- GroupName: \"misc\",\n- }); err != nil {\n- return err\n- }\n- }\n- return nil\n-}\n-\n-// CreateDevtmpfsFiles creates device special files in dev representing all\n-// devices implemented by this package.\n-func CreateDevtmpfsFiles(ctx context.Context, dev *devtmpfs.Accessor) error {\n- for minor, name := range map[uint32]string{\n- fuseDevMinor: \"fuse\",\n- } {\n- if err := dev.CreateDeviceFile(ctx, name, vfs.CharDevice, miscDevMajor, minor, 0666 /* mode */); err != nil {\n- return err\n- }\n- }\n- return nil\n-}\n"
},
{
"change_type": "RENAME",
"old_path": "pkg/sentry/devices/miscdev/BUILD",
"new_path": "pkg/sentry/fsimpl/fuse/BUILD",
"diff": "@@ -3,10 +3,9 @@ load(\"//tools:defs.bzl\", \"go_library\")\nlicenses([\"notice\"])\ngo_library(\n- name = \"miscdev\",\n+ name = \"fuse\",\nsrcs = [\n- \"fuse.go\",\n- \"miscdev.go\",\n+ \"dev.go\",\n],\nvisibility = [\"//pkg/sentry:internal\"],\ndeps = [\n"
},
{
"change_type": "RENAME",
"old_path": "pkg/sentry/devices/miscdev/fuse.go",
"new_path": "pkg/sentry/fsimpl/fuse/dev.go",
"diff": "// See the License for the specific language governing permissions and\n// limitations under the License.\n-package miscdev\n+package fuse\nimport (\n+ \"gvisor.dev/gvisor/pkg/abi/linux\"\n\"gvisor.dev/gvisor/pkg/context\"\n+ \"gvisor.dev/gvisor/pkg/sentry/fsimpl/devtmpfs\"\n\"gvisor.dev/gvisor/pkg/sentry/vfs\"\n\"gvisor.dev/gvisor/pkg/syserror\"\n\"gvisor.dev/gvisor/pkg/usermem\"\n@@ -28,7 +30,7 @@ type fuseDevice struct{}\n// Open implements vfs.Device.Open.\nfunc (fuseDevice) Open(ctx context.Context, mnt *vfs.Mount, vfsd *vfs.Dentry, opts vfs.OpenOptions) (*vfs.FileDescription, error) {\n- var fd FUSEDeviceFile\n+ var fd DeviceFD\nif err := fd.vfsfd.Init(&fd, opts.Flags, mnt, vfsd, &vfs.FileDescriptionOptions{\nUseDentryMetadata: true,\n}); err != nil {\n@@ -37,8 +39,8 @@ func (fuseDevice) Open(ctx context.Context, mnt *vfs.Mount, vfsd *vfs.Dentry, op\nreturn &fd.vfsfd, nil\n}\n-// FUSEDeviceFile implements vfs.FileDescriptionImpl for /dev/fuse.\n-type FUSEDeviceFile struct {\n+// DeviceFD implements vfs.FileDescriptionImpl for /dev/fuse.\n+type DeviceFD struct {\nvfsfd vfs.FileDescription\nvfs.FileDescriptionDefaultImpl\nvfs.DentryMetadataFileDescriptionImpl\n@@ -50,29 +52,49 @@ type FUSEDeviceFile struct {\n}\n// Release implements vfs.FileDescriptionImpl.Release.\n-func (fd *FUSEDeviceFile) Release() {}\n+func (fd *DeviceFD) Release() {}\n// PRead implements vfs.FileDescriptionImpl.PRead.\n-func (fd *FUSEDeviceFile) PRead(ctx context.Context, dst usermem.IOSequence, offset int64, opts vfs.ReadOptions) (int64, error) {\n+func (fd *DeviceFD) PRead(ctx context.Context, dst usermem.IOSequence, offset int64, opts vfs.ReadOptions) (int64, error) {\nreturn 0, syserror.ENOSYS\n}\n// Read implements vfs.FileDescriptionImpl.Read.\n-func (fd *FUSEDeviceFile) Read(ctx context.Context, dst usermem.IOSequence, opts vfs.ReadOptions) (int64, error) {\n+func (fd *DeviceFD) Read(ctx context.Context, dst usermem.IOSequence, opts vfs.ReadOptions) (int64, error) {\nreturn 0, syserror.ENOSYS\n}\n// PWrite implements vfs.FileDescriptionImpl.PWrite.\n-func (fd *FUSEDeviceFile) PWrite(ctx context.Context, src usermem.IOSequence, offset int64, opts vfs.WriteOptions) (int64, error) {\n+func (fd *DeviceFD) PWrite(ctx context.Context, src usermem.IOSequence, offset int64, opts vfs.WriteOptions) (int64, error) {\nreturn 0, syserror.ENOSYS\n}\n// Write implements vfs.FileDescriptionImpl.Write.\n-func (fd *FUSEDeviceFile) Write(ctx context.Context, src usermem.IOSequence, opts vfs.WriteOptions) (int64, error) {\n+func (fd *DeviceFD) Write(ctx context.Context, src usermem.IOSequence, opts vfs.WriteOptions) (int64, error) {\nreturn 0, syserror.ENOSYS\n}\n// Seek implements vfs.FileDescriptionImpl.Seek.\n-func (fd *FUSEDeviceFile) Seek(ctx context.Context, offset int64, whence int32) (int64, error) {\n+func (fd *DeviceFD) Seek(ctx context.Context, offset int64, whence int32) (int64, error) {\nreturn 0, syserror.ENOSYS\n}\n+\n+// Register registers the FUSE device with vfsObj.\n+func Register(vfsObj *vfs.VirtualFilesystem) error {\n+ if err := vfsObj.RegisterDevice(vfs.CharDevice, linux.MISC_MAJOR, fuseDevMinor, fuseDevice{}, &vfs.RegisterDeviceOptions{\n+ GroupName: \"misc\",\n+ }); err != nil {\n+ return err\n+ }\n+\n+ return nil\n+}\n+\n+// CreateDevtmpfsFile creates a device special file in devtmpfs.\n+func CreateDevtmpfsFile(ctx context.Context, dev *devtmpfs.Accessor) error {\n+ if err := dev.CreateDeviceFile(ctx, \"fuse\", vfs.CharDevice, linux.MISC_MAJOR, fuseDevMinor, 0666 /* mode */); err != nil 
{\n+ return err\n+ }\n+\n+ return nil\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/boot/BUILD",
"new_path": "runsc/boot/BUILD",
"diff": "@@ -55,6 +55,7 @@ go_library(\n\"//pkg/sentry/fs/user\",\n\"//pkg/sentry/fsimpl/devpts\",\n\"//pkg/sentry/fsimpl/devtmpfs\",\n+ \"//pkg/sentry/fsimpl/fuse\",\n\"//pkg/sentry/fsimpl/gofer\",\n\"//pkg/sentry/fsimpl/host\",\n\"//pkg/sentry/fsimpl/overlay\",\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/boot/vfs.go",
"new_path": "runsc/boot/vfs.go",
"diff": "@@ -31,6 +31,7 @@ import (\n\"gvisor.dev/gvisor/pkg/sentry/fs/user\"\n\"gvisor.dev/gvisor/pkg/sentry/fsimpl/devpts\"\n\"gvisor.dev/gvisor/pkg/sentry/fsimpl/devtmpfs\"\n+ \"gvisor.dev/gvisor/pkg/sentry/fsimpl/fuse\"\n\"gvisor.dev/gvisor/pkg/sentry/fsimpl/gofer\"\n\"gvisor.dev/gvisor/pkg/sentry/fsimpl/overlay\"\n\"gvisor.dev/gvisor/pkg/sentry/fsimpl/proc\"\n@@ -79,6 +80,9 @@ func registerFilesystems(ctx context.Context, vfsObj *vfs.VirtualFilesystem, cre\n}\nif err := ttydev.Register(vfsObj); err != nil {\nreturn fmt.Errorf(\"registering ttydev: %w\", err)\n+\n+ if err := fuse.Register(vfsObj); err != nil {\n+ return fmt.Errorf(\"registering /dev/fuse: %w\", err)\n}\nif err := tundev.Register(vfsObj); err != nil {\nreturn fmt.Errorf(\"registering tundev: %v\", err)\n@@ -101,6 +105,9 @@ func registerFilesystems(ctx context.Context, vfsObj *vfs.VirtualFilesystem, cre\nif err := tundev.CreateDevtmpfsFiles(ctx, a); err != nil {\nreturn fmt.Errorf(\"creating tundev devtmpfs files: %v\", err)\n}\n+ if err := fuse.CreateDevtmpfsFile(ctx, a); err != nil {\n+ return fmt.Errorf(\"creating devtmpfs fuse device file: %w\", err)\n+ }\nreturn nil\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/dev.cc",
"new_path": "test/syscalls/linux/dev.cc",
"diff": "@@ -165,7 +165,8 @@ TEST(DevTest, WriteDevFuse) {\n}\nTEST(DevTest, TTYExists) {\n- SKIP_IF(!IsRunningWithVFS1());\n+ // Run test if running on VFS1 or on Linux.\n+ SKIP_IF(!IsRunningWithVFS1() && IsRunningOnGvisor());\nstruct stat statbuf = {};\nASSERT_THAT(stat(\"/dev/tty\", &statbuf), SyscallSucceeds());\n"
}
] | Go | Apache License 2.0 | google/gvisor | Moved FUSE device under the fuse directory |
260,022 | 23.06.2020 14:25:38 | 14,400 | 2828806fb015bbbec0f4a48670d1eb048f21099a | Test that the fuse device can be opened | [
{
"change_type": "MODIFY",
"old_path": "runsc/boot/vfs.go",
"new_path": "runsc/boot/vfs.go",
"diff": "@@ -80,9 +80,10 @@ func registerFilesystems(ctx context.Context, vfsObj *vfs.VirtualFilesystem, cre\n}\nif err := ttydev.Register(vfsObj); err != nil {\nreturn fmt.Errorf(\"registering ttydev: %w\", err)\n+ }\nif err := fuse.Register(vfsObj); err != nil {\n- return fmt.Errorf(\"registering /dev/fuse: %w\", err)\n+ return fmt.Errorf(\"registering fusedev: %w\", err)\n}\nif err := tundev.Register(vfsObj); err != nil {\nreturn fmt.Errorf(\"registering tundev: %v\", err)\n@@ -106,7 +107,7 @@ func registerFilesystems(ctx context.Context, vfsObj *vfs.VirtualFilesystem, cre\nreturn fmt.Errorf(\"creating tundev devtmpfs files: %v\", err)\n}\nif err := fuse.CreateDevtmpfsFile(ctx, a); err != nil {\n- return fmt.Errorf(\"creating devtmpfs fuse device file: %w\", err)\n+ return fmt.Errorf(\"creating fusedev devtmpfs files: %w\", err)\n}\nreturn nil\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/dev.cc",
"new_path": "test/syscalls/linux/dev.cc",
"diff": "@@ -146,34 +146,21 @@ TEST(DevTest, WriteDevFull) {\nEXPECT_THAT(WriteFd(fd.get(), \"a\", 1), SyscallFailsWithErrno(ENOSPC));\n}\n-TEST(DevTest, ReadDevFuse) {\n- SKIP_IF(IsRunningWithVFS1());\n-\n- const FileDescriptor fd =\n- ASSERT_NO_ERRNO_AND_VALUE(Open(\"/dev/fuse\", O_RDONLY));\n- std::vector<char> buf(1);\n- EXPECT_THAT(ReadFd(fd.get(), buf.data(), sizeof(buf)), SyscallFailsWithErrno(ENOSYS));\n-}\n-\n-TEST(DevTest, WriteDevFuse) {\n- SKIP_IF(IsRunningWithVFS1());\n-\n- const FileDescriptor fd =\n- ASSERT_NO_ERRNO_AND_VALUE(Open(\"/dev/fuse\", O_WRONLY));\n- const char* testStr = \"test\";\n- EXPECT_THAT(WriteFd(fd.get(), testStr, sizeof(testStr)), SyscallFailsWithErrno(ENOSYS));\n-}\n-\nTEST(DevTest, TTYExists) {\n- // Run test if running on VFS1 or on Linux.\n- SKIP_IF(!IsRunningWithVFS1() && IsRunningOnGvisor());\n-\nstruct stat statbuf = {};\nASSERT_THAT(stat(\"/dev/tty\", &statbuf), SyscallSucceeds());\n// Check that it's a character device with rw-rw-rw- permissions.\nEXPECT_EQ(statbuf.st_mode, S_IFCHR | 0666);\n}\n+TEST(DevTest, OpenDevFuse) {\n+ // Note(gvisor.dev/issue/3076) This won't work in the sentry until the new\n+ // device registration is complete.\n+ SKIP_IF(IsRunningWithVFS1() || IsRunningOnGvisor());\n+\n+ ASSERT_NO_ERRNO_AND_VALUE(Open(\"/dev/fuse\", O_RDONLY));\n+}\n+\n} // namespace\n} // namespace testing\n"
}
] | Go | Apache License 2.0 | google/gvisor | Test that the fuse device can be opened |
259,860 | 26.06.2020 13:46:01 | 25,200 | 54a31e219ca9d6086a367213a92d2a72ce3af07b | Support inotify IN_ONESHOT.
Also, while we're here, make sure that gofer inotify events are generated when
files are created in remote revalidating mode.
Updates | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/gofer/filesystem.go",
"new_path": "pkg/sentry/fsimpl/gofer/filesystem.go",
"diff": "@@ -389,7 +389,15 @@ func (fs *filesystem) doCreateAt(ctx context.Context, rp *vfs.ResolvingPath, dir\n// RPC will fail with EEXIST like we would have. If the RPC succeeds, and a\n// stale dentry exists, the dentry will fail revalidation next time it's\n// used.\n- return createInRemoteDir(parent, name)\n+ if err := createInRemoteDir(parent, name); err != nil {\n+ return err\n+ }\n+ ev := linux.IN_CREATE\n+ if dir {\n+ ev |= linux.IN_ISDIR\n+ }\n+ parent.watches.Notify(name, uint32(ev), 0, vfs.InodeEvent, false /* unlinked */)\n+ return nil\n}\nif child := parent.children[name]; child != nil {\nreturn syserror.EEXIST\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/syscalls/linux/vfs2/inotify.go",
"new_path": "pkg/sentry/syscalls/linux/vfs2/inotify.go",
"diff": "@@ -81,7 +81,7 @@ func InotifyAddWatch(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kern\n// \"EINVAL: The given event mask contains no valid events.\"\n// -- inotify_add_watch(2)\n- if validBits := mask & linux.ALL_INOTIFY_BITS; validBits == 0 {\n+ if mask&linux.ALL_INOTIFY_BITS == 0 {\nreturn 0, nil, syserror.EINVAL\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/vfs/inotify.go",
"new_path": "pkg/sentry/vfs/inotify.go",
"diff": "@@ -447,29 +447,51 @@ func (w *Watches) Remove(id uint64) {\nreturn\n}\n- if _, ok := w.ws[id]; !ok {\n- // While there's technically no problem with silently ignoring a missing\n- // watch, this is almost certainly a bug.\n- panic(fmt.Sprintf(\"Attempt to remove a watch, but no watch found with provided id %+v.\", id))\n- }\n+ // It is possible for w.Remove() to be called for the same watch multiple\n+ // times. See the treatment of one-shot watches in Watches.Notify().\n+ if _, ok := w.ws[id]; ok {\ndelete(w.ws, id)\n}\n+}\n// Notify queues a new event with watches in this set. Watches with\n// IN_EXCL_UNLINK are skipped if the event is coming from a child that has been\n// unlinked.\nfunc (w *Watches) Notify(name string, events, cookie uint32, et EventType, unlinked bool) {\n- // N.B. We don't defer the unlocks because Notify is in the hot path of\n- // all IO operations, and the defer costs too much for small IO\n- // operations.\n+ var hasExpired bool\nw.mu.RLock()\nfor _, watch := range w.ws {\nif unlinked && watch.ExcludeUnlinked() && et == PathEvent {\ncontinue\n}\n- watch.Notify(name, events, cookie)\n+ if watch.Notify(name, events, cookie) {\n+ hasExpired = true\n+ }\n+ }\n+ w.mu.RUnlock()\n+\n+ if hasExpired {\n+ w.cleanupExpiredWatches()\n+ }\n+}\n+\n+// This function is relatively expensive and should only be called where there\n+// are expired watches.\n+func (w *Watches) cleanupExpiredWatches() {\n+ // Because of lock ordering, we cannot acquire Inotify.mu for each watch\n+ // owner while holding w.mu. As a result, store expired watches locally\n+ // before removing.\n+ var toRemove []*Watch\n+ w.mu.RLock()\n+ for _, watch := range w.ws {\n+ if atomic.LoadInt32(&watch.expired) == 1 {\n+ toRemove = append(toRemove, watch)\n+ }\n}\nw.mu.RUnlock()\n+ for _, watch := range toRemove {\n+ watch.owner.RmWatch(watch.wd)\n+ }\n}\n// HandleDeletion is called when the watch target is destroyed. Clear the\n@@ -478,16 +500,10 @@ func (w *Watches) Notify(name string, events, cookie uint32, et EventType, unlin\nfunc (w *Watches) HandleDeletion() {\nw.Notify(\"\", linux.IN_DELETE_SELF, 0, InodeEvent, true /* unlinked */)\n- // We can't hold w.mu while calling watch.handleDeletion to preserve lock\n- // ordering w.r.t to the owner inotify instances. Instead, atomically move\n- // the watches map into a local variable so we can iterate over it safely.\n- //\n- // Because of this however, it is possible for the watches' owners to reach\n- // this inode while the inode has no refs. This is still safe because the\n- // owners can only reach the inode until this function finishes calling\n- // watch.handleDeletion below and the inode is guaranteed to exist in the\n- // meantime. But we still have to be very careful not to rely on inode state\n- // that may have been already destroyed.\n+ // As in Watches.Notify, we can't hold w.mu while acquiring Inotify.mu for\n+ // the owner of each watch being deleted. Instead, atomically store the\n+ // watches map in a local variable and set it to nil so we can iterate over\n+ // it with the assurance that there will be no concurrent accesses.\nvar ws map[uint64]*Watch\nw.mu.Lock()\nws = w.ws\n@@ -519,17 +535,28 @@ func (w *Watches) HandleDeletion() {\n// +stateify savable\ntype Watch struct {\n// Inotify instance which owns this watch.\n+ //\n+ // This field is immutable after creation.\nowner *Inotify\n// Descriptor for this watch. 
This is unique across an inotify instance.\n+ //\n+ // This field is immutable after creation.\nwd int32\n// target is a dentry representing the watch target. Its watch set contains this watch.\n+ //\n+ // This field is immutable after creation.\ntarget *Dentry\n// Events being monitored via this watch. Must be accessed with atomic\n// memory operations.\nmask uint32\n+\n+ // expired is set to 1 to indicate that this watch is a one-shot that has\n+ // already sent a notification and therefore can be removed. Must be accessed\n+ // with atomic memory operations.\n+ expired int32\n}\n// OwnerID returns the id of the inotify instance that owns this watch.\n@@ -546,12 +573,20 @@ func (w *Watch) ExcludeUnlinked() bool {\nreturn atomic.LoadUint32(&w.mask)&linux.IN_EXCL_UNLINK != 0\n}\n-// Notify queues a new event on this watch.\n-func (w *Watch) Notify(name string, events uint32, cookie uint32) {\n+// Notify queues a new event on this watch. Returns true if this is a one-shot\n+// watch that should be deleted, after this event was successfully queued.\n+func (w *Watch) Notify(name string, events uint32, cookie uint32) bool {\n+ if atomic.LoadInt32(&w.expired) == 1 {\n+ // This is a one-shot watch that is already in the process of being\n+ // removed. This may happen if a second event reaches the watch target\n+ // before this watch has been removed.\n+ return false\n+ }\n+\nmask := atomic.LoadUint32(&w.mask)\nif mask&events == 0 {\n// We weren't watching for this event.\n- return\n+ return false\n}\n// Event mask should include bits matched from the watch plus all control\n@@ -560,6 +595,11 @@ func (w *Watch) Notify(name string, events uint32, cookie uint32) {\neffectiveMask := unmaskableBits | mask\nmatchedEvents := effectiveMask & events\nw.owner.queueEvent(newEvent(w.wd, name, matchedEvents, cookie))\n+ if mask&linux.IN_ONESHOT != 0 {\n+ atomic.StoreInt32(&w.expired, 1)\n+ return true\n+ }\n+ return false\n}\n// Event represents a struct inotify_event from linux.\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/inotify.cc",
"new_path": "test/syscalls/linux/inotify.cc",
"diff": "@@ -1485,20 +1485,26 @@ TEST(Inotify, DuplicateWatchReturnsSameWatchDescriptor) {\nTEST(Inotify, UnmatchedEventsAreDiscarded) {\nconst TempPath root = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());\n- const TempPath file1 =\n+ TempPath file1 =\nASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileIn(root.path()));\nconst FileDescriptor fd =\nASSERT_NO_ERRNO_AND_VALUE(InotifyInit1(IN_NONBLOCK));\n- ASSERT_NO_ERRNO_AND_VALUE(InotifyAddWatch(fd.get(), file1.path(), IN_ACCESS));\n+ const int wd = ASSERT_NO_ERRNO_AND_VALUE(\n+ InotifyAddWatch(fd.get(), file1.path(), IN_ACCESS));\n- const FileDescriptor file1_fd =\n+ FileDescriptor file1_fd =\nASSERT_NO_ERRNO_AND_VALUE(Open(file1.path(), O_WRONLY));\n- const std::vector<Event> events =\n- ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(fd.get()));\n+ std::vector<Event> events = ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(fd.get()));\n// We only asked for access events, the open event should be discarded.\nASSERT_THAT(events, Are({}));\n+\n+ // IN_IGNORED events are always generated, regardless of the mask.\n+ file1_fd.reset();\n+ file1.reset();\n+ events = ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(fd.get()));\n+ ASSERT_THAT(events, Are({Event(IN_IGNORED, wd)}));\n}\nTEST(Inotify, AddWatchWithInvalidEventMaskFails) {\n@@ -2073,6 +2079,38 @@ TEST(Inotify, ExcludeUnlinkInodeEvents_NoRandomSave) {\n}));\n}\n+TEST(Inotify, OneShot) {\n+ // TODO(gvisor.dev/issue/1624): IN_ONESHOT not supported in VFS1.\n+ SKIP_IF(IsRunningWithVFS1());\n+\n+ const TempPath file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());\n+ const FileDescriptor inotify_fd =\n+ ASSERT_NO_ERRNO_AND_VALUE(InotifyInit1(IN_NONBLOCK));\n+\n+ const int wd = ASSERT_NO_ERRNO_AND_VALUE(\n+ InotifyAddWatch(inotify_fd.get(), file.path(), IN_MODIFY | IN_ONESHOT));\n+\n+ // Open an fd, write to it, and then close it.\n+ FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_WRONLY));\n+ ASSERT_THAT(write(fd.get(), \"x\", 1), SyscallSucceedsWithValue(1));\n+ fd.reset();\n+\n+ // We should get a single event followed by IN_IGNORED indicating removal\n+ // of the one-shot watch. Prior activity (i.e. open) that is not in the mask\n+ // should not trigger removal, and activity after removal (i.e. close) should\n+ // not generate events.\n+ std::vector<Event> events =\n+ ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(inotify_fd.get()));\n+ EXPECT_THAT(events, Are({\n+ Event(IN_MODIFY, wd),\n+ Event(IN_IGNORED, wd),\n+ }));\n+\n+ // The watch should already have been removed.\n+ EXPECT_THAT(inotify_rm_watch(inotify_fd.get(), wd),\n+ SyscallFailsWithErrno(EINVAL));\n+}\n+\n// This test helps verify that the lock order of filesystem and inotify locks\n// is respected when inotify instances and watch targets are concurrently being\n// destroyed.\n"
}
] | Go | Apache License 2.0 | google/gvisor | Support inotify IN_ONESHOT.
Also, while we're here, make sure that gofer inotify events are generated when
files are created in remote revalidating mode.
Updates #1479.
PiperOrigin-RevId: 318536354 |
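For context, the userspace-visible behavior the IN_ONESHOT change above implements is: one matching event is delivered, the watch is dropped, and an IN_IGNORED event follows; removing the watch again fails with EINVAL. A rough Go sketch of that flow using golang.org/x/sys/unix (the watched path is a placeholder and error handling is minimal):

```go
package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	fd, err := unix.InotifyInit1(0)
	if err != nil {
		panic(err)
	}
	defer unix.Close(fd)

	// Watch a file for modification, but only once: after the first matching
	// event the kernel drops the watch and queues IN_IGNORED for it.
	wd, err := unix.InotifyAddWatch(fd, "/tmp/watched-file", unix.IN_MODIFY|unix.IN_ONESHOT)
	if err != nil {
		panic(err)
	}

	// Blocks until at least one event (here, IN_MODIFY) is available; once the
	// one-shot watch is dropped, an IN_IGNORED event for wd is also queued.
	buf := make([]byte, 4096)
	n, err := unix.Read(fd, buf)
	if err != nil {
		panic(err)
	}
	fmt.Printf("read %d bytes of events for wd %d\n", n, wd)

	// Removing the watch again should fail with EINVAL, since the one-shot
	// watch has already expired.
	if _, err := unix.InotifyRmWatch(fd, uint32(wd)); err != nil {
		fmt.Println("inotify_rm_watch:", err)
	}
}
```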
259,898 | 26.06.2020 15:07:35 | 25,200 | e6a90baef1e6921c560c939f4b591163004613b9 | Support IPv6 extension headers in packetimpact tests.
IPv6 HopByHop Options Extension Header
IPv6 Destination Options Extension Header | [
{
"change_type": "MODIFY",
"old_path": "test/packetimpact/testbench/layers.go",
"new_path": "test/packetimpact/testbench/layers.go",
"diff": "@@ -477,6 +477,10 @@ func (l *IPv6) ToBytes() ([]byte, error) {\nfields.NextHeader = uint8(header.UDPProtocolNumber)\ncase *ICMPv6:\nfields.NextHeader = uint8(header.ICMPv6ProtocolNumber)\n+ case *IPv6HopByHopOptionsExtHdr:\n+ fields.NextHeader = uint8(header.IPv6HopByHopOptionsExtHdrIdentifier)\n+ case *IPv6DestinationOptionsExtHdr:\n+ fields.NextHeader = uint8(header.IPv6DestinationOptionsExtHdrIdentifier)\ndefault:\n// TODO(b/150301488): Support more protocols as needed.\nreturn nil, fmt.Errorf(\"ToBytes can't deduce the IPv6 header's next protocol: %#v\", n)\n@@ -495,6 +499,25 @@ func (l *IPv6) ToBytes() ([]byte, error) {\nreturn h, nil\n}\n+// nextIPv6PayloadParser finds the corresponding parser for nextHeader.\n+func nextIPv6PayloadParser(nextHeader uint8) layerParser {\n+ switch tcpip.TransportProtocolNumber(nextHeader) {\n+ case header.TCPProtocolNumber:\n+ return parseTCP\n+ case header.UDPProtocolNumber:\n+ return parseUDP\n+ case header.ICMPv6ProtocolNumber:\n+ return parseICMPv6\n+ }\n+ switch header.IPv6ExtensionHeaderIdentifier(nextHeader) {\n+ case header.IPv6HopByHopOptionsExtHdrIdentifier:\n+ return parseIPv6HopByHopOptionsExtHdr\n+ case header.IPv6DestinationOptionsExtHdrIdentifier:\n+ return parseIPv6DestinationOptionsExtHdr\n+ }\n+ return parsePayload\n+}\n+\n// parseIPv6 parses the bytes assuming that they start with an ipv6 header and\n// continues parsing further encapsulations.\nfunc parseIPv6(b []byte) (Layer, layerParser) {\n@@ -509,18 +532,7 @@ func parseIPv6(b []byte) (Layer, layerParser) {\nSrcAddr: Address(h.SourceAddress()),\nDstAddr: Address(h.DestinationAddress()),\n}\n- var nextParser layerParser\n- switch h.TransportProtocol() {\n- case header.TCPProtocolNumber:\n- nextParser = parseTCP\n- case header.UDPProtocolNumber:\n- nextParser = parseUDP\n- case header.ICMPv6ProtocolNumber:\n- nextParser = parseICMPv6\n- default:\n- // Assume that the rest is a payload.\n- nextParser = parsePayload\n- }\n+ nextParser := nextIPv6PayloadParser(h.NextHeader())\nreturn &ipv6, nextParser\n}\n@@ -538,6 +550,123 @@ func (l *IPv6) merge(other Layer) error {\nreturn mergeLayer(l, other)\n}\n+// IPv6HopByHopOptionsExtHdr can construct and match an IPv6HopByHopOptions\n+// Extension Header.\n+type IPv6HopByHopOptionsExtHdr struct {\n+ LayerBase\n+ NextHeader *header.IPv6ExtensionHeaderIdentifier\n+ Options []byte\n+}\n+\n+// IPv6DestinationOptionsExtHdr can construct and match an IPv6DestinationOptions\n+// Extension Header.\n+type IPv6DestinationOptionsExtHdr struct {\n+ LayerBase\n+ NextHeader *header.IPv6ExtensionHeaderIdentifier\n+ Options []byte\n+}\n+\n+// ipv6OptionsExtHdrToBytes serializes an options extension header into bytes.\n+func ipv6OptionsExtHdrToBytes(nextHeader *header.IPv6ExtensionHeaderIdentifier, options []byte) []byte {\n+ length := len(options) + 2\n+ bytes := make([]byte, length)\n+ if nextHeader == nil {\n+ bytes[0] = byte(header.IPv6NoNextHeaderIdentifier)\n+ } else {\n+ bytes[0] = byte(*nextHeader)\n+ }\n+ // ExtHdrLen field is the length of the extension header\n+ // in 8-octet unit, ignoring the first 8 octets.\n+ // https://tools.ietf.org/html/rfc2460#section-4.3\n+ // https://tools.ietf.org/html/rfc2460#section-4.6\n+ bytes[1] = uint8((length - 8) / 8)\n+ copy(bytes[2:], options)\n+ return bytes\n+}\n+\n+// IPv6ExtHdrIdent is a helper routine that allocates a new\n+// header.IPv6ExtensionHeaderIdentifier value to store v and returns a pointer\n+// to it.\n+func IPv6ExtHdrIdent(id header.IPv6ExtensionHeaderIdentifier) 
*header.IPv6ExtensionHeaderIdentifier {\n+ return &id\n+}\n+\n+// ToBytes implements Layer.ToBytes\n+func (l *IPv6HopByHopOptionsExtHdr) ToBytes() ([]byte, error) {\n+ return ipv6OptionsExtHdrToBytes(l.NextHeader, l.Options), nil\n+}\n+\n+// ToBytes implements Layer.ToBytes\n+func (l *IPv6DestinationOptionsExtHdr) ToBytes() ([]byte, error) {\n+ return ipv6OptionsExtHdrToBytes(l.NextHeader, l.Options), nil\n+}\n+\n+// parseIPv6ExtHdr parses an IPv6 extension header and returns the NextHeader\n+// field, the rest of the payload and a parser function for the corresponding\n+// next extension header.\n+func parseIPv6ExtHdr(b []byte) (header.IPv6ExtensionHeaderIdentifier, []byte, layerParser) {\n+ nextHeader := b[0]\n+ // For HopByHop and Destination options extension headers,\n+ // This field is the length of the extension header in\n+ // 8-octet units, not including the first 8 octets.\n+ // https://tools.ietf.org/html/rfc2460#section-4.3\n+ // https://tools.ietf.org/html/rfc2460#section-4.6\n+ length := b[1]*8 + 8\n+ data := b[2:length]\n+ nextParser := nextIPv6PayloadParser(nextHeader)\n+ return header.IPv6ExtensionHeaderIdentifier(nextHeader), data, nextParser\n+}\n+\n+// parseIPv6HopByHopOptionsExtHdr parses the bytes assuming that they start\n+// with an IPv6 HopByHop Options Extension Header.\n+func parseIPv6HopByHopOptionsExtHdr(b []byte) (Layer, layerParser) {\n+ nextHeader, options, nextParser := parseIPv6ExtHdr(b)\n+ return &IPv6HopByHopOptionsExtHdr{NextHeader: &nextHeader, Options: options}, nextParser\n+}\n+\n+// parseIPv6DestinationOptionsExtHdr parses the bytes assuming that they start\n+// with an IPv6 Destination Options Extension Header.\n+func parseIPv6DestinationOptionsExtHdr(b []byte) (Layer, layerParser) {\n+ nextHeader, options, nextParser := parseIPv6ExtHdr(b)\n+ return &IPv6DestinationOptionsExtHdr{NextHeader: &nextHeader, Options: options}, nextParser\n+}\n+\n+func (l *IPv6HopByHopOptionsExtHdr) length() int {\n+ return len(l.Options) + 2\n+}\n+\n+func (l *IPv6HopByHopOptionsExtHdr) match(other Layer) bool {\n+ return equalLayer(l, other)\n+}\n+\n+// merge overrides the values in l with the values from other but only in fields\n+// where the value is not nil.\n+func (l *IPv6HopByHopOptionsExtHdr) merge(other Layer) error {\n+ return mergeLayer(l, other)\n+}\n+\n+func (l *IPv6HopByHopOptionsExtHdr) String() string {\n+ return stringLayer(l)\n+}\n+\n+func (l *IPv6DestinationOptionsExtHdr) length() int {\n+ return len(l.Options) + 2\n+}\n+\n+func (l *IPv6DestinationOptionsExtHdr) match(other Layer) bool {\n+ return equalLayer(l, other)\n+}\n+\n+// merge overrides the values in l with the values from other but only in fields\n+// where the value is not nil.\n+func (l *IPv6DestinationOptionsExtHdr) merge(other Layer) error {\n+ return mergeLayer(l, other)\n+}\n+\n+func (l *IPv6DestinationOptionsExtHdr) String() string {\n+ return stringLayer(l)\n+}\n+\n// ICMPv6 can construct and match an ICMPv6 encapsulation.\ntype ICMPv6 struct {\nLayerBase\n@@ -567,8 +696,15 @@ func (l *ICMPv6) ToBytes() ([]byte, error) {\nif l.Checksum != nil {\nh.SetChecksum(*l.Checksum)\n} else {\n- ipv6 := l.Prev().(*IPv6)\n+ // It is possible that the ICMPv6 header does not follow the IPv6 header\n+ // immediately, there could be one or more extension headers in between.\n+ // We need to search forward to find the IPv6 header.\n+ for prev := l.Prev(); prev != nil; prev = prev.Prev() {\n+ if ipv6, ok := prev.(*IPv6); ok {\nh.SetChecksum(header.ICMPv6Checksum(h, *ipv6.SrcAddr, *ipv6.DstAddr, 
buffer.VectorisedView{}))\n+ break\n+ }\n+ }\n}\nreturn h, nil\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "test/packetimpact/testbench/layers_test.go",
"new_path": "test/packetimpact/testbench/layers_test.go",
"diff": "@@ -505,3 +505,114 @@ func TestTCPOptions(t *testing.T) {\n})\n}\n}\n+\n+func TestIPv6ExtHdrOptions(t *testing.T) {\n+ for _, tt := range []struct {\n+ description string\n+ wantBytes []byte\n+ wantLayers Layers\n+ }{\n+ {\n+ description: \"IPv6/HopByHop\",\n+ wantBytes: []byte{\n+ // IPv6 Header\n+ 0x60, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x40, 0x00, 0x00,\n+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n+ 0x00, 0x00, 0x00, 0x01, 0xfe, 0x80, 0x00, 0x00, 0x00, 0x00,\n+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xde, 0xad, 0xbe, 0xef,\n+ // HopByHop Options\n+ 0x3b, 0x00, 0x05, 0x02, 0x00, 0x00, 0x01, 0x00,\n+ },\n+ wantLayers: []Layer{\n+ &IPv6{\n+ SrcAddr: Address(tcpip.Address(net.ParseIP(\"::1\"))),\n+ DstAddr: Address(tcpip.Address(net.ParseIP(\"fe80::dead:beef\"))),\n+ },\n+ &IPv6HopByHopOptionsExtHdr{\n+ NextHeader: IPv6ExtHdrIdent(header.IPv6NoNextHeaderIdentifier),\n+ Options: []byte{0x05, 0x02, 0x00, 0x00, 0x01, 0x00},\n+ },\n+ &Payload{\n+ Bytes: nil,\n+ },\n+ },\n+ },\n+ {\n+ description: \"IPv6/HopByHop/Payload\",\n+ wantBytes: []byte{\n+ // IPv6 Header\n+ 0x60, 0x00, 0x00, 0x00, 0x00, 0x13, 0x00, 0x40, 0x00, 0x00,\n+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n+ 0x00, 0x00, 0x00, 0x01, 0xfe, 0x80, 0x00, 0x00, 0x00, 0x00,\n+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xde, 0xad, 0xbe, 0xef,\n+ // HopByHop Options\n+ 0x3b, 0x00, 0x05, 0x02, 0x00, 0x00, 0x01, 0x00,\n+ // Sample Data\n+ 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x20, 0x44, 0x61, 0x74, 0x61,\n+ },\n+ wantLayers: []Layer{\n+ &IPv6{\n+ SrcAddr: Address(tcpip.Address(net.ParseIP(\"::1\"))),\n+ DstAddr: Address(tcpip.Address(net.ParseIP(\"fe80::dead:beef\"))),\n+ },\n+ &IPv6HopByHopOptionsExtHdr{\n+ NextHeader: IPv6ExtHdrIdent(header.IPv6NoNextHeaderIdentifier),\n+ Options: []byte{0x05, 0x02, 0x00, 0x00, 0x01, 0x00},\n+ },\n+ &Payload{\n+ Bytes: []byte(\"Sample Data\"),\n+ },\n+ },\n+ },\n+ {\n+ description: \"IPv6/HopByHop/Destination/ICMPv6\",\n+ wantBytes: []byte{\n+ // IPv6 Header\n+ 0x60, 0x00, 0x00, 0x00, 0x00, 0x18, 0x00, 0x40, 0x00, 0x00,\n+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n+ 0x00, 0x00, 0x00, 0x01, 0xfe, 0x80, 0x00, 0x00, 0x00, 0x00,\n+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xde, 0xad, 0xbe, 0xef,\n+ // HopByHop Options\n+ 0x3c, 0x00, 0x05, 0x02, 0x00, 0x00, 0x01, 0x00,\n+ // Destination Options\n+ 0x3a, 0x00, 0x05, 0x02, 0x00, 0x00, 0x01, 0x00,\n+ // ICMPv6 Param Problem\n+ 0x04, 0x00, 0x5f, 0x98, 0x00, 0x00, 0x00, 0x06,\n+ },\n+ wantLayers: []Layer{\n+ &IPv6{\n+ SrcAddr: Address(tcpip.Address(net.ParseIP(\"::1\"))),\n+ DstAddr: Address(tcpip.Address(net.ParseIP(\"fe80::dead:beef\"))),\n+ },\n+ &IPv6HopByHopOptionsExtHdr{\n+ NextHeader: IPv6ExtHdrIdent(header.IPv6DestinationOptionsExtHdrIdentifier),\n+ Options: []byte{0x05, 0x02, 0x00, 0x00, 0x01, 0x00},\n+ },\n+ &IPv6DestinationOptionsExtHdr{\n+ NextHeader: IPv6ExtHdrIdent(header.IPv6ExtensionHeaderIdentifier(header.ICMPv6ProtocolNumber)),\n+ Options: []byte{0x05, 0x02, 0x00, 0x00, 0x01, 0x00},\n+ },\n+ &ICMPv6{\n+ Type: ICMPv6Type(header.ICMPv6ParamProblem),\n+ Code: Byte(0),\n+ Checksum: Uint16(0x5f98),\n+ NDPPayload: []byte{0x00, 0x00, 0x00, 0x06},\n+ },\n+ },\n+ },\n+ } {\n+ t.Run(tt.description, func(t *testing.T) {\n+ layers := parse(parseIPv6, tt.wantBytes)\n+ if !layers.match(tt.wantLayers) {\n+ t.Fatalf(\"match failed with diff: %s\", layers.diff(tt.wantLayers))\n+ }\n+ gotBytes, err := layers.ToBytes()\n+ if err != nil {\n+ t.Fatalf(\"ToBytes() failed on %s: %s\", &layers, err)\n+ }\n+ if 
!bytes.Equal(tt.wantBytes, gotBytes) {\n+ t.Fatalf(\"mismatching bytes, gotBytes: %x, wantBytes: %x\", gotBytes, tt.wantBytes)\n+ }\n+ })\n+ }\n+}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Support IPv6 extension headers in packetimpact tests.
- IPv6 HopByHop Options Extension Header
- IPv6 Destination Options Extension Header
PiperOrigin-RevId: 318551425 |
259,891 | 26.06.2020 16:23:15 | 25,200 | 9cfc15497581824f1c6ba2b9f9ee653d0be0bc5a | Require CAP_SYS_ADMIN in the root user namespace for TTY theft | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/thread_group.go",
"new_path": "pkg/sentry/kernel/thread_group.go",
"diff": "@@ -366,7 +366,8 @@ func (tg *ThreadGroup) SetControllingTTY(tty *TTY, arg int32) error {\n// terminal is stolen, and all processes that had it as controlling\n// terminal lose it.\" - tty_ioctl(4)\nif tty.tg != nil && tg.processGroup.session != tty.tg.processGroup.session {\n- if !auth.CredentialsFromContext(tg.leader).HasCapability(linux.CAP_SYS_ADMIN) || arg != 1 {\n+ // Stealing requires CAP_SYS_ADMIN in the root user namespace.\n+ if creds := auth.CredentialsFromContext(tg.leader); !creds.HasCapabilityIn(linux.CAP_SYS_ADMIN, creds.UserNamespace.Root()) || arg != 1 {\nreturn syserror.EPERM\n}\n// Steal the TTY away. Unlike TIOCNOTTY, don't send signals.\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/pty_root.cc",
"new_path": "test/syscalls/linux/pty_root.cc",
"diff": "namespace gvisor {\nnamespace testing {\n-// These tests should be run as root.\nnamespace {\n+// StealTTY tests whether privileged processes can steal controlling terminals.\n+// If the stealing process has CAP_SYS_ADMIN in the root user namespace, the\n+// test ensures that stealing works. If it has non-root CAP_SYS_ADMIN, it\n+// ensures stealing fails.\nTEST(JobControlRootTest, StealTTY) {\nSKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_ADMIN)));\n+ bool true_root = true;\n+ if (!IsRunningOnGvisor()) {\n+ // If running in Linux, we may only have CAP_SYS_ADMIN in a non-root user\n+ // namespace (i.e. we are not truly root). We use init_module as a proxy for\n+ // whether we are true root, as it returns EPERM immediately.\n+ ASSERT_THAT(syscall(SYS_init_module, nullptr, 0, nullptr), SyscallFails());\n+ true_root = errno != EPERM;\n+\n// Make this a session leader, which also drops the controlling terminal.\n// In the gVisor test environment, this test will be run as the session\n// leader already (as the sentry init process).\n- if (!IsRunningOnGvisor()) {\nASSERT_THAT(setsid(), SyscallSucceeds());\n}\n@@ -53,8 +63,8 @@ TEST(JobControlRootTest, StealTTY) {\nASSERT_THAT(setsid(), SyscallSucceeds());\n// We shouldn't be able to steal the terminal with the wrong arg value.\nTEST_PCHECK(ioctl(slave.get(), TIOCSCTTY, 0));\n- // We should be able to steal it here.\n- TEST_PCHECK(!ioctl(slave.get(), TIOCSCTTY, 1));\n+ // We should be able to steal it if we are true root.\n+ TEST_PCHECK(true_root == !ioctl(slave.get(), TIOCSCTTY, 1));\n_exit(0);\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Require CAP_SYS_ADMIN in the root user namespace for TTY theft
PiperOrigin-RevId: 318563543 |
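For reference, the operation being gated above: a session leader claims a controlling terminal with ioctl(fd, TIOCSCTTY, arg), and a nonzero arg asks to steal it from another session, which the change requires CAP_SYS_ADMIN in the root user namespace to allow. A hedged Go sketch of that call via golang.org/x/sys/unix (the pty path is a placeholder; a real caller would open the slave end of a pty pair it set up itself):

```go
package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	// TIOCSCTTY is only meaningful for a session leader without a controlling
	// terminal, so become one first (this fails harmlessly if already a leader).
	if _, err := unix.Setsid(); err != nil {
		fmt.Fprintln(os.Stderr, "setsid:", err)
	}

	// Placeholder pty slave path for illustration only.
	f, err := os.OpenFile("/dev/pts/0", os.O_RDWR, 0)
	if err != nil {
		fmt.Fprintln(os.Stderr, "open:", err)
		return
	}
	defer f.Close()

	// arg == 1 requests stealing the terminal from another session; per the
	// change above, this now needs CAP_SYS_ADMIN in the root user namespace.
	if err := unix.IoctlSetInt(int(f.Fd()), unix.TIOCSCTTY, 1); err != nil {
		fmt.Fprintln(os.Stderr, "TIOCSCTTY:", err)
	}
}
```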
259,891 | 26.06.2020 19:04:59 | 25,200 | 66d1665441461a5226ba0c884e22888d58f393b6 | IPv6 raw sockets. Needed for ip6tables.
IPv6 raw sockets never include the IPv6 header. | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/raw/endpoint.go",
"new_path": "pkg/tcpip/transport/raw/endpoint.go",
"diff": "@@ -94,7 +94,7 @@ func NewEndpoint(stack *stack.Stack, netProto tcpip.NetworkProtocolNumber, trans\n}\nfunc newEndpoint(s *stack.Stack, netProto tcpip.NetworkProtocolNumber, transProto tcpip.TransportProtocolNumber, waiterQueue *waiter.Queue, associated bool) (tcpip.Endpoint, *tcpip.Error) {\n- if netProto != header.IPv4ProtocolNumber {\n+ if netProto != header.IPv4ProtocolNumber && netProto != header.IPv6ProtocolNumber {\nreturn nil, tcpip.ErrUnknownProtocol\n}\n@@ -215,6 +215,11 @@ func (e *endpoint) Read(addr *tcpip.FullAddress) (buffer.View, tcpip.ControlMess\n// Write implements tcpip.Endpoint.Write.\nfunc (e *endpoint) Write(p tcpip.Payloader, opts tcpip.WriteOptions) (int64, <-chan struct{}, *tcpip.Error) {\n+ // We can create, but not write to, unassociated IPv6 endpoints.\n+ if !e.associated && e.TransportEndpointInfo.NetProto == header.IPv6ProtocolNumber {\n+ return 0, nil, tcpip.ErrInvalidOptionValue\n+ }\n+\nn, ch, err := e.write(p, opts)\nswitch err {\ncase nil:\n@@ -319,12 +324,6 @@ func (e *endpoint) write(p tcpip.Payloader, opts tcpip.WriteOptions) (int64, <-c\nreturn 0, nil, tcpip.ErrNoRoute\n}\n- // We don't support IPv6 yet, so this has to be an IPv4 address.\n- if len(opts.To.Addr) != header.IPv4AddressSize {\n- e.mu.RUnlock()\n- return 0, nil, tcpip.ErrInvalidEndpointState\n- }\n-\n// Find the route to the destination. If BindAddress is 0,\n// FindRoute will choose an appropriate source address.\nroute, err := e.stack.FindRoute(nic, e.BindAddr, opts.To.Addr, e.NetProto, false)\n@@ -354,17 +353,13 @@ func (e *endpoint) finishWrite(payloadBytes []byte, route *stack.Route) (int64,\n}\n}\n- switch e.NetProto {\n- case header.IPv4ProtocolNumber:\nif !e.associated {\nif err := route.WriteHeaderIncludedPacket(&stack.PacketBuffer{\nData: buffer.View(payloadBytes).ToVectorisedView(),\n}); err != nil {\nreturn 0, nil, err\n}\n- break\n- }\n-\n+ } else {\nhdr := buffer.NewPrependable(len(payloadBytes) + int(route.MaxHeaderLength()))\nif err := route.WritePacket(nil /* gso */, stack.NetworkHeaderParams{Protocol: e.TransProto, TTL: route.DefaultTTL(), TOS: stack.DefaultTOS}, &stack.PacketBuffer{\nHeader: hdr,\n@@ -373,9 +368,6 @@ func (e *endpoint) finishWrite(payloadBytes []byte, route *stack.Route) (int64,\n}); err != nil {\nreturn 0, nil, err\n}\n-\n- default:\n- return 0, nil, tcpip.ErrUnknownProtocol\n}\nreturn int64(len(payloadBytes)), nil, nil\n@@ -400,11 +392,6 @@ func (e *endpoint) Connect(addr tcpip.FullAddress) *tcpip.Error {\nreturn tcpip.ErrInvalidEndpointState\n}\n- // We don't support IPv6 yet.\n- if len(addr.Addr) != header.IPv4AddressSize {\n- return tcpip.ErrInvalidEndpointState\n- }\n-\nnic := addr.NIC\nif e.bound {\nif e.BindNICID == 0 {\n@@ -470,14 +457,8 @@ func (e *endpoint) Bind(addr tcpip.FullAddress) *tcpip.Error {\ne.mu.Lock()\ndefer e.mu.Unlock()\n- // Callers must provide an IPv4 address or no network address (for\n- // binding to a NIC, but not an address).\n- if len(addr.Addr) != 0 && len(addr.Addr) != 4 {\n- return tcpip.ErrInvalidEndpointState\n- }\n-\n// If a local address was specified, verify that it's valid.\n- if len(addr.Addr) == header.IPv4AddressSize && e.stack.CheckLocalAddress(addr.NIC, e.NetProto, addr.Addr) == 0 {\n+ if e.stack.CheckLocalAddress(addr.NIC, e.NetProto, addr.Addr) == 0 {\nreturn tcpip.ErrBadLocalAddress\n}\n@@ -680,9 +661,19 @@ func (e *endpoint) HandlePacket(route *stack.Route, pkt *stack.PacketBuffer) {\n},\n}\n- headers := append(buffer.View(nil), pkt.NetworkHeader...)\n+ // Raw IPv4 endpoints return the IP 
header, but IPv6 endpoints do not.\n+ // We copy headers' underlying bytes because pkt.*Header may point to\n+ // the middle of a slice, and another struct may point to the \"outer\"\n+ // slice. Save/restore doesn't support overlapping slices and will fail.\n+ var combinedVV buffer.VectorisedView\n+ if e.TransportEndpointInfo.NetProto == header.IPv4ProtocolNumber {\n+ headers := make(buffer.View, 0, len(pkt.NetworkHeader)+len(pkt.TransportHeader))\n+ headers = append(headers, pkt.NetworkHeader...)\nheaders = append(headers, pkt.TransportHeader...)\n- combinedVV := headers.ToVectorisedView()\n+ combinedVV = headers.ToVectorisedView()\n+ } else {\n+ combinedVV = append(buffer.View(nil), pkt.TransportHeader...).ToVectorisedView()\n+ }\ncombinedVV.Append(pkt.Data)\npacket.data = combinedVV\npacket.timestampNS = e.stack.NowNanoseconds()\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/BUILD",
"new_path": "test/syscalls/BUILD",
"diff": "@@ -547,7 +547,7 @@ syscall_test(\n)\nsyscall_test(\n- test = \"//test/syscalls/linux:raw_socket_ipv4_test\",\n+ test = \"//test/syscalls/linux:raw_socket_test\",\nvfs2 = \"True\",\n)\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/BUILD",
"new_path": "test/syscalls/linux/BUILD",
"diff": "@@ -1800,9 +1800,9 @@ cc_binary(\n)\ncc_binary(\n- name = \"raw_socket_ipv4_test\",\n+ name = \"raw_socket_test\",\ntestonly = 1,\n- srcs = [\"raw_socket_ipv4.cc\"],\n+ srcs = [\"raw_socket.cc\"],\nlinkstatic = 1,\ndeps = [\n\":socket_test_util\",\n"
},
{
"change_type": "RENAME",
"old_path": "test/syscalls/linux/raw_socket_ipv4.cc",
"new_path": "test/syscalls/linux/raw_socket.cc",
"diff": "#include <linux/capability.h>\n#include <netinet/in.h>\n#include <netinet/ip.h>\n+#include <netinet/ip6.h>\n#include <netinet/ip_icmp.h>\n#include <poll.h>\n#include <sys/socket.h>\n#include <sys/types.h>\n#include <unistd.h>\n-\n#include <algorithm>\n#include \"gtest/gtest.h\"\n@@ -39,7 +39,7 @@ namespace testing {\nnamespace {\n// Fixture for tests parameterized by protocol.\n-class RawSocketTest : public ::testing::TestWithParam<int> {\n+class RawSocketTest : public ::testing::TestWithParam<std::tuple<int, int>> {\nprotected:\n// Creates a socket to be used in tests.\nvoid SetUp() override;\n@@ -50,36 +50,58 @@ class RawSocketTest : public ::testing::TestWithParam<int> {\n// Sends buf via s_.\nvoid SendBuf(const char* buf, int buf_len);\n- // Sends buf to the provided address via the provided socket.\n- void SendBufTo(int sock, const struct sockaddr_in& addr, const char* buf,\n- int buf_len);\n-\n// Reads from s_ into recv_buf.\nvoid ReceiveBuf(char* recv_buf, size_t recv_buf_len);\n- int Protocol() { return GetParam(); }\n+ void ReceiveBufFrom(int sock, char* recv_buf, size_t recv_buf_len);\n+\n+ int Protocol() { return std::get<0>(GetParam()); }\n+\n+ int Family() { return std::get<1>(GetParam()); }\n+\n+ socklen_t AddrLen() {\n+ if (Family() == AF_INET) {\n+ return sizeof(sockaddr_in);\n+ }\n+ return sizeof(sockaddr_in6);\n+ }\n+\n+ int HdrLen() {\n+ if (Family() == AF_INET) {\n+ return sizeof(struct iphdr);\n+ }\n+ // IPv6 raw sockets don't include the header.\n+ return 0;\n+ }\n// The socket used for both reading and writing.\nint s_;\n// The loopback address.\n- struct sockaddr_in addr_;\n+ struct sockaddr_storage addr_;\n};\nvoid RawSocketTest::SetUp() {\nif (!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW))) {\n- ASSERT_THAT(socket(AF_INET, SOCK_RAW, Protocol()),\n+ ASSERT_THAT(socket(Family(), SOCK_RAW, Protocol()),\nSyscallFailsWithErrno(EPERM));\nGTEST_SKIP();\n}\n- ASSERT_THAT(s_ = socket(AF_INET, SOCK_RAW, Protocol()), SyscallSucceeds());\n+ ASSERT_THAT(s_ = socket(Family(), SOCK_RAW, Protocol()), SyscallSucceeds());\naddr_ = {};\n// We don't set ports because raw sockets don't have a notion of ports.\n- addr_.sin_addr.s_addr = htonl(INADDR_LOOPBACK);\n- addr_.sin_family = AF_INET;\n+ if (Family() == AF_INET) {\n+ struct sockaddr_in* sin = reinterpret_cast<struct sockaddr_in*>(&addr_);\n+ sin->sin_family = AF_INET;\n+ sin->sin_addr.s_addr = htonl(INADDR_LOOPBACK);\n+ } else {\n+ struct sockaddr_in6* sin6 = reinterpret_cast<struct sockaddr_in6*>(&addr_);\n+ sin6->sin6_family = AF_INET6;\n+ sin6->sin6_addr = in6addr_loopback;\n+ }\n}\nvoid RawSocketTest::TearDown() {\n@@ -96,7 +118,7 @@ TEST_P(RawSocketTest, MultipleCreation) {\nSKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));\nint s2;\n- ASSERT_THAT(s2 = socket(AF_INET, SOCK_RAW, Protocol()), SyscallSucceeds());\n+ ASSERT_THAT(s2 = socket(Family(), SOCK_RAW, Protocol()), SyscallSucceeds());\nASSERT_THAT(close(s2), SyscallSucceeds());\n}\n@@ -114,7 +136,7 @@ TEST_P(RawSocketTest, ShutdownWriteNoop) {\nSKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));\nASSERT_THAT(\n- connect(s_, reinterpret_cast<struct sockaddr*>(&addr_), sizeof(addr_)),\n+ connect(s_, reinterpret_cast<struct sockaddr*>(&addr_), AddrLen()),\nSyscallSucceeds());\nASSERT_THAT(shutdown(s_, SHUT_WR), SyscallSucceeds());\n@@ -129,7 +151,7 @@ TEST_P(RawSocketTest, ShutdownReadNoop) {\nSKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));\nASSERT_THAT(\n- connect(s_, reinterpret_cast<struct 
sockaddr*>(&addr_), sizeof(addr_)),\n+ connect(s_, reinterpret_cast<struct sockaddr*>(&addr_), AddrLen()),\nSyscallSucceeds());\nASSERT_THAT(shutdown(s_, SHUT_RD), SyscallSucceeds());\n@@ -137,9 +159,8 @@ TEST_P(RawSocketTest, ShutdownReadNoop) {\nconstexpr char kBuf[] = \"gdg\";\nASSERT_NO_FATAL_FAILURE(SendBuf(kBuf, sizeof(kBuf)));\n- constexpr size_t kReadSize = sizeof(kBuf) + sizeof(struct iphdr);\n- char c[kReadSize];\n- ASSERT_THAT(read(s_, &c, sizeof(c)), SyscallSucceedsWithValue(kReadSize));\n+ std::vector<char> c(sizeof(kBuf) + HdrLen());\n+ ASSERT_THAT(read(s_, c.data(), c.size()), SyscallSucceedsWithValue(c.size()));\n}\n// Test that listen() fails.\n@@ -173,7 +194,7 @@ TEST_P(RawSocketTest, GetPeerName) {\nSKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));\nASSERT_THAT(\n- connect(s_, reinterpret_cast<struct sockaddr*>(&addr_), sizeof(addr_)),\n+ connect(s_, reinterpret_cast<struct sockaddr*>(&addr_), AddrLen()),\nSyscallSucceeds());\nstruct sockaddr saddr;\nsocklen_t addrlen = sizeof(saddr);\n@@ -223,7 +244,7 @@ TEST_P(RawSocketTest, ConnectToLoopback) {\nSKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));\nASSERT_THAT(\n- connect(s_, reinterpret_cast<struct sockaddr*>(&addr_), sizeof(addr_)),\n+ connect(s_, reinterpret_cast<struct sockaddr*>(&addr_), AddrLen()),\nSyscallSucceeds());\n}\n@@ -242,7 +263,7 @@ TEST_P(RawSocketTest, BindToLocalhost) {\nSKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));\nASSERT_THAT(\n- bind(s_, reinterpret_cast<struct sockaddr*>(&addr_), sizeof(addr_)),\n+ bind(s_, reinterpret_cast<struct sockaddr*>(&addr_), AddrLen()),\nSyscallSucceeds());\n}\n@@ -250,12 +271,18 @@ TEST_P(RawSocketTest, BindToLocalhost) {\nTEST_P(RawSocketTest, BindToInvalid) {\nSKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));\n- struct sockaddr_in bind_addr = {};\n- bind_addr.sin_family = AF_INET;\n- bind_addr.sin_addr = {1}; // 1.0.0.0 - An address that we can't bind to.\n+ struct sockaddr_storage bind_addr = addr_;\n+ if (Family() == AF_INET) {\n+ struct sockaddr_in* sin = reinterpret_cast<struct sockaddr_in*>(&bind_addr);\n+ sin->sin_addr = {1}; // 1.0.0.0 - An address that we can't bind to.\n+ } else {\n+ struct sockaddr_in6* sin6 =\n+ reinterpret_cast<struct sockaddr_in6*>(&bind_addr);\n+ memset(&sin6->sin6_addr.s6_addr, 0, sizeof(sin6->sin6_addr.s6_addr));\n+ sin6->sin6_addr.s6_addr[0] = 1; // 1: - An address that we can't bind to.\n+ }\nASSERT_THAT(bind(s_, reinterpret_cast<struct sockaddr*>(&bind_addr),\n- sizeof(bind_addr)),\n- SyscallFailsWithErrno(EADDRNOTAVAIL));\n+ AddrLen()), SyscallFailsWithErrno(EADDRNOTAVAIL));\n}\n// Send and receive an packet.\n@@ -267,9 +294,9 @@ TEST_P(RawSocketTest, SendAndReceive) {\nASSERT_NO_FATAL_FAILURE(SendBuf(kBuf, sizeof(kBuf)));\n// Receive the packet and make sure it's identical.\n- char recv_buf[sizeof(kBuf) + sizeof(struct iphdr)];\n- ASSERT_NO_FATAL_FAILURE(ReceiveBuf(recv_buf, sizeof(recv_buf)));\n- EXPECT_EQ(memcmp(recv_buf + sizeof(struct iphdr), kBuf, sizeof(kBuf)), 0);\n+ std::vector<char> recv_buf(sizeof(kBuf) + HdrLen());\n+ ASSERT_NO_FATAL_FAILURE(ReceiveBuf(recv_buf.data(), recv_buf.size()));\n+ EXPECT_EQ(memcmp(recv_buf.data() + HdrLen(), kBuf, sizeof(kBuf)), 0);\n}\n// We should be able to create multiple raw sockets for the same protocol and\n@@ -278,22 +305,23 @@ TEST_P(RawSocketTest, MultipleSocketReceive) {\nSKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));\nint s2;\n- ASSERT_THAT(s2 = socket(AF_INET, SOCK_RAW, Protocol()), 
SyscallSucceeds());\n+ ASSERT_THAT(s2 = socket(Family(), SOCK_RAW, Protocol()), SyscallSucceeds());\n// Arbitrary.\nconstexpr char kBuf[] = \"TB10\";\nASSERT_NO_FATAL_FAILURE(SendBuf(kBuf, sizeof(kBuf)));\n// Receive it on socket 1.\n- char recv_buf1[sizeof(kBuf) + sizeof(struct iphdr)];\n- ASSERT_NO_FATAL_FAILURE(ReceiveBuf(recv_buf1, sizeof(recv_buf1)));\n+ std::vector<char> recv_buf1(sizeof(kBuf) + HdrLen());\n+ ASSERT_NO_FATAL_FAILURE(ReceiveBuf(recv_buf1.data(), recv_buf1.size()));\n// Receive it on socket 2.\n- char recv_buf2[sizeof(kBuf) + sizeof(struct iphdr)];\n- ASSERT_NO_FATAL_FAILURE(RecvNoCmsg(s2, recv_buf2, sizeof(recv_buf2)));\n+ std::vector<char> recv_buf2(sizeof(kBuf) + HdrLen());\n+ ASSERT_NO_FATAL_FAILURE(ReceiveBufFrom(s2, recv_buf2.data(),\n+ recv_buf2.size()));\n- EXPECT_EQ(memcmp(recv_buf1 + sizeof(struct iphdr),\n- recv_buf2 + sizeof(struct iphdr), sizeof(kBuf)),\n+ EXPECT_EQ(memcmp(recv_buf1.data() + HdrLen(),\n+ recv_buf2.data() + HdrLen(), sizeof(kBuf)),\n0);\nASSERT_THAT(close(s2), SyscallSucceeds());\n@@ -304,7 +332,7 @@ TEST_P(RawSocketTest, SendAndReceiveViaConnect) {\nSKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));\nASSERT_THAT(\n- connect(s_, reinterpret_cast<struct sockaddr*>(&addr_), sizeof(addr_)),\n+ connect(s_, reinterpret_cast<struct sockaddr*>(&addr_), AddrLen()),\nSyscallSucceeds());\n// Arbitrary.\n@@ -313,9 +341,9 @@ TEST_P(RawSocketTest, SendAndReceiveViaConnect) {\nSyscallSucceedsWithValue(sizeof(kBuf)));\n// Receive the packet and make sure it's identical.\n- char recv_buf[sizeof(kBuf) + sizeof(struct iphdr)];\n- ASSERT_NO_FATAL_FAILURE(ReceiveBuf(recv_buf, sizeof(recv_buf)));\n- EXPECT_EQ(memcmp(recv_buf + sizeof(struct iphdr), kBuf, sizeof(kBuf)), 0);\n+ std::vector<char> recv_buf(sizeof(kBuf) + HdrLen());\n+ ASSERT_NO_FATAL_FAILURE(ReceiveBuf(recv_buf.data(), recv_buf.size()));\n+ EXPECT_EQ(memcmp(recv_buf.data() + HdrLen(), kBuf, sizeof(kBuf)), 0);\n}\n// Bind to localhost, then send and receive packets.\n@@ -323,7 +351,7 @@ TEST_P(RawSocketTest, BindSendAndReceive) {\nSKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));\nASSERT_THAT(\n- bind(s_, reinterpret_cast<struct sockaddr*>(&addr_), sizeof(addr_)),\n+ bind(s_, reinterpret_cast<struct sockaddr*>(&addr_), AddrLen()),\nSyscallSucceeds());\n// Arbitrary.\n@@ -331,9 +359,9 @@ TEST_P(RawSocketTest, BindSendAndReceive) {\nASSERT_NO_FATAL_FAILURE(SendBuf(kBuf, sizeof(kBuf)));\n// Receive the packet and make sure it's identical.\n- char recv_buf[sizeof(kBuf) + sizeof(struct iphdr)];\n- ASSERT_NO_FATAL_FAILURE(ReceiveBuf(recv_buf, sizeof(recv_buf)));\n- EXPECT_EQ(memcmp(recv_buf + sizeof(struct iphdr), kBuf, sizeof(kBuf)), 0);\n+ std::vector<char> recv_buf(sizeof(kBuf) + HdrLen());\n+ ASSERT_NO_FATAL_FAILURE(ReceiveBuf(recv_buf.data(), recv_buf.size()));\n+ EXPECT_EQ(memcmp(recv_buf.data() + HdrLen(), kBuf, sizeof(kBuf)), 0);\n}\n// Bind and connect to localhost and send/receive packets.\n@@ -341,10 +369,10 @@ TEST_P(RawSocketTest, BindConnectSendAndReceive) {\nSKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));\nASSERT_THAT(\n- bind(s_, reinterpret_cast<struct sockaddr*>(&addr_), sizeof(addr_)),\n+ bind(s_, reinterpret_cast<struct sockaddr*>(&addr_), AddrLen()),\nSyscallSucceeds());\nASSERT_THAT(\n- connect(s_, reinterpret_cast<struct sockaddr*>(&addr_), sizeof(addr_)),\n+ connect(s_, reinterpret_cast<struct sockaddr*>(&addr_), AddrLen()),\nSyscallSucceeds());\n// Arbitrary.\n@@ -352,9 +380,9 @@ TEST_P(RawSocketTest, BindConnectSendAndReceive) 
{\nASSERT_NO_FATAL_FAILURE(SendBuf(kBuf, sizeof(kBuf)));\n// Receive the packet and make sure it's identical.\n- char recv_buf[sizeof(kBuf) + sizeof(struct iphdr)];\n- ASSERT_NO_FATAL_FAILURE(ReceiveBuf(recv_buf, sizeof(recv_buf)));\n- EXPECT_EQ(memcmp(recv_buf + sizeof(struct iphdr), kBuf, sizeof(kBuf)), 0);\n+ std::vector<char> recv_buf(sizeof(kBuf) + HdrLen());\n+ ASSERT_NO_FATAL_FAILURE(ReceiveBuf(recv_buf.data(), recv_buf.size()));\n+ EXPECT_EQ(memcmp(recv_buf.data() + HdrLen(), kBuf, sizeof(kBuf)), 0);\n}\n// Check that setting SO_RCVBUF below min is clamped to the minimum\n@@ -580,20 +608,16 @@ TEST_P(RawSocketTest, SetSocketSendBuf) {\nASSERT_EQ(quarter_sz, val);\n}\n-void RawSocketTest::SendBuf(const char* buf, int buf_len) {\n- ASSERT_NO_FATAL_FAILURE(SendBufTo(s_, addr_, buf, buf_len));\n-}\n-\n// Test that receive buffer limits are not enforced when the recv buffer is\n// empty.\nTEST_P(RawSocketTest, RecvBufLimitsEmptyRecvBuffer) {\nSKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));\nASSERT_THAT(\n- bind(s_, reinterpret_cast<struct sockaddr*>(&addr_), sizeof(addr_)),\n+ bind(s_, reinterpret_cast<struct sockaddr*>(&addr_), AddrLen()),\nSyscallSucceeds());\nASSERT_THAT(\n- connect(s_, reinterpret_cast<struct sockaddr*>(&addr_), sizeof(addr_)),\n+ connect(s_, reinterpret_cast<struct sockaddr*>(&addr_), AddrLen()),\nSyscallSucceeds());\nint min = 0;\n@@ -616,10 +640,10 @@ TEST_P(RawSocketTest, RecvBufLimitsEmptyRecvBuffer) {\nASSERT_NO_FATAL_FAILURE(SendBuf(buf.data(), buf.size()));\n// Receive the packet and make sure it's identical.\n- std::vector<char> recv_buf(buf.size() + sizeof(struct iphdr));\n+ std::vector<char> recv_buf(buf.size() + HdrLen());\nASSERT_NO_FATAL_FAILURE(ReceiveBuf(recv_buf.data(), recv_buf.size()));\nEXPECT_EQ(\n- memcmp(recv_buf.data() + sizeof(struct iphdr), buf.data(), buf.size()),\n+ memcmp(recv_buf.data() + HdrLen(), buf.data(), buf.size()),\n0);\n}\n@@ -631,10 +655,10 @@ TEST_P(RawSocketTest, RecvBufLimitsEmptyRecvBuffer) {\nRandomizeBuffer(buf.data(), buf.size());\nASSERT_NO_FATAL_FAILURE(SendBuf(buf.data(), buf.size()));\n// Receive the packet and make sure it's identical.\n- std::vector<char> recv_buf(buf.size() + sizeof(struct iphdr));\n+ std::vector<char> recv_buf(buf.size() + HdrLen());\nASSERT_NO_FATAL_FAILURE(ReceiveBuf(recv_buf.data(), recv_buf.size()));\nEXPECT_EQ(\n- memcmp(recv_buf.data() + sizeof(struct iphdr), buf.data(), buf.size()),\n+ memcmp(recv_buf.data() + HdrLen(), buf.data(), buf.size()),\n0);\n}\n}\n@@ -652,10 +676,10 @@ TEST_P(RawSocketTest, RecvBufLimits) {\nSKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));\nASSERT_THAT(\n- bind(s_, reinterpret_cast<struct sockaddr*>(&addr_), sizeof(addr_)),\n+ bind(s_, reinterpret_cast<struct sockaddr*>(&addr_), AddrLen()),\nSyscallSucceeds());\nASSERT_THAT(\n- connect(s_, reinterpret_cast<struct sockaddr*>(&addr_), sizeof(addr_)),\n+ connect(s_, reinterpret_cast<struct sockaddr*>(&addr_), AddrLen()),\nSyscallSucceeds());\nint min = 0;\n@@ -716,16 +740,16 @@ TEST_P(RawSocketTest, RecvBufLimits) {\n// Verify that the expected number of packets are available to be read.\nfor (int i = 0; i < sent - 1; i++) {\n// Receive the packet and make sure it's identical.\n- std::vector<char> recv_buf(buf.size() + sizeof(struct iphdr));\n+ std::vector<char> recv_buf(buf.size() + HdrLen());\nASSERT_NO_FATAL_FAILURE(ReceiveBuf(recv_buf.data(), recv_buf.size()));\n- EXPECT_EQ(memcmp(recv_buf.data() + sizeof(struct iphdr), buf.data(),\n+ EXPECT_EQ(memcmp(recv_buf.data() + 
HdrLen(), buf.data(),\nbuf.size()),\n0);\n}\n// Assert that the last packet is dropped because the receive buffer should\n// be full after the first four packets.\n- std::vector<char> recv_buf(buf.size() + sizeof(struct iphdr));\n+ std::vector<char> recv_buf(buf.size() + HdrLen());\nstruct iovec iov = {};\niov.iov_base = static_cast<void*>(const_cast<char*>(recv_buf.data()));\niov.iov_len = buf.size();\n@@ -740,30 +764,54 @@ TEST_P(RawSocketTest, RecvBufLimits) {\n}\n}\n-void RawSocketTest::SendBufTo(int sock, const struct sockaddr_in& addr,\n- const char* buf, int buf_len) {\n+void RawSocketTest::SendBuf(const char* buf, int buf_len) {\n// It's safe to use const_cast here because sendmsg won't modify the iovec or\n// address.\nstruct iovec iov = {};\niov.iov_base = static_cast<void*>(const_cast<char*>(buf));\niov.iov_len = static_cast<size_t>(buf_len);\nstruct msghdr msg = {};\n- msg.msg_name = static_cast<void*>(const_cast<struct sockaddr_in*>(&addr));\n- msg.msg_namelen = sizeof(addr);\n+ msg.msg_name = static_cast<void*>(&addr_);\n+ msg.msg_namelen = AddrLen();\nmsg.msg_iov = &iov;\nmsg.msg_iovlen = 1;\nmsg.msg_control = NULL;\nmsg.msg_controllen = 0;\nmsg.msg_flags = 0;\n- ASSERT_THAT(sendmsg(sock, &msg, 0), SyscallSucceedsWithValue(buf_len));\n+ ASSERT_THAT(sendmsg(s_, &msg, 0), SyscallSucceedsWithValue(buf_len));\n}\nvoid RawSocketTest::ReceiveBuf(char* recv_buf, size_t recv_buf_len) {\n- ASSERT_NO_FATAL_FAILURE(RecvNoCmsg(s_, recv_buf, recv_buf_len));\n+ ASSERT_NO_FATAL_FAILURE(ReceiveBufFrom(s_, recv_buf, recv_buf_len));\n+}\n+\n+void RawSocketTest::ReceiveBufFrom(int sock, char* recv_buf,\n+ size_t recv_buf_len) {\n+ ASSERT_NO_FATAL_FAILURE(RecvNoCmsg(sock, recv_buf, recv_buf_len));\n}\nINSTANTIATE_TEST_SUITE_P(AllInetTests, RawSocketTest,\n- ::testing::Values(IPPROTO_TCP, IPPROTO_UDP));\n+ ::testing::Combine(\n+ ::testing::Values(IPPROTO_TCP, IPPROTO_UDP),\n+ ::testing::Values(AF_INET, AF_INET6)));\n+\n+// AF_INET6+SOCK_RAW+IPPROTO_RAW sockets can be created, but not written to.\n+TEST(RawSocketTest, IPv6ProtoRaw) {\n+ SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));\n+\n+ int sock;\n+ ASSERT_THAT(sock = socket(AF_INET6, SOCK_RAW, IPPROTO_RAW),\n+ SyscallSucceeds());\n+\n+ // Verify that writing yields EINVAL.\n+ char buf[] = \"This is such a weird little edge case\";\n+ struct sockaddr_in6 sin6 = {};\n+ sin6.sin6_family = AF_INET6;\n+ sin6.sin6_addr = in6addr_loopback;\n+ ASSERT_THAT(sendto(sock, buf, sizeof(buf), 0 /* flags */,\n+ reinterpret_cast<struct sockaddr*>(&sin6), sizeof(sin6)),\n+ SyscallFailsWithErrno(EINVAL));\n+}\n} // namespace\n"
}
] | Go | Apache License 2.0 | google/gvisor | IPv6 raw sockets. Needed for ip6tables.
IPv6 raw sockets never include the IPv6 header.
PiperOrigin-RevId: 318582989 |
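Aside, not part of the record above: a minimal Go sketch of the payload-offset difference that the commit's HdrLen() helper captures, namely that IPv4 raw socket reads start with the IP header while IPv6 raw socket reads never include it. The golang.org/x/sys/unix import and the payloadOffset name are illustrative assumptions, not code from the repository.

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

// payloadOffset returns where application data starts in a buffer read from a
// raw socket of the given address family.
func payloadOffset(family int) int {
	if family == unix.AF_INET {
		// IPv4 raw socket reads begin with the IPv4 header (20 bytes when
		// there are no options, matching sizeof(struct iphdr) in the test).
		return 20
	}
	// IPv6 raw socket reads never include the IPv6 header.
	return 0
}

func main() {
	fmt.Println("IPv4 offset:", payloadOffset(unix.AF_INET))
	fmt.Println("IPv6 offset:", payloadOffset(unix.AF_INET6))
}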
259,860 | 26.06.2020 19:40:52 | 25,200 | 85be13d9a3faae27d0991b55cfb7944c9f1e1284 | Add tests for eventfd/timerfd/inotify operations that should return ESPIPE. | [
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/eventfd.cc",
"new_path": "test/syscalls/linux/eventfd.cc",
"diff": "@@ -100,20 +100,21 @@ TEST(EventfdTest, SmallRead) {\nASSERT_THAT(read(efd.get(), &l, 4), SyscallFailsWithErrno(EINVAL));\n}\n-TEST(EventfdTest, PreadIllegalSeek) {\n- FileDescriptor efd =\n- ASSERT_NO_ERRNO_AND_VALUE(NewEventFD(0, EFD_NONBLOCK | EFD_SEMAPHORE));\n-\n- uint64_t l = 0;\n- ASSERT_THAT(pread(efd.get(), &l, 4, 0), SyscallFailsWithErrno(ESPIPE));\n+TEST(EventfdTest, IllegalSeek) {\n+ FileDescriptor efd = ASSERT_NO_ERRNO_AND_VALUE(NewEventFD(0, 0));\n+ EXPECT_THAT(lseek(efd.get(), 0, SEEK_SET), SyscallFailsWithErrno(ESPIPE));\n}\n-TEST(EventfdTest, PwriteIllegalSeek) {\n- FileDescriptor efd =\n- ASSERT_NO_ERRNO_AND_VALUE(NewEventFD(0, EFD_NONBLOCK | EFD_SEMAPHORE));\n+TEST(EventfdTest, IllegalPread) {\n+ FileDescriptor efd = ASSERT_NO_ERRNO_AND_VALUE(NewEventFD(0, 0));\n+ int l;\n+ EXPECT_THAT(pread(efd.get(), &l, sizeof(l), 0),\n+ SyscallFailsWithErrno(ESPIPE));\n+}\n- uint64_t l = 0;\n- ASSERT_THAT(pwrite(efd.get(), &l, 4, 0), SyscallFailsWithErrno(ESPIPE));\n+TEST(EventfdTest, IllegalPwrite) {\n+ FileDescriptor efd = ASSERT_NO_ERRNO_AND_VALUE(NewEventFD(0, 0));\n+ EXPECT_THAT(pwrite(efd.get(), \"x\", 1, 0), SyscallFailsWithErrno(ESPIPE));\n}\nTEST(EventfdTest, BigWrite) {\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/inotify.cc",
"new_path": "test/syscalls/linux/inotify.cc",
"diff": "@@ -333,9 +333,27 @@ PosixErrorOr<int> InotifyAddWatch(int fd, const std::string& path,\nreturn wd;\n}\n-TEST(Inotify, InotifyFdNotWritable) {\n+TEST(Inotify, IllegalSeek) {\nconst FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(InotifyInit1(0));\n- EXPECT_THAT(write(fd.get(), \"x\", 1), SyscallFailsWithErrno(EBADF));\n+ EXPECT_THAT(lseek(fd.get(), 0, SEEK_SET), SyscallFailsWithErrno(ESPIPE));\n+}\n+\n+TEST(Inotify, IllegalPread) {\n+ const FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(InotifyInit1(0));\n+ int val;\n+ EXPECT_THAT(pread(fd.get(), &val, sizeof(val), 0),\n+ SyscallFailsWithErrno(ESPIPE));\n+}\n+\n+TEST(Inotify, IllegalPwrite) {\n+ const FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(InotifyInit1(0));\n+ EXPECT_THAT(pwrite(fd.get(), \"x\", 1, 0), SyscallFailsWithErrno(ESPIPE));\n+}\n+\n+TEST(Inotify, IllegalWrite) {\n+ const FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(InotifyInit1(0));\n+ int val = 0;\n+ EXPECT_THAT(write(fd.get(), &val, sizeof(val)), SyscallFailsWithErrno(EBADF));\n}\nTEST(Inotify, InitFlags) {\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/timerfd.cc",
"new_path": "test/syscalls/linux/timerfd.cc",
"diff": "@@ -204,16 +204,33 @@ TEST_P(TimerfdTest, SetAbsoluteTime) {\nEXPECT_EQ(1, val);\n}\n-TEST_P(TimerfdTest, IllegalReadWrite) {\n+TEST_P(TimerfdTest, IllegalSeek) {\n+ auto const tfd = ASSERT_NO_ERRNO_AND_VALUE(TimerfdCreate(GetParam(), 0));\n+ if (!IsRunningWithVFS1()) {\n+ EXPECT_THAT(lseek(tfd.get(), 0, SEEK_SET), SyscallFailsWithErrno(ESPIPE));\n+ }\n+}\n+\n+TEST_P(TimerfdTest, IllegalPread) {\n+ auto const tfd = ASSERT_NO_ERRNO_AND_VALUE(TimerfdCreate(GetParam(), 0));\n+ int val;\n+ EXPECT_THAT(pread(tfd.get(), &val, sizeof(val), 0),\n+ SyscallFailsWithErrno(ESPIPE));\n+}\n+\n+TEST_P(TimerfdTest, IllegalPwrite) {\n+ auto const tfd = ASSERT_NO_ERRNO_AND_VALUE(TimerfdCreate(GetParam(), 0));\n+ EXPECT_THAT(pwrite(tfd.get(), \"x\", 1, 0), SyscallFailsWithErrno(ESPIPE));\n+ if (!IsRunningWithVFS1()) {\n+ }\n+}\n+\n+TEST_P(TimerfdTest, IllegalWrite) {\nauto const tfd =\nASSERT_NO_ERRNO_AND_VALUE(TimerfdCreate(GetParam(), TFD_NONBLOCK));\nuint64_t val = 0;\n- EXPECT_THAT(PreadFd(tfd.get(), &val, sizeof(val), 0),\n- SyscallFailsWithErrno(ESPIPE));\n- EXPECT_THAT(WriteFd(tfd.get(), &val, sizeof(val)),\n+ EXPECT_THAT(write(tfd.get(), &val, sizeof(val)),\nSyscallFailsWithErrno(EINVAL));\n- EXPECT_THAT(PwriteFd(tfd.get(), &val, sizeof(val), 0),\n- SyscallFailsWithErrno(ESPIPE));\n}\nstd::string PrintClockId(::testing::TestParamInfo<int> info) {\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add tests for eventfd/timerfd/inotify operations that should return ESPIPE.
PiperOrigin-RevId: 318585377 |
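Aside, not part of the record above: a small Go sketch, assuming golang.org/x/sys/unix, of the behavior these tests pin down: descriptors such as eventfds have no file offset, so lseek and the positional I/O calls fail with ESPIPE.

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	fd, err := unix.Eventfd(0, 0)
	if err != nil {
		panic(err)
	}
	defer unix.Close(fd)

	// Seeking has no meaning for an eventfd, so the kernel reports ESPIPE.
	if _, err := unix.Seek(fd, 0, unix.SEEK_SET); err == unix.ESPIPE {
		fmt.Println("lseek: ESPIPE, as the test expects")
	}
	// The same applies to pread/pwrite, which take an explicit offset.
	buf := make([]byte, 8)
	if _, err := unix.Pread(fd, buf, 0); err == unix.ESPIPE {
		fmt.Println("pread: ESPIPE, as the test expects")
	}
}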
259,854 | 26.06.2020 21:09:25 | 25,200 | bab3c36efb5486fdfbfa52d8baf810c7a7c7efd8 | Add style guide. | [
{
"change_type": "MODIFY",
"old_path": "CONTRIBUTING.md",
"new_path": "CONTRIBUTING.md",
"diff": "@@ -37,10 +37,8 @@ Dependencies can be added by using `go mod get`. In order to keep the\n### Coding Guidelines\n-All Go code should conform to the [Go style guidelines][gostyle]. C++ code\n-should conform to the [Google C++ Style Guide][cppstyle] and the guidelines\n-described for tests. Note that code may be automatically formatted per the\n-guidelines when merged.\n+All code should comply with the [style guide](g3doc/style.md). Note that code\n+may be automatically formatted per the guidelines when merged.\nAs a secure runtime, we need to maintain the safety of all of code included in\ngVisor. The following rules help mitigate issues.\n@@ -125,9 +123,7 @@ Contributions made by corporations are covered by a different agreement than the\none above, the\n[Software Grant and Corporate Contributor License Agreement][gccla].\n-[cppstyle]: https://google.github.io/styleguide/cppguide.html\n[gcla]: https://cla.developers.google.com/about/google-individual\n[gccla]: https://cla.developers.google.com/about/google-corporate\n[github]: https://github.com/google/gvisor/compare\n[gvisor-dev-list]: https://groups.google.com/forum/#!forum/gvisor-dev\n-[gostyle]: https://github.com/golang/go/wiki/CodeReviewComments\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "g3doc/style.md",
"diff": "+# Provisional style guide\n+\n+> These guidelines are new and may change. This note will be removed when\n+> consensus is reached.\n+\n+Not all existing code will comply with this style guide, but new code should.\n+Further, it is a goal to eventually update all existing code to be in\n+compliance.\n+\n+## All code\n+\n+### Early exit\n+\n+All code, unless it substantially increases the line count or complexity, should\n+use early exits from loops and functions where possible.\n+\n+## Go specific\n+\n+All Go code should comply with the [Go Code Review Comments][gostyle] and\n+[Effective Go][effective_go] guides, as well as the additional guidelines\n+described below.\n+\n+### Mutexes\n+\n+#### Naming\n+\n+Mutexes should be named mu or xxxMu. Mutexes as a general rule should not be\n+exported. Instead, export methods which use the mutexes to avoid leaky\n+abstractions.\n+\n+#### Location\n+\n+Mutexes should be sibling fields to the fields that they protect. Mutexes should\n+not be declared as global variables, instead use a struct (anonymous ok, but\n+naming conventions still apply).\n+\n+Mutexes should be ordered before the fields that they protect.\n+\n+#### Comments\n+\n+Mutexes should have a comment on their declaration explaining any ordering\n+requirements (or pointing to where this information can be found), if\n+applicable. There is no need for a comment explaining which fields are\n+protected.\n+\n+Each field or variable protected by a mutex should state as such in a comment on\n+the field or variable declaration.\n+\n+### Unused returns\n+\n+Unused returns should be explicitly ignored with underscores. If there is a\n+function which is commonly used without using its return(s), a wrapper function\n+should be declared which explicitly ignores the returns. That said, in many\n+cases, it may make sense for the wrapper to check the returns.\n+\n+### Formatting verbs\n+\n+Built-in types should use their associated verbs (e.g. %d for integral types),\n+but other types should use a %v variant, even if they implement fmt.Stringer.\n+The built-in `error` type should use %w when formatted with `fmt.Errorf`, but\n+only then.\n+\n+### Wrapping\n+\n+Comments should be wrapped at 80 columns with a 2 space tab size.\n+\n+Code does not need to be wrapped, but if wrapping would make it more readable,\n+it should be wrapped with each subcomponent of the thing being wrapped on its\n+own line. For example, if a struct is split between lines, each field should be\n+on its own line.\n+\n+#### Example\n+\n+```go\n+_ = exec.Cmd{\n+ Path: \"/foo/bar\",\n+ Args: []string{\"-baz\"},\n+}\n+```\n+\n+## C++ specific\n+\n+C++ code should conform to the [Google C++ Style Guide][cppstyle] and the\n+guidelines described for tests.\n+\n+[cppstyle]: https://google.github.io/styleguide/cppguide.html\n+[gostyle]: https://github.com/golang/go/wiki/CodeReviewComments\n+[effective_go]: https://golang.org/doc/effective_go.html\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add style guide.
PiperOrigin-RevId: 318591900 |
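Aside, not part of the record above: a hypothetical Go type sketching the mutex conventions the new style guide describes, with an unexported mu declared before the field it protects, the guarded field annotated, and behavior exported instead of the mutex itself. The counter name is made up for illustration.

package main

import (
	"fmt"
	"sync"
)

type counter struct {
	// mu has no ordering requirements.
	mu sync.Mutex

	// value is protected by mu.
	value int
}

// Increment exports behavior rather than the mutex, avoiding a leaky
// abstraction.
func (c *counter) Increment() {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.value++
}

// Value returns the current count.
func (c *counter) Value() int {
	c.mu.Lock()
	defer c.mu.Unlock()
	return c.value
}

func main() {
	var c counter
	c.Increment()
	fmt.Println(c.Value())
}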
259,860 | 27.06.2020 14:38:20 | 25,200 | 02d552d07c4415978d2ce418fb16baf238d0ff78 | Support sticky bit in vfs2.
Updates | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/gofer/filesystem.go",
"new_path": "pkg/sentry/fsimpl/gofer/filesystem.go",
"diff": "@@ -16,6 +16,7 @@ package gofer\nimport (\n\"sync\"\n+ \"sync/atomic\"\n\"gvisor.dev/gvisor/pkg/abi/linux\"\n\"gvisor.dev/gvisor/pkg/context\"\n@@ -464,21 +465,61 @@ func (fs *filesystem) unlinkAt(ctx context.Context, rp *vfs.ResolvingPath, dir b\ndefer mntns.DecRef()\nparent.dirMu.Lock()\ndefer parent.dirMu.Unlock()\n+\nchild, ok := parent.children[name]\nif ok && child == nil {\nreturn syserror.ENOENT\n}\n- // We only need a dentry representing the file at name if it can be a mount\n- // point. If child is nil, then it can't be a mount point. If child is\n- // non-nil but stale, the actual file can't be a mount point either; we\n- // detect this case by just speculatively calling PrepareDeleteDentry and\n- // only revalidating the dentry if that fails (indicating that the existing\n- // dentry is a mount point).\n+\n+ sticky := atomic.LoadUint32(&parent.mode)&linux.ModeSticky != 0\n+ if sticky {\n+ if !ok {\n+ // If the sticky bit is set, we need to retrieve the child to determine\n+ // whether removing it is allowed.\n+ child, err = fs.stepLocked(ctx, rp, parent, false /* mayFollowSymlinks */, &ds)\n+ if err != nil {\n+ return err\n+ }\n+ } else if child != nil && !child.cachedMetadataAuthoritative() {\n+ // Make sure the dentry representing the file at name is up to date\n+ // before examining its metadata.\n+ child, err = fs.revalidateChildLocked(ctx, vfsObj, parent, name, child, &ds)\n+ if err != nil {\n+ return err\n+ }\n+ }\n+ if err := parent.mayDelete(rp.Credentials(), child); err != nil {\n+ return err\n+ }\n+ }\n+\n+ // If a child dentry exists, prepare to delete it. This should fail if it is\n+ // a mount point. We detect mount points by speculatively calling\n+ // PrepareDeleteDentry, which fails if child is a mount point. However, we\n+ // may need to revalidate the file in this case to make sure that it has not\n+ // been deleted or replaced on the remote fs, in which case the mount point\n+ // will have disappeared. If calling PrepareDeleteDentry fails again on the\n+ // up-to-date dentry, we can be sure that it is a mount point.\n+ //\n+ // Also note that if child is nil, then it can't be a mount point.\nif child != nil {\n+ // Hold child.dirMu so we can check child.children and\n+ // child.syntheticChildren. We don't access these fields until a bit later,\n+ // but locking child.dirMu after calling vfs.PrepareDeleteDentry() would\n+ // create an inconsistent lock ordering between dentry.dirMu and\n+ // vfs.Dentry.mu (in the VFS lock order, it would make dentry.dirMu both \"a\n+ // FilesystemImpl lock\" and \"a lock acquired by a FilesystemImpl between\n+ // PrepareDeleteDentry and CommitDeleteDentry). 
To avoid this, lock\n+ // child.dirMu before calling PrepareDeleteDentry.\nchild.dirMu.Lock()\ndefer child.dirMu.Unlock()\nif err := vfsObj.PrepareDeleteDentry(mntns, &child.vfsd); err != nil {\n- if parent.cachedMetadataAuthoritative() {\n+ // We can skip revalidation in several cases:\n+ // - We are not in InteropModeShared\n+ // - The parent directory is synthetic, in which case the child must also\n+ // be synthetic\n+ // - We already updated the child during the sticky bit check above\n+ if parent.cachedMetadataAuthoritative() || sticky {\nreturn err\n}\nchild, err = fs.revalidateChildLocked(ctx, vfsObj, parent, name, child, &ds)\n@@ -1100,7 +1141,8 @@ func (fs *filesystem) RenameAt(ctx context.Context, rp *vfs.ResolvingPath, oldPa\nreturn err\n}\n}\n- if err := oldParent.checkPermissions(rp.Credentials(), vfs.MayWrite|vfs.MayExec); err != nil {\n+ creds := rp.Credentials()\n+ if err := oldParent.checkPermissions(creds, vfs.MayWrite|vfs.MayExec); err != nil {\nreturn err\n}\nvfsObj := rp.VirtualFilesystem()\n@@ -1115,12 +1157,15 @@ func (fs *filesystem) RenameAt(ctx context.Context, rp *vfs.ResolvingPath, oldPa\nif renamed == nil {\nreturn syserror.ENOENT\n}\n+ if err := oldParent.mayDelete(creds, renamed); err != nil {\n+ return err\n+ }\nif renamed.isDir() {\nif renamed == newParent || genericIsAncestorDentry(renamed, newParent) {\nreturn syserror.EINVAL\n}\nif oldParent != newParent {\n- if err := renamed.checkPermissions(rp.Credentials(), vfs.MayWrite); err != nil {\n+ if err := renamed.checkPermissions(creds, vfs.MayWrite); err != nil {\nreturn err\n}\n}\n@@ -1131,7 +1176,7 @@ func (fs *filesystem) RenameAt(ctx context.Context, rp *vfs.ResolvingPath, oldPa\n}\nif oldParent != newParent {\n- if err := newParent.checkPermissions(rp.Credentials(), vfs.MayWrite|vfs.MayExec); err != nil {\n+ if err := newParent.checkPermissions(creds, vfs.MayWrite|vfs.MayExec); err != nil {\nreturn err\n}\nnewParent.dirMu.Lock()\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/gofer/gofer.go",
"new_path": "pkg/sentry/fsimpl/gofer/gofer.go",
"diff": "@@ -1003,6 +1003,10 @@ func (d *dentry) checkPermissions(creds *auth.Credentials, ats vfs.AccessTypes)\nreturn vfs.GenericCheckPermissions(creds, ats, linux.FileMode(atomic.LoadUint32(&d.mode)), auth.KUID(atomic.LoadUint32(&d.uid)), auth.KGID(atomic.LoadUint32(&d.gid)))\n}\n+func (d *dentry) mayDelete(creds *auth.Credentials, child *dentry) error {\n+ return vfs.CheckDeleteSticky(creds, linux.FileMode(atomic.LoadUint32(&d.mode)), auth.KUID(atomic.LoadUint32(&child.uid)))\n+}\n+\nfunc dentryUIDFromP9UID(uid p9.UID) uint32 {\nif !uid.Ok() {\nreturn uint32(auth.OverflowUID)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/kernfs/inode_impl_util.go",
"new_path": "pkg/sentry/fsimpl/kernfs/inode_impl_util.go",
"diff": "@@ -471,6 +471,8 @@ func (o *OrderedChildren) Unlink(ctx context.Context, name string, child *vfs.De\nif err := o.checkExistingLocked(name, child); err != nil {\nreturn err\n}\n+\n+ // TODO(gvisor.dev/issue/3027): Check sticky bit before removing.\no.removeLocked(name)\nreturn nil\n}\n@@ -518,6 +520,8 @@ func (o *OrderedChildren) Rename(ctx context.Context, oldname, newname string, c\nif err := o.checkExistingLocked(oldname, child); err != nil {\nreturn nil, err\n}\n+\n+ // TODO(gvisor.dev/issue/3027): Check sticky bit before removing.\nreplaced := dst.replaceChildLocked(newname, child)\nreturn replaced, nil\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/tmpfs/directory.go",
"new_path": "pkg/sentry/fsimpl/tmpfs/directory.go",
"diff": "@@ -81,6 +81,10 @@ func (dir *directory) removeChildLocked(child *dentry) {\ndir.iterMu.Unlock()\n}\n+func (dir *directory) mayDelete(creds *auth.Credentials, child *dentry) error {\n+ return vfs.CheckDeleteSticky(creds, linux.FileMode(atomic.LoadUint32(&dir.inode.mode)), auth.KUID(atomic.LoadUint32(&child.inode.uid)))\n+}\n+\ntype directoryFD struct {\nfileDescription\nvfs.DirectoryFileDescriptionDefaultImpl\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/tmpfs/filesystem.go",
"new_path": "pkg/sentry/fsimpl/tmpfs/filesystem.go",
"diff": "@@ -492,6 +492,9 @@ func (fs *filesystem) RenameAt(ctx context.Context, rp *vfs.ResolvingPath, oldPa\nif !ok {\nreturn syserror.ENOENT\n}\n+ if err := oldParentDir.mayDelete(rp.Credentials(), renamed); err != nil {\n+ return err\n+ }\n// Note that we don't need to call rp.CheckMount(), since if renamed is a\n// mount point then we want to rename the mount point, not anything in the\n// mounted filesystem.\n@@ -606,6 +609,9 @@ func (fs *filesystem) RmdirAt(ctx context.Context, rp *vfs.ResolvingPath) error\nif !ok {\nreturn syserror.ENOENT\n}\n+ if err := parentDir.mayDelete(rp.Credentials(), child); err != nil {\n+ return err\n+ }\nchildDir, ok := child.inode.impl.(*directory)\nif !ok {\nreturn syserror.ENOTDIR\n@@ -716,6 +722,9 @@ func (fs *filesystem) UnlinkAt(ctx context.Context, rp *vfs.ResolvingPath) error\nif !ok {\nreturn syserror.ENOENT\n}\n+ if err := parentDir.mayDelete(rp.Credentials(), child); err != nil {\n+ return err\n+ }\nif child.inode.isDir() {\nreturn syserror.EISDIR\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/vfs/permissions.go",
"new_path": "pkg/sentry/vfs/permissions.go",
"diff": "@@ -230,6 +230,20 @@ func CheckSetStat(ctx context.Context, creds *auth.Credentials, stat *linux.Stat\nreturn nil\n}\n+// CheckDeleteSticky checks whether the sticky bit is set on a directory with\n+// the given file mode, and if so, checks whether creds has permission to\n+// remove a file owned by childKUID from a directory with the given mode.\n+// CheckDeleteSticky is consistent with fs/linux.h:check_sticky().\n+func CheckDeleteSticky(creds *auth.Credentials, parentMode linux.FileMode, childKUID auth.KUID) error {\n+ if parentMode&linux.ModeSticky == 0 {\n+ return nil\n+ }\n+ if CanActAsOwner(creds, childKUID) {\n+ return nil\n+ }\n+ return syserror.EPERM\n+}\n+\n// CanActAsOwner returns true if creds can act as the owner of a file with the\n// given owning UID, consistent with Linux's\n// fs/inode.c:inode_owner_or_capable().\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/BUILD",
"new_path": "test/syscalls/BUILD",
"diff": "@@ -942,6 +942,7 @@ syscall_test(\nsyscall_test(\nadd_overlay = True,\ntest = \"//test/syscalls/linux:sticky_test\",\n+ vfs2 = \"True\",\n)\nsyscall_test(\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/sticky.cc",
"new_path": "test/syscalls/linux/sticky.cc",
"diff": "@@ -40,11 +40,14 @@ namespace {\nTEST(StickyTest, StickyBitPermDenied) {\nSKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SETUID)));\n- auto dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());\n- EXPECT_THAT(chmod(dir.path().c_str(), 0777 | S_ISVTX), SyscallSucceeds());\n- const FileDescriptor dirfd =\n- ASSERT_NO_ERRNO_AND_VALUE(Open(dir.path(), O_DIRECTORY));\n- ASSERT_THAT(mkdirat(dirfd.get(), \"NewDir\", 0755), SyscallSucceeds());\n+ const TempPath parent = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());\n+ EXPECT_THAT(chmod(parent.path().c_str(), 0777 | S_ISVTX), SyscallSucceeds());\n+ const TempPath file = ASSERT_NO_ERRNO_AND_VALUE(\n+ TempPath::CreateFileWith(parent.path(), \"some content\", 0755));\n+ const TempPath dir =\n+ ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDirWith(parent.path(), 0755));\n+ const TempPath link = ASSERT_NO_ERRNO_AND_VALUE(\n+ TempPath::CreateSymlinkTo(parent.path(), file.path()));\n// Drop privileges and change IDs only in child thread, or else this parent\n// thread won't be able to open some log files after the test ends.\n@@ -62,18 +65,26 @@ TEST(StickyTest, StickyBitPermDenied) {\nsyscall(SYS_setresuid, -1, absl::GetFlag(FLAGS_scratch_uid), -1),\nSyscallSucceeds());\n- EXPECT_THAT(unlinkat(dirfd.get(), \"NewDir\", AT_REMOVEDIR),\n+ std::string new_path = NewTempAbsPath();\n+ EXPECT_THAT(rename(file.path().c_str(), new_path.c_str()),\nSyscallFailsWithErrno(EPERM));\n+ EXPECT_THAT(unlink(file.path().c_str()), SyscallFailsWithErrno(EPERM));\n+ EXPECT_THAT(rmdir(dir.path().c_str()), SyscallFailsWithErrno(EPERM));\n+ EXPECT_THAT(unlink(link.path().c_str()), SyscallFailsWithErrno(EPERM));\n});\n}\nTEST(StickyTest, StickyBitSameUID) {\nSKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SETUID)));\n- auto dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());\n- EXPECT_THAT(chmod(dir.path().c_str(), 0777 | S_ISVTX), SyscallSucceeds());\n- std::string path = JoinPath(dir.path(), \"NewDir\");\n- ASSERT_THAT(mkdir(path.c_str(), 0755), SyscallSucceeds());\n+ const TempPath parent = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());\n+ EXPECT_THAT(chmod(parent.path().c_str(), 0777 | S_ISVTX), SyscallSucceeds());\n+ const TempPath file = ASSERT_NO_ERRNO_AND_VALUE(\n+ TempPath::CreateFileWith(parent.path(), \"some content\", 0755));\n+ const TempPath dir =\n+ ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDirWith(parent.path(), 0755));\n+ const TempPath link = ASSERT_NO_ERRNO_AND_VALUE(\n+ TempPath::CreateSymlinkTo(parent.path(), file.path()));\n// Drop privileges and change IDs only in child thread, or else this parent\n// thread won't be able to open some log files after the test ends.\n@@ -89,18 +100,26 @@ TEST(StickyTest, StickyBitSameUID) {\nSyscallSucceeds());\n// We still have the same EUID.\n- EXPECT_THAT(rmdir(path.c_str()), SyscallSucceeds());\n+ std::string new_path = NewTempAbsPath();\n+ EXPECT_THAT(rename(file.path().c_str(), new_path.c_str()),\n+ SyscallSucceeds());\n+ EXPECT_THAT(unlink(new_path.c_str()), SyscallSucceeds());\n+ EXPECT_THAT(rmdir(dir.path().c_str()), SyscallSucceeds());\n+ EXPECT_THAT(unlink(link.path().c_str()), SyscallSucceeds());\n});\n}\nTEST(StickyTest, StickyBitCapFOWNER) {\nSKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SETUID)));\n- auto dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());\n- EXPECT_THAT(chmod(dir.path().c_str(), 0777 | S_ISVTX), SyscallSucceeds());\n- const FileDescriptor dirfd =\n- ASSERT_NO_ERRNO_AND_VALUE(Open(dir.path(), O_DIRECTORY));\n- ASSERT_THAT(mkdirat(dirfd.get(), 
\"NewDir\", 0755), SyscallSucceeds());\n+ const TempPath parent = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());\n+ EXPECT_THAT(chmod(parent.path().c_str(), 0777 | S_ISVTX), SyscallSucceeds());\n+ const TempPath file = ASSERT_NO_ERRNO_AND_VALUE(\n+ TempPath::CreateFileWith(parent.path(), \"some content\", 0755));\n+ const TempPath dir =\n+ ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDirWith(parent.path(), 0755));\n+ const TempPath link = ASSERT_NO_ERRNO_AND_VALUE(\n+ TempPath::CreateSymlinkTo(parent.path(), file.path()));\n// Drop privileges and change IDs only in child thread, or else this parent\n// thread won't be able to open some log files after the test ends.\n@@ -117,8 +136,12 @@ TEST(StickyTest, StickyBitCapFOWNER) {\nSyscallSucceeds());\nEXPECT_NO_ERRNO(SetCapability(CAP_FOWNER, true));\n- EXPECT_THAT(unlinkat(dirfd.get(), \"NewDir\", AT_REMOVEDIR),\n+ std::string new_path = NewTempAbsPath();\n+ EXPECT_THAT(rename(file.path().c_str(), new_path.c_str()),\nSyscallSucceeds());\n+ EXPECT_THAT(unlink(new_path.c_str()), SyscallSucceeds());\n+ EXPECT_THAT(rmdir(dir.path().c_str()), SyscallSucceeds());\n+ EXPECT_THAT(unlink(link.path().c_str()), SyscallSucceeds());\n});\n}\n} // namespace\n"
}
] | Go | Apache License 2.0 | google/gvisor | Support sticky bit in vfs2.
Updates #2923.
PiperOrigin-RevId: 318648128 |
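Aside, not part of the record above: a simplified Go model, using a made-up mayDelete helper, of the rule the new vfs.CheckDeleteSticky enforces: when the parent directory has the sticky bit set, removal is only allowed to a caller who can act as the file's owner (owner match or CAP_FOWNER), and otherwise the result is EPERM.

package main

import (
	"fmt"
	"os"
)

// mayDelete mirrors the shape of CheckDeleteSticky: canActAsOwner stands in
// for the credentials check (owner match or CAP_FOWNER).
func mayDelete(parentMode os.FileMode, canActAsOwner bool) error {
	if parentMode&os.ModeSticky == 0 {
		// No sticky bit: the usual write+exec check on the parent decides.
		return nil
	}
	if canActAsOwner {
		return nil
	}
	return os.ErrPermission // Surfaces to userspace as EPERM.
}

func main() {
	tmpLike := os.ModeDir | os.ModeSticky | 0o777
	fmt.Println(mayDelete(tmpLike, false)) // permission denied
	fmt.Println(mayDelete(tmpLike, true))  // <nil>
}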
259,860 | 27.06.2020 21:32:16 | 25,200 | e8f1a5c1f652ba7abb8c4bd842d6afdcab03865a | Port GETOWN, SETOWN fcntls to vfs2.
Also make some fixes to vfs1's F_SETOWN. The fcntl test now entirely passes
on vfs2.
Fixes | [
{
"change_type": "MODIFY",
"old_path": "pkg/abi/linux/fcntl.go",
"new_path": "pkg/abi/linux/fcntl.go",
"diff": "@@ -55,7 +55,7 @@ type Flock struct {\n_ [4]byte\n}\n-// Flags for F_SETOWN_EX and F_GETOWN_EX.\n+// Owner types for F_SETOWN_EX and F_GETOWN_EX.\nconst (\nF_OWNER_TID = 0\nF_OWNER_PID = 1\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/fasync/BUILD",
"new_path": "pkg/sentry/kernel/fasync/BUILD",
"diff": "@@ -11,6 +11,7 @@ go_library(\n\"//pkg/sentry/fs\",\n\"//pkg/sentry/kernel\",\n\"//pkg/sentry/kernel/auth\",\n+ \"//pkg/sentry/vfs\",\n\"//pkg/sync\",\n\"//pkg/waiter\",\n],\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/fasync/fasync.go",
"new_path": "pkg/sentry/kernel/fasync/fasync.go",
"diff": "@@ -20,15 +20,21 @@ import (\n\"gvisor.dev/gvisor/pkg/sentry/fs\"\n\"gvisor.dev/gvisor/pkg/sentry/kernel\"\n\"gvisor.dev/gvisor/pkg/sentry/kernel/auth\"\n+ \"gvisor.dev/gvisor/pkg/sentry/vfs\"\n\"gvisor.dev/gvisor/pkg/sync\"\n\"gvisor.dev/gvisor/pkg/waiter\"\n)\n-// New creates a new FileAsync.\n+// New creates a new fs.FileAsync.\nfunc New() fs.FileAsync {\nreturn &FileAsync{}\n}\n+// NewVFS2 creates a new vfs.FileAsync.\n+func NewVFS2() vfs.FileAsync {\n+ return &FileAsync{}\n+}\n+\n// FileAsync sends signals when the registered file is ready for IO.\n//\n// +stateify savable\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/syscalls/linux/sys_file.go",
"new_path": "pkg/sentry/syscalls/linux/sys_file.go",
"diff": "@@ -900,15 +900,21 @@ func fGetOwn(t *kernel.Task, file *fs.File) int32 {\n//\n// If who is positive, it represents a PID. If negative, it represents a PGID.\n// If the PID or PGID is invalid, the owner is silently unset.\n-func fSetOwn(t *kernel.Task, file *fs.File, who int32) {\n+func fSetOwn(t *kernel.Task, file *fs.File, who int32) error {\na := file.Async(fasync.New).(*fasync.FileAsync)\nif who < 0 {\n+ // Check for overflow before flipping the sign.\n+ if who-1 > who {\n+ return syserror.EINVAL\n+ }\npg := t.PIDNamespace().ProcessGroupWithID(kernel.ProcessGroupID(-who))\na.SetOwnerProcessGroup(t, pg)\n- }\n+ } else {\ntg := t.PIDNamespace().ThreadGroupWithID(kernel.ThreadID(who))\na.SetOwnerThreadGroup(t, tg)\n}\n+ return nil\n+}\n// Fcntl implements linux syscall fcntl(2).\nfunc Fcntl(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {\n@@ -1042,8 +1048,7 @@ func Fcntl(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall\ncase linux.F_GETOWN:\nreturn uintptr(fGetOwn(t, file)), nil, nil\ncase linux.F_SETOWN:\n- fSetOwn(t, file, args[2].Int())\n- return 0, nil, nil\n+ return 0, nil, fSetOwn(t, file, args[2].Int())\ncase linux.F_GETOWN_EX:\naddr := args[2].Pointer()\nowner := fGetOwnEx(t, file)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/syscalls/linux/vfs2/BUILD",
"new_path": "pkg/sentry/syscalls/linux/vfs2/BUILD",
"diff": "@@ -54,6 +54,7 @@ go_library(\n\"//pkg/sentry/fsimpl/tmpfs\",\n\"//pkg/sentry/kernel\",\n\"//pkg/sentry/kernel/auth\",\n+ \"//pkg/sentry/kernel/fasync\",\n\"//pkg/sentry/kernel/pipe\",\n\"//pkg/sentry/kernel/time\",\n\"//pkg/sentry/limits\",\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/syscalls/linux/vfs2/fd.go",
"new_path": "pkg/sentry/syscalls/linux/vfs2/fd.go",
"diff": "@@ -20,6 +20,7 @@ import (\n\"gvisor.dev/gvisor/pkg/sentry/fs/lock\"\n\"gvisor.dev/gvisor/pkg/sentry/fsimpl/tmpfs\"\n\"gvisor.dev/gvisor/pkg/sentry/kernel\"\n+ \"gvisor.dev/gvisor/pkg/sentry/kernel/fasync\"\n\"gvisor.dev/gvisor/pkg/sentry/kernel/pipe\"\nslinux \"gvisor.dev/gvisor/pkg/sentry/syscalls/linux\"\n\"gvisor.dev/gvisor/pkg/sentry/vfs\"\n@@ -154,6 +155,47 @@ func Fcntl(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall\nreturn 0, nil, err\n}\nreturn uintptr(n), nil, nil\n+ case linux.F_GETOWN:\n+ a := file.AsyncHandler()\n+ if a == nil {\n+ return 0, nil, nil\n+ }\n+ owner := getAsyncOwner(t, a.(*fasync.FileAsync))\n+ if owner.Type == linux.F_OWNER_PGRP {\n+ return uintptr(-owner.PID), nil, nil\n+ }\n+ return uintptr(owner.PID), nil, nil\n+ case linux.F_SETOWN:\n+ who := args[2].Int()\n+ ownerType := int32(linux.F_OWNER_PID)\n+ if who < 0 {\n+ // Check for overflow before flipping the sign.\n+ if who-1 > who {\n+ return 0, nil, syserror.EINVAL\n+ }\n+ ownerType = linux.F_OWNER_PGRP\n+ who = -who\n+ }\n+ a := file.SetAsyncHandler(fasync.NewVFS2).(*fasync.FileAsync)\n+ return 0, nil, setAsyncOwner(t, a, ownerType, who)\n+ case linux.F_GETOWN_EX:\n+ a := file.AsyncHandler()\n+ if a == nil {\n+ return 0, nil, nil\n+ }\n+ addr := args[2].Pointer()\n+ owner := getAsyncOwner(t, a.(*fasync.FileAsync))\n+ _, err := t.CopyOut(addr, &owner)\n+ return 0, nil, err\n+ case linux.F_SETOWN_EX:\n+ addr := args[2].Pointer()\n+ var owner linux.FOwnerEx\n+ n, err := t.CopyIn(addr, &owner)\n+ if err != nil {\n+ return 0, nil, err\n+ }\n+ a := file.SetAsyncHandler(fasync.NewVFS2).(*fasync.FileAsync)\n+ return uintptr(n), nil, setAsyncOwner(t, a, owner.Type, owner.PID)\ncase linux.F_GETPIPE_SZ:\npipefile, ok := file.Impl().(*pipe.VFSPipeFD)\nif !ok {\n@@ -177,6 +219,57 @@ func Fcntl(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall\n}\n}\n+func getAsyncOwner(t *kernel.Task, a *fasync.FileAsync) linux.FOwnerEx {\n+ ot, otg, opg := a.Owner()\n+ switch {\n+ case ot != nil:\n+ return linux.FOwnerEx{\n+ Type: linux.F_OWNER_TID,\n+ PID: int32(t.PIDNamespace().IDOfTask(ot)),\n+ }\n+ case otg != nil:\n+ return linux.FOwnerEx{\n+ Type: linux.F_OWNER_PID,\n+ PID: int32(t.PIDNamespace().IDOfThreadGroup(otg)),\n+ }\n+ case opg != nil:\n+ return linux.FOwnerEx{\n+ Type: linux.F_OWNER_PGRP,\n+ PID: int32(t.PIDNamespace().IDOfProcessGroup(opg)),\n+ }\n+ default:\n+ return linux.FOwnerEx{}\n+ }\n+}\n+\n+func setAsyncOwner(t *kernel.Task, a *fasync.FileAsync, ownerType, pid int32) error {\n+ switch ownerType {\n+ case linux.F_OWNER_TID:\n+ task := t.PIDNamespace().TaskWithID(kernel.ThreadID(pid))\n+ if task == nil {\n+ return syserror.ESRCH\n+ }\n+ a.SetOwnerTask(t, task)\n+ return nil\n+ case linux.F_OWNER_PID:\n+ tg := t.PIDNamespace().ThreadGroupWithID(kernel.ThreadID(pid))\n+ if tg == nil {\n+ return syserror.ESRCH\n+ }\n+ a.SetOwnerThreadGroup(t, tg)\n+ return nil\n+ case linux.F_OWNER_PGRP:\n+ pg := t.PIDNamespace().ProcessGroupWithID(kernel.ProcessGroupID(pid))\n+ if pg == nil {\n+ return syserror.ESRCH\n+ }\n+ a.SetOwnerProcessGroup(t, pg)\n+ return nil\n+ default:\n+ return syserror.EINVAL\n+ }\n+}\n+\nfunc posixLock(t *kernel.Task, args arch.SyscallArguments, file *vfs.FileDescription, cmd int32) error {\n// Copy in the lock request.\nflockAddr := args[2].Pointer()\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/vfs/file_description.go",
"new_path": "pkg/sentry/vfs/file_description.go",
"diff": "@@ -42,11 +42,20 @@ type FileDescription struct {\n// operations.\nrefs int64\n+ // flagsMu protects statusFlags and asyncHandler below.\n+ flagsMu sync.Mutex\n+\n// statusFlags contains status flags, \"initialized by open(2) and possibly\n- // modified by fcntl()\" - fcntl(2). statusFlags is accessed using atomic\n- // memory operations.\n+ // modified by fcntl()\" - fcntl(2). statusFlags can be read using atomic\n+ // memory operations when it does not need to be synchronized with an\n+ // access to asyncHandler.\nstatusFlags uint32\n+ // asyncHandler handles O_ASYNC signal generation. It is set with the\n+ // F_SETOWN or F_SETOWN_EX fcntls. For asyncHandler to be used, O_ASYNC must\n+ // also be set by fcntl(2).\n+ asyncHandler FileAsync\n+\n// epolls is the set of epollInterests registered for this FileDescription.\n// epolls is protected by epollMu.\nepollMu sync.Mutex\n@@ -193,6 +202,13 @@ func (fd *FileDescription) DecRef() {\nfd.vd.mount.EndWrite()\n}\nfd.vd.DecRef()\n+ fd.flagsMu.Lock()\n+ // TODO(gvisor.dev/issue/1663): We may need to unregister during save, as we do in VFS1.\n+ if fd.statusFlags&linux.O_ASYNC != 0 && fd.asyncHandler != nil {\n+ fd.asyncHandler.Unregister(fd)\n+ }\n+ fd.asyncHandler = nil\n+ fd.flagsMu.Unlock()\n} else if refs < 0 {\npanic(\"FileDescription.DecRef() called without holding a reference\")\n}\n@@ -276,7 +292,18 @@ func (fd *FileDescription) SetStatusFlags(ctx context.Context, creds *auth.Crede\n}\n// TODO(jamieliu): FileDescriptionImpl.SetOAsync()?\nconst settableFlags = linux.O_APPEND | linux.O_ASYNC | linux.O_DIRECT | linux.O_NOATIME | linux.O_NONBLOCK\n- atomic.StoreUint32(&fd.statusFlags, (oldFlags&^settableFlags)|(flags&settableFlags))\n+ fd.flagsMu.Lock()\n+ if fd.asyncHandler != nil {\n+ // Use fd.statusFlags instead of oldFlags, which may have become outdated,\n+ // to avoid double registering/unregistering.\n+ if fd.statusFlags&linux.O_ASYNC == 0 && flags&linux.O_ASYNC != 0 {\n+ fd.asyncHandler.Register(fd)\n+ } else if fd.statusFlags&linux.O_ASYNC != 0 && flags&linux.O_ASYNC == 0 {\n+ fd.asyncHandler.Unregister(fd)\n+ }\n+ }\n+ fd.statusFlags = (oldFlags &^ settableFlags) | (flags & settableFlags)\n+ fd.flagsMu.Unlock()\nreturn nil\n}\n@@ -533,17 +560,23 @@ func (fd *FileDescription) StatFS(ctx context.Context) (linux.Statfs, error) {\nreturn fd.impl.StatFS(ctx)\n}\n-// Readiness returns fd's I/O readiness.\n+// Readiness implements waiter.Waitable.Readiness.\n+//\n+// It returns fd's I/O readiness.\nfunc (fd *FileDescription) Readiness(mask waiter.EventMask) waiter.EventMask {\nreturn fd.impl.Readiness(mask)\n}\n-// EventRegister registers e for I/O readiness events in mask.\n+// EventRegister implements waiter.Waitable.EventRegister.\n+//\n+// It registers e for I/O readiness events in mask.\nfunc (fd *FileDescription) EventRegister(e *waiter.Entry, mask waiter.EventMask) {\nfd.impl.EventRegister(e, mask)\n}\n-// EventUnregister unregisters e for I/O readiness events.\n+// EventUnregister implements waiter.Waitable.EventUnregister.\n+//\n+// It unregisters e for I/O readiness events.\nfunc (fd *FileDescription) EventUnregister(e *waiter.Entry) {\nfd.impl.EventUnregister(e)\n}\n@@ -770,3 +803,32 @@ func (fd *FileDescription) LockPOSIX(ctx context.Context, uid lock.UniqueID, t l\nfunc (fd *FileDescription) UnlockPOSIX(ctx context.Context, uid lock.UniqueID, start, end uint64, whence int16) error {\nreturn fd.impl.UnlockPOSIX(ctx, uid, start, end, whence)\n}\n+\n+// A FileAsync sends signals to its owner when w is ready for IO. 
This is only\n+// implemented by pkg/sentry/fasync:FileAsync, but we unfortunately need this\n+// interface to avoid circular dependencies.\n+type FileAsync interface {\n+ Register(w waiter.Waitable)\n+ Unregister(w waiter.Waitable)\n+}\n+\n+// AsyncHandler returns the FileAsync for fd.\n+func (fd *FileDescription) AsyncHandler() FileAsync {\n+ fd.flagsMu.Lock()\n+ defer fd.flagsMu.Unlock()\n+ return fd.asyncHandler\n+}\n+\n+// SetAsyncHandler sets fd.asyncHandler if it has not been set before and\n+// returns it.\n+func (fd *FileDescription) SetAsyncHandler(newHandler func() FileAsync) FileAsync {\n+ fd.flagsMu.Lock()\n+ defer fd.flagsMu.Unlock()\n+ if fd.asyncHandler == nil {\n+ fd.asyncHandler = newHandler()\n+ if fd.statusFlags&linux.O_ASYNC != 0 {\n+ fd.asyncHandler.Register(fd)\n+ }\n+ }\n+ return fd.asyncHandler\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/BUILD",
"new_path": "test/syscalls/linux/BUILD",
"diff": "@@ -805,6 +805,7 @@ cc_binary(\n\"//test/util:save_util\",\n\"//test/util:temp_path\",\n\"//test/util:test_util\",\n+ \"//test/util:thread_util\",\n\"//test/util:timer_util\",\n],\n)\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/fcntl.cc",
"new_path": "test/syscalls/linux/fcntl.cc",
"diff": "#include \"test/util/save_util.h\"\n#include \"test/util/temp_path.h\"\n#include \"test/util/test_util.h\"\n+#include \"test/util/thread_util.h\"\n#include \"test/util/timer_util.h\"\nABSL_FLAG(std::string, child_setlock_on, \"\",\n@@ -953,15 +954,18 @@ TEST(FcntlTest, DupAfterO_ASYNC) {\nEXPECT_EQ(after & O_ASYNC, O_ASYNC);\n}\n-TEST(FcntlTest, GetOwn) {\n+TEST(FcntlTest, GetOwnNone) {\nFileDescriptor s = ASSERT_NO_ERRNO_AND_VALUE(\nSocket(AF_UNIX, SOCK_SEQPACKET | SOCK_NONBLOCK | SOCK_CLOEXEC, 0));\n- EXPECT_EQ(syscall(__NR_fcntl, s.get(), F_GETOWN), 0);\n+ // Use the raw syscall because the glibc wrapper may convert F_{GET,SET}OWN\n+ // into F_{GET,SET}OWN_EX.\n+ EXPECT_THAT(syscall(__NR_fcntl, s.get(), F_GETOWN),\n+ SyscallSucceedsWithValue(0));\nMaybeSave();\n}\n-TEST(FcntlTest, GetOwnEx) {\n+TEST(FcntlTest, GetOwnExNone) {\nFileDescriptor s = ASSERT_NO_ERRNO_AND_VALUE(\nSocket(AF_UNIX, SOCK_SEQPACKET | SOCK_NONBLOCK | SOCK_CLOEXEC, 0));\n@@ -970,6 +974,70 @@ TEST(FcntlTest, GetOwnEx) {\nSyscallSucceedsWithValue(0));\n}\n+TEST(FcntlTest, SetOwnInvalidPid) {\n+ SKIP_IF(IsRunningWithVFS1());\n+\n+ FileDescriptor s = ASSERT_NO_ERRNO_AND_VALUE(\n+ Socket(AF_UNIX, SOCK_SEQPACKET | SOCK_NONBLOCK | SOCK_CLOEXEC, 0));\n+\n+ EXPECT_THAT(syscall(__NR_fcntl, s.get(), F_SETOWN, 12345678),\n+ SyscallFailsWithErrno(ESRCH));\n+}\n+\n+TEST(FcntlTest, SetOwnInvalidPgrp) {\n+ SKIP_IF(IsRunningWithVFS1());\n+\n+ FileDescriptor s = ASSERT_NO_ERRNO_AND_VALUE(\n+ Socket(AF_UNIX, SOCK_SEQPACKET | SOCK_NONBLOCK | SOCK_CLOEXEC, 0));\n+\n+ EXPECT_THAT(syscall(__NR_fcntl, s.get(), F_SETOWN, -12345678),\n+ SyscallFailsWithErrno(ESRCH));\n+}\n+\n+TEST(FcntlTest, SetOwnPid) {\n+ FileDescriptor s = ASSERT_NO_ERRNO_AND_VALUE(\n+ Socket(AF_UNIX, SOCK_SEQPACKET | SOCK_NONBLOCK | SOCK_CLOEXEC, 0));\n+\n+ pid_t pid;\n+ EXPECT_THAT(pid = getpid(), SyscallSucceeds());\n+\n+ ASSERT_THAT(syscall(__NR_fcntl, s.get(), F_SETOWN, pid), SyscallSucceeds());\n+\n+ EXPECT_THAT(syscall(__NR_fcntl, s.get(), F_GETOWN),\n+ SyscallSucceedsWithValue(pid));\n+ MaybeSave();\n+}\n+\n+TEST(FcntlTest, SetOwnPgrp) {\n+ FileDescriptor s = ASSERT_NO_ERRNO_AND_VALUE(\n+ Socket(AF_UNIX, SOCK_SEQPACKET | SOCK_NONBLOCK | SOCK_CLOEXEC, 0));\n+\n+ pid_t pgid;\n+ EXPECT_THAT(pgid = getpgrp(), SyscallSucceeds());\n+\n+ ASSERT_THAT(syscall(__NR_fcntl, s.get(), F_SETOWN, -pgid), SyscallSucceeds());\n+\n+ // Verify with F_GETOWN_EX; using F_GETOWN on Linux may incorrectly treat the\n+ // negative return value as an error, converting the return value to -1 and\n+ // setting errno accordingly.\n+ f_owner_ex got_owner = {};\n+ ASSERT_THAT(syscall(__NR_fcntl, s.get(), F_GETOWN_EX, &got_owner),\n+ SyscallSucceedsWithValue(0));\n+ EXPECT_EQ(got_owner.type, F_OWNER_PGRP);\n+ EXPECT_EQ(got_owner.pid, pgid);\n+ MaybeSave();\n+}\n+\n+// F_SETOWN flips the sign of negative values, an operation that is guarded\n+// against overflow.\n+TEST(FcntlTest, SetOwnOverflow) {\n+ FileDescriptor s = ASSERT_NO_ERRNO_AND_VALUE(\n+ Socket(AF_UNIX, SOCK_SEQPACKET | SOCK_NONBLOCK | SOCK_CLOEXEC, 0));\n+\n+ EXPECT_THAT(syscall(__NR_fcntl, s.get(), F_SETOWN, INT_MIN),\n+ SyscallFailsWithErrno(EINVAL));\n+}\n+\nTEST(FcntlTest, SetOwnExInvalidType) {\nFileDescriptor s = ASSERT_NO_ERRNO_AND_VALUE(\nSocket(AF_UNIX, SOCK_SEQPACKET | SOCK_NONBLOCK | SOCK_CLOEXEC, 0));\n@@ -1027,7 +1095,8 @@ TEST(FcntlTest, SetOwnExTid) {\nASSERT_THAT(syscall(__NR_fcntl, s.get(), F_SETOWN_EX, &owner),\nSyscallSucceeds());\n- EXPECT_EQ(syscall(__NR_fcntl, s.get(), F_GETOWN), owner.pid);\n+ 
EXPECT_THAT(syscall(__NR_fcntl, s.get(), F_GETOWN),\n+ SyscallSucceedsWithValue(owner.pid));\nMaybeSave();\n}\n@@ -1042,7 +1111,8 @@ TEST(FcntlTest, SetOwnExPid) {\nASSERT_THAT(syscall(__NR_fcntl, s.get(), F_SETOWN_EX, &owner),\nSyscallSucceeds());\n- EXPECT_EQ(syscall(__NR_fcntl, s.get(), F_GETOWN), owner.pid);\n+ EXPECT_THAT(syscall(__NR_fcntl, s.get(), F_GETOWN),\n+ SyscallSucceedsWithValue(owner.pid));\nMaybeSave();\n}\n@@ -1050,18 +1120,21 @@ TEST(FcntlTest, SetOwnExPgrp) {\nFileDescriptor s = ASSERT_NO_ERRNO_AND_VALUE(\nSocket(AF_UNIX, SOCK_SEQPACKET | SOCK_NONBLOCK | SOCK_CLOEXEC, 0));\n- f_owner_ex owner = {};\n- owner.type = F_OWNER_PGRP;\n- EXPECT_THAT(owner.pid = getpgrp(), SyscallSucceeds());\n+ f_owner_ex set_owner = {};\n+ set_owner.type = F_OWNER_PGRP;\n+ EXPECT_THAT(set_owner.pid = getpgrp(), SyscallSucceeds());\n- ASSERT_THAT(syscall(__NR_fcntl, s.get(), F_SETOWN_EX, &owner),\n+ ASSERT_THAT(syscall(__NR_fcntl, s.get(), F_SETOWN_EX, &set_owner),\nSyscallSucceeds());\n- // NOTE(igudger): I don't understand why, but this is flaky on Linux.\n- // GetOwnExPgrp (below) does not have this issue.\n- SKIP_IF(!IsRunningOnGvisor());\n-\n- EXPECT_EQ(syscall(__NR_fcntl, s.get(), F_GETOWN), -owner.pid);\n+ // Verify with F_GETOWN_EX; using F_GETOWN on Linux may incorrectly treat the\n+ // negative return value as an error, converting the return value to -1 and\n+ // setting errno accordingly.\n+ f_owner_ex got_owner = {};\n+ ASSERT_THAT(syscall(__NR_fcntl, s.get(), F_GETOWN_EX, &got_owner),\n+ SyscallSucceedsWithValue(0));\n+ EXPECT_EQ(got_owner.type, set_owner.type);\n+ EXPECT_EQ(got_owner.pid, set_owner.pid);\nMaybeSave();\n}\n@@ -1119,6 +1192,45 @@ TEST(FcntlTest, GetOwnExPgrp) {\nEXPECT_EQ(got_owner.pid, set_owner.pid);\n}\n+// Make sure that making multiple concurrent changes to async signal generation\n+// does not cause any race issues.\n+TEST(FcntlTest, SetFlSetOwnDoNotRace) {\n+ FileDescriptor s = ASSERT_NO_ERRNO_AND_VALUE(\n+ Socket(AF_UNIX, SOCK_SEQPACKET | SOCK_NONBLOCK | SOCK_CLOEXEC, 0));\n+\n+ pid_t pid;\n+ EXPECT_THAT(pid = getpid(), SyscallSucceeds());\n+\n+ constexpr absl::Duration runtime = absl::Milliseconds(300);\n+ auto setAsync = [&s, &runtime] {\n+ for (auto start = absl::Now(); absl::Now() - start < runtime;) {\n+ ASSERT_THAT(syscall(__NR_fcntl, s.get(), F_SETFL, O_ASYNC),\n+ SyscallSucceeds());\n+ sched_yield();\n+ }\n+ };\n+ auto resetAsync = [&s, &runtime] {\n+ for (auto start = absl::Now(); absl::Now() - start < runtime;) {\n+ ASSERT_THAT(syscall(__NR_fcntl, s.get(), F_SETFL, 0), SyscallSucceeds());\n+ sched_yield();\n+ }\n+ };\n+ auto setOwn = [&s, &pid, &runtime] {\n+ for (auto start = absl::Now(); absl::Now() - start < runtime;) {\n+ ASSERT_THAT(syscall(__NR_fcntl, s.get(), F_SETOWN, pid),\n+ SyscallSucceeds());\n+ sched_yield();\n+ }\n+ };\n+\n+ std::list<ScopedThread> threads;\n+ for (int i = 0; i < 10; i++) {\n+ threads.emplace_back(setAsync);\n+ threads.emplace_back(resetAsync);\n+ threads.emplace_back(setOwn);\n+ }\n+}\n+\n} // namespace\n} // namespace testing\n"
}
] | Go | Apache License 2.0 | google/gvisor | Port GETOWN, SETOWN fcntls to vfs2.
Also make some fixes to vfs1's F_SETOWN. The fcntl test now entirely passes
on vfs2.
Fixes #2920.
PiperOrigin-RevId: 318669529 |
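Aside, not part of the record above: a Go sketch, assuming golang.org/x/sys/unix, of the F_SETOWN/F_GETOWN round trip the new tests cover. A positive argument names a process and a negative one names a process group, which is why the sign flip in the sentry has to guard against INT_MIN overflow.

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	fds, err := unix.Socketpair(unix.AF_UNIX, unix.SOCK_SEQPACKET, 0)
	if err != nil {
		panic(err)
	}
	defer unix.Close(fds[0])
	defer unix.Close(fds[1])

	// Make this process the owner that will receive SIGIO/SIGURG for fds[0].
	if _, err := unix.FcntlInt(uintptr(fds[0]), unix.F_SETOWN, os.Getpid()); err != nil {
		panic(err)
	}
	owner, err := unix.FcntlInt(uintptr(fds[0]), unix.F_GETOWN, 0)
	if err != nil {
		panic(err)
	}
	fmt.Println("owner pid:", owner)
}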
259,983 | 30.06.2020 09:03:14 | 25,200 | 09b7791e89d9d487d03cf03cf2ba2d0fb1c9386a | Add missing ICRNL flag in master termios test
Closes | [
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/pty.cc",
"new_path": "test/syscalls/linux/pty.cc",
"diff": "@@ -634,6 +634,11 @@ TEST_F(PtyTest, TermiosAffectsSlave) {\n// Verify this by setting ICRNL (which rewrites input \\r to \\n) and verify that\n// it has no effect on the master.\nTEST_F(PtyTest, MasterTermiosUnchangable) {\n+ struct kernel_termios master_termios = {};\n+ EXPECT_THAT(ioctl(master_.get(), TCGETS, &master_termios), SyscallSucceeds());\n+ master_termios.c_lflag |= ICRNL;\n+ EXPECT_THAT(ioctl(master_.get(), TCSETS, &master_termios), SyscallSucceeds());\n+\nchar c = '\\r';\nASSERT_THAT(WriteFd(slave_.get(), &c, 1), SyscallSucceedsWithValue(1));\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add missing ICRNL flag in master termios test
Closes #2768 |
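Aside, not part of the record above: a Go sketch, assuming golang.org/x/sys/unix and a terminal on stdin, of toggling ICRNL, the flag the test exercises: with it set, the line discipline rewrites received '\r' bytes to '\n'.

package main

import (
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	fd := int(os.Stdin.Fd())
	old, err := unix.IoctlGetTermios(fd, unix.TCGETS)
	if err != nil {
		panic(err) // stdin is not a terminal
	}
	updated := *old
	updated.Iflag |= unix.ICRNL // ICRNL lives in the input flags (c_iflag).
	if err := unix.IoctlSetTermios(fd, unix.TCSETS, &updated); err != nil {
		panic(err)
	}
	// Restore the original settings before exiting.
	defer unix.IoctlSetTermios(fd, unix.TCSETS, old)
}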
259,860 | 30.06.2020 15:28:06 | 25,200 | 44071cc7fae8a7143b0cd386b402375aafecd979 | Remove struct packing to fix compiler warning.
-Waddress-of-packed-member warns on inet_aton() being used with a packed struct
member. This was added in cl/291990716. | [
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/socket_netlink_route.cc",
"new_path": "test/syscalls/linux/socket_netlink_route.cc",
"diff": "@@ -595,7 +595,7 @@ TEST(NetlinkRouteTest, GetRouteRequest) {\nASSERT_NO_ERRNO_AND_VALUE(NetlinkBoundSocket(NETLINK_ROUTE));\nuint32_t port = ASSERT_NO_ERRNO_AND_VALUE(NetlinkPortID(fd.get()));\n- struct __attribute__((__packed__)) request {\n+ struct request {\nstruct nlmsghdr hdr;\nstruct rtmsg rtm;\nstruct nlattr nla;\n"
}
] | Go | Apache License 2.0 | google/gvisor | Remove struct packing to fix compiler warning.
-Waddress-of-packed-member warns on inet_aton() being used with a packed struct
member. This was added in cl/291990716.
PiperOrigin-RevId: 319111253 |
259,962 | 30.06.2020 19:00:13 | 25,200 | c4bdd0118f5dc9090979697e31a18731968b4066 | Add missing newline in /sys/devices/system/cpu/online | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/sys/sys.go",
"new_path": "pkg/sentry/fsimpl/sys/sys.go",
"diff": "@@ -138,7 +138,7 @@ type cpuFile struct {\n// Generate implements vfs.DynamicBytesSource.Generate.\nfunc (c *cpuFile) Generate(ctx context.Context, buf *bytes.Buffer) error {\n- fmt.Fprintf(buf, \"0-%d\", c.maxCores-1)\n+ fmt.Fprintf(buf, \"0-%d\\n\", c.maxCores-1)\nreturn nil\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/sys/sys_test.go",
"new_path": "pkg/sentry/fsimpl/sys/sys_test.go",
"diff": "@@ -51,7 +51,7 @@ func TestReadCPUFile(t *testing.T) {\nk := kernel.KernelFromContext(s.Ctx)\nmaxCPUCores := k.ApplicationCores()\n- expected := fmt.Sprintf(\"0-%d\", maxCPUCores-1)\n+ expected := fmt.Sprintf(\"0-%d\\n\", maxCPUCores-1)\nfor _, fname := range []string{\"online\", \"possible\", \"present\"} {\npop := s.PathOpAtRoot(fmt.Sprintf(\"devices/system/cpu/%s\", fname))\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add missing newline in /sys/devices/system/cpu/online
PiperOrigin-RevId: 319143410 |
259,860 | 30.06.2020 20:47:02 | 25,200 | 20d571b0c181023cc02521ad746a2b6d91e6794d | Allow O_DIRECT on vfs2 tmpfs files.
Updates | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/tmpfs/filesystem.go",
"new_path": "pkg/sentry/fsimpl/tmpfs/filesystem.go",
"diff": "@@ -407,7 +407,7 @@ func (d *dentry) open(ctx context.Context, rp *vfs.ResolvingPath, opts *vfs.Open\ncase *regularFile:\nvar fd regularFileFD\nfd.LockFD.Init(&d.inode.locks)\n- if err := fd.vfsfd.Init(&fd, opts.Flags, rp.Mount(), &d.vfsd, &vfs.FileDescriptionOptions{}); err != nil {\n+ if err := fd.vfsfd.Init(&fd, opts.Flags, rp.Mount(), &d.vfsd, &vfs.FileDescriptionOptions{AllowDirectIO: true}); err != nil {\nreturn nil, err\n}\nif opts.Flags&linux.O_TRUNC != 0 {\n@@ -423,7 +423,7 @@ func (d *dentry) open(ctx context.Context, rp *vfs.ResolvingPath, opts *vfs.Open\n}\nvar fd directoryFD\nfd.LockFD.Init(&d.inode.locks)\n- if err := fd.vfsfd.Init(&fd, opts.Flags, rp.Mount(), &d.vfsd, &vfs.FileDescriptionOptions{}); err != nil {\n+ if err := fd.vfsfd.Init(&fd, opts.Flags, rp.Mount(), &d.vfsd, &vfs.FileDescriptionOptions{AllowDirectIO: true}); err != nil {\nreturn nil, err\n}\nreturn &fd.vfsfd, nil\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/vfs/file_description.go",
"new_path": "pkg/sentry/vfs/file_description.go",
"diff": "@@ -91,8 +91,7 @@ type FileDescription struct {\n// FileDescriptionOptions contains options to FileDescription.Init().\ntype FileDescriptionOptions struct {\n- // If AllowDirectIO is true, allow O_DIRECT to be set on the file. This is\n- // usually only the case if O_DIRECT would actually have an effect.\n+ // If AllowDirectIO is true, allow O_DIRECT to be set on the file.\nAllowDirectIO bool\n// If DenyPRead is true, calls to FileDescription.PRead() return ESPIPE.\n"
}
] | Go | Apache License 2.0 | google/gvisor | Allow O_DIRECT on vfs2 tmpfs files.
Updates #2923.
PiperOrigin-RevId: 319153792 |
259,860 | 30.06.2020 20:59:32 | 25,200 | 43f5dd95a1c58a6e3260c31093bfc3f97885f4b0 | Fix index calculation for /proc/[pid]/cmdline.
We were truncating buf using a index relative to the middle of the slice (i.e.
where envv begins), but we need to calculate the index relative to the entire
slice.
Updates | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/proc/task_files.go",
"new_path": "pkg/sentry/fsimpl/proc/task_files.go",
"diff": "@@ -231,8 +231,9 @@ func (d *cmdlineData) Generate(ctx context.Context, buf *bytes.Buffer) error {\n// Linux will return envp up to and including the first NULL character,\n// so find it.\n- if end := bytes.IndexByte(buf.Bytes()[ar.Length():], 0); end != -1 {\n- buf.Truncate(end)\n+ envStart := int(ar.Length())\n+ if nullIdx := bytes.IndexByte(buf.Bytes()[envStart:], 0); nullIdx != -1 {\n+ buf.Truncate(envStart + nullIdx)\n}\n}\n@@ -300,7 +301,7 @@ type idMapData struct {\nvar _ dynamicInode = (*idMapData)(nil)\n-// Generate implements vfs.DynamicBytesSource.Generate.\n+// Generate implements vfs.WritableDynamicBytesSource.Generate.\nfunc (d *idMapData) Generate(ctx context.Context, buf *bytes.Buffer) error {\nvar entries []auth.IDMapEntry\nif d.gids {\n@@ -314,6 +315,7 @@ func (d *idMapData) Generate(ctx context.Context, buf *bytes.Buffer) error {\nreturn nil\n}\n+// Write implements vfs.WritableDynamicBytesSource.Write.\nfunc (d *idMapData) Write(ctx context.Context, src usermem.IOSequence, offset int64) (int64, error) {\n// \"In addition, the number of bytes written to the file must be less than\n// the system page size, and the write must be performed at the start of\n"
}
] | Go | Apache License 2.0 | google/gvisor | Fix index calculation for /proc/[pid]/cmdline.
We were truncating buf using an index relative to the middle of the slice (i.e.
where envv begins), but we need to calculate the index relative to the entire
slice.
Updates #2923.
PiperOrigin-RevId: 319154950 |
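The bug described above comes down to slice-index arithmetic: bytes.IndexByte on a sub-slice returns an offset relative to that sub-slice, so it has to be shifted back before truncating the full buffer. A self-contained Go illustration with made-up argv/envv contents (not sentry code):

    package main

    import (
    	"bytes"
    	"fmt"
    )

    func main() {
    	var buf bytes.Buffer
    	buf.WriteString("arg0\x00arg1\x00")             // argv portion
    	envStart := buf.Len()                           // envv begins here
    	buf.WriteString("HOME=/root\x00TERM=xterm\x00") // envv portion

    	// Keep envv only up to the first NUL. The index returned by IndexByte is
    	// relative to buf.Bytes()[envStart:], so it must be offset by envStart
    	// before being used against the whole buffer.
    	if nullIdx := bytes.IndexByte(buf.Bytes()[envStart:], 0); nullIdx != -1 {
    		buf.Truncate(envStart + nullIdx)
    	}
    	fmt.Printf("%q\n", buf.String()) // "arg0\x00arg1\x00HOME=/root"
    }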
259,962 | 01.07.2020 06:57:38 | 25,200 | b8f165ab1cb4d4345d8a25f95a5bc0fece2d49c7 | Fix HTTPD benchmarks to correctly serve files from /tmp.
Fixes | [
{
"change_type": "MODIFY",
"old_path": "benchmarks/workloads/httpd/BUILD",
"new_path": "benchmarks/workloads/httpd/BUILD",
"diff": "@@ -9,5 +9,6 @@ pkg_tar(\nname = \"tar\",\nsrcs = [\n\"Dockerfile\",\n+ \"apache2-tmpdir.conf\",\n],\n)\n"
},
{
"change_type": "MODIFY",
"old_path": "benchmarks/workloads/httpd/Dockerfile",
"new_path": "benchmarks/workloads/httpd/Dockerfile",
"diff": "@@ -6,16 +6,16 @@ RUN set -x \\\napache2 \\\n&& rm -rf /var/lib/apt/lists/*\n-# Link the htdoc directory to tmp.\n-RUN mkdir -p /usr/local/apache2/htdocs && \\\n- cd /usr/local/apache2 && ln -s /tmp htdocs\n-\n# Generate a bunch of relevant files.\nRUN mkdir -p /local && \\\nfor size in 1 10 100 1000 1024 10240; do \\\ndd if=/dev/zero of=/local/latin${size}k.txt count=${size} bs=1024; \\\ndone\n+# Rewrite DocumentRoot to point to /tmp/html instead of the default path.\n+RUN sed -i 's/DocumentRoot.*\\/var\\/www\\/html$/DocumentRoot \\/tmp\\/html/' /etc/apache2/sites-enabled/000-default.conf\n+COPY ./apache2-tmpdir.conf /etc/apache2/sites-enabled/apache2-tmpdir.conf\n+\n# Standard settings.\nENV APACHE_RUN_DIR /tmp\nENV APACHE_RUN_USER nobody\n@@ -24,4 +24,4 @@ ENV APACHE_LOG_DIR /tmp\nENV APACHE_PID_FILE /tmp/apache.pid\n# Copy on start-up; serve everything from /tmp (including the configuration).\n-CMD [\"sh\", \"-c\", \"cp -a /local/* /tmp && apache2 -c \\\"ServerName localhost\\\" -c \\\"DocumentRoot /tmp\\\" -X\"]\n+CMD [\"sh\", \"-c\", \"mkdir -p /tmp/html && cp -a /local/* /tmp/html && apache2 -X\"]\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "benchmarks/workloads/httpd/apache2-tmpdir.conf",
"diff": "+<Directory /tmp/html/>\n+ Options Indexes FollowSymLinks\n+ AllowOverride None\n+ Require all granted\n+</Directory>\n\\ No newline at end of file\n"
}
] | Go | Apache License 2.0 | google/gvisor | Fix HTTPD benchmarks to correctly serve files from /tmp.
Fixes #3113
PiperOrigin-RevId: 319216671 |
259,860 | 01.07.2020 08:40:31 | 25,200 | cda2979b63fad37a33706f8aa430664a9c4d0b3b | Complete async signal delivery support in vfs2.
Support FIOASYNC, FIO{SET,GET}OWN, SIOC{G,S}PGRP (refactor getting/setting
owner in the process).
Unset signal recipient when setting owner with pid == 0 and
valid owner type.
Updates | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/fasync/fasync.go",
"new_path": "pkg/sentry/kernel/fasync/fasync.go",
"diff": "@@ -176,3 +176,13 @@ func (a *FileAsync) SetOwnerProcessGroup(requester *kernel.Task, recipient *kern\na.recipientTG = nil\na.recipientPG = recipient\n}\n+\n+// ClearOwner unsets the current signal recipient.\n+func (a *FileAsync) ClearOwner() {\n+ a.mu.Lock()\n+ defer a.mu.Unlock()\n+ a.requester = nil\n+ a.recipientT = nil\n+ a.recipientTG = nil\n+ a.recipientPG = nil\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/syscalls/linux/vfs2/fd.go",
"new_path": "pkg/sentry/syscalls/linux/vfs2/fd.go",
"diff": "@@ -156,11 +156,10 @@ func Fcntl(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall\n}\nreturn uintptr(n), nil, nil\ncase linux.F_GETOWN:\n- a := file.AsyncHandler()\n- if a == nil {\n+ owner, hasOwner := getAsyncOwner(t, file)\n+ if !hasOwner {\nreturn 0, nil, nil\n}\n- owner := getAsyncOwner(t, a.(*fasync.FileAsync))\nif owner.Type == linux.F_OWNER_PGRP {\nreturn uintptr(-owner.PID), nil, nil\n}\n@@ -176,26 +175,21 @@ func Fcntl(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall\nownerType = linux.F_OWNER_PGRP\nwho = -who\n}\n- a := file.SetAsyncHandler(fasync.NewVFS2).(*fasync.FileAsync)\n- return 0, nil, setAsyncOwner(t, a, ownerType, who)\n+ return 0, nil, setAsyncOwner(t, file, ownerType, who)\ncase linux.F_GETOWN_EX:\n- a := file.AsyncHandler()\n- if a == nil {\n+ owner, hasOwner := getAsyncOwner(t, file)\n+ if !hasOwner {\nreturn 0, nil, nil\n}\n- addr := args[2].Pointer()\n- owner := getAsyncOwner(t, a.(*fasync.FileAsync))\n- _, err := t.CopyOut(addr, &owner)\n+ _, err := t.CopyOut(args[2].Pointer(), &owner)\nreturn 0, nil, err\ncase linux.F_SETOWN_EX:\n- addr := args[2].Pointer()\nvar owner linux.FOwnerEx\n- n, err := t.CopyIn(addr, &owner)\n+ n, err := t.CopyIn(args[2].Pointer(), &owner)\nif err != nil {\nreturn 0, nil, err\n}\n- a := file.SetAsyncHandler(fasync.NewVFS2).(*fasync.FileAsync)\n- return uintptr(n), nil, setAsyncOwner(t, a, owner.Type, owner.PID)\n+ return uintptr(n), nil, setAsyncOwner(t, file, owner.Type, owner.PID)\ncase linux.F_GETPIPE_SZ:\npipefile, ok := file.Impl().(*pipe.VFSPipeFD)\nif !ok {\n@@ -219,30 +213,48 @@ func Fcntl(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall\n}\n}\n-func getAsyncOwner(t *kernel.Task, a *fasync.FileAsync) linux.FOwnerEx {\n- ot, otg, opg := a.Owner()\n+func getAsyncOwner(t *kernel.Task, fd *vfs.FileDescription) (ownerEx linux.FOwnerEx, hasOwner bool) {\n+ a := fd.AsyncHandler()\n+ if a == nil {\n+ return linux.FOwnerEx{}, false\n+ }\n+\n+ ot, otg, opg := a.(*fasync.FileAsync).Owner()\nswitch {\ncase ot != nil:\nreturn linux.FOwnerEx{\nType: linux.F_OWNER_TID,\nPID: int32(t.PIDNamespace().IDOfTask(ot)),\n- }\n+ }, true\ncase otg != nil:\nreturn linux.FOwnerEx{\nType: linux.F_OWNER_PID,\nPID: int32(t.PIDNamespace().IDOfThreadGroup(otg)),\n- }\n+ }, true\ncase opg != nil:\nreturn linux.FOwnerEx{\nType: linux.F_OWNER_PGRP,\nPID: int32(t.PIDNamespace().IDOfProcessGroup(opg)),\n+ }, true\n+ default:\n+ return linux.FOwnerEx{}, true\n+ }\n}\n+\n+func setAsyncOwner(t *kernel.Task, fd *vfs.FileDescription, ownerType, pid int32) error {\n+ switch ownerType {\n+ case linux.F_OWNER_TID, linux.F_OWNER_PID, linux.F_OWNER_PGRP:\n+ // Acceptable type.\ndefault:\n- return linux.FOwnerEx{}\n+ return syserror.EINVAL\n}\n+\n+ a := fd.SetAsyncHandler(fasync.NewVFS2).(*fasync.FileAsync)\n+ if pid == 0 {\n+ a.ClearOwner()\n+ return nil\n}\n-func setAsyncOwner(t *kernel.Task, a *fasync.FileAsync, ownerType, pid int32) error {\nswitch ownerType {\ncase linux.F_OWNER_TID:\ntask := t.PIDNamespace().TaskWithID(kernel.ThreadID(pid))\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/syscalls/linux/vfs2/ioctl.go",
"new_path": "pkg/sentry/syscalls/linux/vfs2/ioctl.go",
"diff": "@@ -57,6 +57,49 @@ func Ioctl(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall\nflags &^= linux.O_NONBLOCK\n}\nreturn 0, nil, file.SetStatusFlags(t, t.Credentials(), flags)\n+\n+ case linux.FIOASYNC:\n+ var set int32\n+ if _, err := t.CopyIn(args[2].Pointer(), &set); err != nil {\n+ return 0, nil, err\n+ }\n+ flags := file.StatusFlags()\n+ if set != 0 {\n+ flags |= linux.O_ASYNC\n+ } else {\n+ flags &^= linux.O_ASYNC\n+ }\n+ file.SetStatusFlags(t, t.Credentials(), flags)\n+ return 0, nil, nil\n+\n+ case linux.FIOGETOWN, linux.SIOCGPGRP:\n+ var who int32\n+ owner, hasOwner := getAsyncOwner(t, file)\n+ if hasOwner {\n+ if owner.Type == linux.F_OWNER_PGRP {\n+ who = -owner.PID\n+ } else {\n+ who = owner.PID\n+ }\n+ }\n+ _, err := t.CopyOut(args[2].Pointer(), &who)\n+ return 0, nil, err\n+\n+ case linux.FIOSETOWN, linux.SIOCSPGRP:\n+ var who int32\n+ if _, err := t.CopyIn(args[2].Pointer(), &who); err != nil {\n+ return 0, nil, err\n+ }\n+ ownerType := int32(linux.F_OWNER_PID)\n+ if who < 0 {\n+ // Check for overflow before flipping the sign.\n+ if who-1 > who {\n+ return 0, nil, syserror.EINVAL\n+ }\n+ ownerType = linux.F_OWNER_PGRP\n+ who = -who\n+ }\n+ return 0, nil, setAsyncOwner(t, file, ownerType, who)\n}\nret, err := file.Ioctl(t, t.MemoryManager(), args)\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/BUILD",
"new_path": "test/syscalls/BUILD",
"diff": "@@ -288,6 +288,7 @@ syscall_test(\nsize = \"medium\",\nadd_overlay = True,\ntest = \"//test/syscalls/linux:ioctl_test\",\n+ vfs2 = \"True\",\n)\nsyscall_test(\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/fcntl.cc",
"new_path": "test/syscalls/linux/fcntl.cc",
"diff": "@@ -1031,6 +1031,30 @@ TEST(FcntlTest, SetOwnPgrp) {\nMaybeSave();\n}\n+TEST(FcntlTest, SetOwnUnset) {\n+ FileDescriptor s = ASSERT_NO_ERRNO_AND_VALUE(\n+ Socket(AF_UNIX, SOCK_SEQPACKET | SOCK_NONBLOCK | SOCK_CLOEXEC, 0));\n+\n+ // Set and unset pid.\n+ pid_t pid;\n+ EXPECT_THAT(pid = getpid(), SyscallSucceeds());\n+ ASSERT_THAT(syscall(__NR_fcntl, s.get(), F_SETOWN, pid), SyscallSucceeds());\n+ ASSERT_THAT(syscall(__NR_fcntl, s.get(), F_SETOWN, 0), SyscallSucceeds());\n+\n+ EXPECT_THAT(syscall(__NR_fcntl, s.get(), F_GETOWN),\n+ SyscallSucceedsWithValue(0));\n+\n+ // Set and unset pgid.\n+ pid_t pgid;\n+ EXPECT_THAT(pgid = getpgrp(), SyscallSucceeds());\n+ ASSERT_THAT(syscall(__NR_fcntl, s.get(), F_SETOWN, -pgid), SyscallSucceeds());\n+ ASSERT_THAT(syscall(__NR_fcntl, s.get(), F_SETOWN, 0), SyscallSucceeds());\n+\n+ EXPECT_THAT(syscall(__NR_fcntl, s.get(), F_GETOWN),\n+ SyscallSucceedsWithValue(0));\n+ MaybeSave();\n+}\n+\n// F_SETOWN flips the sign of negative values, an operation that is guarded\n// against overflow.\nTEST(FcntlTest, SetOwnOverflow) {\n@@ -1141,6 +1165,39 @@ TEST(FcntlTest, SetOwnExPgrp) {\nMaybeSave();\n}\n+TEST(FcntlTest, SetOwnExUnset) {\n+ SKIP_IF(IsRunningWithVFS1());\n+\n+ FileDescriptor s = ASSERT_NO_ERRNO_AND_VALUE(\n+ Socket(AF_UNIX, SOCK_SEQPACKET | SOCK_NONBLOCK | SOCK_CLOEXEC, 0));\n+\n+ // Set and unset pid.\n+ f_owner_ex owner = {};\n+ owner.type = F_OWNER_PID;\n+ EXPECT_THAT(owner.pid = getpid(), SyscallSucceeds());\n+ ASSERT_THAT(syscall(__NR_fcntl, s.get(), F_SETOWN_EX, &owner),\n+ SyscallSucceeds());\n+ owner.pid = 0;\n+ ASSERT_THAT(syscall(__NR_fcntl, s.get(), F_SETOWN_EX, &owner),\n+ SyscallSucceeds());\n+\n+ EXPECT_THAT(syscall(__NR_fcntl, s.get(), F_GETOWN),\n+ SyscallSucceedsWithValue(0));\n+\n+ // Set and unset pgid.\n+ owner.type = F_OWNER_PGRP;\n+ EXPECT_THAT(owner.pid = getpgrp(), SyscallSucceeds());\n+ ASSERT_THAT(syscall(__NR_fcntl, s.get(), F_SETOWN_EX, &owner),\n+ SyscallSucceeds());\n+ owner.pid = 0;\n+ ASSERT_THAT(syscall(__NR_fcntl, s.get(), F_SETOWN_EX, &owner),\n+ SyscallSucceeds());\n+\n+ EXPECT_THAT(syscall(__NR_fcntl, s.get(), F_GETOWN),\n+ SyscallSucceedsWithValue(0));\n+ MaybeSave();\n+}\n+\nTEST(FcntlTest, GetOwnExTid) {\nFileDescriptor s = ASSERT_NO_ERRNO_AND_VALUE(\nSocket(AF_UNIX, SOCK_SEQPACKET | SOCK_NONBLOCK | SOCK_CLOEXEC, 0));\n"
}
] | Go | Apache License 2.0 | google/gvisor | Complete async signal delivery support in vfs2.
- Support FIOASYNC, FIO{SET,GET}OWN, SIOC{G,S}PGRP (refactor getting/setting
owner in the process).
- Unset signal recipient when setting owner with pid == 0 and
valid owner type.
Updates #2923.
PiperOrigin-RevId: 319231420 |
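One detail in the change above is the FIOSETOWN/SIOCSPGRP sign convention: a negative "who" names a process group, and negating the most negative int32 would overflow, so that value is rejected up front. A standalone Go sketch of that decoding; the helper name and shape are illustrative, not the sentry's:

    package main

    import (
    	"fmt"
    	"math"

    	"golang.org/x/sys/unix"
    )

    // decodeOwner maps the ioctl argument onto an owner type and a positive id.
    func decodeOwner(who int32) (ownerType int, pid int32, err error) {
    	ownerType = unix.F_OWNER_PID
    	if who < 0 {
    		if who == math.MinInt32 { // -who would overflow int32
    			return 0, 0, unix.EINVAL
    		}
    		ownerType = unix.F_OWNER_PGRP
    		who = -who
    	}
    	return ownerType, who, nil
    }

    func main() {
    	fmt.Println(decodeOwner(1234))          // process owner
    	fmt.Println(decodeOwner(-1234))         // process-group owner
    	fmt.Println(decodeOwner(math.MinInt32)) // rejected with EINVAL
    }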
259,881 | 01.07.2020 08:51:57 | 25,200 | 068716ddf36f4dcb3d88e92b90774dcba2fe4db8 | Fix FAQ URL
The existing gvisor.dev/faq link returns 404 because the full URL has
mistakenly been capitalized. | [
{
"change_type": "MODIFY",
"old_path": "g3doc/user_guide/BUILD",
"new_path": "g3doc/user_guide/BUILD",
"diff": "@@ -33,7 +33,7 @@ doc(\nname = \"FAQ\",\nsrc = \"FAQ.md\",\ncategory = \"User Guide\",\n- permalink = \"/docs/user_guide/FAQ/\",\n+ permalink = \"/docs/user_guide/faq/\",\nweight = \"90\",\n)\n"
},
{
"change_type": "MODIFY",
"old_path": "website/_includes/footer-links.html",
"new_path": "website/_includes/footer-links.html",
"diff": "<ul class=\"list-unstyled\">\n<li><a href=\"https://github.com/google/gvisor/issues\">Issues</a></li>\n<li><a href=\"/docs\">Documentation</a></li>\n- <li><a href=\"/docs/user_guide/FAQ\">FAQ</a></li>\n+ <li><a href=\"/docs/user_guide/faq\">FAQ</a></li>\n</ul>\n</div>\n<div class=\"col-sm-3 col-md-2\">\n"
},
{
"change_type": "MODIFY",
"old_path": "website/cmd/server/main.go",
"new_path": "website/cmd/server/main.go",
"diff": "@@ -35,6 +35,10 @@ var redirects = map[string]string{\n// For links.\n\"/faq\": \"/docs/user_guide/faq/\",\n+ // From 2020-05-12 to 2020-06-30, the FAQ URL was uppercase. Redirect that\n+ // back to maintain any links.\n+ \"/docs/user_guide/FAQ/\": \"/docs/user_guide/faq/\",\n+\n// Redirects to compatibility docs.\n\"/c\": \"/docs/user_guide/compatibility/\",\n\"/c/linux/amd64\": \"/docs/user_guide/compatibility/linux/amd64/\",\n"
}
] | Go | Apache License 2.0 | google/gvisor | Fix FAQ URL
The existing gvisor.dev/faq link returns 404 because the full URL has
mistakenly been capitalized.
PiperOrigin-RevId: 319233173 |
259,975 | 01.07.2020 13:13:04 | 25,200 | 6a90c88b97481a6d81b05f09d5c8ed7158225dd5 | Port fallocate to VFS2. | [
{
"change_type": "MODIFY",
"old_path": "pkg/p9/p9.go",
"new_path": "pkg/p9/p9.go",
"diff": "@@ -1091,6 +1091,19 @@ type AllocateMode struct {\nUnshare bool\n}\n+// ToAllocateMode returns an AllocateMode from a fallocate(2) mode.\n+func ToAllocateMode(mode uint64) AllocateMode {\n+ return AllocateMode{\n+ KeepSize: mode&unix.FALLOC_FL_KEEP_SIZE != 0,\n+ PunchHole: mode&unix.FALLOC_FL_PUNCH_HOLE != 0,\n+ NoHideStale: mode&unix.FALLOC_FL_NO_HIDE_STALE != 0,\n+ CollapseRange: mode&unix.FALLOC_FL_COLLAPSE_RANGE != 0,\n+ ZeroRange: mode&unix.FALLOC_FL_ZERO_RANGE != 0,\n+ InsertRange: mode&unix.FALLOC_FL_INSERT_RANGE != 0,\n+ Unshare: mode&unix.FALLOC_FL_UNSHARE_RANGE != 0,\n+ }\n+}\n+\n// ToLinux converts to a value compatible with fallocate(2)'s mode.\nfunc (a *AllocateMode) ToLinux() uint32 {\nrv := uint32(0)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/gofer/regular_file.go",
"new_path": "pkg/sentry/fsimpl/gofer/regular_file.go",
"diff": "@@ -24,6 +24,7 @@ import (\n\"gvisor.dev/gvisor/pkg/abi/linux\"\n\"gvisor.dev/gvisor/pkg/context\"\n\"gvisor.dev/gvisor/pkg/log\"\n+ \"gvisor.dev/gvisor/pkg/p9\"\n\"gvisor.dev/gvisor/pkg/safemem\"\n\"gvisor.dev/gvisor/pkg/sentry/fs/fsutil\"\n\"gvisor.dev/gvisor/pkg/sentry/memmap\"\n@@ -67,6 +68,34 @@ func (fd *regularFileFD) OnClose(ctx context.Context) error {\nreturn d.handle.file.flush(ctx)\n}\n+// Allocate implements vfs.FileDescriptionImpl.Allocate.\n+func (fd *regularFileFD) Allocate(ctx context.Context, mode, offset, length uint64) error {\n+\n+ d := fd.dentry()\n+ d.metadataMu.Lock()\n+ defer d.metadataMu.Unlock()\n+\n+ size := offset + length\n+\n+ // Allocating a smaller size is a noop.\n+ if size <= d.size {\n+ return nil\n+ }\n+\n+ d.handleMu.Lock()\n+ defer d.handleMu.Unlock()\n+\n+ err := d.handle.file.allocate(ctx, p9.ToAllocateMode(mode), offset, length)\n+ if err != nil {\n+ return err\n+ }\n+ d.size = size\n+ if !d.cachedMetadataAuthoritative() {\n+ d.touchCMtimeLocked()\n+ }\n+ return nil\n+}\n+\n// PRead implements vfs.FileDescriptionImpl.PRead.\nfunc (fd *regularFileFD) PRead(ctx context.Context, dst usermem.IOSequence, offset int64, opts vfs.ReadOptions) (int64, error) {\nif offset < 0 {\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/host/host.go",
"new_path": "pkg/sentry/fsimpl/host/host.go",
"diff": "@@ -543,6 +543,16 @@ func (f *fileDescription) Release() {\n// noop\n}\n+// Allocate implements vfs.FileDescriptionImpl.\n+func (f *fileDescription) Allocate(ctx context.Context, mode, offset, length uint64) error {\n+ if !f.inode.seekable {\n+ return syserror.ESPIPE\n+ }\n+\n+ // TODO(gvisor.dev/issue/2923): Implement Allocate for non-pipe hostfds.\n+ return syserror.EOPNOTSUPP\n+}\n+\n// PRead implements FileDescriptionImpl.\nfunc (f *fileDescription) PRead(ctx context.Context, dst usermem.IOSequence, offset int64, opts vfs.ReadOptions) (int64, error) {\ni := f.inode\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/kernfs/fd_impl_util.go",
"new_path": "pkg/sentry/fsimpl/kernfs/fd_impl_util.go",
"diff": "@@ -236,6 +236,11 @@ func (fd *GenericDirectoryFD) SetStat(ctx context.Context, opts vfs.SetStatOptio\nreturn inode.SetStat(ctx, fd.filesystem(), creds, opts)\n}\n+// Allocate implements vfs.FileDescriptionImpl.Allocate.\n+func (fd *GenericDirectoryFD) Allocate(ctx context.Context, mode, offset, length uint64) error {\n+ return fd.DirectoryFileDescriptionDefaultImpl.Allocate(ctx, mode, offset, length)\n+}\n+\n// LockPOSIX implements vfs.FileDescriptionImpl.LockPOSIX.\nfunc (fd *GenericDirectoryFD) LockPOSIX(ctx context.Context, uid fslock.UniqueID, t fslock.LockType, start, length uint64, whence int16, block fslock.Blocker) error {\nreturn fd.Locks().LockPOSIX(ctx, &fd.vfsfd, uid, t, start, length, whence, block)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/tmpfs/regular_file.go",
"new_path": "pkg/sentry/fsimpl/tmpfs/regular_file.go",
"diff": "@@ -274,6 +274,21 @@ func (fd *regularFileFD) Release() {\n// noop\n}\n+// Allocate implements vfs.FileDescriptionImpl.Allocate.\n+func (fd *regularFileFD) Allocate(ctx context.Context, mode, offset, length uint64) error {\n+ f := fd.inode().impl.(*regularFile)\n+\n+ f.inode.mu.Lock()\n+ defer f.inode.mu.Unlock()\n+ oldSize := f.size\n+ size := offset + length\n+ if oldSize >= size {\n+ return nil\n+ }\n+ _, err := f.truncateLocked(size)\n+ return err\n+}\n+\n// PRead implements vfs.FileDescriptionImpl.PRead.\nfunc (fd *regularFileFD) PRead(ctx context.Context, dst usermem.IOSequence, offset int64, opts vfs.ReadOptions) (int64, error) {\nif offset < 0 {\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/pipe/vfs.go",
"new_path": "pkg/sentry/kernel/pipe/vfs.go",
"diff": "@@ -200,6 +200,11 @@ func (fd *VFSPipeFD) Readiness(mask waiter.EventMask) waiter.EventMask {\n}\n}\n+// Allocate implements vfs.FileDescriptionImpl.Allocate.\n+func (fd *VFSPipeFD) Allocate(ctx context.Context, mode, offset, length uint64) error {\n+ return syserror.ESPIPE\n+}\n+\n// EventRegister implements waiter.Waitable.EventRegister.\nfunc (fd *VFSPipeFD) EventRegister(e *waiter.Entry, mask waiter.EventMask) {\nfd.pipe.EventRegister(e, mask)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/hostinet/socket_vfs2.go",
"new_path": "pkg/sentry/socket/hostinet/socket_vfs2.go",
"diff": "@@ -96,7 +96,12 @@ func (s *socketVFS2) Ioctl(ctx context.Context, uio usermem.IO, args arch.Syscal\nreturn ioctl(ctx, s.fd, uio, args)\n}\n-// PRead implements vfs.FileDescriptionImpl.\n+// Allocate implements vfs.FileDescriptionImpl.Allocate.\n+func (s *socketVFS2) Allocate(ctx context.Context, mode, offset, length uint64) error {\n+ return syserror.ENODEV\n+}\n+\n+// PRead implements vfs.FileDescriptionImpl.PRead.\nfunc (s *socketVFS2) PRead(ctx context.Context, dst usermem.IOSequence, offset int64, opts vfs.ReadOptions) (int64, error) {\nreturn 0, syserror.ESPIPE\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/syscalls/linux/vfs2/filesystem.go",
"new_path": "pkg/sentry/syscalls/linux/vfs2/filesystem.go",
"diff": "@@ -18,6 +18,7 @@ import (\n\"gvisor.dev/gvisor/pkg/abi/linux\"\n\"gvisor.dev/gvisor/pkg/sentry/arch\"\n\"gvisor.dev/gvisor/pkg/sentry/kernel\"\n+ \"gvisor.dev/gvisor/pkg/sentry/limits\"\n\"gvisor.dev/gvisor/pkg/sentry/vfs\"\n\"gvisor.dev/gvisor/pkg/syserror\"\n\"gvisor.dev/gvisor/pkg/usermem\"\n@@ -239,6 +240,55 @@ func renameat(t *kernel.Task, olddirfd int32, oldpathAddr usermem.Addr, newdirfd\n})\n}\n+// Fallocate implements linux system call fallocate(2).\n+func Fallocate(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {\n+ fd := args[0].Int()\n+ mode := args[1].Uint64()\n+ offset := args[2].Int64()\n+ length := args[3].Int64()\n+\n+ file := t.GetFileVFS2(fd)\n+\n+ if file == nil {\n+ return 0, nil, syserror.EBADF\n+ }\n+ defer file.DecRef()\n+\n+ if !file.IsWritable() {\n+ return 0, nil, syserror.EBADF\n+ }\n+\n+ if mode != 0 {\n+ return 0, nil, syserror.ENOTSUP\n+ }\n+\n+ if offset < 0 || length <= 0 {\n+ return 0, nil, syserror.EINVAL\n+ }\n+\n+ size := offset + length\n+\n+ if size < 0 {\n+ return 0, nil, syserror.EFBIG\n+ }\n+\n+ limit := limits.FromContext(t).Get(limits.FileSize).Cur\n+\n+ if uint64(size) >= limit {\n+ t.SendSignal(&arch.SignalInfo{\n+ Signo: int32(linux.SIGXFSZ),\n+ Code: arch.SignalInfoUser,\n+ })\n+ return 0, nil, syserror.EFBIG\n+ }\n+\n+ return 0, nil, file.Impl().Allocate(t, mode, uint64(offset), uint64(length))\n+\n+ // File length modified, generate notification.\n+ // TODO(gvisor.dev/issue/1479): Reenable when Inotify is ported.\n+ // file.Dirent.InotifyEvent(linux.IN_MODIFY, 0)\n+}\n+\n// Rmdir implements Linux syscall rmdir(2).\nfunc Rmdir(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {\npathAddr := args[0].Pointer()\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/syscalls/linux/vfs2/vfs2.go",
"new_path": "pkg/sentry/syscalls/linux/vfs2/vfs2.go",
"diff": "@@ -138,7 +138,7 @@ func Override() {\ns.Table[282] = syscalls.Supported(\"signalfd\", Signalfd)\ns.Table[283] = syscalls.Supported(\"timerfd_create\", TimerfdCreate)\ns.Table[284] = syscalls.Supported(\"eventfd\", Eventfd)\n- delete(s.Table, 285) // fallocate\n+ s.Table[285] = syscalls.PartiallySupported(\"fallocate\", Fallocate, \"Not all options are supported.\", nil)\ns.Table[286] = syscalls.Supported(\"timerfd_settime\", TimerfdSettime)\ns.Table[287] = syscalls.Supported(\"timerfd_gettime\", TimerfdGettime)\ns.Table[288] = syscalls.Supported(\"accept4\", Accept4)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/vfs/file_description.go",
"new_path": "pkg/sentry/vfs/file_description.go",
"diff": "@@ -354,6 +354,10 @@ type FileDescriptionImpl interface {\n// represented by the FileDescription.\nStatFS(ctx context.Context) (linux.Statfs, error)\n+ // Allocate grows file represented by FileDescription to offset + length bytes.\n+ // Only mode == 0 is supported currently.\n+ Allocate(ctx context.Context, mode, offset, length uint64) error\n+\n// waiter.Waitable methods may be used to poll for I/O events.\nwaiter.Waitable\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/vfs/file_description_impl_util.go",
"new_path": "pkg/sentry/vfs/file_description_impl_util.go",
"diff": "@@ -56,6 +56,12 @@ func (FileDescriptionDefaultImpl) StatFS(ctx context.Context) (linux.Statfs, err\nreturn linux.Statfs{}, syserror.ENOSYS\n}\n+// Allocate implements FileDescriptionImpl.Allocate analogously to\n+// fallocate called on regular file, directory or FIFO in Linux.\n+func (FileDescriptionDefaultImpl) Allocate(ctx context.Context, mode, offset, length uint64) error {\n+ return syserror.ENODEV\n+}\n+\n// Readiness implements waiter.Waitable.Readiness analogously to\n// file_operations::poll == NULL in Linux.\nfunc (FileDescriptionDefaultImpl) Readiness(mask waiter.EventMask) waiter.EventMask {\n@@ -158,6 +164,11 @@ func (FileDescriptionDefaultImpl) Removexattr(ctx context.Context, name string)\n// implementations of non-directory I/O methods that return EISDIR.\ntype DirectoryFileDescriptionDefaultImpl struct{}\n+// Allocate implements DirectoryFileDescriptionDefaultImpl.Allocate.\n+func (DirectoryFileDescriptionDefaultImpl) Allocate(ctx context.Context, mode, offset, length uint64) error {\n+ return syserror.EISDIR\n+}\n+\n// PRead implements FileDescriptionImpl.PRead.\nfunc (DirectoryFileDescriptionDefaultImpl) PRead(ctx context.Context, dst usermem.IOSequence, offset int64, opts ReadOptions) (int64, error) {\nreturn 0, syserror.EISDIR\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/vfs/inotify.go",
"new_path": "pkg/sentry/vfs/inotify.go",
"diff": "@@ -148,6 +148,11 @@ func (i *Inotify) Release() {\n}\n}\n+// Allocate implements FileDescription.Allocate.\n+func (i *Inotify) Allocate(ctx context.Context, mode, offset, length uint64) error {\n+ panic(\"Allocate should not be called on read-only inotify fds\")\n+}\n+\n// EventRegister implements waiter.Waitable.\nfunc (i *Inotify) EventRegister(e *waiter.Entry, mask waiter.EventMask) {\ni.queue.EventRegister(e, mask)\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/BUILD",
"new_path": "test/syscalls/BUILD",
"diff": "@@ -199,6 +199,7 @@ syscall_test(\nsyscall_test(\nadd_overlay = True,\ntest = \"//test/syscalls/linux:fallocate_test\",\n+ vfs2 = \"True\",\n)\nsyscall_test(\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/BUILD",
"new_path": "test/syscalls/linux/BUILD",
"diff": "@@ -748,9 +748,14 @@ cc_binary(\nlinkstatic = 1,\ndeps = [\n\":file_base\",\n+ \":socket_test_util\",\n\"//test/util:cleanup\",\n+ \"//test/util:eventfd_util\",\n\"//test/util:file_descriptor\",\n+ \"@com_google_absl//absl/strings\",\n+ \"@com_google_absl//absl/time\",\ngtest,\n+ \"//test/util:posix_error\",\n\"//test/util:temp_path\",\n\"//test/util:test_main\",\n\"//test/util:test_util\",\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/fallocate.cc",
"new_path": "test/syscalls/linux/fallocate.cc",
"diff": "#include <errno.h>\n#include <fcntl.h>\n#include <signal.h>\n+#include <sys/eventfd.h>\n#include <sys/resource.h>\n+#include <sys/signalfd.h>\n+#include <sys/socket.h>\n#include <sys/stat.h>\n+#include <sys/timerfd.h>\n#include <syscall.h>\n#include <time.h>\n#include <unistd.h>\n+#include <ctime>\n+\n#include \"gtest/gtest.h\"\n+#include \"absl/strings/str_cat.h\"\n+#include \"absl/time/time.h\"\n#include \"test/syscalls/linux/file_base.h\"\n+#include \"test/syscalls/linux/socket_test_util.h\"\n#include \"test/util/cleanup.h\"\n+#include \"test/util/eventfd_util.h\"\n#include \"test/util/file_descriptor.h\"\n+#include \"test/util/posix_error.h\"\n#include \"test/util/temp_path.h\"\n#include \"test/util/test_util.h\"\n@@ -70,6 +81,12 @@ TEST_F(AllocateTest, Fallocate) {\nASSERT_THAT(fallocate(test_file_fd_.get(), 0, 39, 1), SyscallSucceeds());\nASSERT_THAT(fstat(test_file_fd_.get(), &buf), SyscallSucceeds());\nEXPECT_EQ(buf.st_size, 40);\n+\n+ // Given length 0 should fail with EINVAL.\n+ ASSERT_THAT(fallocate(test_file_fd_.get(), 0, 50, 0),\n+ SyscallFailsWithErrno(EINVAL));\n+ ASSERT_THAT(fstat(test_file_fd_.get(), &buf), SyscallSucceeds());\n+ EXPECT_EQ(buf.st_size, 40);\n}\nTEST_F(AllocateTest, FallocateInvalid) {\n@@ -136,6 +153,34 @@ TEST_F(AllocateTest, FallocateRlimit) {\nASSERT_THAT(sigprocmask(SIG_UNBLOCK, &new_mask, nullptr), SyscallSucceeds());\n}\n+TEST_F(AllocateTest, FallocateOtherFDs) {\n+ int fd;\n+ ASSERT_THAT(fd = timerfd_create(CLOCK_MONOTONIC, 0), SyscallSucceeds());\n+ auto timer_fd = FileDescriptor(fd);\n+ EXPECT_THAT(fallocate(timer_fd.get(), 0, 0, 10),\n+ SyscallFailsWithErrno(ENODEV));\n+\n+ sigset_t mask;\n+ sigemptyset(&mask);\n+ ASSERT_THAT(fd = signalfd(-1, &mask, 0), SyscallSucceeds());\n+ auto sfd = FileDescriptor(fd);\n+ EXPECT_THAT(fallocate(sfd.get(), 0, 0, 10), SyscallFailsWithErrno(ENODEV));\n+\n+ auto efd =\n+ ASSERT_NO_ERRNO_AND_VALUE(NewEventFD(0, EFD_NONBLOCK | EFD_SEMAPHORE));\n+ EXPECT_THAT(fallocate(efd.get(), 0, 0, 10), SyscallFailsWithErrno(ENODEV));\n+\n+ auto sockfd = ASSERT_NO_ERRNO_AND_VALUE(Socket(AF_INET, SOCK_DGRAM, 0));\n+ EXPECT_THAT(fallocate(sockfd.get(), 0, 0, 10), SyscallFailsWithErrno(ENODEV));\n+\n+ int socks[2];\n+ ASSERT_THAT(socketpair(AF_UNIX, SOCK_STREAM, PF_UNIX, socks),\n+ SyscallSucceeds());\n+ auto sock0 = FileDescriptor(socks[0]);\n+ auto sock1 = FileDescriptor(socks[1]);\n+ EXPECT_THAT(fallocate(sock0.get(), 0, 0, 10), SyscallFailsWithErrno(ENODEV));\n+}\n+\n} // namespace\n} // namespace testing\n} // namespace gvisor\n"
}
] | Go | Apache License 2.0 | google/gvisor | Port fallocate to VFS2.
PiperOrigin-RevId: 319283715 |
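From userspace the newly wired syscall is reached through the usual wrapper. A minimal Go usage sketch (the path is illustrative) exercising mode 0, the only mode the port above accepts; other modes return ENOTSUP, and offset < 0 or length <= 0 return EINVAL:

    package main

    import (
    	"fmt"
    	"os"

    	"golang.org/x/sys/unix"
    )

    func main() {
    	f, err := os.Create("/tmp/fallocate-demo") // illustrative path
    	if err != nil {
    		panic(err)
    	}
    	defer f.Close()

    	// mode 0 reserves space and, past EOF, grows the file to offset+length.
    	if err := unix.Fallocate(int(f.Fd()), 0, 0, 1<<20); err != nil {
    		panic(err)
    	}

    	fi, err := f.Stat()
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(fi.Size()) // 1048576
    }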
259,860 | 01.07.2020 15:17:39 | 25,200 | e4b2087602a9217098559347d82d316a8cef8140 | Use directory fds in sticky test to avoid permission issues.
After we change credentials, it is possible that we no longer have access to
the sticky directory where we are trying to delete files. Use an open fd so
this is not an issue. | [
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/sticky.cc",
"new_path": "test/syscalls/linux/sticky.cc",
"diff": "@@ -42,12 +42,15 @@ TEST(StickyTest, StickyBitPermDenied) {\nconst TempPath parent = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());\nEXPECT_THAT(chmod(parent.path().c_str(), 0777 | S_ISVTX), SyscallSucceeds());\n- const TempPath file = ASSERT_NO_ERRNO_AND_VALUE(\n- TempPath::CreateFileWith(parent.path(), \"some content\", 0755));\n- const TempPath dir =\n- ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDirWith(parent.path(), 0755));\n- const TempPath link = ASSERT_NO_ERRNO_AND_VALUE(\n- TempPath::CreateSymlinkTo(parent.path(), file.path()));\n+\n+ // After changing credentials below, we need to use an open fd to make\n+ // modifications in the parent dir, because there is no guarantee that we will\n+ // still have the ability to open it.\n+ const FileDescriptor parent_fd =\n+ ASSERT_NO_ERRNO_AND_VALUE(Open(parent.path(), O_DIRECTORY));\n+ ASSERT_THAT(openat(parent_fd.get(), \"file\", O_CREAT), SyscallSucceeds());\n+ ASSERT_THAT(mkdirat(parent_fd.get(), \"dir\", 0777), SyscallSucceeds());\n+ ASSERT_THAT(symlinkat(\"xyz\", parent_fd.get(), \"link\"), SyscallSucceeds());\n// Drop privileges and change IDs only in child thread, or else this parent\n// thread won't be able to open some log files after the test ends.\n@@ -65,12 +68,14 @@ TEST(StickyTest, StickyBitPermDenied) {\nsyscall(SYS_setresuid, -1, absl::GetFlag(FLAGS_scratch_uid), -1),\nSyscallSucceeds());\n- std::string new_path = NewTempAbsPath();\n- EXPECT_THAT(rename(file.path().c_str(), new_path.c_str()),\n+ EXPECT_THAT(renameat(parent_fd.get(), \"file\", parent_fd.get(), \"file2\"),\n+ SyscallFailsWithErrno(EPERM));\n+ EXPECT_THAT(unlinkat(parent_fd.get(), \"file\", 0),\n+ SyscallFailsWithErrno(EPERM));\n+ EXPECT_THAT(unlinkat(parent_fd.get(), \"dir\", AT_REMOVEDIR),\n+ SyscallFailsWithErrno(EPERM));\n+ EXPECT_THAT(unlinkat(parent_fd.get(), \"link\", 0),\nSyscallFailsWithErrno(EPERM));\n- EXPECT_THAT(unlink(file.path().c_str()), SyscallFailsWithErrno(EPERM));\n- EXPECT_THAT(rmdir(dir.path().c_str()), SyscallFailsWithErrno(EPERM));\n- EXPECT_THAT(unlink(link.path().c_str()), SyscallFailsWithErrno(EPERM));\n});\n}\n@@ -79,12 +84,15 @@ TEST(StickyTest, StickyBitSameUID) {\nconst TempPath parent = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());\nEXPECT_THAT(chmod(parent.path().c_str(), 0777 | S_ISVTX), SyscallSucceeds());\n- const TempPath file = ASSERT_NO_ERRNO_AND_VALUE(\n- TempPath::CreateFileWith(parent.path(), \"some content\", 0755));\n- const TempPath dir =\n- ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDirWith(parent.path(), 0755));\n- const TempPath link = ASSERT_NO_ERRNO_AND_VALUE(\n- TempPath::CreateSymlinkTo(parent.path(), file.path()));\n+\n+ // After changing credentials below, we need to use an open fd to make\n+ // modifications in the parent dir, because there is no guarantee that we will\n+ // still have the ability to open it.\n+ const FileDescriptor parent_fd =\n+ ASSERT_NO_ERRNO_AND_VALUE(Open(parent.path(), O_DIRECTORY));\n+ ASSERT_THAT(openat(parent_fd.get(), \"file\", O_CREAT), SyscallSucceeds());\n+ ASSERT_THAT(mkdirat(parent_fd.get(), \"dir\", 0777), SyscallSucceeds());\n+ ASSERT_THAT(symlinkat(\"xyz\", parent_fd.get(), \"link\"), SyscallSucceeds());\n// Drop privileges and change IDs only in child thread, or else this parent\n// thread won't be able to open some log files after the test ends.\n@@ -100,12 +108,12 @@ TEST(StickyTest, StickyBitSameUID) {\nSyscallSucceeds());\n// We still have the same EUID.\n- std::string new_path = NewTempAbsPath();\n- EXPECT_THAT(rename(file.path().c_str(), 
new_path.c_str()),\n+ EXPECT_THAT(renameat(parent_fd.get(), \"file\", parent_fd.get(), \"file2\"),\n+ SyscallSucceeds());\n+ EXPECT_THAT(unlinkat(parent_fd.get(), \"file2\", 0), SyscallSucceeds());\n+ EXPECT_THAT(unlinkat(parent_fd.get(), \"dir\", AT_REMOVEDIR),\nSyscallSucceeds());\n- EXPECT_THAT(unlink(new_path.c_str()), SyscallSucceeds());\n- EXPECT_THAT(rmdir(dir.path().c_str()), SyscallSucceeds());\n- EXPECT_THAT(unlink(link.path().c_str()), SyscallSucceeds());\n+ EXPECT_THAT(unlinkat(parent_fd.get(), \"link\", 0), SyscallSucceeds());\n});\n}\n@@ -114,12 +122,15 @@ TEST(StickyTest, StickyBitCapFOWNER) {\nconst TempPath parent = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());\nEXPECT_THAT(chmod(parent.path().c_str(), 0777 | S_ISVTX), SyscallSucceeds());\n- const TempPath file = ASSERT_NO_ERRNO_AND_VALUE(\n- TempPath::CreateFileWith(parent.path(), \"some content\", 0755));\n- const TempPath dir =\n- ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDirWith(parent.path(), 0755));\n- const TempPath link = ASSERT_NO_ERRNO_AND_VALUE(\n- TempPath::CreateSymlinkTo(parent.path(), file.path()));\n+\n+ // After changing credentials below, we need to use an open fd to make\n+ // modifications in the parent dir, because there is no guarantee that we will\n+ // still have the ability to open it.\n+ const FileDescriptor parent_fd =\n+ ASSERT_NO_ERRNO_AND_VALUE(Open(parent.path(), O_DIRECTORY));\n+ ASSERT_THAT(openat(parent_fd.get(), \"file\", O_CREAT), SyscallSucceeds());\n+ ASSERT_THAT(mkdirat(parent_fd.get(), \"dir\", 0777), SyscallSucceeds());\n+ ASSERT_THAT(symlinkat(\"xyz\", parent_fd.get(), \"link\"), SyscallSucceeds());\n// Drop privileges and change IDs only in child thread, or else this parent\n// thread won't be able to open some log files after the test ends.\n@@ -136,12 +147,12 @@ TEST(StickyTest, StickyBitCapFOWNER) {\nSyscallSucceeds());\nEXPECT_NO_ERRNO(SetCapability(CAP_FOWNER, true));\n- std::string new_path = NewTempAbsPath();\n- EXPECT_THAT(rename(file.path().c_str(), new_path.c_str()),\n+ EXPECT_THAT(renameat(parent_fd.get(), \"file\", parent_fd.get(), \"file2\"),\n+ SyscallSucceeds());\n+ EXPECT_THAT(unlinkat(parent_fd.get(), \"file2\", 0), SyscallSucceeds());\n+ EXPECT_THAT(unlinkat(parent_fd.get(), \"dir\", AT_REMOVEDIR),\nSyscallSucceeds());\n- EXPECT_THAT(unlink(new_path.c_str()), SyscallSucceeds());\n- EXPECT_THAT(rmdir(dir.path().c_str()), SyscallSucceeds());\n- EXPECT_THAT(unlink(link.path().c_str()), SyscallSucceeds());\n+ EXPECT_THAT(unlinkat(parent_fd.get(), \"link\", 0), SyscallSucceeds());\n});\n}\n} // namespace\n"
}
] | Go | Apache License 2.0 | google/gvisor | Use directory fds in sticky test to avoid permission issues.
After we change credentials, it is possible that we no longer have access to
the sticky directory where we are trying to delete files. Use an open fd so
this is not an issue.
PiperOrigin-RevId: 319306255 |
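The pattern the test switches to generalizes beyond this test: hold an O_DIRECTORY fd while you still have access, then perform all later modifications relative to that fd with the *at() syscalls. A small Go sketch of the same idea under an assumed scratch directory (paths are illustrative, and privilege dropping is elided):

    package main

    import "golang.org/x/sys/unix"

    func main() {
    	// Set up an illustrative sticky directory with one file and one subdir.
    	dir := "/tmp/sticky-demo"
    	if err := unix.Mkdir(dir, 0o777|unix.S_ISVTX); err != nil && err != unix.EEXIST {
    		panic(err)
    	}
    	parentFD, err := unix.Open(dir, unix.O_DIRECTORY|unix.O_RDONLY, 0)
    	if err != nil {
    		panic(err)
    	}
    	defer unix.Close(parentFD)
    	fd, err := unix.Openat(parentFD, "file", unix.O_CREAT|unix.O_RDWR, 0o644)
    	if err != nil {
    		panic(err)
    	}
    	unix.Close(fd)
    	if err := unix.Mkdirat(parentFD, "dir", 0o777); err != nil && err != unix.EEXIST {
    		panic(err)
    	}

    	// ... credentials would be dropped here (setresuid/setresgid) ...

    	// Later modifications go through the saved fd, so losing the ability to
    	// re-open the parent path no longer matters.
    	if err := unix.Renameat(parentFD, "file", parentFD, "file2"); err != nil {
    		panic(err)
    	}
    	if err := unix.Unlinkat(parentFD, "file2", 0); err != nil {
    		panic(err)
    	}
    	if err := unix.Unlinkat(parentFD, "dir", unix.AT_REMOVEDIR); err != nil {
    		panic(err)
    	}
    }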
260,023 | 01.07.2020 15:46:02 | 25,200 | 31b27adf9b63dcefd0a753908bf984aa1f78b394 | TCP receive should block when in SYN-SENT state.
The application can choose to initiate a non-blocking connect and
later block on a read, when the endpoint is still in SYN-SENT state. | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/endpoint.go",
"new_path": "pkg/tcpip/transport/tcp/endpoint.go",
"diff": "@@ -1212,6 +1212,16 @@ func (e *endpoint) SetOwner(owner tcpip.PacketOwner) {\n// Read reads data from the endpoint.\nfunc (e *endpoint) Read(*tcpip.FullAddress) (buffer.View, tcpip.ControlMessages, *tcpip.Error) {\ne.LockUser()\n+ defer e.UnlockUser()\n+\n+ // When in SYN-SENT state, let the caller block on the receive.\n+ // An application can initiate a non-blocking connect and then block\n+ // on a receive. It can expect to read any data after the handshake\n+ // is complete. RFC793, section 3.9, p58.\n+ if e.EndpointState() == StateSynSent {\n+ return buffer.View{}, tcpip.ControlMessages{}, tcpip.ErrWouldBlock\n+ }\n+\n// The endpoint can be read if it's connected, or if it's already closed\n// but has some pending unread data. Also note that a RST being received\n// would cause the state to become StateError so we should allow the\n@@ -1221,7 +1231,6 @@ func (e *endpoint) Read(*tcpip.FullAddress) (buffer.View, tcpip.ControlMessages,\nif s := e.EndpointState(); !s.connected() && s != StateClose && bufUsed == 0 {\ne.rcvListMu.Unlock()\nhe := e.HardError\n- e.UnlockUser()\nif s == StateError {\nreturn buffer.View{}, tcpip.ControlMessages{}, he\n}\n@@ -1231,7 +1240,6 @@ func (e *endpoint) Read(*tcpip.FullAddress) (buffer.View, tcpip.ControlMessages,\nv, err := e.readLocked()\ne.rcvListMu.Unlock()\n- e.UnlockUser()\nif err == tcpip.ErrClosedForReceive {\ne.stats.ReadErrors.ReadClosed.Increment()\n"
},
{
"change_type": "MODIFY",
"old_path": "test/packetimpact/tests/BUILD",
"new_path": "test/packetimpact/tests/BUILD",
"diff": "@@ -183,8 +183,6 @@ packetimpact_go_test(\npacketimpact_go_test(\nname = \"tcp_queue_receive_in_syn_sent\",\nsrcs = [\"tcp_queue_receive_in_syn_sent_test.go\"],\n- # TODO(b/157658105): Fix netstack then remove the line below.\n- expect_netstack_failure = True,\ndeps = [\n\"//pkg/tcpip/header\",\n\"//test/packetimpact/testbench\",\n"
},
{
"change_type": "MODIFY",
"old_path": "test/packetimpact/tests/tcp_queue_receive_in_syn_sent_test.go",
"new_path": "test/packetimpact/tests/tcp_queue_receive_in_syn_sent_test.go",
"diff": "@@ -35,7 +35,20 @@ func init() {\ntestbench.RegisterFlags(flag.CommandLine)\n}\n+// TestQueueReceiveInSynSent tests receive behavior when the TCP state\n+// is SYN-SENT.\n+// It tests for 2 variants where the receive is blocked and:\n+// (1) we complete handshake and send sample data.\n+// (2) we send a TCP RST.\nfunc TestQueueReceiveInSynSent(t *testing.T) {\n+ for _, tt := range []struct {\n+ description string\n+ reset bool\n+ }{\n+ {description: \"Send DATA\", reset: false},\n+ {description: \"Send RST\", reset: true},\n+ } {\n+ t.Run(tt.description, func(t *testing.T) {\ndut := testbench.NewDUT(t)\ndefer dut.TearDown()\n@@ -53,28 +66,55 @@ func TestQueueReceiveInSynSent(t *testing.T) {\nt.Fatalf(\"expected a SYN from DUT, but got none: %s\", err)\n}\n- // Issue RECEIVE call in SYN-SENT, this should be queued for process until the connection\n- // is established.\n+ if _, _, err := dut.RecvWithErrno(context.Background(), socket, int32(len(sampleData)), 0); err != syscall.Errno(unix.EWOULDBLOCK) {\n+ t.Fatalf(\"expected error %s, got %s\", syscall.Errno(unix.EWOULDBLOCK), err)\n+ }\n+\n+ // Test blocking read.\ndut.SetNonBlocking(socket, false)\n+\nvar wg sync.WaitGroup\ndefer wg.Wait()\nwg.Add(1)\n+ var block sync.WaitGroup\n+ block.Add(1)\ngo func() {\ndefer wg.Done()\nctx, cancel := context.WithTimeout(context.Background(), time.Second*3)\ndefer cancel()\n+\n+ block.Done()\n+ // Issue RECEIVE call in SYN-SENT, this should be queued for\n+ // process until the connection is established.\nn, buff, err := dut.RecvWithErrno(ctx, socket, int32(len(sampleData)), 0)\n+ if tt.reset {\n+ if err != syscall.Errno(unix.ECONNREFUSED) {\n+ t.Errorf(\"expected error %s, got %s\", syscall.Errno(unix.ECONNREFUSED), err)\n+ }\n+ if n != -1 {\n+ t.Errorf(\"expected return value %d, got %d\", -1, n)\n+ }\n+ return\n+ }\nif n == -1 {\n- t.Fatalf(\"failed to recv on DUT: %s\", err)\n+ t.Errorf(\"failed to recv on DUT: %s\", err)\n}\nif got := buff[:n]; !bytes.Equal(got, sampleData) {\n- t.Fatalf(\"received data don't match, got:\\n%s, want:\\n%s\", hex.Dump(got), hex.Dump(sampleData))\n+ t.Errorf(\"received data doesn't match, got:\\n%s, want:\\n%s\", hex.Dump(got), hex.Dump(sampleData))\n}\n}()\n- // The following sleep is used to prevent the connection from being established while the\n- // RPC is in flight.\n- time.Sleep(time.Second)\n+ // Wait for the goroutine to be scheduled and before it\n+ // blocks on endpoint receive.\n+ block.Wait()\n+ // The following sleep is used to prevent the connection\n+ // from being established before we are blocked on Recv.\n+ time.Sleep(100 * time.Millisecond)\n+\n+ if tt.reset {\n+ conn.Send(testbench.TCP{Flags: testbench.Uint8(header.TCPFlagRst | header.TCPFlagAck)})\n+ return\n+ }\n// Bring the connection to Established.\nconn.Send(testbench.TCP{Flags: testbench.Uint8(header.TCPFlagSyn | header.TCPFlagAck)})\n@@ -82,6 +122,11 @@ func TestQueueReceiveInSynSent(t *testing.T) {\nt.Fatalf(\"expected an ACK from DUT, but got none: %s\", err)\n}\n- // Send sample data to DUT.\n+ // Send sample payload and expect an ACK.\nconn.Send(testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)}, &testbench.Payload{Bytes: sampleData})\n+ if _, err := conn.Expect(testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)}, time.Second); err != nil {\n+ t.Fatalf(\"expected an ACK from DUT, but got none: %s\", err)\n+ }\n+ })\n+ }\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | TCP receive should block when in SYN-SENT state.
The application can choose to initiate a non-blocking connect and
later block on a read, when the endpoint is still in SYN-SENT state.
PiperOrigin-RevId: 319311016 |
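The behavior above is visible from ordinary socket code: issue a non-blocking connect, switch the socket back to blocking mode, and read while the endpoint is still in SYN-SENT. With this change the read blocks until the handshake resolves instead of failing outright. A Go sketch against an illustrative peer address:

    package main

    import (
    	"fmt"

    	"golang.org/x/sys/unix"
    )

    func main() {
    	fd, err := unix.Socket(unix.AF_INET, unix.SOCK_STREAM|unix.SOCK_NONBLOCK, 0)
    	if err != nil {
    		panic(err)
    	}
    	defer unix.Close(fd)

    	// Non-blocking connect: returns EINPROGRESS while the SYN is in flight.
    	sa := &unix.SockaddrInet4{Port: 8080, Addr: [4]byte{192, 0, 2, 1}} // illustrative peer
    	if err := unix.Connect(fd, sa); err != nil && err != unix.EINPROGRESS {
    		panic(err)
    	}

    	// Switch to blocking mode and read while still in SYN-SENT. The read
    	// completes with data once the handshake finishes, or fails with
    	// ECONNREFUSED if the peer answers with a RST.
    	if err := unix.SetNonblock(fd, false); err != nil {
    		panic(err)
    	}
    	buf := make([]byte, 1024)
    	n, err := unix.Read(fd, buf)
    	fmt.Println(n, err)
    }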
259,860 | 01.07.2020 17:09:26 | 25,200 | 65d99855583a21b6ea511ea74aa52318d0a1e5b2 | Port vfs1 implementation of sync_file_range to vfs2.
Currently, we always perform a full-file sync which could be extremely
expensive for some applications. Although vfs1 did not fully support
sync_file_range, there were some optimizations that allowed us to skip some
unnecessary write-outs.
Updates | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/gofer/special_file.go",
"new_path": "pkg/sentry/fsimpl/gofer/special_file.go",
"diff": "@@ -235,8 +235,5 @@ func (fd *specialFileFD) Seek(ctx context.Context, offset int64, whence int32) (\n// Sync implements vfs.FileDescriptionImpl.Sync.\nfunc (fd *specialFileFD) Sync(ctx context.Context) error {\n- if !fd.vfsfd.IsWritable() {\n- return nil\n- }\n- return fd.handle.sync(ctx)\n+ return fd.dentry().syncSharedHandle(ctx)\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/syscalls/linux/vfs2/sync.go",
"new_path": "pkg/sentry/syscalls/linux/vfs2/sync.go",
"diff": "@@ -65,10 +65,8 @@ func SyncFileRange(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel\nnbytes := args[2].Int64()\nflags := args[3].Uint()\n- if offset < 0 {\n- return 0, nil, syserror.EINVAL\n- }\n- if nbytes < 0 {\n+ // Check for negative values and overflow.\n+ if offset < 0 || offset+nbytes < 0 {\nreturn 0, nil, syserror.EINVAL\n}\nif flags&^(linux.SYNC_FILE_RANGE_WAIT_BEFORE|linux.SYNC_FILE_RANGE_WRITE|linux.SYNC_FILE_RANGE_WAIT_AFTER) != 0 {\n@@ -81,7 +79,37 @@ func SyncFileRange(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel\n}\ndefer file.DecRef()\n- // TODO(gvisor.dev/issue/1897): Avoid writeback of data ranges outside of\n- // [offset, offset+nbytes).\n- return 0, nil, file.Sync(t)\n+ // TODO(gvisor.dev/issue/1897): Currently, the only file syncing we support\n+ // is a full-file sync, i.e. fsync(2). As a result, there are severe\n+ // limitations on how much we support sync_file_range:\n+ // - In Linux, sync_file_range(2) doesn't write out the file's metadata, even\n+ // if the file size is changed. We do.\n+ // - We always sync the entire file instead of [offset, offset+nbytes).\n+ // - We do not support the use of WAIT_BEFORE without WAIT_AFTER. For\n+ // correctness, we would have to perform a write-out every time WAIT_BEFORE\n+ // was used, but this would be much more expensive than expected if there\n+ // were no write-out operations in progress.\n+ // - Whenever WAIT_AFTER is used, we sync the file.\n+ // - Ignore WRITE. If this flag is used with WAIT_AFTER, then the file will\n+ // be synced anyway. If this flag is used without WAIT_AFTER, then it is\n+ // safe (and less expensive) to do nothing, because the syscall will not\n+ // wait for the write-out to complete--we only need to make sure that the\n+ // next time WAIT_BEFORE or WAIT_AFTER are used, the write-out completes.\n+ // - According to fs/sync.c, WAIT_BEFORE|WAIT_AFTER \"will detect any I/O\n+ // errors or ENOSPC conditions and will return those to the caller, after\n+ // clearing the EIO and ENOSPC flags in the address_space.\" We don't do\n+ // this.\n+\n+ if flags&linux.SYNC_FILE_RANGE_WAIT_BEFORE != 0 &&\n+ flags&linux.SYNC_FILE_RANGE_WAIT_AFTER == 0 {\n+ t.Kernel().EmitUnimplementedEvent(t)\n+ return 0, nil, syserror.ENOSYS\n+ }\n+\n+ if flags&linux.SYNC_FILE_RANGE_WAIT_AFTER != 0 {\n+ if err := file.Sync(t); err != nil {\n+ return 0, nil, syserror.ConvertIntr(err, kernel.ERESTARTSYS)\n+ }\n+ }\n+ return 0, nil, nil\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/BUILD",
"new_path": "test/syscalls/BUILD",
"diff": "@@ -962,6 +962,7 @@ syscall_test(\nsyscall_test(\nadd_overlay = True,\ntest = \"//test/syscalls/linux:sync_file_range_test\",\n+ vfs2 = \"True\",\n)\nsyscall_test(\n"
}
] | Go | Apache License 2.0 | google/gvisor | Port vfs1 implementation of sync_file_range to vfs2.
Currently, we always perform a full-file sync which could be extremely
expensive for some applications. Although vfs1 did not fully support
sync_file_range, there were some optimizations that allowed us to skip some
unnecessary write-outs.
Updates #2923, #1897.
PiperOrigin-RevId: 319324213 |
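For reference, a userspace call shaped like the flag semantics discussed above (the path is illustrative): under this port, WAIT_BEFORE without WAIT_AFTER fails with ENOSYS, WRITE alone is accepted as a no-op, and any combination including WAIT_AFTER is serviced as a full fsync.

    package main

    import (
    	"os"

    	"golang.org/x/sys/unix"
    )

    func main() {
    	f, err := os.Create("/tmp/sfr-demo") // illustrative path
    	if err != nil {
    		panic(err)
    	}
    	defer f.Close()
    	if _, err := f.WriteString("hello"); err != nil {
    		panic(err)
    	}

    	// Start and wait for write-out of the first 5 bytes.
    	flags := unix.SYNC_FILE_RANGE_WAIT_BEFORE | unix.SYNC_FILE_RANGE_WRITE | unix.SYNC_FILE_RANGE_WAIT_AFTER
    	if err := unix.SyncFileRange(int(f.Fd()), 0, 5, flags); err != nil {
    		panic(err)
    	}
    }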
259,860 | 01.07.2020 17:39:07 | 25,200 | 3b26d2121e3afbbc005b5e919000d55a235b4bbe | Remove maxSendBufferSize from vfs2.
Complements cl/315991648. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/host/socket.go",
"new_path": "pkg/sentry/fsimpl/host/socket.go",
"diff": "@@ -47,11 +47,6 @@ func newEndpoint(ctx context.Context, hostFD int, queue *waiter.Queue) (transpor\nreturn ep, nil\n}\n-// maxSendBufferSize is the maximum host send buffer size allowed for endpoint.\n-//\n-// N.B. 8MB is the default maximum on Linux (2 * sysctl_wmem_max).\n-const maxSendBufferSize = 8 << 20\n-\n// ConnectedEndpoint is an implementation of transport.ConnectedEndpoint and\n// transport.Receiver. It is backed by a host fd that was imported at sentry\n// startup. This fd is shared with a hostfs inode, which retains ownership of\n@@ -114,10 +109,6 @@ func (c *ConnectedEndpoint) init() *syserr.Error {\nif err != nil {\nreturn syserr.FromError(err)\n}\n- if sndbuf > maxSendBufferSize {\n- log.Warningf(\"Socket send buffer too large: %d\", sndbuf)\n- return syserr.ErrInvalidEndpointState\n- }\nc.stype = linux.SockType(stype)\nc.sndbuf = int64(sndbuf)\n"
}
] | Go | Apache License 2.0 | google/gvisor | Remove maxSendBufferSize from vfs2.
Complements cl/315991648.
PiperOrigin-RevId: 319327853 |
259,907 | 01.07.2020 19:47:51 | 25,200 | 52b44719d6e14ec299d0d953b4dc07a712b897fa | [vfs2][gofer] Update file size to 0 on O_TRUNC
Some Open:TruncateXxx syscall tests were failing because the file size was
not being updated when the file was opened with O_TRUNC.
Fixes Truncate tests in test/syscalls:open_test_runsc_ptrace_vfs2.
Updates | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/gofer/filesystem.go",
"new_path": "pkg/sentry/fsimpl/gofer/filesystem.go",
"diff": "@@ -869,11 +869,22 @@ func (d *dentry) openLocked(ctx context.Context, rp *vfs.ResolvingPath, opts *vf\nif err := d.checkPermissions(rp.Credentials(), ats); err != nil {\nreturn nil, err\n}\n+\n+ trunc := opts.Flags&linux.O_TRUNC != 0 && d.fileType() == linux.S_IFREG\n+ if trunc {\n+ // Lock metadataMu *while* we open a regular file with O_TRUNC because\n+ // open(2) will change the file size on server.\n+ d.metadataMu.Lock()\n+ defer d.metadataMu.Unlock()\n+ }\n+\n+ var vfd *vfs.FileDescription\n+ var err error\nmnt := rp.Mount()\nswitch d.fileType() {\ncase linux.S_IFREG:\nif !d.fs.opts.regularFilesUseSpecialFileFD {\n- if err := d.ensureSharedHandle(ctx, ats&vfs.MayRead != 0, ats&vfs.MayWrite != 0, opts.Flags&linux.O_TRUNC != 0); err != nil {\n+ if err := d.ensureSharedHandle(ctx, ats&vfs.MayRead != 0, ats&vfs.MayWrite != 0, trunc); err != nil {\nreturn nil, err\n}\nfd := ®ularFileFD{}\n@@ -883,7 +894,7 @@ func (d *dentry) openLocked(ctx context.Context, rp *vfs.ResolvingPath, opts *vf\n}); err != nil {\nreturn nil, err\n}\n- return &fd.vfsfd, nil\n+ vfd = &fd.vfsfd\n}\ncase linux.S_IFDIR:\n// Can't open directories with O_CREAT.\n@@ -923,7 +934,25 @@ func (d *dentry) openLocked(ctx context.Context, rp *vfs.ResolvingPath, opts *vf\nreturn d.pipe.Open(ctx, mnt, &d.vfsd, opts.Flags, &d.locks)\n}\n}\n- return d.openSpecialFileLocked(ctx, mnt, opts)\n+\n+ if vfd == nil {\n+ if vfd, err = d.openSpecialFileLocked(ctx, mnt, opts); err != nil {\n+ return nil, err\n+ }\n+ }\n+\n+ if trunc {\n+ // If no errors occured so far then update file size in memory. This\n+ // step is required even if !d.cachedMetadataAuthoritative() because\n+ // d.mappings has to be updated.\n+ // d.metadataMu has already been acquired if trunc == true.\n+ d.updateFileSizeLocked(0)\n+\n+ if d.cachedMetadataAuthoritative() {\n+ d.touchCMtimeLocked()\n+ }\n+ }\n+ return vfd, err\n}\nfunc (d *dentry) connectSocketLocked(ctx context.Context, opts *vfs.OpenOptions) (*vfs.FileDescription, error) {\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/gofer/gofer.go",
"new_path": "pkg/sentry/fsimpl/gofer/gofer.go",
"diff": "@@ -794,9 +794,7 @@ func (d *dentry) updateFromP9Attrs(mask p9.AttrMask, attr *p9.Attr) {\natomic.StoreUint32(&d.nlink, uint32(attr.NLink))\n}\nif mask.Size {\n- d.dataMu.Lock()\n- atomic.StoreUint64(&d.size, attr.Size)\n- d.dataMu.Unlock()\n+ d.updateFileSizeLocked(attr.Size)\n}\nd.metadataMu.Unlock()\n}\n@@ -964,9 +962,16 @@ func (d *dentry) setStat(ctx context.Context, creds *auth.Credentials, stat *lin\n}\natomic.StoreInt64(&d.ctime, now)\nif stat.Mask&linux.STATX_SIZE != 0 {\n+ d.updateFileSizeLocked(stat.Size)\n+ }\n+ return nil\n+}\n+\n+// Preconditions: d.metadataMu must be locked.\n+func (d *dentry) updateFileSizeLocked(newSize uint64) {\nd.dataMu.Lock()\noldSize := d.size\n- d.size = stat.Size\n+ d.size = newSize\n// d.dataMu must be unlocked to lock d.mapsMu and invalidate mappings\n// below. This allows concurrent calls to Read/Translate/etc. These\n// functions synchronize with truncation by refusing to use cache\n@@ -996,8 +1001,6 @@ func (d *dentry) setStat(ctx context.Context, creds *auth.Credentials, stat *lin\nd.dataMu.Unlock()\n}\n}\n- return nil\n-}\nfunc (d *dentry) checkPermissions(creds *auth.Credentials, ats vfs.AccessTypes) error {\nreturn vfs.GenericCheckPermissions(creds, ats, linux.FileMode(atomic.LoadUint32(&d.mode)), auth.KUID(atomic.LoadUint32(&d.uid)), auth.KGID(atomic.LoadUint32(&d.gid)))\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/gofer/time.go",
"new_path": "pkg/sentry/fsimpl/gofer/time.go",
"diff": "@@ -36,7 +36,7 @@ func statxTimestampFromDentry(ns int64) linux.StatxTimestamp {\n}\n}\n-// Preconditions: fs.interop != InteropModeShared.\n+// Preconditions: d.cachedMetadataAuthoritative() == true.\nfunc (d *dentry) touchAtime(mnt *vfs.Mount) {\nif mnt.Flags.NoATime {\nreturn\n@@ -51,8 +51,8 @@ func (d *dentry) touchAtime(mnt *vfs.Mount) {\nmnt.EndWrite()\n}\n-// Preconditions: fs.interop != InteropModeShared. The caller has successfully\n-// called vfs.Mount.CheckBeginWrite().\n+// Preconditions: d.cachedMetadataAuthoritative() == true. The caller has\n+// successfully called vfs.Mount.CheckBeginWrite().\nfunc (d *dentry) touchCtime() {\nnow := d.fs.clock.Now().Nanoseconds()\nd.metadataMu.Lock()\n@@ -60,8 +60,8 @@ func (d *dentry) touchCtime() {\nd.metadataMu.Unlock()\n}\n-// Preconditions: fs.interop != InteropModeShared. The caller has successfully\n-// called vfs.Mount.CheckBeginWrite().\n+// Preconditions: d.cachedMetadataAuthoritative() == true. The caller has\n+// successfully called vfs.Mount.CheckBeginWrite().\nfunc (d *dentry) touchCMtime() {\nnow := d.fs.clock.Now().Nanoseconds()\nd.metadataMu.Lock()\n@@ -70,6 +70,8 @@ func (d *dentry) touchCMtime() {\nd.metadataMu.Unlock()\n}\n+// Preconditions: d.cachedMetadataAuthoritative() == true. The caller has\n+// locked d.metadataMu.\nfunc (d *dentry) touchCMtimeLocked() {\nnow := d.fs.clock.Now().Nanoseconds()\natomic.StoreInt64(&d.mtime, now)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/tmpfs/filesystem.go",
"new_path": "pkg/sentry/fsimpl/tmpfs/filesystem.go",
"diff": "@@ -410,7 +410,7 @@ func (d *dentry) open(ctx context.Context, rp *vfs.ResolvingPath, opts *vfs.Open\nif err := fd.vfsfd.Init(&fd, opts.Flags, rp.Mount(), &d.vfsd, &vfs.FileDescriptionOptions{AllowDirectIO: true}); err != nil {\nreturn nil, err\n}\n- if opts.Flags&linux.O_TRUNC != 0 {\n+ if !afterCreate && opts.Flags&linux.O_TRUNC != 0 {\nif _, err := impl.truncate(0); err != nil {\nreturn nil, err\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | [vfs2][gofer] Update file size to 0 on O_TRUNC
Some Open:TruncateXxx syscall tests were failing because the file size was
not being updated when the file was opened with O_TRUNC.
Fixes Truncate tests in test/syscalls:open_test_runsc_ptrace_vfs2.
Updates #2923
PiperOrigin-RevId: 319340127 |
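The O_TRUNC behavior restored by the record above is plain open(2) semantics, so it can be sketched with host Go alone, independent of the gofer/tmpfs internals; the temp-file name below is arbitrary and only the standard library is assumed.

```go
package main

import (
	"fmt"
	"log"
	"os"
	"path/filepath"
)

func main() {
	// Create a file with some content.
	path := filepath.Join(os.TempDir(), "trunc-demo.txt")
	if err := os.WriteFile(path, []byte("hello world"), 0644); err != nil {
		log.Fatal(err)
	}
	defer os.Remove(path)

	// Re-opening an existing file with O_TRUNC must drop its size to zero;
	// this is the semantic the Open:TruncateXxx tests exercise.
	f, err := os.OpenFile(path, os.O_WRONLY|os.O_TRUNC, 0644)
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	fi, err := os.Stat(path)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("size after O_TRUNC:", fi.Size()) // 0
}
```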
259,907 | 01.07.2020 21:04:43 | 25,200 | 514955c1a8f3927c928a57935d511ffbbf9c6f01 | [vfs2][gofer] Fix mmap syscall test.
We were not invalidating mappings when the file size changed in shared mode.
Enabled the syscall test for vfs2.
Updates | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/gofer/gofer.go",
"new_path": "pkg/sentry/fsimpl/gofer/gofer.go",
"diff": "@@ -900,6 +900,12 @@ func (d *dentry) setStat(ctx context.Context, creds *auth.Credentials, stat *lin\n}\nd.metadataMu.Lock()\ndefer d.metadataMu.Unlock()\n+ if stat.Mask&linux.STATX_SIZE != 0 {\n+ // The size needs to be changed even when\n+ // !d.cachedMetadataAuthoritative() because d.mappings has to be\n+ // updated.\n+ d.updateFileSizeLocked(stat.Size)\n+ }\nif !d.isSynthetic() {\nif stat.Mask != 0 {\nif err := d.file.setAttr(ctx, p9.SetAttrMask{\n@@ -961,9 +967,6 @@ func (d *dentry) setStat(ctx context.Context, creds *auth.Credentials, stat *lin\nstat.Mask |= linux.STATX_MTIME\n}\natomic.StoreInt64(&d.ctime, now)\n- if stat.Mask&linux.STATX_SIZE != 0 {\n- d.updateFileSizeLocked(stat.Size)\n- }\nreturn nil\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/BUILD",
"new_path": "test/syscalls/BUILD",
"diff": "@@ -357,6 +357,7 @@ syscall_test(\nsize = \"medium\",\nshard_count = 5,\ntest = \"//test/syscalls/linux:mmap_test\",\n+ vfs2 = \"True\",\n)\nsyscall_test(\n"
}
] | Go | Apache License 2.0 | google/gvisor | [vfs2][gofer] Fix mmap syscall test.
We were not invalidating mappings when the file size changed in shared mode.
Enabled the syscall test for vfs2.
Updates #2923
PiperOrigin-RevId: 319346569 |
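The ordering this mmap fix establishes in setStat can be modeled without the real gofer types; the dentry and setStat names below are illustrative stand-ins only (not gVisor's actual structures), and mapping invalidation is reduced to clearing a slice.

```go
package main

import "fmt"

// Toy model of the fix: when STATX_SIZE is set, the cached size (and the
// memory mappings derived from it) must be updated even when cached metadata
// is not authoritative (shared interop mode), because stale mappings would
// otherwise keep serving the old size to mmap users.

type dentry struct {
	size     uint64
	mappings []string // stand-in for memmap mappings past the new size
}

func (d *dentry) updateFileSizeLocked(newSize uint64) {
	d.size = newSize
	d.mappings = nil // schematic invalidation
}

func (d *dentry) setStat(setSize bool, newSize uint64, cachedMetadataAuthoritative bool) {
	// Size is handled before, and independently of, the authoritative check.
	if setSize {
		d.updateFileSizeLocked(newSize)
	}
	if cachedMetadataAuthoritative {
		// ... timestamp bookkeeping would go here ...
	}
	// ... the remote setattr would follow here ...
}

func main() {
	d := &dentry{size: 4096, mappings: []string{"page0"}}
	d.setStat(true, 0, false /* shared mode */)
	fmt.Printf("size=%d mappings=%v\n", d.size, d.mappings) // size=0 mappings=[]
}
```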
259,883 | 06.07.2020 20:53:51 | -28,800 | 057a2666fa50ac0d0f3b0472a6e1f3d909cc7a12 | hostinet: fix not specifying family field
Creating sockets by hostinet with VFS2 fails due to triggering a
seccomp violation. In essence, we fail to pass down the field of
family.
We fix this by passing down this field, family.
Fixes | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/hostinet/socket.go",
"new_path": "pkg/sentry/socket/hostinet/socket.go",
"diff": "@@ -708,6 +708,6 @@ func (p *socketProvider) Pair(t *kernel.Task, stype linux.SockType, protocol int\nfunc init() {\nfor _, family := range []int{syscall.AF_INET, syscall.AF_INET6} {\nsocket.RegisterProvider(family, &socketProvider{family})\n- socket.RegisterProviderVFS2(family, &socketProviderVFS2{})\n+ socket.RegisterProviderVFS2(family, &socketProviderVFS2{family})\n}\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | hostinet: fix not specifying family field
Creating sockets by hostinet with VFS2 fails due to triggering a
seccomp violation. In essence, we fail to pass down the field of
family.
We fix this by passing down this field, family.
Fixes #3141
Signed-off-by: Jianfeng Tan <[email protected]> |
260,003 | 06.07.2020 12:09:03 | 25,200 | 1e5b0a973293d9a6bf7b60349763ee3e71343dea | Shard some slow tests.
stack_x_test: 2m -> 20s
tcp_x_test: 80s -> 25s | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/BUILD",
"new_path": "pkg/tcpip/stack/BUILD",
"diff": "@@ -79,6 +79,7 @@ go_test(\n\"transport_demuxer_test.go\",\n\"transport_test.go\",\n],\n+ shard_count = 20,\ndeps = [\n\":stack\",\n\"//pkg/rand\",\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/BUILD",
"new_path": "pkg/tcpip/transport/tcp/BUILD",
"diff": "@@ -86,6 +86,7 @@ go_test(\n\"tcp_test.go\",\n\"tcp_timestamp_test.go\",\n],\n+ shard_count = 10,\ndeps = [\n\":tcp\",\n\"//pkg/sync\",\n"
}
] | Go | Apache License 2.0 | google/gvisor | Shard some slow tests.
stack_x_test: 2m -> 20s
tcp_x_test: 80s -> 25s
PiperOrigin-RevId: 319828101 |
259,992 | 06.07.2020 15:38:51 | 25,200 | bd43368f491a02b050cd9f87f69185dc74386f2b | Add inode number to synthetic dentries
Reserve the MSB from ino for synthetic dentries to prevent
conflict with regular dentries. Log warning in case MSB is
set for regular dentries.
Updates | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/gofer/directory.go",
"new_path": "pkg/sentry/fsimpl/gofer/directory.go",
"diff": "@@ -85,6 +85,7 @@ func (d *dentry) createSyntheticChildLocked(opts *createSyntheticOpts) {\nd2 := &dentry{\nrefs: 1, // held by d\nfs: d.fs,\n+ ino: d.fs.nextSyntheticIno(),\nmode: uint32(opts.mode),\nuid: uint32(opts.kuid),\ngid: uint32(opts.kgid),\n@@ -184,13 +185,13 @@ func (d *dentry) getDirents(ctx context.Context) ([]vfs.Dirent, error) {\n{\nName: \".\",\nType: linux.DT_DIR,\n- Ino: d.ino,\n+ Ino: uint64(d.ino),\nNextOff: 1,\n},\n{\nName: \"..\",\nType: uint8(atomic.LoadUint32(&parent.mode) >> 12),\n- Ino: parent.ino,\n+ Ino: uint64(parent.ino),\nNextOff: 2,\n},\n}\n@@ -226,7 +227,7 @@ func (d *dentry) getDirents(ctx context.Context) ([]vfs.Dirent, error) {\n}\ndirent := vfs.Dirent{\nName: p9d.Name,\n- Ino: p9d.QID.Path,\n+ Ino: uint64(inoFromPath(p9d.QID.Path)),\nNextOff: int64(len(dirents) + 1),\n}\n// p9 does not expose 9P2000.U's DMDEVICE, DMNAMEDPIPE, or\n@@ -259,7 +260,7 @@ func (d *dentry) getDirents(ctx context.Context) ([]vfs.Dirent, error) {\ndirents = append(dirents, vfs.Dirent{\nName: child.name,\nType: uint8(atomic.LoadUint32(&child.mode) >> 12),\n- Ino: child.ino,\n+ Ino: uint64(child.ino),\nNextOff: int64(len(dirents) + 1),\n})\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/gofer/filesystem.go",
"new_path": "pkg/sentry/fsimpl/gofer/filesystem.go",
"diff": "@@ -214,9 +214,8 @@ func (fs *filesystem) revalidateChildLocked(ctx context.Context, vfsObj *vfs.Vir\nreturn nil, err\n}\nif child != nil {\n- if !file.isNil() && qid.Path == child.ino {\n- // The file at this path hasn't changed. Just update cached\n- // metadata.\n+ if !file.isNil() && inoFromPath(qid.Path) == child.ino {\n+ // The file at this path hasn't changed. Just update cached metadata.\nfile.close(ctx)\nchild.updateFromP9Attrs(attrMask, &attr)\nreturn child, nil\n@@ -1499,3 +1498,7 @@ func (fs *filesystem) PrependPath(ctx context.Context, vfsroot, vd vfs.VirtualDe\ndefer fs.renameMu.RUnlock()\nreturn genericPrependPath(vfsroot, vd.Mount(), vd.Dentry().Impl().(*dentry), b)\n}\n+\n+func (fs *filesystem) nextSyntheticIno() inodeNumber {\n+ return inodeNumber(atomic.AddUint64(&fs.syntheticSeq, 1) | syntheticInoMask)\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/gofer/gofer.go",
"new_path": "pkg/sentry/fsimpl/gofer/gofer.go",
"diff": "@@ -110,6 +110,26 @@ type filesystem struct {\nsyncMu sync.Mutex\nsyncableDentries map[*dentry]struct{}\nspecialFileFDs map[*specialFileFD]struct{}\n+\n+ // syntheticSeq stores a counter to used to generate unique inodeNumber for\n+ // synthetic dentries.\n+ syntheticSeq uint64\n+}\n+\n+// inodeNumber represents inode number reported in Dirent.Ino. For regular\n+// dentries, it comes from QID.Path from the 9P server. Synthetic dentries\n+// have have their inodeNumber generated sequentially, with the MSB reserved to\n+// prevent conflicts with regular dentries.\n+type inodeNumber uint64\n+\n+// Reserve MSB for synthetic mounts.\n+const syntheticInoMask = uint64(1) << 63\n+\n+func inoFromPath(path uint64) inodeNumber {\n+ if path&syntheticInoMask != 0 {\n+ log.Warningf(\"Dropping MSB from ino, collision is possible. Original: %d, new: %d\", path, path&^syntheticInoMask)\n+ }\n+ return inodeNumber(path &^ syntheticInoMask)\n}\ntype filesystemOptions struct {\n@@ -585,7 +605,7 @@ type dentry struct {\n// Cached metadata; protected by metadataMu and accessed using atomic\n// memory operations unless otherwise specified.\nmetadataMu sync.Mutex\n- ino uint64 // immutable\n+ ino inodeNumber // immutable\nmode uint32 // type is immutable, perms are mutable\nuid uint32 // auth.KUID, but stored as raw uint32 for sync/atomic\ngid uint32 // auth.KGID, but ...\n@@ -704,7 +724,7 @@ func (fs *filesystem) newDentry(ctx context.Context, file p9file, qid p9.QID, ma\nd := &dentry{\nfs: fs,\nfile: file,\n- ino: qid.Path,\n+ ino: inoFromPath(qid.Path),\nmode: uint32(attr.Mode),\nuid: uint32(fs.opts.dfltuid),\ngid: uint32(fs.opts.dfltgid),\n@@ -846,7 +866,7 @@ func (d *dentry) statTo(stat *linux.Statx) {\nstat.UID = atomic.LoadUint32(&d.uid)\nstat.GID = atomic.LoadUint32(&d.gid)\nstat.Mode = uint16(atomic.LoadUint32(&d.mode))\n- stat.Ino = d.ino\n+ stat.Ino = uint64(d.ino)\nstat.Size = atomic.LoadUint64(&d.size)\n// This is consistent with regularFileFD.Seek(), which treats regular files\n// as having no holes.\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add inode number to synthetic dentries
Reserve the MSB from ino for synthetic dentries to prevent
conflict with regular dentries. Log warning in case MSB is
set for regular dentries.
Updates #1487
PiperOrigin-RevId: 319869858 |
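The numbering scheme added above is small enough to exercise standalone; the sketch below mirrors inoFromPath and nextSyntheticIno from the diff (minus the collision warning), so the mask and counter logic match the commit while the surrounding program is only illustrative.

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// The most significant bit is reserved for synthetic dentries; regular
// (9P-backed) dentries have that bit stripped from QID.Path so the two
// namespaces can never collide.
const syntheticInoMask = uint64(1) << 63

var syntheticSeq uint64

// inoFromPath drops the MSB from a remote QID.Path.
func inoFromPath(path uint64) uint64 {
	return path &^ syntheticInoMask
}

// nextSyntheticIno is a sequential counter with the MSB set.
func nextSyntheticIno() uint64 {
	return atomic.AddUint64(&syntheticSeq, 1) | syntheticInoMask
}

func main() {
	fmt.Printf("regular:   %#x\n", inoFromPath(0x1234))
	fmt.Printf("synthetic: %#x\n", nextSyntheticIno())
	fmt.Printf("synthetic: %#x\n", nextSyntheticIno())
}
```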
260,003 | 06.07.2020 16:46:27 | 25,200 | 15c56d92d8b12b7a7bc72aa8a7d1751682e68302 | Fix NonBlockingWrite3 not writing b3 if b2 is zero-length. | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/rawfile/rawfile_unsafe.go",
"new_path": "pkg/tcpip/link/rawfile/rawfile_unsafe.go",
"diff": "@@ -69,13 +69,12 @@ func NonBlockingWrite(fd int, buf []byte) *tcpip.Error {\n// NonBlockingWrite3 writes up to three byte slices to a file descriptor in a\n// single syscall. It fails if partial data is written.\nfunc NonBlockingWrite3(fd int, b1, b2, b3 []byte) *tcpip.Error {\n- // If the is no second buffer, issue a regular write.\n- if len(b2) == 0 {\n+ // If there is no second and third buffer, issue a regular write.\n+ if len(b2) == 0 && len(b3) == 0 {\nreturn NonBlockingWrite(fd, b1)\n}\n- // We have two buffers. Build the iovec that represents them and issue\n- // a writev syscall.\n+ // Build the iovec that represents them and issue a writev syscall.\niovec := [3]syscall.Iovec{\n{\nBase: &b1[0],\n"
}
] | Go | Apache License 2.0 | google/gvisor | Fix NonBlockingWrite3 not writing b3 if b2 is zero-length.
PiperOrigin-RevId: 319882171 |
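The bug fixed above lives entirely in the dispatch condition, so it can be reproduced without iovecs or syscalls; the helpers below model the old and new checks, with byte-slice concatenation standing in for writev.

```go
package main

import "fmt"

// write3Old reproduces the old condition: with an empty b2 but a non-empty
// b3, the call fell back to the single-buffer path and silently dropped b3.
func write3Old(b1, b2, b3 []byte) []byte {
	if len(b2) == 0 { // old check: ignores b3
		return b1
	}
	return append(append(append([]byte(nil), b1...), b2...), b3...)
}

// write3New reproduces the fixed condition.
func write3New(b1, b2, b3 []byte) []byte {
	if len(b2) == 0 && len(b3) == 0 { // fixed check
		return b1
	}
	return append(append(append([]byte(nil), b1...), b2...), b3...)
}

func main() {
	b1, b3 := []byte("header"), []byte("payload")
	fmt.Printf("old: %q\n", write3Old(b1, nil, b3)) // "header" - payload lost
	fmt.Printf("new: %q\n", write3New(b1, nil, b3)) // "headerpayload"
}
```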
259,962 | 06.07.2020 17:21:03 | 25,200 | b0f656184ef249182882dc323babf9e7c8ac3fc0 | Add support for SO_RCVBUF/SO_SNDBUF for AF_PACKET sockets.
Updates | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/packet/endpoint.go",
"new_path": "pkg/tcpip/transport/packet/endpoint.go",
"diff": "package packet\nimport (\n+ \"fmt\"\n+\n\"gvisor.dev/gvisor/pkg/sync\"\n\"gvisor.dev/gvisor/pkg/tcpip\"\n\"gvisor.dev/gvisor/pkg/tcpip/buffer\"\n@@ -73,6 +75,7 @@ type endpoint struct {\n// The following fields are protected by mu.\nmu sync.RWMutex `state:\"nosave\"`\nsndBufSize int\n+ sndBufSizeMax int\nclosed bool\nstats tcpip.TransportEndpointStats `state:\"nosave\"`\nbound bool\n@@ -92,6 +95,17 @@ func NewEndpoint(s *stack.Stack, cooked bool, netProto tcpip.NetworkProtocolNumb\nsndBufSize: 32 * 1024,\n}\n+ // Override with stack defaults.\n+ var ss stack.SendBufferSizeOption\n+ if err := s.Option(&ss); err == nil {\n+ ep.sndBufSizeMax = ss.Default\n+ }\n+\n+ var rs stack.ReceiveBufferSizeOption\n+ if err := s.Option(&rs); err == nil {\n+ ep.rcvBufSizeMax = rs.Default\n+ }\n+\nif err := s.RegisterPacketEndpoint(0, netProto, ep); err != nil {\nreturn nil, err\n}\n@@ -274,8 +288,47 @@ func (ep *endpoint) SetSockOptBool(opt tcpip.SockOptBool, v bool) *tcpip.Error {\n// SetSockOptInt implements tcpip.Endpoint.SetSockOptInt.\nfunc (ep *endpoint) SetSockOptInt(opt tcpip.SockOptInt, v int) *tcpip.Error {\n+ switch opt {\n+ case tcpip.SendBufferSizeOption:\n+ // Make sure the send buffer size is within the min and max\n+ // allowed.\n+ var ss stack.SendBufferSizeOption\n+ if err := ep.stack.Option(&ss); err != nil {\n+ panic(fmt.Sprintf(\"s.Option(%#v) = %s\", ss, err))\n+ }\n+ if v > ss.Max {\n+ v = ss.Max\n+ }\n+ if v < ss.Min {\n+ v = ss.Min\n+ }\n+ ep.mu.Lock()\n+ ep.sndBufSizeMax = v\n+ ep.mu.Unlock()\n+ return nil\n+\n+ case tcpip.ReceiveBufferSizeOption:\n+ // Make sure the receive buffer size is within the min and max\n+ // allowed.\n+ var rs stack.ReceiveBufferSizeOption\n+ if err := ep.stack.Option(&rs); err != nil {\n+ panic(fmt.Sprintf(\"s.Option(%#v) = %s\", rs, err))\n+ }\n+ if v > rs.Max {\n+ v = rs.Max\n+ }\n+ if v < rs.Min {\n+ v = rs.Min\n+ }\n+ ep.rcvMu.Lock()\n+ ep.rcvBufSizeMax = v\n+ ep.rcvMu.Unlock()\n+ return nil\n+\n+ default:\nreturn tcpip.ErrUnknownProtocolOption\n}\n+}\n// GetSockOpt implements tcpip.Endpoint.GetSockOpt.\nfunc (ep *endpoint) GetSockOpt(opt interface{}) *tcpip.Error {\n@@ -289,7 +342,32 @@ func (ep *endpoint) GetSockOptBool(opt tcpip.SockOptBool) (bool, *tcpip.Error) {\n// GetSockOptInt implements tcpip.Endpoint.GetSockOptInt.\nfunc (ep *endpoint) GetSockOptInt(opt tcpip.SockOptInt) (int, *tcpip.Error) {\n- return 0, tcpip.ErrNotSupported\n+ switch opt {\n+ case tcpip.ReceiveQueueSizeOption:\n+ v := 0\n+ ep.rcvMu.Lock()\n+ if !ep.rcvList.Empty() {\n+ p := ep.rcvList.Front()\n+ v = p.data.Size()\n+ }\n+ ep.rcvMu.Unlock()\n+ return v, nil\n+\n+ case tcpip.SendBufferSizeOption:\n+ ep.mu.Lock()\n+ v := ep.sndBufSizeMax\n+ ep.mu.Unlock()\n+ return v, nil\n+\n+ case tcpip.ReceiveBufferSizeOption:\n+ ep.rcvMu.Lock()\n+ v := ep.rcvBufSizeMax\n+ ep.rcvMu.Unlock()\n+ return v, nil\n+\n+ default:\n+ return -1, tcpip.ErrUnknownProtocolOption\n+ }\n}\n// HandlePacket implements stack.PacketEndpoint.HandlePacket.\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/packet_socket_raw.cc",
"new_path": "test/syscalls/linux/packet_socket_raw.cc",
"diff": "@@ -97,7 +97,7 @@ class RawPacketTest : public ::testing::TestWithParam<int> {\nint GetLoopbackIndex();\n// The socket used for both reading and writing.\n- int socket_;\n+ int s_;\n};\nvoid RawPacketTest::SetUp() {\n@@ -108,34 +108,58 @@ void RawPacketTest::SetUp() {\n}\nif (!IsRunningOnGvisor()) {\n+ // Ensure that looped back packets aren't rejected by the kernel.\nFileDescriptor acceptLocal = ASSERT_NO_ERRNO_AND_VALUE(\n- Open(\"/proc/sys/net/ipv4/conf/lo/accept_local\", O_RDONLY));\n+ Open(\"/proc/sys/net/ipv4/conf/lo/accept_local\", O_RDWR));\nFileDescriptor routeLocalnet = ASSERT_NO_ERRNO_AND_VALUE(\n- Open(\"/proc/sys/net/ipv4/conf/lo/route_localnet\", O_RDONLY));\n+ Open(\"/proc/sys/net/ipv4/conf/lo/route_localnet\", O_RDWR));\nchar enabled;\n+ ASSERT_THAT(read(acceptLocal.get(), &enabled, 1),\n+ SyscallSucceedsWithValue(1));\n+ if (enabled != '1') {\n+ enabled = '1';\n+ ASSERT_THAT(lseek(acceptLocal.get(), 0, SEEK_SET),\n+ SyscallSucceedsWithValue(0));\n+ ASSERT_THAT(write(acceptLocal.get(), &enabled, 1),\n+ SyscallSucceedsWithValue(1));\n+ ASSERT_THAT(lseek(acceptLocal.get(), 0, SEEK_SET),\n+ SyscallSucceedsWithValue(0));\nASSERT_THAT(read(acceptLocal.get(), &enabled, 1),\nSyscallSucceedsWithValue(1));\nASSERT_EQ(enabled, '1');\n+ }\n+\n+ ASSERT_THAT(read(routeLocalnet.get(), &enabled, 1),\n+ SyscallSucceedsWithValue(1));\n+ if (enabled != '1') {\n+ enabled = '1';\n+ ASSERT_THAT(lseek(routeLocalnet.get(), 0, SEEK_SET),\n+ SyscallSucceedsWithValue(0));\n+ ASSERT_THAT(write(routeLocalnet.get(), &enabled, 1),\n+ SyscallSucceedsWithValue(1));\n+ ASSERT_THAT(lseek(routeLocalnet.get(), 0, SEEK_SET),\n+ SyscallSucceedsWithValue(0));\nASSERT_THAT(read(routeLocalnet.get(), &enabled, 1),\nSyscallSucceedsWithValue(1));\nASSERT_EQ(enabled, '1');\n}\n+ }\n- ASSERT_THAT(socket_ = socket(AF_PACKET, SOCK_RAW, htons(GetParam())),\n+ ASSERT_THAT(s_ = socket(AF_PACKET, SOCK_RAW, htons(GetParam())),\nSyscallSucceeds());\n}\nvoid RawPacketTest::TearDown() {\n// TearDown will be run even if we skip the test.\nif (ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW))) {\n- EXPECT_THAT(close(socket_), SyscallSucceeds());\n+ EXPECT_THAT(close(s_), SyscallSucceeds());\n}\n}\nint RawPacketTest::GetLoopbackIndex() {\nstruct ifreq ifr;\nsnprintf(ifr.ifr_name, IFNAMSIZ, \"lo\");\n- EXPECT_THAT(ioctl(socket_, SIOCGIFINDEX, &ifr), SyscallSucceeds());\n+ EXPECT_THAT(ioctl(s_, SIOCGIFINDEX, &ifr), SyscallSucceeds());\nEXPECT_NE(ifr.ifr_ifindex, 0);\nreturn ifr.ifr_ifindex;\n}\n@@ -149,7 +173,7 @@ TEST_P(RawPacketTest, Receive) {\n// Wait for the socket to become readable.\nstruct pollfd pfd = {};\n- pfd.fd = socket_;\n+ pfd.fd = s_;\npfd.events = POLLIN;\nEXPECT_THAT(RetryEINTR(poll)(&pfd, 1, 2000), SyscallSucceedsWithValue(1));\n@@ -159,7 +183,7 @@ TEST_P(RawPacketTest, Receive) {\nchar buf[64];\nstruct sockaddr_ll src = {};\nsocklen_t src_len = sizeof(src);\n- ASSERT_THAT(recvfrom(socket_, buf, sizeof(buf), 0,\n+ ASSERT_THAT(recvfrom(s_, buf, sizeof(buf), 0,\nreinterpret_cast<struct sockaddr*>(&src), &src_len),\nSyscallSucceedsWithValue(packet_size));\n// sockaddr_ll ends with an 8 byte physical address field, but ethernet\n@@ -277,7 +301,7 @@ TEST_P(RawPacketTest, Send) {\nsizeof(kMessage));\n// Send it.\n- ASSERT_THAT(sendto(socket_, send_buf, sizeof(send_buf), 0,\n+ ASSERT_THAT(sendto(s_, send_buf, sizeof(send_buf), 0,\nreinterpret_cast<struct sockaddr*>(&dest), sizeof(dest)),\nSyscallSucceedsWithValue(sizeof(send_buf)));\n@@ -286,13 +310,13 @@ TEST_P(RawPacketTest, Send) {\npfd.fd = 
udp_sock.get();\npfd.events = POLLIN;\nASSERT_THAT(RetryEINTR(poll)(&pfd, 1, 5000), SyscallSucceedsWithValue(1));\n- pfd.fd = socket_;\n+ pfd.fd = s_;\npfd.events = POLLIN;\nASSERT_THAT(RetryEINTR(poll)(&pfd, 1, 5000), SyscallSucceedsWithValue(1));\n// Receive on the packet socket.\nchar recv_buf[sizeof(send_buf)];\n- ASSERT_THAT(recv(socket_, recv_buf, sizeof(recv_buf), 0),\n+ ASSERT_THAT(recv(s_, recv_buf, sizeof(recv_buf), 0),\nSyscallSucceedsWithValue(sizeof(recv_buf)));\nASSERT_EQ(memcmp(recv_buf, send_buf, sizeof(send_buf)), 0);\n@@ -309,6 +333,229 @@ TEST_P(RawPacketTest, Send) {\nEXPECT_EQ(src.sin_addr.s_addr, htonl(INADDR_LOOPBACK));\n}\n+// Check that setting SO_RCVBUF below min is clamped to the minimum\n+// receive buffer size.\n+TEST_P(RawPacketTest, SetSocketRecvBufBelowMin) {\n+ SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));\n+\n+ // Discover minimum receive buf size by trying to set it to zero.\n+ // See:\n+ // https://github.com/torvalds/linux/blob/a5dc8300df75e8b8384b4c82225f1e4a0b4d9b55/net/core/sock.c#L820\n+ constexpr int kRcvBufSz = 0;\n+ ASSERT_THAT(\n+ setsockopt(s_, SOL_SOCKET, SO_RCVBUF, &kRcvBufSz, sizeof(kRcvBufSz)),\n+ SyscallSucceeds());\n+\n+ int min = 0;\n+ socklen_t min_len = sizeof(min);\n+ ASSERT_THAT(getsockopt(s_, SOL_SOCKET, SO_RCVBUF, &min, &min_len),\n+ SyscallSucceeds());\n+\n+ // Linux doubles the value so let's use a value that when doubled will still\n+ // be smaller than min.\n+ int below_min = min / 2 - 1;\n+ ASSERT_THAT(\n+ setsockopt(s_, SOL_SOCKET, SO_RCVBUF, &below_min, sizeof(below_min)),\n+ SyscallSucceeds());\n+\n+ int val = 0;\n+ socklen_t val_len = sizeof(val);\n+ ASSERT_THAT(getsockopt(s_, SOL_SOCKET, SO_RCVBUF, &val, &val_len),\n+ SyscallSucceeds());\n+\n+ ASSERT_EQ(min, val);\n+}\n+\n+// Check that setting SO_RCVBUF above max is clamped to the maximum\n+// receive buffer size.\n+TEST_P(RawPacketTest, SetSocketRecvBufAboveMax) {\n+ SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));\n+\n+ // Discover max buf size by trying to set the largest possible buffer size.\n+ constexpr int kRcvBufSz = 0xffffffff;\n+ ASSERT_THAT(\n+ setsockopt(s_, SOL_SOCKET, SO_RCVBUF, &kRcvBufSz, sizeof(kRcvBufSz)),\n+ SyscallSucceeds());\n+\n+ int max = 0;\n+ socklen_t max_len = sizeof(max);\n+ ASSERT_THAT(getsockopt(s_, SOL_SOCKET, SO_RCVBUF, &max, &max_len),\n+ SyscallSucceeds());\n+\n+ int above_max = max + 1;\n+ ASSERT_THAT(\n+ setsockopt(s_, SOL_SOCKET, SO_RCVBUF, &above_max, sizeof(above_max)),\n+ SyscallSucceeds());\n+\n+ int val = 0;\n+ socklen_t val_len = sizeof(val);\n+ ASSERT_THAT(getsockopt(s_, SOL_SOCKET, SO_RCVBUF, &val, &val_len),\n+ SyscallSucceeds());\n+ ASSERT_EQ(max, val);\n+}\n+\n+// Check that setting SO_RCVBUF min <= kRcvBufSz <= max is honored.\n+TEST_P(RawPacketTest, SetSocketRecvBuf) {\n+ SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));\n+\n+ int max = 0;\n+ int min = 0;\n+ {\n+ // Discover max buf size by trying to set a really large buffer size.\n+ constexpr int kRcvBufSz = 0xffffffff;\n+ ASSERT_THAT(\n+ setsockopt(s_, SOL_SOCKET, SO_RCVBUF, &kRcvBufSz, sizeof(kRcvBufSz)),\n+ SyscallSucceeds());\n+\n+ max = 0;\n+ socklen_t max_len = sizeof(max);\n+ ASSERT_THAT(getsockopt(s_, SOL_SOCKET, SO_RCVBUF, &max, &max_len),\n+ SyscallSucceeds());\n+ }\n+\n+ {\n+ // Discover minimum buffer size by trying to set a zero size receive buffer\n+ // size.\n+ // See:\n+ // https://github.com/torvalds/linux/blob/a5dc8300df75e8b8384b4c82225f1e4a0b4d9b55/net/core/sock.c#L820\n+ constexpr int kRcvBufSz = 
0;\n+ ASSERT_THAT(\n+ setsockopt(s_, SOL_SOCKET, SO_RCVBUF, &kRcvBufSz, sizeof(kRcvBufSz)),\n+ SyscallSucceeds());\n+\n+ socklen_t min_len = sizeof(min);\n+ ASSERT_THAT(getsockopt(s_, SOL_SOCKET, SO_RCVBUF, &min, &min_len),\n+ SyscallSucceeds());\n+ }\n+\n+ int quarter_sz = min + (max - min) / 4;\n+ ASSERT_THAT(\n+ setsockopt(s_, SOL_SOCKET, SO_RCVBUF, &quarter_sz, sizeof(quarter_sz)),\n+ SyscallSucceeds());\n+\n+ int val = 0;\n+ socklen_t val_len = sizeof(val);\n+ ASSERT_THAT(getsockopt(s_, SOL_SOCKET, SO_RCVBUF, &val, &val_len),\n+ SyscallSucceeds());\n+\n+ // Linux doubles the value set by SO_SNDBUF/SO_RCVBUF.\n+ // TODO(gvisor.dev/issue/2926): Remove when Netstack matches linux behavior.\n+ if (!IsRunningOnGvisor()) {\n+ quarter_sz *= 2;\n+ }\n+ ASSERT_EQ(quarter_sz, val);\n+}\n+\n+// Check that setting SO_SNDBUF below min is clamped to the minimum\n+// receive buffer size.\n+TEST_P(RawPacketTest, SetSocketSendBufBelowMin) {\n+ SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));\n+\n+ // Discover minimum buffer size by trying to set it to zero.\n+ constexpr int kSndBufSz = 0;\n+ ASSERT_THAT(\n+ setsockopt(s_, SOL_SOCKET, SO_SNDBUF, &kSndBufSz, sizeof(kSndBufSz)),\n+ SyscallSucceeds());\n+\n+ int min = 0;\n+ socklen_t min_len = sizeof(min);\n+ ASSERT_THAT(getsockopt(s_, SOL_SOCKET, SO_SNDBUF, &min, &min_len),\n+ SyscallSucceeds());\n+\n+ // Linux doubles the value so let's use a value that when doubled will still\n+ // be smaller than min.\n+ int below_min = min / 2 - 1;\n+ ASSERT_THAT(\n+ setsockopt(s_, SOL_SOCKET, SO_SNDBUF, &below_min, sizeof(below_min)),\n+ SyscallSucceeds());\n+\n+ int val = 0;\n+ socklen_t val_len = sizeof(val);\n+ ASSERT_THAT(getsockopt(s_, SOL_SOCKET, SO_SNDBUF, &val, &val_len),\n+ SyscallSucceeds());\n+\n+ ASSERT_EQ(min, val);\n+}\n+\n+// Check that setting SO_SNDBUF above max is clamped to the maximum\n+// send buffer size.\n+TEST_P(RawPacketTest, SetSocketSendBufAboveMax) {\n+ SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));\n+\n+ // Discover maximum buffer size by trying to set it to a large value.\n+ constexpr int kSndBufSz = 0xffffffff;\n+ ASSERT_THAT(\n+ setsockopt(s_, SOL_SOCKET, SO_SNDBUF, &kSndBufSz, sizeof(kSndBufSz)),\n+ SyscallSucceeds());\n+\n+ int max = 0;\n+ socklen_t max_len = sizeof(max);\n+ ASSERT_THAT(getsockopt(s_, SOL_SOCKET, SO_SNDBUF, &max, &max_len),\n+ SyscallSucceeds());\n+\n+ int above_max = max + 1;\n+ ASSERT_THAT(\n+ setsockopt(s_, SOL_SOCKET, SO_SNDBUF, &above_max, sizeof(above_max)),\n+ SyscallSucceeds());\n+\n+ int val = 0;\n+ socklen_t val_len = sizeof(val);\n+ ASSERT_THAT(getsockopt(s_, SOL_SOCKET, SO_SNDBUF, &val, &val_len),\n+ SyscallSucceeds());\n+ ASSERT_EQ(max, val);\n+}\n+\n+// Check that setting SO_SNDBUF min <= kSndBufSz <= max is honored.\n+TEST_P(RawPacketTest, SetSocketSendBuf) {\n+ SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));\n+\n+ int max = 0;\n+ int min = 0;\n+ {\n+ // Discover maximum buffer size by trying to set it to a large value.\n+ constexpr int kSndBufSz = 0xffffffff;\n+ ASSERT_THAT(\n+ setsockopt(s_, SOL_SOCKET, SO_SNDBUF, &kSndBufSz, sizeof(kSndBufSz)),\n+ SyscallSucceeds());\n+\n+ max = 0;\n+ socklen_t max_len = sizeof(max);\n+ ASSERT_THAT(getsockopt(s_, SOL_SOCKET, SO_SNDBUF, &max, &max_len),\n+ SyscallSucceeds());\n+ }\n+\n+ {\n+ // Discover minimum buffer size by trying to set it to zero.\n+ constexpr int kSndBufSz = 0;\n+ ASSERT_THAT(\n+ setsockopt(s_, SOL_SOCKET, SO_SNDBUF, &kSndBufSz, sizeof(kSndBufSz)),\n+ SyscallSucceeds());\n+\n+ socklen_t 
min_len = sizeof(min);\n+ ASSERT_THAT(getsockopt(s_, SOL_SOCKET, SO_SNDBUF, &min, &min_len),\n+ SyscallSucceeds());\n+ }\n+\n+ int quarter_sz = min + (max - min) / 4;\n+ ASSERT_THAT(\n+ setsockopt(s_, SOL_SOCKET, SO_SNDBUF, &quarter_sz, sizeof(quarter_sz)),\n+ SyscallSucceeds());\n+\n+ int val = 0;\n+ socklen_t val_len = sizeof(val);\n+ ASSERT_THAT(getsockopt(s_, SOL_SOCKET, SO_SNDBUF, &val, &val_len),\n+ SyscallSucceeds());\n+\n+ // Linux doubles the value set by SO_SNDBUF/SO_RCVBUF.\n+ // TODO(gvisor.dev/issue/2926): Remove the gvisor special casing when Netstack\n+ // matches linux behavior.\n+ if (!IsRunningOnGvisor()) {\n+ quarter_sz *= 2;\n+ }\n+\n+ ASSERT_EQ(quarter_sz, val);\n+}\n+\nINSTANTIATE_TEST_SUITE_P(AllInetTests, RawPacketTest,\n::testing::Values(ETH_P_IP, ETH_P_ALL));\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add support for SO_RCVBUF/SO_SNDBUF for AF_PACKET sockets.
Updates #2746
PiperOrigin-RevId: 319887810 |
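The clamping rule the packet endpoint now applies is the same for SO_SNDBUF and SO_RCVBUF; the sketch below reproduces it with an illustrative min/default/max tuple rather than netstack's real stack options.

```go
package main

import "fmt"

// bufferSizeOption stands in for stack.SendBufferSizeOption /
// stack.ReceiveBufferSizeOption.
type bufferSizeOption struct{ Min, Default, Max int }

// clamp pins a requested buffer size to the stack's [Min, Max] range,
// mirroring the SetSockOptInt logic added by the commit.
func clamp(v int, opt bufferSizeOption) int {
	if v > opt.Max {
		v = opt.Max
	}
	if v < opt.Min {
		v = opt.Min
	}
	return v
}

func main() {
	rcv := bufferSizeOption{Min: 4 << 10, Default: 208 << 10, Max: 4 << 20}
	fmt.Println(clamp(0, rcv))      // clamped up to Min
	fmt.Println(clamp(64<<10, rcv)) // within range, kept as-is
	fmt.Println(clamp(1<<30, rcv))  // clamped down to Max
}
```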
259,992 | 06.07.2020 17:24:25 | 25,200 | 937912a4847cb1fd2acb79c0dd8fbc4b27695156 | Ensure sync is called for readonly file
Calling sync on a readonly file flushes metadata that
may have been modified, like last access time.
Updates | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/gofer/handle.go",
"new_path": "pkg/sentry/fsimpl/gofer/handle.go",
"diff": "@@ -126,11 +126,16 @@ func (h *handle) writeFromBlocksAt(ctx context.Context, srcs safemem.BlockSeq, o\n}\nfunc (h *handle) sync(ctx context.Context) error {\n+ // Handle most common case first.\nif h.fd >= 0 {\nctx.UninterruptibleSleepStart(false)\nerr := syscall.Fsync(int(h.fd))\nctx.UninterruptibleSleepFinish(false)\nreturn err\n}\n+ if h.file.isNil() {\n+ // File hasn't been touched, there is nothing to sync.\n+ return nil\n+ }\nreturn h.file.fsync(ctx)\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/gofer/regular_file.go",
"new_path": "pkg/sentry/fsimpl/gofer/regular_file.go",
"diff": "@@ -582,21 +582,20 @@ func (fd *regularFileFD) Sync(ctx context.Context) error {\nfunc (d *dentry) syncSharedHandle(ctx context.Context) error {\nd.handleMu.RLock()\n- if !d.handleWritable {\n- d.handleMu.RUnlock()\n- return nil\n- }\n+ defer d.handleMu.RUnlock()\n+\n+ if d.handleWritable {\nd.dataMu.Lock()\n// Write dirty cached data to the remote file.\nerr := fsutil.SyncDirtyAll(ctx, &d.cache, &d.dirty, d.size, d.fs.mfp.MemoryFile(), d.handle.writeFromBlocksAt)\nd.dataMu.Unlock()\n- if err == nil {\n- // Sync the remote file.\n- err = d.handle.sync(ctx)\n- }\n- d.handleMu.RUnlock()\n+ if err != nil {\nreturn err\n}\n+ }\n+ // Sync the remote file.\n+ return d.handle.sync(ctx)\n+}\n// ConfigureMMap implements vfs.FileDescriptionImpl.ConfigureMMap.\nfunc (fd *regularFileFD) ConfigureMMap(ctx context.Context, opts *memmap.MMapOpts) error {\n"
}
] | Go | Apache License 2.0 | google/gvisor | Ensure sync is called for readonly file
Calling sync on a readonly file flushes metadata that
may have been modified, like last access time.
Updates #1198
PiperOrigin-RevId: 319888290 |
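The point of the change above is that fsync(2) is meaningful, and must succeed, on a handle that was never writable, because reads can still dirty metadata such as atime; a host-Go check of that baseline semantic, using an arbitrary temp file, looks like this.

```go
package main

import (
	"log"
	"os"
	"path/filepath"
)

func main() {
	path := filepath.Join(os.TempDir(), "sync-demo.txt")
	if err := os.WriteFile(path, []byte("data"), 0644); err != nil {
		log.Fatal(err)
	}
	defer os.Remove(path)

	f, err := os.Open(path) // read-only descriptor
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// Reading updates the last access time, which is metadata worth flushing.
	buf := make([]byte, 4)
	if _, err := f.Read(buf); err != nil {
		log.Fatal(err)
	}

	// fsync on a read-only descriptor is valid and succeeds on Linux.
	if err := f.Sync(); err != nil {
		log.Fatal(err)
	}
	log.Println("fsync on read-only fd succeeded")
}
```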
259,867 | 07.07.2020 16:32:15 | -7,200 | c0ea7d9e9e6403db4718eac79ac480ef55b9073c | README.md: Commpatibility > Compatibility | [
{
"change_type": "MODIFY",
"old_path": "g3doc/README.md",
"new_path": "g3doc/README.md",
"diff": "@@ -152,7 +152,7 @@ The application is a normal Linux binary provided to gVisor in an OCI runtime\nbundle. gVisor aims to provide an environment equivalent to Linux v4.4, so\napplications should be able to run unmodified. However, gVisor does not\npresently implement every system call, `/proc` file, or `/sys` file so some\n-incompatibilities may occur. See [Commpatibility](./user_guide/compatibility.md)\n+incompatibilities may occur. See [Compatibility](./user_guide/compatibility.md)\nfor more information.\n[9p]: https://en.wikipedia.org/wiki/9P_(protocol)\n"
}
] | Go | Apache License 2.0 | google/gvisor | README.md: Commpatibility > Compatibility |
259,907 | 07.07.2020 15:33:32 | 25,200 | 10930189c3cd938e7526c55188ba2d814a7b8a43 | Fix mknod and inotify syscall test
This change fixes a few things:
creating sockets using mknod(2) is supported via vfs2
fsgofer can create regular files via mknod(2)
mode = 0 for mknod(2) will be interpreted as regular file in vfs2 as well
Updates | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/tmpfs/filesystem.go",
"new_path": "pkg/sentry/fsimpl/tmpfs/filesystem.go",
"diff": "@@ -277,7 +277,7 @@ func (fs *filesystem) MknodAt(ctx context.Context, rp *vfs.ResolvingPath, opts v\ncreds := rp.Credentials()\nvar childInode *inode\nswitch opts.Mode.FileType() {\n- case 0, linux.S_IFREG:\n+ case linux.S_IFREG:\nchildInode = fs.newRegularFile(creds.EffectiveKUID, creds.EffectiveKGID, opts.Mode)\ncase linux.S_IFIFO:\nchildInode = fs.newNamedPipe(creds.EffectiveKUID, creds.EffectiveKGID, opts.Mode)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/syscalls/linux/vfs2/filesystem.go",
"new_path": "pkg/sentry/syscalls/linux/vfs2/filesystem.go",
"diff": "@@ -107,7 +107,7 @@ func Mknod(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall\naddr := args[0].Pointer()\nmode := args[1].ModeT()\ndev := args[2].Uint()\n- return 0, nil, mknodat(t, linux.AT_FDCWD, addr, mode, dev)\n+ return 0, nil, mknodat(t, linux.AT_FDCWD, addr, linux.FileMode(mode), dev)\n}\n// Mknodat implements Linux syscall mknodat(2).\n@@ -116,10 +116,10 @@ func Mknodat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysca\naddr := args[1].Pointer()\nmode := args[2].ModeT()\ndev := args[3].Uint()\n- return 0, nil, mknodat(t, dirfd, addr, mode, dev)\n+ return 0, nil, mknodat(t, dirfd, addr, linux.FileMode(mode), dev)\n}\n-func mknodat(t *kernel.Task, dirfd int32, addr usermem.Addr, mode uint, dev uint32) error {\n+func mknodat(t *kernel.Task, dirfd int32, addr usermem.Addr, mode linux.FileMode, dev uint32) error {\npath, err := copyInPath(t, addr)\nif err != nil {\nreturn err\n@@ -129,9 +129,14 @@ func mknodat(t *kernel.Task, dirfd int32, addr usermem.Addr, mode uint, dev uint\nreturn err\n}\ndefer tpop.Release()\n+\n+ // \"Zero file type is equivalent to type S_IFREG.\" - mknod(2)\n+ if mode.FileType() == 0 {\n+ mode |= linux.ModeRegular\n+ }\nmajor, minor := linux.DecodeDeviceID(dev)\nreturn t.Kernel().VFS().MknodAt(t, t.Credentials(), &tpop.pop, &vfs.MknodOptions{\n- Mode: linux.FileMode(mode &^ t.FSContext().Umask()),\n+ Mode: mode &^ linux.FileMode(t.FSContext().Umask()),\nDevMajor: uint32(major),\nDevMinor: minor,\n})\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/fsgofer/filter/config.go",
"new_path": "runsc/fsgofer/filter/config.go",
"diff": "@@ -128,6 +128,7 @@ var allowedSyscalls = seccomp.SyscallRules{\nsyscall.SYS_MADVISE: {},\nunix.SYS_MEMFD_CREATE: {}, /// Used by flipcall.PacketWindowAllocator.Init().\nsyscall.SYS_MKDIRAT: {},\n+ syscall.SYS_MKNODAT: {},\n// Used by the Go runtime as a temporarily workaround for a Linux\n// 5.2-5.4 bug.\n//\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/fsgofer/fsgofer.go",
"new_path": "runsc/fsgofer/fsgofer.go",
"diff": "@@ -139,7 +139,7 @@ func (a *attachPoint) Attach() (p9.File, error) {\nreturn nil, fmt.Errorf(\"unable to open %q: %v\", a.prefix, err)\n}\n- stat, err := stat(f.FD())\n+ stat, err := fstat(f.FD())\nif err != nil {\nreturn nil, fmt.Errorf(\"unable to stat %q: %v\", a.prefix, err)\n}\n@@ -352,7 +352,7 @@ func newFDMaybe(file *fd.FD) *fd.FD {\nreturn dup\n}\n-func stat(fd int) (syscall.Stat_t, error) {\n+func fstat(fd int) (syscall.Stat_t, error) {\nvar stat syscall.Stat_t\nif err := syscall.Fstat(fd, &stat); err != nil {\nreturn syscall.Stat_t{}, err\n@@ -360,6 +360,14 @@ func stat(fd int) (syscall.Stat_t, error) {\nreturn stat, nil\n}\n+func stat(path string) (syscall.Stat_t, error) {\n+ var stat syscall.Stat_t\n+ if err := syscall.Stat(path, &stat); err != nil {\n+ return syscall.Stat_t{}, err\n+ }\n+ return stat, nil\n+}\n+\nfunc fchown(fd int, uid p9.UID, gid p9.GID) error {\nreturn syscall.Fchownat(fd, \"\", int(uid), int(gid), linux.AT_EMPTY_PATH|unix.AT_SYMLINK_NOFOLLOW)\n}\n@@ -388,7 +396,7 @@ func (l *localFile) Open(flags p9.OpenFlags) (*fd.FD, p9.QID, uint32, error) {\n}\n}\n- stat, err := stat(newFile.FD())\n+ stat, err := fstat(newFile.FD())\nif err != nil {\nif newFile != l.file {\nnewFile.Close()\n@@ -449,7 +457,7 @@ func (l *localFile) Create(name string, mode p9.OpenFlags, perm p9.FileMode, uid\nif err := fchown(child.FD(), uid, gid); err != nil {\nreturn nil, nil, p9.QID{}, 0, extractErrno(err)\n}\n- stat, err := stat(child.FD())\n+ stat, err := fstat(child.FD())\nif err != nil {\nreturn nil, nil, p9.QID{}, 0, extractErrno(err)\n}\n@@ -497,7 +505,7 @@ func (l *localFile) Mkdir(name string, perm p9.FileMode, uid p9.UID, gid p9.GID)\nif err := fchown(f.FD(), uid, gid); err != nil {\nreturn p9.QID{}, extractErrno(err)\n}\n- stat, err := stat(f.FD())\n+ stat, err := fstat(f.FD())\nif err != nil {\nreturn p9.QID{}, extractErrno(err)\n}\n@@ -517,7 +525,7 @@ func (l *localFile) Walk(names []string) ([]p9.QID, p9.File, error) {\nreturn nil, nil, extractErrno(err)\n}\n- stat, err := stat(newFile.FD())\n+ stat, err := fstat(newFile.FD())\nif err != nil {\nnewFile.Close()\nreturn nil, nil, extractErrno(err)\n@@ -542,7 +550,7 @@ func (l *localFile) Walk(names []string) ([]p9.QID, p9.File, error) {\nif err != nil {\nreturn nil, nil, extractErrno(err)\n}\n- stat, err := stat(f.FD())\n+ stat, err := fstat(f.FD())\nif err != nil {\nf.Close()\nreturn nil, nil, extractErrno(err)\n@@ -592,7 +600,7 @@ func (l *localFile) FSync() error {\n// GetAttr implements p9.File.\nfunc (l *localFile) GetAttr(_ p9.AttrMask) (p9.QID, p9.AttrMask, p9.Attr, error) {\n- stat, err := stat(l.file.FD())\n+ stat, err := fstat(l.file.FD())\nif err != nil {\nreturn p9.QID{}, p9.AttrMask{}, p9.Attr{}, extractErrno(err)\n}\n@@ -880,7 +888,7 @@ func (l *localFile) Symlink(target, newName string, uid p9.UID, gid p9.GID) (p9.\nif err := fchown(f.FD(), uid, gid); err != nil {\nreturn p9.QID{}, extractErrno(err)\n}\n- stat, err := stat(f.FD())\n+ stat, err := fstat(f.FD())\nif err != nil {\nreturn p9.QID{}, extractErrno(err)\n}\n@@ -907,15 +915,41 @@ func (l *localFile) Link(target p9.File, newName string) error {\n}\n// Mknod implements p9.File.\n-//\n-// Not implemented.\n-func (*localFile) Mknod(_ string, _ p9.FileMode, _ uint32, _ uint32, _ p9.UID, _ p9.GID) (p9.QID, error) {\n+func (l *localFile) Mknod(name string, mode p9.FileMode, _ uint32, _ uint32, uid p9.UID, gid p9.GID) (p9.QID, error) {\n+ conf := l.attachPoint.conf\n+ if conf.ROMount {\n+ if conf.PanicOnWrite {\n+ panic(\"attempt to write to RO 
mount\")\n+ }\n+ return p9.QID{}, syscall.EROFS\n+ }\n+\n+ hostPath := path.Join(l.hostPath, name)\n+\n+ // Return EEXIST if the file already exists.\n+ if _, err := stat(hostPath); err == nil {\n+ return p9.QID{}, syscall.EEXIST\n+ }\n+\n// From mknod(2) man page:\n// \"EPERM: [...] if the filesystem containing pathname does not support\n// the type of node requested.\"\n+ if mode.FileType() != p9.ModeRegular {\nreturn p9.QID{}, syscall.EPERM\n}\n+ // Allow Mknod to create regular files.\n+ if err := syscall.Mknod(hostPath, uint32(mode), 0); err != nil {\n+ return p9.QID{}, err\n+ }\n+\n+ stat, err := stat(hostPath)\n+ if err != nil {\n+ return p9.QID{}, extractErrno(err)\n+ }\n+ return l.attachPoint.makeQID(stat), nil\n+}\n+\n// UnlinkAt implements p9.File.\nfunc (l *localFile) UnlinkAt(name string, flags uint32) error {\nconf := l.attachPoint.conf\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/BUILD",
"new_path": "test/syscalls/BUILD",
"diff": "@@ -283,6 +283,7 @@ syscall_test(\nsize = \"medium\",\nadd_overlay = False, # TODO(gvisor.dev/issue/317): enable when fixed.\ntest = \"//test/syscalls/linux:inotify_test\",\n+ vfs2 = \"True\",\n)\nsyscall_test(\n@@ -351,6 +352,7 @@ syscall_test(\nsyscall_test(\nadd_overlay = True,\ntest = \"//test/syscalls/linux:mknod_test\",\n+ vfs2 = \"True\",\n)\nsyscall_test(\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/mknod.cc",
"new_path": "test/syscalls/linux/mknod.cc",
"diff": "#include <errno.h>\n#include <fcntl.h>\n#include <sys/stat.h>\n+#include <sys/types.h>\n#include <sys/un.h>\n#include <unistd.h>\n@@ -39,7 +40,28 @@ TEST(MknodTest, RegularFile) {\nEXPECT_THAT(mknod(node1.c_str(), 0, 0), SyscallSucceeds());\n}\n-TEST(MknodTest, MknodAtRegularFile) {\n+TEST(MknodTest, RegularFilePermissions) {\n+ const std::string node = NewTempAbsPath();\n+ mode_t newUmask = 0077;\n+ umask(newUmask);\n+\n+ // Attempt to open file with mode 0777. Not specifying file type should create\n+ // a regualar file.\n+ mode_t perms = S_IRWXU | S_IRWXG | S_IRWXO;\n+ EXPECT_THAT(mknod(node.c_str(), perms, 0), SyscallSucceeds());\n+\n+ // In the absence of a default ACL, the permissions of the created node are\n+ // (mode & ~umask). -- mknod(2)\n+ mode_t wantPerms = perms & ~newUmask;\n+ struct stat st;\n+ ASSERT_THAT(stat(node.c_str(), &st), SyscallSucceeds());\n+ ASSERT_EQ(st.st_mode & 0777, wantPerms);\n+\n+ // \"Zero file type is equivalent to type S_IFREG.\" - mknod(2)\n+ ASSERT_EQ(st.st_mode & S_IFMT, S_IFREG);\n+}\n+\n+TEST(MknodTest, MknodAtFIFO) {\nconst TempPath dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());\nconst std::string fifo_relpath = NewTempRelPath();\nconst std::string fifo = JoinPath(dir.path(), fifo_relpath);\n@@ -72,7 +94,7 @@ TEST(MknodTest, MknodOnExistingPathFails) {\nTEST(MknodTest, UnimplementedTypesReturnError) {\nconst std::string path = NewTempAbsPath();\n- if (IsRunningOnGvisor()) {\n+ if (IsRunningWithVFS1()) {\nASSERT_THAT(mknod(path.c_str(), S_IFSOCK, 0),\nSyscallFailsWithErrno(EOPNOTSUPP));\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Fix mknod and inotify syscall test
This change fixes a few things:
- creating sockets using mknod(2) is supported via vfs2
- fsgofer can create regular files via mknod(2)
- mode = 0 for mknod(2) will be interpreted as regular file in vfs2 as well
Updates #2923
PiperOrigin-RevId: 320074267 |
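The "zero file type means S_IFREG" rule referenced above comes straight from mknod(2) and can be verified on a Linux host with the raw syscall wrapper; the path below is arbitrary and no extra capabilities are needed for a regular-file node.

```go
package main

import (
	"fmt"
	"log"
	"os"
	"path/filepath"
	"syscall"
)

func main() {
	path := filepath.Join(os.TempDir(), "mknod-demo")
	os.Remove(path) // mknod(2) fails with EEXIST if the node already exists
	defer os.Remove(path)

	// No file-type bits set: the kernel creates a regular file (the
	// permission bits are still subject to the process umask).
	if err := syscall.Mknod(path, 0644, 0); err != nil {
		log.Fatal(err)
	}

	var st syscall.Stat_t
	if err := syscall.Stat(path, &st); err != nil {
		log.Fatal(err)
	}
	fmt.Println("regular file:", st.Mode&syscall.S_IFMT == syscall.S_IFREG) // true
}
```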
260,003 | 07.07.2020 15:55:40 | 25,200 | 7e4d2d63eef3f643222181b3a227bfc7ebe44db2 | icmp: When setting TransportHeader, remove from the Data portion.
The current convention is that when a header is set in the pkt.XxxHeader field, it
gets removed from pkt.Data. ICMP does not currently follow this convention. | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/nic.go",
"new_path": "pkg/tcpip/stack/nic.go",
"diff": "@@ -1358,16 +1358,19 @@ func (n *NIC) DeliverTransportPacket(r *Route, protocol tcpip.TransportProtocolN\n// TransportHeader is nil only when pkt is an ICMP packet or was reassembled\n// from fragments.\nif pkt.TransportHeader == nil {\n- // TODO(gvisor.dev/issue/170): ICMP packets don't have their\n- // TransportHeader fields set. See icmp/protocol.go:protocol.Parse for a\n+ // TODO(gvisor.dev/issue/170): ICMP packets don't have their TransportHeader\n+ // fields set yet, parse it here. See icmp/protocol.go:protocol.Parse for a\n// full explanation.\nif protocol == header.ICMPv4ProtocolNumber || protocol == header.ICMPv6ProtocolNumber {\n+ // ICMP packets may be longer, but until icmp.Parse is implemented, here\n+ // we parse it using the minimum size.\ntransHeader, ok := pkt.Data.PullUp(transProto.MinimumPacketSize())\nif !ok {\nn.stack.stats.MalformedRcvdPackets.Increment()\nreturn\n}\npkt.TransportHeader = transHeader\n+ pkt.Data.TrimFront(len(pkt.TransportHeader))\n} else {\n// This is either a bad packet or was re-assembled from fragments.\ntransProto.Parse(pkt)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/icmp/endpoint.go",
"new_path": "pkg/tcpip/transport/icmp/endpoint.go",
"diff": "@@ -744,15 +744,15 @@ func (e *endpoint) HandlePacket(r *stack.Route, id stack.TransportEndpointID, pk\n// Only accept echo replies.\nswitch e.NetProto {\ncase header.IPv4ProtocolNumber:\n- h, ok := pkt.Data.PullUp(header.ICMPv4MinimumSize)\n- if !ok || header.ICMPv4(h).Type() != header.ICMPv4EchoReply {\n+ h := header.ICMPv4(pkt.TransportHeader)\n+ if len(h) < header.ICMPv4MinimumSize || h.Type() != header.ICMPv4EchoReply {\ne.stack.Stats().DroppedPackets.Increment()\ne.stats.ReceiveErrors.MalformedPacketsReceived.Increment()\nreturn\n}\ncase header.IPv6ProtocolNumber:\n- h, ok := pkt.Data.PullUp(header.ICMPv6MinimumSize)\n- if !ok || header.ICMPv6(h).Type() != header.ICMPv6EchoReply {\n+ h := header.ICMPv6(pkt.TransportHeader)\n+ if len(h) < header.ICMPv6MinimumSize || h.Type() != header.ICMPv6EchoReply {\ne.stack.Stats().DroppedPackets.Increment()\ne.stats.ReceiveErrors.MalformedPacketsReceived.Increment()\nreturn\n@@ -786,7 +786,9 @@ func (e *endpoint) HandlePacket(r *stack.Route, id stack.TransportEndpointID, pk\n},\n}\n- packet.data = pkt.Data\n+ // ICMP socket's data includes ICMP header.\n+ packet.data = pkt.TransportHeader.ToVectorisedView()\n+ packet.data.Append(pkt.Data)\ne.rcvList.PushBack(packet)\ne.rcvBufSize += packet.data.Size()\n"
}
] | Go | Apache License 2.0 | google/gvisor | icmp: When setting TransportHeader, remove from the Data portion.
The current convention is that when a header is set in the pkt.XxxHeader field, it
gets removed from pkt.Data. ICMP does not currently follow this convention.
PiperOrigin-RevId: 320078606 |
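The convention enforced above, a header moved into pkt.TransportHeader is trimmed from pkt.Data and the ICMP endpoint re-prepends it for socket reads, can be shown with plain byte slices; packetBuffer below is a stand-in for the real stack.PacketBuffer, and the 4-byte header length is shortened for brevity (ICMPv4's minimum is actually 8 bytes).

```go
package main

import "fmt"

// packetBuffer stands in for stack.PacketBuffer: plain byte slices instead of
// VectorisedViews.
type packetBuffer struct {
	TransportHeader []byte
	Data            []byte
}

// parse mimics the NIC-side step: pull the first hdrLen bytes of Data into
// TransportHeader and trim them from Data.
func parse(pkt *packetBuffer, hdrLen int) bool {
	if len(pkt.Data) < hdrLen {
		return false
	}
	pkt.TransportHeader = pkt.Data[:hdrLen]
	pkt.Data = pkt.Data[hdrLen:]
	return true
}

// socketData mimics the endpoint-side step: an ICMP socket's read data is the
// ICMP header followed by the remaining payload.
func socketData(pkt *packetBuffer) []byte {
	return append(append([]byte(nil), pkt.TransportHeader...), pkt.Data...)
}

func main() {
	pkt := &packetBuffer{Data: []byte("HDR!ping-payload")} // 4-byte fake header
	if !parse(pkt, 4) {
		fmt.Println("malformed packet")
		return
	}
	fmt.Printf("header=%q data=%q socket=%q\n",
		pkt.TransportHeader, pkt.Data, socketData(pkt))
}
```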
259,907 | 07.07.2020 21:35:47 | 25,200 | efa2615eb008a642dc542176759dc560c5f48a2d | [vfs2] Remove VFS1 usage in VDSO.
Removed VDSO dependency on VFS1.
Resolves | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/testutil/kernel.go",
"new_path": "pkg/sentry/fsimpl/testutil/kernel.go",
"diff": "@@ -73,7 +73,7 @@ func Boot() (*kernel.Kernel, error) {\nk.SetMemoryFile(mf)\n// Pass k as the platform since it is savable, unlike the actual platform.\n- vdso, err := loader.PrepareVDSO(nil, k)\n+ vdso, err := loader.PrepareVDSO(k)\nif err != nil {\nreturn nil, fmt.Errorf(\"creating vdso: %v\", err)\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/loader/BUILD",
"new_path": "pkg/sentry/loader/BUILD",
"diff": "@@ -30,9 +30,6 @@ go_library(\n\"//pkg/rand\",\n\"//pkg/safemem\",\n\"//pkg/sentry/arch\",\n- \"//pkg/sentry/fs\",\n- \"//pkg/sentry/fs/anon\",\n- \"//pkg/sentry/fs/fsutil\",\n\"//pkg/sentry/fsbridge\",\n\"//pkg/sentry/kernel/auth\",\n\"//pkg/sentry/limits\",\n@@ -45,6 +42,5 @@ go_library(\n\"//pkg/syserr\",\n\"//pkg/syserror\",\n\"//pkg/usermem\",\n- \"//pkg/waiter\",\n],\n)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/loader/elf.go",
"new_path": "pkg/sentry/loader/elf.go",
"diff": "@@ -90,14 +90,23 @@ type elfInfo struct {\nsharedObject bool\n}\n+// fullReader interface extracts the ReadFull method from fsbridge.File so that\n+// client code does not need to define an entire fsbridge.File when only read\n+// functionality is needed.\n+//\n+// TODO(gvisor.dev/issue/1035): Once VFS2 ships, rewrite this to wrap\n+// vfs.FileDescription's PRead/Read instead.\n+type fullReader interface {\n+ // ReadFull is the same as fsbridge.File.ReadFull.\n+ ReadFull(ctx context.Context, dst usermem.IOSequence, offset int64) (int64, error)\n+}\n+\n// parseHeader parse the ELF header, verifying that this is a supported ELF\n// file and returning the ELF program headers.\n//\n// This is similar to elf.NewFile, except that it is more strict about what it\n// accepts from the ELF, and it doesn't parse unnecessary parts of the file.\n-//\n-// ctx may be nil if f does not need it.\n-func parseHeader(ctx context.Context, f fsbridge.File) (elfInfo, error) {\n+func parseHeader(ctx context.Context, f fullReader) (elfInfo, error) {\n// Check ident first; it will tell us the endianness of the rest of the\n// structs.\nvar ident [elf.EI_NIDENT]byte\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/loader/loader.go",
"new_path": "pkg/sentry/loader/loader.go",
"diff": "@@ -27,7 +27,6 @@ import (\n\"gvisor.dev/gvisor/pkg/cpuid\"\n\"gvisor.dev/gvisor/pkg/rand\"\n\"gvisor.dev/gvisor/pkg/sentry/arch\"\n- \"gvisor.dev/gvisor/pkg/sentry/fs\"\n\"gvisor.dev/gvisor/pkg/sentry/fsbridge\"\n\"gvisor.dev/gvisor/pkg/sentry/kernel/auth\"\n\"gvisor.dev/gvisor/pkg/sentry/mm\"\n@@ -80,22 +79,6 @@ type LoadArgs struct {\nFeatures *cpuid.FeatureSet\n}\n-// readFull behaves like io.ReadFull for an *fs.File.\n-func readFull(ctx context.Context, f *fs.File, dst usermem.IOSequence, offset int64) (int64, error) {\n- var total int64\n- for dst.NumBytes() > 0 {\n- n, err := f.Preadv(ctx, dst, offset+total)\n- total += n\n- if err == io.EOF && total != 0 {\n- return total, io.ErrUnexpectedEOF\n- } else if err != nil {\n- return total, err\n- }\n- dst = dst.DropFirst64(n)\n- }\n- return total, nil\n-}\n-\n// openPath opens args.Filename and checks that it is valid for loading.\n//\n// openPath returns an *fs.Dirent and *fs.File for args.Filename, which is not\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/loader/vdso.go",
"new_path": "pkg/sentry/loader/vdso.go",
"diff": "@@ -26,10 +26,6 @@ import (\n\"gvisor.dev/gvisor/pkg/log\"\n\"gvisor.dev/gvisor/pkg/safemem\"\n\"gvisor.dev/gvisor/pkg/sentry/arch\"\n- \"gvisor.dev/gvisor/pkg/sentry/fs\"\n- \"gvisor.dev/gvisor/pkg/sentry/fs/anon\"\n- \"gvisor.dev/gvisor/pkg/sentry/fs/fsutil\"\n- \"gvisor.dev/gvisor/pkg/sentry/fsbridge\"\n\"gvisor.dev/gvisor/pkg/sentry/memmap\"\n\"gvisor.dev/gvisor/pkg/sentry/mm\"\n\"gvisor.dev/gvisor/pkg/sentry/pgalloc\"\n@@ -37,7 +33,6 @@ import (\n\"gvisor.dev/gvisor/pkg/sentry/usage\"\n\"gvisor.dev/gvisor/pkg/syserror\"\n\"gvisor.dev/gvisor/pkg/usermem\"\n- \"gvisor.dev/gvisor/pkg/waiter\"\n)\nconst vdsoPrelink = 0xffffffffff700000\n@@ -55,52 +50,11 @@ func (f *fileContext) Value(key interface{}) interface{} {\n}\n}\n-// byteReader implements fs.FileOperations for reading from a []byte source.\n-type byteReader struct {\n- fsutil.FileNoFsync `state:\"nosave\"`\n- fsutil.FileNoIoctl `state:\"nosave\"`\n- fsutil.FileNoMMap `state:\"nosave\"`\n- fsutil.FileNoSplice `state:\"nosave\"`\n- fsutil.FileNoopFlush `state:\"nosave\"`\n- fsutil.FileNoopRelease `state:\"nosave\"`\n- fsutil.FileNotDirReaddir `state:\"nosave\"`\n- fsutil.FilePipeSeek `state:\"nosave\"`\n- fsutil.FileUseInodeUnstableAttr `state:\"nosave\"`\n- waiter.AlwaysReady `state:\"nosave\"`\n-\n+type byteFullReader struct {\ndata []byte\n}\n-var _ fs.FileOperations = (*byteReader)(nil)\n-\n-// newByteReaderFile creates a fake file to read data from.\n-//\n-// TODO(gvisor.dev/issue/2921): Convert to VFS2.\n-func newByteReaderFile(ctx context.Context, data []byte) *fs.File {\n- // Create a fake inode.\n- inode := fs.NewInode(\n- ctx,\n- &fsutil.SimpleFileInode{},\n- fs.NewPseudoMountSource(ctx),\n- fs.StableAttr{\n- Type: fs.Anonymous,\n- DeviceID: anon.PseudoDevice.DeviceID(),\n- InodeID: anon.PseudoDevice.NextIno(),\n- BlockSize: usermem.PageSize,\n- })\n-\n- // Use the fake inode to create a fake dirent.\n- dirent := fs.NewTransientDirent(inode)\n- defer dirent.DecRef()\n-\n- // Use the fake dirent to make a fake file.\n- flags := fs.FileFlags{Read: true, Pread: true}\n- return fs.NewFile(&fileContext{Context: context.Background()}, dirent, flags, &byteReader{\n- data: data,\n- })\n-}\n-\n-func (b *byteReader) Read(ctx context.Context, file *fs.File, dst usermem.IOSequence, offset int64) (int64, error) {\n+func (b *byteFullReader) ReadFull(ctx context.Context, dst usermem.IOSequence, offset int64) (int64, error) {\nif offset < 0 {\nreturn 0, syserror.EINVAL\n}\n@@ -111,10 +65,6 @@ func (b *byteReader) Read(ctx context.Context, file *fs.File, dst usermem.IOSequ\nreturn int64(n), err\n}\n-func (b *byteReader) Write(ctx context.Context, file *fs.File, src usermem.IOSequence, offset int64) (int64, error) {\n- panic(\"Write not supported\")\n-}\n-\n// validateVDSO checks that the VDSO can be loaded by loadVDSO.\n//\n// VDSOs are special (see below). 
Since we are going to map the VDSO directly\n@@ -130,7 +80,7 @@ func (b *byteReader) Write(ctx context.Context, file *fs.File, src usermem.IOSeq\n// * PT_LOAD segments don't extend beyond the end of the file.\n//\n// ctx may be nil if f does not need it.\n-func validateVDSO(ctx context.Context, f fsbridge.File, size uint64) (elfInfo, error) {\n+func validateVDSO(ctx context.Context, f fullReader, size uint64) (elfInfo, error) {\ninfo, err := parseHeader(ctx, f)\nif err != nil {\nlog.Infof(\"Unable to parse VDSO header: %v\", err)\n@@ -248,13 +198,12 @@ func getSymbolValueFromVDSO(symbol string) (uint64, error) {\n// PrepareVDSO validates the system VDSO and returns a VDSO, containing the\n// param page for updating by the kernel.\n-func PrepareVDSO(ctx context.Context, mfp pgalloc.MemoryFileProvider) (*VDSO, error) {\n- vdsoFile := fsbridge.NewFSFile(newByteReaderFile(ctx, vdsoBin))\n+func PrepareVDSO(mfp pgalloc.MemoryFileProvider) (*VDSO, error) {\n+ vdsoFile := &byteFullReader{data: vdsoBin}\n// First make sure the VDSO is valid. vdsoFile does not use ctx, so a\n// nil context can be passed.\ninfo, err := validateVDSO(nil, vdsoFile, uint64(len(vdsoBin)))\n- vdsoFile.DecRef()\nif err != nil {\nreturn nil, err\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/boot/loader.go",
"new_path": "runsc/boot/loader.go",
"diff": "@@ -227,9 +227,7 @@ func New(args Args) (*Loader, error) {\n// Create VDSO.\n//\n// Pass k as the platform since it is savable, unlike the actual platform.\n- //\n- // FIXME(b/109889800): Use non-nil context.\n- vdso, err := loader.PrepareVDSO(nil, k)\n+ vdso, err := loader.PrepareVDSO(k)\nif err != nil {\nreturn nil, fmt.Errorf(\"creating vdso: %v\", err)\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | [vfs2] Remove VFS1 usage in VDSO.
Removed VDSO dependency on VFS1.
Resolves #2921
PiperOrigin-RevId: 320122176 |
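The idea behind byteFullReader in the record above is that the embedded VDSO is just a []byte, so header validation only needs "read fully at an offset"; in plain Go, bytes.Reader already provides that, and the ELF-magic check below is only illustrative of what validateVDSO/parseHeader look at first.

```go
package main

import (
	"bytes"
	"fmt"
)

func main() {
	// A stand-in for the embedded, prelinked VDSO image.
	vdsoBin := []byte("\x7fELF\x02\x01\x01...rest of the prelinked image...")
	r := bytes.NewReader(vdsoBin)

	// Read the ELF identification bytes fully at offset 0, the way the
	// loader's parseHeader starts its validation.
	ident := make([]byte, 4)
	if _, err := r.ReadAt(ident, 0); err != nil {
		fmt.Println("short VDSO image:", err)
		return
	}
	fmt.Println("valid ELF magic:", bytes.Equal(ident, []byte("\x7fELF"))) // true
}
```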
259,907 | 08.07.2020 12:03:28 | 25,200 | e3db9bda60580df127ea445fc1f862864c5451f9 | Enable shards in runtime test runner.
Fixed an issue with the runtime test runner which enables us to run tests in
shards. We had to touch the status file as indicated by an env var. | [
{
"change_type": "MODIFY",
"old_path": "pkg/test/testutil/testutil.go",
"new_path": "pkg/test/testutil/testutil.go",
"diff": "@@ -482,6 +482,21 @@ func IsStatic(filename string) (bool, error) {\nreturn true, nil\n}\n+// TouchShardStatusFile indicates to Bazel that the test runner supports\n+// sharding by creating or updating the last modified date of the file\n+// specified by TEST_SHARD_STATUS_FILE.\n+//\n+// See https://docs.bazel.build/versions/master/test-encyclopedia.html#role-of-the-test-runner.\n+func TouchShardStatusFile() error {\n+ if statusFile := os.Getenv(\"TEST_SHARD_STATUS_FILE\"); statusFile != \"\" {\n+ cmd := exec.Command(\"touch\", statusFile)\n+ if b, err := cmd.CombinedOutput(); err != nil {\n+ return fmt.Errorf(\"touch %q failed:\\n output: %s\\n error: %s\", statusFile, string(b), err.Error())\n+ }\n+ }\n+ return nil\n+}\n+\n// TestIndicesForShard returns indices for this test shard based on the\n// TEST_SHARD_INDEX and TEST_TOTAL_SHARDS environment vars.\n//\n"
},
{
"change_type": "MODIFY",
"old_path": "test/runtimes/runner/main.go",
"new_path": "test/runtimes/runner/main.go",
"diff": "@@ -63,6 +63,11 @@ func runTests() int {\nd := dockerutil.MakeDocker(testutil.DefaultLogger(*lang))\ndefer d.CleanUp()\n+ if err := testutil.TouchShardStatusFile(); err != nil {\n+ fmt.Fprintf(os.Stderr, \"error touching status shard file: %v\\n\", err)\n+ return 1\n+ }\n+\n// Get a slice of tests to run. This will also start a single Docker\n// container that will be used to run each test. The final test will\n// stop the Docker container.\n"
}
] | Go | Apache License 2.0 | google/gvisor | Enable shards in runtime test runner.
Fixed an issue with the runtime test runner which enables us to run tests in
shards. We had to touch the status file as indicated by an env var.
PiperOrigin-RevId: 320236205 |
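Bazel only checks that the file named by TEST_SHARD_STATUS_FILE exists with a fresh mtime, so the same effect can be had without shelling out to touch(1); the variant below is a pure-Go alternative for illustration, not the code the commit actually adds (which execs touch).

```go
package main

import (
	"log"
	"os"
	"time"
)

// touchShardStatusFile creates or refreshes $TEST_SHARD_STATUS_FILE to tell
// Bazel that the test runner supports sharding.
func touchShardStatusFile() error {
	statusFile := os.Getenv("TEST_SHARD_STATUS_FILE")
	if statusFile == "" {
		return nil // not running under a sharded Bazel test
	}
	// Create the file if it does not exist yet.
	if f, err := os.OpenFile(statusFile, os.O_CREATE|os.O_WRONLY, 0644); err == nil {
		f.Close()
	}
	// Bump its modification time.
	now := time.Now()
	return os.Chtimes(statusFile, now, now)
}

func main() {
	if err := touchShardStatusFile(); err != nil {
		log.Fatalf("touching shard status file: %v", err)
	}
}
```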
259,891 | 08.07.2020 10:54:23 | 25,200 | 14ff2ea9bfc83fb37afe8a5e17e8b8173f85eb68 | ip6tables: handle both IPv4 and v6 addresses
Enabling IPv6 in Docker caused IPv4 tests to fail because localAddrs
didn't distinguish between address types. Example failure: | [
{
"change_type": "MODIFY",
"old_path": "test/iptables/filter_input.go",
"new_path": "test/iptables/filter_input.go",
"diff": "@@ -618,7 +618,7 @@ func (FilterInputDestination) Name() string {\n// ContainerAction implements TestCase.ContainerAction.\nfunc (FilterInputDestination) ContainerAction(ip net.IP) error {\n- addrs, err := localAddrs()\n+ addrs, err := localAddrs(false)\nif err != nil {\nreturn err\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "test/iptables/iptables_test.go",
"new_path": "test/iptables/iptables_test.go",
"diff": "@@ -17,6 +17,7 @@ package iptables\nimport (\n\"fmt\"\n\"net\"\n+ \"reflect\"\n\"testing\"\n\"gvisor.dev/gvisor/pkg/test/dockerutil\"\n@@ -315,3 +316,28 @@ func TestInputSource(t *testing.T) {\nfunc TestInputInvertSource(t *testing.T) {\nsingleTest(t, FilterInputInvertSource{})\n}\n+\n+func TestFilterAddrs(t *testing.T) {\n+ tcs := []struct {\n+ ipv6 bool\n+ addrs []string\n+ want []string\n+ }{\n+ {\n+ ipv6: false,\n+ addrs: []string{\"192.168.0.1\", \"192.168.0.2/24\", \"::1\", \"::2/128\"},\n+ want: []string{\"192.168.0.1\", \"192.168.0.2\"},\n+ },\n+ {\n+ ipv6: true,\n+ addrs: []string{\"192.168.0.1\", \"192.168.0.2/24\", \"::1\", \"::2/128\"},\n+ want: []string{\"::1\", \"::2\"},\n+ },\n+ }\n+\n+ for _, tc := range tcs {\n+ if got := filterAddrs(tc.addrs, tc.ipv6); !reflect.DeepEqual(got, tc.want) {\n+ t.Errorf(\"%v with IPv6 %t: got %v, but wanted %v\", tc.addrs, tc.ipv6, got, tc.want)\n+ }\n+ }\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "test/iptables/iptables_util.go",
"new_path": "test/iptables/iptables_util.go",
"diff": "@@ -18,6 +18,7 @@ import (\n\"fmt\"\n\"net\"\n\"os/exec\"\n+ \"strings\"\n\"time\"\n\"gvisor.dev/gvisor/pkg/test/testutil\"\n@@ -157,8 +158,10 @@ func connectTCP(ip net.IP, port int, timeout time.Duration) error {\nreturn nil\n}\n-// localAddrs returns a list of local network interface addresses.\n-func localAddrs() ([]string, error) {\n+// localAddrs returns a list of local network interface addresses. When ipv6 is\n+// true, only IPv6 addresses are returned. Otherwise only IPv4 addresses are\n+// returned.\n+func localAddrs(ipv6 bool) ([]string, error) {\naddrs, err := net.InterfaceAddrs()\nif err != nil {\nreturn nil, err\n@@ -167,7 +170,19 @@ func localAddrs() ([]string, error) {\nfor _, addr := range addrs {\naddrStrs = append(addrStrs, addr.String())\n}\n- return addrStrs, nil\n+ return filterAddrs(addrStrs, ipv6), nil\n+}\n+\n+func filterAddrs(addrs []string, ipv6 bool) []string {\n+ addrStrs := make([]string, 0, len(addrs))\n+ for _, addr := range addrs {\n+ // Add only IPv4 or only IPv6 addresses.\n+ parts := strings.Split(addr, \"/\")\n+ if isIPv6 := net.ParseIP(parts[0]).To4() == nil; isIPv6 == ipv6 {\n+ addrStrs = append(addrStrs, parts[0])\n+ }\n+ }\n+ return addrStrs\n}\n// getInterfaceName returns the name of the interface other than loopback.\n"
},
{
"change_type": "MODIFY",
"old_path": "test/iptables/nat.go",
"new_path": "test/iptables/nat.go",
"diff": "@@ -241,7 +241,7 @@ func (NATPreRedirectIP) Name() string {\n// ContainerAction implements TestCase.ContainerAction.\nfunc (NATPreRedirectIP) ContainerAction(ip net.IP) error {\n- addrs, err := localAddrs()\n+ addrs, err := localAddrs(false)\nif err != nil {\nreturn err\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | ip6tables: handle both IPv4 and v6 addresses
Enabling IPv6 in Docker caused IPv4 tests to fail because localAddrs
didn't distinguish between address types. Example failure:
https://source.cloud.google.com/results/invocations/203b2401-3333-4bec-9a56-72cc53d68ddd/log |
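The family test that fixes the failure above is just net.ParseIP(...).To4() == nil after stripping any prefix length; the standalone copy below mirrors the new filterAddrs helper and reproduces the table from the added unit test.

```go
package main

import (
	"fmt"
	"net"
	"strings"
)

// filterAddrs keeps only IPv6 addresses when ipv6 is true, otherwise only
// IPv4 addresses; interface addresses may arrive in CIDR form.
func filterAddrs(addrs []string, ipv6 bool) []string {
	var out []string
	for _, addr := range addrs {
		ip := strings.Split(addr, "/")[0] // drop the prefix length, if any
		if isIPv6 := net.ParseIP(ip).To4() == nil; isIPv6 == ipv6 {
			out = append(out, ip)
		}
	}
	return out
}

func main() {
	addrs := []string{"192.168.0.1", "192.168.0.2/24", "::1", "::2/128"}
	fmt.Println(filterAddrs(addrs, false)) // [192.168.0.1 192.168.0.2]
	fmt.Println(filterAddrs(addrs, true))  // [::1 ::2]
}
```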
259,881 | 08.07.2020 15:46:25 | 25,200 | a75d9f7bee72b9d7611cb015e473ac0bed3d9b02 | Drop empty line | [
{
"change_type": "MODIFY",
"old_path": "runsc/sandbox/network.go",
"new_path": "runsc/sandbox/network.go",
"diff": "@@ -134,7 +134,6 @@ func createInterfacesAndRoutesFromNS(conn *urpc.Client, nsPath string, hardwareG\nreturn err\n}\nif isRoot {\n-\nreturn fmt.Errorf(\"cannot run with network enabled in root network namespace\")\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Drop empty line
PiperOrigin-RevId: 320281516 |
259,992 | 08.07.2020 17:10:35 | 25,200 | c4815af9475cc4680c6d598d9c930de892c98aae | Add shared mount hints to VFS2
Container restart test is disabled for VFS2 for now.
Updates | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/testutil/kernel.go",
"new_path": "pkg/sentry/fsimpl/testutil/kernel.go",
"diff": "@@ -62,6 +62,7 @@ func Boot() (*kernel.Kernel, error) {\nreturn nil, fmt.Errorf(\"creating platform: %v\", err)\n}\n+ kernel.VFS2Enabled = true\nk := &kernel.Kernel{\nPlatform: plat,\n}\n@@ -103,11 +104,6 @@ func Boot() (*kernel.Kernel, error) {\nreturn nil, fmt.Errorf(\"initializing kernel: %v\", err)\n}\n- kernel.VFS2Enabled = true\n-\n- if err := k.VFS().Init(); err != nil {\n- return nil, fmt.Errorf(\"VFS init: %v\", err)\n- }\nk.VFS().MustRegisterFilesystemType(tmpfs.Name, &tmpfs.FilesystemType{}, &vfs.RegisterFilesystemTypeOptions{\nAllowUserMount: true,\nAllowUserList: true,\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/loader/loader.go",
"new_path": "pkg/sentry/loader/loader.go",
"diff": "@@ -221,14 +221,14 @@ func Load(ctx context.Context, args LoadArgs, extraAuxv []arch.AuxEntry, vdso *V\n// Load the executable itself.\nloaded, ac, file, newArgv, err := loadExecutable(ctx, args)\nif err != nil {\n- return 0, nil, \"\", syserr.NewDynamic(fmt.Sprintf(\"Failed to load %s: %v\", args.Filename, err), syserr.FromError(err).ToLinux())\n+ return 0, nil, \"\", syserr.NewDynamic(fmt.Sprintf(\"failed to load %s: %v\", args.Filename, err), syserr.FromError(err).ToLinux())\n}\ndefer file.DecRef()\n// Load the VDSO.\nvdsoAddr, err := loadVDSO(ctx, args.MemoryManager, vdso, loaded)\nif err != nil {\n- return 0, nil, \"\", syserr.NewDynamic(fmt.Sprintf(\"Error loading VDSO: %v\", err), syserr.FromError(err).ToLinux())\n+ return 0, nil, \"\", syserr.NewDynamic(fmt.Sprintf(\"error loading VDSO: %v\", err), syserr.FromError(err).ToLinux())\n}\n// Setup the heap. brk starts at the next page after the end of the\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/vfs/vfs.go",
"new_path": "pkg/sentry/vfs/vfs.go",
"diff": "@@ -123,6 +123,9 @@ type VirtualFilesystem struct {\n// Init initializes a new VirtualFilesystem with no mounts or FilesystemTypes.\nfunc (vfs *VirtualFilesystem) Init() error {\n+ if vfs.mountpoints != nil {\n+ panic(\"VFS already initialized\")\n+ }\nvfs.mountpoints = make(map[*Dentry]map[*Mount]struct{})\nvfs.devices = make(map[devTuple]*registeredDevice)\nvfs.anonBlockDevMinorNext = 1\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/boot/fs.go",
"new_path": "runsc/boot/fs.go",
"diff": "@@ -29,6 +29,7 @@ import (\n_ \"gvisor.dev/gvisor/pkg/sentry/fs/sys\"\n_ \"gvisor.dev/gvisor/pkg/sentry/fs/tmpfs\"\n_ \"gvisor.dev/gvisor/pkg/sentry/fs/tty\"\n+ \"gvisor.dev/gvisor/pkg/sentry/vfs\"\nspecs \"github.com/opencontainers/runtime-spec/specs-go\"\n\"gvisor.dev/gvisor/pkg/abi/linux\"\n@@ -390,6 +391,10 @@ type mountHint struct {\n// root is the inode where the volume is mounted. For mounts with 'pod' share\n// the volume is mounted once and then bind mounted inside the containers.\nroot *fs.Inode\n+\n+ // vfsMount is the master mount for the volume. For mounts with 'pod' share\n+ // the master volume is bind mounted inside the containers.\n+ vfsMount *vfs.Mount\n}\nfunc (m *mountHint) setField(key, val string) error {\n@@ -571,9 +576,9 @@ func newContainerMounter(spec *specs.Spec, goferFDs []int, k *kernel.Kernel, hin\n// processHints processes annotations that container hints about how volumes\n// should be mounted (e.g. a volume shared between containers). It must be\n// called for the root container only.\n-func (c *containerMounter) processHints(conf *Config) error {\n+func (c *containerMounter) processHints(conf *Config, creds *auth.Credentials) error {\nif conf.VFS2 {\n- return nil\n+ return c.processHintsVFS2(conf, creds)\n}\nctx := c.k.SupervisorContext()\nfor _, hint := range c.hints.mounts {\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/boot/loader.go",
"new_path": "runsc/boot/loader.go",
"diff": "@@ -298,6 +298,12 @@ func New(args Args) (*Loader, error) {\nreturn nil, fmt.Errorf(\"initializing kernel: %v\", err)\n}\n+ if kernel.VFS2Enabled {\n+ if err := registerFilesystems(k); err != nil {\n+ return nil, fmt.Errorf(\"registering filesystems: %w\", err)\n+ }\n+ }\n+\nif err := adjustDirentCache(k); err != nil {\nreturn nil, err\n}\n@@ -559,7 +565,7 @@ func (l *Loader) run() error {\nl.startGoferMonitor(l.sandboxID, l.goferFDs)\nmntr := newContainerMounter(l.spec, l.goferFDs, l.k, l.mountHints)\n- if err := mntr.processHints(l.conf); err != nil {\n+ if err := mntr.processHints(l.conf, l.rootProcArgs.Credentials); err != nil {\nreturn err\n}\nif err := setupContainerFS(ctx, l.conf, mntr, &l.rootProcArgs); err != nil {\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/boot/loader_test.go",
"new_path": "runsc/boot/loader_test.go",
"diff": "@@ -480,7 +480,7 @@ func TestCreateMountNamespaceVFS2(t *testing.T) {\ndefer loaderCleanup()\nmntr := newContainerMounter(l.spec, l.goferFDs, l.k, l.mountHints)\n- if err := mntr.processHints(l.conf); err != nil {\n+ if err := mntr.processHints(l.conf, l.rootProcArgs.Credentials); err != nil {\nt.Fatalf(\"failed process hints: %v\", err)\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/boot/vfs.go",
"new_path": "runsc/boot/vfs.go",
"diff": "@@ -43,7 +43,11 @@ import (\n\"gvisor.dev/gvisor/pkg/syserror\"\n)\n-func registerFilesystems(ctx context.Context, vfsObj *vfs.VirtualFilesystem, creds *auth.Credentials) error {\n+func registerFilesystems(k *kernel.Kernel) error {\n+ ctx := k.SupervisorContext()\n+ creds := auth.NewRootCredentials(k.RootUserNamespace())\n+ vfsObj := k.VFS()\n+\nvfsObj.MustRegisterFilesystemType(devpts.Name, &devpts.FilesystemType{}, &vfs.RegisterFilesystemTypeOptions{\nAllowUserList: true,\n// TODO(b/29356795): Users may mount this once the terminals are in a\n@@ -113,9 +117,6 @@ func registerFilesystems(ctx context.Context, vfsObj *vfs.VirtualFilesystem, cre\n}\nfunc setupContainerVFS2(ctx context.Context, conf *Config, mntr *containerMounter, procArgs *kernel.CreateProcessArgs) error {\n- if err := mntr.k.VFS().Init(); err != nil {\n- return fmt.Errorf(\"failed to initialize VFS: %w\", err)\n- }\nmns, err := mntr.setupVFS2(ctx, conf, procArgs)\nif err != nil {\nreturn fmt.Errorf(\"failed to setupFS: %w\", err)\n@@ -144,10 +145,6 @@ func (c *containerMounter) setupVFS2(ctx context.Context, conf *Config, procArgs\nrootProcArgs.MaxSymlinkTraversals = linux.MaxSymlinkTraversals\nrootCtx := procArgs.NewContext(c.k)\n- if err := registerFilesystems(rootCtx, c.k.VFS(), rootCreds); err != nil {\n- return nil, fmt.Errorf(\"register filesystems: %w\", err)\n- }\n-\nmns, err := c.createMountNamespaceVFS2(rootCtx, conf, rootCreds)\nif err != nil {\nreturn nil, fmt.Errorf(\"creating mount namespace: %w\", err)\n@@ -182,8 +179,14 @@ func (c *containerMounter) mountSubmountsVFS2(ctx context.Context, conf *Config,\nfor i := range mounts {\nsubmount := &mounts[i]\nlog.Debugf(\"Mounting %q to %q, type: %s, options: %s\", submount.Source, submount.Destination, submount.Type, submount.Options)\n+ if hint := c.hints.findMount(submount.Mount); hint != nil && hint.isSupported() {\n+ if err := c.mountSharedSubmountVFS2(ctx, conf, mns, creds, submount.Mount, hint); err != nil {\n+ return fmt.Errorf(\"mount shared mount %q to %q: %v\", hint.name, submount.Destination, err)\n+ }\n+ } else {\nif err := c.mountSubmountVFS2(ctx, conf, mns, creds, submount); err != nil {\n- return err\n+ return fmt.Errorf(\"mount submount %q: %w\", submount.Destination, err)\n+ }\n}\n}\n@@ -257,20 +260,18 @@ func (c *containerMounter) mountSubmountVFS2(ctx context.Context, conf *Config,\n// getMountNameAndOptionsVFS2 retrieves the fsName, opts, and useOverlay values\n// used for mounts.\nfunc (c *containerMounter) getMountNameAndOptionsVFS2(conf *Config, m *mountAndFD) (string, *vfs.MountOptions, error) {\n- var (\n- fsName string\n- data []string\n- )\n+ fsName := m.Type\n+ var data []string\n// Find filesystem name and FS specific data field.\nswitch m.Type {\ncase devpts.Name, devtmpfs.Name, proc.Name, sys.Name:\n- fsName = m.Type\n+ // Nothing to do.\n+\ncase nonefs:\nfsName = sys.Name\n- case tmpfs.Name:\n- fsName = m.Type\n+ case tmpfs.Name:\nvar err error\ndata, err = parseAndFilterOptions(m.Options, tmpfsAllowedData...)\nif err != nil {\n@@ -279,10 +280,16 @@ func (c *containerMounter) getMountNameAndOptionsVFS2(conf *Config, m *mountAndF\ncase bind:\nfsName = gofer.Name\n+ if m.fd == 0 {\n+ // Check that an FD was provided to fails fast. 
Technically FD=0 is valid,\n+ // but unlikely to be correct in this context.\n+ return \"\", nil, fmt.Errorf(\"9P mount requires a connection FD\")\n+ }\ndata = p9MountData(m.fd, c.getMountAccessType(m.Mount), true /* vfs2 */)\ndefault:\nlog.Warningf(\"ignoring unknown filesystem type %q\", m.Type)\n+ return \"\", nil, nil\n}\nopts := &vfs.MountOptions{\n@@ -322,7 +329,7 @@ func (c *containerMounter) makeSyntheticMount(ctx context.Context, currentPath s\n}\n_, err := c.k.VFS().StatAt(ctx, creds, target, &vfs.StatOptions{})\nif err == nil {\n- // Mount point exists, nothing else to do.\n+ log.Debugf(\"Mount point %q already exists\", currentPath)\nreturn nil\n}\nif err != syserror.ENOENT {\n@@ -400,3 +407,76 @@ func (c *containerMounter) mountTmpVFS2(ctx context.Context, conf *Config, creds\nreturn fmt.Errorf(`stating \"/tmp\" inside container: %w`, err)\n}\n}\n+\n+// processHintsVFS2 processes annotations that container hints about how volumes\n+// should be mounted (e.g. a volume shared between containers). It must be\n+// called for the root container only.\n+func (c *containerMounter) processHintsVFS2(conf *Config, creds *auth.Credentials) error {\n+ ctx := c.k.SupervisorContext()\n+ for _, hint := range c.hints.mounts {\n+ // TODO(b/142076984): Only support tmpfs for now. Bind mounts require a\n+ // common gofer to mount all shared volumes.\n+ if hint.mount.Type != tmpfs.Name {\n+ continue\n+ }\n+\n+ log.Infof(\"Mounting master of shared mount %q from %q type %q\", hint.name, hint.mount.Source, hint.mount.Type)\n+ mnt, err := c.mountSharedMasterVFS2(ctx, conf, hint, creds)\n+ if err != nil {\n+ return fmt.Errorf(\"mounting shared master %q: %v\", hint.name, err)\n+ }\n+ hint.vfsMount = mnt\n+ }\n+ return nil\n+}\n+\n+// mountSharedMasterVFS2 mounts the master of a volume that is shared among\n+// containers in a pod.\n+func (c *containerMounter) mountSharedMasterVFS2(ctx context.Context, conf *Config, hint *mountHint, creds *auth.Credentials) (*vfs.Mount, error) {\n+ // Map mount type to filesystem name, and parse out the options that we are\n+ // capable of dealing with.\n+ mntFD := &mountAndFD{Mount: hint.mount}\n+ fsName, opts, err := c.getMountNameAndOptionsVFS2(conf, mntFD)\n+ if err != nil {\n+ return nil, err\n+ }\n+ if len(fsName) == 0 {\n+ return nil, fmt.Errorf(\"mount type not supported %q\", hint.mount.Type)\n+ }\n+ return c.k.VFS().MountDisconnected(ctx, creds, \"\", fsName, opts)\n+}\n+\n+// mountSharedSubmount binds mount to a previously mounted volume that is shared\n+// among containers in the same pod.\n+func (c *containerMounter) mountSharedSubmountVFS2(ctx context.Context, conf *Config, mns *vfs.MountNamespace, creds *auth.Credentials, mount specs.Mount, source *mountHint) error {\n+ if err := source.checkCompatible(mount); err != nil {\n+ return err\n+ }\n+\n+ _, opts, err := c.getMountNameAndOptionsVFS2(conf, &mountAndFD{Mount: mount})\n+ if err != nil {\n+ return err\n+ }\n+ newMnt, err := c.k.VFS().NewDisconnectedMount(source.vfsMount.Filesystem(), source.vfsMount.Root(), opts)\n+ if err != nil {\n+ return err\n+ }\n+ defer newMnt.DecRef()\n+\n+ root := mns.Root()\n+ defer root.DecRef()\n+ if err := c.makeSyntheticMount(ctx, mount.Destination, root, creds); err != nil {\n+ return err\n+ }\n+\n+ target := &vfs.PathOperation{\n+ Root: root,\n+ Start: root,\n+ Path: fspath.Parse(mount.Destination),\n+ }\n+ if err := c.k.VFS().ConnectMountAt(ctx, creds, newMnt, target); err != nil {\n+ return err\n+ }\n+ log.Infof(\"Mounted %q type shared bind to %q\", 
mount.Destination, source.name)\n+ return nil\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/container/multi_container_test.go",
"new_path": "runsc/container/multi_container_test.go",
"diff": "@@ -100,19 +100,20 @@ type execDesc struct {\nc *Container\ncmd []string\nwant int\n- desc string\n+ name string\n}\n-func execMany(execs []execDesc) error {\n+func execMany(t *testing.T, execs []execDesc) {\nfor _, exec := range execs {\n+ t.Run(exec.name, func(t *testing.T) {\nargs := &control.ExecArgs{Argv: exec.cmd}\nif ws, err := exec.c.executeSync(args); err != nil {\n- return fmt.Errorf(\"error executing %+v: %v\", args, err)\n+ t.Errorf(\"error executing %+v: %v\", args, err)\n} else if ws.ExitStatus() != exec.want {\n- return fmt.Errorf(\"%q: exec %q got exit status: %d, want: %d\", exec.desc, exec.cmd, ws.ExitStatus(), exec.want)\n+ t.Errorf(\"%q: exec %q got exit status: %d, want: %d\", exec.name, exec.cmd, ws.ExitStatus(), exec.want)\n}\n+ })\n}\n- return nil\n}\nfunc createSharedMount(mount specs.Mount, name string, pod ...*specs.Spec) {\n@@ -1072,7 +1073,7 @@ func TestMultiContainerContainerDestroyStress(t *testing.T) {\n// Test that pod shared mounts are properly mounted in 2 containers and that\n// changes from one container is reflected in the other.\nfunc TestMultiContainerSharedMount(t *testing.T) {\n- for name, conf := range configs(t, all...) {\n+ for name, conf := range configsWithVFS2(t, all...) {\nt.Run(name, func(t *testing.T) {\nrootDir, cleanup, err := testutil.SetupRootDir()\nif err != nil {\n@@ -1110,84 +1111,82 @@ func TestMultiContainerSharedMount(t *testing.T) {\n{\nc: containers[0],\ncmd: []string{\"/usr/bin/test\", \"-d\", mnt0.Destination},\n- desc: \"directory is mounted in container0\",\n+ name: \"directory is mounted in container0\",\n},\n{\nc: containers[1],\ncmd: []string{\"/usr/bin/test\", \"-d\", mnt1.Destination},\n- desc: \"directory is mounted in container1\",\n+ name: \"directory is mounted in container1\",\n},\n{\nc: containers[0],\n- cmd: []string{\"/usr/bin/touch\", file0},\n- desc: \"create file in container0\",\n+ cmd: []string{\"/bin/touch\", file0},\n+ name: \"create file in container0\",\n},\n{\nc: containers[0],\ncmd: []string{\"/usr/bin/test\", \"-f\", file0},\n- desc: \"file appears in container0\",\n+ name: \"file appears in container0\",\n},\n{\nc: containers[1],\ncmd: []string{\"/usr/bin/test\", \"-f\", file1},\n- desc: \"file appears in container1\",\n+ name: \"file appears in container1\",\n},\n{\nc: containers[1],\ncmd: []string{\"/bin/rm\", file1},\n- desc: \"file removed from container1\",\n+ name: \"remove file from container1\",\n},\n{\nc: containers[0],\ncmd: []string{\"/usr/bin/test\", \"!\", \"-f\", file0},\n- desc: \"file removed from container0\",\n+ name: \"file removed from container0\",\n},\n{\nc: containers[1],\ncmd: []string{\"/usr/bin/test\", \"!\", \"-f\", file1},\n- desc: \"file removed from container1\",\n+ name: \"file removed from container1\",\n},\n{\nc: containers[1],\ncmd: []string{\"/bin/mkdir\", file1},\n- desc: \"create directory in container1\",\n+ name: \"create directory in container1\",\n},\n{\nc: containers[0],\ncmd: []string{\"/usr/bin/test\", \"-d\", file0},\n- desc: \"dir appears in container0\",\n+ name: \"dir appears in container0\",\n},\n{\nc: containers[1],\ncmd: []string{\"/usr/bin/test\", \"-d\", file1},\n- desc: \"dir appears in container1\",\n+ name: \"dir appears in container1\",\n},\n{\nc: containers[0],\ncmd: []string{\"/bin/rmdir\", file0},\n- desc: \"create directory in container0\",\n+ name: \"remove directory from container0\",\n},\n{\nc: containers[0],\ncmd: []string{\"/usr/bin/test\", \"!\", \"-d\", file0},\n- desc: \"dir removed from container0\",\n+ name: \"dir 
removed from container0\",\n},\n{\nc: containers[1],\ncmd: []string{\"/usr/bin/test\", \"!\", \"-d\", file1},\n- desc: \"dir removed from container1\",\n+ name: \"dir removed from container1\",\n},\n}\n- if err := execMany(execs); err != nil {\n- t.Fatal(err.Error())\n- }\n+ execMany(t, execs)\n})\n}\n}\n// Test that pod mounts are mounted as readonly when requested.\nfunc TestMultiContainerSharedMountReadonly(t *testing.T) {\n- for name, conf := range configs(t, all...) {\n+ for name, conf := range configsWithVFS2(t, all...) {\nt.Run(name, func(t *testing.T) {\nrootDir, cleanup, err := testutil.SetupRootDir()\nif err != nil {\n@@ -1225,35 +1224,34 @@ func TestMultiContainerSharedMountReadonly(t *testing.T) {\n{\nc: containers[0],\ncmd: []string{\"/usr/bin/test\", \"-d\", mnt0.Destination},\n- desc: \"directory is mounted in container0\",\n+ name: \"directory is mounted in container0\",\n},\n{\nc: containers[1],\ncmd: []string{\"/usr/bin/test\", \"-d\", mnt1.Destination},\n- desc: \"directory is mounted in container1\",\n+ name: \"directory is mounted in container1\",\n},\n{\nc: containers[0],\n- cmd: []string{\"/usr/bin/touch\", file0},\n+ cmd: []string{\"/bin/touch\", file0},\nwant: 1,\n- desc: \"fails to write to container0\",\n+ name: \"fails to write to container0\",\n},\n{\nc: containers[1],\n- cmd: []string{\"/usr/bin/touch\", file1},\n+ cmd: []string{\"/bin/touch\", file1},\nwant: 1,\n- desc: \"fails to write to container1\",\n+ name: \"fails to write to container1\",\n},\n}\n- if err := execMany(execs); err != nil {\n- t.Fatal(err.Error())\n- }\n+ execMany(t, execs)\n})\n}\n}\n// Test that shared pod mounts continue to work after container is restarted.\nfunc TestMultiContainerSharedMountRestart(t *testing.T) {\n+ //TODO(gvisor.dev/issue/1487): This is failing with VFS2.\nfor name, conf := range configs(t, all...) 
{\nt.Run(name, func(t *testing.T) {\nrootDir, cleanup, err := testutil.SetupRootDir()\n@@ -1291,23 +1289,21 @@ func TestMultiContainerSharedMountRestart(t *testing.T) {\nexecs := []execDesc{\n{\nc: containers[0],\n- cmd: []string{\"/usr/bin/touch\", file0},\n- desc: \"create file in container0\",\n+ cmd: []string{\"/bin/touch\", file0},\n+ name: \"create file in container0\",\n},\n{\nc: containers[0],\ncmd: []string{\"/usr/bin/test\", \"-f\", file0},\n- desc: \"file appears in container0\",\n+ name: \"file appears in container0\",\n},\n{\nc: containers[1],\ncmd: []string{\"/usr/bin/test\", \"-f\", file1},\n- desc: \"file appears in container1\",\n+ name: \"file appears in container1\",\n},\n}\n- if err := execMany(execs); err != nil {\n- t.Fatal(err.Error())\n- }\n+ execMany(t, execs)\ncontainers[1].Destroy()\n@@ -1334,32 +1330,30 @@ func TestMultiContainerSharedMountRestart(t *testing.T) {\n{\nc: containers[0],\ncmd: []string{\"/usr/bin/test\", \"-f\", file0},\n- desc: \"file is still in container0\",\n+ name: \"file is still in container0\",\n},\n{\nc: containers[1],\ncmd: []string{\"/usr/bin/test\", \"-f\", file1},\n- desc: \"file is still in container1\",\n+ name: \"file is still in container1\",\n},\n{\nc: containers[1],\ncmd: []string{\"/bin/rm\", file1},\n- desc: \"file removed from container1\",\n+ name: \"file removed from container1\",\n},\n{\nc: containers[0],\ncmd: []string{\"/usr/bin/test\", \"!\", \"-f\", file0},\n- desc: \"file removed from container0\",\n+ name: \"file removed from container0\",\n},\n{\nc: containers[1],\ncmd: []string{\"/usr/bin/test\", \"!\", \"-f\", file1},\n- desc: \"file removed from container1\",\n+ name: \"file removed from container1\",\n},\n}\n- if err := execMany(execs); err != nil {\n- t.Fatal(err.Error())\n- }\n+ execMany(t, execs)\n})\n}\n}\n@@ -1367,13 +1361,13 @@ func TestMultiContainerSharedMountRestart(t *testing.T) {\n// Test that unsupported pod mounts options are ignored when matching master and\n// slave mounts.\nfunc TestMultiContainerSharedMountUnsupportedOptions(t *testing.T) {\n+ for name, conf := range configsWithVFS2(t, all...) {\n+ t.Run(name, func(t *testing.T) {\nrootDir, cleanup, err := testutil.SetupRootDir()\nif err != nil {\nt.Fatalf(\"error creating root dir: %v\", err)\n}\ndefer cleanup()\n-\n- conf := testutil.TestConfig(t)\nconf.RootDir = rootDir\n// Setup the containers.\n@@ -1404,16 +1398,16 @@ func TestMultiContainerSharedMountUnsupportedOptions(t *testing.T) {\n{\nc: containers[0],\ncmd: []string{\"/usr/bin/test\", \"-d\", mnt0.Destination},\n- desc: \"directory is mounted in container0\",\n+ name: \"directory is mounted in container0\",\n},\n{\nc: containers[1],\ncmd: []string{\"/usr/bin/test\", \"-d\", mnt1.Destination},\n- desc: \"directory is mounted in container1\",\n+ name: \"directory is mounted in container1\",\n},\n}\n- if err := execMany(execs); err != nil {\n- t.Fatal(err.Error())\n+ execMany(t, execs)\n+ })\n}\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add shared mount hints to VFS2
Container restart test is disabled for VFS2 for now.
Updates #1487
PiperOrigin-RevId: 320296401 |
260,022 | 07.07.2020 21:48:25 | 14,400 | abffebde7be2dcdb4564e45f845d7c150ced0ccb | Gate FUSE behind a runsc flag
This change gates all FUSE commands (by gating /dev/fuse) behind a runsc
flag. In order to use FUSE commands, use the --fuse flag with the --vfs2
flag. Check if FUSE is enabled by running dmesg in the sandbox. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/fuse/BUILD",
"new_path": "pkg/sentry/fsimpl/fuse/BUILD",
"diff": "@@ -12,6 +12,7 @@ go_library(\n\"//pkg/abi/linux\",\n\"//pkg/context\",\n\"//pkg/sentry/fsimpl/devtmpfs\",\n+ \"//pkg/sentry/kernel\",\n\"//pkg/sentry/vfs\",\n\"//pkg/syserror\",\n\"//pkg/usermem\",\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/fuse/dev.go",
"new_path": "pkg/sentry/fsimpl/fuse/dev.go",
"diff": "@@ -18,6 +18,7 @@ import (\n\"gvisor.dev/gvisor/pkg/abi/linux\"\n\"gvisor.dev/gvisor/pkg/context\"\n\"gvisor.dev/gvisor/pkg/sentry/fsimpl/devtmpfs\"\n+ \"gvisor.dev/gvisor/pkg/sentry/kernel\"\n\"gvisor.dev/gvisor/pkg/sentry/vfs\"\n\"gvisor.dev/gvisor/pkg/syserror\"\n\"gvisor.dev/gvisor/pkg/usermem\"\n@@ -30,6 +31,10 @@ type fuseDevice struct{}\n// Open implements vfs.Device.Open.\nfunc (fuseDevice) Open(ctx context.Context, mnt *vfs.Mount, vfsd *vfs.Dentry, opts vfs.OpenOptions) (*vfs.FileDescription, error) {\n+ if !kernel.FUSEEnabled {\n+ return nil, syserror.ENOENT\n+ }\n+\nvar fd DeviceFD\nif err := fd.vfsfd.Init(&fd, opts.Flags, mnt, vfsd, &vfs.FileDescriptionOptions{\nUseDentryMetadata: true,\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/kernel.go",
"new_path": "pkg/sentry/kernel/kernel.go",
"diff": "@@ -81,6 +81,10 @@ import (\n// easy access everywhere. To be removed once VFS2 becomes the default.\nvar VFS2Enabled = false\n+// FUSEEnabled is set to true when FUSE is enabled. Added as a global for allow\n+// easy access everywhere. To be removed once FUSE is completed.\n+var FUSEEnabled = false\n+\n// Kernel represents an emulated Linux kernel. It must be initialized by calling\n// Init() or LoadFrom().\n//\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/syslog.go",
"new_path": "pkg/sentry/kernel/syslog.go",
"diff": "@@ -98,6 +98,15 @@ func (s *syslog) Log() []byte {\ns.msg = append(s.msg, []byte(fmt.Sprintf(format, time, selectMessage()))...)\n}\n+ if VFS2Enabled {\n+ time += rand.Float64() / 2\n+ s.msg = append(s.msg, []byte(fmt.Sprintf(format, time, \"Setting up VFS2...\"))...)\n+ if FUSEEnabled {\n+ time += rand.Float64() / 2\n+ s.msg = append(s.msg, []byte(fmt.Sprintf(format, time, \"Setting up FUSE...\"))...)\n+ }\n+ }\n+\ntime += rand.Float64() / 2\ns.msg = append(s.msg, []byte(fmt.Sprintf(format, time, \"Ready!\"))...)\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/boot/config.go",
"new_path": "runsc/boot/config.go",
"diff": "@@ -274,6 +274,9 @@ type Config struct {\n// Enables VFS2 (not plumbled through yet).\nVFS2 bool\n+\n+ // Enables FUSE usage (not plumbled through yet).\n+ FUSE bool\n}\n// ToFlags returns a slice of flags that correspond to the given Config.\n@@ -325,5 +328,9 @@ func (c *Config) ToFlags() []string {\nf = append(f, \"--vfs2=true\")\n}\n+ if c.FUSE {\n+ f = append(f, \"--fuse=true\")\n+ }\n+\nreturn f\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/boot/loader.go",
"new_path": "runsc/boot/loader.go",
"diff": "@@ -205,6 +205,10 @@ func New(args Args) (*Loader, error) {\n// Is this a VFSv2 kernel?\nif args.Conf.VFS2 {\nkernel.VFS2Enabled = true\n+ if args.Conf.FUSE {\n+ kernel.FUSEEnabled = true\n+ }\n+\nvfs2.Override()\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/boot/vfs.go",
"new_path": "runsc/boot/vfs.go",
"diff": "@@ -86,9 +86,12 @@ func registerFilesystems(k *kernel.Kernel) error {\nreturn fmt.Errorf(\"registering ttydev: %w\", err)\n}\n+ if kernel.FUSEEnabled {\nif err := fuse.Register(vfsObj); err != nil {\nreturn fmt.Errorf(\"registering fusedev: %w\", err)\n}\n+ }\n+\nif err := tundev.Register(vfsObj); err != nil {\nreturn fmt.Errorf(\"registering tundev: %v\", err)\n}\n@@ -110,9 +113,12 @@ func registerFilesystems(k *kernel.Kernel) error {\nif err := tundev.CreateDevtmpfsFiles(ctx, a); err != nil {\nreturn fmt.Errorf(\"creating tundev devtmpfs files: %v\", err)\n}\n+\n+ if kernel.FUSEEnabled {\nif err := fuse.CreateDevtmpfsFile(ctx, a); err != nil {\nreturn fmt.Errorf(\"creating fusedev devtmpfs files: %w\", err)\n}\n+ }\nreturn nil\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/main.go",
"new_path": "runsc/main.go",
"diff": "@@ -88,6 +88,7 @@ var (\nreferenceLeakMode = flag.String(\"ref-leak-mode\", \"disabled\", \"sets reference leak check mode: disabled (default), log-names, log-traces.\")\ncpuNumFromQuota = flag.Bool(\"cpu-num-from-quota\", false, \"set cpu number to cpu quota (least integer greater or equal to quota value, but not less than 2)\")\nvfs2Enabled = flag.Bool(\"vfs2\", false, \"TEST ONLY; use while VFSv2 is landing. This uses the new experimental VFS layer.\")\n+ fuseEnabled = flag.Bool(\"fuse\", false, \"TEST ONLY; use while FUSE in VFSv2 is landing. This allows the use of the new experimental FUSE filesystem.\")\n// Test flags, not to be used outside tests, ever.\ntestOnlyAllowRunAsCurrentUserWithoutChroot = flag.Bool(\"TESTONLY-unsafe-nonroot\", false, \"TEST ONLY; do not ever use! This skips many security measures that isolate the host from the sandbox.\")\n@@ -242,6 +243,7 @@ func main() {\nOverlayfsStaleRead: *overlayfsStaleRead,\nCPUNumFromQuota: *cpuNumFromQuota,\nVFS2: *vfs2Enabled,\n+ FUSE: *fuseEnabled,\nQDisc: queueingDiscipline,\nTestOnlyAllowRunAsCurrentUserWithoutChroot: *testOnlyAllowRunAsCurrentUserWithoutChroot,\nTestOnlyTestNameEnv: *testOnlyTestNameEnv,\n"
},
{
"change_type": "MODIFY",
"old_path": "test/runner/defs.bzl",
"new_path": "test/runner/defs.bzl",
"diff": "@@ -61,7 +61,8 @@ def _syscall_test(\nfile_access = \"exclusive\",\noverlay = False,\nadd_uds_tree = False,\n- vfs2 = False):\n+ vfs2 = False,\n+ fuse = False):\n# Prepend \"runsc\" to non-native platform names.\nfull_platform = platform if platform == \"native\" else \"runsc_\" + platform\n@@ -73,6 +74,8 @@ def _syscall_test(\nname += \"_overlay\"\nif vfs2:\nname += \"_vfs2\"\n+ if fuse:\n+ name += \"_fuse\"\nif network != \"none\":\nname += \"_\" + network + \"net\"\n@@ -107,6 +110,7 @@ def _syscall_test(\n\"--overlay=\" + str(overlay),\n\"--add-uds-tree=\" + str(add_uds_tree),\n\"--vfs2=\" + str(vfs2),\n+ \"--fuse=\" + str(fuse),\n]\n# Call the rule above.\n@@ -129,6 +133,7 @@ def syscall_test(\nadd_uds_tree = False,\nadd_hostinet = False,\nvfs2 = False,\n+ fuse = False,\ntags = None):\n\"\"\"syscall_test is a macro that will create targets for all platforms.\n@@ -188,6 +193,19 @@ def syscall_test(\nvfs2 = True,\n)\n+ if vfs2 and fuse:\n+ _syscall_test(\n+ test = test,\n+ shard_count = shard_count,\n+ size = size,\n+ platform = default_platform,\n+ use_tmpfs = use_tmpfs,\n+ add_uds_tree = add_uds_tree,\n+ tags = platforms[default_platform] + vfs2_tags,\n+ vfs2 = True,\n+ fuse = True,\n+ )\n+\n# TODO(gvisor.dev/issue/1487): Enable VFS2 overlay tests.\nif add_overlay:\n_syscall_test(\n"
},
{
"change_type": "MODIFY",
"old_path": "test/runner/runner.go",
"new_path": "test/runner/runner.go",
"diff": "@@ -47,6 +47,7 @@ var (\nfileAccess = flag.String(\"file-access\", \"exclusive\", \"mounts root in exclusive or shared mode\")\noverlay = flag.Bool(\"overlay\", false, \"wrap filesystem mounts with writable tmpfs overlay\")\nvfs2 = flag.Bool(\"vfs2\", false, \"enable VFS2\")\n+ fuse = flag.Bool(\"fuse\", false, \"enable FUSE\")\nparallel = flag.Bool(\"parallel\", false, \"run tests in parallel\")\nrunscPath = flag.String(\"runsc\", \"\", \"path to runsc binary\")\n@@ -149,6 +150,9 @@ func runRunsc(tc gtest.TestCase, spec *specs.Spec) error {\n}\nif *vfs2 {\nargs = append(args, \"-vfs2\")\n+ if *fuse {\n+ args = append(args, \"-fuse\")\n+ }\n}\nif *debug {\nargs = append(args, \"-debug\", \"-log-packets=true\")\n@@ -358,6 +362,12 @@ func runTestCaseRunsc(testBin string, tc gtest.TestCase, t *testing.T) {\nvfsVar := \"GVISOR_VFS\"\nif *vfs2 {\nenv = append(env, vfsVar+\"=VFS2\")\n+ fuseVar := \"FUSE_ENABLED\"\n+ if *fuse {\n+ env = append(env, fuseVar+\"=TRUE\")\n+ } else {\n+ env = append(env, fuseVar+\"=FALSE\")\n+ }\n} else {\nenv = append(env, vfsVar+\"=VFS1\")\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/BUILD",
"new_path": "test/syscalls/BUILD",
"diff": "@@ -146,6 +146,7 @@ syscall_test(\n)\nsyscall_test(\n+ fuse = \"True\",\ntest = \"//test/syscalls/linux:dev_test\",\nvfs2 = \"True\",\n)\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/dev.cc",
"new_path": "test/syscalls/linux/dev.cc",
"diff": "@@ -156,7 +156,7 @@ TEST(DevTest, TTYExists) {\nTEST(DevTest, OpenDevFuse) {\n// Note(gvisor.dev/issue/3076) This won't work in the sentry until the new\n// device registration is complete.\n- SKIP_IF(IsRunningWithVFS1() || IsRunningOnGvisor());\n+ SKIP_IF(IsRunningWithVFS1() || IsRunningOnGvisor() || !IsFUSEEnabled());\nASSERT_NO_ERRNO_AND_VALUE(Open(\"/dev/fuse\", O_RDONLY));\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "test/util/test_util.cc",
"new_path": "test/util/test_util.cc",
"diff": "@@ -42,6 +42,7 @@ namespace testing {\nconstexpr char kGvisorNetwork[] = \"GVISOR_NETWORK\";\nconstexpr char kGvisorVfs[] = \"GVISOR_VFS\";\n+constexpr char kFuseEnabled[] = \"FUSE_ENABLED\";\nbool IsRunningOnGvisor() { return GvisorPlatform() != Platform::kNative; }\n@@ -68,6 +69,11 @@ bool IsRunningWithVFS1() {\nreturn strcmp(env, \"VFS1\") == 0;\n}\n+bool IsFUSEEnabled() {\n+ const char* env = getenv(kFuseEnabled);\n+ return env && strcmp(env, \"TRUE\") == 0;\n+}\n+\n// Inline cpuid instruction. Preserve %ebx/%rbx register. In PIC compilations\n// %ebx contains the address of the global offset table. %rbx is occasionally\n// used to address stack variables in presence of dynamic allocas.\n"
},
{
"change_type": "MODIFY",
"old_path": "test/util/test_util.h",
"new_path": "test/util/test_util.h",
"diff": "@@ -225,6 +225,7 @@ const std::string GvisorPlatform();\nbool IsRunningWithHostinet();\n// TODO(gvisor.dev/issue/1624): Delete once VFS1 is gone.\nbool IsRunningWithVFS1();\n+bool IsFUSEEnabled();\n#ifdef __linux__\nvoid SetupGvisorDeathTest();\n"
}
] | Go | Apache License 2.0 | google/gvisor | Gate FUSE behind a runsc flag
This change gates all FUSE commands (by gating /dev/fuse) behind a runsc
flag. In order to use FUSE commands, use the --fuse flag with the --vfs2
flag. Check if FUSE is enabled by running dmesg in the sandbox. |
259,881 | 09.07.2020 09:03:14 | 25,200 | 8d2910a04dec5ef2c79034d35fce68e9f414d144 | Explain how to bypass the Docker proxy
Neither myself nor bhaskerh@ can consistently remember how to do this. | [
{
"change_type": "MODIFY",
"old_path": "g3doc/user_guide/debugging.md",
"new_path": "g3doc/user_guide/debugging.md",
"diff": "@@ -129,3 +129,13 @@ go tool pprof -top /usr/local/bin/runsc /tmp/cpu.prof\n```\n[pprof]: https://github.com/google/pprof/blob/master/doc/README.md\n+\n+### Docker Proxy\n+\n+When forwarding a port to the container, Docker will likely route traffic\n+through the [docker-proxy][]. This proxy may make profiling noisy, so it can be\n+helpful to bypass it. Do so by sending traffic directly to the container IP and\n+port. e.g., if the `docker0` IP is `192.168.9.1`, the container IP is likely a\n+subsequent IP, such as `192.168.9.2`.\n+\n+[docker-proxy]: https://windsock.io/the-docker-proxy/\n"
}
] | Go | Apache License 2.0 | google/gvisor | Explain how to bypass the Docker proxy
Neither myself nor bhaskerh@ can consistently remember how to do this.
PiperOrigin-RevId: 320407005 |
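As a rough sketch of the bypass described in the doc change above, a client can dial the container address directly so the docker-proxy on the host-published port never sees the traffic; the 192.168.9.2:80 endpoint below is only an assumed example matching the sample subnet in the doc:

    package main

    import (
        "fmt"
        "net"
        "time"
    )

    func main() {
        // Assumed container IP and port; substitute the address your container
        // actually reports on the docker0 bridge.
        conn, err := net.DialTimeout("tcp", "192.168.9.2:80", 3*time.Second)
        if err != nil {
            fmt.Println("dial:", err)
            return
        }
        defer conn.Close()
        fmt.Println("connected directly to", conn.RemoteAddr())
    }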
259,962 | 09.07.2020 16:24:43 | 25,200 | 5946f111827fa4e342a2e6e9c043c198d2e5cb03 | Add support for IP_HDRINCL IP option for raw sockets.
Updates
Fixes | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/netstack/netstack.go",
"new_path": "pkg/sentry/socket/netstack/netstack.go",
"diff": "@@ -2112,13 +2112,22 @@ func setSockOptIP(t *kernel.Task, ep commonEndpoint, name int, optVal []byte) *s\n}\nreturn syserr.TranslateNetstackError(ep.SetSockOptBool(tcpip.ReceiveIPPacketInfoOption, v != 0))\n+ case linux.IP_HDRINCL:\n+ if len(optVal) == 0 {\n+ return nil\n+ }\n+ v, err := parseIntOrChar(optVal)\n+ if err != nil {\n+ return err\n+ }\n+ return syserr.TranslateNetstackError(ep.SetSockOptBool(tcpip.IPHdrIncludedOption, v != 0))\n+\ncase linux.IP_ADD_SOURCE_MEMBERSHIP,\nlinux.IP_BIND_ADDRESS_NO_PORT,\nlinux.IP_BLOCK_SOURCE,\nlinux.IP_CHECKSUM,\nlinux.IP_DROP_SOURCE_MEMBERSHIP,\nlinux.IP_FREEBIND,\n- linux.IP_HDRINCL,\nlinux.IP_IPSEC_POLICY,\nlinux.IP_MINTTL,\nlinux.IP_MSFILTER,\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/tcpip.go",
"new_path": "pkg/tcpip/tcpip.go",
"diff": "@@ -648,6 +648,11 @@ const (\n// whether an IPv6 socket is to be restricted to sending and receiving\n// IPv6 packets only.\nV6OnlyOption\n+\n+ // IPHdrIncludedOption is used by SetSockOpt to indicate for a raw\n+ // endpoint that all packets being written have an IP header and the\n+ // endpoint should not attach an IP header.\n+ IPHdrIncludedOption\n)\n// SockOptInt represents socket options which values have the int type.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/raw/endpoint.go",
"new_path": "pkg/tcpip/transport/raw/endpoint.go",
"diff": "@@ -63,6 +63,7 @@ type endpoint struct {\nstack *stack.Stack `state:\"manual\"`\nwaiterQueue *waiter.Queue\nassociated bool\n+ hdrIncluded bool\n// The following fields are used to manage the receive queue and are\n// protected by rcvMu.\n@@ -108,6 +109,7 @@ func newEndpoint(s *stack.Stack, netProto tcpip.NetworkProtocolNumber, transProt\nrcvBufSizeMax: 32 * 1024,\nsndBufSizeMax: 32 * 1024,\nassociated: associated,\n+ hdrIncluded: !associated,\n}\n// Override with stack defaults.\n@@ -182,10 +184,6 @@ func (e *endpoint) SetOwner(owner tcpip.PacketOwner) {\n// Read implements tcpip.Endpoint.Read.\nfunc (e *endpoint) Read(addr *tcpip.FullAddress) (buffer.View, tcpip.ControlMessages, *tcpip.Error) {\n- if !e.associated {\n- return buffer.View{}, tcpip.ControlMessages{}, tcpip.ErrInvalidOptionValue\n- }\n-\ne.rcvMu.Lock()\n// If there's no data to read, return that read would block or that the\n@@ -263,7 +261,7 @@ func (e *endpoint) write(p tcpip.Payloader, opts tcpip.WriteOptions) (int64, <-c\n// If this is an unassociated socket and callee provided a nonzero\n// destination address, route using that address.\n- if !e.associated {\n+ if e.hdrIncluded {\nip := header.IPv4(payloadBytes)\nif !ip.IsValid(len(payloadBytes)) {\ne.mu.RUnlock()\n@@ -353,7 +351,7 @@ func (e *endpoint) finishWrite(payloadBytes []byte, route *stack.Route) (int64,\n}\n}\n- if !e.associated {\n+ if e.hdrIncluded {\nif err := route.WriteHeaderIncludedPacket(&stack.PacketBuffer{\nData: buffer.View(payloadBytes).ToVectorisedView(),\n}); err != nil {\n@@ -513,6 +511,13 @@ func (e *endpoint) SetSockOpt(opt interface{}) *tcpip.Error {\n// SetSockOptBool implements tcpip.Endpoint.SetSockOptBool.\nfunc (e *endpoint) SetSockOptBool(opt tcpip.SockOptBool, v bool) *tcpip.Error {\n+ switch opt {\n+ case tcpip.IPHdrIncludedOption:\n+ e.mu.Lock()\n+ e.hdrIncluded = v\n+ e.mu.Unlock()\n+ return nil\n+ }\nreturn tcpip.ErrUnknownProtocolOption\n}\n@@ -577,6 +582,12 @@ func (e *endpoint) GetSockOptBool(opt tcpip.SockOptBool) (bool, *tcpip.Error) {\ncase tcpip.KeepaliveEnabledOption:\nreturn false, nil\n+ case tcpip.IPHdrIncludedOption:\n+ e.mu.Lock()\n+ v := e.hdrIncluded\n+ e.mu.Unlock()\n+ return v, nil\n+\ndefault:\nreturn false, tcpip.ErrUnknownProtocolOption\n}\n@@ -616,8 +627,15 @@ func (e *endpoint) GetSockOptInt(opt tcpip.SockOptInt) (int, *tcpip.Error) {\nfunc (e *endpoint) HandlePacket(route *stack.Route, pkt *stack.PacketBuffer) {\ne.rcvMu.Lock()\n- // Drop the packet if our buffer is currently full.\n- if e.rcvClosed {\n+ // Drop the packet if our buffer is currently full or if this is an unassociated\n+ // endpoint (i.e endpoint created w/ IPPROTO_RAW). Such endpoints are send only\n+ // See: https://man7.org/linux/man-pages/man7/raw.7.html\n+ //\n+ // An IPPROTO_RAW socket is send only. If you really want to receive\n+ // all IP packets, use a packet(7) socket with the ETH_P_IP protocol.\n+ // Note that packet sockets don't reassemble IP fragments, unlike raw\n+ // sockets.\n+ if e.rcvClosed || !e.associated {\ne.rcvMu.Unlock()\ne.stack.Stats().DroppedPackets.Increment()\ne.stats.ReceiveErrors.ClosedReceiver.Increment()\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/raw_socket_hdrincl.cc",
"new_path": "test/syscalls/linux/raw_socket_hdrincl.cc",
"diff": "@@ -167,7 +167,7 @@ TEST_F(RawHDRINCL, NotReadable) {\n// nothing to be read.\nchar buf[117];\nASSERT_THAT(RetryEINTR(recv)(socket_, buf, sizeof(buf), MSG_DONTWAIT),\n- SyscallFailsWithErrno(EINVAL));\n+ SyscallFailsWithErrno(EAGAIN));\n}\n// Test that we can connect() to a valid IP (loopback).\n@@ -332,6 +332,74 @@ TEST_F(RawHDRINCL, SendAndReceiveDifferentAddress) {\nEXPECT_EQ(absl::gbswap_32(recv_iphdr.daddr), INADDR_LOOPBACK);\n}\n+// Send and receive a packet w/ the IP_HDRINCL option set.\n+TEST_F(RawHDRINCL, SendAndReceiveIPHdrIncl) {\n+ int port = 40000;\n+ if (!IsRunningOnGvisor()) {\n+ port = static_cast<short>(ASSERT_NO_ERRNO_AND_VALUE(\n+ PortAvailable(0, AddressFamily::kIpv4, SocketType::kUdp, false)));\n+ }\n+\n+ FileDescriptor recv_sock =\n+ ASSERT_NO_ERRNO_AND_VALUE(Socket(AF_INET, SOCK_RAW, IPPROTO_UDP));\n+\n+ FileDescriptor send_sock =\n+ ASSERT_NO_ERRNO_AND_VALUE(Socket(AF_INET, SOCK_RAW, IPPROTO_UDP));\n+\n+ // Enable IP_HDRINCL option so that we can build and send w/ an IP\n+ // header.\n+ constexpr int kSockOptOn = 1;\n+ ASSERT_THAT(setsockopt(send_sock.get(), SOL_IP, IP_HDRINCL, &kSockOptOn,\n+ sizeof(kSockOptOn)),\n+ SyscallSucceeds());\n+ // This is not strictly required but we do it to make sure that setting\n+ // IP_HDRINCL on a non IPPROTO_RAW socket does not prevent it from receiving\n+ // packets.\n+ ASSERT_THAT(setsockopt(recv_sock.get(), SOL_IP, IP_HDRINCL, &kSockOptOn,\n+ sizeof(kSockOptOn)),\n+ SyscallSucceeds());\n+\n+ // Construct a packet with an IP header, UDP header, and payload.\n+ constexpr char kPayload[] = \"toto\";\n+ char packet[sizeof(struct iphdr) + sizeof(struct udphdr) + sizeof(kPayload)];\n+ ASSERT_TRUE(\n+ FillPacket(packet, sizeof(packet), port, kPayload, sizeof(kPayload)));\n+\n+ socklen_t addrlen = sizeof(addr_);\n+ ASSERT_NO_FATAL_FAILURE(sendto(send_sock.get(), &packet, sizeof(packet), 0,\n+ reinterpret_cast<struct sockaddr*>(&addr_),\n+ addrlen));\n+\n+ // Receive the payload.\n+ char recv_buf[sizeof(packet)];\n+ struct sockaddr_in src;\n+ socklen_t src_size = sizeof(src);\n+ ASSERT_THAT(recvfrom(recv_sock.get(), recv_buf, sizeof(recv_buf), 0,\n+ reinterpret_cast<struct sockaddr*>(&src), &src_size),\n+ SyscallSucceedsWithValue(sizeof(packet)));\n+ EXPECT_EQ(\n+ memcmp(kPayload, recv_buf + sizeof(struct iphdr) + sizeof(struct udphdr),\n+ sizeof(kPayload)),\n+ 0);\n+ // The network stack should have set the source address.\n+ EXPECT_EQ(src.sin_family, AF_INET);\n+ EXPECT_EQ(absl::gbswap_32(src.sin_addr.s_addr), INADDR_LOOPBACK);\n+ struct iphdr iphdr = {};\n+ memcpy(&iphdr, recv_buf, sizeof(iphdr));\n+ EXPECT_NE(iphdr.id, 0);\n+\n+ // Also verify that the packet we just sent was not delivered to the\n+ // IPPROTO_RAW socket.\n+ {\n+ char recv_buf[sizeof(packet)];\n+ struct sockaddr_in src;\n+ socklen_t src_size = sizeof(src);\n+ ASSERT_THAT(recvfrom(socket_, recv_buf, sizeof(recv_buf), MSG_DONTWAIT,\n+ reinterpret_cast<struct sockaddr*>(&src), &src_size),\n+ SyscallFailsWithErrno(EAGAIN));\n+ }\n+}\n+\n} // namespace\n} // namespace testing\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add support for IP_HDRINCL IP option for raw sockets.
Updates #2746
Fixes #3158
PiperOrigin-RevId: 320497190 |
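For illustration only, a minimal host-side sketch (plain Linux, not gVisor internals) of enabling IP_HDRINCL with Go's standard syscall package; it assumes CAP_NET_RAW and stops short of building the IPv4/UDP header that the test in the commit constructs:

    package main

    import (
        "fmt"
        "syscall"
    )

    func main() {
        // Raw sockets require CAP_NET_RAW (typically root).
        fd, err := syscall.Socket(syscall.AF_INET, syscall.SOCK_RAW, syscall.IPPROTO_UDP)
        if err != nil {
            fmt.Println("socket:", err)
            return
        }
        defer syscall.Close(fd)

        // With IP_HDRINCL set, every payload written to fd must begin with a
        // caller-built IP header.
        if err := syscall.SetsockoptInt(fd, syscall.IPPROTO_IP, syscall.IP_HDRINCL, 1); err != nil {
            fmt.Println("setsockopt:", err)
            return
        }
        fmt.Println("IP_HDRINCL enabled on fd", fd)
    }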
259,858 | 05.05.2020 18:11:56 | 25,200 | 60e19587cebc3cbc781088cc01080c71ad7900a0 | Update canonical paths. | [
{
"change_type": "MODIFY",
"old_path": "pkg/shim/v1/proc/exec.go",
"new_path": "pkg/shim/v1/proc/exec.go",
"diff": "@@ -34,7 +34,7 @@ import (\n\"github.com/pkg/errors\"\n\"golang.org/x/sys/unix\"\n- runsc \"github.com/google/gvisor-containerd-shim/pkg/go-runsc\"\n+ \"gvisor.dev/gvisor/pkg/shim/runsc\"\n)\ntype execProcess struct {\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/shim/v1/proc/init.go",
"new_path": "pkg/shim/v1/proc/init.go",
"diff": "@@ -35,7 +35,7 @@ import (\nspecs \"github.com/opencontainers/runtime-spec/specs-go\"\n\"github.com/pkg/errors\"\n- runsc \"github.com/google/gvisor-containerd-shim/pkg/go-runsc\"\n+ \"gvisor.dev/gvisor/pkg/shim/runsc\"\n)\n// InitPidFile name of the file that contains the init pid\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/shim/v1/proc/utils.go",
"new_path": "pkg/shim/v1/proc/utils.go",
"diff": "@@ -22,7 +22,7 @@ import (\n\"strings\"\n\"time\"\n- runsc \"github.com/google/gvisor-containerd-shim/pkg/go-runsc\"\n+ \"gvisor.dev/gvisor/pkg/shim/runsc\"\n)\nconst (\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/shim/v1/shim/service.go",
"new_path": "pkg/shim/v1/shim/service.go",
"diff": "@@ -42,9 +42,9 @@ import (\n\"google.golang.org/grpc/codes\"\n\"google.golang.org/grpc/status\"\n- runsc \"github.com/google/gvisor-containerd-shim/pkg/go-runsc\"\n- \"github.com/google/gvisor-containerd-shim/pkg/v1/proc\"\n- \"github.com/google/gvisor-containerd-shim/pkg/v1/utils\"\n+ \"gvisor.dev/gvisor/pkg/shim/runsc\"\n+ \"gvisor.dev/gvisor/pkg/shim/v1/proc\"\n+ \"gvisor.dev/gvisor/pkg/shim/v1/utils\"\n)\nvar (\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/shim/v2/service.go",
"new_path": "pkg/shim/v2/service.go",
"diff": "@@ -46,10 +46,10 @@ import (\n\"github.com/sirupsen/logrus\"\n\"golang.org/x/sys/unix\"\n- runsc \"github.com/google/gvisor-containerd-shim/pkg/go-runsc\"\n- \"github.com/google/gvisor-containerd-shim/pkg/v1/proc\"\n- \"github.com/google/gvisor-containerd-shim/pkg/v1/utils\"\n- \"github.com/google/gvisor-containerd-shim/pkg/v2/options\"\n+ \"gvisor.dev/gvisor/pkg/shim/runsc\"\n+ \"gvisor.dev/gvisor/pkg/shim/v1/proc\"\n+ \"gvisor.dev/gvisor/pkg/shim/v1/utils\"\n+ \"gvisor.dev/gvisor/pkg/shim/v2/options\"\n)\nvar (\n"
},
{
"change_type": "MODIFY",
"old_path": "shim/v1/main.go",
"new_path": "shim/v1/main.go",
"diff": "@@ -18,7 +18,7 @@ package main\nimport (\n\"github.com/containerd/containerd/runtime/v2/shim\"\n- runsc \"github.com/google/gvisor-containerd-shim/pkg/v2\"\n+ runsc \"gvisor.dev/gvisor/pkg/shim/v2\"\n)\nfunc main() {\n"
},
{
"change_type": "MODIFY",
"old_path": "shim/v2/main.go",
"new_path": "shim/v2/main.go",
"diff": "@@ -45,8 +45,8 @@ import (\n\"github.com/sirupsen/logrus\"\n\"golang.org/x/sys/unix\"\n- runsc \"github.com/google/gvisor-containerd-shim/pkg/go-runsc\"\n- \"github.com/google/gvisor-containerd-shim/pkg/v1/shim\"\n+ \"gvisor.dev/gvisor/pkg/shim/runsc\"\n+ \"gvisor.dev/gvisor/pkg/shim/v1/shim\"\n)\nvar (\n"
}
] | Go | Apache License 2.0 | google/gvisor | Update canonical paths. |
259,858 | 19.05.2020 21:01:23 | 25,200 | 2f24ab339736315659f26699ab50aa2982d7e890 | Allow arbitrary Go commands for go_mod.sh. | [
{
"change_type": "RENAME",
"old_path": "tools/go_mod.sh",
"new_path": "tools/go.sh",
"diff": "set -eo pipefail\n# Build the :gopath target.\n-bazel build //:gopath\n+make build TARGETS=\":gopath\"\ndeclare -r gopathdir=\"bazel-bin/gopath/src/gvisor.dev/gvisor/\"\n# Copy go.mod and execute the command.\ncp -a go.mod go.sum \"${gopathdir}\"\n-(cd \"${gopathdir}\" && go mod \"$@\")\n+(cd \"${gopathdir}\" && go \"$@\")\ncp -a \"${gopathdir}/go.mod\" \"${gopathdir}/go.sum\" .\n# Cleanup the WORKSPACE file.\n-bazel run //:gazelle -- update-repos -from_file=go.mod\n+make run TARGETS=\":gazelle\" ARGS=\"update-repos -from_file=go.mod\"\n"
}
] | Go | Apache License 2.0 | google/gvisor | Allow arbitrary Go commands for go_mod.sh. |
259,962 | 11.07.2020 06:21:34 | 25,200 | 216dcebc066c82907b0de790a77a3deb6a734805 | Stub out SO_DETACH_FILTER.
Updates | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/netstack/netstack.go",
"new_path": "pkg/sentry/socket/netstack/netstack.go",
"diff": "@@ -1754,6 +1754,11 @@ func setSockOptSocket(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, nam\nreturn nil\n+ case linux.SO_DETACH_FILTER:\n+ // optval is ignored.\n+ var v tcpip.SocketDetachFilterOption\n+ return syserr.TranslateNetstackError(ep.SetSockOpt(v))\n+\ndefault:\nsocket.SetSockOptEmitUnimplementedEvent(t, name)\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/tcpip.go",
"new_path": "pkg/tcpip/tcpip.go",
"diff": "@@ -855,7 +855,10 @@ type OutOfBandInlineOption int\n// a default TTL.\ntype DefaultTTLOption uint8\n-//\n+// SocketDetachFilterOption is used by SetSockOpt to detach a previously attached\n+// classic BPF filter on a given endpoint.\n+type SocketDetachFilterOption int\n+\n// IPPacketInfo is the message structure for IP_PKTINFO.\n//\n// +stateify savable\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/icmp/endpoint.go",
"new_path": "pkg/tcpip/transport/icmp/endpoint.go",
"diff": "@@ -344,6 +344,10 @@ func (e *endpoint) Peek([][]byte) (int64, tcpip.ControlMessages, *tcpip.Error) {\n// SetSockOpt sets a socket option.\nfunc (e *endpoint) SetSockOpt(opt interface{}) *tcpip.Error {\n+ switch opt.(type) {\n+ case tcpip.SocketDetachFilterOption:\n+ return nil\n+ }\nreturn nil\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/packet/endpoint.go",
"new_path": "pkg/tcpip/transport/packet/endpoint.go",
"diff": "@@ -278,8 +278,14 @@ func (ep *endpoint) Readiness(mask waiter.EventMask) waiter.EventMask {\n// used with SetSockOpt, and this function always returns\n// tcpip.ErrNotSupported.\nfunc (ep *endpoint) SetSockOpt(opt interface{}) *tcpip.Error {\n+ switch opt.(type) {\n+ case tcpip.SocketDetachFilterOption:\n+ return nil\n+\n+ default:\nreturn tcpip.ErrUnknownProtocolOption\n}\n+}\n// SetSockOptBool implements tcpip.Endpoint.SetSockOptBool.\nfunc (ep *endpoint) SetSockOptBool(opt tcpip.SockOptBool, v bool) *tcpip.Error {\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/raw/endpoint.go",
"new_path": "pkg/tcpip/transport/raw/endpoint.go",
"diff": "@@ -506,8 +506,14 @@ func (e *endpoint) Readiness(mask waiter.EventMask) waiter.EventMask {\n// SetSockOpt implements tcpip.Endpoint.SetSockOpt.\nfunc (e *endpoint) SetSockOpt(opt interface{}) *tcpip.Error {\n+ switch opt.(type) {\n+ case tcpip.SocketDetachFilterOption:\n+ return nil\n+\n+ default:\nreturn tcpip.ErrUnknownProtocolOption\n}\n+}\n// SetSockOptBool implements tcpip.Endpoint.SetSockOptBool.\nfunc (e *endpoint) SetSockOptBool(opt tcpip.SockOptBool, v bool) *tcpip.Error {\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/endpoint.go",
"new_path": "pkg/tcpip/transport/tcp/endpoint.go",
"diff": "@@ -1792,6 +1792,9 @@ func (e *endpoint) SetSockOpt(opt interface{}) *tcpip.Error {\ne.deferAccept = time.Duration(v)\ne.UnlockUser()\n+ case tcpip.SocketDetachFilterOption:\n+ return nil\n+\ndefault:\nreturn nil\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/udp/endpoint.go",
"new_path": "pkg/tcpip/transport/udp/endpoint.go",
"diff": "@@ -816,6 +816,9 @@ func (e *endpoint) SetSockOpt(opt interface{}) *tcpip.Error {\ne.mu.Lock()\ne.bindToDevice = id\ne.mu.Unlock()\n+\n+ case tcpip.SocketDetachFilterOption:\n+ return nil\n}\nreturn nil\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/BUILD",
"new_path": "test/syscalls/linux/BUILD",
"diff": "@@ -1330,6 +1330,7 @@ cc_binary(\nname = \"packet_socket_raw_test\",\ntestonly = 1,\nsrcs = [\"packet_socket_raw.cc\"],\n+ defines = select_system(),\nlinkstatic = 1,\ndeps = [\n\":socket_test_util\",\n@@ -1809,6 +1810,7 @@ cc_binary(\nname = \"raw_socket_test\",\ntestonly = 1,\nsrcs = [\"raw_socket.cc\"],\n+ defines = select_system(),\nlinkstatic = 1,\ndeps = [\n\":socket_test_util\",\n@@ -3407,6 +3409,7 @@ cc_binary(\nname = \"tcp_socket_test\",\ntestonly = 1,\nsrcs = [\"tcp_socket.cc\"],\n+ defines = select_system(),\nlinkstatic = 1,\ndeps = [\n\":socket_test_util\",\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/packet_socket_raw.cc",
"new_path": "test/syscalls/linux/packet_socket_raw.cc",
"diff": "#include <arpa/inet.h>\n#include <linux/capability.h>\n+#ifndef __fuchsia__\n+#include <linux/filter.h>\n+#endif // __fuchsia__\n#include <linux/if_arp.h>\n#include <linux/if_packet.h>\n#include <net/ethernet.h>\n@@ -556,6 +559,37 @@ TEST_P(RawPacketTest, SetSocketSendBuf) {\nASSERT_EQ(quarter_sz, val);\n}\n+#ifndef __fuchsia__\n+\n+TEST_P(RawPacketTest, SetSocketDetachFilterNoInstalledFilter) {\n+ // TODO(gvisor.dev/2746): Support SO_ATTACH_FILTER/SO_DETACH_FILTER.\n+ //\n+ // gVisor returns no error on SO_DETACH_FILTER even if there is no filter\n+ // attached unlike linux which does return ENOENT in such cases. This is\n+ // because gVisor doesn't support SO_ATTACH_FILTER and just silently returns\n+ // success.\n+ if (IsRunningOnGvisor()) {\n+ constexpr int val = 0;\n+ ASSERT_THAT(setsockopt(s_, SOL_SOCKET, SO_DETACH_FILTER, &val, sizeof(val)),\n+ SyscallSucceeds());\n+ return;\n+ }\n+ constexpr int val = 0;\n+ ASSERT_THAT(setsockopt(s_, SOL_SOCKET, SO_DETACH_FILTER, &val, sizeof(val)),\n+ SyscallFailsWithErrno(ENOENT));\n+}\n+\n+TEST_P(RawPacketTest, GetSocketDetachFilter) {\n+ SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));\n+\n+ int val = 0;\n+ socklen_t val_len = sizeof(val);\n+ ASSERT_THAT(getsockopt(s_, SOL_SOCKET, SO_DETACH_FILTER, &val, &val_len),\n+ SyscallFailsWithErrno(ENOPROTOOPT));\n+}\n+\n+#endif // __fuchsia__\n+\nINSTANTIATE_TEST_SUITE_P(AllInetTests, RawPacketTest,\n::testing::Values(ETH_P_IP, ETH_P_ALL));\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/raw_socket.cc",
"new_path": "test/syscalls/linux/raw_socket.cc",
"diff": "// limitations under the License.\n#include <linux/capability.h>\n+#ifndef __fuchsia__\n+#include <linux/filter.h>\n+#endif // __fuchsia__\n#include <netinet/in.h>\n#include <netinet/ip.h>\n#include <netinet/ip6.h>\n#include <sys/socket.h>\n#include <sys/types.h>\n#include <unistd.h>\n+\n#include <algorithm>\n#include \"gtest/gtest.h\"\n@@ -790,10 +794,30 @@ void RawSocketTest::ReceiveBufFrom(int sock, char* recv_buf,\nASSERT_NO_FATAL_FAILURE(RecvNoCmsg(sock, recv_buf, recv_buf_len));\n}\n-INSTANTIATE_TEST_SUITE_P(AllInetTests, RawSocketTest,\n- ::testing::Combine(\n- ::testing::Values(IPPROTO_TCP, IPPROTO_UDP),\n- ::testing::Values(AF_INET, AF_INET6)));\n+#ifndef __fuchsia__\n+\n+TEST_P(RawSocketTest, SetSocketDetachFilterNoInstalledFilter) {\n+ // TODO(gvisor.dev/2746): Support SO_ATTACH_FILTER/SO_DETACH_FILTER.\n+ if (IsRunningOnGvisor()) {\n+ constexpr int val = 0;\n+ ASSERT_THAT(setsockopt(s_, SOL_SOCKET, SO_DETACH_FILTER, &val, sizeof(val)),\n+ SyscallSucceeds());\n+ return;\n+ }\n+\n+ constexpr int val = 0;\n+ ASSERT_THAT(setsockopt(s_, SOL_SOCKET, SO_DETACH_FILTER, &val, sizeof(val)),\n+ SyscallFailsWithErrno(ENOENT));\n+}\n+\n+TEST_P(RawSocketTest, GetSocketDetachFilter) {\n+ int val = 0;\n+ socklen_t val_len = sizeof(val);\n+ ASSERT_THAT(getsockopt(s_, SOL_SOCKET, SO_DETACH_FILTER, &val, &val_len),\n+ SyscallFailsWithErrno(ENOPROTOOPT));\n+}\n+\n+#endif // __fuchsia__\n// AF_INET6+SOCK_RAW+IPPROTO_RAW sockets can be created, but not written to.\nTEST(RawSocketTest, IPv6ProtoRaw) {\n@@ -813,6 +837,11 @@ TEST(RawSocketTest, IPv6ProtoRaw) {\nSyscallFailsWithErrno(EINVAL));\n}\n+INSTANTIATE_TEST_SUITE_P(\n+ AllInetTests, RawSocketTest,\n+ ::testing::Combine(::testing::Values(IPPROTO_TCP, IPPROTO_UDP),\n+ ::testing::Values(AF_INET, AF_INET6)));\n+\n} // namespace\n} // namespace testing\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/tcp_socket.cc",
"new_path": "test/syscalls/linux/tcp_socket.cc",
"diff": "// limitations under the License.\n#include <fcntl.h>\n+#ifndef __fuchsia__\n+#include <linux/filter.h>\n+#endif // __fuchsia__\n#include <netinet/in.h>\n#include <netinet/tcp.h>\n#include <poll.h>\n@@ -1559,6 +1562,63 @@ TEST_P(SimpleTcpSocketTest, SetTCPWindowClampAboveHalfMinRcvBuf) {\n}\n}\n+#ifndef __fuchsia__\n+\n+// TODO(gvisor.dev/2746): Support SO_ATTACH_FILTER/SO_DETACH_FILTER.\n+// gVisor currently silently ignores attaching a filter.\n+TEST_P(SimpleTcpSocketTest, SetSocketAttachDetachFilter) {\n+ FileDescriptor s =\n+ ASSERT_NO_ERRNO_AND_VALUE(Socket(GetParam(), SOCK_STREAM, IPPROTO_TCP));\n+ // Program generated using sudo tcpdump -i lo tcp and port 1234 -dd\n+ struct sock_filter code[] = {\n+ {0x28, 0, 0, 0x0000000c}, {0x15, 0, 6, 0x000086dd},\n+ {0x30, 0, 0, 0x00000014}, {0x15, 0, 15, 0x00000006},\n+ {0x28, 0, 0, 0x00000036}, {0x15, 12, 0, 0x000004d2},\n+ {0x28, 0, 0, 0x00000038}, {0x15, 10, 11, 0x000004d2},\n+ {0x15, 0, 10, 0x00000800}, {0x30, 0, 0, 0x00000017},\n+ {0x15, 0, 8, 0x00000006}, {0x28, 0, 0, 0x00000014},\n+ {0x45, 6, 0, 0x00001fff}, {0xb1, 0, 0, 0x0000000e},\n+ {0x48, 0, 0, 0x0000000e}, {0x15, 2, 0, 0x000004d2},\n+ {0x48, 0, 0, 0x00000010}, {0x15, 0, 1, 0x000004d2},\n+ {0x6, 0, 0, 0x00040000}, {0x6, 0, 0, 0x00000000},\n+ };\n+ struct sock_fprog bpf = {\n+ .len = ABSL_ARRAYSIZE(code),\n+ .filter = code,\n+ };\n+ ASSERT_THAT(\n+ setsockopt(s.get(), SOL_SOCKET, SO_ATTACH_FILTER, &bpf, sizeof(bpf)),\n+ SyscallSucceeds());\n+\n+ constexpr int val = 0;\n+ ASSERT_THAT(\n+ setsockopt(s.get(), SOL_SOCKET, SO_DETACH_FILTER, &val, sizeof(val)),\n+ SyscallSucceeds());\n+}\n+\n+TEST_P(SimpleTcpSocketTest, SetSocketDetachFilterNoInstalledFilter) {\n+ // TODO(gvisor.dev/2746): Support SO_ATTACH_FILTER/SO_DETACH_FILTER.\n+ SKIP_IF(IsRunningOnGvisor());\n+ FileDescriptor s =\n+ ASSERT_NO_ERRNO_AND_VALUE(Socket(GetParam(), SOCK_STREAM, IPPROTO_TCP));\n+ constexpr int val = 0;\n+ ASSERT_THAT(\n+ setsockopt(s.get(), SOL_SOCKET, SO_DETACH_FILTER, &val, sizeof(val)),\n+ SyscallFailsWithErrno(ENOENT));\n+}\n+\n+TEST_P(SimpleTcpSocketTest, GetSocketDetachFilter) {\n+ FileDescriptor s =\n+ ASSERT_NO_ERRNO_AND_VALUE(Socket(GetParam(), SOCK_STREAM, IPPROTO_TCP));\n+\n+ int val = 0;\n+ socklen_t val_len = sizeof(val);\n+ ASSERT_THAT(getsockopt(s.get(), SOL_SOCKET, SO_DETACH_FILTER, &val, &val_len),\n+ SyscallFailsWithErrno(ENOPROTOOPT));\n+}\n+\n+#endif // __fuchsia__\n+\nINSTANTIATE_TEST_SUITE_P(AllInetTests, SimpleTcpSocketTest,\n::testing::Values(AF_INET, AF_INET6));\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/udp_socket_test_cases.cc",
"new_path": "test/syscalls/linux/udp_socket_test_cases.cc",
"diff": "#include <arpa/inet.h>\n#include <fcntl.h>\n+#ifndef __fuchsia__\n+#include <linux/filter.h>\n+#endif // __fuchsia__\n#include <netinet/in.h>\n#include <poll.h>\n#include <sys/ioctl.h>\n@@ -1723,5 +1726,56 @@ TEST_P(UdpSocketTest, RecvBufLimits) {\n}\n}\n+#ifndef __fuchsia__\n+\n+// TODO(gvisor.dev/2746): Support SO_ATTACH_FILTER/SO_DETACH_FILTER.\n+// gVisor currently silently ignores attaching a filter.\n+TEST_P(UdpSocketTest, SetSocketDetachFilter) {\n+ // Program generated using sudo tcpdump -i lo udp and port 1234 -dd\n+ struct sock_filter code[] = {\n+ {0x28, 0, 0, 0x0000000c}, {0x15, 0, 6, 0x000086dd},\n+ {0x30, 0, 0, 0x00000014}, {0x15, 0, 15, 0x00000011},\n+ {0x28, 0, 0, 0x00000036}, {0x15, 12, 0, 0x000004d2},\n+ {0x28, 0, 0, 0x00000038}, {0x15, 10, 11, 0x000004d2},\n+ {0x15, 0, 10, 0x00000800}, {0x30, 0, 0, 0x00000017},\n+ {0x15, 0, 8, 0x00000011}, {0x28, 0, 0, 0x00000014},\n+ {0x45, 6, 0, 0x00001fff}, {0xb1, 0, 0, 0x0000000e},\n+ {0x48, 0, 0, 0x0000000e}, {0x15, 2, 0, 0x000004d2},\n+ {0x48, 0, 0, 0x00000010}, {0x15, 0, 1, 0x000004d2},\n+ {0x6, 0, 0, 0x00040000}, {0x6, 0, 0, 0x00000000},\n+ };\n+ struct sock_fprog bpf = {\n+ .len = ABSL_ARRAYSIZE(code),\n+ .filter = code,\n+ };\n+ ASSERT_THAT(\n+ setsockopt(sock_.get(), SOL_SOCKET, SO_ATTACH_FILTER, &bpf, sizeof(bpf)),\n+ SyscallSucceeds());\n+\n+ constexpr int val = 0;\n+ ASSERT_THAT(\n+ setsockopt(sock_.get(), SOL_SOCKET, SO_DETACH_FILTER, &val, sizeof(val)),\n+ SyscallSucceeds());\n+}\n+\n+TEST_P(UdpSocketTest, SetSocketDetachFilterNoInstalledFilter) {\n+ // TODO(gvisor.dev/2746): Support SO_ATTACH_FILTER/SO_DETACH_FILTER.\n+ SKIP_IF(IsRunningOnGvisor());\n+ constexpr int val = 0;\n+ ASSERT_THAT(\n+ setsockopt(sock_.get(), SOL_SOCKET, SO_DETACH_FILTER, &val, sizeof(val)),\n+ SyscallFailsWithErrno(ENOENT));\n+}\n+\n+TEST_P(UdpSocketTest, GetSocketDetachFilter) {\n+ int val = 0;\n+ socklen_t val_len = sizeof(val);\n+ ASSERT_THAT(\n+ getsockopt(sock_.get(), SOL_SOCKET, SO_DETACH_FILTER, &val, &val_len),\n+ SyscallFailsWithErrno(ENOPROTOOPT));\n+}\n+\n+#endif // __fuchsia__\n+\n} // namespace testing\n} // namespace gvisor\n"
}
] | Go | Apache License 2.0 | google/gvisor | Stub out SO_DETACH_FILTER.
Updates #2746
PiperOrigin-RevId: 320757963 |
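For illustration, a minimal host-side sketch of the SO_DETACH_FILTER call that the stub above now accepts, written with golang.org/x/sys/unix (already imported elsewhere in this repo); on plain Linux the call returns ENOENT when no classic BPF filter is attached, whereas the gVisor stub simply succeeds:

    package main

    import (
        "fmt"

        "golang.org/x/sys/unix"
    )

    func main() {
        fd, err := unix.Socket(unix.AF_INET, unix.SOCK_DGRAM, 0)
        if err != nil {
            fmt.Println("socket:", err)
            return
        }
        defer unix.Close(fd)

        // The option value is ignored; the call only detaches any attached filter.
        err = unix.SetsockoptInt(fd, unix.SOL_SOCKET, unix.SO_DETACH_FILTER, 0)
        fmt.Println("SO_DETACH_FILTER:", err)
    }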
260,004 | 12.07.2020 17:20:50 | 25,200 | 9c32fd3f4d8f6e63d922c1c58b7d1f1f504fa2bc | Do not copy sleep.Waker
sleep.Waker's fields are modified as values. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sleep/BUILD",
"new_path": "pkg/sleep/BUILD",
"diff": "@@ -12,6 +12,7 @@ go_library(\n\"sleep_unsafe.go\",\n],\nvisibility = [\"//:sandbox\"],\n+ deps = [\"//pkg/sync\"],\n)\ngo_test(\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sleep/sleep_test.go",
"new_path": "pkg/sleep/sleep_test.go",
"diff": "@@ -379,10 +379,7 @@ func TestRace(t *testing.T) {\n// TestRaceInOrder tests that multiple wakers can continuously send wake requests to\n// the sleeper and that the wakers are retrieved in the order asserted.\nfunc TestRaceInOrder(t *testing.T) {\n- const wakers = 100\n- const wakeRequests = 10000\n-\n- w := make([]Waker, wakers)\n+ w := make([]Waker, 10000)\ns := Sleeper{}\n// Associate each waker and start goroutines that will assert them.\n@@ -390,18 +387,15 @@ func TestRaceInOrder(t *testing.T) {\ns.AddWaker(&w[i], i)\n}\ngo func() {\n- n := 0\n- for n < wakeRequests {\n- wk := w[n%len(w)]\n- wk.Assert()\n- n++\n+ for i := range w {\n+ w[i].Assert()\n}\n}()\n// Wait for all wake up notifications from all wakers.\n- for i := 0; i < wakeRequests; i++ {\n- v, _ := s.Fetch(true)\n- if got, want := v, i%wakers; got != want {\n+ for want := range w {\n+ got, _ := s.Fetch(true)\n+ if got != want {\nt.Fatalf(\"got %d want %d\", got, want)\n}\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sleep/sleep_unsafe.go",
"new_path": "pkg/sleep/sleep_unsafe.go",
"diff": "@@ -75,6 +75,8 @@ package sleep\nimport (\n\"sync/atomic\"\n\"unsafe\"\n+\n+ \"gvisor.dev/gvisor/pkg/sync\"\n)\nconst (\n@@ -323,7 +325,12 @@ func (s *Sleeper) enqueueAssertedWaker(w *Waker) {\n//\n// This struct is thread-safe, that is, its methods can be called concurrently\n// by multiple goroutines.\n+//\n+// Note, it is not safe to copy a Waker as its fields are modified by value\n+// (the pointer fields are individually modified with atomic operations).\ntype Waker struct {\n+ _ sync.NoCopy\n+\n// s is the sleeper that this waker can wake up. Only one sleeper at a\n// time is allowed. This field can have three classes of values:\n// nil -- the waker is not asserted: it either is not associated with\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sync/BUILD",
"new_path": "pkg/sync/BUILD",
"diff": "@@ -33,6 +33,7 @@ go_library(\n\"aliases.go\",\n\"memmove_unsafe.go\",\n\"mutex_unsafe.go\",\n+ \"nocopy.go\",\n\"norace_unsafe.go\",\n\"race_unsafe.go\",\n\"rwmutex_unsafe.go\",\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "pkg/sync/nocopy.go",
"diff": "+// Copyright 2020 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package sync\n+\n+// NoCopy may be embedded into structs which must not be copied\n+// after the first use.\n+//\n+// See https://golang.org/issues/8005#issuecomment-190753527\n+// for details.\n+type NoCopy struct{}\n+\n+// Lock is a no-op used by -copylocks checker from `go vet`.\n+func (*NoCopy) Lock() {}\n+\n+// Unlock is a no-op used by -copylocks checker from `go vet`.\n+func (*NoCopy) Unlock() {}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/packet_buffer.go",
"new_path": "pkg/tcpip/stack/packet_buffer.go",
"diff": "package stack\nimport (\n+ \"gvisor.dev/gvisor/pkg/sync\"\n\"gvisor.dev/gvisor/pkg/tcpip\"\n\"gvisor.dev/gvisor/pkg/tcpip/buffer\"\n)\n@@ -24,7 +25,7 @@ import (\n// multiple endpoints. Clone() should be called in such cases so that\n// modifications to the Data field do not affect other copies.\ntype PacketBuffer struct {\n- _ noCopy\n+ _ sync.NoCopy\n// PacketBufferEntry is used to build an intrusive list of\n// PacketBuffers.\n@@ -102,14 +103,3 @@ func (pk *PacketBuffer) Clone() *PacketBuffer {\nNatDone: pk.NatDone,\n}\n}\n-\n-// noCopy may be embedded into structs which must not be copied\n-// after the first use.\n-//\n-// See https://golang.org/issues/8005#issuecomment-190753527\n-// for details.\n-type noCopy struct{}\n-\n-// Lock is a no-op used by -copylocks checker from `go vet`.\n-func (*noCopy) Lock() {}\n-func (*noCopy) Unlock() {}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/timer.go",
"new_path": "pkg/tcpip/timer.go",
"diff": "package tcpip\nimport (\n- \"sync\"\n\"time\"\n+\n+ \"gvisor.dev/gvisor/pkg/sync\"\n)\n// cancellableTimerInstance is a specific instance of CancellableTimer.\n@@ -92,6 +93,8 @@ func (t *cancellableTimerInstance) stop() {\n// Note, it is not safe to copy a CancellableTimer as its timer instance creates\n// a closure over the address of the CancellableTimer.\ntype CancellableTimer struct {\n+ _ sync.NoCopy\n+\n// The active instance of a cancellable timer.\ninstance cancellableTimerInstance\n@@ -157,22 +160,6 @@ func (t *CancellableTimer) Reset(d time.Duration) {\n}\n}\n-// Lock is a no-op used by the copylocks checker from go vet.\n-//\n-// See CancellableTimer for details about why it shouldn't be copied.\n-//\n-// See https://github.com/golang/go/issues/8005#issuecomment-190753527 for more\n-// details about the copylocks checker.\n-func (*CancellableTimer) Lock() {}\n-\n-// Unlock is a no-op used by the copylocks checker from go vet.\n-//\n-// See CancellableTimer for details about why it shouldn't be copied.\n-//\n-// See https://github.com/golang/go/issues/8005#issuecomment-190753527 for more\n-// details about the copylocks checker.\n-func (*CancellableTimer) Unlock() {}\n-\n// NewCancellableTimer returns an unscheduled CancellableTimer with the given\n// locker and fn.\n//\n"
}
] | Go | Apache License 2.0 | google/gvisor | Do not copy sleep.Waker
sleep.Waker's fields are modified as values.
PiperOrigin-RevId: 320873451 |
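The commit above leans on go vet's copylocks checker: embedding a zero-sized type whose Lock/Unlock methods are no-ops makes vet flag any copy of the enclosing struct. A minimal self-contained sketch of that pattern follows; the type names are illustrative stand-ins for pkg/sync.NoCopy and sleep.Waker rather than the real definitions.

```go
package main

// noCopy mirrors the NoCopy helper added by the commit above: a zero-sized
// type whose no-op Lock/Unlock methods trigger `go vet`'s copylocks check.
type noCopy struct{}

func (*noCopy) Lock()   {}
func (*noCopy) Unlock() {}

// Waker must not be copied because its fields are updated in place.
type Waker struct {
	_ noCopy
	s uintptr // stand-in for the real atomically-updated pointer fields
}

// byValue copies a Waker; `go vet` reports something like:
//   byValue passes lock by value: main.Waker contains main.noCopy
func byValue(w Waker) uintptr { return w.s }

func main() {
	var w Waker
	_ = byValue(w)
}
```

Running `go vet` on this file flags byValue for passing a lock by value, which is exactly the protection the commit wants for Waker, PacketBuffer, and CancellableTimer.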
259,858 | 13.07.2020 09:37:47 | 25,200 | 7ff4649b3c61bbf70ea8d76b509a7ae620a45ac7 | Use host networking for build container.
This will allow the use of default credentials. | [
{
"change_type": "MODIFY",
"old_path": "tools/bazel.mk",
"new_path": "tools/bazel.mk",
"diff": "@@ -70,6 +70,7 @@ bazel-server-start: load-default ## Starts the bazel server.\n-v \"$(CURDIR):$(CURDIR)\" \\\n--workdir \"$(CURDIR)\" \\\n--entrypoint \"\" \\\n+ --network=host \\\n$(FULL_DOCKER_RUN_OPTIONS) \\\ngvisor.dev/images/default \\\nsh -c \"groupadd --gid $(GID) --non-unique $(USER) && \\\n"
}
] | Go | Apache License 2.0 | google/gvisor | Use host networking for build container.
This will allow the use of default credentials.
PiperOrigin-RevId: 320972241 |
259,891 | 13.07.2020 11:59:26 | 25,200 | 43c209f48e0aa9024705583cc6f0fafa7d6380ca | garbage collect connections
As in Linux, we must periodically clean up unused connections. | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/BUILD",
"new_path": "pkg/tcpip/stack/BUILD",
"diff": "@@ -27,6 +27,18 @@ go_template_instance(\n},\n)\n+go_template_instance(\n+ name = \"tuple_list\",\n+ out = \"tuple_list.go\",\n+ package = \"stack\",\n+ prefix = \"tuple\",\n+ template = \"//pkg/ilist:generic_list\",\n+ types = {\n+ \"Element\": \"*tuple\",\n+ \"Linker\": \"*tuple\",\n+ },\n+)\n+\ngo_library(\nname = \"stack\",\nsrcs = [\n@@ -35,6 +47,7 @@ go_library(\n\"forwarder.go\",\n\"icmp_rate_limit.go\",\n\"iptables.go\",\n+ \"iptables_state.go\",\n\"iptables_targets.go\",\n\"iptables_types.go\",\n\"linkaddrcache.go\",\n@@ -50,6 +63,7 @@ go_library(\n\"stack_global_state.go\",\n\"stack_options.go\",\n\"transport_demuxer.go\",\n+ \"tuple_list.go\",\n],\nvisibility = [\"//visibility:public\"],\ndeps = [\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/conntrack.go",
"new_path": "pkg/tcpip/stack/conntrack.go",
"diff": "package stack\nimport (\n+ \"encoding/binary\"\n\"sync\"\n+ \"time\"\n\"gvisor.dev/gvisor/pkg/tcpip\"\n+ \"gvisor.dev/gvisor/pkg/tcpip/hash/jenkins\"\n\"gvisor.dev/gvisor/pkg/tcpip/header\"\n\"gvisor.dev/gvisor/pkg/tcpip/transport/tcpconntrack\"\n)\n@@ -30,6 +33,10 @@ import (\n//\n// Currently, only TCP tracking is supported.\n+// Our hash table has 16K buckets.\n+// TODO(gvisor.dev/issue/170): These should be tunable.\n+const numBuckets = 1 << 14\n+\n// Direction of the tuple.\ntype direction int\n@@ -48,7 +55,12 @@ const (\n// tuple holds a connection's identifying and manipulating data in one\n// direction. It is immutable.\n+//\n+// +stateify savable\ntype tuple struct {\n+ // tupleEntry is used to build an intrusive list of tuples.\n+ tupleEntry\n+\ntupleID\n// conn is the connection tracking entry this tuple belongs to.\n@@ -61,6 +73,8 @@ type tuple struct {\n// tupleID uniquely identifies a connection in one direction. It currently\n// contains enough information to distinguish between any TCP or UDP\n// connection, and will need to be extended to support other protocols.\n+//\n+// +stateify savable\ntype tupleID struct {\nsrcAddr tcpip.Address\nsrcPort uint16\n@@ -83,6 +97,8 @@ func (ti tupleID) reply() tupleID {\n}\n// conn is a tracked connection.\n+//\n+// +stateify savable\ntype conn struct {\n// original is the tuple in original direction. It is immutable.\noriginal tuple\n@@ -98,22 +114,67 @@ type conn struct {\ntcbHook Hook\n// mu protects tcb.\n- mu sync.Mutex\n+ mu sync.Mutex `state:\"nosave\"`\n// tcb is TCB control block. It is used to keep track of states\n// of tcp connection and is protected by mu.\ntcb tcpconntrack.TCB\n+\n+ // lastUsed is the last time the connection saw a relevant packet, and\n+ // is updated by each packet on the connection. It is protected by mu.\n+ lastUsed time.Time `state:\".(unixTime)\"`\n+}\n+\n+// timedOut returns whether the connection timed out based on its state.\n+func (cn *conn) timedOut(now time.Time) bool {\n+ const establishedTimeout = 5 * 24 * time.Hour\n+ const defaultTimeout = 120 * time.Second\n+ cn.mu.Lock()\n+ defer cn.mu.Unlock()\n+ if cn.tcb.State() == tcpconntrack.ResultAlive {\n+ // Use the same default as Linux, which doesn't delete\n+ // established connections for 5(!) days.\n+ return now.Sub(cn.lastUsed) > establishedTimeout\n+ }\n+ // Use the same default as Linux, which lets connections in most states\n+ // other than established remain for <= 120 seconds.\n+ return now.Sub(cn.lastUsed) > defaultTimeout\n}\n// ConnTrack tracks all connections created for NAT rules. Most users are\n// expected to only call handlePacket and createConnFor.\n+//\n+// ConnTrack keeps all connections in a slice of buckets, each of which holds a\n+// linked list of tuples. This gives us some desirable properties:\n+// - Each bucket has its own lock, lessening lock contention.\n+// - The slice is large enough that lists stay short (<10 elements on average).\n+// Thus traversal is fast.\n+// - During linked list traversal we reap expired connections. This amortizes\n+// the cost of reaping them and makes reapUnused faster.\n+//\n+// Locks are ordered by their location in the buckets slice. That is, a\n+// goroutine that locks buckets[i] can only lock buckets[j] s.t. 
i < j.\n+//\n+// +stateify savable\ntype ConnTrack struct {\n- // mu protects conns.\n- mu sync.RWMutex\n+ // seed is a one-time random value initialized at stack startup\n+ // and is used in the calculation of hash keys for the list of buckets.\n+ // It is immutable.\n+ seed uint32\n+\n+ // mu protects the buckets slice, but not buckets' contents. Only take\n+ // the write lock if you are modifying the slice or saving for S/R.\n+ mu sync.RWMutex `state:\"nosave\"`\n+\n+ // buckets is protected by mu.\n+ buckets []bucket\n+}\n- // conns maintains a map of tuples needed for connection tracking for\n- // iptables NAT rules. It is protected by mu.\n- conns map[tupleID]tuple\n+// +stateify savable\n+type bucket struct {\n+ // mu protects tuples.\n+ mu sync.Mutex `state:\"nosave\"`\n+ tuples tupleList\n}\n// packetToTupleID converts packet to a tuple ID. It fails when pkt lacks a valid\n@@ -145,6 +206,7 @@ func newConn(orig, reply tupleID, manip manipType, hook Hook) *conn {\nconn := conn{\nmanip: manip,\ntcbHook: hook,\n+ lastUsed: time.Now(),\n}\nconn.original = tuple{conn: &conn, tupleID: orig}\nconn.reply = tuple{conn: &conn, tupleID: reply, direction: dirReply}\n@@ -162,14 +224,28 @@ func (ct *ConnTrack) connFor(pkt *PacketBuffer) (*conn, direction) {\nreturn nil, dirOriginal\n}\n- ct.mu.Lock()\n- defer ct.mu.Unlock()\n+ bucket := ct.bucket(tid)\n+ now := time.Now()\n- tuple, ok := ct.conns[tid]\n- if !ok {\n- return nil, dirOriginal\n+ ct.mu.RLock()\n+ defer ct.mu.RUnlock()\n+ ct.buckets[bucket].mu.Lock()\n+ defer ct.buckets[bucket].mu.Unlock()\n+\n+ // Iterate over the tuples in a bucket, cleaning up any unused\n+ // connections we find.\n+ for other := ct.buckets[bucket].tuples.Front(); other != nil; other = other.Next() {\n+ // Clean up any timed-out connections we happen to find.\n+ if ct.reapTupleLocked(other, bucket, now) {\n+ // The tuple expired.\n+ continue\n+ }\n+ if tid == other.tupleID {\n+ return other.conn, other.direction\n}\n- return tuple.conn, tuple.direction\n+ }\n+\n+ return nil, dirOriginal\n}\n// createConnFor creates a new conn for pkt.\n@@ -197,13 +273,31 @@ func (ct *ConnTrack) createConnFor(pkt *PacketBuffer, hook Hook, rt RedirectTarg\n}\nconn := newConn(tid, replyTID, manip, hook)\n- // Add the changed tuple to the map.\n- // TODO(gvisor.dev/issue/170): Need to support collisions using linked\n- // list.\n- ct.mu.Lock()\n- defer ct.mu.Unlock()\n- ct.conns[tid] = conn.original\n- ct.conns[replyTID] = conn.reply\n+ // Lock the buckets in the correct order.\n+ tupleBucket := ct.bucket(tid)\n+ replyBucket := ct.bucket(replyTID)\n+ ct.mu.RLock()\n+ defer ct.mu.RUnlock()\n+ if tupleBucket < replyBucket {\n+ ct.buckets[tupleBucket].mu.Lock()\n+ ct.buckets[replyBucket].mu.Lock()\n+ } else if tupleBucket > replyBucket {\n+ ct.buckets[replyBucket].mu.Lock()\n+ ct.buckets[tupleBucket].mu.Lock()\n+ } else {\n+ // Both tuples are in the same bucket.\n+ ct.buckets[tupleBucket].mu.Lock()\n+ }\n+\n+ // Add the tuple to the map.\n+ ct.buckets[tupleBucket].tuples.PushFront(&conn.original)\n+ ct.buckets[replyBucket].tuples.PushFront(&conn.reply)\n+\n+ // Unlocking can happen in any order.\n+ ct.buckets[tupleBucket].mu.Unlock()\n+ if tupleBucket != replyBucket {\n+ ct.buckets[replyBucket].mu.Unlock()\n+ }\nreturn conn\n}\n@@ -297,35 +391,134 @@ func (ct *ConnTrack) handlePacket(pkt *PacketBuffer, hook Hook, gso *GSO, r *Rou\n// other tcp states.\nconn.mu.Lock()\ndefer conn.mu.Unlock()\n- var st tcpconntrack.Result\n- tcpHeader := header.TCP(pkt.TransportHeader)\n- if 
conn.tcb.IsEmpty() {\n+\n+ // Mark the connection as having been used recently so it isn't reaped.\n+ conn.lastUsed = time.Now()\n+ // Update connection state.\n+ if tcpHeader := header.TCP(pkt.TransportHeader); conn.tcb.IsEmpty() {\nconn.tcb.Init(tcpHeader)\nconn.tcbHook = hook\n+ } else if hook == conn.tcbHook {\n+ conn.tcb.UpdateStateOutbound(tcpHeader)\n} else {\n- switch hook {\n- case conn.tcbHook:\n- st = conn.tcb.UpdateStateOutbound(tcpHeader)\n- default:\n- st = conn.tcb.UpdateStateInbound(tcpHeader)\n- }\n+ conn.tcb.UpdateStateInbound(tcpHeader)\n+ }\n+}\n+\n+// bucket gets the conntrack bucket for a tupleID.\n+func (ct *ConnTrack) bucket(id tupleID) int {\n+ h := jenkins.Sum32(ct.seed)\n+ h.Write([]byte(id.srcAddr))\n+ h.Write([]byte(id.dstAddr))\n+ shortBuf := make([]byte, 2)\n+ binary.LittleEndian.PutUint16(shortBuf, id.srcPort)\n+ h.Write([]byte(shortBuf))\n+ binary.LittleEndian.PutUint16(shortBuf, id.dstPort)\n+ h.Write([]byte(shortBuf))\n+ binary.LittleEndian.PutUint16(shortBuf, uint16(id.transProto))\n+ h.Write([]byte(shortBuf))\n+ binary.LittleEndian.PutUint16(shortBuf, uint16(id.netProto))\n+ h.Write([]byte(shortBuf))\n+ ct.mu.RLock()\n+ defer ct.mu.RUnlock()\n+ return int(h.Sum32()) % len(ct.buckets)\n+}\n+\n+// reapUnused deletes timed out entries from the conntrack map. The rules for\n+// reaping are:\n+// - Most reaping occurs in connFor, which is called on each packet. connFor\n+// cleans up the bucket the packet's connection maps to. Thus calls to\n+// reapUnused should be fast.\n+// - Each call to reapUnused traverses a fraction of the conntrack table.\n+// Specifically, it traverses len(ct.buckets)/fractionPerReaping.\n+// - After reaping, reapUnused decides when it should next run based on the\n+// ratio of expired connections to examined connections. If the ratio is\n+// greater than maxExpiredPct, it schedules the next run quickly. Otherwise it\n+// slightly increases the interval between runs.\n+// - maxFullTraversal caps the time it takes to traverse the entire table.\n+//\n+// reapUnused returns the next bucket that should be checked and the time after\n+// which it should be called again.\n+func (ct *ConnTrack) reapUnused(start int, prevInterval time.Duration) (int, time.Duration) {\n+ // TODO(gvisor.dev/issue/170): This can be more finely controlled, as\n+ // it is in Linux via sysctl.\n+ const fractionPerReaping = 128\n+ const maxExpiredPct = 50\n+ const maxFullTraversal = 60 * time.Second\n+ const minInterval = 10 * time.Millisecond\n+ const maxInterval = maxFullTraversal / fractionPerReaping\n+\n+ now := time.Now()\n+ checked := 0\n+ expired := 0\n+ var idx int\n+ ct.mu.RLock()\n+ defer ct.mu.RUnlock()\n+ for i := 0; i < len(ct.buckets)/fractionPerReaping; i++ {\n+ idx = (i + start) % len(ct.buckets)\n+ ct.buckets[idx].mu.Lock()\n+ for tuple := ct.buckets[idx].tuples.Front(); tuple != nil; tuple = tuple.Next() {\n+ checked++\n+ if ct.reapTupleLocked(tuple, idx, now) {\n+ expired++\n+ }\n+ }\n+ ct.buckets[idx].mu.Unlock()\n+ }\n+ // We already checked buckets[idx].\n+ idx++\n+\n+ // If half or more of the connections are expired, the table has gotten\n+ // stale. 
Reschedule quickly.\n+ expiredPct := 0\n+ if checked != 0 {\n+ expiredPct = expired * 100 / checked\n+ }\n+ if expiredPct > maxExpiredPct {\n+ return idx, minInterval\n+ }\n+ if interval := prevInterval + minInterval; interval <= maxInterval {\n+ // Increment the interval between runs.\n+ return idx, interval\n+ }\n+ // We've hit the maximum interval.\n+ return idx, maxInterval\n+}\n+\n+// reapTupleLocked tries to remove tuple and its reply from the table. It\n+// returns whether the tuple's connection has timed out.\n+//\n+// Preconditions: ct.mu is locked for reading and bucket is locked.\n+func (ct *ConnTrack) reapTupleLocked(tuple *tuple, bucket int, now time.Time) bool {\n+ if !tuple.conn.timedOut(now) {\n+ return false\n}\n- // Delete conn if tcp connection is closed.\n- if st == tcpconntrack.ResultClosedByPeer || st == tcpconntrack.ResultClosedBySelf || st == tcpconntrack.ResultReset {\n- ct.deleteConn(conn)\n+ // To maintain lock order, we can only reap these tuples if the reply\n+ // appears later in the table.\n+ replyBucket := ct.bucket(tuple.reply())\n+ if bucket > replyBucket {\n+ return true\n}\n+\n+ // Don't re-lock if both tuples are in the same bucket.\n+ differentBuckets := bucket != replyBucket\n+ if differentBuckets {\n+ ct.buckets[replyBucket].mu.Lock()\n}\n-// deleteConn deletes the connection.\n-func (ct *ConnTrack) deleteConn(conn *conn) {\n- if conn == nil {\n- return\n+ // We have the buckets locked and can remove both tuples.\n+ if tuple.direction == dirOriginal {\n+ ct.buckets[replyBucket].tuples.Remove(&tuple.conn.reply)\n+ } else {\n+ ct.buckets[replyBucket].tuples.Remove(&tuple.conn.original)\n}\n+ ct.buckets[bucket].tuples.Remove(tuple)\n- ct.mu.Lock()\n- defer ct.mu.Unlock()\n+ // Don't re-unlock if both tuples are in the same bucket.\n+ if differentBuckets {\n+ ct.buckets[replyBucket].mu.Unlock()\n+ }\n- delete(ct.conns, conn.original.tupleID)\n- delete(ct.conns, conn.reply.tupleID)\n+ return true\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/iptables.go",
"new_path": "pkg/tcpip/stack/iptables.go",
"diff": "@@ -16,6 +16,7 @@ package stack\nimport (\n\"fmt\"\n+ \"time\"\n\"gvisor.dev/gvisor/pkg/tcpip\"\n\"gvisor.dev/gvisor/pkg/tcpip/header\"\n@@ -41,6 +42,9 @@ const (\n// underflow.\nconst HookUnset = -1\n+// reaperDelay is how long to wait before starting to reap connections.\n+const reaperDelay = 5 * time.Second\n+\n// DefaultTables returns a default set of tables. Each chain is set to accept\n// all packets.\nfunc DefaultTables() *IPTables {\n@@ -112,8 +116,9 @@ func DefaultTables() *IPTables {\nOutput: []string{TablenameMangle, TablenameNat, TablenameFilter},\n},\nconnections: ConnTrack{\n- conns: make(map[tupleID]tuple),\n+ seed: generateRandUint32(),\n},\n+ reaperDone: make(chan struct{}, 1),\n}\n}\n@@ -169,6 +174,12 @@ func (it *IPTables) GetTable(name string) (Table, bool) {\nfunc (it *IPTables) ReplaceTable(name string, table Table) {\nit.mu.Lock()\ndefer it.mu.Unlock()\n+ // If iptables is being enabled, initialize the conntrack table and\n+ // reaper.\n+ if !it.modified {\n+ it.connections.buckets = make([]bucket, numBuckets)\n+ it.startReaper(reaperDelay)\n+ }\nit.modified = true\nit.tables[name] = table\n}\n@@ -249,6 +260,35 @@ func (it *IPTables) Check(hook Hook, pkt *PacketBuffer, gso *GSO, r *Route, addr\nreturn true\n}\n+// beforeSave is invoked by stateify.\n+func (it *IPTables) beforeSave() {\n+ // Ensure the reaper exits cleanly.\n+ it.reaperDone <- struct{}{}\n+ // Prevent others from modifying the connection table.\n+ it.connections.mu.Lock()\n+}\n+\n+// afterLoad is invoked by stateify.\n+func (it *IPTables) afterLoad() {\n+ it.startReaper(reaperDelay)\n+}\n+\n+// startReaper starts a goroutine that wakes up periodically to reap timed out\n+// connections.\n+func (it *IPTables) startReaper(interval time.Duration) {\n+ go func() { // S/R-SAFE: reaperDone is signalled when iptables is saved.\n+ bucket := 0\n+ for {\n+ select {\n+ case <-it.reaperDone:\n+ return\n+ case <-time.After(interval):\n+ bucket, interval = it.connections.reapUnused(bucket, interval)\n+ }\n+ }\n+ }()\n+}\n+\n// CheckPackets runs pkts through the rules for hook and returns a map of packets that\n// should not go forward.\n//\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "pkg/tcpip/stack/iptables_state.go",
"diff": "+// Copyright 2020 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package stack\n+\n+import (\n+ \"time\"\n+)\n+\n+// +stateify savable\n+type unixTime struct {\n+ second int64\n+ nano int64\n+}\n+\n+// saveLastUsed is invoked by stateify.\n+func (cn *conn) saveLastUsed() unixTime {\n+ return unixTime{cn.lastUsed.Unix(), cn.lastUsed.UnixNano()}\n+}\n+\n+// loadLastUsed is invoked by stateify.\n+func (cn *conn) loadLastUsed(unix unixTime) {\n+ cn.lastUsed = time.Unix(unix.second, unix.nano)\n+}\n+\n+// beforeSave is invoked by stateify.\n+func (ct *ConnTrack) beforeSave() {\n+ ct.mu.Lock()\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/iptables_types.go",
"new_path": "pkg/tcpip/stack/iptables_types.go",
"diff": "@@ -78,6 +78,8 @@ const (\n)\n// IPTables holds all the tables for a netstack.\n+//\n+// +stateify savable\ntype IPTables struct {\n// mu protects tables, priorities, and modified.\nmu sync.RWMutex\n@@ -97,10 +99,15 @@ type IPTables struct {\nmodified bool\nconnections ConnTrack\n+\n+ // reaperDone can be signalled to stop the reaper goroutine.\n+ reaperDone chan struct{}\n}\n// A Table defines a set of chains and hooks into the network stack. It is\n// really just a list of rules.\n+//\n+// +stateify savable\ntype Table struct {\n// Rules holds the rules that make up the table.\nRules []Rule\n@@ -130,6 +137,8 @@ func (table *Table) ValidHooks() uint32 {\n// contains zero or more matchers, each of which is a specification of which\n// packets this rule applies to. If there are no matchers in the rule, it\n// applies to any packet.\n+//\n+// +stateify savable\ntype Rule struct {\n// Filter holds basic IP filtering fields common to every rule.\nFilter IPHeaderFilter\n@@ -142,6 +151,8 @@ type Rule struct {\n}\n// IPHeaderFilter holds basic IP filtering data common to every rule.\n+//\n+// +stateify savable\ntype IPHeaderFilter struct {\n// Protocol matches the transport protocol.\nProtocol tcpip.TransportProtocolNumber\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/stack.go",
"new_path": "pkg/tcpip/stack/stack.go",
"diff": "@@ -425,6 +425,7 @@ type Stack struct {\nhandleLocal bool\n// tables are the iptables packet filtering and manipulation rules.\n+ // TODO(gvisor.dev/issue/170): S/R this field.\ntables *IPTables\n// resumableEndpoints is a list of endpoints that need to be resumed if the\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcpconntrack/tcp_conntrack.go",
"new_path": "pkg/tcpip/transport/tcpconntrack/tcp_conntrack.go",
"diff": "@@ -106,6 +106,11 @@ func (t *TCB) UpdateStateOutbound(tcp header.TCP) Result {\nreturn st\n}\n+// State returns the current state of the TCB.\n+func (t *TCB) State() Result {\n+ return t.state\n+}\n+\n// IsAlive returns true as long as the connection is established(Alive)\n// or connecting state.\nfunc (t *TCB) IsAlive() bool {\n"
}
] | Go | Apache License 2.0 | google/gvisor | garbage collect connections
As in Linux, we must periodically clean up unused connections.
PiperOrigin-RevId: 321003353 |
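The conntrack change above introduces a reaper goroutine whose next wake-up interval is chosen by the sweep itself (shorter when many entries were stale, capped otherwise). A stripped-down sketch of that select-loop pattern follows, with reap and done standing in for ConnTrack.reapUnused and IPTables.reaperDone:

```go
package main

import "time"

// startReaper sketches the pattern used by the commit above: a goroutine
// wakes on an interval, sweeps one slice of the table, and lets the sweep
// itself suggest where and when to run next.
func startReaper(interval time.Duration, done <-chan struct{},
	reap func(start int, prev time.Duration) (int, time.Duration)) {
	go func() {
		bucket := 0
		for {
			select {
			case <-done:
				return
			case <-time.After(interval):
				bucket, interval = reap(bucket, interval)
			}
		}
	}()
}

func main() {
	done := make(chan struct{})
	startReaper(5*time.Second, done, func(start int, prev time.Duration) (int, time.Duration) {
		// A real implementation walks a fraction of the buckets here and
		// returns a shorter interval when many connections have expired.
		return start + 1, prev
	})
	time.Sleep(time.Millisecond)
	close(done)
}
```

The real reapUnused also walks only len(buckets)/128 buckets per run and reaps opportunistically during connFor lookups, so each sweep stays cheap.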
259,992 | 13.07.2020 12:22:01 | 25,200 | b7e8ce93de54a0f897832877255bed7005b08f14 | Add ReadAllFd to test util | [
{
"change_type": "MODIFY",
"old_path": "test/util/test_util.h",
"new_path": "test/util/test_util.h",
"diff": "@@ -567,6 +567,25 @@ ssize_t ApplyFileIoSyscall(F const& f, size_t const count) {\n} // namespace internal\n+inline PosixErrorOr<std::string> ReadAllFd(int fd) {\n+ std::string all;\n+ all.reserve(128 * 1024); // arbitrary.\n+\n+ std::vector<char> buffer(16 * 1024);\n+ for (;;) {\n+ auto const bytes = RetryEINTR(read)(fd, buffer.data(), buffer.size());\n+ if (bytes < 0) {\n+ return PosixError(errno, \"file read\");\n+ }\n+ if (bytes == 0) {\n+ return std::move(all);\n+ }\n+ if (bytes > 0) {\n+ all.append(buffer.data(), bytes);\n+ }\n+ }\n+}\n+\ninline ssize_t ReadFd(int fd, void* buf, size_t count) {\nreturn internal::ApplyFileIoSyscall(\n[&](size_t completed) {\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add ReadAllFd to test util
PiperOrigin-RevId: 321008185 |
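ReadAllFd above is a read-until-EOF loop with EINTR retry. In Go one would normally reach for io.ReadAll, but a sketch mirroring the helper's shape might look like this (the path opened in main is only an example input):

```go
package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

// readAllFd is a rough Go analogue of the C++ helper above: keep reading
// until a zero-length read (EOF), retry EINTR, and accumulate the bytes.
func readAllFd(fd int) ([]byte, error) {
	var all []byte
	buf := make([]byte, 16*1024)
	for {
		n, err := unix.Read(fd, buf)
		if err == unix.EINTR {
			continue
		}
		if err != nil {
			return nil, err
		}
		if n == 0 {
			return all, nil
		}
		all = append(all, buf[:n]...)
	}
}

func main() {
	f, err := os.Open("/etc/hostname")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer f.Close()
	data, err := readAllFd(int(f.Fd()))
	fmt.Println(len(data), err)
}
```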
259,907 | 13.07.2020 13:33:09 | 25,200 | 6994f4d5912ce9dc9233aebe9902892824904b71 | [vfs2] Make gofer metadata atomics consistent
For accessing metadata fields:
If metadataMu is locked, we can access without atomics
If metadataMu is unlocked, we should use atomics
For mutating metadata fields:
Always lock metadataMu and use atomics.
There were some instances of inconsistencies which have been fixed. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/gofer/gofer.go",
"new_path": "pkg/sentry/fsimpl/gofer/gofer.go",
"diff": "@@ -602,8 +602,14 @@ type dentry struct {\n// returned by the server. dirents is protected by dirMu.\ndirents []vfs.Dirent\n- // Cached metadata; protected by metadataMu and accessed using atomic\n- // memory operations unless otherwise specified.\n+ // Cached metadata; protected by metadataMu.\n+ // To access:\n+ // - In situations where consistency is not required (like stat), these\n+ // can be accessed using atomic operations only (without locking).\n+ // - Lock metadataMu and can access without atomic operations.\n+ // To mutate:\n+ // - Lock metadataMu and use atomic operations to update because we might\n+ // have atomic readers that don't hold the lock.\nmetadataMu sync.Mutex\nino inodeNumber // immutable\nmode uint32 // type is immutable, perms are mutable\n@@ -616,7 +622,7 @@ type dentry struct {\nctime int64\nbtime int64\n// File size, protected by both metadataMu and dataMu (i.e. both must be\n- // locked to mutate it).\n+ // locked to mutate it; locking either is sufficient to access it).\nsize uint64\n// nlink counts the number of hard links to this dentry. It's updated and\n@@ -904,14 +910,14 @@ func (d *dentry) setStat(ctx context.Context, creds *auth.Credentials, stat *lin\n// Prepare for truncate.\nif stat.Mask&linux.STATX_SIZE != 0 {\n- switch d.mode & linux.S_IFMT {\n- case linux.S_IFREG:\n+ switch mode.FileType() {\n+ case linux.ModeRegular:\nif !setLocalMtime {\n// Truncate updates mtime.\nsetLocalMtime = true\nstat.Mtime.Nsec = linux.UTIME_NOW\n}\n- case linux.S_IFDIR:\n+ case linux.ModeDirectory:\nreturn syserror.EISDIR\ndefault:\nreturn syserror.EINVAL\n@@ -994,7 +1000,7 @@ func (d *dentry) setStat(ctx context.Context, creds *auth.Credentials, stat *lin\nfunc (d *dentry) updateFileSizeLocked(newSize uint64) {\nd.dataMu.Lock()\noldSize := d.size\n- d.size = newSize\n+ atomic.StoreUint64(&d.size, newSize)\n// d.dataMu must be unlocked to lock d.mapsMu and invalidate mappings\n// below. This allows concurrent calls to Read/Translate/etc. These\n// functions synchronize with truncation by refusing to use cache\n@@ -1340,8 +1346,8 @@ func (d *dentry) removexattr(ctx context.Context, creds *auth.Credentials, name\n// Extended attributes in the user.* namespace are only supported for regular\n// files and directories.\nfunc (d *dentry) userXattrSupported() bool {\n- filetype := linux.S_IFMT & atomic.LoadUint32(&d.mode)\n- return filetype == linux.S_IFREG || filetype == linux.S_IFDIR\n+ filetype := linux.FileMode(atomic.LoadUint32(&d.mode)).FileType()\n+ return filetype == linux.ModeRegular || filetype == linux.ModeDirectory\n}\n// Preconditions: !d.isSynthetic(). d.isRegularFile() || d.isDir().\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/gofer/regular_file.go",
"new_path": "pkg/sentry/fsimpl/gofer/regular_file.go",
"diff": "@@ -89,7 +89,9 @@ func (fd *regularFileFD) Allocate(ctx context.Context, mode, offset, length uint\nif err != nil {\nreturn err\n}\n- d.size = size\n+ d.dataMu.Lock()\n+ atomic.StoreUint64(&d.size, size)\n+ d.dataMu.Unlock()\nif !d.cachedMetadataAuthoritative() {\nd.touchCMtimeLocked()\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | [vfs2] Make gofer metadata atomics consistent
For accessing metadata fields:
- If metadataMu is locked, we can access without atomics
- If metadataMu is unlocked, we should use atomics
For mutating metadata fields:
- Always lock metadataMu and use atomics.
There were some instances of inconsistencies which have been fixed.
PiperOrigin-RevId: 321022895 |
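The convention the commit above enforces reduces to a small pattern: mutators take metadataMu and still store with atomics, so readers that only need a single field (stat-style accesses) may load atomically without the lock. A stripped-down sketch, with illustrative fields rather than the full gofer dentry:

```go
package main

import (
	"sync"
	"sync/atomic"
)

// dentry is a stand-in for the gofer dentry in the commit above.
type dentry struct {
	metadataMu sync.Mutex
	size       uint64 // lock for cross-field consistency; atomics for lone reads
}

func (d *dentry) setSize(n uint64) {
	d.metadataMu.Lock()
	defer d.metadataMu.Unlock()
	// Store atomically even while holding the lock, because readers that
	// only need this one field skip the lock entirely.
	atomic.StoreUint64(&d.size, n)
}

func (d *dentry) statSize() uint64 {
	// Consistency across fields is not needed here, so an atomic load
	// without metadataMu is sufficient.
	return atomic.LoadUint64(&d.size)
}

func main() {
	d := &dentry{}
	d.setSize(4096)
	_ = d.statSize()
}
```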
259,891 | 13.07.2020 13:53:42 | 25,200 | 3fe9be138c65d7291aa30f67a55eca316945bf87 | iptables: remove useless ip6tables VM rules
This rule isn't restored when the Kokoro VM is restarted, so it's not doing
anything. And the problem it was meant to solve is instead addressed by | [
{
"change_type": "MODIFY",
"old_path": "tools/vm/ubuntu1604/25_docker.sh",
"new_path": "tools/vm/ubuntu1604/25_docker.sh",
"diff": "@@ -60,6 +60,3 @@ cat > /etc/docker/daemon.json <<EOF\n\"ipv6\": true\n}\nEOF\n-# Docker's IPv6 support is lacking and does not work the same way as IPv4. We\n-# can use NAT so containers can reach the outside world.\n-ip6tables -t nat -A POSTROUTING -s 2001:db8:1::/64 ! -o docker0 -j MASQUERADE\n"
}
] | Go | Apache License 2.0 | google/gvisor | iptables: remove useless ip6tables VM rules
This rule isn't restored when the Kokoro VM is restarted, so it's not doing
anything. And the problem it was meant to solve is instead addressed by
https://github.com/google/gvisor/pull/3207.
PiperOrigin-RevId: 321026846 |
259,860 | 13.07.2020 14:00:21 | 25,200 | cf0826653c436021a1b808d1f0aea082c9a0fc1b | Clean up inotify comments. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/vfs/inotify.go",
"new_path": "pkg/sentry/vfs/inotify.go",
"diff": "@@ -179,12 +179,12 @@ func (i *Inotify) Readiness(mask waiter.EventMask) waiter.EventMask {\nreturn mask & ready\n}\n-// PRead implements FileDescriptionImpl.\n+// PRead implements FileDescriptionImpl.PRead.\nfunc (*Inotify) PRead(ctx context.Context, dst usermem.IOSequence, offset int64, opts ReadOptions) (int64, error) {\nreturn 0, syserror.ESPIPE\n}\n-// PWrite implements FileDescriptionImpl.\n+// PWrite implements FileDescriptionImpl.PWrite.\nfunc (*Inotify) PWrite(ctx context.Context, src usermem.IOSequence, offset int64, opts WriteOptions) (int64, error) {\nreturn 0, syserror.ESPIPE\n}\n@@ -243,7 +243,7 @@ func (i *Inotify) Read(ctx context.Context, dst usermem.IOSequence, opts ReadOpt\nreturn writeLen, nil\n}\n-// Ioctl implements fs.FileOperations.Ioctl.\n+// Ioctl implements FileDescriptionImpl.Ioctl.\nfunc (i *Inotify) Ioctl(ctx context.Context, uio usermem.IO, args arch.SyscallArguments) (uintptr, error) {\nswitch args[1].Int() {\ncase linux.FIONREAD:\n"
}
] | Go | Apache License 2.0 | google/gvisor | Clean up inotify comments.
PiperOrigin-RevId: 321028238 |
260,003 | 13.07.2020 14:35:11 | 25,200 | a287309d9ff0461842de1c487348c3032e5fdee4 | Fix recvMMsgDispatcher not slicing link header correctly. | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/fdbased/endpoint_test.go",
"new_path": "pkg/tcpip/link/fdbased/endpoint_test.go",
"diff": "@@ -500,3 +500,76 @@ func TestRecvMMsgDispatcherCapLength(t *testing.T) {\n}\n}\n+\n+// fakeNetworkDispatcher delivers packets to pkts.\n+type fakeNetworkDispatcher struct {\n+ pkts []*stack.PacketBuffer\n+}\n+\n+func (d *fakeNetworkDispatcher) DeliverNetworkPacket(remote, local tcpip.LinkAddress, protocol tcpip.NetworkProtocolNumber, pkt *stack.PacketBuffer) {\n+ d.pkts = append(d.pkts, pkt)\n+}\n+\n+func TestDispatchPacketFormat(t *testing.T) {\n+ for _, test := range []struct {\n+ name string\n+ newDispatcher func(fd int, e *endpoint) (linkDispatcher, error)\n+ }{\n+ {\n+ name: \"readVDispatcher\",\n+ newDispatcher: newReadVDispatcher,\n+ },\n+ {\n+ name: \"recvMMsgDispatcher\",\n+ newDispatcher: newRecvMMsgDispatcher,\n+ },\n+ } {\n+ t.Run(test.name, func(t *testing.T) {\n+ // Create a socket pair to send/recv.\n+ fds, err := syscall.Socketpair(syscall.AF_UNIX, syscall.SOCK_DGRAM, 0)\n+ if err != nil {\n+ t.Fatal(err)\n+ }\n+ defer syscall.Close(fds[0])\n+ defer syscall.Close(fds[1])\n+\n+ data := []byte{\n+ // Ethernet header.\n+ 1, 2, 3, 4, 5, 60,\n+ 1, 2, 3, 4, 5, 61,\n+ 8, 0,\n+ // Mock network header.\n+ 40, 41, 42, 43,\n+ }\n+ err = syscall.Sendmsg(fds[1], data, nil, nil, 0)\n+ if err != nil {\n+ t.Fatal(err)\n+ }\n+\n+ // Create and run dispatcher once.\n+ sink := &fakeNetworkDispatcher{}\n+ d, err := test.newDispatcher(fds[0], &endpoint{\n+ hdrSize: header.EthernetMinimumSize,\n+ dispatcher: sink,\n+ })\n+ if err != nil {\n+ t.Fatal(err)\n+ }\n+ if ok, err := d.dispatch(); !ok || err != nil {\n+ t.Fatalf(\"d.dispatch() = %v, %v\", ok, err)\n+ }\n+\n+ // Verify packet.\n+ if got, want := len(sink.pkts), 1; got != want {\n+ t.Fatalf(\"len(sink.pkts) = %d, want %d\", got, want)\n+ }\n+ pkt := sink.pkts[0]\n+ if got, want := len(pkt.LinkHeader), header.EthernetMinimumSize; got != want {\n+ t.Errorf(\"len(pkt.LinkHeader) = %d, want %d\", got, want)\n+ }\n+ if got, want := pkt.Data.Size(), 4; got != want {\n+ t.Errorf(\"pkt.Data.Size() = %d, want %d\", got, want)\n+ }\n+ })\n+ }\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/fdbased/packet_dispatchers.go",
"new_path": "pkg/tcpip/link/fdbased/packet_dispatchers.go",
"diff": "@@ -278,7 +278,7 @@ func (d *recvMMsgDispatcher) dispatch() (bool, *tcpip.Error) {\neth header.Ethernet\n)\nif d.e.hdrSize > 0 {\n- eth = header.Ethernet(d.views[k][0])\n+ eth = header.Ethernet(d.views[k][0][:header.EthernetMinimumSize])\np = eth.Type()\nremote = eth.SourceAddress()\nlocal = eth.DestinationAddress()\n"
}
] | Go | Apache License 2.0 | google/gvisor | Fix recvMMsgDispatcher not slicing link header correctly.
PiperOrigin-RevId: 321035635 |
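The bug fixed above was that recvMMsgDispatcher handed the whole first view to header.Ethernet instead of just its first 14 bytes, so the link header swallowed payload. A tiny standalone sketch of the slicing the fix performs, using the same frame bytes as the new TestDispatchPacketFormat (plain byte slices here stand in for buffer.View and header.Ethernet):

```go
package main

import (
	"encoding/binary"
	"fmt"
)

const ethernetMinimumSize = 14 // mirrors header.EthernetMinimumSize

func main() {
	// A frame as it comes off the FD: 14-byte Ethernet header, then the
	// network-layer payload (the mock 4-byte header from the test above).
	frame := []byte{
		1, 2, 3, 4, 5, 60, // destination MAC
		1, 2, 3, 4, 5, 61, // source MAC
		8, 0, // EtherType 0x0800 (IPv4)
		40, 41, 42, 43, // payload
	}

	// The bug was the moral equivalent of linkHeader := frame (length 18).
	linkHeader := frame[:ethernetMinimumSize] // what the fix does: length 14
	payload := frame[ethernetMinimumSize:]

	etherType := binary.BigEndian.Uint16(linkHeader[12:14])
	fmt.Printf("link header %d bytes, payload %d bytes, type %#04x\n",
		len(linkHeader), len(payload), etherType)
}
```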
259,884 | 13.07.2020 15:05:27 | 25,200 | 74df310ac09f9f27bc88a9b7eda0fa1659e499a4 | Don't run issue reviver on forks.
Add a conditional to avoid running the issue reviver on forks. It will
always cause errors since bug references in the source code don't match
issue IDs in forked repos. | [
{
"change_type": "MODIFY",
"old_path": ".github/workflows/issue_reviver.yml",
"new_path": ".github/workflows/issue_reviver.yml",
"diff": "@@ -4,11 +4,13 @@ on:\n- cron: '0 0 * * *'\njobs:\n- label:\n+ issue_reviver:\nruns-on: ubuntu-latest\nsteps:\n- uses: actions/checkout@v2\n+ if: github.repository == \"google/gvisor\"\n- run: make run TARGETS=\"//tools/issue_reviver\"\n+ if: github.repository == \"google/gvisor\"\nenv:\nGITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}\nGITHUB_REPOSITORY: ${{ github.repository }}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Don't run issue reviver on forks.
Add a conditional to avoid running the issue reviver on forks. It will
always cause errors since bug references in the source code don't match
issue IDs in forked repos.
PiperOrigin-RevId: 321042060 |
260,003 | 13.07.2020 17:01:04 | 25,200 | 87c33be9af4ab8d80cd20dc58fe843e2eb5e4136 | Enable experimental features in docker. | [
{
"change_type": "MODIFY",
"old_path": "tools/vm/ubuntu1604/30_docker.sh",
"new_path": "tools/vm/ubuntu1604/30_docker.sh",
"diff": "@@ -53,9 +53,11 @@ while true; do\nfi\ndone\n+# Enable experimental features, for cross-building aarch64 images.\n# Enable Docker IPv6.\ncat > /etc/docker/daemon.json <<EOF\n{\n+ \"experimental\": true,\n\"fixed-cidr-v6\": \"2001:db8:1::/64\",\n\"ipv6\": true\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Enable experimental features in docker.
PiperOrigin-RevId: 321062975 |
259,992 | 14.07.2020 11:59:41 | 25,200 | 1bfb556ccdaee28ffea0cbdc37007edb10fa22c4 | Prepare boot.Loader to support multi-container TTY
Combine process creation code that is shared between
root and subcontainer processes
Move root container information into a struct for
clarity
Updates | [
{
"change_type": "MODIFY",
"old_path": "runsc/boot/controller.go",
"new_path": "runsc/boot/controller.go",
"diff": "@@ -155,7 +155,7 @@ func newController(fd int, l *Loader) (*controller, error) {\nsrv.Register(&debug{})\nsrv.Register(&control.Logging{})\n- if l.conf.ProfileEnable {\n+ if l.root.conf.ProfileEnable {\nsrv.Register(&control.Profile{\nKernel: l.k,\n})\n@@ -333,7 +333,7 @@ func (cm *containerManager) Restore(o *RestoreOpts, _ *struct{}) error {\n// Pause the kernel while we build a new one.\ncm.l.k.Pause()\n- p, err := createPlatform(cm.l.conf, deviceFile)\n+ p, err := createPlatform(cm.l.root.conf, deviceFile)\nif err != nil {\nreturn fmt.Errorf(\"creating platform: %v\", err)\n}\n@@ -349,8 +349,8 @@ func (cm *containerManager) Restore(o *RestoreOpts, _ *struct{}) error {\ncm.l.k = k\n// Set up the restore environment.\n- mntr := newContainerMounter(cm.l.spec, cm.l.goferFDs, cm.l.k, cm.l.mountHints)\n- renv, err := mntr.createRestoreEnvironment(cm.l.conf)\n+ mntr := newContainerMounter(cm.l.root.spec, cm.l.root.goferFDs, cm.l.k, cm.l.mountHints)\n+ renv, err := mntr.createRestoreEnvironment(cm.l.root.conf)\nif err != nil {\nreturn fmt.Errorf(\"creating RestoreEnvironment: %v\", err)\n}\n@@ -368,7 +368,7 @@ func (cm *containerManager) Restore(o *RestoreOpts, _ *struct{}) error {\nreturn fmt.Errorf(\"file cannot be empty\")\n}\n- if cm.l.conf.ProfileEnable {\n+ if cm.l.root.conf.ProfileEnable {\n// pprof.Initialize opens /proc/self/maps, so has to be called before\n// installing seccomp filters.\npprof.Initialize()\n@@ -387,13 +387,13 @@ func (cm *containerManager) Restore(o *RestoreOpts, _ *struct{}) error {\n// Since we have a new kernel we also must make a new watchdog.\ndogOpts := watchdog.DefaultOpts\n- dogOpts.TaskTimeoutAction = cm.l.conf.WatchdogAction\n+ dogOpts.TaskTimeoutAction = cm.l.root.conf.WatchdogAction\ndog := watchdog.New(k, dogOpts)\n// Change the loader fields to reflect the changes made when restoring.\ncm.l.k = k\ncm.l.watchdog = dog\n- cm.l.rootProcArgs = kernel.CreateProcessArgs{}\n+ cm.l.root.procArgs = kernel.CreateProcessArgs{}\ncm.l.restore = true\n// Reinitialize the sandbox ID and processes map. Note that it doesn't\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/boot/loader.go",
"new_path": "runsc/boot/loader.go",
"diff": "@@ -77,29 +77,34 @@ import (\n_ \"gvisor.dev/gvisor/pkg/sentry/socket/unix\"\n)\n-// Loader keeps state needed to start the kernel and run the container..\n-type Loader struct {\n- // k is the kernel.\n- k *kernel.Kernel\n-\n- // ctrl is the control server.\n- ctrl *controller\n-\n+type containerInfo struct {\nconf *Config\n- // console is set to true if terminal is enabled.\n- console bool\n+ // spec is the base configuration for the root container.\n+ spec *specs.Spec\n- watchdog *watchdog.Watchdog\n+ // procArgs refers to the container's init task.\n+ procArgs kernel.CreateProcessArgs\n// stdioFDs contains stdin, stdout, and stderr.\nstdioFDs []int\n// goferFDs are the FDs that attach the sandbox to the gofers.\ngoferFDs []int\n+}\n- // spec is the base configuration for the root container.\n- spec *specs.Spec\n+// Loader keeps state needed to start the kernel and run the container..\n+type Loader struct {\n+ // k is the kernel.\n+ k *kernel.Kernel\n+\n+ // ctrl is the control server.\n+ ctrl *controller\n+\n+ // root contains information about the root container in the sandbox.\n+ root containerInfo\n+\n+ watchdog *watchdog.Watchdog\n// stopSignalForwarding disables forwarding of signals to the sandboxed\n// container. It should be called when a sandbox is destroyed.\n@@ -108,9 +113,6 @@ type Loader struct {\n// restore is set to true if we are restoring a container.\nrestore bool\n- // rootProcArgs refers to the root sandbox init task.\n- rootProcArgs kernel.CreateProcessArgs\n-\n// sandboxID is the ID for the whole sandbox.\nsandboxID string\n@@ -175,8 +177,6 @@ type Args struct {\n// StdioFDs is the stdio for the application. The Loader takes ownership of\n// these FDs and may close them at any time.\nStdioFDs []int\n- // Console is set to true if using TTY.\n- Console bool\n// NumCPU is the number of CPUs to create inside the sandbox.\nNumCPU int\n// TotalMem is the initial amount of total memory to report back to the\n@@ -322,7 +322,7 @@ func New(args Args) (*Loader, error) {\ndogOpts.TaskTimeoutAction = args.Conf.WatchdogAction\ndog := watchdog.New(k, dogOpts)\n- procArgs, err := newProcess(args.ID, args.Spec, creds, k, k.RootPIDNamespace())\n+ procArgs, err := createProcessArgs(args.ID, args.Spec, creds, k, k.RootPIDNamespace())\nif err != nil {\nreturn nil, fmt.Errorf(\"creating init process for root container: %v\", err)\n}\n@@ -371,16 +371,17 @@ func New(args Args) (*Loader, error) {\neid := execID{cid: args.ID}\nl := &Loader{\nk: k,\n- conf: args.Conf,\n- console: args.Console,\nwatchdog: dog,\n- spec: args.Spec,\n- goferFDs: args.GoferFDs,\n- stdioFDs: stdioFDs,\n- rootProcArgs: procArgs,\nsandboxID: args.ID,\nprocesses: map[execID]*execProcess{eid: {}},\nmountHints: mountHints,\n+ root: containerInfo{\n+ conf: args.Conf,\n+ stdioFDs: stdioFDs,\n+ goferFDs: args.GoferFDs,\n+ spec: args.Spec,\n+ procArgs: procArgs,\n+ },\n}\n// We don't care about child signals; some platforms can generate a\n@@ -408,8 +409,8 @@ func New(args Args) (*Loader, error) {\nreturn l, nil\n}\n-// newProcess creates a process that can be run with kernel.CreateProcess.\n-func newProcess(id string, spec *specs.Spec, creds *auth.Credentials, k *kernel.Kernel, pidns *kernel.PIDNamespace) (kernel.CreateProcessArgs, error) {\n+// createProcessArgs creates args that can be used with kernel.CreateProcess.\n+func createProcessArgs(id string, spec *specs.Spec, creds *auth.Credentials, k *kernel.Kernel, pidns *kernel.PIDNamespace) (kernel.CreateProcessArgs, error) {\n// Create initial limits.\nls, err 
:= createLimitSet(spec)\nif err != nil {\n@@ -483,13 +484,13 @@ func createMemoryFile() (*pgalloc.MemoryFile, error) {\n}\nfunc (l *Loader) installSeccompFilters() error {\n- if l.conf.DisableSeccomp {\n+ if l.root.conf.DisableSeccomp {\nfilter.Report(\"syscall filter is DISABLED. Running in less secure mode.\")\n} else {\nopts := filter.Options{\nPlatform: l.k.Platform,\n- HostNetwork: l.conf.Network == NetworkHost,\n- ProfileEnable: l.conf.ProfileEnable,\n+ HostNetwork: l.root.conf.Network == NetworkHost,\n+ ProfileEnable: l.root.conf.ProfileEnable,\nControllerFD: l.ctrl.srv.FD(),\n}\nif err := filter.Install(opts); err != nil {\n@@ -515,7 +516,7 @@ func (l *Loader) Run() error {\n}\nfunc (l *Loader) run() error {\n- if l.conf.Network == NetworkHost {\n+ if l.root.conf.Network == NetworkHost {\n// Delay host network configuration to this point because network namespace\n// is configured after the loader is created and before Run() is called.\nlog.Debugf(\"Configuring host network\")\n@@ -536,10 +537,8 @@ func (l *Loader) run() error {\n// If we are restoring, we do not want to create a process.\n// l.restore is set by the container manager when a restore call is made.\n- var ttyFile *host.TTYFileOperations\n- var ttyFileVFS2 *hostvfs2.TTYFileDescription\nif !l.restore {\n- if l.conf.ProfileEnable {\n+ if l.root.conf.ProfileEnable {\npprof.Initialize()\n}\n@@ -549,82 +548,29 @@ func (l *Loader) run() error {\nreturn err\n}\n- // Create the FD map, which will set stdin, stdout, and stderr. If console\n- // is true, then ioctl calls will be passed through to the host fd.\n- ctx := l.rootProcArgs.NewContext(l.k)\n- var err error\n-\n- // CreateProcess takes a reference on FDMap if successful. We won't need\n- // ours either way.\n- l.rootProcArgs.FDTable, ttyFile, ttyFileVFS2, err = createFDTable(ctx, l.console, l.stdioFDs)\n- if err != nil {\n- return fmt.Errorf(\"importing fds: %v\", err)\n- }\n-\n- // Setup the root container file system.\n- l.startGoferMonitor(l.sandboxID, l.goferFDs)\n-\n- mntr := newContainerMounter(l.spec, l.goferFDs, l.k, l.mountHints)\n- if err := mntr.processHints(l.conf, l.rootProcArgs.Credentials); err != nil {\n- return err\n- }\n- if err := setupContainerFS(ctx, l.conf, mntr, &l.rootProcArgs); err != nil {\n- return err\n- }\n-\n- // Add the HOME enviroment variable if it is not already set.\n- var envv []string\n- if kernel.VFS2Enabled {\n- envv, err = user.MaybeAddExecUserHomeVFS2(ctx, l.rootProcArgs.MountNamespaceVFS2,\n- l.rootProcArgs.Credentials.RealKUID, l.rootProcArgs.Envv)\n-\n- } else {\n- envv, err = user.MaybeAddExecUserHome(ctx, l.rootProcArgs.MountNamespace,\n- l.rootProcArgs.Credentials.RealKUID, l.rootProcArgs.Envv)\n- }\n- if err != nil {\n- return err\n- }\n- l.rootProcArgs.Envv = envv\n-\n// Create the root container init task. 
It will begin running\n// when the kernel is started.\n- if _, _, err := l.k.CreateProcess(l.rootProcArgs); err != nil {\n- return fmt.Errorf(\"creating init process: %v\", err)\n+ if _, err := l.createContainerProcess(true, l.sandboxID, &l.root, ep); err != nil {\n+ return err\n}\n-\n- // CreateProcess takes a reference on FDTable if successful.\n- l.rootProcArgs.FDTable.DecRef()\n}\nep.tg = l.k.GlobalInit()\n- if ns, ok := specutils.GetNS(specs.PIDNamespace, l.spec); ok {\n+ if ns, ok := specutils.GetNS(specs.PIDNamespace, l.root.spec); ok {\nep.pidnsPath = ns.Path\n}\n- if l.console {\n- // Set the foreground process group on the TTY to the global init process\n- // group, since that is what we are about to start running.\n- switch {\n- case ttyFileVFS2 != nil:\n- ep.ttyVFS2 = ttyFileVFS2\n- ttyFileVFS2.InitForegroundProcessGroup(ep.tg.ProcessGroup())\n- case ttyFile != nil:\n- ep.tty = ttyFile\n- ttyFile.InitForegroundProcessGroup(ep.tg.ProcessGroup())\n- }\n- }\n// Handle signals by forwarding them to the root container process\n// (except for panic signal, which should cause a panic).\nl.stopSignalForwarding = sighandling.StartSignalForwarding(func(sig linux.Signal) {\n// Panic signal should cause a panic.\n- if l.conf.PanicSignal != -1 && sig == linux.Signal(l.conf.PanicSignal) {\n+ if l.root.conf.PanicSignal != -1 && sig == linux.Signal(l.root.conf.PanicSignal) {\npanic(\"Signal-induced panic\")\n}\n// Otherwise forward to root container.\ndeliveryMode := DeliverToProcess\n- if l.console {\n+ if l.root.spec.Process.Terminal {\n// Since we are running with a console, we should forward the signal to\n// the foreground process group so that job control signals like ^C can\n// be handled properly.\n@@ -641,7 +587,7 @@ func (l *Loader) run() error {\n// during restore, we can release l.stdioFDs now. VFS2 takes ownership of the\n// passed FDs, so only close for VFS1.\nif !kernel.VFS2Enabled {\n- for _, fd := range l.stdioFDs {\n+ for _, fd := range l.root.stdioFDs {\nerr := syscall.Close(fd)\nif err != nil {\nreturn fmt.Errorf(\"close dup()ed stdioFDs: %v\", err)\n@@ -680,8 +626,8 @@ func (l *Loader) startContainer(spec *specs.Spec, conf *Config, cid string, file\nl.mu.Lock()\ndefer l.mu.Unlock()\n- eid := execID{cid: cid}\n- if _, ok := l.processes[eid]; !ok {\n+ ep := l.processes[execID{cid: cid}]\n+ if ep == nil {\nreturn fmt.Errorf(\"trying to start a deleted container %q\", cid)\n}\n@@ -715,76 +661,112 @@ func (l *Loader) startContainer(spec *specs.Spec, conf *Config, cid string, file\nif pidns == nil {\npidns = l.k.RootPIDNamespace().NewChild(l.k.RootUserNamespace())\n}\n- l.processes[eid].pidnsPath = ns.Path\n+ ep.pidnsPath = ns.Path\n} else {\npidns = l.k.RootPIDNamespace()\n}\n- procArgs, err := newProcess(cid, spec, creds, l.k, pidns)\n+\n+ info := &containerInfo{\n+ conf: conf,\n+ spec: spec,\n+ }\n+ info.procArgs, err = createProcessArgs(cid, spec, creds, l.k, pidns)\nif err != nil {\nreturn fmt.Errorf(\"creating new process: %v\", err)\n}\n// setupContainerFS() dups stdioFDs, so we don't need to dup them here.\n- var stdioFDs []int\nfor _, f := range files[:3] {\n- stdioFDs = append(stdioFDs, int(f.Fd()))\n+ info.stdioFDs = append(info.stdioFDs, int(f.Fd()))\n}\n- // Create the FD map, which will set stdin, stdout, and stderr.\n- ctx := procArgs.NewContext(l.k)\n- fdTable, _, _, err := createFDTable(ctx, false, stdioFDs)\n- if err != nil {\n- return fmt.Errorf(\"importing fds: %v\", err)\n- }\n- // CreateProcess takes a reference on fdTable if successful. 
We won't\n- // need ours either way.\n- procArgs.FDTable = fdTable\n-\n// Can't take ownership away from os.File. dup them to get a new FDs.\n- var goferFDs []int\nfor _, f := range files[3:] {\nfd, err := syscall.Dup(int(f.Fd()))\nif err != nil {\nreturn fmt.Errorf(\"failed to dup file: %v\", err)\n}\n- goferFDs = append(goferFDs, fd)\n+ info.goferFDs = append(info.goferFDs, fd)\n+ }\n+\n+ tg, err := l.createContainerProcess(false, cid, info, ep)\n+ if err != nil {\n+ return err\n+ }\n+\n+ // Success!\n+ l.k.StartProcess(tg)\n+ ep.tg = tg\n+ return nil\n+}\n+\n+func (l *Loader) createContainerProcess(root bool, cid string, info *containerInfo, ep *execProcess) (*kernel.ThreadGroup, error) {\n+ console := false\n+ if root {\n+ // Only root container supports terminal for now.\n+ console = info.spec.Process.Terminal\n}\n+ // Create the FD map, which will set stdin, stdout, and stderr.\n+ ctx := info.procArgs.NewContext(l.k)\n+ fdTable, ttyFile, ttyFileVFS2, err := createFDTable(ctx, console, info.stdioFDs)\n+ if err != nil {\n+ return nil, fmt.Errorf(\"importing fds: %v\", err)\n+ }\n+ // CreateProcess takes a reference on fdTable if successful. We won't need\n+ // ours either way.\n+ info.procArgs.FDTable = fdTable\n+\n// Setup the child container file system.\n- l.startGoferMonitor(cid, goferFDs)\n+ l.startGoferMonitor(cid, info.goferFDs)\n- mntr := newContainerMounter(spec, goferFDs, l.k, l.mountHints)\n- if err := setupContainerFS(ctx, conf, mntr, &procArgs); err != nil {\n- return err\n+ mntr := newContainerMounter(info.spec, info.goferFDs, l.k, l.mountHints)\n+ if root {\n+ if err := mntr.processHints(info.conf, info.procArgs.Credentials); err != nil {\n+ return nil, err\n+ }\n+ }\n+ if err := setupContainerFS(ctx, info.conf, mntr, &info.procArgs); err != nil {\n+ return nil, err\n}\n// Add the HOME enviroment variable if it is not already set.\nvar envv []string\nif kernel.VFS2Enabled {\n- envv, err = user.MaybeAddExecUserHomeVFS2(ctx, procArgs.MountNamespaceVFS2,\n- procArgs.Credentials.RealKUID, procArgs.Envv)\n+ envv, err = user.MaybeAddExecUserHomeVFS2(ctx, info.procArgs.MountNamespaceVFS2,\n+ info.procArgs.Credentials.RealKUID, info.procArgs.Envv)\n} else {\n- envv, err = user.MaybeAddExecUserHome(ctx, procArgs.MountNamespace,\n- procArgs.Credentials.RealKUID, procArgs.Envv)\n+ envv, err = user.MaybeAddExecUserHome(ctx, info.procArgs.MountNamespace,\n+ info.procArgs.Credentials.RealKUID, info.procArgs.Envv)\n}\nif err != nil {\n- return err\n+ return nil, err\n}\n- procArgs.Envv = envv\n+ info.procArgs.Envv = envv\n// Create and start the new process.\n- tg, _, err := l.k.CreateProcess(procArgs)\n+ tg, _, err := l.k.CreateProcess(info.procArgs)\nif err != nil {\n- return fmt.Errorf(\"creating process: %v\", err)\n+ return nil, fmt.Errorf(\"creating process: %v\", err)\n}\n- l.k.StartProcess(tg)\n-\n// CreateProcess takes a reference on FDTable if successful.\n- procArgs.FDTable.DecRef()\n+ info.procArgs.FDTable.DecRef()\n- l.processes[eid].tg = tg\n- return nil\n+ // Set the foreground process group on the TTY to the global init process\n+ // group, since that is what we are about to start running.\n+ if root {\n+ switch {\n+ case ttyFileVFS2 != nil:\n+ ep.ttyVFS2 = ttyFileVFS2\n+ ttyFileVFS2.InitForegroundProcessGroup(tg.ProcessGroup())\n+ case ttyFile != nil:\n+ ep.tty = ttyFile\n+ ttyFile.InitForegroundProcessGroup(tg.ProcessGroup())\n+ }\n+ }\n+\n+ return tg, nil\n}\n// startGoferMonitor runs a goroutine to monitor gofer's health. It polls on\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/boot/loader_test.go",
"new_path": "runsc/boot/loader_test.go",
"diff": "@@ -479,13 +479,13 @@ func TestCreateMountNamespaceVFS2(t *testing.T) {\ndefer l.Destroy()\ndefer loaderCleanup()\n- mntr := newContainerMounter(l.spec, l.goferFDs, l.k, l.mountHints)\n- if err := mntr.processHints(l.conf, l.rootProcArgs.Credentials); err != nil {\n+ mntr := newContainerMounter(l.root.spec, l.root.goferFDs, l.k, l.mountHints)\n+ if err := mntr.processHints(l.root.conf, l.root.procArgs.Credentials); err != nil {\nt.Fatalf(\"failed process hints: %v\", err)\n}\nctx := l.k.SupervisorContext()\n- mns, err := mntr.setupVFS2(ctx, l.conf, &l.rootProcArgs)\n+ mns, err := mntr.setupVFS2(ctx, l.root.conf, &l.root.procArgs)\nif err != nil {\nt.Fatalf(\"failed to setupVFS2: %v\", err)\n}\n@@ -499,7 +499,7 @@ func TestCreateMountNamespaceVFS2(t *testing.T) {\nPath: fspath.Parse(p),\n}\n- if d, err := l.k.VFS().GetDentryAt(ctx, l.rootProcArgs.Credentials, target, &vfs.GetDentryOptions{}); err != nil {\n+ if d, err := l.k.VFS().GetDentryAt(ctx, l.root.procArgs.Credentials, target, &vfs.GetDentryOptions{}); err != nil {\nt.Errorf(\"expected path %v to exist with spec %v, but got error %v\", p, tc.spec, err)\n} else {\nd.DecRef()\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/cmd/boot.go",
"new_path": "runsc/cmd/boot.go",
"diff": "@@ -54,10 +54,6 @@ type Boot struct {\n// provided in that order.\nstdioFDs intFlags\n- // console is set to true if the sandbox should allow terminal ioctl(2)\n- // syscalls.\n- console bool\n-\n// applyCaps determines if capabilities defined in the spec should be applied\n// to the process.\napplyCaps bool\n@@ -115,7 +111,6 @@ func (b *Boot) SetFlags(f *flag.FlagSet) {\nf.IntVar(&b.deviceFD, \"device-fd\", -1, \"FD for the platform device file\")\nf.Var(&b.ioFDs, \"io-fds\", \"list of FDs to connect 9P clients. They must follow this order: root first, then mounts as defined in the spec\")\nf.Var(&b.stdioFDs, \"stdio-fds\", \"list of FDs containing sandbox stdin, stdout, and stderr in that order\")\n- f.BoolVar(&b.console, \"console\", false, \"set to true if the sandbox should allow terminal ioctl(2) syscalls\")\nf.BoolVar(&b.applyCaps, \"apply-caps\", false, \"if true, apply capabilities defined in the spec to the process\")\nf.BoolVar(&b.setUpRoot, \"setup-root\", false, \"if true, set up an empty root for the process\")\nf.BoolVar(&b.pidns, \"pidns\", false, \"if true, the sandbox is in its own PID namespace\")\n@@ -229,7 +224,6 @@ func (b *Boot) Execute(_ context.Context, f *flag.FlagSet, args ...interface{})\nDevice: os.NewFile(uintptr(b.deviceFD), \"platform device\"),\nGoferFDs: b.ioFDs.GetArray(),\nStdioFDs: b.stdioFDs.GetArray(),\n- Console: b.console,\nNumCPU: b.cpuNum,\nTotalMem: b.totalMem,\nUserLogFD: b.userLogFD,\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/container/console_test.go",
"new_path": "runsc/container/console_test.go",
"diff": "@@ -122,6 +122,7 @@ func TestConsoleSocket(t *testing.T) {\nfor name, conf := range configsWithVFS2(t, all...) {\nt.Run(name, func(t *testing.T) {\nspec := testutil.NewSpecWithArgs(\"true\")\n+ spec.Process.Terminal = true\n_, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf)\nif err != nil {\nt.Fatalf(\"error setting up container: %v\", err)\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/sandbox/sandbox.go",
"new_path": "runsc/sandbox/sandbox.go",
"diff": "@@ -478,9 +478,7 @@ func (s *Sandbox) createSandboxProcess(conf *boot.Config, args *Args, startSyncF\n// If the console control socket file is provided, then create a new\n// pty master/slave pair and set the TTY on the sandbox process.\n- if args.ConsoleSocket != \"\" {\n- cmd.Args = append(cmd.Args, \"--console=true\")\n-\n+ if args.Spec.Process.Terminal && args.ConsoleSocket != \"\" {\n// console.NewWithSocket will send the master on the given\n// socket, and return the slave.\ntty, err := console.NewWithSocket(args.ConsoleSocket)\n"
}
] | Go | Apache License 2.0 | google/gvisor | Prepare boot.Loader to support multi-container TTY
- Combine process creation code that is shared between
root and subcontainer processes
- Move root container information into a struct for
clarity
Updates #2714
PiperOrigin-RevId: 321204798 |
259,898 | 14.07.2020 12:27:17 | 25,200 | 221e1da947f8f892e29d1ff0a382b7a171ab0437 | Test IPv6 fragment reassembly
A packetimpact test for: "A node must be able to accept a fragmented packet
that, after reassembly, is as large as 1500 octets." | [
{
"change_type": "MODIFY",
"old_path": "test/packetimpact/testbench/layers.go",
"new_path": "test/packetimpact/testbench/layers.go",
"diff": "package testbench\nimport (\n+ \"encoding/binary\"\n\"encoding/hex\"\n\"fmt\"\n\"reflect\"\n@@ -470,21 +471,11 @@ func (l *IPv6) ToBytes() ([]byte, error) {\nif l.NextHeader != nil {\nfields.NextHeader = *l.NextHeader\n} else {\n- switch n := l.next().(type) {\n- case *TCP:\n- fields.NextHeader = uint8(header.TCPProtocolNumber)\n- case *UDP:\n- fields.NextHeader = uint8(header.UDPProtocolNumber)\n- case *ICMPv6:\n- fields.NextHeader = uint8(header.ICMPv6ProtocolNumber)\n- case *IPv6HopByHopOptionsExtHdr:\n- fields.NextHeader = uint8(header.IPv6HopByHopOptionsExtHdrIdentifier)\n- case *IPv6DestinationOptionsExtHdr:\n- fields.NextHeader = uint8(header.IPv6DestinationOptionsExtHdrIdentifier)\n- default:\n- // TODO(b/150301488): Support more protocols as needed.\n- return nil, fmt.Errorf(\"ToBytes can't deduce the IPv6 header's next protocol: %#v\", n)\n+ nh, err := nextHeaderByLayer(l.next())\n+ if err != nil {\n+ return nil, err\n}\n+ fields.NextHeader = nh\n}\nif l.HopLimit != nil {\nfields.HopLimit = *l.HopLimit\n@@ -514,6 +505,8 @@ func nextIPv6PayloadParser(nextHeader uint8) layerParser {\nreturn parseIPv6HopByHopOptionsExtHdr\ncase header.IPv6DestinationOptionsExtHdrIdentifier:\nreturn parseIPv6DestinationOptionsExtHdr\n+ case header.IPv6FragmentExtHdrIdentifier:\n+ return parseIPv6FragmentExtHdr\n}\nreturn parsePayload\n}\n@@ -566,14 +559,56 @@ type IPv6DestinationOptionsExtHdr struct {\nOptions []byte\n}\n+// IPv6FragmentExtHdr can construct and match an IPv6 Fragment Extension Header.\n+type IPv6FragmentExtHdr struct {\n+ LayerBase\n+ NextHeader *header.IPv6ExtensionHeaderIdentifier\n+ FragmentOffset *uint16\n+ MoreFragments *bool\n+ Identification *uint32\n+}\n+\n+// nextHeaderByLayer finds the correct next header protocol value for layer l.\n+func nextHeaderByLayer(l Layer) (uint8, error) {\n+ if l == nil {\n+ return uint8(header.IPv6NoNextHeaderIdentifier), nil\n+ }\n+ switch l.(type) {\n+ case *TCP:\n+ return uint8(header.TCPProtocolNumber), nil\n+ case *UDP:\n+ return uint8(header.UDPProtocolNumber), nil\n+ case *ICMPv6:\n+ return uint8(header.ICMPv6ProtocolNumber), nil\n+ case *Payload:\n+ return uint8(header.IPv6NoNextHeaderIdentifier), nil\n+ case *IPv6HopByHopOptionsExtHdr:\n+ return uint8(header.IPv6HopByHopOptionsExtHdrIdentifier), nil\n+ case *IPv6DestinationOptionsExtHdr:\n+ return uint8(header.IPv6DestinationOptionsExtHdrIdentifier), nil\n+ case *IPv6FragmentExtHdr:\n+ return uint8(header.IPv6FragmentExtHdrIdentifier), nil\n+ default:\n+ // TODO(b/161005083): Support more protocols as needed.\n+ return 0, fmt.Errorf(\"failed to deduce the IPv6 header's next protocol: %T\", l)\n+ }\n+}\n+\n// ipv6OptionsExtHdrToBytes serializes an options extension header into bytes.\n-func ipv6OptionsExtHdrToBytes(nextHeader *header.IPv6ExtensionHeaderIdentifier, options []byte) []byte {\n+func ipv6OptionsExtHdrToBytes(nextHeader *header.IPv6ExtensionHeaderIdentifier, nextLayer Layer, options []byte) ([]byte, error) {\nlength := len(options) + 2\n+ if length%8 != 0 {\n+ return nil, fmt.Errorf(\"IPv6 extension headers must be a multiple of 8 octets long, but the length given: %d, options: %s\", length, hex.Dump(options))\n+ }\nbytes := make([]byte, length)\n- if nextHeader == nil {\n- bytes[0] = byte(header.IPv6NoNextHeaderIdentifier)\n- } else {\n+ if nextHeader != nil {\nbytes[0] = byte(*nextHeader)\n+ } else {\n+ nh, err := nextHeaderByLayer(nextLayer)\n+ if err != nil {\n+ return nil, err\n+ }\n+ bytes[0] = nh\n}\n// ExtHdrLen field is the length of the extension header\n// 
in 8-octet unit, ignoring the first 8 octets.\n@@ -581,7 +616,7 @@ func ipv6OptionsExtHdrToBytes(nextHeader *header.IPv6ExtensionHeaderIdentifier,\n// https://tools.ietf.org/html/rfc2460#section-4.6\nbytes[1] = uint8((length - 8) / 8)\ncopy(bytes[2:], options)\n- return bytes\n+ return bytes, nil\n}\n// IPv6ExtHdrIdent is a helper routine that allocates a new\n@@ -591,14 +626,45 @@ func IPv6ExtHdrIdent(id header.IPv6ExtensionHeaderIdentifier) *header.IPv6Extens\nreturn &id\n}\n-// ToBytes implements Layer.ToBytes\n+// ToBytes implements Layer.ToBytes.\nfunc (l *IPv6HopByHopOptionsExtHdr) ToBytes() ([]byte, error) {\n- return ipv6OptionsExtHdrToBytes(l.NextHeader, l.Options), nil\n+ return ipv6OptionsExtHdrToBytes(l.NextHeader, l.next(), l.Options)\n}\n-// ToBytes implements Layer.ToBytes\n+// ToBytes implements Layer.ToBytes.\nfunc (l *IPv6DestinationOptionsExtHdr) ToBytes() ([]byte, error) {\n- return ipv6OptionsExtHdrToBytes(l.NextHeader, l.Options), nil\n+ return ipv6OptionsExtHdrToBytes(l.NextHeader, l.next(), l.Options)\n+}\n+\n+// ToBytes implements Layer.ToBytes.\n+func (l *IPv6FragmentExtHdr) ToBytes() ([]byte, error) {\n+ var offset, mflag uint16\n+ var ident uint32\n+ bytes := make([]byte, header.IPv6FragmentExtHdrLength)\n+ if l.NextHeader != nil {\n+ bytes[0] = byte(*l.NextHeader)\n+ } else {\n+ nh, err := nextHeaderByLayer(l.next())\n+ if err != nil {\n+ return nil, err\n+ }\n+ bytes[0] = nh\n+ }\n+ bytes[1] = 0 // reserved\n+ if l.MoreFragments != nil && *l.MoreFragments {\n+ mflag = 1\n+ }\n+ if l.FragmentOffset != nil {\n+ offset = *l.FragmentOffset\n+ }\n+ if l.Identification != nil {\n+ ident = *l.Identification\n+ }\n+ offsetAndMflag := offset<<3 | mflag\n+ binary.BigEndian.PutUint16(bytes[2:], offsetAndMflag)\n+ binary.BigEndian.PutUint32(bytes[4:], ident)\n+\n+ return bytes, nil\n}\n// parseIPv6ExtHdr parses an IPv6 extension header and returns the NextHeader\n@@ -631,6 +697,26 @@ func parseIPv6DestinationOptionsExtHdr(b []byte) (Layer, layerParser) {\nreturn &IPv6DestinationOptionsExtHdr{NextHeader: &nextHeader, Options: options}, nextParser\n}\n+// Bool is a helper routine that allocates a new\n+// bool value to store v and returns a pointer to it.\n+func Bool(v bool) *bool {\n+ return &v\n+}\n+\n+// parseIPv6FragmentExtHdr parses the bytes assuming that they start\n+// with an IPv6 Fragment Extension Header.\n+func parseIPv6FragmentExtHdr(b []byte) (Layer, layerParser) {\n+ nextHeader := b[0]\n+ var extHdr header.IPv6FragmentExtHdr\n+ copy(extHdr[:], b[2:])\n+ return &IPv6FragmentExtHdr{\n+ NextHeader: IPv6ExtHdrIdent(header.IPv6ExtensionHeaderIdentifier(nextHeader)),\n+ FragmentOffset: Uint16(extHdr.FragmentOffset()),\n+ MoreFragments: Bool(extHdr.More()),\n+ Identification: Uint32(extHdr.ID()),\n+ }, nextIPv6PayloadParser(nextHeader)\n+}\n+\nfunc (l *IPv6HopByHopOptionsExtHdr) length() int {\nreturn len(l.Options) + 2\n}\n@@ -667,13 +753,31 @@ func (l *IPv6DestinationOptionsExtHdr) String() string {\nreturn stringLayer(l)\n}\n+func (*IPv6FragmentExtHdr) length() int {\n+ return header.IPv6FragmentExtHdrLength\n+}\n+\n+func (l *IPv6FragmentExtHdr) match(other Layer) bool {\n+ return equalLayer(l, other)\n+}\n+\n+// merge overrides the values in l with the values from other but only in fields\n+// where the value is not nil.\n+func (l *IPv6FragmentExtHdr) merge(other Layer) error {\n+ return mergeLayer(l, other)\n+}\n+\n+func (l *IPv6FragmentExtHdr) String() string {\n+ return stringLayer(l)\n+}\n+\n// ICMPv6 can construct and match an ICMPv6 encapsulation.\ntype 
ICMPv6 struct {\nLayerBase\nType *header.ICMPv6Type\nCode *byte\nChecksum *uint16\n- NDPPayload []byte\n+ Payload []byte\n}\nfunc (l *ICMPv6) String() string {\n@@ -684,7 +788,7 @@ func (l *ICMPv6) String() string {\n// ToBytes implements Layer.ToBytes.\nfunc (l *ICMPv6) ToBytes() ([]byte, error) {\n- b := make([]byte, header.ICMPv6HeaderSize+len(l.NDPPayload))\n+ b := make([]byte, header.ICMPv6HeaderSize+len(l.Payload))\nh := header.ICMPv6(b)\nif l.Type != nil {\nh.SetType(*l.Type)\n@@ -692,7 +796,7 @@ func (l *ICMPv6) ToBytes() ([]byte, error) {\nif l.Code != nil {\nh.SetCode(*l.Code)\n}\n- copy(h.NDPPayload(), l.NDPPayload)\n+ copy(h.NDPPayload(), l.Payload)\nif l.Checksum != nil {\nh.SetChecksum(*l.Checksum)\n} else {\n@@ -728,7 +832,7 @@ func parseICMPv6(b []byte) (Layer, layerParser) {\nType: ICMPv6Type(h.Type()),\nCode: Byte(h.Code()),\nChecksum: Uint16(h.Checksum()),\n- NDPPayload: h.NDPPayload(),\n+ Payload: h.NDPPayload(),\n}\nreturn &icmpv6, nil\n}\n@@ -738,7 +842,7 @@ func (l *ICMPv6) match(other Layer) bool {\n}\nfunc (l *ICMPv6) length() int {\n- return header.ICMPv6HeaderSize + len(l.NDPPayload)\n+ return header.ICMPv6HeaderSize + len(l.Payload)\n}\n// merge overrides the values in l with the values from other but only in fields\n"
},
{
"change_type": "MODIFY",
"old_path": "test/packetimpact/testbench/layers_test.go",
"new_path": "test/packetimpact/testbench/layers_test.go",
"diff": "@@ -596,7 +596,104 @@ func TestIPv6ExtHdrOptions(t *testing.T) {\nType: ICMPv6Type(header.ICMPv6ParamProblem),\nCode: Byte(0),\nChecksum: Uint16(0x5f98),\n- NDPPayload: []byte{0x00, 0x00, 0x00, 0x06},\n+ Payload: []byte{0x00, 0x00, 0x00, 0x06},\n+ },\n+ },\n+ },\n+ {\n+ description: \"IPv6/HopByHop/Fragment\",\n+ wantBytes: []byte{\n+ // IPv6 Header\n+ 0x60, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x40, 0x00, 0x00,\n+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n+ 0x00, 0x00, 0x00, 0x01, 0xfe, 0x80, 0x00, 0x00, 0x00, 0x00,\n+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xde, 0xad, 0xbe, 0xef,\n+ // HopByHop Options\n+ 0x2c, 0x00, 0x05, 0x02, 0x00, 0x00, 0x01, 0x00,\n+ // Fragment ExtHdr\n+ 0x3b, 0x00, 0x03, 0x20, 0x00, 0x00, 0x00, 0x2a,\n+ },\n+ wantLayers: []Layer{\n+ &IPv6{\n+ SrcAddr: Address(tcpip.Address(net.ParseIP(\"::1\"))),\n+ DstAddr: Address(tcpip.Address(net.ParseIP(\"fe80::dead:beef\"))),\n+ },\n+ &IPv6HopByHopOptionsExtHdr{\n+ NextHeader: IPv6ExtHdrIdent(header.IPv6FragmentExtHdrIdentifier),\n+ Options: []byte{0x05, 0x02, 0x00, 0x00, 0x01, 0x00},\n+ },\n+ &IPv6FragmentExtHdr{\n+ NextHeader: IPv6ExtHdrIdent(header.IPv6NoNextHeaderIdentifier),\n+ FragmentOffset: Uint16(100),\n+ MoreFragments: Bool(false),\n+ Identification: Uint32(42),\n+ },\n+ &Payload{\n+ Bytes: nil,\n+ },\n+ },\n+ },\n+ {\n+ description: \"IPv6/DestOpt/Fragment/Payload\",\n+ wantBytes: []byte{\n+ // IPv6 Header\n+ 0x60, 0x00, 0x00, 0x00, 0x00, 0x1b, 0x3c, 0x40, 0x00, 0x00,\n+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n+ 0x00, 0x00, 0x00, 0x01, 0xfe, 0x80, 0x00, 0x00, 0x00, 0x00,\n+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xde, 0xad, 0xbe, 0xef,\n+ // Destination Options\n+ 0x2c, 0x00, 0x05, 0x02, 0x00, 0x00, 0x01, 0x00,\n+ // Fragment ExtHdr\n+ 0x3b, 0x00, 0x03, 0x21, 0x00, 0x00, 0x00, 0x2a,\n+ // Sample Data\n+ 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x20, 0x44, 0x61, 0x74, 0x61,\n+ },\n+ wantLayers: []Layer{\n+ &IPv6{\n+ SrcAddr: Address(tcpip.Address(net.ParseIP(\"::1\"))),\n+ DstAddr: Address(tcpip.Address(net.ParseIP(\"fe80::dead:beef\"))),\n+ },\n+ &IPv6DestinationOptionsExtHdr{\n+ NextHeader: IPv6ExtHdrIdent(header.IPv6FragmentExtHdrIdentifier),\n+ Options: []byte{0x05, 0x02, 0x00, 0x00, 0x01, 0x00},\n+ },\n+ &IPv6FragmentExtHdr{\n+ NextHeader: IPv6ExtHdrIdent(header.IPv6NoNextHeaderIdentifier),\n+ FragmentOffset: Uint16(100),\n+ MoreFragments: Bool(true),\n+ Identification: Uint32(42),\n+ },\n+ &Payload{\n+ Bytes: []byte(\"Sample Data\"),\n+ },\n+ },\n+ },\n+ {\n+ description: \"IPv6/Fragment/Payload\",\n+ wantBytes: []byte{\n+ // IPv6 Header\n+ 0x60, 0x00, 0x00, 0x00, 0x00, 0x13, 0x2c, 0x40, 0x00, 0x00,\n+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n+ 0x00, 0x00, 0x00, 0x01, 0xfe, 0x80, 0x00, 0x00, 0x00, 0x00,\n+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xde, 0xad, 0xbe, 0xef,\n+ // Fragment ExtHdr\n+ 0x3b, 0x00, 0x03, 0x21, 0x00, 0x00, 0x00, 0x2a,\n+ // Sample Data\n+ 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x20, 0x44, 0x61, 0x74, 0x61,\n+ },\n+ wantLayers: []Layer{\n+ &IPv6{\n+ SrcAddr: Address(tcpip.Address(net.ParseIP(\"::1\"))),\n+ DstAddr: Address(tcpip.Address(net.ParseIP(\"fe80::dead:beef\"))),\n+ },\n+ &IPv6FragmentExtHdr{\n+ NextHeader: IPv6ExtHdrIdent(header.IPv6NoNextHeaderIdentifier),\n+ FragmentOffset: Uint16(100),\n+ MoreFragments: Bool(true),\n+ Identification: Uint32(42),\n+ },\n+ &Payload{\n+ Bytes: []byte(\"Sample Data\"),\n},\n},\n},\n@@ -606,6 +703,19 @@ func TestIPv6ExtHdrOptions(t *testing.T) {\nif !layers.match(tt.wantLayers) {\nt.Fatalf(\"match 
failed with diff: %s\", layers.diff(tt.wantLayers))\n}\n+ // Make sure we can generate correct next header values and checksums\n+ for _, layer := range layers {\n+ switch layer := layer.(type) {\n+ case *IPv6HopByHopOptionsExtHdr:\n+ layer.NextHeader = nil\n+ case *IPv6DestinationOptionsExtHdr:\n+ layer.NextHeader = nil\n+ case *IPv6FragmentExtHdr:\n+ layer.NextHeader = nil\n+ case *ICMPv6:\n+ layer.Checksum = nil\n+ }\n+ }\ngotBytes, err := layers.ToBytes()\nif err != nil {\nt.Fatalf(\"ToBytes() failed on %s: %s\", &layers, err)\n"
},
{
"change_type": "MODIFY",
"old_path": "test/packetimpact/tests/BUILD",
"new_path": "test/packetimpact/tests/BUILD",
"diff": "@@ -254,6 +254,20 @@ packetimpact_go_test(\n],\n)\n+packetimpact_go_test(\n+ name = \"ipv6_fragment_reassembly\",\n+ srcs = [\"ipv6_fragment_reassembly_test.go\"],\n+ # TODO(b/160919104): Fix netstack then remove the line below.\n+ expect_netstack_failure = True,\n+ deps = [\n+ \"//pkg/tcpip\",\n+ \"//pkg/tcpip/buffer\",\n+ \"//pkg/tcpip/header\",\n+ \"//test/packetimpact/testbench\",\n+ \"@org_golang_x_sys//unix:go_default_library\",\n+ ],\n+)\n+\npacketimpact_go_test(\nname = \"udp_send_recv_dgram\",\nsrcs = [\"udp_send_recv_dgram_test.go\"],\n"
},
{
"change_type": "MODIFY",
"old_path": "test/packetimpact/tests/icmpv6_param_problem_test.go",
"new_path": "test/packetimpact/tests/icmpv6_param_problem_test.go",
"diff": "@@ -42,7 +42,7 @@ func TestICMPv6ParamProblemTest(t *testing.T) {\n}\nicmpv6 := testbench.ICMPv6{\nType: testbench.ICMPv6Type(header.ICMPv6EchoRequest),\n- NDPPayload: []byte(\"hello world\"),\n+ Payload: []byte(\"hello world\"),\n}\ntoSend := (*testbench.Connection)(&conn).CreateFrame(testbench.Layers{&ipv6}, &icmpv6)\n@@ -63,7 +63,7 @@ func TestICMPv6ParamProblemTest(t *testing.T) {\nexpectedPayload = append(b, expectedPayload...)\nexpectedICMPv6 := testbench.ICMPv6{\nType: testbench.ICMPv6Type(header.ICMPv6ParamProblem),\n- NDPPayload: expectedPayload,\n+ Payload: expectedPayload,\n}\nparamProblem := testbench.Layers{\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "test/packetimpact/tests/ipv6_fragment_reassembly_test.go",
"diff": "+// Copyright 2020 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package ipv6_fragment_reassembly_test\n+\n+import (\n+ \"bytes\"\n+ \"encoding/binary\"\n+ \"encoding/hex\"\n+ \"flag\"\n+ \"net\"\n+ \"testing\"\n+ \"time\"\n+\n+ \"gvisor.dev/gvisor/pkg/tcpip\"\n+ \"gvisor.dev/gvisor/pkg/tcpip/buffer\"\n+ \"gvisor.dev/gvisor/pkg/tcpip/header\"\n+ \"gvisor.dev/gvisor/test/packetimpact/testbench\"\n+)\n+\n+const (\n+ // The payload length for the first fragment we send. This number\n+ // is a multiple of 8 near 750 (half of 1500).\n+ firstPayloadLength = 752\n+ // The ID field for our outgoing fragments.\n+ fragmentID = 1\n+ // A node must be able to accept a fragmented packet that,\n+ // after reassembly, is as large as 1500 octets.\n+ reassemblyCap = 1500\n+)\n+\n+func init() {\n+ testbench.RegisterFlags(flag.CommandLine)\n+}\n+\n+func TestIPv6FragmentReassembly(t *testing.T) {\n+ dut := testbench.NewDUT(t)\n+ defer dut.TearDown()\n+ conn := testbench.NewIPv6Conn(t, testbench.IPv6{}, testbench.IPv6{})\n+ defer conn.Close()\n+\n+ firstPayloadToSend := make([]byte, firstPayloadLength)\n+ for i := range firstPayloadToSend {\n+ firstPayloadToSend[i] = 'A'\n+ }\n+\n+ secondPayloadLength := reassemblyCap - firstPayloadLength - header.ICMPv6EchoMinimumSize\n+ secondPayloadToSend := firstPayloadToSend[:secondPayloadLength]\n+\n+ icmpv6EchoPayload := make([]byte, 4)\n+ binary.BigEndian.PutUint16(icmpv6EchoPayload[0:], 0)\n+ binary.BigEndian.PutUint16(icmpv6EchoPayload[2:], 0)\n+ icmpv6EchoPayload = append(icmpv6EchoPayload, firstPayloadToSend...)\n+\n+ lIP := tcpip.Address(net.ParseIP(testbench.LocalIPv6).To16())\n+ rIP := tcpip.Address(net.ParseIP(testbench.RemoteIPv6).To16())\n+ icmpv6 := testbench.ICMPv6{\n+ Type: testbench.ICMPv6Type(header.ICMPv6EchoRequest),\n+ Code: testbench.Byte(0),\n+ Payload: icmpv6EchoPayload,\n+ }\n+ icmpv6Bytes, err := icmpv6.ToBytes()\n+ if err != nil {\n+ t.Fatalf(\"failed to serialize ICMPv6: %s\", err)\n+ }\n+ cksum := header.ICMPv6Checksum(\n+ header.ICMPv6(icmpv6Bytes),\n+ lIP,\n+ rIP,\n+ buffer.NewVectorisedView(len(secondPayloadToSend), []buffer.View{secondPayloadToSend}),\n+ )\n+\n+ conn.Send(testbench.IPv6{},\n+ &testbench.IPv6FragmentExtHdr{\n+ FragmentOffset: testbench.Uint16(0),\n+ MoreFragments: testbench.Bool(true),\n+ Identification: testbench.Uint32(fragmentID),\n+ },\n+ &testbench.ICMPv6{\n+ Type: testbench.ICMPv6Type(header.ICMPv6EchoRequest),\n+ Code: testbench.Byte(0),\n+ Payload: icmpv6EchoPayload,\n+ Checksum: &cksum,\n+ })\n+\n+ icmpv6ProtoNum := header.IPv6ExtensionHeaderIdentifier(header.ICMPv6ProtocolNumber)\n+\n+ conn.Send(testbench.IPv6{},\n+ &testbench.IPv6FragmentExtHdr{\n+ NextHeader: &icmpv6ProtoNum,\n+ FragmentOffset: testbench.Uint16((firstPayloadLength + header.ICMPv6EchoMinimumSize) / 8),\n+ MoreFragments: testbench.Bool(false),\n+ Identification: testbench.Uint32(fragmentID),\n+ },\n+ &testbench.Payload{\n+ Bytes: secondPayloadToSend,\n+ })\n+\n+ gotEchoReplyFirstPart, err 
:= conn.ExpectFrame(testbench.Layers{\n+ &testbench.Ether{},\n+ &testbench.IPv6{},\n+ &testbench.IPv6FragmentExtHdr{\n+ FragmentOffset: testbench.Uint16(0),\n+ MoreFragments: testbench.Bool(true),\n+ },\n+ &testbench.ICMPv6{\n+ Type: testbench.ICMPv6Type(header.ICMPv6EchoReply),\n+ Code: testbench.Byte(0),\n+ },\n+ }, time.Second)\n+ if err != nil {\n+ t.Fatalf(\"expected a fragmented ICMPv6 Echo Reply, but got none: %s\", err)\n+ }\n+\n+ id := *gotEchoReplyFirstPart[2].(*testbench.IPv6FragmentExtHdr).Identification\n+ gotFirstPayload, err := gotEchoReplyFirstPart[len(gotEchoReplyFirstPart)-1].ToBytes()\n+ if err != nil {\n+ t.Fatalf(\"failed to serialize ICMPv6: %s\", err)\n+ }\n+ icmpPayload := gotFirstPayload[header.ICMPv6EchoMinimumSize:]\n+ receivedLen := len(icmpPayload)\n+ wantSecondPayloadLen := reassemblyCap - header.ICMPv6EchoMinimumSize - receivedLen\n+ wantFirstPayload := make([]byte, receivedLen)\n+ for i := range wantFirstPayload {\n+ wantFirstPayload[i] = 'A'\n+ }\n+ wantSecondPayload := wantFirstPayload[:wantSecondPayloadLen]\n+ if !bytes.Equal(icmpPayload, wantFirstPayload) {\n+ t.Fatalf(\"received unexpected payload, got: %s, want: %s\",\n+ hex.Dump(icmpPayload),\n+ hex.Dump(wantFirstPayload))\n+ }\n+\n+ gotEchoReplySecondPart, err := conn.ExpectFrame(testbench.Layers{\n+ &testbench.Ether{},\n+ &testbench.IPv6{},\n+ &testbench.IPv6FragmentExtHdr{\n+ NextHeader: &icmpv6ProtoNum,\n+ FragmentOffset: testbench.Uint16(uint16((receivedLen + header.ICMPv6EchoMinimumSize) / 8)),\n+ MoreFragments: testbench.Bool(false),\n+ Identification: &id,\n+ },\n+ &testbench.ICMPv6{},\n+ }, time.Second)\n+ if err != nil {\n+ t.Fatalf(\"expected the rest of ICMPv6 Echo Reply, but got none: %s\", err)\n+ }\n+ secondPayload, err := gotEchoReplySecondPart[len(gotEchoReplySecondPart)-1].ToBytes()\n+ if err != nil {\n+ t.Fatalf(\"failed to serialize ICMPv6 Echo Reply: %s\", err)\n+ }\n+ if !bytes.Equal(secondPayload, wantSecondPayload) {\n+ t.Fatalf(\"received unexpected payload, got: %s, want: %s\",\n+ hex.Dump(secondPayload),\n+ hex.Dump(wantSecondPayload))\n+ }\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "test/packetimpact/tests/ipv6_unknown_options_action_test.go",
"new_path": "test/packetimpact/tests/ipv6_unknown_options_action_test.go",
"diff": "@@ -173,7 +173,7 @@ func TestIPv6UnknownOptionAction(t *testing.T) {\n&tb.ICMPv6{\nType: tb.ICMPv6Type(header.ICMPv6ParamProblem),\nCode: tb.Byte(2),\n- NDPPayload: icmpv6Payload,\n+ Payload: icmpv6Payload,\n},\n}, time.Second)\nif tt.wantICMPv6 && err != nil {\n"
}
] | Go | Apache License 2.0 | google/gvisor | Test IPv6 fragment reassembly
A packetimpact test for: "A node must be able to accept a fragmented packet
that, after reassembly, is as large as 1500 octets."
PiperOrigin-RevId: 321210729 |
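
The IPv6FragmentExtHdr.ToBytes logic in the entry above packs a 13-bit fragment offset (counted in 8-octet units), the M (more fragments) flag, and a 32-bit identification into the fixed 8-byte Fragment extension header. A minimal standalone Go sketch of that packing follows; it is not part of the testbench package, and the sample values (next header 59, offset 100, ID 42) are taken from the "IPv6/HopByHop/Fragment" test case in the diff.

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// packFragmentExtHdr builds the fixed 8-byte IPv6 Fragment extension header:
// a next-header octet, a reserved octet, the 13-bit fragment offset (in units
// of 8 octets) packed together with the M (more fragments) flag, and a 32-bit
// identification value.
func packFragmentExtHdr(nextHeader uint8, offset uint16, more bool, id uint32) []byte {
	b := make([]byte, 8)
	b[0] = nextHeader
	b[1] = 0 // reserved
	offsetAndFlags := offset << 3
	if more {
		offsetAndFlags |= 1
	}
	binary.BigEndian.PutUint16(b[2:], offsetAndFlags)
	binary.BigEndian.PutUint32(b[4:], id)
	return b
}

func main() {
	// Next header 59 (no next header), offset 100, no more fragments, ID 42.
	fmt.Printf("% x\n", packFragmentExtHdr(59, 100, false, 42))
}
```

The program prints 3b 00 03 20 00 00 00 2a, matching the Fragment ExtHdr bytes expected by that test case.
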
259,891 | 14.07.2020 13:46:55 | 25,200 | 8a0082f5f3aaad053873800feae8d0fb8b504c50 | remove IPv6 docker
Will re-submit when ICMP dest unreachable is handled correctly and it can be
turned back on. | [
{
"change_type": "MODIFY",
"old_path": "tools/vm/ubuntu1604/30_docker.sh",
"new_path": "tools/vm/ubuntu1604/30_docker.sh",
"diff": "@@ -54,11 +54,8 @@ while true; do\ndone\n# Enable experimental features, for cross-building aarch64 images.\n-# Enable Docker IPv6.\ncat > /etc/docker/daemon.json <<EOF\n{\n- \"experimental\": true,\n- \"fixed-cidr-v6\": \"2001:db8:1::/64\",\n- \"ipv6\": true\n+ \"experimental\": true\n}\nEOF\n"
}
] | Go | Apache License 2.0 | google/gvisor | remove IPv6 docker
Will re-submit when ICMP dest unreachable is handled correctly and it can be
turned back on.
PiperOrigin-RevId: 321227330 |
259,860 | 14.07.2020 17:29:53 | 25,200 | 1b9965e06a966977a99569484da139d64d1db95e | Update special file option name in comment. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/gofer/special_file.go",
"new_path": "pkg/sentry/fsimpl/gofer/special_file.go",
"diff": "@@ -28,9 +28,9 @@ import (\n)\n// specialFileFD implements vfs.FileDescriptionImpl for pipes, sockets, device\n-// special files, and (when filesystemOptions.specialRegularFiles is in effect)\n-// regular files. specialFileFD differs from regularFileFD by using per-FD\n-// handles instead of shared per-dentry handles, and never buffering I/O.\n+// special files, and (when filesystemOptions.regularFilesUseSpecialFileFD is\n+// in effect) regular files. specialFileFD differs from regularFileFD by using\n+// per-FD handles instead of shared per-dentry handles, and never buffering I/O.\ntype specialFileFD struct {\nfileDescription\n"
}
] | Go | Apache License 2.0 | google/gvisor | Update special file option name in comment.
PiperOrigin-RevId: 321269281 |
259,905 | 15.07.2020 22:02:26 | -28,800 | 505bebae43183a68ee8873764817d282a6c49ec6 | hostinet: fix fd leak in fdnotifier for VFS2
When we fail to create the new socket after adding the fd to
fdnotifier, we should remove the fd from fdnotifier, because we
are going to close the fd directly.
Fixes: | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/hostinet/socket_vfs2.go",
"new_path": "pkg/sentry/socket/hostinet/socket_vfs2.go",
"diff": "@@ -71,6 +71,7 @@ func newVFS2Socket(t *kernel.Task, family int, stype linux.SockType, protocol in\nDenyPWrite: true,\nUseDentryMetadata: true,\n}); err != nil {\n+ fdnotifier.RemoveFD(int32(s.fd))\nreturn nil, syserr.FromError(err)\n}\nreturn vfsfd, nil\n"
}
] | Go | Apache License 2.0 | google/gvisor | hostinet: fix fd leak in fdnotifier for VFS2
When we fail to create the new socket after adding the fd to
fdnotifier, we should remove the fd from fdnotifier, because we
are going to close the fd directly.
Fixes: #3241
Signed-off-by: Tiwei Bie <[email protected]> |
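
The fix in the entry above is an instance of a general construction pattern: when a later step of a constructor fails, side effects from the earlier steps (here, registering the fd with fdnotifier) must be rolled back before the resource is closed. The sketch below shows that shape with toy types; notifier, socket and newSocket are invented names for illustration and are not the real hostinet or fdnotifier APIs.

```go
package main

import (
	"errors"
	"fmt"
)

// notifier is a toy stand-in for an fd poller: it records which fds have been
// registered for event notification.
type notifier struct{ fds map[int]bool }

func (n *notifier) AddFD(fd int) error { n.fds[fd] = true; return nil }
func (n *notifier) RemoveFD(fd int)    { delete(n.fds, fd) }

type socket struct{ fd int }

// newSocket registers fd with the notifier and then runs a second
// construction step that may fail. On failure it unregisters the fd again,
// because the caller is about to close it; skipping that rollback leaves a
// stale entry behind, which is the leak described above.
func newSocket(n *notifier, fd int, build func(int) (*socket, error)) (*socket, error) {
	if err := n.AddFD(fd); err != nil {
		return nil, err
	}
	s, err := build(fd)
	if err != nil {
		n.RemoveFD(fd) // rollback, analogous to fdnotifier.RemoveFD in the fix
		return nil, err
	}
	return s, nil
}

func main() {
	n := &notifier{fds: map[int]bool{}}
	_, err := newSocket(n, 3, func(int) (*socket, error) { return nil, errors.New("construction failed") })
	fmt.Println(err, len(n.fds)) // construction failed 0 (no stale registration)
}
```
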
259,881 | 18.06.2020 14:29:39 | 14,400 | 1481673178494def3dd335ff240c8cd9219374ce | Apply pdeathsig to gofer for runsc run/do
Much like the boot process, apply pdeathsig to the gofer for cases where
the sandbox lifecycle is attached to the parent (runsc run/do).
This isn't strictly necessary, as the gofer normally exits once the
sentry disappears, but this makes that extra reliable. | [
{
"change_type": "MODIFY",
"old_path": "runsc/container/container.go",
"new_path": "runsc/container/container.go",
"diff": "@@ -324,7 +324,7 @@ func New(conf *boot.Config, args Args) (*Container, error) {\n}\n}\nif err := runInCgroup(cg, func() error {\n- ioFiles, specFile, err := c.createGoferProcess(args.Spec, conf, args.BundleDir)\n+ ioFiles, specFile, err := c.createGoferProcess(args.Spec, conf, args.BundleDir, args.Attached)\nif err != nil {\nreturn err\n}\n@@ -427,7 +427,7 @@ func (c *Container) Start(conf *boot.Config) error {\n// the start (and all their children processes).\nif err := runInCgroup(c.Sandbox.Cgroup, func() error {\n// Create the gofer process.\n- ioFiles, mountsFile, err := c.createGoferProcess(c.Spec, conf, c.BundleDir)\n+ ioFiles, mountsFile, err := c.createGoferProcess(c.Spec, conf, c.BundleDir, false)\nif err != nil {\nreturn err\n}\n@@ -861,7 +861,7 @@ func (c *Container) waitForStopped() error {\nreturn backoff.Retry(op, b)\n}\n-func (c *Container) createGoferProcess(spec *specs.Spec, conf *boot.Config, bundleDir string) ([]*os.File, *os.File, error) {\n+func (c *Container) createGoferProcess(spec *specs.Spec, conf *boot.Config, bundleDir string, attached bool) ([]*os.File, *os.File, error) {\n// Start with the general config flags.\nargs := conf.ToFlags()\n@@ -955,6 +955,14 @@ func (c *Container) createGoferProcess(spec *specs.Spec, conf *boot.Config, bund\ncmd.ExtraFiles = goferEnds\ncmd.Args[0] = \"runsc-gofer\"\n+ if attached {\n+ // The gofer is attached to the lifetime of this process, so it\n+ // should synchronously die when this process dies.\n+ cmd.SysProcAttr = &syscall.SysProcAttr{\n+ Pdeathsig: syscall.SIGKILL,\n+ }\n+ }\n+\n// Enter new namespaces to isolate from the rest of the system. Don't unshare\n// cgroup because gofer is added to a cgroup in the caller's namespace.\nnss := []specs.LinuxNamespace{\n"
}
] | Go | Apache License 2.0 | google/gvisor | Apply pdeathsig to gofer for runsc run/do
Much like the boot process, apply pdeathsig to the gofer for cases where
the sandbox lifecycle is attached to the parent (runsc run/do).
This isn't strictly necessary, as the gofer normally exits once the
sentry disappears, but this makes that extra reliable. |
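
The mechanism behind the change above is the Linux parent-death signal, which os/exec exposes through syscall.SysProcAttr.Pdeathsig (the same field the diff sets on the gofer command). A minimal, Linux-only sketch that is unrelated to runsc itself:

```go
package main

import (
	"log"
	"os/exec"
	"syscall"
)

func main() {
	// Ask the kernel to send SIGKILL to the child when this process dies, so
	// the child cannot outlive its parent even if the parent crashes before
	// running any cleanup code.
	cmd := exec.Command("sleep", "1h")
	cmd.SysProcAttr = &syscall.SysProcAttr{
		Pdeathsig: syscall.SIGKILL,
	}
	if err := cmd.Start(); err != nil {
		log.Fatalf("starting child: %v", err)
	}
	log.Printf("child pid %d is tied to this process's lifetime", cmd.Process.Pid)
	_ = cmd.Process.Kill() // explicit cleanup, just for the example
}
```

As the message notes, the gofer normally exits once the sentry disappears; pdeathsig only makes that teardown synchronous and reliable rather than being the primary exit path.
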
260,003 | 15.07.2020 12:15:11 | 25,200 | 1d11c403787b360140dd08be3a25c5689a89c7a0 | Fix errors not getting caught when building vm image.
`set -e` does not catch errors in bash command substitutions like
`echo $(bad cmd)` so bazel thinks it succeeded and cached the result. | [
{
"change_type": "MODIFY",
"old_path": "tools/vm/README.md",
"new_path": "tools/vm/README.md",
"diff": "@@ -25,6 +25,12 @@ vm_image(\nThese images can be built manually by executing the target. The output on\n`stdout` will be the image id (in the current project).\n+For example:\n+\n+```\n+$ bazel build :ubuntu\n+```\n+\nImages are always named per the hash of all the hermetic input scripts. This\nallows images to be memoized quickly and easily.\n"
},
{
"change_type": "MODIFY",
"old_path": "tools/vm/defs.bzl",
"new_path": "tools/vm/defs.bzl",
"diff": "@@ -60,11 +60,12 @@ def _vm_image_impl(ctx):\n# Run the builder to generate our output.\necho = ctx.actions.declare_file(ctx.label.name)\nresolved_inputs, argv, runfiles_manifests = ctx.resolve_command(\n- command = \"echo -ne \\\"#!/bin/bash\\\\nset -e\\\\nimage=$(%s)\\\\necho ${image}\\\\n\\\" > %s && chmod 0755 %s\" % (\n- ctx.files.builder[0].path,\n- echo.path,\n- echo.path,\n- ),\n+ command = \"\\n\".join([\n+ \"set -e\",\n+ \"image=$(%s)\" % ctx.files.builder[0].path,\n+ \"echo -ne \\\"#!/bin/bash\\\\necho ${image}\\\\n\\\" > %s\" % echo.path,\n+ \"chmod 0755 %s\" % echo.path,\n+ ]),\ntools = [ctx.attr.builder],\n)\nctx.actions.run_shell(\n"
}
] | Go | Apache License 2.0 | google/gvisor | Fix errors not getting caught when building vm image.
`set -e` does not catch errors in bash command substitutions like
`echo $(bad cmd)` so bazel thinks it succeeded and cached the result.
PiperOrigin-RevId: 321412327 |
259,962 | 15.07.2020 14:55:12 | 25,200 | 857d03f258ffafb815698917f2a1ee9e7e265464 | Add support for SO_ERROR to packet sockets.
Packet sockets also seem to allow double binding and do not return an error on
linux. This was tested by running the syscall test in a linux namespace as root
and the current test DoubleBind fails@HEAD.
Passes after this change.
Updates | [
{
"change_type": "MODIFY",
"old_path": "pkg/syserr/netstack.go",
"new_path": "pkg/syserr/netstack.go",
"diff": "@@ -22,7 +22,7 @@ import (\n// Mapping for tcpip.Error types.\nvar (\nErrUnknownProtocol = New(tcpip.ErrUnknownProtocol.String(), linux.EINVAL)\n- ErrUnknownNICID = New(tcpip.ErrUnknownNICID.String(), linux.EINVAL)\n+ ErrUnknownNICID = New(tcpip.ErrUnknownNICID.String(), linux.ENODEV)\nErrUnknownDevice = New(tcpip.ErrUnknownDevice.String(), linux.ENODEV)\nErrUnknownProtocolOption = New(tcpip.ErrUnknownProtocolOption.String(), linux.ENOPROTOOPT)\nErrDuplicateNICID = New(tcpip.ErrDuplicateNICID.String(), linux.EEXIST)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/packet/endpoint.go",
"new_path": "pkg/tcpip/transport/packet/endpoint.go",
"diff": "@@ -79,6 +79,11 @@ type endpoint struct {\nclosed bool\nstats tcpip.TransportEndpointStats `state:\"nosave\"`\nbound bool\n+ boundNIC tcpip.NICID\n+\n+ // lastErrorMu protects lastError.\n+ lastErrorMu sync.Mutex `state:\"nosave\"`\n+ lastError *tcpip.Error `state:\".(string)\"`\n}\n// NewEndpoint returns a new packet endpoint.\n@@ -229,12 +234,14 @@ func (ep *endpoint) Bind(addr tcpip.FullAddress) *tcpip.Error {\nep.mu.Lock()\ndefer ep.mu.Unlock()\n- if ep.bound {\n- return tcpip.ErrAlreadyBound\n+ if ep.bound && ep.boundNIC == addr.NIC {\n+ // If the NIC being bound is the same then just return success.\n+ return nil\n}\n// Unregister endpoint with all the nics.\nep.stack.UnregisterPacketEndpoint(0, ep.netProto, ep)\n+ ep.bound = false\n// Bind endpoint to receive packets from specific interface.\nif err := ep.stack.RegisterPacketEndpoint(addr.NIC, ep.netProto, ep); err != nil {\n@@ -242,6 +249,7 @@ func (ep *endpoint) Bind(addr tcpip.FullAddress) *tcpip.Error {\n}\nep.bound = true\n+ ep.boundNIC = addr.NIC\nreturn nil\n}\n@@ -336,8 +344,21 @@ func (ep *endpoint) SetSockOptInt(opt tcpip.SockOptInt, v int) *tcpip.Error {\n}\n}\n+func (ep *endpoint) takeLastError() *tcpip.Error {\n+ ep.lastErrorMu.Lock()\n+ defer ep.lastErrorMu.Unlock()\n+\n+ err := ep.lastError\n+ ep.lastError = nil\n+ return err\n+}\n+\n// GetSockOpt implements tcpip.Endpoint.GetSockOpt.\nfunc (ep *endpoint) GetSockOpt(opt interface{}) *tcpip.Error {\n+ switch opt.(type) {\n+ case tcpip.ErrorOption:\n+ return ep.takeLastError()\n+ }\nreturn tcpip.ErrNotSupported\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/packet/endpoint_state.go",
"new_path": "pkg/tcpip/transport/packet/endpoint_state.go",
"diff": "package packet\nimport (\n+ \"gvisor.dev/gvisor/pkg/tcpip\"\n\"gvisor.dev/gvisor/pkg/tcpip/buffer\"\n\"gvisor.dev/gvisor/pkg/tcpip/stack\"\n)\n@@ -70,3 +71,21 @@ func (ep *endpoint) afterLoad() {\npanic(*err)\n}\n}\n+\n+// saveLastError is invoked by stateify.\n+func (ep *endpoint) saveLastError() string {\n+ if ep.lastError == nil {\n+ return \"\"\n+ }\n+\n+ return ep.lastError.String()\n+}\n+\n+// loadLastError is invoked by stateify.\n+func (ep *endpoint) loadLastError(s string) {\n+ if s == \"\" {\n+ return\n+ }\n+\n+ ep.lastError = tcpip.StringToError(s)\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/packet_socket.cc",
"new_path": "test/syscalls/linux/packet_socket.cc",
"diff": "@@ -343,7 +343,7 @@ TEST_P(CookedPacketTest, BindReceive) {\n}\n// Double Bind socket.\n-TEST_P(CookedPacketTest, DoubleBind) {\n+TEST_P(CookedPacketTest, DoubleBindSucceeds) {\nstruct sockaddr_ll bind_addr = {};\nbind_addr.sll_family = AF_PACKET;\nbind_addr.sll_protocol = htons(GetParam());\n@@ -354,12 +354,11 @@ TEST_P(CookedPacketTest, DoubleBind) {\nSyscallSucceeds());\n// Binding socket again should fail.\n- ASSERT_THAT(\n- bind(socket_, reinterpret_cast<struct sockaddr*>(&bind_addr),\n+ ASSERT_THAT(bind(socket_, reinterpret_cast<struct sockaddr*>(&bind_addr),\nsizeof(bind_addr)),\n- // Linux 4.09 returns EINVAL here, but some time before 4.19 it switched\n- // to EADDRINUSE.\n- AnyOf(SyscallFailsWithErrno(EADDRINUSE), SyscallFailsWithErrno(EINVAL)));\n+ // Linux 4.09 returns EINVAL here, but some time before 4.19 it\n+ // switched to EADDRINUSE.\n+ SyscallSucceeds());\n}\n// Bind and verify we do not receive data on interface which is not bound\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/packet_socket_raw.cc",
"new_path": "test/syscalls/linux/packet_socket_raw.cc",
"diff": "@@ -559,6 +559,64 @@ TEST_P(RawPacketTest, SetSocketSendBuf) {\nASSERT_EQ(quarter_sz, val);\n}\n+TEST_P(RawPacketTest, GetSocketError) {\n+ SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));\n+\n+ int val = 0;\n+ socklen_t val_len = sizeof(val);\n+ ASSERT_THAT(getsockopt(s_, SOL_SOCKET, SO_ERROR, &val, &val_len),\n+ SyscallSucceeds());\n+ ASSERT_EQ(val, 0);\n+}\n+\n+TEST_P(RawPacketTest, GetSocketErrorBind) {\n+ SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));\n+\n+ {\n+ // Bind to the loopback device.\n+ struct sockaddr_ll bind_addr = {};\n+ bind_addr.sll_family = AF_PACKET;\n+ bind_addr.sll_protocol = htons(GetParam());\n+ bind_addr.sll_ifindex = GetLoopbackIndex();\n+\n+ ASSERT_THAT(bind(s_, reinterpret_cast<struct sockaddr*>(&bind_addr),\n+ sizeof(bind_addr)),\n+ SyscallSucceeds());\n+\n+ // SO_ERROR should return no errors.\n+ int val = 0;\n+ socklen_t val_len = sizeof(val);\n+ ASSERT_THAT(getsockopt(s_, SOL_SOCKET, SO_ERROR, &val, &val_len),\n+ SyscallSucceeds());\n+ ASSERT_EQ(val, 0);\n+ }\n+\n+ {\n+ // Now try binding to an invalid interface.\n+ struct sockaddr_ll bind_addr = {};\n+ bind_addr.sll_family = AF_PACKET;\n+ bind_addr.sll_protocol = htons(GetParam());\n+ bind_addr.sll_ifindex = 0xffff; // Just pick a really large number.\n+\n+ // Binding should fail with EINVAL\n+ ASSERT_THAT(bind(s_, reinterpret_cast<struct sockaddr*>(&bind_addr),\n+ sizeof(bind_addr)),\n+ SyscallFailsWithErrno(ENODEV));\n+\n+ // SO_ERROR does not return error when the device is invalid.\n+ // On Linux there is just one odd ball condition where this can return\n+ // an error where the device was valid and then removed or disabled\n+ // between the first check for index and the actual registration of\n+ // the packet endpoint. On Netstack this is not possible as the stack\n+ // global mutex is held during registration and check.\n+ int val = 0;\n+ socklen_t val_len = sizeof(val);\n+ ASSERT_THAT(getsockopt(s_, SOL_SOCKET, SO_ERROR, &val, &val_len),\n+ SyscallSucceeds());\n+ ASSERT_EQ(val, 0);\n+ }\n+}\n+\n#ifndef __fuchsia__\nTEST_P(RawPacketTest, SetSocketDetachFilterNoInstalledFilter) {\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add support for SO_ERROR to packet sockets.
Packet sockets also seem to allow double binding and do not return an error on
linux. This was tested by running the syscall test in a linux namespace as root
and the current test DoubleBind fails@HEAD.
Passes after this change.
Updates #173
PiperOrigin-RevId: 321445137 |
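
The GetSockOpt(tcpip.ErrorOption) support added above follows the usual SO_ERROR contract: the pending error is reported once and then cleared. The self-contained sketch below shows that take-and-clear pattern with toy types; lastErrorBox is an invented name and not part of netstack, though take mirrors the takeLastError helper in the diff.

```go
package main

import (
	"errors"
	"fmt"
	"sync"
)

// lastErrorBox mimics the SO_ERROR contract: reading the pending error also
// clears it, so a second read reports that nothing is outstanding.
type lastErrorBox struct {
	mu  sync.Mutex
	err error
}

func (b *lastErrorBox) set(err error) {
	b.mu.Lock()
	defer b.mu.Unlock()
	b.err = err
}

// take returns the pending error (possibly nil) and resets it.
func (b *lastErrorBox) take() error {
	b.mu.Lock()
	defer b.mu.Unlock()
	err := b.err
	b.err = nil
	return err
}

func main() {
	var b lastErrorBox
	b.set(errors.New("connection refused"))
	fmt.Println(b.take()) // connection refused
	fmt.Println(b.take()) // <nil>, since the first read consumed the error
}
```
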
259,891 | 15.07.2020 16:33:46 | 25,200 | e92f38ff0cd2e490637df2081fc8f75ddaf32937 | iptables: remove check for NetworkHeader
This is no longer necessary, as we always set NetworkHeader before calling
iptables.Check. | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/iptables.go",
"new_path": "pkg/tcpip/stack/iptables.go",
"diff": "@@ -292,10 +292,9 @@ func (it *IPTables) startReaper(interval time.Duration) {\n// CheckPackets runs pkts through the rules for hook and returns a map of packets that\n// should not go forward.\n//\n-// Precondition: pkt is a IPv4 packet of at least length header.IPv4MinimumSize.\n-//\n-// TODO(gvisor.dev/issue/170): pk.NetworkHeader will always be set as a\n-// precondition.\n+// Preconditions:\n+// - pkt is a IPv4 packet of at least length header.IPv4MinimumSize.\n+// - pkt.NetworkHeader is not nil.\n//\n// NOTE: unlike the Check API the returned map contains packets that should be\n// dropped.\n@@ -319,9 +318,9 @@ func (it *IPTables) CheckPackets(hook Hook, pkts PacketBufferList, gso *GSO, r *\nreturn drop, natPkts\n}\n-// Precondition: pkt is a IPv4 packet of at least length header.IPv4MinimumSize.\n-// TODO(gvisor.dev/issue/170): pkt.NetworkHeader will always be set as a\n-// precondition.\n+// Preconditions:\n+// - pkt is a IPv4 packet of at least length header.IPv4MinimumSize.\n+// - pkt.NetworkHeader is not nil.\nfunc (it *IPTables) checkChain(hook Hook, pkt *PacketBuffer, table Table, ruleIdx int, gso *GSO, r *Route, address tcpip.Address, nicName string) chainVerdict {\n// Start from ruleIdx and walk the list of rules until a rule gives us\n// a verdict.\n@@ -366,23 +365,12 @@ func (it *IPTables) checkChain(hook Hook, pkt *PacketBuffer, table Table, ruleId\nreturn chainDrop\n}\n-// Precondition: pkt is a IPv4 packet of at least length header.IPv4MinimumSize.\n-// TODO(gvisor.dev/issue/170): pkt.NetworkHeader will always be set as a\n-// precondition.\n+// Preconditions:\n+// - pkt is a IPv4 packet of at least length header.IPv4MinimumSize.\n+// - pkt.NetworkHeader is not nil.\nfunc (it *IPTables) checkRule(hook Hook, pkt *PacketBuffer, table Table, ruleIdx int, gso *GSO, r *Route, address tcpip.Address, nicName string) (RuleVerdict, int) {\nrule := table.Rules[ruleIdx]\n- // If pkt.NetworkHeader hasn't been set yet, it will be contained in\n- // pkt.Data.\n- if pkt.NetworkHeader == nil {\n- var ok bool\n- pkt.NetworkHeader, ok = pkt.Data.PullUp(header.IPv4MinimumSize)\n- if !ok {\n- // Precondition has been violated.\n- panic(fmt.Sprintf(\"iptables checks require IPv4 headers of at least %d bytes\", header.IPv4MinimumSize))\n- }\n- }\n-\n// Check whether the packet matches the IP header filter.\nif !rule.Filter.match(header.IPv4(pkt.NetworkHeader), hook, nicName) {\n// Continue on to the next rule.\n"
}
] | Go | Apache License 2.0 | google/gvisor | iptables: remove check for NetworkHeader
This is no longer necessary, as we always set NetworkHeader before calling
iptables.Check.
PiperOrigin-RevId: 321461978 |
259,975 | 15.07.2020 18:19:52 | 25,200 | 5c8c0d65b9062dcbe195e7131a6a3c3fb8ba9583 | Port httpd benchmark | [
{
"change_type": "ADD",
"old_path": null,
"new_path": "images/benchmarks/ab/Dockerfile",
"diff": "+FROM ubuntu:18.04\n+\n+RUN set -x \\\n+ && apt-get update \\\n+ && apt-get install -y \\\n+ apache2-utils \\\n+ && rm -rf /var/lib/apt/lists/*\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "images/benchmarks/httpd/Dockerfile",
"diff": "+FROM ubuntu:18.04\n+\n+RUN set -x \\\n+ && apt-get update \\\n+ && apt-get install -y \\\n+ apache2 \\\n+ && rm -rf /var/lib/apt/lists/*\n+\n+# Generate a bunch of relevant files.\n+RUN mkdir -p /local && \\\n+ for size in 1 10 100 1000 1024 10240; do \\\n+ dd if=/dev/zero of=/local/latin${size}k.txt count=${size} bs=1024; \\\n+ done\n+\n+# Rewrite DocumentRoot to point to /tmp/html instead of the default path.\n+RUN sed -i 's/DocumentRoot.*\\/var\\/www\\/html$/DocumentRoot \\/tmp\\/html/' /etc/apache2/sites-enabled/000-default.conf\n+COPY ./apache2-tmpdir.conf /etc/apache2/sites-enabled/apache2-tmpdir.conf\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "images/benchmarks/httpd/apache2-tmpdir.conf",
"diff": "+<Directory /tmp/html/>\n+ Options Indexes FollowSymLinks\n+ AllowOverride None\n+ Require all granted\n+</Directory>\n\\ No newline at end of file\n"
},
{
"change_type": "MODIFY",
"old_path": "test/benchmarks/fs/bazel_test.go",
"new_path": "test/benchmarks/fs/bazel_test.go",
"diff": "@@ -32,6 +32,7 @@ func BenchmarkABSL(b *testing.B) {\nif err != nil {\nb.Fatalf(\"failed to get machine: %v\", err)\n}\n+ defer machine.CleanUp()\n// Dimensions here are clean/dirty cache (do or don't drop caches)\n// and if the mount on which we are compiling is a tmpfs/bind mount.\n"
},
{
"change_type": "MODIFY",
"old_path": "test/benchmarks/harness/machine.go",
"new_path": "test/benchmarks/harness/machine.go",
"diff": "@@ -33,6 +33,9 @@ type Machine interface {\n// Returns IP Address for the machine.\nIPAddress() (net.IP, error)\n+\n+ // CleanUp cleans up this machine.\n+ CleanUp()\n}\n// localMachine describes this machine.\n@@ -62,3 +65,7 @@ func (l *localMachine) IPAddress() (net.IP, error) {\naddr := conn.LocalAddr().(*net.UDPAddr)\nreturn addr.IP, nil\n}\n+\n+// CleanUp implements Machine.CleanUp and does nothing for localMachine.\n+func (*localMachine) CleanUp() {\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "test/benchmarks/network/BUILD",
"new_path": "test/benchmarks/network/BUILD",
"diff": "@@ -11,7 +11,10 @@ go_library(\ngo_test(\nname = \"network_test\",\nsize = \"large\",\n- srcs = [\"iperf_test.go\"],\n+ srcs = [\n+ \"httpd_test.go\",\n+ \"iperf_test.go\",\n+ ],\nlibrary = \":network\",\ntags = [\n# Requires docker and runsc to be configured before test runs.\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "test/benchmarks/network/httpd_test.go",
"diff": "+// Copyright 2020 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+package network\n+\n+import (\n+ \"context\"\n+ \"fmt\"\n+ \"regexp\"\n+ \"strconv\"\n+ \"testing\"\n+\n+ \"gvisor.dev/gvisor/pkg/test/dockerutil\"\n+ \"gvisor.dev/gvisor/test/benchmarks/harness\"\n+)\n+\n+// see Dockerfile '//images/benchmarks/httpd'.\n+var docs = map[string]string{\n+ \"notfound\": \"notfound\",\n+ \"1Kb\": \"latin1k.txt\",\n+ \"10Kb\": \"latin10k.txt\",\n+ \"100Kb\": \"latin100k.txt\",\n+ \"1000Kb\": \"latin1000k.txt\",\n+ \"1Mb\": \"latin1024k.txt\",\n+ \"10Mb\": \"latin10240k.txt\",\n+}\n+\n+// BenchmarkHttpdConcurrency iterates the concurrency argument and tests\n+// how well the runtime under test handles requests in parallel.\n+func BenchmarkHttpdConcurrency(b *testing.B) {\n+ // Grab a machine for the client and server.\n+ clientMachine, err := h.GetMachine()\n+ if err != nil {\n+ b.Fatalf(\"failed to get client: %v\", err)\n+ }\n+ defer clientMachine.CleanUp()\n+\n+ serverMachine, err := h.GetMachine()\n+ if err != nil {\n+ b.Fatalf(\"failed to get server: %v\", err)\n+ }\n+ defer serverMachine.CleanUp()\n+\n+ // The test iterates over client concurrency, so set other parameters.\n+ requests := 1000\n+ concurrency := []int{1, 5, 10, 25}\n+ doc := docs[\"10Kb\"]\n+\n+ for _, c := range concurrency {\n+ b.Run(fmt.Sprintf(\"%dConcurrency\", c), func(b *testing.B) {\n+ runHttpd(b, clientMachine, serverMachine, doc, requests, c)\n+ })\n+ }\n+}\n+\n+// BenchmarkHttpdDocSize iterates over different sized payloads, testing how\n+// well the runtime handles different payload sizes.\n+func BenchmarkHttpdDocSize(b *testing.B) {\n+ clientMachine, err := h.GetMachine()\n+ if err != nil {\n+ b.Fatalf(\"failed to get machine: %v\", err)\n+ }\n+ defer clientMachine.CleanUp()\n+\n+ serverMachine, err := h.GetMachine()\n+ if err != nil {\n+ b.Fatalf(\"failed to get machine: %v\", err)\n+ }\n+ defer serverMachine.CleanUp()\n+\n+ requests := 1000\n+ concurrency := 1\n+\n+ for name, filename := range docs {\n+ b.Run(name, func(b *testing.B) {\n+ runHttpd(b, clientMachine, serverMachine, filename, requests, concurrency)\n+ })\n+ }\n+}\n+\n+// runHttpd runs a single test run.\n+func runHttpd(b *testing.B, clientMachine, serverMachine harness.Machine, doc string, requests, concurrency int) {\n+ b.Helper()\n+\n+ // Grab a container from the server.\n+ ctx := context.Background()\n+ server := serverMachine.GetContainer(ctx, b)\n+ defer server.CleanUp(ctx)\n+\n+ // Copy the docs to /tmp and serve from there.\n+ cmd := \"mkdir -p /tmp/html; cp -r /local /tmp/html/.; apache2 -X\"\n+ port := 80\n+\n+ // Start the server.\n+ server.Spawn(ctx, dockerutil.RunOpts{\n+ Image: \"benchmarks/httpd\",\n+ Ports: []int{port},\n+ Env: []string{\n+ // Standard environmental variables for httpd.\n+ \"APACHE_RUN_DIR=/tmp\",\n+ \"APACHE_RUN_USER=nobody\",\n+ \"APACHE_RUN_GROUP=nogroup\",\n+ \"APACHE_LOG_DIR=/tmp\",\n+ \"APACHE_PID_FILE=/tmp/apache.pid\",\n+ },\n+ }, \"sh\", \"-c\", 
cmd)\n+\n+ ip, err := serverMachine.IPAddress()\n+ if err != nil {\n+ b.Fatalf(\"failed to find server ip: %v\", err)\n+ }\n+\n+ servingPort, err := server.FindPort(ctx, port)\n+ if err != nil {\n+ b.Fatalf(\"failed to find server port %d: %v\", port, err)\n+ }\n+\n+ // Check the server is serving.\n+ harness.WaitUntilServing(ctx, clientMachine, ip, servingPort)\n+\n+ // Grab a client.\n+ client := clientMachine.GetContainer(ctx, b)\n+ defer client.CleanUp(ctx)\n+\n+ path := fmt.Sprintf(\"http://%s:%d/%s\", ip, servingPort, doc)\n+ // See apachebench (ab) for flags.\n+ cmd = fmt.Sprintf(\"ab -n %d -c %d %s\", requests, concurrency, path)\n+\n+ b.ResetTimer()\n+ for i := 0; i < b.N; i++ {\n+ out, err := client.Run(ctx, dockerutil.RunOpts{\n+ Image: \"benchmarks/ab\",\n+ }, \"sh\", \"-c\", cmd)\n+ if err != nil {\n+ b.Fatalf(\"run failed with: %v\", err)\n+ }\n+\n+ b.StopTimer()\n+\n+ // Parse and report custom metrics.\n+ transferRate, err := parseTransferRate(out)\n+ if err != nil {\n+ b.Logf(\"failed to parse transferrate: %v\", err)\n+ }\n+ b.ReportMetric(transferRate*1024, \"transfer_rate\") // Convert from Kb/s to b/s.\n+\n+ latency, err := parseLatency(out)\n+ if err != nil {\n+ b.Logf(\"failed to parse latency: %v\", err)\n+ }\n+ b.ReportMetric(latency/1000, \"mean_latency\") // Convert from ms to s.\n+\n+ reqPerSecond, err := parseRequestsPerSecond(out)\n+ if err != nil {\n+ b.Logf(\"failed to parse requests per second: %v\", err)\n+ }\n+ b.ReportMetric(reqPerSecond, \"requests_per_second\")\n+\n+ b.StartTimer()\n+ }\n+}\n+\n+var transferRateRE = regexp.MustCompile(`Transfer rate:\\s+(\\d+\\.?\\d+?)\\s+\\[Kbytes/sec\\]\\s+received`)\n+\n+// parseTransferRate parses transfer rate from apachebench output.\n+func parseTransferRate(data string) (float64, error) {\n+ match := transferRateRE.FindStringSubmatch(data)\n+ if len(match) < 2 {\n+ return 0, fmt.Errorf(\"failed get bandwidth: %s\", data)\n+ }\n+ return strconv.ParseFloat(match[1], 64)\n+}\n+\n+var latencyRE = regexp.MustCompile(`Total:\\s+\\d+\\s+(\\d+)\\s+(\\d+\\.?\\d+?)\\s+\\d+\\s+\\d+\\s`)\n+\n+// parseLatency parses latency from apachebench output.\n+func parseLatency(data string) (float64, error) {\n+ match := latencyRE.FindStringSubmatch(data)\n+ if len(match) < 2 {\n+ return 0, fmt.Errorf(\"failed get bandwidth: %s\", data)\n+ }\n+ return strconv.ParseFloat(match[1], 64)\n+}\n+\n+var requestsPerSecondRE = regexp.MustCompile(`Requests per second:\\s+(\\d+\\.?\\d+?)\\s+`)\n+\n+// parseRequestsPerSecond parses requests per second from apachebench output.\n+func parseRequestsPerSecond(data string) (float64, error) {\n+ match := requestsPerSecondRE.FindStringSubmatch(data)\n+ if len(match) < 2 {\n+ return 0, fmt.Errorf(\"failed get bandwidth: %s\", data)\n+ }\n+ return strconv.ParseFloat(match[1], 64)\n+}\n+\n+// Sample output from apachebench.\n+const sampleData = `This is ApacheBench, Version 2.3 <$Revision: 1826891 $>\n+Copyright 1996 Adam Twiss, Zeus Technology Ltd, http://www.zeustech.net/\n+Licensed to The Apache Software Foundation, http://www.apache.org/\n+\n+Benchmarking 10.10.10.10 (be patient).....done\n+\n+\n+Server Software: Apache/2.4.38\n+Server Hostname: 10.10.10.10\n+Server Port: 80\n+\n+Document Path: /latin10k.txt\n+Document Length: 210 bytes\n+\n+Concurrency Level: 1\n+Time taken for tests: 0.180 seconds\n+Complete requests: 100\n+Failed requests: 0\n+Non-2xx responses: 100\n+Total transferred: 38800 bytes\n+HTML transferred: 21000 bytes\n+Requests per second: 556.44 [#/sec] (mean)\n+Time per request: 1.797 
[ms] (mean)\n+Time per request: 1.797 [ms] (mean, across all concurrent requests)\n+Transfer rate: 210.84 [Kbytes/sec] received\n+\n+Connection Times (ms)\n+ min mean[+/-sd] median max\n+Connect: 0 0 0.2 0 2\n+Processing: 1 2 1.0 1 8\n+Waiting: 1 1 1.0 1 7\n+Total: 1 2 1.2 1 10\n+\n+Percentage of the requests served within a certain time (ms)\n+ 50% 1\n+ 66% 2\n+ 75% 2\n+ 80% 2\n+ 90% 2\n+ 95% 3\n+ 98% 7\n+ 99% 10\n+ 100% 10 (longest request)`\n+\n+// TestParsers checks the parsers work.\n+func TestParsers(t *testing.T) {\n+ want := 210.84\n+ got, err := parseTransferRate(sampleData)\n+ if err != nil {\n+ t.Fatalf(\"failed to parse transfer rate with error: %v\", err)\n+ } else if got != want {\n+ t.Fatalf(\"parseTransferRate got: %f, want: %f\", got, want)\n+ }\n+\n+ want = 2.0\n+ got, err = parseLatency(sampleData)\n+ if err != nil {\n+ t.Fatalf(\"failed to parse transfer rate with error: %v\", err)\n+ } else if got != want {\n+ t.Fatalf(\"parseLatency got: %f, want: %f\", got, want)\n+ }\n+\n+ want = 556.44\n+ got, err = parseRequestsPerSecond(sampleData)\n+ if err != nil {\n+ t.Fatalf(\"failed to parse transfer rate with error: %v\", err)\n+ } else if got != want {\n+ t.Fatalf(\"parseRequestsPerSecond got: %f, want: %f\", got, want)\n+ }\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "test/benchmarks/network/iperf_test.go",
"new_path": "test/benchmarks/network/iperf_test.go",
"diff": "@@ -35,11 +35,13 @@ func BenchmarkIperf(b *testing.B) {\nif err != nil {\nb.Fatalf(\"failed to get machine: %v\", err)\n}\n+ defer clientMachine.CleanUp()\nserverMachine, err := h.GetMachine()\nif err != nil {\nb.Fatalf(\"failed to get machine: %v\", err)\n}\n+ defer serverMachine.CleanUp()\nfor _, bm := range []struct {\nname string\n@@ -111,7 +113,7 @@ func BenchmarkIperf(b *testing.B) {\nif err != nil {\nb.Fatalf(\"failed to parse bandwitdth from %s: %v\", out, err)\n}\n- b.ReportMetric(bW, \"KBytes/sec\")\n+ b.ReportMetric(bW*1024, \"bandwidth\") // Convert from Kb/s to b/s.\nb.StartTimer()\n}\n})\n"
}
] | Go | Apache License 2.0 | google/gvisor | Port httpd benchmark
PiperOrigin-RevId: 321478001 |
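
The benchmark above turns apachebench's text output into custom Go benchmark metrics. The sketch below isolates the two pieces involved, a regexp extraction and testing.B.ReportMetric, against a hard-coded sample line; it is illustrative only and not part of the test package, though the regexp is the same requests-per-second pattern used in the diff.

```go
package bench

import (
	"fmt"
	"regexp"
	"strconv"
	"testing"
)

var requestsPerSecondRE = regexp.MustCompile(`Requests per second:\s+(\d+\.?\d+?)\s+`)

// parseRequestsPerSecond extracts the requests-per-second figure from
// apachebench-style output.
func parseRequestsPerSecond(out string) (float64, error) {
	m := requestsPerSecondRE.FindStringSubmatch(out)
	if len(m) < 2 {
		return 0, fmt.Errorf("no requests-per-second line in %q", out)
	}
	return strconv.ParseFloat(m[1], 64)
}

// BenchmarkParseSketch reports the parsed figure as a custom metric; in the
// real benchmark the string comes from running ab inside a container.
func BenchmarkParseSketch(b *testing.B) {
	const sample = "Requests per second:    556.44 [#/sec] (mean)"
	for i := 0; i < b.N; i++ {
		r, err := parseRequestsPerSecond(sample)
		if err != nil {
			b.Fatal(err)
		}
		b.ReportMetric(r, "requests_per_second")
	}
}
```
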
260,004 | 16.07.2020 12:25:02 | 25,200 | c66991ad7de68fd629a1620acad0c8eec2744bac | Add ethernet broadcast address constant | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/header/eth.go",
"new_path": "pkg/tcpip/header/eth.go",
"diff": "@@ -53,6 +53,10 @@ const (\n// (all bits set to 0).\nunspecifiedEthernetAddress = tcpip.LinkAddress(\"\\x00\\x00\\x00\\x00\\x00\\x00\")\n+ // EthernetBroadcastAddress is an ethernet address that addresses every node\n+ // on a local link.\n+ EthernetBroadcastAddress = tcpip.LinkAddress(\"\\xff\\xff\\xff\\xff\\xff\\xff\")\n+\n// unicastMulticastFlagMask is the mask of the least significant bit in\n// the first octet (in network byte order) of an ethernet address that\n// determines whether the ethernet address is a unicast or multicast. If\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/arp/arp.go",
"new_path": "pkg/tcpip/network/arp/arp.go",
"diff": "@@ -162,7 +162,7 @@ func (*protocol) LinkAddressProtocol() tcpip.NetworkProtocolNumber {\n// LinkAddressRequest implements stack.LinkAddressResolver.LinkAddressRequest.\nfunc (*protocol) LinkAddressRequest(addr, localAddr tcpip.Address, linkEP stack.LinkEndpoint) *tcpip.Error {\nr := &stack.Route{\n- RemoteLinkAddress: broadcastMAC,\n+ RemoteLinkAddress: header.EthernetBroadcastAddress,\n}\nhdr := buffer.NewPrependable(int(linkEP.MaxHeaderLength()) + header.ARPSize)\n@@ -181,7 +181,7 @@ func (*protocol) LinkAddressRequest(addr, localAddr tcpip.Address, linkEP stack.\n// ResolveStaticAddress implements stack.LinkAddressResolver.ResolveStaticAddress.\nfunc (*protocol) ResolveStaticAddress(addr tcpip.Address) (tcpip.LinkAddress, bool) {\nif addr == header.IPv4Broadcast {\n- return broadcastMAC, true\n+ return header.EthernetBroadcastAddress, true\n}\nif header.IsV4MulticastAddress(addr) {\nreturn header.EthernetAddressFromMulticastIPv4Address(addr), true\n@@ -216,8 +216,6 @@ func (*protocol) Parse(pkt *stack.PacketBuffer) (proto tcpip.TransportProtocolNu\nreturn 0, false, true\n}\n-var broadcastMAC = tcpip.LinkAddress([]byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff})\n-\n// NewProtocol returns an ARP network protocol.\nfunc NewProtocol() stack.NetworkProtocol {\nreturn &protocol{}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/ipv6/icmp.go",
"new_path": "pkg/tcpip/network/ipv6/icmp.go",
"diff": "@@ -494,8 +494,6 @@ const (\nicmpV6LengthOffset = 25\n)\n-var broadcastMAC = tcpip.LinkAddress([]byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff})\n-\nvar _ stack.LinkAddressResolver = (*protocol)(nil)\n// LinkAddressProtocol implements stack.LinkAddressResolver.\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add ethernet broadcast address constant
PiperOrigin-RevId: 321620517 |
259,907 | 16.07.2020 14:35:39 | 25,200 | e6894cb99fe4c6e8599354bf5bc6a72a79b63fd3 | Port runtime tests to use go_test | [
{
"change_type": "MODIFY",
"old_path": "test/runtimes/BUILD",
"new_path": "test/runtimes/BUILD",
"diff": "-load(\"//test/runtimes:defs.bzl\", \"runtime_test\")\n+load(\"//test/runtimes:defs.bzl\", \"exclude_test\", \"runtime_test\")\npackage(licenses = [\"notice\"])\n+go_binary(\n+ name = \"runner\",\n+ testonly = 1,\n+ srcs = [\"runner.go\"],\n+ visibility = [\"//test/runtimes:__pkg__\"],\n+ deps = [\n+ \"//pkg/log\",\n+ \"//pkg/test/dockerutil\",\n+ \"//pkg/test/testutil\",\n+ ],\n+)\n+\nruntime_test(\nname = \"go1.12\",\n- exclude_file = \"exclude_go1.12.csv\",\n+ exclude_file = \"exclude/go1.12.csv\",\nlang = \"go\",\nshard_count = 5,\n)\nruntime_test(\nname = \"java11\",\n- exclude_file = \"exclude_java11.csv\",\n+ exclude_file = \"exclude/java11.csv\",\nlang = \"java\",\n- shard_count = 10,\n+ shard_count = 5,\n)\nruntime_test(\nname = \"nodejs12.4.0\",\n- exclude_file = \"exclude_nodejs12.4.0.csv\",\n+ exclude_file = \"exclude/nodejs12.4.0.csv\",\nlang = \"nodejs\",\nshard_count = 5,\n)\nruntime_test(\nname = \"php7.3.6\",\n- exclude_file = \"exclude_php7.3.6.csv\",\n+ exclude_file = \"exclude/php7.3.6.csv\",\nlang = \"php\",\nshard_count = 5,\n)\nruntime_test(\nname = \"python3.7.3\",\n- exclude_file = \"exclude_python3.7.3.csv\",\n+ exclude_file = \"exclude/python3.7.3.csv\",\nlang = \"python\",\nshard_count = 5,\n)\n+\n+go_test(\n+ name = \"exclude_test\",\n+ size = \"small\",\n+ srcs = [\"exclude_test.go\"],\n+ library = \":runner\",\n+)\n+\n+exclude_test(\n+ name = \"go\",\n+ exclude_file = \"exclude/go1.12.csv\",\n+)\n+\n+exclude_test(\n+ name = \"java\",\n+ exclude_file = \"exclude/java11.csv\",\n+)\n+\n+exclude_test(\n+ name = \"nodejs\",\n+ exclude_file = \"exclude/nodejs12.4.0.csv\",\n+)\n+\n+exclude_test(\n+ name = \"php\",\n+ exclude_file = \"exclude/php7.3.6.csv\",\n+)\n+\n+exclude_test(\n+ name = \"python\",\n+ exclude_file = \"exclude/python3.7.3.csv\",\n+)\n"
},
{
"change_type": "MODIFY",
"old_path": "test/runtimes/defs.bzl",
"new_path": "test/runtimes/defs.bzl",
"diff": "load(\"//tools:defs.bzl\", \"go_test\")\n-def _runtime_test_impl(ctx):\n- # Construct arguments.\n+def runtime_test(name, lang, exclude_file, **kwargs):\n+ go_test(\n+ name = name,\n+ srcs = [\"runner.go\"],\nargs = [\n\"--lang\",\n- ctx.attr.lang,\n+ lang,\n\"--image\",\n- ctx.attr.image,\n- ]\n- if ctx.attr.exclude_file:\n- args += [\n+ name, # Resolved as images/runtimes/%s.\n\"--exclude_file\",\n- ctx.files.exclude_file[0].short_path,\n- ]\n-\n- # Build a runner.\n- runner = ctx.actions.declare_file(\"%s-executer\" % ctx.label.name)\n- runner_content = \"\\n\".join([\n- \"#!/bin/bash\",\n- \"%s %s\\n\" % (ctx.files._runner[0].short_path, \" \".join(args)),\n- ])\n- ctx.actions.write(runner, runner_content, is_executable = True)\n-\n- # Return the runner.\n- return [DefaultInfo(\n- executable = runner,\n- runfiles = ctx.runfiles(\n- files = ctx.files._runner + ctx.files.exclude_file + ctx.files._proctor,\n- collect_default = True,\n- collect_data = True,\n- ),\n- )]\n-\n-_runtime_test = rule(\n- implementation = _runtime_test_impl,\n- attrs = {\n- \"image\": attr.string(\n- mandatory = False,\n- ),\n- \"lang\": attr.string(\n- mandatory = True,\n- ),\n- \"exclude_file\": attr.label(\n- mandatory = False,\n- allow_single_file = True,\n- ),\n- \"_runner\": attr.label(\n- default = \"//test/runtimes/runner:runner\",\n- ),\n- \"_proctor\": attr.label(\n- default = \"//test/runtimes/proctor:proctor\",\n- ),\n- },\n- test = True,\n-)\n-\n-def runtime_test(name, **kwargs):\n- _runtime_test(\n- name = name,\n- image = name, # Resolved as images/runtimes/%s.\n+ \"test/runtimes/\" + exclude_file,\n+ ],\n+ data = [\n+ exclude_file,\n+ \"//test/runtimes/proctor\",\n+ ],\n+ defines_main = 1,\ntags = [\n\"local\",\n\"manual\",\n],\n+ deps = [\n+ \"//pkg/log\",\n+ \"//pkg/test/dockerutil\",\n+ \"//pkg/test/testutil\",\n+ ],\n**kwargs\n)\n"
},
{
"change_type": "RENAME",
"old_path": "test/runtimes/exclude_go1.12.csv",
"new_path": "test/runtimes/exclude/go1.12.csv",
"diff": ""
},
{
"change_type": "RENAME",
"old_path": "test/runtimes/exclude_java11.csv",
"new_path": "test/runtimes/exclude/java11.csv",
"diff": ""
},
{
"change_type": "RENAME",
"old_path": "test/runtimes/exclude_nodejs12.4.0.csv",
"new_path": "test/runtimes/exclude/nodejs12.4.0.csv",
"diff": ""
},
{
"change_type": "RENAME",
"old_path": "test/runtimes/exclude_php7.3.6.csv",
"new_path": "test/runtimes/exclude/php7.3.6.csv",
"diff": ""
},
{
"change_type": "RENAME",
"old_path": "test/runtimes/exclude_python3.7.3.csv",
"new_path": "test/runtimes/exclude/python3.7.3.csv",
"diff": ""
},
{
"change_type": "RENAME",
"old_path": "test/runtimes/runner/exclude_test.go",
"new_path": "test/runtimes/exclude_test.go",
"diff": "@@ -26,7 +26,7 @@ func TestMain(m *testing.M) {\n}\n// Test that the exclude file parses without error.\n-func TestExcludelist(t *testing.T) {\n+func TestExcludeList(t *testing.T) {\nex, err := getExcludes()\nif err != nil {\nt.Fatalf(\"error parsing exclude file: %v\", err)\n"
},
{
"change_type": "RENAME",
"old_path": "test/runtimes/runner/main.go",
"new_path": "test/runtimes/runner.go",
"diff": "@@ -99,7 +99,7 @@ func getTests(ctx context.Context, d *dockerutil.Container, excludes map[string]\n// Get a list of all tests in the image.\nlist, err := d.Exec(ctx, dockerutil.ExecOpts{}, \"/proctor/proctor\", \"--runtime\", *lang, \"--list\")\nif err != nil {\n- return nil, fmt.Errorf(\"docker exec failed: %v\", err)\n+ return nil, fmt.Errorf(\"docker exec failed: %v\\nlogs: %s\", err, list)\n}\n// Calculate a subset of tests to run corresponding to the current\n@@ -166,7 +166,11 @@ func getExcludes() (map[string]struct{}, error) {\nif *excludeFile == \"\" {\nreturn excludes, nil\n}\n- f, err := os.Open(*excludeFile)\n+ path, err := testutil.FindFile(*excludeFile)\n+ if err != nil {\n+ return nil, err\n+ }\n+ f, err := os.Open(path)\nif err != nil {\nreturn nil, err\n}\n"
},
{
"change_type": "DELETE",
"old_path": "test/runtimes/runner/BUILD",
"new_path": null,
"diff": "-load(\"//tools:defs.bzl\", \"go_binary\", \"go_test\")\n-\n-package(licenses = [\"notice\"])\n-\n-go_binary(\n- name = \"runner\",\n- testonly = 1,\n- srcs = [\"main.go\"],\n- visibility = [\"//test/runtimes:__pkg__\"],\n- deps = [\n- \"//pkg/log\",\n- \"//pkg/test/dockerutil\",\n- \"//pkg/test/testutil\",\n- ],\n-)\n-\n-go_test(\n- name = \"exclude_test\",\n- size = \"small\",\n- srcs = [\"exclude_test.go\"],\n- library = \":runner\",\n-)\n"
}
] | Go | Apache License 2.0 | google/gvisor | Port runtime tests to use go_test
PiperOrigin-RevId: 321647645 |
259,962 | 16.07.2020 18:38:28 | 25,200 | dcf6ddc2772b8fcf824f1f48e0281e1cc80b93ea | Add support to return protocol in recvmsg for AF_PACKET.
Updates | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/netstack/netstack.go",
"new_path": "pkg/sentry/socket/netstack/netstack.go",
"diff": "@@ -299,6 +299,7 @@ type socketOpsCommon struct {\n// from Endpoint.\nreadCM tcpip.ControlMessages\nsender tcpip.FullAddress\n+ linkPacketInfo tcpip.LinkPacketInfo\n// sockOptTimestamp corresponds to SO_TIMESTAMP. When true, timestamps\n// of returned messages can be returned via control messages. When\n@@ -447,8 +448,21 @@ func (s *socketOpsCommon) fetchReadView() *syserr.Error {\n}\ns.readView = nil\ns.sender = tcpip.FullAddress{}\n+ s.linkPacketInfo = tcpip.LinkPacketInfo{}\n- v, cms, err := s.Endpoint.Read(&s.sender)\n+ var v buffer.View\n+ var cms tcpip.ControlMessages\n+ var err *tcpip.Error\n+\n+ switch e := s.Endpoint.(type) {\n+ // The ordering of these interfaces matters. The most specific\n+ // interfaces must be specified before the more generic Endpoint\n+ // interface.\n+ case tcpip.PacketEndpoint:\n+ v, cms, err = e.ReadPacket(&s.sender, &s.linkPacketInfo)\n+ case tcpip.Endpoint:\n+ v, cms, err = e.Read(&s.sender)\n+ }\nif err != nil {\natomic.StoreUint32(&s.readViewHasData, 0)\nreturn syserr.TranslateNetstackError(err)\n@@ -2509,6 +2523,10 @@ func (s *socketOpsCommon) nonBlockingRead(ctx context.Context, dst usermem.IOSeq\nvar addrLen uint32\nif isPacket && senderRequested {\naddr, addrLen = ConvertAddress(s.family, s.sender)\n+ switch v := addr.(type) {\n+ case *linux.SockAddrLink:\n+ v.Protocol = htons(uint16(s.linkPacketInfo.Protocol))\n+ }\n}\nif peek {\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/tcpip.go",
"new_path": "pkg/tcpip/tcpip.go",
"diff": "@@ -549,6 +549,25 @@ type Endpoint interface {\nSetOwner(owner PacketOwner)\n}\n+// LinkPacketInfo holds Link layer information for a received packet.\n+//\n+// +stateify savable\n+type LinkPacketInfo struct {\n+ // Protocol is the NetworkProtocolNumber for the packet.\n+ Protocol NetworkProtocolNumber\n+}\n+\n+// PacketEndpoint are additional methods that are only implemented by Packet\n+// endpoints.\n+type PacketEndpoint interface {\n+ // ReadPacket reads a datagram/packet from the endpoint and optionally\n+ // returns the sender and additional LinkPacketInfo.\n+ //\n+ // This method does not block if there is no data pending. It will also\n+ // either return an error or data, never both.\n+ ReadPacket(*FullAddress, *LinkPacketInfo) (buffer.View, ControlMessages, *Error)\n+}\n+\n// EndpointInfo is the interface implemented by each endpoint info struct.\ntype EndpointInfo interface {\n// IsEndpointInfo is an empty method to implement the tcpip.EndpointInfo\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/packet/endpoint.go",
"new_path": "pkg/tcpip/transport/packet/endpoint.go",
"diff": "@@ -45,6 +45,9 @@ type packet struct {\ntimestampNS int64\n// senderAddr is the network address of the sender.\nsenderAddr tcpip.FullAddress\n+ // packetInfo holds additional information like the protocol\n+ // of the packet etc.\n+ packetInfo tcpip.LinkPacketInfo\n}\n// endpoint is the packet socket implementation of tcpip.Endpoint. It is legal\n@@ -151,8 +154,8 @@ func (ep *endpoint) Close() {\n// ModerateRecvBuf implements tcpip.Endpoint.ModerateRecvBuf.\nfunc (ep *endpoint) ModerateRecvBuf(copied int) {}\n-// Read implements tcpip.Endpoint.Read.\n-func (ep *endpoint) Read(addr *tcpip.FullAddress) (buffer.View, tcpip.ControlMessages, *tcpip.Error) {\n+// Read implements tcpip.PacketEndpoint.ReadPacket.\n+func (ep *endpoint) ReadPacket(addr *tcpip.FullAddress, info *tcpip.LinkPacketInfo) (buffer.View, tcpip.ControlMessages, *tcpip.Error) {\nep.rcvMu.Lock()\n// If there's no data to read, return that read would block or that the\n@@ -177,9 +180,18 @@ func (ep *endpoint) Read(addr *tcpip.FullAddress) (buffer.View, tcpip.ControlMes\n*addr = packet.senderAddr\n}\n+ if info != nil {\n+ *info = packet.packetInfo\n+ }\n+\nreturn packet.data.ToView(), tcpip.ControlMessages{HasTimestamp: true, Timestamp: packet.timestampNS}, nil\n}\n+// Read implements tcpip.Endpoint.Read.\n+func (ep *endpoint) Read(addr *tcpip.FullAddress) (buffer.View, tcpip.ControlMessages, *tcpip.Error) {\n+ return ep.ReadPacket(addr, nil)\n+}\n+\nfunc (ep *endpoint) Write(p tcpip.Payloader, opts tcpip.WriteOptions) (int64, <-chan struct{}, *tcpip.Error) {\n// TODO(b/129292371): Implement.\nreturn 0, nil, tcpip.ErrInvalidOptionValue\n@@ -428,12 +440,14 @@ func (ep *endpoint) HandlePacket(nicID tcpip.NICID, localAddr tcpip.LinkAddress,\nNIC: nicID,\nAddr: tcpip.Address(hdr.SourceAddress()),\n}\n+ packet.packetInfo.Protocol = netProto\n} else {\n// Guess the would-be ethernet header.\npacket.senderAddr = tcpip.FullAddress{\nNIC: nicID,\nAddr: tcpip.Address(localAddr),\n}\n+ packet.packetInfo.Protocol = netProto\n}\nif ep.cooked {\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/packet_socket.cc",
"new_path": "test/syscalls/linux/packet_socket.cc",
"diff": "@@ -193,6 +193,7 @@ void ReceiveMessage(int sock, int ifindex) {\nEXPECT_EQ(src.sll_family, AF_PACKET);\nEXPECT_EQ(src.sll_ifindex, ifindex);\nEXPECT_EQ(src.sll_halen, ETH_ALEN);\n+ EXPECT_EQ(ntohs(src.sll_protocol), ETH_P_IP);\n// This came from the loopback device, so the address is all 0s.\nfor (int i = 0; i < src.sll_halen; i++) {\nEXPECT_EQ(src.sll_addr[i], 0);\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/packet_socket_raw.cc",
"new_path": "test/syscalls/linux/packet_socket_raw.cc",
"diff": "@@ -200,6 +200,7 @@ TEST_P(RawPacketTest, Receive) {\nEXPECT_EQ(src.sll_family, AF_PACKET);\nEXPECT_EQ(src.sll_ifindex, GetLoopbackIndex());\nEXPECT_EQ(src.sll_halen, ETH_ALEN);\n+ EXPECT_EQ(ntohs(src.sll_protocol), ETH_P_IP);\n// This came from the loopback device, so the address is all 0s.\nfor (int i = 0; i < src.sll_halen; i++) {\nEXPECT_EQ(src.sll_addr[i], 0);\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add support to return protocol in recvmsg for AF_PACKET.
Updates #173
PiperOrigin-RevId: 321690756 |
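The record above makes packet-socket reads report the link-layer protocol through the returned sender address. A minimal user-space sketch of how that surfaces, written against golang.org/x/sys/unix rather than the C++ test code in the record (Linux-only, needs CAP_NET_RAW; the interface setup and output format are illustrative assumptions, not part of the commit):

```go
// Illustrative only: observe the protocol reported in the sockaddr_ll
// returned by recvfrom on an AF_PACKET socket.
package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/unix"
)

// htons converts to network byte order, as AF_PACKET protocol values expect.
func htons(v uint16) uint16 { return v<<8 | v>>8 }

func main() {
	// A "cooked" (SOCK_DGRAM) packet socket bound to IPv4 traffic.
	fd, err := unix.Socket(unix.AF_PACKET, unix.SOCK_DGRAM, int(htons(unix.ETH_P_IP)))
	if err != nil {
		log.Fatalf("socket(AF_PACKET): %v", err)
	}
	defer unix.Close(fd)

	buf := make([]byte, 65536)
	n, from, err := unix.Recvfrom(fd, buf, 0)
	if err != nil {
		log.Fatalf("recvfrom: %v", err)
	}
	if sll, ok := from.(*unix.SockaddrLinklayer); ok {
		// With the endpoint change above, Protocol carries the network
		// protocol of the received frame (ETH_P_IP here, in network byte
		// order) instead of zero.
		fmt.Printf("read %d bytes: protocol=%#x ifindex=%d\n", n, sll.Protocol, sll.Ifindex)
	}
}
```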
259,907 | 17.07.2020 13:06:34 | 25,200 | 03c30ec634e529eb0f0051b0de03d4811706fe72 | Runtime tests: Download language runtime image | [
{
"change_type": "MODIFY",
"old_path": "scripts/runtime_tests.sh",
"new_path": "scripts/runtime_tests.sh",
"diff": "@@ -22,5 +22,8 @@ if [ ! -v RUNTIME_TEST_NAME ]; then\nexit 1\nfi\n+# Download language runtime image.\n+make -C images/ \"load-runtimes_${RUNTIME_TEST_NAME}\"\n+\ninstall_runsc_for_test runtimes\ntest_runsc \"//test/runtimes:${RUNTIME_TEST_NAME}\"\n"
}
] | Go | Apache License 2.0 | google/gvisor | Runtime tests: Download language runtime image
PiperOrigin-RevId: 321839398 |
259,975 | 17.07.2020 16:13:44 | 25,200 | e3c2bd51a1a970991cce71d6994bb053c546e538 | Move main methods for benchmark packages main package file. | [
{
"change_type": "MODIFY",
"old_path": "test/benchmarks/fs/BUILD",
"new_path": "test/benchmarks/fs/BUILD",
"diff": "@@ -4,7 +4,9 @@ package(licenses = [\"notice\"])\ngo_library(\nname = \"fs\",\n+ testonly = 1,\nsrcs = [\"fs.go\"],\n+ deps = [\"//test/benchmarks/harness\"],\n)\ngo_test(\n@@ -17,8 +19,5 @@ go_test(\n\"local\",\n\"manual\",\n],\n- deps = [\n- \"//pkg/test/dockerutil\",\n- \"//test/benchmarks/harness\",\n- ],\n+ deps = [\"//pkg/test/dockerutil\"],\n)\n"
},
{
"change_type": "MODIFY",
"old_path": "test/benchmarks/fs/bazel_test.go",
"new_path": "test/benchmarks/fs/bazel_test.go",
"diff": "@@ -15,16 +15,12 @@ package fs\nimport (\n\"context\"\n- \"os\"\n\"strings\"\n\"testing\"\n\"gvisor.dev/gvisor/pkg/test/dockerutil\"\n- \"gvisor.dev/gvisor/test/benchmarks/harness\"\n)\n-var h harness.Harness\n-\n// Note: CleanCache versions of this test require running with root permissions.\nfunc BenchmarkABSL(b *testing.B) {\n// Get a machine from the Harness on which to run.\n@@ -97,8 +93,3 @@ func BenchmarkABSL(b *testing.B) {\n})\n}\n}\n-\n-func TestMain(m *testing.M) {\n- h.Init()\n- os.Exit(m.Run())\n-}\n"
},
{
"change_type": "MODIFY",
"old_path": "test/benchmarks/fs/fs.go",
"new_path": "test/benchmarks/fs/fs.go",
"diff": "// Package fs holds benchmarks around filesystem performance.\npackage fs\n+\n+import (\n+ \"os\"\n+ \"testing\"\n+\n+ \"gvisor.dev/gvisor/test/benchmarks/harness\"\n+)\n+\n+var h harness.Harness\n+\n+// TestMain is the main method for package fs.\n+func TestMain(m *testing.M) {\n+ h.Init()\n+ os.Exit(m.Run())\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "test/benchmarks/network/BUILD",
"new_path": "test/benchmarks/network/BUILD",
"diff": "@@ -6,6 +6,7 @@ go_library(\nname = \"network\",\ntestonly = 1,\nsrcs = [\"network.go\"],\n+ deps = [\"//test/benchmarks/harness\"],\n)\ngo_test(\n"
},
{
"change_type": "MODIFY",
"old_path": "test/benchmarks/network/iperf_test.go",
"new_path": "test/benchmarks/network/iperf_test.go",
"diff": "@@ -16,7 +16,6 @@ package network\nimport (\n\"context\"\n\"fmt\"\n- \"os\"\n\"regexp\"\n\"strconv\"\n\"strings\"\n@@ -26,8 +25,6 @@ import (\n\"gvisor.dev/gvisor/test/benchmarks/harness\"\n)\n-var h harness.Harness\n-\nfunc BenchmarkIperf(b *testing.B) {\n// Get two machines\n@@ -144,10 +141,4 @@ TCP window size: 45.0 KByte (default)\nif err != nil || bandwidth != 45900 {\nt.Fatalf(\"failed with: %v and %f\", err, bandwidth)\n}\n-\n-}\n-\n-func TestMain(m *testing.M) {\n- h.Init()\n- os.Exit(m.Run())\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "test/benchmarks/network/network.go",
"new_path": "test/benchmarks/network/network.go",
"diff": "// Package network holds benchmarks around raw network performance.\npackage network\n+\n+import (\n+ \"os\"\n+ \"testing\"\n+\n+ \"gvisor.dev/gvisor/test/benchmarks/harness\"\n+)\n+\n+var h harness.Harness\n+\n+// TestMain is the main method for package network.\n+func TestMain(m *testing.M) {\n+ h.Init()\n+ os.Exit(m.Run())\n+}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Move main methods for benchmark packages main package file.
PiperOrigin-RevId: 321875119 |
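Go permits at most one TestMain per test package, which is why the harness bootstrap above moves into each benchmark package's own file. A stripped-down sketch of the idiom (the harness type here is a stand-in, not the real test/benchmarks/harness package):

```go
// Sketch of the TestMain idiom: a single TestMain per package gates all
// Test*/Benchmark* functions behind shared setup.
package network

import (
	"os"
	"testing"
)

// fakeHarness stands in for the real harness.Harness used in the record.
type fakeHarness struct{ ready bool }

func (h *fakeHarness) Init() { h.ready = true }

var h fakeHarness

func TestMain(m *testing.M) {
	h.Init()         // shared setup runs exactly once for the package
	os.Exit(m.Run()) // then every test and benchmark in the package executes
}
```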
260,003 | 17.07.2020 17:43:32 | 25,200 | 5593320bee3e4ab215f501a723ef3ea92b20cf85 | Update README on cross-building images. | [
{
"change_type": "MODIFY",
"old_path": "images/README.md",
"new_path": "images/README.md",
"diff": "@@ -59,3 +59,12 @@ project.\nThe continuous integration system can either take fine-grained dependencies on\nindividual `push` targets, or ensure all images are up-to-date with a single\n`push-all-images` invocation.\n+\n+## Multi-Arch images\n+\n+By default, the image is built for host architecture. Cross-building can be\n+achieved by specifying `ARCH` variable to make. For example:\n+\n+```\n+$ make ARCH=aarch64 rebuild-default\n+```\n"
}
] | Go | Apache License 2.0 | google/gvisor | Update README on cross-building images.
PiperOrigin-RevId: 321887956 |
259,884 | 17.07.2020 18:26:08 | 25,200 | feb1d3d5a7d9c26ab1533b350a9d6088148641aa | Clean up html on the website.
Fixes some html validation issues.
Fixes links on security basics blog post.
Adds rel=noopener to links with target=_blank and adds a check to
htmlproofer.
Add favicon check to htmlproofer.
Fixes
Fixes | [
{
"change_type": "MODIFY",
"old_path": "g3doc/README.md",
"new_path": "g3doc/README.md",
"diff": "@@ -117,9 +117,7 @@ for more information on filesystem bundles. `runsc` implements multiple commands\nthat perform various functions such as starting, stopping, listing, and querying\nthe status of containers.\n-### Sentry\n-\n-<a name=\"sentry\"></a> <!-- For deep linking. -->\n+### Sentry {#sentry}\nThe Sentry is the largest component of gVisor. It can be thought of as a\napplication kernel. The Sentry implements all the kernel functionality needed by\n@@ -136,9 +134,7 @@ calls it makes. For example, the Sentry is not able to open files directly; file\nsystem operations that extend beyond the sandbox (not internal `/proc` files,\npipes, etc) are sent to the Gofer, described below.\n-### Gofer\n-\n-<a name=\"gofer\"></a> <!-- For deep linking. -->\n+### Gofer {#gofer}\nThe Gofer is a standard host process which is started with each container and\ncommunicates with the Sentry via the [9P protocol][9p] over a socket or shared\n@@ -146,7 +142,7 @@ memory channel. The Sentry process is started in a restricted seccomp container\nwithout access to file system resources. The Gofer mediates all access to the\nthese resources, providing an additional level of isolation.\n-### Application\n+### Application {#application}\nThe application is a normal Linux binary provided to gVisor in an OCI runtime\nbundle. gVisor aims to provide an environment equivalent to Linux v4.4, so\n"
},
{
"change_type": "MODIFY",
"old_path": "images/jekyll/Dockerfile",
"new_path": "images/jekyll/Dockerfile",
"diff": "@@ -10,4 +10,5 @@ RUN gem install \\\njekyll-relative-links:0.6.1 \\\njekyll-feed:0.13.0 \\\njekyll-sitemap:1.4.0\n+COPY checks.rb /checks.rb\nCMD [\"/usr/gem/gems/jekyll-4.0.0/exe/jekyll\", \"build\", \"-t\", \"-s\", \"/input\", \"-d\", \"/output\"]\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "images/jekyll/checks.rb",
"diff": "+#!/usr/local/bin/ruby\n+#\n+# HTMLProofer checks for the gVisor website.\n+#\n+require 'html-proofer'\n+\n+# NoOpenerCheck checks to make sure links with target=_blank include the\n+# rel=noopener attribute.\n+class NoOpenerCheck < ::HTMLProofer::Check\n+ def run\n+ @html.css('a').each do |node|\n+ link = create_element(node)\n+ line = node.line\n+\n+ rel = link.respond_to?(:rel) ? link.rel.split(' ') : []\n+\n+ if link.respond_to?(:target) && link.target == \"_blank\" && !rel.include?(\"noopener\")\n+ return add_issue(\"You should set rel=noopener for links with target=_blank\", line: line)\n+ end\n+ end\n+ end\n+end\n+\n+def main()\n+ options = {\n+ :check_html => true,\n+ :check_favicon => true,\n+ :disable_external => true,\n+ }\n+\n+ HTMLProofer.check_directories(ARGV, options).run\n+end\n+\n+if __FILE__ == $0\n+ main\n+end\n"
},
{
"change_type": "MODIFY",
"old_path": "tools/bazeldefs/defs.bzl",
"new_path": "tools/bazeldefs/defs.bzl",
"diff": "@@ -32,6 +32,9 @@ rbe_platform = native.platform\nrbe_toolchain = native.toolchain\nvdso_linker_option = \"-fuse-ld=gold \"\n+def short_path(path):\n+ return path\n+\ndef proto_library(name, has_services = None, **kwargs):\nnative.proto_library(\nname = name,\n"
},
{
"change_type": "MODIFY",
"old_path": "tools/defs.bzl",
"new_path": "tools/defs.bzl",
"diff": "@@ -7,7 +7,7 @@ change for Google-internal and bazel-compatible rules.\nload(\"//tools/go_stateify:defs.bzl\", \"go_stateify\")\nload(\"//tools/go_marshal:defs.bzl\", \"go_marshal\", \"marshal_deps\", \"marshal_test_deps\")\n-load(\"//tools/bazeldefs:defs.bzl\", _build_test = \"build_test\", _cc_binary = \"cc_binary\", _cc_flags_supplier = \"cc_flags_supplier\", _cc_grpc_library = \"cc_grpc_library\", _cc_library = \"cc_library\", _cc_proto_library = \"cc_proto_library\", _cc_test = \"cc_test\", _cc_toolchain = \"cc_toolchain\", _default_installer = \"default_installer\", _default_net_util = \"default_net_util\", _gazelle = \"gazelle\", _gbenchmark = \"gbenchmark\", _go_binary = \"go_binary\", _go_embed_data = \"go_embed_data\", _go_grpc_and_proto_libraries = \"go_grpc_and_proto_libraries\", _go_library = \"go_library\", _go_path = \"go_path\", _go_proto_library = \"go_proto_library\", _go_test = \"go_test\", _grpcpp = \"grpcpp\", _gtest = \"gtest\", _loopback = \"loopback\", _pkg_deb = \"pkg_deb\", _pkg_tar = \"pkg_tar\", _proto_library = \"proto_library\", _py_binary = \"py_binary\", _py_library = \"py_library\", _py_requirement = \"py_requirement\", _py_test = \"py_test\", _rbe_platform = \"rbe_platform\", _rbe_toolchain = \"rbe_toolchain\", _select_arch = \"select_arch\", _select_system = \"select_system\", _vdso_linker_option = \"vdso_linker_option\")\n+load(\"//tools/bazeldefs:defs.bzl\", _build_test = \"build_test\", _cc_binary = \"cc_binary\", _cc_flags_supplier = \"cc_flags_supplier\", _cc_grpc_library = \"cc_grpc_library\", _cc_library = \"cc_library\", _cc_proto_library = \"cc_proto_library\", _cc_test = \"cc_test\", _cc_toolchain = \"cc_toolchain\", _default_installer = \"default_installer\", _default_net_util = \"default_net_util\", _gazelle = \"gazelle\", _gbenchmark = \"gbenchmark\", _go_binary = \"go_binary\", _go_embed_data = \"go_embed_data\", _go_grpc_and_proto_libraries = \"go_grpc_and_proto_libraries\", _go_library = \"go_library\", _go_path = \"go_path\", _go_proto_library = \"go_proto_library\", _go_test = \"go_test\", _grpcpp = \"grpcpp\", _gtest = \"gtest\", _loopback = \"loopback\", _pkg_deb = \"pkg_deb\", _pkg_tar = \"pkg_tar\", _proto_library = \"proto_library\", _py_binary = \"py_binary\", _py_library = \"py_library\", _py_requirement = \"py_requirement\", _py_test = \"py_test\", _rbe_platform = \"rbe_platform\", _rbe_toolchain = \"rbe_toolchain\", _select_arch = \"select_arch\", _select_system = \"select_system\", _short_path = \"short_path\", _vdso_linker_option = \"vdso_linker_option\")\nload(\"//tools/bazeldefs:platforms.bzl\", _default_platform = \"default_platform\", _platforms = \"platforms\")\nload(\"//tools/bazeldefs:tags.bzl\", \"go_suffixes\")\nload(\"//tools/nogo:defs.bzl\", \"nogo_test\")\n@@ -38,6 +38,7 @@ py_requirement = _py_requirement\npy_test = _py_test\nselect_arch = _select_arch\nselect_system = _select_system\n+short_path = _short_path\nrbe_platform = _rbe_platform\nrbe_toolchain = _rbe_toolchain\nvdso_linker_option = _vdso_linker_option\n"
},
{
"change_type": "MODIFY",
"old_path": "website/BUILD",
"new_path": "website/BUILD",
"diff": "@@ -55,9 +55,7 @@ genrule(\n\"docker run -i --user $$(id -u):$$(id -g) \" +\n\"-v $$(readlink -m $$T/output/_site):/output \" +\n\"gvisor.dev/images/jekyll \" +\n- \"/usr/gem/bin/htmlproofer \" +\n- \"--disable-external \" +\n- \"--check-html \" +\n+ \"ruby /checks.rb \" +\n\"/output && \" +\n\"cp $(location //website/cmd/server) $$T/output/server && \" +\n\"tar -zcf $@ -C $$T/output . && \" +\n"
},
{
"change_type": "MODIFY",
"old_path": "website/_includes/footer.html",
"new_path": "website/_includes/footer.html",
"diff": "<script src=\"https://cdnjs.cloudflare.com/ajax/libs/d3/4.13.0/d3.min.js\" integrity=\"sha256-hYXbQJK4qdJiAeDVjjQ9G0D6A0xLnDQ4eJI9dkm7Fpk=\" crossorigin=\"anonymous\"></script>\n{% if site.analytics %}\n-<script type=\"application/javascript\">\n+<script>\nvar doNotTrack = false;\nif (!doNotTrack) {\nwindow.ga=window.ga||function(){(ga.q=ga.q||[]).push(arguments)};ga.l=+new Date;\n"
},
{
"change_type": "MODIFY",
"old_path": "website/_includes/graph.html",
"new_path": "website/_includes/graph.html",
"diff": "{::nomarkdown}\n{% assign fn = include.id | remove: \" \" | remove: \"-\" | downcase %}\n<figure><a href=\"{{ include.url }}\"><svg id=\"{{ include.id }}\" width=500 height=200 onload=\"render_{{ fn }}()\"><title>{{ include.title }}</title></svg></a></figure>\n-<script type=\"text/javascript\">\n+<script>\nfunction render_{{ fn }}() {\nd3.csv(\"{{ include.url }}\", function(d, i, columns) {\nreturn d; // Transformed below.\n"
},
{
"change_type": "MODIFY",
"old_path": "website/_includes/header-links.html",
"new_path": "website/_includes/header-links.html",
"diff": "<div class=\"container\">\n<div class=\"navbar-brand\">\n<a href=\"/\">\n- <img src=\"/assets/logos/logo_solo_on_dark.svg\" height=\"25px\" class=\"d-inline-block align-top\" style=\"margin-right: 10px;\" alt=\"logo\"/>\n+ <img src=\"/assets/logos/logo_solo_on_dark.svg\" height=\"25\" class=\"d-inline-block align-top\" style=\"margin-right: 10px;\" alt=\"logo\" />\ngVisor\n</a>\n</div>\n"
},
{
"change_type": "MODIFY",
"old_path": "website/_layouts/docs.html",
"new_path": "website/_layouts/docs.html",
"diff": "@@ -47,8 +47,8 @@ categories:\n<h1>{{ page.title }}</h1>\n{% if page.editpath %}\n<p>\n- <a href=\"https://github.com/google/gvisor/edit/master/{{page.editpath}}\" target=\"_blank\"><i class=\"fa fa-edit fa-fw\"></i> Edit this page</a>\n- <a href=\"https://github.com/google/gvisor/issues/new?title={{page.title | url_encode}}\" target=\"_blank\"><i class=\"fab fa-github fa-fw\"></i> Create issue</a>\n+ <a href=\"https://github.com/google/gvisor/edit/master/{{page.editpath}}\" target=\"_blank\" rel=\"noopener\"><i class=\"fa fa-edit fa-fw\"></i> Edit this page</a>\n+ <a href=\"https://github.com/google/gvisor/issues/new?title={{page.title | url_encode}}\" target=\"_blank\" rel=\"noopener\"><i class=\"fab fa-github fa-fw\"></i> Create issue</a>\n</p>\n{% endif %}\n<div class=\"docs-content\">\n"
},
{
"change_type": "MODIFY",
"old_path": "website/blog/2019-11-18-security-basics.md",
"new_path": "website/blog/2019-11-18-security-basics.md",
"diff": "@@ -44,10 +44,10 @@ into it in the next section!\n# Design Principles\n-gVisor was designed with some\n-[common secure design principles](https://www.owasp.org/index.php/Security_by_Design_Principles)\n-in mind: Defense-in-Depth, Principle of Least-Privilege, Attack Surface\n-Reduction and Secure-by-Default[^1].\n+gVisor was designed with some common\n+[secure design](https://en.wikipedia.org/wiki/Secure_by_design) principles in\n+mind: Defense-in-Depth, Principle of Least-Privilege, Attack Surface Reduction\n+and Secure-by-Default[^1].\nIn general, Design Principles outline good engineering practices, but in the\ncase of security, they also can be thought of as a set of tactics. In a\n@@ -282,16 +282,23 @@ stable.\n## Notes\n-[^1]: [https://www.owasp.org/index.php/Security_by_Design_Principles](https://www.owasp.org/index.php/Security_by_Design_Principles)\n+[^1]: [https://en.wikipedia.org/wiki/Secure_by_design](https://en.wikipedia.org/wiki/Secure_by_design)\n[^2]: [https://gvisor.dev/docs/architecture_guide](https://gvisor.dev/docs/architecture_guide/)\n[^3]: [https://github.com/google/gvisor/blob/master/pkg/sentry/syscalls/linux/linux64_amd64.go](https://github.com/google/gvisor/blob/master/pkg/sentry/syscalls/syscalls.go)\n-[^4]: Internally that is, it doesn't call to the Host OS to implement them, in\n- fact that is explicitly disallowed, more on that in the future.\n+\n+<!-- mdformat off(mdformat formats this into multiple lines) -->\n+[^4]: Internally that is, it doesn't call to the Host OS to implement them, in fact that is explicitly disallowed, more on that in the future.\n+<!-- mdformat on -->\n+\n[^5]: [https://elixir.bootlin.com/linux/latest/source/arch/x86/entry/syscalls/syscall_64.tbl#L345](https://elixir.bootlin.com/linux/latest/source/arch/x86/entry/syscalls/syscall_64.tbl#L345)\n[^6]: [https://github.com/google/gvisor/tree/master/runsc/boot/filter](https://github.com/google/gvisor/tree/master/runsc/boot/filter)\n[^7]: [https://en.wikipedia.org/wiki/Dirty_COW](https://en.wikipedia.org/wiki/Dirty_COW)\n[^8]: [https://github.com/google/gvisor/blob/master/runsc/boot/config.go](https://github.com/google/gvisor/blob/master/runsc/boot/config.go)\n-[^9]: [https://en.wikipedia.org/wiki/9P_(protocol)](https://en.wikipedia.org/wiki/9P_\\(protocol\\))\n+\n+<!-- mdformat off(mdformat breaks this url by escaping the parenthesis) -->\n+[^9]: [https://en.wikipedia.org/wiki/9P_(protocol)](https://en.wikipedia.org/wiki/9P_(protocol))\n+<!-- mdformat on -->\n+\n[^10]: [https://gvisor.dev/docs/user_guide/networking/#network-passthrough](https://gvisor.dev/docs/user_guide/networking/#network-passthrough)\n[^11]: [https://github.com/google/gvisor/blob/c7e901f47a09eaac56bd4813227edff016fa6bff/pkg/sentry/platform/ptrace/subprocess.go#L390](https://github.com/google/gvisor/blob/c7e901f47a09eaac56bd4813227edff016fa6bff/pkg/sentry/platform/ptrace/subprocess.go#L390)\n[^12]: [https://github.com/google/gvisor/blob/c7e901f47a09eaac56bd4813227edff016fa6bff/pkg/sentry/platform/ring0/kernel_amd64.go#L182](https://github.com/google/gvisor/blob/c7e901f47a09eaac56bd4813227edff016fa6bff/pkg/sentry/platform/ring0/kernel_amd64.go#L182)\n"
},
{
"change_type": "MODIFY",
"old_path": "website/defs.bzl",
"new_path": "website/defs.bzl",
"diff": "\"\"\"Wrappers for website documentation.\"\"\"\n+load(\"//tools:defs.bzl\", \"short_path\")\n+\n# DocInfo is a provider which simple adds sufficient metadata to the source\n# files (and additional data files) so that a jeyll header can be constructed\n# dynamically. This is done the via BUILD system so that the plain\n@@ -29,7 +31,7 @@ def _doc_impl(ctx):\ncategory = ctx.attr.category,\nsubcategory = ctx.attr.subcategory,\nweight = ctx.attr.weight,\n- editpath = ctx.files.src[0].short_path,\n+ editpath = short_path(ctx.files.src[0].short_path),\nauthors = ctx.attr.authors,\n),\n]\n"
}
] | Go | Apache License 2.0 | google/gvisor | Clean up html on the website.
- Fixes some html validation issues.
- Fixes links on security basics blog post.
- Adds rel=noopener to links with target=_blank and adds a check to
htmlproofer.
- Add favicon check to htmlproofer.
Fixes #3286
Fixes #3284
PiperOrigin-RevId: 321892602 |
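The new website check above is Ruby against html-proofer. A rough Go analogue of the same rule, purely illustrative and not part of the website build (it uses golang.org/x/net/html and a simplified attribute check), looks like this:

```go
// Flag <a target="_blank"> links that lack rel="noopener".
package main

import (
	"fmt"
	"strings"

	"golang.org/x/net/html"
)

func checkNoOpener(doc string) []string {
	var issues []string
	root, err := html.Parse(strings.NewReader(doc))
	if err != nil {
		return []string{err.Error()}
	}
	var walk func(*html.Node)
	walk = func(n *html.Node) {
		if n.Type == html.ElementNode && n.Data == "a" {
			var target, rel string
			for _, a := range n.Attr {
				switch a.Key {
				case "target":
					target = a.Val
				case "rel":
					rel = a.Val
				}
			}
			if target == "_blank" && !strings.Contains(" "+rel+" ", " noopener ") {
				issues = append(issues, "link with target=_blank is missing rel=noopener")
			}
		}
		for c := n.FirstChild; c != nil; c = c.NextSibling {
			walk(c)
		}
	}
	walk(root)
	return issues
}

func main() {
	fmt.Println(checkNoOpener(`<a href="https://example.com" target="_blank">x</a>`))
	fmt.Println(checkNoOpener(`<a href="https://example.com" target="_blank" rel="noopener">x</a>`))
}
```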
259,858 | 20.07.2020 18:03:04 | 25,200 | e1a04f84e864b9a5c8a51a7cdd32f8db5377aff1 | Add standard entrypoints for test targets. | [
{
"change_type": "MODIFY",
"old_path": "Makefile",
"new_path": "Makefile",
"diff": "# See the License for the specific language governing permissions and\n# limitations under the License.\n+# Helpful pretty-printer.\n+MAKEBANNER := \\033[1;34mmake\\033[0m\n+submake = echo -e '$(MAKEBANNER) $1' >&2; sh -c '$(MAKE) $1'\n+\n# Described below.\nOPTIONS :=\nSTARTUP_OPTIONS :=\n@@ -85,7 +89,7 @@ endif\n##\ndefine images\n$(1)-%: ## Image tool: $(1) a given image (also may use 'all-images').\n- @$(MAKE) -C images $$@\n+ @$(call submake,-C images $$@)\nendef\nrebuild-...: ## Rebuild the given image. Also may use 'rebuild-all-images'.\n$(eval $(call images,rebuild))\n@@ -96,7 +100,7 @@ $(eval $(call images,push))\nload-...: ## Load (pull or rebuild) the given image. Also may use 'load-all-images'.\n$(eval $(call images,load))\nlist-images: ## List all available images.\n- @$(MAKE) -C images $$@\n+ @$(call submake, -C images $$@)\n##\n## Canonical build and test targets.\n@@ -106,21 +110,113 @@ list-images: ## List all available images.\n## new subsystem or workflow, consider adding a new target here.\n##\nrunsc: ## Builds the runsc binary.\n- @$(MAKE) build TARGETS=\"//runsc\"\n+ @$(call submake,build OPTIONS=\"-c opt\" TARGETS=\"//runsc\")\n.PHONY: runsc\n+debian: ## Builds the debian packages.\n+ @$(call submake,build OPTIONS=\"-c opt\" TARGETS=\"//runsc:runsc-debian\")\n+.PHONY: debian\n+\nsmoke-test: ## Runs a simple smoke test after build runsc.\n- @$(MAKE) run DOCKER_PRIVILEGED=\"\" ARGS=\"--alsologtostderr --network none --debug --TESTONLY-unsafe-nonroot=true --rootless do true\"\n+ @$(call submake,run DOCKER_PRIVILEGED=\"\" ARGS=\"--alsologtostderr --network none --debug --TESTONLY-unsafe-nonroot=true --rootless do true\")\n.PHONY: smoke-tests\n-unit-tests: ## Runs all unit tests in pkg runsc and tools.\n- @$(MAKE) test OPTIONS=\"pkg/... runsc/... tools/...\"\n-.PHONY: unit-tests\n+unit-tests: ## Local package unit tests in pkg/..., runsc/, tools/.., etc.\n+ @$(call submake,test TARGETS=\"pkg/... runsc/... tools/... benchmarks/... 
benchmarks/runner:runner_test\")\n-tests: ## Runs all local ptrace system call tests.\n- @$(MAKE) test OPTIONS=\"--test_tag_filters runsc_ptrace test/syscalls/...\"\n+tests: ## Runs all unit tests and syscall tests.\n+tests: unit-tests\n+ @$(call submake,test TARGETS=\"test/syscalls/...\")\n.PHONY: tests\n+\n+integration-tests: ## Run all standard integration tests.\n+integration-tests: docker-tests overlay-tests hostnet-tests swgso-tests\n+integration-tests: do-tests kvm-tests root-tests containerd-tests\n+.PHONY: integration-tests\n+\n+network-tests: ## Run all networking integration tests.\n+network-tests: iptables-tests packetdrill-tests packetimpact-tests\n+.PHONY: network-tests\n+\n+# Standard integration targets.\n+INTEGRATION_TARGETS := //test/image:image_test //test/e2e:integration_test\n+\n+syscall-%-tests:\n+ @$(call submake,test OPTIONS=\"--test_tag_filters runsc_$* test/syscalls/...\")\n+\n+syscall-native-tests:\n+ @$(call submake,test OPTIONS=\"--test_tag_filters native test/syscalls/...\")\n+.PHONY: syscall-native-tests\n+\n+syscall-tests: ## Run all system call tests.\n+syscall-tests: syscall-ptrace-tests syscall-kvm-tests syscall-native-tests\n+.PHONY: syscall-tests\n+\n+%-runtime-tests: load-runtimes_%\n+ @$(call submake,install-test-runtime)\n+ @$(call submake,test-runtime TARGETS=\"//test/runtimes:$*\")\n+\n+do-tests: runsc\n+ @$(call submake,run TARGETS=\"//runsc\" ARGS=\"--rootless do true\")\n+ @$(call submake,run TARGETS=\"//runsc\" ARGS=\"--rootless -network=none do true\")\n+ @$(call submake,sudo TARGETS=\"//runsc\" ARGS=\"do true\")\n+.PHONY: do-tests\n+\n+simple-tests: unit-tests # Compatibility target.\n+.PHONY: simple-tests\n+\n+docker-tests: load-basic-images\n+ @$(call submake,install-test-runtime RUNTIME=\"vfs1\")\n+ @$(call submake,test-runtime RUNTIME=\"vfs1\" TARGETS=\"$(INTEGRATION_TARGETS)\")\n+ @$(call submake,install-test-runtime RUNTIME=\"vfs2\" ARGS=\"--vfs2\")\n+ @$(call submake,test-runtime RUNTIME=\"vfs2\" OPTIONS=\"--test_filter=.*TestHelloWorld\" TARGETS=\"$(INTEGRATION_TARGETS)\")\n+.PHONY: docker-tests\n+\n+overlay-tests: load-basic-images\n+ @$(call submake,install-test-runtime RUNTIME=\"overlay\" ARGS=\"--overlay\")\n+ @$(call submake,test-runtime RUNTIME=\"overlay\" TARGETS=\"$(INTEGRATION_TARGETS)\")\n+.PHONY: overlay-tests\n+\n+swgso-tests: load-basic-images\n+ @$(call submake,install-test-runtime RUNTIME=\"swgso\" ARGS=\"--software-gso=true --gso=false\")\n+ @$(call submake,test-runtime RUNTIME=\"swgso\" TARGETS=\"$(INTEGRATION_TARGETS)\")\n+.PHONY: swgso-tests\n+\n+hostnet-tests: load-basic-images\n+ @$(call submake,install-test-runtime RUNTIME=\"hostnet\" ARGS=\"--network=host\")\n+ @$(call submake,test-runtime RUNTIME=\"hostnet\" OPTIONS=\"--test_arg=-checkpoint=false\" TARGETS=\"$(INTEGRATION_TARGETS)\")\n+.PHONY: hostnet-tests\n+\n+kvm-tests: load-basic-images\n+ @(lsmod | grep -E '^(kvm_intel|kvm_amd)') || sudo modprobe kvm\n+ @if ! 
[[ -w /dev/kvm ]]; then sudo chmod a+rw /dev/kvm; fi\n+ @$(call submake,test TARGETS=\"//pkg/sentry/platform/kvm:kvm_test\")\n+ @$(call submake,install-test-runtime RUNTIME=\"kvm\" ARGS=\"--platform=kvm\")\n+ @$(call submake,test-runtime RUNTIME=\"kvm\" TARGETS=\"$(INTEGRATION_TARGETS)\")\n+.PHONY: kvm-tests\n+\n+iptables-tests: load-iptables\n+ @$(call submake,test-runtime RUNTIME=\"runc\" TARGETS=\"//test/iptables:iptables_test\")\n+ @$(call submake,install-test-runtime RUNTIME=\"iptables\" ARGS=\"--net-raw\")\n+ @$(call submake,test-runtime RUNTIME=\"iptables\" TARGETS=\"//test/iptables:iptables_test\")\n+.PHONY: iptables-tests\n+\n+packetdrill-tests: load-packetdrill\n+ @$(call submake,install-test-runtime RUNTIME=\"packetdrill\")\n+ @$(call submake,test-runtime RUNTIME=\"packetdrill\" TARGETS=\"$(shell $(MAKE) query TARGETS='attr(tags, packetdrill, tests(//...))')\")\n+.PHONY: packetdrill-tests\n+\n+packetimpact-tests: load-packetimpact\n+ @$(call submake,install-test-runtime RUNTIME=\"packetimpact\")\n+ @$(call submake,test-runtime RUNTIME=\"packetimpact\" TARGETS=\"$(shell $(MAKE) query TARGETS='attr(tags, packetimpact, tests(//...))')\")\n+.PHONY: packetimpact-tests\n+\n+root-tests: load-basic-images\n+ @$(call submake,install-test-runtime)\n+ @$(call submake,sudo TARGETS=\"//test/root:root_test\" ARGS=\"-test.v\")\n+.PHONY: test-root\n+\n# Specific containerd version tests.\ncontainerd-test-%: load-basic_alpine load-basic_python load-basic_busybox load-basic_resolv load-basic_httpd install-test-runtime\n@CONTAINERD_VERSION=$* $(MAKE) sudo TARGETS=\"tools/installers:containerd\"\n@@ -154,7 +250,7 @@ WEBSITE_PROJECT := gvisordev\nWEBSITE_REGION := us-central1\nwebsite-build: load-jekyll ## Build the site image locally.\n- @$(MAKE) run TARGETS=\"//website:website\"\n+ @$(call submake,run TARGETS=\"//website:website\")\n.PHONY: website-build\nwebsite-server: website-build ## Run a local server for development.\n@@ -205,8 +301,8 @@ $(RELEASE_KEY):\nrelease: $(RELEASE_KEY) ## Builds a release.\n@mkdir -p $(RELEASE_ROOT)\n@T=$$(mktemp -d /tmp/release.XXXXXX); \\\n- $(MAKE) copy TARGETS=\"runsc\" DESTINATION=$$T && \\\n- $(MAKE) copy TARGETS=\"runsc:runsc-debian\" DESTINATION=$$T && \\\n+ $(call submake,copy TARGETS=\"runsc\" DESTINATION=$$T) && \\\n+ $(call submake,copy TARGETS=\"runsc:runsc-debian\" DESTINATION=$$T) && \\\nNIGHTLY=$(RELEASE_NIGHTLY) tools/make_release.sh $(RELEASE_KEY) $(RELEASE_ROOT) $$T/*; \\\nrc=$$?; rm -rf $$T; exit $$rc\n.PHONY: release\n@@ -229,43 +325,47 @@ tag: ## Creates and pushes a release tag.\n##\nifeq (,$(BRANCH_NAME))\nRUNTIME := runsc\n-RUNTIME_DIR := $(shell dirname $(shell mktemp -u))/runsc\n+RUNTIME_DIR := $(shell dirname $(shell mktemp -u))/$(RUNTIME)\nelse\nRUNTIME := $(BRANCH_NAME)\n-RUNTIME_DIR := $(shell dirname $(shell mktemp -u))/$(BRANCH_NAME)\n+RUNTIME_DIR := $(shell dirname $(shell mktemp -u))/$(RUNTIME)\nendif\nRUNTIME_BIN := $(RUNTIME_DIR)/runsc\nRUNTIME_LOG_DIR := $(RUNTIME_DIR)/logs\nRUNTIME_LOGS := $(RUNTIME_LOG_DIR)/runsc.log.%TEST%.%TIMESTAMP%.%COMMAND%\ndev: ## Installs a set of local runtimes. 
Requires sudo.\n- @$(MAKE) refresh ARGS=\"--net-raw\"\n- @$(MAKE) configure RUNTIME=\"$(RUNTIME)\" ARGS=\"--net-raw\"\n- @$(MAKE) configure RUNTIME=\"$(RUNTIME)-d\" ARGS=\"--net-raw --debug --strace --log-packets\"\n- @$(MAKE) configure RUNTIME=\"$(RUNTIME)-p\" ARGS=\"--net-raw --profile\"\n- @$(MAKE) configure RUNTIME=\"$(RUNTIME)-vfs2-d\" ARGS=\"--net-raw --debug --strace --log-packets --vfs2\"\n+ @$(call submake,refresh ARGS=\"--net-raw\")\n+ @$(call submake,configure RUNTIME=\"$(RUNTIME)\" ARGS=\"--net-raw\")\n+ @$(call submake,configure RUNTIME=\"$(RUNTIME)-d\" ARGS=\"--net-raw --debug --strace --log-packets\")\n+ @$(call submake,configure RUNTIME=\"$(RUNTIME)-p\" ARGS=\"--net-raw --profile\")\n+ @$(call submake,configure RUNTIME=\"$(RUNTIME)-vfs2-d\" ARGS=\"--net-raw --debug --strace --log-packets --vfs2\")\n@sudo systemctl restart docker\n.PHONY: dev\n-refresh: ## Refreshes the runtime binary (for development only). Must have called 'dev' or 'test-install' first.\n+refresh: ## Refreshes the runtime binary (for development only). Must have called 'dev' or 'install-test-runtime' first.\n@mkdir -p \"$(RUNTIME_DIR)\"\n- @$(MAKE) copy TARGETS=runsc DESTINATION=\"$(RUNTIME_BIN)\"\n+ @$(call submake,copy TARGETS=runsc DESTINATION=\"$(RUNTIME_BIN)\")\n.PHONY: install\ninstall-test-runtime: ## Installs the runtime for testing. Requires sudo.\n- @$(MAKE) refresh ARGS=\"--net-raw --TESTONLY-test-name-env=RUNSC_TEST_NAME --debug --strace --log-packets $(ARGS)\"\n- @$(MAKE) configure RUNTIME=runsc\n- @$(MAKE) configure\n+ @$(call submake,refresh ARGS=\"--net-raw --TESTONLY-test-name-env=RUNSC_TEST_NAME --debug --strace --log-packets $(ARGS)\")\n+ @$(call submake,configure RUNTIME=runsc)\n+ @$(call submake,configure)\n@sudo systemctl restart docker\n+ @if [[ -f /etc/docker/daemon.json ]]; then \\\n+ sudo chmod 0755 /etc/docker && \\\n+ sudo chmod 0644 /etc/docker/daemon.json; \\\n+ fi\n.PHONY: install-test-runtime\n-configure: ## Configures a single runtime. Requires sudo. Typically called from dev or test-install.\n+configure: ## Configures a single runtime. Requires sudo. Typically called from dev or install-test-runtime.\n@sudo sudo \"$(RUNTIME_BIN)\" install --experimental=true --runtime=\"$(RUNTIME)\" -- --debug-log \"$(RUNTIME_LOGS)\" $(ARGS)\n- @echo \"Installed runtime \\\"$(RUNTIME)\\\" @ $(RUNTIME_BIN)\"\n- @echo \"Logs are in: $(RUNTIME_LOG_DIR)\"\n+ @echo -e \"$(INFO) Installed runtime \\\"$(RUNTIME)\\\" @ $(RUNTIME_BIN)\"\n+ @echo -e \"$(INFO) Logs are in: $(RUNTIME_LOG_DIR)\"\n@sudo rm -rf \"$(RUNTIME_LOG_DIR)\" && mkdir -p \"$(RUNTIME_LOG_DIR)\"\n.PHONY: configure\ntest-runtime: ## A convenient wrapper around test that provides the runtime argument. Target must still be provided.\n- @$(MAKE) test OPTIONS=\"$(OPTIONS) --test_arg=--runtime=$(RUNTIME)\"\n-.PHONY: runtime-test\n+ @$(call submake,test OPTIONS=\"$(OPTIONS) --test_output=streamed --test_arg=--runtime=$(RUNTIME)\")\n+.PHONY: test-runtime\n"
},
{
"change_type": "MODIFY",
"old_path": "images/Makefile",
"new_path": "images/Makefile",
"diff": "@@ -34,8 +34,15 @@ list-all-images:\n@for image in $(ALL_IMAGES); do echo $${image}; done\n.PHONY: list-build-images\n+# Handy wrapper to allow load-all-images, push-all-images, etc.\n%-all-images:\n@$(MAKE) $(patsubst %,$*-%,$(ALL_IMAGES))\n+load-all-images:\n+ @$(MAKE) $(patsubst %,load-%,$(ALL_IMAGES))\n+\n+# Handy wrapper to load specified \"groups\", e.g. load-basic-images, etc.\n+load-%-images:\n+ @$(MAKE) $(patsubst %,load-%,$(subst /,_,$(subst ./,,$(shell find ./$* -name Dockerfile -exec dirname {} \\;))))\n# tag is a function that returns the tag name, given an image.\n#\n"
},
{
"change_type": "RENAME",
"old_path": "images/hostoverlaytest/Dockerfile",
"new_path": "images/basic/hostoverlaytest/Dockerfile",
"diff": ""
},
{
"change_type": "RENAME",
"old_path": "images/hostoverlaytest/test.c",
"new_path": "images/basic/hostoverlaytest/test.c",
"diff": ""
},
{
"change_type": "RENAME",
"old_path": "images/hostoverlaytest/testfile.txt",
"new_path": "images/basic/hostoverlaytest/testfile.txt",
"diff": ""
},
{
"change_type": "RENAME",
"old_path": "images/tmpfile/Dockerfile",
"new_path": "images/basic/tmpfile/Dockerfile",
"diff": ""
},
{
"change_type": "MODIFY",
"old_path": "pkg/test/criutil/criutil.go",
"new_path": "pkg/test/criutil/criutil.go",
"diff": "@@ -40,9 +40,9 @@ type Crictl struct {\ncleanup []func()\n}\n-// resolvePath attempts to find binary paths. It may set the path to invalid,\n+// ResolvePath attempts to find binary paths. It may set the path to invalid,\n// which will cause the execution to fail with a sensible error.\n-func resolvePath(executable string) string {\n+func ResolvePath(executable string) string {\nruntime, err := dockerutil.RuntimePath()\nif err == nil {\n// Check first the directory of the runtime itself.\n@@ -230,7 +230,7 @@ func (cc *Crictl) Import(image string) error {\n// be pushing a lot of bytes in order to import the image. The connect\n// timeout stays the same and is inherited from the Crictl instance.\ncmd := testutil.Command(cc.logger,\n- resolvePath(\"ctr\"),\n+ ResolvePath(\"ctr\"),\nfmt.Sprintf(\"--connect-timeout=%s\", 30*time.Second),\nfmt.Sprintf(\"--address=%s\", cc.endpoint),\n\"-n\", \"k8s.io\", \"images\", \"import\", \"-\")\n@@ -358,7 +358,7 @@ func (cc *Crictl) StopPodAndContainer(podID, contID string) error {\n// run runs crictl with the given args.\nfunc (cc *Crictl) run(args ...string) (string, error) {\ndefaultArgs := []string{\n- resolvePath(\"crictl\"),\n+ ResolvePath(\"crictl\"),\n\"--image-endpoint\", fmt.Sprintf(\"unix://%s\", cc.endpoint),\n\"--runtime-endpoint\", fmt.Sprintf(\"unix://%s\", cc.endpoint),\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "test/e2e/integration_test.go",
"new_path": "test/e2e/integration_test.go",
"diff": "@@ -40,6 +40,9 @@ import (\n\"gvisor.dev/gvisor/pkg/test/testutil\"\n)\n+// defaultWait is the default wait time used for tests.\n+const defaultWait = time.Minute\n+\n// httpRequestSucceeds sends a request to a given url and checks that the status is OK.\nfunc httpRequestSucceeds(client http.Client, server string, port int) error {\nurl := fmt.Sprintf(\"http://%s:%d\", server, port)\n@@ -76,10 +79,10 @@ func TestLifeCycle(t *testing.T) {\nif err != nil {\nt.Fatalf(\"docker.FindPort(80) failed: %v\", err)\n}\n- if err := testutil.WaitForHTTP(port, 30*time.Second); err != nil {\n+ if err := testutil.WaitForHTTP(port, defaultWait); err != nil {\nt.Fatalf(\"WaitForHTTP() timeout: %v\", err)\n}\n- client := http.Client{Timeout: time.Duration(2 * time.Second)}\n+ client := http.Client{Timeout: defaultWait}\nif err := httpRequestSucceeds(client, \"localhost\", port); err != nil {\nt.Errorf(\"http request failed: %v\", err)\n}\n@@ -116,12 +119,12 @@ func TestPauseResume(t *testing.T) {\n}\n// Wait until it's up and running.\n- if err := testutil.WaitForHTTP(port, 30*time.Second); err != nil {\n+ if err := testutil.WaitForHTTP(port, defaultWait); err != nil {\nt.Fatalf(\"WaitForHTTP() timeout: %v\", err)\n}\n// Check that container is working.\n- client := http.Client{Timeout: time.Duration(2 * time.Second)}\n+ client := http.Client{Timeout: defaultWait}\nif err := httpRequestSucceeds(client, \"localhost\", port); err != nil {\nt.Error(\"http request failed:\", err)\n}\n@@ -131,6 +134,7 @@ func TestPauseResume(t *testing.T) {\n}\n// Check if container is paused.\n+ client = http.Client{Timeout: 10 * time.Millisecond} // Don't wait a minute.\nswitch _, err := client.Get(fmt.Sprintf(\"http://localhost:%d\", port)); v := err.(type) {\ncase nil:\nt.Errorf(\"http req expected to fail but it succeeded\")\n@@ -147,11 +151,12 @@ func TestPauseResume(t *testing.T) {\n}\n// Wait until it's up and running.\n- if err := testutil.WaitForHTTP(port, 30*time.Second); err != nil {\n+ if err := testutil.WaitForHTTP(port, defaultWait); err != nil {\nt.Fatalf(\"WaitForHTTP() timeout: %v\", err)\n}\n// Check if container is working again.\n+ client = http.Client{Timeout: defaultWait}\nif err := httpRequestSucceeds(client, \"localhost\", port); err != nil {\nt.Error(\"http request failed:\", err)\n}\n@@ -178,12 +183,12 @@ func TestCheckpointRestore(t *testing.T) {\nif err := d.Checkpoint(ctx, \"test\"); err != nil {\nt.Fatalf(\"docker checkpoint failed: %v\", err)\n}\n- if err := d.WaitTimeout(ctx, 30*time.Second); err != nil {\n+ if err := d.WaitTimeout(ctx, defaultWait); err != nil {\nt.Fatalf(\"wait failed: %v\", err)\n}\n// TODO(b/143498576): Remove Poll after github.com/moby/moby/issues/38963 is fixed.\n- if err := testutil.Poll(func() error { return d.Restore(ctx, \"test\") }, 15*time.Second); err != nil {\n+ if err := testutil.Poll(func() error { return d.Restore(ctx, \"test\") }, defaultWait); err != nil {\nt.Fatalf(\"docker restore failed: %v\", err)\n}\n@@ -194,12 +199,12 @@ func TestCheckpointRestore(t *testing.T) {\n}\n// Wait until it's up and running.\n- if err := testutil.WaitForHTTP(port, 30*time.Second); err != nil {\n+ if err := testutil.WaitForHTTP(port, defaultWait); err != nil {\nt.Fatalf(\"WaitForHTTP() timeout: %v\", err)\n}\n// Check if container is working again.\n- client := http.Client{Timeout: time.Duration(2 * time.Second)}\n+ client := http.Client{Timeout: defaultWait}\nif err := httpRequestSucceeds(client, \"localhost\", port); err != nil {\nt.Error(\"http request failed:\", 
err)\n}\n@@ -236,7 +241,7 @@ func TestConnectToSelf(t *testing.T) {\nif want := \"server\\n\"; reply != want {\nt.Errorf(\"Error on server, want: %q, got: %q\", want, reply)\n}\n- if _, err := d.WaitForOutput(ctx, \"^client\\n$\", 1*time.Second); err != nil {\n+ if _, err := d.WaitForOutput(ctx, \"^client\\n$\", defaultWait); err != nil {\nt.Fatalf(\"docker.WaitForOutput(client) timeout: %v\", err)\n}\n}\n@@ -375,7 +380,7 @@ func TestTmpFile(t *testing.T) {\nd := dockerutil.MakeContainer(ctx, t)\ndefer d.CleanUp(ctx)\n- opts := dockerutil.RunOpts{Image: \"tmpfile\"}\n+ opts := dockerutil.RunOpts{Image: \"basic/tmpfile\"}\ngot, err := d.Run(ctx, opts, \"cat\", \"/tmp/foo/file.txt\")\nif err != nil {\nt.Fatalf(\"docker run failed: %v\", err)\n@@ -427,7 +432,7 @@ func TestHostOverlayfsCopyUp(t *testing.T) {\ndefer d.CleanUp(ctx)\nif _, err := d.Run(ctx, dockerutil.RunOpts{\n- Image: \"hostoverlaytest\",\n+ Image: \"basic/hostoverlaytest\",\nWorkDir: \"/root\",\n}, \"./test\"); err != nil {\nt.Fatalf(\"docker run failed: %v\", err)\n"
},
{
"change_type": "MODIFY",
"old_path": "test/image/image_test.go",
"new_path": "test/image/image_test.go",
"diff": "@@ -37,6 +37,13 @@ import (\n\"gvisor.dev/gvisor/pkg/test/testutil\"\n)\n+// defaultWait defines how long to wait for progress.\n+//\n+// See BUILD: This is at least a \"large\" test, so allow up to 1 minute for any\n+// given \"wait\" step. Note that all tests are run in parallel, which may cause\n+// individual slow-downs (but a huge speed-up in aggregate).\n+const defaultWait = time.Minute\n+\nfunc TestHelloWorld(t *testing.T) {\nctx := context.Background()\nd := dockerutil.MakeContainer(ctx, t)\n@@ -130,7 +137,7 @@ func TestHttpd(t *testing.T) {\n}\n// Wait until it's up and running.\n- if err := testutil.WaitForHTTP(port, 30*time.Second); err != nil {\n+ if err := testutil.WaitForHTTP(port, defaultWait); err != nil {\nt.Errorf(\"WaitForHTTP() timeout: %v\", err)\n}\n@@ -159,7 +166,7 @@ func TestNginx(t *testing.T) {\n}\n// Wait until it's up and running.\n- if err := testutil.WaitForHTTP(port, 30*time.Second); err != nil {\n+ if err := testutil.WaitForHTTP(port, defaultWait); err != nil {\nt.Errorf(\"WaitForHTTP() timeout: %v\", err)\n}\n@@ -180,7 +187,7 @@ func TestMysql(t *testing.T) {\n}\n// Wait until it's up and running.\n- if _, err := server.WaitForOutput(ctx, \"port: 3306 MySQL Community Server\", 3*time.Minute); err != nil {\n+ if _, err := server.WaitForOutput(ctx, \"port: 3306 MySQL Community Server\", defaultWait); err != nil {\nt.Fatalf(\"WaitForOutput() timeout: %v\", err)\n}\n@@ -200,7 +207,7 @@ func TestMysql(t *testing.T) {\n}\n// Ensure file executed to the end and shutdown mysql.\n- if _, err := server.WaitForOutput(ctx, \"mysqld: Shutdown complete\", 30*time.Second); err != nil {\n+ if _, err := server.WaitForOutput(ctx, \"mysqld: Shutdown complete\", defaultWait); err != nil {\nt.Fatalf(\"WaitForOutput() timeout: %v\", err)\n}\n}\n@@ -225,7 +232,7 @@ func TestTomcat(t *testing.T) {\n}\n// Wait until it's up and running.\n- if err := testutil.WaitForHTTP(port, 30*time.Second); err != nil {\n+ if err := testutil.WaitForHTTP(port, defaultWait); err != nil {\nt.Fatalf(\"WaitForHTTP() timeout: %v\", err)\n}\n@@ -262,7 +269,7 @@ func TestRuby(t *testing.T) {\n}\n// Wait until it's up and running, 'gem install' can take some time.\n- if err := testutil.WaitForHTTP(port, 1*time.Minute); err != nil {\n+ if err := testutil.WaitForHTTP(port, time.Minute); err != nil {\nt.Fatalf(\"WaitForHTTP() timeout: %v\", err)\n}\n@@ -299,7 +306,7 @@ func TestStdio(t *testing.T) {\n}\nfor _, want := range []string{wantStdout, wantStderr} {\n- if _, err := d.WaitForOutput(ctx, want, 5*time.Second); err != nil {\n+ if _, err := d.WaitForOutput(ctx, want, defaultWait); err != nil {\nt.Fatalf(\"docker didn't get output %q : %v\", want, err)\n}\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "test/packetdrill/defs.bzl",
"new_path": "test/packetdrill/defs.bzl",
"diff": "@@ -26,7 +26,7 @@ def _packetdrill_test_impl(ctx):\ntransitive_files = depset()\nif hasattr(ctx.attr._test_runner, \"data_runfiles\"):\n- transitive_files = depset(ctx.attr._test_runner.data_runfiles.files)\n+ transitive_files = ctx.attr._test_runner.data_runfiles.files\nrunfiles = ctx.runfiles(\nfiles = [test_runner] + ctx.files._init_script + ctx.files.scripts,\ntransitive_files = transitive_files,\n@@ -60,11 +60,15 @@ _packetdrill_test = rule(\nimplementation = _packetdrill_test_impl,\n)\n-_PACKETDRILL_TAGS = [\"local\", \"manual\"]\n+PACKETDRILL_TAGS = [\n+ \"local\",\n+ \"manual\",\n+ \"packetdrill\",\n+]\ndef packetdrill_linux_test(name, **kwargs):\nif \"tags\" not in kwargs:\n- kwargs[\"tags\"] = _PACKETDRILL_TAGS\n+ kwargs[\"tags\"] = PACKETDRILL_TAGS\n_packetdrill_test(\nname = name,\nflags = [\"--dut_platform\", \"linux\"],\n@@ -73,7 +77,7 @@ def packetdrill_linux_test(name, **kwargs):\ndef packetdrill_netstack_test(name, **kwargs):\nif \"tags\" not in kwargs:\n- kwargs[\"tags\"] = _PACKETDRILL_TAGS\n+ kwargs[\"tags\"] = PACKETDRILL_TAGS\n_packetdrill_test(\nname = name,\n# This is the default runtime unless\n"
},
{
"change_type": "MODIFY",
"old_path": "test/packetimpact/runner/defs.bzl",
"new_path": "test/packetimpact/runner/defs.bzl",
"diff": "@@ -55,7 +55,11 @@ _packetimpact_test = rule(\nimplementation = _packetimpact_test_impl,\n)\n-PACKETIMPACT_TAGS = [\"local\", \"manual\"]\n+PACKETIMPACT_TAGS = [\n+ \"local\",\n+ \"manual\",\n+ \"packetimpact\",\n+]\ndef packetimpact_linux_test(\nname,\n@@ -75,7 +79,7 @@ def packetimpact_linux_test(\nname = name + \"_linux_test\",\ntestbench_binary = testbench_binary,\nflags = [\"--dut_platform\", \"linux\"] + expect_failure_flag,\n- tags = PACKETIMPACT_TAGS + [\"packetimpact\"],\n+ tags = PACKETIMPACT_TAGS,\n**kwargs\n)\n@@ -101,7 +105,7 @@ def packetimpact_netstack_test(\n# This is the default runtime unless\n# \"--test_arg=--runtime=OTHER_RUNTIME\" is used to override the value.\nflags = [\"--dut_platform\", \"netstack\", \"--runtime=runsc-d\"] + expect_failure_flag,\n- tags = PACKETIMPACT_TAGS + [\"packetimpact\"],\n+ tags = PACKETIMPACT_TAGS,\n**kwargs\n)\n@@ -121,7 +125,10 @@ def packetimpact_go_test(name, size = \"small\", pure = True, expect_linux_failure\nname = testbench_binary,\nsize = size,\npure = pure,\n- tags = PACKETIMPACT_TAGS,\n+ tags = [\n+ \"local\",\n+ \"manual\",\n+ ],\n**kwargs\n)\npacketimpact_linux_test(\n"
},
{
"change_type": "MODIFY",
"old_path": "test/root/crictl_test.go",
"new_path": "test/root/crictl_test.go",
"diff": "@@ -405,11 +405,8 @@ func setup(t *testing.T, version string) (*criutil.Crictl, func(), error) {\n}\n// We provide the shim, followed by the runtime, and then a\n- // temporary root directory. Note that we can safely assume\n- // that the shim has been installed in the same directory as\n- // the runtime (for test installs and for normal installs).\n- // Since this is v1, the binary name will be fixed.\n- config = fmt.Sprintf(v1Template, path.Join(runtimeDir, \"gvisor-containerd-shim\"), runtime, runtimeDir)\n+ // temporary root directory.\n+ config = fmt.Sprintf(v1Template, criutil.ResolvePath(\"gvisor-containerd-shim\"), runtime, containerdRoot)\ncase v2:\n// This is only supported past 1.2.\nif major < 1 || (major == 1 && minor <= 1) {\n"
},
{
"change_type": "MODIFY",
"old_path": "tools/bazel.mk",
"new_path": "tools/bazel.mk",
"diff": "@@ -23,11 +23,18 @@ BRANCH_NAME := $(shell (git branch --show-current 2>/dev/null || \\\nUSER ?= gvisor\nHASH ?= $(shell readlink -m $(CURDIR) | md5sum | cut -c1-8)\nDOCKER_NAME ?= gvisor-bazel-$(HASH)\n-DOCKER_PRIVILEGED ?= --privileged\n+DOCKER_PRIVILEGED ?= --privileged --network host\nBAZEL_CACHE := $(shell readlink -m ~/.cache/bazel/)\nGCLOUD_CONFIG := $(shell readlink -m ~/.config/gcloud/)\nDOCKER_SOCKET := /var/run/docker.sock\n+# Bazel flags.\n+OPTIONS += --test_output=errors --keep_going --verbose_failures=true\n+ifneq ($(AUTH_CREDENTIALS),)\n+OPTIONS += --auth_credentials=${AUTH_CREDENTIALS} --config=remote\n+endif\n+BAZEL := bazel $(STARTUP_OPTIONS)\n+\n# Non-configurable.\nUID := $(shell id -u ${USER})\nGID := $(shell id -g ${USER})\n@@ -38,6 +45,7 @@ FULL_DOCKER_RUN_OPTIONS += -v \"$(GCLOUD_CONFIG):$(GCLOUD_CONFIG)\"\nFULL_DOCKER_RUN_OPTIONS += -v \"/tmp:/tmp\"\nifneq ($(DOCKER_PRIVILEGED),)\nFULL_DOCKER_RUN_OPTIONS += -v \"$(DOCKER_SOCKET):$(DOCKER_SOCKET)\"\n+FULL_DOCKER_RUN_OPTIONS += $(DOCKER_PRIVILEGED)\nDOCKER_GROUP := $(shell stat -c '%g' $(DOCKER_SOCKET))\nifneq ($(GID),$(DOCKER_GROUP))\nUSERADD_OPTIONS += --groups $(DOCKER_GROUP)\n@@ -63,6 +71,7 @@ SHELL=/bin/bash -o pipefail\nbazel-server-start: load-default ## Starts the bazel server.\n@mkdir -p $(BAZEL_CACHE)\n@mkdir -p $(GCLOUD_CONFIG)\n+ @if docker ps --all | grep $(DOCKER_NAME); then docker rm $(DOCKER_NAME); fi\ndocker run -d --rm \\\n--init \\\n--name $(DOCKER_NAME) \\\n@@ -75,14 +84,14 @@ bazel-server-start: load-default ## Starts the bazel server.\nsh -c \"groupadd --gid $(GID) --non-unique $(USER) && \\\n$(GROUPADD_DOCKER) \\\nuseradd --uid $(UID) --non-unique --no-create-home --gid $(GID) $(USERADD_OPTIONS) -d $(HOME) $(USER) && \\\n- bazel version && \\\n- exec tail --pid=\\$$(bazel info server_pid) -f /dev/null\"\n+ $(BAZEL) version && \\\n+ exec tail --pid=\\$$($(BAZEL) info server_pid) -f /dev/null\"\n@while :; do if docker logs $(DOCKER_NAME) 2>/dev/null | grep \"Build label:\" >/dev/null; then break; fi; \\\n- if ! docker ps | grep $(DOCKER_NAME); then exit 1; else sleep 1; fi; done\n+ if ! docker ps | grep $(DOCKER_NAME); then docker logs $(DOCKER_NAME); exit 1; else sleep 1; fi; done\n.PHONY: bazel-server-start\nbazel-shutdown: ## Shuts down a running bazel server.\n- @docker exec --user $(UID):$(GID) $(DOCKER_NAME) bazel shutdown; rc=$$?; docker kill $(DOCKER_NAME) || [[ $$rc -ne 0 ]]\n+ @docker exec --user $(UID):$(GID) $(DOCKER_NAME) $(BAZEL) shutdown; rc=$$?; docker kill $(DOCKER_NAME) || [[ $$rc -ne 0 ]]\n.PHONY: bazel-shutdown\nbazel-alias: ## Emits an alias that can be used within the shell.\n@@ -93,7 +102,7 @@ bazel-server: ## Ensures that the server exists. 
Used as an internal target.\n@docker exec $(DOCKER_NAME) true || $(MAKE) bazel-server-start\n.PHONY: bazel-server\n-build_cmd = docker exec --user $(UID):$(GID) -i $(DOCKER_NAME) sh -o pipefail -c 'bazel $(STARTUP_OPTIONS) build $(OPTIONS) $(TARGETS)'\n+build_cmd = docker exec --user $(UID):$(GID) -i $(DOCKER_NAME) sh -o pipefail -c '$(BAZEL) $(STARTUP_OPTIONS) build $(OPTIONS) $(TARGETS)'\nbuild_paths = $(build_cmd) 2>&1 \\\n| tee /proc/self/fd/2 \\\n@@ -120,5 +129,9 @@ sudo: bazel-server\n.PHONY: sudo\ntest: bazel-server\n- @docker exec --user $(UID):$(GID) -i $(DOCKER_NAME) bazel $(STARTUP_OPTIONS) test $(OPTIONS) $(TARGETS)\n+ @docker exec --user $(UID):$(GID) -i $(DOCKER_NAME) $(BAZEL) test $(OPTIONS) $(TARGETS)\n.PHONY: test\n+\n+query: bazel-server\n+ @docker exec --user $(UID):$(GID) -i $(DOCKER_NAME) $(BAZEL) query $(OPTIONS) '$(TARGETS)'\n+.PHONY: query\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add standard entrypoints for test targets.
PiperOrigin-RevId: 322265513 |
260,025 | 21.07.2020 19:42:04 | -3,600 | 033fe6d70aa2c6bb64b33668c07eb5db522f9957 | p9: fix `registry.get` ob1 bug | [
{
"change_type": "MODIFY",
"old_path": "pkg/p9/messages.go",
"new_path": "pkg/p9/messages.go",
"diff": "@@ -2506,7 +2506,7 @@ type msgFactory struct {\nvar msgRegistry registry\ntype registry struct {\n- factories [math.MaxUint8]msgFactory\n+ factories [math.MaxUint8 + 1]msgFactory\n// largestFixedSize is computed so that given some message size M, you can\n// compute the maximum payload size (e.g. for Twrite, Rread) with\n"
}
] | Go | Apache License 2.0 | google/gvisor | p9: fix `registry.get` ob1 bug |
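The fix above is a classic off-by-one: 9P message types are 8-bit values, so a lookup table indexed by the type needs math.MaxUint8+1 (256) slots, not math.MaxUint8 (255). A tiny self-contained illustration (not the p9 code itself, just the indexing rule):

```go
package main

import (
	"fmt"
	"math"
)

func main() {
	var truncated [math.MaxUint8]bool // 255 slots: valid indices 0..254
	var full [math.MaxUint8 + 1]bool  // 256 slots: valid indices 0..255

	msgType := uint8(math.MaxUint8) // a hypothetical message type of 255
	full[msgType] = true            // in bounds
	fmt.Println(len(truncated), len(full), full[msgType])
	// truncated[msgType] = true would compile (msgType is not a constant)
	// but panic at runtime with an index-out-of-range error.
}
```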
259,860 | 22.07.2020 15:10:17 | 25,200 | 13c0cca50e061c9b9a3ae8e13e8baa0f29909370 | Skip RawHDRINCL tests that are blocking presubmits/releases.
Temporarily skip these, on bhaskherh@'s advice. | [
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/raw_socket_hdrincl.cc",
"new_path": "test/syscalls/linux/raw_socket_hdrincl.cc",
"diff": "@@ -178,6 +178,9 @@ TEST_F(RawHDRINCL, ConnectToLoopback) {\n}\nTEST_F(RawHDRINCL, SendWithoutConnectSucceeds) {\n+ // FIXME(github.dev/issue/3159): Test currently flaky.\n+ SKIP_IF(true);\n+\nstruct iphdr hdr = LoopbackHeader();\nASSERT_THAT(send(socket_, &hdr, sizeof(hdr), 0),\nSyscallSucceedsWithValue(sizeof(hdr)));\n@@ -281,6 +284,9 @@ TEST_F(RawHDRINCL, SendAndReceive) {\n// Send and receive a packet where the sendto address is not the same as the\n// provided destination.\nTEST_F(RawHDRINCL, SendAndReceiveDifferentAddress) {\n+ // FIXME(github.dev/issue/3160): Test currently flaky.\n+ SKIP_IF(true);\n+\nint port = 40000;\nif (!IsRunningOnGvisor()) {\nport = static_cast<short>(ASSERT_NO_ERRNO_AND_VALUE(\n"
}
] | Go | Apache License 2.0 | google/gvisor | Skip RawHDRINCL tests that are blocking presubmits/releases.
Temporarily skip these, on bhaskherh@'s advice.
PiperOrigin-RevId: 322664955 |
259,907 | 22.07.2020 15:40:44 | 25,200 | 39525d64cbd5901ea80651e323c8554af132f4dd | Add O_APPEND support in vfs2 gofer.
Helps in fixing open syscall tests: AppendConcurrentWrite and AppendOnly.
We also now update the file size for seekable special files (regular files)
which we were not doing earlier.
Updates | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/gofer/regular_file.go",
"new_path": "pkg/sentry/fsimpl/gofer/regular_file.go",
"diff": "@@ -155,26 +155,53 @@ func (fd *regularFileFD) Read(ctx context.Context, dst usermem.IOSequence, opts\n// PWrite implements vfs.FileDescriptionImpl.PWrite.\nfunc (fd *regularFileFD) PWrite(ctx context.Context, src usermem.IOSequence, offset int64, opts vfs.WriteOptions) (int64, error) {\n+ n, _, err := fd.pwrite(ctx, src, offset, opts)\n+ return n, err\n+}\n+\n+// pwrite returns the number of bytes written, final offset, error. The final\n+// offset should be ignored by PWrite.\n+func (fd *regularFileFD) pwrite(ctx context.Context, src usermem.IOSequence, offset int64, opts vfs.WriteOptions) (written, finalOff int64, err error) {\nif offset < 0 {\n- return 0, syserror.EINVAL\n+ return 0, offset, syserror.EINVAL\n}\n// Check that flags are supported.\n//\n// TODO(gvisor.dev/issue/2601): Support select pwritev2 flags.\nif opts.Flags&^linux.RWF_HIPRI != 0 {\n- return 0, syserror.EOPNOTSUPP\n+ return 0, offset, syserror.EOPNOTSUPP\n+ }\n+\n+ d := fd.dentry()\n+ // If the fd was opened with O_APPEND, make sure the file size is updated.\n+ // There is a possible race here if size is modified externally after\n+ // metadata cache is updated.\n+ if fd.vfsfd.StatusFlags()&linux.O_APPEND != 0 && !d.cachedMetadataAuthoritative() {\n+ if err := d.updateFromGetattr(ctx); err != nil {\n+ return 0, offset, err\n+ }\n}\n+ d.metadataMu.Lock()\n+ defer d.metadataMu.Unlock()\n+ // Set offset to file size if the fd was opened with O_APPEND.\n+ if fd.vfsfd.StatusFlags()&linux.O_APPEND != 0 {\n+ // Holding d.metadataMu is sufficient for reading d.size.\n+ offset = int64(d.size)\n+ }\nlimit, err := vfs.CheckLimit(ctx, offset, src.NumBytes())\nif err != nil {\n- return 0, err\n+ return 0, offset, err\n}\nsrc = src.TakeFirst64(limit)\n+ n, err := fd.pwriteLocked(ctx, src, offset, opts)\n+ return n, offset + n, err\n+}\n+// Preconditions: fd.dentry().metatdataMu must be locked.\n+func (fd *regularFileFD) pwriteLocked(ctx context.Context, src usermem.IOSequence, offset int64, opts vfs.WriteOptions) (int64, error) {\nd := fd.dentry()\n- d.metadataMu.Lock()\n- defer d.metadataMu.Unlock()\nif d.fs.opts.interop != InteropModeShared {\n// Compare Linux's mm/filemap.c:__generic_file_write_iter() =>\n// file_update_time(). This is d.touchCMtime(), but without locking\n@@ -237,8 +264,8 @@ func (fd *regularFileFD) PWrite(ctx context.Context, src usermem.IOSequence, off\n// Write implements vfs.FileDescriptionImpl.Write.\nfunc (fd *regularFileFD) Write(ctx context.Context, src usermem.IOSequence, opts vfs.WriteOptions) (int64, error) {\nfd.mu.Lock()\n- n, err := fd.PWrite(ctx, src, fd.off, opts)\n- fd.off += n\n+ n, off, err := fd.pwrite(ctx, src, fd.off, opts)\n+ fd.off = off\nfd.mu.Unlock()\nreturn n, err\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/gofer/special_file.go",
"new_path": "pkg/sentry/fsimpl/gofer/special_file.go",
"diff": "@@ -16,6 +16,7 @@ package gofer\nimport (\n\"sync\"\n+ \"sync/atomic\"\n\"gvisor.dev/gvisor/pkg/abi/linux\"\n\"gvisor.dev/gvisor/pkg/context\"\n@@ -144,7 +145,7 @@ func (fd *specialFileFD) PRead(ctx context.Context, dst usermem.IOSequence, offs\n// mmap due to lock ordering; MM locks precede dentry.dataMu. That doesn't\n// hold here since specialFileFD doesn't client-cache data. Just buffer the\n// read instead.\n- if d := fd.dentry(); d.fs.opts.interop != InteropModeShared {\n+ if d := fd.dentry(); d.cachedMetadataAuthoritative() {\nd.touchAtime(fd.vfsfd.Mount())\n}\nbuf := make([]byte, dst.NumBytes())\n@@ -176,39 +177,76 @@ func (fd *specialFileFD) Read(ctx context.Context, dst usermem.IOSequence, opts\n// PWrite implements vfs.FileDescriptionImpl.PWrite.\nfunc (fd *specialFileFD) PWrite(ctx context.Context, src usermem.IOSequence, offset int64, opts vfs.WriteOptions) (int64, error) {\n+ n, _, err := fd.pwrite(ctx, src, offset, opts)\n+ return n, err\n+}\n+\n+// pwrite returns the number of bytes written, final offset, error. The final\n+// offset should be ignored by PWrite.\n+func (fd *specialFileFD) pwrite(ctx context.Context, src usermem.IOSequence, offset int64, opts vfs.WriteOptions) (written, finalOff int64, err error) {\nif fd.seekable && offset < 0 {\n- return 0, syserror.EINVAL\n+ return 0, offset, syserror.EINVAL\n}\n// Check that flags are supported.\n//\n// TODO(gvisor.dev/issue/2601): Support select pwritev2 flags.\nif opts.Flags&^linux.RWF_HIPRI != 0 {\n- return 0, syserror.EOPNOTSUPP\n+ return 0, offset, syserror.EOPNOTSUPP\n+ }\n+\n+ d := fd.dentry()\n+ // If the regular file fd was opened with O_APPEND, make sure the file size\n+ // is updated. There is a possible race here if size is modified externally\n+ // after metadata cache is updated.\n+ if fd.seekable && fd.vfsfd.StatusFlags()&linux.O_APPEND != 0 && !d.cachedMetadataAuthoritative() {\n+ if err := d.updateFromGetattr(ctx); err != nil {\n+ return 0, offset, err\n+ }\n}\nif fd.seekable {\n+ // We need to hold the metadataMu *while* writing to a regular file.\n+ d.metadataMu.Lock()\n+ defer d.metadataMu.Unlock()\n+\n+ // Set offset to file size if the regular file was opened with O_APPEND.\n+ if fd.vfsfd.StatusFlags()&linux.O_APPEND != 0 {\n+ // Holding d.metadataMu is sufficient for reading d.size.\n+ offset = int64(d.size)\n+ }\nlimit, err := vfs.CheckLimit(ctx, offset, src.NumBytes())\nif err != nil {\n- return 0, err\n+ return 0, offset, err\n}\nsrc = src.TakeFirst64(limit)\n}\n// Do a buffered write. 
See rationale in PRead.\n- if d := fd.dentry(); d.fs.opts.interop != InteropModeShared {\n+ if d.cachedMetadataAuthoritative() {\nd.touchCMtime()\n}\nbuf := make([]byte, src.NumBytes())\n// Don't do partial writes if we get a partial read from src.\nif _, err := src.CopyIn(ctx, buf); err != nil {\n- return 0, err\n+ return 0, offset, err\n}\nn, err := fd.handle.writeFromBlocksAt(ctx, safemem.BlockSeqOf(safemem.BlockFromSafeSlice(buf)), uint64(offset))\nif err == syserror.EAGAIN {\nerr = syserror.ErrWouldBlock\n}\n- return int64(n), err\n+ finalOff = offset\n+ // Update file size for regular files.\n+ if fd.seekable {\n+ finalOff += int64(n)\n+ // d.metadataMu is already locked at this point.\n+ if uint64(finalOff) > d.size {\n+ d.dataMu.Lock()\n+ defer d.dataMu.Unlock()\n+ atomic.StoreUint64(&d.size, uint64(finalOff))\n+ }\n+ }\n+ return int64(n), finalOff, err\n}\n// Write implements vfs.FileDescriptionImpl.Write.\n@@ -218,8 +256,8 @@ func (fd *specialFileFD) Write(ctx context.Context, src usermem.IOSequence, opts\n}\nfd.mu.Lock()\n- n, err := fd.PWrite(ctx, src, fd.off, opts)\n- fd.off += n\n+ n, off, err := fd.pwrite(ctx, src, fd.off, opts)\n+ fd.off = off\nfd.mu.Unlock()\nreturn n, err\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/open.cc",
"new_path": "test/syscalls/linux/open.cc",
"diff": "@@ -235,7 +235,7 @@ TEST_F(OpenTest, AppendOnly) {\nASSERT_NO_ERRNO_AND_VALUE(Open(test_file_name_, O_RDWR | O_APPEND));\nEXPECT_THAT(lseek(fd2.get(), 0, SEEK_CUR), SyscallSucceedsWithValue(0));\n- // Then try to write to the first file and make sure the bytes are appended.\n+ // Then try to write to the first fd and make sure the bytes are appended.\nEXPECT_THAT(WriteFd(fd1.get(), buf.data(), buf.size()),\nSyscallSucceedsWithValue(buf.size()));\n@@ -247,7 +247,7 @@ TEST_F(OpenTest, AppendOnly) {\nEXPECT_THAT(lseek(fd1.get(), 0, SEEK_CUR),\nSyscallSucceedsWithValue(kBufSize * 2));\n- // Then try to write to the second file and make sure the bytes are appended.\n+ // Then try to write to the second fd and make sure the bytes are appended.\nEXPECT_THAT(WriteFd(fd2.get(), buf.data(), buf.size()),\nSyscallSucceedsWithValue(buf.size()));\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add O_APPEND support in vfs2 gofer.
Helps in fixing open syscall tests: AppendConcurrentWrite and AppendOnly.
We also now update the file size for seekable special files (regular files)
which we were not doing earlier.
Updates #2923
PiperOrigin-RevId: 322670843 |
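Editorial note: the gofer change above splits `PWrite` into a `pwrite` helper that returns both the byte count and the final offset, so that `Write` can advance the fd offset correctly when `O_APPEND` redirects the write to end-of-file under `metadataMu`. A toy in-memory sketch of that offset-handling pattern — the type and field names (`appendFile`, `mu`, `offMu`) are hypothetical stand-ins, not the gofer types:

```go
package main

import (
	"fmt"
	"sync"
)

// appendFile is a toy stand-in for a file description.
type appendFile struct {
	mu     sync.Mutex // plays the role of metadataMu
	data   []byte
	append bool // true if the fd was opened with O_APPEND

	offMu sync.Mutex
	off   int64
}

// pwrite returns bytes written and the final offset, so Write can update the
// fd offset even when O_APPEND moved the write to end-of-file.
func (f *appendFile) pwrite(src []byte, offset int64) (written, finalOff int64, err error) {
	f.mu.Lock()
	defer f.mu.Unlock()
	if f.append {
		// Under O_APPEND the effective offset is the current size.
		offset = int64(len(f.data))
	}
	end := offset + int64(len(src))
	if end > int64(len(f.data)) {
		f.data = append(f.data, make([]byte, end-int64(len(f.data)))...)
	}
	copy(f.data[offset:end], src)
	return int64(len(src)), end, nil
}

func (f *appendFile) Write(src []byte) (int64, error) {
	f.offMu.Lock()
	defer f.offMu.Unlock()
	n, off, err := f.pwrite(src, f.off)
	f.off = off // use the returned final offset, not f.off + n
	return n, err
}

func main() {
	f := &appendFile{data: []byte("hello "), append: true}
	f.Write([]byte("world"))
	fmt.Printf("%q off=%d\n", f.data, f.off) // "hello world" off=11
}
```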
259,907 | 22.07.2020 15:44:08 | 25,200 | 9654bf04acad7300e0bf964183f7860bd553f563 | [vfs2][tmpfs] Implement O_APPEND
Updates | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/tmpfs/regular_file.go",
"new_path": "pkg/sentry/fsimpl/tmpfs/regular_file.go",
"diff": "@@ -325,8 +325,15 @@ func (fd *regularFileFD) Read(ctx context.Context, dst usermem.IOSequence, opts\n// PWrite implements vfs.FileDescriptionImpl.PWrite.\nfunc (fd *regularFileFD) PWrite(ctx context.Context, src usermem.IOSequence, offset int64, opts vfs.WriteOptions) (int64, error) {\n+ n, _, err := fd.pwrite(ctx, src, offset, opts)\n+ return n, err\n+}\n+\n+// pwrite returns the number of bytes written, final offset and error. The\n+// final offset should be ignored by PWrite.\n+func (fd *regularFileFD) pwrite(ctx context.Context, src usermem.IOSequence, offset int64, opts vfs.WriteOptions) (written, finalOff int64, err error) {\nif offset < 0 {\n- return 0, syserror.EINVAL\n+ return 0, offset, syserror.EINVAL\n}\n// Check that flags are supported. RWF_DSYNC/RWF_SYNC can be ignored since\n@@ -334,40 +341,44 @@ func (fd *regularFileFD) PWrite(ctx context.Context, src usermem.IOSequence, off\n//\n// TODO(gvisor.dev/issue/2601): Support select preadv2 flags.\nif opts.Flags&^(linux.RWF_HIPRI|linux.RWF_DSYNC|linux.RWF_SYNC) != 0 {\n- return 0, syserror.EOPNOTSUPP\n+ return 0, offset, syserror.EOPNOTSUPP\n}\nsrclen := src.NumBytes()\nif srclen == 0 {\n- return 0, nil\n+ return 0, offset, nil\n}\nf := fd.inode().impl.(*regularFile)\n+ f.inode.mu.Lock()\n+ defer f.inode.mu.Unlock()\n+ // If the file is opened with O_APPEND, update offset to file size.\n+ if fd.vfsfd.StatusFlags()&linux.O_APPEND != 0 {\n+ // Locking f.inode.mu is sufficient for reading f.size.\n+ offset = int64(f.size)\n+ }\nif end := offset + srclen; end < offset {\n// Overflow.\n- return 0, syserror.EINVAL\n+ return 0, offset, syserror.EINVAL\n}\n- var err error\nsrclen, err = vfs.CheckLimit(ctx, offset, srclen)\nif err != nil {\n- return 0, err\n+ return 0, offset, err\n}\nsrc = src.TakeFirst64(srclen)\n- f.inode.mu.Lock()\nrw := getRegularFileReadWriter(f, offset)\nn, err := src.CopyInTo(ctx, rw)\n- fd.inode().touchCMtimeLocked()\n- f.inode.mu.Unlock()\n+ f.inode.touchCMtimeLocked()\nputRegularFileReadWriter(rw)\n- return n, err\n+ return n, n + offset, err\n}\n// Write implements vfs.FileDescriptionImpl.Write.\nfunc (fd *regularFileFD) Write(ctx context.Context, src usermem.IOSequence, opts vfs.WriteOptions) (int64, error) {\nfd.offMu.Lock()\n- n, err := fd.PWrite(ctx, src, fd.off, opts)\n- fd.off += n\n+ n, off, err := fd.pwrite(ctx, src, fd.off, opts)\n+ fd.off = off\nfd.offMu.Unlock()\nreturn n, err\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | [vfs2][tmpfs] Implement O_APPEND
Updates #2923
PiperOrigin-RevId: 322671489 |
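Editorial note: the user-visible behavior that both the gofer and tmpfs changes implement (and that the `AppendOnly`/`AppendConcurrentWrite` tests check) is the standard one: with `O_APPEND`, every write lands at end-of-file regardless of the current fd offset, and the offset is left at the new EOF afterwards. A small host-side Go demo of that semantics, independent of gVisor:

```go
package main

import (
	"fmt"
	"log"
	"os"
	"path/filepath"
)

func main() {
	path := filepath.Join(os.TempDir(), "append-demo.txt")
	defer os.Remove(path)

	if err := os.WriteFile(path, []byte("12345"), 0644); err != nil {
		log.Fatal(err)
	}

	// Open with O_APPEND: writes are positioned at end-of-file no matter
	// where the fd offset currently is.
	f, err := os.OpenFile(path, os.O_WRONLY|os.O_APPEND, 0)
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	if _, err := f.Seek(0, 0); err != nil { // try to rewind
		log.Fatal(err)
	}
	if _, err := f.Write([]byte("ABC")); err != nil {
		log.Fatal(err)
	}

	// The write was appended, and the offset now sits at the new EOF.
	off, _ := f.Seek(0, 1) // SEEK_CUR
	data, _ := os.ReadFile(path)
	fmt.Printf("content=%q offset=%d\n", data, off) // content="12345ABC" offset=8
}
```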
259,891 | 22.07.2020 16:22:06 | 25,200 | bd98f820141208d9f19b0e12dee93f6f6de3ac97 | iptables: replace maps with arrays
For iptables users, Check() is a hot path called for every packet one or more
times. Let's avoid a bunch of map lookups. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/netfilter/netfilter.go",
"new_path": "pkg/sentry/socket/netfilter/netfilter.go",
"diff": "@@ -342,10 +342,10 @@ func SetEntries(stk *stack.Stack, optVal []byte) *syserr.Error {\n// TODO(gvisor.dev/issue/170): Support other tables.\nvar table stack.Table\nswitch replace.Name.String() {\n- case stack.TablenameFilter:\n+ case stack.FilterTable:\ntable = stack.EmptyFilterTable()\n- case stack.TablenameNat:\n- table = stack.EmptyNatTable()\n+ case stack.NATTable:\n+ table = stack.EmptyNATTable()\ndefault:\nnflog(\"we don't yet support writing to the %q table (gvisor.dev/issue/170)\", replace.Name.String())\nreturn syserr.ErrInvalidArgument\n@@ -431,6 +431,8 @@ func SetEntries(stk *stack.Stack, optVal []byte) *syserr.Error {\nfor hook, _ := range replace.HookEntry {\nif table.ValidHooks()&(1<<hook) != 0 {\nhk := hookFromLinux(hook)\n+ table.BuiltinChains[hk] = stack.HookUnset\n+ table.Underflows[hk] = stack.HookUnset\nfor offset, ruleIdx := range offsets {\nif offset == replace.HookEntry[hook] {\ntable.BuiltinChains[hk] = ruleIdx\n@@ -456,8 +458,7 @@ func SetEntries(stk *stack.Stack, optVal []byte) *syserr.Error {\n// Add the user chains.\nfor ruleIdx, rule := range table.Rules {\n- target, ok := rule.Target.(stack.UserChainTarget)\n- if !ok {\n+ if _, ok := rule.Target.(stack.UserChainTarget); !ok {\ncontinue\n}\n@@ -473,7 +474,6 @@ func SetEntries(stk *stack.Stack, optVal []byte) *syserr.Error {\nnflog(\"user chain's first node must have no matchers\")\nreturn syserr.ErrInvalidArgument\n}\n- table.UserChains[target.Name] = ruleIdx + 1\n}\n// Set each jump to point to the appropriate rule. Right now they hold byte\n@@ -499,7 +499,10 @@ func SetEntries(stk *stack.Stack, optVal []byte) *syserr.Error {\n// Since we only support modifying the INPUT, PREROUTING and OUTPUT chain right now,\n// make sure all other chains point to ACCEPT rules.\nfor hook, ruleIdx := range table.BuiltinChains {\n- if hook == stack.Forward || hook == stack.Postrouting {\n+ if hook := stack.Hook(hook); hook == stack.Forward || hook == stack.Postrouting {\n+ if ruleIdx == stack.HookUnset {\n+ continue\n+ }\nif !isUnconditionalAccept(table.Rules[ruleIdx]) {\nnflog(\"hook %d is unsupported.\", hook)\nreturn syserr.ErrInvalidArgument\n@@ -512,9 +515,7 @@ func SetEntries(stk *stack.Stack, optVal []byte) *syserr.Error {\n// - There are no chains without an unconditional final rule.\n// - There are no chains without an unconditional underflow rule.\n- stk.IPTables().ReplaceTable(replace.Name.String(), table)\n-\n- return nil\n+ return syserr.TranslateNetstackError(stk.IPTables().ReplaceTable(replace.Name.String(), table))\n}\n// parseMatchers parses 0 or more matchers from optVal. optVal should contain\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/iptables.go",
"new_path": "pkg/tcpip/stack/iptables.go",
"diff": "@@ -22,22 +22,30 @@ import (\n\"gvisor.dev/gvisor/pkg/tcpip/header\"\n)\n-// Table names.\n+// tableID is an index into IPTables.tables.\n+type tableID int\n+\nconst (\n- TablenameNat = \"nat\"\n- TablenameMangle = \"mangle\"\n- TablenameFilter = \"filter\"\n+ natID tableID = iota\n+ mangleID\n+ filterID\n+ numTables\n)\n-// Chain names as defined by net/ipv4/netfilter/ip_tables.c.\n+// Table names.\nconst (\n- ChainNamePrerouting = \"PREROUTING\"\n- ChainNameInput = \"INPUT\"\n- ChainNameForward = \"FORWARD\"\n- ChainNameOutput = \"OUTPUT\"\n- ChainNamePostrouting = \"POSTROUTING\"\n+ NATTable = \"nat\"\n+ MangleTable = \"mangle\"\n+ FilterTable = \"filter\"\n)\n+// nameToID is immutable.\n+var nameToID = map[string]tableID{\n+ NATTable: natID,\n+ MangleTable: mangleID,\n+ FilterTable: filterID,\n+}\n+\n// HookUnset indicates that there is no hook set for an entrypoint or\n// underflow.\nconst HookUnset = -1\n@@ -48,11 +56,10 @@ const reaperDelay = 5 * time.Second\n// DefaultTables returns a default set of tables. Each chain is set to accept\n// all packets.\nfunc DefaultTables() *IPTables {\n- // TODO(gvisor.dev/issue/170): We may be able to swap out some strings for\n- // iotas.\nreturn &IPTables{\n- tables: map[string]Table{\n- TablenameNat: Table{\n+ tables: [numTables]Table{\n+ // NAT table.\n+ Table{\nRules: []Rule{\nRule{Target: AcceptTarget{}},\nRule{Target: AcceptTarget{}},\n@@ -60,60 +67,70 @@ func DefaultTables() *IPTables {\nRule{Target: AcceptTarget{}},\nRule{Target: ErrorTarget{}},\n},\n- BuiltinChains: map[Hook]int{\n- Prerouting: 0,\n- Input: 1,\n- Output: 2,\n- Postrouting: 3,\n+ BuiltinChains: [NumHooks]int{\n+ 0, // Prerouting.\n+ 1, // Input.\n+ HookUnset, // Forward.\n+ 2, // Output.\n+ 3, // Postrouting.\n},\n- Underflows: map[Hook]int{\n- Prerouting: 0,\n- Input: 1,\n- Output: 2,\n- Postrouting: 3,\n+ Underflows: [NumHooks]int{\n+ 0, // Prerouting.\n+ 1, // Input.\n+ HookUnset, // Forward.\n+ 2, // Output.\n+ 3, // Postrouting.\n},\n- UserChains: map[string]int{},\n},\n- TablenameMangle: Table{\n+ // Mangle table.\n+ Table{\nRules: []Rule{\nRule{Target: AcceptTarget{}},\nRule{Target: AcceptTarget{}},\nRule{Target: ErrorTarget{}},\n},\n- BuiltinChains: map[Hook]int{\n+ BuiltinChains: [NumHooks]int{\nPrerouting: 0,\nOutput: 1,\n},\n- Underflows: map[Hook]int{\n- Prerouting: 0,\n- Output: 1,\n+ Underflows: [NumHooks]int{\n+ 0, // Prerouting.\n+ HookUnset, // Input.\n+ HookUnset, // Forward.\n+ 1, // Output.\n+ HookUnset, // Postrouting.\n},\n- UserChains: map[string]int{},\n},\n- TablenameFilter: Table{\n+ // Filter table.\n+ Table{\nRules: []Rule{\nRule{Target: AcceptTarget{}},\nRule{Target: AcceptTarget{}},\nRule{Target: AcceptTarget{}},\nRule{Target: ErrorTarget{}},\n},\n- BuiltinChains: map[Hook]int{\n- Input: 0,\n- Forward: 1,\n- Output: 2,\n+ BuiltinChains: [NumHooks]int{\n+ HookUnset, // Prerouting.\n+ Input: 0, // Input.\n+ Forward: 1, // Forward.\n+ Output: 2, // Output.\n+ HookUnset, // Postrouting.\n},\n- Underflows: map[Hook]int{\n- Input: 0,\n- Forward: 1,\n- Output: 2,\n+ Underflows: [NumHooks]int{\n+ HookUnset, // Prerouting.\n+ 0, // Input.\n+ 1, // Forward.\n+ 2, // Output.\n+ HookUnset, // Postrouting.\n},\n- UserChains: map[string]int{},\n},\n},\n- priorities: map[Hook][]string{\n- Input: []string{TablenameNat, TablenameFilter},\n- Prerouting: []string{TablenameMangle, TablenameNat},\n- Output: []string{TablenameMangle, TablenameNat, TablenameFilter},\n+ priorities: [NumHooks][]tableID{\n+ []tableID{mangleID, natID}, // Prerouting.\n+ 
[]tableID{natID, filterID}, // Input.\n+ []tableID{}, // Forward.\n+ []tableID{mangleID, natID, filterID}, // Output.\n+ []tableID{}, // Postrouting.\n},\nconnections: ConnTrack{\nseed: generateRandUint32(),\n@@ -127,51 +144,62 @@ func DefaultTables() *IPTables {\nfunc EmptyFilterTable() Table {\nreturn Table{\nRules: []Rule{},\n- BuiltinChains: map[Hook]int{\n- Input: HookUnset,\n- Forward: HookUnset,\n- Output: HookUnset,\n+ BuiltinChains: [NumHooks]int{\n+ HookUnset,\n+ 0,\n+ 0,\n+ 0,\n+ HookUnset,\n},\n- Underflows: map[Hook]int{\n- Input: HookUnset,\n- Forward: HookUnset,\n- Output: HookUnset,\n+ Underflows: [NumHooks]int{\n+ HookUnset,\n+ 0,\n+ 0,\n+ 0,\n+ HookUnset,\n},\n- UserChains: map[string]int{},\n}\n}\n-// EmptyNatTable returns a Table with no rules and the filter table chains\n+// EmptyNATTable returns a Table with no rules and the filter table chains\n// mapped to HookUnset.\n-func EmptyNatTable() Table {\n+func EmptyNATTable() Table {\nreturn Table{\nRules: []Rule{},\n- BuiltinChains: map[Hook]int{\n- Prerouting: HookUnset,\n- Input: HookUnset,\n- Output: HookUnset,\n- Postrouting: HookUnset,\n+ BuiltinChains: [NumHooks]int{\n+ 0,\n+ 0,\n+ HookUnset,\n+ 0,\n+ 0,\n},\n- Underflows: map[Hook]int{\n- Prerouting: HookUnset,\n- Input: HookUnset,\n- Output: HookUnset,\n- Postrouting: HookUnset,\n+ Underflows: [NumHooks]int{\n+ 0,\n+ 0,\n+ HookUnset,\n+ 0,\n+ 0,\n},\n- UserChains: map[string]int{},\n}\n}\n-// GetTable returns table by name.\n+// GetTable returns a table by name.\nfunc (it *IPTables) GetTable(name string) (Table, bool) {\n+ id, ok := nameToID[name]\n+ if !ok {\n+ return Table{}, false\n+ }\nit.mu.RLock()\ndefer it.mu.RUnlock()\n- t, ok := it.tables[name]\n- return t, ok\n+ return it.tables[id], true\n}\n// ReplaceTable replaces or inserts table by name.\n-func (it *IPTables) ReplaceTable(name string, table Table) {\n+func (it *IPTables) ReplaceTable(name string, table Table) *tcpip.Error {\n+ id, ok := nameToID[name]\n+ if !ok {\n+ return tcpip.ErrInvalidOptionValue\n+ }\nit.mu.Lock()\ndefer it.mu.Unlock()\n// If iptables is being enabled, initialize the conntrack table and\n@@ -181,14 +209,8 @@ func (it *IPTables) ReplaceTable(name string, table Table) {\nit.startReaper(reaperDelay)\n}\nit.modified = true\n- it.tables[name] = table\n-}\n-\n-// GetPriorities returns slice of priorities associated with hook.\n-func (it *IPTables) GetPriorities(hook Hook) []string {\n- it.mu.RLock()\n- defer it.mu.RUnlock()\n- return it.priorities[hook]\n+ it.tables[id] = table\n+ return nil\n}\n// A chainVerdict is what a table decides should be done with a packet.\n@@ -226,8 +248,11 @@ func (it *IPTables) Check(hook Hook, pkt *PacketBuffer, gso *GSO, r *Route, addr\nit.connections.handlePacket(pkt, hook, gso, r)\n// Go through each table containing the hook.\n- for _, tablename := range it.GetPriorities(hook) {\n- table, _ := it.GetTable(tablename)\n+ it.mu.RLock()\n+ defer it.mu.RUnlock()\n+ priorities := it.priorities[hook]\n+ for _, tableID := range priorities {\n+ table := it.tables[tableID]\nruleIdx := table.BuiltinChains[hook]\nswitch verdict := it.checkChain(hook, pkt, table, ruleIdx, gso, r, address, nicName); verdict {\n// If the table returns Accept, move on to the next table.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/iptables_types.go",
"new_path": "pkg/tcpip/stack/iptables_types.go",
"diff": "@@ -84,14 +84,14 @@ type IPTables struct {\n// mu protects tables, priorities, and modified.\nmu sync.RWMutex\n- // tables maps table names to tables. User tables have arbitrary names.\n- // mu needs to be locked for accessing.\n- tables map[string]Table\n+ // tables maps tableIDs to tables. Holds builtin tables only, not user\n+ // tables. mu must be locked for accessing.\n+ tables [numTables]Table\n// priorities maps each hook to a list of table names. The order of the\n// list is the order in which each table should be visited for that\n// hook. mu needs to be locked for accessing.\n- priorities map[Hook][]string\n+ priorities [NumHooks][]tableID\n// modified is whether tables have been modified at least once. It is\n// used to elide the iptables performance overhead for workloads that\n@@ -113,23 +113,21 @@ type Table struct {\nRules []Rule\n// BuiltinChains maps builtin chains to their entrypoint rule in Rules.\n- BuiltinChains map[Hook]int\n+ BuiltinChains [NumHooks]int\n// Underflows maps builtin chains to their underflow rule in Rules\n// (i.e. the rule to execute if the chain returns without a verdict).\n- Underflows map[Hook]int\n-\n- // UserChains holds user-defined chains for the keyed by name. Users\n- // can give their chains arbitrary names.\n- UserChains map[string]int\n+ Underflows [NumHooks]int\n}\n// ValidHooks returns a bitmap of the builtin hooks for the given table.\nfunc (table *Table) ValidHooks() uint32 {\nhooks := uint32(0)\n- for hook := range table.BuiltinChains {\n+ for hook, ruleIdx := range table.BuiltinChains {\n+ if ruleIdx != HookUnset {\nhooks |= 1 << hook\n}\n+ }\nreturn hooks\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | iptables: replace maps with arrays
For iptables users, Check() is a hot path called for every packet one or more
times. Let's avoid a bunch of map lookups.
PiperOrigin-RevId: 322678699 |
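Editorial note: the motivation stated in the commit message is that `Check()` runs per packet, so the table/priority lookups should be plain array indexing instead of map lookups. A small standalone sketch of that data-structure swap — the enum names below are illustrative, not the netstack definitions:

```go
package main

import "fmt"

// Small dense enums, suitable as array indices.
type hook int
type tableID int

const (
	prerouting hook = iota
	input
	forward
	output
	postrouting
	numHooks
)

const (
	natID tableID = iota
	mangleID
	filterID
)

func main() {
	// Map-based priorities: every lookup hashes the key.
	prioMap := map[hook][]tableID{
		input:  {natID, filterID},
		output: {mangleID, natID, filterID},
	}

	// Array-based priorities: a lookup is a bounds check plus an index,
	// which is cheaper on a per-packet hot path.
	var prioArr [numHooks][]tableID
	prioArr[input] = []tableID{natID, filterID}
	prioArr[output] = []tableID{mangleID, natID, filterID}

	h := output
	fmt.Println(prioMap[h]) // [1 0 2]
	fmt.Println(prioArr[h]) // [1 0 2]
}
```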
259,907 | 23.07.2020 09:43:05 | 25,200 | 4fbd0728ac5e0aa33a7f5e2c5189751b4baa94b5 | [vfs2][gofer] Fix update attributes race condition.
We were getting the file attributes before locking the metadataMu which was
causing stale updates to the file attributes.
Fixes OpenTest_AppendConcurrentWrite.
Updates | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/gofer/filesystem.go",
"new_path": "pkg/sentry/fsimpl/gofer/filesystem.go",
"diff": "@@ -150,11 +150,9 @@ afterSymlink:\nreturn nil, err\n}\nif d != d.parent && !d.cachedMetadataAuthoritative() {\n- _, attrMask, attr, err := d.parent.file.getAttr(ctx, dentryAttrMask())\n- if err != nil {\n+ if err := d.parent.updateFromGetattr(ctx); err != nil {\nreturn nil, err\n}\n- d.parent.updateFromP9Attrs(attrMask, &attr)\n}\nrp.Advance()\nreturn d.parent, nil\n@@ -209,17 +207,28 @@ func (fs *filesystem) getChildLocked(ctx context.Context, vfsObj *vfs.VirtualFil\n// Preconditions: As for getChildLocked. !parent.isSynthetic().\nfunc (fs *filesystem) revalidateChildLocked(ctx context.Context, vfsObj *vfs.VirtualFilesystem, parent *dentry, name string, child *dentry, ds **[]*dentry) (*dentry, error) {\n+ if child != nil {\n+ // Need to lock child.metadataMu because we might be updating child\n+ // metadata. We need to hold the lock *before* getting metadata from the\n+ // server and release it after updating local metadata.\n+ child.metadataMu.Lock()\n+ }\nqid, file, attrMask, attr, err := parent.file.walkGetAttrOne(ctx, name)\nif err != nil && err != syserror.ENOENT {\n+ if child != nil {\n+ child.metadataMu.Unlock()\n+ }\nreturn nil, err\n}\nif child != nil {\nif !file.isNil() && inoFromPath(qid.Path) == child.ino {\n// The file at this path hasn't changed. Just update cached metadata.\nfile.close(ctx)\n- child.updateFromP9Attrs(attrMask, &attr)\n+ child.updateFromP9AttrsLocked(attrMask, &attr)\n+ child.metadataMu.Unlock()\nreturn child, nil\n}\n+ child.metadataMu.Unlock()\nif file.isNil() && child.isSynthetic() {\n// We have a synthetic file, and no remote file has arisen to\n// replace it.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/gofer/gofer.go",
"new_path": "pkg/sentry/fsimpl/gofer/gofer.go",
"diff": "@@ -785,8 +785,8 @@ func (d *dentry) cachedMetadataAuthoritative() bool {\n// updateFromP9Attrs is called to update d's metadata after an update from the\n// remote filesystem.\n-func (d *dentry) updateFromP9Attrs(mask p9.AttrMask, attr *p9.Attr) {\n- d.metadataMu.Lock()\n+// Precondition: d.metadataMu must be locked.\n+func (d *dentry) updateFromP9AttrsLocked(mask p9.AttrMask, attr *p9.Attr) {\nif mask.Mode {\nif got, want := uint32(attr.Mode.FileType()), d.fileType(); got != want {\nd.metadataMu.Unlock()\n@@ -822,7 +822,6 @@ func (d *dentry) updateFromP9Attrs(mask p9.AttrMask, attr *p9.Attr) {\nif mask.Size {\nd.updateFileSizeLocked(attr.Size)\n}\n- d.metadataMu.Unlock()\n}\n// Preconditions: !d.isSynthetic()\n@@ -834,6 +833,10 @@ func (d *dentry) updateFromGetattr(ctx context.Context) error {\nfile p9file\nhandleMuRLocked bool\n)\n+ // d.metadataMu must be locked *before* we getAttr so that we do not end up\n+ // updating stale attributes in d.updateFromP9AttrsLocked().\n+ d.metadataMu.Lock()\n+ defer d.metadataMu.Unlock()\nd.handleMu.RLock()\nif !d.handle.file.isNil() {\nfile = d.handle.file\n@@ -849,7 +852,7 @@ func (d *dentry) updateFromGetattr(ctx context.Context) error {\nif err != nil {\nreturn err\n}\n- d.updateFromP9Attrs(attrMask, &attr)\n+ d.updateFromP9AttrsLocked(attrMask, &attr)\nreturn nil\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | [vfs2][gofer] Fix update attributes race condition.
We were getting the file attributes before locking the metadataMu which was
causing stale updates to the file attributes.
Fixes OpenTest_AppendConcurrentWrite.
Updates #2923
PiperOrigin-RevId: 322804438 |
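Editorial note: the race described in the commit message is fetch-then-lock — attributes read from the server before taking `metadataMu` can be stale by the time they are stored, overwriting a newer update. A toy sketch of the two orderings; the `cachedMetadata` type and its fields are hypothetical stand-ins used only to show the locking pattern:

```go
package main

import (
	"fmt"
	"sync"
)

// cachedMetadata is a toy stand-in for a dentry's cached attributes.
type cachedMetadata struct {
	mu   sync.Mutex // plays the role of metadataMu
	size uint64
}

// racyUpdate fetches remote attributes *before* locking, so a stale fetch can
// be applied after a newer one (the bug the change above fixes).
func (m *cachedMetadata) racyUpdate(fetch func() uint64) {
	size := fetch() // may be stale by the time the lock is acquired
	m.mu.Lock()
	m.size = size
	m.mu.Unlock()
}

// fixedUpdate holds the lock across fetch-and-store, so concurrent updates
// are applied in a consistent order.
func (m *cachedMetadata) fixedUpdate(fetch func() uint64) {
	m.mu.Lock()
	defer m.mu.Unlock()
	m.size = fetch()
}

func main() {
	var remote uint64 = 1
	fetch := func() uint64 { return remote }

	m := &cachedMetadata{}
	m.fixedUpdate(fetch)
	remote = 2
	m.fixedUpdate(fetch)
	fmt.Println("cached size:", m.size) // 2: never regresses under fixedUpdate
	_ = m.racyUpdate                    // racy variant shown for contrast only
}
```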
259,992 | 23.07.2020 11:00:35 | 25,200 | 384369e01e653e0a2ea976d2063ba19a9c2cbbc8 | Fix fsgofer Open() when control file is using O_PATH
Open tries to reuse the control file to save syscalls and
file descriptors when opening a file. However, when the
control file was opened using O_PATH (e.g. no file permission
to open readonly), Open() would not check for it. | [
{
"change_type": "MODIFY",
"old_path": "runsc/fsgofer/BUILD",
"new_path": "runsc/fsgofer/BUILD",
"diff": "@@ -31,5 +31,6 @@ go_test(\ndeps = [\n\"//pkg/log\",\n\"//pkg/p9\",\n+ \"//pkg/test/testutil\",\n],\n)\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/fsgofer/fsgofer.go",
"new_path": "runsc/fsgofer/fsgofer.go",
"diff": "@@ -132,7 +132,7 @@ func (a *attachPoint) Attach() (p9.File, error) {\nreturn nil, fmt.Errorf(\"attach point already attached, prefix: %s\", a.prefix)\n}\n- f, err := openAnyFile(a.prefix, func(mode int) (*fd.FD, error) {\n+ f, readable, err := openAnyFile(a.prefix, func(mode int) (*fd.FD, error) {\nreturn fd.Open(a.prefix, openFlags|mode, 0)\n})\nif err != nil {\n@@ -144,7 +144,7 @@ func (a *attachPoint) Attach() (p9.File, error) {\nreturn nil, fmt.Errorf(\"unable to stat %q: %v\", a.prefix, err)\n}\n- lf, err := newLocalFile(a, f, a.prefix, stat)\n+ lf, err := newLocalFile(a, f, a.prefix, readable, stat)\nif err != nil {\nreturn nil, fmt.Errorf(\"unable to create localFile %q: %v\", a.prefix, err)\n}\n@@ -212,6 +212,10 @@ type localFile struct {\n// opened with.\nfile *fd.FD\n+ // controlReadable tells whether 'file' was opened with read permissions\n+ // during a walk.\n+ controlReadable bool\n+\n// mode is the mode in which the file was opened. Set to invalidMode\n// if localFile isn't opened.\nmode p9.OpenFlags\n@@ -251,49 +255,57 @@ func reopenProcFd(f *fd.FD, mode int) (*fd.FD, error) {\nreturn fd.New(d), nil\n}\n-func openAnyFileFromParent(parent *localFile, name string) (*fd.FD, string, error) {\n+func openAnyFileFromParent(parent *localFile, name string) (*fd.FD, string, bool, error) {\npath := path.Join(parent.hostPath, name)\n- f, err := openAnyFile(path, func(mode int) (*fd.FD, error) {\n+ f, readable, err := openAnyFile(path, func(mode int) (*fd.FD, error) {\nreturn fd.OpenAt(parent.file, name, openFlags|mode, 0)\n})\n- return f, path, err\n+ return f, path, readable, err\n}\n// openAnyFile attempts to open the file in O_RDONLY and if it fails fallsback\n// to O_PATH. 'path' is used for logging messages only. 'fn' is what does the\n// actual file open and is customizable by the caller.\n-func openAnyFile(path string, fn func(mode int) (*fd.FD, error)) (*fd.FD, error) {\n+func openAnyFile(path string, fn func(mode int) (*fd.FD, error)) (*fd.FD, bool, error) {\n// Attempt to open file in the following mode in order:\n// 1. RDONLY | NONBLOCK: for all files, directories, ro mounts, FIFOs.\n// Use non-blocking to prevent getting stuck inside open(2) for\n// FIFOs. This option has no effect on regular files.\n// 2. PATH: for symlinks, sockets.\n- modes := []int{syscall.O_RDONLY | syscall.O_NONBLOCK, unix.O_PATH}\n+ options := []struct {\n+ mode int\n+ readable bool\n+ }{\n+ {\n+ mode: syscall.O_RDONLY | syscall.O_NONBLOCK,\n+ readable: true,\n+ },\n+ {\n+ mode: unix.O_PATH,\n+ readable: false,\n+ },\n+ }\nvar err error\n+ for i, option := range options {\nvar file *fd.FD\n- for i, mode := range modes {\n- file, err = fn(mode)\n+ file, err = fn(option.mode)\nif err == nil {\n- // openat succeeded, we're done.\n- break\n+ // Succeeded opening the file, we're done.\n+ return file, option.readable, nil\n}\nswitch e := extractErrno(err); e {\ncase syscall.ENOENT:\n// File doesn't exist, no point in retrying.\n- return nil, e\n+ return nil, false, e\n}\n- // openat failed. Try again with next mode, preserving 'err' in case this\n- // was the last attempt.\n- log.Debugf(\"Attempt %d to open file failed, mode: %#x, path: %q, err: %v\", i, openFlags|mode, path, err)\n+ // File failed to open. 
Try again with next mode, preserving 'err' in case\n+ // this was the last attempt.\n+ log.Debugf(\"Attempt %d to open file failed, mode: %#x, path: %q, err: %v\", i, openFlags|option.mode, path, err)\n}\n- if err != nil {\n// All attempts to open file have failed, return the last error.\nlog.Debugf(\"Failed to open file, path: %q, err: %v\", path, err)\n- return nil, extractErrno(err)\n- }\n-\n- return file, nil\n+ return nil, false, extractErrno(err)\n}\nfunc getSupportedFileType(stat syscall.Stat_t, permitSocket bool) (fileType, error) {\n@@ -316,7 +328,7 @@ func getSupportedFileType(stat syscall.Stat_t, permitSocket bool) (fileType, err\nreturn ft, nil\n}\n-func newLocalFile(a *attachPoint, file *fd.FD, path string, stat syscall.Stat_t) (*localFile, error) {\n+func newLocalFile(a *attachPoint, file *fd.FD, path string, readable bool, stat syscall.Stat_t) (*localFile, error) {\nft, err := getSupportedFileType(stat, a.conf.HostUDS)\nif err != nil {\nreturn nil, err\n@@ -328,6 +340,7 @@ func newLocalFile(a *attachPoint, file *fd.FD, path string, stat syscall.Stat_t)\nfile: file,\nmode: invalidMode,\nft: ft,\n+ controlReadable: readable,\n}, nil\n}\n@@ -380,7 +393,7 @@ func (l *localFile) Open(flags p9.OpenFlags) (*fd.FD, p9.QID, uint32, error) {\n// Check if control file can be used or if a new open must be created.\nvar newFile *fd.FD\n- if flags == p9.ReadOnly {\n+ if flags == p9.ReadOnly && l.controlReadable {\nlog.Debugf(\"Open reusing control file, flags: %v, %q\", flags, l.hostPath)\nnewFile = l.file\n} else {\n@@ -518,7 +531,7 @@ func (l *localFile) Mkdir(name string, perm p9.FileMode, uid p9.UID, gid p9.GID)\nfunc (l *localFile) Walk(names []string) ([]p9.QID, p9.File, error) {\n// Duplicate current file if 'names' is empty.\nif len(names) == 0 {\n- newFile, err := openAnyFile(l.hostPath, func(mode int) (*fd.FD, error) {\n+ newFile, readable, err := openAnyFile(l.hostPath, func(mode int) (*fd.FD, error) {\nreturn reopenProcFd(l.file, openFlags|mode)\n})\nif err != nil {\n@@ -536,6 +549,7 @@ func (l *localFile) Walk(names []string) ([]p9.QID, p9.File, error) {\nhostPath: l.hostPath,\nfile: newFile,\nmode: invalidMode,\n+ controlReadable: readable,\n}\nreturn []p9.QID{l.attachPoint.makeQID(stat)}, c, nil\n}\n@@ -543,7 +557,7 @@ func (l *localFile) Walk(names []string) ([]p9.QID, p9.File, error) {\nvar qids []p9.QID\nlast := l\nfor _, name := range names {\n- f, path, err := openAnyFileFromParent(last, name)\n+ f, path, readable, err := openAnyFileFromParent(last, name)\nif last != l {\nlast.Close()\n}\n@@ -555,7 +569,7 @@ func (l *localFile) Walk(names []string) ([]p9.QID, p9.File, error) {\nf.Close()\nreturn nil, nil, extractErrno(err)\n}\n- c, err := newLocalFile(last.attachPoint, f, path, stat)\n+ c, err := newLocalFile(last.attachPoint, f, path, readable, stat)\nif err != nil {\nf.Close()\nreturn nil, nil, extractErrno(err)\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/fsgofer/fsgofer_test.go",
"new_path": "runsc/fsgofer/fsgofer_test.go",
"diff": "@@ -26,6 +26,19 @@ import (\n\"gvisor.dev/gvisor/pkg/log\"\n\"gvisor.dev/gvisor/pkg/p9\"\n+ \"gvisor.dev/gvisor/pkg/test/testutil\"\n+)\n+\n+var allOpenFlags = []p9.OpenFlags{p9.ReadOnly, p9.WriteOnly, p9.ReadWrite}\n+\n+var (\n+ allTypes = []fileType{regular, directory, symlink}\n+\n+ // allConfs is set in init().\n+ allConfs []Config\n+\n+ rwConfs = []Config{{ROMount: false}}\n+ roConfs = []Config{{ROMount: true}}\n)\nfunc init() {\n@@ -39,6 +52,13 @@ func init() {\n}\n}\n+func configTestName(config *Config) string {\n+ if config.ROMount {\n+ return \"ROMount\"\n+ }\n+ return \"RWMount\"\n+}\n+\nfunc assertPanic(t *testing.T, f func()) {\ndefer func() {\nif r := recover(); r == nil {\n@@ -88,18 +108,6 @@ func testReadWrite(f p9.File, flags p9.OpenFlags, content []byte) error {\nreturn nil\n}\n-var allOpenFlags = []p9.OpenFlags{p9.ReadOnly, p9.WriteOnly, p9.ReadWrite}\n-\n-var (\n- allTypes = []fileType{regular, directory, symlink}\n-\n- // allConfs is set in init() above.\n- allConfs []Config\n-\n- rwConfs = []Config{{ROMount: false}}\n- roConfs = []Config{{ROMount: true}}\n-)\n-\ntype state struct {\nroot *localFile\nfile *localFile\n@@ -117,11 +125,9 @@ func runAll(t *testing.T, test func(*testing.T, state)) {\nfunc runCustom(t *testing.T, types []fileType, confs []Config, test func(*testing.T, state)) {\nfor _, c := range confs {\n- t.Logf(\"Config: %+v\", c)\n-\nfor _, ft := range types {\n- t.Logf(\"File type: %v\", ft)\n-\n+ name := fmt.Sprintf(\"%s/%v\", configTestName(&c), ft)\n+ t.Run(name, func(t *testing.T) {\npath, name, err := setup(ft)\nif err != nil {\nt.Fatalf(\"%v\", err)\n@@ -143,16 +149,22 @@ func runCustom(t *testing.T, types []fileType, confs []Config, test func(*testin\nt.Fatalf(\"root.Walk({%q}) failed, err: %v\", \"symlink\", err)\n}\n- st := state{root: root.(*localFile), file: file.(*localFile), conf: c, ft: ft}\n+ st := state{\n+ root: root.(*localFile),\n+ file: file.(*localFile),\n+ conf: c,\n+ ft: ft,\n+ }\ntest(t, st)\nfile.Close()\nroot.Close()\n+ })\n}\n}\n}\nfunc setup(ft fileType) (string, string, error) {\n- path, err := ioutil.TempDir(\"\", \"root-\")\n+ path, err := ioutil.TempDir(testutil.TmpDir(), \"root-\")\nif err != nil {\nreturn \"\", \"\", fmt.Errorf(\"ioutil.TempDir() failed, err: %v\", err)\n}\n@@ -308,6 +320,32 @@ func TestUnopened(t *testing.T) {\n})\n}\n+// TestOpenOPath is a regression test to ensure that a file that cannot be open\n+// for read is allowed to be open. 
This was happening because the control file\n+// was open with O_PATH, but Open() was not checking for it and allowing the\n+// control file to be reused.\n+func TestOpenOPath(t *testing.T) {\n+ runCustom(t, []fileType{regular}, rwConfs, func(t *testing.T, s state) {\n+ // Fist remove all permissions on the file.\n+ if err := s.file.SetAttr(p9.SetAttrMask{Permissions: true}, p9.SetAttr{Permissions: p9.FileMode(0)}); err != nil {\n+ t.Fatalf(\"SetAttr(): %v\", err)\n+ }\n+ // Then walk to the file again to open a new control file.\n+ filename := filepath.Base(s.file.hostPath)\n+ _, newFile, err := s.root.Walk([]string{filename})\n+ if err != nil {\n+ t.Fatalf(\"root.Walk(%q): %v\", filename, err)\n+ }\n+\n+ if newFile.(*localFile).controlReadable {\n+ t.Fatalf(\"control file didn't open with O_PATH: %+v\", newFile)\n+ }\n+ if _, _, _, err := newFile.Open(p9.ReadOnly); err != syscall.EACCES {\n+ t.Fatalf(\"Open() should have failed, got: %v, wanted: EACCES\", err)\n+ }\n+ })\n+}\n+\nfunc SetGetAttr(l *localFile, valid p9.SetAttrMask, attr p9.SetAttr) (p9.Attr, error) {\nif err := l.SetAttr(valid, attr); err != nil {\nreturn p9.Attr{}, err\n"
}
] | Go | Apache License 2.0 | google/gvisor | Fix fsgofer Open() when control file is using O_PATH
Open tries to reuse the control file to save syscalls and
file descriptors when opening a file. However, when the
control file was opened using O_PATH (e.g. no file permission
to open readonly), Open() would not check for it.
PiperOrigin-RevId: 322821729 |
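Editorial note: the reason the control FD cannot always be reused is that a descriptor opened with `O_PATH` only identifies the file — `read(2)` and `write(2)` on it fail with `EBADF`. A small Linux-only demo of that behavior (assumes `golang.org/x/sys/unix`; it is not the fsgofer code):

```go
package main

import (
	"fmt"
	"log"
	"os"
	"path/filepath"

	"golang.org/x/sys/unix"
)

func main() {
	path := filepath.Join(os.TempDir(), "opath-demo.txt")
	if err := os.WriteFile(path, []byte("data"), 0644); err != nil {
		log.Fatal(err)
	}
	defer os.Remove(path)

	// O_PATH yields a descriptor that identifies the file but cannot be used
	// for read(2)/write(2) -- which is why a control FD opened this way must
	// not be reused to serve a read-only Open().
	fd, err := unix.Open(path, unix.O_PATH, 0)
	if err != nil {
		log.Fatal(err)
	}
	defer unix.Close(fd)

	buf := make([]byte, 4)
	_, err = unix.Read(fd, buf)
	fmt.Println("read on O_PATH fd:", err) // bad file descriptor (EBADF)
}
```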
259,962 | 23.07.2020 12:52:18 | 25,200 | 20b556e625354dd8330e30e4075ad06eedc6a2ce | Fix wildcard bind for raw socket.
Fixes | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/raw/endpoint.go",
"new_path": "pkg/tcpip/transport/raw/endpoint.go",
"diff": "@@ -456,7 +456,7 @@ func (e *endpoint) Bind(addr tcpip.FullAddress) *tcpip.Error {\ndefer e.mu.Unlock()\n// If a local address was specified, verify that it's valid.\n- if e.stack.CheckLocalAddress(addr.NIC, e.NetProto, addr.Addr) == 0 {\n+ if len(addr.Addr) != 0 && e.stack.CheckLocalAddress(addr.NIC, e.NetProto, addr.Addr) == 0 {\nreturn tcpip.ErrBadLocalAddress\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/raw_socket.cc",
"new_path": "test/syscalls/linux/raw_socket.cc",
"diff": "@@ -262,6 +262,27 @@ TEST_P(RawSocketTest, SendWithoutConnectFails) {\nSyscallFailsWithErrno(EDESTADDRREQ));\n}\n+// Wildcard Bind.\n+TEST_P(RawSocketTest, BindToWildcard) {\n+ SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));\n+ struct sockaddr_storage addr;\n+ addr = {};\n+\n+ // We don't set ports because raw sockets don't have a notion of ports.\n+ if (Family() == AF_INET) {\n+ struct sockaddr_in* sin = reinterpret_cast<struct sockaddr_in*>(&addr);\n+ sin->sin_family = AF_INET;\n+ sin->sin_addr.s_addr = htonl(INADDR_ANY);\n+ } else {\n+ struct sockaddr_in6* sin6 = reinterpret_cast<struct sockaddr_in6*>(&addr);\n+ sin6->sin6_family = AF_INET6;\n+ sin6->sin6_addr = in6addr_any;\n+ }\n+\n+ ASSERT_THAT(bind(s_, reinterpret_cast<struct sockaddr*>(&addr_), AddrLen()),\n+ SyscallSucceeds());\n+}\n+\n// Bind to localhost.\nTEST_P(RawSocketTest, BindToLocalhost) {\nSKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));\n"
}
] | Go | Apache License 2.0 | google/gvisor | Fix wildcard bind for raw socket.
Fixes #3334
PiperOrigin-RevId: 322846384 |
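Editorial note: the raw-socket fix above skips the local-address check when the bind address is empty, i.e. a wildcard bind. A host-side Go sketch of the call pattern the new `BindToWildcard` test exercises (assumes Linux, `golang.org/x/sys/unix`, and CAP_NET_RAW):

```go
package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	// Raw sockets need CAP_NET_RAW, so run this as root (or expect EPERM).
	fd, err := unix.Socket(unix.AF_INET, unix.SOCK_RAW, unix.IPPROTO_ICMP)
	if err != nil {
		log.Fatalf("socket: %v (CAP_NET_RAW required)", err)
	}
	defer unix.Close(fd)

	// A zero-valued SockaddrInet4 is the wildcard (INADDR_ANY) bind that the
	// change above makes netstack accept: with an empty local address there
	// is nothing to validate against the NIC's addresses.
	if err := unix.Bind(fd, &unix.SockaddrInet4{}); err != nil {
		log.Fatalf("wildcard bind: %v", err)
	}
	fmt.Println("raw ICMP socket bound to the wildcard address")
}
```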
260,022 | 09.06.2020 12:35:39 | 14,400 | 3e0e3b9b11fee58835a0a492d66e72b354459e27 | Added stub FUSE filesystem
Allow FUSE filesystems to be mounted using libfuse.
The appropriate flags and mount options are parsed and
understood by fusefs. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/fuse/BUILD",
"new_path": "pkg/sentry/fsimpl/fuse/BUILD",
"diff": "@@ -6,15 +6,20 @@ go_library(\nname = \"fuse\",\nsrcs = [\n\"dev.go\",\n+ \"fusefs.go\",\n],\nvisibility = [\"//pkg/sentry:internal\"],\ndeps = [\n\"//pkg/abi/linux\",\n\"//pkg/context\",\n+ \"//pkg/log\",\n\"//pkg/sentry/fsimpl/devtmpfs\",\n+ \"//pkg/sentry/fsimpl/kernfs\",\n\"//pkg/sentry/kernel\",\n+ \"//pkg/sentry/kernel/auth\",\n\"//pkg/sentry/vfs\",\n\"//pkg/syserror\",\n\"//pkg/usermem\",\n+ \"@org_golang_x_sys//unix:go_default_library\",\n],\n)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/fuse/dev.go",
"new_path": "pkg/sentry/fsimpl/fuse/dev.go",
"diff": "@@ -51,6 +51,9 @@ type DeviceFD struct {\nvfs.DentryMetadataFileDescriptionImpl\nvfs.NoLockFD\n+ // mounted specifies whether a FUSE filesystem was mounted using the DeviceFD.\n+ mounted bool\n+\n// TODO(gvisor.dev/issue/2987): Add all the data structures needed to enqueue\n// and deque requests, control synchronization and establish communication\n// between the FUSE kernel module and the /dev/fuse character device.\n@@ -61,26 +64,51 @@ func (fd *DeviceFD) Release() {}\n// PRead implements vfs.FileDescriptionImpl.PRead.\nfunc (fd *DeviceFD) PRead(ctx context.Context, dst usermem.IOSequence, offset int64, opts vfs.ReadOptions) (int64, error) {\n+ // Operations on /dev/fuse don't make sense until a FUSE filesystem is mounted.\n+ if !fd.mounted {\n+ return 0, syserror.EPERM\n+ }\n+\nreturn 0, syserror.ENOSYS\n}\n// Read implements vfs.FileDescriptionImpl.Read.\nfunc (fd *DeviceFD) Read(ctx context.Context, dst usermem.IOSequence, opts vfs.ReadOptions) (int64, error) {\n+ // Operations on /dev/fuse don't make sense until a FUSE filesystem is mounted.\n+ if !fd.mounted {\n+ return 0, syserror.EPERM\n+ }\n+\nreturn 0, syserror.ENOSYS\n}\n// PWrite implements vfs.FileDescriptionImpl.PWrite.\nfunc (fd *DeviceFD) PWrite(ctx context.Context, src usermem.IOSequence, offset int64, opts vfs.WriteOptions) (int64, error) {\n+ // Operations on /dev/fuse don't make sense until a FUSE filesystem is mounted.\n+ if !fd.mounted {\n+ return 0, syserror.EPERM\n+ }\n+\nreturn 0, syserror.ENOSYS\n}\n// Write implements vfs.FileDescriptionImpl.Write.\nfunc (fd *DeviceFD) Write(ctx context.Context, src usermem.IOSequence, opts vfs.WriteOptions) (int64, error) {\n+ // Operations on /dev/fuse don't make sense until a FUSE filesystem is mounted.\n+ if !fd.mounted {\n+ return 0, syserror.EPERM\n+ }\n+\nreturn 0, syserror.ENOSYS\n}\n// Seek implements vfs.FileDescriptionImpl.Seek.\nfunc (fd *DeviceFD) Seek(ctx context.Context, offset int64, whence int32) (int64, error) {\n+ // Operations on /dev/fuse don't make sense until a FUSE filesystem is mounted.\n+ if !fd.mounted {\n+ return 0, syserror.EPERM\n+ }\n+\nreturn 0, syserror.ENOSYS\n}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "pkg/sentry/fsimpl/fuse/fusefs.go",
"diff": "+// Copyright 2019 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+// Package fuse implements fusefs.\n+package fuse\n+\n+import (\n+ \"strconv\"\n+\n+ \"gvisor.dev/gvisor/pkg/abi/linux\"\n+ \"gvisor.dev/gvisor/pkg/context\"\n+ \"gvisor.dev/gvisor/pkg/log\"\n+ \"gvisor.dev/gvisor/pkg/sentry/fsimpl/kernfs\"\n+ \"gvisor.dev/gvisor/pkg/sentry/kernel\"\n+ \"gvisor.dev/gvisor/pkg/sentry/kernel/auth\"\n+ \"gvisor.dev/gvisor/pkg/sentry/vfs\"\n+ \"gvisor.dev/gvisor/pkg/syserror\"\n+)\n+\n+// Name is the default filesystem name.\n+const Name = \"fuse\"\n+\n+// FilesystemType implements vfs.FilesystemType.\n+type FilesystemType struct{}\n+\n+type filesystemOptions struct {\n+ // userID specifies the numeric uid of the mount owner.\n+ // This option should not be specified by the filesystem owner.\n+ // It is set by libfuse (or, if libfuse is not used, must be set\n+ // by the filesystem itself). For more information, see man page\n+ // for fuse(8)\n+ userID uint32\n+\n+ // groupID specifies the numeric gid of the mount owner.\n+ // This option should not be specified by the filesystem owner.\n+ // It is set by libfuse (or, if libfuse is not used, must be set\n+ // by the filesystem itself). For more information, see man page\n+ // for fuse(8)\n+ groupID uint32\n+\n+ // rootMode specifies the the file mode of the filesystem's root.\n+ rootMode linux.FileMode\n+}\n+\n+// filesystem implements vfs.FilesystemImpl.\n+type filesystem struct {\n+ kernfs.Filesystem\n+ devMinor uint32\n+\n+ // fuseFD is the FD returned when opening /dev/fuse. 
It is used for communication\n+ // between the FUSE server daemon and the sentry fusefs.\n+ fuseFD *DeviceFD\n+\n+ // opts is the options the fusefs is initialized with.\n+ opts filesystemOptions\n+}\n+\n+// Name implements vfs.FilesystemType.Name.\n+func (FilesystemType) Name() string {\n+ return Name\n+}\n+\n+// GetFilesystem implements vfs.FilesystemType.GetFilesystem.\n+func (fsType FilesystemType) GetFilesystem(ctx context.Context, vfsObj *vfs.VirtualFilesystem, creds *auth.Credentials, source string, opts vfs.GetFilesystemOptions) (*vfs.Filesystem, *vfs.Dentry, error) {\n+ devMinor, err := vfsObj.GetAnonBlockDevMinor()\n+ if err != nil {\n+ return nil, nil, err\n+ }\n+\n+ var fsopts filesystemOptions\n+ mopts := vfs.GenericParseMountOptions(opts.Data)\n+ deviceDescriptorStr, ok := mopts[\"fd\"]\n+ if !ok {\n+ log.Warningf(\"%s.GetFilesystem: communication file descriptor N (obtained by opening /dev/fuse) must be specified as 'fd=N'\", fsType.Name())\n+ return nil, nil, syserror.EINVAL\n+ }\n+ delete(mopts, \"fd\")\n+\n+ deviceDescriptor, err := strconv.ParseInt(deviceDescriptorStr, 10 /* base */, 32 /* bitSize */)\n+ if err != nil {\n+ return nil, nil, err\n+ }\n+\n+ kernelTask := kernel.TaskFromContext(ctx)\n+ if kernelTask == nil {\n+ log.Warningf(\"%s.GetFilesystem: couldn't get kernel task from context\", fsType.Name())\n+ return nil, nil, syserror.EINVAL\n+ }\n+ fuseFd := kernelTask.GetFileVFS2(int32(deviceDescriptor))\n+\n+ // Parse and set all the other supported FUSE mount options.\n+ // TODO: Expand the supported mount options.\n+ if userIDStr, ok := mopts[\"user_id\"]; ok {\n+ delete(mopts, \"user_id\")\n+ userID, err := strconv.ParseUint(userIDStr, 10, 32)\n+ if err != nil {\n+ log.Warningf(\"%s.GetFilesystem: invalid user_id: user_id=%s\", fsType.Name(), userIDStr)\n+ return nil, nil, syserror.EINVAL\n+ }\n+ fsopts.userID = uint32(userID)\n+ }\n+\n+ if groupIDStr, ok := mopts[\"group_id\"]; ok {\n+ delete(mopts, \"group_id\")\n+ groupID, err := strconv.ParseUint(groupIDStr, 10, 32)\n+ if err != nil {\n+ log.Warningf(\"%s.GetFilesystem: invalid group_id: group_id=%s\", fsType.Name(), groupIDStr)\n+ return nil, nil, syserror.EINVAL\n+ }\n+ fsopts.groupID = uint32(groupID)\n+ }\n+\n+ rootMode := linux.FileMode(0777)\n+ modeStr, ok := mopts[\"rootmode\"]\n+ if ok {\n+ delete(mopts, \"rootmode\")\n+ mode, err := strconv.ParseUint(modeStr, 8, 32)\n+ if err != nil {\n+ log.Warningf(\"%s.GetFilesystem: invalid mode: %q\", fsType.Name(), modeStr)\n+ return nil, nil, syserror.EINVAL\n+ }\n+ rootMode = linux.FileMode(mode & 07777)\n+ }\n+ fsopts.rootMode = rootMode\n+\n+ // Check for unparsed options.\n+ if len(mopts) != 0 {\n+ log.Warningf(\"%s.GetFilesystem: unknown options: %v\", fsType.Name(), mopts)\n+ return nil, nil, syserror.EINVAL\n+ }\n+\n+ // Mark the device as ready so it can be used. /dev/fuse can only be used if the FD was used to\n+ // mount a FUSE filesystem.\n+ fuseFD := fuseFd.Impl().(*DeviceFD)\n+ fuseFD.mounted = true\n+\n+ fs := &filesystem{\n+ devMinor: devMinor,\n+ fuseFD: fuseFD,\n+ opts: fsopts,\n+ }\n+\n+ fs.VFSFilesystem().Init(vfsObj, &fsType, fs)\n+\n+ // TODO: dispatch a FUSE_INIT request to the FUSE daemon server before\n+ // returning. 
Mount will not block on this dispatched request.\n+\n+ // root is the fusefs root directory.\n+ defaultFusefsDirMode := linux.FileMode(0755)\n+ root := fs.newInode(creds, defaultFusefsDirMode)\n+\n+ return fs.VFSFilesystem(), root.VFSDentry(), nil\n+}\n+\n+// Release implements vfs.FilesystemImpl.Release.\n+func (fs *filesystem) Release() {\n+ fs.Filesystem.VFSFilesystem().VirtualFilesystem().PutAnonBlockDevMinor(fs.devMinor)\n+ fs.Filesystem.Release()\n+}\n+\n+// Inode implements kernfs.Inode.\n+type Inode struct {\n+ kernfs.InodeAttrs\n+ kernfs.InodeNoDynamicLookup\n+ kernfs.InodeNotSymlink\n+ kernfs.InodeDirectoryNoNewChildren\n+ kernfs.OrderedChildren\n+\n+ locks vfs.FileLocks\n+\n+ dentry kernfs.Dentry\n+}\n+\n+func (fs *filesystem) newInode(creds *auth.Credentials, mode linux.FileMode) *kernfs.Dentry {\n+ i := &Inode{}\n+ i.InodeAttrs.Init(creds, linux.UNNAMED_MAJOR, fs.devMinor, fs.NextIno(), linux.ModeDirectory|0755)\n+ i.OrderedChildren.Init(kernfs.OrderedChildrenOptions{})\n+ i.dentry.Init(i)\n+\n+ return &i.dentry\n+}\n+\n+// Open implements kernfs.Inode.Open.\n+func (i *Inode) Open(ctx context.Context, rp *vfs.ResolvingPath, vfsd *vfs.Dentry, opts vfs.OpenOptions) (*vfs.FileDescription, error) {\n+ fd, err := kernfs.NewGenericDirectoryFD(rp.Mount(), vfsd, &i.OrderedChildren, &i.locks, &opts)\n+ if err != nil {\n+ return nil, err\n+ }\n+ return fd.VFSFileDescription(), nil\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/syscalls/linux/vfs2/mount.go",
"new_path": "pkg/sentry/syscalls/linux/vfs2/mount.go",
"diff": "@@ -77,8 +77,7 @@ func Mount(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall\n// Silently allow MS_NOSUID, since we don't implement set-id bits\n// anyway.\n- const unsupportedFlags = linux.MS_NODEV |\n- linux.MS_NODIRATIME | linux.MS_STRICTATIME\n+ const unsupportedFlags = linux.MS_NODIRATIME | linux.MS_STRICTATIME\n// Linux just allows passing any flags to mount(2) - it won't fail when\n// unknown or unsupported flags are passed. Since we don't implement\n@@ -94,6 +93,12 @@ func Mount(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall\nif flags&linux.MS_NOEXEC == linux.MS_NOEXEC {\nopts.Flags.NoExec = true\n}\n+ if flags&linux.MS_NODEV == linux.MS_NODEV {\n+ opts.Flags.NoDev = true\n+ }\n+ if flags&linux.MS_NOSUID == linux.MS_NOSUID {\n+ opts.Flags.NoSUID = true\n+ }\nif flags&linux.MS_RDONLY == linux.MS_RDONLY {\nopts.ReadOnly = true\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/vfs/options.go",
"new_path": "pkg/sentry/vfs/options.go",
"diff": "@@ -79,6 +79,17 @@ type MountFlags struct {\n// NoATime is equivalent to MS_NOATIME and indicates that the\n// filesystem should not update access time in-place.\nNoATime bool\n+\n+ // NoDev is equivalent to MS_NODEV and indicates that the\n+ // filesystem should not allow access to devices (special files).\n+ // TODO(gVisor.dev/issue/3186): respect this flag in non FUSE\n+ // filesystems.\n+ NoDev bool\n+\n+ // NoSUID is equivalent to MS_NOSUID and indicates that the\n+ // filesystem should not honor set-user-ID and set-group-ID bits or\n+ // file capabilities when executing programs.\n+ NoSUID bool\n}\n// MountOptions contains options to VirtualFilesystem.MountAt().\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/boot/vfs.go",
"new_path": "runsc/boot/vfs.go",
"diff": "@@ -77,6 +77,10 @@ func registerFilesystems(k *kernel.Kernel) error {\nAllowUserMount: true,\nAllowUserList: true,\n})\n+ vfsObj.MustRegisterFilesystemType(fuse.Name, &fuse.FilesystemType{}, &vfs.RegisterFilesystemTypeOptions{\n+ AllowUserMount: true,\n+ AllowUserList: true,\n+ })\n// Setup files in devtmpfs.\nif err := memdev.Register(vfsObj); err != nil {\n@@ -119,6 +123,7 @@ func registerFilesystems(k *kernel.Kernel) error {\nreturn fmt.Errorf(\"creating fusedev devtmpfs files: %w\", err)\n}\n}\n+\nreturn nil\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/dev.cc",
"new_path": "test/syscalls/linux/dev.cc",
"diff": "@@ -161,6 +161,18 @@ TEST(DevTest, OpenDevFuse) {\nASSERT_NO_ERRNO_AND_VALUE(Open(\"/dev/fuse\", O_RDONLY));\n}\n+TEST(DevTest, ReadDevFuseWithoutMount) {\n+ // Note(gvisor.dev/issue/3076) This won't work in the sentry until the new\n+ // device registration is complete.\n+ SKIP_IF(IsRunningWithVFS1() || IsRunningOnGvisor());\n+\n+ const FileDescriptor fd =\n+ ASSERT_NO_ERRNO_AND_VALUE(Open(\"/dev/fuse\", O_RDONLY));\n+\n+ std::vector<char> buf(1);\n+ EXPECT_THAT(ReadFd(fd.get(), buf.data(), sizeof(buf)), SyscallFailsWithErrno(EPERM));\n+}\n+\n} // namespace\n} // namespace testing\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/mount.cc",
"new_path": "test/syscalls/linux/mount.cc",
"diff": "@@ -321,6 +321,34 @@ TEST(MountTest, RenameRemoveMountPoint) {\nASSERT_THAT(rmdir(dir.path().c_str()), SyscallFailsWithErrno(EBUSY));\n}\n+TEST(MountTest, MountFuseFilesystemNoDevice) {\n+ SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_ADMIN)));\n+\n+ // Note(gvisor.dev/issue/3076) This won't work in the sentry until the new\n+ // device registration is complete.\n+ SKIP_IF(IsRunningWithVFS1() || IsRunningOnGvisor());\n+\n+ auto const dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());\n+ EXPECT_THAT(mount(\"\", dir.path().c_str(), \"fuse\", 0, \"\"),\n+ SyscallFailsWithErrno(EINVAL));\n+}\n+\n+TEST(MountTest, MountFuseFilesystem) {\n+ SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_ADMIN)));\n+\n+ // Note(gvisor.dev/issue/3076) This won't work in the sentry until the new\n+ // device registration is complete.\n+ SKIP_IF(IsRunningWithVFS1() || IsRunningOnGvisor());\n+\n+ const FileDescriptor fd =\n+ ASSERT_NO_ERRNO_AND_VALUE(Open(\"/dev/fuse\", O_WRONLY));\n+ std::string mopts = \"fd=\" + std::to_string(fd.get());\n+\n+ auto const dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());\n+ auto const mount =\n+ ASSERT_NO_ERRNO_AND_VALUE(Mount(\"\", dir.path(), \"fuse\", 0, mopts, 0));\n+}\n+\n} // namespace\n} // namespace testing\n"
}
] | Go | Apache License 2.0 | google/gvisor | Added stub FUSE filesystem
Allow FUSE filesystems to be mounted using libfuse.
The appropriate flags and mount options are parsed and
understood by fusefs. |
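Editorial note: the mount options parsed by the fusefs `GetFilesystem` above (`fd`, `rootmode`, `user_id`, `group_id`) are the ones libfuse normally builds when it mounts a filesystem. A hedged sketch of what issuing such a mount looks like from Go on a host that has `/dev/fuse` — it assumes `golang.org/x/sys/unix`, CAP_SYS_ADMIN, and that some daemon would later serve requests on the device fd; it only illustrates the option string format:

```go
package main

import (
	"fmt"
	"log"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	// Open the FUSE character device; its fd is handed to the filesystem
	// through the "fd=" mount option.
	dev, err := os.OpenFile("/dev/fuse", os.O_RDWR, 0)
	if err != nil {
		log.Fatal(err)
	}
	defer dev.Close()

	target, err := os.MkdirTemp("", "fusefs-demo")
	if err != nil {
		log.Fatal(err)
	}
	defer os.Remove(target)

	// Option names match those parsed above: fd, rootmode (octal, including
	// the file-type bits), user_id, group_id.
	data := fmt.Sprintf("fd=%d,rootmode=40755,user_id=%d,group_id=%d",
		dev.Fd(), os.Getuid(), os.Getgid())

	// Requires CAP_SYS_ADMIN; without a daemon answering on dev, the mount
	// point will not be usable, but the option parsing is exercised.
	if err := unix.Mount("", target, "fuse", 0, data); err != nil {
		log.Fatalf("mount: %v", err)
	}
	defer unix.Unmount(target, 0)
	fmt.Println("mounted fusefs at", target)
}
```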
260,022 | 20.07.2020 16:24:27 | 14,400 | 2f78c487f17e12dfee08214311c500073cb03fde | Use mode supplied by the mount options | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/fuse/BUILD",
"new_path": "pkg/sentry/fsimpl/fuse/BUILD",
"diff": "@@ -20,6 +20,5 @@ go_library(\n\"//pkg/sentry/vfs\",\n\"//pkg/syserror\",\n\"//pkg/usermem\",\n- \"@org_golang_x_sys//unix:go_default_library\",\n],\n)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/fuse/fusefs.go",
"new_path": "pkg/sentry/fsimpl/fuse/fusefs.go",
"diff": "-// Copyright 2019 The gVisor Authors.\n+// Copyright 2020 The gVisor Authors.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n@@ -130,7 +130,7 @@ func (fsType FilesystemType) GetFilesystem(ctx context.Context, vfsObj *vfs.Virt\nlog.Warningf(\"%s.GetFilesystem: invalid mode: %q\", fsType.Name(), modeStr)\nreturn nil, nil, syserror.EINVAL\n}\n- rootMode = linux.FileMode(mode & 07777)\n+ rootMode = linux.FileMode(mode)\n}\nfsopts.rootMode = rootMode\n@@ -157,8 +157,7 @@ func (fsType FilesystemType) GetFilesystem(ctx context.Context, vfsObj *vfs.Virt\n// returning. Mount will not block on this dispatched request.\n// root is the fusefs root directory.\n- defaultFusefsDirMode := linux.FileMode(0755)\n- root := fs.newInode(creds, defaultFusefsDirMode)\n+ root := fs.newInode(creds, fsopts.rootMode)\nreturn fs.VFSFilesystem(), root.VFSDentry(), nil\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Use mode supplied by the mount options |
259,891 | 23.07.2020 15:44:09 | 25,200 | dd530eeeff09128d4c2428e1d6f24205a29e661e | iptables: use keyed array literals | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/iptables.go",
"new_path": "pkg/tcpip/stack/iptables.go",
"diff": "@@ -58,8 +58,7 @@ const reaperDelay = 5 * time.Second\nfunc DefaultTables() *IPTables {\nreturn &IPTables{\ntables: [numTables]Table{\n- // NAT table.\n- Table{\n+ natID: Table{\nRules: []Rule{\nRule{Target: AcceptTarget{}},\nRule{Target: AcceptTarget{}},\n@@ -68,22 +67,21 @@ func DefaultTables() *IPTables {\nRule{Target: ErrorTarget{}},\n},\nBuiltinChains: [NumHooks]int{\n- 0, // Prerouting.\n- 1, // Input.\n- HookUnset, // Forward.\n- 2, // Output.\n- 3, // Postrouting.\n+ Prerouting: 0,\n+ Input: 1,\n+ Forward: HookUnset,\n+ Output: 2,\n+ Postrouting: 3,\n},\nUnderflows: [NumHooks]int{\n- 0, // Prerouting.\n- 1, // Input.\n- HookUnset, // Forward.\n- 2, // Output.\n- 3, // Postrouting.\n+ Prerouting: 0,\n+ Input: 1,\n+ Forward: HookUnset,\n+ Output: 2,\n+ Postrouting: 3,\n},\n},\n- // Mangle table.\n- Table{\n+ mangleID: Table{\nRules: []Rule{\nRule{Target: AcceptTarget{}},\nRule{Target: AcceptTarget{}},\n@@ -94,15 +92,14 @@ func DefaultTables() *IPTables {\nOutput: 1,\n},\nUnderflows: [NumHooks]int{\n- 0, // Prerouting.\n- HookUnset, // Input.\n- HookUnset, // Forward.\n- 1, // Output.\n- HookUnset, // Postrouting.\n+ Prerouting: 0,\n+ Input: HookUnset,\n+ Forward: HookUnset,\n+ Output: 1,\n+ Postrouting: HookUnset,\n},\n},\n- // Filter table.\n- Table{\n+ filterID: Table{\nRules: []Rule{\nRule{Target: AcceptTarget{}},\nRule{Target: AcceptTarget{}},\n@@ -110,27 +107,25 @@ func DefaultTables() *IPTables {\nRule{Target: ErrorTarget{}},\n},\nBuiltinChains: [NumHooks]int{\n- HookUnset, // Prerouting.\n- Input: 0, // Input.\n- Forward: 1, // Forward.\n- Output: 2, // Output.\n- HookUnset, // Postrouting.\n+ Prerouting: HookUnset,\n+ Input: 0,\n+ Forward: 1,\n+ Output: 2,\n+ Postrouting: HookUnset,\n},\nUnderflows: [NumHooks]int{\n- HookUnset, // Prerouting.\n- 0, // Input.\n- 1, // Forward.\n- 2, // Output.\n- HookUnset, // Postrouting.\n+ Prerouting: HookUnset,\n+ Input: 0,\n+ Forward: 1,\n+ Output: 2,\n+ Postrouting: HookUnset,\n},\n},\n},\npriorities: [NumHooks][]tableID{\n- []tableID{mangleID, natID}, // Prerouting.\n- []tableID{natID, filterID}, // Input.\n- []tableID{}, // Forward.\n- []tableID{mangleID, natID, filterID}, // Output.\n- []tableID{}, // Postrouting.\n+ Prerouting: []tableID{mangleID, natID},\n+ Input: []tableID{natID, filterID},\n+ Output: []tableID{mangleID, natID, filterID},\n},\nconnections: ConnTrack{\nseed: generateRandUint32(),\n@@ -145,18 +140,12 @@ func EmptyFilterTable() Table {\nreturn Table{\nRules: []Rule{},\nBuiltinChains: [NumHooks]int{\n- HookUnset,\n- 0,\n- 0,\n- 0,\n- HookUnset,\n+ Prerouting: HookUnset,\n+ Postrouting: HookUnset,\n},\nUnderflows: [NumHooks]int{\n- HookUnset,\n- 0,\n- 0,\n- 0,\n- HookUnset,\n+ Prerouting: HookUnset,\n+ Postrouting: HookUnset,\n},\n}\n}\n@@ -167,18 +156,10 @@ func EmptyNATTable() Table {\nreturn Table{\nRules: []Rule{},\nBuiltinChains: [NumHooks]int{\n- 0,\n- 0,\n- HookUnset,\n- 0,\n- 0,\n+ Forward: HookUnset,\n},\nUnderflows: [NumHooks]int{\n- 0,\n- 0,\n- HookUnset,\n- 0,\n- 0,\n+ Forward: HookUnset,\n},\n}\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | iptables: use keyed array literals
PiperOrigin-RevId: 322882426 |
259,914 | 23.07.2020 16:06:46 | 25,200 | 4eb3c8c7e59b1f3dd0c0bfc253cd5c8c6067d05d | kvm-tls-2:add the preservation of user-TLS in the Arm64 kvm platform
This patch loads/saves TLS for the container application.
Related issue: full context-switch support for Arm64
COPYBARA_INTEGRATE_REVIEW=https://github.com/google/gvisor/pull/2761 from lubinszARM:pr_tls_2 | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/arch/arch_aarch64.go",
"new_path": "pkg/sentry/arch/arch_aarch64.go",
"diff": "@@ -28,7 +28,14 @@ import (\n)\n// Registers represents the CPU registers for this architecture.\n-type Registers = linux.PtraceRegs\n+//\n+// +stateify savable\n+type Registers struct {\n+ linux.PtraceRegs\n+\n+ // TPIDR_EL0 is the EL0 Read/Write Software Thread ID Register.\n+ TPIDR_EL0 uint64\n+}\nconst (\n// SyscallWidth is the width of insturctions.\n@@ -101,9 +108,6 @@ type State struct {\n// Our floating point state.\naarch64FPState `state:\"wait\"`\n- // TLS pointer\n- TPValue uint64\n-\n// FeatureSet is a pointer to the currently active feature set.\nFeatureSet *cpuid.FeatureSet\n@@ -157,7 +161,6 @@ func (s *State) Fork() State {\nreturn State{\nRegs: s.Regs,\naarch64FPState: s.aarch64FPState.fork(),\n- TPValue: s.TPValue,\nFeatureSet: s.FeatureSet,\nOrigR0: s.OrigR0,\n}\n@@ -241,18 +244,18 @@ func (s *State) ptraceGetRegs() Registers {\nreturn s.Regs\n}\n-var registersSize = (*Registers)(nil).SizeBytes()\n+var ptraceRegistersSize = (*linux.PtraceRegs)(nil).SizeBytes()\n// PtraceSetRegs implements Context.PtraceSetRegs.\nfunc (s *State) PtraceSetRegs(src io.Reader) (int, error) {\nvar regs Registers\n- buf := make([]byte, registersSize)\n+ buf := make([]byte, ptraceRegistersSize)\nif _, err := io.ReadFull(src, buf); err != nil {\nreturn 0, err\n}\nregs.UnmarshalUnsafe(buf)\ns.Regs = regs\n- return registersSize, nil\n+ return ptraceRegistersSize, nil\n}\n// PtraceGetFPRegs implements Context.PtraceGetFPRegs.\n@@ -278,7 +281,7 @@ const (\nfunc (s *State) PtraceGetRegSet(regset uintptr, dst io.Writer, maxlen int) (int, error) {\nswitch regset {\ncase _NT_PRSTATUS:\n- if maxlen < registersSize {\n+ if maxlen < ptraceRegistersSize {\nreturn 0, syserror.EFAULT\n}\nreturn s.PtraceGetRegs(dst)\n@@ -291,7 +294,7 @@ func (s *State) PtraceGetRegSet(regset uintptr, dst io.Writer, maxlen int) (int,\nfunc (s *State) PtraceSetRegSet(regset uintptr, src io.Reader, maxlen int) (int, error) {\nswitch regset {\ncase _NT_PRSTATUS:\n- if maxlen < registersSize {\n+ if maxlen < ptraceRegistersSize {\nreturn 0, syserror.EFAULT\n}\nreturn s.PtraceSetRegs(src)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/arch/arch_amd64.go",
"new_path": "pkg/sentry/arch/arch_amd64.go",
"diff": "@@ -300,7 +300,7 @@ func (c *context64) PtracePeekUser(addr uintptr) (interface{}, error) {\n// PTRACE_PEEKUSER and PTRACE_POKEUSER are only effective on regs and\n// u_debugreg, returning 0 or silently no-oping for other fields\n// respectively.\n- if addr < uintptr(registersSize) {\n+ if addr < uintptr(ptraceRegistersSize) {\nregs := c.ptraceGetRegs()\nbuf := make([]byte, regs.SizeBytes())\nregs.MarshalUnsafe(buf)\n@@ -315,7 +315,7 @@ func (c *context64) PtracePokeUser(addr, data uintptr) error {\nif addr&7 != 0 || addr >= userStructSize {\nreturn syscall.EIO\n}\n- if addr < uintptr(registersSize) {\n+ if addr < uintptr(ptraceRegistersSize) {\nregs := c.ptraceGetRegs()\nbuf := make([]byte, regs.SizeBytes())\nregs.MarshalUnsafe(buf)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/arch/arch_arm64.go",
"new_path": "pkg/sentry/arch/arch_arm64.go",
"diff": "@@ -142,7 +142,7 @@ func (c *context64) SetStack(value uintptr) {\n// TLS returns the current TLS pointer.\nfunc (c *context64) TLS() uintptr {\n- return uintptr(c.TPValue)\n+ return uintptr(c.Regs.TPIDR_EL0)\n}\n// SetTLS sets the current TLS pointer. Returns false if value is invalid.\n@@ -151,7 +151,7 @@ func (c *context64) SetTLS(value uintptr) bool {\nreturn false\n}\n- c.TPValue = uint64(value)\n+ c.Regs.TPIDR_EL0 = uint64(value)\nreturn true\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/arch/arch_x86.go",
"new_path": "pkg/sentry/arch/arch_x86.go",
"diff": "@@ -31,7 +31,11 @@ import (\n)\n// Registers represents the CPU registers for this architecture.\n-type Registers = linux.PtraceRegs\n+//\n+// +stateify savable\n+type Registers struct {\n+ linux.PtraceRegs\n+}\n// System-related constants for x86.\nconst (\n@@ -311,12 +315,12 @@ func (s *State) ptraceGetRegs() Registers {\nreturn regs\n}\n-var registersSize = (*Registers)(nil).SizeBytes()\n+var ptraceRegistersSize = (*linux.PtraceRegs)(nil).SizeBytes()\n// PtraceSetRegs implements Context.PtraceSetRegs.\nfunc (s *State) PtraceSetRegs(src io.Reader) (int, error) {\nvar regs Registers\n- buf := make([]byte, registersSize)\n+ buf := make([]byte, ptraceRegistersSize)\nif _, err := io.ReadFull(src, buf); err != nil {\nreturn 0, err\n}\n@@ -374,7 +378,7 @@ func (s *State) PtraceSetRegs(src io.Reader) (int, error) {\n}\nregs.Eflags = (s.Regs.Eflags &^ eflagsPtraceMutable) | (regs.Eflags & eflagsPtraceMutable)\ns.Regs = regs\n- return registersSize, nil\n+ return ptraceRegistersSize, nil\n}\n// isUserSegmentSelector returns true if the given segment selector specifies a\n@@ -543,7 +547,7 @@ const (\nfunc (s *State) PtraceGetRegSet(regset uintptr, dst io.Writer, maxlen int) (int, error) {\nswitch regset {\ncase _NT_PRSTATUS:\n- if maxlen < registersSize {\n+ if maxlen < ptraceRegistersSize {\nreturn 0, syserror.EFAULT\n}\nreturn s.PtraceGetRegs(dst)\n@@ -563,7 +567,7 @@ func (s *State) PtraceGetRegSet(regset uintptr, dst io.Writer, maxlen int) (int,\nfunc (s *State) PtraceSetRegSet(regset uintptr, src io.Reader, maxlen int) (int, error) {\nswitch regset {\ncase _NT_PRSTATUS:\n- if maxlen < registersSize {\n+ if maxlen < ptraceRegistersSize {\nreturn 0, syserror.EFAULT\n}\nreturn s.PtraceSetRegs(src)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/ring0/kernel_arm64.go",
"new_path": "pkg/sentry/platform/ring0/kernel_arm64.go",
"diff": "@@ -58,7 +58,13 @@ func (c *CPU) SwitchToUser(switchOpts SwitchOpts) (vector Vector) {\nregs.Pstate &= ^uint64(UserFlagsClear)\nregs.Pstate |= UserFlagsSet\n+\n+ SetTLS(regs.TPIDR_EL0)\n+\nkernelExitToEl0()\n+\n+ regs.TPIDR_EL0 = GetTLS()\n+\nvector = c.vecCode\n// Perform the switch.\n"
}
] | Go | Apache License 2.0 | google/gvisor | kvm-tls-2:add the preservation of user-TLS in the Arm64 kvm platform
This patch loads/saves TLS for the container application.
Related issue: full context-switch support for Arm64 #1238
COPYBARA_INTEGRATE_REVIEW=https://github.com/google/gvisor/pull/2761 from lubinszARM:pr_tls_2 cb5dbca1c9c3f378002406da7a58887f9b5032b3
PiperOrigin-RevId: 322887044 |
259,860 | 23.07.2020 18:43:20 | 25,200 | bac4ebaabfac95f7b467b9c777a890fcf31a42ae | FileDescription is hard to spell.
Fix typos. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/devpts/slave.go",
"new_path": "pkg/sentry/fsimpl/devpts/slave.go",
"diff": "@@ -132,7 +132,7 @@ func (sfd *slaveFileDescription) Write(ctx context.Context, src usermem.IOSequen\nreturn sfd.inode.t.ld.outputQueueWrite(ctx, src)\n}\n-// Ioctl implements vfs.FileDescripionImpl.Ioctl.\n+// Ioctl implements vfs.FileDescriptionImpl.Ioctl.\nfunc (sfd *slaveFileDescription) Ioctl(ctx context.Context, io usermem.IO, args arch.SyscallArguments) (uintptr, error) {\nswitch cmd := args[1].Uint(); cmd {\ncase linux.FIONREAD: // linux.FIONREAD == linux.TIOCINQ\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/kernfs/fd_impl_util.go",
"new_path": "pkg/sentry/fsimpl/kernfs/fd_impl_util.go",
"diff": "@@ -112,7 +112,7 @@ func (fd *GenericDirectoryFD) PWrite(ctx context.Context, src usermem.IOSequence\nreturn fd.DirectoryFileDescriptionDefaultImpl.PWrite(ctx, src, offset, opts)\n}\n-// Release implements vfs.FileDecriptionImpl.Release.\n+// Release implements vfs.FileDescriptionImpl.Release.\nfunc (fd *GenericDirectoryFD) Release() {}\nfunc (fd *GenericDirectoryFD) filesystem() *vfs.Filesystem {\n@@ -123,7 +123,7 @@ func (fd *GenericDirectoryFD) inode() Inode {\nreturn fd.vfsfd.VirtualDentry().Dentry().Impl().(*Dentry).inode\n}\n-// IterDirents implements vfs.FileDecriptionImpl.IterDirents. IterDirents holds\n+// IterDirents implements vfs.FileDescriptionImpl.IterDirents. IterDirents holds\n// o.mu when calling cb.\nfunc (fd *GenericDirectoryFD) IterDirents(ctx context.Context, cb vfs.IterDirentsCallback) error {\nfd.mu.Lock()\n@@ -198,7 +198,7 @@ func (fd *GenericDirectoryFD) IterDirents(ctx context.Context, cb vfs.IterDirent\nreturn err\n}\n-// Seek implements vfs.FileDecriptionImpl.Seek.\n+// Seek implements vfs.FileDescriptionImpl.Seek.\nfunc (fd *GenericDirectoryFD) Seek(ctx context.Context, offset int64, whence int32) (int64, error) {\nfd.mu.Lock()\ndefer fd.mu.Unlock()\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/overlay/non_directory.go",
"new_path": "pkg/sentry/fsimpl/overlay/non_directory.go",
"diff": "@@ -176,7 +176,7 @@ func (fd *nonDirectoryFD) SetStat(ctx context.Context, opts vfs.SetStatOptions)\nreturn nil\n}\n-// StatFS implements vfs.FileDesciptionImpl.StatFS.\n+// StatFS implements vfs.FileDescriptionImpl.StatFS.\nfunc (fd *nonDirectoryFD) StatFS(ctx context.Context) (linux.Statfs, error) {\nreturn fd.filesystem().statFS(ctx)\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/proc/subtasks.go",
"new_path": "pkg/sentry/fsimpl/proc/subtasks.go",
"diff": "@@ -128,7 +128,7 @@ func (fd *subtasksFD) IterDirents(ctx context.Context, cb vfs.IterDirentsCallbac\nreturn fd.GenericDirectoryFD.IterDirents(ctx, cb)\n}\n-// Seek implements vfs.FileDecriptionImpl.Seek.\n+// Seek implements vfs.FileDescriptionImpl.Seek.\nfunc (fd *subtasksFD) Seek(ctx context.Context, offset int64, whence int32) (int64, error) {\nif fd.task.ExitState() >= kernel.TaskExitZombie {\nreturn 0, syserror.ENOENT\n"
}
] | Go | Apache License 2.0 | google/gvisor | FileDescription is hard to spell.
Fix typos.
PiperOrigin-RevId: 322913282 |
259,907 | 24.07.2020 01:14:32 | 25,200 | c59b792f53f3aa4c24d1ca9442ffc44f6d4932df | [go-marshal] Update API
All Marshal* and Unmarshal* methods now require buffers to be correctly sized
Only the Copy{In/Out} variants can handle smaller buffers (or address spaces) | [
{
"change_type": "MODIFY",
"old_path": "tools/go_marshal/gomarshal/generator.go",
"new_path": "tools/go_marshal/gomarshal/generator.go",
"diff": "@@ -413,13 +413,13 @@ func (g *Generator) Run() error {\nfor _, t := range g.collectMarshallableTypes(a, fsets[i]) {\nimpl := g.generateOne(t, fsets[i])\n// Collect Marshallable types referenced by the generated code.\n- for ref, _ := range impl.ms {\n+ for ref := range impl.ms {\nms[ref] = struct{}{}\n}\nimpls = append(impls, impl)\n// Collect imports referenced by the generated code and add them to\n// the list of imports we need to copy to the generated code.\n- for name, _ := range impl.is {\n+ for name := range impl.is {\nif !g.imports.markUsed(name) {\npanic(fmt.Sprintf(\"Generated code for '%s' referenced a non-existent import with local name '%s'. Either go-marshal needs to add an import to the generated file, or a package in an input source file has a package name differ from the final component of its path, which go-marshal doesn't know how to detect; use an import alias to work around this limitation.\", impl.typeName(), name))\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "tools/go_marshal/gomarshal/generator_interfaces_struct.go",
"new_path": "tools/go_marshal/gomarshal/generator_interfaces_struct.go",
"diff": "@@ -268,6 +268,10 @@ func (g *interfaceGenerator) emitMarshallableForStruct(st *ast.StructType) {\ng.emit(\"// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.\\n\")\ng.emit(\"func (%s *%s) MarshalUnsafe(dst []byte) {\\n\", g.r, g.typeName())\ng.inIndent(func() {\n+ fallback := func() {\n+ g.emit(\"// Type %s doesn't have a packed layout in memory, fallback to MarshalBytes.\\n\", g.typeName())\n+ g.emit(\"%s.MarshalBytes(dst)\\n\", g.r)\n+ }\nif thisPacked {\ng.recordUsedImport(\"safecopy\")\ng.recordUsedImport(\"unsafe\")\n@@ -277,16 +281,13 @@ func (g *interfaceGenerator) emitMarshallableForStruct(st *ast.StructType) {\ng.emit(\"safecopy.CopyIn(dst, unsafe.Pointer(%s))\\n\", g.r)\n})\ng.emit(\"} else {\\n\")\n- g.inIndent(func() {\n- g.emit(\"%s.MarshalBytes(dst)\\n\", g.r)\n- })\n+ g.inIndent(fallback)\ng.emit(\"}\\n\")\n} else {\ng.emit(\"safecopy.CopyIn(dst, unsafe.Pointer(%s))\\n\", g.r)\n}\n} else {\n- g.emit(\"// Type %s doesn't have a packed layout in memory, fallback to MarshalBytes.\\n\", g.typeName())\n- g.emit(\"%s.MarshalBytes(dst)\\n\", g.r)\n+ fallback()\n}\n})\ng.emit(\"}\\n\\n\")\n@@ -294,6 +295,10 @@ func (g *interfaceGenerator) emitMarshallableForStruct(st *ast.StructType) {\ng.emit(\"// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.\\n\")\ng.emit(\"func (%s *%s) UnmarshalUnsafe(src []byte) {\\n\", g.r, g.typeName())\ng.inIndent(func() {\n+ fallback := func() {\n+ g.emit(\"// Type %s doesn't have a packed layout in memory, fallback to UnmarshalBytes.\\n\", g.typeName())\n+ g.emit(\"%s.UnmarshalBytes(src)\\n\", g.r)\n+ }\nif thisPacked {\ng.recordUsedImport(\"safecopy\")\ng.recordUsedImport(\"unsafe\")\n@@ -303,16 +308,13 @@ func (g *interfaceGenerator) emitMarshallableForStruct(st *ast.StructType) {\ng.emit(\"safecopy.CopyOut(unsafe.Pointer(%s), src)\\n\", g.r)\n})\ng.emit(\"} else {\\n\")\n- g.inIndent(func() {\n- g.emit(\"%s.UnmarshalBytes(src)\\n\", g.r)\n- })\n+ g.inIndent(fallback)\ng.emit(\"}\\n\")\n} else {\ng.emit(\"safecopy.CopyOut(unsafe.Pointer(%s), src)\\n\", g.r)\n}\n} else {\n- g.emit(\"// Type %s doesn't have a packed layout in memory, fall back to UnmarshalBytes.\\n\", g.typeName())\n- g.emit(\"%s.UnmarshalBytes(src)\\n\", g.r)\n+ fallback()\n}\n})\ng.emit(\"}\\n\\n\")\n@@ -463,8 +465,10 @@ func (g *interfaceGenerator) emitMarshallableSliceForStruct(st *ast.StructType,\n})\ng.emit(\"}\\n\\n\")\n- g.emit(\"// Handle any final partial object.\\n\")\n- g.emit(\"if length < size*count && length%size != 0 {\\n\")\n+ g.emit(\"// Handle any final partial object. buf is guaranteed to be long enough for the\\n\")\n+ g.emit(\"// final element, but may not contain valid data for the entire range. This may\\n\")\n+ g.emit(\"// result in unmarshalling zero values for some parts of the object.\\n\")\n+ g.emit(\"if length%size != 0 {\\n\")\ng.inIndent(func() {\ng.emit(\"idx := limit\\n\")\ng.emit(\"dst[idx].UnmarshalBytes(buf[size*idx:size*(idx+1)])\\n\")\n"
},
{
"change_type": "MODIFY",
"old_path": "tools/go_marshal/marshal/marshal.go",
"new_path": "tools/go_marshal/marshal/marshal.go",
"diff": "@@ -58,18 +58,12 @@ type Marshallable interface {\n// likely make use of the type of these fields).\nSizeBytes() int\n- // MarshalBytes serializes a copy of a type to dst. dst may be smaller than\n- // SizeBytes(), which results in a part of the struct being marshalled. Note\n- // that this may have unexpected results for non-packed types, as implicit\n- // padding needs to be taken into account when reasoning about how much of\n- // the type is serialized.\n+ // MarshalBytes serializes a copy of a type to dst.\n+ // Precondition: dst must be at least SizeBytes() in length.\nMarshalBytes(dst []byte)\n- // UnmarshalBytes deserializes a type from src. src may be smaller than\n- // SizeBytes(), which results in a partially deserialized struct. Note that\n- // this may have unexpected results for non-packed types, as implicit\n- // padding needs to be taken into account when reasoning about how much of\n- // the type is deserialized.\n+ // UnmarshalBytes deserializes a type from src.\n+ // Precondition: src must be at least SizeBytes() in length.\nUnmarshalBytes(src []byte)\n// Packed returns true if the marshalled size of the type is the same as the\n@@ -89,8 +83,8 @@ type Marshallable interface {\n// representation to the dst buffer. This is only safe to do when the type\n// has no implicit padding, see Marshallable.Packed. When Packed would\n// return false, MarshalUnsafe should fall back to the safer but slower\n- // MarshalBytes. dst may be smaller than SizeBytes(), see comment for\n- // MarshalBytes for implications.\n+ // MarshalBytes.\n+ // Precondition: dst must be at least SizeBytes() in length.\nMarshalUnsafe(dst []byte)\n// UnmarshalUnsafe deserializes a type by directly copying to the underlying\n@@ -99,8 +93,8 @@ type Marshallable interface {\n// This allows much faster unmarshalling of types which have no implicit\n// padding, see Marshallable.Packed. When Packed would return false,\n// UnmarshalUnsafe should fall back to the safer but slower unmarshal\n- // mechanism implemented in UnmarshalBytes. src may be smaller than\n- // SizeBytes(), see comment for UnmarshalBytes for implications.\n+ // mechanism implemented in UnmarshalBytes.\n+ // Precondition: src must be at least SizeBytes() in length.\nUnmarshalUnsafe(src []byte)\n// CopyIn deserializes a Marshallable type from a task's memory. This may\n@@ -149,14 +143,16 @@ type Marshallable interface {\n//\n// Generates four additional functions for marshalling slices of Foos like this:\n//\n-// // MarshalUnsafeFooSlice is like Foo.MarshalUnsafe, buf for a []Foo. It's\n-// // more efficient that repeatedly calling calling Foo.MarshalUnsafe over a\n-// // []Foo in a loop.\n+// // MarshalUnsafeFooSlice is like Foo.MarshalUnsafe, buf for a []Foo. It\n+// // might be more efficient that repeatedly calling Foo.MarshalUnsafe\n+// // over a []Foo in a loop if the type is Packed.\n+// // Preconditions: dst must be at least len(src)*Foo.SizeBytes() in length.\n// func MarshalUnsafeFooSlice(src []Foo, dst []byte) (int, error) { ... }\n//\n-// // UnmarshalUnsafeFooSlice is like Foo.UnmarshalUnsafe, buf for a []Foo. It's\n-// // more efficient that repeatedly calling calling Foo.UnmarshalUnsafe over a\n-// // []Foo in a loop.\n+// // UnmarshalUnsafeFooSlice is like Foo.UnmarshalUnsafe, buf for a []Foo. 
It\n+// // might be more efficient that repeatedly calling Foo.UnmarshalUnsafe\n+// // over a []Foo in a loop if the type is Packed.\n+// // Preconditions: src must be at least len(dst)*Foo.SizeBytes() in length.\n// func UnmarshalUnsafeFooSlice(dst []Foo, src []byte) (int, error) { ... }\n//\n// // CopyFooSliceIn copies in a slice of Foo objects from the task's memory.\n"
}
] | Go | Apache License 2.0 | google/gvisor | [go-marshal] Update API
- All Marshal* and Unmarshal* methods now require buffers to be correctly sized
- Only the Copy{In/Out} variants can handle smaller buffers (or address spaces)
PiperOrigin-RevId: 322953881 |
259,907 | 24.07.2020 01:24:16 | 25,200 | e2c70ee9814f0f76ab5c30478748e4c697e91f33 | Enable automated marshalling for netstack. | [
{
"change_type": "MODIFY",
"old_path": "pkg/abi/linux/netdevice.go",
"new_path": "pkg/abi/linux/netdevice.go",
"diff": "@@ -22,6 +22,8 @@ const (\n)\n// IFReq is an interface request.\n+//\n+// +marshal\ntype IFReq struct {\n// IFName is an encoded name, normally null-terminated. This should be\n// accessed via the Name and SetName functions.\n@@ -79,6 +81,8 @@ type IFMap struct {\n// IFConf is used to return a list of interfaces and their addresses. See\n// netdevice(7) and struct ifconf for more detail on its use.\n+//\n+// +marshal\ntype IFConf struct {\nLen int32\n_ [4]byte // Pad to sizeof(struct ifconf).\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/file_operations.go",
"new_path": "pkg/sentry/fs/file_operations.go",
"diff": "@@ -160,6 +160,7 @@ type FileOperations interface {\n// refer.\n//\n// Preconditions: The AddressSpace (if any) that io refers to is activated.\n+ // Must only be called from a task goroutine.\nIoctl(ctx context.Context, file *File, io usermem.IO, args arch.SyscallArguments) (uintptr, error)\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/netfilter/netfilter.go",
"new_path": "pkg/sentry/socket/netfilter/netfilter.go",
"diff": "@@ -66,7 +66,7 @@ func nflog(format string, args ...interface{}) {\nfunc GetInfo(t *kernel.Task, stack *stack.Stack, outPtr usermem.Addr) (linux.IPTGetinfo, *syserr.Error) {\n// Read in the struct and table name.\nvar info linux.IPTGetinfo\n- if _, err := t.CopyIn(outPtr, &info); err != nil {\n+ if _, err := info.CopyIn(t, outPtr); err != nil {\nreturn linux.IPTGetinfo{}, syserr.FromError(err)\n}\n@@ -84,7 +84,7 @@ func GetInfo(t *kernel.Task, stack *stack.Stack, outPtr usermem.Addr) (linux.IPT\nfunc GetEntries(t *kernel.Task, stack *stack.Stack, outPtr usermem.Addr, outLen int) (linux.KernelIPTGetEntries, *syserr.Error) {\n// Read in the struct and table name.\nvar userEntries linux.IPTGetEntries\n- if _, err := t.CopyIn(outPtr, &userEntries); err != nil {\n+ if _, err := userEntries.CopyIn(t, outPtr); err != nil {\nnflog(\"couldn't copy in entries %q\", userEntries.Name)\nreturn linux.KernelIPTGetEntries{}, syserr.FromError(err)\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/netstack/netstack.go",
"new_path": "pkg/sentry/socket/netstack/netstack.go",
"diff": "@@ -2835,6 +2835,11 @@ func (s *SocketOperations) Ioctl(ctx context.Context, _ *fs.File, io usermem.IO,\n}\nfunc (s *socketOpsCommon) ioctl(ctx context.Context, io usermem.IO, args arch.SyscallArguments) (uintptr, error) {\n+ t := kernel.TaskFromContext(ctx)\n+ if t == nil {\n+ panic(\"ioctl(2) may only be called from a task goroutine\")\n+ }\n+\n// SIOCGSTAMP is implemented by netstack rather than all commonEndpoint\n// sockets.\n// TODO(b/78348848): Add a commonEndpoint method to support SIOCGSTAMP.\n@@ -2847,9 +2852,7 @@ func (s *socketOpsCommon) ioctl(ctx context.Context, io usermem.IO, args arch.Sy\n}\ntv := linux.NsecToTimeval(s.timestampNS)\n- _, err := usermem.CopyObjectOut(ctx, io, args[2].Pointer(), &tv, usermem.IOOpts{\n- AddressSpaceActive: true,\n- })\n+ _, err := tv.CopyOut(t, args[2].Pointer())\nreturn 0, err\ncase linux.TIOCINQ:\n@@ -2868,9 +2871,8 @@ func (s *socketOpsCommon) ioctl(ctx context.Context, io usermem.IO, args arch.Sy\n}\n// Copy result to userspace.\n- _, err := usermem.CopyObjectOut(ctx, io, args[2].Pointer(), int32(v), usermem.IOOpts{\n- AddressSpaceActive: true,\n- })\n+ vP := primitive.Int32(v)\n+ _, err := vP.CopyOut(t, args[2].Pointer())\nreturn 0, err\n}\n@@ -2879,6 +2881,11 @@ func (s *socketOpsCommon) ioctl(ctx context.Context, io usermem.IO, args arch.Sy\n// Ioctl performs a socket ioctl.\nfunc Ioctl(ctx context.Context, ep commonEndpoint, io usermem.IO, args arch.SyscallArguments) (uintptr, error) {\n+ t := kernel.TaskFromContext(ctx)\n+ if t == nil {\n+ panic(\"ioctl(2) may only be called from a task goroutine\")\n+ }\n+\nswitch arg := int(args[1].Int()); arg {\ncase linux.SIOCGIFFLAGS,\nlinux.SIOCGIFADDR,\n@@ -2895,37 +2902,28 @@ func Ioctl(ctx context.Context, ep commonEndpoint, io usermem.IO, args arch.Sysc\nlinux.SIOCETHTOOL:\nvar ifr linux.IFReq\n- if _, err := usermem.CopyObjectIn(ctx, io, args[2].Pointer(), &ifr, usermem.IOOpts{\n- AddressSpaceActive: true,\n- }); err != nil {\n+ if _, err := ifr.CopyIn(t, args[2].Pointer()); err != nil {\nreturn 0, err\n}\nif err := interfaceIoctl(ctx, io, arg, &ifr); err != nil {\nreturn 0, err.ToError()\n}\n- _, err := usermem.CopyObjectOut(ctx, io, args[2].Pointer(), &ifr, usermem.IOOpts{\n- AddressSpaceActive: true,\n- })\n+ _, err := ifr.CopyOut(t, args[2].Pointer())\nreturn 0, err\ncase linux.SIOCGIFCONF:\n// Return a list of interface addresses or the buffer size\n// necessary to hold the list.\nvar ifc linux.IFConf\n- if _, err := usermem.CopyObjectIn(ctx, io, args[2].Pointer(), &ifc, usermem.IOOpts{\n- AddressSpaceActive: true,\n- }); err != nil {\n+ if _, err := ifc.CopyIn(t, args[2].Pointer()); err != nil {\nreturn 0, err\n}\n- if err := ifconfIoctl(ctx, io, &ifc); err != nil {\n+ if err := ifconfIoctl(ctx, t, io, &ifc); err != nil {\nreturn 0, err\n}\n- _, err := usermem.CopyObjectOut(ctx, io, args[2].Pointer(), ifc, usermem.IOOpts{\n- AddressSpaceActive: true,\n- })\n-\n+ _, err := ifc.CopyOut(t, args[2].Pointer())\nreturn 0, err\ncase linux.TIOCINQ:\n@@ -2938,9 +2936,8 @@ func Ioctl(ctx context.Context, ep commonEndpoint, io usermem.IO, args arch.Sysc\nv = math.MaxInt32\n}\n// Copy result to userspace.\n- _, err := usermem.CopyObjectOut(ctx, io, args[2].Pointer(), int32(v), usermem.IOOpts{\n- AddressSpaceActive: true,\n- })\n+ vP := primitive.Int32(v)\n+ _, err := vP.CopyOut(t, args[2].Pointer())\nreturn 0, err\ncase linux.TIOCOUTQ:\n@@ -2954,9 +2951,8 @@ func Ioctl(ctx context.Context, ep commonEndpoint, io usermem.IO, args arch.Sysc\n}\n// Copy result to userspace.\n- _, err := 
usermem.CopyObjectOut(ctx, io, args[2].Pointer(), int32(v), usermem.IOOpts{\n- AddressSpaceActive: true,\n- })\n+ vP := primitive.Int32(v)\n+ _, err := vP.CopyOut(t, args[2].Pointer())\nreturn 0, err\ncase linux.SIOCGIFMEM, linux.SIOCGIFPFLAGS, linux.SIOCGMIIPHY, linux.SIOCGMIIREG:\n@@ -3105,7 +3101,7 @@ func interfaceIoctl(ctx context.Context, io usermem.IO, arg int, ifr *linux.IFRe\n}\n// ifconfIoctl populates a struct ifconf for the SIOCGIFCONF ioctl.\n-func ifconfIoctl(ctx context.Context, io usermem.IO, ifc *linux.IFConf) error {\n+func ifconfIoctl(ctx context.Context, t *kernel.Task, io usermem.IO, ifc *linux.IFConf) error {\n// If Ptr is NULL, return the necessary buffer size via Len.\n// Otherwise, write up to Len bytes starting at Ptr containing ifreq\n// structs.\n@@ -3142,9 +3138,7 @@ func ifconfIoctl(ctx context.Context, io usermem.IO, ifc *linux.IFConf) error {\n// Copy the ifr to userspace.\ndst := uintptr(ifc.Ptr) + uintptr(ifc.Len)\nifc.Len += int32(linux.SizeOfIFReq)\n- if _, err := usermem.CopyObjectOut(ctx, io, usermem.Addr(dst), ifr, usermem.IOOpts{\n- AddressSpaceActive: true,\n- }); err != nil {\n+ if _, err := ifr.CopyOut(t, usermem.Addr(dst)); err != nil {\nreturn err\n}\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Enable automated marshalling for netstack.
PiperOrigin-RevId: 322954792 |
259,891 | 24.07.2020 15:03:13 | 25,200 | da631a3ef21ccace88803a9d8dcf05e285167e3f | Speed up some iptables tests
Sending UDP packets in a loop can be done in a separate goroutine. We
can't do this in ContainerAction because the container will terminate
early.
Locally, scripts/iptables_tests.sh runs ~40 seconds faster. | [
{
"change_type": "MODIFY",
"old_path": "test/iptables/filter_input.go",
"new_path": "test/iptables/filter_input.go",
"diff": "@@ -81,7 +81,7 @@ func (FilterInputDropUDP) ContainerAction(ip net.IP) error {\n// LocalAction implements TestCase.LocalAction.\nfunc (FilterInputDropUDP) LocalAction(ip net.IP) error {\n- return sendUDPLoop(ip, dropPort, sendloopDuration)\n+ return spawnUDPLoop(ip, dropPort, sendloopDuration)\n}\n// FilterInputDropOnlyUDP tests that \"-p udp -j DROP\" only affects UDP traffic.\n@@ -141,7 +141,7 @@ func (FilterInputDropUDPPort) ContainerAction(ip net.IP) error {\n// LocalAction implements TestCase.LocalAction.\nfunc (FilterInputDropUDPPort) LocalAction(ip net.IP) error {\n- return sendUDPLoop(ip, dropPort, sendloopDuration)\n+ return spawnUDPLoop(ip, dropPort, sendloopDuration)\n}\n// FilterInputDropDifferentUDPPort tests that dropping traffic for a single UDP port\n@@ -169,7 +169,7 @@ func (FilterInputDropDifferentUDPPort) ContainerAction(ip net.IP) error {\n// LocalAction implements TestCase.LocalAction.\nfunc (FilterInputDropDifferentUDPPort) LocalAction(ip net.IP) error {\n- return sendUDPLoop(ip, acceptPort, sendloopDuration)\n+ return spawnUDPLoop(ip, acceptPort, sendloopDuration)\n}\n// FilterInputDropTCPDestPort tests that connections are not accepted on specified source ports.\n@@ -269,7 +269,7 @@ func (FilterInputDropAll) ContainerAction(ip net.IP) error {\n// LocalAction implements TestCase.LocalAction.\nfunc (FilterInputDropAll) LocalAction(ip net.IP) error {\n- return sendUDPLoop(ip, dropPort, sendloopDuration)\n+ return spawnUDPLoop(ip, dropPort, sendloopDuration)\n}\n// FilterInputMultiUDPRules verifies that multiple UDP rules are applied\n@@ -365,7 +365,7 @@ func (FilterInputDefaultPolicyAccept) ContainerAction(ip net.IP) error {\n// LocalAction implements TestCase.LocalAction.\nfunc (FilterInputDefaultPolicyAccept) LocalAction(ip net.IP) error {\n- return sendUDPLoop(ip, acceptPort, sendloopDuration)\n+ return spawnUDPLoop(ip, acceptPort, sendloopDuration)\n}\n// FilterInputDefaultPolicyDrop tests the default DROP policy.\n@@ -396,7 +396,7 @@ func (FilterInputDefaultPolicyDrop) ContainerAction(ip net.IP) error {\n// LocalAction implements TestCase.LocalAction.\nfunc (FilterInputDefaultPolicyDrop) LocalAction(ip net.IP) error {\n- return sendUDPLoop(ip, acceptPort, sendloopDuration)\n+ return spawnUDPLoop(ip, acceptPort, sendloopDuration)\n}\n// FilterInputReturnUnderflow tests that -j RETURN in a built-in chain causes\n@@ -428,7 +428,7 @@ func (FilterInputReturnUnderflow) ContainerAction(ip net.IP) error {\n// LocalAction implements TestCase.LocalAction.\nfunc (FilterInputReturnUnderflow) LocalAction(ip net.IP) error {\n- return sendUDPLoop(ip, acceptPort, sendloopDuration)\n+ return spawnUDPLoop(ip, acceptPort, sendloopDuration)\n}\n// FilterInputSerializeJump verifies that we can serialize jumps.\n@@ -482,7 +482,7 @@ func (FilterInputJumpBasic) ContainerAction(ip net.IP) error {\n// LocalAction implements TestCase.LocalAction.\nfunc (FilterInputJumpBasic) LocalAction(ip net.IP) error {\n- return sendUDPLoop(ip, acceptPort, sendloopDuration)\n+ return spawnUDPLoop(ip, acceptPort, sendloopDuration)\n}\n// FilterInputJumpReturn jumps, returns, and executes a rule.\n@@ -512,7 +512,7 @@ func (FilterInputJumpReturn) ContainerAction(ip net.IP) error {\n// LocalAction implements TestCase.LocalAction.\nfunc (FilterInputJumpReturn) LocalAction(ip net.IP) error {\n- return sendUDPLoop(ip, acceptPort, sendloopDuration)\n+ return spawnUDPLoop(ip, acceptPort, sendloopDuration)\n}\n// FilterInputJumpReturnDrop jumps to a chain, returns, and DROPs packets.\n@@ -549,7 +549,7 @@ 
func (FilterInputJumpReturnDrop) ContainerAction(ip net.IP) error {\n// LocalAction implements TestCase.LocalAction.\nfunc (FilterInputJumpReturnDrop) LocalAction(ip net.IP) error {\n- return sendUDPLoop(ip, dropPort, sendloopDuration)\n+ return spawnUDPLoop(ip, dropPort, sendloopDuration)\n}\n// FilterInputJumpBuiltin verifies that jumping to a top-levl chain is illegal.\n@@ -604,7 +604,7 @@ func (FilterInputJumpTwice) ContainerAction(ip net.IP) error {\n// LocalAction implements TestCase.LocalAction.\nfunc (FilterInputJumpTwice) LocalAction(ip net.IP) error {\n- return sendUDPLoop(ip, acceptPort, sendloopDuration)\n+ return spawnUDPLoop(ip, acceptPort, sendloopDuration)\n}\n// FilterInputDestination verifies that we can filter packets via `-d\n@@ -638,7 +638,7 @@ func (FilterInputDestination) ContainerAction(ip net.IP) error {\n// LocalAction implements TestCase.LocalAction.\nfunc (FilterInputDestination) LocalAction(ip net.IP) error {\n- return sendUDPLoop(ip, acceptPort, sendloopDuration)\n+ return spawnUDPLoop(ip, acceptPort, sendloopDuration)\n}\n// FilterInputInvertDestination verifies that we can filter packets via `! -d\n@@ -667,7 +667,7 @@ func (FilterInputInvertDestination) ContainerAction(ip net.IP) error {\n// LocalAction implements TestCase.LocalAction.\nfunc (FilterInputInvertDestination) LocalAction(ip net.IP) error {\n- return sendUDPLoop(ip, acceptPort, sendloopDuration)\n+ return spawnUDPLoop(ip, acceptPort, sendloopDuration)\n}\n// FilterInputSource verifies that we can filter packets via `-s\n@@ -696,7 +696,7 @@ func (FilterInputSource) ContainerAction(ip net.IP) error {\n// LocalAction implements TestCase.LocalAction.\nfunc (FilterInputSource) LocalAction(ip net.IP) error {\n- return sendUDPLoop(ip, acceptPort, sendloopDuration)\n+ return spawnUDPLoop(ip, acceptPort, sendloopDuration)\n}\n// FilterInputInvertSource verifies that we can filter packets via `! -s\n@@ -725,5 +725,5 @@ func (FilterInputInvertSource) ContainerAction(ip net.IP) error {\n// LocalAction implements TestCase.LocalAction.\nfunc (FilterInputInvertSource) LocalAction(ip net.IP) error {\n- return sendUDPLoop(ip, acceptPort, sendloopDuration)\n+ return spawnUDPLoop(ip, acceptPort, sendloopDuration)\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "test/iptables/iptables_util.go",
"new_path": "test/iptables/iptables_util.go",
"diff": "@@ -84,17 +84,42 @@ func listenUDP(port int, timeout time.Duration) error {\n// sendUDPLoop sends 1 byte UDP packets repeatedly to the IP and port specified\n// over a duration.\nfunc sendUDPLoop(ip net.IP, port int, duration time.Duration) error {\n- // Send packets for a few seconds.\n+ conn, err := connectUDP(ip, port)\n+ if err != nil {\n+ return err\n+ }\n+ defer conn.Close()\n+ loopUDP(conn, duration)\n+ return nil\n+}\n+\n+// spawnUDPLoop works like sendUDPLoop, but returns immediately and sends\n+// packets in another goroutine.\n+func spawnUDPLoop(ip net.IP, port int, duration time.Duration) error {\n+ conn, err := connectUDP(ip, port)\n+ if err != nil {\n+ return err\n+ }\n+ go func() {\n+ defer conn.Close()\n+ loopUDP(conn, duration)\n+ }()\n+ return nil\n+}\n+\n+func connectUDP(ip net.IP, port int) (net.Conn, error) {\nremote := net.UDPAddr{\nIP: ip,\nPort: port,\n}\nconn, err := net.DialUDP(network, nil, &remote)\nif err != nil {\n- return err\n+ return nil, err\n+ }\n+ return conn, nil\n}\n- defer conn.Close()\n+func loopUDP(conn net.Conn, duration time.Duration) {\nto := time.After(duration)\nfor timedOut := false; !timedOut; {\n// This may return an error (connection refused) if the remote\n@@ -109,8 +134,6 @@ func sendUDPLoop(ip net.IP, port int, duration time.Duration) error {\ntime.Sleep(200 * time.Millisecond)\n}\n}\n-\n- return nil\n}\n// listenTCP listens for connections on a TCP port.\n"
},
{
"change_type": "MODIFY",
"old_path": "test/iptables/nat.go",
"new_path": "test/iptables/nat.go",
"diff": "@@ -67,7 +67,7 @@ func (NATPreRedirectUDPPort) ContainerAction(ip net.IP) error {\n// LocalAction implements TestCase.LocalAction.\nfunc (NATPreRedirectUDPPort) LocalAction(ip net.IP) error {\n- return sendUDPLoop(ip, acceptPort, sendloopDuration)\n+ return spawnUDPLoop(ip, acceptPort, sendloopDuration)\n}\n// NATPreRedirectTCPPort tests that connections are redirected on specified ports.\n@@ -187,7 +187,7 @@ func (NATDropUDP) ContainerAction(ip net.IP) error {\n// LocalAction implements TestCase.LocalAction.\nfunc (NATDropUDP) LocalAction(ip net.IP) error {\n- return sendUDPLoop(ip, acceptPort, sendloopDuration)\n+ return spawnUDPLoop(ip, acceptPort, sendloopDuration)\n}\n// NATAcceptAll tests that all UDP packets are accepted.\n@@ -213,7 +213,7 @@ func (NATAcceptAll) ContainerAction(ip net.IP) error {\n// LocalAction implements TestCase.LocalAction.\nfunc (NATAcceptAll) LocalAction(ip net.IP) error {\n- return sendUDPLoop(ip, acceptPort, sendloopDuration)\n+ return spawnUDPLoop(ip, acceptPort, sendloopDuration)\n}\n// NATOutRedirectIP uses iptables to select packets based on destination IP and\n@@ -310,7 +310,7 @@ func (NATPreRedirectIP) ContainerAction(ip net.IP) error {\n// LocalAction implements TestCase.LocalAction.\nfunc (NATPreRedirectIP) LocalAction(ip net.IP) error {\n- return sendUDPLoop(ip, dropPort, sendloopDuration)\n+ return spawnUDPLoop(ip, dropPort, sendloopDuration)\n}\n// NATPreDontRedirectIP tests that iptables matching with \"-d\" does not match\n@@ -332,7 +332,7 @@ func (NATPreDontRedirectIP) ContainerAction(ip net.IP) error {\n// LocalAction implements TestCase.LocalAction.\nfunc (NATPreDontRedirectIP) LocalAction(ip net.IP) error {\n- return sendUDPLoop(ip, acceptPort, sendloopDuration)\n+ return spawnUDPLoop(ip, acceptPort, sendloopDuration)\n}\n// NATPreRedirectInvert tests that iptables can match with \"! -d\".\n@@ -353,7 +353,7 @@ func (NATPreRedirectInvert) ContainerAction(ip net.IP) error {\n// LocalAction implements TestCase.LocalAction.\nfunc (NATPreRedirectInvert) LocalAction(ip net.IP) error {\n- return sendUDPLoop(ip, dropPort, sendloopDuration)\n+ return spawnUDPLoop(ip, dropPort, sendloopDuration)\n}\n// NATRedirectRequiresProtocol tests that use of the --to-ports flag requires a\n"
}
] | Go | Apache License 2.0 | google/gvisor | Speed up some iptables tests
Sending UDP packets in a loop can be done in a separate goroutine. We
can't do this in ContainerAction because the container will terminate
early.
Locally, scripts/iptables_tests.sh runs ~40 seconds faster. |
259,891 | 24.07.2020 17:27:11 | 25,200 | 91a47a40a8490aaeceb5a8162eb47b758ca738a1 | Bugfix: non-native tests were tagged as native
Copy the list of tags when passing it to _syscall_test. | [
{
"change_type": "MODIFY",
"old_path": "test/runner/defs.bzl",
"new_path": "test/runner/defs.bzl",
"diff": "@@ -157,7 +157,7 @@ def syscall_test(\nplatform = \"native\",\nuse_tmpfs = False,\nadd_uds_tree = add_uds_tree,\n- tags = tags,\n+ tags = list(tags),\n)\nfor (platform, platform_tags) in platforms.items():\n"
}
] | Go | Apache License 2.0 | google/gvisor | Bugfix: non-native tests were tagged as native
Copy the list of tags when passing it to _syscall_test. |
259,853 | 24.07.2020 17:29:09 | 25,200 | ad97134ce2deaae7ffded0ce1b78920fe960465d | travis: run only arm64 builds
The travis capacity is limited, but we build and test amd64 on kokoro. | [
{
"change_type": "MODIFY",
"old_path": ".travis.yml",
"new_path": ".travis.yml",
"diff": "@@ -30,8 +30,10 @@ services:\n- docker\njobs:\ninclude:\n- - os: linux\n- arch: amd64\n+ # AMD64 builds are tested on kokoro, so don't run them in travis to save\n+ # capacity for arm64 builds.\n+ # - os: linux\n+ # arch: amd64\n- os: linux\narch: arm64\nscript:\n"
}
] | Go | Apache License 2.0 | google/gvisor | travis: run only arm64 builds
The travis capacity is limited, but we build and test amd64 on kokoro.
PiperOrigin-RevId: 323103934 |
259,907 | 24.07.2020 17:46:55 | 25,200 | 7dd8d87c9626af792efdb5bb9621c7814db887d7 | Fix make dev
The "dev" target had been failing. The failure was caused by overloading
the "RUNTIME" variable.
{
"change_type": "MODIFY",
"old_path": "Makefile",
"new_path": "Makefile",
"diff": "@@ -336,10 +336,10 @@ RUNTIME_LOGS := $(RUNTIME_LOG_DIR)/runsc.log.%TEST%.%TIMESTAMP%.%COMMAND%\ndev: ## Installs a set of local runtimes. Requires sudo.\n@$(call submake,refresh ARGS=\"--net-raw\")\n- @$(call submake,configure RUNTIME=\"$(RUNTIME)\" ARGS=\"--net-raw\")\n- @$(call submake,configure RUNTIME=\"$(RUNTIME)-d\" ARGS=\"--net-raw --debug --strace --log-packets\")\n- @$(call submake,configure RUNTIME=\"$(RUNTIME)-p\" ARGS=\"--net-raw --profile\")\n- @$(call submake,configure RUNTIME=\"$(RUNTIME)-vfs2-d\" ARGS=\"--net-raw --debug --strace --log-packets --vfs2\")\n+ @$(call submake,configure RUNTIME_NAME=\"$(RUNTIME)\" ARGS=\"--net-raw\")\n+ @$(call submake,configure RUNTIME_NAME=\"$(RUNTIME)-d\" ARGS=\"--net-raw --debug --strace --log-packets\")\n+ @$(call submake,configure RUNTIME_NAME=\"$(RUNTIME)-p\" ARGS=\"--net-raw --profile\")\n+ @$(call submake,configure RUNTIME_NAME=\"$(RUNTIME)-vfs2-d\" ARGS=\"--net-raw --debug --strace --log-packets --vfs2\")\n@sudo systemctl restart docker\n.PHONY: dev\n@@ -350,8 +350,8 @@ refresh: ## Refreshes the runtime binary (for development only). Must have calle\ninstall-test-runtime: ## Installs the runtime for testing. Requires sudo.\n@$(call submake,refresh ARGS=\"--net-raw --TESTONLY-test-name-env=RUNSC_TEST_NAME --debug --strace --log-packets $(ARGS)\")\n- @$(call submake,configure RUNTIME=runsc)\n- @$(call submake,configure)\n+ @$(call submake,configure RUNTIME_NAME=runsc)\n+ @$(call submake,configure RUNTIME_NAME=\"$(RUNTIME)\")\n@sudo systemctl restart docker\n@if [[ -f /etc/docker/daemon.json ]]; then \\\nsudo chmod 0755 /etc/docker && \\\n@@ -360,7 +360,7 @@ install-test-runtime: ## Installs the runtime for testing. Requires sudo.\n.PHONY: install-test-runtime\nconfigure: ## Configures a single runtime. Requires sudo. Typically called from dev or install-test-runtime.\n- @sudo sudo \"$(RUNTIME_BIN)\" install --experimental=true --runtime=\"$(RUNTIME)\" -- --debug-log \"$(RUNTIME_LOGS)\" $(ARGS)\n+ @sudo sudo \"$(RUNTIME_BIN)\" install --experimental=true --runtime=\"$(RUNTIME_NAME)\" -- --debug-log \"$(RUNTIME_LOGS)\" $(ARGS)\n@echo -e \"$(INFO) Installed runtime \\\"$(RUNTIME)\\\" @ $(RUNTIME_BIN)\"\n@echo -e \"$(INFO) Logs are in: $(RUNTIME_LOG_DIR)\"\n@sudo rm -rf \"$(RUNTIME_LOG_DIR)\" && mkdir -p \"$(RUNTIME_LOG_DIR)\"\n"
}
] | Go | Apache License 2.0 | google/gvisor | Fix make dev
The "dev" target had been failing. The failure was caused by overloading
the "RUNTIME" variable.
PiperOrigin-RevId: 323106040 |
259,975 | 26.07.2020 21:42:17 | 25,200 | b38bae00885ef1bc97ff2798917e286bc14ca2f0 | Mark Passing Integration Tests for VFS2.
Mark the currently passing integration/image tests for
VFS2. Bugs will be filed for remaining failing tests.
Updates | [
{
"change_type": "MODIFY",
"old_path": "Makefile",
"new_path": "Makefile",
"diff": "@@ -166,11 +166,14 @@ do-tests: runsc\nsimple-tests: unit-tests # Compatibility target.\n.PHONY: simple-tests\n+IMAGE_FILTER := HelloWorld\\|Httpd\\|Ruby\\|Stdio\n+INTEGRATION_FILTER := Life\\|Pause\\|Connect\\|JobControl\\|Overlay\\|Exec\\|DirCreation/root\n+\ndocker-tests: load-basic-images\n@$(call submake,install-test-runtime RUNTIME=\"vfs1\")\n@$(call submake,test-runtime RUNTIME=\"vfs1\" TARGETS=\"$(INTEGRATION_TARGETS)\")\n@$(call submake,install-test-runtime RUNTIME=\"vfs2\" ARGS=\"--vfs2\")\n- @$(call submake,test-runtime RUNTIME=\"vfs2\" OPTIONS=\"--test_filter=.*TestHelloWorld\" TARGETS=\"$(INTEGRATION_TARGETS)\")\n+ @$(call submake,test-runtime RUNTIME=\"vfs2\" OPTIONS=\"--test_filter=$(IMAGE_FILTER)\\|$(INTEGRATION_FILTER)\" TARGETS=\"$(INTEGRATION_TARGETS)\")\n.PHONY: docker-tests\noverlay-tests: load-basic-images\n"
},
{
"change_type": "MODIFY",
"old_path": "scripts/docker_tests.sh",
"new_path": "scripts/docker_tests.sh",
"diff": "@@ -22,4 +22,6 @@ install_runsc_for_test docker\ntest_runsc //test/image:image_test //test/e2e:integration_test\ninstall_runsc_for_test docker --vfs2\n-test_runsc //test/image:image_test --test_filter=.*TestHelloWorld\n+IMAGE_FILTER=\"Hello|Httpd|Ruby|Stdio\"\n+INTEGRATION_FILTER=\"LifeCycle|Pause|Connect|JobControl|Overlay|Exec|DirCreation/root\"\n+test_runsc //test/e2e:integration_test //test/image:image_test --test_filter=\"${IMAGE_FILTER}|${INTEGRATION_FILTER}\"\n"
}
] | Go | Apache License 2.0 | google/gvisor | Mark Passing Integration Tests for VFS2.
Mark the currently passing integration/image tests for
VFS2. Bugs will be filed for remaining failing tests.
Updates #1487
PiperOrigin-RevId: 323297260 |
259,975 | 27.07.2020 10:00:17 | 25,200 | 29e5609b228363bcc435a8828f9b6ee19018a525 | Port redis benchmark | [
{
"change_type": "ADD",
"old_path": null,
"new_path": "images/benchmarks/redis/Dockerfile",
"diff": "+FROM redis:5.0.4\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "test/benchmarks/database/BUILD",
"diff": "+load(\"//tools:defs.bzl\", \"go_library\", \"go_test\")\n+\n+package(licenses = [\"notice\"])\n+\n+go_library(\n+ name = \"database\",\n+ testonly = 1,\n+ srcs = [\"database.go\"],\n+ deps = [\"//test/benchmarks/harness\"],\n+)\n+\n+go_test(\n+ name = \"database_test\",\n+ size = \"enormous\",\n+ srcs = [\n+ \"redis_test.go\",\n+ ],\n+ library = \":database\",\n+ tags = [\n+ # Requires docker and runsc to be configured before test runs.\n+ \"manual\",\n+ \"local\",\n+ ],\n+ deps = [\n+ \"//pkg/test/dockerutil\",\n+ \"//test/benchmarks/harness\",\n+ ],\n+)\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "test/benchmarks/database/database.go",
"diff": "+// Copyright 2020 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+// Package database holds benchmarks around database applications.\n+package database\n+\n+import (\n+ \"os\"\n+ \"testing\"\n+\n+ \"gvisor.dev/gvisor/test/benchmarks/harness\"\n+)\n+\n+var h harness.Harness\n+\n+// TestMain is the main method for package database.\n+func TestMain(m *testing.M) {\n+ h.Init()\n+ os.Exit(m.Run())\n+}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "test/benchmarks/database/redis_test.go",
"diff": "+// Copyright 2020 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package database\n+\n+import (\n+ \"context\"\n+ \"fmt\"\n+ \"regexp\"\n+ \"strconv\"\n+ \"strings\"\n+ \"testing\"\n+ \"time\"\n+\n+ \"gvisor.dev/gvisor/pkg/test/dockerutil\"\n+ \"gvisor.dev/gvisor/test/benchmarks/harness\"\n+)\n+\n+// All possible operations from redis. Note: \"ping\" will\n+// run both PING_INLINE and PING_BUILD.\n+var operations []string = []string{\n+ \"PING_INLINE\",\n+ \"PING_BULK\",\n+ \"SET\",\n+ \"GET\",\n+ \"INCR\",\n+ \"LPUSH\",\n+ \"RPUSH\",\n+ \"LPOP\",\n+ \"RPOP\",\n+ \"SADD\",\n+ \"HSET\",\n+ \"SPOP\",\n+ \"LRANGE_100\",\n+ \"LRANGE_300\",\n+ \"LRANGE_500\",\n+ \"LRANGE_600\",\n+ \"MSET\",\n+}\n+\n+// BenchmarkRedis runs redis-benchmark against a redis instance and reports\n+// data in queries per second. Each is reported by named operation (e.g. LPUSH).\n+func BenchmarkRedis(b *testing.B) {\n+ clientMachine, err := h.GetMachine()\n+ if err != nil {\n+ b.Fatalf(\"failed to get machine: %v\", err)\n+ }\n+ defer clientMachine.CleanUp()\n+\n+ serverMachine, err := h.GetMachine()\n+ if err != nil {\n+ b.Fatalf(\"failed to get machine: %v\", err)\n+ }\n+ defer serverMachine.CleanUp()\n+\n+ // Redis runs on port 6379 by default.\n+ port := 6379\n+ ctx := context.Background()\n+\n+ for _, operation := range operations {\n+ b.Run(operation, func(b *testing.B) {\n+ server := serverMachine.GetContainer(ctx, b)\n+ defer server.CleanUp(ctx)\n+\n+ // The redis docker container takes no arguments to run a redis server.\n+ if err := server.Spawn(ctx, dockerutil.RunOpts{\n+ Image: \"benchmarks/redis\",\n+ Ports: []int{port},\n+ }); err != nil {\n+ b.Fatalf(\"failed to start redis server with: %v\", err)\n+ }\n+\n+ if out, err := server.WaitForOutput(ctx, \"Ready to accept connections\", 3*time.Second); err != nil {\n+ b.Fatalf(\"failed to start redis server: %v %s\", err, out)\n+ }\n+\n+ ip, err := serverMachine.IPAddress()\n+ if err != nil {\n+ b.Fatal(\"failed to get IP from server: %v\", err)\n+ }\n+\n+ serverPort, err := server.FindPort(ctx, port)\n+ if err != nil {\n+ b.Fatal(\"failed to get IP from server: %v\", err)\n+ }\n+\n+ if err = harness.WaitUntilServing(ctx, clientMachine, ip, serverPort); err != nil {\n+ b.Fatalf(\"failed to start redis with: %v\", err)\n+ }\n+\n+ // runs redis benchmark -t operation for 100K requests against server.\n+ cmd := strings.Split(\n+ fmt.Sprintf(\"redis-benchmark --csv -t %s -h %s -p %d\", operation, ip, serverPort), \" \")\n+\n+ // There is no -t PING_BULK for redis-benchmark, so adjust the command in that case.\n+ // Note that \"ping\" will run both PING_INLINE and PING_BULK.\n+ if operation == \"PING_BULK\" {\n+ cmd = strings.Split(\n+ fmt.Sprintf(\"redis-benchmark --csv -t ping -h %s -p %d\", ip, serverPort), \" \")\n+ }\n+ // Reset profiles and timer to begin the measurement.\n+ server.RestartProfiles()\n+ b.ResetTimer()\n+ for i := 0; i < b.N; i++ {\n+ client := clientMachine.GetNativeContainer(ctx, 
b)\n+ defer client.CleanUp(ctx)\n+ out, err := client.Run(ctx, dockerutil.RunOpts{\n+ Image: \"benchmarks/redis\",\n+ }, cmd...)\n+ if err != nil {\n+ b.Fatalf(\"redis-benchmark failed with: %v\", err)\n+ }\n+\n+ // Stop time while we parse results.\n+ b.StopTimer()\n+ result, err := parseOperation(operation, out)\n+ if err != nil {\n+ b.Fatalf(\"parsing result %s failed with err: %v\", out, err)\n+ }\n+ b.ReportMetric(result, operation) // operations per second\n+ b.StartTimer()\n+ }\n+ })\n+ }\n+}\n+\n+// parseOperation grabs the metric operations per second from redis-benchmark output.\n+func parseOperation(operation, data string) (float64, error) {\n+ re := regexp.MustCompile(fmt.Sprintf(`\"%s( .*)?\",\"(\\d*\\.\\d*)\"`, operation))\n+ match := re.FindStringSubmatch(data)\n+ // If no match, simply don't add it to the result map.\n+ if len(match) < 3 {\n+ return 0.0, fmt.Errorf(\"could not find %s in %s\", operation, data)\n+ }\n+ return strconv.ParseFloat(match[2], 64)\n+}\n+\n+// TestParser tests the parser on sample data.\n+func TestParser(t *testing.T) {\n+ sampleData := `\n+ \"PING_INLINE\",\"48661.80\"\n+ \"PING_BULK\",\"50301.81\"\n+ \"SET\",\"48923.68\"\n+ \"GET\",\"49382.71\"\n+ \"INCR\",\"49975.02\"\n+ \"LPUSH\",\"49875.31\"\n+ \"RPUSH\",\"50276.52\"\n+ \"LPOP\",\"50327.12\"\n+ \"RPOP\",\"50556.12\"\n+ \"SADD\",\"49504.95\"\n+ \"HSET\",\"49504.95\"\n+ \"SPOP\",\"50025.02\"\n+ \"LPUSH (needed to benchmark LRANGE)\",\"48875.86\"\n+ \"LRANGE_100 (first 100 elements)\",\"33955.86\"\n+ \"LRANGE_300 (first 300 elements)\",\"16550.81\"\n+ \"LRANGE_500 (first 450 elements)\",\"13653.74\"\n+ \"LRANGE_600 (first 600 elements)\",\"11219.57\"\n+ \"MSET (10 keys)\",\"44682.75\"\n+ `\n+ wants := map[string]float64{\n+ \"PING_INLINE\": 48661.80,\n+ \"PING_BULK\": 50301.81,\n+ \"SET\": 48923.68,\n+ \"GET\": 49382.71,\n+ \"INCR\": 49975.02,\n+ \"LPUSH\": 49875.31,\n+ \"RPUSH\": 50276.52,\n+ \"LPOP\": 50327.12,\n+ \"RPOP\": 50556.12,\n+ \"SADD\": 49504.95,\n+ \"HSET\": 49504.95,\n+ \"SPOP\": 50025.02,\n+ \"LRANGE_100\": 33955.86,\n+ \"LRANGE_300\": 16550.81,\n+ \"LRANGE_500\": 13653.74,\n+ \"LRANGE_600\": 11219.57,\n+ \"MSET\": 44682.75,\n+ }\n+ for op, want := range wants {\n+ if got, err := parseOperation(op, sampleData); err != nil {\n+ t.Fatalf(\"failed to parse %s: %v\", op, err)\n+ } else if want != got {\n+ t.Fatalf(\"wanted %f for op %s, got %f\", want, op, got)\n+ }\n+ }\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "test/benchmarks/harness/util.go",
"new_path": "test/benchmarks/harness/util.go",
"diff": "@@ -30,7 +30,7 @@ func WaitUntilServing(ctx context.Context, machine Machine, server net.IP, port\nnetcat := machine.GetNativeContainer(ctx, logger)\ndefer netcat.CleanUp(ctx)\n- cmd := fmt.Sprintf(\"while ! nc -zv %s %d; do true; done\", server.String(), port)\n+ cmd := fmt.Sprintf(\"while ! nc -zv %s %d; do true; done\", server, port)\n_, err := netcat.Run(ctx, dockerutil.RunOpts{\nImage: \"packetdrill\",\n}, \"sh\", \"-c\", cmd)\n"
}
] | Go | Apache License 2.0 | google/gvisor | Port redis benchmark
PiperOrigin-RevId: 323381964 |
259,975 | 27.07.2020 10:06:07 | 25,200 | 77552f1c770da7413c3c8212443328c9081901c0 | Port ffmpeg benchmark | [
{
"change_type": "ADD",
"old_path": null,
"new_path": "images/benchmarks/ffmpeg/Dockerfile",
"diff": "+FROM ubuntu:18.04\n+\n+RUN set -x \\\n+ && apt-get update \\\n+ && apt-get install -y \\\n+ ffmpeg \\\n+ && rm -rf /var/lib/apt/lists/*\n+WORKDIR /media\n+ADD https://samples.ffmpeg.org/MPEG-4/video.mp4 video.mp4\n"
},
{
"change_type": "MODIFY",
"old_path": "test/benchmarks/harness/util.go",
"new_path": "test/benchmarks/harness/util.go",
"diff": "@@ -36,3 +36,11 @@ func WaitUntilServing(ctx context.Context, machine Machine, server net.IP, port\n}, \"sh\", \"-c\", cmd)\nreturn err\n}\n+\n+// DropCaches drops caches on the provided machine. Requires root.\n+func DropCaches(machine Machine) error {\n+ if out, err := machine.RunCommand(\"/bin/sh\", \"-c\", \"sync | sysctl vm.drop_caches=3\"); err != nil {\n+ return fmt.Errorf(\"failed to drop caches: %v logs: %s\", err, out)\n+ }\n+ return nil\n+}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "test/benchmarks/media/BUILD",
"diff": "+load(\"//tools:defs.bzl\", \"go_library\", \"go_test\")\n+\n+package(licenses = [\"notice\"])\n+\n+go_library(\n+ name = \"media\",\n+ testonly = 1,\n+ srcs = [\"media.go\"],\n+ deps = [\"//test/benchmarks/harness\"],\n+)\n+\n+go_test(\n+ name = \"media_test\",\n+ size = \"large\",\n+ srcs = [\"ffmpeg_test.go\"],\n+ library = \":media\",\n+ deps = [\n+ \"//pkg/test/dockerutil\",\n+ \"//test/benchmarks/harness\",\n+ ],\n+)\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "test/benchmarks/media/ffmpeg_test.go",
"diff": "+// Copyright 2020 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+package media\n+\n+import (\n+ \"context\"\n+ \"strings\"\n+ \"testing\"\n+\n+ \"gvisor.dev/gvisor/pkg/test/dockerutil\"\n+ \"gvisor.dev/gvisor/test/benchmarks/harness\"\n+)\n+\n+// BenchmarkFfmpeg runs ffmpeg in a container and records runtime.\n+// BenchmarkFfmpeg should run as root to drop caches.\n+func BenchmarkFfmpeg(b *testing.B) {\n+ machine, err := h.GetMachine()\n+ if err != nil {\n+ b.Fatalf(\"failed to get machine: %v\", err)\n+ }\n+ defer machine.CleanUp()\n+\n+ ctx := context.Background()\n+ container := machine.GetContainer(ctx, b)\n+ cmd := strings.Split(\"ffmpeg -i video.mp4 -c:v libx264 -preset veryslow output.mp4\", \" \")\n+\n+ b.ResetTimer()\n+ for i := 0; i < b.N; i++ {\n+ b.StopTimer()\n+ if err := harness.DropCaches(machine); err != nil {\n+ b.Skipf(\"failed to drop caches: %v. You probably need root.\", err)\n+ }\n+ b.StartTimer()\n+\n+ if _, err := container.Run(ctx, dockerutil.RunOpts{\n+ Image: \"benchmarks/ffmpeg\",\n+ }, cmd...); err != nil {\n+ b.Fatalf(\"failed to run container: %v\", err)\n+ }\n+ }\n+}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "test/benchmarks/media/media.go",
"diff": "+// Copyright 2020 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+// Package media holds benchmarks around media processing applications.\n+package media\n+\n+import (\n+ \"os\"\n+ \"testing\"\n+\n+ \"gvisor.dev/gvisor/test/benchmarks/harness\"\n+)\n+\n+var h harness.Harness\n+\n+// TestMain is the main method for package media.\n+func TestMain(m *testing.M) {\n+ h.Init()\n+ os.Exit(m.Run())\n+}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Port ffmpeg benchmark
PiperOrigin-RevId: 323383320 |
259,858 | 27.07.2020 10:38:24 | 25,200 | d0fd97541ad767975092d4ac3ecf3814797e931c | Clean-up bazel wrapper.
The bazel server was being started as the wrong user, leading to issues
where the container would suddenly exit during a build.
We can also simplify the waiting logic by starting the container in two
separate steps: those that must complete first, then the asynchronous bit. | [
{
"change_type": "MODIFY",
"old_path": "runsc/container/container_test.go",
"new_path": "runsc/container/container_test.go",
"diff": "@@ -643,7 +643,9 @@ func TestExec(t *testing.T) {\nif err != nil {\nt.Fatalf(\"error creating temporary directory: %v\", err)\n}\n- cmd := fmt.Sprintf(\"ln -s /bin/true %q/symlink && sleep 100\", dir)\n+ // Note that some shells may exec the final command in a sequence as\n+ // an optimization. We avoid this here by adding the exit 0.\n+ cmd := fmt.Sprintf(\"ln -s /bin/true %q/symlink && sleep 100 && exit 0\", dir)\nspec := testutil.NewSpecWithArgs(\"sh\", \"-c\", cmd)\n_, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf)\n"
},
{
"change_type": "MODIFY",
"old_path": "tools/bazel.mk",
"new_path": "tools/bazel.mk",
"diff": "# limitations under the License.\n# See base Makefile.\n+SHELL=/bin/bash -o pipefail\nBRANCH_NAME := $(shell (git branch --show-current 2>/dev/null || \\\ngit rev-parse --abbrev-ref HEAD 2>/dev/null) | \\\nxargs -n 1 basename 2>/dev/null)\n@@ -22,8 +23,11 @@ BRANCH_NAME := $(shell (git branch --show-current 2>/dev/null || \\\n# Bazel container configuration (see below).\nUSER ?= gvisor\nHASH ?= $(shell readlink -m $(CURDIR) | md5sum | cut -c1-8)\n+BUILDER_BASE := gvisor.dev/images/default\n+BUILDER_IMAGE := gvisor.dev/images/builder\n+BUILDER_NAME ?= gvisor-builder-$(HASH)\nDOCKER_NAME ?= gvisor-bazel-$(HASH)\n-DOCKER_PRIVILEGED ?= --privileged --network host\n+DOCKER_PRIVILEGED ?= --privileged\nBAZEL_CACHE := $(shell readlink -m ~/.cache/bazel/)\nGCLOUD_CONFIG := $(shell readlink -m ~/.config/gcloud/)\nDOCKER_SOCKET := /var/run/docker.sock\n@@ -32,17 +36,25 @@ DOCKER_SOCKET := /var/run/docker.sock\nOPTIONS += --test_output=errors --keep_going --verbose_failures=true\nBAZEL := bazel $(STARTUP_OPTIONS)\n-# Non-configurable.\n+# Basic options.\nUID := $(shell id -u ${USER})\nGID := $(shell id -g ${USER})\nUSERADD_OPTIONS :=\nFULL_DOCKER_RUN_OPTIONS := $(DOCKER_RUN_OPTIONS)\n+FULL_DOCKER_RUN_OPTIONS += --user $(UID):$(GID)\n+FULL_DOCKER_RUN_OPTIONS += --entrypoint \"\"\n+FULL_DOCKER_RUN_OPTIONS += --init\nFULL_DOCKER_RUN_OPTIONS += -v \"$(BAZEL_CACHE):$(BAZEL_CACHE)\"\nFULL_DOCKER_RUN_OPTIONS += -v \"$(GCLOUD_CONFIG):$(GCLOUD_CONFIG)\"\nFULL_DOCKER_RUN_OPTIONS += -v \"/tmp:/tmp\"\n+FULL_DOCKER_EXEC_OPTIONS := --user $(UID):$(GID)\n+FULL_DOCKER_EXEC_OPTIONS += -i\n+\n+# Add docker passthrough options.\nifneq ($(DOCKER_PRIVILEGED),)\nFULL_DOCKER_RUN_OPTIONS += -v \"$(DOCKER_SOCKET):$(DOCKER_SOCKET)\"\nFULL_DOCKER_RUN_OPTIONS += $(DOCKER_PRIVILEGED)\n+FULL_DOCKER_EXEC_OPTIONS += $(DOCKER_PRIVILEGED)\nDOCKER_GROUP := $(shell stat -c '%g' $(DOCKER_SOCKET))\nifneq ($(GID),$(DOCKER_GROUP))\nUSERADD_OPTIONS += --groups $(DOCKER_GROUP)\n@@ -50,7 +62,30 @@ GROUPADD_DOCKER += groupadd --gid $(DOCKER_GROUP) --non-unique docker-$(HASH) &&\nFULL_DOCKER_RUN_OPTIONS += --group-add $(DOCKER_GROUP)\nendif\nendif\n-SHELL=/bin/bash -o pipefail\n+\n+# Add KVM passthrough options.\n+ifneq (,$(wildcard /dev/kvm))\n+FULL_DOCKER_RUN_OPTIONS += --device=/dev/kvm\n+KVM_GROUP := $(shell stat -c '%g' /dev/kvm)\n+ifneq ($(GID),$(KVM_GROUP))\n+USERADD_OPTIONS += --groups $(KVM_GROUP)\n+GROUPADD_DOCKER += groupadd --gid $(KVM_GROUP) --non-unique kvm-$(HASH) &&\n+FULL_DOCKER_RUN_OPTIONS += --group-add $(KVM_GROUP)\n+endif\n+endif\n+\n+bazel-image: load-default\n+ @if docker ps --all | grep $(BUILDER_NAME); then docker rm -f $(BUILDER_NAME); fi\n+ docker run --user 0:0 --entrypoint \"\" --name $(BUILDER_NAME) \\\n+ $(BUILDER_BASE) \\\n+ sh -c \"groupadd --gid $(GID) --non-unique $(USER) && \\\n+ $(GROUPADD_DOCKER) \\\n+ useradd --uid $(UID) --non-unique --no-create-home \\\n+ --gid $(GID) $(USERADD_OPTIONS) -d $(HOME) $(USER) && \\\n+ if [[ -e /dev/kvm ]]; then chmod a+rw /dev/kvm; fi\"\n+ docker commit $(BUILDER_NAME) $(BUILDER_IMAGE)\n+ @docker rm -f $(BUILDER_NAME)\n+.PHONY: bazel-image\n##\n## Bazel helpers.\n@@ -65,41 +100,37 @@ SHELL=/bin/bash -o pipefail\n## GCLOUD_CONFIG - The gcloud config directory (detect: detected).\n## DOCKER_SOCKET - The Docker socket (default: detected).\n##\n-bazel-server-start: load-default ## Starts the bazel server.\n+bazel-server-start: bazel-image ## Starts the bazel server.\n@mkdir -p $(BAZEL_CACHE)\n@mkdir -p $(GCLOUD_CONFIG)\n@if docker ps --all | grep $(DOCKER_NAME); then 
docker rm -f $(DOCKER_NAME); fi\n- docker run -d --rm \\\n- --init \\\n- --name $(DOCKER_NAME) \\\n- --user 0:0 $(DOCKER_GROUP_OPTIONS) \\\n+ # This command runs a bazel server, and the container sticks around\n+ # until the bazel server exits. This should ensure that it does not\n+ # exit in the middle of running a build, but also it won't stick around\n+ # forever. The build commands wrap around an appropriate exec into the\n+ # container in order to perform work via the bazel client.\n+ docker run -d --rm --name $(DOCKER_NAME) \\\n-v \"$(CURDIR):$(CURDIR)\" \\\n--workdir \"$(CURDIR)\" \\\n- --entrypoint \"\" \\\n$(FULL_DOCKER_RUN_OPTIONS) \\\n- gvisor.dev/images/default \\\n- sh -c \"groupadd --gid $(GID) --non-unique $(USER) && \\\n- $(GROUPADD_DOCKER) \\\n- useradd --uid $(UID) --non-unique --no-create-home --gid $(GID) $(USERADD_OPTIONS) -d $(HOME) $(USER) && \\\n- $(BAZEL) version && \\\n- exec tail --pid=\\$$($(BAZEL) info server_pid) -f /dev/null\"\n- @while :; do if docker logs $(DOCKER_NAME) 2>/dev/null | grep \"Build label:\" >/dev/null; then break; fi; \\\n- if ! docker ps | grep $(DOCKER_NAME); then docker logs $(DOCKER_NAME); exit 1; else sleep 1; fi; done\n+ $(BUILDER_IMAGE) \\\n+ sh -c \"tail -f --pid=\\$$($(BAZEL) info server_pid)\"\n.PHONY: bazel-server-start\nbazel-shutdown: ## Shuts down a running bazel server.\n- @docker exec --user $(UID):$(GID) $(DOCKER_NAME) $(BAZEL) shutdown; rc=$$?; docker kill $(DOCKER_NAME) || [[ $$rc -ne 0 ]]\n+ @docker exec $(FULL_DOCKER_EXEC_OPTIONS) $(DOCKER_NAME) $(BAZEL) shutdown; \\\n+ rc=$$?; docker kill $(DOCKER_NAME) || [[ $$rc -ne 0 ]]\n.PHONY: bazel-shutdown\nbazel-alias: ## Emits an alias that can be used within the shell.\n- @echo \"alias bazel='docker exec --user $(UID):$(GID) -i $(DOCKER_NAME) bazel'\"\n+ @echo \"alias bazel='docker exec $(FULL_DOCKER_EXEC_OPTIONS) $(DOCKER_NAME) bazel'\"\n.PHONY: bazel-alias\nbazel-server: ## Ensures that the server exists. Used as an internal target.\n- @docker exec $(DOCKER_NAME) true || $(MAKE) bazel-server-start\n+ @docker exec $(FULL_DOCKER_EXEC_OPTIONS) $(DOCKER_NAME) true || $(MAKE) bazel-server-start\n.PHONY: bazel-server\n-build_cmd = docker exec --user $(UID):$(GID) -i $(DOCKER_NAME) sh -o pipefail -c '$(BAZEL) build $(OPTIONS) $(TARGETS)'\n+build_cmd = docker exec $(FULL_DOCKER_EXEC_OPTIONS) $(DOCKER_NAME) sh -o pipefail -c '$(BAZEL) build $(OPTIONS) $(TARGETS)'\nbuild_paths = $(build_cmd) 2>&1 \\\n| tee /proc/self/fd/2 \\\n@@ -126,9 +157,9 @@ sudo: bazel-server\n.PHONY: sudo\ntest: bazel-server\n- @docker exec --user $(UID):$(GID) -i $(DOCKER_NAME) $(BAZEL) test $(OPTIONS) $(TARGETS)\n+ @docker exec $(FULL_DOCKER_EXEC_OPTIONS) $(DOCKER_NAME) $(BAZEL) test $(OPTIONS) $(TARGETS)\n.PHONY: test\nquery: bazel-server\n- @docker exec --user $(UID):$(GID) -i $(DOCKER_NAME) $(BAZEL) query $(OPTIONS) '$(TARGETS)'\n+ @docker exec $(FULL_DOCKER_EXEC_OPTIONS) $(DOCKER_NAME) $(BAZEL) query $(OPTIONS) '$(TARGETS)'\n.PHONY: query\n"
}
] | Go | Apache License 2.0 | google/gvisor | Clean-up bazel wrapper.
The bazel server was being started as the wrong user, leading to issues
where the container would suddenly exit during a build.
We can also simplify the waiting logic by starting the container in two
separate steps: those that must complete first, then the asynchronous bit.
PiperOrigin-RevId: 323391161 |
259,985 | 27.07.2020 13:19:25 | 25,200 | c8fa685cb6b562e4d8fc05c7d01968e4e12234aa | Fix when FUSE tests need to be skipped due to sentry configuration. | [
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/mount.cc",
"new_path": "test/syscalls/linux/mount.cc",
"diff": "@@ -323,10 +323,7 @@ TEST(MountTest, RenameRemoveMountPoint) {\nTEST(MountTest, MountFuseFilesystemNoDevice) {\nSKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_ADMIN)));\n-\n- // Note(gvisor.dev/issue/3076) This won't work in the sentry until the new\n- // device registration is complete.\n- SKIP_IF(IsRunningWithVFS1() || IsRunningOnGvisor());\n+ SKIP_IF(IsRunningOnGvisor() && !IsFUSEEnabled());\nauto const dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());\nEXPECT_THAT(mount(\"\", dir.path().c_str(), \"fuse\", 0, \"\"),\n@@ -335,10 +332,7 @@ TEST(MountTest, MountFuseFilesystemNoDevice) {\nTEST(MountTest, MountFuseFilesystem) {\nSKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_ADMIN)));\n-\n- // Note(gvisor.dev/issue/3076) This won't work in the sentry until the new\n- // device registration is complete.\n- SKIP_IF(IsRunningWithVFS1() || IsRunningOnGvisor());\n+ SKIP_IF(IsRunningOnGvisor() && !IsFUSEEnabled());\nconst FileDescriptor fd =\nASSERT_NO_ERRNO_AND_VALUE(Open(\"/dev/fuse\", O_WRONLY));\n"
}
] | Go | Apache License 2.0 | google/gvisor | Fix when FUSE tests need to be skipped due to sentry configuration.
PiperOrigin-RevId: 323426851 |