author (int64, values 658 to 755k) | date (string, length 19) | timezone (int64, values -46,800 to 43.2k) | hash (string, length 40) | message (string, length 5 to 490) | mods (list) | language (20 classes) | license (3 classes) | repo (string, length 5 to 68) | original_message (string, length 12 to 491) |
---|---|---|---|---|---|---|---|---|---|
259,962 | 25.09.2020 19:32:38 | 25,200 | ebc81fadfc6797758d63f8290ad3a9c2659ddb49 | Add openat() to list of permitted syscalls in gotsan runs. | [
{
"change_type": "MODIFY",
"old_path": "runsc/fsgofer/filter/extra_filters_race.go",
"new_path": "runsc/fsgofer/filter/extra_filters_race.go",
"diff": "@@ -35,6 +35,7 @@ func instrumentationFilters() seccomp.SyscallRules {\nsyscall.SYS_MUNLOCK: {},\nsyscall.SYS_NANOSLEEP: {},\nsyscall.SYS_OPEN: {},\n+ syscall.SYS_OPENAT: {},\nsyscall.SYS_SET_ROBUST_LIST: {},\n// Used within glibc's malloc.\nsyscall.SYS_TIME: {},\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add openat() to list of permitted syscalls in gotsan runs.
PiperOrigin-RevId: 333853498 |
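The gotsan (Go race/TSAN) build needs a wider seccomp allowlist than a regular build because the race runtime issues extra syscalls, and newer toolchains open files with openat(2) rather than open(2), hence the added entry. Below is a minimal, self-contained sketch of that allowlist idea for linux/amd64; `SyscallRules` and the merge loop here are simplified stand-ins for illustration, not the real gvisor seccomp API.

```go
// Sketch of a race-build-only syscall allowlist (linux/amd64). In the real
// tree the equivalent file is guarded by the race build tag.
package main

import (
	"fmt"
	"syscall"
)

// SyscallRules maps an allowed syscall number to its (empty) argument rules.
// This is a simplified stand-in for the gvisor seccomp type.
type SyscallRules map[uintptr]struct{}

// instrumentationFilters returns syscalls the race-detector runtime needs in
// addition to the normal allowlist. The commit above adds openat(2).
func instrumentationFilters() SyscallRules {
	return SyscallRules{
		syscall.SYS_MUNLOCK:         {},
		syscall.SYS_NANOSLEEP:       {},
		syscall.SYS_OPEN:            {},
		syscall.SYS_OPENAT:          {}, // added by this commit
		syscall.SYS_SET_ROBUST_LIST: {},
		syscall.SYS_TIME:            {}, // used within glibc's malloc
	}
}

func main() {
	base := SyscallRules{syscall.SYS_READ: {}, syscall.SYS_WRITE: {}}
	// Merge the race-only filters into the base allowlist (assumed helper logic).
	for nr, r := range instrumentationFilters() {
		base[nr] = r
	}
	fmt.Printf("allowlist has %d syscalls\n", len(base))
}
```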
259,860 | 28.09.2020 16:09:27 | 25,200 | a0e0ba690f3f4946890010e12084db7f081d5bc7 | Support inotify in overlayfs.
Fixes | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/gofer/filesystem.go",
"new_path": "pkg/sentry/fsimpl/gofer/filesystem.go",
"diff": "@@ -1416,11 +1416,11 @@ func (fs *filesystem) SetStatAt(ctx context.Context, rp *vfs.ResolvingPath, opts\nfs.renameMuRUnlockAndCheckCaching(ctx, &ds)\nreturn err\n}\n- if err := d.setStat(ctx, rp.Credentials(), &opts, rp.Mount()); err != nil {\n+ err = d.setStat(ctx, rp.Credentials(), &opts, rp.Mount())\nfs.renameMuRUnlockAndCheckCaching(ctx, &ds)\n+ if err != nil {\nreturn err\n}\n- fs.renameMuRUnlockAndCheckCaching(ctx, &ds)\nif ev := vfs.InotifyEventFromStatMask(opts.Stat.Mask); ev != 0 {\nd.InotifyWithParent(ctx, ev, 0, vfs.InodeEvent)\n@@ -1556,11 +1556,11 @@ func (fs *filesystem) SetXattrAt(ctx context.Context, rp *vfs.ResolvingPath, opt\nfs.renameMuRUnlockAndCheckCaching(ctx, &ds)\nreturn err\n}\n- if err := d.setXattr(ctx, rp.Credentials(), &opts); err != nil {\n+ err = d.setXattr(ctx, rp.Credentials(), &opts)\nfs.renameMuRUnlockAndCheckCaching(ctx, &ds)\n+ if err != nil {\nreturn err\n}\n- fs.renameMuRUnlockAndCheckCaching(ctx, &ds)\nd.InotifyWithParent(ctx, linux.IN_ATTRIB, 0, vfs.InodeEvent)\nreturn nil\n@@ -1575,11 +1575,11 @@ func (fs *filesystem) RemoveXattrAt(ctx context.Context, rp *vfs.ResolvingPath,\nfs.renameMuRUnlockAndCheckCaching(ctx, &ds)\nreturn err\n}\n- if err := d.removeXattr(ctx, rp.Credentials(), name); err != nil {\n+ err = d.removeXattr(ctx, rp.Credentials(), name)\nfs.renameMuRUnlockAndCheckCaching(ctx, &ds)\n+ if err != nil {\nreturn err\n}\n- fs.renameMuRUnlockAndCheckCaching(ctx, &ds)\nd.InotifyWithParent(ctx, linux.IN_ATTRIB, 0, vfs.InodeEvent)\nreturn nil\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/overlay/directory.go",
"new_path": "pkg/sentry/fsimpl/overlay/directory.go",
"diff": "@@ -117,10 +117,12 @@ func (fd *directoryFD) Release(ctx context.Context) {\n// IterDirents implements vfs.FileDescriptionImpl.IterDirents.\nfunc (fd *directoryFD) IterDirents(ctx context.Context, cb vfs.IterDirentsCallback) error {\n+ d := fd.dentry()\n+ defer d.InotifyWithParent(ctx, linux.IN_ACCESS, 0, vfs.PathEvent)\n+\nfd.mu.Lock()\ndefer fd.mu.Unlock()\n- d := fd.dentry()\nif fd.dirents == nil {\nds, err := d.getDirents(ctx)\nif err != nil {\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/overlay/filesystem.go",
"new_path": "pkg/sentry/fsimpl/overlay/filesystem.go",
"diff": "@@ -499,7 +499,13 @@ func (fs *filesystem) doCreateAt(ctx context.Context, rp *vfs.ResolvingPath, dir\nif err := create(parent, name, childLayer == lookupLayerUpperWhiteout); err != nil {\nreturn err\n}\n+\nparent.dirents = nil\n+ ev := linux.IN_CREATE\n+ if dir {\n+ ev |= linux.IN_ISDIR\n+ }\n+ parent.watches.Notify(ctx, name, uint32(ev), 0 /* cookie */, vfs.InodeEvent, false /* unlinked */)\nreturn nil\n}\n@@ -631,6 +637,7 @@ func (fs *filesystem) LinkAt(ctx context.Context, rp *vfs.ResolvingPath, vd vfs.\n}\nreturn err\n}\n+ old.watches.Notify(ctx, \"\", linux.IN_ATTRIB, 0 /* cookie */, vfs.InodeEvent, false /* unlinked */)\nreturn nil\n})\n}\n@@ -975,6 +982,7 @@ func (fs *filesystem) createAndOpenLocked(ctx context.Context, rp *vfs.Resolving\n// just can't open it anymore for some reason.\nreturn nil, err\n}\n+ parent.watches.Notify(ctx, childName, linux.IN_CREATE, 0 /* cookie */, vfs.PathEvent, false /* unlinked */)\nreturn &fd.vfsfd, nil\n}\n@@ -1236,6 +1244,7 @@ func (fs *filesystem) RenameAt(ctx context.Context, rp *vfs.ResolvingPath, oldPa\n}\n}\n+ vfs.InotifyRename(ctx, &renamed.watches, &oldParent.watches, &newParent.watches, oldName, newName, renamed.isDir())\nreturn nil\n}\n@@ -1352,6 +1361,7 @@ func (fs *filesystem) RmdirAt(ctx context.Context, rp *vfs.ResolvingPath) error\ndelete(parent.children, name)\nds = appendDentry(ds, child)\nparent.dirents = nil\n+ parent.watches.Notify(ctx, name, linux.IN_DELETE|linux.IN_ISDIR, 0 /* cookie */, vfs.InodeEvent, true /* unlinked */)\nreturn nil\n}\n@@ -1359,12 +1369,25 @@ func (fs *filesystem) RmdirAt(ctx context.Context, rp *vfs.ResolvingPath) error\nfunc (fs *filesystem) SetStatAt(ctx context.Context, rp *vfs.ResolvingPath, opts vfs.SetStatOptions) error {\nvar ds *[]*dentry\nfs.renameMu.RLock()\n- defer fs.renameMuRUnlockAndCheckDrop(ctx, &ds)\nd, err := fs.resolveLocked(ctx, rp, &ds)\n+ if err != nil {\n+ fs.renameMuRUnlockAndCheckDrop(ctx, &ds)\n+ return err\n+ }\n+ err = d.setStatLocked(ctx, rp, opts)\n+ fs.renameMuRUnlockAndCheckDrop(ctx, &ds)\nif err != nil {\nreturn err\n}\n+ if ev := vfs.InotifyEventFromStatMask(opts.Stat.Mask); ev != 0 {\n+ d.InotifyWithParent(ctx, ev, 0 /* cookie */, vfs.InodeEvent)\n+ }\n+ return nil\n+}\n+\n+// Precondition: d.fs.renameMu must be held for reading.\n+func (d *dentry) setStatLocked(ctx context.Context, rp *vfs.ResolvingPath, opts vfs.SetStatOptions) error {\nmode := linux.FileMode(atomic.LoadUint32(&d.mode))\nif err := vfs.CheckSetStat(ctx, rp.Credentials(), &opts, mode, auth.KUID(atomic.LoadUint32(&d.uid)), auth.KGID(atomic.LoadUint32(&d.gid))); err != nil {\nreturn err\n@@ -1555,11 +1578,14 @@ func (fs *filesystem) UnlinkAt(ctx context.Context, rp *vfs.ResolvingPath) error\npanic(fmt.Sprintf(\"unrecoverable overlayfs inconsistency: failed to create whiteout during UnlinkAt: %v\", err))\n}\n+ var cw *vfs.Watches\nif child != nil {\nvfsObj.CommitDeleteDentry(ctx, &child.vfsd)\ndelete(parent.children, name)\nds = appendDentry(ds, child)\n+ cw = &child.watches\n}\n+ vfs.InotifyRemoveChild(ctx, cw, &parent.watches, name)\nparent.dirents = nil\nreturn nil\n}\n@@ -1636,13 +1662,20 @@ func (fs *filesystem) getXattr(ctx context.Context, d *dentry, creds *auth.Crede\nfunc (fs *filesystem) SetXattrAt(ctx context.Context, rp *vfs.ResolvingPath, opts vfs.SetXattrOptions) error {\nvar ds *[]*dentry\nfs.renameMu.RLock()\n- defer fs.renameMuRUnlockAndCheckDrop(ctx, &ds)\nd, err := fs.resolveLocked(ctx, rp, &ds)\n+ if err != nil {\n+ fs.renameMuRUnlockAndCheckDrop(ctx, &ds)\n+ return err\n+ 
}\n+\n+ err = fs.setXattrLocked(ctx, d, rp.Mount(), rp.Credentials(), &opts)\n+ fs.renameMuRUnlockAndCheckDrop(ctx, &ds)\nif err != nil {\nreturn err\n}\n- return fs.setXattrLocked(ctx, d, rp.Mount(), rp.Credentials(), &opts)\n+ d.InotifyWithParent(ctx, linux.IN_ATTRIB, 0 /* cookie */, vfs.InodeEvent)\n+ return nil\n}\n// Precondition: fs.renameMu must be locked.\n@@ -1673,13 +1706,20 @@ func (fs *filesystem) setXattrLocked(ctx context.Context, d *dentry, mnt *vfs.Mo\nfunc (fs *filesystem) RemoveXattrAt(ctx context.Context, rp *vfs.ResolvingPath, name string) error {\nvar ds *[]*dentry\nfs.renameMu.RLock()\n- defer fs.renameMuRUnlockAndCheckDrop(ctx, &ds)\nd, err := fs.resolveLocked(ctx, rp, &ds)\nif err != nil {\n+ fs.renameMuRUnlockAndCheckDrop(ctx, &ds)\nreturn err\n}\n- return fs.removeXattrLocked(ctx, d, rp.Mount(), rp.Credentials(), name)\n+ err = fs.removeXattrLocked(ctx, d, rp.Mount(), rp.Credentials(), name)\n+ fs.renameMuRUnlockAndCheckDrop(ctx, &ds)\n+ if err != nil {\n+ return err\n+ }\n+\n+ d.InotifyWithParent(ctx, linux.IN_ATTRIB, 0 /* cookie */, vfs.InodeEvent)\n+ return nil\n}\n// Precondition: fs.renameMu must be locked.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/overlay/non_directory.go",
"new_path": "pkg/sentry/fsimpl/overlay/non_directory.go",
"diff": "@@ -184,6 +184,9 @@ func (fd *nonDirectoryFD) SetStat(ctx context.Context, opts vfs.SetStatOptions)\nreturn err\n}\nd.updateAfterSetStatLocked(&opts)\n+ if ev := vfs.InotifyEventFromStatMask(opts.Stat.Mask); ev != 0 {\n+ d.InotifyWithParent(ctx, ev, 0, vfs.InodeEvent)\n+ }\nreturn nil\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/overlay/overlay.go",
"new_path": "pkg/sentry/fsimpl/overlay/overlay.go",
"diff": "@@ -462,6 +462,13 @@ type dentry struct {\nisMappable uint32\nlocks vfs.FileLocks\n+\n+ // watches is the set of inotify watches on the file repesented by this dentry.\n+ //\n+ // Note that hard links to the same file will not share the same set of\n+ // watches, due to the fact that we do not have inode structures in this\n+ // overlay implementation.\n+ watches vfs.Watches\n}\n// newDentry creates a new dentry. The dentry initially has no references; it\n@@ -521,6 +528,14 @@ func (d *dentry) checkDropLocked(ctx context.Context) {\nif atomic.LoadInt64(&d.refs) != 0 {\nreturn\n}\n+\n+ // Make sure that we do not lose watches on dentries that have not been\n+ // deleted. Note that overlayfs never calls VFS.InvalidateDentry(), so\n+ // d.vfsd.IsDead() indicates that d was deleted.\n+ if !d.vfsd.IsDead() && d.watches.Size() > 0 {\n+ return\n+ }\n+\n// Refs is still zero; destroy it.\nd.destroyLocked(ctx)\nreturn\n@@ -549,6 +564,8 @@ func (d *dentry) destroyLocked(ctx context.Context) {\nlowerVD.DecRef(ctx)\n}\n+ d.watches.HandleDeletion(ctx)\n+\nif d.parent != nil {\nd.parent.dirMu.Lock()\nif !d.vfsd.IsDead() {\n@@ -567,19 +584,36 @@ func (d *dentry) destroyLocked(ctx context.Context) {\n// InotifyWithParent implements vfs.DentryImpl.InotifyWithParent.\nfunc (d *dentry) InotifyWithParent(ctx context.Context, events uint32, cookie uint32, et vfs.EventType) {\n- // TODO(gvisor.dev/issue/1479): Implement inotify.\n+ if d.isDir() {\n+ events |= linux.IN_ISDIR\n+ }\n+\n+ // overlayfs never calls VFS.InvalidateDentry(), so d.vfsd.IsDead() indicates\n+ // that d was deleted.\n+ deleted := d.vfsd.IsDead()\n+\n+ d.fs.renameMu.RLock()\n+ // The ordering below is important, Linux always notifies the parent first.\n+ if d.parent != nil {\n+ d.parent.watches.Notify(ctx, d.name, events, cookie, et, deleted)\n+ }\n+ d.watches.Notify(ctx, \"\", events, cookie, et, deleted)\n+ d.fs.renameMu.RUnlock()\n}\n// Watches implements vfs.DentryImpl.Watches.\nfunc (d *dentry) Watches() *vfs.Watches {\n- // TODO(gvisor.dev/issue/1479): Implement inotify.\n- return nil\n+ return &d.watches\n}\n// OnZeroWatches implements vfs.DentryImpl.OnZeroWatches.\n-//\n-// TODO(gvisor.dev/issue/1479): Implement inotify.\n-func (d *dentry) OnZeroWatches(context.Context) {}\n+func (d *dentry) OnZeroWatches(ctx context.Context) {\n+ if atomic.LoadInt64(&d.refs) == 0 {\n+ d.fs.renameMu.Lock()\n+ d.checkDropLocked(ctx)\n+ d.fs.renameMu.Unlock()\n+ }\n+}\n// iterLayers invokes yield on each layer comprising d, from top to bottom. 
If\n// any call to yield returns false, iterLayer stops iteration.\n@@ -689,17 +723,33 @@ func (fd *fileDescription) GetXattr(ctx context.Context, opts vfs.GetXattrOption\n// SetXattr implements vfs.FileDescriptionImpl.SetXattr.\nfunc (fd *fileDescription) SetXattr(ctx context.Context, opts vfs.SetXattrOptions) error {\nfs := fd.filesystem()\n+ d := fd.dentry()\n+\nfs.renameMu.RLock()\n- defer fs.renameMu.RUnlock()\n- return fs.setXattrLocked(ctx, fd.dentry(), fd.vfsfd.Mount(), auth.CredentialsFromContext(ctx), &opts)\n+ err := fs.setXattrLocked(ctx, d, fd.vfsfd.Mount(), auth.CredentialsFromContext(ctx), &opts)\n+ fs.renameMu.RUnlock()\n+ if err != nil {\n+ return err\n+ }\n+\n+ d.InotifyWithParent(ctx, linux.IN_ATTRIB, 0, vfs.InodeEvent)\n+ return nil\n}\n// RemoveXattr implements vfs.FileDescriptionImpl.RemoveXattr.\nfunc (fd *fileDescription) RemoveXattr(ctx context.Context, name string) error {\nfs := fd.filesystem()\n+ d := fd.dentry()\n+\nfs.renameMu.RLock()\n- defer fs.renameMu.RUnlock()\n- return fs.removeXattrLocked(ctx, fd.dentry(), fd.vfsfd.Mount(), auth.CredentialsFromContext(ctx), name)\n+ err := fs.removeXattrLocked(ctx, d, fd.vfsfd.Mount(), auth.CredentialsFromContext(ctx), name)\n+ fs.renameMu.RUnlock()\n+ if err != nil {\n+ return err\n+ }\n+\n+ d.InotifyWithParent(ctx, linux.IN_ATTRIB, 0, vfs.InodeEvent)\n+ return nil\n}\n// LockPOSIX implements vfs.FileDescriptionImpl.LockPOSIX.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/tmpfs/filesystem.go",
"new_path": "pkg/sentry/fsimpl/tmpfs/filesystem.go",
"diff": "@@ -673,11 +673,11 @@ func (fs *filesystem) SetStatAt(ctx context.Context, rp *vfs.ResolvingPath, opts\nfs.mu.RUnlock()\nreturn err\n}\n- if err := d.inode.setStat(ctx, rp.Credentials(), &opts); err != nil {\n+ err = d.inode.setStat(ctx, rp.Credentials(), &opts)\nfs.mu.RUnlock()\n+ if err != nil {\nreturn err\n}\n- fs.mu.RUnlock()\nif ev := vfs.InotifyEventFromStatMask(opts.Stat.Mask); ev != 0 {\nd.InotifyWithParent(ctx, ev, 0, vfs.InodeEvent)\n@@ -822,11 +822,11 @@ func (fs *filesystem) SetXattrAt(ctx context.Context, rp *vfs.ResolvingPath, opt\nfs.mu.RUnlock()\nreturn err\n}\n- if err := d.inode.setXattr(rp.Credentials(), &opts); err != nil {\n+ err = d.inode.setXattr(rp.Credentials(), &opts)\nfs.mu.RUnlock()\n+ if err != nil {\nreturn err\n}\n- fs.mu.RUnlock()\nd.InotifyWithParent(ctx, linux.IN_ATTRIB, 0, vfs.InodeEvent)\nreturn nil\n@@ -840,11 +840,11 @@ func (fs *filesystem) RemoveXattrAt(ctx context.Context, rp *vfs.ResolvingPath,\nfs.mu.RUnlock()\nreturn err\n}\n- if err := d.inode.removeXattr(rp.Credentials(), name); err != nil {\n+ err = d.inode.removeXattr(rp.Credentials(), name)\nfs.mu.RUnlock()\n+ if err != nil {\nreturn err\n}\n- fs.mu.RUnlock()\nd.InotifyWithParent(ctx, linux.IN_ATTRIB, 0, vfs.InodeEvent)\nreturn nil\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/BUILD",
"new_path": "test/syscalls/BUILD",
"diff": "@@ -238,7 +238,7 @@ syscall_test(\nsyscall_test(\nsize = \"medium\",\n- add_overlay = False, # TODO(gvisor.dev/issue/317): enable when fixed.\n+ add_overlay = True,\ntest = \"//test/syscalls/linux:inotify_test\",\n)\n"
}
] | Go | Apache License 2.0 | google/gvisor | Support inotify in overlayfs.
Fixes #1479, #317.
PiperOrigin-RevId: 334258052 |
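A recurring pattern in this change is releasing the filesystem-wide lock before queuing inotify events, which is why the defer-based unlocks in the gofer, overlay, and tmpfs diffs are rewritten as explicit unlock-then-notify sequences. The sketch below shows only that shape with simplified stand-in types (`filesystem`, `dentry`, `InotifyWithParent`); it is not the real vfs code.

```go
package main

import (
	"fmt"
	"sync"
)

type dentry struct{ name string }

// InotifyWithParent stands in for vfs.DentryImpl.InotifyWithParent: it
// notifies watches on the dentry and on its parent.
func (d *dentry) InotifyWithParent(events uint32) {
	fmt.Printf("inotify: %s events=%#x\n", d.name, events)
}

type filesystem struct {
	mu sync.RWMutex // stands in for renameMu
}

const inAttrib = 0x4 // linux.IN_ATTRIB

// setStatAt shows the "unlock before notify" shape: the mutation happens
// under the lock, the lock is dropped unconditionally, and the inotify event
// is emitted only on success and only after the lock is released.
func (fs *filesystem) setStatAt(d *dentry, setStat func() error) error {
	fs.mu.RLock()
	err := setStat()
	fs.mu.RUnlock() // always unlock before notifying
	if err != nil {
		return err
	}
	d.InotifyWithParent(inAttrib)
	return nil
}

func main() {
	fs := &filesystem{}
	d := &dentry{name: "file.txt"}
	_ = fs.setStatAt(d, func() error { return nil })
}
```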
259,896 | 28.09.2020 16:37:44 | 25,200 | 237b761f9a61ad1a821320e68f5a71e7cda6b29e | Fix lingering of TCP socket in the initial state.
When the socket is set with SO_LINGER and close()'d in the initial state, it
should not linger and return immediately. | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/endpoint.go",
"new_path": "pkg/tcpip/transport/tcp/endpoint.go",
"diff": "@@ -927,7 +927,12 @@ func (e *endpoint) Readiness(mask waiter.EventMask) waiter.EventMask {\nresult := waiter.EventMask(0)\nswitch e.EndpointState() {\n- case StateInitial, StateBound, StateConnecting, StateSynSent, StateSynRecv:\n+ case StateInitial, StateBound:\n+ // This prevents blocking of new sockets which are not\n+ // connected when SO_LINGER is set.\n+ result |= waiter.EventHUp\n+\n+ case StateConnecting, StateSynSent, StateSynRecv:\n// Ready for nothing.\ncase StateClose, StateError, StateTimeWait:\n@@ -1098,6 +1103,8 @@ func (e *endpoint) closeNoShutdownLocked() {\ne.notifyProtocolGoroutine(notifyClose)\n} else {\ne.transitionToStateCloseLocked()\n+ // Notify that the endpoint is closed.\n+ e.waiterQueue.Notify(waiter.EventHUp)\n}\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "test/packetimpact/tests/tcp_linger_test.go",
"new_path": "test/packetimpact/tests/tcp_linger_test.go",
"diff": "@@ -251,3 +251,20 @@ func TestTCPLingerShutdownSendNonZeroTimeout(t *testing.T) {\n})\n}\n}\n+\n+func TestTCPLingerNonEstablished(t *testing.T) {\n+ dut := testbench.NewDUT(t)\n+ newFD := dut.Socket(t, unix.AF_INET, unix.SOCK_STREAM, unix.IPPROTO_TCP)\n+ dut.SetSockLingerOption(t, newFD, lingerDuration, true)\n+\n+ // As the socket is in the initial state, Close() should not linger\n+ // and return immediately.\n+ start := time.Now()\n+ dut.CloseWithErrno(context.Background(), t, newFD)\n+ diff := time.Since(start)\n+\n+ if diff > lingerDuration {\n+ t.Errorf(\"expected close to return within %s, but returned after %s\", lingerDuration, diff)\n+ }\n+ dut.TearDown()\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/tcp_socket.cc",
"new_path": "test/syscalls/linux/tcp_socket.cc",
"diff": "@@ -1643,6 +1643,36 @@ TEST_P(SimpleTcpSocketTest, GetSocketDetachFilter) {\nSyscallFailsWithErrno(ENOPROTOOPT));\n}\n+TEST_P(SimpleTcpSocketTest, CloseNonConnectedLingerOption) {\n+ FileDescriptor s =\n+ ASSERT_NO_ERRNO_AND_VALUE(Socket(GetParam(), SOCK_STREAM, IPPROTO_TCP));\n+\n+ constexpr int kLingerTimeout = 10; // Seconds.\n+\n+ // Set the SO_LINGER option.\n+ struct linger sl = {\n+ .l_onoff = 1,\n+ .l_linger = kLingerTimeout,\n+ };\n+ ASSERT_THAT(setsockopt(s.get(), SOL_SOCKET, SO_LINGER, &sl, sizeof(sl)),\n+ SyscallSucceeds());\n+\n+ struct pollfd poll_fd = {\n+ .fd = s.get(),\n+ .events = POLLHUP,\n+ };\n+ constexpr int kPollTimeoutMs = 0;\n+ ASSERT_THAT(RetryEINTR(poll)(&poll_fd, 1, kPollTimeoutMs),\n+ SyscallSucceedsWithValue(1));\n+\n+ auto const start_time = absl::Now();\n+ EXPECT_THAT(close(s.release()), SyscallSucceeds());\n+ auto const end_time = absl::Now();\n+\n+ // Close() should not linger and return immediately.\n+ ASSERT_LT((end_time - start_time), absl::Seconds(kLingerTimeout));\n+}\n+\nINSTANTIATE_TEST_SUITE_P(AllInetTests, SimpleTcpSocketTest,\n::testing::Values(AF_INET, AF_INET6));\n"
}
] | Go | Apache License 2.0 | google/gvisor | Fix lingering of TCP socket in the initial state.
When the socket is set with SO_LINGER and close()'d in the initial state, it
should not linger and return immediately.
PiperOrigin-RevId: 334263149 |
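The core of the fix is the Readiness change: sockets in the initial or bound state now report a hang-up event, so a close() with SO_LINGER set returns immediately instead of lingering on a connection that never existed. A minimal sketch of that readiness switch follows, with simplified stand-ins for the tcpip state and event types.

```go
package main

import "fmt"

type endpointState int

const (
	stateInitial endpointState = iota
	stateBound
	stateConnecting
	stateSynSent
	stateSynRecv
	stateEstablished
)

type eventMask uint64

const (
	eventIn  eventMask = 1 << iota // readable
	eventOut                       // writable
	eventHUp                       // hang-up / closed
)

// readiness mirrors the shape of endpoint.Readiness after the fix: new,
// unconnected sockets immediately report EventHUp for SO_LINGER purposes,
// while connecting sockets are ready for nothing.
func readiness(s endpointState) eventMask {
	var result eventMask
	switch s {
	case stateInitial, stateBound:
		// Prevents blocking of new, unconnected sockets when SO_LINGER is set.
		result |= eventHUp
	case stateConnecting, stateSynSent, stateSynRecv:
		// Ready for nothing.
	case stateEstablished:
		result |= eventIn | eventOut
	}
	return result
}

func main() {
	fmt.Printf("initial: %#x, connecting: %#x\n", readiness(stateInitial), readiness(stateConnecting))
}
```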
259,885 | 28.09.2020 16:38:39 | 25,200 | ba44298a390c69dcf33ae591b9ddc6b3514cc9b3 | Don't leak dentries returned by sockfs.NewDentry(). | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/hostinet/socket_vfs2.go",
"new_path": "pkg/sentry/socket/hostinet/socket_vfs2.go",
"diff": "@@ -52,6 +52,7 @@ var _ = socket.SocketVFS2(&socketVFS2{})\nfunc newVFS2Socket(t *kernel.Task, family int, stype linux.SockType, protocol int, fd int, flags uint32) (*vfs.FileDescription, *syserr.Error) {\nmnt := t.Kernel().SocketMount()\nd := sockfs.NewDentry(t.Credentials(), mnt)\n+ defer d.DecRef(t)\ns := &socketVFS2{\nsocketOpsCommon: socketOpsCommon{\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/netlink/provider_vfs2.go",
"new_path": "pkg/sentry/socket/netlink/provider_vfs2.go",
"diff": "@@ -52,6 +52,7 @@ func (*socketProviderVFS2) Socket(t *kernel.Task, stype linux.SockType, protocol\nvfsfd := &s.vfsfd\nmnt := t.Kernel().SocketMount()\nd := sockfs.NewDentry(t.Credentials(), mnt)\n+ defer d.DecRef(t)\nif err := vfsfd.Init(s, linux.O_RDWR, mnt, d, &vfs.FileDescriptionOptions{\nDenyPRead: true,\nDenyPWrite: true,\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/unix/unix_vfs2.go",
"new_path": "pkg/sentry/socket/unix/unix_vfs2.go",
"diff": "@@ -55,6 +55,7 @@ var _ = socket.SocketVFS2(&SocketVFS2{})\nfunc NewSockfsFile(t *kernel.Task, ep transport.Endpoint, stype linux.SockType) (*vfs.FileDescription, *syserr.Error) {\nmnt := t.Kernel().SocketMount()\nd := sockfs.NewDentry(t.Credentials(), mnt)\n+ defer d.DecRef(t)\nfd, err := NewFileDescription(ep, stype, linux.O_RDWR, mnt, d, &vfs.FileLocks{})\nif err != nil {\n"
}
] | Go | Apache License 2.0 | google/gvisor | Don't leak dentries returned by sockfs.NewDentry().
PiperOrigin-RevId: 334263322 |
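sockfs.NewDentry returns a dentry holding a reference owned by the caller, and the file description takes its own reference during initialization, so without the added `defer d.DecRef(t)` the caller's reference is never dropped and the dentry leaks. The toy refcounting sketch below illustrates that ownership hand-off; the types are stand-ins, not the real vfs API.

```go
package main

import "fmt"

type dentry struct {
	name string
	refs int
}

// newDentry returns a dentry with a single reference owned by the caller.
func newDentry(name string) *dentry {
	return &dentry{name: name, refs: 1}
}

func (d *dentry) IncRef() { d.refs++ }

func (d *dentry) DecRef() {
	d.refs--
	if d.refs == 0 {
		fmt.Printf("destroying dentry %q\n", d.name)
	}
}

type fileDescription struct{ d *dentry }

// initFD models vfs.FileDescription.Init: it takes its own reference on the
// dentry for the lifetime of the file description.
func initFD(d *dentry) *fileDescription {
	d.IncRef()
	return &fileDescription{d: d}
}

func (fd *fileDescription) release() { fd.d.DecRef() }

func newSocketFD() *fileDescription {
	d := newDentry("socket")
	defer d.DecRef() // drop the constructor's reference; the FD holds its own
	return initFD(d)
}

func main() {
	fd := newSocketFD()
	fd.release() // refs drop to zero and the dentry is destroyed
}
```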
259,964 | 29.09.2020 11:27:43 | 25,200 | f15182243e508b0754d59350a886397e2a0ba0b2 | Discard IP fragments as soon as it expires
Currently expired IP fragments are discarded only if another fragment for the
same IP datagram is received after timeout or the total size of the fragment
queue exceeded a predefined value.
Test: fragmentation.TestReassemblingTimeout
Fixes | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/faketime/faketime.go",
"new_path": "pkg/tcpip/faketime/faketime.go",
"diff": "@@ -24,6 +24,26 @@ import (\n\"gvisor.dev/gvisor/pkg/tcpip\"\n)\n+// NullClock implements a clock that never advances.\n+type NullClock struct{}\n+\n+var _ tcpip.Clock = (*NullClock)(nil)\n+\n+// NowNanoseconds implements tcpip.Clock.NowNanoseconds.\n+func (*NullClock) NowNanoseconds() int64 {\n+ return 0\n+}\n+\n+// NowMonotonic implements tcpip.Clock.NowMonotonic.\n+func (*NullClock) NowMonotonic() int64 {\n+ return 0\n+}\n+\n+// AfterFunc implements tcpip.Clock.AfterFunc.\n+func (*NullClock) AfterFunc(time.Duration, func()) tcpip.Timer {\n+ return nil\n+}\n+\n// ManualClock implements tcpip.Clock and only advances manually with Advance\n// method.\ntype ManualClock struct {\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/fragmentation/BUILD",
"new_path": "pkg/tcpip/network/fragmentation/BUILD",
"diff": "@@ -43,5 +43,6 @@ go_test(\nlibrary = \":fragmentation\",\ndeps = [\n\"//pkg/tcpip/buffer\",\n+ \"//pkg/tcpip/faketime\",\n],\n)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/fragmentation/fragmentation.go",
"new_path": "pkg/tcpip/network/fragmentation/fragmentation.go",
"diff": "@@ -81,6 +81,8 @@ type Fragmentation struct {\nsize int\ntimeout time.Duration\nblockSize uint16\n+ clock tcpip.Clock\n+ releaseJob *tcpip.Job\n}\n// NewFragmentation creates a new Fragmentation.\n@@ -97,7 +99,7 @@ type Fragmentation struct {\n// reassemblingTimeout specifies the maximum time allowed to reassemble a packet.\n// Fragments are lazily evicted only when a new a packet with an\n// already existing fragmentation-id arrives after the timeout.\n-func NewFragmentation(blockSize uint16, highMemoryLimit, lowMemoryLimit int, reassemblingTimeout time.Duration) *Fragmentation {\n+func NewFragmentation(blockSize uint16, highMemoryLimit, lowMemoryLimit int, reassemblingTimeout time.Duration, clock tcpip.Clock) *Fragmentation {\nif lowMemoryLimit >= highMemoryLimit {\nlowMemoryLimit = highMemoryLimit\n}\n@@ -110,13 +112,17 @@ func NewFragmentation(blockSize uint16, highMemoryLimit, lowMemoryLimit int, rea\nblockSize = minBlockSize\n}\n- return &Fragmentation{\n+ f := &Fragmentation{\nreassemblers: make(map[FragmentID]*reassembler),\nhighLimit: highMemoryLimit,\nlowLimit: lowMemoryLimit,\ntimeout: reassemblingTimeout,\nblockSize: blockSize,\n+ clock: clock,\n}\n+ f.releaseJob = tcpip.NewJob(f.clock, &f.mu, f.releaseReassemblersLocked)\n+\n+ return f\n}\n// Process processes an incoming fragment belonging to an ID and returns a\n@@ -155,15 +161,17 @@ func (f *Fragmentation) Process(\nf.mu.Lock()\nr, ok := f.reassemblers[id]\n- if ok && r.tooOld(f.timeout) {\n- // This is very likely to be an id-collision or someone performing a slow-rate attack.\n- f.release(r)\n- ok = false\n- }\nif !ok {\n- r = newReassembler(id)\n+ r = newReassembler(id, f.clock)\nf.reassemblers[id] = r\n+ wasEmpty := f.rList.Empty()\nf.rList.PushFront(r)\n+ if wasEmpty {\n+ // If we have just pushed a first reassembler into an empty list, we\n+ // should kickstart the release job. The release job will keep\n+ // rescheduling itself until the list becomes empty.\n+ f.releaseReassemblersLocked()\n+ }\n}\nf.mu.Unlock()\n@@ -211,3 +219,27 @@ func (f *Fragmentation) release(r *reassembler) {\nf.size = 0\n}\n}\n+\n+// releaseReassemblersLocked releases already-expired reassemblers, then\n+// schedules the job to call back itself for the remaining reassemblers if\n+// any. This function must be called with f.mu locked.\n+func (f *Fragmentation) releaseReassemblersLocked() {\n+ now := f.clock.NowMonotonic()\n+ for {\n+ // The reassembler at the end of the list is the oldest.\n+ r := f.rList.Back()\n+ if r == nil {\n+ // The list is empty.\n+ break\n+ }\n+ elapsed := time.Duration(now-r.creationTime) * time.Nanosecond\n+ if f.timeout > elapsed {\n+ // If the oldest reassembler has not expired, schedule the release\n+ // job so that this function is called back when it has expired.\n+ f.releaseJob.Schedule(f.timeout - elapsed)\n+ break\n+ }\n+ // If the oldest reassembler has already expired, release it.\n+ f.release(r)\n+ }\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/fragmentation/fragmentation_test.go",
"new_path": "pkg/tcpip/network/fragmentation/fragmentation_test.go",
"diff": "@@ -21,6 +21,7 @@ import (\n\"time\"\n\"gvisor.dev/gvisor/pkg/tcpip/buffer\"\n+ \"gvisor.dev/gvisor/pkg/tcpip/faketime\"\n)\n// vv is a helper to build VectorisedView from different strings.\n@@ -95,7 +96,7 @@ var processTestCases = []struct {\nfunc TestFragmentationProcess(t *testing.T) {\nfor _, c := range processTestCases {\nt.Run(c.comment, func(t *testing.T) {\n- f := NewFragmentation(minBlockSize, 1024, 512, DefaultReassembleTimeout)\n+ f := NewFragmentation(minBlockSize, 1024, 512, DefaultReassembleTimeout, &faketime.NullClock{})\nfirstFragmentProto := c.in[0].proto\nfor i, in := range c.in {\nvv, proto, done, err := f.Process(in.id, in.first, in.last, in.more, in.proto, in.vv)\n@@ -131,25 +132,126 @@ func TestFragmentationProcess(t *testing.T) {\n}\nfunc TestReassemblingTimeout(t *testing.T) {\n- timeout := time.Millisecond\n- f := NewFragmentation(minBlockSize, 1024, 512, timeout)\n- // Send first fragment with id = 0, first = 0, last = 0, and more = true.\n- f.Process(FragmentID{}, 0, 0, true, 0xFF, vv(1, \"0\"))\n- // Sleep more than the timeout.\n- time.Sleep(2 * timeout)\n- // Send another fragment that completes a packet.\n- // However, no packet should be reassembled because the fragment arrived after the timeout.\n- _, _, done, err := f.Process(FragmentID{}, 1, 1, false, 0xFF, vv(1, \"1\"))\n+ const (\n+ reassemblyTimeout = time.Millisecond\n+ protocol = 0xff\n+ )\n+\n+ type fragment struct {\n+ first uint16\n+ last uint16\n+ more bool\n+ data string\n+ }\n+\n+ type event struct {\n+ // name is a nickname of this event.\n+ name string\n+\n+ // clockAdvance is a duration to advance the clock. The clock advances\n+ // before a fragment specified in the fragment field is processed.\n+ clockAdvance time.Duration\n+\n+ // fragment is a fragment to process. 
This can be nil if there is no\n+ // fragment to process.\n+ fragment *fragment\n+\n+ // expectDone is true if the fragmentation instance should report the\n+ // reassembly is done after the fragment is processd.\n+ expectDone bool\n+\n+ // sizeAfterEvent is the expected size of the fragmentation instance after\n+ // the event.\n+ sizeAfterEvent int\n+ }\n+\n+ half1 := &fragment{first: 0, last: 0, more: true, data: \"0\"}\n+ half2 := &fragment{first: 1, last: 1, more: false, data: \"1\"}\n+\n+ tests := []struct {\n+ name string\n+ events []event\n+ }{\n+ {\n+ name: \"half1 and half2 are reassembled successfully\",\n+ events: []event{\n+ {\n+ name: \"half1\",\n+ fragment: half1,\n+ expectDone: false,\n+ sizeAfterEvent: 1,\n+ },\n+ {\n+ name: \"half2\",\n+ fragment: half2,\n+ expectDone: true,\n+ sizeAfterEvent: 0,\n+ },\n+ },\n+ },\n+ {\n+ name: \"half1 timeout, half2 timeout\",\n+ events: []event{\n+ {\n+ name: \"half1\",\n+ fragment: half1,\n+ expectDone: false,\n+ sizeAfterEvent: 1,\n+ },\n+ {\n+ name: \"half1 just before reassembly timeout\",\n+ clockAdvance: reassemblyTimeout - 1,\n+ sizeAfterEvent: 1,\n+ },\n+ {\n+ name: \"half1 reassembly timeout\",\n+ clockAdvance: 1,\n+ sizeAfterEvent: 0,\n+ },\n+ {\n+ name: \"half2\",\n+ fragment: half2,\n+ expectDone: false,\n+ sizeAfterEvent: 1,\n+ },\n+ {\n+ name: \"half2 just before reassembly timeout\",\n+ clockAdvance: reassemblyTimeout - 1,\n+ sizeAfterEvent: 1,\n+ },\n+ {\n+ name: \"half2 reassembly timeout\",\n+ clockAdvance: 1,\n+ sizeAfterEvent: 0,\n+ },\n+ },\n+ },\n+ }\n+ for _, test := range tests {\n+ t.Run(test.name, func(t *testing.T) {\n+ clock := faketime.NewManualClock()\n+ f := NewFragmentation(minBlockSize, HighFragThreshold, LowFragThreshold, reassemblyTimeout, clock)\n+ for _, event := range test.events {\n+ clock.Advance(event.clockAdvance)\n+ if frag := event.fragment; frag != nil {\n+ _, _, done, err := f.Process(FragmentID{}, frag.first, frag.last, frag.more, protocol, vv(len(frag.data), frag.data))\nif err != nil {\n- t.Fatalf(\"f.Process(0, 1, 1, false, 0xFF, vv(1, \\\"1\\\")) failed: %v\", err)\n+ t.Fatalf(\"%s: f.Process failed: %s\", event.name, err)\n}\n- if done {\n- t.Errorf(\"Fragmentation does not respect the reassembling timeout.\")\n+ if done != event.expectDone {\n+ t.Fatalf(\"%s: got done = %t, want = %t\", event.name, done, event.expectDone)\n+ }\n+ }\n+ if got, want := f.size, event.sizeAfterEvent; got != want {\n+ t.Errorf(\"%s: got f.size = %d, want = %d\", event.name, got, want)\n+ }\n+ }\n+ })\n}\n}\nfunc TestMemoryLimits(t *testing.T) {\n- f := NewFragmentation(minBlockSize, 3, 1, DefaultReassembleTimeout)\n+ f := NewFragmentation(minBlockSize, 3, 1, DefaultReassembleTimeout, &faketime.NullClock{})\n// Send first fragment with id = 0.\nf.Process(FragmentID{ID: 0}, 0, 0, true, 0xFF, vv(1, \"0\"))\n// Send first fragment with id = 1.\n@@ -173,7 +275,7 @@ func TestMemoryLimits(t *testing.T) {\n}\nfunc TestMemoryLimitsIgnoresDuplicates(t *testing.T) {\n- f := NewFragmentation(minBlockSize, 1, 0, DefaultReassembleTimeout)\n+ f := NewFragmentation(minBlockSize, 1, 0, DefaultReassembleTimeout, &faketime.NullClock{})\n// Send first fragment with id = 0.\nf.Process(FragmentID{}, 0, 0, true, 0xFF, vv(1, \"0\"))\n// Send the same packet again.\n@@ -268,7 +370,7 @@ func TestErrors(t *testing.T) {\nfor _, test := range tests {\nt.Run(test.name, func(t *testing.T) {\n- f := NewFragmentation(test.blockSize, HighFragThreshold, LowFragThreshold, DefaultReassembleTimeout)\n+ f := NewFragmentation(test.blockSize, 
HighFragThreshold, LowFragThreshold, DefaultReassembleTimeout, &faketime.NullClock{})\n_, _, done, err := f.Process(FragmentID{}, test.first, test.last, test.more, 0, vv(len(test.data), test.data))\nif !errors.Is(err, test.err) {\nt.Errorf(\"got Process(_, %d, %d, %t, _, %q) = (_, _, _, %v), want = (_, _, _, %v)\", test.first, test.last, test.more, test.data, err, test.err)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/fragmentation/reassembler.go",
"new_path": "pkg/tcpip/network/fragmentation/reassembler.go",
"diff": "@@ -18,9 +18,9 @@ import (\n\"container/heap\"\n\"fmt\"\n\"math\"\n- \"time\"\n\"gvisor.dev/gvisor/pkg/sync\"\n+ \"gvisor.dev/gvisor/pkg/tcpip\"\n\"gvisor.dev/gvisor/pkg/tcpip/buffer\"\n)\n@@ -40,15 +40,15 @@ type reassembler struct {\ndeleted int\nheap fragHeap\ndone bool\n- creationTime time.Time\n+ creationTime int64\n}\n-func newReassembler(id FragmentID) *reassembler {\n+func newReassembler(id FragmentID, clock tcpip.Clock) *reassembler {\nr := &reassembler{\nid: id,\nholes: make([]hole, 0, 16),\nheap: make(fragHeap, 0, 8),\n- creationTime: time.Now(),\n+ creationTime: clock.NowMonotonic(),\n}\nr.holes = append(r.holes, hole{\nfirst: 0,\n@@ -116,10 +116,6 @@ func (r *reassembler) process(first, last uint16, more bool, proto uint8, vv buf\nreturn res, r.proto, true, consumed, nil\n}\n-func (r *reassembler) tooOld(timeout time.Duration) bool {\n- return time.Now().Sub(r.creationTime) > timeout\n-}\n-\nfunc (r *reassembler) checkDoneOrMark() bool {\nr.mu.Lock()\nprev := r.done\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/fragmentation/reassembler_test.go",
"new_path": "pkg/tcpip/network/fragmentation/reassembler_test.go",
"diff": "@@ -18,6 +18,8 @@ import (\n\"math\"\n\"reflect\"\n\"testing\"\n+\n+ \"gvisor.dev/gvisor/pkg/tcpip/faketime\"\n)\ntype updateHolesInput struct {\n@@ -94,7 +96,7 @@ var holesTestCases = []struct {\nfunc TestUpdateHoles(t *testing.T) {\nfor _, c := range holesTestCases {\n- r := newReassembler(FragmentID{})\n+ r := newReassembler(FragmentID{}, &faketime.NullClock{})\nfor _, i := range c.in {\nr.updateHoles(i.first, i.last, i.more)\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/ipv4/ipv4.go",
"new_path": "pkg/tcpip/network/ipv4/ipv4.go",
"diff": "@@ -804,6 +804,6 @@ func NewProtocol(s *stack.Stack) stack.NetworkProtocol {\nids: ids,\nhashIV: hashIV,\ndefaultTTL: DefaultTTL,\n- fragmentation: fragmentation.NewFragmentation(fragmentblockSize, fragmentation.HighFragThreshold, fragmentation.LowFragThreshold, fragmentation.DefaultReassembleTimeout),\n+ fragmentation: fragmentation.NewFragmentation(fragmentblockSize, fragmentation.HighFragThreshold, fragmentation.LowFragThreshold, fragmentation.DefaultReassembleTimeout, s.Clock()),\n}\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/ipv6/icmp_test.go",
"new_path": "pkg/tcpip/network/ipv6/icmp_test.go",
"diff": "@@ -1236,7 +1236,10 @@ func TestLinkAddressRequest(t *testing.T) {\n}\nfor _, test := range tests {\n- p := NewProtocol(nil)\n+ s := stack.New(stack.Options{\n+ NetworkProtocols: []stack.NetworkProtocolFactory{NewProtocol},\n+ })\n+ p := s.NetworkProtocolInstance(ProtocolNumber)\nlinkRes, ok := p.(stack.LinkAddressResolver)\nif !ok {\nt.Fatalf(\"expected IPv6 protocol to implement stack.LinkAddressResolver\")\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/ipv6/ipv6.go",
"new_path": "pkg/tcpip/network/ipv6/ipv6.go",
"diff": "@@ -1306,7 +1306,7 @@ func NewProtocolWithOptions(opts Options) stack.NetworkProtocolFactory {\nreturn func(s *stack.Stack) stack.NetworkProtocol {\np := &protocol{\nstack: s,\n- fragmentation: fragmentation.NewFragmentation(header.IPv6FragmentExtHdrFragmentOffsetBytesPerUnit, fragmentation.HighFragThreshold, fragmentation.LowFragThreshold, fragmentation.DefaultReassembleTimeout),\n+ fragmentation: fragmentation.NewFragmentation(header.IPv6FragmentExtHdrFragmentOffsetBytesPerUnit, fragmentation.HighFragThreshold, fragmentation.LowFragThreshold, fragmentation.DefaultReassembleTimeout, s.Clock()),\nndpDisp: opts.NDPDisp,\nndpConfigs: opts.NDPConfigs,\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/udp/udp_test.go",
"new_path": "pkg/tcpip/transport/udp/udp_test.go",
"diff": "@@ -1507,13 +1507,19 @@ func TestTTL(t *testing.T) {\nif flow.isMulticast() {\nwantTTL = multicastTTL\n} else {\n- var p stack.NetworkProtocol\n+ var p stack.NetworkProtocolFactory\n+ var n tcpip.NetworkProtocolNumber\nif flow.isV4() {\n- p = ipv4.NewProtocol(nil)\n+ p = ipv4.NewProtocol\n+ n = ipv4.ProtocolNumber\n} else {\n- p = ipv6.NewProtocol(nil)\n+ p = ipv6.NewProtocol\n+ n = ipv6.ProtocolNumber\n}\n- ep := p.NewEndpoint(&testInterface{}, nil, nil, nil)\n+ s := stack.New(stack.Options{\n+ NetworkProtocols: []stack.NetworkProtocolFactory{p},\n+ })\n+ ep := s.NetworkProtocolInstance(n).NewEndpoint(&testInterface{}, nil, nil, nil)\nwantTTL = ep.DefaultTTL()\nep.Close()\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Discard IP fragments as soon as it expires
Currently expired IP fragments are discarded only if another fragment for the
same IP datagram is received after timeout or the total size of the fragment
queue exceeded a predefined value.
Test: fragmentation.TestReassemblingTimeout
Fixes #3960
PiperOrigin-RevId: 334423710 |
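Instead of lazily evicting an expired reassembler only when a colliding fragment arrives, the fragmentation code now schedules a release job against an injected clock: when the first reassembler lands in an empty list, the job runs, frees anything already expired, and reschedules itself for the oldest survivor. The sketch below reproduces that control flow with the standard library's time.AfterFunc in place of tcpip.Clock/tcpip.Job, so treat it as an illustration of the idea rather than the real package.

```go
package main

import (
	"container/list"
	"fmt"
	"sync"
	"time"
)

type reassembler struct {
	id       int
	creation time.Time
}

type fragmentation struct {
	mu      sync.Mutex
	rList   *list.List // front = newest, back = oldest
	timeout time.Duration
}

func newFragmentation(timeout time.Duration) *fragmentation {
	return &fragmentation{rList: list.New(), timeout: timeout}
}

func (f *fragmentation) add(id int) {
	f.mu.Lock()
	defer f.mu.Unlock()
	wasEmpty := f.rList.Len() == 0
	f.rList.PushFront(&reassembler{id: id, creation: time.Now()})
	if wasEmpty {
		// Kickstart the release job; it reschedules itself while entries remain.
		f.releaseExpiredLocked()
	}
}

// releaseExpiredLocked releases already-expired reassemblers, then schedules
// itself to run again when the oldest remaining one will expire.
// Precondition: f.mu is held.
func (f *fragmentation) releaseExpiredLocked() {
	now := time.Now()
	for {
		back := f.rList.Back() // oldest reassembler
		if back == nil {
			return // list is empty; nothing to schedule
		}
		r := back.Value.(*reassembler)
		if elapsed := now.Sub(r.creation); elapsed < f.timeout {
			// Oldest entry has not expired yet; call back when it will have.
			time.AfterFunc(f.timeout-elapsed, func() {
				f.mu.Lock()
				defer f.mu.Unlock()
				f.releaseExpiredLocked()
			})
			return
		}
		fmt.Printf("releasing expired reassembler %d\n", r.id)
		f.rList.Remove(back)
	}
}

func main() {
	f := newFragmentation(50 * time.Millisecond)
	f.add(1)
	time.Sleep(100 * time.Millisecond) // allow the release job to fire
}
```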
259,985 | 29.09.2020 12:32:32 | 25,200 | 44c7d550747a61baa6a85643de439fa45c2b9633 | Support embedded fields in go-marshal. | [
{
"change_type": "MODIFY",
"old_path": "tools/go_marshal/README.md",
"new_path": "tools/go_marshal/README.md",
"diff": "@@ -113,3 +113,18 @@ The following are some guidelines for modifying the `go_marshal` tool:\n- No runtime reflection in the code generated for the marshallable interface.\nThe entire point of the tool is to avoid runtime reflection. The generated\ntests may use reflection.\n+\n+## Debugging\n+\n+To enable debugging output from the go-marshal tool, use one of the following\n+options, depending on how go-marshal is being invoked:\n+\n+- Pass `--define gomarshal=verbose` to the bazel command. Note that this can\n+ generate a lot of output depending on what's being compiled, as this will\n+ enable debugging for all packages built by the command.\n+\n+- Set `marshal_debug = True` on the top-level `go_library` BUILD rule.\n+\n+- Set `debug = True` on the `go_marshal` BUILD rule.\n+\n+- Pass `-debug` to the go-marshal tool invocation.\n"
},
{
"change_type": "MODIFY",
"old_path": "tools/go_marshal/gomarshal/generator_interfaces.go",
"new_path": "tools/go_marshal/gomarshal/generator_interfaces.go",
"diff": "@@ -43,8 +43,8 @@ type interfaceGenerator struct {\n// of t's interfaces.\nms map[string]struct{}\n- // as records embedded fields in t that are potentially not packed. The key\n- // is the accessor for the field.\n+ // as records fields in t that are potentially not packed. The key is the\n+ // accessor for the field.\nas map[string]struct{}\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "tools/go_marshal/gomarshal/generator_interfaces_struct.go",
"new_path": "tools/go_marshal/gomarshal/generator_interfaces_struct.go",
"diff": "@@ -50,12 +50,6 @@ func (g *interfaceGenerator) areFieldsPackedExpression() (string, bool) {\n// checks are done ahead of time and in one place so we can make assumptions\n// later.\nfunc (g *interfaceGenerator) validateStruct(ts *ast.TypeSpec, st *ast.StructType) {\n- forEachStructField(st, func(f *ast.Field) {\n- if len(f.Names) == 0 {\n- g.abortAt(f.Pos(), \"Cannot marshal structs with embedded fields, give the field a name; use '_' for anonymous fields such as padding fields\")\n- }\n- })\n-\nforEachStructField(st, func(f *ast.Field) {\nfieldDispatcher{\nprimitive: func(_, t *ast.Ident) {\n@@ -101,7 +95,7 @@ func (g *interfaceGenerator) emitMarshallableForStruct(st *ast.StructType) {\nvar dynamicSizeTerms []string\nforEachStructField(st, fieldDispatcher{\n- primitive: func(n, t *ast.Ident) {\n+ primitive: func(_, t *ast.Ident) {\nif size, dynamic := g.scalarSize(t); !dynamic {\nprimitiveSize += size\n} else {\n@@ -109,13 +103,13 @@ func (g *interfaceGenerator) emitMarshallableForStruct(st *ast.StructType) {\ndynamicSizeTerms = append(dynamicSizeTerms, fmt.Sprintf(\"(*%s)(nil).SizeBytes()\", t.Name))\n}\n},\n- selector: func(n, tX, tSel *ast.Ident) {\n+ selector: func(_, tX, tSel *ast.Ident) {\ntName := fmt.Sprintf(\"%s.%s\", tX.Name, tSel.Name)\ng.recordUsedImport(tX.Name)\ng.recordUsedMarshallable(tName)\ndynamicSizeTerms = append(dynamicSizeTerms, fmt.Sprintf(\"(*%s)(nil).SizeBytes()\", tName))\n},\n- array: func(n *ast.Ident, a *ast.ArrayType, t *ast.Ident) {\n+ array: func(_ *ast.Ident, a *ast.ArrayType, t *ast.Ident) {\nlenExpr := g.arrayLenExpr(a)\nif size, dynamic := g.scalarSize(t); !dynamic {\ndynamicSizeTerms = append(dynamicSizeTerms, fmt.Sprintf(\"%d*%s\", size, lenExpr))\n"
},
{
"change_type": "MODIFY",
"old_path": "tools/go_marshal/gomarshal/util.go",
"new_path": "tools/go_marshal/gomarshal/util.go",
"diff": "@@ -79,7 +79,7 @@ type fieldDispatcher struct {\n}\n// Precondition: All dispatch callbacks that will be invoked must be\n-// provided. Embedded fields are not allowed, len(f.Names) >= 1.\n+// provided.\nfunc (fd fieldDispatcher) dispatch(f *ast.Field) {\n// Each field declaration may actually be multiple declarations of the same\n// type. For example, consider:\n@@ -88,12 +88,24 @@ func (fd fieldDispatcher) dispatch(f *ast.Field) {\n// x, y, z int\n// }\n//\n- // We invoke the call-backs once per such instance. Embedded fields are not\n- // allowed, and results in a panic.\n+ // We invoke the call-backs once per such instance.\n+\n+ // Handle embedded fields. Embedded fields have no names, but can be\n+ // referenced by the type name.\nif len(f.Names) < 1 {\n- panic(\"Precondition not met: attempted to dispatch on embedded field\")\n+ switch v := f.Type.(type) {\n+ case *ast.Ident:\n+ fd.primitive(v, v)\n+ case *ast.SelectorExpr:\n+ fd.selector(v.Sel, v.X.(*ast.Ident), v.Sel)\n+ default:\n+ // Note: Arrays can't be embedded, which is handled here.\n+ panic(fmt.Sprintf(\"Attempted to dispatch on embedded field of unsupported kind: %#v\", f.Type))\n+ }\n+ return\n}\n+ // Non-embedded field.\nfor _, name := range f.Names {\nswitch v := f.Type.(type) {\ncase *ast.Ident:\n"
},
{
"change_type": "MODIFY",
"old_path": "tools/go_marshal/test/test.go",
"new_path": "tools/go_marshal/test/test.go",
"diff": "@@ -174,3 +174,27 @@ type Type9 struct {\nx int64\ny [sizeA]int32\n}\n+\n+// Type10Embed is a test data type which is be embedded into another type.\n+//\n+// +marshal\n+type Type10Embed struct {\n+ x int64\n+}\n+\n+// Type10 is a test data type which contains an embedded struct.\n+//\n+// +marshal\n+type Type10 struct {\n+ Type10Embed\n+ y int64\n+}\n+\n+// Type11 is a test data type which contains an embedded struct from an external\n+// package.\n+//\n+// +marshal\n+type Type11 struct {\n+ ex.External\n+ y int64\n+}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Support embedded fields in go-marshal.
PiperOrigin-RevId: 334437990 |
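In Go's AST an embedded field is simply a field with no names, so the updated fieldDispatcher handles `len(f.Names) == 0` by dispatching on the type identifier (or selector) itself. The standalone snippet below walks a small source fragment with go/ast to show that distinction; it does not reuse the real go_marshal code.

```go
package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
)

const src = `package test

type Type10Embed struct{ x int64 }

type Type10 struct {
	Type10Embed // embedded field: no name, referenced by its type
	y int64
}`

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "test.go", src, 0)
	if err != nil {
		panic(err)
	}
	ast.Inspect(f, func(n ast.Node) bool {
		st, ok := n.(*ast.StructType)
		if !ok {
			return true
		}
		for _, field := range st.Fields.List {
			if len(field.Names) == 0 {
				// Embedded field: dispatch on the type identifier itself,
				// as the updated fieldDispatcher does.
				if id, ok := field.Type.(*ast.Ident); ok {
					fmt.Printf("embedded field of type %s\n", id.Name)
				}
				continue
			}
			for _, name := range field.Names {
				fmt.Printf("named field %s\n", name.Name)
			}
		}
		return true
	})
}
```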
260,004 | 29.09.2020 13:44:03 | 25,200 | 1d88bce55e0c8ef77e31863d264b896493dce90f | Don't generate link-local IPv6 for loopback
Linux doesn't generate a link-local address for the loopback interface.
Test: integration_test.TestInitialLoopbackAddresses | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/ipv6/ipv6.go",
"new_path": "pkg/tcpip/network/ipv6/ipv6.go",
"diff": "@@ -258,7 +258,7 @@ func (e *endpoint) Enable() *tcpip.Error {\n}\n// Do not auto-generate an IPv6 link-local address for loopback devices.\n- if e.protocol.autoGenIPv6LinkLocal {\n+ if e.protocol.autoGenIPv6LinkLocal && !e.nic.IsLoopback() {\n// The valid and preferred lifetime is infinite for the auto-generated\n// link-local address.\ne.mu.ndp.doSLAAC(header.IPv6LinkLocalPrefix.Subnet(), header.NDPInfiniteLifetime, header.NDPInfiniteLifetime)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/tests/integration/loopback_test.go",
"new_path": "pkg/tcpip/tests/integration/loopback_test.go",
"diff": "@@ -16,6 +16,7 @@ package integration_test\nimport (\n\"testing\"\n+ \"time\"\n\"github.com/google/go-cmp/cmp\"\n\"gvisor.dev/gvisor/pkg/tcpip\"\n@@ -29,6 +30,69 @@ import (\n\"gvisor.dev/gvisor/pkg/waiter\"\n)\n+var _ ipv6.NDPDispatcher = (*ndpDispatcher)(nil)\n+\n+type ndpDispatcher struct{}\n+\n+func (*ndpDispatcher) OnDuplicateAddressDetectionStatus(tcpip.NICID, tcpip.Address, bool, *tcpip.Error) {\n+}\n+\n+func (*ndpDispatcher) OnDefaultRouterDiscovered(tcpip.NICID, tcpip.Address) bool {\n+ return false\n+}\n+\n+func (*ndpDispatcher) OnDefaultRouterInvalidated(tcpip.NICID, tcpip.Address) {}\n+\n+func (*ndpDispatcher) OnOnLinkPrefixDiscovered(tcpip.NICID, tcpip.Subnet) bool {\n+ return false\n+}\n+\n+func (*ndpDispatcher) OnOnLinkPrefixInvalidated(tcpip.NICID, tcpip.Subnet) {}\n+\n+func (*ndpDispatcher) OnAutoGenAddress(tcpip.NICID, tcpip.AddressWithPrefix) bool {\n+ return true\n+}\n+\n+func (*ndpDispatcher) OnAutoGenAddressDeprecated(tcpip.NICID, tcpip.AddressWithPrefix) {}\n+\n+func (*ndpDispatcher) OnAutoGenAddressInvalidated(tcpip.NICID, tcpip.AddressWithPrefix) {}\n+\n+func (*ndpDispatcher) OnRecursiveDNSServerOption(tcpip.NICID, []tcpip.Address, time.Duration) {}\n+\n+func (*ndpDispatcher) OnDNSSearchListOption(tcpip.NICID, []string, time.Duration) {}\n+\n+func (*ndpDispatcher) OnDHCPv6Configuration(tcpip.NICID, ipv6.DHCPv6ConfigurationFromNDPRA) {}\n+\n+// TestInitialLoopbackAddresses tests that the loopback interface does not\n+// auto-generate a link-local address when it is brought up.\n+func TestInitialLoopbackAddresses(t *testing.T) {\n+ const nicID = 1\n+\n+ s := stack.New(stack.Options{\n+ NetworkProtocols: []stack.NetworkProtocolFactory{ipv4.NewProtocol, ipv6.NewProtocolWithOptions(ipv6.Options{\n+ NDPDisp: &ndpDispatcher{},\n+ AutoGenIPv6LinkLocal: true,\n+ OpaqueIIDOpts: ipv6.OpaqueInterfaceIdentifierOptions{\n+ NICNameFromID: func(nicID tcpip.NICID, nicName string) string {\n+ t.Fatalf(\"should not attempt to get name for NIC with ID = %d; nicName = %s\", nicID, nicName)\n+ return \"\"\n+ },\n+ },\n+ })},\n+ })\n+\n+ if err := s.CreateNIC(nicID, loopback.New()); err != nil {\n+ t.Fatalf(\"CreateNIC(%d, _): %s\", nicID, err)\n+ }\n+\n+ nicsInfo := s.NICInfo()\n+ if nicInfo, ok := nicsInfo[nicID]; !ok {\n+ t.Fatalf(\"did not find NIC with ID = %d in s.NICInfo() = %#v\", nicID, nicsInfo)\n+ } else if got := len(nicInfo.ProtocolAddresses); got != 0 {\n+ t.Fatalf(\"got len(nicInfo.ProtocolAddresses) = %d, want = 0; nicInfo.ProtocolAddresses = %#v\", got, nicInfo.ProtocolAddresses)\n+ }\n+}\n+\n// TestLoopbackAcceptAllInSubnet tests that a loopback interface considers\n// itself bound to all addresses in the subnet of an assigned address.\nfunc TestLoopbackAcceptAllInSubnet(t *testing.T) {\n"
}
] | Go | Apache License 2.0 | google/gvisor | Don't generate link-local IPv6 for loopback
Linux doesn't generate a link-local address for the loopback interface.
Test: integration_test.TestInitialLoopbackAddresses
PiperOrigin-RevId: 334453182 |
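The behavioral change is a single guard: IPv6 link-local SLAAC is skipped when the NIC reports itself as loopback, matching Linux, which assigns only ::1 to lo. A minimal sketch of that guard with stand-in types follows; it is not the real tcpip stack code.

```go
package main

import "fmt"

type nic interface {
	IsLoopback() bool
}

type loopbackNIC struct{}

func (loopbackNIC) IsLoopback() bool { return true }

type ethernetNIC struct{}

func (ethernetNIC) IsLoopback() bool { return false }

type endpoint struct {
	nic                  nic
	autoGenIPv6LinkLocal bool
}

// enable mirrors the shape of the fix: auto-generate a link-local address
// only when the option is on and the NIC is not a loopback device.
func (e *endpoint) enable() {
	if e.autoGenIPv6LinkLocal && !e.nic.IsLoopback() {
		fmt.Println("starting SLAAC for fe80::/64")
		return
	}
	fmt.Println("skipping link-local address generation")
}

func main() {
	(&endpoint{nic: loopbackNIC{}, autoGenIPv6LinkLocal: true}).enable()
	(&endpoint{nic: ethernetNIC{}, autoGenIPv6LinkLocal: true}).enable()
}
```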
259,992 | 29.09.2020 15:47:25 | 25,200 | 4a428b13b20baf055e7e70ecb51a87299f0c6d8e | Add /proc/[pid]/cwd | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/proc/task.go",
"new_path": "pkg/sentry/fs/proc/task.go",
"diff": "@@ -84,6 +84,7 @@ func (p *proc) newTaskDir(t *kernel.Task, msrc *fs.MountSource, isThreadGroup bo\n\"auxv\": newAuxvec(t, msrc),\n\"cmdline\": newExecArgInode(t, msrc, cmdlineExecArg),\n\"comm\": newComm(t, msrc),\n+ \"cwd\": newCwd(t, msrc),\n\"environ\": newExecArgInode(t, msrc, environExecArg),\n\"exe\": newExe(t, msrc),\n\"fd\": newFdDir(t, msrc),\n@@ -300,6 +301,49 @@ func (e *exe) Readlink(ctx context.Context, inode *fs.Inode) (string, error) {\nreturn exec.PathnameWithDeleted(ctx), nil\n}\n+// cwd is an fs.InodeOperations symlink for the /proc/PID/cwd file.\n+//\n+// +stateify savable\n+type cwd struct {\n+ ramfs.Symlink\n+\n+ t *kernel.Task\n+}\n+\n+func newCwd(t *kernel.Task, msrc *fs.MountSource) *fs.Inode {\n+ cwdSymlink := &cwd{\n+ Symlink: *ramfs.NewSymlink(t, fs.RootOwner, \"\"),\n+ t: t,\n+ }\n+ return newProcInode(t, cwdSymlink, msrc, fs.Symlink, t)\n+}\n+\n+// Readlink implements fs.InodeOperations.\n+func (e *cwd) Readlink(ctx context.Context, inode *fs.Inode) (string, error) {\n+ if !kernel.ContextCanTrace(ctx, e.t, false) {\n+ return \"\", syserror.EACCES\n+ }\n+ if err := checkTaskState(e.t); err != nil {\n+ return \"\", err\n+ }\n+ cwd := e.t.FSContext().WorkingDirectory()\n+ if cwd == nil {\n+ // It could have raced with process deletion.\n+ return \"\", syserror.ESRCH\n+ }\n+ defer cwd.DecRef(ctx)\n+\n+ root := fs.RootFromContext(ctx)\n+ if root == nil {\n+ // It could have raced with process deletion.\n+ return \"\", syserror.ESRCH\n+ }\n+ defer root.DecRef(ctx)\n+\n+ name, _ := cwd.FullName(root)\n+ return name, nil\n+}\n+\n// namespaceSymlink represents a symlink in the namespacefs, such as the files\n// in /proc/<pid>/ns.\n//\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/proc/task.go",
"new_path": "pkg/sentry/fsimpl/proc/task.go",
"diff": "@@ -53,6 +53,7 @@ func (fs *filesystem) newTaskInode(task *kernel.Task, pidns *kernel.PIDNamespace\n\"auxv\": fs.newTaskOwnedFile(task, fs.NextIno(), 0444, &auxvData{task: task}),\n\"cmdline\": fs.newTaskOwnedFile(task, fs.NextIno(), 0444, &cmdlineData{task: task, arg: cmdlineDataArg}),\n\"comm\": fs.newComm(task, fs.NextIno(), 0444),\n+ \"cwd\": fs.newCwdSymlink(task, fs.NextIno()),\n\"environ\": fs.newTaskOwnedFile(task, fs.NextIno(), 0444, &cmdlineData{task: task, arg: environDataArg}),\n\"exe\": fs.newExeSymlink(task, fs.NextIno()),\n\"fd\": fs.newFDDirInode(task),\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/proc/task_files.go",
"new_path": "pkg/sentry/fsimpl/proc/task_files.go",
"diff": "@@ -669,18 +669,22 @@ func (fs *filesystem) newExeSymlink(task *kernel.Task, ino uint64) *kernfs.Dentr\n// Readlink implements kernfs.Inode.Readlink.\nfunc (s *exeSymlink) Readlink(ctx context.Context, _ *vfs.Mount) (string, error) {\n- if !kernel.ContextCanTrace(ctx, s.task, false) {\n- return \"\", syserror.EACCES\n- }\n-\n- // Pull out the executable for /proc/[pid]/exe.\n- exec, err := s.executable()\n+ exec, _, err := s.Getlink(ctx, nil)\nif err != nil {\nreturn \"\", err\n}\ndefer exec.DecRef(ctx)\n- return exec.PathnameWithDeleted(ctx), nil\n+ root := vfs.RootFromContext(ctx)\n+ if !root.Ok() {\n+ // It could have raced with process deletion.\n+ return \"\", syserror.ESRCH\n+ }\n+ defer root.DecRef(ctx)\n+\n+ vfsObj := exec.Mount().Filesystem().VirtualFilesystem()\n+ name, _ := vfsObj.PathnameWithDeleted(ctx, root, exec)\n+ return name, nil\n}\n// Getlink implements kernfs.Inode.Getlink.\n@@ -688,23 +692,12 @@ func (s *exeSymlink) Getlink(ctx context.Context, _ *vfs.Mount) (vfs.VirtualDent\nif !kernel.ContextCanTrace(ctx, s.task, false) {\nreturn vfs.VirtualDentry{}, \"\", syserror.EACCES\n}\n-\n- exec, err := s.executable()\n- if err != nil {\n- return vfs.VirtualDentry{}, \"\", err\n- }\n- defer exec.DecRef(ctx)\n-\n- vd := exec.(*fsbridge.VFSFile).FileDescription().VirtualDentry()\n- vd.IncRef()\n- return vd, \"\", nil\n-}\n-\n-func (s *exeSymlink) executable() (file fsbridge.File, err error) {\nif err := checkTaskState(s.task); err != nil {\n- return nil, err\n+ return vfs.VirtualDentry{}, \"\", err\n}\n+ var err error\n+ var exec fsbridge.File\ns.task.WithMuLocked(func(t *kernel.Task) {\nmm := t.MemoryManager()\nif mm == nil {\n@@ -715,12 +708,78 @@ func (s *exeSymlink) executable() (file fsbridge.File, err error) {\n// The MemoryManager may be destroyed, in which case\n// MemoryManager.destroy will simply set the executable to nil\n// (with locks held).\n- file = mm.Executable()\n- if file == nil {\n+ exec = mm.Executable()\n+ if exec == nil {\nerr = syserror.ESRCH\n}\n})\n- return\n+ if err != nil {\n+ return vfs.VirtualDentry{}, \"\", err\n+ }\n+ defer exec.DecRef(ctx)\n+\n+ vd := exec.(*fsbridge.VFSFile).FileDescription().VirtualDentry()\n+ vd.IncRef()\n+ return vd, \"\", nil\n+}\n+\n+// cwdSymlink is an symlink for the /proc/[pid]/cwd file.\n+//\n+// +stateify savable\n+type cwdSymlink struct {\n+ implStatFS\n+ kernfs.InodeAttrs\n+ kernfs.InodeNoopRefCount\n+ kernfs.InodeSymlink\n+\n+ task *kernel.Task\n+}\n+\n+var _ kernfs.Inode = (*cwdSymlink)(nil)\n+\n+func (fs *filesystem) newCwdSymlink(task *kernel.Task, ino uint64) *kernfs.Dentry {\n+ inode := &cwdSymlink{task: task}\n+ inode.Init(task.Credentials(), linux.UNNAMED_MAJOR, fs.devMinor, ino, linux.ModeSymlink|0777)\n+\n+ d := &kernfs.Dentry{}\n+ d.Init(inode)\n+ return d\n+}\n+\n+// Readlink implements kernfs.Inode.Readlink.\n+func (s *cwdSymlink) Readlink(ctx context.Context, _ *vfs.Mount) (string, error) {\n+ cwd, _, err := s.Getlink(ctx, nil)\n+ if err != nil {\n+ return \"\", err\n+ }\n+ defer cwd.DecRef(ctx)\n+\n+ root := vfs.RootFromContext(ctx)\n+ if !root.Ok() {\n+ // It could have raced with process deletion.\n+ return \"\", syserror.ESRCH\n+ }\n+ defer root.DecRef(ctx)\n+\n+ vfsObj := cwd.Mount().Filesystem().VirtualFilesystem()\n+ name, _ := vfsObj.PathnameWithDeleted(ctx, root, cwd)\n+ return name, nil\n+}\n+\n+// Getlink implements kernfs.Inode.Getlink.\n+func (s *cwdSymlink) Getlink(ctx context.Context, _ *vfs.Mount) (vfs.VirtualDentry, string, error) {\n+ if !kernel.ContextCanTrace(ctx, s.task, 
false) {\n+ return vfs.VirtualDentry{}, \"\", syserror.EACCES\n+ }\n+ if err := checkTaskState(s.task); err != nil {\n+ return vfs.VirtualDentry{}, \"\", err\n+ }\n+ cwd := s.task.FSContext().WorkingDirectoryVFS2()\n+ if !cwd.Ok() {\n+ // It could have raced with process deletion.\n+ return vfs.VirtualDentry{}, \"\", syserror.ESRCH\n+ }\n+ return cwd, \"\", nil\n}\n// mountInfoData is used to implement /proc/[pid]/mountinfo.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/proc/tasks_test.go",
"new_path": "pkg/sentry/fsimpl/proc/tasks_test.go",
"diff": "@@ -67,6 +67,7 @@ var (\ntaskStaticFiles = map[string]testutil.DirentType{\n\"auxv\": linux.DT_REG,\n\"cgroup\": linux.DT_REG,\n+ \"cwd\": linux.DT_LNK,\n\"cmdline\": linux.DT_REG,\n\"comm\": linux.DT_REG,\n\"environ\": linux.DT_REG,\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/proc.cc",
"new_path": "test/syscalls/linux/proc.cc",
"diff": "@@ -780,8 +780,12 @@ TEST(ProcSelfFdInfo, Flags) {\n}\nTEST(ProcSelfExe, Absolute) {\n- auto exe = ASSERT_NO_ERRNO_AND_VALUE(\n- ReadLink(absl::StrCat(\"/proc/\", getpid(), \"/exe\")));\n+ auto exe = ASSERT_NO_ERRNO_AND_VALUE(ReadLink(\"/proc/self/exe\"));\n+ EXPECT_EQ(exe[0], '/');\n+}\n+\n+TEST(ProcSelfCwd, Absolute) {\n+ auto exe = ASSERT_NO_ERRNO_AND_VALUE(ReadLink(\"/proc/self/cwd\"));\nEXPECT_EQ(exe[0], '/');\n}\n@@ -1473,6 +1477,16 @@ TEST(ProcPidExe, Subprocess) {\nEXPECT_EQ(actual, expected_absolute_path);\n}\n+// /proc/PID/cwd points to the correct directory.\n+TEST(ProcPidCwd, Subprocess) {\n+ auto want = ASSERT_NO_ERRNO_AND_VALUE(GetCWD());\n+\n+ char got[PATH_MAX + 1] = {};\n+ ASSERT_THAT(ReadlinkWhileRunning(\"cwd\", got, sizeof(got)),\n+ SyscallSucceedsWithValue(Gt(0)));\n+ EXPECT_EQ(got, want);\n+}\n+\n// Test whether /proc/PID/ files can be read for a running process.\nTEST(ProcPidFile, SubprocessRunning) {\nchar buf[1];\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add /proc/[pid]/cwd
PiperOrigin-RevId: 334478850 |
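From inside the sandbox the new symlink behaves like Linux's: reading /proc/[pid]/cwd resolves to the task's working directory, subject to the same ptrace-style permission check as exe. A small guest-side usage sketch (Linux-only, standard library calls):

```go
package main

import (
	"fmt"
	"os"
)

func main() {
	// /proc/self/cwd should resolve to the same directory os.Getwd reports.
	got, err := os.Readlink("/proc/self/cwd")
	if err != nil {
		fmt.Fprintln(os.Stderr, "readlink:", err)
		os.Exit(1)
	}
	want, err := os.Getwd()
	if err != nil {
		fmt.Fprintln(os.Stderr, "getwd:", err)
		os.Exit(1)
	}
	fmt.Printf("/proc/self/cwd -> %s (getwd: %s, match: %t)\n", got, want, got == want)
}
```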
259,858 | 29.09.2020 18:33:11 | 25,200 | d4d9238c52ee8eae127f566f1119d915fb6c1a00 | Stop depending on go_binary targets.
Closes | [
{
"change_type": "MODIFY",
"old_path": "test/runtimes/proctor/BUILD",
"new_path": "test/runtimes/proctor/BUILD",
"diff": "-load(\"//tools:defs.bzl\", \"go_binary\", \"go_test\")\n+load(\"//tools:defs.bzl\", \"go_binary\")\npackage(licenses = [\"notice\"])\ngo_binary(\nname = \"proctor\",\n- srcs = [\n- \"go.go\",\n- \"java.go\",\n- \"nodejs.go\",\n- \"php.go\",\n- \"proctor.go\",\n- \"python.go\",\n- ],\n- pure = True,\n+ srcs = [\"main.go\"],\nvisibility = [\"//test/runtimes:__pkg__\"],\n-)\n-\n-go_test(\n- name = \"proctor_test\",\n- size = \"small\",\n- srcs = [\"proctor_test.go\"],\n- library = \":proctor\",\n- nogo = False, # FIXME(gvisor.dev/issue/3374): Not working with all build systems.\n- pure = True,\n- deps = [\n- \"//pkg/test/testutil\",\n- ],\n+ deps = [\"//test/runtimes/proctor/lib\"],\n)\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "test/runtimes/proctor/lib/BUILD",
"diff": "+load(\"//tools:defs.bzl\", \"go_library\", \"go_test\")\n+\n+package(licenses = [\"notice\"])\n+\n+go_library(\n+ name = \"lib\",\n+ srcs = [\n+ \"go.go\",\n+ \"java.go\",\n+ \"lib.go\",\n+ \"nodejs.go\",\n+ \"php.go\",\n+ \"python.go\",\n+ ],\n+ visibility = [\"//test/runtimes/proctor:__pkg__\"],\n+)\n+\n+go_test(\n+ name = \"lib_test\",\n+ size = \"small\",\n+ srcs = [\"lib_test.go\"],\n+ library = \":lib\",\n+ deps = [\"//pkg/test/testutil\"],\n+)\n"
},
{
"change_type": "RENAME",
"old_path": "test/runtimes/proctor/go.go",
"new_path": "test/runtimes/proctor/lib/go.go",
"diff": "// See the License for the specific language governing permissions and\n// limitations under the License.\n-package main\n+package lib\nimport (\n\"fmt\"\n@@ -59,7 +59,7 @@ func (goRunner) ListTests() ([]string, error) {\n}\n// Go tests on disk.\n- diskSlice, err := search(goTestDir, goTestRegEx)\n+ diskSlice, err := Search(goTestDir, goTestRegEx)\nif err != nil {\nreturn nil, err\n}\n"
},
{
"change_type": "RENAME",
"old_path": "test/runtimes/proctor/java.go",
"new_path": "test/runtimes/proctor/lib/java.go",
"diff": "// See the License for the specific language governing permissions and\n// limitations under the License.\n-package main\n+package lib\nimport (\n\"fmt\"\n"
},
{
"change_type": "RENAME",
"old_path": "test/runtimes/proctor/proctor.go",
"new_path": "test/runtimes/proctor/lib/lib.go",
"diff": "// See the License for the specific language governing permissions and\n// limitations under the License.\n-// Binary proctor runs the test for a particular runtime. It is meant to be\n-// included in Docker images for all runtime tests.\n-package main\n+// Package lib contains proctor functions.\n+package lib\nimport (\n- \"flag\"\n\"fmt\"\n- \"log\"\n\"os\"\n\"os/exec\"\n\"os/signal\"\n\"path/filepath\"\n\"regexp\"\n- \"strings\"\n\"syscall\"\n)\n@@ -42,66 +38,8 @@ type TestRunner interface {\nTestCmds(tests []string) []*exec.Cmd\n}\n-var (\n- runtime = flag.String(\"runtime\", \"\", \"name of runtime\")\n- list = flag.Bool(\"list\", false, \"list all available tests\")\n- testNames = flag.String(\"tests\", \"\", \"run a subset of the available tests\")\n- pause = flag.Bool(\"pause\", false, \"cause container to pause indefinitely, reaping any zombie children\")\n-)\n-\n-func main() {\n- flag.Parse()\n-\n- if *pause {\n- pauseAndReap()\n- panic(\"pauseAndReap should never return\")\n- }\n-\n- if *runtime == \"\" {\n- log.Fatalf(\"runtime flag must be provided\")\n- }\n-\n- tr, err := testRunnerForRuntime(*runtime)\n- if err != nil {\n- log.Fatalf(\"%v\", err)\n- }\n-\n- // List tests.\n- if *list {\n- tests, err := tr.ListTests()\n- if err != nil {\n- log.Fatalf(\"failed to list tests: %v\", err)\n- }\n- for _, test := range tests {\n- fmt.Println(test)\n- }\n- return\n- }\n-\n- var tests []string\n- if *testNames == \"\" {\n- // Run every test.\n- tests, err = tr.ListTests()\n- if err != nil {\n- log.Fatalf(\"failed to get all tests: %v\", err)\n- }\n- } else {\n- // Run subset of test.\n- tests = strings.Split(*testNames, \",\")\n- }\n-\n- // Run tests.\n- cmds := tr.TestCmds(tests)\n- for _, cmd := range cmds {\n- cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr\n- if err := cmd.Run(); err != nil {\n- log.Fatalf(\"FAIL: %v\", err)\n- }\n- }\n-}\n-\n-// testRunnerForRuntime returns a new TestRunner for the given runtime.\n-func testRunnerForRuntime(runtime string) (TestRunner, error) {\n+// TestRunnerForRuntime returns a new TestRunner for the given runtime.\n+func TestRunnerForRuntime(runtime string) (TestRunner, error) {\nswitch runtime {\ncase \"go\":\nreturn goRunner{}, nil\n@@ -117,8 +55,8 @@ func testRunnerForRuntime(runtime string) (TestRunner, error) {\nreturn nil, fmt.Errorf(\"invalid runtime %q\", runtime)\n}\n-// pauseAndReap is like init. It runs forever and reaps any children.\n-func pauseAndReap() {\n+// PauseAndReap is like init. It runs forever and reaps any children.\n+func PauseAndReap() {\n// Get notified of any new children.\nch := make(chan os.Signal, 1)\nsignal.Notify(ch, syscall.SIGCHLD)\n@@ -138,9 +76,9 @@ func pauseAndReap() {\n}\n}\n-// search is a helper function to find tests in the given directory that match\n+// Search is a helper function to find tests in the given directory that match\n// the regex.\n-func search(root string, testFilter *regexp.Regexp) ([]string, error) {\n+func Search(root string, testFilter *regexp.Regexp) ([]string, error) {\nvar testSlice []string\nerr := filepath.Walk(root, func(path string, info os.FileInfo, err error) error {\n"
},
{
"change_type": "RENAME",
"old_path": "test/runtimes/proctor/proctor_test.go",
"new_path": "test/runtimes/proctor/lib/lib_test.go",
"diff": "// See the License for the specific language governing permissions and\n// limitations under the License.\n-package main\n+package lib\nimport (\n\"io/ioutil\"\n@@ -47,7 +47,7 @@ func TestSearchEmptyDir(t *testing.T) {\nvar want []string\ntestFilter := regexp.MustCompile(`^test-[^-].+\\.tc$`)\n- got, err := search(td, testFilter)\n+ got, err := Search(td, testFilter)\nif err != nil {\nt.Errorf(\"search error: %v\", err)\n}\n@@ -116,7 +116,7 @@ func TestSearch(t *testing.T) {\n}\ntestFilter := regexp.MustCompile(`^test-[^-].+\\.tc$`)\n- got, err := search(td, testFilter)\n+ got, err := Search(td, testFilter)\nif err != nil {\nt.Errorf(\"search error: %v\", err)\n}\n"
},
{
"change_type": "RENAME",
"old_path": "test/runtimes/proctor/nodejs.go",
"new_path": "test/runtimes/proctor/lib/nodejs.go",
"diff": "// See the License for the specific language governing permissions and\n// limitations under the License.\n-package main\n+package lib\nimport (\n\"os/exec\"\n@@ -32,7 +32,7 @@ var _ TestRunner = nodejsRunner{}\n// ListTests implements TestRunner.ListTests.\nfunc (nodejsRunner) ListTests() ([]string, error) {\n- testSlice, err := search(nodejsTestDir, nodejsTestRegEx)\n+ testSlice, err := Search(nodejsTestDir, nodejsTestRegEx)\nif err != nil {\nreturn nil, err\n}\n"
},
{
"change_type": "RENAME",
"old_path": "test/runtimes/proctor/php.go",
"new_path": "test/runtimes/proctor/lib/php.go",
"diff": "// See the License for the specific language governing permissions and\n// limitations under the License.\n-package main\n+package lib\nimport (\n\"os/exec\"\n@@ -29,7 +29,7 @@ var _ TestRunner = phpRunner{}\n// ListTests implements TestRunner.ListTests.\nfunc (phpRunner) ListTests() ([]string, error) {\n- testSlice, err := search(\".\", phpTestRegEx)\n+ testSlice, err := Search(\".\", phpTestRegEx)\nif err != nil {\nreturn nil, err\n}\n"
},
{
"change_type": "RENAME",
"old_path": "test/runtimes/proctor/python.go",
"new_path": "test/runtimes/proctor/lib/python.go",
"diff": "// See the License for the specific language governing permissions and\n// limitations under the License.\n-package main\n+package lib\nimport (\n\"fmt\"\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "test/runtimes/proctor/main.go",
"diff": "+// Copyright 2019 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+// Binary proctor runs the test for a particular runtime. It is meant to be\n+// included in Docker images for all runtime tests.\n+package main\n+\n+import (\n+ \"flag\"\n+ \"fmt\"\n+ \"log\"\n+ \"os\"\n+ \"strings\"\n+\n+ \"gvisor.dev/gvisor/test/runtimes/proctor/lib\"\n+)\n+\n+var (\n+ runtime = flag.String(\"runtime\", \"\", \"name of runtime\")\n+ list = flag.Bool(\"list\", false, \"list all available tests\")\n+ testNames = flag.String(\"tests\", \"\", \"run a subset of the available tests\")\n+ pause = flag.Bool(\"pause\", false, \"cause container to pause indefinitely, reaping any zombie children\")\n+)\n+\n+func main() {\n+ flag.Parse()\n+\n+ if *pause {\n+ lib.PauseAndReap()\n+ panic(\"pauseAndReap should never return\")\n+ }\n+\n+ if *runtime == \"\" {\n+ log.Fatalf(\"runtime flag must be provided\")\n+ }\n+\n+ tr, err := lib.TestRunnerForRuntime(*runtime)\n+ if err != nil {\n+ log.Fatalf(\"%v\", err)\n+ }\n+\n+ // List tests.\n+ if *list {\n+ tests, err := tr.ListTests()\n+ if err != nil {\n+ log.Fatalf(\"failed to list tests: %v\", err)\n+ }\n+ for _, test := range tests {\n+ fmt.Println(test)\n+ }\n+ return\n+ }\n+\n+ var tests []string\n+ if *testNames == \"\" {\n+ // Run every test.\n+ tests, err = tr.ListTests()\n+ if err != nil {\n+ log.Fatalf(\"failed to get all tests: %v\", err)\n+ }\n+ } else {\n+ // Run subset of test.\n+ tests = strings.Split(*testNames, \",\")\n+ }\n+\n+ // Run tests.\n+ cmds := tr.TestCmds(tests)\n+ for _, cmd := range cmds {\n+ cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr\n+ if err := cmd.Run(); err != nil {\n+ log.Fatalf(\"FAIL: %v\", err)\n+ }\n+ }\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "test/runtimes/runner/BUILD",
"new_path": "test/runtimes/runner/BUILD",
"diff": "-load(\"//tools:defs.bzl\", \"go_binary\", \"go_test\")\n+load(\"//tools:defs.bzl\", \"go_binary\")\npackage(licenses = [\"notice\"])\n@@ -7,16 +7,5 @@ go_binary(\ntestonly = 1,\nsrcs = [\"main.go\"],\nvisibility = [\"//test/runtimes:__pkg__\"],\n- deps = [\n- \"//pkg/log\",\n- \"//pkg/test/dockerutil\",\n- \"//pkg/test/testutil\",\n- ],\n-)\n-\n-go_test(\n- name = \"exclude_test\",\n- size = \"small\",\n- srcs = [\"exclude_test.go\"],\n- library = \":runner\",\n+ deps = [\"//test/runtimes/runner/lib\"],\n)\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "test/runtimes/runner/lib/BUILD",
"diff": "+load(\"//tools:defs.bzl\", \"go_library\", \"go_test\")\n+\n+package(licenses = [\"notice\"])\n+\n+go_library(\n+ name = \"lib\",\n+ testonly = 1,\n+ srcs = [\"lib.go\"],\n+ visibility = [\"//test/runtimes/runner:__pkg__\"],\n+ deps = [\n+ \"//pkg/log\",\n+ \"//pkg/test/dockerutil\",\n+ \"//pkg/test/testutil\",\n+ ],\n+)\n+\n+go_test(\n+ name = \"lib_test\",\n+ size = \"small\",\n+ srcs = [\"exclude_test.go\"],\n+ library = \":lib\",\n+)\n"
},
{
"change_type": "RENAME",
"old_path": "test/runtimes/runner/exclude_test.go",
"new_path": "test/runtimes/runner/lib/exclude_test.go",
"diff": "// See the License for the specific language governing permissions and\n// limitations under the License.\n-package main\n+package lib\nimport (\n\"flag\"\n@@ -20,6 +20,8 @@ import (\n\"testing\"\n)\n+var excludeFile = flag.String(\"exclude_file\", \"\", \"file to test (standard format)\")\n+\nfunc TestMain(m *testing.M) {\nflag.Parse()\nos.Exit(m.Run())\n@@ -27,7 +29,7 @@ func TestMain(m *testing.M) {\n// Test that the exclude file parses without error.\nfunc TestExcludelist(t *testing.T) {\n- ex, err := getExcludes()\n+ ex, err := getExcludes(*excludeFile)\nif err != nil {\nt.Fatalf(\"error parsing exclude file: %v\", err)\n}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "test/runtimes/runner/lib/lib.go",
"diff": "+// Copyright 2019 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+// Package lib provides utilities for runner.\n+package lib\n+\n+import (\n+ \"context\"\n+ \"encoding/csv\"\n+ \"fmt\"\n+ \"io\"\n+ \"os\"\n+ \"sort\"\n+ \"strings\"\n+ \"testing\"\n+ \"time\"\n+\n+ \"gvisor.dev/gvisor/pkg/log\"\n+ \"gvisor.dev/gvisor/pkg/test/dockerutil\"\n+ \"gvisor.dev/gvisor/pkg/test/testutil\"\n+)\n+\n+// RunTests is a helper that is called by main. It exists so that we can run\n+// defered functions before exiting. It returns an exit code that should be\n+// passed to os.Exit.\n+func RunTests(lang, image, excludeFile string, batchSize int, timeout time.Duration) int {\n+ // Get tests to exclude..\n+ excludes, err := getExcludes(excludeFile)\n+ if err != nil {\n+ fmt.Fprintf(os.Stderr, \"Error getting exclude list: %s\\n\", err.Error())\n+ return 1\n+ }\n+\n+ // Construct the shared docker instance.\n+ ctx := context.Background()\n+ d := dockerutil.MakeContainer(ctx, testutil.DefaultLogger(lang))\n+ defer d.CleanUp(ctx)\n+\n+ if err := testutil.TouchShardStatusFile(); err != nil {\n+ fmt.Fprintf(os.Stderr, \"error touching status shard file: %v\\n\", err)\n+ return 1\n+ }\n+\n+ // Get a slice of tests to run. This will also start a single Docker\n+ // container that will be used to run each test. 
The final test will\n+ // stop the Docker container.\n+ tests, err := getTests(ctx, d, lang, image, batchSize, timeout, excludes)\n+ if err != nil {\n+ fmt.Fprintf(os.Stderr, \"%s\\n\", err.Error())\n+ return 1\n+ }\n+\n+ m := testing.MainStart(testDeps{}, tests, nil, nil)\n+ return m.Run()\n+}\n+\n+// getTests executes all tests as table tests.\n+func getTests(ctx context.Context, d *dockerutil.Container, lang, image string, batchSize int, timeout time.Duration, excludes map[string]struct{}) ([]testing.InternalTest, error) {\n+ // Start the container.\n+ opts := dockerutil.RunOpts{\n+ Image: fmt.Sprintf(\"runtimes/%s\", image),\n+ }\n+ d.CopyFiles(&opts, \"/proctor\", \"test/runtimes/proctor/proctor\")\n+ if err := d.Spawn(ctx, opts, \"/proctor/proctor\", \"--pause\"); err != nil {\n+ return nil, fmt.Errorf(\"docker run failed: %v\", err)\n+ }\n+\n+ // Get a list of all tests in the image.\n+ list, err := d.Exec(ctx, dockerutil.ExecOpts{}, \"/proctor/proctor\", \"--runtime\", lang, \"--list\")\n+ if err != nil {\n+ return nil, fmt.Errorf(\"docker exec failed: %v\", err)\n+ }\n+\n+ // Calculate a subset of tests to run corresponding to the current\n+ // shard.\n+ tests := strings.Fields(list)\n+ sort.Strings(tests)\n+ indices, err := testutil.TestIndicesForShard(len(tests))\n+ if err != nil {\n+ return nil, fmt.Errorf(\"TestsForShard() failed: %v\", err)\n+ }\n+\n+ var itests []testing.InternalTest\n+ for i := 0; i < len(indices); i += batchSize {\n+ var tcs []string\n+ end := i + batchSize\n+ if end > len(indices) {\n+ end = len(indices)\n+ }\n+ for _, tc := range indices[i:end] {\n+ // Add test if not excluded.\n+ if _, ok := excludes[tests[tc]]; ok {\n+ log.Infof(\"Skipping test case %s\\n\", tests[tc])\n+ continue\n+ }\n+ tcs = append(tcs, tests[tc])\n+ }\n+ itests = append(itests, testing.InternalTest{\n+ Name: strings.Join(tcs, \", \"),\n+ F: func(t *testing.T) {\n+ var (\n+ now = time.Now()\n+ done = make(chan struct{})\n+ output string\n+ err error\n+ )\n+\n+ go func() {\n+ fmt.Printf(\"RUNNING the following in a batch\\n%s\\n\", strings.Join(tcs, \"\\n\"))\n+ output, err = d.Exec(ctx, dockerutil.ExecOpts{}, \"/proctor/proctor\", \"--runtime\", lang, \"--tests\", strings.Join(tcs, \",\"))\n+ close(done)\n+ }()\n+\n+ select {\n+ case <-done:\n+ if err == nil {\n+ fmt.Printf(\"PASS: (%v)\\n\\n\", time.Since(now))\n+ return\n+ }\n+ t.Errorf(\"FAIL: (%v):\\n%s\\n\", time.Since(now), output)\n+ case <-time.After(timeout):\n+ t.Errorf(\"TIMEOUT: (%v):\\n%s\\n\", time.Since(now), output)\n+ }\n+ },\n+ })\n+ }\n+\n+ return itests, nil\n+}\n+\n+// getBlacklist reads the exclude file and returns a set of test names to\n+// exclude.\n+func getExcludes(excludeFile string) (map[string]struct{}, error) {\n+ excludes := make(map[string]struct{})\n+ if excludeFile == \"\" {\n+ return excludes, nil\n+ }\n+ f, err := os.Open(excludeFile)\n+ if err != nil {\n+ return nil, err\n+ }\n+ defer f.Close()\n+\n+ r := csv.NewReader(f)\n+\n+ // First line is header. 
Skip it.\n+ if _, err := r.Read(); err != nil {\n+ return nil, err\n+ }\n+\n+ for {\n+ record, err := r.Read()\n+ if err == io.EOF {\n+ break\n+ }\n+ if err != nil {\n+ return nil, err\n+ }\n+ excludes[record[0]] = struct{}{}\n+ }\n+ return excludes, nil\n+}\n+\n+// testDeps implements testing.testDeps (an unexported interface), and is\n+// required to use testing.MainStart.\n+type testDeps struct{}\n+\n+func (f testDeps) MatchString(a, b string) (bool, error) { return a == b, nil }\n+func (f testDeps) StartCPUProfile(io.Writer) error { return nil }\n+func (f testDeps) StopCPUProfile() {}\n+func (f testDeps) WriteProfileTo(string, io.Writer, int) error { return nil }\n+func (f testDeps) ImportPath() string { return \"\" }\n+func (f testDeps) StartTestLog(io.Writer) {}\n+func (f testDeps) StopTestLog() error { return nil }\n"
},
{
"change_type": "MODIFY",
"old_path": "test/runtimes/runner/main.go",
"new_path": "test/runtimes/runner/main.go",
"diff": "package main\nimport (\n- \"context\"\n- \"encoding/csv\"\n\"flag\"\n\"fmt\"\n- \"io\"\n\"os\"\n- \"sort\"\n- \"strings\"\n- \"testing\"\n\"time\"\n- \"gvisor.dev/gvisor/pkg/log\"\n- \"gvisor.dev/gvisor/pkg/test/dockerutil\"\n- \"gvisor.dev/gvisor/pkg/test/testutil\"\n+ \"gvisor.dev/gvisor/test/runtimes/runner/lib\"\n)\nvar (\n@@ -37,169 +29,14 @@ var (\nimage = flag.String(\"image\", \"\", \"docker image with runtime tests\")\nexcludeFile = flag.String(\"exclude_file\", \"\", \"file containing list of tests to exclude, in CSV format with fields: test name, bug id, comment\")\nbatchSize = flag.Int(\"batch\", 50, \"number of test cases run in one command\")\n+ timeout = flag.Duration(\"timeout\", 90*time.Minute, \"batch timeout\")\n)\n-// Wait time for each test to run.\n-const timeout = 90 * time.Minute\n-\nfunc main() {\nflag.Parse()\nif *lang == \"\" || *image == \"\" {\nfmt.Fprintf(os.Stderr, \"lang and image flags must not be empty\\n\")\nos.Exit(1)\n}\n- os.Exit(runTests())\n-}\n-\n-// runTests is a helper that is called by main. It exists so that we can run\n-// defered functions before exiting. It returns an exit code that should be\n-// passed to os.Exit.\n-func runTests() int {\n- // Get tests to exclude..\n- excludes, err := getExcludes()\n- if err != nil {\n- fmt.Fprintf(os.Stderr, \"Error getting exclude list: %s\\n\", err.Error())\n- return 1\n- }\n-\n- // Construct the shared docker instance.\n- ctx := context.Background()\n- d := dockerutil.MakeContainer(ctx, testutil.DefaultLogger(*lang))\n- defer d.CleanUp(ctx)\n-\n- if err := testutil.TouchShardStatusFile(); err != nil {\n- fmt.Fprintf(os.Stderr, \"error touching status shard file: %v\\n\", err)\n- return 1\n- }\n-\n- // Get a slice of tests to run. This will also start a single Docker\n- // container that will be used to run each test. 
The final test will\n- // stop the Docker container.\n- tests, err := getTests(ctx, d, excludes)\n- if err != nil {\n- fmt.Fprintf(os.Stderr, \"%s\\n\", err.Error())\n- return 1\n- }\n-\n- m := testing.MainStart(testDeps{}, tests, nil, nil)\n- return m.Run()\n-}\n-\n-// getTests executes all tests as table tests.\n-func getTests(ctx context.Context, d *dockerutil.Container, excludes map[string]struct{}) ([]testing.InternalTest, error) {\n- // Start the container.\n- opts := dockerutil.RunOpts{\n- Image: fmt.Sprintf(\"runtimes/%s\", *image),\n- }\n- d.CopyFiles(&opts, \"/proctor\", \"test/runtimes/proctor/proctor\")\n- if err := d.Spawn(ctx, opts, \"/proctor/proctor\", \"--pause\"); err != nil {\n- return nil, fmt.Errorf(\"docker run failed: %v\", err)\n- }\n-\n- // Get a list of all tests in the image.\n- list, err := d.Exec(ctx, dockerutil.ExecOpts{}, \"/proctor/proctor\", \"--runtime\", *lang, \"--list\")\n- if err != nil {\n- return nil, fmt.Errorf(\"docker exec failed: %v\", err)\n- }\n-\n- // Calculate a subset of tests to run corresponding to the current\n- // shard.\n- tests := strings.Fields(list)\n- sort.Strings(tests)\n- indices, err := testutil.TestIndicesForShard(len(tests))\n- if err != nil {\n- return nil, fmt.Errorf(\"TestsForShard() failed: %v\", err)\n+ os.Exit(lib.RunTests(*lang, *image, *excludeFile, *batchSize, *timeout))\n}\n-\n- var itests []testing.InternalTest\n- for i := 0; i < len(indices); i += *batchSize {\n- var tcs []string\n- end := i + *batchSize\n- if end > len(indices) {\n- end = len(indices)\n- }\n- for _, tc := range indices[i:end] {\n- // Add test if not excluded.\n- if _, ok := excludes[tests[tc]]; ok {\n- log.Infof(\"Skipping test case %s\\n\", tests[tc])\n- continue\n- }\n- tcs = append(tcs, tests[tc])\n- }\n- itests = append(itests, testing.InternalTest{\n- Name: strings.Join(tcs, \", \"),\n- F: func(t *testing.T) {\n- var (\n- now = time.Now()\n- done = make(chan struct{})\n- output string\n- err error\n- )\n-\n- go func() {\n- fmt.Printf(\"RUNNING the following in a batch\\n%s\\n\", strings.Join(tcs, \"\\n\"))\n- output, err = d.Exec(ctx, dockerutil.ExecOpts{}, \"/proctor/proctor\", \"--runtime\", *lang, \"--tests\", strings.Join(tcs, \",\"))\n- close(done)\n- }()\n-\n- select {\n- case <-done:\n- if err == nil {\n- fmt.Printf(\"PASS: (%v)\\n\\n\", time.Since(now))\n- return\n- }\n- t.Errorf(\"FAIL: (%v):\\n%s\\n\", time.Since(now), output)\n- case <-time.After(timeout):\n- t.Errorf(\"TIMEOUT: (%v):\\n%s\\n\", time.Since(now), output)\n- }\n- },\n- })\n- }\n-\n- return itests, nil\n-}\n-\n-// getBlacklist reads the exclude file and returns a set of test names to\n-// exclude.\n-func getExcludes() (map[string]struct{}, error) {\n- excludes := make(map[string]struct{})\n- if *excludeFile == \"\" {\n- return excludes, nil\n- }\n- f, err := os.Open(*excludeFile)\n- if err != nil {\n- return nil, err\n- }\n- defer f.Close()\n-\n- r := csv.NewReader(f)\n-\n- // First line is header. 
Skip it.\n- if _, err := r.Read(); err != nil {\n- return nil, err\n- }\n-\n- for {\n- record, err := r.Read()\n- if err == io.EOF {\n- break\n- }\n- if err != nil {\n- return nil, err\n- }\n- excludes[record[0]] = struct{}{}\n- }\n- return excludes, nil\n-}\n-\n-// testDeps implements testing.testDeps (an unexported interface), and is\n-// required to use testing.MainStart.\n-type testDeps struct{}\n-\n-func (f testDeps) MatchString(a, b string) (bool, error) { return a == b, nil }\n-func (f testDeps) StartCPUProfile(io.Writer) error { return nil }\n-func (f testDeps) StopCPUProfile() {}\n-func (f testDeps) WriteProfileTo(string, io.Writer, int) error { return nil }\n-func (f testDeps) ImportPath() string { return \"\" }\n-func (f testDeps) StartTestLog(io.Writer) {}\n-func (f testDeps) StopTestLog() error { return nil }\n"
}
] | Go | Apache License 2.0 | google/gvisor | Stop depending on go_binary targets.
Closes #3374
PiperOrigin-RevId: 334505627 |
260,004 | 29.09.2020 19:44:42 | 25,200 | e5ece9aea730c105ab336e6bd2858322686a5708 | Return permanent addresses when NIC is down
Test: stack_test.TestGetMainNICAddressWhenNICDisabled | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/ipv4/ipv4.go",
"new_path": "pkg/tcpip/network/ipv4/ipv4.go",
"diff": "@@ -595,6 +595,13 @@ func (e *endpoint) RemovePermanentAddress(addr tcpip.Address) *tcpip.Error {\nreturn e.mu.addressableEndpointState.RemovePermanentAddress(addr)\n}\n+// MainAddress implements stack.AddressableEndpoint.\n+func (e *endpoint) MainAddress() tcpip.AddressWithPrefix {\n+ e.mu.RLock()\n+ defer e.mu.RUnlock()\n+ return e.mu.addressableEndpointState.MainAddress()\n+}\n+\n// AcquireAssignedAddress implements stack.AddressableEndpoint.\nfunc (e *endpoint) AcquireAssignedAddress(localAddr tcpip.Address, allowTemp bool, tempPEB stack.PrimaryEndpointBehavior) stack.AddressEndpoint {\ne.mu.Lock()\n@@ -625,11 +632,11 @@ func (e *endpoint) AcquireAssignedAddress(localAddr tcpip.Address, allowTemp boo\nreturn addressEndpoint\n}\n-// AcquirePrimaryAddress implements stack.AddressableEndpoint.\n-func (e *endpoint) AcquirePrimaryAddress(remoteAddr tcpip.Address, allowExpired bool) stack.AddressEndpoint {\n+// AcquireOutgoingPrimaryAddress implements stack.AddressableEndpoint.\n+func (e *endpoint) AcquireOutgoingPrimaryAddress(remoteAddr tcpip.Address, allowExpired bool) stack.AddressEndpoint {\ne.mu.RLock()\ndefer e.mu.RUnlock()\n- return e.mu.addressableEndpointState.AcquirePrimaryAddress(remoteAddr, allowExpired)\n+ return e.mu.addressableEndpointState.AcquireOutgoingPrimaryAddress(remoteAddr, allowExpired)\n}\n// PrimaryAddresses implements stack.AddressableEndpoint.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/ipv6/ipv6.go",
"new_path": "pkg/tcpip/network/ipv6/ipv6.go",
"diff": "@@ -922,6 +922,13 @@ func (e *endpoint) getAddressRLocked(localAddr tcpip.Address) stack.AddressEndpo\nreturn e.mu.addressableEndpointState.ReadOnly().Lookup(localAddr)\n}\n+// MainAddress implements stack.AddressableEndpoint.\n+func (e *endpoint) MainAddress() tcpip.AddressWithPrefix {\n+ e.mu.RLock()\n+ defer e.mu.RUnlock()\n+ return e.mu.addressableEndpointState.MainAddress()\n+}\n+\n// AcquireAssignedAddress implements stack.AddressableEndpoint.\nfunc (e *endpoint) AcquireAssignedAddress(localAddr tcpip.Address, allowTemp bool, tempPEB stack.PrimaryEndpointBehavior) stack.AddressEndpoint {\ne.mu.Lock()\n@@ -937,18 +944,18 @@ func (e *endpoint) acquireAddressOrCreateTempLocked(localAddr tcpip.Address, all\nreturn e.mu.addressableEndpointState.AcquireAssignedAddress(localAddr, allowTemp, tempPEB)\n}\n-// AcquirePrimaryAddress implements stack.AddressableEndpoint.\n-func (e *endpoint) AcquirePrimaryAddress(remoteAddr tcpip.Address, allowExpired bool) stack.AddressEndpoint {\n+// AcquireOutgoingPrimaryAddress implements stack.AddressableEndpoint.\n+func (e *endpoint) AcquireOutgoingPrimaryAddress(remoteAddr tcpip.Address, allowExpired bool) stack.AddressEndpoint {\ne.mu.RLock()\ndefer e.mu.RUnlock()\n- return e.acquirePrimaryAddressRLocked(remoteAddr, allowExpired)\n+ return e.acquireOutgoingPrimaryAddressRLocked(remoteAddr, allowExpired)\n}\n-// acquirePrimaryAddressRLocked is like AcquirePrimaryAddress but with locking\n-// requirements.\n+// acquireOutgoingPrimaryAddressRLocked is like AcquireOutgoingPrimaryAddress\n+// but with locking requirements.\n//\n// Precondition: e.mu must be read locked.\n-func (e *endpoint) acquirePrimaryAddressRLocked(remoteAddr tcpip.Address, allowExpired bool) stack.AddressEndpoint {\n+func (e *endpoint) acquireOutgoingPrimaryAddressRLocked(remoteAddr tcpip.Address, allowExpired bool) stack.AddressEndpoint {\n// addrCandidate is a candidate for Source Address Selection, as per\n// RFC 6724 section 5.\ntype addrCandidate struct {\n@@ -957,7 +964,7 @@ func (e *endpoint) acquirePrimaryAddressRLocked(remoteAddr tcpip.Address, allowE\n}\nif len(remoteAddr) == 0 {\n- return e.mu.addressableEndpointState.AcquirePrimaryAddress(remoteAddr, allowExpired)\n+ return e.mu.addressableEndpointState.AcquireOutgoingPrimaryAddress(remoteAddr, allowExpired)\n}\n// Create a candidate set of available addresses we can potentially use as a\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/ipv6/ndp.go",
"new_path": "pkg/tcpip/network/ipv6/ndp.go",
"diff": "@@ -1891,7 +1891,7 @@ func (ndp *ndpState) startSolicitingRouters() {\n// As per RFC 4861 section 4.1, the source of the RS is an address assigned\n// to the sending interface, or the unspecified address if no address is\n// assigned to the sending interface.\n- addressEndpoint := ndp.ep.acquirePrimaryAddressRLocked(header.IPv6AllRoutersMulticastAddress, false)\n+ addressEndpoint := ndp.ep.acquireOutgoingPrimaryAddressRLocked(header.IPv6AllRoutersMulticastAddress, false)\nif addressEndpoint == nil {\n// Incase this ends up creating a new temporary address, we need to hold\n// onto the endpoint until a route is obtained. If we decrement the\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/addressable_endpoint_state.go",
"new_path": "pkg/tcpip/stack/addressable_endpoint_state.go",
"diff": "@@ -412,6 +412,60 @@ func (a *AddressableEndpointState) decAddressRefLocked(addrState *addressState)\na.releaseAddressStateLocked(addrState)\n}\n+// MainAddress implements AddressableEndpoint.\n+func (a *AddressableEndpointState) MainAddress() tcpip.AddressWithPrefix {\n+ a.mu.RLock()\n+ defer a.mu.RUnlock()\n+\n+ ep := a.acquirePrimaryAddressRLocked(func(ep *addressState) bool {\n+ return ep.GetKind() == Permanent\n+ })\n+ if ep == nil {\n+ return tcpip.AddressWithPrefix{}\n+ }\n+\n+ addr := ep.AddressWithPrefix()\n+ a.decAddressRefLocked(ep)\n+ return addr\n+}\n+\n+// acquirePrimaryAddressRLocked returns an acquired primary address that is\n+// valid according to isValid.\n+//\n+// Precondition: e.mu must be read locked\n+func (a *AddressableEndpointState) acquirePrimaryAddressRLocked(isValid func(*addressState) bool) *addressState {\n+ var deprecatedEndpoint *addressState\n+ for _, ep := range a.mu.primary {\n+ if !isValid(ep) {\n+ continue\n+ }\n+\n+ if !ep.Deprecated() {\n+ if ep.IncRef() {\n+ // ep is not deprecated, so return it immediately.\n+ //\n+ // If we kept track of a deprecated endpoint, decrement its reference\n+ // count since it was incremented when we decided to keep track of it.\n+ if deprecatedEndpoint != nil {\n+ a.decAddressRefLocked(deprecatedEndpoint)\n+ deprecatedEndpoint = nil\n+ }\n+\n+ return ep\n+ }\n+ } else if deprecatedEndpoint == nil && ep.IncRef() {\n+ // We prefer an endpoint that is not deprecated, but we keep track of\n+ // ep in case a doesn't have any non-deprecated endpoints.\n+ //\n+ // If we end up finding a more preferred endpoint, ep's reference count\n+ // will be decremented.\n+ deprecatedEndpoint = ep\n+ }\n+ }\n+\n+ return deprecatedEndpoint\n+}\n+\n// AcquireAssignedAddress implements AddressableEndpoint.\nfunc (a *AddressableEndpointState) AcquireAssignedAddress(localAddr tcpip.Address, allowTemp bool, tempPEB PrimaryEndpointBehavior) AddressEndpoint {\na.mu.Lock()\n@@ -461,47 +515,34 @@ func (a *AddressableEndpointState) AcquireAssignedAddress(localAddr tcpip.Addres\nreturn ep\n}\n-// AcquirePrimaryAddress implements AddressableEndpoint.\n-func (a *AddressableEndpointState) AcquirePrimaryAddress(remoteAddr tcpip.Address, allowExpired bool) AddressEndpoint {\n+// AcquireOutgoingPrimaryAddress implements AddressableEndpoint.\n+func (a *AddressableEndpointState) AcquireOutgoingPrimaryAddress(remoteAddr tcpip.Address, allowExpired bool) AddressEndpoint {\na.mu.RLock()\ndefer a.mu.RUnlock()\n- var deprecatedEndpoint *addressState\n- for _, ep := range a.mu.primary {\n- if !ep.IsAssigned(allowExpired) {\n- continue\n- }\n+ ep := a.acquirePrimaryAddressRLocked(func(ep *addressState) bool {\n+ return ep.IsAssigned(allowExpired)\n+ })\n- if !ep.Deprecated() {\n- if ep.IncRef() {\n- // ep is not deprecated, so return it immediately.\n+ // From https://golang.org/doc/faq#nil_error:\n//\n- // If we kept track of a deprecated endpoint, decrement its reference\n- // count since it was incremented when we decided to keep track of it.\n- if deprecatedEndpoint != nil {\n- a.decAddressRefLocked(deprecatedEndpoint)\n- deprecatedEndpoint = nil\n- }\n-\n- return ep\n- }\n- } else if deprecatedEndpoint == nil && ep.IncRef() {\n- // We prefer an endpoint that is not deprecated, but we keep track of\n- // ep in case a doesn't have any non-deprecated endpoints.\n+ // Under the covers, interfaces are implemented as two elements, a type T and\n+ // a value V.\n//\n- // If we end up finding a more preferred endpoint, ep's reference count\n- // will be 
decremented.\n- deprecatedEndpoint = ep\n- }\n- }\n-\n- // a doesn't have any valid non-deprecated endpoints, so return\n- // deprecatedEndpoint (which may be nil if a doesn't have any valid deprecated\n- // endpoints either).\n- if deprecatedEndpoint == nil {\n+ // An interface value is nil only if the V and T are both unset, (T=nil, V is\n+ // not set), In particular, a nil interface will always hold a nil type. If we\n+ // store a nil pointer of type *int inside an interface value, the inner type\n+ // will be *int regardless of the value of the pointer: (T=*int, V=nil). Such\n+ // an interface value will therefore be non-nil even when the pointer value V\n+ // inside is nil.\n+ //\n+ // Since acquirePrimaryAddressRLocked returns a nil value with a non-nil type,\n+ // we need to explicitly return nil below if ep is (a typed) nil.\n+ if ep == nil {\nreturn nil\n}\n- return deprecatedEndpoint\n+\n+ return ep\n}\n// PrimaryAddresses implements AddressableEndpoint.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/addressable_endpoint_state_test.go",
"new_path": "pkg/tcpip/stack/addressable_endpoint_state_test.go",
"diff": "@@ -24,8 +24,13 @@ import (\n// TestAddressableEndpointStateCleanup tests that cleaning up an addressable\n// endpoint state removes permanent addresses and leaves groups.\nfunc TestAddressableEndpointStateCleanup(t *testing.T) {\n+ var ep fakeNetworkEndpoint\n+ if err := ep.Enable(); err != nil {\n+ t.Fatalf(\"ep.Enable(): %s\", err)\n+ }\n+\nvar s stack.AddressableEndpointState\n- s.Init(&fakeNetworkEndpoint{})\n+ s.Init(&ep)\naddr := tcpip.AddressWithPrefix{\nAddress: \"\\x01\",\n@@ -43,7 +48,7 @@ func TestAddressableEndpointStateCleanup(t *testing.T) {\n{\nep := s.AcquireAssignedAddress(addr.Address, false /* allowTemp */, stack.NeverPrimaryEndpoint)\nif ep == nil {\n- t.Fatalf(\"got s.AcquireAssignedAddress(%s) = nil, want = non-nil\", addr.Address)\n+ t.Fatalf(\"got s.AcquireAssignedAddress(%s, false, NeverPrimaryEndpoint) = nil, want = non-nil\", addr.Address)\n}\nep.DecRef()\n}\n@@ -63,7 +68,7 @@ func TestAddressableEndpointStateCleanup(t *testing.T) {\nep := s.AcquireAssignedAddress(addr.Address, false /* allowTemp */, stack.NeverPrimaryEndpoint)\nif ep != nil {\nep.DecRef()\n- t.Fatalf(\"got s.AcquireAssignedAddress(%s) = %s, want = nil\", addr.Address, ep.AddressWithPrefix())\n+ t.Fatalf(\"got s.AcquireAssignedAddress(%s, false, NeverPrimaryEndpoint) = %s, want = nil\", addr.Address, ep.AddressWithPrefix())\n}\n}\nif s.IsInGroup(group) {\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/nic.go",
"new_path": "pkg/tcpip/stack/nic.go",
"diff": "@@ -244,22 +244,19 @@ func (n *NIC) setSpoofing(enable bool) {\nn.mu.Unlock()\n}\n-// primaryEndpoint will return the first non-deprecated endpoint if such an\n-// endpoint exists for the given protocol and remoteAddr. If no non-deprecated\n-// endpoint exists, the first deprecated endpoint will be returned.\n-//\n-// If an IPv6 primary endpoint is requested, Source Address Selection (as\n-// defined by RFC 6724 section 5) will be performed.\n+// primaryAddress returns an address that can be used to communicate with\n+// remoteAddr.\nfunc (n *NIC) primaryEndpoint(protocol tcpip.NetworkProtocolNumber, remoteAddr tcpip.Address) AssignableAddressEndpoint {\nn.mu.RLock()\n- defer n.mu.RUnlock()\n+ spoofing := n.mu.spoofing\n+ n.mu.RUnlock()\nep, ok := n.networkEndpoints[protocol]\nif !ok {\nreturn nil\n}\n- return ep.AcquirePrimaryAddress(remoteAddr, n.mu.spoofing)\n+ return ep.AcquireOutgoingPrimaryAddress(remoteAddr, spoofing)\n}\ntype getAddressBehaviour int\n@@ -360,13 +357,12 @@ func (n *NIC) primaryAddresses() []tcpip.ProtocolAddress {\n// address exists. If no non-deprecated address exists, the first deprecated\n// address will be returned.\nfunc (n *NIC) primaryAddress(proto tcpip.NetworkProtocolNumber) tcpip.AddressWithPrefix {\n- addressEndpoint := n.primaryEndpoint(proto, \"\")\n- if addressEndpoint == nil {\n+ ep, ok := n.networkEndpoints[proto]\n+ if !ok {\nreturn tcpip.AddressWithPrefix{}\n}\n- addr := addressEndpoint.AddressWithPrefix()\n- addressEndpoint.DecRef()\n- return addr\n+\n+ return ep.MainAddress()\n}\n// removeAddress removes an address from n.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/registration.go",
"new_path": "pkg/tcpip/stack/registration.go",
"diff": "@@ -435,7 +435,10 @@ type AddressableEndpoint interface {\n// permanent address.\nRemovePermanentAddress(addr tcpip.Address) *tcpip.Error\n- // AcquireAssignedAddress returns an AddressEndpoint for the passed address\n+ // MainAddress returns the endpoint's primary permanent address.\n+ MainAddress() tcpip.AddressWithPrefix\n+\n+ // AcquireAssignedAddress returns an address endpoint for the passed address\n// that is considered bound to the endpoint, optionally creating a temporary\n// endpoint if requested and no existing address exists.\n//\n@@ -444,15 +447,15 @@ type AddressableEndpoint interface {\n// Returns nil if the specified address is not local to this endpoint.\nAcquireAssignedAddress(localAddr tcpip.Address, allowTemp bool, tempPEB PrimaryEndpointBehavior) AddressEndpoint\n- // AcquirePrimaryAddress returns a primary endpoint to use when communicating\n- // with the passed remote address.\n+ // AcquireOutgoingPrimaryAddress returns a primary address that may be used as\n+ // a source address when sending packets to the passed remote address.\n//\n// If allowExpired is true, expired addresses may be returned.\n//\n// The returned endpoint's reference count is incremented.\n//\n- // Returns nil if a primary endpoint is not available.\n- AcquirePrimaryAddress(remoteAddr tcpip.Address, allowExpired bool) AddressEndpoint\n+ // Returns nil if a primary address is not available.\n+ AcquireOutgoingPrimaryAddress(remoteAddr tcpip.Address, allowExpired bool) AddressEndpoint\n// PrimaryAddresses returns the primary addresses.\nPrimaryAddresses() []tcpip.AddressWithPrefix\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/stack_test.go",
"new_path": "pkg/tcpip/stack/stack_test.go",
"diff": "@@ -71,21 +71,36 @@ const (\ntype fakeNetworkEndpoint struct {\nstack.AddressableEndpointState\n+ mu struct {\n+ sync.RWMutex\n+\n+ enabled bool\n+ }\n+\nnicID tcpip.NICID\nproto *fakeNetworkProtocol\ndispatcher stack.TransportDispatcher\nep stack.LinkEndpoint\n}\n-func (*fakeNetworkEndpoint) Enable() *tcpip.Error {\n+func (f *fakeNetworkEndpoint) Enable() *tcpip.Error {\n+ f.mu.Lock()\n+ defer f.mu.Unlock()\n+ f.mu.enabled = true\nreturn nil\n}\n-func (*fakeNetworkEndpoint) Enabled() bool {\n- return true\n+func (f *fakeNetworkEndpoint) Enabled() bool {\n+ f.mu.RLock()\n+ defer f.mu.RUnlock()\n+ return f.mu.enabled\n}\n-func (*fakeNetworkEndpoint) Disable() {}\n+func (f *fakeNetworkEndpoint) Disable() {\n+ f.mu.Lock()\n+ defer f.mu.Unlock()\n+ f.mu.enabled = false\n+}\nfunc (f *fakeNetworkEndpoint) MTU() uint32 {\nreturn f.ep.MTU() - uint32(f.MaxHeaderLength())\n@@ -3620,3 +3635,43 @@ func TestGetNetworkEndpoint(t *testing.T) {\n})\n}\n}\n+\n+func TestGetMainNICAddressWhenNICDisabled(t *testing.T) {\n+ const nicID = 1\n+\n+ s := stack.New(stack.Options{\n+ NetworkProtocols: []stack.NetworkProtocolFactory{fakeNetFactory},\n+ })\n+\n+ if err := s.CreateNIC(nicID, channel.New(0, defaultMTU, \"\")); err != nil {\n+ t.Fatalf(\"CreateNIC(%d, _): %s\", nicID, err)\n+ }\n+\n+ protocolAddress := tcpip.ProtocolAddress{\n+ Protocol: fakeNetNumber,\n+ AddressWithPrefix: tcpip.AddressWithPrefix{\n+ Address: \"\\x01\",\n+ PrefixLen: 8,\n+ },\n+ }\n+ if err := s.AddProtocolAddress(nicID, protocolAddress); err != nil {\n+ t.Fatalf(\"AddProtocolAddress(%d, %#v): %s\", nicID, protocolAddress, err)\n+ }\n+\n+ // Check that we get the right initial address and prefix length.\n+ if gotAddr, err := s.GetMainNICAddress(nicID, fakeNetNumber); err != nil {\n+ t.Fatalf(\"GetMainNICAddress(%d, %d): %s\", nicID, fakeNetNumber, err)\n+ } else if gotAddr != protocolAddress.AddressWithPrefix {\n+ t.Fatalf(\"got GetMainNICAddress(%d, %d) = %s, want = %s\", nicID, fakeNetNumber, gotAddr, protocolAddress.AddressWithPrefix)\n+ }\n+\n+ // Should still get the address when the NIC is diabled.\n+ if err := s.DisableNIC(nicID); err != nil {\n+ t.Fatalf(\"DisableNIC(%d): %s\", nicID, err)\n+ }\n+ if gotAddr, err := s.GetMainNICAddress(nicID, fakeNetNumber); err != nil {\n+ t.Fatalf(\"GetMainNICAddress(%d, %d): %s\", nicID, fakeNetNumber, err)\n+ } else if gotAddr != protocolAddress.AddressWithPrefix {\n+ t.Fatalf(\"got GetMainNICAddress(%d, %d) = %s, want = %s\", nicID, fakeNetNumber, gotAddr, protocolAddress.AddressWithPrefix)\n+ }\n+}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Return permanent addresses when NIC is down
Test: stack_test.TestGetMainNICAddressWhenNICDisabled
PiperOrigin-RevId: 334513286 |
259,891 | 29.09.2020 22:39:37 | 25,200 | 0aae51c6e09046e56f2d4b6064124da059731286 | iptables: remove unused min/max NAT range fields | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/netfilter/targets.go",
"new_path": "pkg/sentry/socket/netfilter/targets.go",
"diff": "@@ -194,11 +194,9 @@ func (*redirectTargetMaker) marshal(target stack.Target) []byte {\nret := make([]byte, 0, linux.SizeOfXTRedirectTarget)\nxt.NfRange.RangeSize = 1\n- if rt.RangeProtoSpecified {\nxt.NfRange.RangeIPV4.Flags |= linux.NF_NAT_RANGE_PROTO_SPECIFIED\n- }\n- xt.NfRange.RangeIPV4.MinPort = htons(rt.MinPort)\n- xt.NfRange.RangeIPV4.MaxPort = htons(rt.MaxPort)\n+ xt.NfRange.RangeIPV4.MinPort = htons(rt.Port)\n+ xt.NfRange.RangeIPV4.MaxPort = xt.NfRange.RangeIPV4.MinPort\nreturn binary.Marshal(ret, usermem.ByteOrder, xt)\n}\n@@ -231,23 +229,23 @@ func (*redirectTargetMaker) unmarshal(buf []byte, filter stack.IPHeaderFilter) (\n// Also check if we need to map ports or IP.\n// For now, redirect target only supports destination port change.\n// Port range and IP range are not supported yet.\n- if nfRange.RangeIPV4.Flags&linux.NF_NAT_RANGE_PROTO_SPECIFIED == 0 {\n+ if nfRange.RangeIPV4.Flags != linux.NF_NAT_RANGE_PROTO_SPECIFIED {\nnflog(\"redirectTargetMaker: invalid range flags %d\", nfRange.RangeIPV4.Flags)\nreturn nil, syserr.ErrInvalidArgument\n}\n- target.RangeProtoSpecified = true\n-\n- target.MinIP = tcpip.Address(nfRange.RangeIPV4.MinIP[:])\n- target.MaxIP = tcpip.Address(nfRange.RangeIPV4.MaxIP[:])\n// TODO(gvisor.dev/issue/170): Port range is not supported yet.\nif nfRange.RangeIPV4.MinPort != nfRange.RangeIPV4.MaxPort {\nnflog(\"redirectTargetMaker: MinPort != MaxPort (%d, %d)\", nfRange.RangeIPV4.MinPort, nfRange.RangeIPV4.MaxPort)\nreturn nil, syserr.ErrInvalidArgument\n}\n+ if nfRange.RangeIPV4.MinIP != nfRange.RangeIPV4.MaxIP {\n+ nflog(\"redirectTargetMaker: MinIP != MaxIP (%d, %d)\", nfRange.RangeIPV4.MinPort, nfRange.RangeIPV4.MaxPort)\n+ return nil, syserr.ErrInvalidArgument\n+ }\n- target.MinPort = ntohs(nfRange.RangeIPV4.MinPort)\n- target.MaxPort = ntohs(nfRange.RangeIPV4.MaxPort)\n+ target.Addr = tcpip.Address(nfRange.RangeIPV4.MinIP[:])\n+ target.Port = ntohs(nfRange.RangeIPV4.MinPort)\nreturn &target, nil\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/conntrack.go",
"new_path": "pkg/tcpip/stack/conntrack.go",
"diff": "@@ -281,8 +281,8 @@ func (ct *ConnTrack) insertRedirectConn(pkt *PacketBuffer, hook Hook, rt Redirec\n// rule. This tuple will be used to manipulate the packet in\n// handlePacket.\nreplyTID := tid.reply()\n- replyTID.srcAddr = rt.MinIP\n- replyTID.srcPort = rt.MinPort\n+ replyTID.srcAddr = rt.Addr\n+ replyTID.srcPort = rt.Port\nvar manip manipType\nswitch hook {\ncase Prerouting:\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/iptables_targets.go",
"new_path": "pkg/tcpip/stack/iptables_targets.go",
"diff": "@@ -128,26 +128,14 @@ func (ReturnTarget) Action(*PacketBuffer, *ConnTrack, Hook, *GSO, *Route, tcpip.\nconst RedirectTargetName = \"REDIRECT\"\n// RedirectTarget redirects the packet by modifying the destination port/IP.\n-// Min and Max values for IP and Ports in the struct indicate the range of\n-// values which can be used to redirect.\n+// TODO(gvisor.dev/issue/170): Other flags need to be added after we support\n+// them.\ntype RedirectTarget struct {\n- // TODO(gvisor.dev/issue/170): Other flags need to be added after\n- // we support them.\n- // RangeProtoSpecified flag indicates single port is specified to\n- // redirect.\n- RangeProtoSpecified bool\n+ // Addr indicates address used to redirect.\n+ Addr tcpip.Address\n- // MinIP indicates address used to redirect.\n- MinIP tcpip.Address\n-\n- // MaxIP indicates address used to redirect.\n- MaxIP tcpip.Address\n-\n- // MinPort indicates port used to redirect.\n- MinPort uint16\n-\n- // MaxPort indicates port used to redirect.\n- MaxPort uint16\n+ // Port indicates port used to redirect.\n+ Port uint16\n// NetworkProtocol is the network protocol the target is used with.\nNetworkProtocol tcpip.NetworkProtocolNumber\n@@ -180,11 +168,9 @@ func (rt RedirectTarget) Action(pkt *PacketBuffer, ct *ConnTrack, hook Hook, gso\n// to primary address of the incoming interface in Prerouting.\nswitch hook {\ncase Output:\n- rt.MinIP = tcpip.Address([]byte{127, 0, 0, 1})\n- rt.MaxIP = tcpip.Address([]byte{127, 0, 0, 1})\n+ rt.Addr = tcpip.Address([]byte{127, 0, 0, 1})\ncase Prerouting:\n- rt.MinIP = address\n- rt.MaxIP = address\n+ rt.Addr = address\ndefault:\npanic(\"redirect target is supported only on output and prerouting hooks\")\n}\n@@ -195,7 +181,7 @@ func (rt RedirectTarget) Action(pkt *PacketBuffer, ct *ConnTrack, hook Hook, gso\nswitch protocol := netHeader.TransportProtocol(); protocol {\ncase header.UDPProtocolNumber:\nudpHeader := header.UDP(pkt.TransportHeader().View())\n- udpHeader.SetDestinationPort(rt.MinPort)\n+ udpHeader.SetDestinationPort(rt.Port)\n// Calculate UDP checksum and set it.\nif hook == Output {\n@@ -213,7 +199,7 @@ func (rt RedirectTarget) Action(pkt *PacketBuffer, ct *ConnTrack, hook Hook, gso\n}\n}\n// Change destination address.\n- netHeader.SetDestinationAddress(rt.MinIP)\n+ netHeader.SetDestinationAddress(rt.Addr)\nnetHeader.SetChecksum(0)\nnetHeader.SetChecksum(^netHeader.CalculateChecksum())\npkt.NatDone = true\n"
}
] | Go | Apache License 2.0 | google/gvisor | iptables: remove unused min/max NAT range fields
PiperOrigin-RevId: 334531794 |
259,891 | 29.09.2020 23:20:08 | 25,200 | 3ef549b67f83fdbc1c7aa30c2b8531f38419461e | Set transport protocol number during parsing | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/header/parse/parse.go",
"new_path": "pkg/tcpip/header/parse/parse.go",
"diff": "@@ -139,6 +139,7 @@ traverseExtensions:\n// Returns true if the header was successfully parsed.\nfunc UDP(pkt *stack.PacketBuffer) bool {\n_, ok := pkt.TransportHeader().Consume(header.UDPMinimumSize)\n+ pkt.TransportProtocolNumber = header.UDPProtocolNumber\nreturn ok\n}\n@@ -162,5 +163,6 @@ func TCP(pkt *stack.PacketBuffer) bool {\n}\n_, ok = pkt.TransportHeader().Consume(hdrLen)\n+ pkt.TransportProtocolNumber = header.TCPProtocolNumber\nreturn ok\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Set transport protocol number during parsing
PiperOrigin-RevId: 334535896 |
260,001 | 30.09.2020 11:19:16 | 25,200 | 299e5d6e4035d0792dd06562d6b495a85797db87 | Add verity fs tests
The tests confirms that when a file is opened in verity, the
corresponding Merkle trees are generated. Also a normal read succeeds on
verity enabled files, but fails if either the verity file or the Merkle
tree file is modified. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/verity/BUILD",
"new_path": "pkg/sentry/fsimpl/verity/BUILD",
"diff": "-load(\"//tools:defs.bzl\", \"go_library\")\n+load(\"//tools:defs.bzl\", \"go_library\", \"go_test\")\nlicenses([\"notice\"])\n@@ -26,3 +26,22 @@ go_library(\n\"//pkg/usermem\",\n],\n)\n+\n+go_test(\n+ name = \"verity_test\",\n+ srcs = [\n+ \"verity_test.go\",\n+ ],\n+ library = \":verity\",\n+ deps = [\n+ \"//pkg/abi/linux\",\n+ \"//pkg/context\",\n+ \"//pkg/fspath\",\n+ \"//pkg/sentry/arch\",\n+ \"//pkg/sentry/fsimpl/tmpfs\",\n+ \"//pkg/sentry/kernel/auth\",\n+ \"//pkg/sentry/kernel/contexttest\",\n+ \"//pkg/sentry/vfs\",\n+ \"//pkg/usermem\",\n+ ],\n+)\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "pkg/sentry/fsimpl/verity/verity_test.go",
"diff": "+// Copyright 2020 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package verity\n+\n+import (\n+ \"fmt\"\n+ \"io\"\n+ \"math/rand\"\n+ \"testing\"\n+ \"time\"\n+\n+ \"gvisor.dev/gvisor/pkg/abi/linux\"\n+ \"gvisor.dev/gvisor/pkg/context\"\n+ \"gvisor.dev/gvisor/pkg/fspath\"\n+ \"gvisor.dev/gvisor/pkg/sentry/arch\"\n+ \"gvisor.dev/gvisor/pkg/sentry/fsimpl/tmpfs\"\n+ \"gvisor.dev/gvisor/pkg/sentry/kernel/auth\"\n+ \"gvisor.dev/gvisor/pkg/sentry/kernel/contexttest\"\n+ \"gvisor.dev/gvisor/pkg/sentry/vfs\"\n+ \"gvisor.dev/gvisor/pkg/usermem\"\n+)\n+\n+// rootMerkleFilename is the name of the root Merkle tree file.\n+const rootMerkleFilename = \"root.verity\"\n+\n+// maxDataSize is the maximum data size written to the file for test.\n+const maxDataSize = 100000\n+\n+// newVerityRoot creates a new verity mount, and returns the root. The\n+// underlying file system is tmpfs. If the error is not nil, then cleanup\n+// should be called when the root is no longer needed.\n+func newVerityRoot(ctx context.Context) (*vfs.VirtualFilesystem, vfs.VirtualDentry, func(), error) {\n+ rand.Seed(time.Now().UnixNano())\n+ vfsObj := &vfs.VirtualFilesystem{}\n+ if err := vfsObj.Init(ctx); err != nil {\n+ return nil, vfs.VirtualDentry{}, nil, fmt.Errorf(\"VFS init: %v\", err)\n+ }\n+\n+ vfsObj.MustRegisterFilesystemType(\"verity\", FilesystemType{}, &vfs.RegisterFilesystemTypeOptions{\n+ AllowUserMount: true,\n+ })\n+\n+ vfsObj.MustRegisterFilesystemType(\"tmpfs\", tmpfs.FilesystemType{}, &vfs.RegisterFilesystemTypeOptions{\n+ AllowUserMount: true,\n+ })\n+\n+ mntns, err := vfsObj.NewMountNamespace(ctx, auth.CredentialsFromContext(ctx), \"\", \"verity\", &vfs.MountOptions{\n+ GetFilesystemOptions: vfs.GetFilesystemOptions{\n+ InternalData: InternalFilesystemOptions{\n+ RootMerkleFileName: rootMerkleFilename,\n+ LowerName: \"tmpfs\",\n+ AllowRuntimeEnable: true,\n+ NoCrashOnVerificationFailure: true,\n+ },\n+ },\n+ })\n+ if err != nil {\n+ return nil, vfs.VirtualDentry{}, nil, fmt.Errorf(\"failed to create verity root mount: %v\", err)\n+ }\n+ root := mntns.Root()\n+ return vfsObj, root, func() {\n+ root.DecRef(ctx)\n+ mntns.DecRef(ctx)\n+ }, nil\n+}\n+\n+// newFileFD creates a new file in the verity mount, and returns the FD. 
The FD\n+// points to a file that has random data generated.\n+func newFileFD(ctx context.Context, vfsObj *vfs.VirtualFilesystem, root vfs.VirtualDentry, filePath string, mode linux.FileMode) (*vfs.FileDescription, int, error) {\n+ creds := auth.CredentialsFromContext(ctx)\n+ lowerRoot := root.Dentry().Impl().(*dentry).lowerVD\n+\n+ // Create the file in the underlying file system.\n+ lowerFD, err := vfsObj.OpenAt(ctx, creds, &vfs.PathOperation{\n+ Root: lowerRoot,\n+ Start: lowerRoot,\n+ Path: fspath.Parse(filePath),\n+ }, &vfs.OpenOptions{\n+ Flags: linux.O_RDWR | linux.O_CREAT | linux.O_EXCL,\n+ Mode: linux.ModeRegular | mode,\n+ })\n+ if err != nil {\n+ return nil, 0, err\n+ }\n+\n+ // Generate random data to be written to the file.\n+ dataSize := rand.Intn(maxDataSize) + 1\n+ data := make([]byte, dataSize)\n+ rand.Read(data)\n+\n+ // Write directly to the underlying FD, since verity FD is read-only.\n+ n, err := lowerFD.Write(ctx, usermem.BytesIOSequence(data), vfs.WriteOptions{})\n+ if err != nil {\n+ return nil, 0, err\n+ }\n+\n+ if n != int64(len(data)) {\n+ return nil, 0, fmt.Errorf(\"lowerFD.Write got write length %d, want %d\", n, len(data))\n+ }\n+\n+ lowerFD.DecRef(ctx)\n+\n+ // Now open the verity file descriptor.\n+ fd, err := vfsObj.OpenAt(ctx, creds, &vfs.PathOperation{\n+ Root: root,\n+ Start: root,\n+ Path: fspath.Parse(filePath),\n+ }, &vfs.OpenOptions{\n+ Flags: linux.O_RDONLY,\n+ Mode: linux.ModeRegular | mode,\n+ })\n+ return fd, dataSize, err\n+}\n+\n+// corruptRandomBit randomly flips a bit in the file represented by fd.\n+func corruptRandomBit(ctx context.Context, fd *vfs.FileDescription, size int) error {\n+ // Flip a random bit in the underlying file.\n+ randomPos := int64(rand.Intn(size))\n+ byteToModify := make([]byte, 1)\n+ if _, err := fd.PRead(ctx, usermem.BytesIOSequence(byteToModify), randomPos, vfs.ReadOptions{}); err != nil {\n+ return fmt.Errorf(\"lowerFD.PRead failed: %v\", err)\n+ }\n+ byteToModify[0] ^= 1\n+ if _, err := fd.PWrite(ctx, usermem.BytesIOSequence(byteToModify), randomPos, vfs.WriteOptions{}); err != nil {\n+ return fmt.Errorf(\"lowerFD.PWrite failed: %v\", err)\n+ }\n+ return nil\n+}\n+\n+// TestOpen ensures that when a file is created, the corresponding Merkle tree\n+// file and the root Merkle tree file exist.\n+func TestOpen(t *testing.T) {\n+ ctx := contexttest.Context(t)\n+ vfsObj, root, cleanup, err := newVerityRoot(ctx)\n+ if err != nil {\n+ t.Fatalf(\"Failed to create new verity root: %v\", err)\n+ }\n+ defer cleanup()\n+\n+ filename := \"verity-test-file\"\n+ if _, _, err := newFileFD(ctx, vfsObj, root, filename, 0644); err != nil {\n+ t.Fatalf(\"Failed to create new file fd: %v\", err)\n+ }\n+\n+ // Ensure that the corresponding Merkle tree file is created.\n+ lowerRoot := root.Dentry().Impl().(*dentry).lowerVD\n+ if _, err = vfsObj.OpenAt(ctx, auth.CredentialsFromContext(ctx), &vfs.PathOperation{\n+ Root: lowerRoot,\n+ Start: lowerRoot,\n+ Path: fspath.Parse(merklePrefix + filename),\n+ }, &vfs.OpenOptions{\n+ Flags: linux.O_RDONLY,\n+ }); err != nil {\n+ t.Errorf(\"Failed to open Merkle tree file %s: %v\", merklePrefix+filename, err)\n+ }\n+\n+ // Ensure the root merkle tree file is created.\n+ if _, err = vfsObj.OpenAt(ctx, auth.CredentialsFromContext(ctx), &vfs.PathOperation{\n+ Root: lowerRoot,\n+ Start: lowerRoot,\n+ Path: fspath.Parse(merklePrefix + rootMerkleFilename),\n+ }, &vfs.OpenOptions{\n+ Flags: linux.O_RDONLY,\n+ }); err != nil {\n+ t.Errorf(\"Failed to open root Merkle tree file %s: %v\", 
merklePrefix+rootMerkleFilename, err)\n+ }\n+}\n+\n+// TestUntouchedFileSucceeds ensures that read from an untouched verity file\n+// succeeds after enabling verity for it.\n+func TestReadUntouchedFileSucceeds(t *testing.T) {\n+ ctx := contexttest.Context(t)\n+ vfsObj, root, cleanup, err := newVerityRoot(ctx)\n+ if err != nil {\n+ t.Fatalf(\"Failed to create new verity root: %v\", err)\n+ }\n+ defer cleanup()\n+\n+ filename := \"verity-test-file\"\n+ fd, size, err := newFileFD(ctx, vfsObj, root, filename, 0644)\n+ if err != nil {\n+ t.Fatalf(\"Failed to create new file fd: %v\", err)\n+ }\n+\n+ // Enable verity on the file and confirm a normal read succeeds.\n+ var args arch.SyscallArguments\n+ args[1] = arch.SyscallArgument{Value: linux.FS_IOC_ENABLE_VERITY}\n+ if _, err := fd.Ioctl(ctx, nil /* uio */, args); err != nil {\n+ t.Fatalf(\"Ioctl failed: %v\", err)\n+ }\n+\n+ buf := make([]byte, size)\n+ n, err := fd.PRead(ctx, usermem.BytesIOSequence(buf), 0 /* offset */, vfs.ReadOptions{})\n+ if err != nil && err != io.EOF {\n+ t.Fatalf(\"fd.PRead failed: %v\", err)\n+ }\n+\n+ if n != int64(size) {\n+ t.Errorf(\"fd.PRead got read length %d, want %d\", n, size)\n+ }\n+}\n+\n+// TestReopenUntouchedFileSucceeds ensures that reopen an untouched verity file\n+// succeeds after enabling verity for it.\n+func TestReopenUntouchedFileSucceeds(t *testing.T) {\n+ ctx := contexttest.Context(t)\n+ vfsObj, root, cleanup, err := newVerityRoot(ctx)\n+ if err != nil {\n+ t.Fatalf(\"Failed to create new verity root: %v\", err)\n+ }\n+ defer cleanup()\n+\n+ filename := \"verity-test-file\"\n+ fd, _, err := newFileFD(ctx, vfsObj, root, filename, 0644)\n+ if err != nil {\n+ t.Fatalf(\"Failed to create new file fd: %v\", err)\n+ }\n+\n+ // Enable verity on the file and confirms a normal read succeeds.\n+ var args arch.SyscallArguments\n+ args[1] = arch.SyscallArgument{Value: linux.FS_IOC_ENABLE_VERITY}\n+ if _, err := fd.Ioctl(ctx, nil /* uio */, args); err != nil {\n+ t.Fatalf(\"Ioctl failed: %v\", err)\n+ }\n+\n+ // Ensure reopening the verity enabled file succeeds.\n+ if _, err = vfsObj.OpenAt(ctx, auth.CredentialsFromContext(ctx), &vfs.PathOperation{\n+ Root: root,\n+ Start: root,\n+ Path: fspath.Parse(filename),\n+ }, &vfs.OpenOptions{\n+ Flags: linux.O_RDONLY,\n+ Mode: linux.ModeRegular,\n+ }); err != nil {\n+ t.Errorf(\"reopen enabled file failed: %v\", err)\n+ }\n+}\n+\n+// TestModifiedFileFails ensures that read from a modified verity file fails.\n+func TestModifiedFileFails(t *testing.T) {\n+ ctx := contexttest.Context(t)\n+ vfsObj, root, cleanup, err := newVerityRoot(ctx)\n+ if err != nil {\n+ t.Fatalf(\"Failed to create new verity root: %v\", err)\n+ }\n+ defer cleanup()\n+\n+ filename := \"verity-test-file\"\n+ fd, size, err := newFileFD(ctx, vfsObj, root, filename, 0644)\n+ if err != nil {\n+ t.Fatalf(\"Failed to create new file fd: %v\", err)\n+ }\n+\n+ // Enable verity on the file.\n+ var args arch.SyscallArguments\n+ args[1] = arch.SyscallArgument{Value: linux.FS_IOC_ENABLE_VERITY}\n+ if _, err := fd.Ioctl(ctx, nil /* uio */, args); err != nil {\n+ t.Fatalf(\"Ioctl failed: %v\", err)\n+ }\n+\n+ // Open a new lowerFD that's read/writable.\n+ lowerVD := fd.Impl().(*fileDescription).d.lowerVD\n+\n+ lowerFD, err := vfsObj.OpenAt(ctx, auth.CredentialsFromContext(ctx), &vfs.PathOperation{\n+ Root: lowerVD,\n+ Start: lowerVD,\n+ }, &vfs.OpenOptions{\n+ Flags: linux.O_RDWR,\n+ })\n+ if err != nil {\n+ t.Fatalf(\"Open lowerFD failed: %v\", err)\n+ }\n+\n+ if err := corruptRandomBit(ctx, lowerFD, size); 
err != nil {\n+ t.Fatalf(\"%v\", err)\n+ }\n+\n+ // Confirm that read from the modified file fails.\n+ buf := make([]byte, size)\n+ if _, err := fd.PRead(ctx, usermem.BytesIOSequence(buf), 0 /* offset */, vfs.ReadOptions{}); err == nil {\n+ t.Fatalf(\"fd.PRead succeeded with modified file\")\n+ }\n+}\n+\n+// TestModifiedMerkleFails ensures that read from a verity file fails if the\n+// corresponding Merkle tree file is modified.\n+func TestModifiedMerkleFails(t *testing.T) {\n+ ctx := contexttest.Context(t)\n+ vfsObj, root, cleanup, err := newVerityRoot(ctx)\n+ if err != nil {\n+ t.Fatalf(\"Failed to create new verity root: %v\", err)\n+ }\n+ defer cleanup()\n+\n+ filename := \"verity-test-file\"\n+ fd, size, err := newFileFD(ctx, vfsObj, root, filename, 0644)\n+ if err != nil {\n+ t.Fatalf(\"Failed to create new file fd: %v\", err)\n+ }\n+\n+ // Enable verity on the file.\n+ var args arch.SyscallArguments\n+ args[1] = arch.SyscallArgument{Value: linux.FS_IOC_ENABLE_VERITY}\n+ if _, err := fd.Ioctl(ctx, nil /* uio */, args); err != nil {\n+ t.Fatalf(\"Ioctl failed: %v\", err)\n+ }\n+\n+ // Open a new lowerMerkleFD that's read/writable.\n+ lowerMerkleVD := fd.Impl().(*fileDescription).d.lowerMerkleVD\n+\n+ lowerMerkleFD, err := vfsObj.OpenAt(ctx, auth.CredentialsFromContext(ctx), &vfs.PathOperation{\n+ Root: lowerMerkleVD,\n+ Start: lowerMerkleVD,\n+ }, &vfs.OpenOptions{\n+ Flags: linux.O_RDWR,\n+ })\n+ if err != nil {\n+ t.Fatalf(\"Open lowerMerkleFD failed: %v\", err)\n+ }\n+\n+ // Flip a random bit in the Merkle tree file.\n+ stat, err := lowerMerkleFD.Stat(ctx, vfs.StatOptions{})\n+ if err != nil {\n+ t.Fatalf(\"Failed to get lowerMerkleFD stat: %v\", err)\n+ }\n+ merkleSize := int(stat.Size)\n+ if err := corruptRandomBit(ctx, lowerMerkleFD, merkleSize); err != nil {\n+ t.Fatalf(\"%v\", err)\n+ }\n+\n+ // Confirm that read from a file with modified Merkle tree fails.\n+ buf := make([]byte, size)\n+ if _, err := fd.PRead(ctx, usermem.BytesIOSequence(buf), 0 /* offset */, vfs.ReadOptions{}); err == nil {\n+ fmt.Println(buf)\n+ t.Fatalf(\"fd.PRead succeeded with modified Merkle file\")\n+ }\n+}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add verity fs tests
The tests confirms that when a file is opened in verity, the
corresponding Merkle trees are generated. Also a normal read succeeds on
verity enabled files, but fails if either the verity file or the Merkle
tree file is modified.
PiperOrigin-RevId: 334640331 |
259,891 | 30.09.2020 12:21:01 | 25,200 | b49a17fc3d33068a1235d97efd8cc8bc9f0f3ccf | Make all Target.Action implementation pointer receivers | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/netfilter/targets.go",
"new_path": "pkg/sentry/socket/netfilter/targets.go",
"diff": "@@ -306,7 +306,7 @@ func (jt *JumpTarget) ID() stack.TargetID {\n}\n// Action implements stack.Target.Action.\n-func (jt JumpTarget) Action(*stack.PacketBuffer, *stack.ConnTrack, stack.Hook, *stack.GSO, *stack.Route, tcpip.Address) (stack.RuleVerdict, int) {\n+func (jt *JumpTarget) Action(*stack.PacketBuffer, *stack.ConnTrack, stack.Hook, *stack.GSO, *stack.Route, tcpip.Address) (stack.RuleVerdict, int) {\nreturn stack.RuleJump, jt.RuleNum\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/conntrack.go",
"new_path": "pkg/tcpip/stack/conntrack.go",
"diff": "@@ -268,7 +268,7 @@ func (ct *ConnTrack) connForTID(tid tupleID) (*conn, direction) {\nreturn nil, dirOriginal\n}\n-func (ct *ConnTrack) insertRedirectConn(pkt *PacketBuffer, hook Hook, rt RedirectTarget) *conn {\n+func (ct *ConnTrack) insertRedirectConn(pkt *PacketBuffer, hook Hook, rt *RedirectTarget) *conn {\ntid, err := packetToTupleID(pkt)\nif err != nil {\nreturn nil\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/iptables_targets.go",
"new_path": "pkg/tcpip/stack/iptables_targets.go",
"diff": "@@ -34,7 +34,7 @@ func (at *AcceptTarget) ID() TargetID {\n}\n// Action implements Target.Action.\n-func (AcceptTarget) Action(*PacketBuffer, *ConnTrack, Hook, *GSO, *Route, tcpip.Address) (RuleVerdict, int) {\n+func (*AcceptTarget) Action(*PacketBuffer, *ConnTrack, Hook, *GSO, *Route, tcpip.Address) (RuleVerdict, int) {\nreturn RuleAccept, 0\n}\n@@ -52,7 +52,7 @@ func (dt *DropTarget) ID() TargetID {\n}\n// Action implements Target.Action.\n-func (DropTarget) Action(*PacketBuffer, *ConnTrack, Hook, *GSO, *Route, tcpip.Address) (RuleVerdict, int) {\n+func (*DropTarget) Action(*PacketBuffer, *ConnTrack, Hook, *GSO, *Route, tcpip.Address) (RuleVerdict, int) {\nreturn RuleDrop, 0\n}\n@@ -76,7 +76,7 @@ func (et *ErrorTarget) ID() TargetID {\n}\n// Action implements Target.Action.\n-func (ErrorTarget) Action(*PacketBuffer, *ConnTrack, Hook, *GSO, *Route, tcpip.Address) (RuleVerdict, int) {\n+func (*ErrorTarget) Action(*PacketBuffer, *ConnTrack, Hook, *GSO, *Route, tcpip.Address) (RuleVerdict, int) {\nlog.Debugf(\"ErrorTarget triggered.\")\nreturn RuleDrop, 0\n}\n@@ -99,7 +99,7 @@ func (uc *UserChainTarget) ID() TargetID {\n}\n// Action implements Target.Action.\n-func (UserChainTarget) Action(*PacketBuffer, *ConnTrack, Hook, *GSO, *Route, tcpip.Address) (RuleVerdict, int) {\n+func (*UserChainTarget) Action(*PacketBuffer, *ConnTrack, Hook, *GSO, *Route, tcpip.Address) (RuleVerdict, int) {\npanic(\"UserChainTarget should never be called.\")\n}\n@@ -118,7 +118,7 @@ func (rt *ReturnTarget) ID() TargetID {\n}\n// Action implements Target.Action.\n-func (ReturnTarget) Action(*PacketBuffer, *ConnTrack, Hook, *GSO, *Route, tcpip.Address) (RuleVerdict, int) {\n+func (*ReturnTarget) Action(*PacketBuffer, *ConnTrack, Hook, *GSO, *Route, tcpip.Address) (RuleVerdict, int) {\nreturn RuleReturn, 0\n}\n@@ -153,7 +153,7 @@ func (rt *RedirectTarget) ID() TargetID {\n// TODO(gvisor.dev/issue/170): Parse headers without copying. The current\n// implementation only works for PREROUTING and calls pkt.Clone(), neither\n// of which should be the case.\n-func (rt RedirectTarget) Action(pkt *PacketBuffer, ct *ConnTrack, hook Hook, gso *GSO, r *Route, address tcpip.Address) (RuleVerdict, int) {\n+func (rt *RedirectTarget) Action(pkt *PacketBuffer, ct *ConnTrack, hook Hook, gso *GSO, r *Route, address tcpip.Address) (RuleVerdict, int) {\n// Packet is already manipulated.\nif pkt.NatDone {\nreturn RuleAccept, 0\n"
}
] | Go | Apache License 2.0 | google/gvisor | Make all Target.Action implementation pointer receivers
PiperOrigin-RevId: 334652998 |
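A small standalone illustration (not code from the diff above) of one plausible reason for moving Target.Action implementations to pointer receivers: targets such as RedirectTarget assign to their own fields inside Action, and with a value receiver that assignment lands on a copy. The toy types below are assumptions for illustration, not gVisor's types.

// receiver_sketch.go — value vs. pointer receivers in Go.
package main

import "fmt"

type redirectByValue struct{ addr string }

func (r redirectByValue) setAddr(a string) { r.addr = a } // mutates a copy

type redirectByPointer struct{ addr string }

func (r *redirectByPointer) setAddr(a string) { r.addr = a } // mutates the original

func main() {
	v := redirectByValue{}
	v.setAddr("127.0.0.1")
	fmt.Printf("value receiver: %q (update lost)\n", v.addr)

	p := &redirectByPointer{}
	p.setAddr("127.0.0.1")
	fmt.Printf("pointer receiver: %q (update kept)\n", p.addr)
}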
260,001 | 30.09.2020 14:43:06 | 25,200 | 38704d9b667b9ebabc3f694a6508b37ead567b6f | Implement ioctl with measure in verity fs | [
{
"change_type": "MODIFY",
"old_path": "pkg/abi/linux/ioctl.go",
"new_path": "pkg/abi/linux/ioctl.go",
"diff": "@@ -122,8 +122,26 @@ const (\n// Constants from uapi/linux/fsverity.h.\nconst (\nFS_IOC_ENABLE_VERITY = 1082156677\n+ FS_IOC_MEASURE_VERITY = 3221513862\n)\n+// DigestMetadata is a helper struct for VerityDigest.\n+//\n+// +marshal\n+type DigestMetadata struct {\n+ DigestAlgorithm uint16\n+ DigestSize uint16\n+}\n+\n+// SizeOfDigestMetadata is the size of struct DigestMetadata.\n+const SizeOfDigestMetadata = 4\n+\n+// VerityDigest is struct from uapi/linux/fsverity.h.\n+type VerityDigest struct {\n+ Metadata DigestMetadata\n+ Digest []byte\n+}\n+\n// IOC outputs the result of _IOC macro in asm-generic/ioctl.h.\nfunc IOC(dir, typ, nr, size uint32) uint32 {\nreturn uint32(dir)<<_IOC_DIRSHIFT | typ<<_IOC_TYPESHIFT | nr<<_IOC_NRSHIFT | size<<_IOC_SIZESHIFT\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/verity/verity.go",
"new_path": "pkg/sentry/fsimpl/verity/verity.go",
"diff": "@@ -558,7 +558,7 @@ func (fd *fileDescription) generateMerkle(ctx context.Context) ([]byte, uint64,\n// enableVerity enables verity features on fd by generating a Merkle tree file\n// and stores its root hash in its parent directory's Merkle tree.\n-func (fd *fileDescription) enableVerity(ctx context.Context, uio usermem.IO, args arch.SyscallArguments) (uintptr, error) {\n+func (fd *fileDescription) enableVerity(ctx context.Context, uio usermem.IO) (uintptr, error) {\nif !fd.d.fs.allowRuntimeEnable {\nreturn 0, syserror.EPERM\n}\n@@ -616,7 +616,45 @@ func (fd *fileDescription) enableVerity(ctx context.Context, uio usermem.IO, arg\nreturn 0, nil\n}\n-func (fd *fileDescription) getFlags(ctx context.Context, uio usermem.IO, args arch.SyscallArguments) (uintptr, error) {\n+// measureVerity returns the root hash of fd, saved in args[2].\n+func (fd *fileDescription) measureVerity(ctx context.Context, uio usermem.IO, verityDigest usermem.Addr) (uintptr, error) {\n+ t := kernel.TaskFromContext(ctx)\n+ var metadata linux.DigestMetadata\n+\n+ // If allowRuntimeEnable is true, an empty fd.d.rootHash indicates that\n+ // verity is not enabled for the file. If allowRuntimeEnable is false,\n+ // this is an integrity violation because all files should have verity\n+ // enabled, in which case fd.d.rootHash should be set.\n+ if len(fd.d.rootHash) == 0 {\n+ if fd.d.fs.allowRuntimeEnable {\n+ return 0, syserror.ENODATA\n+ }\n+ return 0, alertIntegrityViolation(syserror.ENODATA, \"Ioctl measureVerity: no root hash found\")\n+ }\n+\n+ // The first part of VerityDigest is the metadata.\n+ if _, err := metadata.CopyIn(t, verityDigest); err != nil {\n+ return 0, err\n+ }\n+ if metadata.DigestSize < uint16(len(fd.d.rootHash)) {\n+ return 0, syserror.EOVERFLOW\n+ }\n+\n+ // Populate the output digest size, since DigestSize is both input and\n+ // output.\n+ metadata.DigestSize = uint16(len(fd.d.rootHash))\n+\n+ // First copy the metadata.\n+ if _, err := metadata.CopyOut(t, verityDigest); err != nil {\n+ return 0, err\n+ }\n+\n+ // Now copy the root hash bytes to the memory after metadata.\n+ _, err := t.CopyOutBytes(usermem.Addr(uintptr(verityDigest)+linux.SizeOfDigestMetadata), fd.d.rootHash)\n+ return 0, err\n+}\n+\n+func (fd *fileDescription) verityFlags(ctx context.Context, uio usermem.IO, flags usermem.Addr) (uintptr, error) {\nf := int32(0)\n// All enabled files should store a root hash. This flag is not settable\n@@ -626,8 +664,7 @@ func (fd *fileDescription) getFlags(ctx context.Context, uio usermem.IO, args ar\n}\nt := kernel.TaskFromContext(ctx)\n- addr := args[2].Pointer()\n- _, err := primitive.CopyInt32Out(t, addr, f)\n+ _, err := primitive.CopyInt32Out(t, flags, f)\nreturn 0, err\n}\n@@ -635,11 +672,15 @@ func (fd *fileDescription) getFlags(ctx context.Context, uio usermem.IO, args ar\nfunc (fd *fileDescription) Ioctl(ctx context.Context, uio usermem.IO, args arch.SyscallArguments) (uintptr, error) {\nswitch cmd := args[1].Uint(); cmd {\ncase linux.FS_IOC_ENABLE_VERITY:\n- return fd.enableVerity(ctx, uio, args)\n+ return fd.enableVerity(ctx, uio)\n+ case linux.FS_IOC_MEASURE_VERITY:\n+ return fd.measureVerity(ctx, uio, args[2].Pointer())\ncase linux.FS_IOC_GETFLAGS:\n- return fd.getFlags(ctx, uio, args)\n+ return fd.verityFlags(ctx, uio, args[2].Pointer())\ndefault:\n- return fd.lowerFD.Ioctl(ctx, uio, args)\n+ // TODO(b/169682228): Investigate which ioctl commands should\n+ // be allowed.\n+ return 0, syserror.ENOSYS\n}\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Implement ioctl with measure in verity fs
PiperOrigin-RevId: 334682753 |
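A sketch (not from the diff above) of the buffer layout the new FS_IOC_MEASURE_VERITY handling exchanges with userspace: a 4-byte metadata header holding DigestAlgorithm and DigestSize as uint16 values, followed by the digest bytes. The algorithm value and the stand-in digest below are assumptions for illustration only.

// measure_layout_sketch.go — builds the metadata-plus-digest buffer in plain Go.
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	digest := []byte{0xde, 0xad, 0xbe, 0xef} // stand-in root hash

	buf := make([]byte, 4+len(digest))
	binary.LittleEndian.PutUint16(buf[0:2], 1)                   // DigestAlgorithm (assumed value)
	binary.LittleEndian.PutUint16(buf[2:4], uint16(len(digest))) // DigestSize
	copy(buf[4:], digest)                                        // digest follows the 4-byte header

	fmt.Printf("buffer passed to/from the ioctl: % x\n", buf)
}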
259,951 | 30.09.2020 15:08:00 | 25,200 | 7f9e13053e84b82c67c12a4964fa4703ebaa571f | Count IP OutgoingPacketErrors in the NetworkEndpoint methods
Before this change, OutgoingPacketErrors was incremented in the
stack.Route methods. This was going to be a problem once
IPv4/IPv6 WritePackets support fragmentation because Route.WritePackets
might not know how many packets are left after an error occurs.
Test:
- pkg/tcpip/network/ipv4:ipv4_test
- pkg/tcpip/network/ipv6:ipv6_test | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/ipv4/ipv4.go",
"new_path": "pkg/tcpip/network/ipv4/ipv4.go",
"diff": "@@ -278,6 +278,7 @@ func (e *endpoint) writePacketFragments(r *stack.Route, gso *stack.GSO, mtu int,\n// Send out the fragment.\nif err := e.linkEP.WritePacket(r, gso, ProtocolNumber, fragPkt); err != nil {\n+ r.Stats().IP.OutgoingPacketErrors.IncrementBy(uint64(n - i))\nreturn err\n}\nr.Stats().IP.PacketsSent.Increment()\n@@ -349,6 +350,7 @@ func (e *endpoint) WritePacket(r *stack.Route, gso *stack.GSO, params stack.Netw\nreturn e.writePacketFragments(r, gso, int(e.linkEP.MTU()), pkt)\n}\nif err := e.linkEP.WritePacket(r, gso, ProtocolNumber, pkt); err != nil {\n+ r.Stats().IP.OutgoingPacketErrors.Increment()\nreturn err\n}\nr.Stats().IP.PacketsSent.Increment()\n@@ -379,6 +381,9 @@ func (e *endpoint) WritePackets(r *stack.Route, gso *stack.GSO, pkts stack.Packe\n// faster WritePackets API directly.\nn, err := e.linkEP.WritePackets(r, gso, pkts, ProtocolNumber)\nr.Stats().IP.PacketsSent.IncrementBy(uint64(n))\n+ if err != nil {\n+ r.Stats().IP.OutgoingPacketErrors.IncrementBy(uint64(pkts.Len() - n))\n+ }\nreturn n, err\n}\nr.Stats().IP.IPTablesOutputDropped.IncrementBy(uint64(len(dropped)))\n@@ -403,6 +408,7 @@ func (e *endpoint) WritePackets(r *stack.Route, gso *stack.GSO, pkts stack.Packe\n}\nif err := e.linkEP.WritePacket(r, gso, ProtocolNumber, pkt); err != nil {\nr.Stats().IP.PacketsSent.IncrementBy(uint64(n))\n+ r.Stats().IP.OutgoingPacketErrors.IncrementBy(uint64(pkts.Len() - n - len(dropped)))\n// Dropped packets aren't errors, so include them in\n// the return value.\nreturn n + len(dropped), err\n@@ -461,9 +467,12 @@ func (e *endpoint) WriteHeaderIncludedPacket(r *stack.Route, pkt *stack.PacketBu\nreturn nil\n}\n+ if err := e.linkEP.WritePacket(r, nil /* gso */, ProtocolNumber, pkt); err != nil {\n+ r.Stats().IP.OutgoingPacketErrors.Increment()\n+ return err\n+ }\nr.Stats().IP.PacketsSent.Increment()\n-\n- return e.linkEP.WritePacket(r, nil /* gso */, ProtocolNumber, pkt)\n+ return nil\n}\n// HandlePacket is called by the link layer when new ipv4 packets arrive for\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/ipv4/ipv4_test.go",
"new_path": "pkg/tcpip/network/ipv4/ipv4_test.go",
"diff": "@@ -450,6 +450,9 @@ func TestFragmentation(t *testing.T) {\nif got, want := len(ep.WrittenPackets), int(r.Stats().IP.PacketsSent.Value()); got != want {\nt.Errorf(\"no errors yet got len(ep.WrittenPackets) = %d, want = %d\", got, want)\n}\n+ if got := r.Stats().IP.OutgoingPacketErrors.Value(); got != 0 {\n+ t.Errorf(\"got r.Stats().IP.OutgoingPacketErrors.Value() = %d, want = 0\", got)\n+ }\ncompareFragments(t, ep.WrittenPackets, source, ft.mtu)\n})\n}\n@@ -465,11 +468,44 @@ func TestFragmentationErrors(t *testing.T) {\npayloadViewsSizes []int\nerr *tcpip.Error\nallowPackets int\n+ fragmentCount int\n}{\n- {\"NoFrag\", 2000, 0, []int{1000}, tcpip.ErrAborted, 0},\n- {\"ErrorOnFirstFrag\", 500, 0, []int{1000}, tcpip.ErrAborted, 0},\n- {\"ErrorOnSecondFrag\", 500, 0, []int{1000}, tcpip.ErrAborted, 1},\n- {\"ErrorOnFirstFragMTUSmallerThanHeader\", 500, 1000, []int{500}, tcpip.ErrAborted, 0},\n+ {\n+ description: \"NoFrag\",\n+ mtu: 2000,\n+ transportHeaderLength: 0,\n+ payloadViewsSizes: []int{1000},\n+ err: tcpip.ErrAborted,\n+ allowPackets: 0,\n+ fragmentCount: 1,\n+ },\n+ {\n+ description: \"ErrorOnFirstFrag\",\n+ mtu: 500,\n+ transportHeaderLength: 0,\n+ payloadViewsSizes: []int{1000},\n+ err: tcpip.ErrAborted,\n+ allowPackets: 0,\n+ fragmentCount: 3,\n+ },\n+ {\n+ description: \"ErrorOnSecondFrag\",\n+ mtu: 500,\n+ transportHeaderLength: 0,\n+ payloadViewsSizes: []int{1000},\n+ err: tcpip.ErrAborted,\n+ allowPackets: 1,\n+ fragmentCount: 3,\n+ },\n+ {\n+ description: \"ErrorOnFirstFragMTUSmallerThanHeader\",\n+ mtu: 500,\n+ transportHeaderLength: 1000,\n+ payloadViewsSizes: []int{500},\n+ err: tcpip.ErrAborted,\n+ allowPackets: 0,\n+ fragmentCount: 4,\n+ },\n}\nfor _, ft := range fragTests {\n@@ -488,6 +524,9 @@ func TestFragmentationErrors(t *testing.T) {\nif got, want := len(ep.WrittenPackets), int(r.Stats().IP.PacketsSent.Value()); err != nil && got != want {\nt.Errorf(\"got len(ep.WrittenPackets) = %d, want = %d\", got, want)\n}\n+ if got, want := int(r.Stats().IP.OutgoingPacketErrors.Value()), ft.fragmentCount-ft.allowPackets; got != want {\n+ t.Errorf(\"got r.Stats().IP.OutgoingPacketErrors.Value() = %d, want = %d\", got, want)\n+ }\n})\n}\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/ipv6/ipv6.go",
"new_path": "pkg/tcpip/network/ipv6/ipv6.go",
"diff": "@@ -424,6 +424,7 @@ func (e *endpoint) WritePacket(r *stack.Route, gso *stack.GSO, params stack.Netw\n}\nif err := e.linkEP.WritePacket(r, gso, ProtocolNumber, pkt); err != nil {\n+ r.Stats().IP.OutgoingPacketErrors.Increment()\nreturn err\n}\nr.Stats().IP.PacketsSent.Increment()\n@@ -453,6 +454,9 @@ func (e *endpoint) WritePackets(r *stack.Route, gso *stack.GSO, pkts stack.Packe\n// faster WritePackets API directly.\nn, err := e.linkEP.WritePackets(r, gso, pkts, ProtocolNumber)\nr.Stats().IP.PacketsSent.IncrementBy(uint64(n))\n+ if err != nil {\n+ r.Stats().IP.OutgoingPacketErrors.IncrementBy(uint64(pkts.Len() - n))\n+ }\nreturn n, err\n}\nr.Stats().IP.IPTablesOutputDropped.IncrementBy(uint64(len(dropped)))\n@@ -477,6 +481,7 @@ func (e *endpoint) WritePackets(r *stack.Route, gso *stack.GSO, pkts stack.Packe\n}\nif err := e.linkEP.WritePacket(r, gso, ProtocolNumber, pkt); err != nil {\nr.Stats().IP.PacketsSent.IncrementBy(uint64(n))\n+ r.Stats().IP.OutgoingPacketErrors.IncrementBy(uint64(pkts.Len() - n + len(dropped)))\n// Dropped packets aren't errors, so include them in\n// the return value.\nreturn n + len(dropped), err\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/route.go",
"new_path": "pkg/tcpip/stack/route.go",
"diff": "@@ -211,14 +211,13 @@ func (r *Route) WritePacket(gso *GSO, params NetworkHeaderParams, pkt *PacketBuf\n// WritePacket takes ownership of pkt, calculate numBytes first.\nnumBytes := pkt.Size()\n- err := r.addressEndpoint.NetworkEndpoint().WritePacket(r, gso, params, pkt)\n- if err != nil {\n- r.Stats().IP.OutgoingPacketErrors.Increment()\n- } else {\n+ if err := r.addressEndpoint.NetworkEndpoint().WritePacket(r, gso, params, pkt); err != nil {\n+ return err\n+ }\n+\nr.nic.stats.Tx.Packets.Increment()\nr.nic.stats.Tx.Bytes.IncrementBy(uint64(numBytes))\n- }\n- return err\n+ return nil\n}\n// WritePackets writes a list of n packets through the given route and returns\n@@ -228,15 +227,8 @@ func (r *Route) WritePackets(gso *GSO, pkts PacketBufferList, params NetworkHead\nreturn 0, tcpip.ErrInvalidEndpointState\n}\n- // WritePackets takes ownership of pkt, calculate length first.\n- numPkts := pkts.Len()\n-\nn, err := r.addressEndpoint.NetworkEndpoint().WritePackets(r, gso, pkts, params)\n- if err != nil {\n- r.Stats().IP.OutgoingPacketErrors.IncrementBy(uint64(numPkts - n))\n- }\nr.nic.stats.Tx.Packets.IncrementBy(uint64(n))\n-\nwrittenBytes := 0\nfor i, pb := 0, pkts.Front(); i < n && pb != nil; i, pb = i+1, pb.Next() {\nwrittenBytes += pb.Size()\n@@ -257,7 +249,6 @@ func (r *Route) WriteHeaderIncludedPacket(pkt *PacketBuffer) *tcpip.Error {\nnumBytes := pkt.Data.Size()\nif err := r.addressEndpoint.NetworkEndpoint().WriteHeaderIncludedPacket(r, pkt); err != nil {\n- r.Stats().IP.OutgoingPacketErrors.Increment()\nreturn err\n}\nr.nic.stats.Tx.Packets.Increment()\n"
}
] | Go | Apache License 2.0 | google/gvisor | Count IP OutgoingPacketErrors in the NetworkEndpoint methods
Before this change, OutgoingPacketErrors was incremented in the
stack.Route methods. This was going to be a problem once
IPv4/IPv6 WritePackets support fragmentation because Route.WritePackets
might not know how many packets are left after an error occurs.
Test:
- pkg/tcpip/network/ipv4:ipv4_test
- pkg/tcpip/network/ipv6:ipv6_test
PiperOrigin-RevId: 334687983 |
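A toy Go sketch (not gVisor's stack API) of the accounting idea behind this change: the error counter is bumped at the layer that knows how many fragments remain after a failed write, since the caller may not know the fragment count. The function and counter names below are assumptions for illustration.

// fragment_errors_sketch.go — count remaining fragments as errors on failure.
package main

import (
	"errors"
	"fmt"
)

func writeFragments(fragments []string, send func(string) error) (int, error) {
	for i, f := range fragments {
		if err := send(f); err != nil {
			// The caller may not know the fragment count, so the remaining
			// fragments are counted as outgoing packet errors right here.
			return len(fragments) - i, err
		}
	}
	return 0, nil
}

func main() {
	frags := []string{"frag0", "frag1", "frag2"}
	sent := 0
	outgoingPacketErrors, err := writeFragments(frags, func(string) error {
		if sent == 1 { // fail on the second fragment
			return errors.New("link write failed")
		}
		sent++
		return nil
	})
	fmt.Printf("err=%v outgoingPacketErrors=%d\n", err, outgoingPacketErrors)
}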
259,891 | 30.09.2020 16:02:46 | 25,200 | 6f8d64f4229be58814319003a397b971ca9b4e1b | ip6tables: redirect support
Adds support for the IPv6-compatible redirect target. Redirection is a limited
form of DNAT, where the destination is always the localhost.
Updates | [
{
"change_type": "MODIFY",
"old_path": "pkg/abi/linux/netfilter_ipv6.go",
"new_path": "pkg/abi/linux/netfilter_ipv6.go",
"diff": "@@ -321,3 +321,16 @@ const (\n// Enable all flags.\nIP6T_INV_MASK = 0x7F\n)\n+\n+// NFNATRange corresponds to struct nf_nat_range in\n+// include/uapi/linux/netfilter/nf_nat.h.\n+type NFNATRange struct {\n+ Flags uint32\n+ MinAddr Inet6Addr\n+ MaxAddr Inet6Addr\n+ MinProto uint16 // Network byte order.\n+ MaxProto uint16 // Network byte order.\n+}\n+\n+// SizeOfNFNATRange is the size of NFNATRange.\n+const SizeOfNFNATRange = 40\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/netfilter/netfilter.go",
"new_path": "pkg/sentry/socket/netfilter/netfilter.go",
"diff": "@@ -147,10 +147,6 @@ func SetEntries(stk *stack.Stack, optVal []byte, ipv6 bool) *syserr.Error {\ncase stack.FilterTable:\ntable = stack.EmptyFilterTable()\ncase stack.NATTable:\n- if ipv6 {\n- nflog(\"IPv6 redirection not yet supported (gvisor.dev/issue/3549)\")\n- return syserr.ErrInvalidArgument\n- }\ntable = stack.EmptyNATTable()\ndefault:\nnflog(\"we don't yet support writing to the %q table (gvisor.dev/issue/170)\", replace.Name.String())\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/netfilter/targets.go",
"new_path": "pkg/sentry/socket/netfilter/targets.go",
"diff": "@@ -47,6 +47,9 @@ func init() {\nregisterTargetMaker(&redirectTargetMaker{\nNetworkProtocol: header.IPv4ProtocolNumber,\n})\n+ registerTargetMaker(&nfNATTargetMaker{\n+ NetworkProtocol: header.IPv6ProtocolNumber,\n+ })\n}\ntype standardTargetMaker struct {\n@@ -250,6 +253,86 @@ func (*redirectTargetMaker) unmarshal(buf []byte, filter stack.IPHeaderFilter) (\nreturn &target, nil\n}\n+type nfNATTarget struct {\n+ Target linux.XTEntryTarget\n+ Range linux.NFNATRange\n+}\n+\n+const nfNATMarhsalledSize = linux.SizeOfXTEntryTarget + linux.SizeOfNFNATRange\n+\n+type nfNATTargetMaker struct {\n+ NetworkProtocol tcpip.NetworkProtocolNumber\n+}\n+\n+func (rm *nfNATTargetMaker) id() stack.TargetID {\n+ return stack.TargetID{\n+ Name: stack.RedirectTargetName,\n+ NetworkProtocol: rm.NetworkProtocol,\n+ }\n+}\n+\n+func (*nfNATTargetMaker) marshal(target stack.Target) []byte {\n+ rt := target.(*stack.RedirectTarget)\n+ nt := nfNATTarget{\n+ Target: linux.XTEntryTarget{\n+ TargetSize: nfNATMarhsalledSize,\n+ },\n+ Range: linux.NFNATRange{\n+ Flags: linux.NF_NAT_RANGE_PROTO_SPECIFIED,\n+ },\n+ }\n+ copy(nt.Target.Name[:], stack.RedirectTargetName)\n+ copy(nt.Range.MinAddr[:], rt.Addr)\n+ copy(nt.Range.MaxAddr[:], rt.Addr)\n+\n+ nt.Range.MinProto = htons(rt.Port)\n+ nt.Range.MaxProto = nt.Range.MinProto\n+\n+ ret := make([]byte, 0, nfNATMarhsalledSize)\n+ return binary.Marshal(ret, usermem.ByteOrder, nt)\n+}\n+\n+func (*nfNATTargetMaker) unmarshal(buf []byte, filter stack.IPHeaderFilter) (stack.Target, *syserr.Error) {\n+ if size := nfNATMarhsalledSize; len(buf) < size {\n+ nflog(\"nfNATTargetMaker: buf has insufficient size (%d) for nfNAT target (%d)\", len(buf), size)\n+ return nil, syserr.ErrInvalidArgument\n+ }\n+\n+ if p := filter.Protocol; p != header.TCPProtocolNumber && p != header.UDPProtocolNumber {\n+ nflog(\"nfNATTargetMaker: bad proto %d\", p)\n+ return nil, syserr.ErrInvalidArgument\n+ }\n+\n+ var natRange linux.NFNATRange\n+ buf = buf[linux.SizeOfXTEntryTarget:nfNATMarhsalledSize]\n+ binary.Unmarshal(buf, usermem.ByteOrder, &natRange)\n+\n+ // We don't support port or address ranges.\n+ if natRange.MinAddr != natRange.MaxAddr {\n+ nflog(\"nfNATTargetMaker: MinAddr and MaxAddr are different\")\n+ return nil, syserr.ErrInvalidArgument\n+ }\n+ if natRange.MinProto != natRange.MaxProto {\n+ nflog(\"nfNATTargetMaker: MinProto and MaxProto are different\")\n+ return nil, syserr.ErrInvalidArgument\n+ }\n+\n+ // TODO(gvisor.dev/issue/3549): Check for other flags.\n+ // For now, redirect target only supports destination change.\n+ if natRange.Flags != linux.NF_NAT_RANGE_PROTO_SPECIFIED {\n+ nflog(\"nfNATTargetMaker: invalid range flags %d\", natRange.Flags)\n+ return nil, syserr.ErrInvalidArgument\n+ }\n+\n+ target := stack.RedirectTarget{\n+ NetworkProtocol: filter.NetworkProtocol(),\n+ Addr: tcpip.Address(natRange.MinAddr[:]),\n+ Port: ntohs(natRange.MinProto),\n+ }\n+\n+ return &target, nil\n+}\n+\n// translateToStandardTarget translates from the value in a\n// linux.XTStandardTarget to an stack.Verdict.\nfunc translateToStandardTarget(val int32, netProto tcpip.NetworkProtocolNumber) (stack.Target, *syserr.Error) {\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/netstack/netstack.go",
"new_path": "pkg/sentry/socket/netstack/netstack.go",
"diff": "@@ -1512,8 +1512,17 @@ func getSockOptIPv6(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, name\nreturn &vP, nil\ncase linux.IP6T_ORIGINAL_DST:\n- // TODO(gvisor.dev/issue/170): ip6tables.\n+ if outLen < int(binary.Size(linux.SockAddrInet6{})) {\nreturn nil, syserr.ErrInvalidArgument\n+ }\n+\n+ var v tcpip.OriginalDestinationOption\n+ if err := ep.GetSockOpt(&v); err != nil {\n+ return nil, syserr.TranslateNetstackError(err)\n+ }\n+\n+ a, _ := ConvertAddress(linux.AF_INET6, tcpip.FullAddress(v))\n+ return a.(*linux.SockAddrInet6), nil\ncase linux.IP6T_SO_GET_INFO:\nif outLen < linux.SizeOfIPTGetinfo {\n@@ -1555,6 +1564,26 @@ func getSockOptIPv6(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, name\n}\nreturn &entries, nil\n+ case linux.IP6T_SO_GET_REVISION_TARGET:\n+ if outLen < linux.SizeOfXTGetRevision {\n+ return nil, syserr.ErrInvalidArgument\n+ }\n+\n+ // Only valid for raw IPv6 sockets.\n+ if family, skType, _ := s.Type(); family != linux.AF_INET6 || skType != linux.SOCK_RAW {\n+ return nil, syserr.ErrProtocolNotAvailable\n+ }\n+\n+ stack := inet.StackFromContext(t)\n+ if stack == nil {\n+ return nil, syserr.ErrNoDevice\n+ }\n+ ret, err := netfilter.TargetRevision(t, outPtr, header.IPv6ProtocolNumber)\n+ if err != nil {\n+ return nil, err\n+ }\n+ return &ret, nil\n+\ndefault:\nemitUnimplementedEventIPv6(t, name)\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/conntrack.go",
"new_path": "pkg/tcpip/stack/conntrack.go",
"diff": "@@ -196,13 +196,14 @@ type bucket struct {\n// packetToTupleID converts packet to a tuple ID. It fails when pkt lacks a valid\n// TCP header.\n+//\n+// Preconditions: pkt.NetworkHeader() is valid.\nfunc packetToTupleID(pkt *PacketBuffer) (tupleID, *tcpip.Error) {\n- // TODO(gvisor.dev/issue/170): Need to support for other\n- // protocols as well.\n- netHeader := header.IPv4(pkt.NetworkHeader().View())\n- if len(netHeader) < header.IPv4MinimumSize || netHeader.TransportProtocol() != header.TCPProtocolNumber {\n+ netHeader := pkt.Network()\n+ if netHeader.TransportProtocol() != header.TCPProtocolNumber {\nreturn tupleID{}, tcpip.ErrUnknownProtocol\n}\n+\ntcpHeader := header.TCP(pkt.TransportHeader().View())\nif len(tcpHeader) < header.TCPMinimumSize {\nreturn tupleID{}, tcpip.ErrUnknownProtocol\n@@ -214,7 +215,7 @@ func packetToTupleID(pkt *PacketBuffer) (tupleID, *tcpip.Error) {\ndstAddr: netHeader.DestinationAddress(),\ndstPort: tcpHeader.DestinationPort(),\ntransProto: netHeader.TransportProtocol(),\n- netProto: header.IPv4ProtocolNumber,\n+ netProto: pkt.NetworkProtocolNumber,\n}, nil\n}\n@@ -344,7 +345,7 @@ func handlePacketPrerouting(pkt *PacketBuffer, conn *conn, dir direction) {\nreturn\n}\n- netHeader := header.IPv4(pkt.NetworkHeader().View())\n+ netHeader := pkt.Network()\ntcpHeader := header.TCP(pkt.TransportHeader().View())\n// For prerouting redirection, packets going in the original direction\n@@ -366,9 +367,13 @@ func handlePacketPrerouting(pkt *PacketBuffer, conn *conn, dir direction) {\n// support cases when they are validated, e.g. when we can't offload\n// receive checksumming.\n+ // After modification, IPv4 packets need a valid checksum.\n+ if pkt.NetworkProtocolNumber == header.IPv4ProtocolNumber {\n+ netHeader := header.IPv4(pkt.NetworkHeader().View())\nnetHeader.SetChecksum(0)\nnetHeader.SetChecksum(^netHeader.CalculateChecksum())\n}\n+}\n// handlePacketOutput manipulates ports for packets in Output hook.\nfunc handlePacketOutput(pkt *PacketBuffer, conn *conn, gso *GSO, r *Route, dir direction) {\n@@ -377,7 +382,7 @@ func handlePacketOutput(pkt *PacketBuffer, conn *conn, gso *GSO, r *Route, dir d\nreturn\n}\n- netHeader := header.IPv4(pkt.NetworkHeader().View())\n+ netHeader := pkt.Network()\ntcpHeader := header.TCP(pkt.TransportHeader().View())\n// For output redirection, packets going in the original direction\n@@ -396,7 +401,7 @@ func handlePacketOutput(pkt *PacketBuffer, conn *conn, gso *GSO, r *Route, dir d\n// Calculate the TCP checksum and set it.\ntcpHeader.SetChecksum(0)\n- length := uint16(pkt.Size()) - uint16(netHeader.HeaderLength())\n+ length := uint16(pkt.Size()) - uint16(len(pkt.NetworkHeader().View()))\nxsum := r.PseudoHeaderChecksum(header.TCPProtocolNumber, length)\nif gso != nil && gso.NeedsCsum {\ntcpHeader.SetChecksum(xsum)\n@@ -405,9 +410,12 @@ func handlePacketOutput(pkt *PacketBuffer, conn *conn, gso *GSO, r *Route, dir d\ntcpHeader.SetChecksum(^tcpHeader.CalculateChecksum(xsum))\n}\n+ if pkt.NetworkProtocolNumber == header.IPv4ProtocolNumber {\n+ netHeader := header.IPv4(pkt.NetworkHeader().View())\nnetHeader.SetChecksum(0)\nnetHeader.SetChecksum(^netHeader.CalculateChecksum())\n}\n+}\n// handlePacket will manipulate the port and address of the packet if the\n// connection exists. 
Returns whether, after the packet traverses the tables,\n@@ -422,7 +430,7 @@ func (ct *ConnTrack) handlePacket(pkt *PacketBuffer, hook Hook, gso *GSO, r *Rou\n}\n// TODO(gvisor.dev/issue/170): Support other transport protocols.\n- if nh := pkt.NetworkHeader().View(); nh.IsEmpty() || header.IPv4(nh).TransportProtocol() != header.TCPProtocolNumber {\n+ if pkt.Network().TransportProtocol() != header.TCPProtocolNumber {\nreturn false\n}\n@@ -473,7 +481,7 @@ func (ct *ConnTrack) maybeInsertNoop(pkt *PacketBuffer, hook Hook) {\n}\n// We only track TCP connections.\n- if nh := pkt.NetworkHeader().View(); nh.IsEmpty() || header.IPv4(nh).TransportProtocol() != header.TCPProtocolNumber {\n+ if pkt.Network().TransportProtocol() != header.TCPProtocolNumber {\nreturn\n}\n@@ -609,7 +617,7 @@ func (ct *ConnTrack) reapTupleLocked(tuple *tuple, bucket int, now time.Time) bo\nreturn true\n}\n-func (ct *ConnTrack) originalDst(epID TransportEndpointID) (tcpip.Address, uint16, *tcpip.Error) {\n+func (ct *ConnTrack) originalDst(epID TransportEndpointID, netProto tcpip.NetworkProtocolNumber) (tcpip.Address, uint16, *tcpip.Error) {\n// Lookup the connection. The reply's original destination\n// describes the original address.\ntid := tupleID{\n@@ -618,7 +626,7 @@ func (ct *ConnTrack) originalDst(epID TransportEndpointID) (tcpip.Address, uint1\ndstAddr: epID.RemoteAddress,\ndstPort: epID.RemotePort,\ntransProto: header.TCPProtocolNumber,\n- netProto: header.IPv4ProtocolNumber,\n+ netProto: netProto,\n}\nconn, _ := ct.connForTID(tid)\nif conn == nil {\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/iptables.go",
"new_path": "pkg/tcpip/stack/iptables.go",
"diff": "@@ -502,11 +502,11 @@ func (it *IPTables) checkRule(hook Hook, pkt *PacketBuffer, table Table, ruleIdx\n// OriginalDst returns the original destination of redirected connections. It\n// returns an error if the connection doesn't exist or isn't redirected.\n-func (it *IPTables) OriginalDst(epID TransportEndpointID) (tcpip.Address, uint16, *tcpip.Error) {\n+func (it *IPTables) OriginalDst(epID TransportEndpointID, netProto tcpip.NetworkProtocolNumber) (tcpip.Address, uint16, *tcpip.Error) {\nit.mu.RLock()\ndefer it.mu.RUnlock()\nif !it.modified {\nreturn \"\", 0, tcpip.ErrNotConnected\n}\n- return it.connections.originalDst(epID)\n+ return it.connections.originalDst(epID, netProto)\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/iptables_targets.go",
"new_path": "pkg/tcpip/stack/iptables_targets.go",
"diff": "@@ -164,11 +164,15 @@ func (rt *RedirectTarget) Action(pkt *PacketBuffer, ct *ConnTrack, hook Hook, gs\nreturn RuleDrop, 0\n}\n- // Change the address to localhost (127.0.0.1) in Output and\n- // to primary address of the incoming interface in Prerouting.\n+ // Change the address to localhost (127.0.0.1 or ::1) in Output and to\n+ // the primary address of the incoming interface in Prerouting.\nswitch hook {\ncase Output:\n+ if pkt.NetworkProtocolNumber == header.IPv4ProtocolNumber {\nrt.Addr = tcpip.Address([]byte{127, 0, 0, 1})\n+ } else {\n+ rt.Addr = header.IPv6Loopback\n+ }\ncase Prerouting:\nrt.Addr = address\ndefault:\n@@ -177,8 +181,7 @@ func (rt *RedirectTarget) Action(pkt *PacketBuffer, ct *ConnTrack, hook Hook, gs\n// TODO(gvisor.dev/issue/170): Check Flags in RedirectTarget if\n// we need to change dest address (for OUTPUT chain) or ports.\n- netHeader := header.IPv4(pkt.NetworkHeader().View())\n- switch protocol := netHeader.TransportProtocol(); protocol {\n+ switch protocol := pkt.TransportProtocolNumber; protocol {\ncase header.UDPProtocolNumber:\nudpHeader := header.UDP(pkt.TransportHeader().View())\nudpHeader.SetDestinationPort(rt.Port)\n@@ -186,10 +189,10 @@ func (rt *RedirectTarget) Action(pkt *PacketBuffer, ct *ConnTrack, hook Hook, gs\n// Calculate UDP checksum and set it.\nif hook == Output {\nudpHeader.SetChecksum(0)\n- length := uint16(pkt.Size()) - uint16(netHeader.HeaderLength())\n// Only calculate the checksum if offloading isn't supported.\nif r.Capabilities()&CapabilityTXChecksumOffload == 0 {\n+ length := uint16(pkt.Size()) - uint16(len(pkt.NetworkHeader().View()))\nxsum := r.PseudoHeaderChecksum(protocol, length)\nfor _, v := range pkt.Data.Views() {\nxsum = header.Checksum(v, xsum)\n@@ -198,10 +201,15 @@ func (rt *RedirectTarget) Action(pkt *PacketBuffer, ct *ConnTrack, hook Hook, gs\nudpHeader.SetChecksum(^udpHeader.CalculateChecksum(xsum))\n}\n}\n- // Change destination address.\n- netHeader.SetDestinationAddress(rt.Addr)\n+\n+ pkt.Network().SetDestinationAddress(rt.Addr)\n+\n+ // After modification, IPv4 packets need a valid checksum.\n+ if pkt.NetworkProtocolNumber == header.IPv4ProtocolNumber {\n+ netHeader := header.IPv4(pkt.NetworkHeader().View())\nnetHeader.SetChecksum(0)\nnetHeader.SetChecksum(^netHeader.CalculateChecksum())\n+ }\npkt.NatDone = true\ncase header.TCPProtocolNumber:\nif ct == nil {\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/packet_buffer.go",
"new_path": "pkg/tcpip/stack/packet_buffer.go",
"diff": "@@ -19,6 +19,7 @@ import (\n\"gvisor.dev/gvisor/pkg/sync\"\n\"gvisor.dev/gvisor/pkg/tcpip\"\n\"gvisor.dev/gvisor/pkg/tcpip/buffer\"\n+ \"gvisor.dev/gvisor/pkg/tcpip/header\"\n)\ntype headerType int\n@@ -255,6 +256,20 @@ func (pk *PacketBuffer) Clone() *PacketBuffer {\nreturn newPk\n}\n+// Network returns the network header as a header.Network.\n+//\n+// Network should only be called when NetworkHeader has been set.\n+func (pk *PacketBuffer) Network() header.Network {\n+ switch netProto := pk.NetworkProtocolNumber; netProto {\n+ case header.IPv4ProtocolNumber:\n+ return header.IPv4(pk.NetworkHeader().View())\n+ case header.IPv6ProtocolNumber:\n+ return header.IPv6(pk.NetworkHeader().View())\n+ default:\n+ panic(fmt.Sprintf(\"unknown network protocol number %d\", netProto))\n+ }\n+}\n+\n// headerInfo stores metadata about a header in a packet.\ntype headerInfo struct {\n// buf is the memorized slice for both prepended and consumed header.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/endpoint.go",
"new_path": "pkg/tcpip/transport/tcp/endpoint.go",
"diff": "@@ -2099,7 +2099,7 @@ func (e *endpoint) GetSockOpt(opt tcpip.GettableSocketOption) *tcpip.Error {\ncase *tcpip.OriginalDestinationOption:\ne.LockUser()\nipt := e.stack.IPTables()\n- addr, port, err := ipt.OriginalDst(e.ID)\n+ addr, port, err := ipt.OriginalDst(e.ID, e.NetProto)\ne.UnlockUser()\nif err != nil {\nreturn err\n"
},
{
"change_type": "MODIFY",
"old_path": "test/iptables/iptables_test.go",
"new_path": "test/iptables/iptables_test.go",
"diff": "@@ -48,13 +48,6 @@ func singleTest(t *testing.T, test TestCase) {\n}\n}\n-// TODO(gvisor.dev/issue/3549): IPv6 NAT support.\n-func ipv4Test(t *testing.T, test TestCase) {\n- t.Run(\"IPv4\", func(t *testing.T) {\n- iptablesTest(t, test, false)\n- })\n-}\n-\nfunc iptablesTest(t *testing.T, test TestCase, ipv6 bool) {\nif _, ok := Tests[test.Name()]; !ok {\nt.Fatalf(\"no test found with name %q. Has it been registered?\", test.Name())\n@@ -325,66 +318,66 @@ func TestFilterOutputInvertDestination(t *testing.T) {\n}\nfunc TestNATPreRedirectUDPPort(t *testing.T) {\n- ipv4Test(t, NATPreRedirectUDPPort{})\n+ singleTest(t, NATPreRedirectUDPPort{})\n}\nfunc TestNATPreRedirectTCPPort(t *testing.T) {\n- ipv4Test(t, NATPreRedirectTCPPort{})\n+ singleTest(t, NATPreRedirectTCPPort{})\n}\nfunc TestNATPreRedirectTCPOutgoing(t *testing.T) {\n- ipv4Test(t, NATPreRedirectTCPOutgoing{})\n+ singleTest(t, NATPreRedirectTCPOutgoing{})\n}\nfunc TestNATOutRedirectTCPIncoming(t *testing.T) {\n- ipv4Test(t, NATOutRedirectTCPIncoming{})\n+ singleTest(t, NATOutRedirectTCPIncoming{})\n}\nfunc TestNATOutRedirectUDPPort(t *testing.T) {\n- ipv4Test(t, NATOutRedirectUDPPort{})\n+ singleTest(t, NATOutRedirectUDPPort{})\n}\nfunc TestNATOutRedirectTCPPort(t *testing.T) {\n- ipv4Test(t, NATOutRedirectTCPPort{})\n+ singleTest(t, NATOutRedirectTCPPort{})\n}\nfunc TestNATDropUDP(t *testing.T) {\n- ipv4Test(t, NATDropUDP{})\n+ singleTest(t, NATDropUDP{})\n}\nfunc TestNATAcceptAll(t *testing.T) {\n- ipv4Test(t, NATAcceptAll{})\n+ singleTest(t, NATAcceptAll{})\n}\nfunc TestNATOutRedirectIP(t *testing.T) {\n- ipv4Test(t, NATOutRedirectIP{})\n+ singleTest(t, NATOutRedirectIP{})\n}\nfunc TestNATOutDontRedirectIP(t *testing.T) {\n- ipv4Test(t, NATOutDontRedirectIP{})\n+ singleTest(t, NATOutDontRedirectIP{})\n}\nfunc TestNATOutRedirectInvert(t *testing.T) {\n- ipv4Test(t, NATOutRedirectInvert{})\n+ singleTest(t, NATOutRedirectInvert{})\n}\nfunc TestNATPreRedirectIP(t *testing.T) {\n- ipv4Test(t, NATPreRedirectIP{})\n+ singleTest(t, NATPreRedirectIP{})\n}\nfunc TestNATPreDontRedirectIP(t *testing.T) {\n- ipv4Test(t, NATPreDontRedirectIP{})\n+ singleTest(t, NATPreDontRedirectIP{})\n}\nfunc TestNATPreRedirectInvert(t *testing.T) {\n- ipv4Test(t, NATPreRedirectInvert{})\n+ singleTest(t, NATPreRedirectInvert{})\n}\nfunc TestNATRedirectRequiresProtocol(t *testing.T) {\n- ipv4Test(t, NATRedirectRequiresProtocol{})\n+ singleTest(t, NATRedirectRequiresProtocol{})\n}\nfunc TestNATLoopbackSkipsPrerouting(t *testing.T) {\n- ipv4Test(t, NATLoopbackSkipsPrerouting{})\n+ singleTest(t, NATLoopbackSkipsPrerouting{})\n}\nfunc TestInputSource(t *testing.T) {\n@@ -421,9 +414,9 @@ func TestFilterAddrs(t *testing.T) {\n}\nfunc TestNATPreOriginalDst(t *testing.T) {\n- ipv4Test(t, NATPreOriginalDst{})\n+ singleTest(t, NATPreOriginalDst{})\n}\nfunc TestNATOutOriginalDst(t *testing.T) {\n- ipv4Test(t, NATOutOriginalDst{})\n+ singleTest(t, NATOutOriginalDst{})\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/ip6tables.cc",
"new_path": "test/syscalls/linux/ip6tables.cc",
"diff": "@@ -95,16 +95,10 @@ TEST(IP6TablesBasic, GetRevision) {\n};\nsocklen_t rev_len = sizeof(rev);\n- // TODO(gvisor.dev/issue/3549): IPv6 redirect support.\n- const int retval =\n- getsockopt(sock, SOL_IPV6, IP6T_SO_GET_REVISION_TARGET, &rev, &rev_len);\n- if (IsRunningOnGvisor()) {\n- EXPECT_THAT(retval, SyscallFailsWithErrno(ENOPROTOOPT));\n- return;\n- }\n-\n// Revision 0 exists.\n- EXPECT_THAT(retval, SyscallSucceeds());\n+ EXPECT_THAT(\n+ getsockopt(sock, SOL_IPV6, IP6T_SO_GET_REVISION_TARGET, &rev, &rev_len),\n+ SyscallSucceeds());\nEXPECT_EQ(rev.revision, 0);\n// Revisions > 0 don't exist.\n"
}
] | Go | Apache License 2.0 | google/gvisor | ip6tables: redirect support
Adds support for the IPv6-compatible redirect target. Redirection is a limited
form of DNAT, where the destination is always the localhost.
Updates #3549.
PiperOrigin-RevId: 334698344 |
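A standalone sketch (not from the diff above) of the Output-hook behavior this commit adds: REDIRECT rewrites the destination to the loopback address matching the packet's network protocol, 127.0.0.1 or ::1. The helper name redirectDestination is an assumption for illustration; only the standard net package is used.

// redirect_loopback_sketch.go — pick the loopback address by protocol.
package main

import (
	"fmt"
	"net"
)

func redirectDestination(ipv6 bool) net.IP {
	if ipv6 {
		return net.IPv6loopback // ::1
	}
	return net.IPv4(127, 0, 0, 1)
}

func main() {
	fmt.Println("IPv4 redirect target:", redirectDestination(false))
	fmt.Println("IPv6 redirect target:", redirectDestination(true))
}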
259,858 | 30.09.2020 17:46:56 | 25,200 | ae51aef5bb31458a98c7f27546213a7d2bdff504 | Ensure proctor is built as pure Go binary. | [
{
"change_type": "MODIFY",
"old_path": "test/runtimes/proctor/BUILD",
"new_path": "test/runtimes/proctor/BUILD",
"diff": "@@ -5,6 +5,7 @@ package(licenses = [\"notice\"])\ngo_binary(\nname = \"proctor\",\nsrcs = [\"main.go\"],\n+ pure = True,\nvisibility = [\"//test/runtimes:__pkg__\"],\ndeps = [\"//test/runtimes/proctor/lib\"],\n)\n"
}
] | Go | Apache License 2.0 | google/gvisor | Ensure proctor is built as pure Go binary.
PiperOrigin-RevId: 334716351 |
260,001 | 01.10.2020 22:24:29 | 25,200 | 7f39d5342873f00a6d0a89c27ed4744168fa01bc | Add a verity test for modified parent Merkle file
When a child's root hash or its Merkle path is modified in its parent's
Merkle tree file, opening the file should fail, provided the directory
is verity enabled. The test for this behavior is added. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/verity/verity_test.go",
"new_path": "pkg/sentry/fsimpl/verity/verity_test.go",
"diff": "@@ -67,7 +67,7 @@ func newVerityRoot(ctx context.Context) (*vfs.VirtualFilesystem, vfs.VirtualDent\n},\n})\nif err != nil {\n- return nil, vfs.VirtualDentry{}, nil, fmt.Errorf(\"failed to create verity root mount: %v\", err)\n+ return nil, vfs.VirtualDentry{}, nil, fmt.Errorf(\"NewMountNamespace: %v\", err)\n}\nroot := mntns.Root()\nreturn vfsObj, root, func() {\n@@ -130,11 +130,11 @@ func corruptRandomBit(ctx context.Context, fd *vfs.FileDescription, size int) er\nrandomPos := int64(rand.Intn(size))\nbyteToModify := make([]byte, 1)\nif _, err := fd.PRead(ctx, usermem.BytesIOSequence(byteToModify), randomPos, vfs.ReadOptions{}); err != nil {\n- return fmt.Errorf(\"lowerFD.PRead failed: %v\", err)\n+ return fmt.Errorf(\"lowerFD.PRead: %v\", err)\n}\nbyteToModify[0] ^= 1\nif _, err := fd.PWrite(ctx, usermem.BytesIOSequence(byteToModify), randomPos, vfs.WriteOptions{}); err != nil {\n- return fmt.Errorf(\"lowerFD.PWrite failed: %v\", err)\n+ return fmt.Errorf(\"lowerFD.PWrite: %v\", err)\n}\nreturn nil\n}\n@@ -145,13 +145,13 @@ func TestOpen(t *testing.T) {\nctx := contexttest.Context(t)\nvfsObj, root, cleanup, err := newVerityRoot(ctx)\nif err != nil {\n- t.Fatalf(\"Failed to create new verity root: %v\", err)\n+ t.Fatalf(\"newVerityRoot: %v\", err)\n}\ndefer cleanup()\nfilename := \"verity-test-file\"\nif _, _, err := newFileFD(ctx, vfsObj, root, filename, 0644); err != nil {\n- t.Fatalf(\"Failed to create new file fd: %v\", err)\n+ t.Fatalf(\"newFileFD: %v\", err)\n}\n// Ensure that the corresponding Merkle tree file is created.\n@@ -163,7 +163,7 @@ func TestOpen(t *testing.T) {\n}, &vfs.OpenOptions{\nFlags: linux.O_RDONLY,\n}); err != nil {\n- t.Errorf(\"Failed to open Merkle tree file %s: %v\", merklePrefix+filename, err)\n+ t.Errorf(\"OpenAt Merkle tree file %s: %v\", merklePrefix+filename, err)\n}\n// Ensure the root merkle tree file is created.\n@@ -174,7 +174,7 @@ func TestOpen(t *testing.T) {\n}, &vfs.OpenOptions{\nFlags: linux.O_RDONLY,\n}); err != nil {\n- t.Errorf(\"Failed to open root Merkle tree file %s: %v\", merklePrefix+rootMerkleFilename, err)\n+ t.Errorf(\"OpenAt root Merkle tree file %s: %v\", merklePrefix+rootMerkleFilename, err)\n}\n}\n@@ -184,27 +184,27 @@ func TestReadUntouchedFileSucceeds(t *testing.T) {\nctx := contexttest.Context(t)\nvfsObj, root, cleanup, err := newVerityRoot(ctx)\nif err != nil {\n- t.Fatalf(\"Failed to create new verity root: %v\", err)\n+ t.Fatalf(\"newVerityRoot: %v\", err)\n}\ndefer cleanup()\nfilename := \"verity-test-file\"\nfd, size, err := newFileFD(ctx, vfsObj, root, filename, 0644)\nif err != nil {\n- t.Fatalf(\"Failed to create new file fd: %v\", err)\n+ t.Fatalf(\"newFileFD: %v\", err)\n}\n// Enable verity on the file and confirm a normal read succeeds.\nvar args arch.SyscallArguments\nargs[1] = arch.SyscallArgument{Value: linux.FS_IOC_ENABLE_VERITY}\nif _, err := fd.Ioctl(ctx, nil /* uio */, args); err != nil {\n- t.Fatalf(\"Ioctl failed: %v\", err)\n+ t.Fatalf(\"Ioctl: %v\", err)\n}\nbuf := make([]byte, size)\nn, err := fd.PRead(ctx, usermem.BytesIOSequence(buf), 0 /* offset */, vfs.ReadOptions{})\nif err != nil && err != io.EOF {\n- t.Fatalf(\"fd.PRead failed: %v\", err)\n+ t.Fatalf(\"fd.PRead: %v\", err)\n}\nif n != int64(size) {\n@@ -218,21 +218,21 @@ func TestReopenUntouchedFileSucceeds(t *testing.T) {\nctx := contexttest.Context(t)\nvfsObj, root, cleanup, err := newVerityRoot(ctx)\nif err != nil {\n- t.Fatalf(\"Failed to create new verity root: %v\", err)\n+ t.Fatalf(\"newVerityRoot: %v\", err)\n}\ndefer 
cleanup()\nfilename := \"verity-test-file\"\nfd, _, err := newFileFD(ctx, vfsObj, root, filename, 0644)\nif err != nil {\n- t.Fatalf(\"Failed to create new file fd: %v\", err)\n+ t.Fatalf(\"newFileFD: %v\", err)\n}\n// Enable verity on the file and confirms a normal read succeeds.\nvar args arch.SyscallArguments\nargs[1] = arch.SyscallArgument{Value: linux.FS_IOC_ENABLE_VERITY}\nif _, err := fd.Ioctl(ctx, nil /* uio */, args); err != nil {\n- t.Fatalf(\"Ioctl failed: %v\", err)\n+ t.Fatalf(\"Ioctl: %v\", err)\n}\n// Ensure reopening the verity enabled file succeeds.\n@@ -253,21 +253,21 @@ func TestModifiedFileFails(t *testing.T) {\nctx := contexttest.Context(t)\nvfsObj, root, cleanup, err := newVerityRoot(ctx)\nif err != nil {\n- t.Fatalf(\"Failed to create new verity root: %v\", err)\n+ t.Fatalf(\"newVerityRoot: %v\", err)\n}\ndefer cleanup()\nfilename := \"verity-test-file\"\nfd, size, err := newFileFD(ctx, vfsObj, root, filename, 0644)\nif err != nil {\n- t.Fatalf(\"Failed to create new file fd: %v\", err)\n+ t.Fatalf(\"newFileFD: %v\", err)\n}\n// Enable verity on the file.\nvar args arch.SyscallArguments\nargs[1] = arch.SyscallArgument{Value: linux.FS_IOC_ENABLE_VERITY}\nif _, err := fd.Ioctl(ctx, nil /* uio */, args); err != nil {\n- t.Fatalf(\"Ioctl failed: %v\", err)\n+ t.Fatalf(\"Ioctl: %v\", err)\n}\n// Open a new lowerFD that's read/writable.\n@@ -280,11 +280,11 @@ func TestModifiedFileFails(t *testing.T) {\nFlags: linux.O_RDWR,\n})\nif err != nil {\n- t.Fatalf(\"Open lowerFD failed: %v\", err)\n+ t.Fatalf(\"OpenAt: %v\", err)\n}\nif err := corruptRandomBit(ctx, lowerFD, size); err != nil {\n- t.Fatalf(\"%v\", err)\n+ t.Fatalf(\"corruptRandomBit: %v\", err)\n}\n// Confirm that read from the modified file fails.\n@@ -300,21 +300,21 @@ func TestModifiedMerkleFails(t *testing.T) {\nctx := contexttest.Context(t)\nvfsObj, root, cleanup, err := newVerityRoot(ctx)\nif err != nil {\n- t.Fatalf(\"Failed to create new verity root: %v\", err)\n+ t.Fatalf(\"newVerityRoot: %v\", err)\n}\ndefer cleanup()\nfilename := \"verity-test-file\"\nfd, size, err := newFileFD(ctx, vfsObj, root, filename, 0644)\nif err != nil {\n- t.Fatalf(\"Failed to create new file fd: %v\", err)\n+ t.Fatalf(\"newFileFD: %v\", err)\n}\n// Enable verity on the file.\nvar args arch.SyscallArguments\nargs[1] = arch.SyscallArgument{Value: linux.FS_IOC_ENABLE_VERITY}\nif _, err := fd.Ioctl(ctx, nil /* uio */, args); err != nil {\n- t.Fatalf(\"Ioctl failed: %v\", err)\n+ t.Fatalf(\"Ioctl: %v\", err)\n}\n// Open a new lowerMerkleFD that's read/writable.\n@@ -327,17 +327,17 @@ func TestModifiedMerkleFails(t *testing.T) {\nFlags: linux.O_RDWR,\n})\nif err != nil {\n- t.Fatalf(\"Open lowerMerkleFD failed: %v\", err)\n+ t.Fatalf(\"OpenAt: %v\", err)\n}\n// Flip a random bit in the Merkle tree file.\nstat, err := lowerMerkleFD.Stat(ctx, vfs.StatOptions{})\nif err != nil {\n- t.Fatalf(\"Failed to get lowerMerkleFD stat: %v\", err)\n+ t.Fatalf(\"stat: %v\", err)\n}\nmerkleSize := int(stat.Size)\nif err := corruptRandomBit(ctx, lowerMerkleFD, merkleSize); err != nil {\n- t.Fatalf(\"%v\", err)\n+ t.Fatalf(\"corruptRandomBit: %v\", err)\n}\n// Confirm that read from a file with modified Merkle tree fails.\n@@ -347,3 +347,83 @@ func TestModifiedMerkleFails(t *testing.T) {\nt.Fatalf(\"fd.PRead succeeded with modified Merkle file\")\n}\n}\n+\n+// TestModifiedParentMerkleFails ensures that open a verity enabled file in a\n+// verity enabled directory fails if the hashes related to the target file in\n+// the parent Merkle tree file is 
modified.\n+func TestModifiedParentMerkleFails(t *testing.T) {\n+ ctx := contexttest.Context(t)\n+ vfsObj, root, cleanup, err := newVerityRoot(ctx)\n+ if err != nil {\n+ t.Fatalf(\"newVerityRoot: %v\", err)\n+ }\n+ defer cleanup()\n+\n+ filename := \"verity-test-file\"\n+ fd, _, err := newFileFD(ctx, vfsObj, root, filename, 0644)\n+ if err != nil {\n+ t.Fatalf(\"newFileFD: %v\", err)\n+ }\n+\n+ // Enable verity on the file.\n+ var args arch.SyscallArguments\n+ args[1] = arch.SyscallArgument{Value: linux.FS_IOC_ENABLE_VERITY}\n+ if _, err := fd.Ioctl(ctx, nil /* uio */, args); err != nil {\n+ t.Fatalf(\"Ioctl: %v\", err)\n+ }\n+\n+ // Enable verity on the parent directory.\n+ parentFD, err := vfsObj.OpenAt(ctx, auth.CredentialsFromContext(ctx), &vfs.PathOperation{\n+ Root: root,\n+ Start: root,\n+ }, &vfs.OpenOptions{\n+ Flags: linux.O_RDONLY,\n+ })\n+ if err != nil {\n+ t.Fatalf(\"OpenAt: %v\", err)\n+ }\n+\n+ if _, err := parentFD.Ioctl(ctx, nil /* uio */, args); err != nil {\n+ t.Fatalf(\"Ioctl: %v\", err)\n+ }\n+\n+ // Open a new lowerMerkleFD that's read/writable.\n+ parentLowerMerkleVD := fd.Impl().(*fileDescription).d.parent.lowerMerkleVD\n+\n+ parentLowerMerkleFD, err := vfsObj.OpenAt(ctx, auth.CredentialsFromContext(ctx), &vfs.PathOperation{\n+ Root: parentLowerMerkleVD,\n+ Start: parentLowerMerkleVD,\n+ }, &vfs.OpenOptions{\n+ Flags: linux.O_RDWR,\n+ })\n+ if err != nil {\n+ t.Fatalf(\"OpenAt: %v\", err)\n+ }\n+\n+ // Flip a random bit in the parent Merkle tree file.\n+ // This parent directory contains only one child, so any random\n+ // modification in the parent Merkle tree should cause verification\n+ // failure when opening the child file.\n+ stat, err := parentLowerMerkleFD.Stat(ctx, vfs.StatOptions{})\n+ if err != nil {\n+ t.Fatalf(\"stat: %v\", err)\n+ }\n+ parentMerkleSize := int(stat.Size)\n+ if err := corruptRandomBit(ctx, parentLowerMerkleFD, parentMerkleSize); err != nil {\n+ t.Fatalf(\"corruptRandomBit: %v\", err)\n+ }\n+\n+ parentLowerMerkleFD.DecRef(ctx)\n+\n+ // Ensure reopening the verity enabled file fails.\n+ if _, err = vfsObj.OpenAt(ctx, auth.CredentialsFromContext(ctx), &vfs.PathOperation{\n+ Root: root,\n+ Start: root,\n+ Path: fspath.Parse(filename),\n+ }, &vfs.OpenOptions{\n+ Flags: linux.O_RDONLY,\n+ Mode: linux.ModeRegular,\n+ }); err == nil {\n+ t.Errorf(\"OpenAt file with modified parent Merkle succeeded\")\n+ }\n+}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add a verity test for modified parent Merkle file
When a child's root hash or its Merkle path is modified in its parent's
Merkle tree file, opening the file should fail, provided the directory
is verity enabled. The test for this behavior is added.
PiperOrigin-RevId: 334963690 |
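A conceptual sketch (not gVisor's on-disk format) of the property the new test checks: a verity-enabled directory's Merkle data covers its children's root hashes, so corrupting the parent's record must make opening the child fail. Plain SHA-256 over in-memory values stands in for the real Merkle tree; all names are assumptions for illustration.

// parent_merkle_sketch.go — parent record over a child's root hash.
package main

import (
	"crypto/sha256"
	"fmt"
)

func digest(b []byte) [32]byte { return sha256.Sum256(b) }

func main() {
	childRootHash := digest([]byte("child file contents"))

	// The "parent Merkle file": here simply a hash over the child's root hash.
	parentRecord := digest(childRootHash[:])

	// Simulate the test: flip one bit in the parent's record.
	corrupted := parentRecord
	corrupted[0] ^= 1

	recomputed := digest(childRootHash[:])
	fmt.Println("open succeeds with intact parent record:   ", recomputed == parentRecord)
	fmt.Println("open succeeds with corrupted parent record:", recomputed == corrupted)
}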
259,885 | 02.10.2020 11:49:24 | 25,200 | ed94c0de51412a41e6657e23685dda876df31d15 | Actually disable nodejs test parallel/test-fs-write-stream-double-close. | [
{
"change_type": "MODIFY",
"old_path": "test/runtimes/exclude/nodejs12.4.0.csv",
"new_path": "test/runtimes/exclude/nodejs12.4.0.csv",
"diff": "@@ -13,7 +13,7 @@ parallel/test-dns-channel-timeout.js,b/161893056,\nparallel/test-fs-access.js,,\nparallel/test-fs-watchfile.js,,Flaky - File already exists error\nparallel/test-fs-write-stream.js,b/166819807,Flaky\n-parallel/test-fs-write-stream-double-close,b/166819807,Flaky\n+parallel/test-fs-write-stream-double-close.js,b/166819807,Flaky\nparallel/test-fs-write-stream-throw-type-error.js,b/166819807,Flaky\nparallel/test-http-writable-true-after-close.js,,Flaky - Mismatched <anonymous> function calls. Expected exactly 1 actual 2\nparallel/test-os.js,b/63997097,\n"
}
] | Go | Apache License 2.0 | google/gvisor | Actually disable nodejs test parallel/test-fs-write-stream-double-close.
PiperOrigin-RevId: 335070320 |
259,985 | 02.10.2020 12:22:34 | 25,200 | 4f462b0ed9912fa19b3a3eab6d2f08a98d364574 | Convert uses of the binary package in kernel to go-marshal. | [
{
"change_type": "MODIFY",
"old_path": "pkg/abi/linux/seccomp.go",
"new_path": "pkg/abi/linux/seccomp.go",
"diff": "@@ -83,3 +83,22 @@ type SockFprog struct {\npad [6]byte\nFilter *BPFInstruction\n}\n+\n+// SeccompData is equivalent to struct seccomp_data, which contains the data\n+// passed to seccomp-bpf filters.\n+//\n+// +marshal\n+type SeccompData struct {\n+ // Nr is the system call number.\n+ Nr int32\n+\n+ // Arch is an AUDIT_ARCH_* value indicating the system call convention.\n+ Arch uint32\n+\n+ // InstructionPointer is the value of the instruction pointer at the time\n+ // of the system call.\n+ InstructionPointer uint64\n+\n+ // Args contains the first 6 system call arguments.\n+ Args [6]uint64\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/abi/linux/signalfd.go",
"new_path": "pkg/abi/linux/signalfd.go",
"diff": "@@ -23,6 +23,8 @@ const (\n)\n// SignalfdSiginfo is the siginfo encoding for signalfds.\n+//\n+// +marshal\ntype SignalfdSiginfo struct {\nSigno uint32\nErrno int32\n@@ -41,5 +43,5 @@ type SignalfdSiginfo struct {\nSTime uint64\nAddr uint64\nAddrLSB uint16\n- _ [48]uint8\n+ _ [48]uint8 `marshal:\"unaligned\"`\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/seccomp/BUILD",
"new_path": "pkg/seccomp/BUILD",
"diff": "@@ -49,7 +49,7 @@ go_test(\nlibrary = \":seccomp\",\ndeps = [\n\"//pkg/abi/linux\",\n- \"//pkg/binary\",\n\"//pkg/bpf\",\n+ \"//pkg/usermem\",\n],\n)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/seccomp/seccomp_test.go",
"new_path": "pkg/seccomp/seccomp_test.go",
"diff": "@@ -28,17 +28,10 @@ import (\n\"time\"\n\"gvisor.dev/gvisor/pkg/abi/linux\"\n- \"gvisor.dev/gvisor/pkg/binary\"\n\"gvisor.dev/gvisor/pkg/bpf\"\n+ \"gvisor.dev/gvisor/pkg/usermem\"\n)\n-type seccompData struct {\n- nr uint32\n- arch uint32\n- instructionPointer uint64\n- args [6]uint64\n-}\n-\n// newVictim makes a victim binary.\nfunc newVictim() (string, error) {\nf, err := ioutil.TempFile(\"\", \"victim\")\n@@ -58,9 +51,14 @@ func newVictim() (string, error) {\nreturn path, nil\n}\n-// asInput converts a seccompData to a bpf.Input.\n-func (d *seccompData) asInput() bpf.Input {\n- return bpf.InputBytes{binary.Marshal(nil, binary.LittleEndian, d), binary.LittleEndian}\n+// dataAsInput converts a linux.SeccompData to a bpf.Input.\n+func dataAsInput(d *linux.SeccompData) bpf.Input {\n+ buf := make([]byte, d.SizeBytes())\n+ d.MarshalUnsafe(buf)\n+ return bpf.InputBytes{\n+ Data: buf,\n+ Order: usermem.ByteOrder,\n+ }\n}\nfunc TestBasic(t *testing.T) {\n@@ -69,7 +67,7 @@ func TestBasic(t *testing.T) {\ndesc string\n// data is the input data.\n- data seccompData\n+ data linux.SeccompData\n// want is the expected return value of the BPF program.\nwant linux.BPFAction\n@@ -95,12 +93,12 @@ func TestBasic(t *testing.T) {\nspecs: []spec{\n{\ndesc: \"syscall allowed\",\n- data: seccompData{nr: 1, arch: LINUX_AUDIT_ARCH},\n+ data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH},\nwant: linux.SECCOMP_RET_ALLOW,\n},\n{\ndesc: \"syscall disallowed\",\n- data: seccompData{nr: 2, arch: LINUX_AUDIT_ARCH},\n+ data: linux.SeccompData{Nr: 2, Arch: LINUX_AUDIT_ARCH},\nwant: linux.SECCOMP_RET_TRAP,\n},\n},\n@@ -131,22 +129,22 @@ func TestBasic(t *testing.T) {\nspecs: []spec{\n{\ndesc: \"allowed (1a)\",\n- data: seccompData{nr: 1, arch: LINUX_AUDIT_ARCH, args: [6]uint64{0x1}},\n+ data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH, Args: [6]uint64{0x1}},\nwant: linux.SECCOMP_RET_ALLOW,\n},\n{\ndesc: \"allowed (1b)\",\n- data: seccompData{nr: 1, arch: LINUX_AUDIT_ARCH},\n+ data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH},\nwant: linux.SECCOMP_RET_TRAP,\n},\n{\ndesc: \"syscall 1 matched 2nd rule\",\n- data: seccompData{nr: 1, arch: LINUX_AUDIT_ARCH},\n+ data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH},\nwant: linux.SECCOMP_RET_TRAP,\n},\n{\ndesc: \"no match\",\n- data: seccompData{nr: 0, arch: LINUX_AUDIT_ARCH},\n+ data: linux.SeccompData{Nr: 0, Arch: LINUX_AUDIT_ARCH},\nwant: linux.SECCOMP_RET_KILL_THREAD,\n},\n},\n@@ -168,42 +166,42 @@ func TestBasic(t *testing.T) {\nspecs: []spec{\n{\ndesc: \"allowed (1)\",\n- data: seccompData{nr: 1, arch: LINUX_AUDIT_ARCH},\n+ data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH},\nwant: linux.SECCOMP_RET_ALLOW,\n},\n{\ndesc: \"allowed (3)\",\n- data: seccompData{nr: 3, arch: LINUX_AUDIT_ARCH},\n+ data: linux.SeccompData{Nr: 3, Arch: LINUX_AUDIT_ARCH},\nwant: linux.SECCOMP_RET_ALLOW,\n},\n{\ndesc: \"allowed (5)\",\n- data: seccompData{nr: 5, arch: LINUX_AUDIT_ARCH},\n+ data: linux.SeccompData{Nr: 5, Arch: LINUX_AUDIT_ARCH},\nwant: linux.SECCOMP_RET_ALLOW,\n},\n{\ndesc: \"disallowed (0)\",\n- data: seccompData{nr: 0, arch: LINUX_AUDIT_ARCH},\n+ data: linux.SeccompData{Nr: 0, Arch: LINUX_AUDIT_ARCH},\nwant: linux.SECCOMP_RET_TRAP,\n},\n{\ndesc: \"disallowed (2)\",\n- data: seccompData{nr: 2, arch: LINUX_AUDIT_ARCH},\n+ data: linux.SeccompData{Nr: 2, Arch: LINUX_AUDIT_ARCH},\nwant: linux.SECCOMP_RET_TRAP,\n},\n{\ndesc: \"disallowed (4)\",\n- data: seccompData{nr: 4, arch: LINUX_AUDIT_ARCH},\n+ data: linux.SeccompData{Nr: 4, Arch: LINUX_AUDIT_ARCH},\nwant: 
linux.SECCOMP_RET_TRAP,\n},\n{\ndesc: \"disallowed (6)\",\n- data: seccompData{nr: 6, arch: LINUX_AUDIT_ARCH},\n+ data: linux.SeccompData{Nr: 6, Arch: LINUX_AUDIT_ARCH},\nwant: linux.SECCOMP_RET_TRAP,\n},\n{\ndesc: \"disallowed (100)\",\n- data: seccompData{nr: 100, arch: LINUX_AUDIT_ARCH},\n+ data: linux.SeccompData{Nr: 100, Arch: LINUX_AUDIT_ARCH},\nwant: linux.SECCOMP_RET_TRAP,\n},\n},\n@@ -223,7 +221,7 @@ func TestBasic(t *testing.T) {\nspecs: []spec{\n{\ndesc: \"arch (123)\",\n- data: seccompData{nr: 1, arch: 123},\n+ data: linux.SeccompData{Nr: 1, Arch: 123},\nwant: linux.SECCOMP_RET_KILL_THREAD,\n},\n},\n@@ -243,7 +241,7 @@ func TestBasic(t *testing.T) {\nspecs: []spec{\n{\ndesc: \"action trap\",\n- data: seccompData{nr: 2, arch: LINUX_AUDIT_ARCH},\n+ data: linux.SeccompData{Nr: 2, Arch: LINUX_AUDIT_ARCH},\nwant: linux.SECCOMP_RET_TRAP,\n},\n},\n@@ -268,12 +266,12 @@ func TestBasic(t *testing.T) {\nspecs: []spec{\n{\ndesc: \"allowed\",\n- data: seccompData{nr: 1, arch: LINUX_AUDIT_ARCH, args: [6]uint64{0xf, 0xf}},\n+ data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH, Args: [6]uint64{0xf, 0xf}},\nwant: linux.SECCOMP_RET_ALLOW,\n},\n{\ndesc: \"disallowed\",\n- data: seccompData{nr: 1, arch: LINUX_AUDIT_ARCH, args: [6]uint64{0xf, 0xe}},\n+ data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH, Args: [6]uint64{0xf, 0xe}},\nwant: linux.SECCOMP_RET_TRAP,\n},\n},\n@@ -300,12 +298,12 @@ func TestBasic(t *testing.T) {\nspecs: []spec{\n{\ndesc: \"match first rule\",\n- data: seccompData{nr: 1, arch: LINUX_AUDIT_ARCH, args: [6]uint64{0xf}},\n+ data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH, Args: [6]uint64{0xf}},\nwant: linux.SECCOMP_RET_ALLOW,\n},\n{\ndesc: \"match 2nd rule\",\n- data: seccompData{nr: 1, arch: LINUX_AUDIT_ARCH, args: [6]uint64{0xe}},\n+ data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH, Args: [6]uint64{0xe}},\nwant: linux.SECCOMP_RET_ALLOW,\n},\n},\n@@ -331,28 +329,28 @@ func TestBasic(t *testing.T) {\nspecs: []spec{\n{\ndesc: \"argument allowed (all match)\",\n- data: seccompData{\n- nr: 1,\n- arch: LINUX_AUDIT_ARCH,\n- args: [6]uint64{0, math.MaxUint64 - 1, math.MaxUint32},\n+ data: linux.SeccompData{\n+ Nr: 1,\n+ Arch: LINUX_AUDIT_ARCH,\n+ Args: [6]uint64{0, math.MaxUint64 - 1, math.MaxUint32},\n},\nwant: linux.SECCOMP_RET_ALLOW,\n},\n{\ndesc: \"argument disallowed (one mismatch)\",\n- data: seccompData{\n- nr: 1,\n- arch: LINUX_AUDIT_ARCH,\n- args: [6]uint64{0, math.MaxUint64, math.MaxUint32},\n+ data: linux.SeccompData{\n+ Nr: 1,\n+ Arch: LINUX_AUDIT_ARCH,\n+ Args: [6]uint64{0, math.MaxUint64, math.MaxUint32},\n},\nwant: linux.SECCOMP_RET_TRAP,\n},\n{\ndesc: \"argument disallowed (multiple mismatch)\",\n- data: seccompData{\n- nr: 1,\n- arch: LINUX_AUDIT_ARCH,\n- args: [6]uint64{0, math.MaxUint64, math.MaxUint32 - 1},\n+ data: linux.SeccompData{\n+ Nr: 1,\n+ Arch: LINUX_AUDIT_ARCH,\n+ Args: [6]uint64{0, math.MaxUint64, math.MaxUint32 - 1},\n},\nwant: linux.SECCOMP_RET_TRAP,\n},\n@@ -379,28 +377,28 @@ func TestBasic(t *testing.T) {\nspecs: []spec{\n{\ndesc: \"arg allowed\",\n- data: seccompData{\n- nr: 1,\n- arch: LINUX_AUDIT_ARCH,\n- args: [6]uint64{0, math.MaxUint64, math.MaxUint32 - 1},\n+ data: linux.SeccompData{\n+ Nr: 1,\n+ Arch: LINUX_AUDIT_ARCH,\n+ Args: [6]uint64{0, math.MaxUint64, math.MaxUint32 - 1},\n},\nwant: linux.SECCOMP_RET_ALLOW,\n},\n{\ndesc: \"arg disallowed (one equal)\",\n- data: seccompData{\n- nr: 1,\n- arch: LINUX_AUDIT_ARCH,\n- args: [6]uint64{0x7aabbccdd, math.MaxUint64, math.MaxUint32 - 1},\n+ data: linux.SeccompData{\n+ Nr: 1,\n+ 
Arch: LINUX_AUDIT_ARCH,\n+ Args: [6]uint64{0x7aabbccdd, math.MaxUint64, math.MaxUint32 - 1},\n},\nwant: linux.SECCOMP_RET_TRAP,\n},\n{\ndesc: \"arg disallowed (all equal)\",\n- data: seccompData{\n- nr: 1,\n- arch: LINUX_AUDIT_ARCH,\n- args: [6]uint64{0x7aabbccdd, math.MaxUint64 - 1, math.MaxUint32},\n+ data: linux.SeccompData{\n+ Nr: 1,\n+ Arch: LINUX_AUDIT_ARCH,\n+ Args: [6]uint64{0x7aabbccdd, math.MaxUint64 - 1, math.MaxUint32},\n},\nwant: linux.SECCOMP_RET_TRAP,\n},\n@@ -429,27 +427,27 @@ func TestBasic(t *testing.T) {\nspecs: []spec{\n{\ndesc: \"high 32bits greater\",\n- data: seccompData{nr: 1, arch: LINUX_AUDIT_ARCH, args: [6]uint64{0x00000003_00000002}},\n+ data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH, Args: [6]uint64{0x00000003_00000002}},\nwant: linux.SECCOMP_RET_ALLOW,\n},\n{\ndesc: \"high 32bits equal, low 32bits greater\",\n- data: seccompData{nr: 1, arch: LINUX_AUDIT_ARCH, args: [6]uint64{0x00000002_00000003}},\n+ data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH, Args: [6]uint64{0x00000002_00000003}},\nwant: linux.SECCOMP_RET_ALLOW,\n},\n{\ndesc: \"high 32bits equal, low 32bits equal\",\n- data: seccompData{nr: 1, arch: LINUX_AUDIT_ARCH, args: [6]uint64{0x00000002_00000002}},\n+ data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH, Args: [6]uint64{0x00000002_00000002}},\nwant: linux.SECCOMP_RET_TRAP,\n},\n{\ndesc: \"high 32bits equal, low 32bits less\",\n- data: seccompData{nr: 1, arch: LINUX_AUDIT_ARCH, args: [6]uint64{0x00000002_00000001}},\n+ data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH, Args: [6]uint64{0x00000002_00000001}},\nwant: linux.SECCOMP_RET_TRAP,\n},\n{\ndesc: \"high 32bits less\",\n- data: seccompData{nr: 1, arch: LINUX_AUDIT_ARCH, args: [6]uint64{0x00000001_00000003}},\n+ data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH, Args: [6]uint64{0x00000001_00000003}},\nwant: linux.SECCOMP_RET_TRAP,\n},\n},\n@@ -474,27 +472,27 @@ func TestBasic(t *testing.T) {\nspecs: []spec{\n{\ndesc: \"arg allowed\",\n- data: seccompData{nr: 1, arch: LINUX_AUDIT_ARCH, args: [6]uint64{0x10, 0xffffffff}},\n+ data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH, Args: [6]uint64{0x10, 0xffffffff}},\nwant: linux.SECCOMP_RET_ALLOW,\n},\n{\ndesc: \"arg disallowed (first arg equal)\",\n- data: seccompData{nr: 1, arch: LINUX_AUDIT_ARCH, args: [6]uint64{0xf, 0xffffffff}},\n+ data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH, Args: [6]uint64{0xf, 0xffffffff}},\nwant: linux.SECCOMP_RET_TRAP,\n},\n{\ndesc: \"arg disallowed (first arg smaller)\",\n- data: seccompData{nr: 1, arch: LINUX_AUDIT_ARCH, args: [6]uint64{0x0, 0xffffffff}},\n+ data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH, Args: [6]uint64{0x0, 0xffffffff}},\nwant: linux.SECCOMP_RET_TRAP,\n},\n{\ndesc: \"arg disallowed (second arg equal)\",\n- data: seccompData{nr: 1, arch: LINUX_AUDIT_ARCH, args: [6]uint64{0x10, 0xabcd000d}},\n+ data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH, Args: [6]uint64{0x10, 0xabcd000d}},\nwant: linux.SECCOMP_RET_TRAP,\n},\n{\ndesc: \"arg disallowed (second arg smaller)\",\n- data: seccompData{nr: 1, arch: LINUX_AUDIT_ARCH, args: [6]uint64{0x10, 0xa000ffff}},\n+ data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH, Args: [6]uint64{0x10, 0xa000ffff}},\nwant: linux.SECCOMP_RET_TRAP,\n},\n},\n@@ -522,27 +520,27 @@ func TestBasic(t *testing.T) {\nspecs: []spec{\n{\ndesc: \"high 32bits greater\",\n- data: seccompData{nr: 1, arch: LINUX_AUDIT_ARCH, args: [6]uint64{0x00000003_00000002}},\n+ data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH, Args: 
[6]uint64{0x00000003_00000002}},\nwant: linux.SECCOMP_RET_ALLOW,\n},\n{\ndesc: \"high 32bits equal, low 32bits greater\",\n- data: seccompData{nr: 1, arch: LINUX_AUDIT_ARCH, args: [6]uint64{0x00000002_00000003}},\n+ data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH, Args: [6]uint64{0x00000002_00000003}},\nwant: linux.SECCOMP_RET_ALLOW,\n},\n{\ndesc: \"high 32bits equal, low 32bits equal\",\n- data: seccompData{nr: 1, arch: LINUX_AUDIT_ARCH, args: [6]uint64{0x00000002_00000002}},\n+ data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH, Args: [6]uint64{0x00000002_00000002}},\nwant: linux.SECCOMP_RET_ALLOW,\n},\n{\ndesc: \"high 32bits equal, low 32bits less\",\n- data: seccompData{nr: 1, arch: LINUX_AUDIT_ARCH, args: [6]uint64{0x00000002_00000001}},\n+ data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH, Args: [6]uint64{0x00000002_00000001}},\nwant: linux.SECCOMP_RET_TRAP,\n},\n{\ndesc: \"high 32bits less\",\n- data: seccompData{nr: 1, arch: LINUX_AUDIT_ARCH, args: [6]uint64{0x00000001_00000002}},\n+ data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH, Args: [6]uint64{0x00000001_00000002}},\nwant: linux.SECCOMP_RET_TRAP,\n},\n},\n@@ -567,32 +565,32 @@ func TestBasic(t *testing.T) {\nspecs: []spec{\n{\ndesc: \"arg allowed (both greater)\",\n- data: seccompData{nr: 1, arch: LINUX_AUDIT_ARCH, args: [6]uint64{0x10, 0xffffffff}},\n+ data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH, Args: [6]uint64{0x10, 0xffffffff}},\nwant: linux.SECCOMP_RET_ALLOW,\n},\n{\ndesc: \"arg allowed (first arg equal)\",\n- data: seccompData{nr: 1, arch: LINUX_AUDIT_ARCH, args: [6]uint64{0xf, 0xffffffff}},\n+ data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH, Args: [6]uint64{0xf, 0xffffffff}},\nwant: linux.SECCOMP_RET_ALLOW,\n},\n{\ndesc: \"arg disallowed (first arg smaller)\",\n- data: seccompData{nr: 1, arch: LINUX_AUDIT_ARCH, args: [6]uint64{0x0, 0xffffffff}},\n+ data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH, Args: [6]uint64{0x0, 0xffffffff}},\nwant: linux.SECCOMP_RET_TRAP,\n},\n{\ndesc: \"arg allowed (second arg equal)\",\n- data: seccompData{nr: 1, arch: LINUX_AUDIT_ARCH, args: [6]uint64{0x10, 0xabcd000d}},\n+ data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH, Args: [6]uint64{0x10, 0xabcd000d}},\nwant: linux.SECCOMP_RET_ALLOW,\n},\n{\ndesc: \"arg disallowed (second arg smaller)\",\n- data: seccompData{nr: 1, arch: LINUX_AUDIT_ARCH, args: [6]uint64{0x10, 0xa000ffff}},\n+ data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH, Args: [6]uint64{0x10, 0xa000ffff}},\nwant: linux.SECCOMP_RET_TRAP,\n},\n{\ndesc: \"arg disallowed (both arg smaller)\",\n- data: seccompData{nr: 1, arch: LINUX_AUDIT_ARCH, args: [6]uint64{0x0, 0xa000ffff}},\n+ data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH, Args: [6]uint64{0x0, 0xa000ffff}},\nwant: linux.SECCOMP_RET_TRAP,\n},\n},\n@@ -620,27 +618,27 @@ func TestBasic(t *testing.T) {\nspecs: []spec{\n{\ndesc: \"high 32bits greater\",\n- data: seccompData{nr: 1, arch: LINUX_AUDIT_ARCH, args: [6]uint64{0x00000003_00000002}},\n+ data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH, Args: [6]uint64{0x00000003_00000002}},\nwant: linux.SECCOMP_RET_TRAP,\n},\n{\ndesc: \"high 32bits equal, low 32bits greater\",\n- data: seccompData{nr: 1, arch: LINUX_AUDIT_ARCH, args: [6]uint64{0x00000002_00000003}},\n+ data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH, Args: [6]uint64{0x00000002_00000003}},\nwant: linux.SECCOMP_RET_TRAP,\n},\n{\ndesc: \"high 32bits equal, low 32bits equal\",\n- data: seccompData{nr: 1, arch: LINUX_AUDIT_ARCH, args: 
[6]uint64{0x00000002_00000002}},\n+ data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH, Args: [6]uint64{0x00000002_00000002}},\nwant: linux.SECCOMP_RET_TRAP,\n},\n{\ndesc: \"high 32bits equal, low 32bits less\",\n- data: seccompData{nr: 1, arch: LINUX_AUDIT_ARCH, args: [6]uint64{0x00000002_00000001}},\n+ data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH, Args: [6]uint64{0x00000002_00000001}},\nwant: linux.SECCOMP_RET_ALLOW,\n},\n{\ndesc: \"high 32bits less\",\n- data: seccompData{nr: 1, arch: LINUX_AUDIT_ARCH, args: [6]uint64{0x00000001_00000002}},\n+ data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH, Args: [6]uint64{0x00000001_00000002}},\nwant: linux.SECCOMP_RET_ALLOW,\n},\n},\n@@ -665,32 +663,32 @@ func TestBasic(t *testing.T) {\nspecs: []spec{\n{\ndesc: \"arg allowed\",\n- data: seccompData{nr: 1, arch: LINUX_AUDIT_ARCH, args: [6]uint64{0x0, 0x0}},\n+ data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH, Args: [6]uint64{0x0, 0x0}},\nwant: linux.SECCOMP_RET_ALLOW,\n},\n{\ndesc: \"arg disallowed (first arg equal)\",\n- data: seccompData{nr: 1, arch: LINUX_AUDIT_ARCH, args: [6]uint64{0x1, 0x0}},\n+ data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH, Args: [6]uint64{0x1, 0x0}},\nwant: linux.SECCOMP_RET_TRAP,\n},\n{\ndesc: \"arg disallowed (first arg greater)\",\n- data: seccompData{nr: 1, arch: LINUX_AUDIT_ARCH, args: [6]uint64{0x2, 0x0}},\n+ data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH, Args: [6]uint64{0x2, 0x0}},\nwant: linux.SECCOMP_RET_TRAP,\n},\n{\ndesc: \"arg disallowed (second arg equal)\",\n- data: seccompData{nr: 1, arch: LINUX_AUDIT_ARCH, args: [6]uint64{0x0, 0xabcd000d}},\n+ data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH, Args: [6]uint64{0x0, 0xabcd000d}},\nwant: linux.SECCOMP_RET_TRAP,\n},\n{\ndesc: \"arg disallowed (second arg greater)\",\n- data: seccompData{nr: 1, arch: LINUX_AUDIT_ARCH, args: [6]uint64{0x0, 0xffffffff}},\n+ data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH, Args: [6]uint64{0x0, 0xffffffff}},\nwant: linux.SECCOMP_RET_TRAP,\n},\n{\ndesc: \"arg disallowed (both arg greater)\",\n- data: seccompData{nr: 1, arch: LINUX_AUDIT_ARCH, args: [6]uint64{0x2, 0xffffffff}},\n+ data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH, Args: [6]uint64{0x2, 0xffffffff}},\nwant: linux.SECCOMP_RET_TRAP,\n},\n},\n@@ -718,27 +716,27 @@ func TestBasic(t *testing.T) {\nspecs: []spec{\n{\ndesc: \"high 32bits greater\",\n- data: seccompData{nr: 1, arch: LINUX_AUDIT_ARCH, args: [6]uint64{0x00000003_00000002}},\n+ data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH, Args: [6]uint64{0x00000003_00000002}},\nwant: linux.SECCOMP_RET_TRAP,\n},\n{\ndesc: \"high 32bits equal, low 32bits greater\",\n- data: seccompData{nr: 1, arch: LINUX_AUDIT_ARCH, args: [6]uint64{0x00000002_00000003}},\n+ data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH, Args: [6]uint64{0x00000002_00000003}},\nwant: linux.SECCOMP_RET_TRAP,\n},\n{\ndesc: \"high 32bits equal, low 32bits equal\",\n- data: seccompData{nr: 1, arch: LINUX_AUDIT_ARCH, args: [6]uint64{0x00000002_00000002}},\n+ data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH, Args: [6]uint64{0x00000002_00000002}},\nwant: linux.SECCOMP_RET_ALLOW,\n},\n{\ndesc: \"high 32bits equal, low 32bits less\",\n- data: seccompData{nr: 1, arch: LINUX_AUDIT_ARCH, args: [6]uint64{0x00000002_00000001}},\n+ data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH, Args: [6]uint64{0x00000002_00000001}},\nwant: linux.SECCOMP_RET_ALLOW,\n},\n{\ndesc: \"high 32bits less\",\n- data: seccompData{nr: 1, arch: LINUX_AUDIT_ARCH, args: 
[6]uint64{0x00000001_00000002}},\n+ data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH, Args: [6]uint64{0x00000001_00000002}},\nwant: linux.SECCOMP_RET_ALLOW,\n},\n},\n@@ -764,32 +762,32 @@ func TestBasic(t *testing.T) {\nspecs: []spec{\n{\ndesc: \"arg allowed\",\n- data: seccompData{nr: 1, arch: LINUX_AUDIT_ARCH, args: [6]uint64{0x0, 0x0}},\n+ data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH, Args: [6]uint64{0x0, 0x0}},\nwant: linux.SECCOMP_RET_ALLOW,\n},\n{\ndesc: \"arg allowed (first arg equal)\",\n- data: seccompData{nr: 1, arch: LINUX_AUDIT_ARCH, args: [6]uint64{0x1, 0x0}},\n+ data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH, Args: [6]uint64{0x1, 0x0}},\nwant: linux.SECCOMP_RET_ALLOW,\n},\n{\ndesc: \"arg disallowed (first arg greater)\",\n- data: seccompData{nr: 1, arch: LINUX_AUDIT_ARCH, args: [6]uint64{0x2, 0x0}},\n+ data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH, Args: [6]uint64{0x2, 0x0}},\nwant: linux.SECCOMP_RET_TRAP,\n},\n{\ndesc: \"arg allowed (second arg equal)\",\n- data: seccompData{nr: 1, arch: LINUX_AUDIT_ARCH, args: [6]uint64{0x0, 0xabcd000d}},\n+ data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH, Args: [6]uint64{0x0, 0xabcd000d}},\nwant: linux.SECCOMP_RET_ALLOW,\n},\n{\ndesc: \"arg disallowed (second arg greater)\",\n- data: seccompData{nr: 1, arch: LINUX_AUDIT_ARCH, args: [6]uint64{0x0, 0xffffffff}},\n+ data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH, Args: [6]uint64{0x0, 0xffffffff}},\nwant: linux.SECCOMP_RET_TRAP,\n},\n{\ndesc: \"arg disallowed (both arg greater)\",\n- data: seccompData{nr: 1, arch: LINUX_AUDIT_ARCH, args: [6]uint64{0x2, 0xffffffff}},\n+ data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH, Args: [6]uint64{0x2, 0xffffffff}},\nwant: linux.SECCOMP_RET_TRAP,\n},\n},\n@@ -816,51 +814,51 @@ func TestBasic(t *testing.T) {\nspecs: []spec{\n{\ndesc: \"arg allowed (low order mandatory bit)\",\n- data: seccompData{\n- nr: 1,\n- arch: LINUX_AUDIT_ARCH,\n+ data: linux.SeccompData{\n+ Nr: 1,\n+ Arch: LINUX_AUDIT_ARCH,\n// 00000000 00000000 00000000 00000001\n- args: [6]uint64{0x1},\n+ Args: [6]uint64{0x1},\n},\nwant: linux.SECCOMP_RET_ALLOW,\n},\n{\ndesc: \"arg allowed (low order optional bit)\",\n- data: seccompData{\n- nr: 1,\n- arch: LINUX_AUDIT_ARCH,\n+ data: linux.SeccompData{\n+ Nr: 1,\n+ Arch: LINUX_AUDIT_ARCH,\n// 00000000 00000000 00000000 00000101\n- args: [6]uint64{0x5},\n+ Args: [6]uint64{0x5},\n},\nwant: linux.SECCOMP_RET_ALLOW,\n},\n{\ndesc: \"arg disallowed (lowest order bit not set)\",\n- data: seccompData{\n- nr: 1,\n- arch: LINUX_AUDIT_ARCH,\n+ data: linux.SeccompData{\n+ Nr: 1,\n+ Arch: LINUX_AUDIT_ARCH,\n// 00000000 00000000 00000000 00000010\n- args: [6]uint64{0x2},\n+ Args: [6]uint64{0x2},\n},\nwant: linux.SECCOMP_RET_TRAP,\n},\n{\ndesc: \"arg disallowed (second lowest order bit set)\",\n- data: seccompData{\n- nr: 1,\n- arch: LINUX_AUDIT_ARCH,\n+ data: linux.SeccompData{\n+ Nr: 1,\n+ Arch: LINUX_AUDIT_ARCH,\n// 00000000 00000000 00000000 00000011\n- args: [6]uint64{0x3},\n+ Args: [6]uint64{0x3},\n},\nwant: linux.SECCOMP_RET_TRAP,\n},\n{\ndesc: \"arg disallowed (8th bit set)\",\n- data: seccompData{\n- nr: 1,\n- arch: LINUX_AUDIT_ARCH,\n+ data: linux.SeccompData{\n+ Nr: 1,\n+ Arch: LINUX_AUDIT_ARCH,\n// 00000000 00000000 00000001 00000000\n- args: [6]uint64{0x100},\n+ Args: [6]uint64{0x100},\n},\nwant: linux.SECCOMP_RET_TRAP,\n},\n@@ -885,12 +883,12 @@ func TestBasic(t *testing.T) {\nspecs: []spec{\n{\ndesc: \"allowed\",\n- data: seccompData{nr: 1, arch: LINUX_AUDIT_ARCH, args: [6]uint64{}, instructionPointer: 
0x7aabbccdd},\n+ data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH, Args: [6]uint64{}, InstructionPointer: 0x7aabbccdd},\nwant: linux.SECCOMP_RET_ALLOW,\n},\n{\ndesc: \"disallowed\",\n- data: seccompData{nr: 1, arch: LINUX_AUDIT_ARCH, args: [6]uint64{}, instructionPointer: 0x711223344},\n+ data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH, Args: [6]uint64{}, InstructionPointer: 0x711223344},\nwant: linux.SECCOMP_RET_TRAP,\n},\n},\n@@ -906,7 +904,7 @@ func TestBasic(t *testing.T) {\nt.Fatalf(\"bpf.Compile() got error: %v\", err)\n}\nfor _, spec := range test.specs {\n- got, err := bpf.Exec(p, spec.data.asInput())\n+ got, err := bpf.Exec(p, dataAsInput(&spec.data))\nif err != nil {\nt.Fatalf(\"%s: bpf.Exec() got error: %v\", spec.desc, err)\n}\n@@ -947,8 +945,8 @@ func TestRandom(t *testing.T) {\nt.Fatalf(\"bpf.Compile() got error: %v\", err)\n}\nfor i := uint32(0); i < 200; i++ {\n- data := seccompData{nr: i, arch: LINUX_AUDIT_ARCH}\n- got, err := bpf.Exec(p, data.asInput())\n+ data := linux.SeccompData{Nr: int32(i), Arch: LINUX_AUDIT_ARCH}\n+ got, err := bpf.Exec(p, dataAsInput(&data))\nif err != nil {\nt.Errorf(\"bpf.Exec() got error: %v, for syscall %d\", err, i)\ncontinue\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/signalfd/BUILD",
"new_path": "pkg/sentry/fsimpl/signalfd/BUILD",
"diff": "@@ -8,7 +8,6 @@ go_library(\nvisibility = [\"//pkg/sentry:internal\"],\ndeps = [\n\"//pkg/abi/linux\",\n- \"//pkg/binary\",\n\"//pkg/context\",\n\"//pkg/sentry/kernel\",\n\"//pkg/sentry/vfs\",\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/signalfd/signalfd.go",
"new_path": "pkg/sentry/fsimpl/signalfd/signalfd.go",
"diff": "@@ -16,7 +16,6 @@ package signalfd\nimport (\n\"gvisor.dev/gvisor/pkg/abi/linux\"\n- \"gvisor.dev/gvisor/pkg/binary\"\n\"gvisor.dev/gvisor/pkg/context\"\n\"gvisor.dev/gvisor/pkg/sentry/kernel\"\n\"gvisor.dev/gvisor/pkg/sentry/vfs\"\n@@ -95,8 +94,7 @@ func (sfd *SignalFileDescription) Read(ctx context.Context, dst usermem.IOSequen\n}\n// Copy out the signal info using the specified format.\n- var buf [128]byte\n- binary.Marshal(buf[:0], usermem.ByteOrder, &linux.SignalfdSiginfo{\n+ infoNative := linux.SignalfdSiginfo{\nSigno: uint32(info.Signo),\nErrno: info.Errno,\nCode: info.Code,\n@@ -105,9 +103,13 @@ func (sfd *SignalFileDescription) Read(ctx context.Context, dst usermem.IOSequen\nStatus: info.Status(),\nOverrun: uint32(info.Overrun()),\nAddr: info.Addr(),\n- })\n- n, err := dst.CopyOut(ctx, buf[:])\n- return int64(n), err\n+ }\n+ n, err := infoNative.WriteTo(dst.Writer(ctx))\n+ if err == usermem.ErrEndOfIOSequence {\n+ // Partial copy-out ok.\n+ err = nil\n+ }\n+ return n, err\n}\n// Readiness implements waiter.Waitable.Readiness.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/BUILD",
"new_path": "pkg/sentry/kernel/BUILD",
"diff": "@@ -204,7 +204,6 @@ go_library(\n\"//pkg/abi\",\n\"//pkg/abi/linux\",\n\"//pkg/amutex\",\n- \"//pkg/binary\",\n\"//pkg/bits\",\n\"//pkg/bpf\",\n\"//pkg/context\",\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/seccomp.go",
"new_path": "pkg/sentry/kernel/seccomp.go",
"diff": "@@ -18,7 +18,6 @@ import (\n\"syscall\"\n\"gvisor.dev/gvisor/pkg/abi/linux\"\n- \"gvisor.dev/gvisor/pkg/binary\"\n\"gvisor.dev/gvisor/pkg/bpf\"\n\"gvisor.dev/gvisor/pkg/sentry/arch\"\n\"gvisor.dev/gvisor/pkg/syserror\"\n@@ -27,25 +26,18 @@ import (\nconst maxSyscallFilterInstructions = 1 << 15\n-// seccompData is equivalent to struct seccomp_data, which contains the data\n-// passed to seccomp-bpf filters.\n-type seccompData struct {\n- // nr is the system call number.\n- nr int32\n-\n- // arch is an AUDIT_ARCH_* value indicating the system call convention.\n- arch uint32\n-\n- // instructionPointer is the value of the instruction pointer at the time\n- // of the system call.\n- instructionPointer uint64\n-\n- // args contains the first 6 system call arguments.\n- args [6]uint64\n+// dataAsBPFInput returns a serialized BPF program, only valid on the current task\n+// goroutine.\n+//\n+// Note: this is called for every syscall, which is a very hot path.\n+func dataAsBPFInput(t *Task, d *linux.SeccompData) bpf.Input {\n+ buf := t.CopyScratchBuffer(d.SizeBytes())\n+ d.MarshalUnsafe(buf)\n+ return bpf.InputBytes{\n+ Data: buf,\n+ // Go-marshal always uses the native byte order.\n+ Order: usermem.ByteOrder,\n}\n-\n-func (d *seccompData) asBPFInput() bpf.Input {\n- return bpf.InputBytes{binary.Marshal(nil, usermem.ByteOrder, d), usermem.ByteOrder}\n}\nfunc seccompSiginfo(t *Task, errno, sysno int32, ip usermem.Addr) *arch.SignalInfo {\n@@ -112,20 +104,20 @@ func (t *Task) checkSeccompSyscall(sysno int32, args arch.SyscallArguments, ip u\n}\nfunc (t *Task) evaluateSyscallFilters(sysno int32, args arch.SyscallArguments, ip usermem.Addr) uint32 {\n- data := seccompData{\n- nr: sysno,\n- arch: t.tc.st.AuditNumber,\n- instructionPointer: uint64(ip),\n+ data := linux.SeccompData{\n+ Nr: sysno,\n+ Arch: t.tc.st.AuditNumber,\n+ InstructionPointer: uint64(ip),\n}\n// data.args is []uint64 and args is []arch.SyscallArgument (uintptr), so\n// we can't do any slicing tricks or even use copy/append here.\nfor i, arg := range args {\n- if i >= len(data.args) {\n+ if i >= len(data.Args) {\nbreak\n}\n- data.args[i] = arg.Uint64()\n+ data.Args[i] = arg.Uint64()\n}\n- input := data.asBPFInput()\n+ input := dataAsBPFInput(t, &data)\nret := uint32(linux.SECCOMP_RET_ALLOW)\nf := t.syscallFilters.Load()\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/signalfd/BUILD",
"new_path": "pkg/sentry/kernel/signalfd/BUILD",
"diff": "@@ -8,7 +8,6 @@ go_library(\nvisibility = [\"//pkg/sentry:internal\"],\ndeps = [\n\"//pkg/abi/linux\",\n- \"//pkg/binary\",\n\"//pkg/context\",\n\"//pkg/sentry/fs\",\n\"//pkg/sentry/fs/anon\",\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/signalfd/signalfd.go",
"new_path": "pkg/sentry/kernel/signalfd/signalfd.go",
"diff": "@@ -17,7 +17,6 @@ package signalfd\nimport (\n\"gvisor.dev/gvisor/pkg/abi/linux\"\n- \"gvisor.dev/gvisor/pkg/binary\"\n\"gvisor.dev/gvisor/pkg/context\"\n\"gvisor.dev/gvisor/pkg/sentry/fs\"\n\"gvisor.dev/gvisor/pkg/sentry/fs/anon\"\n@@ -103,8 +102,7 @@ func (s *SignalOperations) Read(ctx context.Context, _ *fs.File, dst usermem.IOS\n}\n// Copy out the signal info using the specified format.\n- var buf [128]byte\n- binary.Marshal(buf[:0], usermem.ByteOrder, &linux.SignalfdSiginfo{\n+ infoNative := linux.SignalfdSiginfo{\nSigno: uint32(info.Signo),\nErrno: info.Errno,\nCode: info.Code,\n@@ -113,9 +111,13 @@ func (s *SignalOperations) Read(ctx context.Context, _ *fs.File, dst usermem.IOS\nStatus: info.Status(),\nOverrun: uint32(info.Overrun()),\nAddr: info.Addr(),\n- })\n- n, err := dst.CopyOut(ctx, buf[:])\n- return int64(n), err\n+ }\n+ n, err := infoNative.WriteTo(dst.Writer(ctx))\n+ if err == usermem.ErrEndOfIOSequence {\n+ // Partial copy-out ok.\n+ err = nil\n+ }\n+ return n, err\n}\n// Readiness implements waiter.Waitable.Readiness.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/vdso.go",
"new_path": "pkg/sentry/kernel/vdso.go",
"diff": "@@ -17,7 +17,6 @@ package kernel\nimport (\n\"fmt\"\n- \"gvisor.dev/gvisor/pkg/binary\"\n\"gvisor.dev/gvisor/pkg/safemem\"\n\"gvisor.dev/gvisor/pkg/sentry/memmap\"\n\"gvisor.dev/gvisor/pkg/sentry/pgalloc\"\n@@ -28,6 +27,8 @@ import (\n//\n// They are exposed to the VDSO via a parameter page managed by VDSOParamPage,\n// which also includes a sequence counter.\n+//\n+// +marshal\ntype vdsoParams struct {\nmonotonicReady uint64\nmonotonicBaseCycles int64\n@@ -68,6 +69,13 @@ type VDSOParamPage struct {\n// checked in state_test_util tests, causing this field to change across\n// save / restore.\nseq uint64\n+\n+ // copyScratchBuffer is a temporary buffer used to marshal the params before\n+ // copying it to the real parameter page. The parameter page is typically\n+ // updated at a moderate frequency of ~O(seconds) throughout the lifetime of\n+ // the sentry, so reusing this buffer is a good tradeoff between memory\n+ // usage and the cost of allocation.\n+ copyScratchBuffer []byte\n}\n// NewVDSOParamPage returns a VDSOParamPage.\n@@ -79,7 +87,11 @@ type VDSOParamPage struct {\n// * VDSOParamPage must be the only writer to fr.\n// * mfp.MemoryFile().MapInternal(fr) must return a single safemem.Block.\nfunc NewVDSOParamPage(mfp pgalloc.MemoryFileProvider, fr memmap.FileRange) *VDSOParamPage {\n- return &VDSOParamPage{mfp: mfp, fr: fr}\n+ return &VDSOParamPage{\n+ mfp: mfp,\n+ fr: fr,\n+ copyScratchBuffer: make([]byte, (*vdsoParams)(nil).SizeBytes()),\n+ }\n}\n// access returns a mapping of the param page.\n@@ -133,7 +145,8 @@ func (v *VDSOParamPage) Write(f func() vdsoParams) error {\n// Get the new params.\np := f()\n- buf := binary.Marshal(nil, usermem.ByteOrder, p)\n+ buf := v.copyScratchBuffer[:p.SizeBytes()]\n+ p.MarshalUnsafe(buf)\n// Skip the sequence counter.\nif _, err := safemem.Copy(paramPage.DropFirst(8), safemem.BlockFromSafeSlice(buf)); err != nil {\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/vfs/BUILD",
"new_path": "pkg/sentry/vfs/BUILD",
"diff": "@@ -92,7 +92,6 @@ go_library(\nvisibility = [\"//pkg/sentry:internal\"],\ndeps = [\n\"//pkg/abi/linux\",\n- \"//pkg/binary\",\n\"//pkg/context\",\n\"//pkg/fd\",\n\"//pkg/fdnotifier\",\n"
}
] | Go | Apache License 2.0 | google/gvisor | Convert uses of the binary package in kernel to go-marshal.
PiperOrigin-RevId: 335077195 |
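Editor's note: the conversion above swaps reflection-based binary.Marshal calls for go-marshal's generated SizeBytes/MarshalUnsafe methods, so hot paths, notably the per-syscall seccomp check, can serialize into a reused scratch buffer without allocating. The sketch below imitates that pattern with a hand-written marshaller: the field layout mirrors struct seccomp_data, but sizeBytes/marshalTo are stand-ins for the generated go-marshal methods rather than gVisor's actual API, and little-endian is assumed as the native byte order.

    package main

    import (
        "encoding/binary"
        "fmt"
    )

    // seccompData mirrors the layout of struct seccomp_data.
    type seccompData struct {
        Nr                 int32
        Arch               uint32
        InstructionPointer uint64
        Args               [6]uint64
    }

    // sizeBytes plays the role of the generated SizeBytes method.
    func (d *seccompData) sizeBytes() int { return 4 + 4 + 8 + 6*8 }

    // marshalTo plays the role of the generated MarshalUnsafe: serialize into a
    // caller-provided buffer, with no allocation of its own.
    func (d *seccompData) marshalTo(buf []byte) {
        binary.LittleEndian.PutUint32(buf[0:], uint32(d.Nr))
        binary.LittleEndian.PutUint32(buf[4:], d.Arch)
        binary.LittleEndian.PutUint64(buf[8:], d.InstructionPointer)
        for i, a := range d.Args {
            binary.LittleEndian.PutUint64(buf[16+8*i:], a)
        }
    }

    func main() {
        d := seccompData{Nr: 1, Arch: 0xC000003E} // AUDIT_ARCH_X86_64
        buf := make([]byte, d.sizeBytes())        // a reused scratch buffer in the real code
        d.marshalTo(buf)
        fmt.Printf("% x\n", buf[:16])
    }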
259,858 | 02.10.2020 13:12:22 | 25,200 | 0cea6472188e056673fd0ab8ff06a25dbe44ca52 | Save addresses for "allowed" functions. | [
{
"change_type": "MODIFY",
"old_path": "tools/checkescape/checkescape.go",
"new_path": "tools/checkescape/checkescape.go",
"diff": "@@ -398,7 +398,37 @@ func loadObjdump() (map[string][]string, error) {\nreturn nil, err\n}\n+ // Identify calls by address or name. Note that this is also\n+ // constructed dynamically below, as we encounted the addresses.\n+ // This is because some of the functions (duffzero) may have\n+ // jump targets in the middle of the function itself.\n+ funcsAllowed := map[string]struct{}{\n+ \"runtime.duffzero\": struct{}{},\n+ \"runtime.duffcopy\": struct{}{},\n+ \"runtime.racefuncenter\": struct{}{},\n+ \"runtime.gcWriteBarrier\": struct{}{},\n+ \"runtime.retpolineAX\": struct{}{},\n+ \"runtime.retpolineBP\": struct{}{},\n+ \"runtime.retpolineBX\": struct{}{},\n+ \"runtime.retpolineCX\": struct{}{},\n+ \"runtime.retpolineDI\": struct{}{},\n+ \"runtime.retpolineDX\": struct{}{},\n+ \"runtime.retpolineR10\": struct{}{},\n+ \"runtime.retpolineR11\": struct{}{},\n+ \"runtime.retpolineR12\": struct{}{},\n+ \"runtime.retpolineR13\": struct{}{},\n+ \"runtime.retpolineR14\": struct{}{},\n+ \"runtime.retpolineR15\": struct{}{},\n+ \"runtime.retpolineR8\": struct{}{},\n+ \"runtime.retpolineR9\": struct{}{},\n+ \"runtime.retpolineSI\": struct{}{},\n+ \"runtime.stackcheck\": struct{}{},\n+ \"runtime.settls\": struct{}{},\n+ }\n+ addrsAllowed := make(map[string]struct{})\n+\n// Build the map.\n+ nextFunc := \"\" // For funcsAllowed.\nm := make(map[string][]string)\nr := bufio.NewReader(out)\nNextLine:\n@@ -407,6 +437,20 @@ NextLine:\nif err != nil && err != io.EOF {\nreturn nil, err\n}\n+ fields := strings.Fields(line)\n+\n+ // Is this an \"allowed\" function definition?\n+ if len(fields) >= 2 && fields[0] == \"TEXT\" {\n+ if _, ok := funcsAllowed[fields[1]]; ok {\n+ nextFunc = strings.TrimSuffix(fields[1], \"(SB)\")\n+ } else {\n+ nextFunc = \"\" // Don't record addresses.\n+ }\n+ }\n+ if nextFunc != \"\" && len(fields) > 2 {\n+ // Save the given address (in hex form, as it appears).\n+ addrsAllowed[fields[1]] = struct{}{}\n+ }\n// We recognize lines corresponding to actual code (not the\n// symbol name or other metadata) and annotate them if they\n@@ -416,53 +460,31 @@ NextLine:\n//\n// Lines look like this (including the first space):\n// gohacks_unsafe.go:33 0xa39 488b442408 MOVQ 0x8(SP), AX\n- if len(line) > 0 && line[0] == ' ' {\n- fields := strings.Fields(line)\n+ if len(fields) >= 5 && line[0] == ' ' {\nif !strings.Contains(fields[3], \"CALL\") {\ncontinue\n}\n- site := strings.TrimSpace(fields[0])\n- var callStr string // Friendly string.\n- if len(fields) > 5 {\n- callStr = strings.Join(fields[5:], \" \")\n- }\n- if len(callStr) == 0 {\n- // Just a raw call? is this asm?\n- callStr = strings.Join(fields[3:], \" \")\n- }\n+ site := fields[0]\n+ target := strings.TrimSuffix(fields[4], \"(SB)\")\n- // Ignore strings containing duffzero, which is just\n- // used by stack allocations for types that are large\n- // enough to warrant Duff's device.\n- if strings.Contains(callStr, \"runtime.duffzero\") ||\n- strings.Contains(callStr, \"runtime.duffcopy\") {\n+ // Ignore strings containing allowed functions.\n+ if _, ok := funcsAllowed[target]; ok {\ncontinue\n}\n-\n- // Ignore the racefuncenter call, which is used for\n- // race builds. This does not escape.\n- if strings.Contains(callStr, \"runtime.racefuncenter\") {\n+ if _, ok := addrsAllowed[target]; ok {\ncontinue\n}\n-\n- // Ignore the write barriers.\n- if strings.Contains(callStr, \"runtime.gcWriteBarrier\") {\n- continue\n+ if len(fields) > 5 {\n+ // This may be a future relocation. 
Some\n+ // objdump versions describe this differently.\n+ // If it contains any of the functions allowed\n+ // above as a string, we let it go.\n+ softTarget := strings.Join(fields[5:], \" \")\n+ for name := range funcsAllowed {\n+ if strings.Contains(softTarget, name) {\n+ continue NextLine\n}\n-\n- // Ignore retpolines.\n- if strings.Contains(callStr, \"runtime.retpoline\") {\n- continue\n}\n-\n- // Ignore stack sanity check (does not split).\n- if strings.Contains(callStr, \"runtime.stackcheck\") {\n- continue\n- }\n-\n- // Ignore tls functions.\n- if strings.Contains(callStr, \"runtime.settls\") {\n- continue\n}\n// Does this exist already?\n@@ -471,11 +493,11 @@ NextLine:\nexisting = make([]string, 0, 1)\n}\nfor _, other := range existing {\n- if callStr == other {\n+ if target == other {\ncontinue NextLine\n}\n}\n- existing = append(existing, callStr)\n+ existing = append(existing, target)\nm[site] = existing // Update.\n}\nif err == io.EOF {\n@@ -483,12 +505,25 @@ NextLine:\n}\n}\n+ // Zap any accidental false positives.\n+ final := make(map[string][]string)\n+ for site, calls := range m {\n+ filteredCalls := make([]string, 0, len(calls))\n+ for _, call := range calls {\n+ if _, ok := addrsAllowed[call]; ok {\n+ continue // Omit this call.\n+ }\n+ filteredCalls = append(filteredCalls, call)\n+ }\n+ final[site] = filteredCalls\n+ }\n+\n// Wait for the dump to finish.\nif err := cmd.Wait(); err != nil {\nreturn nil, err\n}\n- return m, nil\n+ return final, nil\n}\n// poser is a type that implements Pos.\n"
}
] | Go | Apache License 2.0 | google/gvisor | Save addresses for "allowed" functions.
PiperOrigin-RevId: 335086850 |
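Editor's note: this change teaches checkescape to recognize calls into allowed runtime helpers by address as well as by name, since functions like runtime.duffzero can be jumped into at offsets in the middle of the function body. A toy re-creation of that two-pass scan follows: remember the addresses listed under an allowed function's TEXT header, then skip CALL instructions whose target is one of those addresses. The input string and exact field positions are invented for the example and only loosely follow `go tool objdump` output.

    package main

    import (
        "bufio"
        "fmt"
        "strings"
    )

    func main() {
        dump := `TEXT runtime.duffzero(SB)
      duff.go:10 0x1000 48890424 MOVQ AX, 0(SP)
    TEXT main.fn(SB)
      main.go:5 0x2000 e8fbefffff CALL 0x1000
      main.go:6 0x2005 e8aa000000 CALL main.escapes(SB)
    `
        allowed := map[string]struct{}{"runtime.duffzero": {}}
        addrs := map[string]struct{}{}
        record := false
        sc := bufio.NewScanner(strings.NewReader(dump))
        for sc.Scan() {
            fields := strings.Fields(sc.Text())
            if len(fields) >= 2 && fields[0] == "TEXT" {
                // Only record addresses for functions on the allow list.
                _, record = allowed[strings.TrimSuffix(fields[1], "(SB)")]
                continue
            }
            if record && len(fields) > 2 {
                addrs[fields[1]] = struct{}{} // e.g. "0x1000"
            }
            if len(fields) >= 5 && strings.Contains(fields[3], "CALL") {
                target := strings.TrimSuffix(fields[4], "(SB)")
                if _, ok := addrs[target]; ok {
                    continue // a call into allowed code, matched by address
                }
                fmt.Println("escape-relevant call at", fields[0], "->", target)
            }
        }
    }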
259,990 | 02.10.2020 14:37:55 | 25,200 | 6321eccddce2b59976454799dcd25bc60ce5b0e8 | Treat absent "linux" section is empty "cgroupsPath" too | [
{
"change_type": "MODIFY",
"old_path": "runsc/container/container.go",
"new_path": "runsc/container/container.go",
"diff": "@@ -311,7 +311,10 @@ func New(conf *boot.Config, args Args) (*Container, error) {\nif isRoot(args.Spec) {\nlog.Debugf(\"Creating new sandbox for container %q\", args.ID)\n- if args.Spec.Linux != nil && args.Spec.Linux.CgroupsPath == \"\" {\n+ if args.Spec.Linux == nil {\n+ args.Spec.Linux = &specs.Linux{}\n+ }\n+ if args.Spec.Linux.CgroupsPath == \"\" {\nargs.Spec.Linux.CgroupsPath = \"/\" + args.ID\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Treat absent "linux" section is empty "cgroupsPath" too |
259,891 | 05.10.2020 15:47:30 | 25,200 | e73bb6d3029273a3adde3807a88b668380af07e9 | Remove reference to deleted script | [
{
"change_type": "MODIFY",
"old_path": "test/iptables/README.md",
"new_path": "test/iptables/README.md",
"diff": "# iptables Tests\n-iptables tests are run via `scripts/iptables_test.sh`.\n+iptables tests are run via `make iptables-tests`.\niptables requires raw socket support, so you must add the `--net-raw=true` flag\nto `/etc/docker/daemon.json` in order to use it.\n"
}
] | Go | Apache License 2.0 | google/gvisor | Remove reference to deleted script
PiperOrigin-RevId: 335516625 |
259,992 | 05.10.2020 15:48:06 | 25,200 | 9e9fec3a09308a5df616c86b665b848eba2ba6f7 | Enable more VFS2 tests
Updates | [
{
"change_type": "MODIFY",
"old_path": "pkg/test/testutil/testutil.go",
"new_path": "pkg/test/testutil/testutil.go",
"diff": "@@ -270,7 +270,7 @@ func RandomID(prefix string) string {\n// same name, sometimes between test runs the socket does not get cleaned up\n// quickly enough, causing container creation to fail.\nfunc RandomContainerID() string {\n- return RandomID(\"test-container-\")\n+ return RandomID(\"test-container\")\n}\n// Copy copies file from src to dst.\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/boot/loader_test.go",
"new_path": "runsc/boot/loader_test.go",
"diff": "@@ -264,7 +264,7 @@ type CreateMountTestcase struct {\nexpectedPaths []string\n}\n-func createMountTestcases(vfs2 bool) []*CreateMountTestcase {\n+func createMountTestcases() []*CreateMountTestcase {\ntestCases := []*CreateMountTestcase{\n&CreateMountTestcase{\n// Only proc.\n@@ -409,32 +409,26 @@ func createMountTestcases(vfs2 bool) []*CreateMountTestcase {\nDestination: \"/proc\",\nType: \"tmpfs\",\n},\n- // TODO (gvisor.dev/issue/1487): Re-add this case when sysfs supports\n- // MkDirAt in VFS2 (and remove the reduntant append).\n- // {\n- // Destination: \"/sys/bar\",\n- // Type: \"tmpfs\",\n- // },\n- //\n+ {\n+ Destination: \"/sys/bar\",\n+ Type: \"tmpfs\",\n+ },\n+\n{\nDestination: \"/tmp/baz\",\nType: \"tmpfs\",\n},\n},\n},\n- expectedPaths: []string{\"/proc\", \"/sys\" /* \"/sys/bar\" ,*/, \"/tmp\", \"/tmp/baz\"},\n+ expectedPaths: []string{\"/proc\", \"/sys\", \"/sys/bar\", \"/tmp\", \"/tmp/baz\"},\n}\n- if !vfs2 {\n- vfsCase.spec.Mounts = append(vfsCase.spec.Mounts, specs.Mount{Destination: \"/sys/bar\", Type: \"tmpfs\"})\n- vfsCase.expectedPaths = append(vfsCase.expectedPaths, \"/sys/bar\")\n- }\nreturn append(testCases, vfsCase)\n}\n// Test that MountNamespace can be created with various specs.\nfunc TestCreateMountNamespace(t *testing.T) {\n- for _, tc := range createMountTestcases(false /* vfs2 */) {\n+ for _, tc := range createMountTestcases() {\nt.Run(tc.name, func(t *testing.T) {\nconf := testConfig()\nctx := contexttest.Context(t)\n@@ -471,7 +465,7 @@ func TestCreateMountNamespace(t *testing.T) {\n// Test that MountNamespace can be created with various specs.\nfunc TestCreateMountNamespaceVFS2(t *testing.T) {\n- for _, tc := range createMountTestcases(true /* vfs2 */) {\n+ for _, tc := range createMountTestcases() {\nt.Run(tc.name, func(t *testing.T) {\nspec := testSpec()\nspec.Mounts = tc.spec.Mounts\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/container/container_test.go",
"new_path": "runsc/container/container_test.go",
"diff": "@@ -316,6 +316,7 @@ func configs(t *testing.T, opts ...configOption) map[string]*config.Config {\nreturn cs\n}\n+// TODO(gvisor.dev/issue/1624): Merge with configs when VFS2 is the default.\nfunc configsWithVFS2(t *testing.T, opts ...configOption) map[string]*config.Config {\nall := configs(t, opts...)\nfor key, value := range configs(t, opts...) {\n@@ -894,13 +895,15 @@ func TestKillPid(t *testing.T) {\n}\n}\n-// TestCheckpointRestore creates a container that continuously writes successive integers\n-// to a file. To test checkpoint and restore functionality, the container is\n-// checkpointed and the last number printed to the file is recorded. Then, it is restored in two\n-// new containers and the first number printed from these containers is checked. Both should\n-// be the next consecutive number after the last number from the checkpointed container.\n+// TestCheckpointRestore creates a container that continuously writes successive\n+// integers to a file. To test checkpoint and restore functionality, the\n+// container is checkpointed and the last number printed to the file is\n+// recorded. Then, it is restored in two new containers and the first number\n+// printed from these containers is checked. Both should be the next consecutive\n+// number after the last number from the checkpointed container.\nfunc TestCheckpointRestore(t *testing.T) {\n// Skip overlay because test requires writing to host file.\n+ // TODO(gvisor.dev/issue/1663): Add VFS when S/R support is added.\nfor name, conf := range configs(t, noOverlay...) {\nt.Run(name, func(t *testing.T) {\ndir, err := ioutil.TempDir(testutil.TmpDir(), \"checkpoint-test\")\n@@ -1062,6 +1065,7 @@ func TestCheckpointRestore(t *testing.T) {\n// with filesystem Unix Domain Socket use.\nfunc TestUnixDomainSockets(t *testing.T) {\n// Skip overlay because test requires writing to host file.\n+ // TODO(gvisor.dev/issue/1663): Add VFS when S/R support is added.\nfor name, conf := range configs(t, noOverlay...) {\nt.Run(name, func(t *testing.T) {\n// UDS path is limited to 108 chars for compatibility with older systems.\n@@ -1199,7 +1203,7 @@ func TestUnixDomainSockets(t *testing.T) {\n// recreated. Then it resumes the container, verify that the file gets created\n// again.\nfunc TestPauseResume(t *testing.T) {\n- for name, conf := range configs(t, noOverlay...) {\n+ for name, conf := range configsWithVFS2(t, noOverlay...) {\nt.Run(name, func(t *testing.T) {\ntmpDir, err := ioutil.TempDir(testutil.TmpDir(), \"lock\")\nif err != nil {\n"
},
{
"change_type": "MODIFY",
"old_path": "test/runner/defs.bzl",
"new_path": "test/runner/defs.bzl",
"diff": "@@ -203,7 +203,6 @@ def syscall_test(\ntags = platform_tags + tags,\n)\n- # TODO(gvisor.dev/issue/1487): Enable VFS2 overlay tests.\nif add_overlay:\n_syscall_test(\ntest = test,\n@@ -216,6 +215,23 @@ def syscall_test(\noverlay = True,\n)\n+ # TODO(gvisor.dev/issue/4407): Remove tags to enable VFS2 overlay tests.\n+ overlay_vfs2_tags = list(vfs2_tags)\n+ overlay_vfs2_tags.append(\"manual\")\n+ overlay_vfs2_tags.append(\"noguitar\")\n+ overlay_vfs2_tags.append(\"notap\")\n+ _syscall_test(\n+ test = test,\n+ shard_count = shard_count,\n+ size = size,\n+ platform = default_platform,\n+ use_tmpfs = use_tmpfs,\n+ add_uds_tree = add_uds_tree,\n+ tags = platforms[default_platform] + overlay_vfs2_tags,\n+ overlay = True,\n+ vfs2 = True,\n+ )\n+\nif add_hostinet:\n_syscall_test(\ntest = test,\n"
}
] | Go | Apache License 2.0 | google/gvisor | Enable more VFS2 tests
Updates #1487
PiperOrigin-RevId: 335516732 |
259,860 | 05.10.2020 19:03:04 | 25,200 | a1df7f2ed1be5cb170218c3e127a9b5f51a314fd | Simplify nil assignment in kcov. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/kcov.go",
"new_path": "pkg/sentry/kernel/kcov.go",
"diff": "@@ -215,10 +215,8 @@ func (kcov *Kcov) Clear() {\nfunc (kcov *Kcov) clearLocked() {\nkcov.mode = linux.KCOV_MODE_INIT\nkcov.owningTask = nil\n- if kcov.mappable != nil {\nkcov.mappable = nil\n}\n-}\n// OnTaskExit is called when the owning task exits. It is similar to\n// kcov.Clear(), except the memory mapping is not cleared, so that the same\n"
}
] | Go | Apache License 2.0 | google/gvisor | Simplify nil assignment in kcov.
PiperOrigin-RevId: 335548610 |
259,860 | 06.10.2020 00:18:55 | 25,200 | a57dc67b6386c4c75ba6b9b4cee11277a8a98898 | [vfs2] Don't leak reference from Mountnamespace.Root(). | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsbridge/vfs.go",
"new_path": "pkg/sentry/fsbridge/vfs.go",
"diff": "@@ -122,7 +122,7 @@ func NewVFSLookup(mntns *vfs.MountNamespace, root, workingDir vfs.VirtualDentry)\n// remainingTraversals is not configurable in VFS2, all callers are using the\n// default anyways.\nfunc (l *vfsLookup) OpenPath(ctx context.Context, pathname string, opts vfs.OpenOptions, _ *uint, resolveFinal bool) (File, error) {\n- vfsObj := l.mntns.Root().Mount().Filesystem().VirtualFilesystem()\n+ vfsObj := l.root.Mount().Filesystem().VirtualFilesystem()\ncreds := auth.CredentialsFromContext(ctx)\npath := fspath.Parse(pathname)\npop := &vfs.PathOperation{\n"
}
] | Go | Apache License 2.0 | google/gvisor | [vfs2] Don't leak reference from Mountnamespace.Root().
PiperOrigin-RevId: 335583637 |
259,990 | 06.10.2020 15:34:02 | 25,200 | a2a27eedf44303a60f580e03be617124ce35bb17 | Ignore errors in rootless and test modes | [
{
"change_type": "MODIFY",
"old_path": "runsc/container/container.go",
"new_path": "runsc/container/container.go",
"diff": "@@ -327,9 +327,18 @@ func New(conf *boot.Config, args Args) (*Container, error) {\nif cg != nil {\n// If there is cgroup config, install it before creating sandbox process.\nif err := cg.Install(args.Spec.Linux.Resources); err != nil {\n+ switch {\n+ case errors.Is(err, syscall.EROFS) && conf.TestOnlyAllowRunAsCurrentUserWithoutChroot:\n+ log.Warningf(\"Skipping cgroup configuration in test mode: %v\", err)\n+ cg = nil\n+ case errors.Is(err, syscall.EACCES) && conf.Rootless:\n+ log.Warningf(\"Skipping cgroup configuration in rootless mode: %v\", err)\n+ cg = nil\n+ default:\nreturn nil, fmt.Errorf(\"configuring cgroup: %v\", err)\n}\n}\n+ }\nif err := runInCgroup(cg, func() error {\nioFiles, specFile, err := c.createGoferProcess(args.Spec, conf, args.BundleDir, args.Attached)\nif err != nil {\n"
}
] | Go | Apache License 2.0 | google/gvisor | Ignore errors in rootless and test modes |
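Editor's note: the change above makes cgroup setup best-effort in two narrow cases by inspecting the failure with errors.Is: a read-only cgroup filesystem when running in the chroot-less test mode, and EACCES when running rootless. A standalone sketch of that pattern follows; install, testMode and rootless are placeholders rather than runsc's real config surface. As a general Go point, errors.Is only sees through wrappers built with %w (or an Unwrap method), so how the error is propagated matters for this kind of check.

    package main

    import (
        "errors"
        "fmt"
        "syscall"
    )

    // install stands in for Cgroup.Install and fails as if the cgroup
    // filesystem were mounted read-only.
    func install() error {
        return fmt.Errorf("configuring cgroup: %w", syscall.EROFS)
    }

    func main() {
        testMode, rootless := true, false // placeholder flags
        if err := install(); err != nil {
            switch {
            case errors.Is(err, syscall.EROFS) && testMode:
                fmt.Println("skipping cgroup configuration in test mode:", err)
            case errors.Is(err, syscall.EACCES) && rootless:
                fmt.Println("skipping cgroup configuration in rootless mode:", err)
            default:
                panic(err)
            }
        }
    }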
259,986 | 06.10.2020 16:15:45 | 25,200 | 95cac27d0d1ca93610f1bea554e3eb67c8a907d7 | Discard invalid Neighbor Solicitations
...per RFC 4861 s7.1.1. | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/header/ipv6.go",
"new_path": "pkg/tcpip/header/ipv6.go",
"diff": "@@ -309,14 +309,21 @@ func IsV6UnicastAddress(addr tcpip.Address) bool {\nreturn addr[0] != 0xff\n}\n+const solicitedNodeMulticastPrefix = \"\\xff\\x02\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x01\\xff\"\n+\n// SolicitedNodeAddr computes the solicited-node multicast address. This is\n// used for NDP. Described in RFC 4291. The argument must be a full-length IPv6\n// address.\nfunc SolicitedNodeAddr(addr tcpip.Address) tcpip.Address {\n- const solicitedNodeMulticastPrefix = \"\\xff\\x02\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x01\\xff\"\nreturn solicitedNodeMulticastPrefix + addr[len(addr)-3:]\n}\n+// IsSolicitedNodeAddr determines whether the address is a solicited-node\n+// multicast address.\n+func IsSolicitedNodeAddr(addr tcpip.Address) bool {\n+ return solicitedNodeMulticastPrefix == addr[:len(addr)-3]\n+}\n+\n// EthernetAdddressToModifiedEUI64IntoBuf populates buf with a modified EUI-64\n// from a 48-bit Ethernet/MAC address, as per RFC 4291 section 2.5.1.\n//\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/ipv6/icmp.go",
"new_path": "pkg/tcpip/network/ipv6/icmp.go",
"diff": "@@ -286,6 +286,17 @@ func (e *endpoint) handleICMP(r *stack.Route, pkt *stack.PacketBuffer, hasFragme\ne.linkAddrCache.AddLinkAddress(e.nic.ID(), r.RemoteAddress, sourceLinkAddr)\n}\n+ // As per RFC 4861 section 7.1.1:\n+ // A node MUST silently discard any received Neighbor Solicitation\n+ // messages that do not satisfy all of the following validity checks:\n+ // ...\n+ // - If the IP source address is the unspecified address, the IP\n+ // destination address is a solicited-node multicast address.\n+ if unspecifiedSource && !header.IsSolicitedNodeAddr(r.LocalAddress) {\n+ received.Invalid.Increment()\n+ return\n+ }\n+\n// ICMPv6 Neighbor Solicit messages are always sent to\n// specially crafted IPv6 multicast addresses. As a result, the\n// route we end up with here has as its LocalAddress such a\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/ipv6/ndp_test.go",
"new_path": "pkg/tcpip/network/ipv6/ndp_test.go",
"diff": "@@ -410,7 +410,7 @@ func TestNeighorSolicitationResponse(t *testing.T) {\nnaDst tcpip.Address\n}{\n{\n- name: \"Unspecified source to multicast destination\",\n+ name: \"Unspecified source to solicited-node multicast destination\",\nnsOpts: nil,\nnsSrcLinkAddr: remoteLinkAddr0,\nnsSrc: header.IPv6Any,\n@@ -437,11 +437,7 @@ func TestNeighorSolicitationResponse(t *testing.T) {\nnsSrcLinkAddr: remoteLinkAddr0,\nnsSrc: header.IPv6Any,\nnsDst: nicAddr,\n- nsInvalid: false,\n- naDstLinkAddr: remoteLinkAddr0,\n- naSolicited: false,\n- naSrc: nicAddr,\n- naDst: header.IPv6AllNodesMulticastAddress,\n+ nsInvalid: true,\n},\n{\nname: \"Unspecified source with source ll option to unicast destination\",\n"
}
] | Go | Apache License 2.0 | google/gvisor | Discard invalid Neighbor Solicitations
...per RFC 4861 s7.1.1.
PiperOrigin-RevId: 335742851 |
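Editor's note: the added check implements the RFC 4861 section 7.1.1 rule quoted in the diff: a Neighbor Solicitation from the unspecified source must be addressed to a solicited-node multicast address, otherwise it is counted as invalid and dropped. The snippet below isolates just the address test on raw 16-byte IPv6 addresses; it mirrors the prefix comparison behind header.IsSolicitedNodeAddr but works on plain strings (with an extra length guard) so it runs on its own.

    package main

    import "fmt"

    // ff02::1:ffXX:XXXX; the first 13 bytes are fixed and the last 3 bytes
    // are taken from the solicited address.
    const solicitedNodePrefix = "\xff\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\xff"

    func solicitedNodeAddr(addr string) string {
        return solicitedNodePrefix + addr[len(addr)-3:]
    }

    func isSolicitedNodeAddr(addr string) bool {
        return len(addr) == 16 && addr[:13] == solicitedNodePrefix
    }

    func main() {
        // fe80::211:22ff:fe33:4455 as raw bytes.
        target := "\xfe\x80\x00\x00\x00\x00\x00\x00\x02\x11\x22\xff\xfe\x33\x44\x55"
        dst := solicitedNodeAddr(target)
        fmt.Println(isSolicitedNodeAddr(dst))    // true
        fmt.Println(isSolicitedNodeAddr(target)) // false: a plain link-local unicast
    }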
259,860 | 07.10.2020 11:43:31 | 25,200 | 7e55ee14eb9035ed737eb7ee4ddcb0fdf40feed6 | Fix text processing in bazel build command.
The extraction of the build target was overfitted before, making build_cmd fail
in some environments. | [
{
"change_type": "MODIFY",
"old_path": "tools/bazel.mk",
"new_path": "tools/bazel.mk",
"diff": "@@ -152,8 +152,9 @@ build_cmd = docker exec $(FULL_DOCKER_EXEC_OPTIONS) $(DOCKER_NAME) sh -o pipefai\nbuild_paths = $(build_cmd) 2>&1 \\\n| tee /proc/self/fd/2 \\\n- | grep -E \"^ bazel-bin/\" \\\n- | tr -d '\\r' \\\n+ | grep \" bazel-bin/\" \\\n+ | sed \"s/ /\\n/g\" \\\n+ | strings -n 10 \\\n| awk '{$$1=$$1};1' \\\n| xargs -n 1 -I {} sh -c \"$(1)\"\n"
}
] | Go | Apache License 2.0 | google/gvisor | Fix text processing in bazel build command.
The extraction of the build target was overfitted before, making build_cmd fail
in some environments.
PiperOrigin-RevId: 335916651 |
259,858 | 07.10.2020 12:06:20 | 25,200 | ecf9a7ef09bd5ef722a0b6438a8e9770ca904475 | Add precise synchronization to KVM.
By using TSC scaling as a hack, we can trick the kernel into setting an offset
of exactly zero. Huzzah! | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/kvm/BUILD",
"new_path": "pkg/sentry/platform/kvm/BUILD",
"diff": "@@ -79,6 +79,7 @@ go_test(\n\"//pkg/sentry/platform/kvm/testutil\",\n\"//pkg/sentry/platform/ring0\",\n\"//pkg/sentry/platform/ring0/pagetables\",\n+ \"//pkg/sentry/time\",\n\"//pkg/usermem\",\n],\n)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/kvm/kvm_const.go",
"new_path": "pkg/sentry/platform/kvm/kvm_const.go",
"diff": "@@ -26,11 +26,14 @@ const (\n_KVM_RUN = 0xae80\n_KVM_NMI = 0xae9a\n_KVM_CHECK_EXTENSION = 0xae03\n+ _KVM_GET_TSC_KHZ = 0xaea3\n+ _KVM_SET_TSC_KHZ = 0xaea2\n_KVM_INTERRUPT = 0x4004ae86\n_KVM_SET_MSRS = 0x4008ae89\n_KVM_SET_USER_MEMORY_REGION = 0x4020ae46\n_KVM_SET_REGS = 0x4090ae82\n_KVM_SET_SREGS = 0x4138ae84\n+ _KVM_GET_MSRS = 0xc008ae88\n_KVM_GET_REGS = 0x8090ae81\n_KVM_GET_SREGS = 0x8138ae83\n_KVM_GET_SUPPORTED_CPUID = 0xc008ae05\n@@ -80,11 +83,14 @@ const (\n)\n// KVM hypercall list.\n+//\n// Canonical list of hypercalls supported.\nconst (\n// On amd64, it uses 'HLT' to leave the guest.\n+ //\n// Unlike amd64, arm64 can only uses mmio_exit/psci to leave the guest.\n- // _KVM_HYPERCALL_VMEXIT is only used on Arm64 for now.\n+ //\n+ // _KVM_HYPERCALL_VMEXIT is only used on arm64 for now.\n_KVM_HYPERCALL_VMEXIT int = iota\n_KVM_HYPERCALL_MAX\n)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/kvm/kvm_test.go",
"new_path": "pkg/sentry/platform/kvm/kvm_test.go",
"diff": "@@ -27,6 +27,7 @@ import (\n\"gvisor.dev/gvisor/pkg/sentry/platform/kvm/testutil\"\n\"gvisor.dev/gvisor/pkg/sentry/platform/ring0\"\n\"gvisor.dev/gvisor/pkg/sentry/platform/ring0/pagetables\"\n+ ktime \"gvisor.dev/gvisor/pkg/sentry/time\"\n\"gvisor.dev/gvisor/pkg/usermem\"\n)\n@@ -442,6 +443,22 @@ func TestWrongVCPU(t *testing.T) {\n})\n}\n+func TestRdtsc(t *testing.T) {\n+ var i int // Iteration count.\n+ kvmTest(t, nil, func(c *vCPU) bool {\n+ start := ktime.Rdtsc()\n+ bluepill(c)\n+ guest := ktime.Rdtsc()\n+ redpill()\n+ end := ktime.Rdtsc()\n+ if start > guest || guest > end {\n+ t.Errorf(\"inconsistent time: start=%d, guest=%d, end=%d\", start, guest, end)\n+ }\n+ i++\n+ return i < 100\n+ })\n+}\n+\nfunc BenchmarkApplicationSyscall(b *testing.B) {\nvar (\ni int // Iteration includes machine.Get() / machine.Put().\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/kvm/machine_amd64.go",
"new_path": "pkg/sentry/platform/kvm/machine_amd64.go",
"diff": "@@ -18,14 +18,17 @@ package kvm\nimport (\n\"fmt\"\n+ \"math/big\"\n\"reflect\"\n\"runtime/debug\"\n\"syscall\"\n+ \"gvisor.dev/gvisor/pkg/cpuid\"\n\"gvisor.dev/gvisor/pkg/sentry/arch\"\n\"gvisor.dev/gvisor/pkg/sentry/platform\"\n\"gvisor.dev/gvisor/pkg/sentry/platform/ring0\"\n\"gvisor.dev/gvisor/pkg/sentry/platform/ring0/pagetables\"\n+ ktime \"gvisor.dev/gvisor/pkg/sentry/time\"\n\"gvisor.dev/gvisor/pkg/usermem\"\n)\n@@ -167,6 +170,133 @@ func (c *vCPU) initArchState() error {\nreturn c.setSystemTime()\n}\n+// bitsForScaling returns the bits available for storing the fraction component\n+// of the TSC scaling ratio. This allows us to replicate the (bad) math done by\n+// the kernel below in scaledTSC, and ensure we can compute an exact zero\n+// offset in setSystemTime.\n+//\n+// These constants correspond to kvm_tsc_scaling_ratio_frac_bits.\n+var bitsForScaling = func() int64 {\n+ fs := cpuid.HostFeatureSet()\n+ if fs.Intel() {\n+ return 48 // See vmx.c (kvm sources).\n+ } else if fs.AMD() {\n+ return 32 // See svm.c (svm sources).\n+ } else {\n+ return 63 // Unknown: theoretical maximum.\n+ }\n+}()\n+\n+// scaledTSC returns the host TSC scaled by the given frequency.\n+//\n+// This assumes a current frequency of 1. We require only the unitless ratio of\n+// rawFreq to some current frequency. See setSystemTime for context.\n+//\n+// The kernel math guarantees that all bits of the multiplication and division\n+// will be correctly preserved and applied. However, it is not possible to\n+// actually store the ratio correctly. So we need to use the same schema in\n+// order to calculate the scaled frequency and get the same result.\n+//\n+// We can assume that the current frequency is (1), so we are calculating a\n+// strict inverse of this value. This simplifies this function considerably.\n+//\n+// Roughly, the returned value \"scaledTSC\" will have:\n+// scaledTSC/hostTSC == 1/rawFreq\n+//\n+//go:nosplit\n+func scaledTSC(rawFreq uintptr) int64 {\n+ scale := int64(1 << bitsForScaling)\n+ ratio := big.NewInt(scale / int64(rawFreq))\n+ ratio.Mul(ratio, big.NewInt(int64(ktime.Rdtsc())))\n+ ratio.Div(ratio, big.NewInt(scale))\n+ return ratio.Int64()\n+}\n+\n+// setSystemTime sets the vCPU to the system time.\n+func (c *vCPU) setSystemTime() error {\n+ // First, scale down the clock frequency to the lowest value allowed by\n+ // the API itself. How low we can go depends on the underlying\n+ // hardware, but it is typically ~1/2^48 for Intel, ~1/2^32 for AMD.\n+ // Even the lower bound here will take a 4GHz frequency down to 1Hz,\n+ // meaning that everything should be able to handle a Khz setting of 1\n+ // with bits to spare.\n+ //\n+ // Note that reducing the clock does not typically require special\n+ // capabilities as it is emulated in KVM. We don't actually use this\n+ // capability, but it means that this method should be robust to\n+ // different hardware configurations.\n+ rawFreq, err := c.getTSCFreq()\n+ if err != nil {\n+ return c.setSystemTimeLegacy()\n+ }\n+ if err := c.setTSCFreq(1); err != nil {\n+ return c.setSystemTimeLegacy()\n+ }\n+\n+ // Always restore the original frequency.\n+ defer func() {\n+ if err := c.setTSCFreq(rawFreq); err != nil {\n+ panic(err.Error())\n+ }\n+ }()\n+\n+ // Attempt to set the system time in this compressed world. 
The\n+ // calculation for offset normally looks like:\n+ //\n+ // offset = target_tsc - kvm_scale_tsc(vcpu, rdtsc());\n+ //\n+ // So as long as the kvm_scale_tsc component is constant before and\n+ // after the call to set the TSC value (and it is passes as the\n+ // target_tsc), we will compute an offset value of zero.\n+ //\n+ // This is effectively cheating to make our \"setSystemTime\" call so\n+ // unbelievably, incredibly fast that we do it \"instantly\" and all the\n+ // calculations result in an offset of zero.\n+ lastTSC := scaledTSC(rawFreq)\n+ for {\n+ if err := c.setTSC(uint64(lastTSC)); err != nil {\n+ return err\n+ }\n+ nextTSC := scaledTSC(rawFreq)\n+ if lastTSC == nextTSC {\n+ return nil\n+ }\n+ lastTSC = nextTSC // Try again.\n+ }\n+}\n+\n+// setSystemTimeLegacy calibrates and sets an approximate system time.\n+func (c *vCPU) setSystemTimeLegacy() error {\n+ const minIterations = 10\n+ minimum := uint64(0)\n+ for iter := 0; ; iter++ {\n+ // Try to set the TSC to an estimate of where it will be\n+ // on the host during a \"fast\" system call iteration.\n+ start := uint64(ktime.Rdtsc())\n+ if err := c.setTSC(start + (minimum / 2)); err != nil {\n+ return err\n+ }\n+ // See if this is our new minimum call time. Note that this\n+ // serves two functions: one, we make sure that we are\n+ // accurately predicting the offset we need to set. Second, we\n+ // don't want to do the final set on a slow call, which could\n+ // produce a really bad result.\n+ end := uint64(ktime.Rdtsc())\n+ if end < start {\n+ continue // Totally bogus: unstable TSC?\n+ }\n+ current := end - start\n+ if current < minimum || iter == 0 {\n+ minimum = current // Set our new minimum.\n+ }\n+ // Is this past minIterations and within ~10% of minimum?\n+ upperThreshold := (((minimum << 3) + minimum) >> 3)\n+ if iter >= minIterations && current <= upperThreshold {\n+ return nil\n+ }\n+ }\n+}\n+\n// nonCanonical generates a canonical address return.\n//\n//go:nosplit\n@@ -347,19 +477,17 @@ func availableRegionsForSetMem() (phyRegions []physicalRegion) {\nreturn physicalRegions\n}\n-var execRegions []region\n-\n-func init() {\n+var execRegions = func() (regions []region) {\napplyVirtualRegions(func(vr virtualRegion) {\nif excludeVirtualRegion(vr) || vr.filename == \"[vsyscall]\" {\nreturn\n}\n-\nif vr.accessType.Execute {\n- execRegions = append(execRegions, vr.region)\n+ regions = append(regions, vr.region)\n}\n})\n-}\n+ return\n+}()\nfunc (m *machine) mapUpperHalf(pageTable *pagetables.PageTables) {\nfor _, r := range execRegions {\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/kvm/machine_amd64_unsafe.go",
"new_path": "pkg/sentry/platform/kvm/machine_amd64_unsafe.go",
"diff": "@@ -23,7 +23,6 @@ import (\n\"unsafe\"\n\"gvisor.dev/gvisor/pkg/abi/linux\"\n- \"gvisor.dev/gvisor/pkg/sentry/time\"\n)\n// loadSegments copies the current segments.\n@@ -61,77 +60,47 @@ func (c *vCPU) setCPUID() error {\nreturn nil\n}\n-// setSystemTime sets the TSC for the vCPU.\n+// getTSCFreq gets the TSC frequency.\n//\n-// This has to make the call many times in order to minimize the intrinsic\n-// error in the offset. Unfortunately KVM does not expose a relative offset via\n-// the API, so this is an approximation. We do this via an iterative algorithm.\n-// This has the advantage that it can generally deal with highly variable\n-// system call times and should converge on the correct offset.\n-func (c *vCPU) setSystemTime() error {\n- const (\n- _MSR_IA32_TSC = 0x00000010\n- calibrateTries = 10\n- )\n- registers := modelControlRegisters{\n- nmsrs: 1,\n+// If mustSucceed is true, then this function panics on error.\n+func (c *vCPU) getTSCFreq() (uintptr, error) {\n+ rawFreq, _, errno := syscall.RawSyscall(\n+ syscall.SYS_IOCTL,\n+ uintptr(c.fd),\n+ _KVM_GET_TSC_KHZ,\n+ 0 /* ignored */)\n+ if errno != 0 {\n+ return 0, errno\n}\n- registers.entries[0] = modelControlRegister{\n- index: _MSR_IA32_TSC,\n+ return rawFreq, nil\n}\n- target := uint64(^uint32(0))\n- for done := 0; done < calibrateTries; {\n- start := uint64(time.Rdtsc())\n- registers.entries[0].data = start + target\n+\n+// setTSCFreq sets the TSC frequency.\n+func (c *vCPU) setTSCFreq(freq uintptr) error {\nif _, _, errno := syscall.RawSyscall(\nsyscall.SYS_IOCTL,\nuintptr(c.fd),\n- _KVM_SET_MSRS,\n- uintptr(unsafe.Pointer(®isters))); errno != 0 {\n- return fmt.Errorf(\"error setting system time: %v\", errno)\n- }\n- // See if this is our new minimum call time. Note that this\n- // serves two functions: one, we make sure that we are\n- // accurately predicting the offset we need to set. Second, we\n- // don't want to do the final set on a slow call, which could\n- // produce a really bad result. So we only count attempts\n- // within +/- 6.25% of our minimum as an attempt.\n- end := uint64(time.Rdtsc())\n- if end < start {\n- continue // Totally bogus.\n- }\n- half := (end - start) / 2\n- if half < target {\n- target = half\n- }\n- if (half - target) < target/8 {\n- done++\n- }\n+ _KVM_SET_TSC_KHZ,\n+ freq /* khz */); errno != 0 {\n+ return fmt.Errorf(\"error setting TSC frequency: %v\", errno)\n}\nreturn nil\n}\n-// setSignalMask sets the vCPU signal mask.\n-//\n-// This must be called prior to running the vCPU.\n-func (c *vCPU) setSignalMask() error {\n- // The layout of this structure implies that it will not necessarily be\n- // the same layout chosen by the Go compiler. 
It gets fudged here.\n- var data struct {\n- length uint32\n- mask1 uint32\n- mask2 uint32\n- _ uint32\n- }\n- data.length = 8 // Fixed sigset size.\n- data.mask1 = ^uint32(bounceSignalMask & 0xffffffff)\n- data.mask2 = ^uint32(bounceSignalMask >> 32)\n+// setTSC sets the TSC value.\n+func (c *vCPU) setTSC(value uint64) error {\n+ const _MSR_IA32_TSC = 0x00000010\n+ registers := modelControlRegisters{\n+ nmsrs: 1,\n+ }\n+ registers.entries[0].index = _MSR_IA32_TSC\n+ registers.entries[0].data = value\nif _, _, errno := syscall.RawSyscall(\nsyscall.SYS_IOCTL,\nuintptr(c.fd),\n- _KVM_SET_SIGNAL_MASK,\n- uintptr(unsafe.Pointer(&data))); errno != 0 {\n- return fmt.Errorf(\"error setting signal mask: %v\", errno)\n+ _KVM_SET_MSRS,\n+ uintptr(unsafe.Pointer(®isters))); errno != 0 {\n+ return fmt.Errorf(\"error setting tsc: %v\", errno)\n}\nreturn nil\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/kvm/machine_arm64_unsafe.go",
"new_path": "pkg/sentry/platform/kvm/machine_arm64_unsafe.go",
"diff": "@@ -191,42 +191,6 @@ func (c *vCPU) getOneRegister(reg *kvmOneReg) error {\nreturn nil\n}\n-// setCPUID sets the CPUID to be used by the guest.\n-func (c *vCPU) setCPUID() error {\n- return nil\n-}\n-\n-// setSystemTime sets the TSC for the vCPU.\n-func (c *vCPU) setSystemTime() error {\n- return nil\n-}\n-\n-// setSignalMask sets the vCPU signal mask.\n-//\n-// This must be called prior to running the vCPU.\n-func (c *vCPU) setSignalMask() error {\n- // The layout of this structure implies that it will not necessarily be\n- // the same layout chosen by the Go compiler. It gets fudged here.\n- var data struct {\n- length uint32\n- mask1 uint32\n- mask2 uint32\n- _ uint32\n- }\n- data.length = 8 // Fixed sigset size.\n- data.mask1 = ^uint32(bounceSignalMask & 0xffffffff)\n- data.mask2 = ^uint32(bounceSignalMask >> 32)\n- if _, _, errno := syscall.RawSyscall(\n- syscall.SYS_IOCTL,\n- uintptr(c.fd),\n- _KVM_SET_SIGNAL_MASK,\n- uintptr(unsafe.Pointer(&data))); errno != 0 {\n- return fmt.Errorf(\"error setting signal mask: %v\", errno)\n- }\n-\n- return nil\n-}\n-\n// SwitchToUser unpacks architectural-details.\nfunc (c *vCPU) SwitchToUser(switchOpts ring0.SwitchOpts, info *arch.SignalInfo) (usermem.AccessType, error) {\n// Check for canonical addresses.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/kvm/machine_unsafe.go",
"new_path": "pkg/sentry/platform/kvm/machine_unsafe.go",
"diff": "@@ -143,3 +143,29 @@ func (c *vCPU) waitUntilNot(state uint32) {\npanic(\"futex wait error\")\n}\n}\n+\n+// setSignalMask sets the vCPU signal mask.\n+//\n+// This must be called prior to running the vCPU.\n+func (c *vCPU) setSignalMask() error {\n+ // The layout of this structure implies that it will not necessarily be\n+ // the same layout chosen by the Go compiler. It gets fudged here.\n+ var data struct {\n+ length uint32\n+ mask1 uint32\n+ mask2 uint32\n+ _ uint32\n+ }\n+ data.length = 8 // Fixed sigset size.\n+ data.mask1 = ^uint32(bounceSignalMask & 0xffffffff)\n+ data.mask2 = ^uint32(bounceSignalMask >> 32)\n+ if _, _, errno := syscall.RawSyscall(\n+ syscall.SYS_IOCTL,\n+ uintptr(c.fd),\n+ _KVM_SET_SIGNAL_MASK,\n+ uintptr(unsafe.Pointer(&data))); errno != 0 {\n+ return fmt.Errorf(\"error setting signal mask: %v\", errno)\n+ }\n+\n+ return nil\n+}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add precise synchronization to KVM.
By using TSC scaling as a hack, we can trick the kernel into setting an offset
of exactly zero. Huzzah!
PiperOrigin-RevId: 335922019 |
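Editor's note on the trick described above: instead of calibrating an offset, the vCPU's TSC frequency is scaled down to 1 kHz via KVM_SET_TSC_KHZ, the host TSC is scaled by the same fixed-point ratio KVM uses internally, and that value is written back, so the kernel's own computation offset = target_tsc - kvm_scale_tsc(vcpu, rdtsc()) lands on exactly zero. The fixed-point step is the delicate part; the sketch below reproduces the shape of scaledTSC from the diff with made-up inputs, showing how the number of fraction bits (48 on Intel, 32 on AMD per the kvm sources cited in the diff) changes the rounding, which is why the sentry replicates the kernel's math instead of dividing directly.

    package main

    import (
        "fmt"
        "math/big"
    )

    // scaled mimics scaledTSC: multiply the host TSC by the fixed-point ratio
    // 1/rawFreq expressed with fracBits fraction bits, the same shape as KVM's
    // kvm_scale_tsc. hostTSC and rawFreq are made-up values here; the real code
    // reads them via RDTSC and KVM_GET_TSC_KHZ.
    func scaled(hostTSC, rawFreq int64, fracBits uint) int64 {
        scale := int64(1) << fracBits
        ratio := big.NewInt(scale / rawFreq)
        ratio.Mul(ratio, big.NewInt(hostTSC))
        ratio.Div(ratio, big.NewInt(scale))
        return ratio.Int64()
    }

    func main() {
        const rawFreq = 3000000 // pretend KVM reports ~3 GHz, in kHz
        host := int64(123456789012)
        fmt.Println(scaled(host, rawFreq, 48)) // Intel-style ratio
        fmt.Println(scaled(host, rawFreq, 32)) // AMD-style ratio
        fmt.Println(host / rawFreq)            // naive division, for comparison
    }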
259,907 | 07.10.2020 12:30:47 | 25,200 | 85a58d110f13298cb2afcd18a79049125ea86b73 | [runtime-tests] Exclude failing test due to expired cert. | [
{
"change_type": "MODIFY",
"old_path": "test/runtimes/exclude/java11.csv",
"new_path": "test/runtimes/exclude/java11.csv",
"diff": "@@ -57,6 +57,7 @@ java/nio/channels/SocketChannel/SocketOptionTests.java,b/77965901,\njava/nio/channels/spi/SelectorProvider/inheritedChannel/InheritedChannelTest.java,,Fails in Docker\njava/rmi/activation/Activatable/extLoadedImpl/ext.sh,,\njava/rmi/transport/checkLeaseInfoLeak/CheckLeaseLeak.java,,\n+java/security/cert/PolicyNode/GetPolicyQualifiers.java,b/170263154,Kokoro executor cert expired\njava/text/Format/NumberFormat/CurrencyFormat.java,,Fails in Docker\njava/text/Format/NumberFormat/CurrencyFormat.java,,Fails in Docker\njava/util/Calendar/JapaneseEraNameTest.java,,\n"
}
] | Go | Apache License 2.0 | google/gvisor | [runtime-tests] Exclude failing test due to expired cert.
PiperOrigin-RevId: 335927821 |
259,951 | 08.10.2020 00:54:05 | 25,200 | 0c3134028d63774914f560d51588b11a3ecfed5e | Change IPv6 reassembly timeout to 60s
It was originally set to 30s for IPv6 (same as IPv4) but this is not
what RFC 8200 prescribes. Linux also defaults to 60s [1].
[1] | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/fragmentation/fragmentation.go",
"new_path": "pkg/tcpip/network/fragmentation/fragmentation.go",
"diff": "@@ -29,9 +29,6 @@ import (\n)\nconst (\n- // DefaultReassembleTimeout is based on the linux stack: net.ipv4.ipfrag_time.\n- DefaultReassembleTimeout = 30 * time.Second\n-\n// HighFragThreshold is the threshold at which we start trimming old\n// fragmented packets. Linux uses a default value of 4 MB. See\n// net.ipv4.ipfrag_high_thresh for more information.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/fragmentation/fragmentation_test.go",
"new_path": "pkg/tcpip/network/fragmentation/fragmentation_test.go",
"diff": "@@ -26,6 +26,10 @@ import (\n\"gvisor.dev/gvisor/pkg/tcpip/network/testutil\"\n)\n+// reassembleTimeout is dummy timeout used for testing, where the clock never\n+// advances.\n+const reassembleTimeout = 1\n+\n// vv is a helper to build VectorisedView from different strings.\nfunc vv(size int, pieces ...string) buffer.VectorisedView {\nviews := make([]buffer.View, len(pieces))\n@@ -98,7 +102,7 @@ var processTestCases = []struct {\nfunc TestFragmentationProcess(t *testing.T) {\nfor _, c := range processTestCases {\nt.Run(c.comment, func(t *testing.T) {\n- f := NewFragmentation(minBlockSize, 1024, 512, DefaultReassembleTimeout, &faketime.NullClock{})\n+ f := NewFragmentation(minBlockSize, 1024, 512, reassembleTimeout, &faketime.NullClock{})\nfirstFragmentProto := c.in[0].proto\nfor i, in := range c.in {\nvv, proto, done, err := f.Process(in.id, in.first, in.last, in.more, in.proto, in.vv)\n@@ -253,7 +257,7 @@ func TestReassemblingTimeout(t *testing.T) {\n}\nfunc TestMemoryLimits(t *testing.T) {\n- f := NewFragmentation(minBlockSize, 3, 1, DefaultReassembleTimeout, &faketime.NullClock{})\n+ f := NewFragmentation(minBlockSize, 3, 1, reassembleTimeout, &faketime.NullClock{})\n// Send first fragment with id = 0.\nf.Process(FragmentID{ID: 0}, 0, 0, true, 0xFF, vv(1, \"0\"))\n// Send first fragment with id = 1.\n@@ -277,7 +281,7 @@ func TestMemoryLimits(t *testing.T) {\n}\nfunc TestMemoryLimitsIgnoresDuplicates(t *testing.T) {\n- f := NewFragmentation(minBlockSize, 1, 0, DefaultReassembleTimeout, &faketime.NullClock{})\n+ f := NewFragmentation(minBlockSize, 1, 0, reassembleTimeout, &faketime.NullClock{})\n// Send first fragment with id = 0.\nf.Process(FragmentID{}, 0, 0, true, 0xFF, vv(1, \"0\"))\n// Send the same packet again.\n@@ -372,7 +376,7 @@ func TestErrors(t *testing.T) {\nfor _, test := range tests {\nt.Run(test.name, func(t *testing.T) {\n- f := NewFragmentation(test.blockSize, HighFragThreshold, LowFragThreshold, DefaultReassembleTimeout, &faketime.NullClock{})\n+ f := NewFragmentation(test.blockSize, HighFragThreshold, LowFragThreshold, reassembleTimeout, &faketime.NullClock{})\n_, _, done, err := f.Process(FragmentID{}, test.first, test.last, test.more, 0, vv(len(test.data), test.data))\nif !errors.Is(err, test.err) {\nt.Errorf(\"got Process(_, %d, %d, %t, _, %q) = (_, _, _, %v), want = (_, _, _, %v)\", test.first, test.last, test.more, test.data, err, test.err)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/ipv4/ipv4.go",
"new_path": "pkg/tcpip/network/ipv4/ipv4.go",
"diff": "@@ -18,6 +18,7 @@ package ipv4\nimport (\n\"fmt\"\n\"sync/atomic\"\n+ \"time\"\n\"gvisor.dev/gvisor/pkg/sync\"\n\"gvisor.dev/gvisor/pkg/tcpip\"\n@@ -30,6 +31,15 @@ import (\n)\nconst (\n+ // As per RFC 791 section 3.2:\n+ // The current recommendation for the initial timer setting is 15 seconds.\n+ // This may be changed as experience with this protocol accumulates.\n+ //\n+ // Considering that it is an old recommendation, we use the same reassembly\n+ // timeout that linux defines, which is 30 seconds:\n+ // https://github.com/torvalds/linux/blob/47ec5303d73ea344e84f46660fff693c57641386/include/net/ip.h#L138\n+ reassembleTimeout = 30 * time.Second\n+\n// ProtocolNumber is the ipv4 protocol number.\nProtocolNumber = header.IPv4ProtocolNumber\n@@ -785,7 +795,7 @@ func NewProtocol(s *stack.Stack) stack.NetworkProtocol {\nids: ids,\nhashIV: hashIV,\ndefaultTTL: DefaultTTL,\n- fragmentation: fragmentation.NewFragmentation(fragmentblockSize, fragmentation.HighFragThreshold, fragmentation.LowFragThreshold, fragmentation.DefaultReassembleTimeout, s.Clock()),\n+ fragmentation: fragmentation.NewFragmentation(fragmentblockSize, fragmentation.HighFragThreshold, fragmentation.LowFragThreshold, reassembleTimeout, s.Clock()),\n}\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/ipv6/ipv6.go",
"new_path": "pkg/tcpip/network/ipv6/ipv6.go",
"diff": "@@ -21,6 +21,7 @@ import (\n\"hash/fnv\"\n\"sort\"\n\"sync/atomic\"\n+ \"time\"\n\"gvisor.dev/gvisor/pkg/sync\"\n\"gvisor.dev/gvisor/pkg/tcpip\"\n@@ -33,6 +34,15 @@ import (\n)\nconst (\n+ // As per RFC 8200 section 4.5:\n+ // If insufficient fragments are received to complete reassembly of a packet\n+ // within 60 seconds of the reception of the first-arriving fragment of that\n+ // packet, reassembly of that packet must be abandoned.\n+ //\n+ // Linux also uses 60 seconds for reassembly timeout:\n+ // https://github.com/torvalds/linux/blob/47ec5303d73ea344e84f46660fff693c57641386/include/net/ipv6.h#L456\n+ reassembleTimeout = 60 * time.Second\n+\n// ProtocolNumber is the ipv6 protocol number.\nProtocolNumber = header.IPv6ProtocolNumber\n@@ -1459,7 +1469,7 @@ func NewProtocolWithOptions(opts Options) stack.NetworkProtocolFactory {\nreturn func(s *stack.Stack) stack.NetworkProtocol {\np := &protocol{\nstack: s,\n- fragmentation: fragmentation.NewFragmentation(header.IPv6FragmentExtHdrFragmentOffsetBytesPerUnit, fragmentation.HighFragThreshold, fragmentation.LowFragThreshold, fragmentation.DefaultReassembleTimeout, s.Clock()),\n+ fragmentation: fragmentation.NewFragmentation(header.IPv6FragmentExtHdrFragmentOffsetBytesPerUnit, fragmentation.HighFragThreshold, fragmentation.LowFragThreshold, reassembleTimeout, s.Clock()),\nids: ids,\nhashIV: hashIV,\n"
}
] | Go | Apache License 2.0 | google/gvisor | Change IPv6 reassembly timeout to 60s
It was originally set to 30s for IPv6 (same as IPv4) but this is not
what RFC 8200 prescribes. Linux also defaults to 60s [1].
[1] https://github.com/torvalds/linux/blob/47ec5303d73ea344e84f46660fff693c57641386/include/net/ipv6.h#L456
PiperOrigin-RevId: 336034636 |
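For illustration, a tiny self-contained Go sketch of what the 60-second rule from RFC 8200 section 4.5 means for a partially reassembled packet; the shouldAbandon helper is hypothetical and not part of the gVisor tree:

package main

import (
	"fmt"
	"time"
)

// shouldAbandon is a hypothetical helper: it answers whether reassembly of a
// packet must be abandoned because the first-arriving fragment was received
// longer than the reassembly timeout ago (60s for IPv6 per RFC 8200 4.5).
func shouldAbandon(firstFragmentAt, now time.Time, timeout time.Duration) bool {
	return now.Sub(firstFragmentAt) > timeout
}

func main() {
	const ipv6ReassembleTimeout = 60 * time.Second // value adopted by the change above
	firstFragmentAt := time.Now().Add(-90 * time.Second)
	fmt.Println(shouldAbandon(firstFragmentAt, time.Now(), ipv6ReassembleTimeout)) // true
}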
260,021 | 22.09.2020 16:45:10 | -28,800 | 190cf30e41cdd7b23d53d86024afeabbcd92a0db | arm64: the mair_el1 value is wrong
The correct value needed is 0xbbff440c0400, but the constant as
defined evaluates to 0x000000000000ffc0 due to an operator
precedence error in _MT_EL1_INIT; both kernel and user space memory
attributes should be Normal memory, not DEVICE_nGnRE | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/kvm/kvm_const_arm64.go",
"new_path": "pkg/sentry/platform/kvm/kvm_const_arm64.go",
"diff": "@@ -107,7 +107,14 @@ const (\n_MT_NORMAL_NC = 3\n_MT_NORMAL = 4\n_MT_NORMAL_WT = 5\n- _MT_EL1_INIT = (0 << _MT_DEVICE_nGnRnE) | (0x4 << _MT_DEVICE_nGnRE * 8) | (0xc << _MT_DEVICE_GRE * 8) | (0x44 << _MT_NORMAL_NC * 8) | (0xff << _MT_NORMAL * 8) | (0xbb << _MT_NORMAL_WT * 8)\n+ _MT_ATTR_DEVICE_nGnRnE = 0x00\n+ _MT_ATTR_DEVICE_nGnRE = 0x04\n+ _MT_ATTR_DEVICE_GRE = 0x0c\n+ _MT_ATTR_NORMAL_NC = 0x44\n+ _MT_ATTR_NORMAL_WT = 0xbb\n+ _MT_ATTR_NORMAL = 0xff\n+ _MT_ATTR_MASK = 0xff\n+ _MT_EL1_INIT = (_MT_ATTR_DEVICE_nGnRnE << (_MT_DEVICE_nGnRnE * 8)) | (_MT_ATTR_DEVICE_nGnRE << (_MT_DEVICE_nGnRE * 8)) | (_MT_ATTR_DEVICE_GRE << (_MT_DEVICE_GRE * 8)) | (_MT_ATTR_NORMAL_NC << (_MT_NORMAL_NC * 8)) | (_MT_ATTR_NORMAL << (_MT_NORMAL * 8)) | (_MT_ATTR_NORMAL_WT << (_MT_NORMAL_WT * 8))\n)\nconst (\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/ring0/pagetables/pagetables_aarch64.go",
"new_path": "pkg/sentry/platform/ring0/pagetables/pagetables_aarch64.go",
"diff": "@@ -78,7 +78,7 @@ const (\nconst (\nexecuteDisable = xn\n- optionMask = 0xfff | 0xfff<<48\n+ optionMask = 0xfff | 0xffff<<48\nprotDefault = accessed | shared\n)\n@@ -188,7 +188,7 @@ func (p *PTE) Set(addr uintptr, opts MapOpts) {\nv |= mtNormal\n} else {\nv = v &^ user\n- v |= mtDevicenGnRE // Strong order for the addresses with ring0.KernelStartAddress.\n+ v |= mtNormal\n}\natomic.StoreUintptr((*uintptr)(p), v)\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | arm64: the mair_el1 value is wrong
The correct value needed is 0xbbff440c0400, but the constant as
defined evaluates to 0x000000000000ffc0 due to an operator
precedence error in _MT_EL1_INIT; both kernel and user space memory
attributes should be Normal memory, not DEVICE_nGnRE
Signed-off-by: Min Le <[email protected]> |
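To make the arithmetic concrete: in Go, << and * share the same precedence and associate left to right, so the old expression "attr << index * 8" parsed as "(attr << index) * 8". A small stand-alone program (not from the gVisor tree) that evaluates both forms:

package main

import "fmt"

func main() {
	// MAIR index for each memory type, taken from the diff above.
	const (
		nGnRnE, nGnRE, GRE, normalNC, normal, normalWT = 0, 1, 2, 3, 4, 5
	)
	// Old (buggy) form: "0x4 << nGnRE * 8" is "(0x4 << nGnRE) * 8" in Go.
	old := uint64((0x0 << nGnRnE) | (0x4 << nGnRE * 8) | (0xc << GRE * 8) |
		(0x44 << normalNC * 8) | (0xff << normal * 8) | (0xbb << normalWT * 8))
	// Fixed form: each attribute byte is shifted into its own 8-bit lane.
	fixed := uint64((0x00 << (nGnRnE * 8)) | (0x04 << (nGnRE * 8)) | (0x0c << (GRE * 8)) |
		(0x44 << (normalNC * 8)) | (0xff << (normal * 8)) | (0xbb << (normalWT * 8)))
	fmt.Printf("old:   %#x\n", old)   // 0xffc0
	fmt.Printf("fixed: %#x\n", fixed) // 0xbbff440c0400
}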
259,858 | 08.10.2020 10:03:31 | 25,200 | 06200cb5cfd44dbd3edc221fa3f6c1a716a8c4c6 | Improve multi-arch support.
This change allows Dockerfiles named Dockerfile.$(ARCH) and makes list-images
list only supported architectures.
Updates | [
{
"change_type": "MODIFY",
"old_path": "images/Makefile",
"new_path": "images/Makefile",
"diff": "@@ -23,7 +23,7 @@ ARCH := $(shell uname -m)\n# tests are using locally-defined images (that are consistent and idempotent).\nREMOTE_IMAGE_PREFIX ?= gcr.io/gvisor-presubmit\nLOCAL_IMAGE_PREFIX ?= gvisor.dev/images\n-ALL_IMAGES := $(subst /,_,$(subst ./,,$(shell find . -name Dockerfile -exec dirname {} \\;)))\n+ALL_IMAGES := $(subst /,_,$(subst ./,,$(shell find . -name Dockerfile -o -name Dockerfile.$(ARCH) | xargs -n 1 dirname | uniq)))\nifneq ($(ARCH),$(shell uname -m))\nDOCKER_PLATFORM_ARGS := --platform=$(ARCH)\nelse\n@@ -51,6 +51,7 @@ load-%-images:\n# ensuring that images will always be sourced using the local files if there\n# are changes.\npath = $(subst _,/,$(1))\n+dockerfile = $$(if [ -f \"$(call path,$(1))/Dockerfile.$(ARCH)\" ]; then echo Dockerfile.$(ARCH); else echo Dockerfile; fi)\ntag = $(shell find $(call path,$(1)) -type f -print | sort | xargs -n 1 sha256sum | sha256sum - | cut -c 1-16)\nremote_image = $(REMOTE_IMAGE_PREFIX)/$(subst _,/,$(1))_$(ARCH):$(call tag,$(1))\nlocal_image = $(LOCAL_IMAGE_PREFIX)/$(subst _,/,$(1))\n@@ -59,11 +60,17 @@ local_image = $(LOCAL_IMAGE_PREFIX)/$(subst _,/,$(1))\n# we need to explicitly repull the base layer in order to ensure that the\n# architecture is correct. Note that we use the term \"rebuild\" here to avoid\n# conflicting with the bazel \"build\" terminology, which is used elsewhere.\n-rebuild-%: FROM=$(shell grep FROM $(call path,$*)/Dockerfile | cut -d' ' -f2)\n+rebuild-%: FROM=$(shell grep FROM \"$(call path,$*)/$(call dockerfile,$*)\" | cut -d' ' -f2)\nrebuild-%: register-cross\n+ @if ! [ -f \"$(call path,$*)/$(call dockerfile,$*)\" ]; then \\\n+ (echo \"ERROR: Dockerfile for $* not found (is it available for $(ARCH)?).\" >&2 && exit 1); \\\n+ fi\n$(foreach IMAGE,$(FROM),docker pull $(DOCKER_PLATFORM_ARGS) $(IMAGE) &&) \\\nT=$$(mktemp -d) && cp -a $(call path,$*)/* $$T && \\\n- docker build $(DOCKER_PLATFORM_ARGS) -t $(call remote_image,$*) $$T && \\\n+ docker build $(DOCKER_PLATFORM_ARGS) \\\n+ -f \"$$T/$(call dockerfile,$*)\" \\\n+ -t \"$(call remote_image,$*)\" \\\n+ $$T && \\\nrm -rf $$T\n# pull will check the \"remote\" image and pull if necessary. If the remote image\n"
},
{
"change_type": "MODIFY",
"old_path": "images/basic/mysql/Dockerfile",
"new_path": "images/basic/mysql/Dockerfile",
"diff": "-FROM mysql:8.0.19\n+FROM mysql/mysql-server:8.0.19\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "images/basic/tomcat/Dockerfile.aarch64",
"diff": "+FROM arm64v8/tomcat:8.0\n"
},
{
"change_type": "RENAME",
"old_path": "images/jekyll/Dockerfile",
"new_path": "images/jekyll/Dockerfile.x86_64",
"diff": ""
}
] | Go | Apache License 2.0 | google/gvisor | Improve multi-arch support.
This change allows Dockerfiles named Dockerfile.$(ARCH) and makes list-images
list only supported architectures.
Updates #2847
PiperOrigin-RevId: 336108293 |
259,858 | 08.10.2020 11:22:15 | 25,200 | 5124ce579d34079d2aefd45b486e0127ed6c2917 | Minor nogo cleanup. | [
{
"change_type": "MODIFY",
"old_path": "tools/checkescape/checkescape.go",
"new_path": "tools/checkescape/checkescape.go",
"diff": "@@ -102,8 +102,8 @@ var (\n// This may be set instead of Binary.\nReader io.Reader\n- // Tool is the tool used to dump a binary.\n- tool = flag.String(\"dump_tool\", \"\", \"tool used to dump a binary\")\n+ // objdumpTool is the tool used to dump a binary.\n+ objdumpTool = flag.String(\"objdump_tool\", \"\", \"tool used to dump a binary\")\n)\n// EscapeReason is an escape reason.\n@@ -387,7 +387,7 @@ func loadObjdump() (map[string][]string, error) {\n}\n// Construct our command.\n- cmd := exec.Command(*tool, args...)\n+ cmd := exec.Command(*objdumpTool, args...)\ncmd.Stdin = stdin\ncmd.Stderr = os.Stderr\nout, err := cmd.StdoutPipe()\n"
},
{
"change_type": "MODIFY",
"old_path": "tools/defs.bzl",
"new_path": "tools/defs.bzl",
"diff": "@@ -72,7 +72,7 @@ def go_binary(name, nogo = True, pure = False, static = False, x_defs = None, **\n)\nnogo_test(\nname = name + \"_nogo\",\n- deps = [\":\" + name + \"_nogo_library\"],\n+ library = \":\" + name + \"_nogo_library\",\n)\ndef calculate_sets(srcs):\n@@ -203,7 +203,7 @@ def go_library(name, srcs, deps = [], imports = [], stateify = True, marshal = F\nif nogo:\nnogo_test(\nname = name + \"_nogo\",\n- deps = [\":\" + name],\n+ library = \":\" + name,\n)\nif marshal:\n@@ -239,7 +239,7 @@ def go_test(name, nogo = True, **kwargs):\nif nogo:\nnogo_test(\nname = name + \"_nogo\",\n- deps = [\":\" + name],\n+ library = \":\" + name,\n)\ndef proto_library(name, srcs, deps = None, has_services = 0, **kwargs):\n"
},
{
"change_type": "MODIFY",
"old_path": "tools/nogo/BUILD",
"new_path": "tools/nogo/BUILD",
"diff": "load(\"//tools:defs.bzl\", \"bzl_library\", \"go_library\")\n-load(\"//tools/nogo:defs.bzl\", \"nogo_dump_tool\", \"nogo_stdlib\")\n+load(\"//tools/nogo:defs.bzl\", \"nogo_objdump_tool\", \"nogo_stdlib\")\npackage(licenses = [\"notice\"])\n-nogo_dump_tool(\n- name = \"dump_tool\",\n+nogo_objdump_tool(\n+ name = \"objdump_tool\",\nvisibility = [\"//visibility:public\"],\n)\n"
},
{
"change_type": "MODIFY",
"old_path": "tools/nogo/defs.bzl",
"new_path": "tools/nogo/defs.bzl",
"diff": "load(\"//tools/bazeldefs:defs.bzl\", \"go_context\", \"go_importpath\", \"go_rule\", \"go_test_library\")\n-def _nogo_dump_tool_impl(ctx):\n- # Extract the Go context.\n+def _nogo_objdump_tool_impl(ctx):\ngo_ctx = go_context(ctx)\n# Construct the magic dump command.\n@@ -40,9 +39,9 @@ def _nogo_dump_tool_impl(ctx):\nexecutable = dumper,\n)]\n-nogo_dump_tool = go_rule(\n+nogo_objdump_tool = go_rule(\nrule,\n- implementation = _nogo_dump_tool_impl,\n+ implementation = _nogo_objdump_tool_impl,\n)\n# NogoStdlibInfo is the set of standard library facts.\n@@ -55,7 +54,6 @@ NogoStdlibInfo = provider(\n)\ndef _nogo_stdlib_impl(ctx):\n- # Extract the Go context.\ngo_ctx = go_context(ctx)\n# Build the standard library facts.\n@@ -72,12 +70,12 @@ def _nogo_stdlib_impl(ctx):\nctx.actions.run(\ninputs = [config_file] + go_ctx.stdlib_srcs,\noutputs = [facts, findings],\n- tools = depset(go_ctx.runfiles.to_list() + ctx.files._dump_tool),\n+ tools = depset(go_ctx.runfiles.to_list() + ctx.files._objdump_tool),\nexecutable = ctx.files._nogo[0],\nmnemonic = \"GoStandardLibraryAnalysis\",\nprogress_message = \"Analyzing Go Standard Library\",\narguments = go_ctx.nogo_args + [\n- \"-dump_tool=%s\" % ctx.files._dump_tool[0].path,\n+ \"-objdump_tool=%s\" % ctx.files._objdump_tool[0].path,\n\"-stdlib=%s\" % config_file.path,\n\"-findings=%s\" % findings.path,\n\"-facts=%s\" % facts.path,\n@@ -97,8 +95,8 @@ nogo_stdlib = go_rule(\n\"_nogo\": attr.label(\ndefault = \"//tools/nogo/check:check\",\n),\n- \"_dump_tool\": attr.label(\n- default = \"//tools/nogo:dump_tool\",\n+ \"_objdump_tool\": attr.label(\n+ default = \"//tools/nogo:objdump_tool\",\n),\n},\n)\n@@ -121,6 +119,8 @@ NogoInfo = provider(\n)\ndef _nogo_aspect_impl(target, ctx):\n+ go_ctx = go_context(ctx)\n+\n# If this is a nogo rule itself (and not the shadow of a go_library or\n# go_binary rule created by such a rule), then we simply return nothing.\n# All work is done in the shadow properties for go rules. 
For a proto\n@@ -135,9 +135,6 @@ def _nogo_aspect_impl(target, ctx):\nelse:\nreturn [NogoInfo()]\n- # Extract the Go context.\n- go_ctx = go_context(ctx)\n-\n# If we're using the \"library\" attribute, then we need to aggregate the\n# original library sources and dependencies into this target to perform\n# proper type analysis.\n@@ -227,13 +224,13 @@ def _nogo_aspect_impl(target, ctx):\nctx.actions.run(\ninputs = inputs,\noutputs = [facts, findings, escapes],\n- tools = depset(go_ctx.runfiles.to_list() + ctx.files._dump_tool),\n+ tools = depset(go_ctx.runfiles.to_list() + ctx.files._objdump_tool),\nexecutable = ctx.files._nogo[0],\nmnemonic = \"GoStaticAnalysis\",\nprogress_message = \"Analyzing %s\" % target.label,\narguments = go_ctx.nogo_args + [\n\"-binary=%s\" % target_objfile.path,\n- \"-dump_tool=%s\" % ctx.files._dump_tool[0].path,\n+ \"-objdump_tool=%s\" % ctx.files._objdump_tool[0].path,\n\"-package=%s\" % config_file.path,\n\"-findings=%s\" % findings.path,\n\"-facts=%s\" % facts.path,\n@@ -271,7 +268,7 @@ nogo_aspect = go_rule(\nattrs = {\n\"_nogo\": attr.label(default = \"//tools/nogo/check:check\"),\n\"_nogo_stdlib\": attr.label(default = \"//tools/nogo:stdlib\"),\n- \"_dump_tool\": attr.label(default = \"//tools/nogo:dump_tool\"),\n+ \"_objdump_tool\": attr.label(default = \"//tools/nogo:objdump_tool\"),\n},\n)\n@@ -314,15 +311,17 @@ def _nogo_test_impl(ctx):\n_nogo_test = rule(\nimplementation = _nogo_test_impl,\nattrs = {\n+ # deps should have only a single element.\n\"deps\": attr.label_list(aspects = [nogo_aspect]),\n},\ntest = True,\n)\n-def nogo_test(name, **kwargs):\n+def nogo_test(name, library, **kwargs):\ntags = kwargs.pop(\"tags\", []) + [\"nogo\"]\n_nogo_test(\nname = name,\n+ deps = [library],\ntags = tags,\n**kwargs\n)\n"
}
] | Go | Apache License 2.0 | google/gvisor | Minor nogo cleanup.
PiperOrigin-RevId: 336126583 |
260,004 | 08.10.2020 14:33:58 | 25,200 | 40269d0c24d1ea9b040a8326c9fa01b03477410a | Send unicast probes when link address is known
When the neighbor table already has a link address for a neighbor but is
trying to confirm reachability, it may send unicast probes to the
neighbor. | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/ipv6/icmp.go",
"new_path": "pkg/tcpip/network/ipv6/icmp.go",
"diff": "@@ -646,18 +646,21 @@ func (*protocol) LinkAddressProtocol() tcpip.NetworkProtocolNumber {\n// LinkAddressRequest implements stack.LinkAddressResolver.\nfunc (*protocol) LinkAddressRequest(addr, localAddr tcpip.Address, remoteLinkAddr tcpip.LinkAddress, linkEP stack.LinkEndpoint) *tcpip.Error {\n- snaddr := header.SolicitedNodeAddr(addr)\n-\n// TODO(b/148672031): Use stack.FindRoute instead of manually creating the\n// route here. Note, we would need the nicID to do this properly so the right\n// NIC (associated to linkEP) is used to send the NDP NS message.\n- r := &stack.Route{\n+ r := stack.Route{\nLocalAddress: localAddr,\n- RemoteAddress: snaddr,\n+ RemoteAddress: addr,\nRemoteLinkAddress: remoteLinkAddr,\n}\n+\n+ // If a remote address is not already known, then send a multicast\n+ // solicitation since multicast addresses have a static mapping to link\n+ // addresses.\nif len(r.RemoteLinkAddress) == 0 {\n- r.RemoteLinkAddress = header.EthernetAddressFromMulticastIPv6Address(snaddr)\n+ r.RemoteAddress = header.SolicitedNodeAddr(addr)\n+ r.RemoteLinkAddress = header.EthernetAddressFromMulticastIPv6Address(r.RemoteAddress)\n}\npkt := stack.NewPacketBuffer(stack.PacketBufferOptions{\n@@ -683,7 +686,7 @@ func (*protocol) LinkAddressRequest(addr, localAddr tcpip.Address, remoteLinkAdd\n})\n// TODO(stijlist): count this in ICMP stats.\n- return linkEP.WritePacket(r, nil /* gso */, ProtocolNumber, pkt)\n+ return linkEP.WritePacket(&r, nil /* gso */, ProtocolNumber, pkt)\n}\n// ResolveStaticAddress implements stack.LinkAddressResolver.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/ipv6/icmp_test.go",
"new_path": "pkg/tcpip/network/ipv6/icmp_test.go",
"diff": "@@ -22,6 +22,7 @@ import (\n\"gvisor.dev/gvisor/pkg/tcpip\"\n\"gvisor.dev/gvisor/pkg/tcpip/buffer\"\n+ \"gvisor.dev/gvisor/pkg/tcpip/checker\"\n\"gvisor.dev/gvisor/pkg/tcpip/header\"\n\"gvisor.dev/gvisor/pkg/tcpip/link/channel\"\n\"gvisor.dev/gvisor/pkg/tcpip/link/sniffer\"\n@@ -1225,17 +1226,20 @@ func TestLinkAddressRequest(t *testing.T) {\ntests := []struct {\nname string\nremoteLinkAddr tcpip.LinkAddress\n- expectLinkAddr tcpip.LinkAddress\n+ expectedLinkAddr tcpip.LinkAddress\n+ expectedAddr tcpip.Address\n}{\n{\nname: \"Unicast\",\nremoteLinkAddr: linkAddr1,\n- expectLinkAddr: linkAddr1,\n+ expectedLinkAddr: linkAddr1,\n+ expectedAddr: lladdr0,\n},\n{\nname: \"Multicast\",\nremoteLinkAddr: \"\",\n- expectLinkAddr: mcaddr,\n+ expectedLinkAddr: mcaddr,\n+ expectedAddr: snaddr,\n},\n}\n@@ -1258,9 +1262,22 @@ func TestLinkAddressRequest(t *testing.T) {\nif !ok {\nt.Fatal(\"expected to send a link address request\")\n}\n-\n- if got, want := pkt.Route.RemoteLinkAddress, test.expectLinkAddr; got != want {\n- t.Errorf(\"got pkt.Route.RemoteLinkAddress = %s, want = %s\", got, want)\n+ if pkt.Route.RemoteLinkAddress != test.expectedLinkAddr {\n+ t.Errorf(\"got pkt.Route.RemoteLinkAddress = %s, want = %s\", pkt.Route.RemoteLinkAddress, test.expectedLinkAddr)\n+ }\n+ if pkt.Route.RemoteAddress != test.expectedAddr {\n+ t.Errorf(\"got pkt.Route.RemoteAddress = %s, want = %s\", pkt.Route.RemoteAddress, test.expectedAddr)\n+ }\n+ if pkt.Route.LocalAddress != lladdr1 {\n+ t.Errorf(\"got pkt.Route.LocalAddress = %s, want = %s\", pkt.Route.LocalAddress, lladdr1)\n}\n+ checker.IPv6(t, stack.PayloadSince(pkt.Pkt.NetworkHeader()),\n+ checker.SrcAddr(lladdr1),\n+ checker.DstAddr(test.expectedAddr),\n+ checker.TTL(header.NDPHopLimit),\n+ checker.NDPNS(\n+ checker.NDPNSTargetAddress(lladdr0),\n+ checker.NDPNSOptions([]header.NDPOption{header.NDPSourceLinkLayerAddressOption(linkAddr0)}),\n+ ))\n}\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Send unicast probes when link address is known
When the neighbor table already has a link address for a neighbor but is
trying to confirm reachability, it may send unicast probes to the
neighbor.
PiperOrigin-RevId: 336166711 |
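A minimal, self-contained sketch of the destination choice the diff above introduces; the types and the solicitedNodeAddr helper are stand-ins, not the real tcpip/header APIs:

package main

import "fmt"

type address = string
type linkAddress = string

// solicitedNodeAddr stands in for header.SolicitedNodeAddr, which maps an
// IPv6 address to its solicited-node multicast address.
func solicitedNodeAddr(a address) address { return "solicited-node(" + a + ")" }

// probeDestination mirrors the new LinkAddressRequest logic: with a cached
// link address the Neighbor Solicitation is sent unicast to the target,
// otherwise it is sent to the solicited-node multicast address.
func probeDestination(target address, remoteLinkAddr linkAddress) address {
	if len(remoteLinkAddr) != 0 {
		return target
	}
	return solicitedNodeAddr(target)
}

func main() {
	fmt.Println(probeDestination("fe80::1", "02:03:04:05:06:07")) // fe80::1
	fmt.Println(probeDestination("fe80::1", ""))                  // solicited-node(fe80::1)
}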
259,885 | 08.10.2020 16:21:14 | 25,200 | 6bad4851d4a04b5f56ade1f8ba68bd9c16471533 | Implement MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ.
cf. "rseq/membarrier: Add MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ" | [
{
"change_type": "MODIFY",
"old_path": "pkg/abi/linux/membarrier.go",
"new_path": "pkg/abi/linux/membarrier.go",
"diff": "package linux\n-// membarrier(2) commands, from include/uapi/linux/membarrier.h\n+// membarrier(2) commands, from include/uapi/linux/membarrier.h.\nconst (\nMEMBARRIER_CMD_QUERY = 0\nMEMBARRIER_CMD_GLOBAL = (1 << 0)\n@@ -24,4 +24,11 @@ const (\nMEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED = (1 << 4)\nMEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE = (1 << 5)\nMEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE = (1 << 6)\n+ MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ = (1 << 7)\n+ MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_RSEQ = (1 << 8)\n+)\n+\n+// membarrier(2) flags, from include/uapi/linux/membarrier.h.\n+const (\n+ MEMBARRIER_CMD_FLAG_CPU = (1 << 0)\n)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/mm/mm.go",
"new_path": "pkg/sentry/mm/mm.go",
"diff": "@@ -243,6 +243,12 @@ type MemoryManager struct {\n//\n// membarrierPrivateEnabled is accessed using atomic memory operations.\nmembarrierPrivateEnabled uint32\n+\n+ // membarrierRSeqEnabled is non-zero if EnableMembarrierRSeq has previously\n+ // been called.\n+ //\n+ // membarrierRSeqEnabled is accessed using atomic memory operations.\n+ membarrierRSeqEnabled uint32\n}\n// vma represents a virtual memory area.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/mm/syscalls.go",
"new_path": "pkg/sentry/mm/syscalls.go",
"diff": "@@ -1287,3 +1287,15 @@ func (mm *MemoryManager) EnableMembarrierPrivate() {\nfunc (mm *MemoryManager) IsMembarrierPrivateEnabled() bool {\nreturn atomic.LoadUint32(&mm.membarrierPrivateEnabled) != 0\n}\n+\n+// EnableMembarrierRSeq causes future calls to IsMembarrierRSeqEnabled to\n+// return true.\n+func (mm *MemoryManager) EnableMembarrierRSeq() {\n+ atomic.StoreUint32(&mm.membarrierRSeqEnabled, 1)\n+}\n+\n+// IsMembarrierRSeqEnabled returns true if mm.EnableMembarrierRSeq() has\n+// previously been called.\n+func (mm *MemoryManager) IsMembarrierRSeqEnabled() bool {\n+ return atomic.LoadUint32(&mm.membarrierRSeqEnabled) != 0\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/syscalls/linux/sys_membarrier.go",
"new_path": "pkg/sentry/syscalls/linux/sys_membarrier.go",
"diff": "@@ -24,47 +24,80 @@ import (\n// Membarrier implements syscall membarrier(2).\nfunc Membarrier(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {\ncmd := args[0].Int()\n- flags := args[1].Int()\n-\n- p := t.Kernel().Platform\n- if !p.HaveGlobalMemoryBarrier() {\n- // Event for applications that want membarrier on a configuration that\n- // doesn't support them.\n- t.Kernel().EmitUnimplementedEvent(t)\n- return 0, nil, syserror.ENOSYS\n- }\n+ flags := args[1].Uint()\n+ switch cmd {\n+ case linux.MEMBARRIER_CMD_QUERY:\nif flags != 0 {\nreturn 0, nil, syserror.EINVAL\n}\n-\n- switch cmd {\n- case linux.MEMBARRIER_CMD_QUERY:\n- const supportedCommands = linux.MEMBARRIER_CMD_GLOBAL |\n+ var supportedCommands uintptr\n+ if t.Kernel().Platform.HaveGlobalMemoryBarrier() {\n+ supportedCommands |= linux.MEMBARRIER_CMD_GLOBAL |\nlinux.MEMBARRIER_CMD_GLOBAL_EXPEDITED |\nlinux.MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED |\nlinux.MEMBARRIER_CMD_PRIVATE_EXPEDITED |\nlinux.MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED\n+ }\n+ if t.RSeqAvailable() {\n+ supportedCommands |= linux.MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ |\n+ linux.MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_RSEQ\n+ }\nreturn supportedCommands, nil, nil\n- case linux.MEMBARRIER_CMD_PRIVATE_EXPEDITED:\n- if !t.MemoryManager().IsMembarrierPrivateEnabled() {\n+ case linux.MEMBARRIER_CMD_GLOBAL, linux.MEMBARRIER_CMD_GLOBAL_EXPEDITED, linux.MEMBARRIER_CMD_PRIVATE_EXPEDITED:\n+ if flags != 0 {\n+ return 0, nil, syserror.EINVAL\n+ }\n+ if !t.Kernel().Platform.HaveGlobalMemoryBarrier() {\n+ return 0, nil, syserror.EINVAL\n+ }\n+ if cmd == linux.MEMBARRIER_CMD_PRIVATE_EXPEDITED && !t.MemoryManager().IsMembarrierPrivateEnabled() {\nreturn 0, nil, syserror.EPERM\n}\n- fallthrough\n- case linux.MEMBARRIER_CMD_GLOBAL, linux.MEMBARRIER_CMD_GLOBAL_EXPEDITED:\n- return 0, nil, p.GlobalMemoryBarrier()\n+ return 0, nil, t.Kernel().Platform.GlobalMemoryBarrier()\ncase linux.MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED:\n+ if flags != 0 {\n+ return 0, nil, syserror.EINVAL\n+ }\n+ if !t.Kernel().Platform.HaveGlobalMemoryBarrier() {\n+ return 0, nil, syserror.EINVAL\n+ }\n// no-op\nreturn 0, nil, nil\ncase linux.MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED:\n+ if flags != 0 {\n+ return 0, nil, syserror.EINVAL\n+ }\n+ if !t.Kernel().Platform.HaveGlobalMemoryBarrier() {\n+ return 0, nil, syserror.EINVAL\n+ }\nt.MemoryManager().EnableMembarrierPrivate()\nreturn 0, nil, nil\n- case linux.MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE, linux.MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE:\n- // We're aware of these, but they aren't implemented since no platform\n- // supports them yet.\n- t.Kernel().EmitUnimplementedEvent(t)\n- fallthrough\n+ case linux.MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ:\n+ if flags&^linux.MEMBARRIER_CMD_FLAG_CPU != 0 {\n+ return 0, nil, syserror.EINVAL\n+ }\n+ if !t.RSeqAvailable() {\n+ return 0, nil, syserror.EINVAL\n+ }\n+ if !t.MemoryManager().IsMembarrierRSeqEnabled() {\n+ return 0, nil, syserror.EPERM\n+ }\n+ // MEMBARRIER_CMD_FLAG_CPU and cpu_id are ignored since we don't have\n+ // the ability to preempt specific CPUs.\n+ return 0, nil, t.Kernel().Platform.PreemptAllCPUs()\n+ case linux.MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_RSEQ:\n+ if flags != 0 {\n+ return 0, nil, syserror.EINVAL\n+ }\n+ if !t.RSeqAvailable() {\n+ return 0, nil, syserror.EINVAL\n+ }\n+ t.MemoryManager().EnableMembarrierRSeq()\n+ return 0, nil, nil\ndefault:\n+ // Probably a command we don't implement.\n+ 
t.Kernel().EmitUnimplementedEvent(t)\nreturn 0, nil, syserror.EINVAL\n}\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Implement MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ.
cf. 2a36ab717e8f "rseq/membarrier: Add MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ"
PiperOrigin-RevId: 336186795 |
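For userspace context, a hedged sketch of how a Linux program could exercise the new commands through raw membarrier(2); the command constants are copied from the values in the diff above rather than assumed to be exported by x/sys/unix:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

// Values mirror include/uapi/linux/membarrier.h as shown in the diff.
const (
	membarrierCmdRegisterPrivateExpeditedRseq = 1 << 8
	membarrierCmdPrivateExpeditedRseq         = 1 << 7
)

func main() {
	// Registration must happen before the expedited RSEQ barrier may be used;
	// otherwise the kernel (and the sentry) returns EPERM.
	if _, _, errno := unix.Syscall(unix.SYS_MEMBARRIER, membarrierCmdRegisterPrivateExpeditedRseq, 0, 0); errno != 0 {
		fmt.Println("register failed:", errno)
		return
	}
	if _, _, errno := unix.Syscall(unix.SYS_MEMBARRIER, membarrierCmdPrivateExpeditedRseq, 0, 0); errno != 0 {
		fmt.Println("barrier failed:", errno)
		return
	}
	fmt.Println("expedited RSEQ barrier issued")
}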
260,004 | 08.10.2020 17:32:41 | 25,200 | 07b1d7413e8b648b85fa9276a516732dd93276b4 | Only block resolution when NUD is incomplete
When a completed entry exists for a neighbor, there is no need to block
while reachability is (re)confirmed. The stack should continue to use
the neighbor's link address while NUD is performed.
Test: stack_test.TestNeighborCacheReplace | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/neighbor_cache.go",
"new_path": "pkg/tcpip/stack/neighbor_cache.go",
"diff": "@@ -131,10 +131,17 @@ func (n *neighborCache) entry(remoteAddr, localAddr tcpip.Address, linkRes LinkA\ndefer entry.mu.Unlock()\nswitch s := entry.neigh.State; s {\n- case Reachable, Static:\n+ case Stale:\n+ entry.handlePacketQueuedLocked()\n+ fallthrough\n+ case Reachable, Static, Delay, Probe:\n+ // As per RFC 4861 section 7.3.3:\n+ // \"Neighbor Unreachability Detection operates in parallel with the sending\n+ // of packets to a neighbor. While reasserting a neighbor's reachability,\n+ // a node continues sending packets to that neighbor using the cached\n+ // link-layer address.\"\nreturn entry.neigh, nil, nil\n-\n- case Unknown, Incomplete, Stale, Delay, Probe:\n+ case Unknown, Incomplete:\nentry.addWakerLocked(w)\nif entry.done == nil {\n@@ -147,10 +154,8 @@ func (n *neighborCache) entry(remoteAddr, localAddr tcpip.Address, linkRes LinkA\nentry.handlePacketQueuedLocked()\nreturn entry.neigh, entry.done, tcpip.ErrWouldBlock\n-\ncase Failed:\nreturn entry.neigh, nil, tcpip.ErrNoLinkAddress\n-\ndefault:\npanic(fmt.Sprintf(\"Invalid cache entry state: %s\", s))\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/neighbor_cache_test.go",
"new_path": "pkg/tcpip/stack/neighbor_cache_test.go",
"diff": "@@ -1500,6 +1500,7 @@ func TestNeighborCacheReplace(t *testing.T) {\n}\n// Verify the entry exists\n+ {\ne, doneCh, err := neigh.entry(entry.Addr, entry.LocalAddr, linkRes, nil)\nif err != nil {\nt.Errorf(\"unexpected error from neigh.entry(%s, %s, _, nil): %s\", entry.Addr, entry.LocalAddr, err)\n@@ -1519,6 +1520,7 @@ func TestNeighborCacheReplace(t *testing.T) {\nif diff := cmp.Diff(e, want, entryDiffOpts()...); diff != \"\" {\nt.Errorf(\"neigh.entry(%s, %s, _, nil) mismatch (-got, +want):\\n%s\", entry.Addr, entry.LinkAddr, diff)\n}\n+ }\n// Notify of a link address change\nvar updatedLinkAddr tcpip.LinkAddress\n@@ -1536,28 +1538,34 @@ func TestNeighborCacheReplace(t *testing.T) {\nIsRouter: false,\n})\n- // Requesting the entry again should start address resolution\n+ // Requesting the entry again should start neighbor reachability confirmation.\n+ //\n+ // Verify the entry's new link address and the new state.\n{\n- _, doneCh, err := neigh.entry(entry.Addr, entry.LocalAddr, linkRes, nil)\n- if err != tcpip.ErrWouldBlock {\n- t.Fatalf(\"got neigh.entry(%s, %s, _, nil) = %v, want = %s\", entry.Addr, entry.LocalAddr, err, tcpip.ErrWouldBlock)\n+ e, _, err := neigh.entry(entry.Addr, entry.LocalAddr, linkRes, nil)\n+ if err != nil {\n+ t.Fatalf(\"neigh.entry(%s, %s, _, nil): %s\", entry.Addr, entry.LocalAddr, err)\n}\n- clock.Advance(config.DelayFirstProbeTime + typicalLatency)\n- select {\n- case <-doneCh:\n- default:\n- t.Fatalf(\"expected notification from done channel returned by neigh.entry(%s, %s, _, nil)\", entry.Addr, entry.LocalAddr)\n+ want := NeighborEntry{\n+ Addr: entry.Addr,\n+ LocalAddr: entry.LocalAddr,\n+ LinkAddr: updatedLinkAddr,\n+ State: Delay,\n}\n+ if diff := cmp.Diff(e, want, entryDiffOpts()...); diff != \"\" {\n+ t.Errorf(\"neigh.entry(%s, %s, _, nil) mismatch (-got, +want):\\n%s\", entry.Addr, entry.LocalAddr, diff)\n+ }\n+ clock.Advance(config.DelayFirstProbeTime + typicalLatency)\n}\n- // Verify the entry's new link address\n+ // Verify that the neighbor is now reachable.\n{\ne, _, err := neigh.entry(entry.Addr, entry.LocalAddr, linkRes, nil)\nclock.Advance(typicalLatency)\nif err != nil {\nt.Errorf(\"unexpected error from neigh.entry(%s, %s, _, nil): %s\", entry.Addr, entry.LocalAddr, err)\n}\n- want = NeighborEntry{\n+ want := NeighborEntry{\nAddr: entry.Addr,\nLocalAddr: entry.LocalAddr,\nLinkAddr: updatedLinkAddr,\n"
}
] | Go | Apache License 2.0 | google/gvisor | Only block resolution when NUD is incomplete
When a completed entry exists for a neighbor, there is no need to block
while reachability is (re)confirmed. The stack should continue to use
the neighbor's link address while NUD is performed.
Test: stack_test.TestNeighborCacheReplace
PiperOrigin-RevId: 336199043 |
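A simplified, stand-alone sketch of the decision the diff above changes in neighborCache.entry; the state names mirror the real ones, but the helper itself is illustrative only:

package main

import "fmt"

// Simplified NUD states, mirroring the names used in the diff above.
type neighborState int

const (
	unknown neighborState = iota
	incomplete
	stale
	delay
	probe
	reachable
	static
	failed
)

// mustBlock captures the new behavior: only entries without a usable link
// address (Unknown/Incomplete) force the caller to wait for resolution;
// Stale/Delay/Probe entries keep serving the cached link address while
// reachability is re-confirmed in parallel.
func mustBlock(s neighborState) bool {
	switch s {
	case unknown, incomplete:
		return true
	default:
		return false
	}
}

func main() {
	fmt.Println(mustBlock(stale))      // false: keep using the cached address
	fmt.Println(mustBlock(incomplete)) // true: wait for resolution
}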
259,853 | 09.10.2020 10:55:30 | 25,200 | 76a09f0cf5994bae5684fc80b7e7da6161b52975 | syscalls: Don't leak a file on the error path
Reported-by: | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/syscalls/linux/sys_file.go",
"new_path": "pkg/sentry/syscalls/linux/sys_file.go",
"diff": "@@ -84,6 +84,7 @@ func fileOpOn(t *kernel.Task, dirFD int32, path string, resolve bool, fn func(ro\n}\nrel = f.Dirent\nif !fs.IsDir(rel.Inode.StableAttr) {\n+ f.DecRef(t)\nreturn syserror.ENOTDIR\n}\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | syscalls: Don't leak a file on the error path
Reported-by: [email protected]
PiperOrigin-RevId: 336324720 |
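The underlying pattern, as a stand-alone sketch; refCounted is a made-up stand-in for the sentry's reference-counted dirents and files:

package main

import "fmt"

type refCounted struct{ refs int }

func (r *refCounted) IncRef() { r.refs++ }
func (r *refCounted) DecRef() { r.refs-- }

// acquireDir sketches the fix above: a reference is taken up front, so the
// early-return error path must drop it or the object leaks.
func acquireDir(f *refCounted, isDir bool) (*refCounted, error) {
	f.IncRef()
	if !isDir {
		f.DecRef() // the release that the change adds on the ENOTDIR path
		return nil, fmt.Errorf("not a directory")
	}
	return f, nil // on success the reference is handed to the caller
}

func main() {
	f := &refCounted{}
	if _, err := acquireDir(f, false); err != nil {
		fmt.Println(err, "- refs now:", f.refs) // 0: no leak
	}
}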
259,853 | 09.10.2020 11:28:57 | 25,200 | 33d6622172a85209f644840409d1b00ae94d609c | test/syscall/iptables: don't use designated initializers
test/syscalls/linux/iptables.cc:130:3:
error: C99 designator 'name' outside aggregate initializer
130 | };
| | [
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/ip6tables.cc",
"new_path": "test/syscalls/linux/ip6tables.cc",
"diff": "@@ -89,12 +89,12 @@ TEST(IP6TablesBasic, GetRevision) {\nASSERT_THAT(sock = socket(AF_INET6, SOCK_RAW, IPPROTO_RAW),\nSyscallSucceeds());\n- struct xt_get_revision rev = {\n- .name = \"REDIRECT\",\n- .revision = 0,\n- };\n+ struct xt_get_revision rev = {};\nsocklen_t rev_len = sizeof(rev);\n+ snprintf(rev.name, sizeof(rev.name), \"REDIRECT\");\n+ rev.revision = 0;\n+\n// Revision 0 exists.\nEXPECT_THAT(\ngetsockopt(sock, SOL_IPV6, IP6T_SO_GET_REVISION_TARGET, &rev, &rev_len),\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/iptables.cc",
"new_path": "test/syscalls/linux/iptables.cc",
"diff": "@@ -124,12 +124,12 @@ TEST(IPTablesBasic, GetRevision) {\nASSERT_THAT(sock = socket(AF_INET, SOCK_RAW, IPPROTO_ICMP),\nSyscallSucceeds());\n- struct xt_get_revision rev = {\n- .name = \"REDIRECT\",\n- .revision = 0,\n- };\n+ struct xt_get_revision rev = {};\nsocklen_t rev_len = sizeof(rev);\n+ snprintf(rev.name, sizeof(rev.name), \"REDIRECT\");\n+ rev.revision = 0;\n+\n// Revision 0 exists.\nEXPECT_THAT(\ngetsockopt(sock, SOL_IP, IPT_SO_GET_REVISION_TARGET, &rev, &rev_len),\n"
}
] | Go | Apache License 2.0 | google/gvisor | test/syscall/iptables: don't use designated initializers
test/syscalls/linux/iptables.cc:130:3:
error: C99 designator 'name' outside aggregate initializer
130 | };
|
PiperOrigin-RevId: 336331738 |
259,858 | 09.10.2020 12:11:21 | 25,200 | 743327817faa1aa46ff3b31f74a0c5c2d047d65a | Infer receiver name for stateify. | [
{
"change_type": "MODIFY",
"old_path": "tools/go_stateify/main.go",
"new_path": "tools/go_stateify/main.go",
"diff": "@@ -39,7 +39,7 @@ var (\n)\n// resolveTypeName returns a qualified type name.\n-func resolveTypeName(name string, typ ast.Expr) (field string, qualified string) {\n+func resolveTypeName(typ ast.Expr) (field string, qualified string) {\nfor done := false; !done; {\n// Resolve star expressions.\nswitch rs := typ.(type) {\n@@ -69,11 +69,7 @@ func resolveTypeName(name string, typ ast.Expr) (field string, qualified string)\n}\n// Figure out actual type name.\n- ident, ok := typ.(*ast.Ident)\n- if !ok {\n- panic(fmt.Sprintf(\"type not supported: %s (involves anonymous types?)\", name))\n- }\n- field = ident.Name\n+ field = typ.(*ast.Ident).Name\nqualified = qualified + field\nreturn\n}\n@@ -119,7 +115,7 @@ func scanFields(ss *ast.StructType, prefix string, fn scanFunctions) {\n} else {\n// Anonymous types can't be embedded, so we don't need\n// to worry about providing a useful name here.\n- name, _ = resolveTypeName(\"\", field.Type)\n+ name, _ = resolveTypeName(field.Type)\n}\n// Skip _ fields.\n@@ -262,52 +258,39 @@ func main() {\n}\ntype method struct {\n- receiver string\n- name string\n+ typeName string\n+ methodName string\n}\n- // Search for and add all methods with a pointer receiver and no other\n- // arguments to a set. We support auto-detecting the existence of\n- // several different methods with this signature.\n- simpleMethods := map[method]struct{}{}\n+ // Search for and add all method to a set. We auto-detecting several\n+ // different methods (and insert them if we don't find them, in order\n+ // to ensure that expectations match reality).\n+ //\n+ // While we do this, figure out the right receiver name. If there are\n+ // multiple distinct receivers, then we will just pick the last one.\n+ simpleMethods := make(map[method]struct{})\n+ receiverNames := make(map[string]string)\nfor _, f := range files {\n-\n// Go over all functions.\nfor _, decl := range f.Decls {\nd, ok := decl.(*ast.FuncDecl)\nif !ok {\ncontinue\n}\n- if d.Name == nil || d.Recv == nil || d.Type == nil {\n+ if d.Recv == nil || len(d.Recv.List) != 1 {\n// Not a named method.\ncontinue\n}\n- if len(d.Recv.List) != 1 {\n- // Wrong number of receivers?\n- continue\n- }\n- if d.Type.Params != nil && len(d.Type.Params.List) != 0 {\n- // Has argument(s).\n- continue\n- }\n- if d.Type.Results != nil && len(d.Type.Results.List) != 0 {\n- // Has return(s).\n- continue\n- }\n- pt, ok := d.Recv.List[0].Type.(*ast.StarExpr)\n- if !ok {\n- // Not a pointer receiver.\n- continue\n+ // Save the method and the receiver.\n+ name, _ := resolveTypeName(d.Recv.List[0].Type)\n+ simpleMethods[method{\n+ typeName: name,\n+ methodName: d.Name.Name,\n+ }] = struct{}{}\n+ if len(d.Recv.List[0].Names) > 0 {\n+ receiverNames[name] = d.Recv.List[0].Names[0].Name\n}\n-\n- t, ok := pt.X.(*ast.Ident)\n- if !ok {\n- // This shouldn't happen with valid Go.\n- continue\n- }\n-\n- simpleMethods[method{t.Name, d.Name.Name}] = struct{}{}\n}\n}\n@@ -346,7 +329,11 @@ func main() {\nfor _, gs := range d.Specs {\nts := gs.(*ast.TypeSpec)\n- letter := strings.ToLower(ts.Name.Name[:1])\n+ recv, ok := receiverNames[ts.Name.Name]\n+ if !ok {\n+ // Maybe no methods were defined?\n+ recv = strings.ToLower(ts.Name.Name[:1])\n+ }\nswitch x := ts.Type.(type) {\ncase *ast.StructType:\nmaybeEmitImports()\n@@ -363,32 +350,32 @@ func main() {\nemitField(name)\n}\nemitLoadValue := func(name, typName string) {\n- fmt.Fprintf(outputFile, \" stateSourceObject.LoadValue(%d, new(%s), func(y interface{}) { %s.load%s(y.(%s)) })\\n\", fields[name], typName, letter, 
camelCased(name), typName)\n+ fmt.Fprintf(outputFile, \" stateSourceObject.LoadValue(%d, new(%s), func(y interface{}) { %s.load%s(y.(%s)) })\\n\", fields[name], typName, recv, camelCased(name), typName)\n}\nemitLoad := func(name string) {\n- fmt.Fprintf(outputFile, \" stateSourceObject.Load(%d, &%s.%s)\\n\", fields[name], letter, name)\n+ fmt.Fprintf(outputFile, \" stateSourceObject.Load(%d, &%s.%s)\\n\", fields[name], recv, name)\n}\nemitLoadWait := func(name string) {\n- fmt.Fprintf(outputFile, \" stateSourceObject.LoadWait(%d, &%s.%s)\\n\", fields[name], letter, name)\n+ fmt.Fprintf(outputFile, \" stateSourceObject.LoadWait(%d, &%s.%s)\\n\", fields[name], recv, name)\n}\nemitSaveValue := func(name, typName string) {\n- fmt.Fprintf(outputFile, \" var %sValue %s = %s.save%s()\\n\", name, typName, letter, camelCased(name))\n+ fmt.Fprintf(outputFile, \" var %sValue %s = %s.save%s()\\n\", name, typName, recv, camelCased(name))\nfmt.Fprintf(outputFile, \" stateSinkObject.SaveValue(%d, %sValue)\\n\", fields[name], name)\n}\nemitSave := func(name string) {\n- fmt.Fprintf(outputFile, \" stateSinkObject.Save(%d, &%s.%s)\\n\", fields[name], letter, name)\n+ fmt.Fprintf(outputFile, \" stateSinkObject.Save(%d, &%s.%s)\\n\", fields[name], recv, name)\n}\nemitZeroCheck := func(name string) {\n- fmt.Fprintf(outputFile, \" if !%sIsZeroValue(&%s.%s) { %sFailf(\\\"%s is %%#v, expected zero\\\", &%s.%s) }\\n\", statePrefix, letter, name, statePrefix, name, letter, name)\n+ fmt.Fprintf(outputFile, \" if !%sIsZeroValue(&%s.%s) { %sFailf(\\\"%s is %%#v, expected zero\\\", &%s.%s) }\\n\", statePrefix, recv, name, statePrefix, name, recv, name)\n}\n// Generate the type name method.\n- fmt.Fprintf(outputFile, \"func (%s *%s) StateTypeName() string {\\n\", letter, ts.Name.Name)\n+ fmt.Fprintf(outputFile, \"func (%s *%s) StateTypeName() string {\\n\", recv, ts.Name.Name)\nfmt.Fprintf(outputFile, \" return \\\"%s.%s\\\"\\n\", *fullPkg, ts.Name.Name)\nfmt.Fprintf(outputFile, \"}\\n\\n\")\n// Generate the fields method.\n- fmt.Fprintf(outputFile, \"func (%s *%s) StateFields() []string {\\n\", letter, ts.Name.Name)\n+ fmt.Fprintf(outputFile, \"func (%s *%s) StateFields() []string {\\n\", recv, ts.Name.Name)\nfmt.Fprintf(outputFile, \" return []string{\\n\")\nscanFields(x, \"\", scanFunctions{\nnormal: emitField,\n@@ -402,8 +389,11 @@ func main() {\n// the code from compiling if a custom beforeSave was defined in a\n// file not provided to this binary and prevents inherited methods\n// from being called multiple times by overriding them.\n- if _, ok := simpleMethods[method{ts.Name.Name, \"beforeSave\"}]; !ok && generateSaverLoader {\n- fmt.Fprintf(outputFile, \"func (%s *%s) beforeSave() {}\\n\\n\", letter, ts.Name.Name)\n+ if _, ok := simpleMethods[method{\n+ typeName: ts.Name.Name,\n+ methodName: \"beforeSave\",\n+ }]; !ok && generateSaverLoader {\n+ fmt.Fprintf(outputFile, \"func (%s *%s) beforeSave() {}\\n\\n\", recv, ts.Name.Name)\n}\n// Generate the save method.\n@@ -413,8 +403,8 @@ func main() {\n// on this specific behavior, but the ability to specify slots\n// allows a manual implementation to be order-dependent.\nif generateSaverLoader {\n- fmt.Fprintf(outputFile, \"func (%s *%s) StateSave(stateSinkObject %sSink) {\\n\", letter, ts.Name.Name, statePrefix)\n- fmt.Fprintf(outputFile, \" %s.beforeSave()\\n\", letter)\n+ fmt.Fprintf(outputFile, \"func (%s *%s) StateSave(stateSinkObject %sSink) {\\n\", recv, ts.Name.Name, statePrefix)\n+ fmt.Fprintf(outputFile, \" %s.beforeSave()\\n\", recv)\nscanFields(x, \"\", 
scanFunctions{zerovalue: emitZeroCheck})\nscanFields(x, \"\", scanFunctions{value: emitSaveValue})\nscanFields(x, \"\", scanFunctions{normal: emitSave, wait: emitSave})\n@@ -423,16 +413,19 @@ func main() {\n// Define afterLoad if a definition was not found. We do this for\n// the same reason that we do it for beforeSave.\n- _, hasAfterLoad := simpleMethods[method{ts.Name.Name, \"afterLoad\"}]\n+ _, hasAfterLoad := simpleMethods[method{\n+ typeName: ts.Name.Name,\n+ methodName: \"afterLoad\",\n+ }]\nif !hasAfterLoad && generateSaverLoader {\n- fmt.Fprintf(outputFile, \"func (%s *%s) afterLoad() {}\\n\\n\", letter, ts.Name.Name)\n+ fmt.Fprintf(outputFile, \"func (%s *%s) afterLoad() {}\\n\\n\", recv, ts.Name.Name)\n}\n// Generate the load method.\n//\n// N.B. See the comment above for the save method.\nif generateSaverLoader {\n- fmt.Fprintf(outputFile, \"func (%s *%s) StateLoad(stateSourceObject %sSource) {\\n\", letter, ts.Name.Name, statePrefix)\n+ fmt.Fprintf(outputFile, \"func (%s *%s) StateLoad(stateSourceObject %sSource) {\\n\", recv, ts.Name.Name, statePrefix)\nscanFields(x, \"\", scanFunctions{normal: emitLoad, wait: emitLoadWait})\nscanFields(x, \"\", scanFunctions{value: emitLoadValue})\nif hasAfterLoad {\n@@ -440,7 +433,7 @@ func main() {\n// AfterLoad is called, the object encodes a dependency on\n// referred objects (i.e. fields). This means that afterLoad\n// will not be called until the other afterLoads are called.\n- fmt.Fprintf(outputFile, \" stateSourceObject.AfterLoad(%s.afterLoad)\\n\", letter)\n+ fmt.Fprintf(outputFile, \" stateSourceObject.AfterLoad(%s.afterLoad)\\n\", recv)\n}\nfmt.Fprintf(outputFile, \"}\\n\\n\")\n}\n@@ -452,10 +445,10 @@ func main() {\nmaybeEmitImports()\n// Generate the info methods.\n- fmt.Fprintf(outputFile, \"func (%s *%s) StateTypeName() string {\\n\", letter, ts.Name.Name)\n+ fmt.Fprintf(outputFile, \"func (%s *%s) StateTypeName() string {\\n\", recv, ts.Name.Name)\nfmt.Fprintf(outputFile, \" return \\\"%s.%s\\\"\\n\", *fullPkg, ts.Name.Name)\nfmt.Fprintf(outputFile, \"}\\n\\n\")\n- fmt.Fprintf(outputFile, \"func (%s *%s) StateFields() []string {\\n\", letter, ts.Name.Name)\n+ fmt.Fprintf(outputFile, \"func (%s *%s) StateFields() []string {\\n\", recv, ts.Name.Name)\nfmt.Fprintf(outputFile, \" return nil\\n\")\nfmt.Fprintf(outputFile, \"}\\n\\n\")\n"
}
] | Go | Apache License 2.0 | google/gvisor | Infer receiver name for stateify.
PiperOrigin-RevId: 336340035 |
259,858 | 09.10.2020 12:31:35 | 25,200 | 6229be5e48578dda3b08b6b0c3d381553e7b31ef | Minor nogo restructuring. | [
{
"change_type": "MODIFY",
"old_path": "tools/defs.bzl",
"new_path": "tools/defs.bzl",
"diff": "@@ -72,6 +72,7 @@ def go_binary(name, nogo = True, pure = False, static = False, x_defs = None, **\n)\nnogo_test(\nname = name + \"_nogo\",\n+ srcs = kwargs.get(\"srcs\", []),\nlibrary = \":\" + name + \"_nogo_library\",\n)\n@@ -203,6 +204,7 @@ def go_library(name, srcs, deps = [], imports = [], stateify = True, marshal = F\nif nogo:\nnogo_test(\nname = name + \"_nogo\",\n+ srcs = all_srcs,\nlibrary = \":\" + name,\n)\n@@ -239,6 +241,7 @@ def go_test(name, nogo = True, **kwargs):\nif nogo:\nnogo_test(\nname = name + \"_nogo\",\n+ srcs = kwargs.get(\"srcs\", []),\nlibrary = \":\" + name,\n)\n"
},
{
"change_type": "MODIFY",
"old_path": "tools/nogo/BUILD",
"new_path": "tools/nogo/BUILD",
"diff": "@@ -13,6 +13,12 @@ nogo_stdlib(\nvisibility = [\"//visibility:public\"],\n)\n+sh_binary(\n+ name = \"gentest\",\n+ srcs = [\"gentest.sh\"],\n+ visibility = [\"//visibility:public\"],\n+)\n+\ngo_library(\nname = \"nogo\",\nsrcs = [\n"
},
{
"change_type": "MODIFY",
"old_path": "tools/nogo/defs.bzl",
"new_path": "tools/nogo/defs.bzl",
"diff": "@@ -275,36 +275,19 @@ nogo_aspect = go_rule(\ndef _nogo_test_impl(ctx):\n\"\"\"Check nogo findings.\"\"\"\n- # Build a runner that checks for the existence of the facts file. Note that\n- # the actual build will fail in the case of a broken analysis. We things\n- # this way so that any test applied is effectively pushed down to all\n- # upstream dependencies through the aspect.\n- inputs = []\n- findings = []\n- runner = ctx.actions.declare_file(\"%s-executer\" % ctx.label.name)\n- runner_content = [\"#!/bin/bash\"]\n- for dep in ctx.attr.deps:\n- # Extract the findings.\n- info = dep[NogoInfo]\n- inputs.append(info.findings)\n- findings.append(info.findings)\n-\n- # Include all source files, transitively. This will make this target\n- # \"directly affected\" for the purpose of build analysis.\n- inputs += info.srcs\n-\n- # If there are findings, dump them and fail.\n- runner_content.append(\"if [[ -s \\\"%s\\\" ]]; then cat \\\"%s\\\" && exit 1; fi\" % (\n- info.findings.short_path,\n- info.findings.short_path,\n- ))\n-\n- # Otherwise, draw a sweet unicode checkmark with the package name (in green).\n- runner_content.append(\"echo -e \\\"\\\\033[0;32m\\\\xE2\\\\x9C\\\\x94\\\\033[0;31m\\\\033[0m %s\\\"\" % info.importpath)\n- runner_content.append(\"exit 0\\n\")\n- ctx.actions.write(runner, \"\\n\".join(runner_content), is_executable = True)\n+ # Build a runner that checks the facts files.\n+ findings = [dep[NogoInfo].findings for dep in ctx.attr.deps]\n+ runner = ctx.actions.declare_file(ctx.label.name)\n+ ctx.actions.run(\n+ inputs = findings + ctx.files.srcs,\n+ outputs = [runner],\n+ tools = depset(ctx.files._gentest),\n+ executable = ctx.files._gentest[0],\n+ mnemonic = \"Gentest\",\n+ progress_message = \"Generating %s\" % ctx.label,\n+ arguments = [runner.path] + [f.path for f in findings],\n+ )\nreturn [DefaultInfo(\n- runfiles = ctx.runfiles(files = inputs),\nexecutable = runner,\n)]\n@@ -313,14 +296,19 @@ _nogo_test = rule(\nattrs = {\n# deps should have only a single element.\n\"deps\": attr.label_list(aspects = [nogo_aspect]),\n+ # srcs exist here only to ensure that this target is\n+ # directly affected by changes to the source files.\n+ \"srcs\": attr.label_list(allow_files = True),\n+ \"_gentest\": attr.label(default = \"//tools/nogo:gentest\"),\n},\ntest = True,\n)\n-def nogo_test(name, library, **kwargs):\n+def nogo_test(name, srcs, library, **kwargs):\ntags = kwargs.pop(\"tags\", []) + [\"nogo\"]\n_nogo_test(\nname = name,\n+ srcs = srcs,\ndeps = [library],\ntags = tags,\n**kwargs\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "tools/nogo/gentest.sh",
"diff": "+#!/bin/bash\n+# Copyright 2019 The gVisor Authors.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+set -euo pipefail\n+\n+if [[ \"$#\" -lt 2 ]]; then\n+ echo \"usage: $0 <output> <findings...>\"\n+ exit 2\n+fi\n+declare violations=0\n+declare output=$1\n+shift\n+\n+# Start the script.\n+echo \"#!/bin/sh\" > \"${output}\"\n+\n+# Read a list of findings files.\n+declare filename\n+declare line\n+for filename in \"$@\"; do\n+ if [[ -z \"${filename}\" ]]; then\n+ continue\n+ fi\n+ while read -r line; do\n+ violations=$((${violations}+1));\n+ echo \"echo -e '\\\\033[0;31m${line}\\\\033[0;31m\\\\033[0m'\" >> \"${output}\"\n+ done < \"${filename}\"\n+done\n+\n+# Show violations.\n+if [[ \"${violations}\" -eq 0 ]]; then\n+ echo \"echo -e '\\\\033[0;32mPASS\\\\033[0;31m\\\\033[0m'\" >> \"${output}\"\n+else\n+ echo \"exit 1\" >> \"${output}\"\n+fi\n"
}
] | Go | Apache License 2.0 | google/gvisor | Minor nogo restructuring.
PiperOrigin-RevId: 336343819 |
259,898 | 09.10.2020 13:05:18 | 25,200 | 46e168b5a00bd85f3739bac6f185c3bdc39dfa37 | Set expect_failure flags on tests that currently fail on fuchsia | [
{
"change_type": "MODIFY",
"old_path": "test/packetimpact/tests/ipv4_id_uniqueness_test.go",
"new_path": "test/packetimpact/tests/ipv4_id_uniqueness_test.go",
"diff": "@@ -88,7 +88,8 @@ func TestIPv4RetransmitIdentificationUniqueness(t *testing.T) {\n// this test. Once the socket option is supported, the following call\n// can be changed to simply assert success.\nret, errno := dut.SetSockOptIntWithErrno(context.Background(), t, remoteFD, unix.IPPROTO_IP, linux.IP_MTU_DISCOVER, linux.IP_PMTUDISC_DONT)\n- if ret == -1 && errno != unix.ENOTSUP {\n+ // Fuchsia will return ENOPROTOPT errno.\n+ if ret == -1 && errno != unix.ENOPROTOOPT {\nt.Fatalf(\"failed to set IP_MTU_DISCOVER socket option to IP_PMTUDISC_DONT: %s\", errno)\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Set expect_failure flags on tests that currently fail on fuchsia
PiperOrigin-RevId: 336350318 |
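For reference, a small stand-alone Go sketch of the socket option the test manipulates, using golang.org/x/sys/unix on Linux; the error handling is only illustrative:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	fd, err := unix.Socket(unix.AF_INET, unix.SOCK_DGRAM, 0)
	if err != nil {
		fmt.Println("socket:", err)
		return
	}
	defer unix.Close(fd)
	// Disable path-MTU discovery so the DF bit is not set; stacks that do not
	// implement the option (the test above tolerates ENOPROTOOPT) reject this.
	if err := unix.SetsockoptInt(fd, unix.IPPROTO_IP, unix.IP_MTU_DISCOVER, unix.IP_PMTUDISC_DONT); err != nil {
		fmt.Println("setsockopt:", err)
		return
	}
	fmt.Println("PMTU discovery disabled")
}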
259,853 | 09.10.2020 14:32:05 | 25,200 | a0ffc84adfe345e52a249bd1bac9c9f883bf0fe7 | platform/kvm: remove the unused field | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/ring0/defs_amd64.go",
"new_path": "pkg/sentry/platform/ring0/defs_amd64.go",
"diff": "@@ -92,7 +92,6 @@ type kernelEntry struct {\n// scratch space for temporary usage.\nscratch0 uint64\n- scratch1 uint64\n// stackTop is the top of the stack.\nstackTop uint64\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/ring0/offsets_amd64.go",
"new_path": "pkg/sentry/platform/ring0/offsets_amd64.go",
"diff": "@@ -38,7 +38,6 @@ func Emit(w io.Writer) {\ne := &kernelEntry{}\nfmt.Fprintf(w, \"\\n// CPU entry offsets.\\n\")\nfmt.Fprintf(w, \"#define ENTRY_SCRATCH0 0x%02x\\n\", reflect.ValueOf(&e.scratch0).Pointer()-reflect.ValueOf(e).Pointer())\n- fmt.Fprintf(w, \"#define ENTRY_SCRATCH1 0x%02x\\n\", reflect.ValueOf(&e.scratch1).Pointer()-reflect.ValueOf(e).Pointer())\nfmt.Fprintf(w, \"#define ENTRY_STACK_TOP 0x%02x\\n\", reflect.ValueOf(&e.stackTop).Pointer()-reflect.ValueOf(e).Pointer())\nfmt.Fprintf(w, \"#define ENTRY_CPU_SELF 0x%02x\\n\", reflect.ValueOf(&e.cpuSelf).Pointer()-reflect.ValueOf(e).Pointer())\nfmt.Fprintf(w, \"#define ENTRY_KERNEL_CR3 0x%02x\\n\", reflect.ValueOf(&e.kernelCR3).Pointer()-reflect.ValueOf(e).Pointer())\n"
}
] | Go | Apache License 2.0 | google/gvisor | platform/kvm: remove the unused field
PiperOrigin-RevId: 336366624 |
259,891 | 09.10.2020 17:09:39 | 25,200 | 79a5910c04ed18901f755588003ca62d0646b763 | Add gvisor webhook configuration | [
{
"change_type": "ADD",
"old_path": null,
"new_path": "test/kubernetes/gvisor-injection-admission-webhook.yaml",
"diff": "+# Copyright 2020 The gVisor Authors.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+---\n+apiVersion: v1\n+kind: Namespace\n+metadata:\n+ name: e2e\n+ labels:\n+ name: e2e\n+---\n+apiVersion: v1\n+kind: ServiceAccount\n+metadata:\n+ name: gvisor-injection-admission-webhook\n+ namespace: e2e\n+---\n+apiVersion: rbac.authorization.k8s.io/v1\n+kind: ClusterRole\n+metadata:\n+ name: gvisor-injection-admission-webhook\n+rules:\n+- apiGroups: [ admissionregistration.k8s.io ]\n+ resources: [ mutatingwebhookconfigurations ]\n+ verbs: [ create ]\n+---\n+apiVersion: rbac.authorization.k8s.io/v1\n+kind: ClusterRoleBinding\n+metadata:\n+ name: gvisor-injection-admission-webhook\n+ namespace: e2e\n+roleRef:\n+ apiGroup: rbac.authorization.k8s.io\n+ kind: ClusterRole\n+ name: gvisor-injection-admission-webhook\n+subjects:\n+- kind: ServiceAccount\n+ name: gvisor-injection-admission-webhook\n+ namespace: e2e\n+---\n+apiVersion: apps/v1\n+kind: Deployment\n+metadata:\n+ name: gvisor-injection-admission-webhook\n+ namespace: e2e\n+ labels:\n+ app: gvisor-injection-admission-webhook\n+spec:\n+ replicas: 1\n+ selector:\n+ matchLabels:\n+ app: gvisor-injection-admission-webhook\n+ template:\n+ metadata:\n+ labels:\n+ app: gvisor-injection-admission-webhook\n+ spec:\n+ containers:\n+ - name: webhook\n+ image: gcr.io/gke-gvisor/gvisor-injection-admission-webhook:54ce9bd\n+ args:\n+ - --log-level=debug\n+ ports:\n+ - containerPort: 8443\n+ serviceAccountName: gvisor-injection-admission-webhook\n+---\n+kind: Service\n+apiVersion: v1\n+metadata:\n+ name: gvisor-injection-admission-webhook\n+ namespace: e2e\n+spec:\n+ selector:\n+ app: gvisor-injection-admission-webhook\n+ ports:\n+ - protocol: TCP\n+ port: 443\n+ targetPort: 8443\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add gvisor webhook configuration
PiperOrigin-RevId: 336393190 |
259,896 | 09.10.2020 17:45:23 | 25,200 | d75fe7660a61a454ece9472658eac609b3bf61e6 | RACK: Detect packet reordering.
RACK detects packet reordering by checking whether the sender received an
ACK for a packet whose sequence number is lower than that of already
acknowledged packets. | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/endpoint.go",
"new_path": "pkg/tcpip/transport/tcp/endpoint.go",
"diff": "@@ -3013,6 +3013,7 @@ func (e *endpoint) completeState() stack.TCPEndpointState {\nEndSequence: rc.endSequence,\nFACK: rc.fack,\nRTT: rc.rtt,\n+ Reord: rc.reorderSeen,\n}\nreturn s\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/rack.go",
"new_path": "pkg/tcpip/transport/tcp/rack.go",
"diff": "@@ -29,12 +29,12 @@ import (\n//\n// +stateify savable\ntype rackControl struct {\n- // xmitTime is the latest transmission timestamp of rackControl.seg.\n- xmitTime time.Time `state:\".(unixTime)\"`\n-\n// endSequence is the ending TCP sequence number of rackControl.seg.\nendSequence seqnum.Value\n+ // dsack indicates if the connection has seen a DSACK.\n+ dsack bool\n+\n// fack is the highest selectively or cumulatively acknowledged\n// sequence.\nfack seqnum.Value\n@@ -47,11 +47,18 @@ type rackControl struct {\n// acknowledged) that was not marked invalid as a possible spurious\n// retransmission.\nrtt time.Duration\n+\n+ // reorderSeen indicates if reordering has been detected on this\n+ // connection.\n+ reorderSeen bool\n+\n+ // xmitTime is the latest transmission timestamp of rackControl.seg.\n+ xmitTime time.Time `state:\".(unixTime)\"`\n}\n-// Update will update the RACK related fields when an ACK has been received.\n+// update will update the RACK related fields when an ACK has been received.\n// See: https://tools.ietf.org/html/draft-ietf-tcpm-rack-08#section-7.2\n-func (rc *rackControl) Update(seg *segment, ackSeg *segment, offset uint32) {\n+func (rc *rackControl) update(seg *segment, ackSeg *segment, offset uint32) {\nrtt := time.Now().Sub(seg.xmitTime)\n// If the ACK is for a retransmitted packet, do not update if it is a\n@@ -92,3 +99,26 @@ func (rc *rackControl) Update(seg *segment, ackSeg *segment, offset uint32) {\nrc.endSequence = endSeq\n}\n}\n+\n+// detectReorder detects if packet reordering has been observed.\n+// See: https://tools.ietf.org/html/draft-ietf-tcpm-rack-08#section-7.2\n+// * Step 3: Detect data segment reordering.\n+// To detect reordering, the sender looks for original data segments being\n+// delivered out of order. To detect such cases, the sender tracks the\n+// highest sequence selectively or cumulatively acknowledged in the RACK.fack\n+// variable. The name \"fack\" stands for the most \"Forward ACK\" (this term is\n+// adopted from [FACK]). If a never retransmitted segment that's below\n+// RACK.fack is (selectively or cumulatively) acknowledged, it has been\n+// delivered out of order. The sender sets RACK.reord to TRUE if such segment\n+// is identified.\n+func (rc *rackControl) detectReorder(seg *segment) {\n+ endSeq := seg.sequenceNumber.Add(seqnum.Size(seg.data.Size()))\n+ if rc.fack.LessThan(endSeq) {\n+ rc.fack = endSeq\n+ return\n+ }\n+\n+ if endSeq.LessThan(rc.fack) && seg.xmitCount == 1 {\n+ rc.reorderSeen = true\n+ }\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/segment.go",
"new_path": "pkg/tcpip/transport/tcp/segment.go",
"diff": "@@ -71,6 +71,9 @@ type segment struct {\n// xmitTime is the last transmit time of this segment.\nxmitTime time.Time `state:\".(unixTime)\"`\nxmitCount uint32\n+\n+ // acked indicates if the segment has already been SACKed.\n+ acked bool\n}\nfunc newSegment(r *stack.Route, id stack.TransportEndpointID, pkt *stack.PacketBuffer) *segment {\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/snd.go",
"new_path": "pkg/tcpip/transport/tcp/snd.go",
"diff": "@@ -17,6 +17,7 @@ package tcp\nimport (\n\"fmt\"\n\"math\"\n+ \"sort\"\n\"sync/atomic\"\n\"time\"\n@@ -263,6 +264,9 @@ func newSender(ep *endpoint, iss, irs seqnum.Value, sndWnd seqnum.Size, mss uint\nhighRxt: iss,\nrescueRxt: iss,\n},\n+ rc: rackControl{\n+ fack: iss,\n+ },\ngso: ep.gso != nil,\n}\n@@ -1274,6 +1278,39 @@ func (s *sender) checkDuplicateAck(seg *segment) (rtx bool) {\nreturn true\n}\n+// Iterate the writeList and update RACK for each segment which is newly acked\n+// either cumulatively or selectively. Loop through the segments which are\n+// sacked, and update the RACK related variables and check for reordering.\n+//\n+// See: https://tools.ietf.org/html/draft-ietf-tcpm-rack-08#section-7.2\n+// steps 2 and 3.\n+func (s *sender) walkSACK(rcvdSeg *segment) {\n+ // Sort the SACK blocks. The first block is the most recent unacked\n+ // block. The following blocks can be in arbitrary order.\n+ sackBlocks := make([]header.SACKBlock, len(rcvdSeg.parsedOptions.SACKBlocks))\n+ copy(sackBlocks, rcvdSeg.parsedOptions.SACKBlocks)\n+ sort.Slice(sackBlocks, func(i, j int) bool {\n+ return sackBlocks[j].Start.LessThan(sackBlocks[i].Start)\n+ })\n+\n+ seg := s.writeList.Front()\n+ for _, sb := range sackBlocks {\n+ // This check excludes DSACK blocks.\n+ if sb.Start.LessThanEq(rcvdSeg.ackNumber) || sb.Start.LessThanEq(s.sndUna) || s.sndNxt.LessThan(sb.End) {\n+ continue\n+ }\n+\n+ for seg != nil && seg.sequenceNumber.LessThan(sb.End) && seg.xmitCount != 0 {\n+ if sb.Start.LessThanEq(seg.sequenceNumber) && !seg.acked {\n+ s.rc.update(seg, rcvdSeg, s.ep.tsOffset)\n+ s.rc.detectReorder(seg)\n+ seg.acked = true\n+ }\n+ seg = seg.Next()\n+ }\n+ }\n+}\n+\n// handleRcvdSegment is called when a segment is received; it is responsible for\n// updating the send-related state.\nfunc (s *sender) handleRcvdSegment(rcvdSeg *segment) {\n@@ -1308,6 +1345,21 @@ func (s *sender) handleRcvdSegment(rcvdSeg *segment) {\nrcvdSeg.hasNewSACKInfo = true\n}\n}\n+\n+ // See: https://tools.ietf.org/html/draft-ietf-tcpm-rack-08\n+ // section-7.2\n+ // * Step 2: Update RACK stats.\n+ // If the ACK is not ignored as invalid, update the RACK.rtt\n+ // to be the RTT sample calculated using this ACK, and\n+ // continue. If this ACK or SACK was for the most recently\n+ // sent packet, then record the RACK.xmit_ts timestamp and\n+ // RACK.end_seq sequence implied by this ACK.\n+ // * Step 3: Detect packet reordering.\n+ // If the ACK selectively or cumulatively acknowledges an\n+ // unacknowledged and also never retransmitted sequence below\n+ // RACK.fack, then the corresponding packet has been\n+ // reordered and RACK.reord is set to TRUE.\n+ s.walkSACK(rcvdSeg)\ns.SetPipe()\n}\n@@ -1385,13 +1437,14 @@ func (s *sender) handleRcvdSegment(rcvdSeg *segment) {\n}\n// Update the RACK fields if SACK is enabled.\n- if s.ep.sackPermitted {\n- s.rc.Update(seg, rcvdSeg, s.ep.tsOffset)\n+ if s.ep.sackPermitted && !seg.acked {\n+ s.rc.update(seg, rcvdSeg, s.ep.tsOffset)\n+ s.rc.detectReorder(seg)\n}\ns.writeList.Remove(seg)\n- // if SACK is enabled then Only reduce outstanding if\n+ // If SACK is enabled then Only reduce outstanding if\n// the segment was not previously SACKED as these have\n// already been accounted for in SetPipe().\nif !s.ep.sackPermitted || !s.ep.scoreboard.IsSACKED(seg.sackBlock()) {\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/tcp_rack_test.go",
"new_path": "pkg/tcpip/transport/tcp/tcp_rack_test.go",
"diff": "@@ -21,17 +21,20 @@ import (\n\"gvisor.dev/gvisor/pkg/tcpip\"\n\"gvisor.dev/gvisor/pkg/tcpip/buffer\"\n\"gvisor.dev/gvisor/pkg/tcpip/header\"\n+ \"gvisor.dev/gvisor/pkg/tcpip/seqnum\"\n\"gvisor.dev/gvisor/pkg/tcpip/stack\"\n\"gvisor.dev/gvisor/pkg/tcpip/transport/tcp/testing/context\"\n)\n+const (\n+ maxPayload = 10\n+ tsOptionSize = 12\n+ maxTCPOptionSize = 40\n+)\n+\n// TestRACKUpdate tests the RACK related fields are updated when an ACK is\n// received on a SACK enabled connection.\nfunc TestRACKUpdate(t *testing.T) {\n- const maxPayload = 10\n- const tsOptionSize = 12\n- const maxTCPOptionSize = 40\n-\nc := context.New(t, uint32(header.TCPMinimumSize+header.IPv4MinimumSize+maxTCPOptionSize+maxPayload))\ndefer c.Cleanup()\n@@ -49,7 +52,7 @@ func TestRACKUpdate(t *testing.T) {\n}\nif state.Sender.RACKState.RTT == 0 {\n- t.Fatalf(\"RACK RTT failed to update when an ACK is received\")\n+ t.Fatalf(\"RACK RTT failed to update when an ACK is received, got RACKState.RTT == 0 want != 0\")\n}\n})\nsetStackSACKPermitted(t, c, true)\n@@ -69,6 +72,66 @@ func TestRACKUpdate(t *testing.T) {\nbytesRead := 0\nc.ReceiveAndCheckPacketWithOptions(data, bytesRead, maxPayload, tsOptionSize)\nbytesRead += maxPayload\n- c.SendAck(790, bytesRead)\n+ c.SendAck(seqnum.Value(context.TestInitialSequenceNumber).Add(1), bytesRead)\ntime.Sleep(200 * time.Millisecond)\n}\n+\n+// TestRACKDetectReorder tests that RACK detects packet reordering.\n+func TestRACKDetectReorder(t *testing.T) {\n+ c := context.New(t, uint32(header.TCPMinimumSize+header.IPv4MinimumSize+maxTCPOptionSize+maxPayload))\n+ defer c.Cleanup()\n+\n+ const ackNum = 2\n+\n+ var n int\n+ ch := make(chan struct{})\n+ c.Stack().AddTCPProbe(func(state stack.TCPEndpointState) {\n+ gotSeq := state.Sender.RACKState.FACK\n+ wantSeq := state.Sender.SndNxt\n+ // FACK should be updated to the highest ending sequence number of the\n+ // segment acknowledged most recently.\n+ if !gotSeq.LessThanEq(wantSeq) || gotSeq.LessThan(wantSeq) {\n+ t.Fatalf(\"RACK FACK failed to update, got: %v, but want: %v\", gotSeq, wantSeq)\n+ }\n+\n+ n++\n+ if n < ackNum {\n+ if state.Sender.RACKState.Reord {\n+ t.Fatalf(\"RACK reorder detected when there is no reordering\")\n+ }\n+ return\n+ }\n+\n+ if state.Sender.RACKState.Reord == false {\n+ t.Fatalf(\"RACK reorder detection failed\")\n+ }\n+ close(ch)\n+ })\n+ setStackSACKPermitted(t, c, true)\n+ createConnectedWithSACKAndTS(c)\n+ data := buffer.NewView(ackNum * maxPayload)\n+ for i := range data {\n+ data[i] = byte(i)\n+ }\n+\n+ // Write the data.\n+ if _, _, err := c.EP.Write(tcpip.SlicePayload(data), tcpip.WriteOptions{}); err != nil {\n+ t.Fatalf(\"Write failed: %s\", err)\n+ }\n+\n+ bytesRead := 0\n+ for i := 0; i < ackNum; i++ {\n+ c.ReceiveAndCheckPacketWithOptions(data, bytesRead, maxPayload, tsOptionSize)\n+ bytesRead += maxPayload\n+ }\n+\n+ start := c.IRS.Add(maxPayload + 1)\n+ end := start.Add(maxPayload)\n+ seq := seqnum.Value(context.TestInitialSequenceNumber).Add(1)\n+ c.SendAckWithSACK(seq, 0, []header.SACKBlock{{start, end}})\n+ c.SendAck(seq, bytesRead)\n+\n+ // Wait for the probe function to finish processing the ACK before the\n+ // test completes.\n+ <-ch\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/testing/context/context.go",
"new_path": "pkg/tcpip/transport/tcp/testing/context/context.go",
"diff": "@@ -68,9 +68,9 @@ const (\n// V4MappedWildcardAddr is the mapped v6 representation of 0.0.0.0.\nV4MappedWildcardAddr = \"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\xff\\xff\\x00\\x00\\x00\\x00\"\n- // testInitialSequenceNumber is the initial sequence number sent in packets that\n+ // TestInitialSequenceNumber is the initial sequence number sent in packets that\n// are sent in response to a SYN or in the initial SYN sent to the stack.\n- testInitialSequenceNumber = 789\n+ TestInitialSequenceNumber = 789\n)\n// StackAddrWithPrefix is StackAddr with its associated prefix length.\n@@ -505,7 +505,7 @@ func (c *Context) ReceiveAndCheckPacketWithOptions(data []byte, offset, size, op\nchecker.TCP(\nchecker.DstPort(TestPort),\nchecker.TCPSeqNum(uint32(c.IRS.Add(seqnum.Size(1+offset)))),\n- checker.TCPAckNum(uint32(seqnum.Value(testInitialSequenceNumber).Add(1))),\n+ checker.TCPAckNum(uint32(seqnum.Value(TestInitialSequenceNumber).Add(1))),\nchecker.TCPFlagsMatch(header.TCPFlagAck, ^uint8(header.TCPFlagPsh)),\n),\n)\n@@ -532,7 +532,7 @@ func (c *Context) ReceiveNonBlockingAndCheckPacket(data []byte, offset, size int\nchecker.TCP(\nchecker.DstPort(TestPort),\nchecker.TCPSeqNum(uint32(c.IRS.Add(seqnum.Size(1+offset)))),\n- checker.TCPAckNum(uint32(seqnum.Value(testInitialSequenceNumber).Add(1))),\n+ checker.TCPAckNum(uint32(seqnum.Value(TestInitialSequenceNumber).Add(1))),\nchecker.TCPFlagsMatch(header.TCPFlagAck, ^uint8(header.TCPFlagPsh)),\n),\n)\n@@ -912,7 +912,7 @@ func (c *Context) CreateConnectedWithOptions(wantOptions header.TCPSynOptions) *\n// Build SYN-ACK.\nc.IRS = seqnum.Value(tcpSeg.SequenceNumber())\n- iss := seqnum.Value(testInitialSequenceNumber)\n+ iss := seqnum.Value(TestInitialSequenceNumber)\nc.SendPacket(nil, &Headers{\nSrcPort: tcpSeg.DestinationPort(),\nDstPort: tcpSeg.SourcePort(),\n@@ -1084,7 +1084,7 @@ func (c *Context) PassiveConnectWithOptions(maxPayload, wndScale int, synOptions\noffset += paddingToAdd\n// Send a SYN request.\n- iss := seqnum.Value(testInitialSequenceNumber)\n+ iss := seqnum.Value(TestInitialSequenceNumber)\nc.SendPacket(nil, &Headers{\nSrcPort: TestPort,\nDstPort: StackPort,\n"
}
] | Go | Apache License 2.0 | google/gvisor | RACK: Detect packet reordering.
RACK detects packet reordering by checking if the sender received ACK for
the packet which has the sequence number less than the already acknowledged
packets.
PiperOrigin-RevId: 336397526 |
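The record above describes the reordering rule only in prose; the following is a minimal, self-contained Go sketch of that rule, not the endpoint code from the diff. It uses plain uint32 comparisons instead of the wraparound-aware seqnum package, and the names reorderDetector and onSegmentAcked are invented for illustration.

```go
package main

import "fmt"

// reorderDetector is a simplified stand-in for the per-connection RACK
// state: the highest sequence acknowledged so far ("fack") and whether
// reordering has been observed.
type reorderDetector struct {
	fack  uint32 // highest end sequence acked so far
	reord bool   // set once out-of-order delivery is seen
}

// onSegmentAcked applies the rule from the commit message: if a segment
// that was never retransmitted is acknowledged and it ends below the
// current fack, an earlier-sent packet was delivered after a later one,
// so reordering happened.
func (r *reorderDetector) onSegmentAcked(endSeq uint32, xmitCount int) {
	if endSeq > r.fack {
		r.fack = endSeq // forward ACK point advances, no reordering
		return
	}
	if endSeq < r.fack && xmitCount == 1 {
		r.reord = true
	}
}

func main() {
	var r reorderDetector
	r.onSegmentAcked(3000, 1) // later segment acked first
	r.onSegmentAcked(2000, 1) // earlier, never-retransmitted segment acked after
	fmt.Println("reordering detected:", r.reord) // true
}
```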
260,021 | 10.10.2020 16:38:34 | -28,800 | 2ae97b27aa385ddb51c234ec31fb4c4869c0088a | arm64: set DZE bit so that EL0 can use DC ZVA | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/ring0/entry_arm64.s",
"new_path": "pkg/sentry/platform/ring0/entry_arm64.s",
"diff": "#define SCTLR_M 1 << 0\n#define SCTLR_C 1 << 2\n#define SCTLR_I 1 << 12\n+#define SCTLR_DZE 1 << 14\n#define SCTLR_UCT 1 << 15\n#define SCTLR_UCI 1 << 26\n-#define SCTLR_EL1_DEFAULT (SCTLR_M | SCTLR_C | SCTLR_I | SCTLR_UCT | SCTLR_UCI)\n+#define SCTLR_EL1_DEFAULT (SCTLR_M | SCTLR_C | SCTLR_I | SCTLR_UCT | SCTLR_UCI | SCTLR_DZE)\n// cntkctl_el1: counter-timer kernel control register el1.\n#define CNTKCTL_EL0PCTEN 1 << 0\n"
}
] | Go | Apache License 2.0 | google/gvisor | arm64: set DZE bit to make EL0 can use DC ZVA
Signed-off-by: Min Le <[email protected]> |
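As a rough illustration of how the SCTLR_EL1 default mask in the diff above is composed, here are the same bit values written as Go constants. The real code uses assembly #define macros; this standalone snippet exists only to show the flag arithmetic and how adding the DZE bit changes the default.

```go
package main

import "fmt"

// The control register value is built by OR-ing single-bit flags; adding
// DZE to the default mask is what allows user space (EL0) to execute the
// DC ZVA instruction.
const (
	sctlrM   = 1 << 0
	sctlrC   = 1 << 2
	sctlrI   = 1 << 12
	sctlrDZE = 1 << 14
	sctlrUCT = 1 << 15
	sctlrUCI = 1 << 26

	sctlrEL1Default = sctlrM | sctlrC | sctlrI | sctlrUCT | sctlrUCI | sctlrDZE
)

func main() {
	fmt.Printf("SCTLR_EL1 default: %#x (DZE set: %v)\n",
		sctlrEL1Default, sctlrEL1Default&sctlrDZE != 0)
}
```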
259,960 | 11.10.2020 17:41:29 | -28,800 | d4413c31bd121bee66e42648360b1fc1ec7c04e1 | Assign ep.effectiveNetProtos in UDP forwarder's CreateEndpoint | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/udp/forwarder.go",
"new_path": "pkg/tcpip/transport/udp/forwarder.go",
"diff": "@@ -81,6 +81,7 @@ func (r *ForwarderRequest) CreateEndpoint(queue *waiter.Queue) (tcpip.Endpoint,\nep.ID = r.id\nep.route = r.route.Clone()\nep.dstPort = r.id.RemotePort\n+ ep.effectiveNetProtos = []tcpip.NetworkProtocolNumber{r.route.NetProto}\nep.RegisterNICID = r.route.NICID()\nep.boundPortFlags = ep.portFlags\n"
}
] | Go | Apache License 2.0 | google/gvisor | Assign ep.effectiveNetProtos in UDP forwarder's CreateEndpoint |
259,860 | 12.10.2020 10:39:03 | 25,200 | d861cd5f14bc42b32eeac20c444a685f1d9748f7 | [vfs2] Don't leak disconnected mounts. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/kernel.go",
"new_path": "pkg/sentry/kernel/kernel.go",
"diff": "@@ -1738,3 +1738,18 @@ func (k *Kernel) ShmMount() *vfs.Mount {\nfunc (k *Kernel) SocketMount() *vfs.Mount {\nreturn k.socketMount\n}\n+\n+// Release releases resources owned by k.\n+//\n+// Precondition: This should only be called after the kernel is fully\n+// initialized, e.g. after k.Start() has been called.\n+func (k *Kernel) Release() {\n+ if VFS2Enabled {\n+ ctx := k.SupervisorContext()\n+ k.hostMount.DecRef(ctx)\n+ k.pipeMount.DecRef(ctx)\n+ k.shmMount.DecRef(ctx)\n+ k.socketMount.DecRef(ctx)\n+ k.vfs.Release(ctx)\n+ }\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/vfs/mount.go",
"new_path": "pkg/sentry/vfs/mount.go",
"diff": "@@ -46,8 +46,9 @@ import (\n// +stateify savable\ntype Mount struct {\n// vfs, fs, root are immutable. References are held on fs and root.\n+ // Note that for a disconnected mount, root may be nil.\n//\n- // Invariant: root belongs to fs.\n+ // Invariant: if not nil, root belongs to fs.\nvfs *VirtualFilesystem\nfs *Filesystem\nroot *Dentry\n@@ -498,7 +499,9 @@ func (mnt *Mount) DecRef(ctx context.Context) {\nmnt.vfs.mounts.seq.EndWrite()\nmnt.vfs.mountMu.Unlock()\n}\n+ if mnt.root != nil {\nmnt.root.DecRef(ctx)\n+ }\nmnt.fs.DecRef(ctx)\nif vd.Ok() {\nvd.DecRef(ctx)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/vfs/vfs.go",
"new_path": "pkg/sentry/vfs/vfs.go",
"diff": "@@ -122,6 +122,13 @@ type VirtualFilesystem struct {\nfilesystems map[*Filesystem]struct{}\n}\n+// Release drops references on filesystem objects held by vfs.\n+//\n+// Precondition: This must be called after VFS.Init() has succeeded.\n+func (vfs *VirtualFilesystem) Release(ctx context.Context) {\n+ vfs.anonMount.DecRef(ctx)\n+}\n+\n// Init initializes a new VirtualFilesystem with no mounts or FilesystemTypes.\nfunc (vfs *VirtualFilesystem) Init(ctx context.Context) error {\nif vfs.mountpoints != nil {\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/boot/loader.go",
"new_path": "runsc/boot/loader.go",
"diff": "@@ -472,9 +472,13 @@ func (l *Loader) Destroy() {\n}\nl.watchdog.Stop()\n+ // Release all kernel resources. This is only safe after we can no longer\n+ // save/restore.\n+ l.k.Release()\n+\n// In the success case, stdioFDs and goferFDs will only contain\n// released/closed FDs that ownership has been passed over to host FDs and\n- // gofer sessions. Close them here in case on failure.\n+ // gofer sessions. Close them here in case of failure.\nfor _, fd := range l.root.stdioFDs {\n_ = fd.Close()\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | [vfs2] Don't leak disconnected mounts.
PiperOrigin-RevId: 336694658 |
260,001 | 12.10.2020 17:28:58 | 25,200 | 4885931ac342e033b39ba9645b8e6a584f4d9844 | Change verity mu to be per file system
verity Mu should be per file system instead of global, so that enabling
and verifying in different file systems won't block each other.
Also Lock verity Mu in PRead. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/verity/filesystem.go",
"new_path": "pkg/sentry/fsimpl/verity/filesystem.go",
"diff": "@@ -174,8 +174,8 @@ func (fs *filesystem) verifyChild(ctx context.Context, parent *dentry, child *de\nreturn nil, err\n}\n- verityMu.RLock()\n- defer verityMu.RUnlock()\n+ fs.verityMu.RLock()\n+ defer fs.verityMu.RUnlock()\n// Read the offset of the child from the extended attributes of the\n// corresponding Merkle tree file.\n// This is the offset of the hash for child in its parent's Merkle tree\n@@ -302,8 +302,8 @@ func (fs *filesystem) verifyStat(ctx context.Context, d *dentry, stat linux.Stat\nreturn err\n}\n- verityMu.RLock()\n- defer verityMu.RUnlock()\n+ fs.verityMu.RLock()\n+ defer fs.verityMu.RUnlock()\nfd, err := vfsObj.OpenAt(ctx, fs.creds, &vfs.PathOperation{\nRoot: d.lowerMerkleVD,\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/verity/verity.go",
"new_path": "pkg/sentry/fsimpl/verity/verity.go",
"diff": "@@ -68,11 +68,6 @@ const sizeOfStringInt32 = 10\n// flag.\nvar noCrashOnVerificationFailure bool\n-// verityMu synchronizes enabling verity files, protects files or directories\n-// from being enabled by different threads simultaneously. It also ensures that\n-// verity does not access files that are being enabled.\n-var verityMu sync.RWMutex\n-\n// FilesystemType implements vfs.FilesystemType.\n//\n// +stateify savable\n@@ -106,6 +101,17 @@ type filesystem struct {\n// to ensure consistent lock ordering between dentry.dirMu in different\n// dentries.\nrenameMu sync.RWMutex `state:\"nosave\"`\n+\n+ // verityMu synchronizes enabling verity files, protects files or\n+ // directories from being enabled by different threads simultaneously.\n+ // It also ensures that verity does not access files that are being\n+ // enabled.\n+ //\n+ // Also, the directory Merkle trees depends on the generated trees of\n+ // its children. So they shouldn't be enabled the same time. This lock\n+ // is for the whole file system to ensure that no more than one file is\n+ // enabled the same time.\n+ verityMu sync.RWMutex\n}\n// InternalFilesystemOptions may be passed as\n@@ -594,10 +600,8 @@ func (fd *fileDescription) enableVerity(ctx context.Context, uio usermem.IO) (ui\nreturn 0, syserror.EPERM\n}\n- // Lock to prevent other threads performing enable or access the file\n- // while it's being enabled.\n- verityMu.Lock()\n- defer verityMu.Unlock()\n+ fd.d.fs.verityMu.Lock()\n+ defer fd.d.fs.verityMu.Unlock()\n// In allowRuntimeEnable mode, the underlying fd and read/write fd for\n// the Merkle tree file should have all been initialized. For any file\n@@ -723,6 +727,8 @@ func (fd *fileDescription) PRead(ctx context.Context, dst usermem.IOSequence, of\nreturn fd.lowerFD.PRead(ctx, dst, offset, opts)\n}\n+ fd.d.fs.verityMu.RLock()\n+ defer fd.d.fs.verityMu.RUnlock()\n// dataSize is the size of the whole file.\ndataSize, err := fd.merkleReader.GetXattr(ctx, &vfs.GetXattrOptions{\nName: merkleSizeXattr,\n"
}
] | Go | Apache License 2.0 | google/gvisor | Change verity mu to be per file system
verity Mu should be per file system instead of global, so that enabling
and verifying in different file systems won't block each other.
Also Lock verity Mu in PRead.
PiperOrigin-RevId: 336779356 |
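A small sketch of the locking pattern the message above describes: moving a mutex from package scope onto each filesystem instance so independent mounts never contend. The fsInstance type and its methods are invented for illustration and are not the verity implementation.

```go
package main

import (
	"fmt"
	"sync"
)

// fsInstance stands in for one mounted filesystem. Keeping the RWMutex
// as a field (rather than a package-level variable) means enabling or
// verifying files on one mount never blocks the same work on another.
type fsInstance struct {
	name string
	mu   sync.RWMutex // guards enable/verify for this mount only
}

func (fs *fsInstance) enable(file string) {
	fs.mu.Lock() // exclusive: only one enable per mount at a time
	defer fs.mu.Unlock()
	fmt.Printf("%s: enabling %s\n", fs.name, file)
}

func (fs *fsInstance) verifyRead(file string) {
	fs.mu.RLock() // shared: reads proceed concurrently, but not during enable
	defer fs.mu.RUnlock()
	fmt.Printf("%s: verified read of %s\n", fs.name, file)
}

func main() {
	a := &fsInstance{name: "mountA"}
	b := &fsInstance{name: "mountB"}
	var wg sync.WaitGroup
	wg.Add(2)
	go func() { defer wg.Done(); a.enable("foo") }()
	go func() { defer wg.Done(); b.verifyRead("bar") }() // independent of mountA's lock
	wg.Wait()
}
```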
259,853 | 12.10.2020 17:55:37 | 25,200 | 10ca12b3d089fa16f67d886ed6dc2cacd2468d0b | gvisor/test: Set nogotsan for native tests
Tests are written in C++ and there is no reason to run them with gotsan without
gVisor. | [
{
"change_type": "MODIFY",
"old_path": "test/runner/defs.bzl",
"new_path": "test/runner/defs.bzl",
"diff": "@@ -102,6 +102,10 @@ def _syscall_test(\n# Disable off-host networking.\ntags.append(\"requires-net:loopback\")\n+ # gotsan makes sense only if tests are running in gVisor.\n+ if platform == \"native\":\n+ tags.append(\"nogotsan\")\n+\nrunner_args = [\n# Arguments are passed directly to runner binary.\n\"--platform=\" + platform,\n"
}
] | Go | Apache License 2.0 | google/gvisor | gvisor/test: Set nogotsan for native tests
Tests are written in C++ and there is no reason to run them with gotsan without
gVisor.
PiperOrigin-RevId: 336783276 |
259,860 | 13.10.2020 00:12:28 | 25,200 | fc7df53222e56fdce7dee002f1fb4c332de616af | Don't leak VDSO mappings. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/kernel.go",
"new_path": "pkg/sentry/kernel/kernel.go",
"diff": "@@ -1744,12 +1744,14 @@ func (k *Kernel) SocketMount() *vfs.Mount {\n// Precondition: This should only be called after the kernel is fully\n// initialized, e.g. after k.Start() has been called.\nfunc (k *Kernel) Release() {\n- if VFS2Enabled {\nctx := k.SupervisorContext()\n+ if VFS2Enabled {\nk.hostMount.DecRef(ctx)\nk.pipeMount.DecRef(ctx)\nk.shmMount.DecRef(ctx)\nk.socketMount.DecRef(ctx)\nk.vfs.Release(ctx)\n}\n+ k.timekeeper.Destroy()\n+ k.vdso.Release(ctx)\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/loader/vdso.go",
"new_path": "pkg/sentry/loader/vdso.go",
"diff": "@@ -380,3 +380,9 @@ func loadVDSO(ctx context.Context, m *mm.MemoryManager, v *VDSO, bin loadedELF)\nreturn vdsoAddr, nil\n}\n+\n+// Release drops references on mappings held by v.\n+func (v *VDSO) Release(ctx context.Context) {\n+ v.ParamPage.DecRef(ctx)\n+ v.vdso.DecRef(ctx)\n+}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Don't leak VDSO mappings.
PiperOrigin-RevId: 336822021 |
259,860 | 13.10.2020 01:52:47 | 25,200 | 60f159b5580da98ab8261f4e694692c9412a2fae | [vfs2] Destroy all tmpfs files when the filesystem is released.
In addition to fixing reference leaks, this change also releases memory used
by regular tmpfs files once the containing filesystem is released. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/tmpfs/tmpfs.go",
"new_path": "pkg/sentry/fsimpl/tmpfs/tmpfs.go",
"diff": "@@ -74,6 +74,8 @@ type filesystem struct {\nmu sync.RWMutex `state:\"nosave\"`\nnextInoMinusOne uint64 // accessed using atomic memory operations\n+\n+ root *dentry\n}\n// Name implements vfs.FilesystemType.Name.\n@@ -197,6 +199,7 @@ func (fstype FilesystemType) GetFilesystem(ctx context.Context, vfsObj *vfs.Virt\nfs.vfsfs.DecRef(ctx)\nreturn nil, nil, fmt.Errorf(\"invalid tmpfs root file type: %#o\", rootFileType)\n}\n+ fs.root = root\nreturn &fs.vfsfs, &root.vfsd, nil\n}\n@@ -208,6 +211,37 @@ func NewFilesystem(ctx context.Context, vfsObj *vfs.VirtualFilesystem, creds *au\n// Release implements vfs.FilesystemImpl.Release.\nfunc (fs *filesystem) Release(ctx context.Context) {\nfs.vfsfs.VirtualFilesystem().PutAnonBlockDevMinor(fs.devMinor)\n+ fs.mu.Lock()\n+ if fs.root.inode.isDir() {\n+ fs.root.releaseChildrenLocked(ctx)\n+ }\n+ fs.mu.Unlock()\n+}\n+\n+// releaseChildrenLocked is called on the mount point by filesystem.Release() to\n+// destroy all objects in the mount. It performs a depth-first walk of the\n+// filesystem and \"unlinks\" everything by decrementing link counts\n+// appropriately. There should be no open file descriptors when this is called,\n+// so each inode should only have one outstanding reference that is removed once\n+// its link count hits zero.\n+//\n+// Note that we do not update filesystem state precisely while tearing down (for\n+// instance, the child maps are ignored)--we only care to remove all remaining\n+// references so that every filesystem object gets destroyed. Also note that we\n+// do not need to trigger DecRef on the mount point itself or any child mount;\n+// these are taken care of by the destructor of the enclosing MountNamespace.\n+//\n+// Precondition: filesystem.mu is held.\n+func (d *dentry) releaseChildrenLocked(ctx context.Context) {\n+ dir := d.inode.impl.(*directory)\n+ for _, child := range dir.childMap {\n+ if child.inode.isDir() {\n+ child.releaseChildrenLocked(ctx)\n+ child.inode.decLinksLocked(ctx) // link for child/.\n+ dir.inode.decLinksLocked(ctx) // link for child/..\n+ }\n+ child.inode.decLinksLocked(ctx) // link for child\n+ }\n}\n// immutable\n"
}
] | Go | Apache License 2.0 | google/gvisor | [vfs2] Destroy all tmpfs files when the filesystem is released.
In addition to fixing reference leaks, this change also releases memory used
by regular tmpfs files once the containing filesystem is released.
PiperOrigin-RevId: 336833111 |
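To make the link-count bookkeeping in the release path above concrete, here is a toy depth-first teardown over an invented node type. It only mirrors the idea that a directory loses its ".", ".." and name links during release; it is not the tmpfs code.

```go
package main

import "fmt"

// node is a toy in-memory inode: directories hold children, and every
// node carries a link count that reaches zero once fully unlinked.
type node struct {
	name     string
	links    int
	children map[string]*node
}

// releaseChildren walks the tree depth-first and drops the links that a
// mounted tree would otherwise keep alive: the child's name link in its
// parent, plus the "." and ".." links between a directory and its parent.
func releaseChildren(dir *node) {
	for _, child := range dir.children {
		if child.children != nil {
			releaseChildren(child)
			child.links-- // child's "." link
			dir.links--   // parent's link from child's ".."
		}
		child.links-- // name link in parent
		if child.links == 0 {
			fmt.Println("destroyed", child.name)
		}
	}
}

func main() {
	root := &node{name: "/", links: 2, children: map[string]*node{
		"dir": {name: "dir", links: 2, children: map[string]*node{
			"file": {name: "file", links: 1},
		}},
	}}
	releaseChildren(root)
}
```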
259,858 | 13.10.2020 10:41:54 | 25,200 | d9b32efb306444440daa89a79e4d85516ff8f340 | Avoid excessive Tgkill and wait operations.
The required states may simply not be observed by the thread running bounce, so
track guest and user generations to ensure that at least one of the desired
state transitions happens.
Fixes | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/kvm/bluepill_unsafe.go",
"new_path": "pkg/sentry/platform/kvm/bluepill_unsafe.go",
"diff": "@@ -62,6 +62,9 @@ func bluepillArchContext(context unsafe.Pointer) *arch.SignalContext64 {\n//\n//go:nosplit\nfunc bluepillGuestExit(c *vCPU, context unsafe.Pointer) {\n+ // Increment our counter.\n+ atomic.AddUint64(&c.guestExits, 1)\n+\n// Copy out registers.\nbluepillArchExit(c, bluepillArchContext(context))\n@@ -89,9 +92,6 @@ func bluepillHandler(context unsafe.Pointer) {\n// Sanitize the registers; interrupts must always be disabled.\nc := bluepillArchEnter(bluepillArchContext(context))\n- // Increment the number of switches.\n- atomic.AddUint32(&c.switches, 1)\n-\n// Mark this as guest mode.\nswitch atomic.SwapUint32(&c.state, vCPUGuest|vCPUUser) {\ncase vCPUUser: // Expected case.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/kvm/context.go",
"new_path": "pkg/sentry/platform/kvm/context.go",
"diff": "package kvm\nimport (\n+ \"sync/atomic\"\n+\npkgcontext \"gvisor.dev/gvisor/pkg/context\"\n\"gvisor.dev/gvisor/pkg/sentry/arch\"\n\"gvisor.dev/gvisor/pkg/sentry/platform\"\n@@ -75,6 +77,9 @@ func (c *context) Switch(ctx pkgcontext.Context, mm platform.MemoryManager, ac a\n// Clear the address space.\ncpu.active.set(nil)\n+ // Increment the number of user exits.\n+ atomic.AddUint64(&cpu.userExits, 1)\n+\n// Release resources.\nc.machine.Put(cpu)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/kvm/kvm_test.go",
"new_path": "pkg/sentry/platform/kvm/kvm_test.go",
"diff": "@@ -412,9 +412,9 @@ func TestWrongVCPU(t *testing.T) {\n// Basic test, one then the other.\nbluepill(c1)\nbluepill(c2)\n- if c2.switches == 0 {\n+ if c2.guestExits == 0 {\n// Don't allow the test to proceed if this fails.\n- t.Fatalf(\"wrong vCPU#2 switches: vCPU1=%+v,vCPU2=%+v\", c1, c2)\n+ t.Fatalf(\"wrong vCPU#2 exits: vCPU1=%+v,vCPU2=%+v\", c1, c2)\n}\n// Alternate vCPUs; we expect to need to trigger the\n@@ -423,11 +423,11 @@ func TestWrongVCPU(t *testing.T) {\nbluepill(c1)\nbluepill(c2)\n}\n- if count := c1.switches; count < 90 {\n- t.Errorf(\"wrong vCPU#1 switches: vCPU1=%+v,vCPU2=%+v\", c1, c2)\n+ if count := c1.guestExits; count < 90 {\n+ t.Errorf(\"wrong vCPU#1 exits: vCPU1=%+v,vCPU2=%+v\", c1, c2)\n}\n- if count := c2.switches; count < 90 {\n- t.Errorf(\"wrong vCPU#2 switches: vCPU1=%+v,vCPU2=%+v\", c1, c2)\n+ if count := c2.guestExits; count < 90 {\n+ t.Errorf(\"wrong vCPU#2 exits: vCPU1=%+v,vCPU2=%+v\", c1, c2)\n}\nreturn false\n})\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/kvm/machine.go",
"new_path": "pkg/sentry/platform/kvm/machine.go",
"diff": "@@ -103,8 +103,11 @@ type vCPU struct {\n// tid is the last set tid.\ntid uint64\n- // switches is a count of world switches (informational only).\n- switches uint32\n+ // userExits is the count of user exits.\n+ userExits uint64\n+\n+ // guestExits is the count of guest to host world switches.\n+ guestExits uint64\n// faults is a count of world faults (informational only).\nfaults uint32\n@@ -127,6 +130,7 @@ type vCPU struct {\n// vCPUArchState is the architecture-specific state.\nvCPUArchState\n+ // dieState holds state related to vCPU death.\ndieState dieState\n}\n@@ -540,6 +544,8 @@ var pid = syscall.Getpid()\n//\n// This effectively unwinds the state machine.\nfunc (c *vCPU) bounce(forceGuestExit bool) {\n+ origGuestExits := atomic.LoadUint64(&c.guestExits)\n+ origUserExits := atomic.LoadUint64(&c.userExits)\nfor {\nswitch state := atomic.LoadUint32(&c.state); state {\ncase vCPUReady, vCPUWaiter:\n@@ -595,6 +601,14 @@ func (c *vCPU) bounce(forceGuestExit bool) {\n// Should not happen: the above is exhaustive.\npanic(\"invalid state\")\n}\n+\n+ // Check if we've missed the state transition, but\n+ // we can safely return at this point in time.\n+ newGuestExits := atomic.LoadUint64(&c.guestExits)\n+ newUserExits := atomic.LoadUint64(&c.userExits)\n+ if newUserExits != origUserExits && (!forceGuestExit || newGuestExits != origGuestExits) {\n+ return\n+ }\n}\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Avoid excessive Tgkill and wait operations.
The required states may simply not be observed by the thread running bounce, so
track guest and user generations to ensure that at least one of the desired
state transitions happens.
Fixes #3532
PiperOrigin-RevId: 336908216 |
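The fix above relies on generation counters rather than directly observed states. The sketch below shows that pattern in isolation, assuming an invented worker type whose kick callback stands in for sending a signal; it is a simplification, not the machine.go bounce loop.

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// worker models a vCPU-like goroutine that counts how many times it has
// left "guest" and "user" mode. The counters let a waiter tell that a
// transition happened even if it never saw the intermediate state.
type worker struct {
	guestExits uint64
	userExits  uint64
}

// waitForExit mimics the bounce loop: record the generations up front,
// then after each (possibly missed) signal, check whether the relevant
// counter moved. One increment is proof the desired transition occurred.
func (w *worker) waitForExit(forceGuestExit bool, kick func()) {
	origGuest := atomic.LoadUint64(&w.guestExits)
	origUser := atomic.LoadUint64(&w.userExits)
	for i := 0; ; i++ {
		kick() // stand-in for Tgkill / waiting on vCPU state

		newGuest := atomic.LoadUint64(&w.guestExits)
		newUser := atomic.LoadUint64(&w.userExits)
		if newUser != origUser && (!forceGuestExit || newGuest != origGuest) {
			fmt.Println("observed transition after", i+1, "kick(s)")
			return
		}
	}
}

func main() {
	w := &worker{}
	w.waitForExit(false, func() {
		// Simulate the worker leaving guest then user mode concurrently
		// with the waiter; here we just bump the counters directly.
		atomic.AddUint64(&w.guestExits, 1)
		atomic.AddUint64(&w.userExits, 1)
	})
}
```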
259,967 | 13.10.2020 13:26:19 | 25,200 | 51913ba400ccff88be58ed12ac3f393258629a50 | Correct NA minimum size
Remove the duplicate NA size variable while I'm here.
See for the packet format. | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/header/icmpv6.go",
"new_path": "pkg/tcpip/header/icmpv6.go",
"diff": "@@ -49,11 +49,6 @@ const (\n// neighbor advertisement packet.\nICMPv6NeighborAdvertMinimumSize = ICMPv6HeaderSize + NDPNAMinimumSize\n- // ICMPv6NeighborAdvertSize is size of a neighbor advertisement\n- // including the NDP Target Link Layer option for an Ethernet\n- // address.\n- ICMPv6NeighborAdvertSize = ICMPv6HeaderSize + NDPNAMinimumSize + NDPLinkLayerAddressSize\n-\n// ICMPv6EchoMinimumSize is the minimum size of a valid echo packet.\nICMPv6EchoMinimumSize = 8\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/ipv6/icmp.go",
"new_path": "pkg/tcpip/network/ipv6/icmp.go",
"diff": "@@ -361,7 +361,7 @@ func (e *endpoint) handleICMP(r *stack.Route, pkt *stack.PacketBuffer, hasFragme\ncase header.ICMPv6NeighborAdvert:\nreceived.NeighborAdvert.Increment()\n- if !isNDPValid() || pkt.Data.Size() < header.ICMPv6NeighborAdvertSize {\n+ if !isNDPValid() || pkt.Data.Size() < header.ICMPv6NeighborAdvertMinimumSize {\nreceived.Invalid.Increment()\nreturn\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/ipv6/ipv6_test.go",
"new_path": "pkg/tcpip/network/ipv6/ipv6_test.go",
"diff": "@@ -57,8 +57,8 @@ func testReceiveICMP(t *testing.T, s *stack.Stack, e *channel.Endpoint, src, dst\nt.Helper()\n// Receive ICMP packet.\n- hdr := buffer.NewPrependable(header.IPv6MinimumSize + header.ICMPv6NeighborAdvertSize)\n- pkt := header.ICMPv6(hdr.Prepend(header.ICMPv6NeighborAdvertSize))\n+ hdr := buffer.NewPrependable(header.IPv6MinimumSize + header.ICMPv6NeighborAdvertMinimumSize)\n+ pkt := header.ICMPv6(hdr.Prepend(header.ICMPv6NeighborAdvertMinimumSize))\npkt.SetType(header.ICMPv6NeighborAdvert)\npkt.SetChecksum(header.ICMPv6Checksum(pkt, src, dst, buffer.VectorisedView{}))\npayloadLength := hdr.UsedLength()\n"
}
] | Go | Apache License 2.0 | google/gvisor | Correct NA minimum size
Remove the duplicate NA size variable while I'm here.
See https://tools.ietf.org/html/rfc4861#section-4.4 for the packet format.
PiperOrigin-RevId: 336943206 |
259,884 | 13.10.2020 18:58:01 | 25,200 | 631dd5330d438729a7a8f6e00b279386924de640 | Various website fixes
Formatting on the most recent blog post
Add a link to faq from containerd docs
Fix code in FAQ | [
{
"change_type": "MODIFY",
"old_path": "g3doc/user_guide/FAQ.md",
"new_path": "g3doc/user_guide/FAQ.md",
"diff": "@@ -111,7 +111,7 @@ Please recreate your cluster and set the `--cni-socket` option on kubeadm\ncommands. For example:\n```bash\n-kubeadm init --cni-socket=/var/run/containerd/containerd.sock` ...\n+kubeadm init --cni-socket=/var/run/containerd/containerd.sock ...\n```\nTo fix an existing cluster edit the `/var/lib/kubelet/kubeadm-flags.env` file\n"
},
{
"change_type": "MODIFY",
"old_path": "website/blog/2020-09-18-containing-a-real-vulnerability.md",
"new_path": "website/blog/2020-09-18-containing-a-real-vulnerability.md",
"diff": "@@ -48,7 +48,8 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,\npo->tp_reserve;\n} else {\nunsigned int maclen = skb_network_offset(skb);\n- // tp_reserve is unsigned int, netoff is unsigned short. Addition can overflow netoff\n+ // tp_reserve is unsigned int, netoff is unsigned short.\n+ // Addition can overflow netoff\nnetoff = TPACKET_ALIGN(po->tp_hdrlen +\n(maclen < 16 ? 16 : maclen)) +\npo->tp_reserve;\n@@ -56,11 +57,13 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,\nnetoff += sizeof(struct virtio_net_hdr);\ndo_vnet = true;\n}\n- // Attacker controls netoff and can make macoff be smaller than sizeof(struct virtio_net_hdr)\n+ // Attacker controls netoff and can make macoff be smaller\n+ // than sizeof(struct virtio_net_hdr)\nmacoff = netoff - maclen;\n}\n// ...\n- // \"macoff - sizeof(struct virtio_net_hdr)\" can be negative, resulting in a pointer before h.raw\n+ // \"macoff - sizeof(struct virtio_net_hdr)\" can be negative,\n+ // resulting in a pointer before h.raw\nif (do_vnet &&\nvirtio_net_hdr_from_skb(skb, h.raw + macoff -\nsizeof(struct virtio_net_hdr),\n"
}
] | Go | Apache License 2.0 | google/gvisor | Various website fixes
- Formatting on the most recent blog post
- Add a link to faq from containerd docs
- Fix code in FAQ
PiperOrigin-RevId: 337001738 |
259,860 | 14.10.2020 09:52:05 | 25,200 | fc1e653973c97a857582b8f3651399673343589a | Fix SCM Rights reference leaks.
Control messages should be released on Read (which ignores the control message)
or zero-byte Send. Otherwise, open fds sent through the control messages will
be leaked. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/unix/unix.go",
"new_path": "pkg/sentry/socket/unix/unix.go",
"diff": "@@ -573,13 +573,17 @@ func (s *SocketOperations) Read(ctx context.Context, _ *fs.File, dst usermem.IOS\nif dst.NumBytes() == 0 {\nreturn 0, nil\n}\n- return dst.CopyOutFrom(ctx, &EndpointReader{\n+ r := &EndpointReader{\nCtx: ctx,\nEndpoint: s.ep,\nNumRights: 0,\nPeek: false,\nFrom: nil,\n- })\n+ }\n+ n, err := dst.CopyOutFrom(ctx, r)\n+ // Drop control messages.\n+ r.Control.Release(ctx)\n+ return n, err\n}\n// RecvMsg implements the linux syscall recvmsg(2) for sockets backed by\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/unix/unix_vfs2.go",
"new_path": "pkg/sentry/socket/unix/unix_vfs2.go",
"diff": "@@ -267,13 +267,17 @@ func (s *SocketVFS2) Read(ctx context.Context, dst usermem.IOSequence, opts vfs.\nif dst.NumBytes() == 0 {\nreturn 0, nil\n}\n- return dst.CopyOutFrom(ctx, &EndpointReader{\n+ r := &EndpointReader{\nCtx: ctx,\nEndpoint: s.ep,\nNumRights: 0,\nPeek: false,\nFrom: nil,\n- })\n+ }\n+ n, err := dst.CopyOutFrom(ctx, r)\n+ // Drop control messages.\n+ r.Control.Release(ctx)\n+ return n, err\n}\n// PWrite implements vfs.FileDescriptionImpl.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/syscalls/linux/sys_socket.go",
"new_path": "pkg/sentry/syscalls/linux/sys_socket.go",
"diff": "@@ -1052,7 +1052,9 @@ func sendSingleMsg(t *kernel.Task, s socket.Socket, file *fs.File, msgPtr userme\n// Call the syscall implementation.\nn, e := s.SendMsg(t, src, to, int(flags), haveDeadline, deadline, controlMessages)\nerr = handleIOError(t, n != 0, e.ToError(), syserror.ERESTARTSYS, \"sendmsg\", file)\n- if err != nil {\n+ // Control messages should be released on error as well as for zero-length\n+ // messages, which are discarded by the receiver.\n+ if n == 0 || err != nil {\ncontrolMessages.Release(t)\n}\nreturn uintptr(n), err\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/syscalls/linux/vfs2/socket.go",
"new_path": "pkg/sentry/syscalls/linux/vfs2/socket.go",
"diff": "@@ -1055,7 +1055,9 @@ func sendSingleMsg(t *kernel.Task, s socket.SocketVFS2, file *vfs.FileDescriptio\n// Call the syscall implementation.\nn, e := s.SendMsg(t, src, to, int(flags), haveDeadline, deadline, controlMessages)\nerr = slinux.HandleIOErrorVFS2(t, n != 0, e.ToError(), syserror.ERESTARTSYS, \"sendmsg\", file)\n- if err != nil {\n+ // Control messages should be released on error as well as for zero-length\n+ // messages, which are discarded by the receiver.\n+ if n == 0 || err != nil {\ncontrolMessages.Release(t)\n}\nreturn uintptr(n), err\n"
}
] | Go | Apache License 2.0 | google/gvisor | Fix SCM Rights reference leaks.
Control messages should be released on Read (which ignores the control message)
or zero-byte Send. Otherwise, open fds sent through the control messages will
be leaked.
PiperOrigin-RevId: 337110774 |
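A minimal sketch of why ancillary data must be released when the caller discards it, as the message above explains for read(2) and zero-length sends. The controlMessages type and the fd numbers here are hypothetical; the point is only that Release must run on the path that drops the data.

```go
package main

import "fmt"

// controlMessages stands in for ancillary data (for example, passed file
// descriptors) that owns references which must be dropped explicitly.
type controlMessages struct{ fds []int }

func (c *controlMessages) Release() {
	for _, fd := range c.fds {
		fmt.Println("closing otherwise-leaked fd", fd)
	}
	c.fds = nil
}

// read models a plain read(2) on a socket: it has no way to hand control
// messages to the caller, so it must release them itself or the fds leak.
func read(buf []byte, recv func() (int, *controlMessages)) int {
	n, cms := recv()
	cms.Release() // drop ancillary data the caller cannot see
	return n
}

func main() {
	n := read(make([]byte, 16), func() (int, *controlMessages) {
		return 16, &controlMessages{fds: []int{7, 8}}
	})
	fmt.Println("read", n, "bytes")
}
```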
260,004 | 14.10.2020 15:28:06 | 25,200 | 6e6a9d3f3dd6dd9ce290952406ba7ca7c5570311 | Find route before sending NA response
This change also brings back the stack.Route.ResolveWith method so that
we can immediately resolve a route when sending an NA in response to a
NS with a source link layer address option.
Test: ipv6_test.TestNeighorSolicitationResponse | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/ipv6/icmp.go",
"new_path": "pkg/tcpip/network/ipv6/icmp.go",
"diff": "@@ -252,26 +252,29 @@ func (e *endpoint) handleICMP(r *stack.Route, pkt *stack.PacketBuffer, hasFragme\nreturn\n}\n+ var sourceLinkAddr tcpip.LinkAddress\n+ {\nit, err := ns.Options().Iter(false /* check */)\nif err != nil {\n- // Options are not valid as per the wire format, silently drop the packet.\n+ // Options are not valid as per the wire format, silently drop the\n+ // packet.\nreceived.Invalid.Increment()\nreturn\n}\n- sourceLinkAddr, ok := getSourceLinkAddr(it)\n+ sourceLinkAddr, ok = getSourceLinkAddr(it)\nif !ok {\nreceived.Invalid.Increment()\nreturn\n}\n-\n- unspecifiedSource := r.RemoteAddress == header.IPv6Any\n+ }\n// As per RFC 4861 section 4.3, the Source Link-Layer Address Option MUST\n// NOT be included when the source IP address is the unspecified address.\n// Otherwise, on link layers that have addresses this option MUST be\n// included in multicast solicitations and SHOULD be included in unicast\n// solicitations.\n+ unspecifiedSource := r.RemoteAddress == header.IPv6Any\nif len(sourceLinkAddr) == 0 {\nif header.IsV6MulticastAddress(r.LocalAddress) && !unspecifiedSource {\nreceived.Invalid.Increment()\n@@ -297,41 +300,51 @@ func (e *endpoint) handleICMP(r *stack.Route, pkt *stack.PacketBuffer, hasFragme\nreturn\n}\n- // ICMPv6 Neighbor Solicit messages are always sent to\n- // specially crafted IPv6 multicast addresses. As a result, the\n- // route we end up with here has as its LocalAddress such a\n- // multicast address. It would be nonsense to claim that our\n- // source address is a multicast address, so we manually set\n- // the source address to the target address requested in the\n- // solicit message. Since that requires mutating the route, we\n- // must first clone it.\n- r := r.Clone()\n- defer r.Release()\n- r.LocalAddress = targetAddr\n-\n- // As per RFC 4861 section 7.2.4, if the the source of the solicitation is\n- // the unspecified address, the node MUST set the Solicited flag to zero and\n- // multicast the advertisement to the all-nodes address.\n- solicited := true\n+ // As per RFC 4861 section 7.2.4:\n+ //\n+ // If the source of the solicitation is the unspecified address, the node\n+ // MUST [...] and multicast the advertisement to the all-nodes address.\n+ //\n+ remoteAddr := r.RemoteAddress\nif unspecifiedSource {\n- solicited = false\n- r.RemoteAddress = header.IPv6AllNodesMulticastAddress\n+ remoteAddr = header.IPv6AllNodesMulticastAddress\n}\n- // If the NS has a source link-layer option, use the link address it\n- // specifies as the remote link address for the response instead of the\n- // source link address of the packet.\n+ // Even if we were able to receive a packet from some remote, we may not\n+ // have a route to it - the remote may be blocked via routing rules. 
We must\n+ // always consult our routing table and find a route to the remote before\n+ // sending any packet.\n+ r, err := e.protocol.stack.FindRoute(e.nic.ID(), targetAddr, remoteAddr, ProtocolNumber, false /* multicastLoop */)\n+ if err != nil {\n+ // If we cannot find a route to the destination, silently drop the packet.\n+ return\n+ }\n+ defer r.Release()\n+\n+ // If the NS has a source link-layer option, resolve the route immediately\n+ // to avoid querying the neighbor table when the neighbor entry was updated\n+ // as probing the neighbor table for a link address will transition the\n+ // entry's state from stale to delay.\n+ //\n+ // Note, if the source link address is unspecified and this is a unicast\n+ // solicitation, we may need to perform neighbor discovery to send the\n+ // neighbor advertisement response. This is expected as per RFC 4861 section\n+ // 7.2.4:\n+ //\n+ // Because unicast Neighbor Solicitations are not required to include a\n+ // Source Link-Layer Address, it is possible that a node sending a\n+ // solicited Neighbor Advertisement does not have a corresponding link-\n+ // layer address for its neighbor in its Neighbor Cache. In such\n+ // situations, a node will first have to use Neighbor Discovery to\n+ // determine the link-layer address of its neighbor (i.e., send out a\n+ // multicast Neighbor Solicitation).\n//\n- // TODO(#2401): As per RFC 4861 section 7.2.4 we should consult our link\n- // address cache for the right destination link address instead of manually\n- // patching the route with the remote link address if one is specified in a\n- // Source Link-Layer Address option.\nif len(sourceLinkAddr) != 0 {\n- r.RemoteLinkAddress = sourceLinkAddr\n+ r.ResolveWith(sourceLinkAddr)\n}\noptsSerializer := header.NDPOptionsSerializer{\n- header.NDPTargetLinkLayerAddressOption(r.LocalLinkAddress),\n+ header.NDPTargetLinkLayerAddressOption(e.nic.LinkAddress()),\n}\nneighborAdvertSize := header.ICMPv6NeighborAdvertMinimumSize + optsSerializer.Length()\npkt := stack.NewPacketBuffer(stack.PacketBufferOptions{\n@@ -341,7 +354,14 @@ func (e *endpoint) handleICMP(r *stack.Route, pkt *stack.PacketBuffer, hasFragme\npacket := header.ICMPv6(pkt.TransportHeader().Push(neighborAdvertSize))\npacket.SetType(header.ICMPv6NeighborAdvert)\nna := header.NDPNeighborAdvert(packet.NDPPayload())\n- na.SetSolicitedFlag(solicited)\n+\n+ // As per RFC 4861 section 7.2.4:\n+ //\n+ // If the source of the solicitation is the unspecified address, the node\n+ // MUST set the Solicited flag to zero and [..]. Otherwise, the node MUST\n+ // set the Solicited flag to one and [..].\n+ //\n+ na.SetSolicitedFlag(!unspecifiedSource)\nna.SetOverrideFlag(true)\nna.SetTargetAddress(targetAddr)\nna.Options().Serialize(optsSerializer)\n@@ -635,6 +655,7 @@ func (*protocol) LinkAddressRequest(addr, localAddr tcpip.Address, remoteLinkAdd\nr := stack.Route{\nLocalAddress: localAddr,\nRemoteAddress: addr,\n+ LocalLinkAddress: linkEP.LinkAddress(),\nRemoteLinkAddress: remoteLinkAddr,\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/ipv6/ndp_test.go",
"new_path": "pkg/tcpip/network/ipv6/ndp_test.go",
"diff": "package ipv6\nimport (\n+ \"context\"\n\"strings\"\n\"testing\"\n\"time\"\n@@ -408,6 +409,7 @@ func TestNeighorSolicitationResponse(t *testing.T) {\nnaSolicited bool\nnaSrc tcpip.Address\nnaDst tcpip.Address\n+ performsLinkResolution bool\n}{\n{\nname: \"Unspecified source to solicited-node multicast destination\",\n@@ -416,7 +418,7 @@ func TestNeighorSolicitationResponse(t *testing.T) {\nnsSrc: header.IPv6Any,\nnsDst: nicAddrSNMC,\nnsInvalid: false,\n- naDstLinkAddr: remoteLinkAddr0,\n+ naDstLinkAddr: header.EthernetAddressFromMulticastIPv6Address(header.IPv6AllNodesMulticastAddress),\nnaSolicited: false,\nnaSrc: nicAddr,\nnaDst: header.IPv6AllNodesMulticastAddress,\n@@ -449,7 +451,6 @@ func TestNeighorSolicitationResponse(t *testing.T) {\nnsDst: nicAddr,\nnsInvalid: true,\n},\n-\n{\nname: \"Specified source with 1 source ll to multicast destination\",\nnsOpts: header.NDPOptionsSerializer{\n@@ -509,6 +510,10 @@ func TestNeighorSolicitationResponse(t *testing.T) {\nnaSolicited: true,\nnaSrc: nicAddr,\nnaDst: remoteAddr,\n+ // Since we send a unicast solicitations to a node without an entry for\n+ // the remote, the node needs to perform neighbor discovery to get the\n+ // remote's link address to send the advertisement response.\n+ performsLinkResolution: true,\n},\n{\nname: \"Specified source with 1 source ll to unicast destination\",\n@@ -615,11 +620,78 @@ func TestNeighorSolicitationResponse(t *testing.T) {\nt.Fatalf(\"got invalid = %d, want = 0\", got)\n}\n- p, got := e.Read()\n+ if test.performsLinkResolution {\n+ p, got := e.ReadContext(context.Background())\n+ if !got {\n+ t.Fatal(\"expected an NDP NS response\")\n+ }\n+\n+ if p.Route.LocalAddress != nicAddr {\n+ t.Errorf(\"got p.Route.LocalAddress = %s, want = %s\", p.Route.LocalAddress, nicAddr)\n+ }\n+ if p.Route.LocalLinkAddress != nicLinkAddr {\n+ t.Errorf(\"p.Route.LocalLinkAddress = %s, want = %s\", p.Route.LocalLinkAddress, nicLinkAddr)\n+ }\n+ respNSDst := header.SolicitedNodeAddr(test.nsSrc)\n+ if p.Route.RemoteAddress != respNSDst {\n+ t.Errorf(\"got p.Route.RemoteAddress = %s, want = %s\", p.Route.RemoteAddress, respNSDst)\n+ }\n+ if want := header.EthernetAddressFromMulticastIPv6Address(respNSDst); p.Route.RemoteLinkAddress != want {\n+ t.Errorf(\"got p.Route.RemoteLinkAddress = %s, want = %s\", p.Route.RemoteLinkAddress, want)\n+ }\n+\n+ checker.IPv6(t, stack.PayloadSince(p.Pkt.NetworkHeader()),\n+ checker.SrcAddr(nicAddr),\n+ checker.DstAddr(respNSDst),\n+ checker.TTL(header.NDPHopLimit),\n+ checker.NDPNS(\n+ checker.NDPNSTargetAddress(test.nsSrc),\n+ checker.NDPNSOptions([]header.NDPOption{\n+ header.NDPSourceLinkLayerAddressOption(nicLinkAddr),\n+ }),\n+ ))\n+\n+ ser := header.NDPOptionsSerializer{\n+ header.NDPTargetLinkLayerAddressOption(linkAddr1),\n+ }\n+ ndpNASize := header.ICMPv6NeighborAdvertMinimumSize + ser.Length()\n+ hdr := buffer.NewPrependable(header.IPv6MinimumSize + ndpNASize)\n+ pkt := header.ICMPv6(hdr.Prepend(ndpNASize))\n+ pkt.SetType(header.ICMPv6NeighborAdvert)\n+ na := header.NDPNeighborAdvert(pkt.NDPPayload())\n+ na.SetSolicitedFlag(true)\n+ na.SetOverrideFlag(true)\n+ na.SetTargetAddress(test.nsSrc)\n+ na.Options().Serialize(ser)\n+ pkt.SetChecksum(header.ICMPv6Checksum(pkt, test.nsSrc, nicAddr, buffer.VectorisedView{}))\n+ payloadLength := hdr.UsedLength()\n+ ip := header.IPv6(hdr.Prepend(header.IPv6MinimumSize))\n+ ip.Encode(&header.IPv6Fields{\n+ PayloadLength: uint16(payloadLength),\n+ NextHeader: uint8(header.ICMPv6ProtocolNumber),\n+ HopLimit: header.NDPHopLimit,\n+ SrcAddr: 
test.nsSrc,\n+ DstAddr: nicAddr,\n+ })\n+ e.InjectLinkAddr(ProtocolNumber, \"\", stack.NewPacketBuffer(stack.PacketBufferOptions{\n+ Data: hdr.View().ToVectorisedView(),\n+ }))\n+ }\n+\n+ p, got := e.ReadContext(context.Background())\nif !got {\nt.Fatal(\"expected an NDP NA response\")\n}\n+ if p.Route.LocalAddress != test.naSrc {\n+ t.Errorf(\"got p.Route.LocalAddress = %s, want = %s\", p.Route.LocalAddress, test.naSrc)\n+ }\n+ if p.Route.LocalLinkAddress != nicLinkAddr {\n+ t.Errorf(\"p.Route.LocalLinkAddress = %s, want = %s\", p.Route.LocalLinkAddress, nicLinkAddr)\n+ }\n+ if p.Route.RemoteAddress != test.naDst {\n+ t.Errorf(\"got p.Route.RemoteAddress = %s, want = %s\", p.Route.RemoteAddress, test.naDst)\n+ }\nif p.Route.RemoteLinkAddress != test.naDstLinkAddr {\nt.Errorf(\"got p.Route.RemoteLinkAddress = %s, want = %s\", p.Route.RemoteLinkAddress, test.naDstLinkAddr)\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/BUILD",
"new_path": "pkg/tcpip/stack/BUILD",
"diff": "@@ -123,6 +123,7 @@ go_test(\n\"//pkg/tcpip/header\",\n\"//pkg/tcpip/link/channel\",\n\"//pkg/tcpip/link/loopback\",\n+ \"//pkg/tcpip/network/arp\",\n\"//pkg/tcpip/network/ipv4\",\n\"//pkg/tcpip/network/ipv6\",\n\"//pkg/tcpip/ports\",\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/route.go",
"new_path": "pkg/tcpip/stack/route.go",
"diff": "@@ -126,6 +126,12 @@ func (r *Route) GSOMaxSize() uint32 {\nreturn 0\n}\n+// ResolveWith immediately resolves a route with the specified remote link\n+// address.\n+func (r *Route) ResolveWith(addr tcpip.LinkAddress) {\n+ r.RemoteLinkAddress = addr\n+}\n+\n// Resolve attempts to resolve the link address if necessary. Returns ErrWouldBlock in\n// case address resolution requires blocking, e.g. wait for ARP reply. Waker is\n// notified when address resolution is complete (success or not).\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/stack_test.go",
"new_path": "pkg/tcpip/stack/stack_test.go",
"diff": "@@ -34,6 +34,7 @@ import (\n\"gvisor.dev/gvisor/pkg/tcpip/header\"\n\"gvisor.dev/gvisor/pkg/tcpip/link/channel\"\n\"gvisor.dev/gvisor/pkg/tcpip/link/loopback\"\n+ \"gvisor.dev/gvisor/pkg/tcpip/network/arp\"\n\"gvisor.dev/gvisor/pkg/tcpip/network/ipv4\"\n\"gvisor.dev/gvisor/pkg/tcpip/network/ipv6\"\n\"gvisor.dev/gvisor/pkg/tcpip/stack\"\n@@ -3498,6 +3499,52 @@ func TestOutgoingSubnetBroadcast(t *testing.T) {\n}\n}\n+func TestResolveWith(t *testing.T) {\n+ const (\n+ unspecifiedNICID = 0\n+ nicID = 1\n+ )\n+\n+ s := stack.New(stack.Options{\n+ NetworkProtocols: []stack.NetworkProtocolFactory{ipv4.NewProtocol, arp.NewProtocol},\n+ })\n+ ep := channel.New(0, defaultMTU, \"\")\n+ ep.LinkEPCapabilities |= stack.CapabilityResolutionRequired\n+ if err := s.CreateNIC(nicID, ep); err != nil {\n+ t.Fatalf(\"CreateNIC(%d, _): %s\", nicID, err)\n+ }\n+ addr := tcpip.ProtocolAddress{\n+ Protocol: header.IPv4ProtocolNumber,\n+ AddressWithPrefix: tcpip.AddressWithPrefix{\n+ Address: tcpip.Address([]byte{192, 168, 1, 58}),\n+ PrefixLen: 24,\n+ },\n+ }\n+ if err := s.AddProtocolAddress(nicID, addr); err != nil {\n+ t.Fatalf(\"AddProtocolAddress(%d, %#v): %s\", nicID, addr, err)\n+ }\n+\n+ s.SetRouteTable([]tcpip.Route{{Destination: header.IPv4EmptySubnet, NIC: nicID}})\n+\n+ remoteAddr := tcpip.Address([]byte{192, 168, 1, 59})\n+ r, err := s.FindRoute(unspecifiedNICID, \"\" /* localAddr */, remoteAddr, header.IPv4ProtocolNumber, false /* multicastLoop */)\n+ if err != nil {\n+ t.Fatalf(\"FindRoute(%d, '', %s, %d): %s\", unspecifiedNICID, remoteAddr, header.IPv4ProtocolNumber, err)\n+ }\n+ defer r.Release()\n+\n+ // Should initially require resolution.\n+ if !r.IsResolutionRequired() {\n+ t.Fatal(\"got r.IsResolutionRequired() = false, want = true\")\n+ }\n+\n+ // Manually resolving the route should no longer require resolution.\n+ r.ResolveWith(\"\\x01\")\n+ if r.IsResolutionRequired() {\n+ t.Fatal(\"got r.IsResolutionRequired() = true, want = false\")\n+ }\n+}\n+\n// TestRouteReleaseAfterAddrRemoval tests that releasing a Route after its\n// associated address is removed should not cause a panic.\nfunc TestRouteReleaseAfterAddrRemoval(t *testing.T) {\n"
}
] | Go | Apache License 2.0 | google/gvisor | Find route before sending NA response
This change also brings back the stack.Route.ResolveWith method so that
we can immediately resolve a route when sending an NA in response to a
a NS with a source link layer address option.
Test: ipv6_test.TestNeighorSolicitationResponse
PiperOrigin-RevId: 337185461 |
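The commit above makes the NA reply consult the routing table first and resolve the route directly when a source link-layer address option is present. The toy sketch below shows that shape with invented route, findRoute and resolveWith names; it is not the netstack API.

```go
package main

import (
	"errors"
	"fmt"
)

// route is a toy reply route: it is only usable once a remote link-layer
// address is known, either from neighbor discovery or supplied directly.
type route struct {
	remoteIP   string
	remoteLink string
}

// findRoute stands in for the stack's routing-table lookup: a reply is
// only sent if our (toy) policy says the remote is reachable at all.
func findRoute(allowed map[string]bool, remoteIP string) (*route, error) {
	if !allowed[remoteIP] {
		return nil, errors.New("no route to remote")
	}
	return &route{remoteIP: remoteIP}, nil
}

// resolveWith immediately fills in the link address, the way a received
// source link-layer address option lets the sender skip discovery.
func (r *route) resolveWith(link string) { r.remoteLink = link }

func main() {
	allowed := map[string]bool{"fe80::1": true}
	r, err := findRoute(allowed, "fe80::1")
	if err != nil {
		fmt.Println("drop:", err)
		return
	}
	r.resolveWith("02:00:00:00:00:01") // option supplied the address
	fmt.Printf("send NA to %s via %s\n", r.remoteIP, r.remoteLink)
}
```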
259,951 | 15.10.2020 09:27:03 | 25,200 | 8f70c6ef351110cf94e758b6dc295387f0388707 | Refactor compareFragments to follow Go style
Test helpers should be used for test setup/teardown, not actual
testing. Use cmp.Diff instead of bytes.Equal to improve readability. | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/ipv4/ipv4_test.go",
"new_path": "pkg/tcpip/network/ipv4/ipv4_test.go",
"diff": "package ipv4_test\nimport (\n- \"bytes\"\n\"context\"\n\"encoding/hex\"\n+ \"fmt\"\n\"math\"\n\"net\"\n\"testing\"\n@@ -243,7 +243,7 @@ func TestIPv4Sanity(t *testing.T) {\n// Default routes for IPv4 so ICMP can find a route to the remote\n// node when attempting to send the ICMP Echo Reply.\ns.SetRouteTable([]tcpip.Route{\n- tcpip.Route{\n+ {\nDestination: header.IPv4EmptySubnet,\nNIC: nicID,\n},\n@@ -369,11 +369,10 @@ func TestIPv4Sanity(t *testing.T) {\n// comparePayloads compared the contents of all the packets against the contents\n// of the source packet.\n-func compareFragments(t *testing.T, packets []*stack.PacketBuffer, sourcePacketInfo *stack.PacketBuffer, mtu uint32) {\n- t.Helper()\n- // Make a complete array of the sourcePacketInfo packet.\n- source := header.IPv4(packets[0].NetworkHeader().View()[:header.IPv4MinimumSize])\n- vv := buffer.NewVectorisedView(sourcePacketInfo.Size(), sourcePacketInfo.Views())\n+func compareFragments(packets []*stack.PacketBuffer, sourcePacket *stack.PacketBuffer, mtu uint32) error {\n+ // Make a complete array of the sourcePacket packet.\n+ source := header.IPv4(packets[0].NetworkHeader().View())\n+ vv := buffer.NewVectorisedView(sourcePacket.Size(), sourcePacket.Views())\nsource = append(source, vv.ToView()...)\n// Make a copy of the IP header, which will be modified in some fields to make\n@@ -384,46 +383,49 @@ func compareFragments(t *testing.T, packets []*stack.PacketBuffer, sourcePacketI\nsourceCopy.SetTotalLength(0)\nvar offset uint16\n// Build up an array of the bytes sent.\n- var reassembledPayload []byte\n+ var reassembledPayload buffer.VectorisedView\nfor i, packet := range packets {\n// Confirm that the packet is valid.\nallBytes := buffer.NewVectorisedView(packet.Size(), packet.Views())\n- ip := header.IPv4(allBytes.ToView())\n- if !ip.IsValid(len(ip)) {\n- t.Errorf(\"IP packet is invalid:\\n%s\", hex.Dump(ip))\n+ fragmentIPHeader := header.IPv4(allBytes.ToView())\n+ if !fragmentIPHeader.IsValid(len(fragmentIPHeader)) {\n+ return fmt.Errorf(\"fragment #%d: IP packet is invalid:\\n%s\", i, hex.Dump(fragmentIPHeader))\n}\n- if got, want := ip.CalculateChecksum(), uint16(0xffff); got != want {\n- t.Errorf(\"ip.CalculateChecksum() got %#x, want %#x\", got, want)\n+ if got, want := fragmentIPHeader.CalculateChecksum(), uint16(0xffff); got != want {\n+ return fmt.Errorf(\"fragment #%d: fragmentIPHeader.CalculateChecksum() got %#x, want %#x\", i, got, want)\n}\n- if got, want := len(ip), int(mtu); got > want {\n- t.Errorf(\"fragment is too large, got %d want %d\", got, want)\n+ if got := len(fragmentIPHeader); got > int(mtu) {\n+ return fmt.Errorf(\"fragment #%d: got len(fragmentIPHeader) = %d, want <= %d\", i, got, mtu)\n}\n- if got, want := packet.AvailableHeaderBytes(), sourcePacketInfo.AvailableHeaderBytes()-header.IPv4MinimumSize; got != want {\n- t.Errorf(\"fragment #%d should have the same available space for prepending as source: got %d, want %d\", i, got, want)\n+ if got, want := packet.AvailableHeaderBytes(), sourcePacket.AvailableHeaderBytes()-header.IPv4MinimumSize; got != want {\n+ return fmt.Errorf(\"fragment #%d: should have the same available space for prepending as source: got %d, want %d\", i, got, want)\n}\n- if got, want := packet.NetworkProtocolNumber, sourcePacketInfo.NetworkProtocolNumber; got != want {\n- t.Errorf(\"fragment #%d has wrong network protocol number: got %d, want %d\", i, got, want)\n+ if got, want := packet.NetworkProtocolNumber, sourcePacket.NetworkProtocolNumber; got != want {\n+ return 
fmt.Errorf(\"fragment #%d: has wrong network protocol number: got %d, want %d\", i, got, want)\n}\nif i < len(packets)-1 {\nsourceCopy.SetFlagsFragmentOffset(sourceCopy.Flags()|header.IPv4FlagMoreFragments, offset)\n} else {\nsourceCopy.SetFlagsFragmentOffset(sourceCopy.Flags()&^header.IPv4FlagMoreFragments, offset)\n}\n- reassembledPayload = append(reassembledPayload, ip.Payload()...)\n- offset += ip.TotalLength() - uint16(ip.HeaderLength())\n+ reassembledPayload.AppendView(packet.TransportHeader().View())\n+ reassembledPayload.Append(packet.Data)\n+ offset += fragmentIPHeader.TotalLength() - uint16(fragmentIPHeader.HeaderLength())\n// Clear out the checksum and length from the ip because we can't compare\n// it.\n- sourceCopy.SetTotalLength(uint16(len(ip)))\n+ sourceCopy.SetTotalLength(uint16(len(fragmentIPHeader)))\nsourceCopy.SetChecksum(0)\nsourceCopy.SetChecksum(^sourceCopy.CalculateChecksum())\n- if !bytes.Equal(ip[:ip.HeaderLength()], sourceCopy[:sourceCopy.HeaderLength()]) {\n- t.Errorf(\"ip[:ip.HeaderLength()] got:\\n%s\\nwant:\\n%s\", hex.Dump(ip[:ip.HeaderLength()]), hex.Dump(sourceCopy[:sourceCopy.HeaderLength()]))\n+ if diff := cmp.Diff(fragmentIPHeader[:fragmentIPHeader.HeaderLength()], sourceCopy[:sourceCopy.HeaderLength()]); diff != \"\" {\n+ return fmt.Errorf(\"fragment #%d: fragmentIPHeader[:fragmentIPHeader.HeaderLength()] mismatch (-want +got):\\n%s\", i, diff)\n}\n}\n- expected := source[source.HeaderLength():]\n- if !bytes.Equal(reassembledPayload, expected) {\n- t.Errorf(\"reassembledPayload got:\\n%s\\nwant:\\n%s\", hex.Dump(reassembledPayload), hex.Dump(expected))\n+ expected := buffer.View(source[source.HeaderLength():])\n+ if diff := cmp.Diff(expected, reassembledPayload.ToView()); diff != \"\" {\n+ return fmt.Errorf(\"reassembledPayload mismatch (-want +got):\\n%s\", diff)\n}\n+\n+ return nil\n}\nfunc TestFragmentation(t *testing.T) {\n@@ -477,7 +479,9 @@ func TestFragmentation(t *testing.T) {\nif got := r.Stats().IP.OutgoingPacketErrors.Value(); got != 0 {\nt.Errorf(\"got r.Stats().IP.OutgoingPacketErrors.Value() = %d, want = 0\", got)\n}\n- compareFragments(t, ep.WrittenPackets, source, ft.mtu)\n+ if err := compareFragments(ep.WrittenPackets, source, ft.mtu); err != nil {\n+ t.Error(err)\n+ }\n})\n}\n}\n@@ -1633,7 +1637,7 @@ func TestPacketQueing(t *testing.T) {\n}\ns.SetRouteTable([]tcpip.Route{\n- tcpip.Route{\n+ {\nDestination: host1IPv4Addr.AddressWithPrefix.Subnet(),\nNIC: nicID,\n},\n"
}
] | Go | Apache License 2.0 | google/gvisor | Refactor compareFragments to follow Go style
Test helpers should be used for test setup/teardown, not actual
testing. Use cmp.Diff instead of bytes.Equal to improve readability.
PiperOrigin-RevId: 337323242 |
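To show the test-helper style the message above prefers, here is a minimal test-file sketch in which the helper returns an error and the calling test decides how to report it. The names are made up for the example.

```go
package fragment_test

import (
	"fmt"
	"testing"
)

// checkLengths follows the preferred style: helpers return an error
// instead of failing the test themselves, so the calling test controls
// reporting and the failure points at the test, not the helper.
func checkLengths(got, want int) error {
	if got != want {
		return fmt.Errorf("got len = %d, want %d", got, want)
	}
	return nil
}

func TestLengths(t *testing.T) {
	if err := checkLengths(4, 4); err != nil {
		t.Error(err)
	}
}
```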
260,001 | 15.10.2020 14:33:18 | 25,200 | f0f7431ea2e9f6864bc81c375108857b79c6e64b | Change verity isEnable to be a member of dentry | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/verity/filesystem.go",
"new_path": "pkg/sentry/fsimpl/verity/filesystem.go",
"diff": "@@ -377,12 +377,12 @@ func (fs *filesystem) getChildLocked(ctx context.Context, parent *dentry, name s\n// enabled, we should verify the child hash here because it may\n// be cached before enabled.\nif fs.allowRuntimeEnable {\n- if isEnabled(parent) {\n+ if parent.verityEnabled() {\nif _, err := fs.verifyChild(ctx, parent, child); err != nil {\nreturn nil, err\n}\n}\n- if isEnabled(child) {\n+ if child.verityEnabled() {\nvfsObj := fs.vfsfs.VirtualFilesystem()\nmask := uint32(linux.STATX_TYPE | linux.STATX_MODE | linux.STATX_UID | linux.STATX_GID)\nstat, err := vfsObj.StatAt(ctx, fs.creds, &vfs.PathOperation{\n@@ -553,13 +553,13 @@ func (fs *filesystem) lookupAndVerifyLocked(ctx context.Context, parent *dentry,\n// Verify child hash. This should always be performed unless in\n// allowRuntimeEnable mode and the parent directory hasn't been enabled\n// yet.\n- if isEnabled(parent) {\n+ if parent.verityEnabled() {\nif _, err := fs.verifyChild(ctx, parent, child); err != nil {\nchild.destroyLocked(ctx)\nreturn nil, err\n}\n}\n- if isEnabled(child) {\n+ if child.verityEnabled() {\nif err := fs.verifyStat(ctx, child, stat); err != nil {\nchild.destroyLocked(ctx)\nreturn nil, err\n@@ -915,7 +915,7 @@ func (fs *filesystem) StatAt(ctx context.Context, rp *vfs.ResolvingPath, opts vf\nif err != nil {\nreturn linux.Statx{}, err\n}\n- if isEnabled(d) {\n+ if d.verityEnabled() {\nif err := fs.verifyStat(ctx, d, stat); err != nil {\nreturn linux.Statx{}, err\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/verity/verity.go",
"new_path": "pkg/sentry/fsimpl/verity/verity.go",
"diff": "@@ -148,14 +148,6 @@ func (FilesystemType) Name() string {\nreturn Name\n}\n-// isEnabled checks whether the target is enabled with verity features. It\n-// should always be true if runtime enable is not allowed. In runtime enable\n-// mode, it returns true if the target has been enabled with\n-// ioctl(FS_IOC_ENABLE_VERITY).\n-func isEnabled(d *dentry) bool {\n- return !d.fs.allowRuntimeEnable || len(d.hash) != 0\n-}\n-\n// Release implements vfs.FilesystemType.Release.\nfunc (FilesystemType) Release(ctx context.Context) {}\n@@ -448,6 +440,14 @@ func (d *dentry) checkPermissions(creds *auth.Credentials, ats vfs.AccessTypes)\nreturn vfs.GenericCheckPermissions(creds, ats, linux.FileMode(atomic.LoadUint32(&d.mode)), auth.KUID(atomic.LoadUint32(&d.uid)), auth.KGID(atomic.LoadUint32(&d.gid)))\n}\n+// verityEnabled checks whether the file is enabled with verity features. It\n+// should always be true if runtime enable is not allowed. In runtime enable\n+// mode, it returns true if the target has been enabled with\n+// ioctl(FS_IOC_ENABLE_VERITY).\n+func (d *dentry) verityEnabled() bool {\n+ return !d.fs.allowRuntimeEnable || len(d.hash) != 0\n+}\n+\nfunc (d *dentry) readlink(ctx context.Context) (string, error) {\nreturn d.fs.vfsfs.VirtualFilesystem().ReadlinkAt(ctx, d.fs.creds, &vfs.PathOperation{\nRoot: d.lowerVD,\n@@ -510,7 +510,7 @@ func (fd *fileDescription) Stat(ctx context.Context, opts vfs.StatOptions) (linu\nif err != nil {\nreturn linux.Statx{}, err\n}\n- if isEnabled(fd.d) {\n+ if fd.d.verityEnabled() {\nif err := fd.d.fs.verifyStat(ctx, fd.d, stat); err != nil {\nreturn linux.Statx{}, err\n}\n@@ -726,7 +726,7 @@ func (fd *fileDescription) Ioctl(ctx context.Context, uio usermem.IO, args arch.\nfunc (fd *fileDescription) PRead(ctx context.Context, dst usermem.IOSequence, offset int64, opts vfs.ReadOptions) (int64, error) {\n// No need to verify if the file is not enabled yet in\n// allowRuntimeEnable mode.\n- if !isEnabled(fd.d) {\n+ if !fd.d.verityEnabled() {\nreturn fd.lowerFD.PRead(ctx, dst, offset, opts)\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Change verity isEnable to be a member of dentry
PiperOrigin-RevId: 337384146 |
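The verity change is a small API cleanup: a predicate that only reads dentry state moves from a package-level function to a method on dentry. A generic illustration of that refactor direction is sketched below; the types are simplified stand-ins, not the verity code itself.

```go
package example

type dentry struct {
	allowRuntimeEnable bool
	hash               []byte
}

// Before: a free function that has to be handed the dentry it inspects.
func isEnabled(d *dentry) bool {
	return !d.allowRuntimeEnable || len(d.hash) != 0
}

// After: the same predicate as a method, so call sites read d.verityEnabled().
func (d *dentry) verityEnabled() bool {
	return !d.allowRuntimeEnable || len(d.hash) != 0
}
```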
259,858 | 15.10.2020 17:19:21 | 25,200 | 0a7e32bd17fb3f4aae8fdea427283cda49fe002f | Add easier-to-use docker_image target. | [
{
"change_type": "MODIFY",
"old_path": "Makefile",
"new_path": "Makefile",
"diff": "@@ -94,9 +94,9 @@ endef\nrebuild-...: ## Rebuild the given image. Also may use 'rebuild-all-images'.\n$(eval $(call images,rebuild))\npush-...: ## Push the given image. Also may use 'push-all-images'.\n-$(eval $(call images,pull))\n-pull-...: ## Pull the given image. Also may use 'pull-all-images'.\n$(eval $(call images,push))\n+pull-...: ## Pull the given image. Also may use 'pull-all-images'.\n+$(eval $(call images,pull))\nload-...: ## Load (pull or rebuild) the given image. Also may use 'load-all-images'.\n$(eval $(call images,load))\nlist-images: ## List all available images.\n@@ -258,7 +258,7 @@ WEBSITE_PROJECT := gvisordev\nWEBSITE_REGION := us-central1\nwebsite-build: load-jekyll ## Build the site image locally.\n- @$(call submake,run TARGETS=\"//website:website\")\n+ @$(call submake,run TARGETS=\"//website:website\" ARGS=\"$(WEBSITE_IMAGE)\")\n.PHONY: website-build\nwebsite-server: website-build ## Run a local server for development.\n@@ -266,7 +266,7 @@ website-server: website-build ## Run a local server for development.\n.PHONY: website-server\nwebsite-push: website-build ## Push a new image and update the service.\n- @docker tag gvisor.dev/images/website $(WEBSITE_IMAGE) && docker push $(WEBSITE_IMAGE)\n+ @docker push $(WEBSITE_IMAGE)\n.PHONY: website-push\nwebsite-deploy: website-push ## Deploy a new version of the website.\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "images/defs.bzl",
"diff": "+\"\"\"Helpers for Docker image generation.\"\"\"\n+\n+def _docker_image_impl(ctx):\n+ importer = ctx.actions.declare_file(ctx.label.name)\n+ importer_content = [\n+ \"#!/bin/bash\",\n+ \"set -euo pipefail\",\n+ \"exec docker import \" + \" \".join([\n+ \"-c '%s'\" % attr\n+ for attr in ctx.attr.statements\n+ ]) + \" \" + \" \".join([\n+ \"'%s'\" % f.path\n+ for f in ctx.files.data\n+ ]) + \" $1\",\n+ \"\",\n+ ]\n+ ctx.actions.write(importer, \"\\n\".join(importer_content), is_executable = True)\n+ return [DefaultInfo(\n+ runfiles = ctx.runfiles(ctx.files.data),\n+ executable = importer,\n+ )]\n+\n+docker_image = rule(\n+ implementation = _docker_image_impl,\n+ doc = \"Tool to load a Docker image; takes a single parameter (image name).\",\n+ attrs = {\n+ \"statements\": attr.string_list(doc = \"Extra Dockerfile directives.\"),\n+ \"data\": attr.label_list(doc = \"All image data.\"),\n+ },\n+ executable = True,\n+)\n"
},
{
"change_type": "MODIFY",
"old_path": "website/BUILD",
"new_path": "website/BUILD",
"diff": "load(\"//tools:defs.bzl\", \"bzl_library\", \"pkg_tar\")\nload(\"//website:defs.bzl\", \"doc\", \"docs\")\n+load(\"//images:defs.bzl\", \"docker_image\")\npackage(licenses = [\"notice\"])\n-# website is the full container image. Note that this actually just collects\n-# other dependendcies and runs Docker locally to import and tag the image.\n-sh_binary(\n+docker_image(\nname = \"website\",\n- srcs = [\"import.sh\"],\ndata = [\":files\"],\n- tags = [\n- \"local\",\n- \"manual\",\n+ statements = [\n+ \"EXPOSE 8080/tcp\",\n+ 'ENTRYPOINT [\"/server\"]',\n],\n)\n"
},
{
"change_type": "DELETE",
"old_path": "website/import.sh",
"new_path": null,
"diff": "-#!/bin/bash\n-\n-# Copyright 2018 The gVisor Authors.\n-#\n-# Licensed under the Apache License, Version 2.0 (the \"License\");\n-# you may not use this file except in compliance with the License.\n-# You may obtain a copy of the License at\n-#\n-# http://www.apache.org/licenses/LICENSE-2.0\n-#\n-# Unless required by applicable law or agreed to in writing, software\n-# distributed under the License is distributed on an \"AS IS\" BASIS,\n-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n-# See the License for the specific language governing permissions and\n-# limitations under the License.\n-\n-set -xeuo pipefail\n-\n-if [[ -d $0.runfiles ]]; then\n- cd $0.runfiles\n-fi\n-\n-exec docker import \\\n- -c \"EXPOSE 8080/tcp\" \\\n- -c \"ENTRYPOINT [\\\"/server\\\"]\" \\\n- $(find . -name files.tgz) \\\n- gvisor.dev/images/website\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add easier-to-use docker_image target.
PiperOrigin-RevId: 337415009 |
259,992 | 15.10.2020 21:48:50 | 25,200 | fc40ead6856603b85c978e9120c66721cdbfc87b | `runsc do` fallback to internal network on failure
If setting up the network fails, log a warning and fall back to the internal
network.
Closes | [
{
"change_type": "MODIFY",
"old_path": "runsc/cmd/do.go",
"new_path": "runsc/cmd/do.go",
"diff": "@@ -17,6 +17,7 @@ package cmd\nimport (\n\"context\"\n\"encoding/json\"\n+ \"errors\"\n\"fmt\"\n\"io/ioutil\"\n\"math/rand\"\n@@ -36,6 +37,8 @@ import (\n\"gvisor.dev/gvisor/runsc/specutils\"\n)\n+var errNoDefaultInterface = errors.New(\"no default interface found\")\n+\n// Do implements subcommands.Command for the \"do\" command. It sets up a simple\n// sandbox and executes the command inside it. See Usage() for more details.\ntype Do struct {\n@@ -126,26 +129,28 @@ func (c *Do) Execute(_ context.Context, f *flag.FlagSet, args ...interface{}) su\ncid := fmt.Sprintf(\"runsc-%06d\", rand.Int31n(1000000))\nif conf.Network == config.NetworkNone {\n- netns := specs.LinuxNamespace{\n- Type: specs.NetworkNamespace,\n- }\n- if spec.Linux != nil {\n- panic(\"spec.Linux is not nil\")\n- }\n- spec.Linux = &specs.Linux{Namespaces: []specs.LinuxNamespace{netns}}\n+ addNamespace(spec, specs.LinuxNamespace{Type: specs.NetworkNamespace})\n} else if conf.Rootless {\nif conf.Network == config.NetworkSandbox {\n- c.notifyUser(\"*** Warning: using host network due to --rootless ***\")\n+ c.notifyUser(\"*** Warning: sandbox network isn't supported with --rootless, switching to host ***\")\nconf.Network = config.NetworkHost\n}\n} else {\n- clean, err := c.setupNet(cid, spec)\n- if err != nil {\n+ switch clean, err := c.setupNet(cid, spec); err {\n+ case errNoDefaultInterface:\n+ log.Warningf(\"Network interface not found, using internal network\")\n+ addNamespace(spec, specs.LinuxNamespace{Type: specs.NetworkNamespace})\n+ conf.Network = config.NetworkHost\n+\n+ case nil:\n+ // Setup successfull.\n+ defer clean()\n+\n+ default:\nreturn Errorf(\"Error setting up network: %v\", err)\n}\n- defer clean()\n}\nout, err := json.Marshal(spec)\n@@ -199,6 +204,13 @@ func (c *Do) Execute(_ context.Context, f *flag.FlagSet, args ...interface{}) su\nreturn subcommands.ExitSuccess\n}\n+func addNamespace(spec *specs.Spec, ns specs.LinuxNamespace) {\n+ if spec.Linux == nil {\n+ spec.Linux = &specs.Linux{}\n+ }\n+ spec.Linux.Namespaces = append(spec.Linux.Namespaces, ns)\n+}\n+\nfunc (c *Do) notifyUser(format string, v ...interface{}) {\nif !c.quiet {\nfmt.Printf(format+\"\\n\", v...)\n@@ -219,10 +231,14 @@ func resolvePath(path string) (string, error) {\nreturn path, nil\n}\n+// setupNet setups up the sandbox network, including the creation of a network\n+// namespace, and iptable rules to redirect the traffic. Returns a cleanup\n+// function to tear down the network. Returns errNoDefaultInterface when there\n+// is no network interface available to setup the network.\nfunc (c *Do) setupNet(cid string, spec *specs.Spec) (func(), error) {\ndev, err := defaultDevice()\nif err != nil {\n- return nil, err\n+ return nil, errNoDefaultInterface\n}\npeerIP, err := calculatePeerIP(c.ip)\nif err != nil {\n@@ -279,14 +295,11 @@ func (c *Do) setupNet(cid string, spec *specs.Spec) (func(), error) {\nreturn nil, err\n}\n- if spec.Linux == nil {\n- spec.Linux = &specs.Linux{}\n- }\nnetns := specs.LinuxNamespace{\nType: specs.NetworkNamespace,\nPath: filepath.Join(\"/var/run/netns\", cid),\n}\n- spec.Linux.Namespaces = append(spec.Linux.Namespaces, netns)\n+ addNamespace(spec, netns)\nreturn func() { c.cleanupNet(cid, dev, resolvPath, hostnamePath, hostsPath) }, nil\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | `runsc do` fallback to internal network on failure
In case setting up network fails, log a warning and fallback to internal
network.
Closes #4498
PiperOrigin-RevId: 337442632 |
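The do.go change works by returning a sentinel error (errNoDefaultInterface) from network setup and switching on it at the call site to decide between fallback, success, and a fatal error. A stripped-down sketch of that control flow, with placeholder function bodies rather than runsc's actual code:

```go
package example

import (
	"errors"
	"log"
)

// errNoDefaultInterface mirrors the sentinel used in the commit: it marks the
// one failure mode that should trigger a fallback rather than an abort.
var errNoDefaultInterface = errors.New("no default interface found")

// setupNet is a stand-in for the real network setup; on success it returns a
// cleanup function that tears the network back down.
func setupNet() (func(), error) {
	return nil, errNoDefaultInterface
}

func run() error {
	switch clean, err := setupNet(); err {
	case errNoDefaultInterface:
		// Expected failure: warn and continue with the internal network.
		log.Print("network interface not found, using internal network")
	case nil:
		// Setup succeeded; tear the network down when done.
		defer clean()
	default:
		// Anything else is fatal.
		return err
	}
	// ... start the sandbox ...
	return nil
}
```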
259,853 | 15.10.2020 23:13:40 | 25,200 | c002fc36f9bbf0fe3ed8b7712c72376f8f8190c1 | sockets: ignore io.EOF from view.ReadAt
Reported-by: | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/netlink/socket.go",
"new_path": "pkg/sentry/socket/netlink/socket.go",
"diff": "package netlink\nimport (\n+ \"io\"\n\"math\"\n\"gvisor.dev/gvisor/pkg/abi/linux\"\n@@ -748,6 +749,12 @@ func (s *socketOpsCommon) sendMsg(ctx context.Context, src usermem.IOSequence, t\nbuf := make([]byte, src.NumBytes())\nn, err := src.CopyIn(ctx, buf)\n+ // io.EOF can be only returned if src is a file, this means that\n+ // sendMsg is called from splice and the error has to be ignored in\n+ // this case.\n+ if err == io.EOF {\n+ err = nil\n+ }\nif err != nil {\n// Don't partially consume messages.\nreturn 0, syserr.FromError(err)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/netstack/netstack.go",
"new_path": "pkg/sentry/socket/netstack/netstack.go",
"diff": "@@ -587,6 +587,11 @@ func (i *ioSequencePayload) Payload(size int) ([]byte, *tcpip.Error) {\n}\nv := buffer.NewView(size)\nif _, err := i.src.CopyIn(i.ctx, v); err != nil {\n+ // EOF can be returned only if src is a file and this means it\n+ // is in a splice syscall and the error has to be ignored.\n+ if err == io.EOF {\n+ return v, nil\n+ }\nreturn nil, tcpip.ErrBadAddress\n}\nreturn v, nil\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/socket_ip_tcp_generic.cc",
"new_path": "test/syscalls/linux/socket_ip_tcp_generic.cc",
"diff": "#include \"test/syscalls/linux/socket_ip_tcp_generic.h\"\n+#include <fcntl.h>\n#include <netinet/in.h>\n#include <netinet/tcp.h>\n#include <poll.h>\n@@ -979,6 +980,56 @@ TEST_P(TCPSocketPairTest, SetTCPUserTimeoutAboveZero) {\nEXPECT_EQ(get, kAbove);\n}\n+#ifdef __linux__\n+TEST_P(TCPSocketPairTest, SpliceFromPipe) {\n+ auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());\n+\n+ int fds[2];\n+ ASSERT_THAT(pipe(fds), SyscallSucceeds());\n+ FileDescriptor rfd(fds[0]);\n+ FileDescriptor wfd(fds[1]);\n+\n+ // Fill with some random data.\n+ std::vector<char> buf(kPageSize / 2);\n+ RandomizeBuffer(buf.data(), buf.size());\n+ ASSERT_THAT(write(wfd.get(), buf.data(), buf.size()),\n+ SyscallSucceedsWithValue(buf.size()));\n+\n+ EXPECT_THAT(\n+ splice(rfd.get(), nullptr, sockets->first_fd(), nullptr, kPageSize, 0),\n+ SyscallSucceedsWithValue(buf.size()));\n+\n+ std::vector<char> rbuf(buf.size());\n+ ASSERT_THAT(read(sockets->second_fd(), rbuf.data(), rbuf.size()),\n+ SyscallSucceedsWithValue(buf.size()));\n+ EXPECT_EQ(memcmp(rbuf.data(), buf.data(), buf.size()), 0);\n+}\n+\n+TEST_P(TCPSocketPairTest, SpliceToPipe) {\n+ auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());\n+\n+ int fds[2];\n+ ASSERT_THAT(pipe(fds), SyscallSucceeds());\n+ FileDescriptor rfd(fds[0]);\n+ FileDescriptor wfd(fds[1]);\n+\n+ // Fill with some random data.\n+ std::vector<char> buf(kPageSize / 2);\n+ RandomizeBuffer(buf.data(), buf.size());\n+ ASSERT_THAT(write(sockets->first_fd(), buf.data(), buf.size()),\n+ SyscallSucceedsWithValue(buf.size()));\n+ shutdown(sockets->first_fd(), SHUT_WR);\n+ EXPECT_THAT(\n+ splice(sockets->second_fd(), nullptr, wfd.get(), nullptr, kPageSize, 0),\n+ SyscallSucceedsWithValue(buf.size()));\n+\n+ std::vector<char> rbuf(buf.size());\n+ ASSERT_THAT(read(rfd.get(), rbuf.data(), rbuf.size()),\n+ SyscallSucceedsWithValue(buf.size()));\n+ EXPECT_EQ(memcmp(rbuf.data(), buf.data(), buf.size()), 0);\n+}\n+#endif // __linux__\n+\nTEST_P(TCPSocketPairTest, SetTCPWindowClampBelowMinRcvBufConnectedSocket) {\nauto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());\n// Discover minimum receive buf by setting a really low value\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/socket_netlink_route.cc",
"new_path": "test/syscalls/linux/socket_netlink_route.cc",
"diff": "// limitations under the License.\n#include <arpa/inet.h>\n+#include <fcntl.h>\n#include <ifaddrs.h>\n#include <linux/if.h>\n#include <linux/netlink.h>\n@@ -335,6 +336,49 @@ TEST(NetlinkRouteTest, MsgHdrMsgTrunc) {\nEXPECT_EQ((msg.msg_flags & MSG_TRUNC), MSG_TRUNC);\n}\n+TEST(NetlinkRouteTest, SpliceFromPipe) {\n+ Link loopback_link = ASSERT_NO_ERRNO_AND_VALUE(LoopbackLink());\n+ FileDescriptor fd =\n+ ASSERT_NO_ERRNO_AND_VALUE(NetlinkBoundSocket(NETLINK_ROUTE));\n+\n+ int fds[2];\n+ ASSERT_THAT(pipe(fds), SyscallSucceeds());\n+ FileDescriptor rfd(fds[0]);\n+ FileDescriptor wfd(fds[1]);\n+\n+ struct request {\n+ struct nlmsghdr hdr;\n+ struct ifinfomsg ifm;\n+ };\n+\n+ struct request req = {};\n+ req.hdr.nlmsg_len = sizeof(req);\n+ req.hdr.nlmsg_type = RTM_GETLINK;\n+ req.hdr.nlmsg_flags = NLM_F_REQUEST;\n+ req.hdr.nlmsg_seq = kSeq;\n+ req.ifm.ifi_family = AF_UNSPEC;\n+ req.ifm.ifi_index = loopback_link.index;\n+\n+ ASSERT_THAT(write(wfd.get(), &req, sizeof(req)),\n+ SyscallSucceedsWithValue(sizeof(req)));\n+\n+ EXPECT_THAT(splice(rfd.get(), nullptr, fd.get(), nullptr, sizeof(req) + 1, 0),\n+ SyscallSucceedsWithValue(sizeof(req)));\n+ close(wfd.release());\n+ EXPECT_THAT(splice(rfd.get(), nullptr, fd.get(), nullptr, sizeof(req) + 1, 0),\n+ SyscallSucceedsWithValue(0));\n+\n+ bool found = false;\n+ ASSERT_NO_ERRNO(NetlinkResponse(\n+ fd,\n+ [&](const struct nlmsghdr* hdr) {\n+ CheckLinkMsg(hdr, loopback_link);\n+ found = true;\n+ },\n+ false));\n+ EXPECT_TRUE(found) << \"Netlink response does not contain any links.\";\n+}\n+\nTEST(NetlinkRouteTest, MsgTruncMsgHdrMsgTrunc) {\nFileDescriptor fd =\nASSERT_NO_ERRNO_AND_VALUE(NetlinkBoundSocket(NETLINK_ROUTE));\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/socket_netlink_util.cc",
"new_path": "test/syscalls/linux/socket_netlink_util.cc",
"diff": "@@ -67,10 +67,21 @@ PosixError NetlinkRequestResponse(\nRETURN_ERROR_IF_SYSCALL_FAIL(RetryEINTR(sendmsg)(fd.get(), &msg, 0));\n+ return NetlinkResponse(fd, fn, expect_nlmsgerr);\n+}\n+\n+PosixError NetlinkResponse(\n+ const FileDescriptor& fd,\n+ const std::function<void(const struct nlmsghdr* hdr)>& fn,\n+ bool expect_nlmsgerr) {\nconstexpr size_t kBufferSize = 4096;\nstd::vector<char> buf(kBufferSize);\n+ struct iovec iov = {};\niov.iov_base = buf.data();\niov.iov_len = buf.size();\n+ struct msghdr msg = {};\n+ msg.msg_iov = &iov;\n+ msg.msg_iovlen = 1;\n// If NLM_F_MULTI is set, response is a series of messages that ends with a\n// NLMSG_DONE message.\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/socket_netlink_util.h",
"new_path": "test/syscalls/linux/socket_netlink_util.h",
"diff": "@@ -41,6 +41,14 @@ PosixError NetlinkRequestResponse(\nconst std::function<void(const struct nlmsghdr* hdr)>& fn,\nbool expect_nlmsgerr);\n+// Call fn on all response netlink messages.\n+//\n+// To be used on requests with NLM_F_MULTI reponses.\n+PosixError NetlinkResponse(\n+ const FileDescriptor& fd,\n+ const std::function<void(const struct nlmsghdr* hdr)>& fn,\n+ bool expect_nlmsgerr);\n+\n// Send the passed request and call fn on all response netlink messages.\n//\n// To be used on requests without NLM_F_MULTI reponses.\n"
}
] | Go | Apache License 2.0 | google/gvisor | sockets: ignore io.EOF from view.ReadAt
Reported-by: [email protected]
PiperOrigin-RevId: 337451896 |
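The netlink and netstack hunks hinge on one detail: when sendmsg is reached via splice(2), the source is file-backed and the copy-in can legitimately report io.EOF after a short read, which should be treated as success rather than an error. A generic sketch of that handling, using a plain io.Reader rather than the sentry's IOSequence:

```go
package example

import (
	"errors"
	"io"
)

// copyPayload reads up to len(buf) bytes from src. When src is a file (the
// splice path), hitting end-of-file is not a failure: whatever was read is
// simply the message payload.
func copyPayload(src io.Reader, buf []byte) (int, error) {
	n, err := src.Read(buf)
	if errors.Is(err, io.EOF) {
		// EOF only means the file ran out of data; keep the partial read.
		err = nil
	}
	if err != nil {
		return 0, err
	}
	return n, nil
}
```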
259,858 | 16.10.2020 09:03:21 | 25,200 | 14a003c60f35e55f9e8c29fc0d75478c9a1214f9 | Cache errors when processing stdlib with nogo. | [
{
"change_type": "MODIFY",
"old_path": "tools/nogo/nogo.go",
"new_path": "tools/nogo/nogo.go",
"diff": "@@ -264,12 +264,17 @@ func checkStdlib(config *stdlibConfig, ac map[*analysis.Analyzer]matcher) ([]str\n// Closure to check a single package.\nallFindings := make([]string, 0)\nstdlibFacts := make(map[string][]byte)\n+ stdlibErrs := make(map[string]error)\nvar checkOne func(pkg string) error // Recursive.\ncheckOne = func(pkg string) error {\n// Is this already done?\nif _, ok := stdlibFacts[pkg]; ok {\nreturn nil\n}\n+ // Did this fail previously?\n+ if _, ok := stdlibErrs[pkg]; ok {\n+ return nil\n+ }\n// Lookup the configuration.\nconfig, ok := packages[pkg]\n@@ -283,6 +288,7 @@ func checkStdlib(config *stdlibConfig, ac map[*analysis.Analyzer]matcher) ([]str\n// If there's no binary for this package, it is likely\n// not built with the distribution. That's fine, we can\n// just skip analysis.\n+ stdlibErrs[pkg] = err\nreturn nil\n}\n@@ -299,6 +305,7 @@ func checkStdlib(config *stdlibConfig, ac map[*analysis.Analyzer]matcher) ([]str\nif err != nil {\n// If we can't analyze a package from the standard library,\n// then we skip it. It will simply not have any findings.\n+ stdlibErrs[pkg] = err\nreturn nil\n}\nstdlibFacts[pkg] = factData\n@@ -312,7 +319,9 @@ func checkStdlib(config *stdlibConfig, ac map[*analysis.Analyzer]matcher) ([]str\n// to evaluate in the order provided here. We do ensure however, that\n// all packages are evaluated.\nfor pkg := range packages {\n- checkOne(pkg)\n+ if err := checkOne(pkg); err != nil {\n+ return nil, nil, err\n+ }\n}\n// Sanity check.\n@@ -326,6 +335,11 @@ func checkStdlib(config *stdlibConfig, ac map[*analysis.Analyzer]matcher) ([]str\nreturn nil, nil, fmt.Errorf(\"error saving stdlib facts: %w\", err)\n}\n+ // Write out all errors.\n+ for pkg, err := range stdlibErrs {\n+ log.Printf(\"WARNING: error while processing %v: %v\", pkg, err)\n+ }\n+\n// Return all findings.\nreturn allFindings, factData, nil\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Cache errors when processing stdlib with nogo.
PiperOrigin-RevId: 337515664 |
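The nogo change memoizes failures alongside successes, so a standard-library package that failed analysis is not retried every time another import path reaches it. A small generic sketch of that memoization pattern (not the nogo code itself):

```go
package example

// analyze is a placeholder for the expensive per-package work.
func analyze(pkg string) ([]byte, error) { return []byte(pkg), nil }

// checker caches both results and errors, keyed by package path.
type checker struct {
	facts map[string][]byte
	errs  map[string]error
}

func newChecker() *checker {
	return &checker{facts: map[string][]byte{}, errs: map[string]error{}}
}

func (c *checker) check(pkg string) ([]byte, error) {
	// Return a cached success or a cached failure without re-running analysis.
	if f, ok := c.facts[pkg]; ok {
		return f, nil
	}
	if err, ok := c.errs[pkg]; ok {
		return nil, err
	}
	f, err := analyze(pkg)
	if err != nil {
		c.errs[pkg] = err
		return nil, err
	}
	c.facts[pkg] = f
	return f, nil
}
```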
259,858 | 16.10.2020 11:24:52 | 25,200 | b0da31b9213741aa035a77d602237b4fcd067c98 | Refactor nogo to better support ARM. | [
{
"change_type": "MODIFY",
"old_path": "tools/bazeldefs/defs.bzl",
"new_path": "tools/bazeldefs/defs.bzl",
"diff": "@@ -158,11 +158,31 @@ def go_test_library(target):\nreturn target.attr.embed[0]\nreturn None\n-def go_context(ctx, std = False):\n+def go_context(ctx, goos = None, goarch = None, std = False):\n+ \"\"\"Extracts a standard Go context struct.\n+\n+ Args:\n+ ctx: the starlark context (required).\n+ goos: the GOOS value.\n+ goarch: the GOARCH value.\n+ std: ignored.\n+\n+ Returns:\n+ A context Go struct with pointers to Go toolchain components.\n+ \"\"\"\n+\n# We don't change anything for the standard library analysis. All Go files\n# are available in all instances. Note that this includes the standard\n# library sources, which are analyzed by nogo.\ngo_ctx = _go_context(ctx)\n+ if goos == None:\n+ goos = go_ctx.sdk.goos\n+ elif goos != go_ctx.sdk.goos:\n+ fail(\"Internal GOOS (%s) doesn't match GoSdk GOOS (%s).\" % (goos, go_ctx.sdk.goos))\n+ if goarch == None:\n+ goarch = go_ctx.sdk.goarch\n+ elif goarch != go_ctx.sdk.goarch:\n+ fail(\"Internal GOARCH (%s) doesn't match GoSdk GOARCH (%s).\" % (goarch, go_ctx.sdk.goarch))\nreturn struct(\ngo = go_ctx.go,\nenv = go_ctx.env,\n@@ -186,6 +206,12 @@ def select_arch(amd64 = \"amd64\", arm64 = \"arm64\", default = None, **kwargs):\ndef select_system(linux = [\"__linux__\"], **kwargs):\nreturn linux # Only Linux supported.\n+def select_goarch():\n+ return select_arch(arm64 = \"arm64\", amd64 = \"amd64\")\n+\n+def select_goos():\n+ return select_system(linux = \"linux\")\n+\ndef default_installer():\nreturn None\n"
},
{
"change_type": "MODIFY",
"old_path": "tools/defs.bzl",
"new_path": "tools/defs.bzl",
"diff": "@@ -7,7 +7,7 @@ change for Google-internal and bazel-compatible rules.\nload(\"//tools/go_stateify:defs.bzl\", \"go_stateify\")\nload(\"//tools/go_marshal:defs.bzl\", \"go_marshal\", \"marshal_deps\", \"marshal_test_deps\")\n-load(\"//tools/bazeldefs:defs.bzl\", _build_test = \"build_test\", _bzl_library = \"bzl_library\", _cc_binary = \"cc_binary\", _cc_flags_supplier = \"cc_flags_supplier\", _cc_grpc_library = \"cc_grpc_library\", _cc_library = \"cc_library\", _cc_proto_library = \"cc_proto_library\", _cc_test = \"cc_test\", _cc_toolchain = \"cc_toolchain\", _coreutil = \"coreutil\", _default_installer = \"default_installer\", _default_net_util = \"default_net_util\", _gazelle = \"gazelle\", _gbenchmark = \"gbenchmark\", _go_binary = \"go_binary\", _go_embed_data = \"go_embed_data\", _go_grpc_and_proto_libraries = \"go_grpc_and_proto_libraries\", _go_library = \"go_library\", _go_path = \"go_path\", _go_proto_library = \"go_proto_library\", _go_test = \"go_test\", _grpcpp = \"grpcpp\", _gtest = \"gtest\", _loopback = \"loopback\", _pkg_deb = \"pkg_deb\", _pkg_tar = \"pkg_tar\", _proto_library = \"proto_library\", _py_binary = \"py_binary\", _rbe_platform = \"rbe_platform\", _rbe_toolchain = \"rbe_toolchain\", _select_arch = \"select_arch\", _select_system = \"select_system\", _short_path = \"short_path\", _vdso_linker_option = \"vdso_linker_option\")\n+load(\"//tools/bazeldefs:defs.bzl\", _build_test = \"build_test\", _bzl_library = \"bzl_library\", _cc_binary = \"cc_binary\", _cc_flags_supplier = \"cc_flags_supplier\", _cc_grpc_library = \"cc_grpc_library\", _cc_library = \"cc_library\", _cc_proto_library = \"cc_proto_library\", _cc_test = \"cc_test\", _cc_toolchain = \"cc_toolchain\", _coreutil = \"coreutil\", _default_installer = \"default_installer\", _default_net_util = \"default_net_util\", _gazelle = \"gazelle\", _gbenchmark = \"gbenchmark\", _go_binary = \"go_binary\", _go_embed_data = \"go_embed_data\", _go_grpc_and_proto_libraries = \"go_grpc_and_proto_libraries\", _go_library = \"go_library\", _go_path = \"go_path\", _go_proto_library = \"go_proto_library\", _go_test = \"go_test\", _grpcpp = \"grpcpp\", _gtest = \"gtest\", _loopback = \"loopback\", _pkg_deb = \"pkg_deb\", _pkg_tar = \"pkg_tar\", _proto_library = \"proto_library\", _py_binary = \"py_binary\", _rbe_platform = \"rbe_platform\", _rbe_toolchain = \"rbe_toolchain\", _select_arch = \"select_arch\", _select_goarch = \"select_goarch\", _select_goos = \"select_goos\", _select_system = \"select_system\", _short_path = \"short_path\", _vdso_linker_option = \"vdso_linker_option\")\nload(\"//tools/bazeldefs:platforms.bzl\", _default_platform = \"default_platform\", _platforms = \"platforms\")\nload(\"//tools/bazeldefs:tags.bzl\", \"go_suffixes\")\nload(\"//tools/nogo:defs.bzl\", \"nogo_test\")\n@@ -35,6 +35,8 @@ pkg_tar = _pkg_tar\npy_binary = _py_binary\nselect_arch = _select_arch\nselect_system = _select_system\n+select_goos = _select_goos\n+select_goarch = _select_goarch\nshort_path = _short_path\nrbe_platform = _rbe_platform\nrbe_toolchain = _rbe_toolchain\n"
},
{
"change_type": "MODIFY",
"old_path": "tools/nogo/BUILD",
"new_path": "tools/nogo/BUILD",
"diff": "-load(\"//tools:defs.bzl\", \"bzl_library\", \"go_library\")\n-load(\"//tools/nogo:defs.bzl\", \"nogo_objdump_tool\", \"nogo_stdlib\")\n+load(\"//tools:defs.bzl\", \"bzl_library\", \"go_library\", \"select_goarch\", \"select_goos\")\n+load(\"//tools/nogo:defs.bzl\", \"nogo_objdump_tool\", \"nogo_stdlib\", \"nogo_target\")\npackage(licenses = [\"notice\"])\n+nogo_target(\n+ name = \"target\",\n+ goarch = select_goarch(),\n+ goos = select_goos(),\n+ visibility = [\"//visibility:public\"],\n+)\n+\nnogo_objdump_tool(\nname = \"objdump_tool\",\nvisibility = [\"//visibility:public\"],\n"
},
{
"change_type": "MODIFY",
"old_path": "tools/nogo/defs.bzl",
"new_path": "tools/nogo/defs.bzl",
"diff": "load(\"//tools/bazeldefs:defs.bzl\", \"go_context\", \"go_importpath\", \"go_rule\", \"go_test_library\")\n-def _nogo_objdump_tool_impl(ctx):\n- go_ctx = go_context(ctx)\n+NogoTargetInfo = provider(\n+ \"information about the Go target\",\n+ fields = {\n+ \"goarch\": \"the build architecture (GOARCH)\",\n+ \"goos\": \"the build OS target (GOOS)\",\n+ },\n+)\n+\n+def _nogo_target_impl(ctx):\n+ return [NogoTargetInfo(\n+ goarch = ctx.attr.goarch,\n+ goos = ctx.attr.goos,\n+ )]\n+nogo_target = go_rule(\n+ rule,\n+ implementation = _nogo_target_impl,\n+ attrs = {\n+ # goarch is the build architecture. This will normally be provided by a\n+ # select statement, but this information is propagated to other rules.\n+ \"goarch\": attr.string(mandatory = True),\n+ # goos is similarly the build operating system target.\n+ \"goos\": attr.string(mandatory = True),\n+ },\n+)\n+\n+def _nogo_objdump_tool_impl(ctx):\n# Construct the magic dump command.\n#\n# Note that in some cases, the input is being fed into the tool via stdin.\n@@ -12,6 +36,8 @@ def _nogo_objdump_tool_impl(ctx):\n# we need the tool to handle this case by creating a temporary file.\n#\n# [1] https://github.com/golang/go/issues/41051\n+ nogo_target_info = ctx.attr._nogo_target[NogoTargetInfo]\n+ go_ctx = go_context(ctx, goos = nogo_target_info.goos, goarch = nogo_target_info.goarch)\nenv_prefix = \" \".join([\"%s=%s\" % (key, value) for (key, value) in go_ctx.env.items()])\ndumper = ctx.actions.declare_file(ctx.label.name)\nctx.actions.write(dumper, \"\\n\".join([\n@@ -42,6 +68,12 @@ def _nogo_objdump_tool_impl(ctx):\nnogo_objdump_tool = go_rule(\nrule,\nimplementation = _nogo_objdump_tool_impl,\n+ attrs = {\n+ \"_nogo_target\": attr.label(\n+ default = \"//tools/nogo:target\",\n+ cfg = \"target\",\n+ ),\n+ },\n)\n# NogoStdlibInfo is the set of standard library facts.\n@@ -54,9 +86,9 @@ NogoStdlibInfo = provider(\n)\ndef _nogo_stdlib_impl(ctx):\n- go_ctx = go_context(ctx)\n-\n# Build the standard library facts.\n+ nogo_target_info = ctx.attr._nogo_target[NogoTargetInfo]\n+ go_ctx = go_context(ctx, goos = nogo_target_info.goos, goarch = nogo_target_info.goarch)\nfacts = ctx.actions.declare_file(ctx.label.name + \".facts\")\nfindings = ctx.actions.declare_file(ctx.label.name + \".findings\")\nconfig = struct(\n@@ -70,12 +102,12 @@ def _nogo_stdlib_impl(ctx):\nctx.actions.run(\ninputs = [config_file] + go_ctx.stdlib_srcs,\noutputs = [facts, findings],\n- tools = depset(go_ctx.runfiles.to_list() + ctx.files._objdump_tool),\n- executable = ctx.files._nogo[0],\n+ tools = depset(go_ctx.runfiles.to_list() + ctx.files._nogo_objdump_tool),\n+ executable = ctx.files._nogo_check[0],\nmnemonic = \"GoStandardLibraryAnalysis\",\nprogress_message = \"Analyzing Go Standard Library\",\narguments = go_ctx.nogo_args + [\n- \"-objdump_tool=%s\" % ctx.files._objdump_tool[0].path,\n+ \"-objdump_tool=%s\" % ctx.files._nogo_objdump_tool[0].path,\n\"-stdlib=%s\" % config_file.path,\n\"-findings=%s\" % findings.path,\n\"-facts=%s\" % facts.path,\n@@ -92,11 +124,17 @@ nogo_stdlib = go_rule(\nrule,\nimplementation = _nogo_stdlib_impl,\nattrs = {\n- \"_nogo\": attr.label(\n+ \"_nogo_check\": attr.label(\ndefault = \"//tools/nogo/check:check\",\n+ cfg = \"host\",\n),\n- \"_objdump_tool\": attr.label(\n+ \"_nogo_objdump_tool\": attr.label(\ndefault = \"//tools/nogo:objdump_tool\",\n+ cfg = \"host\",\n+ ),\n+ \"_nogo_target\": attr.label(\n+ default = \"//tools/nogo:target\",\n+ cfg = \"target\",\n),\n},\n)\n@@ -119,8 +157,6 @@ NogoInfo = provider(\n)\ndef 
_nogo_aspect_impl(target, ctx):\n- go_ctx = go_context(ctx)\n-\n# If this is a nogo rule itself (and not the shadow of a go_library or\n# go_binary rule created by such a rule), then we simply return nothing.\n# All work is done in the shadow properties for go rules. For a proto\n@@ -200,10 +236,13 @@ def _nogo_aspect_impl(target, ctx):\ninputs += info.binaries\n# Add the standard library facts.\n- stdlib_facts = ctx.attr._nogo_stdlib[NogoStdlibInfo].facts\n+ stdlib_info = ctx.attr._nogo_stdlib[NogoStdlibInfo]\n+ stdlib_facts = stdlib_info.facts\ninputs.append(stdlib_facts)\n# The nogo tool operates on a configuration serialized in JSON format.\n+ nogo_target_info = ctx.attr._nogo_target[NogoTargetInfo]\n+ go_ctx = go_context(ctx, goos = nogo_target_info.goos, goarch = nogo_target_info.goarch)\nfacts = ctx.actions.declare_file(target.label.name + \".facts\")\nfindings = ctx.actions.declare_file(target.label.name + \".findings\")\nescapes = ctx.actions.declare_file(target.label.name + \".escapes\")\n@@ -224,13 +263,13 @@ def _nogo_aspect_impl(target, ctx):\nctx.actions.run(\ninputs = inputs,\noutputs = [facts, findings, escapes],\n- tools = depset(go_ctx.runfiles.to_list() + ctx.files._objdump_tool),\n- executable = ctx.files._nogo[0],\n+ tools = depset(go_ctx.runfiles.to_list() + ctx.files._nogo_objdump_tool),\n+ executable = ctx.files._nogo_check[0],\nmnemonic = \"GoStaticAnalysis\",\nprogress_message = \"Analyzing %s\" % target.label,\narguments = go_ctx.nogo_args + [\n\"-binary=%s\" % target_objfile.path,\n- \"-objdump_tool=%s\" % ctx.files._objdump_tool[0].path,\n+ \"-objdump_tool=%s\" % ctx.files._nogo_objdump_tool[0].path,\n\"-package=%s\" % config_file.path,\n\"-findings=%s\" % findings.path,\n\"-facts=%s\" % facts.path,\n@@ -266,9 +305,22 @@ nogo_aspect = go_rule(\n\"embed\",\n],\nattrs = {\n- \"_nogo\": attr.label(default = \"//tools/nogo/check:check\"),\n- \"_nogo_stdlib\": attr.label(default = \"//tools/nogo:stdlib\"),\n- \"_objdump_tool\": attr.label(default = \"//tools/nogo:objdump_tool\"),\n+ \"_nogo_check\": attr.label(\n+ default = \"//tools/nogo/check:check\",\n+ cfg = \"host\",\n+ ),\n+ \"_nogo_stdlib\": attr.label(\n+ default = \"//tools/nogo:stdlib\",\n+ cfg = \"host\",\n+ ),\n+ \"_nogo_objdump_tool\": attr.label(\n+ default = \"//tools/nogo:objdump_tool\",\n+ cfg = \"host\",\n+ ),\n+ \"_nogo_target\": attr.label(\n+ default = \"//tools/nogo:target\",\n+ cfg = \"target\",\n+ ),\n},\n)\n"
}
] | Go | Apache License 2.0 | google/gvisor | Refactor nogo to better support ARM.
PiperOrigin-RevId: 337544107 |
260,024 | 16.10.2020 12:28:54 | 25,200 | 4d27f33b09932a7f6cc5ccb03ad6f7462d497afb | Make IPv4 check the IP header checksum
The IPv4 header checksum has not been checked, at least in recent times,
so add code to do so. Fix all the tests that fail because they never
needed to set the checksum.
Fixes | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/ip_test.go",
"new_path": "pkg/tcpip/network/ip_test.go",
"diff": "@@ -322,6 +322,7 @@ func TestSourceAddressValidation(t *testing.T) {\nSrcAddr: src,\nDstAddr: localIPv4Addr,\n})\n+ ip.SetChecksum(^ip.CalculateChecksum())\ne.InjectInbound(header.IPv4ProtocolNumber, stack.NewPacketBuffer(stack.PacketBufferOptions{\nData: hdr.View().ToVectorisedView(),\n@@ -344,7 +345,6 @@ func TestSourceAddressValidation(t *testing.T) {\nSrcAddr: src,\nDstAddr: localIPv6Addr,\n})\n-\ne.InjectInbound(header.IPv6ProtocolNumber, stack.NewPacketBuffer(stack.PacketBufferOptions{\nData: hdr.View().ToVectorisedView(),\n}))\n@@ -581,6 +581,7 @@ func TestIPv4Receive(t *testing.T) {\nSrcAddr: remoteIPv4Addr,\nDstAddr: localIPv4Addr,\n})\n+ ip.SetChecksum(^ip.CalculateChecksum())\n// Make payload be non-zero.\nfor i := header.IPv4MinimumSize; i < totalLen; i++ {\n@@ -662,6 +663,7 @@ func TestIPv4ReceiveControl(t *testing.T) {\nSrcAddr: \"\\x0a\\x00\\x00\\xbb\",\nDstAddr: localIPv4Addr,\n})\n+ ip.SetChecksum(^ip.CalculateChecksum())\n// Create the ICMP header.\nicmp := header.ICMPv4(view[header.IPv4MinimumSize:])\n@@ -681,6 +683,7 @@ func TestIPv4ReceiveControl(t *testing.T) {\nSrcAddr: localIPv4Addr,\nDstAddr: remoteIPv4Addr,\n})\n+ ip.SetChecksum(^ip.CalculateChecksum())\n// Make payload be non-zero.\nfor i := dataOffset; i < len(view); i++ {\n@@ -734,6 +737,8 @@ func TestIPv4FragmentationReceive(t *testing.T) {\nSrcAddr: remoteIPv4Addr,\nDstAddr: localIPv4Addr,\n})\n+ ip1.SetChecksum(^ip1.CalculateChecksum())\n+\n// Make payload be non-zero.\nfor i := header.IPv4MinimumSize; i < totalLen; i++ {\nfrag1[i] = uint8(i)\n@@ -750,6 +755,8 @@ func TestIPv4FragmentationReceive(t *testing.T) {\nSrcAddr: remoteIPv4Addr,\nDstAddr: localIPv4Addr,\n})\n+ ip2.SetChecksum(^ip2.CalculateChecksum())\n+\n// Make payload be non-zero.\nfor i := header.IPv4MinimumSize; i < totalLen; i++ {\nfrag2[i] = uint8(i)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/ipv4/ipv4.go",
"new_path": "pkg/tcpip/network/ipv4/ipv4.go",
"diff": "@@ -440,6 +440,32 @@ func (e *endpoint) HandlePacket(r *stack.Route, pkt *stack.PacketBuffer) {\nreturn\n}\n+ // There has been some confusion regarding verifying checksums. We need\n+ // just look for negative 0 (0xffff) as the checksum, as it's not possible to\n+ // get positive 0 (0) for the checksum. Some bad implementations could get it\n+ // when doing entry replacement in the early days of the Internet,\n+ // however the lore that one needs to check for both persists.\n+ //\n+ // RFC 1624 section 1 describes the source of this confusion as:\n+ // [the partial recalculation method described in RFC 1071] computes a\n+ // result for certain cases that differs from the one obtained from\n+ // scratch (one's complement of one's complement sum of the original\n+ // fields).\n+ //\n+ // However RFC 1624 section 5 clarifies that if using the verification method\n+ // \"recommended by RFC 1071, it does not matter if an intermediate system\n+ // generated a -0 instead of +0\".\n+ //\n+ // RFC1071 page 1 specifies the verification method as:\n+ // (3) To check a checksum, the 1's complement sum is computed over the\n+ // same set of octets, including the checksum field. If the result\n+ // is all 1 bits (-0 in 1's complement arithmetic), the check\n+ // succeeds.\n+ if h.CalculateChecksum() != 0xffff {\n+ r.Stats().IP.MalformedPacketsReceived.Increment()\n+ return\n+ }\n+\n// As per RFC 1122 section 3.2.1.3:\n// When a host sends any datagram, the IP source address MUST\n// be one of its own IP addresses (but not a broadcast or\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/ipv4/ipv4_test.go",
"new_path": "pkg/tcpip/network/ipv4/ipv4_test.go",
"diff": "@@ -120,6 +120,7 @@ func TestIPv4Sanity(t *testing.T) {\ntests := []struct {\nname string\nheaderLength uint8 // value of 0 means \"use correct size\"\n+ badHeaderChecksum bool\nmaxTotalLength uint16\ntransportProtocol uint8\nTTL uint8\n@@ -135,6 +136,14 @@ func TestIPv4Sanity(t *testing.T) {\ntransportProtocol: uint8(header.ICMPv4ProtocolNumber),\nTTL: ttl,\n},\n+ {\n+ name: \"bad header checksum\",\n+ maxTotalLength: defaultMTU,\n+ transportProtocol: uint8(header.ICMPv4ProtocolNumber),\n+ TTL: ttl,\n+ badHeaderChecksum: true,\n+ shouldFail: true,\n+ },\n// The TTL tests check that we are not rejecting an incoming packet\n// with a zero or one TTL, which has been a point of confusion in the\n// past as RFC 791 says: \"If this field contains the value zero, then the\n@@ -290,6 +299,12 @@ func TestIPv4Sanity(t *testing.T) {\nif test.headerLength != 0 {\nip.SetHeaderLength(test.headerLength)\n}\n+ ip.SetChecksum(0)\n+ ipHeaderChecksum := ip.CalculateChecksum()\n+ if test.badHeaderChecksum {\n+ ipHeaderChecksum += 42\n+ }\n+ ip.SetChecksum(^ipHeaderChecksum)\nrequestPkt := stack.NewPacketBuffer(stack.PacketBufferOptions{\nData: hdr.View().ToVectorisedView(),\n})\n@@ -1427,6 +1442,7 @@ func TestReceiveFragments(t *testing.T) {\nSrcAddr: frag.srcAddr,\nDstAddr: frag.dstAddr,\n})\n+ ip.SetChecksum(^ip.CalculateChecksum())\nvv := hdr.View().ToVectorisedView()\nvv.AppendView(frag.payload)\n@@ -1695,6 +1711,7 @@ func TestPacketQueing(t *testing.T) {\nSrcAddr: host2IPv4Addr.AddressWithPrefix.Address,\nDstAddr: host1IPv4Addr.AddressWithPrefix.Address,\n})\n+ ip.SetChecksum(^ip.CalculateChecksum())\ne.InjectInbound(ipv4.ProtocolNumber, stack.NewPacketBuffer(stack.PacketBufferOptions{\nData: hdr.View().ToVectorisedView(),\n}))\n@@ -1738,6 +1755,7 @@ func TestPacketQueing(t *testing.T) {\nSrcAddr: host2IPv4Addr.AddressWithPrefix.Address,\nDstAddr: host1IPv4Addr.AddressWithPrefix.Address,\n})\n+ ip.SetChecksum(^ip.CalculateChecksum())\ne.InjectInbound(header.IPv4ProtocolNumber, stack.NewPacketBuffer(stack.PacketBufferOptions{\nData: hdr.View().ToVectorisedView(),\n}))\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/tests/integration/multicast_broadcast_test.go",
"new_path": "pkg/tcpip/tests/integration/multicast_broadcast_test.go",
"diff": "@@ -80,6 +80,7 @@ func TestPingMulticastBroadcast(t *testing.T) {\nSrcAddr: remoteIPv4Addr,\nDstAddr: dst,\n})\n+ ip.SetChecksum(^ip.CalculateChecksum())\ne.InjectInbound(header.IPv4ProtocolNumber, stack.NewPacketBuffer(stack.PacketBufferOptions{\nData: hdr.View().ToVectorisedView(),\n@@ -250,6 +251,7 @@ func TestIncomingMulticastAndBroadcast(t *testing.T) {\nSrcAddr: remoteIPv4Addr,\nDstAddr: dst,\n})\n+ ip.SetChecksum(^ip.CalculateChecksum())\ne.InjectInbound(header.IPv4ProtocolNumber, stack.NewPacketBuffer(stack.PacketBufferOptions{\nData: hdr.View().ToVectorisedView(),\n"
}
] | Go | Apache License 2.0 | google/gvisor | Make IPv4 check the IP header checksum
The IPv4 header checksum has not been checked, at least in recent times,
so add code to do so. Fix all the tests that fail because they never
needed to set the checksum.
Fixes #4484
PiperOrigin-RevId: 337556243 |
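The ipv4.go hunk applies the RFC 1071 verification rule quoted in its comment: recompute the one's-complement sum over the header with the checksum field included, and accept the header only if the result is all ones (0xffff, "negative zero"). A standalone sketch of that check over a raw header, independent of gVisor's header package:

```go
package example

// onesComplementSum folds a 16-bit one's-complement sum over b.
func onesComplementSum(b []byte) uint16 {
	var sum uint32
	for i := 0; i+1 < len(b); i += 2 {
		sum += uint32(b[i])<<8 | uint32(b[i+1])
	}
	if len(b)%2 == 1 {
		// An odd trailing byte is padded as the high byte of a final word.
		sum += uint32(b[len(b)-1]) << 8
	}
	// Fold the carries back into the low 16 bits.
	for sum>>16 != 0 {
		sum = sum&0xffff + sum>>16
	}
	return uint16(sum)
}

// headerChecksumOK implements the RFC 1071 check: summing the header with its
// checksum field included must yield 0xffff for a valid header.
func headerChecksumOK(hdr []byte) bool {
	return onesComplementSum(hdr) == 0xffff
}
```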
259,858 | 16.10.2020 14:42:02 | 25,200 | 9a3d8973c4fcd1475b3748c10eb3e255d44e8a20 | Refactor shared starlark files. | [
{
"change_type": "ADD",
"old_path": null,
"new_path": "tools/bazeldefs/cc.bzl",
"diff": "+\"\"\"C++ rules.\"\"\"\n+\n+load(\"@bazel_tools//tools/cpp:cc_flags_supplier.bzl\", _cc_flags_supplier = \"cc_flags_supplier\")\n+load(\"@rules_cc//cc:defs.bzl\", _cc_binary = \"cc_binary\", _cc_library = \"cc_library\", _cc_proto_library = \"cc_proto_library\", _cc_test = \"cc_test\")\n+load(\"@com_github_grpc_grpc//bazel:cc_grpc_library.bzl\", _cc_grpc_library = \"cc_grpc_library\")\n+\n+cc_library = _cc_library\n+cc_flags_supplier = _cc_flags_supplier\n+cc_proto_library = _cc_proto_library\n+cc_test = _cc_test\n+cc_toolchain = \"@bazel_tools//tools/cpp:current_cc_toolchain\"\n+gtest = \"@com_google_googletest//:gtest\"\n+gbenchmark = \"@com_google_benchmark//:benchmark\"\n+grpcpp = \"@com_github_grpc_grpc//:grpc++\"\n+vdso_linker_option = \"-fuse-ld=gold \"\n+\n+def cc_grpc_library(name, **kwargs):\n+ _cc_grpc_library(name = name, grpc_only = True, **kwargs)\n+\n+def cc_binary(name, static = False, **kwargs):\n+ \"\"\"Run cc_binary.\n+\n+ Args:\n+ name: name of the target.\n+ static: make a static binary if True\n+ **kwargs: the rest of the args.\n+ \"\"\"\n+ if static:\n+ # How to statically link a c++ program that uses threads, like for gRPC:\n+ # https://gcc.gnu.org/legacy-ml/gcc-help/2010-05/msg00029.html\n+ if \"linkopts\" not in kwargs:\n+ kwargs[\"linkopts\"] = []\n+ kwargs[\"linkopts\"] += [\n+ \"-static\",\n+ \"-lstdc++\",\n+ \"-Wl,--whole-archive\",\n+ \"-lpthread\",\n+ \"-Wl,--no-whole-archive\",\n+ ]\n+ _cc_binary(\n+ name = name,\n+ **kwargs\n+ )\n"
},
{
"change_type": "MODIFY",
"old_path": "tools/bazeldefs/defs.bzl",
"new_path": "tools/bazeldefs/defs.bzl",
"diff": "-\"\"\"Bazel implementations of standard rules.\"\"\"\n+\"\"\"Meta and miscellaneous rules.\"\"\"\n-load(\"@bazel_gazelle//:def.bzl\", _gazelle = \"gazelle\")\nload(\"@bazel_skylib//rules:build_test.bzl\", _build_test = \"build_test\")\nload(\"@bazel_skylib//:bzl_library.bzl\", _bzl_library = \"bzl_library\")\n-load(\"@bazel_tools//tools/cpp:cc_flags_supplier.bzl\", _cc_flags_supplier = \"cc_flags_supplier\")\n-load(\"@io_bazel_rules_go//go:def.bzl\", \"GoLibrary\", _go_binary = \"go_binary\", _go_context = \"go_context\", _go_embed_data = \"go_embed_data\", _go_library = \"go_library\", _go_path = \"go_path\", _go_test = \"go_test\")\n-load(\"@io_bazel_rules_go//proto:def.bzl\", _go_grpc_library = \"go_grpc_library\", _go_proto_library = \"go_proto_library\")\n-load(\"@rules_cc//cc:defs.bzl\", _cc_binary = \"cc_binary\", _cc_library = \"cc_library\", _cc_proto_library = \"cc_proto_library\", _cc_test = \"cc_test\")\n-load(\"@rules_pkg//:pkg.bzl\", _pkg_deb = \"pkg_deb\", _pkg_tar = \"pkg_tar\")\n-load(\"@com_github_grpc_grpc//bazel:cc_grpc_library.bzl\", _cc_grpc_library = \"cc_grpc_library\")\nbuild_test = _build_test\nbzl_library = _bzl_library\n-cc_library = _cc_library\n-cc_flags_supplier = _cc_flags_supplier\n-cc_proto_library = _cc_proto_library\n-cc_test = _cc_test\n-cc_toolchain = \"@bazel_tools//tools/cpp:current_cc_toolchain\"\n-gazelle = _gazelle\n-go_embed_data = _go_embed_data\n-go_path = _go_path\n-gtest = \"@com_google_googletest//:gtest\"\n-grpcpp = \"@com_github_grpc_grpc//:grpc++\"\n-gbenchmark = \"@com_google_benchmark//:benchmark\"\nloopback = \"//tools/bazeldefs:loopback\"\n-pkg_deb = _pkg_deb\n-pkg_tar = _pkg_tar\n-py_binary = native.py_binary\nrbe_platform = native.platform\nrbe_toolchain = native.toolchain\n-vdso_linker_option = \"-fuse-ld=gold \"\ndef short_path(path):\nreturn path\n@@ -40,160 +18,6 @@ def proto_library(name, has_services = None, **kwargs):\n**kwargs\n)\n-def cc_grpc_library(name, **kwargs):\n- _cc_grpc_library(name = name, grpc_only = True, **kwargs)\n-\n-def _go_proto_or_grpc_library(go_library_func, name, **kwargs):\n- deps = [\n- dep.replace(\"_proto\", \"_go_proto\")\n- for dep in (kwargs.pop(\"deps\", []) or [])\n- ]\n- go_library_func(\n- name = name + \"_go_proto\",\n- importpath = \"gvisor.dev/gvisor/\" + native.package_name() + \"/\" + name + \"_go_proto\",\n- proto = \":\" + name + \"_proto\",\n- deps = deps,\n- **kwargs\n- )\n-\n-def go_proto_library(name, **kwargs):\n- _go_proto_or_grpc_library(_go_proto_library, name, **kwargs)\n-\n-def go_grpc_and_proto_libraries(name, **kwargs):\n- _go_proto_or_grpc_library(_go_grpc_library, name, **kwargs)\n-\n-def cc_binary(name, static = False, **kwargs):\n- \"\"\"Run cc_binary.\n-\n- Args:\n- name: name of the target.\n- static: make a static binary if True\n- **kwargs: the rest of the args.\n- \"\"\"\n- if static:\n- # How to statically link a c++ program that uses threads, like for gRPC:\n- # https://gcc.gnu.org/legacy-ml/gcc-help/2010-05/msg00029.html\n- if \"linkopts\" not in kwargs:\n- kwargs[\"linkopts\"] = []\n- kwargs[\"linkopts\"] += [\n- \"-static\",\n- \"-lstdc++\",\n- \"-Wl,--whole-archive\",\n- \"-lpthread\",\n- \"-Wl,--no-whole-archive\",\n- ]\n- _cc_binary(\n- name = name,\n- **kwargs\n- )\n-\n-def go_binary(name, static = False, pure = False, x_defs = None, **kwargs):\n- \"\"\"Build a go binary.\n-\n- Args:\n- name: name of the target.\n- static: build a static binary.\n- pure: build without cgo.\n- x_defs: additional definitions.\n- **kwargs: rest of the arguments are 
passed to _go_binary.\n- \"\"\"\n- if static:\n- kwargs[\"static\"] = \"on\"\n- if pure:\n- kwargs[\"pure\"] = \"on\"\n- _go_binary(\n- name = name,\n- x_defs = x_defs,\n- **kwargs\n- )\n-\n-def go_importpath(target):\n- \"\"\"Returns the importpath for the target.\"\"\"\n- return target[GoLibrary].importpath\n-\n-def go_library(name, **kwargs):\n- _go_library(\n- name = name,\n- importpath = \"gvisor.dev/gvisor/\" + native.package_name(),\n- **kwargs\n- )\n-\n-def go_test(name, pure = False, library = None, **kwargs):\n- \"\"\"Build a go test.\n-\n- Args:\n- name: name of the output binary.\n- pure: should it be built without cgo.\n- library: the library to embed.\n- **kwargs: rest of the arguments to pass to _go_test.\n- \"\"\"\n- if pure:\n- kwargs[\"pure\"] = \"on\"\n- if library:\n- kwargs[\"embed\"] = [library]\n- _go_test(\n- name = name,\n- **kwargs\n- )\n-\n-def go_rule(rule, implementation, **kwargs):\n- \"\"\"Wraps a rule definition with Go attributes.\n-\n- Args:\n- rule: rule function (typically rule or aspect).\n- implementation: implementation function.\n- **kwargs: other arguments to pass to rule.\n-\n- Returns:\n- The result of invoking the rule.\n- \"\"\"\n- attrs = kwargs.pop(\"attrs\", dict())\n- attrs[\"_go_context_data\"] = attr.label(default = \"@io_bazel_rules_go//:go_context_data\")\n- attrs[\"_stdlib\"] = attr.label(default = \"@io_bazel_rules_go//:stdlib\")\n- toolchains = kwargs.get(\"toolchains\", []) + [\"@io_bazel_rules_go//go:toolchain\"]\n- return rule(implementation, attrs = attrs, toolchains = toolchains, **kwargs)\n-\n-def go_test_library(target):\n- if hasattr(target.attr, \"embed\") and len(target.attr.embed) > 0:\n- return target.attr.embed[0]\n- return None\n-\n-def go_context(ctx, goos = None, goarch = None, std = False):\n- \"\"\"Extracts a standard Go context struct.\n-\n- Args:\n- ctx: the starlark context (required).\n- goos: the GOOS value.\n- goarch: the GOARCH value.\n- std: ignored.\n-\n- Returns:\n- A context Go struct with pointers to Go toolchain components.\n- \"\"\"\n-\n- # We don't change anything for the standard library analysis. All Go files\n- # are available in all instances. Note that this includes the standard\n- # library sources, which are analyzed by nogo.\n- go_ctx = _go_context(ctx)\n- if goos == None:\n- goos = go_ctx.sdk.goos\n- elif goos != go_ctx.sdk.goos:\n- fail(\"Internal GOOS (%s) doesn't match GoSdk GOOS (%s).\" % (goos, go_ctx.sdk.goos))\n- if goarch == None:\n- goarch = go_ctx.sdk.goarch\n- elif goarch != go_ctx.sdk.goarch:\n- fail(\"Internal GOARCH (%s) doesn't match GoSdk GOARCH (%s).\" % (goarch, go_ctx.sdk.goarch))\n- return struct(\n- go = go_ctx.go,\n- env = go_ctx.env,\n- nogo_args = [],\n- stdlib_srcs = go_ctx.sdk.srcs,\n- runfiles = depset([go_ctx.go] + go_ctx.sdk.srcs + go_ctx.sdk.tools + go_ctx.stdlib.libs),\n- goos = go_ctx.sdk.goos,\n- goarch = go_ctx.sdk.goarch,\n- tags = go_ctx.tags,\n- )\n-\ndef select_arch(amd64 = \"amd64\", arm64 = \"arm64\", default = None, **kwargs):\nvalues = {\n\"@bazel_tools//src/conditions:linux_x86_64\": amd64,\n@@ -206,12 +30,6 @@ def select_arch(amd64 = \"amd64\", arm64 = \"arm64\", default = None, **kwargs):\ndef select_system(linux = [\"__linux__\"], **kwargs):\nreturn linux # Only Linux supported.\n-def select_goarch():\n- return select_arch(arm64 = \"arm64\", amd64 = \"amd64\")\n-\n-def select_goos():\n- return select_system(linux = \"linux\")\n-\ndef default_installer():\nreturn None\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "tools/bazeldefs/go.bzl",
"diff": "+\"\"\"Go rules.\"\"\"\n+\n+load(\"@bazel_gazelle//:def.bzl\", _gazelle = \"gazelle\")\n+load(\"@io_bazel_rules_go//go:def.bzl\", \"GoLibrary\", _go_binary = \"go_binary\", _go_context = \"go_context\", _go_embed_data = \"go_embed_data\", _go_library = \"go_library\", _go_path = \"go_path\", _go_test = \"go_test\")\n+load(\"@io_bazel_rules_go//proto:def.bzl\", _go_grpc_library = \"go_grpc_library\", _go_proto_library = \"go_proto_library\")\n+load(\"//tools/bazeldefs:defs.bzl\", \"select_arch\", \"select_system\")\n+\n+gazelle = _gazelle\n+go_embed_data = _go_embed_data\n+go_path = _go_path\n+\n+def _go_proto_or_grpc_library(go_library_func, name, **kwargs):\n+ deps = [\n+ dep.replace(\"_proto\", \"_go_proto\")\n+ for dep in (kwargs.pop(\"deps\", []) or [])\n+ ]\n+ go_library_func(\n+ name = name + \"_go_proto\",\n+ importpath = \"gvisor.dev/gvisor/\" + native.package_name() + \"/\" + name + \"_go_proto\",\n+ proto = \":\" + name + \"_proto\",\n+ deps = deps,\n+ **kwargs\n+ )\n+\n+def go_proto_library(name, **kwargs):\n+ _go_proto_or_grpc_library(_go_proto_library, name, **kwargs)\n+\n+def go_grpc_and_proto_libraries(name, **kwargs):\n+ _go_proto_or_grpc_library(_go_grpc_library, name, **kwargs)\n+\n+def go_binary(name, static = False, pure = False, x_defs = None, **kwargs):\n+ \"\"\"Build a go binary.\n+\n+ Args:\n+ name: name of the target.\n+ static: build a static binary.\n+ pure: build without cgo.\n+ x_defs: additional definitions.\n+ **kwargs: rest of the arguments are passed to _go_binary.\n+ \"\"\"\n+ if static:\n+ kwargs[\"static\"] = \"on\"\n+ if pure:\n+ kwargs[\"pure\"] = \"on\"\n+ _go_binary(\n+ name = name,\n+ x_defs = x_defs,\n+ **kwargs\n+ )\n+\n+def go_importpath(target):\n+ \"\"\"Returns the importpath for the target.\"\"\"\n+ return target[GoLibrary].importpath\n+\n+def go_library(name, **kwargs):\n+ _go_library(\n+ name = name,\n+ importpath = \"gvisor.dev/gvisor/\" + native.package_name(),\n+ **kwargs\n+ )\n+\n+def go_test(name, pure = False, library = None, **kwargs):\n+ \"\"\"Build a go test.\n+\n+ Args:\n+ name: name of the output binary.\n+ pure: should it be built without cgo.\n+ library: the library to embed.\n+ **kwargs: rest of the arguments to pass to _go_test.\n+ \"\"\"\n+ if pure:\n+ kwargs[\"pure\"] = \"on\"\n+ if library:\n+ kwargs[\"embed\"] = [library]\n+ _go_test(\n+ name = name,\n+ **kwargs\n+ )\n+\n+def go_rule(rule, implementation, **kwargs):\n+ \"\"\"Wraps a rule definition with Go attributes.\n+\n+ Args:\n+ rule: rule function (typically rule or aspect).\n+ implementation: implementation function.\n+ **kwargs: other arguments to pass to rule.\n+\n+ Returns:\n+ The result of invoking the rule.\n+ \"\"\"\n+ attrs = kwargs.pop(\"attrs\", dict())\n+ attrs[\"_go_context_data\"] = attr.label(default = \"@io_bazel_rules_go//:go_context_data\")\n+ attrs[\"_stdlib\"] = attr.label(default = \"@io_bazel_rules_go//:stdlib\")\n+ toolchains = kwargs.get(\"toolchains\", []) + [\"@io_bazel_rules_go//go:toolchain\"]\n+ return rule(implementation, attrs = attrs, toolchains = toolchains, **kwargs)\n+\n+def go_test_library(target):\n+ if hasattr(target.attr, \"embed\") and len(target.attr.embed) > 0:\n+ return target.attr.embed[0]\n+ return None\n+\n+def go_context(ctx, goos = None, goarch = None, std = False):\n+ \"\"\"Extracts a standard Go context struct.\n+\n+ Args:\n+ ctx: the starlark context (required).\n+ goos: the GOOS value.\n+ goarch: the GOARCH value.\n+ std: ignored.\n+\n+ Returns:\n+ A context Go struct with pointers to Go toolchain 
components.\n+ \"\"\"\n+\n+ # We don't change anything for the standard library analysis. All Go files\n+ # are available in all instances. Note that this includes the standard\n+ # library sources, which are analyzed by nogo.\n+ go_ctx = _go_context(ctx)\n+ if goos == None:\n+ goos = go_ctx.sdk.goos\n+ elif goos != go_ctx.sdk.goos:\n+ fail(\"Internal GOOS (%s) doesn't match GoSdk GOOS (%s).\" % (goos, go_ctx.sdk.goos))\n+ if goarch == None:\n+ goarch = go_ctx.sdk.goarch\n+ elif goarch != go_ctx.sdk.goarch:\n+ fail(\"Internal GOARCH (%s) doesn't match GoSdk GOARCH (%s).\" % (goarch, go_ctx.sdk.goarch))\n+ return struct(\n+ go = go_ctx.go,\n+ env = go_ctx.env,\n+ nogo_args = [],\n+ stdlib_srcs = go_ctx.sdk.srcs,\n+ runfiles = depset([go_ctx.go] + go_ctx.sdk.srcs + go_ctx.sdk.tools + go_ctx.stdlib.libs),\n+ goos = go_ctx.sdk.goos,\n+ goarch = go_ctx.sdk.goarch,\n+ tags = go_ctx.tags,\n+ )\n+\n+def select_goarch():\n+ return select_arch(arm64 = \"arm64\", amd64 = \"amd64\")\n+\n+def select_goos():\n+ return select_system(linux = \"linux\")\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "tools/bazeldefs/pkg.bzl",
"diff": "+\"\"\"Packaging rules.\"\"\"\n+\n+load(\"@rules_pkg//:pkg.bzl\", _pkg_deb = \"pkg_deb\", _pkg_tar = \"pkg_tar\")\n+\n+pkg_deb = _pkg_deb\n+pkg_tar = _pkg_tar\n"
},
{
"change_type": "MODIFY",
"old_path": "tools/defs.bzl",
"new_path": "tools/defs.bzl",
"diff": "@@ -7,41 +7,49 @@ change for Google-internal and bazel-compatible rules.\nload(\"//tools/go_stateify:defs.bzl\", \"go_stateify\")\nload(\"//tools/go_marshal:defs.bzl\", \"go_marshal\", \"marshal_deps\", \"marshal_test_deps\")\n-load(\"//tools/bazeldefs:defs.bzl\", _build_test = \"build_test\", _bzl_library = \"bzl_library\", _cc_binary = \"cc_binary\", _cc_flags_supplier = \"cc_flags_supplier\", _cc_grpc_library = \"cc_grpc_library\", _cc_library = \"cc_library\", _cc_proto_library = \"cc_proto_library\", _cc_test = \"cc_test\", _cc_toolchain = \"cc_toolchain\", _coreutil = \"coreutil\", _default_installer = \"default_installer\", _default_net_util = \"default_net_util\", _gazelle = \"gazelle\", _gbenchmark = \"gbenchmark\", _go_binary = \"go_binary\", _go_embed_data = \"go_embed_data\", _go_grpc_and_proto_libraries = \"go_grpc_and_proto_libraries\", _go_library = \"go_library\", _go_path = \"go_path\", _go_proto_library = \"go_proto_library\", _go_test = \"go_test\", _grpcpp = \"grpcpp\", _gtest = \"gtest\", _loopback = \"loopback\", _pkg_deb = \"pkg_deb\", _pkg_tar = \"pkg_tar\", _proto_library = \"proto_library\", _py_binary = \"py_binary\", _rbe_platform = \"rbe_platform\", _rbe_toolchain = \"rbe_toolchain\", _select_arch = \"select_arch\", _select_goarch = \"select_goarch\", _select_goos = \"select_goos\", _select_system = \"select_system\", _short_path = \"short_path\", _vdso_linker_option = \"vdso_linker_option\")\n+load(\"//tools/nogo:defs.bzl\", \"nogo_test\")\n+load(\"//tools/bazeldefs:defs.bzl\", _build_test = \"build_test\", _bzl_library = \"bzl_library\", _coreutil = \"coreutil\", _default_installer = \"default_installer\", _default_net_util = \"default_net_util\", _loopback = \"loopback\", _proto_library = \"proto_library\", _rbe_platform = \"rbe_platform\", _rbe_toolchain = \"rbe_toolchain\", _select_arch = \"select_arch\", _select_system = \"select_system\", _short_path = \"short_path\")\n+load(\"//tools/bazeldefs:cc.bzl\", _cc_binary = \"cc_binary\", _cc_flags_supplier = \"cc_flags_supplier\", _cc_grpc_library = \"cc_grpc_library\", _cc_library = \"cc_library\", _cc_proto_library = \"cc_proto_library\", _cc_test = \"cc_test\", _cc_toolchain = \"cc_toolchain\", _gbenchmark = \"gbenchmark\", _grpcpp = \"grpcpp\", _gtest = \"gtest\", _vdso_linker_option = \"vdso_linker_option\")\n+load(\"//tools/bazeldefs:go.bzl\", _gazelle = \"gazelle\", _go_binary = \"go_binary\", _go_embed_data = \"go_embed_data\", _go_grpc_and_proto_libraries = \"go_grpc_and_proto_libraries\", _go_library = \"go_library\", _go_path = \"go_path\", _go_proto_library = \"go_proto_library\", _go_test = \"go_test\", _select_goarch = \"select_goarch\", _select_goos = \"select_goos\")\n+load(\"//tools/bazeldefs:pkg.bzl\", _pkg_deb = \"pkg_deb\", _pkg_tar = \"pkg_tar\")\nload(\"//tools/bazeldefs:platforms.bzl\", _default_platform = \"default_platform\", _platforms = \"platforms\")\nload(\"//tools/bazeldefs:tags.bzl\", \"go_suffixes\")\n-load(\"//tools/nogo:defs.bzl\", \"nogo_test\")\n-# Delegate directly.\n+# Core rules.\nbuild_test = _build_test\nbzl_library = _bzl_library\n+default_installer = _default_installer\n+default_net_util = _default_net_util\n+loopback = _loopback\n+select_arch = _select_arch\n+select_system = _select_system\n+short_path = _short_path\n+rbe_platform = _rbe_platform\n+rbe_toolchain = _rbe_toolchain\n+coreutil = _coreutil\n+\n+# C++ rules.\ncc_binary = _cc_binary\ncc_flags_supplier = _cc_flags_supplier\ncc_grpc_library = _cc_grpc_library\ncc_library = _cc_library\ncc_test = 
_cc_test\ncc_toolchain = _cc_toolchain\n-default_installer = _default_installer\n-default_net_util = _default_net_util\ngbenchmark = _gbenchmark\n+gtest = _gtest\n+grpcpp = _grpcpp\n+vdso_linker_option = _vdso_linker_option\n+\n+# Go rules.\ngazelle = _gazelle\ngo_embed_data = _go_embed_data\ngo_path = _go_path\n-gtest = _gtest\n-grpcpp = _grpcpp\n-loopback = _loopback\n-pkg_deb = _pkg_deb\n-pkg_tar = _pkg_tar\n-py_binary = _py_binary\n-select_arch = _select_arch\n-select_system = _select_system\nselect_goos = _select_goos\nselect_goarch = _select_goarch\n-short_path = _short_path\n-rbe_platform = _rbe_platform\n-rbe_toolchain = _rbe_toolchain\n-vdso_linker_option = _vdso_linker_option\n-coreutil = _coreutil\n+\n+# Packaging rules.\n+pkg_deb = _pkg_deb\n+pkg_tar = _pkg_tar\n# Platform options.\ndefault_platform = _default_platform\n"
},
{
"change_type": "MODIFY",
"old_path": "tools/nogo/defs.bzl",
"new_path": "tools/nogo/defs.bzl",
"diff": "\"\"\"Nogo rules.\"\"\"\n-load(\"//tools/bazeldefs:defs.bzl\", \"go_context\", \"go_importpath\", \"go_rule\", \"go_test_library\")\n+load(\"//tools/bazeldefs:go.bzl\", \"go_context\", \"go_importpath\", \"go_rule\", \"go_test_library\")\nNogoTargetInfo = provider(\n\"information about the Go target\",\n"
}
] | Go | Apache License 2.0 | google/gvisor | Refactor shared starlark files.
PiperOrigin-RevId: 337581114 |
259,853 | 19.10.2020 11:51:15 | 25,200 | cd108432a50ec777ce92f9d207154173e3f0b665 | splice: return EINVAL if len is negative
Reported-by: | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/syscalls/linux/vfs2/splice.go",
"new_path": "pkg/sentry/syscalls/linux/vfs2/splice.go",
"diff": "@@ -45,6 +45,9 @@ func Splice(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscal\nif count > int64(kernel.MAX_RW_COUNT) {\ncount = int64(kernel.MAX_RW_COUNT)\n}\n+ if count < 0 {\n+ return 0, nil, syserror.EINVAL\n+ }\n// Check for invalid flags.\nif flags&^(linux.SPLICE_F_MOVE|linux.SPLICE_F_NONBLOCK|linux.SPLICE_F_MORE|linux.SPLICE_F_GIFT) != 0 {\n@@ -192,6 +195,9 @@ func Tee(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallCo\nif count > int64(kernel.MAX_RW_COUNT) {\ncount = int64(kernel.MAX_RW_COUNT)\n}\n+ if count < 0 {\n+ return 0, nil, syserror.EINVAL\n+ }\n// Check for invalid flags.\nif flags&^(linux.SPLICE_F_MOVE|linux.SPLICE_F_NONBLOCK|linux.SPLICE_F_MORE|linux.SPLICE_F_GIFT) != 0 {\n"
}
] | Go | Apache License 2.0 | google/gvisor | splice: return EINVAL if len is negative
Reported-by: [email protected]
PiperOrigin-RevId: 337901664 |
259,860 | 19.10.2020 13:18:32 | 25,200 | 63f4cef4d160e37b0cbe30ba60b2be95092790ed | [vfs2] Fix fork reference leaks. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/testutil/kernel.go",
"new_path": "pkg/sentry/fsimpl/testutil/kernel.go",
"diff": "@@ -147,7 +147,12 @@ func CreateTask(ctx context.Context, name string, tc *kernel.ThreadGroup, mntns\nFSContext: kernel.NewFSContextVFS2(root, cwd, 0022),\nFDTable: k.NewFDTable(),\n}\n- return k.TaskSet().NewTask(config)\n+ t, err := k.TaskSet().NewTask(ctx, config)\n+ if err != nil {\n+ config.ThreadGroup.Release(ctx)\n+ return nil, err\n+ }\n+ return t, nil\n}\nfunc newFakeExecutable(ctx context.Context, vfsObj *vfs.VirtualFilesystem, creds *auth.Credentials, root vfs.VirtualDentry) (*vfs.FileDescription, error) {\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/BUILD",
"new_path": "pkg/sentry/kernel/BUILD",
"diff": "@@ -218,6 +218,7 @@ go_library(\n\"//pkg/amutex\",\n\"//pkg/bits\",\n\"//pkg/bpf\",\n+ \"//pkg/cleanup\",\n\"//pkg/context\",\n\"//pkg/coverage\",\n\"//pkg/cpuid\",\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/kernel.go",
"new_path": "pkg/sentry/kernel/kernel.go",
"diff": "@@ -39,6 +39,7 @@ import (\n\"time\"\n\"gvisor.dev/gvisor/pkg/abi/linux\"\n+ \"gvisor.dev/gvisor/pkg/cleanup\"\n\"gvisor.dev/gvisor/pkg/context\"\n\"gvisor.dev/gvisor/pkg/cpuid\"\n\"gvisor.dev/gvisor/pkg/eventchannel\"\n@@ -340,7 +341,7 @@ func (k *Kernel) Init(args InitKernelArgs) error {\nreturn fmt.Errorf(\"Timekeeper is nil\")\n}\nif args.Timekeeper.clocks == nil {\n- return fmt.Errorf(\"Must call Timekeeper.SetClocks() before Kernel.Init()\")\n+ return fmt.Errorf(\"must call Timekeeper.SetClocks() before Kernel.Init()\")\n}\nif args.RootUserNamespace == nil {\nreturn fmt.Errorf(\"RootUserNamespace is nil\")\n@@ -365,7 +366,7 @@ func (k *Kernel) Init(args InitKernelArgs) error {\nk.useHostCores = true\nmaxCPU, err := hostcpu.MaxPossibleCPU()\nif err != nil {\n- return fmt.Errorf(\"Failed to get maximum CPU number: %v\", err)\n+ return fmt.Errorf(\"failed to get maximum CPU number: %v\", err)\n}\nminAppCores := uint(maxCPU) + 1\nif k.applicationCores < minAppCores {\n@@ -966,6 +967,10 @@ func (k *Kernel) CreateProcess(args CreateProcessArgs) (*ThreadGroup, ThreadID,\n}\ntg := k.NewThreadGroup(mntns, args.PIDNamespace, NewSignalHandlers(), linux.SIGCHLD, args.Limits)\n+ cu := cleanup.Make(func() {\n+ tg.Release(ctx)\n+ })\n+ defer cu.Clean()\n// Check which file to start from.\nswitch {\n@@ -1025,13 +1030,14 @@ func (k *Kernel) CreateProcess(args CreateProcessArgs) (*ThreadGroup, ThreadID,\nMountNamespaceVFS2: mntnsVFS2,\nContainerID: args.ContainerID,\n}\n- t, err := k.tasks.NewTask(config)\n+ t, err := k.tasks.NewTask(ctx, config)\nif err != nil {\nreturn nil, 0, err\n}\nt.traceExecEvent(tc) // Simulate exec for tracing.\n// Success.\n+ cu.Release()\ntgid := k.tasks.Root.IDOfThreadGroup(tg)\nif k.globalInit == nil {\nk.globalInit = tg\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/task_clone.go",
"new_path": "pkg/sentry/kernel/task_clone.go",
"diff": "@@ -19,6 +19,7 @@ import (\n\"gvisor.dev/gvisor/pkg/abi/linux\"\n\"gvisor.dev/gvisor/pkg/bpf\"\n+ \"gvisor.dev/gvisor/pkg/cleanup\"\n\"gvisor.dev/gvisor/pkg/sentry/inet\"\n\"gvisor.dev/gvisor/pkg/syserror\"\n\"gvisor.dev/gvisor/pkg/usermem\"\n@@ -206,6 +207,10 @@ func (t *Task) Clone(opts *CloneOptions) (ThreadID, *SyscallControl, error) {\n} else {\nipcns.IncRef()\n}\n+ cu := cleanup.Make(func() {\n+ ipcns.DecRef(t)\n+ })\n+ defer cu.Clean()\nnetns := t.NetworkNamespace()\nif opts.NewNetworkNamespace {\n@@ -216,13 +221,18 @@ func (t *Task) Clone(opts *CloneOptions) (ThreadID, *SyscallControl, error) {\nmntnsVFS2 := t.mountNamespaceVFS2\nif mntnsVFS2 != nil {\nmntnsVFS2.IncRef()\n+ cu.Add(func() {\n+ mntnsVFS2.DecRef(t)\n+ })\n}\ntc, err := t.tc.Fork(t, t.k, !opts.NewAddressSpace)\nif err != nil {\n- ipcns.DecRef(t)\nreturn 0, nil, err\n}\n+ cu.Add(func() {\n+ tc.release()\n+ })\n// clone() returns 0 in the child.\ntc.Arch.SetReturn(0)\nif opts.Stack != 0 {\n@@ -230,7 +240,6 @@ func (t *Task) Clone(opts *CloneOptions) (ThreadID, *SyscallControl, error) {\n}\nif opts.SetTLS {\nif !tc.Arch.SetTLS(uintptr(opts.TLS)) {\n- ipcns.DecRef(t)\nreturn 0, nil, syserror.EPERM\n}\n}\n@@ -299,11 +308,11 @@ func (t *Task) Clone(opts *CloneOptions) (ThreadID, *SyscallControl, error) {\n} else {\ncfg.InheritParent = t\n}\n- nt, err := t.tg.pidns.owner.NewTask(cfg)\n+ nt, err := t.tg.pidns.owner.NewTask(t, cfg)\n+ // If NewTask succeeds, we transfer references to nt. If NewTask fails, it does\n+ // the cleanup for us.\n+ cu.Release()\nif err != nil {\n- if opts.NewThreadGroup {\n- tg.release(t)\n- }\nreturn 0, nil, err\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/task_exit.go",
"new_path": "pkg/sentry/kernel/task_exit.go",
"diff": "@@ -286,7 +286,7 @@ func (*runExitMain) execute(t *Task) taskRunState {\n// If this is the last task to exit from the thread group, release the\n// thread group's resources.\nif lastExiter {\n- t.tg.release(t)\n+ t.tg.Release(t)\n}\n// Detach tracees.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/task_start.go",
"new_path": "pkg/sentry/kernel/task_start.go",
"diff": "@@ -16,6 +16,7 @@ package kernel\nimport (\n\"gvisor.dev/gvisor/pkg/abi/linux\"\n+ \"gvisor.dev/gvisor/pkg/context\"\n\"gvisor.dev/gvisor/pkg/sentry/arch\"\n\"gvisor.dev/gvisor/pkg/sentry/inet\"\n\"gvisor.dev/gvisor/pkg/sentry/kernel/auth\"\n@@ -98,15 +99,18 @@ type TaskConfig struct {\n// NewTask creates a new task defined by cfg.\n//\n// NewTask does not start the returned task; the caller must call Task.Start.\n-func (ts *TaskSet) NewTask(cfg *TaskConfig) (*Task, error) {\n+//\n+// If successful, NewTask transfers references held by cfg to the new task.\n+// Otherwise, NewTask releases them.\n+func (ts *TaskSet) NewTask(ctx context.Context, cfg *TaskConfig) (*Task, error) {\nt, err := ts.newTask(cfg)\nif err != nil {\ncfg.TaskContext.release()\n- cfg.FSContext.DecRef(t)\n- cfg.FDTable.DecRef(t)\n- cfg.IPCNamespace.DecRef(t)\n+ cfg.FSContext.DecRef(ctx)\n+ cfg.FDTable.DecRef(ctx)\n+ cfg.IPCNamespace.DecRef(ctx)\nif cfg.MountNamespaceVFS2 != nil {\n- cfg.MountNamespaceVFS2.DecRef(t)\n+ cfg.MountNamespaceVFS2.DecRef(ctx)\n}\nreturn nil, err\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/thread_group.go",
"new_path": "pkg/sentry/kernel/thread_group.go",
"diff": "@@ -18,6 +18,7 @@ import (\n\"sync/atomic\"\n\"gvisor.dev/gvisor/pkg/abi/linux\"\n+ \"gvisor.dev/gvisor/pkg/context\"\n\"gvisor.dev/gvisor/pkg/sentry/arch\"\n\"gvisor.dev/gvisor/pkg/sentry/fs\"\n\"gvisor.dev/gvisor/pkg/sentry/kernel/auth\"\n@@ -307,8 +308,8 @@ func (tg *ThreadGroup) Limits() *limits.LimitSet {\nreturn tg.limits\n}\n-// release releases the thread group's resources.\n-func (tg *ThreadGroup) release(t *Task) {\n+// Release releases the thread group's resources.\n+func (tg *ThreadGroup) Release(ctx context.Context) {\n// Timers must be destroyed without holding the TaskSet or signal mutexes\n// since timers send signals with Timer.mu locked.\ntg.itimerRealTimer.Destroy()\n@@ -325,7 +326,7 @@ func (tg *ThreadGroup) release(t *Task) {\nit.DestroyTimer()\n}\nif tg.mounts != nil {\n- tg.mounts.DecRef(t)\n+ tg.mounts.DecRef(ctx)\n}\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | [vfs2] Fix fork reference leaks.
PiperOrigin-RevId: 337919424 |
259,992 | 19.10.2020 15:30:48 | 25,200 | 4b4d12d5bb9c4902380fa999b5f49d3ed7029938 | Fixes to cgroups
There were a few problems with cgroups:
cleanup loop was breaking too early
parse of /proc/[pid]/cgroups was skipping "name=systemd"
because "name=" was not being removed from name.
When no limits are specified, fillFromAncestor was not being
called, causing a failure to set cpuset.mems
Updates | [
{
"change_type": "MODIFY",
"old_path": "runsc/cgroup/cgroup.go",
"new_path": "runsc/cgroup/cgroup.go",
"diff": "@@ -201,13 +201,15 @@ func LoadPaths(pid string) (map[string]string, error) {\npaths := make(map[string]string)\nscanner := bufio.NewScanner(f)\nfor scanner.Scan() {\n- // Format: ID:controller1,controller2:path\n+ // Format: ID:[name=]controller1,controller2:path\n// Example: 2:cpu,cpuacct:/user.slice\ntokens := strings.Split(scanner.Text(), \":\")\nif len(tokens) != 3 {\nreturn nil, fmt.Errorf(\"invalid cgroups file, line: %q\", scanner.Text())\n}\nfor _, ctrlr := range strings.Split(tokens[1], \",\") {\n+ // Remove prefix for cgroups with no controller, eg. systemd.\n+ ctrlr = strings.TrimPrefix(ctrlr, \"name=\")\npaths[ctrlr] = tokens[2]\n}\n}\n@@ -237,7 +239,7 @@ func New(spec *specs.Spec) (*Cgroup, error) {\nvar err error\nparents, err = LoadPaths(\"self\")\nif err != nil {\n- return nil, fmt.Errorf(\"finding current cgroups: %v\", err)\n+ return nil, fmt.Errorf(\"finding current cgroups: %w\", err)\n}\n}\nreturn &Cgroup{\n@@ -276,12 +278,10 @@ func (c *Cgroup) Install(res *specs.LinuxResources) error {\n}\nreturn err\n}\n- if res != nil {\nif err := cfg.ctrlr.set(res, path); err != nil {\nreturn err\n}\n}\n- }\nclean.Release()\nreturn nil\n}\n@@ -304,14 +304,15 @@ func (c *Cgroup) Uninstall() error {\nctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)\ndefer cancel()\nb := backoff.WithContext(backoff.NewConstantBackOff(100*time.Millisecond), ctx)\n- if err := backoff.Retry(func() error {\n+ fn := func() error {\nerr := syscall.Rmdir(path)\nif os.IsNotExist(err) {\nreturn nil\n}\nreturn err\n- }, b); err != nil {\n- return fmt.Errorf(\"removing cgroup path %q: %v\", path, err)\n+ }\n+ if err := backoff.Retry(fn, b); err != nil {\n+ return fmt.Errorf(\"removing cgroup path %q: %w\", path, err)\n}\n}\nreturn nil\n@@ -332,7 +333,6 @@ func (c *Cgroup) Join() (func(), error) {\nif _, ok := controllers[ctrlr]; ok {\nfullPath := filepath.Join(cgroupRoot, ctrlr, path)\nundoPaths = append(undoPaths, fullPath)\n- break\n}\n}\n@@ -422,7 +422,7 @@ func (*noop) set(*specs.LinuxResources, string) error {\ntype memory struct{}\nfunc (*memory) set(spec *specs.LinuxResources, path string) error {\n- if spec.Memory == nil {\n+ if spec == nil || spec.Memory == nil {\nreturn nil\n}\nif err := setOptionalValueInt(path, \"memory.limit_in_bytes\", spec.Memory.Limit); err != nil {\n@@ -455,7 +455,7 @@ func (*memory) set(spec *specs.LinuxResources, path string) error {\ntype cpu struct{}\nfunc (*cpu) set(spec *specs.LinuxResources, path string) error {\n- if spec.CPU == nil {\n+ if spec == nil || spec.CPU == nil {\nreturn nil\n}\nif err := setOptionalValueUint(path, \"cpu.shares\", spec.CPU.Shares); err != nil {\n@@ -478,7 +478,7 @@ type cpuSet struct{}\nfunc (*cpuSet) set(spec *specs.LinuxResources, path string) error {\n// cpuset.cpus and mems are required fields, but are not set on a new cgroup.\n// If not set in the spec, get it from one of the ancestors cgroup.\n- if spec.CPU == nil || spec.CPU.Cpus == \"\" {\n+ if spec == nil || spec.CPU == nil || spec.CPU.Cpus == \"\" {\nif _, err := fillFromAncestor(filepath.Join(path, \"cpuset.cpus\")); err != nil {\nreturn err\n}\n@@ -488,18 +488,17 @@ func (*cpuSet) set(spec *specs.LinuxResources, path string) error {\n}\n}\n- if spec.CPU == nil || spec.CPU.Mems == \"\" {\n+ if spec == nil || spec.CPU == nil || spec.CPU.Mems == \"\" {\n_, err := fillFromAncestor(filepath.Join(path, \"cpuset.mems\"))\nreturn err\n}\n- mems := spec.CPU.Mems\n- return setValue(path, \"cpuset.mems\", mems)\n+ return setValue(path, \"cpuset.mems\", 
spec.CPU.Mems)\n}\ntype blockIO struct{}\nfunc (*blockIO) set(spec *specs.LinuxResources, path string) error {\n- if spec.BlockIO == nil {\n+ if spec == nil || spec.BlockIO == nil {\nreturn nil\n}\n@@ -549,7 +548,7 @@ func setThrottle(path, name string, devs []specs.LinuxThrottleDevice) error {\ntype networkClass struct{}\nfunc (*networkClass) set(spec *specs.LinuxResources, path string) error {\n- if spec.Network == nil {\n+ if spec == nil || spec.Network == nil {\nreturn nil\n}\nreturn setOptionalValueUint32(path, \"net_cls.classid\", spec.Network.ClassID)\n@@ -558,7 +557,7 @@ func (*networkClass) set(spec *specs.LinuxResources, path string) error {\ntype networkPrio struct{}\nfunc (*networkPrio) set(spec *specs.LinuxResources, path string) error {\n- if spec.Network == nil {\n+ if spec == nil || spec.Network == nil {\nreturn nil\n}\nfor _, prio := range spec.Network.Priorities {\n@@ -573,7 +572,7 @@ func (*networkPrio) set(spec *specs.LinuxResources, path string) error {\ntype pids struct{}\nfunc (*pids) set(spec *specs.LinuxResources, path string) error {\n- if spec.Pids == nil || spec.Pids.Limit <= 0 {\n+ if spec == nil || spec.Pids == nil || spec.Pids.Limit <= 0 {\nreturn nil\n}\nval := strconv.FormatInt(spec.Pids.Limit, 10)\n@@ -583,6 +582,9 @@ func (*pids) set(spec *specs.LinuxResources, path string) error {\ntype hugeTLB struct{}\nfunc (*hugeTLB) set(spec *specs.LinuxResources, path string) error {\n+ if spec == nil {\n+ return nil\n+ }\nfor _, limit := range spec.HugepageLimits {\nname := fmt.Sprintf(\"hugetlb.%s.limit_in_bytes\", limit.Pagesize)\nval := strconv.FormatUint(limit.Limit, 10)\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/container/container.go",
"new_path": "runsc/container/container.go",
"diff": "@@ -985,7 +985,7 @@ func (c *Container) createGoferProcess(spec *specs.Spec, conf *config.Config, bu\n// Start the gofer in the given namespace.\nlog.Debugf(\"Starting gofer: %s %v\", binPath, args)\nif err := specutils.StartInNS(cmd, nss); err != nil {\n- return nil, nil, fmt.Errorf(\"Gofer: %v\", err)\n+ return nil, nil, fmt.Errorf(\"gofer: %v\", err)\n}\nlog.Infof(\"Gofer started, PID: %d\", cmd.Process.Pid)\nc.GoferPid = cmd.Process.Pid\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/container/container_test.go",
"new_path": "runsc/container/container_test.go",
"diff": "@@ -2362,12 +2362,12 @@ func executeCombinedOutput(cont *Container, name string, arg ...string) ([]byte,\n}\n// executeSync synchronously executes a new process.\n-func (cont *Container) executeSync(args *control.ExecArgs) (syscall.WaitStatus, error) {\n- pid, err := cont.Execute(args)\n+func (c *Container) executeSync(args *control.ExecArgs) (syscall.WaitStatus, error) {\n+ pid, err := c.Execute(args)\nif err != nil {\nreturn 0, fmt.Errorf(\"error executing: %v\", err)\n}\n- ws, err := cont.WaitPID(pid)\n+ ws, err := c.WaitPID(pid)\nif err != nil {\nreturn 0, fmt.Errorf(\"error waiting: %v\", err)\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Fixes to cgroups
There were a few problems with cgroups:
- cleanup loop was breaking too early
- parse of /proc/[pid]/cgroups was skipping "name=systemd"
because "name=" was not being removed from name.
- When no limits are specified, fillFromAncestor was not being
called, causing a failure to set cpuset.mems
Updates #4536
PiperOrigin-RevId: 337947356 |
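The "name=" handling described in this cgroups fix follows the /proc/[pid]/cgroup line format ID:[name=]controller1,controller2:path noted in the diff. The following standalone Go sketch illustrates that parsing step in isolation; it is not the runsc implementation, and the sample lines fed to it are made up for demonstration.

```go
package main

import (
	"fmt"
	"strings"
)

// parseCgroupLine splits one "ID:[name=]controller1,controller2:path" line
// into controller -> path entries, stripping the "name=" prefix used by
// named hierarchies such as systemd (the case the fix above addresses).
func parseCgroupLine(line string, paths map[string]string) error {
	tokens := strings.Split(line, ":")
	if len(tokens) != 3 {
		return fmt.Errorf("invalid cgroups line: %q", line)
	}
	for _, ctrlr := range strings.Split(tokens[1], ",") {
		ctrlr = strings.TrimPrefix(ctrlr, "name=")
		paths[ctrlr] = tokens[2]
	}
	return nil
}

func main() {
	paths := make(map[string]string)
	// Hypothetical input lines, for illustration only.
	for _, line := range []string{"2:cpu,cpuacct:/user.slice", "1:name=systemd:/init.scope"} {
		if err := parseCgroupLine(line, paths); err != nil {
			fmt.Println(err)
			return
		}
	}
	fmt.Println(paths) // map[cpu:/user.slice cpuacct:/user.slice systemd:/init.scope]
}
```

Without the TrimPrefix call, the systemd hierarchy would be keyed as "name=systemd" and later lookups by controller name would miss it, which is the bug the record above corrects.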
259,858 | 19.10.2020 17:24:22 | 25,200 | 8f29b8d252ceda8a3e3b777b0b77ea967b0ef2d0 | Remove now unused remote3 configurations. | [
{
"change_type": "MODIFY",
"old_path": ".bazelrc",
"new_path": ".bazelrc",
"diff": "@@ -28,20 +28,12 @@ build:remote --bes_results_url=\"https://source.cloud.google.com/results/invocati\nbuild:remote --bes_timeout=600s\nbuild:remote --project_id=gvisor-rbe\nbuild:remote --remote_instance_name=projects/gvisor-rbe/instances/default_instance\n-build:remote3 --remote_executor=grpcs://remotebuildexecution.googleapis.com\n-build:remote3 --project_id=gvisor-rbe\n-build:remote3 --bes_backend=buildeventservice.googleapis.com\n-build:remote3 --bes_results_url=\"https://source.cloud.google.com/results/invocations\"\n-build:remote3 --bes_timeout=600s\n-build:remote3 --remote_instance_name=projects/gvisor-rbe/instances/default_instance\n# Enable authentication. This will pick up application default credentials by\n# default. You can use --google_credentials=some_file.json to use a service\n# account credential instead.\nbuild:remote --google_default_credentials=true\nbuild:remote --auth_scope=\"https://www.googleapis.com/auth/cloud-source-tools\"\n-build:remote3 --google_default_credentials=true\n-build:remote3 --auth_scope=\"https://www.googleapis.com/auth/cloud-source-tools\"\n# Add a custom platform and toolchain that builds in a privileged docker\n# container, which is required by our syscall tests.\n@@ -52,12 +44,3 @@ build:remote --platforms=//tools/bazeldefs:rbe_ubuntu1604\nbuild:remote --crosstool_top=@rbe_default//cc:toolchain\nbuild:remote --jobs=300\nbuild:remote --remote_timeout=3600\n-\n-# Identical to the above, to be removed once CI switches.\n-build:remote3 --host_platform=//tools/bazeldefs:rbe_ubuntu1604\n-build:remote3 --extra_toolchains=//tools/bazeldefs:cc-toolchain-clang-x86_64-default\n-build:remote3 --extra_execution_platforms=//tools/bazeldefs:rbe_ubuntu1604\n-build:remote3 --platforms=//tools/bazeldefs:rbe_ubuntu1604\n-build:remote3 --crosstool_top=@rbe_default//cc:toolchain\n-build:remote3 --jobs=300\n-build:remote3 --remote_timeout=3600\n"
}
] | Go | Apache License 2.0 | google/gvisor | Remove now unused remote3 configurations.
PiperOrigin-RevId: 337968219 |
259,860 | 19.10.2020 18:07:38 | 25,200 | dcc1b71f1ba47646808f61cc86e560179c233af2 | Fix reference counting on kcov mappings.
Reported-by:
Reported-by: | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/sys/kcov.go",
"new_path": "pkg/sentry/fsimpl/sys/kcov.go",
"diff": "@@ -102,7 +102,7 @@ func (fd *kcovFD) ConfigureMMap(ctx context.Context, opts *memmap.MMapOpts) erro\nfunc (fd *kcovFD) Release(ctx context.Context) {\n// kcov instances have reference counts in Linux, but this seems sufficient\n// for our purposes.\n- fd.kcov.Clear()\n+ fd.kcov.Clear(ctx)\n}\n// SetStat implements vfs.FileDescriptionImpl.SetStat.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/kcov.go",
"new_path": "pkg/sentry/kernel/kcov.go",
"diff": "@@ -199,24 +199,26 @@ func (kcov *Kcov) DisableTrace(ctx context.Context) error {\n}\nkcov.mode = linux.KCOV_MODE_INIT\nkcov.owningTask = nil\n+ if kcov.mappable != nil {\n+ kcov.mappable.DecRef(ctx)\nkcov.mappable = nil\n+ }\nreturn nil\n}\n// Clear resets the mode and clears the owning task and memory mapping for kcov.\n// It is called when the fd corresponding to kcov is closed. Note that the mode\n// needs to be set so that the next call to kcov.TaskWork() will exit early.\n-func (kcov *Kcov) Clear() {\n+func (kcov *Kcov) Clear(ctx context.Context) {\nkcov.mu.Lock()\n- kcov.clearLocked()\n- kcov.mu.Unlock()\n-}\n-\n-func (kcov *Kcov) clearLocked() {\nkcov.mode = linux.KCOV_MODE_INIT\nkcov.owningTask = nil\n+ if kcov.mappable != nil {\n+ kcov.mappable.DecRef(ctx)\nkcov.mappable = nil\n}\n+ kcov.mu.Unlock()\n+}\n// OnTaskExit is called when the owning task exits. It is similar to\n// kcov.Clear(), except the memory mapping is not cleared, so that the same\n@@ -254,6 +256,7 @@ func (kcov *Kcov) ConfigureMMap(ctx context.Context, opts *memmap.MMapOpts) erro\n// will look different under /proc/[pid]/maps than they do on Linux.\nkcov.mappable = mm.NewSpecialMappable(fmt.Sprintf(\"[kcov:%d]\", t.ThreadID()), kcov.mfp, fr)\n}\n+ kcov.mappable.IncRef()\nopts.Mappable = kcov.mappable\nopts.MappingIdentity = kcov.mappable\nreturn nil\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/kcov_unsafe.go",
"new_path": "pkg/sentry/kernel/kcov_unsafe.go",
"diff": "@@ -20,9 +20,9 @@ import (\n\"gvisor.dev/gvisor/pkg/safemem\"\n)\n-// countBlock provides a safemem.BlockSeq for k.count.\n+// countBlock provides a safemem.BlockSeq for kcov.count.\n//\n// Like k.count, the block returned is protected by k.mu.\n-func (k *Kcov) countBlock() safemem.BlockSeq {\n- return safemem.BlockSeqOf(safemem.BlockFromSafePointer(unsafe.Pointer(&k.count), int(unsafe.Sizeof(k.count))))\n+func (kcov *Kcov) countBlock() safemem.BlockSeq {\n+ return safemem.BlockSeqOf(safemem.BlockFromSafePointer(unsafe.Pointer(&kcov.count), int(unsafe.Sizeof(kcov.count))))\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Fix reference counting on kcov mappings.
Reported-by: [email protected]
Reported-by: [email protected]
PiperOrigin-RevId: 337973519 |
259,853 | 19.10.2020 18:16:37 | 25,200 | 34a6e9576a9684087f95f57ee73171a637bee8b2 | loader/elf: validate file offset
Reported-by: | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/loader/elf.go",
"new_path": "pkg/sentry/loader/elf.go",
"diff": "@@ -194,6 +194,10 @@ func parseHeader(ctx context.Context, f fullReader) (elfInfo, error) {\nlog.Infof(\"Too many phdrs (%d): total size %d > %d\", hdr.Phnum, totalPhdrSize, maxTotalPhdrSize)\nreturn elfInfo{}, syserror.ENOEXEC\n}\n+ if int64(hdr.Phoff) < 0 || int64(hdr.Phoff+uint64(totalPhdrSize)) < 0 {\n+ ctx.Infof(\"Unsupported phdr offset %d\", hdr.Phoff)\n+ return elfInfo{}, syserror.ENOEXEC\n+ }\nphdrBuf := make([]byte, totalPhdrSize)\n_, err = f.ReadFull(ctx, usermem.BytesIOSequence(phdrBuf), int64(hdr.Phoff))\n@@ -437,6 +441,10 @@ func loadParsedELF(ctx context.Context, m *mm.MemoryManager, f fsbridge.File, in\nctx.Infof(\"PT_INTERP path too big: %v\", phdr.Filesz)\nreturn loadedELF{}, syserror.ENOEXEC\n}\n+ if int64(phdr.Off) < 0 || int64(phdr.Off+phdr.Filesz) < 0 {\n+ ctx.Infof(\"Unsupported PT_INTERP offset %d\", phdr.Off)\n+ return loadedELF{}, syserror.ENOEXEC\n+ }\npath := make([]byte, phdr.Filesz)\n_, err := f.ReadFull(ctx, usermem.BytesIOSequence(path), int64(phdr.Off))\n"
}
] | Go | Apache License 2.0 | google/gvisor | loader/elf: validate file offset
Reported-by: [email protected]
PiperOrigin-RevId: 337974474 |
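The checks added in this loader change reject program-header offsets that go negative when the unsigned header fields are reinterpreted as signed file offsets. Below is a small self-contained illustration of the overflow being guarded against, using made-up header values rather than anything from a real binary.

```go
package main

import "fmt"

func main() {
	// Hypothetical values a crafted ELF header could carry.
	var phoff uint64 = 0x7ffffffffffffff0 // just below 2^63
	var totalPhdrSize uint64 = 0x100

	// The offset alone still fits in a signed int64...
	fmt.Println(int64(phoff) < 0) // false

	// ...but offset+size crosses 2^63, so the signed view becomes negative.
	// This is the condition the new validation rejects with ENOEXEC before
	// the offset is ever passed to ReadFull.
	fmt.Println(int64(phoff+totalPhdrSize) < 0) // true
}
```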
259,992 | 20.10.2020 09:18:51 | 25,200 | c21d8375d91ff037cf8eaf12d47c23657cb36b8b | Add /dev to mandatory mounts test | [
{
"change_type": "MODIFY",
"old_path": "runsc/boot/loader_test.go",
"new_path": "runsc/boot/loader_test.go",
"diff": "@@ -266,7 +266,7 @@ type CreateMountTestcase struct {\nfunc createMountTestcases() []*CreateMountTestcase {\ntestCases := []*CreateMountTestcase{\n- &CreateMountTestcase{\n+ {\n// Only proc.\nname: \"only proc mount\",\nspec: specs.Spec{\n@@ -304,11 +304,10 @@ func createMountTestcases() []*CreateMountTestcase {\n},\n},\n},\n- // /some/deep/path should be mounted, along with /proc,\n- // /dev, and /sys.\n+ // /some/deep/path should be mounted, along with /proc, /dev, and /sys.\nexpectedPaths: []string{\"/some/very/very/deep/path\", \"/proc\", \"/dev\", \"/sys\"},\n},\n- &CreateMountTestcase{\n+ {\n// Mounts are nested inside each other.\nname: \"nested mounts\",\nspec: specs.Spec{\n@@ -352,7 +351,7 @@ func createMountTestcases() []*CreateMountTestcase {\nexpectedPaths: []string{\"/foo\", \"/foo/bar\", \"/foo/bar/baz\", \"/foo/qux\",\n\"/foo/qux-quz\", \"/foo/some/very/very/deep/path\", \"/proc\", \"/dev\", \"/sys\"},\n},\n- &CreateMountTestcase{\n+ {\nname: \"mount inside /dev\",\nspec: specs.Spec{\nRoot: &specs.Root{\n@@ -395,9 +394,7 @@ func createMountTestcases() []*CreateMountTestcase {\n},\nexpectedPaths: []string{\"/proc\", \"/dev\", \"/dev/fd-foo\", \"/dev/foo\", \"/dev/bar\", \"/sys\"},\n},\n- }\n-\n- vfsCase := &CreateMountTestcase{\n+ {\nname: \"mounts inside mandatory mounts\",\nspec: specs.Spec{\nRoot: &specs.Root{\n@@ -413,17 +410,21 @@ func createMountTestcases() []*CreateMountTestcase {\nDestination: \"/sys/bar\",\nType: \"tmpfs\",\n},\n-\n{\nDestination: \"/tmp/baz\",\nType: \"tmpfs\",\n},\n+ {\n+ Destination: \"/dev/goo\",\n+ Type: \"tmpfs\",\n+ },\n+ },\n},\n+ expectedPaths: []string{\"/proc\", \"/sys\", \"/sys/bar\", \"/tmp\", \"/tmp/baz\", \"/dev/goo\"},\n},\n- expectedPaths: []string{\"/proc\", \"/sys\", \"/sys/bar\", \"/tmp\", \"/tmp/baz\"},\n}\n- return append(testCases, vfsCase)\n+ return testCases\n}\n// Test that MountNamespace can be created with various specs.\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add /dev to mandatory mounts test
PiperOrigin-RevId: 338072845 |
260,003 | 20.10.2020 15:45:38 | 25,200 | 4da10f873e22a43be648ab34c9b9b2759d33337d | Fix nogo tests.
//pkg/tcpip/stack:stack_x_test_nogo
//pkg/tcpip/transport/raw:raw_nogo | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/transport_test.go",
"new_path": "pkg/tcpip/stack/transport_test.go",
"diff": "@@ -28,7 +28,7 @@ import (\nconst (\nfakeTransNumber tcpip.TransportProtocolNumber = 1\n- fakeTransHeaderLen = 3\n+ fakeTransHeaderLen int = 3\n)\n// fakeTransportEndpoint is a transport-layer protocol endpoint. It counts\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/raw/endpoint_state.go",
"new_path": "pkg/tcpip/transport/raw/endpoint_state.go",
"diff": "@@ -37,57 +37,57 @@ func (p *rawPacket) loadData(data buffer.VectorisedView) {\n}\n// beforeSave is invoked by stateify.\n-func (ep *endpoint) beforeSave() {\n+func (e *endpoint) beforeSave() {\n// Stop incoming packets from being handled (and mutate endpoint state).\n// The lock will be released after saveRcvBufSizeMax(), which would have\n- // saved ep.rcvBufSizeMax and set it to 0 to continue blocking incoming\n+ // saved e.rcvBufSizeMax and set it to 0 to continue blocking incoming\n// packets.\n- ep.rcvMu.Lock()\n+ e.rcvMu.Lock()\n}\n// saveRcvBufSizeMax is invoked by stateify.\n-func (ep *endpoint) saveRcvBufSizeMax() int {\n- max := ep.rcvBufSizeMax\n+func (e *endpoint) saveRcvBufSizeMax() int {\n+ max := e.rcvBufSizeMax\n// Make sure no new packets will be handled regardless of the lock.\n- ep.rcvBufSizeMax = 0\n+ e.rcvBufSizeMax = 0\n// Release the lock acquired in beforeSave() so regular endpoint closing\n// logic can proceed after save.\n- ep.rcvMu.Unlock()\n+ e.rcvMu.Unlock()\nreturn max\n}\n// loadRcvBufSizeMax is invoked by stateify.\n-func (ep *endpoint) loadRcvBufSizeMax(max int) {\n- ep.rcvBufSizeMax = max\n+func (e *endpoint) loadRcvBufSizeMax(max int) {\n+ e.rcvBufSizeMax = max\n}\n// afterLoad is invoked by stateify.\n-func (ep *endpoint) afterLoad() {\n- stack.StackFromEnv.RegisterRestoredEndpoint(ep)\n+func (e *endpoint) afterLoad() {\n+ stack.StackFromEnv.RegisterRestoredEndpoint(e)\n}\n// Resume implements tcpip.ResumableEndpoint.Resume.\n-func (ep *endpoint) Resume(s *stack.Stack) {\n- ep.stack = s\n+func (e *endpoint) Resume(s *stack.Stack) {\n+ e.stack = s\n// If the endpoint is connected, re-connect.\n- if ep.connected {\n+ if e.connected {\nvar err *tcpip.Error\n- ep.route, err = ep.stack.FindRoute(ep.RegisterNICID, ep.BindAddr, ep.route.RemoteAddress, ep.NetProto, false)\n+ e.route, err = e.stack.FindRoute(e.RegisterNICID, e.BindAddr, e.route.RemoteAddress, e.NetProto, false)\nif err != nil {\npanic(err)\n}\n}\n// If the endpoint is bound, re-bind.\n- if ep.bound {\n- if ep.stack.CheckLocalAddress(ep.RegisterNICID, ep.NetProto, ep.BindAddr) == 0 {\n+ if e.bound {\n+ if e.stack.CheckLocalAddress(e.RegisterNICID, e.NetProto, e.BindAddr) == 0 {\npanic(tcpip.ErrBadLocalAddress)\n}\n}\n- if ep.associated {\n- if err := ep.stack.RegisterRawTransportEndpoint(ep.RegisterNICID, ep.NetProto, ep.TransProto, ep); err != nil {\n+ if e.associated {\n+ if err := e.stack.RegisterRawTransportEndpoint(e.RegisterNICID, e.NetProto, e.TransProto, e); err != nil {\npanic(err)\n}\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Fix nogo tests.
//pkg/tcpip/stack:stack_x_test_nogo
//pkg/tcpip/transport/raw:raw_nogo
PiperOrigin-RevId: 338153265 |
260,004 | 20.10.2020 16:04:45 | 25,200 | 2bfdbfd1fdbe09615b51d3cf360e751bd2c6a981 | Fix locking in AddressableEndpointState | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/addressable_endpoint_state.go",
"new_path": "pkg/tcpip/stack/addressable_endpoint_state.go",
"diff": "@@ -361,6 +361,8 @@ func (a *AddressableEndpointState) RemovePermanentEndpoint(ep AddressEndpoint) *\nreturn tcpip.ErrInvalidEndpointState\n}\n+ a.mu.Lock()\n+ defer a.mu.Unlock()\nreturn a.removePermanentEndpointLocked(addrState)\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Fix locking in AddressableEndpointState
PiperOrigin-RevId: 338156438 |
259,907 | 20.10.2020 16:17:06 | 25,200 | e36a2b7930425fe062b3234a2f44ff4992eafe4e | [runtime tests] Update exclude files.
bhaskerh@ fixed a bunch of the EADDRINUSE flakes in so we should
unexclude them.
I have also tested other flaky tests on this list and removed those that do
not flake anymore. | [
{
"change_type": "MODIFY",
"old_path": "test/runtimes/exclude/nodejs12.4.0.csv",
"new_path": "test/runtimes/exclude/nodejs12.4.0.csv",
"diff": "test name,bug id,comment\nasync-hooks/test-statwatcher.js,https://github.com/nodejs/node/issues/21425,Check for fix inclusion in nodejs releases after 2020-03-29\n-benchmark/test-benchmark-fs.js,,\n-benchmark/test-benchmark-napi.js,,\n+benchmark/test-benchmark-fs.js,,Broken test\n+benchmark/test-benchmark-napi.js,,Broken test\ndoctool/test-make-doc.js,b/68848110,Expected to fail.\ninternet/test-dgram-multicast-set-interface-lo.js,b/162798882,\n-internet/test-doctool-versions.js,,\n-internet/test-uv-threadpool-schedule.js,,\n-parallel/test-cluster-dgram-reuse.js,b/64024294,\n+internet/test-doctool-versions.js,,Broken test\n+internet/test-uv-threadpool-schedule.js,,Broken test\nparallel/test-dgram-bind-fd.js,b/132447356,\nparallel/test-dgram-socket-buffer-size.js,b/68847921,\nparallel/test-dns-channel-timeout.js,b/161893056,\n-parallel/test-fs-access.js,,\n-parallel/test-fs-watchfile.js,,Flaky - File already exists error\n-parallel/test-fs-write-stream.js,b/166819807,Flaky\n-parallel/test-fs-write-stream-double-close.js,b/166819807,Flaky\n-parallel/test-fs-write-stream-throw-type-error.js,b/166819807,Flaky\n-parallel/test-http-writable-true-after-close.js,,Flaky - Mismatched <anonymous> function calls. Expected exactly 1 actual 2\n+parallel/test-fs-access.js,,Broken test\n+parallel/test-fs-watchfile.js,b/166819807,Flaky - VFS1 only\n+parallel/test-fs-write-stream.js,b/166819807,Flaky - VFS1 only\n+parallel/test-fs-write-stream-double-close.js,b/166819807,Flaky - VFS1 only\n+parallel/test-fs-write-stream-throw-type-error.js,b/166819807,Flaky - VFS1 only\n+parallel/test-http-writable-true-after-close.js,b/171301436,Flaky - Mismatched <anonymous> function calls. Expected exactly 1 actual 2\nparallel/test-os.js,b/63997097,\n-parallel/test-net-server-listen-options.js,,Flaky - EADDRINUSE\n-parallel/test-process-uid-gid.js,,\n-parallel/test-tls-cli-min-version-1.0.js,,Flaky - EADDRINUSE\n-parallel/test-tls-cli-min-version-1.1.js,,Flaky - EADDRINUSE\n-parallel/test-tls-cli-min-version-1.2.js,,Flaky - EADDRINUSE\n-parallel/test-tls-cli-min-version-1.3.js,,Flaky - EADDRINUSE\n-parallel/test-tls-cli-max-version-1.2.js,,Flaky - EADDRINUSE\n-parallel/test-tls-cli-max-version-1.3.js,,Flaky - EADDRINUSE\n-parallel/test-tls-min-max-version.js,,Flaky - EADDRINUSE\n+parallel/test-process-uid-gid.js,,Does not work inside Docker with gid nobody\npseudo-tty/test-assert-colors.js,b/162801321,\npseudo-tty/test-assert-no-color.js,b/162801321,\npseudo-tty/test-assert-position-indicator.js,b/162801321,\n@@ -48,11 +39,7 @@ pseudo-tty/test-tty-stdout-resize.js,b/162801321,\npseudo-tty/test-tty-stream-constructors.js,b/162801321,\npseudo-tty/test-tty-window-size.js,b/162801321,\npseudo-tty/test-tty-wrap.js,b/162801321,\n-pummel/test-heapdump-http2.js,,Flaky\n-pummel/test-net-pingpong.js,,\n+pummel/test-net-pingpong.js,,Broken test\npummel/test-vm-memleak.js,b/162799436,\n-pummel/test-watch-file.js,,Flaky - Timeout\n-sequential/test-child-process-pass-fd.js,b/63926391,Flaky\n-sequential/test-https-connect-localport.js,,Flaky - EADDRINUSE\n-sequential/test-net-bytes-per-incoming-chunk-overhead.js,,flaky - timeout\n-tick-processor/test-tick-processor-builtin.js,,\n+pummel/test-watch-file.js,,Flaky - VFS1 only\n+tick-processor/test-tick-processor-builtin.js,,Broken test\n"
}
] | Go | Apache License 2.0 | google/gvisor | [runtime tests] Update exclude files.
bhaskerh@ fixed a bunch of the EADDRINUSE flakes in #3662 so we should
unexclude them.
I have also tested other flaky tests on this list and removed those that do
not flake anymore.
PiperOrigin-RevId: 338158545 |
260,003 | 20.10.2020 17:20:33 | 25,200 | 16ba350314b457a8d1dcdf17040b75b7a4e646cb | Fix nogo test in //pkg/tcpip/... | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/adapters/gonet/gonet_test.go",
"new_path": "pkg/tcpip/adapters/gonet/gonet_test.go",
"diff": "@@ -97,6 +97,9 @@ type testConnection struct {\nfunc connect(s *stack.Stack, addr tcpip.FullAddress) (*testConnection, *tcpip.Error) {\nwq := &waiter.Queue{}\nep, err := s.NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, wq)\n+ if err != nil {\n+ return nil, err\n+ }\nentry, ch := waiter.NewChannelEntry(nil)\nwq.EventRegister(&entry, waiter.EventOut)\n@@ -145,7 +148,9 @@ func TestCloseReader(t *testing.T) {\ndefer close(done)\nc, err := l.Accept()\nif err != nil {\n- t.Fatalf(\"l.Accept() = %v\", err)\n+ t.Errorf(\"l.Accept() = %v\", err)\n+ // Cannot call Fatalf in goroutine. Just return from the goroutine.\n+ return\n}\n// Give c.Read() a chance to block before closing the connection.\n@@ -416,7 +421,9 @@ func TestDeadlineChange(t *testing.T) {\ndefer close(done)\nc, err := l.Accept()\nif err != nil {\n- t.Fatalf(\"l.Accept() = %v\", err)\n+ t.Errorf(\"l.Accept() = %v\", err)\n+ // Cannot call Fatalf in goroutine. Just return from the goroutine.\n+ return\n}\nc.SetDeadline(time.Now().Add(time.Minute))\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/sharedmem/pipe/pipe_test.go",
"new_path": "pkg/tcpip/link/sharedmem/pipe/pipe_test.go",
"diff": "@@ -470,6 +470,7 @@ func TestConcurrentReaderWriter(t *testing.T) {\nconst count = 1000000\nvar wg sync.WaitGroup\n+ defer wg.Wait()\nwg.Add(1)\ngo func() {\ndefer wg.Done()\n@@ -489,10 +490,6 @@ func TestConcurrentReaderWriter(t *testing.T) {\n}\n}()\n- wg.Add(1)\n- go func() {\n- defer wg.Done()\n- runtime.Gosched()\nfor i := 0; i < count; i++ {\nn := 1 + rr.Intn(80)\nrb := rx.Pull()\n@@ -512,7 +509,4 @@ func TestConcurrentReaderWriter(t *testing.T) {\nrx.Flush()\n}\n- }()\n-\n- wg.Wait()\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/sack_scoreboard.go",
"new_path": "pkg/tcpip/transport/tcp/sack_scoreboard.go",
"diff": "@@ -164,7 +164,7 @@ func (s *SACKScoreboard) IsSACKED(r header.SACKBlock) bool {\nreturn found\n}\n-// Dump prints the state of the scoreboard structure.\n+// String returns human-readable state of the scoreboard structure.\nfunc (s *SACKScoreboard) String() string {\nvar str strings.Builder\nstr.WriteString(\"SACKScoreboard: {\")\n"
},
{
"change_type": "MODIFY",
"old_path": "tools/nogo/config.go",
"new_path": "tools/nogo/config.go",
"diff": "@@ -481,21 +481,6 @@ func init() {\n\"pkg/state/tests/integer_test.go:28\",\n\"pkg/sync/rwmutex_test.go:105\",\n\"pkg/syserr/host_linux.go:35\",\n- \"pkg/tcpip/adapters/gonet/gonet_test.go:144\",\n- \"pkg/tcpip/adapters/gonet/gonet_test.go:415\",\n- \"pkg/tcpip/adapters/gonet/gonet_test.go:99\",\n- \"pkg/tcpip/buffer/view.go:238\",\n- \"pkg/tcpip/buffer/view.go:238\",\n- \"pkg/tcpip/buffer/view.go:246\",\n- \"pkg/tcpip/header/tcp.go:151\",\n- \"pkg/tcpip/link/sharedmem/pipe/pipe_test.go:493\",\n- \"pkg/tcpip/stack/iptables.go:293\",\n- \"pkg/tcpip/stack/iptables_types.go:277\",\n- \"pkg/tcpip/stack/stack.go:553\",\n- \"pkg/tcpip/stack/transport_test.go:30\",\n- \"pkg/tcpip/transport/packet/endpoint.go:126\",\n- \"pkg/tcpip/transport/raw/endpoint.go:145\",\n- \"pkg/tcpip/transport/tcp/sack_scoreboard.go:167\",\n\"pkg/unet/unet_test.go:634\",\n\"pkg/unet/unet_test.go:662\",\n\"pkg/unet/unet_test.go:703\",\n"
}
] | Go | Apache License 2.0 | google/gvisor | Fix nogo test in //pkg/tcpip/...
PiperOrigin-RevId: 338168977 |
259,990 | 20.10.2020 20:03:04 | 25,200 | d579ed85052dfba0579bd3286b6ae04210e4f975 | Do not even try forcing cgroups in tests | [
{
"change_type": "MODIFY",
"old_path": "runsc/container/container.go",
"new_path": "runsc/container/container.go",
"diff": "@@ -314,7 +314,8 @@ func New(conf *boot.Config, args Args) (*Container, error) {\nif args.Spec.Linux == nil {\nargs.Spec.Linux = &specs.Linux{}\n}\n- if args.Spec.Linux.CgroupsPath == \"\" {\n+ // Don't force the use of cgroups in tests because they lack permission to do so.\n+ if args.Spec.Linux.CgroupsPath == \"\" && !conf.TestOnlyAllowRunAsCurrentUserWithoutChroot {\nargs.Spec.Linux.CgroupsPath = \"/\" + args.ID\n}\n@@ -328,9 +329,6 @@ func New(conf *boot.Config, args Args) (*Container, error) {\n// If there is cgroup config, install it before creating sandbox process.\nif err := cg.Install(args.Spec.Linux.Resources); err != nil {\nswitch {\n- case errors.Is(err, syscall.EROFS) && conf.TestOnlyAllowRunAsCurrentUserWithoutChroot:\n- log.Warningf(\"Skipping cgroup configuration in test mode: %v\", err)\n- cg = nil\ncase errors.Is(err, syscall.EACCES) && conf.Rootless:\nlog.Warningf(\"Skipping cgroup configuration in rootless mode: %v\", err)\ncg = nil\n"
}
] | Go | Apache License 2.0 | google/gvisor | Do not even try forcing cgroups in tests |
259,860 | 21.10.2020 19:15:02 | 25,200 | 4e389c785779114620b47e005d08ca469cc1ed68 | Check for nil in kernel.FSContext functions.
Reported-by: | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/fs_context.go",
"new_path": "pkg/sentry/kernel/fs_context.go",
"diff": "@@ -147,19 +147,23 @@ func (f *FSContext) WorkingDirectory() *fs.Dirent {\nf.mu.Lock()\ndefer f.mu.Unlock()\n+ if f.cwd != nil {\nf.cwd.IncRef()\n+ }\nreturn f.cwd\n}\n// WorkingDirectoryVFS2 returns the current working directory.\n//\n-// This will return nil if called after f is destroyed, otherwise it will return\n-// a Dirent with a reference taken.\n+// This will return an empty vfs.VirtualDentry if called after f is\n+// destroyed, otherwise it will return a Dirent with a reference taken.\nfunc (f *FSContext) WorkingDirectoryVFS2() vfs.VirtualDentry {\nf.mu.Lock()\ndefer f.mu.Unlock()\n+ if f.cwdVFS2.Ok() {\nf.cwdVFS2.IncRef()\n+ }\nreturn f.cwdVFS2\n}\n@@ -218,13 +222,15 @@ func (f *FSContext) RootDirectory() *fs.Dirent {\n// RootDirectoryVFS2 returns the current filesystem root.\n//\n-// This will return nil if called after f is destroyed, otherwise it will return\n-// a Dirent with a reference taken.\n+// This will return an empty vfs.VirtualDentry if called after f is\n+// destroyed, otherwise it will return a Dirent with a reference taken.\nfunc (f *FSContext) RootDirectoryVFS2() vfs.VirtualDentry {\nf.mu.Lock()\ndefer f.mu.Unlock()\n+ if f.rootVFS2.Ok() {\nf.rootVFS2.IncRef()\n+ }\nreturn f.rootVFS2\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Check for nil in kernel.FSContext functions.
Reported-by: [email protected]
PiperOrigin-RevId: 338386575 |
259,907 | 22.10.2020 11:44:47 | 25,200 | c188daf889ea71b30e6862f3a87fcd9924319b70 | [bazel] Reduce number of jobs to 100. | [
{
"change_type": "MODIFY",
"old_path": ".bazelrc",
"new_path": ".bazelrc",
"diff": "@@ -42,5 +42,6 @@ build:remote --extra_toolchains=//tools/bazeldefs:cc-toolchain-clang-x86_64-defa\nbuild:remote --extra_execution_platforms=//tools/bazeldefs:rbe_ubuntu1604\nbuild:remote --platforms=//tools/bazeldefs:rbe_ubuntu1604\nbuild:remote --crosstool_top=@rbe_default//cc:toolchain\n-build:remote --jobs=300\n+# TODO(b/171495162): Revert to 300 jobs once bug is fixed.\n+build:remote --jobs=100\nbuild:remote --remote_timeout=3600\n"
}
] | Go | Apache License 2.0 | google/gvisor | [bazel] Reduce number of jobs to 100.
PiperOrigin-RevId: 338517024 |
259,884 | 22.10.2020 21:21:16 | 25,200 | cc772f3d54d46b65c663c8cf7812103df31f17d3 | Add a platform portability blog post
Also fixes the docker_image bazel rule, and website-server make target.
Fixes | [
{
"change_type": "MODIFY",
"old_path": "Makefile",
"new_path": "Makefile",
"diff": "@@ -260,7 +260,7 @@ website-build: load-jekyll ## Build the site image locally.\n.PHONY: website-build\nwebsite-server: website-build ## Run a local server for development.\n- @docker run -i -p 8080:8080 gvisor.dev/images/website\n+ @docker run -i -p 8080:8080 $(WEBSITE_IMAGE)\n.PHONY: website-server\nwebsite-push: website-build ## Push a new image and update the service.\n"
},
{
"change_type": "MODIFY",
"old_path": "images/defs.bzl",
"new_path": "images/defs.bzl",
"diff": "def _docker_image_impl(ctx):\nimporter = ctx.actions.declare_file(ctx.label.name)\n+\nimporter_content = [\n\"#!/bin/bash\",\n\"set -euo pipefail\",\n+ \"source_file='%s'\" % ctx.file.data.path,\n+ \"if [[ ! -f \\\"$source_file\\\" ]]; then\",\n+ \" source_file='%s'\" % ctx.file.data.short_path,\n+ \"fi\",\n\"exec docker import \" + \" \".join([\n\"-c '%s'\" % attr\nfor attr in ctx.attr.statements\n- ]) + \" \" + \" \".join([\n- \"'%s'\" % f.path\n- for f in ctx.files.data\n- ]) + \" $1\",\n+ ]) + \" \\\"$source_file\\\" $1\",\n\"\",\n]\n+\nctx.actions.write(importer, \"\\n\".join(importer_content), is_executable = True)\nreturn [DefaultInfo(\n- runfiles = ctx.runfiles(ctx.files.data),\n+ runfiles = ctx.runfiles([ctx.file.data]),\nexecutable = importer,\n)]\ndocker_image = rule(\nimplementation = _docker_image_impl,\n- doc = \"Tool to load a Docker image; takes a single parameter (image name).\",\n+ doc = \"Tool to import a Docker image; takes a single parameter (image name).\",\nattrs = {\n\"statements\": attr.string_list(doc = \"Extra Dockerfile directives.\"),\n- \"data\": attr.label_list(doc = \"All image data.\"),\n+ \"data\": attr.label(doc = \"Image filesystem tarball\", allow_single_file = [\".tgz\", \".tar.gz\"]),\n},\nexecutable = True,\n)\n"
},
{
"change_type": "MODIFY",
"old_path": "website/BUILD",
"new_path": "website/BUILD",
"diff": "@@ -6,7 +6,7 @@ package(licenses = [\"notice\"])\ndocker_image(\nname = \"website\",\n- data = [\":files\"],\n+ data = \":files\",\nstatements = [\n\"EXPOSE 8080/tcp\",\n'ENTRYPOINT [\"/server\"]',\n"
},
{
"change_type": "MODIFY",
"old_path": "website/_config.yml",
"new_path": "website/_config.yml",
"diff": "@@ -37,3 +37,9 @@ authors:\nfvoznika:\nname: Fabricio Voznika\nemail: [email protected]\n+ ianlewis:\n+ name: Ian Lewis\n+ email: [email protected]\n+ mpratt:\n+ name: Michael Pratt\n+ email: [email protected]\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "website/blog/2020-10-22-platform-portability.md",
"diff": "+# Platform Portability\n+\n+Hardware virtualization is often seen as a requirement to provide an additional\n+isolation layer for untrusted applications. However, hardware virtualization\n+requires expensive bare-metal machines or cloud instances to run safely with\n+good performance, increasing cost and complexity for Cloud users. gVisor,\n+however, takes a more flexible approach.\n+\n+One of the pillars of gVisor's architecture is portability, allowing it to run\n+anywhere that runs Linux. Modern Cloud-Native applications run in containers in\n+many different places, from bare metal to virtual machines, and can't always\n+rely on nested virtualization. It is important for gVisor to be able to support\n+the environments where you run containers.\n+\n+gVisor achieves portability through an abstraction called a _Platform_.\n+Platforms can have many implementations, and each implementation can cover\n+different environments, making use of available software or hardware features.\n+\n+## Background\n+\n+Before we can understand how gVisor achieves portability using platforms, we\n+should take a step back and understand how applications interact with their\n+host.\n+\n+Container sandboxes can provide an isolation layer between the host and\n+application by virtualizing one of the layers below it, including the hardware\n+or operating system. Many sandboxes virtualize the hardware layer by running\n+applications in virtual machines. gVisor takes a different approach by\n+virtualizing the OS layer.\n+\n+When an application is run in a normal situation the host operating system loads\n+the application into user memory and schedules it for execution. The operating\n+system scheduler eventually schedules the application to a CPU and begins\n+executing it. It then handles the application's requests, such as for memory and\n+the lifecycle of the application. gVisor virtualizes these interactions, such as\n+system calls, and context switching that happen between an application and OS.\n+\n+[System calls](https://en.wikipedia.org/wiki/System_call) allow applications to\n+ask the OS to perform some task for it. System calls look like a normal function\n+call in most programming languages though works a bit differently under the\n+hood. When an application system call is encountered some special processing\n+takes place to do a\n+[context switch](https://en.wikipedia.org/wiki/Context_switch) into kernel mode\n+and begin executing code in the kernel before returning a result to the\n+application. Context switching may happen in other situations as well. For\n+example, to respond to an interrupt.\n+\n+## The Platform Interface\n+\n+gVisor provides a sandbox which implements the Linux OS interface, intercepting\n+OS interactions such as system calls and implements them in the sandbox kernel.\n+\n+It does this to limit interactions with the host, and protect the host from an\n+untrusted application running in the sandbox. The Platform is the bottom layer\n+of gVisor which provides the environment necessary for gVisor to control and\n+manage applications. In general, the Platform must:\n+\n+1. Provide the ability to create and manage memory address spaces.\n+2. Provide execution contexts for running applications in those memory address\n+ spaces.\n+3. Provide the ability to change execution context and return control to gVisor\n+ at specific times (e.g. system call, page fault)\n+\n+This interface is conceptually simple, but very powerful. 
Since the Platform\n+interface only requires these three capabilities, it gives gVisor enough control\n+for it to act as the application's OS, while still allowing the use of very\n+different isolation technologies under the hood. You can learn more about the\n+Platform interface in the\n+[Platform Guide](https://gvisor.dev/docs/architecture_guide/platforms/).\n+\n+## Implementations of the Platform Interface\n+\n+While gVisor can make use of technologies like hardware virtualization, it\n+doesn't necessarily rely on any one technology to provide a similar level of\n+isolation. The flexibility of the Platform interface allows for implementations\n+that use technologies other than hardware virtualization. This allows gVisor to\n+run in VMs without nested virtualization, for example. By providing an\n+abstraction for the underlying platform, each implementation can make various\n+tradeoffs regarding performance or hardware requirements.\n+\n+Currently gVisor provides two gVisor Platform implementations; the Ptrace\n+Platform, and the KVM Platform, each using very different methods to implement\n+the Platform interface.\n+\n+\n+\n+The Ptrace Platform uses\n+[PTRACE\\_SYSEMU](http://man7.org/linux/man-pages/man2/ptrace.2.html) to trap\n+syscalls, and uses the host for memory mapping and context switching. This\n+platform can run anywhere that ptrace is available, which includes most Linux\n+systems, VMs or otherwise.\n+\n+The KVM Platform uses virtualization, but in an unconventional way. gVisor runs\n+in a virtual machine but as both guest OS and VMM, and presents no virtualized\n+hardware layer. This provides a simpler interface that can avoid hardware\n+initialization for fast start up, while taking advantage of hardware\n+virtualization support to improve memory isolation and performance of context\n+switching.\n+\n+The flexibility of the Platform interface allows for a lot of room to improve\n+the existing KVM and ptrace platforms, as well as the ability to utilize new\n+methods for improving gVisor's performance or portability in future Platform\n+implementations.\n+\n+## Portability\n+\n+Through the Platform interface, gVisor is able to support bare metal, virtual\n+machines, and Cloud environments while still providing a highly secure sandbox\n+for running untrusted applications. This is especially important for Cloud and\n+Kubernetes users because it allows gVisor to run anywhere that Kubernetes can\n+run and provide similar experiences in multi-region, hybrid, multi-platform\n+environments.\n+\n+Give gVisor's open source platforms a try. Using a Platform is as easy as\n+providing the `--platform` flag to `runsc`. See the documentation on\n+[changing platforms](https://gvisor.dev/docs/user_guide/platforms/) for how to\n+use different platforms with Docker. We would love to hear about your experience\n+so come chat with us in our\n+[Gitter channel](https://gitter.im/gvisor/community), or send us an\n+[issue on Github](https://gvisor.dev/issue) if you run into any problems.\n"
},
{
"change_type": "MODIFY",
"old_path": "website/blog/BUILD",
"new_path": "website/blog/BUILD",
"diff": "@@ -38,6 +38,17 @@ doc(\npermalink = \"/blog/2020/09/18/containing-a-real-vulnerability/\",\n)\n+doc(\n+ name = \"platform_portability\",\n+ src = \"2020-10-22-platform-portability.md\",\n+ authors = [\n+ \"ianlewis\",\n+ \"mpratt\",\n+ ],\n+ layout = \"post\",\n+ permalink = \"/blog/2020/09/22/platform-portability/\",\n+)\n+\ndocs(\nname = \"posts\",\ndeps = [\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add a platform portability blog post
Also fixes the docker_image bazel rule, and website-server make target.
Fixes #3273
PiperOrigin-RevId: 338606668 |
259,992 | 22.10.2020 22:04:57 | 25,200 | 293877cf647ac3e900f0ae15061317a512bba7a0 | Load spec during "runsc start" to process flag overrides
Subcontainers are only configured when the container starts, however because
start doesn't load the spec, flag annotations that may override flags were
not getting applied to the configuration.
Updates | [
{
"change_type": "MODIFY",
"old_path": "runsc/boot/loader.go",
"new_path": "runsc/boot/loader.go",
"diff": "@@ -737,7 +737,7 @@ func (l *Loader) createContainerProcess(root bool, cid string, info *containerIn\nreturn nil, err\n}\n- // Add the HOME enviroment variable if it is not already set.\n+ // Add the HOME environment variable if it is not already set.\nvar envv []string\nif kernel.VFS2Enabled {\nenvv, err = user.MaybeAddExecUserHomeVFS2(ctx, info.procArgs.MountNamespaceVFS2,\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/cmd/start.go",
"new_path": "runsc/cmd/start.go",
"diff": "@@ -21,6 +21,7 @@ import (\n\"gvisor.dev/gvisor/runsc/config\"\n\"gvisor.dev/gvisor/runsc/container\"\n\"gvisor.dev/gvisor/runsc/flag\"\n+ \"gvisor.dev/gvisor/runsc/specutils\"\n)\n// Start implements subcommands.Command for the \"start\" command.\n@@ -58,6 +59,12 @@ func (*Start) Execute(_ context.Context, f *flag.FlagSet, args ...interface{}) s\nif err != nil {\nFatalf(\"loading container: %v\", err)\n}\n+ // Read the spec again here to ensure flag annotations from the spec are\n+ // applied to \"conf\".\n+ if _, err := specutils.ReadSpec(c.BundleDir, conf); err != nil {\n+ Fatalf(\"reading spec: %v\", err)\n+ }\n+\nif err := c.Start(conf); err != nil {\nFatalf(\"starting container: %v\", err)\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Load spec during "runsc start" to process flag overrides
Subcontainers are only configured when the container starts, however because
start doesn't load the spec, flag annotations that may override flags were
not getting applied to the configuration.
Updates #3494
PiperOrigin-RevId: 338610953 |
259,935 | 21.10.2020 08:33:38 | 0 | 3b8193e762c1cc37df42104f5abeb6fdc7b70c01 | Add --traceback flag to customize GOTRACEBACK level | [
{
"change_type": "MODIFY",
"old_path": "runsc/cmd/boot.go",
"new_path": "runsc/cmd/boot.go",
"diff": "@@ -131,11 +131,11 @@ func (b *Boot) Execute(_ context.Context, f *flag.FlagSet, args ...interface{})\nreturn subcommands.ExitUsageError\n}\n- // Ensure that if there is a panic, all goroutine stacks are printed.\n- debug.SetTraceback(\"system\")\n-\nconf := args[0].(*config.Config)\n+ // Set traceback level\n+ debug.SetTraceback(conf.Traceback)\n+\nif b.attached {\n// Ensure this process is killed after parent process terminates when\n// attached mode is enabled. In the unfortunate event that the parent\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/config/config.go",
"new_path": "runsc/config/config.go",
"diff": "@@ -37,6 +37,9 @@ type Config struct {\n// RootDir is the runtime root directory.\nRootDir string `flag:\"root\"`\n+ // Traceback changes the Go runtime's traceback level.\n+ Traceback string `flag:\"traceback\"`\n+\n// Debug indicates that debug logging should be enabled.\nDebug bool `flag:\"debug\"`\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/config/flags.go",
"new_path": "runsc/config/flags.go",
"diff": "@@ -49,6 +49,7 @@ func RegisterFlags() {\nflag.String(\"debug-log-format\", \"text\", \"log format: text (default), json, or json-k8s.\")\nflag.Bool(\"alsologtostderr\", false, \"send log messages to stderr.\")\nflag.Bool(\"allow-flag-override\", false, \"allow OCI annotations (dev.gvisor.flag.<name>) to override flags for debugging.\")\n+ flag.String(\"traceback\", \"system\", \"golang runtime's traceback level\")\n// Debugging flags: strace related\nflag.Bool(\"strace\", false, \"enable strace.\")\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add --traceback flag to customize GOTRACEBACK level |
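The flag introduced here is passed straight to runtime/debug.SetTraceback, which selects how much goroutine state the Go runtime dumps on a fatal panic. A minimal standalone demonstration of that call, independent of runsc (the panic message and the level choice are illustrative):

```go
package main

import "runtime/debug"

func main() {
	// Recognized levels include "none", "single", "all", "system", and "crash";
	// "system" (the default the flag declares above) also includes
	// runtime-internal goroutines in the dump.
	debug.SetTraceback("system")

	// Panicking now prints stacks for every goroutine, not just the current one.
	panic("illustrative crash to show the configured traceback level")
}
```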
259,896 | 23.10.2020 10:46:12 | 25,200 | 39e9b3bb8a25cdfdbc2203e33c6881a7c2c88766 | Support getsockopt for SO_ACCEPTCONN.
The SO_ACCEPTCONN option is used only on getsockopt(). When this option is
specified, getsockopt() indicates whether socket listening is enabled for
the socket. A value of zero indicates that socket listening is disabled;
non-zero that it is enabled. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/netstack/netstack.go",
"new_path": "pkg/sentry/socket/netstack/netstack.go",
"diff": "@@ -1244,6 +1244,18 @@ func getSockOptSocket(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, fam\nvP := primitive.Int32(boolToInt32(v))\nreturn &vP, nil\n+ case linux.SO_ACCEPTCONN:\n+ if outLen < sizeOfInt32 {\n+ return nil, syserr.ErrInvalidArgument\n+ }\n+\n+ v, err := ep.GetSockOptBool(tcpip.AcceptConnOption)\n+ if err != nil {\n+ return nil, syserr.TranslateNetstackError(err)\n+ }\n+ vP := primitive.Int32(boolToInt32(v))\n+ return &vP, nil\n+\ndefault:\nsocket.GetSockOptEmitUnimplementedEvent(t, name)\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/unix/transport/unix.go",
"new_path": "pkg/sentry/socket/unix/transport/unix.go",
"diff": "@@ -879,7 +879,7 @@ func (e *baseEndpoint) SetSockOptInt(opt tcpip.SockOptInt, v int) *tcpip.Error {\nfunc (e *baseEndpoint) GetSockOptBool(opt tcpip.SockOptBool) (bool, *tcpip.Error) {\nswitch opt {\n- case tcpip.KeepaliveEnabledOption:\n+ case tcpip.KeepaliveEnabledOption, tcpip.AcceptConnOption:\nreturn false, nil\ncase tcpip.PasscredOption:\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/tcpip.go",
"new_path": "pkg/tcpip/tcpip.go",
"diff": "@@ -763,6 +763,10 @@ const (\n// endpoint that all packets being written have an IP header and the\n// endpoint should not attach an IP header.\nIPHdrIncludedOption\n+\n+ // AcceptConnOption is used by GetSockOptBool to indicate if the\n+ // socket is a listening socket.\n+ AcceptConnOption\n)\n// SockOptInt represents socket options which values have the int type.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/icmp/endpoint.go",
"new_path": "pkg/tcpip/transport/icmp/endpoint.go",
"diff": "@@ -378,7 +378,7 @@ func (e *endpoint) SetSockOptInt(opt tcpip.SockOptInt, v int) *tcpip.Error {\n// GetSockOptBool implements tcpip.Endpoint.GetSockOptBool.\nfunc (e *endpoint) GetSockOptBool(opt tcpip.SockOptBool) (bool, *tcpip.Error) {\nswitch opt {\n- case tcpip.KeepaliveEnabledOption:\n+ case tcpip.KeepaliveEnabledOption, tcpip.AcceptConnOption:\nreturn false, nil\ndefault:\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/packet/endpoint.go",
"new_path": "pkg/tcpip/transport/packet/endpoint.go",
"diff": "@@ -389,8 +389,13 @@ func (ep *endpoint) GetSockOpt(opt tcpip.GettableSocketOption) *tcpip.Error {\n// GetSockOptBool implements tcpip.Endpoint.GetSockOptBool.\nfunc (*endpoint) GetSockOptBool(opt tcpip.SockOptBool) (bool, *tcpip.Error) {\n+ switch opt {\n+ case tcpip.AcceptConnOption:\n+ return false, nil\n+ default:\nreturn false, tcpip.ErrNotSupported\n}\n+}\n// GetSockOptInt implements tcpip.Endpoint.GetSockOptInt.\nfunc (ep *endpoint) GetSockOptInt(opt tcpip.SockOptInt) (int, *tcpip.Error) {\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/raw/endpoint.go",
"new_path": "pkg/tcpip/transport/raw/endpoint.go",
"diff": "@@ -601,7 +601,7 @@ func (e *endpoint) GetSockOpt(opt tcpip.GettableSocketOption) *tcpip.Error {\n// GetSockOptBool implements tcpip.Endpoint.GetSockOptBool.\nfunc (e *endpoint) GetSockOptBool(opt tcpip.SockOptBool) (bool, *tcpip.Error) {\nswitch opt {\n- case tcpip.KeepaliveEnabledOption:\n+ case tcpip.KeepaliveEnabledOption, tcpip.AcceptConnOption:\nreturn false, nil\ncase tcpip.IPHdrIncludedOption:\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/endpoint.go",
"new_path": "pkg/tcpip/transport/tcp/endpoint.go",
"diff": "@@ -1999,6 +1999,12 @@ func (e *endpoint) GetSockOptBool(opt tcpip.SockOptBool) (bool, *tcpip.Error) {\ncase tcpip.MulticastLoopOption:\nreturn true, nil\n+ case tcpip.AcceptConnOption:\n+ e.LockUser()\n+ defer e.UnlockUser()\n+\n+ return e.EndpointState() == StateListen, nil\n+\ndefault:\nreturn false, tcpip.ErrUnknownProtocolOption\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/udp/endpoint.go",
"new_path": "pkg/tcpip/transport/udp/endpoint.go",
"diff": "@@ -895,6 +895,9 @@ func (e *endpoint) GetSockOptBool(opt tcpip.SockOptBool) (bool, *tcpip.Error) {\nreturn v, nil\n+ case tcpip.AcceptConnOption:\n+ return false, nil\n+\ndefault:\nreturn false, tcpip.ErrUnknownProtocolOption\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/packet_socket_raw.cc",
"new_path": "test/syscalls/linux/packet_socket_raw.cc",
"diff": "@@ -664,6 +664,17 @@ TEST_P(RawPacketTest, SetAndGetSocketLinger) {\nEXPECT_EQ(0, memcmp(&sl, &got_linger, length));\n}\n+TEST_P(RawPacketTest, GetSocketAcceptConn) {\n+ SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));\n+\n+ int got = -1;\n+ socklen_t length = sizeof(got);\n+ ASSERT_THAT(getsockopt(s_, SOL_SOCKET, SO_ACCEPTCONN, &got, &length),\n+ SyscallSucceedsWithValue(0));\n+\n+ ASSERT_EQ(length, sizeof(got));\n+ EXPECT_EQ(got, 0);\n+}\nINSTANTIATE_TEST_SUITE_P(AllInetTests, RawPacketTest,\n::testing::Values(ETH_P_IP, ETH_P_ALL));\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/raw_socket_icmp.cc",
"new_path": "test/syscalls/linux/raw_socket_icmp.cc",
"diff": "@@ -438,6 +438,19 @@ TEST_F(RawSocketICMPTest, SetAndGetSocketLinger) {\nEXPECT_EQ(0, memcmp(&sl, &got_linger, length));\n}\n+// Test getsockopt for SO_ACCEPTCONN.\n+TEST_F(RawSocketICMPTest, GetSocketAcceptConn) {\n+ SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));\n+\n+ int got = -1;\n+ socklen_t length = sizeof(got);\n+ ASSERT_THAT(getsockopt(s_, SOL_SOCKET, SO_ACCEPTCONN, &got, &length),\n+ SyscallSucceedsWithValue(0));\n+\n+ ASSERT_EQ(length, sizeof(got));\n+ EXPECT_EQ(got, 0);\n+}\n+\nvoid RawSocketICMPTest::ExpectICMPSuccess(const struct icmphdr& icmp) {\n// We're going to receive both the echo request and reply, but the order is\n// indeterminate.\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/socket_ip_udp_generic.cc",
"new_path": "test/syscalls/linux/socket_ip_udp_generic.cc",
"diff": "@@ -472,5 +472,19 @@ TEST_P(UDPSocketPairTest, SetAndGetSocketLinger) {\nEXPECT_EQ(0, memcmp(&sl, &got_linger, length));\n}\n+// Test getsockopt for SO_ACCEPTCONN on udp socket.\n+TEST_P(UDPSocketPairTest, GetSocketAcceptConn) {\n+ auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());\n+\n+ int got = -1;\n+ socklen_t length = sizeof(got);\n+ ASSERT_THAT(\n+ getsockopt(sockets->first_fd(), SOL_SOCKET, SO_ACCEPTCONN, &got, &length),\n+ SyscallSucceedsWithValue(0));\n+\n+ ASSERT_EQ(length, sizeof(got));\n+ EXPECT_EQ(got, 0);\n+}\n+\n} // namespace testing\n} // namespace gvisor\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/socket_unix_stream.cc",
"new_path": "test/syscalls/linux/socket_unix_stream.cc",
"diff": "@@ -121,6 +121,19 @@ TEST_P(StreamUnixSocketPairTest, SetAndGetSocketLinger) {\nEXPECT_EQ(0, memcmp(&got_linger, &sl, length));\n}\n+TEST_P(StreamUnixSocketPairTest, GetSocketAcceptConn) {\n+ auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());\n+\n+ int got = -1;\n+ socklen_t length = sizeof(got);\n+ ASSERT_THAT(\n+ getsockopt(sockets->first_fd(), SOL_SOCKET, SO_ACCEPTCONN, &got, &length),\n+ SyscallSucceedsWithValue(0));\n+\n+ ASSERT_EQ(length, sizeof(got));\n+ EXPECT_EQ(got, 0);\n+}\n+\nINSTANTIATE_TEST_SUITE_P(\nAllUnixDomainSockets, StreamUnixSocketPairTest,\n::testing::ValuesIn(IncludeReversals(VecCat<SocketPairKind>(\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/tcp_socket.cc",
"new_path": "test/syscalls/linux/tcp_socket.cc",
"diff": "@@ -1725,6 +1725,63 @@ TEST_P(SimpleTcpSocketTest, CloseNonConnectedLingerOption) {\nASSERT_LT((end_time - start_time), absl::Seconds(kLingerTimeout));\n}\n+// Tests that SO_ACCEPTCONN returns non zero value for listening sockets.\n+TEST_P(TcpSocketTest, GetSocketAcceptConnListener) {\n+ int got = -1;\n+ socklen_t length = sizeof(got);\n+ ASSERT_THAT(getsockopt(listener_, SOL_SOCKET, SO_ACCEPTCONN, &got, &length),\n+ SyscallSucceeds());\n+ ASSERT_EQ(length, sizeof(got));\n+ EXPECT_EQ(got, 1);\n+}\n+\n+// Tests that SO_ACCEPTCONN returns zero value for not listening sockets.\n+TEST_P(TcpSocketTest, GetSocketAcceptConnNonListener) {\n+ int got = -1;\n+ socklen_t length = sizeof(got);\n+ ASSERT_THAT(getsockopt(s_, SOL_SOCKET, SO_ACCEPTCONN, &got, &length),\n+ SyscallSucceeds());\n+ ASSERT_EQ(length, sizeof(got));\n+ EXPECT_EQ(got, 0);\n+\n+ ASSERT_THAT(getsockopt(t_, SOL_SOCKET, SO_ACCEPTCONN, &got, &length),\n+ SyscallSucceeds());\n+ ASSERT_EQ(length, sizeof(got));\n+ EXPECT_EQ(got, 0);\n+}\n+\n+TEST_P(SimpleTcpSocketTest, GetSocketAcceptConnWithShutdown) {\n+ // TODO(b/171345701): Fix the TCP state for listening socket on shutdown.\n+ SKIP_IF(IsRunningOnGvisor());\n+\n+ FileDescriptor s =\n+ ASSERT_NO_ERRNO_AND_VALUE(Socket(GetParam(), SOCK_STREAM, IPPROTO_TCP));\n+\n+ // Initialize address to the loopback one.\n+ sockaddr_storage addr =\n+ ASSERT_NO_ERRNO_AND_VALUE(InetLoopbackAddr(GetParam()));\n+ socklen_t addrlen = sizeof(addr);\n+\n+ // Bind to some port then start listening.\n+ ASSERT_THAT(bind(s.get(), reinterpret_cast<struct sockaddr*>(&addr), addrlen),\n+ SyscallSucceeds());\n+\n+ ASSERT_THAT(listen(s.get(), SOMAXCONN), SyscallSucceeds());\n+\n+ int got = -1;\n+ socklen_t length = sizeof(got);\n+ ASSERT_THAT(getsockopt(s.get(), SOL_SOCKET, SO_ACCEPTCONN, &got, &length),\n+ SyscallSucceeds());\n+ ASSERT_EQ(length, sizeof(got));\n+ EXPECT_EQ(got, 1);\n+\n+ EXPECT_THAT(shutdown(s.get(), SHUT_RD), SyscallSucceeds());\n+ ASSERT_THAT(getsockopt(s.get(), SOL_SOCKET, SO_ACCEPTCONN, &got, &length),\n+ SyscallSucceeds());\n+ ASSERT_EQ(length, sizeof(got));\n+ EXPECT_EQ(got, 0);\n+}\n+\nINSTANTIATE_TEST_SUITE_P(AllInetTests, SimpleTcpSocketTest,\n::testing::Values(AF_INET, AF_INET6));\n"
}
] | Go | Apache License 2.0 | google/gvisor | Support getsockopt for SO_ACCEPTCONN.
The SO_ACCEPTCONN option is used only on getsockopt(). When this option is
specified, getsockopt() indicates whether socket listening is enabled for
the socket. A value of zero indicates that socket listening is disabled;
non-zero that it is enabled.
PiperOrigin-RevId: 338703206 |
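Editor's note: the commit message above spells out the SO_ACCEPTCONN semantics being implemented — getsockopt returns zero for a non-listening socket and non-zero once listen has been called. A small Go sketch of querying the option from user space (illustrative only; the tests in the diff exercise the same behavior in C++):

```go
package main

import (
	"fmt"
	"syscall"
)

// isListening reports whether fd is a listening socket using SO_ACCEPTCONN.
func isListening(fd int) (bool, error) {
	v, err := syscall.GetsockoptInt(fd, syscall.SOL_SOCKET, syscall.SO_ACCEPTCONN)
	if err != nil {
		return false, err
	}
	return v != 0, nil
}

func main() {
	fd, err := syscall.Socket(syscall.AF_INET, syscall.SOCK_STREAM, 0)
	if err != nil {
		panic(err)
	}
	defer syscall.Close(fd)

	before, _ := isListening(fd) // false: listen() has not been called yet.
	if err := syscall.Listen(fd, 1); err != nil {
		panic(err)
	}
	after, _ := isListening(fd) // true: the socket now accepts connections.
	fmt.Println(before, after)  // Output: false true
}
```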
259,907 | 23.10.2020 10:54:43 | 25,200 | 6237563f0a154ee3f62dc1a82be5903e405b98eb | [runtime tests] Exclude flaky tests.
Also updated a test which only fails with VFS1. | [
{
"change_type": "MODIFY",
"old_path": "test/runtimes/exclude/java11.csv",
"new_path": "test/runtimes/exclude/java11.csv",
"diff": "@@ -144,6 +144,7 @@ jdk/jfr/cmd/TestSplit.java,,java.lang.RuntimeException: 'Missing file' missing f\njdk/jfr/cmd/TestSummary.java,,java.lang.RuntimeException: 'Missing file' missing from stdout/stderr\njdk/jfr/event/compiler/TestCompilerStats.java,,java.lang.RuntimeException: Field nmetodsSize not in event\njdk/jfr/event/metadata/TestDefaultConfigurations.java,,Setting 'threshold' in event 'jdk.SecurityPropertyModification' was not configured in the configuration 'default'\n+jdk/jfr/event/oldobject/TestLargeRootSet.java,,Flaky - `main' threw exception: java.lang.RuntimeException: Could not find root object\njdk/jfr/event/runtime/TestActiveSettingEvent.java,,java.lang.Exception: Could not find setting with name jdk.X509Validation#threshold\njdk/jfr/event/runtime/TestModuleEvents.java,,java.lang.RuntimeException: assertEquals: expected jdk.proxy1 to equal java.base\njdk/jfr/event/runtime/TestNetworkUtilizationEvent.java,,\n"
},
{
"change_type": "MODIFY",
"old_path": "test/runtimes/exclude/php7.3.6.csv",
"new_path": "test/runtimes/exclude/php7.3.6.csv",
"diff": "@@ -26,12 +26,13 @@ ext/standard/tests/file/php_fd_wrapper_01.phpt,,\next/standard/tests/file/php_fd_wrapper_02.phpt,,\next/standard/tests/file/php_fd_wrapper_03.phpt,,\next/standard/tests/file/php_fd_wrapper_04.phpt,,\n-ext/standard/tests/file/realpath_bug77484.phpt,b/162894969,\n+ext/standard/tests/file/realpath_bug77484.phpt,b/162894969,VFS1 only failure\next/standard/tests/file/rename_variation.phpt,b/68717309,\next/standard/tests/file/symlink_link_linkinfo_is_link_variation4.phpt,b/162895341,\next/standard/tests/file/symlink_link_linkinfo_is_link_variation8.phpt,b/162896223,\next/standard/tests/general_functions/escapeshellarg_bug71270.phpt,,\next/standard/tests/general_functions/escapeshellcmd_bug71270.phpt,,\n+ext/standard/tests/network/bug20134.phpt,b/171347929,Flaky\next/standard/tests/streams/proc_open_bug60120.phpt,,Flaky until php-src 3852a35fdbcb\next/standard/tests/streams/proc_open_bug69900.phpt,,Flaky\next/standard/tests/streams/stream_socket_sendto.phpt,,\n"
}
] | Go | Apache License 2.0 | google/gvisor | [runtime tests] Exclude flaky tests.
Also updated a test which only fails with VFS1.
PiperOrigin-RevId: 338704940 |
260,001 | 23.10.2020 12:03:05 | 25,200 | 39e214090be6717c01e8a65ee50e194a1a50b462 | Implement Read in gvisor verity fs
Read is implemented by PRead, with offset obtained from Seek. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/verity/verity.go",
"new_path": "pkg/sentry/fsimpl/verity/verity.go",
"diff": "@@ -499,6 +499,10 @@ type fileDescription struct {\n// directory that contains the current file/directory. This is only used\n// if allowRuntimeEnable is set to true.\nparentMerkleWriter *vfs.FileDescription\n+\n+ // off is the file offset. off is protected by mu.\n+ mu sync.Mutex `state:\"nosave\"`\n+ off int64\n}\n// Release implements vfs.FileDescriptionImpl.Release.\n@@ -738,6 +742,16 @@ func (fd *fileDescription) Ioctl(ctx context.Context, uio usermem.IO, args arch.\n}\n}\n+// Read implements vfs.FileDescriptionImpl.Read.\n+func (fd *fileDescription) Read(ctx context.Context, dst usermem.IOSequence, opts vfs.ReadOptions) (int64, error) {\n+ // Implement Read with PRead by setting offset.\n+ fd.mu.Lock()\n+ n, err := fd.PRead(ctx, dst, fd.off, opts)\n+ fd.off += n\n+ fd.mu.Unlock()\n+ return n, err\n+}\n+\n// PRead implements vfs.FileDescriptionImpl.PRead.\nfunc (fd *fileDescription) PRead(ctx context.Context, dst usermem.IOSequence, offset int64, opts vfs.ReadOptions) (int64, error) {\n// No need to verify if the file is not enabled yet in\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/verity/verity_test.go",
"new_path": "pkg/sentry/fsimpl/verity/verity_test.go",
"diff": "@@ -180,9 +180,9 @@ func TestOpen(t *testing.T) {\n}\n}\n-// TestUnmodifiedFileSucceeds ensures that read from an untouched verity file\n-// succeeds after enabling verity for it.\n-func TestReadUnmodifiedFileSucceeds(t *testing.T) {\n+// TestPReadUnmodifiedFileSucceeds ensures that pread from an untouched verity\n+// file succeeds after enabling verity for it.\n+func TestPReadUnmodifiedFileSucceeds(t *testing.T) {\nctx := contexttest.Context(t)\nvfsObj, root, err := newVerityRoot(ctx, t)\nif err != nil {\n@@ -213,6 +213,39 @@ func TestReadUnmodifiedFileSucceeds(t *testing.T) {\n}\n}\n+// TestReadUnmodifiedFileSucceeds ensures that read from an untouched verity\n+// file succeeds after enabling verity for it.\n+func TestReadUnmodifiedFileSucceeds(t *testing.T) {\n+ ctx := contexttest.Context(t)\n+ vfsObj, root, err := newVerityRoot(ctx, t)\n+ if err != nil {\n+ t.Fatalf(\"newVerityRoot: %v\", err)\n+ }\n+\n+ filename := \"verity-test-file\"\n+ fd, size, err := newFileFD(ctx, vfsObj, root, filename, 0644)\n+ if err != nil {\n+ t.Fatalf(\"newFileFD: %v\", err)\n+ }\n+\n+ // Enable verity on the file and confirm a normal read succeeds.\n+ var args arch.SyscallArguments\n+ args[1] = arch.SyscallArgument{Value: linux.FS_IOC_ENABLE_VERITY}\n+ if _, err := fd.Ioctl(ctx, nil /* uio */, args); err != nil {\n+ t.Fatalf(\"Ioctl: %v\", err)\n+ }\n+\n+ buf := make([]byte, size)\n+ n, err := fd.Read(ctx, usermem.BytesIOSequence(buf), vfs.ReadOptions{})\n+ if err != nil && err != io.EOF {\n+ t.Fatalf(\"fd.Read: %v\", err)\n+ }\n+\n+ if n != int64(size) {\n+ t.Errorf(\"fd.PRead got read length %d, want %d\", n, size)\n+ }\n+}\n+\n// TestReopenUnmodifiedFileSucceeds ensures that reopen an untouched verity file\n// succeeds after enabling verity for it.\nfunc TestReopenUnmodifiedFileSucceeds(t *testing.T) {\n@@ -248,8 +281,9 @@ func TestReopenUnmodifiedFileSucceeds(t *testing.T) {\n}\n}\n-// TestModifiedFileFails ensures that read from a modified verity file fails.\n-func TestModifiedFileFails(t *testing.T) {\n+// TestPReadModifiedFileFails ensures that read from a modified verity file\n+// fails.\n+func TestPReadModifiedFileFails(t *testing.T) {\nctx := contexttest.Context(t)\nvfsObj, root, err := newVerityRoot(ctx, t)\nif err != nil {\n@@ -289,7 +323,53 @@ func TestModifiedFileFails(t *testing.T) {\n// Confirm that read from the modified file fails.\nbuf := make([]byte, size)\nif _, err := fd.PRead(ctx, usermem.BytesIOSequence(buf), 0 /* offset */, vfs.ReadOptions{}); err == nil {\n- t.Fatalf(\"fd.PRead succeeded with modified file\")\n+ t.Fatalf(\"fd.PRead succeeded, expected failure\")\n+ }\n+}\n+\n+// TestReadModifiedFileFails ensures that read from a modified verity file\n+// fails.\n+func TestReadModifiedFileFails(t *testing.T) {\n+ ctx := contexttest.Context(t)\n+ vfsObj, root, err := newVerityRoot(ctx, t)\n+ if err != nil {\n+ t.Fatalf(\"newVerityRoot: %v\", err)\n+ }\n+\n+ filename := \"verity-test-file\"\n+ fd, size, err := newFileFD(ctx, vfsObj, root, filename, 0644)\n+ if err != nil {\n+ t.Fatalf(\"newFileFD: %v\", err)\n+ }\n+\n+ // Enable verity on the file.\n+ var args arch.SyscallArguments\n+ args[1] = arch.SyscallArgument{Value: linux.FS_IOC_ENABLE_VERITY}\n+ if _, err := fd.Ioctl(ctx, nil /* uio */, args); err != nil {\n+ t.Fatalf(\"Ioctl: %v\", err)\n+ }\n+\n+ // Open a new lowerFD that's read/writable.\n+ lowerVD := fd.Impl().(*fileDescription).d.lowerVD\n+\n+ lowerFD, err := vfsObj.OpenAt(ctx, auth.CredentialsFromContext(ctx), &vfs.PathOperation{\n+ Root: lowerVD,\n+ Start: 
lowerVD,\n+ }, &vfs.OpenOptions{\n+ Flags: linux.O_RDWR,\n+ })\n+ if err != nil {\n+ t.Fatalf(\"OpenAt: %v\", err)\n+ }\n+\n+ if err := corruptRandomBit(ctx, lowerFD, size); err != nil {\n+ t.Fatalf(\"corruptRandomBit: %v\", err)\n+ }\n+\n+ // Confirm that read from the modified file fails.\n+ buf := make([]byte, size)\n+ if _, err := fd.Read(ctx, usermem.BytesIOSequence(buf), vfs.ReadOptions{}); err == nil {\n+ t.Fatalf(\"fd.Read succeeded, expected failure\")\n}\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Implement Read in gvisor verity fs
Read is implemented by PRead, with offset obtained from Seek.
PiperOrigin-RevId: 338718587 |
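Editor's note: the verity change implements Read in terms of PRead with a mutex-protected file offset. The same pattern in miniature, phrased with standard-library interfaces rather than the sentry types (a sketch, not the commit's code):

```go
package main

import (
	"fmt"
	"io"
	"strings"
	"sync"
)

// offsetReader adapts a positional reader (ReadAt / pread-style) into a
// sequential io.Reader by tracking the offset under a mutex, as the verity
// fileDescription does with fd.off.
type offsetReader struct {
	mu  sync.Mutex
	off int64
	r   io.ReaderAt
}

func (o *offsetReader) Read(p []byte) (int, error) {
	o.mu.Lock()
	defer o.mu.Unlock()
	n, err := o.r.ReadAt(p, o.off)
	o.off += int64(n)
	return n, err
}

func main() {
	r := &offsetReader{r: strings.NewReader("hello world")}
	buf := make([]byte, 5)
	n, _ := r.Read(buf)
	fmt.Println(n, string(buf[:n])) // 5 "hello"
	n, _ = r.Read(buf)
	fmt.Println(n, string(buf[:n])) // 5 " worl"
}
```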
259,967 | 23.10.2020 12:31:11 | 25,200 | 8db147b55423d7dbe5f9af4e6154eab2d19025e1 | Wait before transitioning NUD entries from Probe to Failed
Wait an additional RetransmitTimer duration after the last probe before
transitioning to Failed. The previous implementation transitions immediately to
Failed after sending the last probe, which is erroneous behavior. | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/neighbor_entry.go",
"new_path": "pkg/tcpip/stack/neighbor_entry.go",
"diff": "@@ -238,12 +238,6 @@ func (e *neighborEntry) setStateLocked(next NeighborState) {\n}\nretryCounter++\n- if retryCounter == config.MaxUnicastProbes {\n- e.dispatchRemoveEventLocked()\n- e.setStateLocked(Failed)\n- return\n- }\n-\ne.job = e.nic.stack.newJob(&e.mu, sendUnicastProbe)\ne.job.Schedule(config.RetransmitTimer)\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/neighbor_entry_test.go",
"new_path": "pkg/tcpip/stack/neighbor_entry_test.go",
"diff": "@@ -2938,10 +2938,31 @@ func TestEntryProbeToFailed(t *testing.T) {\nc := DefaultNUDConfigurations()\nc.MaxMulticastProbes = 3\nc.MaxUnicastProbes = 3\n+ c.DelayFirstProbeTime = c.RetransmitTimer\ne, nudDisp, linkRes, clock := entryTestSetup(c)\ne.mu.Lock()\ne.handlePacketQueuedLocked(entryTestAddr2)\n+ e.mu.Unlock()\n+\n+ {\n+ wantProbes := []entryTestProbeInfo{\n+ // Caused by the Unknown-to-Incomplete transition.\n+ {\n+ RemoteAddress: entryTestAddr1,\n+ LocalAddress: entryTestAddr2,\n+ },\n+ }\n+ linkRes.mu.Lock()\n+ diff := cmp.Diff(linkRes.probes, wantProbes)\n+ linkRes.probes = nil\n+ linkRes.mu.Unlock()\n+ if diff != \"\" {\n+ t.Fatalf(\"link address resolver probes mismatch (-got, +want):\\n%s\", diff)\n+ }\n+ }\n+\n+ e.mu.Lock()\ne.handleConfirmationLocked(entryTestLinkAddr1, ReachabilityConfirmationFlags{\nSolicited: false,\nOverride: false,\n@@ -2950,25 +2971,10 @@ func TestEntryProbeToFailed(t *testing.T) {\ne.handlePacketQueuedLocked(entryTestAddr2)\ne.mu.Unlock()\n- waitFor := c.DelayFirstProbeTime + c.RetransmitTimer*time.Duration(c.MaxUnicastProbes)\n- clock.Advance(waitFor)\n-\n+ // Observe each probe sent while in the Probe state.\n+ for i := uint32(0); i < c.MaxUnicastProbes; i++ {\n+ clock.Advance(c.RetransmitTimer)\nwantProbes := []entryTestProbeInfo{\n- // The first probe is caused by the Unknown-to-Incomplete transition.\n- {\n- RemoteAddress: entryTestAddr1,\n- RemoteLinkAddress: tcpip.LinkAddress(\"\"),\n- LocalAddress: entryTestAddr2,\n- },\n- // The next three probe are caused by the Delay-to-Probe transition.\n- {\n- RemoteAddress: entryTestAddr1,\n- RemoteLinkAddress: entryTestLinkAddr1,\n- },\n- {\n- RemoteAddress: entryTestAddr1,\n- RemoteLinkAddress: entryTestLinkAddr1,\n- },\n{\nRemoteAddress: entryTestAddr1,\nRemoteLinkAddress: entryTestLinkAddr1,\n@@ -2976,10 +2982,26 @@ func TestEntryProbeToFailed(t *testing.T) {\n}\nlinkRes.mu.Lock()\ndiff := cmp.Diff(linkRes.probes, wantProbes)\n+ linkRes.probes = nil\nlinkRes.mu.Unlock()\nif diff != \"\" {\n- t.Fatalf(\"link address resolver probes mismatch (-got, +want):\\n%s\", diff)\n+ t.Fatalf(\"link address resolver probe #%d mismatch (-got, +want):\\n%s\", i+1, diff)\n+ }\n+\n+ e.mu.Lock()\n+ if e.neigh.State != Probe {\n+ t.Errorf(\"got e.neigh.State = %q, want = %q\", e.neigh.State, Probe)\n+ }\n+ e.mu.Unlock()\n+ }\n+\n+ // Wait for the last probe to expire, causing a transition to Failed.\n+ clock.Advance(c.RetransmitTimer)\n+ e.mu.Lock()\n+ if e.neigh.State != Failed {\n+ t.Errorf(\"got e.neigh.State = %q, want = %q\", e.neigh.State, Failed)\n}\n+ e.mu.Unlock()\nwantEvents := []testEntryEventInfo{\n{\n@@ -3023,12 +3045,6 @@ func TestEntryProbeToFailed(t *testing.T) {\nt.Errorf(\"nud dispatcher events mismatch (-got, +want):\\n%s\", diff)\n}\nnudDisp.mu.Unlock()\n-\n- e.mu.Lock()\n- if got, want := e.neigh.State, Failed; got != want {\n- t.Errorf(\"got e.neigh.State = %q, want = %q\", got, want)\n- }\n- e.mu.Unlock()\n}\nfunc TestEntryFailedGetsDeleted(t *testing.T) {\n"
}
] | Go | Apache License 2.0 | google/gvisor | Wait before transitioning NUD entries from Probe to Failed
Wait an additional RetransmitTimer duration after the last probe before
transitioning to Failed. The previous implementation transitions immediately to
Failed after sending the last probe, which is erroneous behavior.
PiperOrigin-RevId: 338723794 |
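Editor's note: the NUD fix changes when the Probe state gives up — the entry now waits one more RetransmitTimer after the final probe instead of failing the moment that probe is sent. A simplified sketch of that timing; the real logic lives in the neighborEntry job scheduling shown in the diff, and these names are illustrative.

```go
package main

import (
	"fmt"
	"time"
)

// probe sends up to maxProbes probes. After each probe, including the last
// one, it waits a full retransmit interval for a confirmation before either
// retrying or reporting failure.
func probe(send func(), confirmed <-chan struct{}, maxProbes int, retransmit time.Duration) bool {
	for i := 0; i < maxProbes; i++ {
		send()
		select {
		case <-confirmed:
			return true // Reachable.
		case <-time.After(retransmit):
			// No confirmation yet; fall through to the next probe, or to
			// failure only after the wait following the final probe expires.
		}
	}
	return false // Failed.
}

func main() {
	confirmed := make(chan struct{}) // Never signaled in this demo.
	ok := probe(func() { fmt.Println("probe sent") }, confirmed, 3, 10*time.Millisecond)
	fmt.Println("reachable:", ok) // reachable: false
}
```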
259,907 | 23.10.2020 12:53:07 | 25,200 | 61b379ee19bd160de01ad483d883f09fa0bce23c | [vfs] kernfs: cleanup/refactor. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/kernfs/fd_impl_util.go",
"new_path": "pkg/sentry/fsimpl/kernfs/fd_impl_util.go",
"diff": "@@ -145,8 +145,12 @@ func (fd *GenericDirectoryFD) filesystem() *vfs.Filesystem {\nreturn fd.vfsfd.VirtualDentry().Mount().Filesystem()\n}\n+func (fd *GenericDirectoryFD) dentry() *Dentry {\n+ return fd.vfsfd.Dentry().Impl().(*Dentry)\n+}\n+\nfunc (fd *GenericDirectoryFD) inode() Inode {\n- return fd.vfsfd.VirtualDentry().Dentry().Impl().(*Dentry).inode\n+ return fd.dentry().inode\n}\n// IterDirents implements vfs.FileDescriptionImpl.IterDirents. IterDirents holds\n@@ -176,8 +180,7 @@ func (fd *GenericDirectoryFD) IterDirents(ctx context.Context, cb vfs.IterDirent\n// Handle \"..\".\nif fd.off == 1 {\n- vfsd := fd.vfsfd.VirtualDentry().Dentry()\n- parentInode := genericParentOrSelf(vfsd.Impl().(*Dentry)).inode\n+ parentInode := genericParentOrSelf(fd.dentry()).inode\nstat, err := parentInode.Stat(ctx, fd.filesystem(), opts)\nif err != nil {\nreturn err\n@@ -265,8 +268,7 @@ func (fd *GenericDirectoryFD) Stat(ctx context.Context, opts vfs.StatOptions) (l\n// SetStat implements vfs.FileDescriptionImpl.SetStat.\nfunc (fd *GenericDirectoryFD) SetStat(ctx context.Context, opts vfs.SetStatOptions) error {\ncreds := auth.CredentialsFromContext(ctx)\n- inode := fd.vfsfd.VirtualDentry().Dentry().Impl().(*Dentry).inode\n- return inode.SetStat(ctx, fd.filesystem(), creds, opts)\n+ return fd.inode().SetStat(ctx, fd.filesystem(), creds, opts)\n}\n// Allocate implements vfs.FileDescriptionImpl.Allocate.\n"
}
] | Go | Apache License 2.0 | google/gvisor | [vfs] kernfs: cleanup/refactor.
PiperOrigin-RevId: 338728070 |
259,907 | 23.10.2020 13:54:57 | 25,200 | ad6d32f2260e85d2a7320265109868d753558c5e | [bazel] Increase number of jobs back to 300 | [
{
"change_type": "MODIFY",
"old_path": ".bazelrc",
"new_path": ".bazelrc",
"diff": "@@ -42,6 +42,5 @@ build:remote --extra_toolchains=//tools/bazeldefs:cc-toolchain-clang-x86_64-defa\nbuild:remote --extra_execution_platforms=//tools/bazeldefs:rbe_ubuntu1604\nbuild:remote --platforms=//tools/bazeldefs:rbe_ubuntu1604\nbuild:remote --crosstool_top=@rbe_default//cc:toolchain\n-# TODO(b/171495162): Revert to 300 jobs once bug is fixed.\n-build:remote --jobs=100\n+build:remote --jobs=300\nbuild:remote --remote_timeout=3600\n"
}
] | Go | Apache License 2.0 | google/gvisor | [bazel] Increase number of jobs back to 300
PiperOrigin-RevId: 338739277 |
259,853 | 23.10.2020 14:33:20 | 25,200 | d18346e79022f4b7039593a5793ab24dd6169314 | tools/parsers: disable nogo checks
There are too many dependencies. | [
{
"change_type": "MODIFY",
"old_path": "tools/parsers/BUILD",
"new_path": "tools/parsers/BUILD",
"diff": "@@ -7,6 +7,7 @@ go_test(\nsize = \"small\",\nsrcs = [\"go_parser_test.go\"],\nlibrary = \":parsers\",\n+ nogo = False,\ndeps = [\n\"//tools/bigquery\",\n\"@com_github_google_go_cmp//cmp:go_default_library\",\n@@ -19,6 +20,7 @@ go_library(\nsrcs = [\n\"go_parser.go\",\n],\n+ nogo = False,\nvisibility = [\"//:sandbox\"],\ndeps = [\n\"//test/benchmarks/tools\",\n"
}
] | Go | Apache License 2.0 | google/gvisor | tools/parsers: disable nogo checks
There are too many dependencies.
PiperOrigin-RevId: 338746264 |
259,875 | 23.10.2020 15:29:35 | 25,200 | e5c1b035ab3bbbbaf187d746f858ddd0a859602a | Introduce SemidDs struct for amd64 and arm64. | [
{
"change_type": "MODIFY",
"old_path": "pkg/abi/linux/BUILD",
"new_path": "pkg/abi/linux/BUILD",
"diff": "@@ -55,6 +55,8 @@ go_library(\n\"sched.go\",\n\"seccomp.go\",\n\"sem.go\",\n+ \"sem_amd64.go\",\n+ \"sem_arm64.go\",\n\"shm.go\",\n\"signal.go\",\n\"signalfd.go\",\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/abi/linux/sem.go",
"new_path": "pkg/abi/linux/sem.go",
"diff": "@@ -34,18 +34,6 @@ const (\nconst SEM_UNDO = 0x1000\n-// SemidDS is equivalent to struct semid64_ds.\n-//\n-// +marshal\n-type SemidDS struct {\n- SemPerm IPCPerm\n- SemOTime TimeT\n- SemCTime TimeT\n- SemNSems uint64\n- unused3 uint64\n- unused4 uint64\n-}\n-\n// Sembuf is equivalent to struct sembuf.\n//\n// +marshal slice:SembufSlice\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "pkg/abi/linux/sem_amd64.go",
"diff": "+// Copyright 2020 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+// +build amd64\n+\n+package linux\n+\n+// SemidDS is equivalent to struct semid64_ds.\n+//\n+// Source: arch/x86/include/uapi/asm/sembuf.h\n+//\n+// +marshal\n+type SemidDS struct {\n+ SemPerm IPCPerm\n+ SemOTime TimeT\n+ unused1 uint64\n+ SemCTime TimeT\n+ unused2 uint64\n+ SemNSems uint64\n+ unused3 uint64\n+ unused4 uint64\n+}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "pkg/abi/linux/sem_arm64.go",
"diff": "+// Copyright 2020 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+// +build arm64\n+\n+package linux\n+\n+// SemidDS is equivalent to struct semid64_ds.\n+//\n+// Source: include/uapi/asm-generic/sembuf.h\n+//\n+// +marshal\n+type SemidDS struct {\n+ SemPerm IPCPerm\n+ SemOTime TimeT\n+ SemCTime TimeT\n+ SemNSems uint64\n+ unused3 uint64\n+ unused4 uint64\n+}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Introduce SemidDs struct for amd64 and arm64.
PiperOrigin-RevId: 338756277 |
259,891 | 23.10.2020 15:44:22 | 25,200 | a04c8ad4cee876a8dc5ebab43c0a4759602841d9 | iptables testing: handle EINTR on calls to accept().
This caused test flakes. | [
{
"change_type": "MODIFY",
"old_path": "test/iptables/nat.go",
"new_path": "test/iptables/nat.go",
"diff": "@@ -577,11 +577,18 @@ func listenForRedirectedConn(ctx context.Context, ipv6 bool, originalDsts []net.\nconnCh := make(chan int)\nerrCh := make(chan error)\ngo func() {\n+ for {\nconnFD, _, err := syscall.Accept(sockfd)\n+ if errors.Is(err, syscall.EINTR) {\n+ continue\n+ }\nif err != nil {\nerrCh <- err\n+ return\n}\nconnCh <- connFD\n+ return\n+ }\n}()\n// Wait for accept() to return or for the context to finish.\n"
}
] | Go | Apache License 2.0 | google/gvisor | iptables testing: handle EINTR on calls to accept().
This caused test flakes.
PiperOrigin-RevId: 338758723 |
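Editor's note: the flake fix above restarts accept(2) when it returns EINTR. The retry idiom on its own, as a reusable helper with a self-contained demonstration (a sketch; the test wraps this in a goroutine with channels as shown in the diff):

```go
package main

import (
	"errors"
	"fmt"
	"net"
	"syscall"
)

// acceptRetryingEINTR keeps calling accept(2) until it either succeeds or
// fails with something other than an interrupted system call.
func acceptRetryingEINTR(sockfd int) (int, syscall.Sockaddr, error) {
	for {
		connFD, sa, err := syscall.Accept(sockfd)
		if errors.Is(err, syscall.EINTR) {
			continue // Interrupted by a signal; restart the call.
		}
		return connFD, sa, err
	}
}

func main() {
	// Listening TCP socket on a loopback ephemeral port.
	fd, err := syscall.Socket(syscall.AF_INET, syscall.SOCK_STREAM, 0)
	if err != nil {
		panic(err)
	}
	defer syscall.Close(fd)
	if err := syscall.Bind(fd, &syscall.SockaddrInet4{Addr: [4]byte{127, 0, 0, 1}}); err != nil {
		panic(err)
	}
	if err := syscall.Listen(fd, 1); err != nil {
		panic(err)
	}

	// Connect to it from a goroutine so the accept below returns.
	sa, err := syscall.Getsockname(fd)
	if err != nil {
		panic(err)
	}
	port := sa.(*syscall.SockaddrInet4).Port
	go net.Dial("tcp", fmt.Sprintf("127.0.0.1:%d", port))

	connFD, _, err := acceptRetryingEINTR(fd)
	if err != nil {
		panic(err)
	}
	defer syscall.Close(connFD)
	fmt.Println("accepted connection on fd", connFD)
}
```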
259,975 | 23.10.2020 16:03:04 | 25,200 | 634e14a09408e50ef70442c0114a8b1dd12c8d03 | Fix socket_ipv4_udp_unbound_loopback_test_linux
Handle "Resource temporarily unavailable" EAGAIN errors with a select
call before calling recvmsg.
Also rename similar helper call from "RecvMsgTimeout" to "RecvTimeout",
because it calls "recv". | [
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/socket_ipv4_udp_unbound.cc",
"new_path": "test/syscalls/linux/socket_ipv4_udp_unbound.cc",
"diff": "@@ -75,7 +75,7 @@ TEST_P(IPv4UDPUnboundSocketTest, IpMulticastLoopbackNoGroup) {\n// Check that we did not receive the multicast packet.\nchar recv_buf[sizeof(send_buf)] = {};\nEXPECT_THAT(\n- RecvMsgTimeout(socket2->get(), recv_buf, sizeof(recv_buf), 1 /*timeout*/),\n+ RecvTimeout(socket2->get(), recv_buf, sizeof(recv_buf), 1 /*timeout*/),\nPosixErrorIs(EAGAIN, ::testing::_));\n}\n@@ -209,7 +209,7 @@ TEST_P(IPv4UDPUnboundSocketTest, IpMulticastLoopbackAddr) {\n// Check that we received the multicast packet.\nchar recv_buf[sizeof(send_buf)] = {};\nASSERT_THAT(\n- RecvMsgTimeout(socket2->get(), recv_buf, sizeof(recv_buf), 1 /*timeout*/),\n+ RecvTimeout(socket2->get(), recv_buf, sizeof(recv_buf), 1 /*timeout*/),\nIsPosixErrorOkAndHolds(sizeof(recv_buf)));\nEXPECT_EQ(0, memcmp(send_buf, recv_buf, sizeof(send_buf)));\n@@ -265,7 +265,7 @@ TEST_P(IPv4UDPUnboundSocketTest, IpMulticastLoopbackNic) {\n// Check that we received the multicast packet.\nchar recv_buf[sizeof(send_buf)] = {};\nASSERT_THAT(\n- RecvMsgTimeout(socket2->get(), recv_buf, sizeof(recv_buf), 1 /*timeout*/),\n+ RecvTimeout(socket2->get(), recv_buf, sizeof(recv_buf), 1 /*timeout*/),\nIsPosixErrorOkAndHolds(sizeof(recv_buf)));\nEXPECT_EQ(0, memcmp(send_buf, recv_buf, sizeof(send_buf)));\n@@ -321,7 +321,7 @@ TEST_P(IPv4UDPUnboundSocketTest, IpMulticastLoopbackIfAddr) {\n// Check that we received the multicast packet.\nchar recv_buf[sizeof(send_buf)] = {};\nASSERT_THAT(\n- RecvMsgTimeout(socket2->get(), recv_buf, sizeof(recv_buf), 1 /*timeout*/),\n+ RecvTimeout(socket2->get(), recv_buf, sizeof(recv_buf), 1 /*timeout*/),\nIsPosixErrorOkAndHolds(sizeof(recv_buf)));\nEXPECT_EQ(0, memcmp(send_buf, recv_buf, sizeof(send_buf)));\n@@ -377,7 +377,7 @@ TEST_P(IPv4UDPUnboundSocketTest, IpMulticastLoopbackIfNic) {\n// Check that we received the multicast packet.\nchar recv_buf[sizeof(send_buf)] = {};\nASSERT_THAT(\n- RecvMsgTimeout(socket2->get(), recv_buf, sizeof(recv_buf), 1 /*timeout*/),\n+ RecvTimeout(socket2->get(), recv_buf, sizeof(recv_buf), 1 /*timeout*/),\nIsPosixErrorOkAndHolds(sizeof(recv_buf)));\nEXPECT_EQ(0, memcmp(send_buf, recv_buf, sizeof(send_buf)));\n@@ -437,7 +437,7 @@ TEST_P(IPv4UDPUnboundSocketTest, IpMulticastLoopbackIfAddrConnect) {\n// Check that we received the multicast packet.\nchar recv_buf[sizeof(send_buf)] = {};\nASSERT_THAT(\n- RecvMsgTimeout(socket2->get(), recv_buf, sizeof(recv_buf), 1 /*timeout*/),\n+ RecvTimeout(socket2->get(), recv_buf, sizeof(recv_buf), 1 /*timeout*/),\nIsPosixErrorOkAndHolds(sizeof(recv_buf)));\nEXPECT_EQ(0, memcmp(send_buf, recv_buf, sizeof(send_buf)));\n@@ -497,7 +497,7 @@ TEST_P(IPv4UDPUnboundSocketTest, IpMulticastLoopbackIfNicConnect) {\n// Check that we received the multicast packet.\nchar recv_buf[sizeof(send_buf)] = {};\nASSERT_THAT(\n- RecvMsgTimeout(socket2->get(), recv_buf, sizeof(recv_buf), 1 /*timeout*/),\n+ RecvTimeout(socket2->get(), recv_buf, sizeof(recv_buf), 1 /*timeout*/),\nIsPosixErrorOkAndHolds(sizeof(recv_buf)));\nEXPECT_EQ(0, memcmp(send_buf, recv_buf, sizeof(send_buf)));\n@@ -553,7 +553,7 @@ TEST_P(IPv4UDPUnboundSocketTest, IpMulticastLoopbackIfAddrSelf) {\n// Check that we received the multicast packet.\nchar recv_buf[sizeof(send_buf)] = {};\nASSERT_THAT(\n- RecvMsgTimeout(socket1->get(), recv_buf, sizeof(recv_buf), 1 /*timeout*/),\n+ RecvTimeout(socket1->get(), recv_buf, sizeof(recv_buf), 1 /*timeout*/),\nIsPosixErrorOkAndHolds(sizeof(recv_buf)));\nEXPECT_EQ(0, memcmp(send_buf, recv_buf, sizeof(send_buf)));\n@@ -609,7 +609,7 @@ 
TEST_P(IPv4UDPUnboundSocketTest, IpMulticastLoopbackIfNicSelf) {\n// Check that we received the multicast packet.\nchar recv_buf[sizeof(send_buf)] = {};\nASSERT_THAT(\n- RecvMsgTimeout(socket1->get(), recv_buf, sizeof(recv_buf), 1 /*timeout*/),\n+ RecvTimeout(socket1->get(), recv_buf, sizeof(recv_buf), 1 /*timeout*/),\nIsPosixErrorOkAndHolds(sizeof(recv_buf)));\nEXPECT_EQ(0, memcmp(send_buf, recv_buf, sizeof(send_buf)));\n@@ -669,7 +669,7 @@ TEST_P(IPv4UDPUnboundSocketTest, IpMulticastLoopbackIfAddrSelfConnect) {\n// Check that we did not receive the multicast packet.\nchar recv_buf[sizeof(send_buf)] = {};\nEXPECT_THAT(\n- RecvMsgTimeout(socket1->get(), recv_buf, sizeof(recv_buf), 1 /*timeout*/),\n+ RecvTimeout(socket1->get(), recv_buf, sizeof(recv_buf), 1 /*timeout*/),\nPosixErrorIs(EAGAIN, ::testing::_));\n}\n@@ -727,7 +727,7 @@ TEST_P(IPv4UDPUnboundSocketTest, IpMulticastLoopbackIfNicSelfConnect) {\n// Check that we did not receive the multicast packet.\nchar recv_buf[sizeof(send_buf)] = {};\nEXPECT_THAT(\n- RecvMsgTimeout(socket1->get(), recv_buf, sizeof(recv_buf), 1 /*timeout*/),\n+ RecvTimeout(socket1->get(), recv_buf, sizeof(recv_buf), 1 /*timeout*/),\nPosixErrorIs(EAGAIN, ::testing::_));\n}\n@@ -785,7 +785,7 @@ TEST_P(IPv4UDPUnboundSocketTest, IpMulticastLoopbackIfAddrSelfNoLoop) {\n// Check that we received the multicast packet.\nchar recv_buf[sizeof(send_buf)] = {};\nASSERT_THAT(\n- RecvMsgTimeout(socket1->get(), recv_buf, sizeof(recv_buf), 1 /*timeout*/),\n+ RecvTimeout(socket1->get(), recv_buf, sizeof(recv_buf), 1 /*timeout*/),\nIsPosixErrorOkAndHolds(sizeof(recv_buf)));\nEXPECT_EQ(0, memcmp(send_buf, recv_buf, sizeof(send_buf)));\n@@ -845,7 +845,7 @@ TEST_P(IPv4UDPUnboundSocketTest, IpMulticastLoopbackIfNicSelfNoLoop) {\n// Check that we received the multicast packet.\nchar recv_buf[sizeof(send_buf)] = {};\nASSERT_THAT(\n- RecvMsgTimeout(socket1->get(), recv_buf, sizeof(recv_buf), 1 /*timeout*/),\n+ RecvTimeout(socket1->get(), recv_buf, sizeof(recv_buf), 1 /*timeout*/),\nIsPosixErrorOkAndHolds(sizeof(recv_buf)));\nEXPECT_EQ(0, memcmp(send_buf, recv_buf, sizeof(send_buf)));\n@@ -919,7 +919,7 @@ TEST_P(IPv4UDPUnboundSocketTest, IpMulticastDropAddr) {\n// Check that we did not receive the multicast packet.\nchar recv_buf[sizeof(send_buf)] = {};\nEXPECT_THAT(\n- RecvMsgTimeout(socket2->get(), recv_buf, sizeof(recv_buf), 1 /*timeout*/),\n+ RecvTimeout(socket2->get(), recv_buf, sizeof(recv_buf), 1 /*timeout*/),\nPosixErrorIs(EAGAIN, ::testing::_));\n}\n@@ -977,7 +977,7 @@ TEST_P(IPv4UDPUnboundSocketTest, IpMulticastDropNic) {\n// Check that we did not receive the multicast packet.\nchar recv_buf[sizeof(send_buf)] = {};\nEXPECT_THAT(\n- RecvMsgTimeout(socket2->get(), recv_buf, sizeof(recv_buf), 1 /*timeout*/),\n+ RecvTimeout(socket2->get(), recv_buf, sizeof(recv_buf), 1 /*timeout*/),\nPosixErrorIs(EAGAIN, ::testing::_));\n}\n@@ -1330,8 +1330,8 @@ TEST_P(IPv4UDPUnboundSocketTest, TestMcastReceptionOnTwoSockets) {\n// Check that we received the multicast packet on both sockets.\nfor (auto& sockets : socket_pairs) {\nchar recv_buf[sizeof(send_buf)] = {};\n- ASSERT_THAT(RecvMsgTimeout(sockets->second_fd(), recv_buf,\n- sizeof(recv_buf), 1 /*timeout*/),\n+ ASSERT_THAT(RecvTimeout(sockets->second_fd(), recv_buf, sizeof(recv_buf),\n+ 1 /*timeout*/),\nIsPosixErrorOkAndHolds(sizeof(recv_buf)));\nEXPECT_EQ(0, memcmp(send_buf, recv_buf, sizeof(send_buf)));\n}\n@@ -1409,8 +1409,8 @@ TEST_P(IPv4UDPUnboundSocketTest, TestMcastReceptionWhenDroppingMemberships) {\n// Check that we received the 
multicast packet on both sockets.\nfor (auto& sockets : socket_pairs) {\nchar recv_buf[sizeof(send_buf)] = {};\n- ASSERT_THAT(RecvMsgTimeout(sockets->second_fd(), recv_buf,\n- sizeof(recv_buf), 1 /*timeout*/),\n+ ASSERT_THAT(RecvTimeout(sockets->second_fd(), recv_buf, sizeof(recv_buf),\n+ 1 /*timeout*/),\nIsPosixErrorOkAndHolds(sizeof(recv_buf)));\nEXPECT_EQ(0, memcmp(send_buf, recv_buf, sizeof(send_buf)));\n}\n@@ -1432,8 +1432,8 @@ TEST_P(IPv4UDPUnboundSocketTest, TestMcastReceptionWhenDroppingMemberships) {\nchar recv_buf[sizeof(send_buf)] = {};\nfor (auto& sockets : socket_pairs) {\n- ASSERT_THAT(RecvMsgTimeout(sockets->second_fd(), recv_buf,\n- sizeof(recv_buf), 1 /*timeout*/),\n+ ASSERT_THAT(RecvTimeout(sockets->second_fd(), recv_buf, sizeof(recv_buf),\n+ 1 /*timeout*/),\nPosixErrorIs(EAGAIN, ::testing::_));\n}\n}\n@@ -1486,7 +1486,7 @@ TEST_P(IPv4UDPUnboundSocketTest, TestBindToMcastThenJoinThenReceive) {\n// Check that we received the multicast packet.\nchar recv_buf[sizeof(send_buf)] = {};\nASSERT_THAT(\n- RecvMsgTimeout(socket2->get(), recv_buf, sizeof(recv_buf), 1 /*timeout*/),\n+ RecvTimeout(socket2->get(), recv_buf, sizeof(recv_buf), 1 /*timeout*/),\nIsPosixErrorOkAndHolds(sizeof(recv_buf)));\nEXPECT_EQ(0, memcmp(send_buf, recv_buf, sizeof(send_buf)));\n}\n@@ -1530,7 +1530,7 @@ TEST_P(IPv4UDPUnboundSocketTest, TestBindToMcastThenNoJoinThenNoReceive) {\n// Check that we don't receive the multicast packet.\nchar recv_buf[sizeof(send_buf)] = {};\nASSERT_THAT(\n- RecvMsgTimeout(socket2->get(), recv_buf, sizeof(recv_buf), 1 /*timeout*/),\n+ RecvTimeout(socket2->get(), recv_buf, sizeof(recv_buf), 1 /*timeout*/),\nPosixErrorIs(EAGAIN, ::testing::_));\n}\n@@ -1580,7 +1580,7 @@ TEST_P(IPv4UDPUnboundSocketTest, TestBindToMcastThenSend) {\n// Check that we received the packet.\nchar recv_buf[sizeof(send_buf)] = {};\nASSERT_THAT(\n- RecvMsgTimeout(socket2->get(), recv_buf, sizeof(recv_buf), 1 /*timeout*/),\n+ RecvTimeout(socket2->get(), recv_buf, sizeof(recv_buf), 1 /*timeout*/),\nIsPosixErrorOkAndHolds(sizeof(recv_buf)));\nEXPECT_EQ(0, memcmp(send_buf, recv_buf, sizeof(send_buf)));\n}\n@@ -1627,7 +1627,7 @@ TEST_P(IPv4UDPUnboundSocketTest, TestBindToBcastThenReceive) {\n// Check that we received the multicast packet.\nchar recv_buf[sizeof(send_buf)] = {};\nASSERT_THAT(\n- RecvMsgTimeout(socket2->get(), recv_buf, sizeof(recv_buf), 1 /*timeout*/),\n+ RecvTimeout(socket2->get(), recv_buf, sizeof(recv_buf), 1 /*timeout*/),\nIsPosixErrorOkAndHolds(sizeof(recv_buf)));\nEXPECT_EQ(0, memcmp(send_buf, recv_buf, sizeof(send_buf)));\n}\n@@ -1678,7 +1678,7 @@ TEST_P(IPv4UDPUnboundSocketTest, TestBindToBcastThenSend) {\n// Check that we received the packet.\nchar recv_buf[sizeof(send_buf)] = {};\nASSERT_THAT(\n- RecvMsgTimeout(socket2->get(), recv_buf, sizeof(recv_buf), 1 /*timeout*/),\n+ RecvTimeout(socket2->get(), recv_buf, sizeof(recv_buf), 1 /*timeout*/),\nIsPosixErrorOkAndHolds(sizeof(recv_buf)));\nEXPECT_EQ(0, memcmp(send_buf, recv_buf, sizeof(send_buf)));\n}\n@@ -1737,7 +1737,7 @@ TEST_P(IPv4UDPUnboundSocketTest, ReuseAddrDistribution_NoRandomSave) {\n// of the other sockets to have received it, but we will check that later.\nchar recv_buf[sizeof(send_buf)] = {};\nEXPECT_THAT(\n- RecvMsgTimeout(last->get(), recv_buf, sizeof(recv_buf), 1 /*timeout*/),\n+ RecvTimeout(last->get(), recv_buf, sizeof(recv_buf), 1 /*timeout*/),\nIsPosixErrorOkAndHolds(sizeof(send_buf)));\nEXPECT_EQ(0, memcmp(send_buf, recv_buf, sizeof(send_buf)));\n}\n@@ -1745,8 +1745,8 @@ TEST_P(IPv4UDPUnboundSocketTest, 
ReuseAddrDistribution_NoRandomSave) {\n// Verify that no other messages were received.\nfor (auto& socket : sockets) {\nchar recv_buf[kMessageSize] = {};\n- EXPECT_THAT(RecvMsgTimeout(socket->get(), recv_buf, sizeof(recv_buf),\n- 1 /*timeout*/),\n+ EXPECT_THAT(\n+ RecvTimeout(socket->get(), recv_buf, sizeof(recv_buf), 1 /*timeout*/),\nPosixErrorIs(EAGAIN, ::testing::_));\n}\n}\n@@ -2124,11 +2124,11 @@ TEST_P(IPv4UDPUnboundSocketTest, ReuseAddrReusePortDistribution) {\n// balancing (REUSEPORT) instead of the most recently bound socket\n// (REUSEADDR).\nchar recv_buf[kMessageSize] = {};\n- EXPECT_THAT(RecvMsgTimeout(receiver1->get(), recv_buf, sizeof(recv_buf),\n- 1 /*timeout*/),\n+ EXPECT_THAT(\n+ RecvTimeout(receiver1->get(), recv_buf, sizeof(recv_buf), 1 /*timeout*/),\nIsPosixErrorOkAndHolds(kMessageSize));\n- EXPECT_THAT(RecvMsgTimeout(receiver2->get(), recv_buf, sizeof(recv_buf),\n- 1 /*timeout*/),\n+ EXPECT_THAT(\n+ RecvTimeout(receiver2->get(), recv_buf, sizeof(recv_buf), 1 /*timeout*/),\nIsPosixErrorOkAndHolds(kMessageSize));\n}\n@@ -2193,8 +2193,8 @@ TEST_P(IPv4UDPUnboundSocketTest, SetAndReceiveIPPKTINFO) {\nreceived_msg.msg_controllen = CMSG_LEN(cmsg_data_len);\nreceived_msg.msg_control = received_cmsg_buf;\n- ASSERT_THAT(RetryEINTR(recvmsg)(receiver->get(), &received_msg, 0),\n- SyscallSucceedsWithValue(kDataLength));\n+ ASSERT_THAT(RecvMsgTimeout(receiver->get(), &received_msg, 1 /*timeout*/),\n+ IsPosixErrorOkAndHolds(kDataLength));\ncmsghdr* cmsg = CMSG_FIRSTHDR(&received_msg);\nASSERT_NE(cmsg, nullptr);\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/socket_test_util.cc",
"new_path": "test/syscalls/linux/socket_test_util.cc",
"diff": "@@ -753,8 +753,7 @@ PosixErrorOr<int> SendMsg(int sock, msghdr* msg, char buf[], int buf_size) {\nreturn ret;\n}\n-PosixErrorOr<int> RecvMsgTimeout(int sock, char buf[], int buf_size,\n- int timeout) {\n+PosixErrorOr<int> RecvTimeout(int sock, char buf[], int buf_size, int timeout) {\nfd_set rfd;\nstruct timeval to = {.tv_sec = timeout, .tv_usec = 0};\nFD_ZERO(&rfd);\n@@ -767,6 +766,19 @@ PosixErrorOr<int> RecvMsgTimeout(int sock, char buf[], int buf_size,\nreturn ret;\n}\n+PosixErrorOr<int> RecvMsgTimeout(int sock, struct msghdr* msg, int timeout) {\n+ fd_set rfd;\n+ struct timeval to = {.tv_sec = timeout, .tv_usec = 0};\n+ FD_ZERO(&rfd);\n+ FD_SET(sock, &rfd);\n+\n+ int ret;\n+ RETURN_ERROR_IF_SYSCALL_FAIL(ret = select(1, &rfd, NULL, NULL, &to));\n+ RETURN_ERROR_IF_SYSCALL_FAIL(\n+ ret = RetryEINTR(recvmsg)(sock, msg, MSG_DONTWAIT));\n+ return ret;\n+}\n+\nvoid RecvNoData(int sock) {\nchar data = 0;\nstruct iovec iov;\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/socket_test_util.h",
"new_path": "test/syscalls/linux/socket_test_util.h",
"diff": "@@ -467,9 +467,12 @@ PosixError FreeAvailablePort(int port);\n// SendMsg converts a buffer to an iovec and adds it to msg before sending it.\nPosixErrorOr<int> SendMsg(int sock, msghdr* msg, char buf[], int buf_size);\n-// RecvMsgTimeout calls select on sock with timeout and then calls recv on sock.\n-PosixErrorOr<int> RecvMsgTimeout(int sock, char buf[], int buf_size,\n- int timeout);\n+// RecvTimeout calls select on sock with timeout and then calls recv on sock.\n+PosixErrorOr<int> RecvTimeout(int sock, char buf[], int buf_size, int timeout);\n+\n+// RecvMsgTimeout calls select on sock with timeout and then calls recvmsg on\n+// sock.\n+PosixErrorOr<int> RecvMsgTimeout(int sock, msghdr* msg, int timeout);\n// RecvNoData checks that no data is receivable on sock.\nvoid RecvNoData(int sock);\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/udp_socket.cc",
"new_path": "test/syscalls/linux/udp_socket.cc",
"diff": "@@ -838,7 +838,7 @@ TEST_P(UdpSocketTest, ReceiveBeforeConnect) {\n// Receive the data. It works because it was sent before the connect.\nchar received[sizeof(buf)];\nEXPECT_THAT(\n- RecvMsgTimeout(bind_.get(), received, sizeof(received), 1 /*timeout*/),\n+ RecvTimeout(bind_.get(), received, sizeof(received), 1 /*timeout*/),\nIsPosixErrorOkAndHolds(sizeof(received)));\nEXPECT_EQ(memcmp(buf, received, sizeof(buf)), 0);\n@@ -928,8 +928,7 @@ TEST_P(UdpSocketTest, ReadShutdownNonblockPendingData) {\nSyscallSucceedsWithValue(1));\n// We should get the data even though read has been shutdown.\n- EXPECT_THAT(\n- RecvMsgTimeout(bind_.get(), received, 2 /*buf_size*/, 1 /*timeout*/),\n+ EXPECT_THAT(RecvTimeout(bind_.get(), received, 2 /*buf_size*/, 1 /*timeout*/),\nIsPosixErrorOkAndHolds(2));\n// Because we read less than the entire packet length, since it's a packet\n@@ -1698,7 +1697,7 @@ TEST_P(UdpSocketTest, RecvBufLimitsEmptyRcvBuf) {\nsendto(sock_.get(), buf.data(), buf.size(), 0, bind_addr_, addrlen_),\nSyscallSucceedsWithValue(buf.size()));\nstd::vector<char> received(buf.size());\n- EXPECT_THAT(RecvMsgTimeout(bind_.get(), received.data(), received.size(),\n+ EXPECT_THAT(RecvTimeout(bind_.get(), received.data(), received.size(),\n1 /*timeout*/),\nIsPosixErrorOkAndHolds(received.size()));\n}\n@@ -1714,7 +1713,7 @@ TEST_P(UdpSocketTest, RecvBufLimitsEmptyRcvBuf) {\nSyscallSucceedsWithValue(buf.size()));\nstd::vector<char> received(buf.size());\n- ASSERT_THAT(RecvMsgTimeout(bind_.get(), received.data(), received.size(),\n+ ASSERT_THAT(RecvTimeout(bind_.get(), received.data(), received.size(),\n1 /*timeout*/),\nIsPosixErrorOkAndHolds(received.size()));\n}\n@@ -1785,7 +1784,7 @@ TEST_P(UdpSocketTest, RecvBufLimits) {\nfor (int i = 0; i < sent - 1; i++) {\n// Receive the data.\nstd::vector<char> received(buf.size());\n- EXPECT_THAT(RecvMsgTimeout(bind_.get(), received.data(), received.size(),\n+ EXPECT_THAT(RecvTimeout(bind_.get(), received.data(), received.size(),\n1 /*timeout*/),\nIsPosixErrorOkAndHolds(received.size()));\nEXPECT_EQ(memcmp(buf.data(), received.data(), buf.size()), 0);\n"
}
] | Go | Apache License 2.0 | google/gvisor | Fix socket_ipv4_udp_unbound_loopback_test_linux
Handle "Resource temporarily unavailable" EAGAIN errors with a select
call before calling recvmsg.
Also rename similar helper call from "RecvMsgTimeout" to "RecvTimeout",
because it calls "recv".
PiperOrigin-RevId: 338761695 |
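Editor's note: the test fix pairs a timed select with a non-blocking receive so that a slow packet does not surface as a spurious EAGAIN. A Go sketch of the same idea; it uses poll(2) via golang.org/x/sys/unix purely for brevity, which is an assumption of this sketch and not what the C++ helper does.

```go
package main

import (
	"fmt"
	"time"

	"golang.org/x/sys/unix"
)

func must(err error) {
	if err != nil {
		panic(err)
	}
}

// recvTimeout waits up to timeout for fd to become readable and then does a
// non-blocking receive into buf. It returns unix.EAGAIN only if nothing
// arrived within the timeout, mirroring RecvTimeout in the diff.
func recvTimeout(fd int, buf []byte, timeout time.Duration) (int, error) {
	pfds := []unix.PollFd{{Fd: int32(fd), Events: unix.POLLIN}}
	n, err := unix.Poll(pfds, int(timeout.Milliseconds()))
	if err != nil {
		return 0, err
	}
	if n == 0 {
		return 0, unix.EAGAIN // Timed out with no data.
	}
	m, _, err := unix.Recvfrom(fd, buf, unix.MSG_DONTWAIT)
	return m, err
}

func main() {
	// Two loopback UDP sockets: one receiver bound to an ephemeral port, one sender.
	recvFD, err := unix.Socket(unix.AF_INET, unix.SOCK_DGRAM, 0)
	must(err)
	defer unix.Close(recvFD)
	must(unix.Bind(recvFD, &unix.SockaddrInet4{Addr: [4]byte{127, 0, 0, 1}}))
	raddr, err := unix.Getsockname(recvFD)
	must(err)

	sendFD, err := unix.Socket(unix.AF_INET, unix.SOCK_DGRAM, 0)
	must(err)
	defer unix.Close(sendFD)
	must(unix.Sendto(sendFD, []byte("ping"), 0, raddr))

	buf := make([]byte, 16)
	n, err := recvTimeout(recvFD, buf, time.Second)
	fmt.Println(n, string(buf[:n]), err) // 4 ping <nil>
}
```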
259,992 | 23.10.2020 18:33:57 | 25,200 | 3ed8ace87123a5cee4fd3aa3751bb24c151749ff | Fix nogo errors in specutils | [
{
"change_type": "MODIFY",
"old_path": "runsc/specutils/specutils.go",
"new_path": "runsc/specutils/specutils.go",
"diff": "@@ -425,7 +425,7 @@ func Mount(src, dst, typ string, flags uint32) error {\n// Special case, as there is no source directory for proc mounts.\nisDir = true\n} else if fi, err := os.Stat(src); err != nil {\n- return fmt.Errorf(\"Stat(%q) failed: %v\", src, err)\n+ return fmt.Errorf(\"stat(%q) failed: %v\", src, err)\n} else {\nisDir = fi.IsDir()\n}\n@@ -433,25 +433,25 @@ func Mount(src, dst, typ string, flags uint32) error {\nif isDir {\n// Create the destination directory.\nif err := os.MkdirAll(dst, 0777); err != nil {\n- return fmt.Errorf(\"Mkdir(%q) failed: %v\", dst, err)\n+ return fmt.Errorf(\"mkdir(%q) failed: %v\", dst, err)\n}\n} else {\n// Create the parent destination directory.\nparent := path.Dir(dst)\nif err := os.MkdirAll(parent, 0777); err != nil {\n- return fmt.Errorf(\"Mkdir(%q) failed: %v\", parent, err)\n+ return fmt.Errorf(\"mkdir(%q) failed: %v\", parent, err)\n}\n// Create the destination file if it does not exist.\nf, err := os.OpenFile(dst, syscall.O_CREAT, 0777)\nif err != nil {\n- return fmt.Errorf(\"Open(%q) failed: %v\", dst, err)\n+ return fmt.Errorf(\"open(%q) failed: %v\", dst, err)\n}\nf.Close()\n}\n// Do the mount.\nif err := syscall.Mount(src, dst, typ, uintptr(flags), \"\"); err != nil {\n- return fmt.Errorf(\"Mount(%q, %q, %d) failed: %v\", src, dst, flags, err)\n+ return fmt.Errorf(\"mount(%q, %q, %d) failed: %v\", src, dst, flags, err)\n}\nreturn nil\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Fix nogo errors in specutils
PiperOrigin-RevId: 338780793 |
260,003 | 23.10.2020 19:22:07 | 25,200 | 8dfbec28a47483e0c03a5c94331081d7219f215b | Fix nogo tests in //pkg/sentry/socket/... | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/control/control_vfs2.go",
"new_path": "pkg/sentry/socket/control/control_vfs2.go",
"diff": "@@ -36,9 +36,9 @@ type SCMRightsVFS2 interface {\nFiles(ctx context.Context, max int) (rf RightsFilesVFS2, truncated bool)\n}\n-// RightsFilesVFS2 represents a SCM_RIGHTS socket control message. A reference is\n-// maintained for each vfs.FileDescription and is release either when an FD is created or\n-// when the Release method is called.\n+// RightsFilesVFS2 represents a SCM_RIGHTS socket control message. A reference\n+// is maintained for each vfs.FileDescription and is release either when an FD\n+// is created or when the Release method is called.\n//\n// +stateify savable\ntype RightsFilesVFS2 []*vfs.FileDescription\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/hostinet/stack.go",
"new_path": "pkg/sentry/socket/hostinet/stack.go",
"diff": "@@ -430,18 +430,18 @@ func (s *Stack) Statistics(stat interface{}, arg string) error {\n}\nif rawLine == \"\" {\n- return fmt.Errorf(\"Failed to get raw line\")\n+ return fmt.Errorf(\"failed to get raw line\")\n}\nparts := strings.SplitN(rawLine, \":\", 2)\nif len(parts) != 2 {\n- return fmt.Errorf(\"Failed to get prefix from: %q\", rawLine)\n+ return fmt.Errorf(\"failed to get prefix from: %q\", rawLine)\n}\nsliceStat = toSlice(stat)\nfields := strings.Fields(strings.TrimSpace(parts[1]))\nif len(fields) != len(sliceStat) {\n- return fmt.Errorf(\"Failed to parse fields: %q\", rawLine)\n+ return fmt.Errorf(\"failed to parse fields: %q\", rawLine)\n}\nif _, ok := stat.(*inet.StatSNMPTCP); ok {\nsnmpTCP = true\n@@ -457,7 +457,7 @@ func (s *Stack) Statistics(stat interface{}, arg string) error {\nsliceStat[i], err = strconv.ParseUint(fields[i], 10, 64)\n}\nif err != nil {\n- return fmt.Errorf(\"Failed to parse field %d from: %q, %v\", i, rawLine, err)\n+ return fmt.Errorf(\"failed to parse field %d from: %q, %v\", i, rawLine, err)\n}\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/netfilter/tcp_matcher.go",
"new_path": "pkg/sentry/socket/netfilter/tcp_matcher.go",
"diff": "@@ -71,7 +71,7 @@ func (tcpMarshaler) unmarshal(buf []byte, filter stack.IPHeaderFilter) (stack.Ma\n}\nif filter.Protocol != header.TCPProtocolNumber {\n- return nil, fmt.Errorf(\"TCP matching is only valid for protocol %d.\", header.TCPProtocolNumber)\n+ return nil, fmt.Errorf(\"TCP matching is only valid for protocol %d\", header.TCPProtocolNumber)\n}\nreturn &TCPMatcher{\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/netfilter/udp_matcher.go",
"new_path": "pkg/sentry/socket/netfilter/udp_matcher.go",
"diff": "@@ -68,7 +68,7 @@ func (udpMarshaler) unmarshal(buf []byte, filter stack.IPHeaderFilter) (stack.Ma\n}\nif filter.Protocol != header.UDPProtocolNumber {\n- return nil, fmt.Errorf(\"UDP matching is only valid for protocol %d.\", header.UDPProtocolNumber)\n+ return nil, fmt.Errorf(\"UDP matching is only valid for protocol %d\", header.UDPProtocolNumber)\n}\nreturn &UDPMatcher{\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/netlink/route/protocol.go",
"new_path": "pkg/sentry/socket/netlink/route/protocol.go",
"diff": "@@ -36,9 +36,9 @@ type commandKind int\nconst (\nkindNew commandKind = 0x0\n- kindDel = 0x1\n- kindGet = 0x2\n- kindSet = 0x3\n+ kindDel commandKind = 0x1\n+ kindGet commandKind = 0x2\n+ kindSet commandKind = 0x3\n)\nfunc typeKind(typ uint16) commandKind {\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/unix/transport/unix.go",
"new_path": "pkg/sentry/socket/unix/transport/unix.go",
"diff": "@@ -487,7 +487,7 @@ func (q *streamQueueReceiver) Recv(ctx context.Context, data [][]byte, wantCreds\nc := q.control.Clone()\n// Don't consume data since we are peeking.\n- copied, data, _ = vecCopy(data, q.buffer)\n+ copied, _, _ = vecCopy(data, q.buffer)\nreturn copied, copied, c, false, q.addr, notify, nil\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "tools/nogo/config.go",
"new_path": "tools/nogo/config.go",
"diff": "@@ -410,23 +410,6 @@ func init() {\n\"pkg/sentry/mm/special_mappable.go:80\",\n\"pkg/sentry/platform/systrap/subprocess.go:370\",\n\"pkg/sentry/platform/systrap/usertrap/usertrap_amd64.go:124\",\n- \"pkg/sentry/socket/control/control.go:260\",\n- \"pkg/sentry/socket/control/control.go:94\",\n- \"pkg/sentry/socket/control/control_vfs2.go:37\",\n- \"pkg/sentry/socket/hostinet/stack.go:433\",\n- \"pkg/sentry/socket/hostinet/stack.go:438\",\n- \"pkg/sentry/socket/hostinet/stack.go:444\",\n- \"pkg/sentry/socket/hostinet/stack.go:460\",\n- \"pkg/sentry/socket/netfilter/tcp_matcher.go:74\",\n- \"pkg/sentry/socket/netfilter/udp_matcher.go:71\",\n- \"pkg/sentry/socket/netlink/route/protocol.go:38\",\n- \"pkg/sentry/socket/socket.go:332\",\n- \"pkg/sentry/socket/unix/transport/connectioned.go:394\",\n- \"pkg/sentry/socket/unix/transport/connectionless.go:152\",\n- \"pkg/sentry/socket/unix/transport/unix.go:436\",\n- \"pkg/sentry/socket/unix/transport/unix.go:490\",\n- \"pkg/sentry/socket/unix/transport/unix.go:685\",\n- \"pkg/sentry/socket/unix/transport/unix.go:795\",\n\"pkg/sentry/syscalls/linux/sys_sem.go:62\",\n\"pkg/sentry/syscalls/linux/sys_time.go:189\",\n\"pkg/sentry/usage/cpu.go:42\",\n"
}
] | Go | Apache License 2.0 | google/gvisor | Fix nogo tests in //pkg/sentry/socket/...
PiperOrigin-RevId: 338784921 |
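Editor's note: among the static-analysis fixes in this record, the netlink route change repeats the commandKind type on every constant. One plausible reading: in a const block, a spec with its own value but no type yields an untyped constant, so only kindNew carried the commandKind type before. A small illustration (names other than commandKind are hypothetical):

```go
package main

import "fmt"

type commandKind int

const (
	// With an explicit value but no type annotation, a constant in this block
	// would be an untyped integer rather than a commandKind; repeating the
	// type keeps the whole set uniformly typed.
	kindNew commandKind = 0x0
	kindDel commandKind = 0x1
	kindGet commandKind = 0x2
	kindSet commandKind = 0x3
)

func main() {
	fmt.Printf("%T %T\n", kindNew, kindSet) // main.commandKind main.commandKind
}
```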
259,860 | 23.10.2020 19:24:51 | 25,200 | 54d2d927ac9c3ee290d44405a3028307459a49fb | Direct gvisor.dev/issues to the same place as gvisor.dev/issue.
Also let the Github bug reviver detect both in TODOs. | [
{
"change_type": "MODIFY",
"old_path": "tools/github/reviver/github.go",
"new_path": "tools/github/reviver/github.go",
"diff": "@@ -121,13 +121,24 @@ func (b *GitHubBugger) Activate(todo *Todo) (bool, error) {\nreturn true, nil\n}\n+var issuePrefixes = []string{\n+ \"gvisor.dev/issue/\",\n+ \"gvisor.dev/issues/\",\n+}\n+\n// parseIssueNo parses the issue number out of the issue url.\n+//\n+// 0 is returned if url does not correspond to an issue.\nfunc parseIssueNo(url string) (int, error) {\n- const prefix = \"gvisor.dev/issue/\"\n-\n// First check if I can handle the TODO.\n- idStr := strings.TrimPrefix(url, prefix)\n- if len(url) == len(idStr) {\n+ var idStr string\n+ for _, p := range issuePrefixes {\n+ if str := strings.TrimPrefix(url, p); len(str) < len(url) {\n+ idStr = str\n+ break\n+ }\n+ }\n+ if len(idStr) == 0 {\nreturn 0, nil\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "tools/github/reviver/reviver_test.go",
"new_path": "tools/github/reviver/reviver_test.go",
"diff": "@@ -32,6 +32,15 @@ func TestProcessLine(t *testing.T) {\n},\n},\n},\n+ {\n+ line: \"// TODO(foobar.com/issues/123): comment, bla. blabla.\",\n+ want: &Todo{\n+ Issue: \"foobar.com/issues/123\",\n+ Locations: []Location{\n+ {Comment: \"comment, bla. blabla.\"},\n+ },\n+ },\n+ },\n{\nline: \"// FIXME(b/123): internal bug\",\nwant: &Todo{\n"
},
{
"change_type": "MODIFY",
"old_path": "website/cmd/server/main.go",
"new_path": "website/cmd/server/main.go",
"diff": "@@ -29,6 +29,7 @@ var redirects = map[string]string{\n// GitHub redirects.\n\"/change\": \"https://github.com/google/gvisor\",\n\"/issue\": \"https://github.com/google/gvisor/issues\",\n+ \"/issues\": \"https://github.com/google/gvisor/issues\",\n\"/issue/new\": \"https://github.com/google/gvisor/issues/new\",\n\"/pr\": \"https://github.com/google/gvisor/pulls\",\n@@ -60,6 +61,7 @@ var redirects = map[string]string{\nvar prefixHelpers = map[string]string{\n\"change\": \"https://github.com/google/gvisor/commit/%s\",\n\"issue\": \"https://github.com/google/gvisor/issues/%s\",\n+ \"issues\": \"https://github.com/google/gvisor/issues/%s\",\n\"pr\": \"https://github.com/google/gvisor/pull/%s\",\n// Redirects to compatibility docs.\n"
}
] | Go | Apache License 2.0 | google/gvisor | Direct gvisor.dev/issues to the same place as gvisor.dev/issue.
Also let the Github bug reviver detect both in TODOs.
PiperOrigin-RevId: 338785089 |
259,885 | 24.10.2020 00:22:11 | 25,200 | bc814b01ab022a95c586499d9f6105d6a023ae57 | Avoid excessive save/restore cycles in socket_ipv4_udp_unbound tests. | [
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/BUILD",
"new_path": "test/syscalls/linux/BUILD",
"diff": "@@ -2435,6 +2435,7 @@ cc_library(\n\"@com_google_absl//absl/memory\",\ngtest,\n\"//test/util:posix_error\",\n+ \"//test/util:save_util\",\n\"//test/util:test_util\",\n],\nalwayslink = 1,\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/socket_ipv4_udp_unbound.cc",
"new_path": "test/syscalls/linux/socket_ipv4_udp_unbound.cc",
"diff": "#include \"test/syscalls/linux/ip_socket_test_util.h\"\n#include \"test/syscalls/linux/socket_test_util.h\"\n#include \"test/util/posix_error.h\"\n+#include \"test/util/save_util.h\"\n#include \"test/util/test_util.h\"\nnamespace gvisor {\n@@ -2108,6 +2109,9 @@ TEST_P(IPv4UDPUnboundSocketTest, ReuseAddrReusePortDistribution) {\nconstexpr int kMessageSize = 10;\n+ // Saving during each iteration of the following loop is too expensive.\n+ DisableSave ds;\n+\nfor (int i = 0; i < 100; ++i) {\n// Send a new message to the REUSEADDR/REUSEPORT group. We use a new socket\n// each time so that a new ephemerial port will be used each time. This\n@@ -2120,6 +2124,8 @@ TEST_P(IPv4UDPUnboundSocketTest, ReuseAddrReusePortDistribution) {\nSyscallSucceedsWithValue(sizeof(send_buf)));\n}\n+ ds.reset();\n+\n// Check that both receivers got messages. This checks that we are using load\n// balancing (REUSEPORT) instead of the most recently bound socket\n// (REUSEADDR).\n"
}
] | Go | Apache License 2.0 | google/gvisor | Avoid excessive save/restore cycles in socket_ipv4_udp_unbound tests.
PiperOrigin-RevId: 338805321 |
259,860 | 24.10.2020 07:46:30 | 25,200 | 4feb5c7c263de2310608d1a0e608d4ffd5e2990f | Add leak checking to vfs2 structures that cannot use the refs_vfs2 template.
Updates | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/gofer/BUILD",
"new_path": "pkg/sentry/fsimpl/gofer/BUILD",
"diff": "@@ -54,6 +54,7 @@ go_library(\n\"//pkg/log\",\n\"//pkg/p9\",\n\"//pkg/refs\",\n+ \"//pkg/refsvfs2\",\n\"//pkg/safemem\",\n\"//pkg/sentry/fs/fsutil\",\n\"//pkg/sentry/fs/lock\",\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/gofer/directory.go",
"new_path": "pkg/sentry/fsimpl/gofer/directory.go",
"diff": "@@ -21,6 +21,7 @@ import (\n\"gvisor.dev/gvisor/pkg/abi/linux\"\n\"gvisor.dev/gvisor/pkg/context\"\n\"gvisor.dev/gvisor/pkg/p9\"\n+ \"gvisor.dev/gvisor/pkg/refsvfs2\"\n\"gvisor.dev/gvisor/pkg/sentry/kernel/auth\"\n\"gvisor.dev/gvisor/pkg/sentry/kernel/pipe\"\n\"gvisor.dev/gvisor/pkg/sentry/socket/unix/transport\"\n@@ -100,6 +101,9 @@ func (d *dentry) createSyntheticChildLocked(opts *createSyntheticOpts) {\nhostFD: -1,\nnlink: uint32(2),\n}\n+ if refsvfs2.LeakCheckEnabled() {\n+ refsvfs2.Register(child, \"gofer.dentry\")\n+ }\nswitch opts.mode.FileType() {\ncase linux.S_IFDIR:\n// Nothing else needs to be done.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/gofer/gofer.go",
"new_path": "pkg/sentry/fsimpl/gofer/gofer.go",
"diff": "@@ -46,6 +46,8 @@ import (\n\"gvisor.dev/gvisor/pkg/context\"\n\"gvisor.dev/gvisor/pkg/log\"\n\"gvisor.dev/gvisor/pkg/p9\"\n+ refs_vfs1 \"gvisor.dev/gvisor/pkg/refs\"\n+ \"gvisor.dev/gvisor/pkg/refsvfs2\"\n\"gvisor.dev/gvisor/pkg/sentry/fs/fsutil\"\nfslock \"gvisor.dev/gvisor/pkg/sentry/fs/lock\"\n\"gvisor.dev/gvisor/pkg/sentry/kernel/auth\"\n@@ -109,8 +111,8 @@ type filesystem struct {\n// cachedDentries contains all dentries with 0 references. (Due to race\n// conditions, it may also contain dentries with non-zero references.)\n- // cachedDentriesLen is the number of dentries in cachedDentries. These\n- // fields are protected by renameMu.\n+ // cachedDentriesLen is the number of dentries in cachedDentries. These fields\n+ // are protected by renameMu.\ncachedDentries dentryList\ncachedDentriesLen uint64\n@@ -134,6 +136,10 @@ type filesystem struct {\n// savedDentryRW records open read/write handles during save/restore.\nsavedDentryRW map[*dentry]savedDentryRW\n+\n+ // released is nonzero once filesystem.Release has been called. It is accessed\n+ // with atomic memory operations.\n+ released int32\n}\n// +stateify savable\n@@ -454,9 +460,8 @@ func (fstype FilesystemType) GetFilesystem(ctx context.Context, vfsObj *vfs.Virt\nreturn nil, nil, err\n}\n// Set the root's reference count to 2. One reference is returned to the\n- // caller, and the other is deliberately leaked to prevent the root from\n- // being \"cached\" and subsequently evicted. Its resources will still be\n- // cleaned up by fs.Release().\n+ // caller, and the other is held by fs to prevent the root from being \"cached\"\n+ // and subsequently evicted.\nroot.refs = 2\nfs.root = root\n@@ -526,15 +531,16 @@ func (fs *filesystem) dial(ctx context.Context) error {\n// Release implements vfs.FilesystemImpl.Release.\nfunc (fs *filesystem) Release(ctx context.Context) {\n- mf := fs.mfp.MemoryFile()\n+ atomic.StoreInt32(&fs.released, 1)\n+ mf := fs.mfp.MemoryFile()\nfs.syncMu.Lock()\nfor d := range fs.syncableDentries {\nd.handleMu.Lock()\nd.dataMu.Lock()\nif h := d.writeHandleLocked(); h.isOpen() {\n// Write dirty cached data to the remote file.\n- if err := fsutil.SyncDirtyAll(ctx, &d.cache, &d.dirty, d.size, fs.mfp.MemoryFile(), h.writeFromBlocksAt); err != nil {\n+ if err := fsutil.SyncDirtyAll(ctx, &d.cache, &d.dirty, d.size, mf, h.writeFromBlocksAt); err != nil {\nlog.Warningf(\"gofer.filesystem.Release: failed to flush dentry: %v\", err)\n}\n// TODO(jamieliu): Do we need to flushf/fsync d?\n@@ -555,6 +561,21 @@ func (fs *filesystem) Release(ctx context.Context) {\n// fs.\nfs.syncMu.Unlock()\n+ // If leak checking is enabled, release all outstanding references in the\n+ // filesystem. We deliberately avoid doing this outside of leak checking; we\n+ // have released all external resources above rather than relying on dentry\n+ // destructors.\n+ if refs_vfs1.GetLeakMode() != refs_vfs1.NoLeakChecking {\n+ fs.renameMu.Lock()\n+ fs.root.releaseSyntheticRecursiveLocked(ctx)\n+ fs.evictAllCachedDentriesLocked(ctx)\n+ fs.renameMu.Unlock()\n+\n+ // An extra reference was held by the filesystem on the root to prevent it from\n+ // being cached/evicted.\n+ fs.root.DecRef(ctx)\n+ }\n+\nif !fs.iopts.LeakConnection {\n// Close the connection to the server. 
This implicitly clunks all fids.\nfs.client.Close()\n@@ -563,6 +584,31 @@ func (fs *filesystem) Release(ctx context.Context) {\nfs.vfsfs.VirtualFilesystem().PutAnonBlockDevMinor(fs.devMinor)\n}\n+// releaseSyntheticRecursiveLocked traverses the tree with root d and decrements\n+// the reference count on every synthetic dentry. Synthetic dentries have one\n+// reference for existence that should be dropped during filesystem.Release.\n+//\n+// Precondition: d.fs.renameMu is locked.\n+func (d *dentry) releaseSyntheticRecursiveLocked(ctx context.Context) {\n+ if d.isSynthetic() {\n+ d.decRefLocked()\n+ d.checkCachingLocked(ctx)\n+ }\n+ if d.isDir() {\n+ var children []*dentry\n+ d.dirMu.Lock()\n+ for _, child := range d.children {\n+ children = append(children, child)\n+ }\n+ d.dirMu.Unlock()\n+ for _, child := range children {\n+ if child != nil {\n+ child.releaseSyntheticRecursiveLocked(ctx)\n+ }\n+ }\n+ }\n+}\n+\n// dentry implements vfs.DentryImpl.\n//\n// +stateify savable\n@@ -815,6 +861,9 @@ func (fs *filesystem) newDentry(ctx context.Context, file p9file, qid p9.QID, ma\nd.nlink = uint32(attr.NLink)\n}\nd.vfsd.Init(d)\n+ if refsvfs2.LeakCheckEnabled() {\n+ refsvfs2.Register(d, \"gofer.dentry\")\n+ }\nfs.syncMu.Lock()\nfs.syncableDentries[d] = struct{}{}\n@@ -1210,6 +1259,11 @@ func (d *dentry) decRefLocked() {\n}\n}\n+// LeakMessage implements refsvfs2.CheckedObject.LeakMessage.\n+func (d *dentry) LeakMessage() string {\n+ return fmt.Sprintf(\"[gofer.dentry %p] reference count of %d instead of -1\", d, atomic.LoadInt64(&d.refs))\n+}\n+\n// InotifyWithParent implements vfs.DentryImpl.InotifyWithParent.\nfunc (d *dentry) InotifyWithParent(ctx context.Context, events, cookie uint32, et vfs.EventType) {\nif d.isDir() {\n@@ -1292,6 +1346,16 @@ func (d *dentry) checkCachingLocked(ctx context.Context) {\nif d.watches.Size() > 0 {\nreturn\n}\n+\n+ if atomic.LoadInt32(&d.fs.released) != 0 {\n+ if d.parent != nil {\n+ d.parent.dirMu.Lock()\n+ delete(d.parent.children, d.name)\n+ d.parent.dirMu.Unlock()\n+ }\n+ d.destroyLocked(ctx)\n+ }\n+\n// If d is already cached, just move it to the front of the LRU.\nif d.cached {\nd.fs.cachedDentries.Remove(d)\n@@ -1310,6 +1374,14 @@ func (d *dentry) checkCachingLocked(ctx context.Context) {\n}\n}\n+// Precondition: fs.renameMu must be locked for writing; it may be temporarily\n+// unlocked.\n+func (fs *filesystem) evictAllCachedDentriesLocked(ctx context.Context) {\n+ for fs.cachedDentriesLen != 0 {\n+ fs.evictCachedDentryLocked(ctx)\n+ }\n+}\n+\n// Preconditions:\n// * fs.renameMu must be locked for writing; it may be temporarily unlocked.\n// * fs.cachedDentriesLen != 0.\n@@ -1422,6 +1494,10 @@ func (d *dentry) destroyLocked(ctx context.Context) {\npanic(\"gofer.dentry.DecRef() called without holding a reference\")\n}\n}\n+\n+ if refsvfs2.LeakCheckEnabled() {\n+ refsvfs2.Unregister(d, \"gofer.dentry\")\n+ }\n}\nfunc (d *dentry) isDeleted() bool {\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/gofer/save_restore.go",
"new_path": "pkg/sentry/fsimpl/gofer/save_restore.go",
"diff": "@@ -23,6 +23,7 @@ import (\n\"gvisor.dev/gvisor/pkg/context\"\n\"gvisor.dev/gvisor/pkg/fdnotifier\"\n\"gvisor.dev/gvisor/pkg/p9\"\n+ \"gvisor.dev/gvisor/pkg/refsvfs2\"\n\"gvisor.dev/gvisor/pkg/safemem\"\n\"gvisor.dev/gvisor/pkg/sentry/vfs\"\n\"gvisor.dev/gvisor/pkg/syserror\"\n@@ -53,9 +54,7 @@ func (fs *filesystem) PrepareSave(ctx context.Context) error {\n// Purge cached dentries, which may not be reopenable after restore due to\n// permission changes.\nfs.renameMu.Lock()\n- for fs.cachedDentriesLen != 0 {\n- fs.evictCachedDentryLocked(ctx)\n- }\n+ fs.evictAllCachedDentriesLocked(ctx)\nfs.renameMu.Unlock()\n// Buffer pipe data so that it's available for reading after restore. (This\n@@ -141,6 +140,9 @@ func (d *dentry) beforeSave() {\n// afterLoad is invoked by stateify.\nfunc (d *dentry) afterLoad() {\nd.hostFD = -1\n+ if refsvfs2.LeakCheckEnabled() && atomic.LoadInt64(&d.refs) != -1 {\n+ refsvfs2.Register(d, \"gofer.dentry\")\n+ }\n}\n// afterLoad is invoked by stateify.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/overlay/BUILD",
"new_path": "pkg/sentry/fsimpl/overlay/BUILD",
"diff": "@@ -23,6 +23,7 @@ go_library(\n\"fstree.go\",\n\"overlay.go\",\n\"regular_file.go\",\n+ \"save_restore.go\",\n],\nvisibility = [\"//pkg/sentry:internal\"],\ndeps = [\n@@ -30,6 +31,7 @@ go_library(\n\"//pkg/context\",\n\"//pkg/fspath\",\n\"//pkg/log\",\n+ \"//pkg/refsvfs2\",\n\"//pkg/sentry/arch\",\n\"//pkg/sentry/fs/lock\",\n\"//pkg/sentry/kernel/auth\",\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/overlay/overlay.go",
"new_path": "pkg/sentry/fsimpl/overlay/overlay.go",
"diff": "package overlay\nimport (\n+ \"fmt\"\n\"strings\"\n\"sync/atomic\"\n\"gvisor.dev/gvisor/pkg/abi/linux\"\n\"gvisor.dev/gvisor/pkg/context\"\n\"gvisor.dev/gvisor/pkg/fspath\"\n+ \"gvisor.dev/gvisor/pkg/refsvfs2\"\nfslock \"gvisor.dev/gvisor/pkg/sentry/fs/lock\"\n\"gvisor.dev/gvisor/pkg/sentry/kernel/auth\"\n\"gvisor.dev/gvisor/pkg/sentry/memmap\"\n@@ -484,6 +486,9 @@ func (fs *filesystem) newDentry() *dentry {\n}\nd.lowerVDs = d.inlineLowerVDs[:0]\nd.vfsd.Init(d)\n+ if refsvfs2.LeakCheckEnabled() {\n+ refsvfs2.Register(d, \"overlay.dentry\")\n+ }\nreturn d\n}\n@@ -583,6 +588,14 @@ func (d *dentry) destroyLocked(ctx context.Context) {\npanic(\"overlay.dentry.DecRef() called without holding a reference\")\n}\n}\n+ if refsvfs2.LeakCheckEnabled() {\n+ refsvfs2.Unregister(d, \"overlay.dentry\")\n+ }\n+}\n+\n+// LeakMessage implements refsvfs2.CheckedObject.LeakMessage.\n+func (d *dentry) LeakMessage() string {\n+ return fmt.Sprintf(\"[overlay.dentry %p] reference count of %d instead of -1\", d, atomic.LoadInt64(&d.refs))\n}\n// InotifyWithParent implements vfs.DentryImpl.InotifyWithParent.\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "pkg/sentry/fsimpl/overlay/save_restore.go",
"diff": "+// Copyright 2020 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package overlay\n+\n+import (\n+ \"sync/atomic\"\n+\n+ \"gvisor.dev/gvisor/pkg/refsvfs2\"\n+)\n+\n+func (d *dentry) afterLoad() {\n+ if refsvfs2.LeakCheckEnabled() && atomic.LoadInt64(&d.refs) != -1 {\n+ refsvfs2.Register(d, \"overlay.dentry\")\n+ }\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/verity/BUILD",
"new_path": "pkg/sentry/fsimpl/verity/BUILD",
"diff": "@@ -6,6 +6,7 @@ go_library(\nname = \"verity\",\nsrcs = [\n\"filesystem.go\",\n+ \"save_restore.go\",\n\"verity.go\",\n],\nvisibility = [\"//pkg/sentry:internal\"],\n@@ -15,6 +16,7 @@ go_library(\n\"//pkg/fspath\",\n\"//pkg/marshal/primitive\",\n\"//pkg/merkletree\",\n+ \"//pkg/refsvfs2\",\n\"//pkg/sentry/arch\",\n\"//pkg/sentry/fs/lock\",\n\"//pkg/sentry/kernel\",\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "pkg/sentry/fsimpl/verity/save_restore.go",
"diff": "+// Copyright 2020 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package verity\n+\n+import (\n+ \"sync/atomic\"\n+\n+ \"gvisor.dev/gvisor/pkg/refsvfs2\"\n+)\n+\n+func (d *dentry) afterLoad() {\n+ if refsvfs2.LeakCheckEnabled() && atomic.LoadInt64(&d.refs) != -1 {\n+ refsvfs2.Register(d, \"verity.dentry\")\n+ }\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/verity/verity.go",
"new_path": "pkg/sentry/fsimpl/verity/verity.go",
"diff": "@@ -31,6 +31,7 @@ import (\n\"gvisor.dev/gvisor/pkg/fspath\"\n\"gvisor.dev/gvisor/pkg/marshal/primitive\"\n\"gvisor.dev/gvisor/pkg/merkletree\"\n+ \"gvisor.dev/gvisor/pkg/refsvfs2\"\n\"gvisor.dev/gvisor/pkg/sentry/arch\"\nfslock \"gvisor.dev/gvisor/pkg/sentry/fs/lock\"\n\"gvisor.dev/gvisor/pkg/sentry/kernel\"\n@@ -331,6 +332,9 @@ func (fs *filesystem) newDentry() *dentry {\nfs: fs,\n}\nd.vfsd.Init(d)\n+ if refsvfs2.LeakCheckEnabled() {\n+ refsvfs2.Register(d, \"verity.dentry\")\n+ }\nreturn d\n}\n@@ -393,6 +397,9 @@ func (d *dentry) destroyLocked(ctx context.Context) {\nif d.lowerVD.Ok() {\nd.lowerVD.DecRef(ctx)\n}\n+ if refsvfs2.LeakCheckEnabled() {\n+ refsvfs2.Unregister(d, \"verity.dentry\")\n+ }\nif d.lowerMerkleVD.Ok() {\nd.lowerMerkleVD.DecRef(ctx)\n@@ -412,6 +419,11 @@ func (d *dentry) destroyLocked(ctx context.Context) {\n}\n}\n+// LeakMessage implements refsvfs2.CheckedObject.LeakMessage.\n+func (d *dentry) LeakMessage() string {\n+ return fmt.Sprintf(\"[verity.dentry %p] reference count of %d instead of -1\", d, atomic.LoadInt64(&d.refs))\n+}\n+\n// InotifyWithParent implements vfs.DentryImpl.InotifyWithParent.\nfunc (d *dentry) InotifyWithParent(ctx context.Context, events, cookie uint32, et vfs.EventType) {\n//TODO(b/159261227): Implement InotifyWithParent.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/vfs/mount.go",
"new_path": "pkg/sentry/vfs/mount.go",
"diff": "@@ -24,6 +24,7 @@ import (\n\"gvisor.dev/gvisor/pkg/abi/linux\"\n\"gvisor.dev/gvisor/pkg/context\"\n+ \"gvisor.dev/gvisor/pkg/refsvfs2\"\n\"gvisor.dev/gvisor/pkg/sentry/kernel/auth\"\n\"gvisor.dev/gvisor/pkg/syserror\"\n)\n@@ -106,6 +107,9 @@ func newMount(vfs *VirtualFilesystem, fs *Filesystem, root *Dentry, mntns *Mount\nif opts.ReadOnly {\nmnt.setReadOnlyLocked(true)\n}\n+ if refsvfs2.LeakCheckEnabled() {\n+ refsvfs2.Register(mnt, \"vfs.Mount\")\n+ }\nreturn mnt\n}\n@@ -489,8 +493,16 @@ func (mnt *Mount) IncRef() {\n// DecRef decrements mnt's reference count.\nfunc (mnt *Mount) DecRef(ctx context.Context) {\n- refs := atomic.AddInt64(&mnt.refs, -1)\n- if refs&^math.MinInt64 == 0 { // mask out MSB\n+ r := atomic.AddInt64(&mnt.refs, -1)\n+ if r&^math.MinInt64 == 0 { // mask out MSB\n+ if refsvfs2.LeakCheckEnabled() {\n+ refsvfs2.Unregister(mnt, \"vfs.Mount\")\n+ }\n+ mnt.destroy(ctx)\n+ }\n+}\n+\n+func (mnt *Mount) destroy(ctx context.Context) {\nvar vd VirtualDentry\nif mnt.parent() != nil {\nmnt.vfs.mountMu.Lock()\n@@ -507,6 +519,10 @@ func (mnt *Mount) DecRef(ctx context.Context) {\nvd.DecRef(ctx)\n}\n}\n+\n+// LeakMessage implements refsvfs2.CheckedObject.LeakMessage.\n+func (mnt *Mount) LeakMessage() string {\n+ return fmt.Sprintf(\"[vfs.Mount %p] reference count of %d instead of 0\", mnt, atomic.LoadInt64(&mnt.refs))\n}\n// DecRef decrements mntns' reference count.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/vfs/save_restore.go",
"new_path": "pkg/sentry/vfs/save_restore.go",
"diff": "@@ -19,6 +19,7 @@ import (\n\"sync/atomic\"\n\"gvisor.dev/gvisor/pkg/context\"\n+ \"gvisor.dev/gvisor/pkg/refsvfs2\"\n)\n// FilesystemImplSaveRestoreExtension is an optional extension to\n@@ -109,6 +110,12 @@ func (vfs *VirtualFilesystem) loadMounts(mounts []*Mount) {\n}\n}\n+func (mnt *Mount) afterLoad() {\n+ if refsvfs2.LeakCheckEnabled() && atomic.LoadInt64(&mnt.refs) != 0 {\n+ refsvfs2.Register(mnt, \"vfs.Mount\")\n+ }\n+}\n+\n// afterLoad is called by stateify.\nfunc (epi *epollInterest) afterLoad() {\n// Mark all epollInterests as ready after restore so that the next call to\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add leak checking to vfs2 structures that cannot use the refs_vfs2 template.
Updates #1486.
PiperOrigin-RevId: 338832085 |