Dataset columns (name, type, observed range):
  author            int64          658 to 755k
  date              stringlengths  19 to 19
  timezone          int64          -46,800 to 43.2k
  hash              stringlengths  40 to 40
  message           stringlengths  5 to 490
  mods              list
  language          stringclasses  20 values
  license           stringclasses  3 values
  repo              stringlengths  5 to 68
  original_message  stringlengths  12 to 491
259,907
18.08.2020 10:20:17
25,200
b9d87ae7af7b86e5480ef66d2a4d674646922ed7
[vfs2] Implement /proc/sys/net/ipv4/tcp_rmem and /proc/sys/net/ipv4/tcp_wmem. Updates
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/proc/BUILD", "new_path": "pkg/sentry/fsimpl/proc/BUILD", "diff": "@@ -36,6 +36,7 @@ go_library(\n\"//pkg/sentry/socket/unix/transport\",\n\"//pkg/sentry/usage\",\n\"//pkg/sentry/vfs\",\n+ \"//pkg/sync\",\n\"//pkg/syserror\",\n\"//pkg/tcpip/header\",\n\"//pkg/usermem\",\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/proc/tasks_sys.go", "new_path": "pkg/sentry/fsimpl/proc/tasks_sys.go", "diff": "@@ -25,10 +25,18 @@ import (\n\"gvisor.dev/gvisor/pkg/sentry/kernel\"\n\"gvisor.dev/gvisor/pkg/sentry/kernel/auth\"\n\"gvisor.dev/gvisor/pkg/sentry/vfs\"\n+ \"gvisor.dev/gvisor/pkg/sync\"\n\"gvisor.dev/gvisor/pkg/syserror\"\n\"gvisor.dev/gvisor/pkg/usermem\"\n)\n+type tcpMemDir int\n+\n+const (\n+ tcpRMem tcpMemDir = iota\n+ tcpWMem\n+)\n+\n// newSysDir returns the dentry corresponding to /proc/sys directory.\nfunc (fs *filesystem) newSysDir(root *auth.Credentials, k *kernel.Kernel) *kernfs.Dentry {\nreturn kernfs.NewStaticDir(root, linux.UNNAMED_MAJOR, fs.devMinor, fs.NextIno(), 0555, map[string]*kernfs.Dentry{\n@@ -56,7 +64,9 @@ func (fs *filesystem) newSysNetDir(root *auth.Credentials, k *kernel.Kernel) *ke\ncontents = map[string]*kernfs.Dentry{\n\"ipv4\": kernfs.NewStaticDir(root, linux.UNNAMED_MAJOR, fs.devMinor, fs.NextIno(), 0555, map[string]*kernfs.Dentry{\n\"tcp_recovery\": fs.newDentry(root, fs.NextIno(), 0644, &tcpRecoveryData{stack: stack}),\n+ \"tcp_rmem\": fs.newDentry(root, fs.NextIno(), 0644, &tcpMemData{stack: stack, dir: tcpRMem}),\n\"tcp_sack\": fs.newDentry(root, fs.NextIno(), 0644, &tcpSackData{stack: stack}),\n+ \"tcp_wmem\": fs.newDentry(root, fs.NextIno(), 0644, &tcpMemData{stack: stack, dir: tcpWMem}),\n// The following files are simple stubs until they are implemented in\n// netstack, most of these files are configuration related. 
We use the\n@@ -181,10 +191,11 @@ func (d *tcpSackData) Generate(ctx context.Context, buf *bytes.Buffer) error {\n// Tough luck.\nval = \"1\\n\"\n}\n- buf.WriteString(val)\n- return nil\n+ _, err := buf.WriteString(val)\n+ return err\n}\n+// Write implements vfs.WritableDynamicBytesSource.Write.\nfunc (d *tcpSackData) Write(ctx context.Context, src usermem.IOSequence, offset int64) (int64, error) {\nif offset != 0 {\n// No need to handle partial writes thus far.\n@@ -200,7 +211,7 @@ func (d *tcpSackData) Write(ctx context.Context, src usermem.IOSequence, offset\nvar v int32\nn, err := usermem.CopyInt32StringInVec(ctx, src.IO, src.Addrs, &v, src.Opts)\nif err != nil {\n- return n, err\n+ return 0, err\n}\nif d.enabled == nil {\nd.enabled = new(bool)\n@@ -228,10 +239,11 @@ func (d *tcpRecoveryData) Generate(ctx context.Context, buf *bytes.Buffer) error\nreturn err\n}\n- buf.WriteString(fmt.Sprintf(\"%d\\n\", recovery))\n- return nil\n+ _, err = buf.WriteString(fmt.Sprintf(\"%d\\n\", recovery))\n+ return err\n}\n+// Write implements vfs.WritableDynamicBytesSource.Write.\nfunc (d *tcpRecoveryData) Write(ctx context.Context, src usermem.IOSequence, offset int64) (int64, error) {\nif offset != 0 {\n// No need to handle partial writes thus far.\n@@ -254,3 +266,91 @@ func (d *tcpRecoveryData) Write(ctx context.Context, src usermem.IOSequence, off\n}\nreturn n, nil\n}\n+\n+// tcpMemData implements vfs.WritableDynamicBytesSource for\n+// /proc/sys/net/ipv4/tcp_rmem and /proc/sys/net/ipv4/tcp_wmem.\n+//\n+// +stateify savable\n+type tcpMemData struct {\n+ kernfs.DynamicBytesFile\n+\n+ dir tcpMemDir\n+ stack inet.Stack `state:\"wait\"`\n+\n+ // mu protects against concurrent reads/writes to FDs based on the dentry\n+ // backing this byte source.\n+ mu sync.Mutex `state:\"nosave\"`\n+}\n+\n+var _ vfs.WritableDynamicBytesSource = (*tcpMemData)(nil)\n+\n+// Generate implements vfs.DynamicBytesSource.\n+func (d *tcpMemData) Generate(ctx context.Context, buf *bytes.Buffer) error {\n+ d.mu.Lock()\n+ defer d.mu.Unlock()\n+\n+ size, err := d.readSizeLocked()\n+ if err != nil {\n+ return err\n+ }\n+ _, err = buf.WriteString(fmt.Sprintf(\"%d\\t%d\\t%d\\n\", size.Min, size.Default, size.Max))\n+ return err\n+}\n+\n+// Write implements vfs.WritableDynamicBytesSource.Write.\n+func (d *tcpMemData) Write(ctx context.Context, src usermem.IOSequence, offset int64) (int64, error) {\n+ if offset != 0 {\n+ // No need to handle partial writes thus far.\n+ return 0, syserror.EINVAL\n+ }\n+ if src.NumBytes() == 0 {\n+ return 0, nil\n+ }\n+ d.mu.Lock()\n+ defer d.mu.Unlock()\n+\n+ // Limit the amount of memory allocated.\n+ src = src.TakeFirst(usermem.PageSize - 1)\n+ size, err := d.readSizeLocked()\n+ if err != nil {\n+ return 0, err\n+ }\n+ buf := []int32{int32(size.Min), int32(size.Default), int32(size.Max)}\n+ n, err := usermem.CopyInt32StringsInVec(ctx, src.IO, src.Addrs, buf, src.Opts)\n+ if err != nil {\n+ return 0, err\n+ }\n+ newSize := inet.TCPBufferSize{\n+ Min: int(buf[0]),\n+ Default: int(buf[1]),\n+ Max: int(buf[2]),\n+ }\n+ if err := d.writeSizeLocked(newSize); err != nil {\n+ return 0, err\n+ }\n+ return n, nil\n+}\n+\n+// Precondition: d.mu must be locked.\n+func (d *tcpMemData) readSizeLocked() (inet.TCPBufferSize, error) {\n+ switch d.dir {\n+ case tcpRMem:\n+ return d.stack.TCPReceiveBufferSize()\n+ case tcpWMem:\n+ return d.stack.TCPSendBufferSize()\n+ default:\n+ panic(fmt.Sprintf(\"unknown tcpMemFile type: %v\", d.dir))\n+ }\n+}\n+\n+// Precondition: d.mu must be locked.\n+func (d *tcpMemData) 
writeSizeLocked(size inet.TCPBufferSize) error {\n+ switch d.dir {\n+ case tcpRMem:\n+ return d.stack.SetTCPReceiveBufferSize(size)\n+ case tcpWMem:\n+ return d.stack.SetTCPSendBufferSize(size)\n+ default:\n+ panic(fmt.Sprintf(\"unknown tcpMemFile type: %v\", d.dir))\n+ }\n+}\n" } ]
Go
Apache License 2.0
google/gvisor
[vfs2] Implement /proc/sys/net/ipv4/tcp_rmem and /proc/sys/net/ipv4/tcp_wmem. Updates #1035 PiperOrigin-RevId: 327253907
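The commit above serializes readers and writers of tcp_rmem/tcp_wmem with a mutex and does a read-modify-write of the three buffer sizes on each write. Below is a minimal, standalone Go sketch of that shape; memFile, bufferSize and the string-based Write are illustrative simplifications, not gVisor's kernfs/usermem API.

```go
package main

import (
	"fmt"
	"strings"
	"sync"
)

// bufferSize mirrors the three-value layout of tcp_rmem/tcp_wmem:
// minimum, default and maximum buffer sizes in bytes.
type bufferSize struct {
	Min, Default, Max int
}

// memFile guards concurrent readers and writers of one tunable with a
// mutex, the way tcpMemData serializes access to the stack's settings.
type memFile struct {
	mu   sync.Mutex
	size bufferSize
}

// Generate renders the current values as "min<TAB>default<TAB>max\n".
func (f *memFile) Generate() string {
	f.mu.Lock()
	defer f.mu.Unlock()
	return fmt.Sprintf("%d\t%d\t%d\n", f.size.Min, f.size.Default, f.size.Max)
}

// Write parses up to three whitespace-separated integers, keeping the old
// value for any field the caller did not supply (a read-modify-write).
func (f *memFile) Write(s string) error {
	f.mu.Lock()
	defer f.mu.Unlock()
	vals := []*int{&f.size.Min, &f.size.Default, &f.size.Max}
	for i, field := range strings.Fields(s) {
		if i >= len(vals) {
			break
		}
		if _, err := fmt.Sscanf(field, "%d", vals[i]); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	f := &memFile{size: bufferSize{Min: 4096, Default: 87380, Max: 6291456}}
	fmt.Print(f.Generate())    // prints 4096, 87380, 6291456 separated by tabs
	_ = f.Write("8192 131072") // updates min and default; max is preserved
	fmt.Print(f.Generate())
}
```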
259,885
18.08.2020 12:30:10
25,200
b3141b680ffc0e4ad87b3cba5ca28e3e8d5b1db1
Wait for all p9 handlers to complete before server shutdown. ... including those invoked via flipcall.
[ { "change_type": "MODIFY", "old_path": "pkg/p9/server.go", "new_path": "pkg/p9/server.go", "diff": "@@ -87,6 +87,9 @@ type connState struct {\n// version 0 implies 9P2000.L.\nversion uint32\n+ // pendingWg counts requests that are still being handled.\n+ pendingWg sync.WaitGroup\n+\n// -- below relates to the legacy handler --\n// recvOkay indicates that a receive may start.\n@@ -479,7 +482,9 @@ func (cs *connState) lookupChannel(id uint32) *channel {\n// handle handles a single message.\nfunc (cs *connState) handle(m message) (r message) {\n+ cs.pendingWg.Add(1)\ndefer func() {\n+ cs.pendingWg.Done()\nif r == nil {\n// Don't allow a panic to propagate.\nerr := recover()\n@@ -568,6 +573,11 @@ func (cs *connState) handleRequests() {\n}\nfunc (cs *connState) stop() {\n+ // Wait for completion of all inflight requests. This is mostly so that if\n+ // a request is stuck, the sandbox supervisor has the opportunity to kill\n+ // us with SIGABRT to get a stack dump of the offending handler.\n+ cs.pendingWg.Wait()\n+\n// Close all channels.\nclose(cs.recvOkay)\nclose(cs.recvDone)\n@@ -606,11 +616,6 @@ func (cs *connState) stop() {\n// service services requests concurrently.\nfunc (cs *connState) service() error {\n- // Pending is the number of handlers that have finished receiving but\n- // not finished processing requests. These must be waiting on properly\n- // below. See the next comment for an explanation of the loop.\n- pending := 0\n-\n// Start the first request handler.\ngo cs.handleRequests() // S/R-SAFE: Irrelevant.\ncs.recvOkay <- true\n@@ -622,16 +627,9 @@ func (cs *connState) service() error {\nselect {\ncase err := <-cs.recvDone:\nif err != nil {\n- // Wait for pending handlers.\n- for i := 0; i < pending; i++ {\n- <-cs.sendDone\n- }\n- return nil\n+ return err\n}\n- // This handler is now pending.\n- pending++\n-\n// Kick the next receiver, or start a new handler\n// if no receiver is currently waiting.\nselect {\n@@ -642,9 +640,6 @@ func (cs *connState) service() error {\n}\ncase <-cs.sendDone:\n- // This handler is finished.\n- pending--\n-\n// Error sending a response? Nothing can be done.\n//\n// We don't terminate on a send error though, since\n" } ]
Go
Apache License 2.0
google/gvisor
Wait for all p9 handlers to complete before server shutdown. ... including those invoked via flipcall. PiperOrigin-RevId: 327283194
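The fix tracks in-flight requests with a sync.WaitGroup and makes shutdown wait on it. A small self-contained sketch of that idiom follows; the names are illustrative, not the real p9 server types.

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// connState mimics the pattern from the commit: every request that starts
// being handled registers with pendingWg, and stop() waits for all of them
// before tearing down connection state.
type connState struct {
	pendingWg sync.WaitGroup
}

// handle registers the request as pending, then processes it asynchronously.
func (cs *connState) handle(id int) {
	cs.pendingWg.Add(1)
	go func() {
		defer cs.pendingWg.Done()
		time.Sleep(10 * time.Millisecond) // stand-in for real message handling
		fmt.Println("handled request", id)
	}()
}

// stop blocks until every in-flight handler has finished. Waiting here means
// a stuck handler keeps the connection alive, so a supervisor can still grab
// a stack dump of it (the motivation given in the commit).
func (cs *connState) stop() {
	cs.pendingWg.Wait()
	fmt.Println("all handlers done, closing connection")
}

func main() {
	cs := &connState{}
	for i := 0; i < 3; i++ {
		cs.handle(i)
	}
	cs.stop()
}
```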
259,975
18.08.2020 12:55:06
25,200
4141dc0d2c8c46b14dbae83aab304fa338ebafc5
Fix timeval for Socket test tv_usec field should be a multiple of 4K to pass in open source on linux/native, so make it one.
[ { "change_type": "MODIFY", "old_path": "test/syscalls/linux/socket_generic.cc", "new_path": "test/syscalls/linux/socket_generic.cc", "diff": "@@ -463,7 +463,7 @@ TEST_P(AllSocketPairTest, SetGetSendTimeout) {\nauto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());\n// tv_usec should be a multiple of 4000 to work on most systems.\n- timeval tv = {.tv_sec = 89, .tv_usec = 42000};\n+ timeval tv = {.tv_sec = 89, .tv_usec = 44000};\nEXPECT_THAT(\nsetsockopt(sockets->first_fd(), SOL_SOCKET, SO_SNDTIMEO, &tv, sizeof(tv)),\nSyscallSucceeds());\n" } ]
Go
Apache License 2.0
google/gvisor
Fix timeval for Socket test tv_usec field should be a multiple of 4K to pass in open source on linux/native, so make it one. PiperOrigin-RevId: 327288405
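The one-line fix relies on socket timeouts being stored at coarse kernel-timer granularity, so only tv_usec values that are a whole number of ticks survive a setsockopt/getsockopt round trip unchanged. The sketch below models that effect; the 4 ms tick (a 250 HZ kernel) and the truncating rounding direction are assumptions for illustration, not something the diff states beyond its own comment.

```go
package main

import (
	"fmt"
	"time"
)

// roundToTick models a requested timeout collapsing onto the kernel's timer
// granularity. The exact rounding direction varies; what matters is that a
// value already on a tick boundary (44000µs with a 4ms tick) is unchanged.
func roundToTick(d, tick time.Duration) time.Duration {
	return d.Truncate(tick)
}

func main() {
	const tick = 4 * time.Millisecond // assumed 250 HZ kernel
	original := 89*time.Second + 42*time.Millisecond
	updated := 89*time.Second + 44*time.Millisecond
	fmt.Println(original, "->", roundToTick(original, tick)) // 1m29.042s -> 1m29.04s
	fmt.Println(updated, "->", roundToTick(updated, tick))   // 1m29.044s -> 1m29.044s
}
```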
259,992
18.08.2020 13:57:03
25,200
760c131da17250b6adbb8d08dd52e9a3d652b2c1
Return EROFS if mount is read-only
[ { "change_type": "MODIFY", "old_path": "runsc/fsgofer/fsgofer.go", "new_path": "runsc/fsgofer/fsgofer.go", "diff": "@@ -46,6 +46,8 @@ const (\ninvalidMode = p9.OpenFlags(math.MaxUint32)\nopenFlags = syscall.O_NOFOLLOW | syscall.O_CLOEXEC\n+\n+ allowedOpenFlags = unix.O_TRUNC\n)\n// Config sets configuration options for each attach point.\n@@ -357,10 +359,16 @@ func (l *localFile) Open(flags p9.OpenFlags) (*fd.FD, p9.QID, uint32, error) {\nif l.isOpen() {\npanic(fmt.Sprintf(\"attempting to open already opened file: %q\", l.hostPath))\n}\n+ mode := flags & p9.OpenFlagsModeMask\n+ if mode == p9.WriteOnly || mode == p9.ReadWrite || flags&p9.OpenTruncate != 0 {\n+ if err := l.checkROMount(); err != nil {\n+ return nil, p9.QID{}, 0, err\n+ }\n+ }\n// Check if control file can be used or if a new open must be created.\nvar newFile *fd.FD\n- if flags == p9.ReadOnly && l.controlReadable {\n+ if mode == p9.ReadOnly && l.controlReadable && flags.OSFlags()&allowedOpenFlags == 0 {\nlog.Debugf(\"Open reusing control file, flags: %v, %q\", flags, l.hostPath)\nnewFile = l.file\n} else {\n@@ -369,8 +377,8 @@ func (l *localFile) Open(flags p9.OpenFlags) (*fd.FD, p9.QID, uint32, error) {\n// name_to_handle_at and open_by_handle_at aren't supported by overlay2.\nlog.Debugf(\"Open reopening file, flags: %v, %q\", flags, l.hostPath)\nvar err error\n- // Constrain open flags to the open mode and O_TRUNC.\n- newFile, err = reopenProcFd(l.file, openFlags|(flags.OSFlags()&(syscall.O_ACCMODE|syscall.O_TRUNC)))\n+ osFlags := flags.OSFlags() & (syscall.O_ACCMODE | allowedOpenFlags)\n+ newFile, err = reopenProcFd(l.file, openFlags|osFlags)\nif err != nil {\nreturn nil, p9.QID{}, 0, extractErrno(err)\n}\n@@ -389,31 +397,31 @@ func (l *localFile) Open(flags p9.OpenFlags) (*fd.FD, p9.QID, uint32, error) {\n}\nl.file = newFile\n}\n- l.mode = flags & p9.OpenFlagsModeMask\n+ l.mode = mode\nreturn fd, l.qid, 0, nil\n}\n// Create implements p9.File.\n-func (l *localFile) Create(name string, mode p9.OpenFlags, perm p9.FileMode, uid p9.UID, gid p9.GID) (*fd.FD, p9.File, p9.QID, uint32, error) {\n- conf := l.attachPoint.conf\n- if conf.ROMount {\n- if conf.PanicOnWrite {\n- panic(\"attempt to write to RO mount\")\n- }\n- return nil, nil, p9.QID{}, 0, syscall.EBADF\n+func (l *localFile) Create(name string, p9Flags p9.OpenFlags, perm p9.FileMode, uid p9.UID, gid p9.GID) (*fd.FD, p9.File, p9.QID, uint32, error) {\n+ if err := l.checkROMount(); err != nil {\n+ return nil, nil, p9.QID{}, 0, err\n}\n+ // Set file creation flags, plus allowed open flags from caller.\n+ osFlags := openFlags | syscall.O_CREAT | syscall.O_EXCL\n+ osFlags |= p9Flags.OSFlags() & allowedOpenFlags\n+\n// 'file' may be used for other operations (e.g. Walk), so read access is\n// always added to flags. 
Note that resulting file might have a wider mode\n// than needed for each particular case.\n- flags := openFlags | syscall.O_CREAT | syscall.O_EXCL\n+ mode := p9Flags & p9.OpenFlagsModeMask\nif mode == p9.WriteOnly {\n- flags |= syscall.O_RDWR\n+ osFlags |= syscall.O_RDWR\n} else {\n- flags |= mode.OSFlags()\n+ osFlags |= mode.OSFlags() & unix.O_ACCMODE\n}\n- child, err := fd.OpenAt(l.file, name, flags, uint32(perm.Permissions()))\n+ child, err := fd.OpenAt(l.file, name, osFlags, uint32(perm.Permissions()))\nif err != nil {\nreturn nil, nil, p9.QID{}, 0, extractErrno(err)\n}\n@@ -449,12 +457,8 @@ func (l *localFile) Create(name string, mode p9.OpenFlags, perm p9.FileMode, uid\n// Mkdir implements p9.File.\nfunc (l *localFile) Mkdir(name string, perm p9.FileMode, uid p9.UID, gid p9.GID) (p9.QID, error) {\n- conf := l.attachPoint.conf\n- if conf.ROMount {\n- if conf.PanicOnWrite {\n- panic(\"attempt to write to RO mount\")\n- }\n- return p9.QID{}, syscall.EBADF\n+ if err := l.checkROMount(); err != nil {\n+ return p9.QID{}, err\n}\nif err := syscall.Mkdirat(l.file.FD(), name, uint32(perm.Permissions())); err != nil {\n@@ -637,12 +641,8 @@ func (l *localFile) fillAttr(stat syscall.Stat_t) (p9.AttrMask, p9.Attr) {\n// cannot be changed atomically and user may see partial changes when\n// an error happens.\nfunc (l *localFile) SetAttr(valid p9.SetAttrMask, attr p9.SetAttr) error {\n- conf := l.attachPoint.conf\n- if conf.ROMount {\n- if conf.PanicOnWrite {\n- panic(\"attempt to write to RO mount\")\n- }\n- return syscall.EBADF\n+ if err := l.checkROMount(); err != nil {\n+ return err\n}\nallowed := p9.SetAttrMask{\n@@ -804,12 +804,8 @@ func (*localFile) Rename(p9.File, string) error {\n// RenameAt implements p9.File.RenameAt.\nfunc (l *localFile) RenameAt(oldName string, directory p9.File, newName string) error {\n- conf := l.attachPoint.conf\n- if conf.ROMount {\n- if conf.PanicOnWrite {\n- panic(\"attempt to write to RO mount\")\n- }\n- return syscall.EBADF\n+ if err := l.checkROMount(); err != nil {\n+ return err\n}\nnewParent := directory.(*localFile)\n@@ -855,12 +851,8 @@ func (l *localFile) WriteAt(p []byte, offset uint64) (int, error) {\n// Symlink implements p9.File.\nfunc (l *localFile) Symlink(target, newName string, uid p9.UID, gid p9.GID) (p9.QID, error) {\n- conf := l.attachPoint.conf\n- if conf.ROMount {\n- if conf.PanicOnWrite {\n- panic(\"attempt to write to RO mount\")\n- }\n- return p9.QID{}, syscall.EBADF\n+ if err := l.checkROMount(); err != nil {\n+ return p9.QID{}, err\n}\nif err := unix.Symlinkat(target, l.file.FD(), newName); err != nil {\n@@ -895,12 +887,8 @@ func (l *localFile) Symlink(target, newName string, uid p9.UID, gid p9.GID) (p9.\n// Link implements p9.File.\nfunc (l *localFile) Link(target p9.File, newName string) error {\n- conf := l.attachPoint.conf\n- if conf.ROMount {\n- if conf.PanicOnWrite {\n- panic(\"attempt to write to RO mount\")\n- }\n- return syscall.EBADF\n+ if err := l.checkROMount(); err != nil {\n+ return err\n}\ntargetFile := target.(*localFile)\n@@ -912,12 +900,8 @@ func (l *localFile) Link(target p9.File, newName string) error {\n// Mknod implements p9.File.\nfunc (l *localFile) Mknod(name string, mode p9.FileMode, _ uint32, _ uint32, _ p9.UID, _ p9.GID) (p9.QID, error) {\n- conf := l.attachPoint.conf\n- if conf.ROMount {\n- if conf.PanicOnWrite {\n- panic(\"attempt to write to RO mount\")\n- }\n- return p9.QID{}, syscall.EROFS\n+ if err := l.checkROMount(); err != nil {\n+ return p9.QID{}, err\n}\nhostPath := path.Join(l.hostPath, name)\n@@ 
-948,12 +932,8 @@ func (l *localFile) Mknod(name string, mode p9.FileMode, _ uint32, _ uint32, _ p\n// UnlinkAt implements p9.File.\nfunc (l *localFile) UnlinkAt(name string, flags uint32) error {\n- conf := l.attachPoint.conf\n- if conf.ROMount {\n- if conf.PanicOnWrite {\n- panic(\"attempt to write to RO mount\")\n- }\n- return syscall.EBADF\n+ if err := l.checkROMount(); err != nil {\n+ return err\n}\nif err := unix.Unlinkat(l.file.FD(), name, int(flags)); err != nil {\n@@ -1178,3 +1158,13 @@ func extractErrno(err error) syscall.Errno {\nlog.Debugf(\"Unknown error: %v, defaulting to EIO\", err)\nreturn syscall.EIO\n}\n+\n+func (l *localFile) checkROMount() error {\n+ if conf := l.attachPoint.conf; conf.ROMount {\n+ if conf.PanicOnWrite {\n+ panic(\"attempt to write to RO mount\")\n+ }\n+ return syscall.EROFS\n+ }\n+ return nil\n+}\n" }, { "change_type": "MODIFY", "old_path": "runsc/fsgofer/fsgofer_test.go", "new_path": "runsc/fsgofer/fsgofer_test.go", "diff": "@@ -491,30 +491,50 @@ func TestLink(t *testing.T) {\n}\nfunc TestROMountChecks(t *testing.T) {\n+ const want = syscall.EROFS\nrunCustom(t, allTypes, roConfs, func(t *testing.T, s state) {\n- if _, _, _, _, err := s.file.Create(\"some_file\", p9.ReadWrite, 0777, p9.UID(os.Getuid()), p9.GID(os.Getgid())); err != syscall.EBADF {\n- t.Errorf(\"%v: Create() should have failed, got: %v, expected: syscall.EBADF\", s, err)\n+ if s.fileType != syscall.S_IFLNK {\n+ if _, _, _, err := s.file.Open(p9.WriteOnly); err != want {\n+ t.Errorf(\"Open() should have failed, got: %v, expected: %v\", err, want)\n}\n- if _, err := s.file.Mkdir(\"some_dir\", 0777, p9.UID(os.Getuid()), p9.GID(os.Getgid())); err != syscall.EBADF {\n- t.Errorf(\"%v: MkDir() should have failed, got: %v, expected: syscall.EBADF\", s, err)\n+ if _, _, _, err := s.file.Open(p9.ReadWrite); err != want {\n+ t.Errorf(\"Open() should have failed, got: %v, expected: %v\", err, want)\n}\n- if err := s.file.RenameAt(\"some_file\", s.file, \"other_file\"); err != syscall.EBADF {\n- t.Errorf(\"%v: Rename() should have failed, got: %v, expected: syscall.EBADF\", s, err)\n+ if _, _, _, err := s.file.Open(p9.ReadOnly | p9.OpenTruncate); err != want {\n+ t.Errorf(\"Open() should have failed, got: %v, expected: %v\", err, want)\n}\n- if _, err := s.file.Symlink(\"some_place\", \"some_symlink\", p9.UID(os.Getuid()), p9.GID(os.Getgid())); err != syscall.EBADF {\n- t.Errorf(\"%v: Symlink() should have failed, got: %v, expected: syscall.EBADF\", s, err)\n+ f, _, _, err := s.file.Open(p9.ReadOnly)\n+ if err != nil {\n+ t.Errorf(\"Open() failed: %v\", err)\n+ }\n+ if f != nil {\n+ _ = f.Close()\n+ }\n+ }\n+\n+ if _, _, _, _, err := s.file.Create(\"some_file\", p9.ReadWrite, 0777, p9.UID(os.Getuid()), p9.GID(os.Getgid())); err != want {\n+ t.Errorf(\"Create() should have failed, got: %v, expected: %v\", err, want)\n+ }\n+ if _, err := s.file.Mkdir(\"some_dir\", 0777, p9.UID(os.Getuid()), p9.GID(os.Getgid())); err != want {\n+ t.Errorf(\"MkDir() should have failed, got: %v, expected: %v\", err, want)\n+ }\n+ if err := s.file.RenameAt(\"some_file\", s.file, \"other_file\"); err != want {\n+ t.Errorf(\"Rename() should have failed, got: %v, expected: %v\", err, want)\n+ }\n+ if _, err := s.file.Symlink(\"some_place\", \"some_symlink\", p9.UID(os.Getuid()), p9.GID(os.Getgid())); err != want {\n+ t.Errorf(\"Symlink() should have failed, got: %v, expected: %v\", err, want)\n}\n- if err := s.file.UnlinkAt(\"some_file\", 0); err != syscall.EBADF {\n- t.Errorf(\"%v: UnlinkAt() should have failed, got: %v, 
expected: syscall.EBADF\", s, err)\n+ if err := s.file.UnlinkAt(\"some_file\", 0); err != want {\n+ t.Errorf(\"UnlinkAt() should have failed, got: %v, expected: %v\", err, want)\n}\n- if err := s.file.Link(s.file, \"some_link\"); err != syscall.EBADF {\n- t.Errorf(\"%v: Link() should have failed, got: %v, expected: syscall.EBADF\", s, err)\n+ if err := s.file.Link(s.file, \"some_link\"); err != want {\n+ t.Errorf(\"Link() should have failed, got: %v, expected: %v\", err, want)\n}\nvalid := p9.SetAttrMask{Size: true}\nattr := p9.SetAttr{Size: 0}\n- if err := s.file.SetAttr(valid, attr); err != syscall.EBADF {\n- t.Errorf(\"%v: SetAttr() should have failed, got: %v, expected: syscall.EBADF\", s, err)\n+ if err := s.file.SetAttr(valid, attr); err != want {\n+ t.Errorf(\"SetAttr() should have failed, got: %v, expected: %v\", err, want)\n}\n})\n}\n@@ -522,6 +542,9 @@ func TestROMountChecks(t *testing.T) {\nfunc TestROMountPanics(t *testing.T) {\nconf := Config{ROMount: true, PanicOnWrite: true}\nrunCustom(t, allTypes, []Config{conf}, func(t *testing.T, s state) {\n+ if s.fileType != syscall.S_IFLNK {\n+ assertPanic(t, func() { s.file.Open(p9.WriteOnly) })\n+ }\nassertPanic(t, func() { s.file.Create(\"some_file\", p9.ReadWrite, 0777, p9.UID(os.Getuid()), p9.GID(os.Getgid())) })\nassertPanic(t, func() { s.file.Mkdir(\"some_dir\", 0777, p9.UID(os.Getuid()), p9.GID(os.Getgid())) })\nassertPanic(t, func() { s.file.RenameAt(\"some_file\", s.file, \"other_file\") })\n@@ -741,3 +764,36 @@ func TestDoubleAttachError(t *testing.T) {\nt.Fatalf(\"Attach should have failed, got %v want non-nil\", err)\n}\n}\n+\n+func TestTruncate(t *testing.T) {\n+ runCustom(t, []uint32{syscall.S_IFDIR}, rwConfs, func(t *testing.T, s state) {\n+ child, err := createFile(s.file, \"test\")\n+ if err != nil {\n+ t.Fatalf(\"createFile() failed, err: %v\", err)\n+ }\n+ defer child.Close()\n+ want := []byte(\"foobar\")\n+ w, err := child.WriteAt(want, 0)\n+ if err != nil {\n+ t.Fatalf(\"Write() failed, err: %v\", err)\n+ }\n+ if w != len(want) {\n+ t.Fatalf(\"Write() was partial, got: %d, expected: %d\", w, len(want))\n+ }\n+\n+ _, l, err := s.file.Walk([]string{\"test\"})\n+ if err != nil {\n+ t.Fatalf(\"Walk(%s) failed, err: %v\", \"test\", err)\n+ }\n+ if _, _, _, err := l.Open(p9.ReadOnly | p9.OpenTruncate); err != nil {\n+ t.Fatalf(\"Open() failed, err: %v\", err)\n+ }\n+ _, mask, attr, err := l.GetAttr(p9.AttrMask{Size: true})\n+ if !mask.Size {\n+ t.Fatalf(\"GetAttr() didn't return size: %+v\", mask)\n+ }\n+ if attr.Size != 0 {\n+ t.Fatalf(\"truncate didn't work, want: 0, got: %d\", attr.Size)\n+ }\n+ })\n+}\n" } ]
Go
Apache License 2.0
google/gvisor
Return EROFS if mount is read-only PiperOrigin-RevId: 327300635
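The change replaces repeated copies of the read-only check (which previously returned EBADF) with a single checkROMount helper that returns EROFS. A condensed, standalone sketch of that consolidation; attachConfig and localFile are simplified stand-ins for the fsgofer types.

```go
package main

import (
	"fmt"
	"syscall"
)

// attachConfig stands in for the gofer's per-attach-point configuration.
type attachConfig struct {
	ROMount      bool
	PanicOnWrite bool
}

type localFile struct {
	conf attachConfig
}

// checkROMount centralizes the read-only check that was previously copied
// into every mutating operation: panic if configured to, otherwise report
// EROFS (instead of the misleading EBADF used before this change).
func (l *localFile) checkROMount() error {
	if l.conf.ROMount {
		if l.conf.PanicOnWrite {
			panic("attempt to write to RO mount")
		}
		return syscall.EROFS
	}
	return nil
}

// Mkdir shows how each write path reduces to a single guard call.
func (l *localFile) Mkdir(name string) error {
	if err := l.checkROMount(); err != nil {
		return err
	}
	fmt.Println("would mkdir", name)
	return nil
}

func main() {
	ro := &localFile{conf: attachConfig{ROMount: true}}
	fmt.Println(ro.Mkdir("dir")) // read-only file system
	rw := &localFile{}
	fmt.Println(rw.Mkdir("dir")) // would mkdir dir, then <nil>
}
```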
259,885
18.08.2020 14:34:15
25,200
6405525b046bb82a39e3338e61e41690133c43c1
Avoid holding locks when opening files in VFS2. Fixes
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/gofer/filesystem.go", "new_path": "pkg/sentry/fsimpl/gofer/filesystem.go", "diff": "@@ -834,7 +834,14 @@ func (fs *filesystem) OpenAt(ctx context.Context, rp *vfs.ResolvingPath, opts vf\nvar ds *[]*dentry\nfs.renameMu.RLock()\n- defer fs.renameMuRUnlockAndCheckCaching(ctx, &ds)\n+ unlocked := false\n+ unlock := func() {\n+ if !unlocked {\n+ fs.renameMuRUnlockAndCheckCaching(ctx, &ds)\n+ unlocked = true\n+ }\n+ }\n+ defer unlock()\nstart := rp.Start().Impl().(*dentry)\nif !start.cachedMetadataAuthoritative() {\n@@ -851,7 +858,10 @@ func (fs *filesystem) OpenAt(ctx context.Context, rp *vfs.ResolvingPath, opts vf\nif mustCreate {\nreturn nil, syserror.EEXIST\n}\n- return start.openLocked(ctx, rp, &opts)\n+ start.IncRef()\n+ defer start.DecRef(ctx)\n+ unlock()\n+ return start.open(ctx, rp, &opts)\n}\nafterTrailingSymlink:\n@@ -901,11 +911,15 @@ afterTrailingSymlink:\nif rp.MustBeDir() && !child.isDir() {\nreturn nil, syserror.ENOTDIR\n}\n- return child.openLocked(ctx, rp, &opts)\n+ child.IncRef()\n+ defer child.DecRef(ctx)\n+ unlock()\n+ return child.open(ctx, rp, &opts)\n}\n-// Preconditions: fs.renameMu must be locked.\n-func (d *dentry) openLocked(ctx context.Context, rp *vfs.ResolvingPath, opts *vfs.OpenOptions) (*vfs.FileDescription, error) {\n+// Preconditions: The caller must hold no locks (since opening pipes may block\n+// indefinitely).\n+func (d *dentry) open(ctx context.Context, rp *vfs.ResolvingPath, opts *vfs.OpenOptions) (*vfs.FileDescription, error) {\nats := vfs.AccessTypesForOpenFlags(opts)\nif err := d.checkPermissions(rp.Credentials(), ats); err != nil {\nreturn nil, err\n@@ -968,7 +982,7 @@ func (d *dentry) openLocked(ctx context.Context, rp *vfs.ResolvingPath, opts *vf\nreturn nil, syserror.ENXIO\n}\nif d.fs.iopts.OpenSocketsByConnecting {\n- return d.connectSocketLocked(ctx, opts)\n+ return d.openSocketByConnecting(ctx, opts)\n}\ncase linux.S_IFIFO:\nif d.isSynthetic() {\n@@ -977,7 +991,7 @@ func (d *dentry) openLocked(ctx context.Context, rp *vfs.ResolvingPath, opts *vf\n}\nif vfd == nil {\n- if vfd, err = d.openSpecialFileLocked(ctx, mnt, opts); err != nil {\n+ if vfd, err = d.openSpecialFile(ctx, mnt, opts); err != nil {\nreturn nil, err\n}\n}\n@@ -996,7 +1010,7 @@ func (d *dentry) openLocked(ctx context.Context, rp *vfs.ResolvingPath, opts *vf\nreturn vfd, err\n}\n-func (d *dentry) connectSocketLocked(ctx context.Context, opts *vfs.OpenOptions) (*vfs.FileDescription, error) {\n+func (d *dentry) openSocketByConnecting(ctx context.Context, opts *vfs.OpenOptions) (*vfs.FileDescription, error) {\nif opts.Flags&linux.O_DIRECT != 0 {\nreturn nil, syserror.EINVAL\n}\n@@ -1016,7 +1030,7 @@ func (d *dentry) connectSocketLocked(ctx context.Context, opts *vfs.OpenOptions)\nreturn fd, nil\n}\n-func (d *dentry) openSpecialFileLocked(ctx context.Context, mnt *vfs.Mount, opts *vfs.OpenOptions) (*vfs.FileDescription, error) {\n+func (d *dentry) openSpecialFile(ctx context.Context, mnt *vfs.Mount, opts *vfs.OpenOptions) (*vfs.FileDescription, error) {\nats := vfs.AccessTypesForOpenFlags(opts)\nif opts.Flags&linux.O_DIRECT != 0 {\nreturn nil, syserror.EINVAL\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/kernfs/filesystem.go", "new_path": "pkg/sentry/fsimpl/kernfs/filesystem.go", "diff": "@@ -397,15 +397,21 @@ func (fs *Filesystem) OpenAt(ctx context.Context, rp *vfs.ResolvingPath, opts vf\n// Do not create new file.\nif opts.Flags&linux.O_CREAT == 0 {\nfs.mu.RLock()\n- defer fs.processDeferredDecRefs(ctx)\n- 
defer fs.mu.RUnlock()\nvfsd, inode, err := fs.walkExistingLocked(ctx, rp)\nif err != nil {\n+ fs.mu.RUnlock()\n+ fs.processDeferredDecRefs(ctx)\nreturn nil, err\n}\nif err := inode.CheckPermissions(ctx, rp.Credentials(), ats); err != nil {\n+ fs.mu.RUnlock()\n+ fs.processDeferredDecRefs(ctx)\nreturn nil, err\n}\n+ inode.IncRef()\n+ defer inode.DecRef(ctx)\n+ fs.mu.RUnlock()\n+ fs.processDeferredDecRefs(ctx)\nreturn inode.Open(ctx, rp, vfsd, opts)\n}\n@@ -414,7 +420,14 @@ func (fs *Filesystem) OpenAt(ctx context.Context, rp *vfs.ResolvingPath, opts vf\nvfsd := rp.Start()\ninode := vfsd.Impl().(*Dentry).inode\nfs.mu.Lock()\n- defer fs.mu.Unlock()\n+ unlocked := false\n+ unlock := func() {\n+ if !unlocked {\n+ fs.mu.Unlock()\n+ unlocked = true\n+ }\n+ }\n+ defer unlock()\nif rp.Done() {\nif rp.MustBeDir() {\nreturn nil, syserror.EISDIR\n@@ -425,6 +438,9 @@ func (fs *Filesystem) OpenAt(ctx context.Context, rp *vfs.ResolvingPath, opts vf\nif err := inode.CheckPermissions(ctx, rp.Credentials(), ats); err != nil {\nreturn nil, err\n}\n+ inode.IncRef()\n+ defer inode.DecRef(ctx)\n+ unlock()\nreturn inode.Open(ctx, rp, vfsd, opts)\n}\nafterTrailingSymlink:\n@@ -466,6 +482,9 @@ afterTrailingSymlink:\n}\nchild := childVFSD.Impl().(*Dentry)\nparentVFSD.Impl().(*Dentry).InsertChild(pc, child)\n+ child.inode.IncRef()\n+ defer child.inode.DecRef(ctx)\n+ unlock()\nreturn child.inode.Open(ctx, rp, childVFSD, opts)\n}\nif err != nil {\n@@ -499,6 +518,9 @@ afterTrailingSymlink:\nif err := child.inode.CheckPermissions(ctx, rp.Credentials(), ats); err != nil {\nreturn nil, err\n}\n+ child.inode.IncRef()\n+ defer child.inode.DecRef(ctx)\n+ unlock()\nreturn child.inode.Open(ctx, rp, &child.vfsd, opts)\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/overlay/filesystem.go", "new_path": "pkg/sentry/fsimpl/overlay/filesystem.go", "diff": "@@ -717,17 +717,33 @@ func (fs *filesystem) MknodAt(ctx context.Context, rp *vfs.ResolvingPath, opts v\nfunc (fs *filesystem) OpenAt(ctx context.Context, rp *vfs.ResolvingPath, opts vfs.OpenOptions) (*vfs.FileDescription, error) {\nmayCreate := opts.Flags&linux.O_CREAT != 0\nmustCreate := opts.Flags&(linux.O_CREAT|linux.O_EXCL) == (linux.O_CREAT | linux.O_EXCL)\n+ mayWrite := vfs.AccessTypesForOpenFlags(&opts).MayWrite()\nvar ds *[]*dentry\nfs.renameMu.RLock()\n- defer fs.renameMuRUnlockAndCheckDrop(ctx, &ds)\n+ unlocked := false\n+ unlock := func() {\n+ if !unlocked {\n+ fs.renameMuRUnlockAndCheckDrop(ctx, &ds)\n+ unlocked = true\n+ }\n+ }\n+ defer unlock()\nstart := rp.Start().Impl().(*dentry)\nif rp.Done() {\nif mustCreate {\nreturn nil, syserror.EEXIST\n}\n- return start.openLocked(ctx, rp, &opts)\n+ if mayWrite {\n+ if err := start.copyUpLocked(ctx); err != nil {\n+ return nil, err\n+ }\n+ }\n+ start.IncRef()\n+ defer start.DecRef(ctx)\n+ unlock()\n+ return start.openCopiedUp(ctx, rp, &opts)\n}\nafterTrailingSymlink:\n@@ -767,20 +783,24 @@ afterTrailingSymlink:\nstart = parent\ngoto afterTrailingSymlink\n}\n- return child.openLocked(ctx, rp, &opts)\n+ if mayWrite {\n+ if err := child.copyUpLocked(ctx); err != nil {\n+ return nil, err\n+ }\n+ }\n+ child.IncRef()\n+ defer child.DecRef(ctx)\n+ unlock()\n+ return child.openCopiedUp(ctx, rp, &opts)\n}\n-// Preconditions: fs.renameMu must be locked.\n-func (d *dentry) openLocked(ctx context.Context, rp *vfs.ResolvingPath, opts *vfs.OpenOptions) (*vfs.FileDescription, error) {\n+// Preconditions: If vfs.AccessTypesForOpenFlags(opts).MayWrite(), then d has\n+// been copied up.\n+func (d *dentry) openCopiedUp(ctx 
context.Context, rp *vfs.ResolvingPath, opts *vfs.OpenOptions) (*vfs.FileDescription, error) {\nats := vfs.AccessTypesForOpenFlags(opts)\nif err := d.checkPermissions(rp.Credentials(), ats); err != nil {\nreturn nil, err\n}\n- if ats.MayWrite() {\n- if err := d.copyUpLocked(ctx); err != nil {\n- return nil, err\n- }\n- }\nmnt := rp.Mount()\n// Directory FDs open FDs from each layer when directory entries are read,\n@@ -792,7 +812,7 @@ func (d *dentry) openLocked(ctx context.Context, rp *vfs.ResolvingPath, opts *vf\nreturn nil, syserror.EISDIR\n}\n// Can't open directories writably.\n- if ats&vfs.MayWrite != 0 {\n+ if ats.MayWrite() {\nreturn nil, syserror.EISDIR\n}\nif opts.Flags&linux.O_DIRECT != 0 {\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/tmpfs/filesystem.go", "new_path": "pkg/sentry/fsimpl/tmpfs/filesystem.go", "diff": "@@ -307,18 +307,28 @@ func (fs *filesystem) OpenAt(ctx context.Context, rp *vfs.ResolvingPath, opts vf\n// don't need fs.mu for writing.\nif opts.Flags&linux.O_CREAT == 0 {\nfs.mu.RLock()\n- defer fs.mu.RUnlock()\nd, err := resolveLocked(ctx, rp)\nif err != nil {\n+ fs.mu.RUnlock()\nreturn nil, err\n}\n+ d.IncRef()\n+ defer d.DecRef(ctx)\n+ fs.mu.RUnlock()\nreturn d.open(ctx, rp, &opts, false /* afterCreate */)\n}\nmustCreate := opts.Flags&linux.O_EXCL != 0\nstart := rp.Start().Impl().(*dentry)\nfs.mu.Lock()\n- defer fs.mu.Unlock()\n+ unlocked := false\n+ unlock := func() {\n+ if !unlocked {\n+ fs.mu.Unlock()\n+ unlocked = true\n+ }\n+ }\n+ defer unlock()\nif rp.Done() {\n// Reject attempts to open mount root directory with O_CREAT.\nif rp.MustBeDir() {\n@@ -327,6 +337,9 @@ func (fs *filesystem) OpenAt(ctx context.Context, rp *vfs.ResolvingPath, opts vf\nif mustCreate {\nreturn nil, syserror.EEXIST\n}\n+ start.IncRef()\n+ defer start.DecRef(ctx)\n+ unlock()\nreturn start.open(ctx, rp, &opts, false /* afterCreate */)\n}\nafterTrailingSymlink:\n@@ -364,6 +377,7 @@ afterTrailingSymlink:\ncreds := rp.Credentials()\nchild := fs.newDentry(fs.newRegularFile(creds.EffectiveKUID, creds.EffectiveKGID, opts.Mode))\nparentDir.insertChildLocked(child, name)\n+ unlock()\nfd, err := child.open(ctx, rp, &opts, true)\nif err != nil {\nreturn nil, err\n@@ -392,9 +406,14 @@ afterTrailingSymlink:\nif rp.MustBeDir() && !child.inode.isDir() {\nreturn nil, syserror.ENOTDIR\n}\n+ child.IncRef()\n+ defer child.DecRef(ctx)\n+ unlock()\nreturn child.open(ctx, rp, &opts, false)\n}\n+// Preconditions: The caller must hold no locks (since opening pipes may block\n+// indefinitely).\nfunc (d *dentry) open(ctx context.Context, rp *vfs.ResolvingPath, opts *vfs.OpenOptions, afterCreate bool) (*vfs.FileDescription, error) {\nats := vfs.AccessTypesForOpenFlags(opts)\nif !afterCreate {\n" } ]
Go
Apache License 2.0
google/gvisor
Avoid holding locks when opening files in VFS2. Fixes #3243, #3521 PiperOrigin-RevId: 327308890
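The core idiom in this change is releasing a filesystem lock exactly once, and as early as possible, so the open itself (which may block forever on a pipe) runs unlocked while every early-return path still unlocks via defer. A self-contained sketch of that idiom, with hypothetical names:

```go
package main

import (
	"fmt"
	"sync"
)

// openFile demonstrates the "unlock exactly once, as early as possible"
// pattern: a deferred closure guarantees the lock is released on every error
// path, while the explicit unlock() call lets the potentially-blocking part
// of the operation run without holding the filesystem lock.
func openFile(mu *sync.RWMutex, path string) (string, error) {
	mu.RLock()
	unlocked := false
	unlock := func() {
		if !unlocked {
			mu.RUnlock()
			unlocked = true
		}
	}
	defer unlock()

	if path == "" {
		return "", fmt.Errorf("empty path") // deferred unlock() fires here
	}

	// Path resolution that needs the lock would happen here.

	unlock() // drop the lock before the part that may block indefinitely
	return "opened " + path, nil
}

func main() {
	var mu sync.RWMutex
	fmt.Println(openFile(&mu, "a/b/c"))
	fmt.Println(openFile(&mu, ""))
}
```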
259,896
18.08.2020 15:57:48
25,200
4184a7d5f189cfac4a7c9d7a1f0197d074e74e9b
RACK: Create a new list for segments. RACK requires the segments to be in the order of their transmission or retransmission times. This cl creates a new list and moves the retransmitted segments to the end of the list.
[ { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/tcp/BUILD", "new_path": "pkg/tcpip/transport/tcp/BUILD", "diff": "@@ -11,7 +11,8 @@ go_template_instance(\ntemplate = \"//pkg/ilist:generic_list\",\ntypes = {\n\"Element\": \"*segment\",\n- \"Linker\": \"*segment\",\n+ \"ElementMapper\": \"segmentMapper\",\n+ \"Linker\": \"*segmentEntry\",\n},\n)\n@@ -27,6 +28,19 @@ go_template_instance(\n},\n)\n+go_template_instance(\n+ name = \"tcp_rack_segment_list\",\n+ out = \"tcp_rack_segment_list.go\",\n+ package = \"tcp\",\n+ prefix = \"rackSegment\",\n+ template = \"//pkg/ilist:generic_list\",\n+ types = {\n+ \"Element\": \"*segment\",\n+ \"ElementMapper\": \"rackSegmentMapper\",\n+ \"Linker\": \"*rackSegmentEntry\",\n+ },\n+)\n+\ngo_library(\nname = \"tcp\",\nsrcs = [\n@@ -55,6 +69,7 @@ go_library(\n\"snd.go\",\n\"snd_state.go\",\n\"tcp_endpoint_list.go\",\n+ \"tcp_rack_segment_list.go\",\n\"tcp_segment_list.go\",\n\"timer.go\",\n],\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/tcp/connect.go", "new_path": "pkg/tcpip/transport/tcp/connect.go", "diff": "@@ -924,7 +924,18 @@ func (e *endpoint) handleWrite() *tcpip.Error {\nfirst := e.sndQueue.Front()\nif first != nil {\n+ lastSeg := e.snd.writeList.Back()\ne.snd.writeList.PushBackList(&e.sndQueue)\n+ if lastSeg == nil {\n+ lastSeg = e.snd.writeList.Front()\n+ } else {\n+ lastSeg = lastSeg.segEntry.Next()\n+ }\n+ // Add new segments to rcList, as rcList and writeList should\n+ // be consistent.\n+ for seg := lastSeg; seg != nil; seg = seg.segEntry.Next() {\n+ e.snd.rcList.PushBack(seg)\n+ }\ne.sndBufInQueue = 0\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/tcp/endpoint.go", "new_path": "pkg/tcpip/transport/tcp/endpoint.go", "diff": "@@ -1428,7 +1428,7 @@ func (e *endpoint) Peek(vec [][]byte) (int64, tcpip.ControlMessages, *tcpip.Erro\nvec = append([][]byte(nil), vec...)\nvar num int64\n- for s := e.rcvList.Front(); s != nil; s = s.Next() {\n+ for s := e.rcvList.Front(); s != nil; s = s.segEntry.Next() {\nviews := s.data.Views()\nfor i := s.viewToDeliver; i < len(views); i++ {\n@@ -2249,7 +2249,7 @@ func (e *endpoint) connect(addr tcpip.FullAddress, handshake bool, run bool) *tc\nif !handshake {\ne.segmentQueue.mu.Lock()\nfor _, l := range []segmentList{e.segmentQueue.list, e.sndQueue, e.snd.writeList} {\n- for s := l.Front(); s != nil; s = s.Next() {\n+ for s := l.Front(); s != nil; s = s.segEntry.Next() {\ns.id = e.ID\ns.route = r.Clone()\ne.sndWaker.Assert()\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/tcp/segment.go", "new_path": "pkg/tcpip/transport/tcp/segment.go", "diff": "@@ -30,7 +30,8 @@ import (\n//\n// +stateify savable\ntype segment struct {\n- segmentEntry\n+ segEntry segmentEntry\n+ rackSegEntry rackSegmentEntry\nrefCnt int32\nid stack.TransportEndpointID `state:\"manual\"`\nroute stack.Route `state:\"manual\"`\n@@ -61,6 +62,16 @@ type segment struct {\nxmitCount uint32\n}\n+// segmentMapper is the ElementMapper for the writeList.\n+type segmentMapper struct{}\n+\n+func (segmentMapper) linkerFor(seg *segment) *segmentEntry { return &seg.segEntry }\n+\n+// rackSegmentMapper is the ElementMapper for the rcList.\n+type rackSegmentMapper struct{}\n+\n+func (rackSegmentMapper) linkerFor(seg *segment) *rackSegmentEntry { return &seg.rackSegEntry }\n+\nfunc newSegment(r *stack.Route, id stack.TransportEndpointID, pkt *stack.PacketBuffer) *segment {\ns := &segment{\nrefCnt: 1,\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/tcp/snd.go", "new_path": 
"pkg/tcpip/transport/tcp/snd.go", "diff": "@@ -154,6 +154,7 @@ type sender struct {\nclosed bool\nwriteNext *segment\nwriteList segmentList\n+ rcList rackSegmentList\nresendTimer timer `state:\"nosave\"`\nresendWaker sleep.Waker `state:\"nosave\"`\n@@ -367,7 +368,7 @@ func (s *sender) updateMaxPayloadSize(mtu, count int) {\n// Rewind writeNext to the first segment exceeding the MTU. Do nothing\n// if it is already before such a packet.\n- for seg := s.writeList.Front(); seg != nil; seg = seg.Next() {\n+ for seg := s.writeList.Front(); seg != nil; seg = seg.segEntry.Next() {\nif seg == s.writeNext {\n// We got to writeNext before we could find a segment\n// exceeding the MTU.\n@@ -622,6 +623,7 @@ func (s *sender) splitSeg(seg *segment, size int) {\nnSeg.data.TrimFront(size)\nnSeg.sequenceNumber.UpdateForward(seqnum.Size(size))\ns.writeList.InsertAfter(seg, nSeg)\n+ s.rcList.InsertAfter(seg, nSeg)\n// The segment being split does not carry PUSH flag because it is\n// followed by the newly split segment.\n@@ -653,7 +655,7 @@ func (s *sender) NextSeg(nextSegHint *segment) (nextSeg, hint *segment, rescueRt\nvar s3 *segment\nvar s4 *segment\n// Step 1.\n- for seg := nextSegHint; seg != nil; seg = seg.Next() {\n+ for seg := nextSegHint; seg != nil; seg = seg.segEntry.Next() {\n// Stop iteration if we hit a segment that has never been\n// transmitted (i.e. either it has no assigned sequence number\n// or if it does have one, it's >= the next sequence number\n@@ -683,7 +685,7 @@ func (s *sender) NextSeg(nextSegHint *segment) (nextSeg, hint *segment, rescueRt\n// NextSeg():\n// (1.c) IsLost(S2) returns true.\nif s.ep.scoreboard.IsLost(segSeq) {\n- return seg, seg.Next(), false\n+ return seg, seg.segEntry.Next(), false\n}\n// NextSeg():\n@@ -697,7 +699,7 @@ func (s *sender) NextSeg(nextSegHint *segment) (nextSeg, hint *segment, rescueRt\n// SHOULD be returned.\nif s3 == nil {\ns3 = seg\n- hint = seg.Next()\n+ hint = seg.segEntry.Next()\n}\n}\n// NextSeg():\n@@ -731,7 +733,7 @@ func (s *sender) NextSeg(nextSegHint *segment) (nextSeg, hint *segment, rescueRt\n// range of one segment of up to SMSS octets of\n// previously unsent data starting with sequence number\n// HighData+1 MUST be returned.\"\n- for seg := s.writeNext; seg != nil; seg = seg.Next() {\n+ for seg := s.writeNext; seg != nil; seg = seg.segEntry.Next() {\nif s.isAssignedSequenceNumber(seg) && seg.sequenceNumber.LessThan(s.sndNxt) {\ncontinue\n}\n@@ -773,15 +775,16 @@ func (s *sender) maybeSendSegment(seg *segment, limit int, end seqnum.Value) (se\n// triggering bugs in poorly written DNS\n// implementations.\nvar nextTooBig bool\n- for seg.Next() != nil && seg.Next().data.Size() != 0 {\n- if seg.data.Size()+seg.Next().data.Size() > available {\n+ for seg.segEntry.Next() != nil && seg.segEntry.Next().data.Size() != 0 {\n+ if seg.data.Size()+seg.segEntry.Next().data.Size() > available {\nnextTooBig = true\nbreak\n}\n- seg.data.Append(seg.Next().data)\n+ seg.data.Append(seg.segEntry.Next().data)\n// Consume the segment that we just merged in.\n- s.writeList.Remove(seg.Next())\n+ s.writeList.Remove(seg.segEntry.Next())\n+ s.rcList.Remove(seg.rackSegEntry.Next())\n}\nif !nextTooBig && seg.data.Size() < available {\n// Segment is not full.\n@@ -948,7 +951,7 @@ func (s *sender) handleSACKRecovery(limit int, end seqnum.Value) (dataSent bool)\n}\ndataSent = true\ns.outstanding++\n- s.writeNext = nextSeg.Next()\n+ s.writeNext = nextSeg.segEntry.Next()\ncontinue\n}\n@@ -961,6 +964,7 @@ func (s *sender) handleSACKRecovery(limit int, end 
seqnum.Value) (dataSent bool)\n// transmitted in (C.1).\"\ns.outstanding++\ndataSent = true\n+\ns.sendSegment(nextSeg)\nsegEnd := nextSeg.sequenceNumber.Add(nextSeg.logicalLen())\n@@ -1035,7 +1039,7 @@ func (s *sender) sendData() {\nif s.fr.active && s.ep.sackPermitted {\ndataSent = s.handleSACKRecovery(s.maxPayloadSize, end)\n} else {\n- for seg := s.writeNext; seg != nil && s.outstanding < s.sndCwnd; seg = seg.Next() {\n+ for seg := s.writeNext; seg != nil && s.outstanding < s.sndCwnd; seg = seg.segEntry.Next() {\ncwndLimit := (s.sndCwnd - s.outstanding) * s.maxPayloadSize\nif cwndLimit < limit {\nlimit = cwndLimit\n@@ -1043,7 +1047,7 @@ func (s *sender) sendData() {\nif s.isAssignedSequenceNumber(seg) && s.ep.sackPermitted && s.ep.scoreboard.IsSACKED(seg.sackBlock()) {\n// Move writeNext along so that we don't try and scan data that\n// has already been SACKED.\n- s.writeNext = seg.Next()\n+ s.writeNext = seg.segEntry.Next()\ncontinue\n}\nif sent := s.maybeSendSegment(seg, limit, end); !sent {\n@@ -1051,7 +1055,7 @@ func (s *sender) sendData() {\n}\ndataSent = true\ns.outstanding += s.pCount(seg)\n- s.writeNext = seg.Next()\n+ s.writeNext = seg.segEntry.Next()\n}\n}\n@@ -1182,7 +1186,7 @@ func (s *sender) SetPipe() {\n}\npipe := 0\nsmss := seqnum.Size(s.ep.scoreboard.SMSS())\n- for s1 := s.writeList.Front(); s1 != nil && s1.data.Size() != 0 && s.isAssignedSequenceNumber(s1); s1 = s1.Next() {\n+ for s1 := s.writeList.Front(); s1 != nil && s1.data.Size() != 0 && s.isAssignedSequenceNumber(s1); s1 = s1.segEntry.Next() {\n// With GSO each segment can be much larger than SMSS. So check the segment\n// in SMSS sized ranges.\nsegEnd := s1.sequenceNumber.Add(seqnum.Size(s1.data.Size()))\n@@ -1384,7 +1388,7 @@ func (s *sender) handleRcvdSegment(rcvdSeg *segment) {\n}\nif s.writeNext == seg {\n- s.writeNext = seg.Next()\n+ s.writeNext = seg.segEntry.Next()\n}\n// Update the RACK fields if SACK is enabled.\n@@ -1393,6 +1397,7 @@ func (s *sender) handleRcvdSegment(rcvdSeg *segment) {\n}\ns.writeList.Remove(seg)\n+ s.rcList.Remove(seg)\n// if SACK is enabled then Only reduce outstanding if\n// the segment was not previously SACKED as these have\n@@ -1460,6 +1465,12 @@ func (s *sender) sendSegment(seg *segment) *tcpip.Error {\nif s.sndCwnd < s.sndSsthresh {\ns.ep.stack.Stats().TCP.SlowStartRetransmits.Increment()\n}\n+\n+ // Move the segment which has to be retransmitted to the end of the list, as\n+ // RACK requires the segments in the order of their transmission times.\n+ // See: https://tools.ietf.org/html/draft-ietf-tcpm-rack-09#section-6.2\n+ // Step 5\n+ s.rcList.PushBack(seg)\n}\nseg.xmitTime = time.Now()\nseg.xmitCount++\n" } ]
Go
Apache License 2.0
google/gvisor
RACK: Create a new list for segments. RACK requires the segments to be in the order of their transmission or retransmission times. This cl creates a new list and moves the retransmitted segments to the end of the list. PiperOrigin-RevId: 327325153
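gVisor generates intrusive lists for this, but the idea of the change can be shown with container/list: each segment sits on two lists at once, and a retransmission moves it to the back of the RACK list (transmission-time order) without disturbing its place in the write list (sequence order). The names below are illustrative, not the generated segmentList/rackSegmentList types.

```go
package main

import (
	"container/list"
	"fmt"
	"time"
)

// segment is tracked on two lists: the write list (sequence order) and a
// RACK list ordered by most recent (re)transmission time.
type segment struct {
	seq       int
	xmitTime  time.Time
	writeElem *list.Element // position in writeList
	rackElem  *list.Element // position in rackList
}

type sender struct {
	writeList *list.List
	rackList  *list.List
}

func (s *sender) queue(seg *segment) {
	seg.writeElem = s.writeList.PushBack(seg)
	seg.rackElem = s.rackList.PushBack(seg)
}

// retransmit re-sends seg and moves it to the back of the RACK list so that
// list stays ordered by transmission/retransmission time, while the write
// list keeps sequence order untouched.
func (s *sender) retransmit(seg *segment) {
	seg.xmitTime = time.Now()
	s.rackList.MoveToBack(seg.rackElem)
}

func main() {
	s := &sender{writeList: list.New(), rackList: list.New()}
	for i := 1; i <= 3; i++ {
		s.queue(&segment{seq: i, xmitTime: time.Now()})
	}
	s.retransmit(s.writeList.Front().Value.(*segment)) // segment 1 re-sent

	for e := s.rackList.Front(); e != nil; e = e.Next() {
		fmt.Print(e.Value.(*segment).seq, " ") // prints: 2 3 1
	}
	fmt.Println()
}
```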
259,975
18.08.2020 16:02:52
25,200
f6d24904829d71574821cce5515c5777e0837aff
Fix return value in shm_test for opensource Some machines return 128 + signal for failures. Accept that as a valid result.
[ { "change_type": "MODIFY", "old_path": "test/syscalls/linux/shm.cc", "new_path": "test/syscalls/linux/shm.cc", "diff": "@@ -29,6 +29,8 @@ namespace testing {\nnamespace {\nusing ::testing::_;\n+using ::testing::AnyOf;\n+using ::testing::Eq;\nconst uint64_t kAllocSize = kPageSize * 128ULL;\n@@ -394,7 +396,8 @@ TEST(ShmDeathTest, SegmentNotAccessibleAfterDetach) {\n};\nEXPECT_THAT(InForkedProcess(rest),\n- IsPosixErrorOkAndHolds(W_EXITCODE(0, SIGSEGV)));\n+ IsPosixErrorOkAndHolds(AnyOf(Eq(W_EXITCODE(0, SIGSEGV)),\n+ Eq(W_EXITCODE(0, 128 + SIGSEGV)))));\n}\nTEST(ShmTest, RequestingSegmentSmallerThanSHMMINFails) {\n" } ]
Go
Apache License 2.0
google/gvisor
Fix return value in shm_test for opensource Some machines return 128 + signal for failures. Accept that as a valid result. PiperOrigin-RevId: 327326113
259,992
18.08.2020 18:50:24
25,200
afe7303c3cdcf04c9e4491004b3ef2d229bafc74
Add more information to panic when device IDs don't match
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/device/device.go", "new_path": "pkg/sentry/device/device.go", "diff": "@@ -256,7 +256,7 @@ func (m *MultiDevice) Load(key MultiDeviceKey, value uint64) bool {\n}\nif k, exists := m.rcache[value]; exists && k != key {\n// Should never happen.\n- panic(\"MultiDevice's caches are inconsistent\")\n+ panic(fmt.Sprintf(\"MultiDevice's caches are inconsistent, current: %+v, previous: %+v\", key, k))\n}\n// Cache value at key.\n" } ]
Go
Apache License 2.0
google/gvisor
Add more information to panic when device IDs don't match PiperOrigin-RevId: 327351357
259,992
18.08.2020 18:51:19
25,200
9da77d00fa682ff504c7ff549985f78b0574c79b
Don't set atime if mount is readonly Updates
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/gofer/time.go", "new_path": "pkg/sentry/fsimpl/gofer/time.go", "diff": "@@ -38,7 +38,7 @@ func statxTimestampFromDentry(ns int64) linux.StatxTimestamp {\n// Preconditions: d.cachedMetadataAuthoritative() == true.\nfunc (d *dentry) touchAtime(mnt *vfs.Mount) {\n- if mnt.Flags.NoATime {\n+ if mnt.Flags.NoATime || mnt.ReadOnly() {\nreturn\n}\nif err := mnt.CheckBeginWrite(); err != nil {\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/vfs/mount.go", "new_path": "pkg/sentry/vfs/mount.go", "diff": "@@ -114,7 +114,7 @@ func (mnt *Mount) Options() MountOptions {\ndefer mnt.vfs.mountMu.Unlock()\nreturn MountOptions{\nFlags: mnt.Flags,\n- ReadOnly: mnt.readOnly(),\n+ ReadOnly: mnt.ReadOnly(),\n}\n}\n@@ -688,7 +688,8 @@ func (mnt *Mount) setReadOnlyLocked(ro bool) error {\nreturn nil\n}\n-func (mnt *Mount) readOnly() bool {\n+// ReadOnly returns true if mount is readonly.\n+func (mnt *Mount) ReadOnly() bool {\nreturn atomic.LoadInt64(&mnt.writers) < 0\n}\n@@ -756,7 +757,7 @@ func (vfs *VirtualFilesystem) GenerateProcMounts(ctx context.Context, taskRootDi\n}\nopts := \"rw\"\n- if mnt.readOnly() {\n+ if mnt.ReadOnly() {\nopts = \"ro\"\n}\nif mnt.Flags.NoATime {\n@@ -844,7 +845,7 @@ func (vfs *VirtualFilesystem) GenerateProcMountInfo(ctx context.Context, taskRoo\n// (6) Mount options.\nopts := \"rw\"\n- if mnt.readOnly() {\n+ if mnt.ReadOnly() {\nopts = \"ro\"\n}\nif mnt.Flags.NoATime {\n@@ -883,7 +884,7 @@ func superBlockOpts(mountPath string, mnt *Mount) string {\n// gVisor doesn't (yet) have a concept of super block options, so we\n// use the ro/rw bit from the mount flag.\nopts := \"rw\"\n- if mnt.readOnly() {\n+ if mnt.ReadOnly() {\nopts = \"ro\"\n}\n" } ]
Go
Apache License 2.0
google/gvisor
Don't set atime if mount is readonly Updates #1035 PiperOrigin-RevId: 327351475
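Mount.ReadOnly() in the diff is just a sign check on the atomically maintained writers counter, and touchAtime now bails out early when the mount could never accept the update. A simplified sketch follows; a real implementation folds the read-only flag and the writer count into one atomic value, whereas here the flag is set to -1 directly, and only the `writers < 0` test is taken from the diff.

```go
package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

// mount sketches the check added in the commit: a negative writers value
// marks the mount read-only, so one atomic load answers ReadOnly().
type mount struct {
	writers int64 // negative while the mount is read-only
	noATime bool
	atime   time.Time
}

func (m *mount) ReadOnly() bool { return atomic.LoadInt64(&m.writers) < 0 }

// touchAtime skips the update when it could never be written back, matching
// the early return added to the gofer client's touchAtime.
func (m *mount) touchAtime() {
	if m.noATime || m.ReadOnly() {
		return
	}
	m.atime = time.Now()
}

func main() {
	rw := &mount{}
	rw.touchAtime()
	fmt.Println("rw mount atime updated:", !rw.atime.IsZero()) // true

	ro := &mount{writers: -1} // stand-in for setReadOnlyLocked
	ro.touchAtime()
	fmt.Println("ro mount atime updated:", !ro.atime.IsZero()) // false
}
```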
259,860
18.08.2020 21:52:08
25,200
e5f05d9bf4cfad75c295abfe2f994688a8863102
Get rid of kernfs.Inode.Destroy. This interface method is unneeded.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/host/host.go", "new_path": "pkg/sentry/fsimpl/host/host.go", "diff": "@@ -432,17 +432,14 @@ func (i *inode) SetStat(ctx context.Context, fs *vfs.Filesystem, creds *auth.Cre\n// DecRef implements kernfs.Inode.\nfunc (i *inode) DecRef(ctx context.Context) {\n- i.AtomicRefCount.DecRefWithDestructor(ctx, i.Destroy)\n-}\n-\n-// Destroy implements kernfs.Inode.\n-func (i *inode) Destroy(context.Context) {\n+ i.AtomicRefCount.DecRefWithDestructor(ctx, func(context.Context) {\nif i.wouldBlock {\nfdnotifier.RemoveFD(int32(i.hostFD))\n}\nif err := unix.Close(i.hostFD); err != nil {\nlog.Warningf(\"failed to close host fd %d: %v\", i.hostFD, err)\n}\n+ })\n}\n// Open implements kernfs.Inode.\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/kernfs/inode_impl_util.go", "new_path": "pkg/sentry/fsimpl/kernfs/inode_impl_util.go", "diff": "@@ -48,10 +48,6 @@ func (InodeNoopRefCount) TryIncRef() bool {\nreturn true\n}\n-// Destroy implements Inode.Destroy.\n-func (InodeNoopRefCount) Destroy(context.Context) {\n-}\n-\n// InodeDirectoryNoNewChildren partially implements the Inode interface.\n// InodeDirectoryNoNewChildren represents a directory inode which does not\n// support creation of new children.\n@@ -367,15 +363,12 @@ func (o *OrderedChildren) Init(opts OrderedChildrenOptions) {\n// DecRef implements Inode.DecRef.\nfunc (o *OrderedChildren) DecRef(ctx context.Context) {\n- o.AtomicRefCount.DecRefWithDestructor(ctx, o.Destroy)\n-}\n-\n-// Destroy cleans up resources referenced by this OrderedChildren.\n-func (o *OrderedChildren) Destroy(context.Context) {\n+ o.AtomicRefCount.DecRefWithDestructor(ctx, func(context.Context) {\no.mu.Lock()\ndefer o.mu.Unlock()\no.order.Reset()\no.set = nil\n+ })\n}\n// Populate inserts children into this OrderedChildren, and d's dentry\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/kernfs/kernfs.go", "new_path": "pkg/sentry/fsimpl/kernfs/kernfs.go", "diff": "@@ -328,10 +328,6 @@ type inodeRefs interface {\nIncRef()\nDecRef(ctx context.Context)\nTryIncRef() bool\n- // Destroy is called when the inode reaches zero references. Destroy release\n- // all resources (references) on objects referenced by the inode, including\n- // any child dentries.\n- Destroy(ctx context.Context)\n}\ntype inodeMetadata interface {\n" } ]
Go
Apache License 2.0
google/gvisor
Get rid of kernfs.Inode.Destroy. This interface method is unneeded. PiperOrigin-RevId: 327370325
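With Destroy gone, cleanup moves into a destructor closure passed to DecRefWithDestructor at the one call site that needs it, as the host inode diff shows. A minimal sketch of that refcounting shape; refCount is a stand-in for the sentry's AtomicRefCount, and its zero value here holds no references, so callers take the first one explicitly.

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// refCount runs a caller-supplied destructor when the count hits zero; the
// destructor is a closure at the DecRef call site, which is what lets the
// kernfs.Inode interface drop its separate Destroy method.
type refCount struct {
	refs int64
}

func (r *refCount) IncRef() { atomic.AddInt64(&r.refs, 1) }

func (r *refCount) DecRefWithDestructor(destroy func()) {
	if atomic.AddInt64(&r.refs, -1) == 0 {
		destroy()
	}
}

type inode struct {
	refCount
	hostFD int
}

func (i *inode) DecRef() {
	i.DecRefWithDestructor(func() {
		// Cleanup lives inline instead of in a Destroy interface method.
		fmt.Println("closing host fd", i.hostFD)
	})
}

func main() {
	i := &inode{hostFD: 7}
	i.IncRef() // initial reference
	i.IncRef()
	i.DecRef()
	i.DecRef() // last reference: destructor runs
}
```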
259,907
18.08.2020 21:55:16
25,200
35dc7fe7e78faab35b55eaa6f82360cc8b23f3b3
[vfs] Allow offsets for special files other than regular files. Some character and block devices can be seekable. So allow their FD to maintain file offset.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/gofer/special_file.go", "new_path": "pkg/sentry/fsimpl/gofer/special_file.go", "diff": "@@ -39,8 +39,14 @@ type specialFileFD struct {\n// handle is used for file I/O. handle is immutable.\nhandle handle\n+ // isRegularFile is true if this FD represents a regular file which is only\n+ // possible when filesystemOptions.regularFilesUseSpecialFileFD is in\n+ // effect. isRegularFile is immutable.\n+ isRegularFile bool\n+\n// seekable is true if this file description represents a file for which\n- // file offset is significant, i.e. a regular file. seekable is immutable.\n+ // file offset is significant, i.e. a regular file, character device or\n+ // block device. seekable is immutable.\nseekable bool\n// haveQueue is true if this file description represents a file for which\n@@ -55,10 +61,11 @@ type specialFileFD struct {\nfunc newSpecialFileFD(h handle, mnt *vfs.Mount, d *dentry, locks *vfs.FileLocks, flags uint32) (*specialFileFD, error) {\nftype := d.fileType()\n- seekable := ftype == linux.S_IFREG\n+ seekable := ftype == linux.S_IFREG || ftype == linux.S_IFCHR || ftype == linux.S_IFBLK\nhaveQueue := (ftype == linux.S_IFIFO || ftype == linux.S_IFSOCK) && h.fd >= 0\nfd := &specialFileFD{\nhandle: h,\n+ isRegularFile: ftype == linux.S_IFREG,\nseekable: seekable,\nhaveQueue: haveQueue,\n}\n@@ -200,13 +207,13 @@ func (fd *specialFileFD) pwrite(ctx context.Context, src usermem.IOSequence, off\n// If the regular file fd was opened with O_APPEND, make sure the file size\n// is updated. There is a possible race here if size is modified externally\n// after metadata cache is updated.\n- if fd.seekable && fd.vfsfd.StatusFlags()&linux.O_APPEND != 0 && !d.cachedMetadataAuthoritative() {\n+ if fd.isRegularFile && fd.vfsfd.StatusFlags()&linux.O_APPEND != 0 && !d.cachedMetadataAuthoritative() {\nif err := d.updateFromGetattr(ctx); err != nil {\nreturn 0, offset, err\n}\n}\n- if fd.seekable {\n+ if fd.isRegularFile {\n// We need to hold the metadataMu *while* writing to a regular file.\nd.metadataMu.Lock()\ndefer d.metadataMu.Unlock()\n@@ -236,18 +243,20 @@ func (fd *specialFileFD) pwrite(ctx context.Context, src usermem.IOSequence, off\nif err == syserror.EAGAIN {\nerr = syserror.ErrWouldBlock\n}\n- finalOff = offset\n+ // Update offset if the offset is valid.\n+ if offset >= 0 {\n+ offset += int64(n)\n+ }\n// Update file size for regular files.\n- if fd.seekable {\n- finalOff += int64(n)\n+ if fd.isRegularFile {\n// d.metadataMu is already locked at this point.\n- if uint64(finalOff) > d.size {\n+ if uint64(offset) > d.size {\nd.dataMu.Lock()\ndefer d.dataMu.Unlock()\n- atomic.StoreUint64(&d.size, uint64(finalOff))\n+ atomic.StoreUint64(&d.size, uint64(offset))\n}\n}\n- return int64(n), finalOff, err\n+ return int64(n), offset, err\n}\n// Write implements vfs.FileDescriptionImpl.Write.\n" } ]
Go
Apache License 2.0
google/gvisor
[vfs] Allow offsets for special files other than regular files. Some character and block devices can be seekable. So allow their FD to maintain file offset. PiperOrigin-RevId: 327370684
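The behavioral points of the change are that seekability now covers character and block devices, and that the returned offset only advances when a real offset was supplied. A toy sketch of that flow; specialFD is not the real specialFileFD, and error handling plus the host write are elided.

```go
package main

import "fmt"

// specialFD sketches the offset handling from the commit: character and
// block devices count as seekable, so their FD tracks a file offset, while
// pipes and sockets pass -1 and keep no offset at all.
type specialFD struct {
	seekable bool
	off      int64
}

// pwrite pretends to write len(data) bytes and only advances the offset when
// the caller supplied a real (non-negative) one.
func (fd *specialFD) pwrite(data []byte, offset int64) (n int64, newOffset int64) {
	n = int64(len(data))
	if offset >= 0 {
		offset += n
	}
	return n, offset
}

// Write routes through pwrite, using the stored offset only for seekable FDs.
func (fd *specialFD) Write(data []byte) int64 {
	if !fd.seekable {
		n, _ := fd.pwrite(data, -1)
		return n
	}
	n, off := fd.pwrite(data, fd.off)
	fd.off = off
	return n
}

func main() {
	blockDev := &specialFD{seekable: true}
	blockDev.Write([]byte("abcd"))
	blockDev.Write([]byte("ef"))
	fmt.Println("block device offset:", blockDev.off) // 6

	pipe := &specialFD{seekable: false}
	pipe.Write([]byte("abcd"))
	fmt.Println("pipe offset:", pipe.off) // 0, never maintained
}
```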
259,860
19.08.2020 08:50:59
25,200
33c60b893fe8a0f039c781091bf96cbcd47ecc2d
Return appropriate errors when file locking is unsuccessful. test_eintr now passes in the Python runtime tests. Updates
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/vfs/lock.go", "new_path": "pkg/sentry/vfs/lock.go", "diff": "@@ -46,8 +46,14 @@ func (fl *FileLocks) LockBSD(uid fslock.UniqueID, t fslock.LockType, block fsloc\nif fl.bsd.LockRegion(uid, t, fslock.LockRange{0, fslock.LockEOF}, block) {\nreturn nil\n}\n+\n+ // Return an appropriate error for the unsuccessful lock attempt, depending on\n+ // whether this is a blocking or non-blocking operation.\n+ if block == nil {\nreturn syserror.ErrWouldBlock\n}\n+ return syserror.ERESTARTSYS\n+}\n// UnlockBSD releases a BSD-style lock on the entire file.\n//\n@@ -66,8 +72,14 @@ func (fl *FileLocks) LockPOSIX(ctx context.Context, fd *FileDescription, uid fsl\nif fl.posix.LockRegion(uid, t, rng, block) {\nreturn nil\n}\n+\n+ // Return an appropriate error for the unsuccessful lock attempt, depending on\n+ // whether this is a blocking or non-blocking operation.\n+ if block == nil {\nreturn syserror.ErrWouldBlock\n}\n+ return syserror.ERESTARTSYS\n+}\n// UnlockPOSIX releases a POSIX-style lock on a file region.\n//\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/flock.cc", "new_path": "test/syscalls/linux/flock.cc", "diff": "@@ -185,7 +185,7 @@ TEST_F(FlockTest, TestMultipleHolderSharedExclusive) {\nASSERT_THAT(flock(test_file_fd_.get(), LOCK_UN), SyscallSucceedsWithValue(0));\n}\n-TEST_F(FlockTest, TestSharedLockFailExclusiveHolder) {\n+TEST_F(FlockTest, TestSharedLockFailExclusiveHolderNonblocking) {\n// This test will verify that a shared lock is denied while\n// someone holds an exclusive lock.\nASSERT_THAT(flock(test_file_fd_.get(), LOCK_EX | LOCK_NB),\n@@ -203,7 +203,33 @@ TEST_F(FlockTest, TestSharedLockFailExclusiveHolder) {\nASSERT_THAT(flock(test_file_fd_.get(), LOCK_UN), SyscallSucceedsWithValue(0));\n}\n-TEST_F(FlockTest, TestExclusiveLockFailExclusiveHolder) {\n+void trivial_handler(int signum) {}\n+\n+TEST_F(FlockTest, TestSharedLockFailExclusiveHolderBlocking_NoRandomSave) {\n+ const DisableSave ds; // Timing-related.\n+\n+ // This test will verify that a shared lock is denied while\n+ // someone holds an exclusive lock.\n+ ASSERT_THAT(flock(test_file_fd_.get(), LOCK_EX | LOCK_NB),\n+ SyscallSucceedsWithValue(0));\n+\n+ const FileDescriptor fd =\n+ ASSERT_NO_ERRNO_AND_VALUE(Open(test_file_name_, O_RDWR));\n+\n+ // Register a signal handler for SIGALRM and set an alarm that will go off\n+ // while blocking in the subsequent flock() call. 
This will interrupt flock()\n+ // and cause it to return EINTR.\n+ struct sigaction act = {};\n+ act.sa_handler = trivial_handler;\n+ ASSERT_THAT(sigaction(SIGALRM, &act, NULL), SyscallSucceeds());\n+ ASSERT_THAT(ualarm(10000, 0), SyscallSucceeds());\n+ ASSERT_THAT(flock(fd.get(), LOCK_SH), SyscallFailsWithErrno(EINTR));\n+\n+ // Unlock\n+ ASSERT_THAT(flock(test_file_fd_.get(), LOCK_UN), SyscallSucceedsWithValue(0));\n+}\n+\n+TEST_F(FlockTest, TestExclusiveLockFailExclusiveHolderNonblocking) {\n// This test will verify that an exclusive lock is denied while\n// someone already holds an exclsuive lock.\nASSERT_THAT(flock(test_file_fd_.get(), LOCK_EX | LOCK_NB),\n@@ -221,6 +247,30 @@ TEST_F(FlockTest, TestExclusiveLockFailExclusiveHolder) {\nASSERT_THAT(flock(test_file_fd_.get(), LOCK_UN), SyscallSucceedsWithValue(0));\n}\n+TEST_F(FlockTest, TestExclusiveLockFailExclusiveHolderBlocking_NoRandomSave) {\n+ const DisableSave ds; // Timing-related.\n+\n+ // This test will verify that an exclusive lock is denied while\n+ // someone already holds an exclsuive lock.\n+ ASSERT_THAT(flock(test_file_fd_.get(), LOCK_EX | LOCK_NB),\n+ SyscallSucceedsWithValue(0));\n+\n+ const FileDescriptor fd =\n+ ASSERT_NO_ERRNO_AND_VALUE(Open(test_file_name_, O_RDWR));\n+\n+ // Register a signal handler for SIGALRM and set an alarm that will go off\n+ // while blocking in the subsequent flock() call. This will interrupt flock()\n+ // and cause it to return EINTR.\n+ struct sigaction act = {};\n+ act.sa_handler = trivial_handler;\n+ ASSERT_THAT(sigaction(SIGALRM, &act, NULL), SyscallSucceeds());\n+ ASSERT_THAT(ualarm(10000, 0), SyscallSucceeds());\n+ ASSERT_THAT(flock(fd.get(), LOCK_EX), SyscallFailsWithErrno(EINTR));\n+\n+ // Unlock\n+ ASSERT_THAT(flock(test_file_fd_.get(), LOCK_UN), SyscallSucceedsWithValue(0));\n+}\n+\nTEST_F(FlockTest, TestMultipleHolderSharedExclusiveUpgrade) {\n// This test will verify that we cannot obtain an exclusive lock while\n// a shared lock is held by another descriptor, then verify that an upgrade\n" } ]
Go
Apache License 2.0
google/gvisor
Return appropriate errors when file locking is unsuccessful. test_eintr now passes in the Python runtime tests. Updates #3515. PiperOrigin-RevId: 327441081
259,975
19.08.2020 09:48:55
25,200
33317222917d805edd71f0d2ddd7a208bd57fc4c
Skip ECN test for native/linux tests. Skip check for ECN bits in native/linux tests. General advice for the ECN field is to leave the setting to the kernel, so behavior of the test is undefined for different kernels.
[ { "change_type": "MODIFY", "old_path": "test/syscalls/linux/socket_ip_unbound.cc", "new_path": "test/syscalls/linux/socket_ip_unbound.cc", "diff": "@@ -217,6 +217,8 @@ TEST_P(IPUnboundSocketTest, InvalidLargeTOS) {\n}\nTEST_P(IPUnboundSocketTest, CheckSkipECN) {\n+ // Test is inconsistant on different kernels.\n+ SKIP_IF(!IsRunningOnGvisor());\nauto socket = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());\nint set = 0xFF;\nsocklen_t set_sz = sizeof(set);\n" } ]
Go
Apache License 2.0
google/gvisor
Skip ECN test for native/linux tests. Skip check for ECN bits in native/linux tests. General advice for the ECN field is to leave the setting to the kernel, so behavior of the test is undefined for different kernels. http://www.masterraghu.com/subjects/np/introduction/unix_network_programming_v1.3/ch07lev1sec6.html PiperOrigin-RevId: 327451414
259,992
19.08.2020 11:43:24
25,200
41777e90ea6a48eadbfdedc71029a634e1a23b5f
Change runtimeoptions proto handling. Stolen from cl/327337408 (ascannell is OOO)
[ { "change_type": "MODIFY", "old_path": "pkg/shim/v2/runtimeoptions/BUILD", "new_path": "pkg/shim/v2/runtimeoptions/BUILD", "diff": "-load(\"//tools:defs.bzl\", \"go_library\", \"proto_library\")\n+load(\"//tools:defs.bzl\", \"go_library\", \"go_test\", \"proto_library\")\npackage(licenses = [\"notice\"])\n@@ -14,7 +14,19 @@ go_library(\nsrcs = [\"runtimeoptions.go\"],\nvisibility = [\"//pkg/shim/v2:__pkg__\"],\ndeps = [\n- \"//pkg/shim/v2/runtimeoptions:api_go_proto\",\n+ \":api_go_proto\",\n\"@com_github_gogo_protobuf//proto:go_default_library\",\n],\n)\n+\n+go_test(\n+ name = \"runtimeoptions_test\",\n+ size = \"small\",\n+ srcs = [\"runtimeoptions_test.go\"],\n+ library = \":runtimeoptions\",\n+ deps = [\n+ \"@com_github_containerd_containerd//runtime/v1/shim/v1:go_default_library\",\n+ \"@com_github_containerd_typeurl//:go_default_library\",\n+ \"@com_github_golang_protobuf//proto:go_default_library\",\n+ ],\n+)\n" }, { "change_type": "MODIFY", "old_path": "pkg/shim/v2/runtimeoptions/runtimeoptions.go", "new_path": "pkg/shim/v2/runtimeoptions/runtimeoptions.go", "diff": "@@ -23,5 +23,8 @@ import (\ntype Options = pb.Options\nfunc init() {\n+ // The generated proto file auto registers with \"golang/protobuf/proto\"\n+ // package. However, typeurl uses \"golang/gogo/protobuf/proto\". So registers\n+ // the type there too.\nproto.RegisterType((*Options)(nil), \"cri.runtimeoptions.v1.Options\")\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/shim/v2/runtimeoptions/runtimeoptions.proto", "new_path": "pkg/shim/v2/runtimeoptions/runtimeoptions.proto", "diff": "syntax = \"proto3\";\n-package runtimeoptions;\n+package cri.runtimeoptions.v1;\n// This is a version of the runtimeoptions CRI API that is vendored.\n//\n-// Imported the full CRI package is a nightmare.\n+// Importing the full CRI package is a nightmare.\nmessage Options {\nstring type_url = 1;\nstring config_path = 2;\n" }, { "change_type": "ADD", "old_path": null, "new_path": "pkg/shim/v2/runtimeoptions/runtimeoptions_test.go", "diff": "+// Copyright 2020 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// https://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package runtimeoptions\n+\n+import (\n+ \"testing\"\n+\n+ shim \"github.com/containerd/containerd/runtime/v1/shim/v1\"\n+ \"github.com/containerd/typeurl\"\n+ \"github.com/golang/protobuf/proto\"\n+)\n+\n+func TestCreateTaskRequest(t *testing.T) {\n+ // Serialize the top-level message.\n+ const encodedText = `options: <\n+ type_url: \"cri.runtimeoptions.v1.Options\"\n+ value: \"\\n\\010type_url\\022\\013config_path\"\n+>`\n+ got := &shim.CreateTaskRequest{} // Should have raw options.\n+ if err := proto.UnmarshalText(encodedText, got); err != nil {\n+ t.Fatalf(\"unable to unmarshal text: %v\", err)\n+ }\n+ t.Logf(\"got: %s\", proto.MarshalTextString(got))\n+\n+ // Check the options.\n+ wantOptions := &Options{}\n+ wantOptions.TypeUrl = \"type_url\"\n+ wantOptions.ConfigPath = \"config_path\"\n+ gotMessage, err := typeurl.UnmarshalAny(got.Options)\n+ if err != nil {\n+ 
t.Fatalf(\"unable to unmarshal any: %v\", err)\n+ }\n+ gotOptions, ok := gotMessage.(*Options)\n+ if !ok {\n+ t.Fatalf(\"got %v, want %v\", gotMessage, wantOptions)\n+ }\n+ if !proto.Equal(gotOptions, wantOptions) {\n+ t.Fatalf(\"got %v, want %v\", gotOptions, wantOptions)\n+ }\n+}\n" } ]
Go
Apache License 2.0
google/gvisor
Change runtimeoptions proto handling. Stolen from cl/327337408 (ascannell is OOO) PiperOrigin-RevId: 327475423
259,891
19.08.2020 11:55:21
25,200
5cf330106a0169856e87abe9fec9cfa8c0b3fc1a
ip6tables: test initial state Tests that we have the correct initial (empty) state for ip6tables.
[ { "change_type": "MODIFY", "old_path": "test/syscalls/BUILD", "new_path": "test/syscalls/BUILD", "diff": "@@ -250,6 +250,10 @@ syscall_test(\ntest = \"//test/syscalls/linux:iptables_test\",\n)\n+syscall_test(\n+ test = \"//test/syscalls/linux:ip6tables_test\",\n+)\n+\nsyscall_test(\nsize = \"large\",\nshard_count = 5,\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/BUILD", "new_path": "test/syscalls/linux/BUILD", "diff": "@@ -1029,6 +1029,24 @@ cc_binary(\n],\n)\n+cc_binary(\n+ name = \"ip6tables_test\",\n+ testonly = 1,\n+ srcs = [\n+ \"ip6tables.cc\",\n+ ],\n+ linkstatic = 1,\n+ deps = [\n+ \":iptables_types\",\n+ \":socket_test_util\",\n+ \"//test/util:capability_util\",\n+ \"//test/util:file_descriptor\",\n+ gtest,\n+ \"//test/util:test_main\",\n+ \"//test/util:test_util\",\n+ ],\n+)\n+\ncc_binary(\nname = \"itimer_test\",\ntestonly = 1,\n" }, { "change_type": "ADD", "old_path": null, "new_path": "test/syscalls/linux/ip6tables.cc", "diff": "+// Copyright 2020 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+#include <linux/capability.h>\n+#include <sys/socket.h>\n+\n+#include \"gtest/gtest.h\"\n+#include \"test/syscalls/linux/iptables.h\"\n+#include \"test/syscalls/linux/socket_test_util.h\"\n+#include \"test/util/capability_util.h\"\n+#include \"test/util/file_descriptor.h\"\n+#include \"test/util/test_util.h\"\n+\n+namespace gvisor {\n+namespace testing {\n+\n+namespace {\n+\n+constexpr char kNatTablename[] = \"nat\";\n+constexpr char kErrorTarget[] = \"ERROR\";\n+constexpr size_t kEmptyStandardEntrySize =\n+ sizeof(struct ip6t_entry) + sizeof(struct xt_standard_target);\n+constexpr size_t kEmptyErrorEntrySize =\n+ sizeof(struct ip6t_entry) + sizeof(struct xt_error_target);\n+\n+// This tests the initial state of a machine with empty ip6tables via\n+// getsockopt(IP6T_SO_GET_INFO). 
We don't have a guarantee that the iptables are\n+// empty when running in native, but we can test that gVisor has the same\n+// initial state that a newly-booted Linux machine would have.\n+TEST(IP6TablesTest, InitialInfo) {\n+ // TODO(gvisor.dev/issue/3549): Enable for ip6tables.\n+ SKIP_IF(true);\n+ SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));\n+\n+ FileDescriptor sock =\n+ ASSERT_NO_ERRNO_AND_VALUE(Socket(AF_INET6, SOCK_RAW, IPPROTO_RAW));\n+\n+ // Get info via sockopt.\n+ struct ipt_getinfo info = {};\n+ snprintf(info.name, XT_TABLE_MAXNAMELEN, \"%s\", kNatTablename);\n+ socklen_t info_size = sizeof(info);\n+ ASSERT_THAT(\n+ getsockopt(sock.get(), SOL_IPV6, IP6T_SO_GET_INFO, &info, &info_size),\n+ SyscallSucceeds());\n+\n+ // The nat table supports PREROUTING, and OUTPUT.\n+ unsigned int valid_hooks =\n+ (1 << NF_IP6_PRE_ROUTING) | (1 << NF_IP6_LOCAL_OUT) |\n+ (1 << NF_IP6_POST_ROUTING) | (1 << NF_IP6_LOCAL_IN);\n+ EXPECT_EQ(info.valid_hooks, valid_hooks);\n+\n+ // Each chain consists of an empty entry with a standard target..\n+ EXPECT_EQ(info.hook_entry[NF_IP6_PRE_ROUTING], 0);\n+ EXPECT_EQ(info.hook_entry[NF_IP6_LOCAL_IN], kEmptyStandardEntrySize);\n+ EXPECT_EQ(info.hook_entry[NF_IP6_LOCAL_OUT], kEmptyStandardEntrySize * 2);\n+ EXPECT_EQ(info.hook_entry[NF_IP6_POST_ROUTING], kEmptyStandardEntrySize * 3);\n+\n+ // The underflow points are the same as the entry points.\n+ EXPECT_EQ(info.underflow[NF_IP6_PRE_ROUTING], 0);\n+ EXPECT_EQ(info.underflow[NF_IP6_LOCAL_IN], kEmptyStandardEntrySize);\n+ EXPECT_EQ(info.underflow[NF_IP6_LOCAL_OUT], kEmptyStandardEntrySize * 2);\n+ EXPECT_EQ(info.underflow[NF_IP6_POST_ROUTING], kEmptyStandardEntrySize * 3);\n+\n+ // One entry for each chain, plus an error entry at the end.\n+ EXPECT_EQ(info.num_entries, 5);\n+\n+ EXPECT_EQ(info.size, 4 * kEmptyStandardEntrySize + kEmptyErrorEntrySize);\n+ EXPECT_EQ(strcmp(info.name, kNatTablename), 0);\n+}\n+\n+// This tests the initial state of a machine with empty ip6tables via\n+// getsockopt(IP6T_SO_GET_ENTRIES). 
We don't have a guarantee that the iptables\n+// are empty when running in native, but we can test that gVisor has the same\n+// initial state that a newly-booted Linux machine would have.\n+TEST(IP6TablesTest, InitialEntries) {\n+ // TODO(gvisor.dev/issue/3549): Enable for ip6tables.\n+ SKIP_IF(true);\n+ SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));\n+\n+ FileDescriptor sock =\n+ ASSERT_NO_ERRNO_AND_VALUE(Socket(AF_INET6, SOCK_RAW, IPPROTO_RAW));\n+\n+ // Get info via sockopt.\n+ struct ipt_getinfo info = {};\n+ snprintf(info.name, XT_TABLE_MAXNAMELEN, \"%s\", kNatTablename);\n+ socklen_t info_size = sizeof(info);\n+ ASSERT_THAT(\n+ getsockopt(sock.get(), SOL_IPV6, IP6T_SO_GET_INFO, &info, &info_size),\n+ SyscallSucceeds());\n+\n+ // Use info to get entries.\n+ socklen_t entries_size = sizeof(struct ip6t_get_entries) + info.size;\n+ struct ip6t_get_entries* entries =\n+ static_cast<struct ip6t_get_entries*>(malloc(entries_size));\n+ snprintf(entries->name, XT_TABLE_MAXNAMELEN, \"%s\", kNatTablename);\n+ entries->size = info.size;\n+ ASSERT_THAT(getsockopt(sock.get(), SOL_IPV6, IP6T_SO_GET_ENTRIES, entries,\n+ &entries_size),\n+ SyscallSucceeds());\n+\n+ // Verify the name and size.\n+ ASSERT_EQ(info.size, entries->size);\n+ ASSERT_EQ(strcmp(entries->name, kNatTablename), 0);\n+\n+ // Verify that the entrytable is 4 entries with accept targets and no matches\n+ // followed by a single error target.\n+ size_t entry_offset = 0;\n+ while (entry_offset < entries->size) {\n+ struct ip6t_entry* entry = reinterpret_cast<struct ip6t_entry*>(\n+ reinterpret_cast<char*>(entries->entrytable) + entry_offset);\n+\n+ // ipv6 should be zeroed.\n+ struct ip6t_ip6 zeroed = {};\n+ ASSERT_EQ(memcmp(static_cast<void*>(&zeroed),\n+ static_cast<void*>(&entry->ipv6), sizeof(zeroed)),\n+ 0);\n+\n+ // target_offset should be zero.\n+ EXPECT_EQ(entry->target_offset, sizeof(ip6t_entry));\n+\n+ if (entry_offset < kEmptyStandardEntrySize * 4) {\n+ // The first 4 entries are standard targets\n+ struct xt_standard_target* target =\n+ reinterpret_cast<struct xt_standard_target*>(entry->elems);\n+ EXPECT_EQ(entry->next_offset, kEmptyStandardEntrySize);\n+ EXPECT_EQ(target->target.u.user.target_size, sizeof(*target));\n+ EXPECT_EQ(strcmp(target->target.u.user.name, \"\"), 0);\n+ EXPECT_EQ(target->target.u.user.revision, 0);\n+ // This is what's returned for an accept verdict. 
I don't know why.\n+ EXPECT_EQ(target->verdict, -NF_ACCEPT - 1);\n+ } else {\n+ // The last entry is an error target\n+ struct xt_error_target* target =\n+ reinterpret_cast<struct xt_error_target*>(entry->elems);\n+ EXPECT_EQ(entry->next_offset, kEmptyErrorEntrySize);\n+ EXPECT_EQ(target->target.u.user.target_size, sizeof(*target));\n+ EXPECT_EQ(strcmp(target->target.u.user.name, kErrorTarget), 0);\n+ EXPECT_EQ(target->target.u.user.revision, 0);\n+ EXPECT_EQ(strcmp(target->errorname, kErrorTarget), 0);\n+ }\n+\n+ entry_offset += entry->next_offset;\n+ break;\n+ }\n+\n+ free(entries);\n+}\n+\n+} // namespace\n+\n+} // namespace testing\n+} // namespace gvisor\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/iptables.cc", "new_path": "test/syscalls/linux/iptables.cc", "diff": "@@ -67,7 +67,7 @@ TEST(IPTablesBasic, FailSockoptNonRaw) {\nstruct ipt_getinfo info = {};\nsnprintf(info.name, XT_TABLE_MAXNAMELEN, \"%s\", kNatTablename);\nsocklen_t info_size = sizeof(info);\n- EXPECT_THAT(getsockopt(sock, IPPROTO_IP, SO_GET_INFO, &info, &info_size),\n+ EXPECT_THAT(getsockopt(sock, IPPROTO_IP, IPT_SO_GET_INFO, &info, &info_size),\nSyscallFailsWithErrno(ENOPROTOOPT));\nASSERT_THAT(close(sock), SyscallSucceeds());\n@@ -112,7 +112,7 @@ TEST_F(IPTablesTest, InitialState) {\nstruct ipt_getinfo info = {};\nsnprintf(info.name, XT_TABLE_MAXNAMELEN, \"%s\", kNatTablename);\nsocklen_t info_size = sizeof(info);\n- ASSERT_THAT(getsockopt(s_, IPPROTO_IP, SO_GET_INFO, &info, &info_size),\n+ ASSERT_THAT(getsockopt(s_, IPPROTO_IP, IPT_SO_GET_INFO, &info, &info_size),\nSyscallSucceeds());\n// The nat table supports PREROUTING, and OUTPUT.\n@@ -148,7 +148,7 @@ TEST_F(IPTablesTest, InitialState) {\nsnprintf(entries->name, XT_TABLE_MAXNAMELEN, \"%s\", kNatTablename);\nentries->size = info.size;\nASSERT_THAT(\n- getsockopt(s_, IPPROTO_IP, SO_GET_ENTRIES, entries, &entries_size),\n+ getsockopt(s_, IPPROTO_IP, IPT_SO_GET_ENTRIES, entries, &entries_size),\nSyscallSucceeds());\n// Verify the name and size.\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/iptables.h", "new_path": "test/syscalls/linux/iptables.h", "diff": "#include <linux/netfilter/x_tables.h>\n#include <linux/netfilter_ipv4.h>\n+#include <linux/netfilter_ipv6.h>\n#include <net/if.h>\n#include <netinet/ip.h>\n#include <stdint.h>\n+//\n+// IPv4 ABI.\n+//\n+\n#define ipt_standard_target xt_standard_target\n#define ipt_entry_target xt_entry_target\n#define ipt_error_target xt_error_target\nenum SockOpts {\n// For setsockopt.\n- BASE_CTL = 64,\n- SO_SET_REPLACE = BASE_CTL,\n- SO_SET_ADD_COUNTERS,\n- SO_SET_MAX = SO_SET_ADD_COUNTERS,\n+ IPT_BASE_CTL = 64,\n+ IPT_SO_SET_REPLACE = IPT_BASE_CTL,\n+ IPT_SO_SET_ADD_COUNTERS = IPT_BASE_CTL + 1,\n+ IPT_SO_SET_MAX = IPT_SO_SET_ADD_COUNTERS,\n// For getsockopt.\n- SO_GET_INFO = BASE_CTL,\n- SO_GET_ENTRIES,\n- SO_GET_REVISION_MATCH,\n- SO_GET_REVISION_TARGET,\n- SO_GET_MAX = SO_GET_REVISION_TARGET\n+ IPT_SO_GET_INFO = IPT_BASE_CTL,\n+ IPT_SO_GET_ENTRIES = IPT_BASE_CTL + 1,\n+ IPT_SO_GET_REVISION_MATCH = IPT_BASE_CTL + 2,\n+ IPT_SO_GET_REVISION_TARGET = IPT_BASE_CTL + 3,\n+ IPT_SO_GET_MAX = IPT_SO_GET_REVISION_TARGET\n};\n// ipt_ip specifies basic matching criteria that can be applied by examining\n@@ -115,7 +120,7 @@ struct ipt_entry {\nunsigned char elems[0];\n};\n-// Passed to getsockopt(SO_GET_INFO).\n+// Passed to getsockopt(IPT_SO_GET_INFO).\nstruct ipt_getinfo {\n// The name of the table. The user only fills this in, the rest is filled in\n// when returning from getsockopt. 
Currently \"nat\" and \"mangle\" are supported.\n@@ -127,7 +132,7 @@ struct ipt_getinfo {\nunsigned int valid_hooks;\n// The offset into the entry table for each valid hook. The entry table is\n- // returned by getsockopt(SO_GET_ENTRIES).\n+ // returned by getsockopt(IPT_SO_GET_ENTRIES).\nunsigned int hook_entry[NF_IP_NUMHOOKS];\n// For each valid hook, the underflow is the offset into the entry table to\n@@ -142,14 +147,14 @@ struct ipt_getinfo {\nunsigned int underflow[NF_IP_NUMHOOKS];\n// The number of entries in the entry table returned by\n- // getsockopt(SO_GET_ENTRIES).\n+ // getsockopt(IPT_SO_GET_ENTRIES).\nunsigned int num_entries;\n- // The size of the entry table returned by getsockopt(SO_GET_ENTRIES).\n+ // The size of the entry table returned by getsockopt(IPT_SO_GET_ENTRIES).\nunsigned int size;\n};\n-// Passed to getsockopt(SO_GET_ENTRIES).\n+// Passed to getsockopt(IPT_SO_GET_ENTRIES).\nstruct ipt_get_entries {\n// The name of the table. The user fills this in. Currently \"nat\" and \"mangle\"\n// are supported.\n@@ -195,4 +200,103 @@ struct ipt_replace {\nstruct ipt_entry entries[0];\n};\n+//\n+// IPv6 ABI.\n+//\n+\n+enum SockOpts6 {\n+ // For setsockopt.\n+ IP6T_BASE_CTL = 64,\n+ IP6T_SO_SET_REPLACE = IP6T_BASE_CTL,\n+ IP6T_SO_SET_ADD_COUNTERS = IP6T_BASE_CTL + 1,\n+ IP6T_SO_SET_MAX = IP6T_SO_SET_ADD_COUNTERS,\n+\n+ // For getsockopt.\n+ IP6T_SO_GET_INFO = IP6T_BASE_CTL,\n+ IP6T_SO_GET_ENTRIES = IP6T_BASE_CTL + 1,\n+ IP6T_SO_GET_REVISION_MATCH = IP6T_BASE_CTL + 4,\n+ IP6T_SO_GET_REVISION_TARGET = IP6T_BASE_CTL + 5,\n+ IP6T_SO_GET_MAX = IP6T_SO_GET_REVISION_TARGET\n+};\n+\n+// ip6t_ip6 specifies basic matching criteria that can be applied by examining\n+// only the IP header of a packet.\n+struct ip6t_ip6 {\n+ // Source IP address.\n+ struct in6_addr src;\n+\n+ // Destination IP address.\n+ struct in6_addr dst;\n+\n+ // Source IP address mask.\n+ struct in6_addr smsk;\n+\n+ // Destination IP address mask.\n+ struct in6_addr dmsk;\n+\n+ // Input interface.\n+ char iniface[IFNAMSIZ];\n+\n+ // Output interface.\n+ char outiface[IFNAMSIZ];\n+\n+ // Input interface mask.\n+ unsigned char iniface_mask[IFNAMSIZ];\n+\n+ // Output interface mask.\n+ unsigned char outiface_mask[IFNAMSIZ];\n+\n+ // Transport protocol.\n+ uint16_t proto;\n+\n+ // TOS.\n+ uint8_t tos;\n+\n+ // Flags.\n+ uint8_t flags;\n+\n+ // Inverse flags.\n+ uint8_t invflags;\n+};\n+\n+// ip6t_entry is an ip6tables rule.\n+struct ip6t_entry {\n+ // Basic matching information used to match a packet's IP header.\n+ struct ip6t_ip6 ipv6;\n+\n+ // A caching field that isn't used by userspace.\n+ unsigned int nfcache;\n+\n+ // The number of bytes between the start of this entry and the rule's target.\n+ uint16_t target_offset;\n+\n+ // The total size of this rule, from the beginning of the entry to the end of\n+ // the target.\n+ uint16_t next_offset;\n+\n+ // A return pointer not used by userspace.\n+ unsigned int comefrom;\n+\n+ // Counters for packets and bytes, which we don't yet implement.\n+ struct xt_counters counters;\n+\n+ // The data for all this rules matches followed by the target. This runs\n+ // beyond the value of sizeof(struct ip6t_entry).\n+ unsigned char elems[0];\n+};\n+\n+// Passed to getsockopt(IP6T_SO_GET_ENTRIES).\n+struct ip6t_get_entries {\n+ // The name of the table.\n+ char name[XT_TABLE_MAXNAMELEN];\n+\n+ // The size of the entry table in bytes. The user fills this in with the value\n+ // from struct ipt_getinfo.size.\n+ unsigned int size;\n+\n+ // The entries for the given table. 
This will run past the size defined by\n+ // sizeof(struct ip6t_get_entries).\n+ struct ip6t_entry entrytable[0];\n+};\n+\n#endif // GVISOR_TEST_SYSCALLS_IPTABLES_TYPES_H_\n" } ]
Go
Apache License 2.0
google/gvisor
ip6tables: test initial state Tests that we have the correct initial (empty) state for ip6tables. #3549 PiperOrigin-RevId: 327477657
259,891
19.08.2020 13:45:20
25,200
182f66ee5e6dc0206531a31f4d0d66cbb58e8a76
ip6tables: move ipv4-specific logic into its own file A later change will introduce the equivalent IPv6 logic.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/socket/netfilter/BUILD", "new_path": "pkg/sentry/socket/netfilter/BUILD", "diff": "@@ -6,6 +6,7 @@ go_library(\nname = \"netfilter\",\nsrcs = [\n\"extensions.go\",\n+ \"ipv4.go\",\n\"netfilter.go\",\n\"owner_matcher.go\",\n\"targets.go\",\n" }, { "change_type": "ADD", "old_path": null, "new_path": "pkg/sentry/socket/netfilter/ipv4.go", "diff": "+// Copyright 2020 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package netfilter\n+\n+import (\n+ \"bytes\"\n+ \"fmt\"\n+\n+ \"gvisor.dev/gvisor/pkg/abi/linux\"\n+ \"gvisor.dev/gvisor/pkg/binary\"\n+ \"gvisor.dev/gvisor/pkg/syserr\"\n+ \"gvisor.dev/gvisor/pkg/tcpip\"\n+ \"gvisor.dev/gvisor/pkg/tcpip/header\"\n+ \"gvisor.dev/gvisor/pkg/tcpip/stack\"\n+ \"gvisor.dev/gvisor/pkg/usermem\"\n+)\n+\n+// emptyIPv4Filter is for comparison with a rule's filters to determine whether\n+// it is also empty. It is immutable.\n+var emptyIPv4Filter = stack.IPHeaderFilter{\n+ Dst: \"\\x00\\x00\\x00\\x00\",\n+ DstMask: \"\\x00\\x00\\x00\\x00\",\n+ Src: \"\\x00\\x00\\x00\\x00\",\n+ SrcMask: \"\\x00\\x00\\x00\\x00\",\n+}\n+\n+func getEntries4(table stack.Table, info *linux.IPTGetinfo) linux.KernelIPTGetEntries {\n+ var entries linux.KernelIPTGetEntries\n+ copy(entries.Name[:], info.Name[:])\n+\n+ for ruleIdx, rule := range table.Rules {\n+ nflog(\"convert to binary: current offset: %d\", entries.Size)\n+\n+ setHooksAndUnderflow(info, table, entries.Size, ruleIdx)\n+ // Each rule corresponds to an entry.\n+ entry := linux.KernelIPTEntry{\n+ Entry: linux.IPTEntry{\n+ IP: linux.IPTIP{\n+ Protocol: uint16(rule.Filter.Protocol),\n+ },\n+ NextOffset: linux.SizeOfIPTEntry,\n+ TargetOffset: linux.SizeOfIPTEntry,\n+ },\n+ }\n+ copy(entry.Entry.IP.Dst[:], rule.Filter.Dst)\n+ copy(entry.Entry.IP.DstMask[:], rule.Filter.DstMask)\n+ copy(entry.Entry.IP.Src[:], rule.Filter.Src)\n+ copy(entry.Entry.IP.SrcMask[:], rule.Filter.SrcMask)\n+ copy(entry.Entry.IP.OutputInterface[:], rule.Filter.OutputInterface)\n+ copy(entry.Entry.IP.OutputInterfaceMask[:], rule.Filter.OutputInterfaceMask)\n+ if rule.Filter.DstInvert {\n+ entry.Entry.IP.InverseFlags |= linux.IPT_INV_DSTIP\n+ }\n+ if rule.Filter.SrcInvert {\n+ entry.Entry.IP.InverseFlags |= linux.IPT_INV_SRCIP\n+ }\n+ if rule.Filter.OutputInterfaceInvert {\n+ entry.Entry.IP.InverseFlags |= linux.IPT_INV_VIA_OUT\n+ }\n+\n+ for _, matcher := range rule.Matchers {\n+ // Serialize the matcher and add it to the\n+ // entry.\n+ serialized := marshalMatcher(matcher)\n+ nflog(\"convert to binary: matcher serialized as: %v\", serialized)\n+ if len(serialized)%8 != 0 {\n+ panic(fmt.Sprintf(\"matcher %T is not 64-bit aligned\", matcher))\n+ }\n+ entry.Elems = append(entry.Elems, serialized...)\n+ entry.Entry.NextOffset += uint16(len(serialized))\n+ entry.Entry.TargetOffset += uint16(len(serialized))\n+ }\n+\n+ // Serialize and append the target.\n+ serialized := marshalTarget(rule.Target)\n+ if len(serialized)%8 != 0 {\n+ panic(fmt.Sprintf(\"target 
%T is not 64-bit aligned\", rule.Target))\n+ }\n+ entry.Elems = append(entry.Elems, serialized...)\n+ entry.Entry.NextOffset += uint16(len(serialized))\n+\n+ nflog(\"convert to binary: adding entry: %+v\", entry)\n+\n+ entries.Size += uint32(entry.Entry.NextOffset)\n+ entries.Entrytable = append(entries.Entrytable, entry)\n+ info.NumEntries++\n+ }\n+\n+ info.Size = entries.Size\n+ nflog(\"convert to binary: finished with an marshalled size of %d\", info.Size)\n+ return entries\n+}\n+\n+func modifyEntries4(stk *stack.Stack, optVal []byte, replace *linux.IPTReplace, table *stack.Table) (map[uint32]int, *syserr.Error) {\n+ nflog(\"set entries: setting entries in table %q\", replace.Name.String())\n+\n+ // Convert input into a list of rules and their offsets.\n+ var offset uint32\n+ // offsets maps rule byte offsets to their position in table.Rules.\n+ offsets := map[uint32]int{}\n+ for entryIdx := uint32(0); entryIdx < replace.NumEntries; entryIdx++ {\n+ nflog(\"set entries: processing entry at offset %d\", offset)\n+\n+ // Get the struct ipt_entry.\n+ if len(optVal) < linux.SizeOfIPTEntry {\n+ nflog(\"optVal has insufficient size for entry %d\", len(optVal))\n+ return nil, syserr.ErrInvalidArgument\n+ }\n+ var entry linux.IPTEntry\n+ buf := optVal[:linux.SizeOfIPTEntry]\n+ binary.Unmarshal(buf, usermem.ByteOrder, &entry)\n+ initialOptValLen := len(optVal)\n+ optVal = optVal[linux.SizeOfIPTEntry:]\n+\n+ if entry.TargetOffset < linux.SizeOfIPTEntry {\n+ nflog(\"entry has too-small target offset %d\", entry.TargetOffset)\n+ return nil, syserr.ErrInvalidArgument\n+ }\n+\n+ // TODO(gvisor.dev/issue/170): We should support more IPTIP\n+ // filtering fields.\n+ filter, err := filterFromIPTIP(entry.IP)\n+ if err != nil {\n+ nflog(\"bad iptip: %v\", err)\n+ return nil, syserr.ErrInvalidArgument\n+ }\n+\n+ // TODO(gvisor.dev/issue/170): Matchers and targets can specify\n+ // that they only work for certain protocols, hooks, tables.\n+ // Get matchers.\n+ matchersSize := entry.TargetOffset - linux.SizeOfIPTEntry\n+ if len(optVal) < int(matchersSize) {\n+ nflog(\"entry doesn't have enough room for its matchers (only %d bytes remain)\", len(optVal))\n+ return nil, syserr.ErrInvalidArgument\n+ }\n+ matchers, err := parseMatchers(filter, optVal[:matchersSize])\n+ if err != nil {\n+ nflog(\"failed to parse matchers: %v\", err)\n+ return nil, syserr.ErrInvalidArgument\n+ }\n+ optVal = optVal[matchersSize:]\n+\n+ // Get the target of the rule.\n+ targetSize := entry.NextOffset - entry.TargetOffset\n+ if len(optVal) < int(targetSize) {\n+ nflog(\"entry doesn't have enough room for its target (only %d bytes remain)\", len(optVal))\n+ return nil, syserr.ErrInvalidArgument\n+ }\n+ target, err := parseTarget(filter, optVal[:targetSize])\n+ if err != nil {\n+ nflog(\"failed to parse target: %v\", err)\n+ return nil, syserr.ErrInvalidArgument\n+ }\n+ optVal = optVal[targetSize:]\n+\n+ table.Rules = append(table.Rules, stack.Rule{\n+ Filter: filter,\n+ Target: target,\n+ Matchers: matchers,\n+ })\n+ offsets[offset] = int(entryIdx)\n+ offset += uint32(entry.NextOffset)\n+\n+ if initialOptValLen-len(optVal) != int(entry.NextOffset) {\n+ nflog(\"entry NextOffset is %d, but entry took up %d bytes\", entry.NextOffset, initialOptValLen-len(optVal))\n+ return nil, syserr.ErrInvalidArgument\n+ }\n+ }\n+ return offsets, nil\n+}\n+\n+func filterFromIPTIP(iptip linux.IPTIP) (stack.IPHeaderFilter, error) {\n+ if containsUnsupportedFields4(iptip) {\n+ return stack.IPHeaderFilter{}, fmt.Errorf(\"unsupported fields in struct iptip: 
%+v\", iptip)\n+ }\n+ if len(iptip.Dst) != header.IPv4AddressSize || len(iptip.DstMask) != header.IPv4AddressSize {\n+ return stack.IPHeaderFilter{}, fmt.Errorf(\"incorrect length of destination (%d) and/or destination mask (%d) fields\", len(iptip.Dst), len(iptip.DstMask))\n+ }\n+ if len(iptip.Src) != header.IPv4AddressSize || len(iptip.SrcMask) != header.IPv4AddressSize {\n+ return stack.IPHeaderFilter{}, fmt.Errorf(\"incorrect length of source (%d) and/or source mask (%d) fields\", len(iptip.Src), len(iptip.SrcMask))\n+ }\n+\n+ n := bytes.IndexByte([]byte(iptip.OutputInterface[:]), 0)\n+ if n == -1 {\n+ n = len(iptip.OutputInterface)\n+ }\n+ ifname := string(iptip.OutputInterface[:n])\n+\n+ n = bytes.IndexByte([]byte(iptip.OutputInterfaceMask[:]), 0)\n+ if n == -1 {\n+ n = len(iptip.OutputInterfaceMask)\n+ }\n+ ifnameMask := string(iptip.OutputInterfaceMask[:n])\n+\n+ return stack.IPHeaderFilter{\n+ Protocol: tcpip.TransportProtocolNumber(iptip.Protocol),\n+ Dst: tcpip.Address(iptip.Dst[:]),\n+ DstMask: tcpip.Address(iptip.DstMask[:]),\n+ DstInvert: iptip.InverseFlags&linux.IPT_INV_DSTIP != 0,\n+ Src: tcpip.Address(iptip.Src[:]),\n+ SrcMask: tcpip.Address(iptip.SrcMask[:]),\n+ SrcInvert: iptip.InverseFlags&linux.IPT_INV_SRCIP != 0,\n+ OutputInterface: ifname,\n+ OutputInterfaceMask: ifnameMask,\n+ OutputInterfaceInvert: iptip.InverseFlags&linux.IPT_INV_VIA_OUT != 0,\n+ }, nil\n+}\n+\n+func containsUnsupportedFields4(iptip linux.IPTIP) bool {\n+ // The following features are supported:\n+ // - Protocol\n+ // - Dst and DstMask\n+ // - Src and SrcMask\n+ // - The inverse destination IP check flag\n+ // - OutputInterface, OutputInterfaceMask and its inverse.\n+ var emptyInterface = [linux.IFNAMSIZ]byte{}\n+ // Disable any supported inverse flags.\n+ inverseMask := uint8(linux.IPT_INV_DSTIP) | uint8(linux.IPT_INV_SRCIP) | uint8(linux.IPT_INV_VIA_OUT)\n+ return iptip.InputInterface != emptyInterface ||\n+ iptip.InputInterfaceMask != emptyInterface ||\n+ iptip.Flags != 0 ||\n+ iptip.InverseFlags&^inverseMask != 0\n+}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/socket/netfilter/netfilter.go", "new_path": "pkg/sentry/socket/netfilter/netfilter.go", "diff": "package netfilter\nimport (\n- \"bytes\"\n\"errors\"\n\"fmt\"\n@@ -26,8 +25,6 @@ import (\n\"gvisor.dev/gvisor/pkg/log\"\n\"gvisor.dev/gvisor/pkg/sentry/kernel\"\n\"gvisor.dev/gvisor/pkg/syserr\"\n- \"gvisor.dev/gvisor/pkg/tcpip\"\n- \"gvisor.dev/gvisor/pkg/tcpip/header\"\n\"gvisor.dev/gvisor/pkg/tcpip/stack\"\n\"gvisor.dev/gvisor/pkg/usermem\"\n)\n@@ -37,15 +34,6 @@ import (\n// developing iptables, but can pollute sentry logs otherwise.\nconst enableLogging = false\n-// emptyFilter is for comparison with a rule's filters to determine whether it\n-// is also empty. 
It is immutable.\n-var emptyFilter = stack.IPHeaderFilter{\n- Dst: \"\\x00\\x00\\x00\\x00\",\n- DstMask: \"\\x00\\x00\\x00\\x00\",\n- Src: \"\\x00\\x00\\x00\\x00\",\n- SrcMask: \"\\x00\\x00\\x00\\x00\",\n-}\n-\n// nflog logs messages related to the writing and reading of iptables.\nfunc nflog(format string, args ...interface{}) {\nif enableLogging && log.IsLogging(log.Debug) {\n@@ -71,9 +59,9 @@ func GetInfo(t *kernel.Task, stack *stack.Stack, outPtr usermem.Addr) (linux.IPT\nreturn info, nil\n}\n-// GetEntries returns netstack's iptables rules encoded for the iptables tool.\n-func GetEntries(t *kernel.Task, stack *stack.Stack, outPtr usermem.Addr, outLen int) (linux.KernelIPTGetEntries, *syserr.Error) {\n- // Read in the struct and table name.\n+// GetEntries4 returns netstack's iptables rules encoded for the iptables tool.\n+func GetEntries4(t *kernel.Task, stack *stack.Stack, outPtr usermem.Addr, outLen int) (linux.KernelIPTGetEntries, *syserr.Error) {\n+ // Read in the ABI struct.\nvar userEntries linux.IPTGetEntries\nif _, err := userEntries.CopyIn(t, outPtr); err != nil {\nnflog(\"couldn't copy in entries %q\", userEntries.Name)\n@@ -99,108 +87,48 @@ func GetEntries(t *kernel.Task, stack *stack.Stack, outPtr usermem.Addr, outLen\n// format expected by the iptables tool. Linux stores each table as a binary\n// blob that can only be traversed by parsing a bit, reading some offsets,\n// jumping to those offsets, parsing again, etc.\n-func convertNetstackToBinary(stack *stack.Stack, tablename linux.TableName) (linux.KernelIPTGetEntries, linux.IPTGetinfo, error) {\n- table, ok := stack.IPTables().GetTable(tablename.String())\n+func convertNetstackToBinary(stk *stack.Stack, tablename linux.TableName) (linux.KernelIPTGetEntries, linux.IPTGetinfo, error) {\n+ // The table name has to fit in the struct.\n+ if linux.XT_TABLE_MAXNAMELEN < len(tablename) {\n+ return linux.KernelIPTGetEntries{}, linux.IPTGetinfo{}, fmt.Errorf(\"table name %q too long\", tablename)\n+ }\n+\n+ table, ok := stk.IPTables().GetTable(tablename.String())\nif !ok {\nreturn linux.KernelIPTGetEntries{}, linux.IPTGetinfo{}, fmt.Errorf(\"couldn't find table %q\", tablename)\n}\n- var entries linux.KernelIPTGetEntries\n+ // Setup the info struct.\nvar info linux.IPTGetinfo\ninfo.ValidHooks = table.ValidHooks()\n-\n- // The table name has to fit in the struct.\n- if linux.XT_TABLE_MAXNAMELEN < len(tablename) {\n- return linux.KernelIPTGetEntries{}, linux.IPTGetinfo{}, fmt.Errorf(\"table name %q too long\", tablename)\n- }\ncopy(info.Name[:], tablename[:])\n- copy(entries.Name[:], tablename[:])\n- for ruleIdx, rule := range table.Rules {\n- nflog(\"convert to binary: current offset: %d\", entries.Size)\n+ entries := getEntries4(table, &info)\n+ return entries, info, nil\n+}\n+// setHooksAndUnderflow checks whether the rule at ruleIdx is a hook entrypoint\n+// or underflow, in which case it fills in info.HookEntry and info.Underflows.\n+func setHooksAndUnderflow(info *linux.IPTGetinfo, table stack.Table, offset uint32, ruleIdx int) {\n// Is this a chain entry point?\nfor hook, hookRuleIdx := range table.BuiltinChains {\nif hookRuleIdx == ruleIdx {\n- nflog(\"convert to binary: found hook %d at offset %d\", hook, entries.Size)\n- info.HookEntry[hook] = entries.Size\n+ nflog(\"convert to binary: found hook %d at offset %d\", hook, offset)\n+ info.HookEntry[hook] = offset\n}\n}\n// Is this a chain underflow point?\nfor underflow, underflowRuleIdx := range table.Underflows {\nif underflowRuleIdx == ruleIdx {\n- nflog(\"convert to 
binary: found underflow %d at offset %d\", underflow, entries.Size)\n- info.Underflow[underflow] = entries.Size\n- }\n- }\n-\n- // Each rule corresponds to an entry.\n- entry := linux.KernelIPTEntry{\n- Entry: linux.IPTEntry{\n- IP: linux.IPTIP{\n- Protocol: uint16(rule.Filter.Protocol),\n- },\n- NextOffset: linux.SizeOfIPTEntry,\n- TargetOffset: linux.SizeOfIPTEntry,\n- },\n- }\n- copy(entry.Entry.IP.Dst[:], rule.Filter.Dst)\n- copy(entry.Entry.IP.DstMask[:], rule.Filter.DstMask)\n- copy(entry.Entry.IP.Src[:], rule.Filter.Src)\n- copy(entry.Entry.IP.SrcMask[:], rule.Filter.SrcMask)\n- copy(entry.Entry.IP.OutputInterface[:], rule.Filter.OutputInterface)\n- copy(entry.Entry.IP.OutputInterfaceMask[:], rule.Filter.OutputInterfaceMask)\n- if rule.Filter.DstInvert {\n- entry.Entry.IP.InverseFlags |= linux.IPT_INV_DSTIP\n- }\n- if rule.Filter.SrcInvert {\n- entry.Entry.IP.InverseFlags |= linux.IPT_INV_SRCIP\n- }\n- if rule.Filter.OutputInterfaceInvert {\n- entry.Entry.IP.InverseFlags |= linux.IPT_INV_VIA_OUT\n- }\n-\n- for _, matcher := range rule.Matchers {\n- // Serialize the matcher and add it to the\n- // entry.\n- serialized := marshalMatcher(matcher)\n- nflog(\"convert to binary: matcher serialized as: %v\", serialized)\n- if len(serialized)%8 != 0 {\n- panic(fmt.Sprintf(\"matcher %T is not 64-bit aligned\", matcher))\n- }\n- entry.Elems = append(entry.Elems, serialized...)\n- entry.Entry.NextOffset += uint16(len(serialized))\n- entry.Entry.TargetOffset += uint16(len(serialized))\n- }\n-\n- // Serialize and append the target.\n- serialized := marshalTarget(rule.Target)\n- if len(serialized)%8 != 0 {\n- panic(fmt.Sprintf(\"target %T is not 64-bit aligned\", rule.Target))\n+ nflog(\"convert to binary: found underflow %d at offset %d\", underflow, offset)\n+ info.Underflow[underflow] = offset\n}\n- entry.Elems = append(entry.Elems, serialized...)\n- entry.Entry.NextOffset += uint16(len(serialized))\n-\n- nflog(\"convert to binary: adding entry: %+v\", entry)\n-\n- entries.Size += uint32(entry.Entry.NextOffset)\n- entries.Entrytable = append(entries.Entrytable, entry)\n- info.NumEntries++\n}\n-\n- nflog(\"convert to binary: finished with an marshalled size of %d\", info.Size)\n- info.Size = entries.Size\n- return entries, info, nil\n}\n// SetEntries sets iptables rules for a single table. 
See\n// net/ipv4/netfilter/ip_tables.c:translate_table for reference.\nfunc SetEntries(stk *stack.Stack, optVal []byte) *syserr.Error {\n- // Get the basic rules data (struct ipt_replace).\n- if len(optVal) < linux.SizeOfIPTReplace {\n- nflog(\"optVal has insufficient size for replace %d\", len(optVal))\n- return syserr.ErrInvalidArgument\n- }\nvar replace linux.IPTReplace\nreplaceBuf := optVal[:linux.SizeOfIPTReplace]\noptVal = optVal[linux.SizeOfIPTReplace:]\n@@ -218,79 +146,9 @@ func SetEntries(stk *stack.Stack, optVal []byte) *syserr.Error {\nreturn syserr.ErrInvalidArgument\n}\n- nflog(\"set entries: setting entries in table %q\", replace.Name.String())\n-\n- // Convert input into a list of rules and their offsets.\n- var offset uint32\n- // offsets maps rule byte offsets to their position in table.Rules.\n- offsets := map[uint32]int{}\n- for entryIdx := uint32(0); entryIdx < replace.NumEntries; entryIdx++ {\n- nflog(\"set entries: processing entry at offset %d\", offset)\n-\n- // Get the struct ipt_entry.\n- if len(optVal) < linux.SizeOfIPTEntry {\n- nflog(\"optVal has insufficient size for entry %d\", len(optVal))\n- return syserr.ErrInvalidArgument\n- }\n- var entry linux.IPTEntry\n- buf := optVal[:linux.SizeOfIPTEntry]\n- binary.Unmarshal(buf, usermem.ByteOrder, &entry)\n- initialOptValLen := len(optVal)\n- optVal = optVal[linux.SizeOfIPTEntry:]\n-\n- if entry.TargetOffset < linux.SizeOfIPTEntry {\n- nflog(\"entry has too-small target offset %d\", entry.TargetOffset)\n- return syserr.ErrInvalidArgument\n- }\n-\n- // TODO(gvisor.dev/issue/170): We should support more IPTIP\n- // filtering fields.\n- filter, err := filterFromIPTIP(entry.IP)\n- if err != nil {\n- nflog(\"bad iptip: %v\", err)\n- return syserr.ErrInvalidArgument\n- }\n-\n- // TODO(gvisor.dev/issue/170): Matchers and targets can specify\n- // that they only work for certain protocols, hooks, tables.\n- // Get matchers.\n- matchersSize := entry.TargetOffset - linux.SizeOfIPTEntry\n- if len(optVal) < int(matchersSize) {\n- nflog(\"entry doesn't have enough room for its matchers (only %d bytes remain)\", len(optVal))\n- return syserr.ErrInvalidArgument\n- }\n- matchers, err := parseMatchers(filter, optVal[:matchersSize])\n+ offsets, err := modifyEntries4(stk, optVal, &replace, &table)\nif err != nil {\n- nflog(\"failed to parse matchers: %v\", err)\n- return syserr.ErrInvalidArgument\n- }\n- optVal = optVal[matchersSize:]\n-\n- // Get the target of the rule.\n- targetSize := entry.NextOffset - entry.TargetOffset\n- if len(optVal) < int(targetSize) {\n- nflog(\"entry doesn't have enough room for its target (only %d bytes remain)\", len(optVal))\n- return syserr.ErrInvalidArgument\n- }\n- target, err := parseTarget(filter, optVal[:targetSize])\n- if err != nil {\n- nflog(\"failed to parse target: %v\", err)\n- return syserr.ErrInvalidArgument\n- }\n- optVal = optVal[targetSize:]\n-\n- table.Rules = append(table.Rules, stack.Rule{\n- Filter: filter,\n- Target: target,\n- Matchers: matchers,\n- })\n- offsets[offset] = int(entryIdx)\n- offset += uint32(entry.NextOffset)\n-\n- if initialOptValLen-len(optVal) != int(entry.NextOffset) {\n- nflog(\"entry NextOffset is %d, but entry took up %d bytes\", entry.NextOffset, initialOptValLen-len(optVal))\n- return syserr.ErrInvalidArgument\n- }\n+ return err\n}\n// Go through the list of supported hooks for this table and, for each\n@@ -323,7 +181,7 @@ func SetEntries(stk *stack.Stack, optVal []byte) *syserr.Error {\n}\n}\n- // Add the user chains.\n+ // Check the user chains.\nfor 
ruleIdx, rule := range table.Rules {\nif _, ok := rule.Target.(stack.UserChainTarget); !ok {\ncontinue\n@@ -404,7 +262,6 @@ func parseMatchers(filter stack.IPHeaderFilter, optVal []byte) ([]stack.Matcher,\n// Check some invariants.\nif match.MatchSize < linux.SizeOfXTEntryMatch {\n-\nreturn nil, fmt.Errorf(\"match size is too small, must be at least %d\", linux.SizeOfXTEntryMatch)\n}\nif len(optVal) < int(match.MatchSize) {\n@@ -429,64 +286,11 @@ func parseMatchers(filter stack.IPHeaderFilter, optVal []byte) ([]stack.Matcher,\nreturn matchers, nil\n}\n-func filterFromIPTIP(iptip linux.IPTIP) (stack.IPHeaderFilter, error) {\n- if containsUnsupportedFields(iptip) {\n- return stack.IPHeaderFilter{}, fmt.Errorf(\"unsupported fields in struct iptip: %+v\", iptip)\n- }\n- if len(iptip.Dst) != header.IPv4AddressSize || len(iptip.DstMask) != header.IPv4AddressSize {\n- return stack.IPHeaderFilter{}, fmt.Errorf(\"incorrect length of destination (%d) and/or destination mask (%d) fields\", len(iptip.Dst), len(iptip.DstMask))\n- }\n- if len(iptip.Src) != header.IPv4AddressSize || len(iptip.SrcMask) != header.IPv4AddressSize {\n- return stack.IPHeaderFilter{}, fmt.Errorf(\"incorrect length of source (%d) and/or source mask (%d) fields\", len(iptip.Src), len(iptip.SrcMask))\n- }\n-\n- n := bytes.IndexByte([]byte(iptip.OutputInterface[:]), 0)\n- if n == -1 {\n- n = len(iptip.OutputInterface)\n- }\n- ifname := string(iptip.OutputInterface[:n])\n-\n- n = bytes.IndexByte([]byte(iptip.OutputInterfaceMask[:]), 0)\n- if n == -1 {\n- n = len(iptip.OutputInterfaceMask)\n- }\n- ifnameMask := string(iptip.OutputInterfaceMask[:n])\n-\n- return stack.IPHeaderFilter{\n- Protocol: tcpip.TransportProtocolNumber(iptip.Protocol),\n- Dst: tcpip.Address(iptip.Dst[:]),\n- DstMask: tcpip.Address(iptip.DstMask[:]),\n- DstInvert: iptip.InverseFlags&linux.IPT_INV_DSTIP != 0,\n- Src: tcpip.Address(iptip.Src[:]),\n- SrcMask: tcpip.Address(iptip.SrcMask[:]),\n- SrcInvert: iptip.InverseFlags&linux.IPT_INV_SRCIP != 0,\n- OutputInterface: ifname,\n- OutputInterfaceMask: ifnameMask,\n- OutputInterfaceInvert: iptip.InverseFlags&linux.IPT_INV_VIA_OUT != 0,\n- }, nil\n-}\n-\n-func containsUnsupportedFields(iptip linux.IPTIP) bool {\n- // The following features are supported:\n- // - Protocol\n- // - Dst and DstMask\n- // - Src and SrcMask\n- // - The inverse destination IP check flag\n- // - OutputInterface, OutputInterfaceMask and its inverse.\n- var emptyInterface = [linux.IFNAMSIZ]byte{}\n- // Disable any supported inverse flags.\n- inverseMask := uint8(linux.IPT_INV_DSTIP) | uint8(linux.IPT_INV_SRCIP) | uint8(linux.IPT_INV_VIA_OUT)\n- return iptip.InputInterface != emptyInterface ||\n- iptip.InputInterfaceMask != emptyInterface ||\n- iptip.Flags != 0 ||\n- iptip.InverseFlags&^inverseMask != 0\n-}\n-\nfunc validUnderflow(rule stack.Rule) bool {\nif len(rule.Matchers) != 0 {\nreturn false\n}\n- if rule.Filter != emptyFilter {\n+ if rule.Filter != emptyIPv4Filter {\nreturn false\n}\nswitch rule.Target.(type) {\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/socket/netstack/netstack.go", "new_path": "pkg/sentry/socket/netstack/netstack.go", "diff": "@@ -949,6 +949,9 @@ func (s *SocketOperations) GetSockOpt(t *kernel.Task, level, name int, outPtr us\nif outLen < linux.SizeOfIPTGetinfo {\nreturn nil, syserr.ErrInvalidArgument\n}\n+ if s.family != linux.AF_INET {\n+ return nil, syserr.ErrInvalidArgument\n+ }\nstack := inet.StackFromContext(t)\nif stack == nil {\n@@ -964,12 +967,15 @@ func (s *SocketOperations) GetSockOpt(t 
*kernel.Task, level, name int, outPtr us\nif outLen < linux.SizeOfIPTGetEntries {\nreturn nil, syserr.ErrInvalidArgument\n}\n+ if s.family != linux.AF_INET {\n+ return nil, syserr.ErrInvalidArgument\n+ }\nstack := inet.StackFromContext(t)\nif stack == nil {\nreturn nil, syserr.ErrNoDevice\n}\n- entries, err := netfilter.GetEntries(t, stack.(*Stack).Stack, outPtr, outLen)\n+ entries, err := netfilter.GetEntries4(t, stack.(*Stack).Stack, outPtr, outLen)\nif err != nil {\nreturn nil, err\n}\n@@ -1650,12 +1656,15 @@ func (s *SocketOperations) SetSockOpt(t *kernel.Task, level int, name int, optVa\nreturn nil\n}\n- if s.skType == linux.SOCK_RAW && level == linux.IPPROTO_IP {\n+ if s.skType == linux.SOCK_RAW && level == linux.SOL_IP {\nswitch name {\ncase linux.IPT_SO_SET_REPLACE:\nif len(optVal) < linux.SizeOfIPTReplace {\nreturn syserr.ErrInvalidArgument\n}\n+ if s.family != linux.AF_INET {\n+ return syserr.ErrInvalidArgument\n+ }\nstack := inet.StackFromContext(t)\nif stack == nil {\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/socket/netstack/netstack_vfs2.go", "new_path": "pkg/sentry/socket/netstack/netstack_vfs2.go", "diff": "@@ -239,6 +239,9 @@ func (s *SocketVFS2) GetSockOpt(t *kernel.Task, level, name int, outPtr usermem.\nif outLen < linux.SizeOfIPTGetinfo {\nreturn nil, syserr.ErrInvalidArgument\n}\n+ if s.family != linux.AF_INET {\n+ return nil, syserr.ErrInvalidArgument\n+ }\nstack := inet.StackFromContext(t)\nif stack == nil {\n@@ -254,12 +257,15 @@ func (s *SocketVFS2) GetSockOpt(t *kernel.Task, level, name int, outPtr usermem.\nif outLen < linux.SizeOfIPTGetEntries {\nreturn nil, syserr.ErrInvalidArgument\n}\n+ if s.family != linux.AF_INET {\n+ return nil, syserr.ErrInvalidArgument\n+ }\nstack := inet.StackFromContext(t)\nif stack == nil {\nreturn nil, syserr.ErrNoDevice\n}\n- entries, err := netfilter.GetEntries(t, stack.(*Stack).Stack, outPtr, outLen)\n+ entries, err := netfilter.GetEntries4(t, stack.(*Stack).Stack, outPtr, outLen)\nif err != nil {\nreturn nil, err\n}\n@@ -298,12 +304,15 @@ func (s *SocketVFS2) SetSockOpt(t *kernel.Task, level int, name int, optVal []by\nreturn nil\n}\n- if s.skType == linux.SOCK_RAW && level == linux.IPPROTO_IP {\n+ if s.skType == linux.SOCK_RAW && level == linux.SOL_IP {\nswitch name {\ncase linux.IPT_SO_SET_REPLACE:\nif len(optVal) < linux.SizeOfIPTReplace {\nreturn syserr.ErrInvalidArgument\n}\n+ if s.family != linux.AF_INET {\n+ return syserr.ErrInvalidArgument\n+ }\nstack := inet.StackFromContext(t)\nif stack == nil {\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/strace/socket.go", "new_path": "pkg/sentry/strace/socket.go", "diff": "@@ -632,6 +632,8 @@ var sockOptNames = map[uint64]abi.ValueSet{\nlinux.IPV6_UNICAST_IF: \"IPV6_UNICAST_IF\",\nlinux.MCAST_MSFILTER: \"MCAST_MSFILTER\",\nlinux.IPV6_ADDRFORM: \"IPV6_ADDRFORM\",\n+ linux.IP6T_SO_GET_INFO: \"IP6T_SO_GET_INFO\",\n+ linux.IP6T_SO_GET_ENTRIES: \"IP6T_SO_GET_ENTRIES\",\n},\nlinux.SOL_NETLINK: {\nlinux.NETLINK_BROADCAST_ERROR: \"NETLINK_BROADCAST_ERROR\",\n" } ]
Go
Apache License 2.0
google/gvisor
ip6tables: move ipv4-specific logic into its own file A later change will introduce the equivalent IPv6 logic. #3549 PiperOrigin-RevId: 327499064
259,975
19.08.2020 14:44:42
25,200
f8a9483002a31eb96f99c93a77293ade35a8da5a
Fix return for rseq_test. Accept 128 + SIGNAL as well as SIGNAL as valid returns for fork/exec tests. Also, make changes so that the test compiles in opensource. The test had compile errors on the latest Ubuntu 16.04 image with updated bazel to 3.4.0 (as well as base 2.0) used for Kokoro tests.
[ { "change_type": "MODIFY", "old_path": "test/syscalls/linux/BUILD", "new_path": "test/syscalls/linux/BUILD", "diff": "@@ -1972,6 +1972,7 @@ cc_binary(\ngtest,\n\"//test/util:logging\",\n\"//test/util:multiprocess_util\",\n+ \"//test/util:posix_error\",\n\"//test/util:test_main\",\n\"//test/util:test_util\",\n],\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/rseq.cc", "new_path": "test/syscalls/linux/rseq.cc", "diff": "#include \"test/syscalls/linux/rseq/uapi.h\"\n#include \"test/util/logging.h\"\n#include \"test/util/multiprocess_util.h\"\n+#include \"test/util/posix_error.h\"\n#include \"test/util/test_util.h\"\nnamespace gvisor {\n@@ -31,6 +32,9 @@ namespace testing {\nnamespace {\n+using ::testing::AnyOf;\n+using ::testing::Eq;\n+\n// Syscall test for rseq (restartable sequences).\n//\n// We must be very careful about how these tests are written. Each thread may\n@@ -98,7 +102,7 @@ void RunChildTest(std::string test_case, int want_status) {\nint status = 0;\nASSERT_THAT(RetryEINTR(waitpid)(child_pid, &status, 0), SyscallSucceeds());\n- ASSERT_EQ(status, want_status);\n+ ASSERT_THAT(status, AnyOf(Eq(want_status), Eq(128 + want_status)));\n}\n// Test that rseq must be aligned.\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/rseq/rseq.cc", "new_path": "test/syscalls/linux/rseq/rseq.cc", "diff": "@@ -74,84 +74,95 @@ int TestUnaligned() {\n// Sanity test that registration works.\nint TestRegister() {\nstruct rseq r = {};\n- if (int ret = sys_rseq(&r, sizeof(r), 0, 0); sys_errno(ret) != 0) {\n+ int ret = sys_rseq(&r, sizeof(r), 0, 0);\n+ if (sys_errno(ret) != 0) {\nreturn 1;\n}\nreturn 0;\n-};\n+}\n// Registration can't be done twice.\nint TestDoubleRegister() {\nstruct rseq r = {};\n- if (int ret = sys_rseq(&r, sizeof(r), 0, 0); sys_errno(ret) != 0) {\n+ int ret = sys_rseq(&r, sizeof(r), 0, 0);\n+ if (sys_errno(ret) != 0) {\nreturn 1;\n}\n- if (int ret = sys_rseq(&r, sizeof(r), 0, 0); sys_errno(ret) != EBUSY) {\n+ ret = sys_rseq(&r, sizeof(r), 0, 0);\n+ if (sys_errno(ret) != EBUSY) {\nreturn 1;\n}\nreturn 0;\n-};\n+}\n// Registration can be done again after unregister.\nint TestRegisterUnregister() {\nstruct rseq r = {};\n- if (int ret = sys_rseq(&r, sizeof(r), 0, 0); sys_errno(ret) != 0) {\n+\n+ int ret = sys_rseq(&r, sizeof(r), 0, 0);\n+ if (sys_errno(ret) != 0) {\nreturn 1;\n}\n- if (int ret = sys_rseq(&r, sizeof(r), kRseqFlagUnregister, 0);\n- sys_errno(ret) != 0) {\n+ ret = sys_rseq(&r, sizeof(r), kRseqFlagUnregister, 0);\n+ if (sys_errno(ret) != 0) {\nreturn 1;\n}\n- if (int ret = sys_rseq(&r, sizeof(r), 0, 0); sys_errno(ret) != 0) {\n+ ret = sys_rseq(&r, sizeof(r), 0, 0);\n+ if (sys_errno(ret) != 0) {\nreturn 1;\n}\nreturn 0;\n-};\n+}\n// The pointer to rseq must match on register/unregister.\nint TestUnregisterDifferentPtr() {\nstruct rseq r = {};\n- if (int ret = sys_rseq(&r, sizeof(r), 0, 0); sys_errno(ret) != 0) {\n+\n+ int ret = sys_rseq(&r, sizeof(r), 0, 0);\n+ if (sys_errno(ret) != 0) {\nreturn 1;\n}\nstruct rseq r2 = {};\n- if (int ret = sys_rseq(&r2, sizeof(r2), kRseqFlagUnregister, 0);\n- sys_errno(ret) != EINVAL) {\n+\n+ ret = sys_rseq(&r2, sizeof(r2), kRseqFlagUnregister, 0);\n+ if (sys_errno(ret) != EINVAL) {\nreturn 1;\n}\nreturn 0;\n-};\n+}\n// The signature must match on register/unregister.\nint TestUnregisterDifferentSignature() {\nconstexpr int kSignature = 0;\nstruct rseq r = {};\n- if (int ret = sys_rseq(&r, sizeof(r), 0, kSignature); sys_errno(ret) != 0) {\n+ int ret = sys_rseq(&r, sizeof(r), 0, kSignature);\n+ if 
(sys_errno(ret) != 0) {\nreturn 1;\n}\n- if (int ret = sys_rseq(&r, sizeof(r), kRseqFlagUnregister, kSignature + 1);\n- sys_errno(ret) != EPERM) {\n+ ret = sys_rseq(&r, sizeof(r), kRseqFlagUnregister, kSignature + 1);\n+ if (sys_errno(ret) != EPERM) {\nreturn 1;\n}\nreturn 0;\n-};\n+}\n// The CPU ID is initialized.\nint TestCPU() {\nstruct rseq r = {};\nr.cpu_id = kRseqCPUIDUninitialized;\n- if (int ret = sys_rseq(&r, sizeof(r), 0, 0); sys_errno(ret) != 0) {\n+ int ret = sys_rseq(&r, sizeof(r), 0, 0);\n+ if (sys_errno(ret) != 0) {\nreturn 1;\n}\n@@ -163,13 +174,13 @@ int TestCPU() {\n}\nreturn 0;\n-};\n+}\n// Critical section is eventually aborted.\nint TestAbort() {\nstruct rseq r = {};\n- if (int ret = sys_rseq(&r, sizeof(r), 0, kRseqSignature);\n- sys_errno(ret) != 0) {\n+ int ret = sys_rseq(&r, sizeof(r), 0, kRseqSignature);\n+ if (sys_errno(ret) != 0) {\nreturn 1;\n}\n@@ -185,13 +196,13 @@ int TestAbort() {\nrseq_loop(&r, &cs);\nreturn 0;\n-};\n+}\n// Abort may be before the critical section.\nint TestAbortBefore() {\nstruct rseq r = {};\n- if (int ret = sys_rseq(&r, sizeof(r), 0, kRseqSignature);\n- sys_errno(ret) != 0) {\n+ int ret = sys_rseq(&r, sizeof(r), 0, kRseqSignature);\n+ if (sys_errno(ret) != 0) {\nreturn 1;\n}\n@@ -207,13 +218,13 @@ int TestAbortBefore() {\nrseq_loop(&r, &cs);\nreturn 0;\n-};\n+}\n// Signature must match.\nint TestAbortSignature() {\nstruct rseq r = {};\n- if (int ret = sys_rseq(&r, sizeof(r), 0, kRseqSignature + 1);\n- sys_errno(ret) != 0) {\n+ int ret = sys_rseq(&r, sizeof(r), 0, kRseqSignature + 1);\n+ if (sys_errno(ret) != 0) {\nreturn 1;\n}\n@@ -229,13 +240,13 @@ int TestAbortSignature() {\nrseq_loop(&r, &cs);\nreturn 1;\n-};\n+}\n// Abort must not be in the critical section.\nint TestAbortPreCommit() {\nstruct rseq r = {};\n- if (int ret = sys_rseq(&r, sizeof(r), 0, kRseqSignature + 1);\n- sys_errno(ret) != 0) {\n+ int ret = sys_rseq(&r, sizeof(r), 0, kRseqSignature + 1);\n+ if (sys_errno(ret) != 0) {\nreturn 1;\n}\n@@ -251,13 +262,13 @@ int TestAbortPreCommit() {\nrseq_loop(&r, &cs);\nreturn 1;\n-};\n+}\n// rseq.rseq_cs is cleared on abort.\nint TestAbortClearsCS() {\nstruct rseq r = {};\n- if (int ret = sys_rseq(&r, sizeof(r), 0, kRseqSignature);\n- sys_errno(ret) != 0) {\n+ int ret = sys_rseq(&r, sizeof(r), 0, kRseqSignature);\n+ if (sys_errno(ret) != 0) {\nreturn 1;\n}\n@@ -277,13 +288,13 @@ int TestAbortClearsCS() {\n}\nreturn 0;\n-};\n+}\n// rseq.rseq_cs is cleared on abort outside of critical section.\nint TestInvalidAbortClearsCS() {\nstruct rseq r = {};\n- if (int ret = sys_rseq(&r, sizeof(r), 0, kRseqSignature);\n- sys_errno(ret) != 0) {\n+ int ret = sys_rseq(&r, sizeof(r), 0, kRseqSignature);\n+ if (sys_errno(ret) != 0) {\nreturn 1;\n}\n@@ -306,7 +317,7 @@ int TestInvalidAbortClearsCS() {\n}\nreturn 0;\n-};\n+}\n// Exit codes:\n// 0 - Pass\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/rseq/test.h", "new_path": "test/syscalls/linux/rseq/test.h", "diff": "@@ -20,22 +20,20 @@ namespace testing {\n// Test cases supported by rseq binary.\n-inline constexpr char kRseqTestUnaligned[] = \"unaligned\";\n-inline constexpr char kRseqTestRegister[] = \"register\";\n-inline constexpr char kRseqTestDoubleRegister[] = \"double-register\";\n-inline constexpr char kRseqTestRegisterUnregister[] = \"register-unregister\";\n-inline constexpr char kRseqTestUnregisterDifferentPtr[] =\n- \"unregister-different-ptr\";\n-inline constexpr char kRseqTestUnregisterDifferentSignature[] =\n+constexpr char kRseqTestUnaligned[] = 
\"unaligned\";\n+constexpr char kRseqTestRegister[] = \"register\";\n+constexpr char kRseqTestDoubleRegister[] = \"double-register\";\n+constexpr char kRseqTestRegisterUnregister[] = \"register-unregister\";\n+constexpr char kRseqTestUnregisterDifferentPtr[] = \"unregister-different-ptr\";\n+constexpr char kRseqTestUnregisterDifferentSignature[] =\n\"unregister-different-signature\";\n-inline constexpr char kRseqTestCPU[] = \"cpu\";\n-inline constexpr char kRseqTestAbort[] = \"abort\";\n-inline constexpr char kRseqTestAbortBefore[] = \"abort-before\";\n-inline constexpr char kRseqTestAbortSignature[] = \"abort-signature\";\n-inline constexpr char kRseqTestAbortPreCommit[] = \"abort-precommit\";\n-inline constexpr char kRseqTestAbortClearsCS[] = \"abort-clears-cs\";\n-inline constexpr char kRseqTestInvalidAbortClearsCS[] =\n- \"invalid-abort-clears-cs\";\n+constexpr char kRseqTestCPU[] = \"cpu\";\n+constexpr char kRseqTestAbort[] = \"abort\";\n+constexpr char kRseqTestAbortBefore[] = \"abort-before\";\n+constexpr char kRseqTestAbortSignature[] = \"abort-signature\";\n+constexpr char kRseqTestAbortPreCommit[] = \"abort-precommit\";\n+constexpr char kRseqTestAbortClearsCS[] = \"abort-clears-cs\";\n+constexpr char kRseqTestInvalidAbortClearsCS[] = \"invalid-abort-clears-cs\";\n} // namespace testing\n} // namespace gvisor\n" } ]
Go
Apache License 2.0
google/gvisor
Fix return for rseq_test. Accept 128 + SIGNAL as well as SIGNAL as valid returns for fork/exec tests. Also, make changes so that the test compiles in opensource. The test had compile errors on the latest Ubuntu 16.04 image with updated bazel to 3.4.0 (as well as base 2.0) used for Kokoro tests. PiperOrigin-RevId: 327510310
259,992
19.08.2020 17:03:21
25,200
25babd63519151eb6e70d847d8fd0172c1d7090f
Run bazel build before deleting cached gopath files bazel creates symlinks to the cache on the first invocation. On a new clone, there are no symlinks, thus `rm -rf bazel-bin/gopath` has no effect. Call `bazel build something` first, then delete cached gopath.
[ { "change_type": "MODIFY", "old_path": ".github/workflows/go.yml", "new_path": ".github/workflows/go.yml", "diff": "@@ -49,7 +49,12 @@ jobs:\nkey: ${{ runner.os }}-bazel-${{ hashFiles('WORKSPACE') }}\nrestore-keys: |\n${{ runner.os }}-bazel-\n+ # Create gopath to merge the changes. The first execution will create\n+ # symlinks to the cache, e.g. bazel-bin. Once the cache is setup, delete\n+ # old gopath files that may exist from previous runs (and could contain\n+ # files that are now deleted). Then run gopath again for good.\n- run: |\n+ make build TARGETS=\"//:gopath\"\nrm -rf bazel-bin/gopath\nmake build TARGETS=\"//:gopath\"\n- run: tools/go_branch.sh\n" } ]
Go
Apache License 2.0
google/gvisor
Run bazel build before deleting cached gopath files bazel creates symlinks to the cache on the first invocation. On a new clone, there are no symlinks, thus `rm -rf bazel-bin/gopath` has no effect. Call `bazel build something` first, then delete cached gopath. PiperOrigin-RevId: 327536044
259,962
20.08.2020 11:05:37
25,200
710adf23cd151558af718e1a8a02f2abe1059d82
Use an explicit random src for RandomID.
[ { "change_type": "MODIFY", "old_path": "pkg/test/testutil/testutil.go", "new_path": "pkg/test/testutil/testutil.go", "diff": "@@ -243,12 +243,15 @@ func writeSpec(dir string, spec *specs.Spec) error {\nreturn ioutil.WriteFile(filepath.Join(dir, \"config.json\"), b, 0755)\n}\n+// idRandomSrc is a pseudo random generator used to in RandomID.\n+var idRandomSrc = rand.New(rand.NewSource(time.Now().UnixNano()))\n+\n// RandomID returns 20 random bytes following the given prefix.\nfunc RandomID(prefix string) string {\n// Read 20 random bytes.\nb := make([]byte, 20)\n// \"[Read] always returns len(p) and a nil error.\" --godoc\n- if _, err := rand.Read(b); err != nil {\n+ if _, err := idRandomSrc.Read(b); err != nil {\npanic(\"rand.Read failed: \" + err.Error())\n}\nif prefix != \"\" {\n" } ]
Go
Apache License 2.0
google/gvisor
Use an explicit random src for RandomID. PiperOrigin-RevId: 327659759
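The RandomID change replaces reliance on the global math/rand state with an explicitly seeded private source. A simplified, self-contained sketch of the same pattern (the encoding and prefix handling are assumptions, not the testutil implementation):

```go
package main

import (
	"encoding/base32"
	"fmt"
	"math/rand"
	"time"
)

// idSrc is a private pseudo-random source, seeded once at startup, so ID
// generation does not depend on callers seeding the global math/rand state.
var idSrc = rand.New(rand.NewSource(time.Now().UnixNano()))

func randomID(prefix string) string {
	b := make([]byte, 20)
	idSrc.Read(b) // Read on a *rand.Rand always fills p and never returns an error.
	id := base32.StdEncoding.EncodeToString(b)
	if prefix != "" {
		return prefix + "-" + id
	}
	return id
}

func main() {
	fmt.Println(randomID("test"))
}
```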
259,962
20.08.2020 13:23:21
25,200
f12b545d8fc484f0bb48f280d161acb348263372
Skip listening TCP ports when trying to bind a free port.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/socket/netstack/netstack.go", "new_path": "pkg/sentry/socket/netstack/netstack.go", "diff": "@@ -803,7 +803,20 @@ func (s *socketOpsCommon) Bind(t *kernel.Task, sockaddr []byte) *syserr.Error {\n}\n// Issue the bind request to the endpoint.\n- return syserr.TranslateNetstackError(s.Endpoint.Bind(addr))\n+ err := s.Endpoint.Bind(addr)\n+ if err == tcpip.ErrNoPortAvailable {\n+ // Bind always returns EADDRINUSE irrespective of if the specified port was\n+ // already bound or if an ephemeral port was requested but none were\n+ // available.\n+ //\n+ // tcpip.ErrNoPortAvailable is mapped to EAGAIN in syserr package because\n+ // UDP connect returns EAGAIN on ephemeral port exhaustion.\n+ //\n+ // TCP connect returns EADDRNOTAVAIL on ephemeral port exhaustion.\n+ err = tcpip.ErrPortInUse\n+ }\n+\n+ return syserr.TranslateNetstackError(err)\n}\n// Listen implements the linux syscall listen(2) for sockets backed by\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/ports/ports.go", "new_path": "pkg/tcpip/ports/ports.go", "diff": "@@ -400,7 +400,11 @@ func (s *PortManager) isPortAvailableLocked(networks []tcpip.NetworkProtocolNumb\n// reserved by another endpoint. If port is zero, ReservePort will search for\n// an unreserved ephemeral port and reserve it, returning its value in the\n// \"port\" return value.\n-func (s *PortManager) ReservePort(networks []tcpip.NetworkProtocolNumber, transport tcpip.TransportProtocolNumber, addr tcpip.Address, port uint16, flags Flags, bindToDevice tcpip.NICID, dest tcpip.FullAddress) (reservedPort uint16, err *tcpip.Error) {\n+//\n+// An optional testPort closure can be passed in which if provided will be used\n+// to test if the picked port can be used. The function should return true if\n+// the port is safe to use, false otherwise.\n+func (s *PortManager) ReservePort(networks []tcpip.NetworkProtocolNumber, transport tcpip.TransportProtocolNumber, addr tcpip.Address, port uint16, flags Flags, bindToDevice tcpip.NICID, dest tcpip.FullAddress, testPort func(port uint16) bool) (reservedPort uint16, err *tcpip.Error) {\ns.mu.Lock()\ndefer s.mu.Unlock()\n@@ -412,12 +416,23 @@ func (s *PortManager) ReservePort(networks []tcpip.NetworkProtocolNumber, transp\nif !s.reserveSpecificPort(networks, transport, addr, port, flags, bindToDevice, dst) {\nreturn 0, tcpip.ErrPortInUse\n}\n+ if testPort != nil && !testPort(port) {\n+ s.releasePortLocked(networks, transport, addr, port, flags.Bits(), bindToDevice, dst)\n+ return 0, tcpip.ErrPortInUse\n+ }\nreturn port, nil\n}\n// A port wasn't specified, so try to find one.\nreturn s.PickEphemeralPort(func(p uint16) (bool, *tcpip.Error) {\n- return s.reserveSpecificPort(networks, transport, addr, p, flags, bindToDevice, dst), nil\n+ if !s.reserveSpecificPort(networks, transport, addr, p, flags, bindToDevice, dst) {\n+ return false, nil\n+ }\n+ if testPort != nil && !testPort(p) {\n+ s.releasePortLocked(networks, transport, addr, p, flags.Bits(), bindToDevice, dst)\n+ return false, nil\n+ }\n+ return true, nil\n})\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/ports/ports_test.go", "new_path": "pkg/tcpip/ports/ports_test.go", "diff": "@@ -332,7 +332,7 @@ func TestPortReservation(t *testing.T) {\npm.ReleasePort(net, fakeTransNumber, test.ip, test.port, test.flags, test.device, test.dest)\ncontinue\n}\n- gotPort, err := pm.ReservePort(net, fakeTransNumber, test.ip, test.port, test.flags, test.device, test.dest)\n+ gotPort, err := pm.ReservePort(net, fakeTransNumber, 
test.ip, test.port, test.flags, test.device, test.dest, nil /* testPort */)\nif err != test.want {\nt.Fatalf(\"ReservePort(.., .., %s, %d, %+v, %d, %v) = %v, want %v\", test.ip, test.port, test.flags, test.device, test.dest, err, test.want)\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/tcp/endpoint.go", "new_path": "pkg/tcpip/transport/tcp/endpoint.go", "diff": "@@ -2169,7 +2169,7 @@ func (e *endpoint) connect(addr tcpip.FullAddress, handshake bool, run bool) *tc\nif sameAddr && p == e.ID.RemotePort {\nreturn false, nil\n}\n- if _, err := e.stack.ReservePort(netProtos, ProtocolNumber, e.ID.LocalAddress, p, e.portFlags, e.bindToDevice, addr); err != nil {\n+ if _, err := e.stack.ReservePort(netProtos, ProtocolNumber, e.ID.LocalAddress, p, e.portFlags, e.bindToDevice, addr, nil /* testPort */); err != nil {\nif err != tcpip.ErrPortInUse || !reuse {\nreturn false, nil\n}\n@@ -2207,7 +2207,7 @@ func (e *endpoint) connect(addr tcpip.FullAddress, handshake bool, run bool) *tc\ntcpEP.notifyProtocolGoroutine(notifyAbort)\ntcpEP.UnlockUser()\n// Now try and Reserve again if it fails then we skip.\n- if _, err := e.stack.ReservePort(netProtos, ProtocolNumber, e.ID.LocalAddress, p, e.portFlags, e.bindToDevice, addr); err != nil {\n+ if _, err := e.stack.ReservePort(netProtos, ProtocolNumber, e.ID.LocalAddress, p, e.portFlags, e.bindToDevice, addr, nil /* testPort */); err != nil {\nreturn false, nil\n}\n}\n@@ -2505,47 +2505,45 @@ func (e *endpoint) bindLocked(addr tcpip.FullAddress) (err *tcpip.Error) {\n}\n}\n- port, err := e.stack.ReservePort(netProtos, ProtocolNumber, addr.Addr, addr.Port, e.portFlags, e.bindToDevice, tcpip.FullAddress{})\n- if err != nil {\n- return err\n- }\n-\n- e.boundBindToDevice = e.bindToDevice\n- e.boundPortFlags = e.portFlags\n- e.isPortReserved = true\n- e.effectiveNetProtos = netProtos\n- e.ID.LocalPort = port\n-\n- // Any failures beyond this point must remove the port registration.\n- defer func(portFlags ports.Flags, bindToDevice tcpip.NICID) {\n- if err != nil {\n- e.stack.ReleasePort(netProtos, ProtocolNumber, addr.Addr, port, portFlags, bindToDevice, tcpip.FullAddress{})\n- e.isPortReserved = false\n- e.effectiveNetProtos = nil\n- e.ID.LocalPort = 0\n- e.ID.LocalAddress = \"\"\n- e.boundNICID = 0\n- e.boundBindToDevice = 0\n- e.boundPortFlags = ports.Flags{}\n- }\n- }(e.boundPortFlags, e.boundBindToDevice)\n-\n+ var nic tcpip.NICID\n// If an address is specified, we must ensure that it's one of our\n// local addresses.\nif len(addr.Addr) != 0 {\n- nic := e.stack.CheckLocalAddress(addr.NIC, netProto, addr.Addr)\n+ nic = e.stack.CheckLocalAddress(addr.NIC, netProto, addr.Addr)\nif nic == 0 {\nreturn tcpip.ErrBadLocalAddress\n}\n-\n- e.boundNICID = nic\ne.ID.LocalAddress = addr.Addr\n}\n- if err := e.stack.CheckRegisterTransportEndpoint(e.boundNICID, e.effectiveNetProtos, ProtocolNumber, e.ID, e.boundPortFlags, e.boundBindToDevice); err != nil {\n+ port, err := e.stack.ReservePort(netProtos, ProtocolNumber, addr.Addr, addr.Port, e.portFlags, e.bindToDevice, tcpip.FullAddress{}, func(p uint16) bool {\n+ id := e.ID\n+ id.LocalPort = p\n+ // CheckRegisterTransportEndpoint should only return an error if there is a\n+ // listening endpoint bound with the same id and portFlags and bindToDevice\n+ // options.\n+ //\n+ // NOTE: Only listening and connected endpoint register with\n+ // demuxer. Further connected endpoints always have a remote\n+ // address/port. 
Hence this will only return an error if there is a matching\n+ // listening endpoint.\n+ if err := e.stack.CheckRegisterTransportEndpoint(nic, netProtos, ProtocolNumber, id, e.portFlags, e.bindToDevice); err != nil {\n+ return false\n+ }\n+ return true\n+ })\n+ if err != nil {\nreturn err\n}\n+ e.boundBindToDevice = e.bindToDevice\n+ e.boundPortFlags = e.portFlags\n+ // TODO(gvisor.dev/issue/3691): Add test to verify boundNICID is correct.\n+ e.boundNICID = nic\n+ e.isPortReserved = true\n+ e.effectiveNetProtos = netProtos\n+ e.ID.LocalPort = port\n+\n// Mark endpoint as bound.\ne.setEndpointState(StateBound)\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/udp/endpoint.go", "new_path": "pkg/tcpip/transport/udp/endpoint.go", "diff": "@@ -1226,7 +1226,7 @@ func (*endpoint) Accept() (tcpip.Endpoint, *waiter.Queue, *tcpip.Error) {\nfunc (e *endpoint) registerWithStack(nicID tcpip.NICID, netProtos []tcpip.NetworkProtocolNumber, id stack.TransportEndpointID) (stack.TransportEndpointID, tcpip.NICID, *tcpip.Error) {\nif e.ID.LocalPort == 0 {\n- port, err := e.stack.ReservePort(netProtos, ProtocolNumber, id.LocalAddress, id.LocalPort, e.portFlags, e.bindToDevice, tcpip.FullAddress{})\n+ port, err := e.stack.ReservePort(netProtos, ProtocolNumber, id.LocalAddress, id.LocalPort, e.portFlags, e.bindToDevice, tcpip.FullAddress{}, nil /* testPort */)\nif err != nil {\nreturn id, e.bindToDevice, err\n}\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/socket_inet_loopback.cc", "new_path": "test/syscalls/linux/socket_inet_loopback.cc", "diff": "@@ -2573,6 +2573,44 @@ TEST_P(SocketMultiProtocolInetLoopbackTest, V4EphemeralPortReservedReuseAddr) {\nSyscallSucceeds());\n}\n+TEST_P(SocketMultiProtocolInetLoopbackTest,\n+ MultipleBindsAllowedNoListeningReuseAddr) {\n+ const auto& param = GetParam();\n+ // UDP sockets are allowed to bind/listen on the port w/ SO_REUSEADDR, for TCP\n+ // this is only permitted if there is no other listening socket.\n+ SKIP_IF(param.type != SOCK_STREAM);\n+ // Bind the v4 loopback on a v4 socket.\n+ const TestAddress& test_addr = V4Loopback();\n+ sockaddr_storage bound_addr = test_addr.addr;\n+ FileDescriptor bound_fd =\n+ ASSERT_NO_ERRNO_AND_VALUE(Socket(test_addr.family(), param.type, 0));\n+\n+ ASSERT_THAT(setsockopt(bound_fd.get(), SOL_SOCKET, SO_REUSEADDR, &kSockOptOn,\n+ sizeof(kSockOptOn)),\n+ SyscallSucceeds());\n+ ASSERT_THAT(bind(bound_fd.get(), reinterpret_cast<sockaddr*>(&bound_addr),\n+ test_addr.addr_len),\n+ SyscallSucceeds());\n+ // Get the port that we bound.\n+ socklen_t bound_addr_len = test_addr.addr_len;\n+ ASSERT_THAT(\n+ getsockname(bound_fd.get(), reinterpret_cast<sockaddr*>(&bound_addr),\n+ &bound_addr_len),\n+ SyscallSucceeds());\n+\n+ // Now create a socket and bind it to the same port, this should\n+ // succeed since there is no listening socket for the same port.\n+ FileDescriptor second_fd =\n+ ASSERT_NO_ERRNO_AND_VALUE(Socket(test_addr.family(), param.type, 0));\n+\n+ ASSERT_THAT(setsockopt(second_fd.get(), SOL_SOCKET, SO_REUSEADDR, &kSockOptOn,\n+ sizeof(kSockOptOn)),\n+ SyscallSucceeds());\n+ ASSERT_THAT(bind(second_fd.get(), reinterpret_cast<sockaddr*>(&bound_addr),\n+ test_addr.addr_len),\n+ SyscallSucceeds());\n+}\n+\nTEST_P(SocketMultiProtocolInetLoopbackTest, PortReuseTwoSockets) {\nauto const& param = GetParam();\nTestAddress const& test_addr = V4Loopback();\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/socket_inet_loopback_nogotsan.cc", "new_path": 
"test/syscalls/linux/socket_inet_loopback_nogotsan.cc", "diff": "@@ -168,6 +168,71 @@ INSTANTIATE_TEST_SUITE_P(\nTestParam{V6Loopback(), V6Loopback()}),\nDescribeTestParam);\n+struct ProtocolTestParam {\n+ std::string description;\n+ int type;\n+};\n+\n+std::string DescribeProtocolTestParam(\n+ ::testing::TestParamInfo<ProtocolTestParam> const& info) {\n+ return info.param.description;\n+}\n+\n+using SocketMultiProtocolInetLoopbackTest =\n+ ::testing::TestWithParam<ProtocolTestParam>;\n+\n+TEST_P(SocketMultiProtocolInetLoopbackTest,\n+ BindAvoidsListeningPortsReuseAddr_NoRandomSave) {\n+ const auto& param = GetParam();\n+ // UDP sockets are allowed to bind/listen on the port w/ SO_REUSEADDR, for TCP\n+ // this is only permitted if there is no other listening socket.\n+ SKIP_IF(param.type != SOCK_STREAM);\n+\n+ DisableSave ds; // Too many syscalls.\n+\n+ // A map of port to file descriptor binding the port.\n+ std::map<uint16_t, FileDescriptor> listen_sockets;\n+\n+ // Exhaust all ephemeral ports.\n+ while (true) {\n+ // Bind the v4 loopback on a v4 socket.\n+ TestAddress const& test_addr = V4Loopback();\n+ sockaddr_storage bound_addr = test_addr.addr;\n+ FileDescriptor bound_fd =\n+ ASSERT_NO_ERRNO_AND_VALUE(Socket(test_addr.family(), param.type, 0));\n+\n+ ASSERT_THAT(setsockopt(bound_fd.get(), SOL_SOCKET, SO_REUSEADDR,\n+ &kSockOptOn, sizeof(kSockOptOn)),\n+ SyscallSucceeds());\n+\n+ int ret = bind(bound_fd.get(), reinterpret_cast<sockaddr*>(&bound_addr),\n+ test_addr.addr_len);\n+ if (ret != 0) {\n+ ASSERT_EQ(errno, EADDRINUSE);\n+ break;\n+ }\n+ // Get the port that we bound.\n+ socklen_t bound_addr_len = test_addr.addr_len;\n+ ASSERT_THAT(\n+ getsockname(bound_fd.get(), reinterpret_cast<sockaddr*>(&bound_addr),\n+ &bound_addr_len),\n+ SyscallSucceeds());\n+ uint16_t port = reinterpret_cast<sockaddr_in*>(&bound_addr)->sin_port;\n+\n+ // Newly bound port should not already be in use by a listening socket.\n+ ASSERT_EQ(listen_sockets.find(port), listen_sockets.end());\n+ auto fd = bound_fd.get();\n+ listen_sockets.insert(std::make_pair(port, std::move(bound_fd)));\n+ ASSERT_THAT(listen(fd, SOMAXCONN), SyscallSucceeds());\n+ }\n+}\n+\n+INSTANTIATE_TEST_SUITE_P(\n+ AllFamilies, SocketMultiProtocolInetLoopbackTest,\n+ ::testing::Values(ProtocolTestParam{\"TCP\", SOCK_STREAM},\n+ ProtocolTestParam{\"UDP\", SOCK_DGRAM}),\n+ DescribeProtocolTestParam);\n+\n} // namespace\n} // namespace testing\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/socket_ipv4_udp_unbound.cc", "new_path": "test/syscalls/linux/socket_ipv4_udp_unbound.cc", "diff": "@@ -2121,7 +2121,7 @@ TEST_P(IPv4UDPUnboundSocketTest, ReuseAddrReusePortDistribution) {\nSyscallSucceedsWithValue(kMessageSize));\n}\n-// Check that connect returns EADDRNOTAVAIL when out of local ephemeral ports.\n+// Check that connect returns EAGAIN when out of local ephemeral ports.\n// We disable S/R because this test creates a large number of sockets.\nTEST_P(IPv4UDPUnboundSocketTest, UDPConnectPortExhaustion_NoRandomSave) {\nauto receiver1 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());\n@@ -2154,6 +2154,29 @@ TEST_P(IPv4UDPUnboundSocketTest, UDPConnectPortExhaustion_NoRandomSave) {\n}\n}\n+// Check that bind returns EADDRINUSE when out of local ephemeral ports.\n+// We disable S/R because this test creates a large number of sockets.\n+TEST_P(IPv4UDPUnboundSocketTest, UDPBindPortExhaustion_NoRandomSave) {\n+ auto receiver1 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());\n+ constexpr int kClients = 65536;\n+ auto addr = V4Loopback();\n+ // 
Disable cooperative S/R as we are making too many syscalls.\n+ DisableSave ds;\n+ std::vector<std::unique_ptr<FileDescriptor>> sockets;\n+ for (int i = 0; i < kClients; i++) {\n+ auto s = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());\n+\n+ int ret =\n+ bind(s->get(), reinterpret_cast<sockaddr*>(&addr.addr), addr.addr_len);\n+ if (ret == 0) {\n+ sockets.push_back(std::move(s));\n+ continue;\n+ }\n+ ASSERT_THAT(ret, SyscallFailsWithErrno(EADDRINUSE));\n+ break;\n+ }\n+}\n+\n// Test that socket will receive packet info control message.\nTEST_P(IPv4UDPUnboundSocketTest, SetAndReceiveIPPKTINFO) {\n// TODO(gvisor.dev/issue/1202): ioctl() is not supported by hostinet.\n" } ]
Go
Apache License 2.0
google/gvisor
Skip listening TCP ports when trying to bind a free port. PiperOrigin-RevId: 327686558
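The ReservePort change above threads an optional testPort callback through port reservation so a candidate port can be rejected and rolled back (for example, when a listening endpoint already holds it). The following is a rough, self-contained sketch of that callback shape over a toy in-memory reserver; all names are illustrative and none of this is the netstack API:

```go
package main

import (
	"errors"
	"fmt"
)

var errPortInUse = errors.New("port in use")

type portManager struct {
	reserved map[uint16]bool
}

// reserve takes the requested port, or scans for a free ephemeral one when
// port == 0. If testPort is non-nil and rejects the candidate, the tentative
// reservation is rolled back and the search continues.
func (pm *portManager) reserve(port uint16, testPort func(uint16) bool) (uint16, error) {
	tryOne := func(p uint16) bool {
		if pm.reserved[p] {
			return false
		}
		pm.reserved[p] = true
		if testPort != nil && !testPort(p) {
			delete(pm.reserved, p) // roll back the tentative reservation
			return false
		}
		return true
	}

	if port != 0 {
		if !tryOne(port) {
			return 0, errPortInUse
		}
		return port, nil
	}
	for p := uint16(32768); p != 0; p++ { // scans 32768..65535, then wraps to 0 and stops
		if tryOne(p) {
			return p, nil
		}
	}
	return 0, errPortInUse
}

func main() {
	pm := &portManager{reserved: map[uint16]bool{8080: true}}
	p, err := pm.reserve(0, func(p uint16) bool { return p%2 == 0 }) // only even ports pass
	fmt.Println(p, err)
}
```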
259,860
20.08.2020 14:10:46
25,200
3163aff866852e730777be4ef689b0405c6332cd
Add reference count checking to the fsimpl/host package. Includes a minor refactor for inode construction. Updates
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/host/BUILD", "new_path": "pkg/sentry/fsimpl/host/BUILD", "diff": "load(\"//tools:defs.bzl\", \"go_library\")\n+load(\"//tools/go_generics:defs.bzl\", \"go_template_instance\")\nlicenses([\"notice\"])\n+go_template_instance(\n+ name = \"inode_refs\",\n+ out = \"inode_refs.go\",\n+ package = \"host\",\n+ prefix = \"inode\",\n+ template = \"//pkg/refs_vfs2:refs_template\",\n+ types = {\n+ \"T\": \"inode\",\n+ },\n+)\n+\n+go_template_instance(\n+ name = \"connected_endpoint_refs\",\n+ out = \"connected_endpoint_refs.go\",\n+ package = \"host\",\n+ prefix = \"ConnectedEndpoint\",\n+ template = \"//pkg/refs_vfs2:refs_template\",\n+ types = {\n+ \"T\": \"ConnectedEndpoint\",\n+ },\n+)\n+\ngo_library(\nname = \"host\",\nsrcs = [\n+ \"connected_endpoint_refs.go\",\n\"control.go\",\n\"host.go\",\n+ \"inode_refs.go\",\n\"ioctl_unsafe.go\",\n\"mmap.go\",\n\"socket.go\",\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/host/host.go", "new_path": "pkg/sentry/fsimpl/host/host.go", "diff": "@@ -27,7 +27,6 @@ import (\n\"gvisor.dev/gvisor/pkg/fdnotifier\"\n\"gvisor.dev/gvisor/pkg/fspath\"\n\"gvisor.dev/gvisor/pkg/log\"\n- \"gvisor.dev/gvisor/pkg/refs\"\nfslock \"gvisor.dev/gvisor/pkg/sentry/fs/lock\"\n\"gvisor.dev/gvisor/pkg/sentry/fsimpl/kernfs\"\n\"gvisor.dev/gvisor/pkg/sentry/hostfd\"\n@@ -41,6 +40,44 @@ import (\n\"gvisor.dev/gvisor/pkg/waiter\"\n)\n+func newInode(fs *filesystem, hostFD int, fileType linux.FileMode, isTTY bool) (*inode, error) {\n+ // Determine if hostFD is seekable. If not, this syscall will return ESPIPE\n+ // (see fs/read_write.c:llseek), e.g. for pipes, sockets, and some character\n+ // devices.\n+ _, err := unix.Seek(hostFD, 0, linux.SEEK_CUR)\n+ seekable := err != syserror.ESPIPE\n+\n+ i := &inode{\n+ hostFD: hostFD,\n+ ino: fs.NextIno(),\n+ isTTY: isTTY,\n+ wouldBlock: wouldBlock(uint32(fileType)),\n+ seekable: seekable,\n+ // NOTE(b/38213152): Technically, some obscure char devices can be memory\n+ // mapped, but we only allow regular files.\n+ canMap: fileType == linux.S_IFREG,\n+ }\n+ i.pf.inode = i\n+ i.refs.EnableLeakCheck()\n+\n+ // Non-seekable files can't be memory mapped, assert this.\n+ if !i.seekable && i.canMap {\n+ panic(\"files that can return EWOULDBLOCK (sockets, pipes, etc.) cannot be memory mapped\")\n+ }\n+\n+ // If the hostFD would block, we must set it to non-blocking and handle\n+ // blocking behavior in the sentry.\n+ if i.wouldBlock {\n+ if err := syscall.SetNonblock(i.hostFD, true); err != nil {\n+ return nil, err\n+ }\n+ if err := fdnotifier.AddFD(int32(i.hostFD), &i.queue); err != nil {\n+ return nil, err\n+ }\n+ }\n+ return i, nil\n+}\n+\n// NewFDOptions contains options to NewFD.\ntype NewFDOptions struct {\n// If IsTTY is true, the file descriptor is a TTY.\n@@ -76,44 +113,11 @@ func NewFD(ctx context.Context, mnt *vfs.Mount, hostFD int, opts *NewFDOptions)\nflags = uint32(flagsInt)\n}\n- fileMode := linux.FileMode(s.Mode)\n- fileType := fileMode.FileType()\n-\n- // Determine if hostFD is seekable. If not, this syscall will return ESPIPE\n- // (see fs/read_write.c:llseek), e.g. 
for pipes, sockets, and some character\n- // devices.\n- _, err := unix.Seek(hostFD, 0, linux.SEEK_CUR)\n- seekable := err != syserror.ESPIPE\n-\n- i := &inode{\n- hostFD: hostFD,\n- ino: fs.NextIno(),\n- isTTY: opts.IsTTY,\n- wouldBlock: wouldBlock(uint32(fileType)),\n- seekable: seekable,\n- // NOTE(b/38213152): Technically, some obscure char devices can be memory\n- // mapped, but we only allow regular files.\n- canMap: fileType == linux.S_IFREG,\n- }\n- i.pf.inode = i\n-\n- // Non-seekable files can't be memory mapped, assert this.\n- if !i.seekable && i.canMap {\n- panic(\"files that can return EWOULDBLOCK (sockets, pipes, etc.) cannot be memory mapped\")\n- }\n-\n- // If the hostFD would block, we must set it to non-blocking and handle\n- // blocking behavior in the sentry.\n- if i.wouldBlock {\n- if err := syscall.SetNonblock(i.hostFD, true); err != nil {\n- return nil, err\n- }\n- if err := fdnotifier.AddFD(int32(i.hostFD), &i.queue); err != nil {\n+ d := &kernfs.Dentry{}\n+ i, err := newInode(fs, hostFD, linux.FileMode(s.Mode).FileType(), opts.IsTTY)\n+ if err != nil {\nreturn nil, err\n}\n- }\n-\n- d := &kernfs.Dentry{}\nd.Init(i)\n// i.open will take a reference on d.\n@@ -188,7 +192,7 @@ type inode struct {\nlocks vfs.FileLocks\n// When the reference count reaches zero, the host fd is closed.\n- refs.AtomicRefCount\n+ refs inodeRefs\n// hostFD contains the host fd that this file was originally created from,\n// which must be available at time of restore.\n@@ -430,9 +434,19 @@ func (i *inode) SetStat(ctx context.Context, fs *vfs.Filesystem, creds *auth.Cre\nreturn nil\n}\n+// IncRef implements kernfs.Inode.\n+func (i *inode) IncRef() {\n+ i.refs.IncRef()\n+}\n+\n+// TryIncRef implements kernfs.Inode.\n+func (i *inode) TryIncRef() bool {\n+ return i.refs.TryIncRef()\n+}\n+\n// DecRef implements kernfs.Inode.\nfunc (i *inode) DecRef(ctx context.Context) {\n- i.AtomicRefCount.DecRefWithDestructor(ctx, func(context.Context) {\n+ i.refs.DecRef(func() {\nif i.wouldBlock {\nfdnotifier.RemoveFD(int32(i.hostFD))\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/host/socket.go", "new_path": "pkg/sentry/fsimpl/host/socket.go", "diff": "@@ -22,7 +22,6 @@ import (\n\"gvisor.dev/gvisor/pkg/context\"\n\"gvisor.dev/gvisor/pkg/fdnotifier\"\n\"gvisor.dev/gvisor/pkg/log\"\n- \"gvisor.dev/gvisor/pkg/refs\"\n\"gvisor.dev/gvisor/pkg/sentry/socket/control\"\n\"gvisor.dev/gvisor/pkg/sentry/socket/unix/transport\"\n\"gvisor.dev/gvisor/pkg/sentry/uniqueid\"\n@@ -59,8 +58,7 @@ func newEndpoint(ctx context.Context, hostFD int, queue *waiter.Queue) (transpor\n//\n// +stateify savable\ntype ConnectedEndpoint struct {\n- // ref keeps track of references to a ConnectedEndpoint.\n- ref refs.AtomicRefCount\n+ ConnectedEndpointRefs\n// mu protects fd below.\nmu sync.RWMutex `state:\"nosave\"`\n@@ -132,9 +130,9 @@ func NewConnectedEndpoint(ctx context.Context, hostFD int, addr string, saveable\nreturn nil, err\n}\n- // AtomicRefCounters start off with a single reference. We need two.\n- e.ref.IncRef()\n- e.ref.EnableLeakCheck(\"host.ConnectedEndpoint\")\n+ // ConnectedEndpointRefs start off with a single reference. 
We need two.\n+ e.IncRef()\n+ e.EnableLeakCheck()\nreturn &e, nil\n}\n@@ -318,7 +316,7 @@ func (c *ConnectedEndpoint) destroyLocked() {\n// Release implements transport.ConnectedEndpoint.Release and\n// transport.Receiver.Release.\nfunc (c *ConnectedEndpoint) Release(ctx context.Context) {\n- c.ref.DecRefWithDestructor(ctx, func(context.Context) {\n+ c.DecRef(func() {\nc.mu.Lock()\nc.destroyLocked()\nc.mu.Unlock()\n@@ -348,7 +346,7 @@ func (e *SCMConnectedEndpoint) Init() error {\n// Release implements transport.ConnectedEndpoint.Release and\n// transport.Receiver.Release.\nfunc (e *SCMConnectedEndpoint) Release(ctx context.Context) {\n- e.ref.DecRefWithDestructor(ctx, func(context.Context) {\n+ e.DecRef(func() {\ne.mu.Lock()\nif err := syscall.Close(e.fd); err != nil {\nlog.Warningf(\"Failed to close host fd %d: %v\", err)\n@@ -378,8 +376,8 @@ func NewSCMEndpoint(ctx context.Context, hostFD int, queue *waiter.Queue, addr s\nreturn nil, err\n}\n- // AtomicRefCounters start off with a single reference. We need two.\n- e.ref.IncRef()\n- e.ref.EnableLeakCheck(\"host.SCMConnectedEndpoint\")\n+ // ConnectedEndpointRefs start off with a single reference. We need two.\n+ e.IncRef()\n+ e.EnableLeakCheck()\nreturn &e, nil\n}\n" } ]
Go
Apache License 2.0
google/gvisor
Add reference count checking to the fsimpl/host package. Includes a minor refactor for inode construction. Updates #1486. PiperOrigin-RevId: 327694933
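The host package now uses a generated, type-specialized reference counter with leak checking instead of refs.AtomicRefCount. As a loose illustration only (not the refs_template output), a counter that starts at one reference and runs a destructor when the last reference is dropped might look like this:

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// objRefs is a simplified stand-in for a generated reference counter.
type objRefs struct {
	// refCount is the number of references beyond the initial one, so the
	// zero value already represents "one reference held".
	refCount int64
}

func (r *objRefs) IncRef() {
	if atomic.AddInt64(&r.refCount, 1) <= 0 {
		panic("IncRef on released object")
	}
}

func (r *objRefs) DecRef(destroy func()) {
	switch v := atomic.AddInt64(&r.refCount, -1); {
	case v < -1:
		panic("DecRef past zero")
	case v == -1:
		// Last reference dropped; run the destructor exactly once.
		destroy()
	}
}

func main() {
	var r objRefs
	r.IncRef() // now two references
	r.DecRef(func() { fmt.Println("destroyed") })
	r.DecRef(func() { fmt.Println("destroyed") }) // prints "destroyed"
}
```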
260,003
20.08.2020 15:38:06
25,200
df4822709992c971af0f9b339d915f6e629c0225
stateify: Fix afterLoad not being called for root object
[ { "change_type": "MODIFY", "old_path": "pkg/state/decode.go", "new_path": "pkg/state/decode.go", "diff": "@@ -584,10 +584,12 @@ func (ds *decodeState) Load(obj reflect.Value) {\n})\n// Create the root object.\n- ds.objectsByID = append(ds.objectsByID, &objectDecodeState{\n+ rootOds := &objectDecodeState{\nid: 1,\nobj: obj,\n- })\n+ }\n+ ds.objectsByID = append(ds.objectsByID, rootOds)\n+ ds.pending.PushBack(rootOds)\n// Read the number of objects.\nlastID, object, err := ReadHeader(ds.r)\n" }, { "change_type": "MODIFY", "old_path": "pkg/state/tests/load_test.go", "new_path": "pkg/state/tests/load_test.go", "diff": "@@ -20,6 +20,14 @@ import (\nfunc TestLoadHooks(t *testing.T) {\nrunTestCases(t, false, \"load-hooks\", []interface{}{\n+ // Root object being a struct.\n+ afterLoadStruct{v: 1},\n+ valueLoadStruct{v: 1},\n+ genericContainer{v: &afterLoadStruct{v: 1}},\n+ genericContainer{v: &valueLoadStruct{v: 1}},\n+ sliceContainer{v: []interface{}{&afterLoadStruct{v: 1}}},\n+ sliceContainer{v: []interface{}{&valueLoadStruct{v: 1}}},\n+ // Root object being a pointer.\n&afterLoadStruct{v: 1},\n&valueLoadStruct{v: 1},\n&genericContainer{v: &afterLoadStruct{v: 1}},\n" } ]
Go
Apache License 2.0
google/gvisor
stateify: Fix afterLoad not being called for root object PiperOrigin-RevId: 327711264
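The decode fix above queues the root object alongside every other decoded object so its afterLoad hook also runs. A toy sketch of that two-phase load pattern, with hypothetical types and no relation to the real state package internals:

```go
package main

import "fmt"

// afterLoader is the hook interface a decoded object may implement.
type afterLoader interface {
	afterLoad()
}

type decoder struct {
	pending []interface{} // every decoded object, including the root
}

func (d *decoder) queue(obj interface{}) {
	d.pending = append(d.pending, obj)
}

// finish runs hooks only after the entire object graph has been decoded, so
// an afterLoad implementation can safely touch other restored objects.
func (d *decoder) finish() {
	for _, obj := range d.pending {
		if al, ok := obj.(afterLoader); ok {
			al.afterLoad()
		}
	}
}

type root struct{ v int }

func (r *root) afterLoad() { fmt.Println("root afterLoad, v =", r.v) }

func main() {
	d := &decoder{}
	r := &root{v: 1}
	d.queue(r) // the bug fixed above: forgetting to queue the root skips its hook
	d.finish()
}
```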
259,992
20.08.2020 16:16:46
25,200
e8a25a283447443ee895d41ae7ae95e96fe62140
Enable strace+debug in syscall tests This is done to ease troubleshooting when tests fail. runsc logs are not stored when tests pass, so this will only affect failing tests and should not increase log storage too badly.
[ { "change_type": "MODIFY", "old_path": "test/perf/BUILD", "new_path": "test/perf/BUILD", "diff": "@@ -3,33 +3,40 @@ load(\"//test/runner:defs.bzl\", \"syscall_test\")\npackage(licenses = [\"notice\"])\nsyscall_test(\n+ debug = False,\ntest = \"//test/perf/linux:clock_getres_benchmark\",\n)\nsyscall_test(\n+ debug = False,\ntest = \"//test/perf/linux:clock_gettime_benchmark\",\n)\nsyscall_test(\n+ debug = False,\ntest = \"//test/perf/linux:death_benchmark\",\n)\nsyscall_test(\n+ debug = False,\ntest = \"//test/perf/linux:epoll_benchmark\",\n)\nsyscall_test(\nsize = \"large\",\n+ debug = False,\ntest = \"//test/perf/linux:fork_benchmark\",\n)\nsyscall_test(\nsize = \"large\",\n+ debug = False,\ntest = \"//test/perf/linux:futex_benchmark\",\n)\nsyscall_test(\nsize = \"enormous\",\n+ debug = False,\nshard_count = 10,\ntags = [\"nogotsan\"],\ntest = \"//test/perf/linux:getdents_benchmark\",\n@@ -37,81 +44,96 @@ syscall_test(\nsyscall_test(\nsize = \"large\",\n+ debug = False,\ntest = \"//test/perf/linux:getpid_benchmark\",\n)\nsyscall_test(\nsize = \"enormous\",\n+ debug = False,\ntags = [\"nogotsan\"],\ntest = \"//test/perf/linux:gettid_benchmark\",\n)\nsyscall_test(\nsize = \"large\",\n+ debug = False,\ntest = \"//test/perf/linux:mapping_benchmark\",\n)\nsyscall_test(\nsize = \"large\",\nadd_overlay = True,\n+ debug = False,\ntest = \"//test/perf/linux:open_benchmark\",\n)\nsyscall_test(\n+ debug = False,\ntest = \"//test/perf/linux:pipe_benchmark\",\n)\nsyscall_test(\nsize = \"large\",\nadd_overlay = True,\n+ debug = False,\ntest = \"//test/perf/linux:randread_benchmark\",\n)\nsyscall_test(\nsize = \"large\",\nadd_overlay = True,\n+ debug = False,\ntest = \"//test/perf/linux:read_benchmark\",\n)\nsyscall_test(\nsize = \"large\",\n+ debug = False,\ntest = \"//test/perf/linux:sched_yield_benchmark\",\n)\nsyscall_test(\nsize = \"large\",\n+ debug = False,\ntest = \"//test/perf/linux:send_recv_benchmark\",\n)\nsyscall_test(\nsize = \"large\",\nadd_overlay = True,\n+ debug = False,\ntest = \"//test/perf/linux:seqwrite_benchmark\",\n)\nsyscall_test(\nsize = \"enormous\",\n+ debug = False,\ntest = \"//test/perf/linux:signal_benchmark\",\n)\nsyscall_test(\n+ debug = False,\ntest = \"//test/perf/linux:sleep_benchmark\",\n)\nsyscall_test(\nsize = \"large\",\nadd_overlay = True,\n+ debug = False,\ntest = \"//test/perf/linux:stat_benchmark\",\n)\nsyscall_test(\nsize = \"enormous\",\nadd_overlay = True,\n+ debug = False,\ntest = \"//test/perf/linux:unlink_benchmark\",\n)\nsyscall_test(\nsize = \"large\",\nadd_overlay = True,\n+ debug = False,\ntest = \"//test/perf/linux:write_benchmark\",\n)\n" }, { "change_type": "MODIFY", "old_path": "test/runner/defs.bzl", "new_path": "test/runner/defs.bzl", "diff": "@@ -62,7 +62,8 @@ def _syscall_test(\noverlay = False,\nadd_uds_tree = False,\nvfs2 = False,\n- fuse = False):\n+ fuse = False,\n+ debug = True):\n# Prepend \"runsc\" to non-native platform names.\nfull_platform = platform if platform == \"native\" else \"runsc_\" + platform\n@@ -111,6 +112,8 @@ def _syscall_test(\n\"--add-uds-tree=\" + str(add_uds_tree),\n\"--vfs2=\" + str(vfs2),\n\"--fuse=\" + str(fuse),\n+ \"--strace=\" + str(debug),\n+ \"--debug=\" + str(debug),\n]\n# Call the rule above.\n@@ -134,6 +137,7 @@ def syscall_test(\nadd_hostinet = False,\nvfs2 = True,\nfuse = False,\n+ debug = True,\ntags = None):\n\"\"\"syscall_test is a macro that will create targets for all platforms.\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/BUILD", "new_path": "test/syscalls/BUILD", "diff": "@@ 
-64,6 +64,8 @@ syscall_test(\nsyscall_test(\nsize = \"large\",\n+ # Produce too many logs in the debug mode.\n+ debug = False,\nshard_count = 50,\n# Takes too long for TSAN. Since this is kind of a stress test that doesn't\n# involve much concurrency, TSAN's usefulness here is limited anyway.\n" } ]
Go
Apache License 2.0
google/gvisor
Enable strace+debug in syscall tests This is done to ease troubleshooting when tests fail. runsc logs are not stored when tests pass, so this will only affect failing tests and should not increase log storage too badly. PiperOrigin-RevId: 327717551
259,907
20.08.2020 16:25:57
25,200
73c69cb4d8e7f47ce9205844e5f6e369ff4dfa53
[vfs] Create recursive dir creation util. Refactored the recursive dir creation util in runsc/boot/vfs.go to be more flexible.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/vfs/vfs.go", "new_path": "pkg/sentry/vfs/vfs.go", "diff": "@@ -36,6 +36,7 @@ package vfs\nimport (\n\"fmt\"\n+ \"path\"\n\"gvisor.dev/gvisor/pkg/abi/linux\"\n\"gvisor.dev/gvisor/pkg/context\"\n@@ -782,6 +783,38 @@ func (vfs *VirtualFilesystem) SyncAllFilesystems(ctx context.Context) error {\nreturn retErr\n}\n+// MkdirAllAt recursively creates non-existent directories on the given path\n+// (including the last component).\n+func (vfs *VirtualFilesystem) MkdirAllAt(ctx context.Context, currentPath string, root VirtualDentry, creds *auth.Credentials, mkdirOpts *MkdirOptions) error {\n+ pop := &PathOperation{\n+ Root: root,\n+ Start: root,\n+ Path: fspath.Parse(currentPath),\n+ }\n+ stat, err := vfs.StatAt(ctx, creds, pop, &StatOptions{Mask: linux.STATX_TYPE})\n+ switch err {\n+ case nil:\n+ if stat.Mask&linux.STATX_TYPE == 0 || stat.Mode&linux.FileTypeMask != linux.ModeDirectory {\n+ return syserror.ENOTDIR\n+ }\n+ // Directory already exists.\n+ return nil\n+ case syserror.ENOENT:\n+ // Expected, we will create the dir.\n+ default:\n+ return fmt.Errorf(\"stat failed for %q during directory creation: %w\", currentPath, err)\n+ }\n+\n+ // Recurse to ensure parent is created and then create the final directory.\n+ if err := vfs.MkdirAllAt(ctx, path.Dir(currentPath), root, creds, mkdirOpts); err != nil {\n+ return err\n+ }\n+ if err := vfs.MkdirAt(ctx, creds, pop, mkdirOpts); err != nil {\n+ return fmt.Errorf(\"failed to create directory %q: %w\", currentPath, err)\n+ }\n+ return nil\n+}\n+\n// A VirtualDentry represents a node in a VFS tree, by combining a Dentry\n// (which represents a node in a Filesystem's tree) and a Mount (which\n// represents the Filesystem's position in a VFS mount tree).\n" }, { "change_type": "MODIFY", "old_path": "runsc/boot/vfs.go", "new_path": "runsc/boot/vfs.go", "diff": "@@ -16,7 +16,6 @@ package boot\nimport (\n\"fmt\"\n- \"path\"\n\"sort\"\n\"strings\"\n@@ -274,7 +273,7 @@ func (c *containerMounter) mountSubmountVFS2(ctx context.Context, conf *config.C\nreturn nil\n}\n- if err := c.makeSyntheticMount(ctx, submount.Destination, root, creds); err != nil {\n+ if err := c.k.VFS().MkdirAllAt(ctx, submount.Destination, root, creds, &vfs.MkdirOptions{Mode: 0777, ForSyntheticMountpoint: true}); err != nil {\nreturn err\n}\nif err := c.k.VFS().MountAt(ctx, creds, \"\", target, fsName, opts); err != nil {\n@@ -348,33 +347,6 @@ func (c *containerMounter) getMountNameAndOptionsVFS2(conf *config.Config, m *mo\nreturn fsName, opts, nil\n}\n-func (c *containerMounter) makeSyntheticMount(ctx context.Context, currentPath string, root vfs.VirtualDentry, creds *auth.Credentials) error {\n- target := &vfs.PathOperation{\n- Root: root,\n- Start: root,\n- Path: fspath.Parse(currentPath),\n- }\n- _, err := c.k.VFS().StatAt(ctx, creds, target, &vfs.StatOptions{})\n- if err == nil {\n- log.Debugf(\"Mount point %q already exists\", currentPath)\n- return nil\n- }\n- if err != syserror.ENOENT {\n- return fmt.Errorf(\"stat failed for %q during mount point creation: %w\", currentPath, err)\n- }\n-\n- // Recurse to ensure parent is created and then create the mount point.\n- if err := c.makeSyntheticMount(ctx, path.Dir(currentPath), root, creds); err != nil {\n- return err\n- }\n- log.Debugf(\"Creating dir %q for mount point\", currentPath)\n- mkdirOpts := &vfs.MkdirOptions{Mode: 0777, ForSyntheticMountpoint: true}\n- if err := c.k.VFS().MkdirAt(ctx, creds, target, mkdirOpts); err != nil {\n- return fmt.Errorf(\"failed to create directory 
%q for mount: %w\", currentPath, err)\n- }\n- return nil\n-}\n-\n// mountTmpVFS2 mounts an internal tmpfs at '/tmp' if it's safe to do so.\n// Technically we don't have to mount tmpfs at /tmp, as we could just rely on\n// the host /tmp, but this is a nice optimization, and fixes some apps that call\n@@ -503,7 +475,7 @@ func (c *containerMounter) mountSharedSubmountVFS2(ctx context.Context, conf *co\nroot := mns.Root()\ndefer root.DecRef(ctx)\n- if err := c.makeSyntheticMount(ctx, mount.Destination, root, creds); err != nil {\n+ if err := c.k.VFS().MkdirAllAt(ctx, mount.Destination, root, creds, &vfs.MkdirOptions{Mode: 0777, ForSyntheticMountpoint: true}); err != nil {\nreturn err\n}\n" } ]
Go
Apache License 2.0
google/gvisor
[vfs] Create recursive dir creation util. Refactored the recursive dir creation util in runsc/boot/vfs.go to be more flexible. PiperOrigin-RevId: 327719100
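MkdirAllAt recurses on the parent path before creating the final component, treating an existing directory as success. The same shape against the host filesystem (roughly what os.MkdirAll does, shown here only as a simplified sketch):

```go
package main

import (
	"fmt"
	"os"
	"path"
)

// mkdirAll creates dir and any missing parents, tolerating components that
// already exist as directories and rejecting non-directory components.
func mkdirAll(dir string, mode os.FileMode) error {
	if info, err := os.Stat(dir); err == nil {
		if !info.IsDir() {
			return fmt.Errorf("%q exists and is not a directory", dir)
		}
		return nil // already there
	} else if !os.IsNotExist(err) {
		return fmt.Errorf("stat %q: %w", dir, err)
	}

	// Ensure the parent exists first, then create the final component.
	if parent := path.Dir(dir); parent != dir {
		if err := mkdirAll(parent, mode); err != nil {
			return err
		}
	}
	return os.Mkdir(dir, mode)
}

func main() {
	fmt.Println(mkdirAll("/tmp/demo/a/b/c", 0o777))
}
```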
259,975
21.08.2020 11:37:12
25,200
c24db90be5cb4d81e71e7923fe85740721ec9d1e
Skip ElfInterpreterStaticTest for 5.X kernels. gVisor emulates 4.6 kernel versions, and test doesn't work on 5.0 versions (observed on our Ubuntu18.04 image). Skip it.
[ { "change_type": "MODIFY", "old_path": "test/syscalls/linux/exec_binary.cc", "new_path": "test/syscalls/linux/exec_binary.cc", "diff": "@@ -1042,6 +1042,13 @@ class ElfInterpreterStaticTest\n// Statically linked ELF with a statically linked ELF interpreter.\nTEST_P(ElfInterpreterStaticTest, Test) {\n+ // TODO(gvisor.dev/issue/3721): Test has been observed to segfault on 5.X\n+ // kernels.\n+ if (!IsRunningOnGvisor()) {\n+ auto version = ASSERT_NO_ERRNO_AND_VALUE(GetKernelVersion());\n+ SKIP_IF(version.major > 4);\n+ }\n+\nconst std::vector<char> segment_suffix = std::get<0>(GetParam());\nconst int expected_errno = std::get<1>(GetParam());\n" } ]
Go
Apache License 2.0
google/gvisor
Skip ElfInterpreterStaticTest for 5.X kernels. gVisor emulates 4.6 kernel versions, and the test doesn't work on 5.0 versions (observed on our Ubuntu 18.04 image). Skip it. PiperOrigin-RevId: 327845037
259,975
21.08.2020 14:19:29
25,200
c9e752b6440cfa78f7bdcd47af4c4f5fe33d57b4
Fix Inotify tests in open source. The order of unlink events (dir event/file event) is undefined, so make tests accept both orderings.
[ { "change_type": "MODIFY", "old_path": "test/syscalls/linux/inotify.cc", "new_path": "test/syscalls/linux/inotify.cc", "diff": "@@ -1371,8 +1371,9 @@ TEST(Inotify, HardlinksReuseSameWatch) {\n// that now and drain the resulting events.\nfile1_fd.reset();\nevents = ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(fd.get()));\n- ASSERT_THAT(events,\n- Are({Event(IN_CLOSE_WRITE, root_wd, Basename(file1.path())),\n+ ASSERT_THAT(\n+ events,\n+ AreUnordered({Event(IN_CLOSE_WRITE, root_wd, Basename(file1.path())),\nEvent(IN_CLOSE_WRITE, file1_wd)}));\n// Try removing the link and let's see what events show up. Note that after\n@@ -1381,7 +1382,8 @@ TEST(Inotify, HardlinksReuseSameWatch) {\nconst std::string link1_path = link1.reset();\nevents = ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(fd.get()));\n- ASSERT_THAT(events, Are({Event(IN_ATTRIB, link1_wd),\n+ ASSERT_THAT(events,\n+ AreUnordered({Event(IN_ATTRIB, link1_wd),\nEvent(IN_DELETE, root_wd, Basename(link1_path))}));\n// Now remove the other link. Since this is the last link to the file, the\n@@ -1934,14 +1936,22 @@ TEST(Inotify, IncludeUnlinkedFile_NoRandomSave) {\nASSERT_THAT(write(fd.get(), &val, sizeof(val)), SyscallSucceeds());\nstd::vector<Event> events =\nASSERT_NO_ERRNO_AND_VALUE(DrainEvents(inotify_fd.get()));\n- EXPECT_THAT(events, Are({\n+ EXPECT_THAT(events, AnyOf(Are({\nEvent(IN_ATTRIB, file_wd),\nEvent(IN_DELETE, dir_wd, Basename(file.path())),\nEvent(IN_ACCESS, dir_wd, Basename(file.path())),\nEvent(IN_ACCESS, file_wd),\nEvent(IN_MODIFY, dir_wd, Basename(file.path())),\nEvent(IN_MODIFY, file_wd),\n- }));\n+ }),\n+ Are({\n+ Event(IN_DELETE, dir_wd, Basename(file.path())),\n+ Event(IN_ATTRIB, file_wd),\n+ Event(IN_ACCESS, dir_wd, Basename(file.path())),\n+ Event(IN_ACCESS, file_wd),\n+ Event(IN_MODIFY, dir_wd, Basename(file.path())),\n+ Event(IN_MODIFY, file_wd),\n+ })));\nfd.reset();\nevents = ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(inotify_fd.get()));\n@@ -1984,7 +1994,7 @@ TEST(Inotify, ExcludeUnlink_NoRandomSave) {\nASSERT_THAT(read(fd.get(), &val, sizeof(val)), SyscallSucceeds());\nstd::vector<Event> events =\nASSERT_NO_ERRNO_AND_VALUE(DrainEvents(inotify_fd.get()));\n- EXPECT_THAT(events, Are({\n+ EXPECT_THAT(events, AreUnordered({\nEvent(IN_ATTRIB, file_wd),\nEvent(IN_DELETE, dir_wd, Basename(file.path())),\n}));\n@@ -2127,12 +2137,18 @@ TEST(Inotify, ExcludeUnlinkInodeEvents_NoRandomSave) {\nASSERT_THAT(ftruncate(fd.get(), 12345), SyscallSucceeds());\nstd::vector<Event> events =\nASSERT_NO_ERRNO_AND_VALUE(DrainEvents(inotify_fd.get()));\n- EXPECT_THAT(events, Are({\n+ EXPECT_THAT(events, AnyOf(Are({\nEvent(IN_ATTRIB, file_wd),\nEvent(IN_DELETE, dir_wd, Basename(file.path())),\nEvent(IN_MODIFY, dir_wd, Basename(file.path())),\nEvent(IN_MODIFY, file_wd),\n- }));\n+ }),\n+ Are({\n+ Event(IN_DELETE, dir_wd, Basename(file.path())),\n+ Event(IN_ATTRIB, file_wd),\n+ Event(IN_MODIFY, dir_wd, Basename(file.path())),\n+ Event(IN_MODIFY, file_wd),\n+ })));\nconst struct timeval times[2] = {{1, 0}, {2, 0}};\nASSERT_THAT(futimes(fd.get(), times), SyscallSucceeds());\n" } ]
Go
Apache License 2.0
google/gvisor
Fix Inotify tests in open source. The order of unlink events (dir event/file event) is undefined, so make tests accept both orderings. PiperOrigin-RevId: 327873316
260,003
21.08.2020 16:33:04
25,200
bd3383a7e25b426fc6aa5a28fc8ffa6fe7fa333e
Move UDP port exhaustion tests to a 'nogotsan' target. They frequently time out under GoTSAN.
[ { "change_type": "MODIFY", "old_path": "test/syscalls/BUILD", "new_path": "test/syscalls/BUILD", "diff": "@@ -667,6 +667,13 @@ syscall_test(\ntest = \"//test/syscalls/linux:socket_ipv4_udp_unbound_loopback_test\",\n)\n+syscall_test(\n+ size = \"medium\",\n+ # Takes too long under gotsan to run.\n+ tags = [\"nogotsan\"],\n+ test = \"//test/syscalls/linux:socket_ipv4_udp_unbound_loopback_nogotsan_test\",\n+)\n+\nsyscall_test(\ntest = \"//test/syscalls/linux:socket_ip_unbound_test\",\n)\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/BUILD", "new_path": "test/syscalls/linux/BUILD", "diff": "@@ -2738,6 +2738,23 @@ cc_binary(\n],\n)\n+cc_binary(\n+ name = \"socket_ipv4_udp_unbound_loopback_nogotsan_test\",\n+ testonly = 1,\n+ srcs = [\n+ \"socket_ipv4_udp_unbound_loopback_nogotsan.cc\",\n+ ],\n+ linkstatic = 1,\n+ deps = [\n+ \":ip_socket_test_util\",\n+ \":socket_test_util\",\n+ gtest,\n+ \"//test/util:test_main\",\n+ \"//test/util:test_util\",\n+ \"@com_google_absl//absl/memory\",\n+ ],\n+)\n+\ncc_binary(\nname = \"socket_ip_unbound_test\",\ntestonly = 1,\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/socket_ipv4_udp_unbound.cc", "new_path": "test/syscalls/linux/socket_ipv4_udp_unbound.cc", "diff": "@@ -2121,62 +2121,6 @@ TEST_P(IPv4UDPUnboundSocketTest, ReuseAddrReusePortDistribution) {\nSyscallSucceedsWithValue(kMessageSize));\n}\n-// Check that connect returns EAGAIN when out of local ephemeral ports.\n-// We disable S/R because this test creates a large number of sockets.\n-TEST_P(IPv4UDPUnboundSocketTest, UDPConnectPortExhaustion_NoRandomSave) {\n- auto receiver1 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());\n- constexpr int kClients = 65536;\n- // Bind the first socket to the loopback and take note of the selected port.\n- auto addr = V4Loopback();\n- ASSERT_THAT(bind(receiver1->get(), reinterpret_cast<sockaddr*>(&addr.addr),\n- addr.addr_len),\n- SyscallSucceeds());\n- socklen_t addr_len = addr.addr_len;\n- ASSERT_THAT(getsockname(receiver1->get(),\n- reinterpret_cast<sockaddr*>(&addr.addr), &addr_len),\n- SyscallSucceeds());\n- EXPECT_EQ(addr_len, addr.addr_len);\n-\n- // Disable cooperative S/R as we are making too many syscalls.\n- DisableSave ds;\n- std::vector<std::unique_ptr<FileDescriptor>> sockets;\n- for (int i = 0; i < kClients; i++) {\n- auto s = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());\n-\n- int ret = connect(s->get(), reinterpret_cast<sockaddr*>(&addr.addr),\n- addr.addr_len);\n- if (ret == 0) {\n- sockets.push_back(std::move(s));\n- continue;\n- }\n- ASSERT_THAT(ret, SyscallFailsWithErrno(EAGAIN));\n- break;\n- }\n-}\n-\n-// Check that bind returns EADDRINUSE when out of local ephemeral ports.\n-// We disable S/R because this test creates a large number of sockets.\n-TEST_P(IPv4UDPUnboundSocketTest, UDPBindPortExhaustion_NoRandomSave) {\n- auto receiver1 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());\n- constexpr int kClients = 65536;\n- auto addr = V4Loopback();\n- // Disable cooperative S/R as we are making too many syscalls.\n- DisableSave ds;\n- std::vector<std::unique_ptr<FileDescriptor>> sockets;\n- for (int i = 0; i < kClients; i++) {\n- auto s = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());\n-\n- int ret =\n- bind(s->get(), reinterpret_cast<sockaddr*>(&addr.addr), addr.addr_len);\n- if (ret == 0) {\n- sockets.push_back(std::move(s));\n- continue;\n- }\n- ASSERT_THAT(ret, SyscallFailsWithErrno(EADDRINUSE));\n- break;\n- }\n-}\n-\n// Test that socket will receive packet info control message.\nTEST_P(IPv4UDPUnboundSocketTest, 
SetAndReceiveIPPKTINFO) {\n// TODO(gvisor.dev/issue/1202): ioctl() is not supported by hostinet.\n" }, { "change_type": "ADD", "old_path": null, "new_path": "test/syscalls/linux/socket_ipv4_udp_unbound_loopback_nogotsan.cc", "diff": "+// Copyright 2020 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+#include <sys/socket.h>\n+#include <sys/types.h>\n+\n+#include \"gtest/gtest.h\"\n+#include \"absl/memory/memory.h\"\n+#include \"test/syscalls/linux/ip_socket_test_util.h\"\n+#include \"test/syscalls/linux/socket_test_util.h\"\n+#include \"test/util/test_util.h\"\n+\n+namespace gvisor {\n+namespace testing {\n+\n+// Test fixture for tests that apply to IPv4 UDP sockets.\n+using IPv4UDPUnboundSocketNogotsanTest = SimpleSocketTest;\n+\n+// Check that connect returns EAGAIN when out of local ephemeral ports.\n+// We disable S/R because this test creates a large number of sockets.\n+TEST_P(IPv4UDPUnboundSocketNogotsanTest,\n+ UDPConnectPortExhaustion_NoRandomSave) {\n+ auto receiver1 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());\n+ constexpr int kClients = 65536;\n+ // Bind the first socket to the loopback and take note of the selected port.\n+ auto addr = V4Loopback();\n+ ASSERT_THAT(bind(receiver1->get(), reinterpret_cast<sockaddr*>(&addr.addr),\n+ addr.addr_len),\n+ SyscallSucceeds());\n+ socklen_t addr_len = addr.addr_len;\n+ ASSERT_THAT(getsockname(receiver1->get(),\n+ reinterpret_cast<sockaddr*>(&addr.addr), &addr_len),\n+ SyscallSucceeds());\n+ EXPECT_EQ(addr_len, addr.addr_len);\n+\n+ // Disable cooperative S/R as we are making too many syscalls.\n+ DisableSave ds;\n+ std::vector<std::unique_ptr<FileDescriptor>> sockets;\n+ for (int i = 0; i < kClients; i++) {\n+ auto s = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());\n+\n+ int ret = connect(s->get(), reinterpret_cast<sockaddr*>(&addr.addr),\n+ addr.addr_len);\n+ if (ret == 0) {\n+ sockets.push_back(std::move(s));\n+ continue;\n+ }\n+ ASSERT_THAT(ret, SyscallFailsWithErrno(EAGAIN));\n+ break;\n+ }\n+}\n+\n+// Check that bind returns EADDRINUSE when out of local ephemeral ports.\n+// We disable S/R because this test creates a large number of sockets.\n+TEST_P(IPv4UDPUnboundSocketNogotsanTest, UDPBindPortExhaustion_NoRandomSave) {\n+ auto receiver1 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());\n+ constexpr int kClients = 65536;\n+ auto addr = V4Loopback();\n+ // Disable cooperative S/R as we are making too many syscalls.\n+ DisableSave ds;\n+ std::vector<std::unique_ptr<FileDescriptor>> sockets;\n+ for (int i = 0; i < kClients; i++) {\n+ auto s = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());\n+\n+ int ret =\n+ bind(s->get(), reinterpret_cast<sockaddr*>(&addr.addr), addr.addr_len);\n+ if (ret == 0) {\n+ sockets.push_back(std::move(s));\n+ continue;\n+ }\n+ ASSERT_THAT(ret, SyscallFailsWithErrno(EADDRINUSE));\n+ break;\n+ }\n+}\n+\n+INSTANTIATE_TEST_SUITE_P(\n+ IPv4UDPSockets, IPv4UDPUnboundSocketNogotsanTest,\n+ ::testing::ValuesIn(ApplyVec<SocketKind>(IPv4UDPUnboundSocket,\n+ AllBitwiseCombinations(List<int>{\n+ 
0, SOCK_NONBLOCK}))));\n+\n+} // namespace testing\n+} // namespace gvisor\n" } ]
Go
Apache License 2.0
google/gvisor
Move UDP port exhaustion tests to a 'nogotsan' target. They frequently time out under GoTSAN. PiperOrigin-RevId: 327894343
260,003
21.08.2020 17:32:19
25,200
9607515aed13406a8550e8072cdaf53d4f1e437b
stateify: Fix pretty print not printing odd-numbered fields.
[ { "change_type": "MODIFY", "old_path": "pkg/state/pretty/pretty.go", "new_path": "pkg/state/pretty/pretty.go", "diff": "@@ -148,7 +148,6 @@ func format(graph uint64, depth int, encoded wire.Object, html bool) (string, bo\nelement, ok := format(graph, depth+1, *x.Field(i), html)\nallZero = allZero && !ok\nitems = append(items, fmt.Sprintf(\"\\t%d: %s,\", i, element))\n- i++\n}\nitems = append(items, \"}\")\nreturn strings.Join(items, tabs), !allZero\n" } ]
Go
Apache License 2.0
google/gvisor
stateify: Fix pretty print not printing odd-numbered fields. PiperOrigin-RevId: 327902182
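The one-line fix above removes a stray second increment of the loop index, which made the printer skip every other field. A minimal reproduction of that bug pattern, using hypothetical field names:

```go
package main

import "fmt"

func main() {
	fields := []string{"f0", "f1", "f2", "f3"}

	// Buggy: the loop header already advances i, so the extra i++ in the body
	// skips the odd-numbered fields (f1 and f3 are never printed).
	for i := 0; i < len(fields); i++ {
		fmt.Println("buggy:", i, fields[i])
		i++
	}

	// Fixed: let the loop header be the only place the index advances.
	for i := 0; i < len(fields); i++ {
		fmt.Println("fixed:", i, fields[i])
	}
}
```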
259,907
21.08.2020 20:04:31
25,200
17bc5c1b00ce0bb9944507b40b4bb6968bcdbe75
[vfs] Allow mountpoint to be an existing non-directory. Unlike Linux mount(2), the OCI spec allows mounting on top of an existing non-directory file.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/vfs/mount.go", "new_path": "pkg/sentry/vfs/mount.go", "diff": "@@ -18,12 +18,14 @@ import (\n\"bytes\"\n\"fmt\"\n\"math\"\n+ \"path\"\n\"sort\"\n\"strings\"\n\"sync/atomic\"\n\"gvisor.dev/gvisor/pkg/abi/linux\"\n\"gvisor.dev/gvisor/pkg/context\"\n+ \"gvisor.dev/gvisor/pkg/fspath\"\n\"gvisor.dev/gvisor/pkg/sentry/kernel/auth\"\n\"gvisor.dev/gvisor/pkg/syserror\"\n)\n@@ -888,6 +890,30 @@ func (vfs *VirtualFilesystem) GenerateProcMountInfo(ctx context.Context, taskRoo\n}\n}\n+// MakeSyntheticMountpoint creates parent directories of target if they do not\n+// exist and attempts to create a directory for the mountpoint. If a\n+// non-directory file already exists there then we allow it.\n+func (vfs *VirtualFilesystem) MakeSyntheticMountpoint(ctx context.Context, target string, root VirtualDentry, creds *auth.Credentials) error {\n+ mkdirOpts := &MkdirOptions{Mode: 0777, ForSyntheticMountpoint: true}\n+\n+ // Make sure the parent directory of target exists.\n+ if err := vfs.MkdirAllAt(ctx, path.Dir(target), root, creds, mkdirOpts); err != nil {\n+ return fmt.Errorf(\"failed to create parent directory of mountpoint %q: %w\", target, err)\n+ }\n+\n+ // Attempt to mkdir the final component. If a file (of any type) exists\n+ // then we let allow mounting on top of that because we do not require the\n+ // target to be an existing directory, unlike Linux mount(2).\n+ if err := vfs.MkdirAt(ctx, creds, &PathOperation{\n+ Root: root,\n+ Start: root,\n+ Path: fspath.Parse(target),\n+ }, mkdirOpts); err != nil && err != syserror.EEXIST {\n+ return fmt.Errorf(\"failed to create mountpoint %q: %w\", target, err)\n+ }\n+ return nil\n+}\n+\n// manglePath replaces ' ', '\\t', '\\n', and '\\\\' with their octal equivalents.\n// See Linux fs/seq_file.c:mangle_path.\nfunc manglePath(p string) string {\n" }, { "change_type": "MODIFY", "old_path": "runsc/boot/vfs.go", "new_path": "runsc/boot/vfs.go", "diff": "@@ -292,7 +292,7 @@ func (c *containerMounter) mountSubmountVFS2(ctx context.Context, conf *config.C\nreturn nil, nil\n}\n- if err := c.k.VFS().MkdirAllAt(ctx, submount.Destination, root, creds, &vfs.MkdirOptions{Mode: 0777, ForSyntheticMountpoint: true}); err != nil {\n+ if err := c.k.VFS().MakeSyntheticMountpoint(ctx, submount.Destination, root, creds); err != nil {\nreturn nil, err\n}\nmnt, err := c.k.VFS().MountAt(ctx, creds, \"\", target, fsName, opts)\n@@ -496,7 +496,7 @@ func (c *containerMounter) mountSharedSubmountVFS2(ctx context.Context, conf *co\nroot := mns.Root()\ndefer root.DecRef(ctx)\n- if err := c.k.VFS().MkdirAllAt(ctx, mount.Destination, root, creds, &vfs.MkdirOptions{Mode: 0777, ForSyntheticMountpoint: true}); err != nil {\n+ if err := c.k.VFS().MakeSyntheticMountpoint(ctx, mount.Destination, root, creds); err != nil {\nreturn nil, err\n}\n" } ]
Go
Apache License 2.0
google/gvisor
[vfs] Allow mountpoint to be an existing non-directory. Unlike Linux mount(2), the OCI spec allows mounting on top of an existing non-directory file. PiperOrigin-RevId: 327914342
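MakeSyntheticMountpoint amounts to "create the parents, mkdir the target, and accept an existing file of any type at the target". A hedged host-filesystem analogue of that tolerance (not the VFS implementation; the paths are placeholders):

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// makeMountpoint creates parent directories of target and then the target
// itself, but unlike plain MkdirAll it tolerates an existing non-directory
// at target, mirroring the OCI behavior described in the commit above.
func makeMountpoint(target string) error {
	if err := os.MkdirAll(filepath.Dir(target), 0o777); err != nil {
		return fmt.Errorf("failed to create parent of %q: %w", target, err)
	}
	if err := os.Mkdir(target, 0o777); err != nil && !os.IsExist(err) {
		return fmt.Errorf("failed to create mountpoint %q: %w", target, err)
	}
	// If target already existed (directory or regular file), that is fine:
	// a bind mount can sit on top of either.
	return nil
}

func main() {
	fmt.Println(makeMountpoint("/tmp/demo-mounts/etc/resolv.conf"))
}
```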
260,023
21.08.2020 22:47:06
25,200
69008b68b0a70d8ae4f6c2fafcd9e60307f16bf9
Add syscall tests for SO_REUSEADDR. Add tests for socket re-bind/listen of client and server sockets with the older connection still in TIME_WAIT state and with SO_REUSEADDR enabled.
[ { "change_type": "MODIFY", "old_path": "test/syscalls/linux/socket_inet_loopback.cc", "new_path": "test/syscalls/linux/socket_inet_loopback.cc", "diff": "@@ -861,36 +861,38 @@ TEST_P(SocketInetLoopbackTest, TCPResetAfterClose) {\nSyscallSucceedsWithValue(0));\n}\n-// This test is disabled under random save as the the restore run\n-// results in the stack.Seed() being different which can cause\n-// sequence number of final connect to be one that is considered\n-// old and can cause the test to be flaky.\n-TEST_P(SocketInetLoopbackTest, TCPPassiveCloseNoTimeWaitTest_NoRandomSave) {\n- auto const& param = GetParam();\n- TestAddress const& listener = param.listener;\n- TestAddress const& connector = param.connector;\n-\n+// setupTimeWaitClose sets up a socket endpoint in TIME_WAIT state.\n+// Callers can choose to perform active close on either ends of the connection\n+// and also specify if they want to enabled SO_REUSEADDR.\n+void setupTimeWaitClose(const TestAddress* listener,\n+ const TestAddress* connector, bool reuse,\n+ bool accept_close, sockaddr_storage* listen_addr,\n+ sockaddr_storage* conn_bound_addr) {\n// Create the listening socket.\n- const FileDescriptor listen_fd = ASSERT_NO_ERRNO_AND_VALUE(\n- Socket(listener.family(), SOCK_STREAM, IPPROTO_TCP));\n- sockaddr_storage listen_addr = listener.addr;\n- ASSERT_THAT(bind(listen_fd.get(), reinterpret_cast<sockaddr*>(&listen_addr),\n- listener.addr_len),\n+ FileDescriptor listen_fd = ASSERT_NO_ERRNO_AND_VALUE(\n+ Socket(listener->family(), SOCK_STREAM, IPPROTO_TCP));\n+ if (reuse) {\n+ ASSERT_THAT(setsockopt(listen_fd.get(), SOL_SOCKET, SO_REUSEADDR,\n+ &kSockOptOn, sizeof(kSockOptOn)),\n+ SyscallSucceeds());\n+ }\n+ ASSERT_THAT(bind(listen_fd.get(), reinterpret_cast<sockaddr*>(listen_addr),\n+ listener->addr_len),\nSyscallSucceeds());\nASSERT_THAT(listen(listen_fd.get(), SOMAXCONN), SyscallSucceeds());\n// Get the port bound by the listening socket.\n- socklen_t addrlen = listener.addr_len;\n+ socklen_t addrlen = listener->addr_len;\nASSERT_THAT(getsockname(listen_fd.get(),\n- reinterpret_cast<sockaddr*>(&listen_addr), &addrlen),\n+ reinterpret_cast<sockaddr*>(listen_addr), &addrlen),\nSyscallSucceeds());\nuint16_t const port =\n- ASSERT_NO_ERRNO_AND_VALUE(AddrPort(listener.family(), listen_addr));\n+ ASSERT_NO_ERRNO_AND_VALUE(AddrPort(listener->family(), *listen_addr));\n// Connect to the listening socket.\nFileDescriptor conn_fd = ASSERT_NO_ERRNO_AND_VALUE(\n- Socket(connector.family(), SOCK_STREAM, IPPROTO_TCP));\n+ Socket(connector->family(), SOCK_STREAM, IPPROTO_TCP));\n// We disable saves after this point as a S/R causes the netstack seed\n// to be regenerated which changes what ports/ISN is picked for a given\n@@ -901,11 +903,12 @@ TEST_P(SocketInetLoopbackTest, TCPPassiveCloseNoTimeWaitTest_NoRandomSave) {\n//\n// TODO(gvisor.dev/issue/940): S/R portSeed/portHint\nDisableSave ds;\n- sockaddr_storage conn_addr = connector.addr;\n- ASSERT_NO_ERRNO(SetAddrPort(connector.family(), &conn_addr, port));\n+\n+ sockaddr_storage conn_addr = connector->addr;\n+ ASSERT_NO_ERRNO(SetAddrPort(connector->family(), &conn_addr, port));\nASSERT_THAT(RetryEINTR(connect)(conn_fd.get(),\nreinterpret_cast<sockaddr*>(&conn_addr),\n- connector.addr_len),\n+ connector->addr_len),\nSyscallSucceeds());\n// Accept the connection.\n@@ -913,136 +916,145 @@ TEST_P(SocketInetLoopbackTest, TCPPassiveCloseNoTimeWaitTest_NoRandomSave) {\nASSERT_NO_ERRNO_AND_VALUE(Accept(listen_fd.get(), nullptr, nullptr));\n// Get the address/port bound by the connecting 
socket.\n- sockaddr_storage conn_bound_addr;\n- socklen_t conn_addrlen = connector.addr_len;\n+ socklen_t conn_addrlen = connector->addr_len;\nASSERT_THAT(\n- getsockname(conn_fd.get(), reinterpret_cast<sockaddr*>(&conn_bound_addr),\n+ getsockname(conn_fd.get(), reinterpret_cast<sockaddr*>(conn_bound_addr),\n&conn_addrlen),\nSyscallSucceeds());\n- // shutdown the accept FD to trigger TIME_WAIT on the accepted socket which\n- // should cause the conn_fd to follow CLOSE_WAIT->LAST_ACK->CLOSED instead of\n- // TIME_WAIT.\n- ASSERT_THAT(shutdown(accepted.get(), SHUT_RDWR), SyscallSucceeds());\n+ FileDescriptor active_closefd, passive_closefd;\n+ if (accept_close) {\n+ active_closefd = std::move(accepted);\n+ passive_closefd = std::move(conn_fd);\n+ } else {\n+ active_closefd = std::move(conn_fd);\n+ passive_closefd = std::move(accepted);\n+ }\n+\n+ // shutdown to trigger TIME_WAIT.\n+ ASSERT_THAT(shutdown(active_closefd.get(), SHUT_RDWR), SyscallSucceeds());\n{\nconst int kTimeout = 10000;\nstruct pollfd pfd = {\n- .fd = conn_fd.get(),\n+ .fd = passive_closefd.get(),\n.events = POLLIN,\n};\nASSERT_THAT(poll(&pfd, 1, kTimeout), SyscallSucceedsWithValue(1));\nASSERT_EQ(pfd.revents, POLLIN);\n}\n+ ScopedThread t([&]() {\n+ constexpr int kTimeout = 10000;\n+ constexpr int16_t want_events = POLLHUP;\n+ struct pollfd pfd = {\n+ .fd = active_closefd.get(),\n+ .events = want_events,\n+ };\n+ ASSERT_THAT(poll(&pfd, 1, kTimeout), SyscallSucceedsWithValue(1));\n+ });\n- conn_fd.reset();\n- // This sleep is required to give conn_fd time to transition to TIME-WAIT.\n+ passive_closefd.reset();\n+ t.Join();\n+ active_closefd.reset();\n+ // This sleep is needed to reduce flake to ensure that the passive-close\n+ // ensures the state transitions to CLOSE from LAST_ACK.\nabsl::SleepFor(absl::Seconds(1));\n+}\n- // At this point conn_fd should be the one that moved to CLOSE_WAIT and\n- // eventually to CLOSED.\n-\n- // Now bind and connect a new socket and verify that we can immediately\n- // rebind the address bound by the conn_fd as it never entered TIME_WAIT.\n- const FileDescriptor conn_fd2 = ASSERT_NO_ERRNO_AND_VALUE(\n- Socket(connector.family(), SOCK_STREAM, IPPROTO_TCP));\n+// These tests are disabled under random save as the the restore run\n+// results in the stack.Seed() being different which can cause\n+// sequence number of final connect to be one that is considered\n+// old and can cause the test to be flaky.\n+//\n+// Test re-binding of client and server bound addresses when the older\n+// connection is in TIME_WAIT.\n+TEST_P(SocketInetLoopbackTest, TCPPassiveCloseNoTimeWaitTest_NoRandomSave) {\n+ auto const& param = GetParam();\n+ sockaddr_storage listen_addr, conn_bound_addr;\n+ listen_addr = param.listener.addr;\n+ setupTimeWaitClose(&param.listener, &param.connector, false /*reuse*/,\n+ true /*accept_close*/, &listen_addr, &conn_bound_addr);\n- ASSERT_THAT(bind(conn_fd2.get(),\n- reinterpret_cast<sockaddr*>(&conn_bound_addr), conn_addrlen),\n- SyscallSucceeds());\n- ASSERT_THAT(RetryEINTR(connect)(conn_fd2.get(),\n- reinterpret_cast<sockaddr*>(&conn_addr),\n- conn_addrlen),\n+ // Now bind a new socket and verify that we can immediately rebind the address\n+ // bound by the conn_fd as it never entered TIME_WAIT.\n+ const FileDescriptor conn_fd = ASSERT_NO_ERRNO_AND_VALUE(\n+ Socket(param.connector.family(), SOCK_STREAM, IPPROTO_TCP));\n+ ASSERT_THAT(bind(conn_fd.get(), reinterpret_cast<sockaddr*>(&conn_bound_addr),\n+ param.connector.addr_len),\nSyscallSucceeds());\n+\n+ FileDescriptor 
listen_fd = ASSERT_NO_ERRNO_AND_VALUE(\n+ Socket(param.listener.family(), SOCK_STREAM, IPPROTO_TCP));\n+ ASSERT_THAT(bind(listen_fd.get(), reinterpret_cast<sockaddr*>(&listen_addr),\n+ param.listener.addr_len),\n+ SyscallFailsWithErrno(EADDRINUSE));\n}\n-TEST_P(SocketInetLoopbackTest, TCPActiveCloseTimeWaitTest_NoRandomSave) {\n+TEST_P(SocketInetLoopbackTest,\n+ TCPPassiveCloseNoTimeWaitReuseTest_NoRandomSave) {\nauto const& param = GetParam();\n- TestAddress const& listener = param.listener;\n- TestAddress const& connector = param.connector;\n+ sockaddr_storage listen_addr, conn_bound_addr;\n+ listen_addr = param.listener.addr;\n+ setupTimeWaitClose(&param.listener, &param.connector, true /*reuse*/,\n+ true /*accept_close*/, &listen_addr, &conn_bound_addr);\n- // Create the listening socket.\n- const FileDescriptor listen_fd = ASSERT_NO_ERRNO_AND_VALUE(\n- Socket(listener.family(), SOCK_STREAM, IPPROTO_TCP));\n- sockaddr_storage listen_addr = listener.addr;\n+ FileDescriptor listen_fd = ASSERT_NO_ERRNO_AND_VALUE(\n+ Socket(param.listener.family(), SOCK_STREAM, IPPROTO_TCP));\n+ ASSERT_THAT(setsockopt(listen_fd.get(), SOL_SOCKET, SO_REUSEADDR, &kSockOptOn,\n+ sizeof(kSockOptOn)),\n+ SyscallSucceeds());\nASSERT_THAT(bind(listen_fd.get(), reinterpret_cast<sockaddr*>(&listen_addr),\n- listener.addr_len),\n+ param.listener.addr_len),\nSyscallSucceeds());\nASSERT_THAT(listen(listen_fd.get(), SOMAXCONN), SyscallSucceeds());\n- // Get the port bound by the listening socket.\n- socklen_t addrlen = listener.addr_len;\n- ASSERT_THAT(getsockname(listen_fd.get(),\n- reinterpret_cast<sockaddr*>(&listen_addr), &addrlen),\n+ // Now bind and connect new socket and verify that we can immediately rebind\n+ // the address bound by the conn_fd as it never entered TIME_WAIT.\n+ const FileDescriptor conn_fd = ASSERT_NO_ERRNO_AND_VALUE(\n+ Socket(param.connector.family(), SOCK_STREAM, IPPROTO_TCP));\n+ ASSERT_THAT(setsockopt(conn_fd.get(), SOL_SOCKET, SO_REUSEADDR, &kSockOptOn,\n+ sizeof(kSockOptOn)),\n+ SyscallSucceeds());\n+ ASSERT_THAT(bind(conn_fd.get(), reinterpret_cast<sockaddr*>(&conn_bound_addr),\n+ param.connector.addr_len),\nSyscallSucceeds());\nuint16_t const port =\n- ASSERT_NO_ERRNO_AND_VALUE(AddrPort(listener.family(), listen_addr));\n-\n- // Connect to the listening socket.\n- FileDescriptor conn_fd = ASSERT_NO_ERRNO_AND_VALUE(\n- Socket(connector.family(), SOCK_STREAM, IPPROTO_TCP));\n-\n- // We disable saves after this point as a S/R causes the netstack seed\n- // to be regenerated which changes what ports/ISN is picked for a given\n- // tuple (src ip,src port, dst ip, dst port). 
This can cause the final\n- // SYN to use a sequence number that looks like one from the current\n- // connection in TIME_WAIT and will not be accepted causing the test\n- // to timeout.\n- //\n- // TODO(gvisor.dev/issue/940): S/R portSeed/portHint\n- DisableSave ds;\n-\n- sockaddr_storage conn_addr = connector.addr;\n- ASSERT_NO_ERRNO(SetAddrPort(connector.family(), &conn_addr, port));\n+ ASSERT_NO_ERRNO_AND_VALUE(AddrPort(param.listener.family(), listen_addr));\n+ sockaddr_storage conn_addr = param.connector.addr;\n+ ASSERT_NO_ERRNO(SetAddrPort(param.connector.family(), &conn_addr, port));\nASSERT_THAT(RetryEINTR(connect)(conn_fd.get(),\nreinterpret_cast<sockaddr*>(&conn_addr),\n- connector.addr_len),\n- SyscallSucceeds());\n-\n- // Accept the connection.\n- auto accepted =\n- ASSERT_NO_ERRNO_AND_VALUE(Accept(listen_fd.get(), nullptr, nullptr));\n-\n- // Get the address/port bound by the connecting socket.\n- sockaddr_storage conn_bound_addr;\n- socklen_t conn_addrlen = connector.addr_len;\n- ASSERT_THAT(\n- getsockname(conn_fd.get(), reinterpret_cast<sockaddr*>(&conn_bound_addr),\n- &conn_addrlen),\n+ param.connector.addr_len),\nSyscallSucceeds());\n-\n- // shutdown the conn FD to trigger TIME_WAIT on the connect socket.\n- ASSERT_THAT(shutdown(conn_fd.get(), SHUT_RDWR), SyscallSucceeds());\n- {\n- const int kTimeout = 10000;\n- struct pollfd pfd = {\n- .fd = accepted.get(),\n- .events = POLLIN,\n- };\n- ASSERT_THAT(poll(&pfd, 1, kTimeout), SyscallSucceedsWithValue(1));\n- ASSERT_EQ(pfd.revents, POLLIN);\n}\n- ScopedThread t([&]() {\n- constexpr int kTimeout = 10000;\n- constexpr int16_t want_events = POLLHUP;\n- struct pollfd pfd = {\n- .fd = conn_fd.get(),\n- .events = want_events,\n- };\n- ASSERT_THAT(poll(&pfd, 1, kTimeout), SyscallSucceedsWithValue(1));\n- });\n- accepted.reset();\n- t.Join();\n- conn_fd.reset();\n+TEST_P(SocketInetLoopbackTest, TCPActiveCloseTimeWaitTest_NoRandomSave) {\n+ auto const& param = GetParam();\n+ sockaddr_storage listen_addr, conn_bound_addr;\n+ listen_addr = param.listener.addr;\n+ setupTimeWaitClose(&param.listener, &param.connector, false /*reuse*/,\n+ false /*accept_close*/, &listen_addr, &conn_bound_addr);\n+ FileDescriptor conn_fd = ASSERT_NO_ERRNO_AND_VALUE(\n+ Socket(param.connector.family(), SOCK_STREAM, IPPROTO_TCP));\n- // Now bind and connect a new socket and verify that we can't immediately\n- // rebind the address bound by the conn_fd as it is in TIME_WAIT.\n- conn_fd = ASSERT_NO_ERRNO_AND_VALUE(\n- Socket(connector.family(), SOCK_STREAM, IPPROTO_TCP));\n+ ASSERT_THAT(bind(conn_fd.get(), reinterpret_cast<sockaddr*>(&conn_bound_addr),\n+ param.connector.addr_len),\n+ SyscallFailsWithErrno(EADDRINUSE));\n+}\n+TEST_P(SocketInetLoopbackTest, TCPActiveCloseTimeWaitReuseTest_NoRandomSave) {\n+ auto const& param = GetParam();\n+ sockaddr_storage listen_addr, conn_bound_addr;\n+ listen_addr = param.listener.addr;\n+ setupTimeWaitClose(&param.listener, &param.connector, true /*reuse*/,\n+ false /*accept_close*/, &listen_addr, &conn_bound_addr);\n+ FileDescriptor conn_fd = ASSERT_NO_ERRNO_AND_VALUE(\n+ Socket(param.connector.family(), SOCK_STREAM, IPPROTO_TCP));\n+ ASSERT_THAT(setsockopt(conn_fd.get(), SOL_SOCKET, SO_REUSEADDR, &kSockOptOn,\n+ sizeof(kSockOptOn)),\n+ SyscallSucceeds());\nASSERT_THAT(bind(conn_fd.get(), reinterpret_cast<sockaddr*>(&conn_bound_addr),\n- conn_addrlen),\n+ param.connector.addr_len),\nSyscallFailsWithErrno(EADDRINUSE));\n}\n" } ]
Go
Apache License 2.0
google/gvisor
Add syscall tests for SO_REUSEADDR. Add tests for socket re-bind/listen of client and server sockets with the older connection still in TIME_WAIT state and with SO_REUSEADDR enabled. PiperOrigin-RevId: 327924702
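The tests in the diff above are C++ gtest code; as a side illustration in this document's primary language, the following is a minimal, hypothetical Go sketch (standard syscall package only, placeholder loopback address and port, not part of the gVisor test suite) of the option those tests exercise: SO_REUSEADDR is set before bind(2) so that a local address still held by a connection in TIME_WAIT can be rebound.

package main

import (
	"fmt"
	"syscall"
)

func main() {
	// Create a TCP/IPv4 socket.
	fd, err := syscall.Socket(syscall.AF_INET, syscall.SOCK_STREAM, 0)
	if err != nil {
		panic(err)
	}
	defer syscall.Close(fd)

	// SO_REUSEADDR must be set before bind(2); it allows rebinding a
	// local address that is still held by a connection in TIME_WAIT.
	if err := syscall.SetsockoptInt(fd, syscall.SOL_SOCKET, syscall.SO_REUSEADDR, 1); err != nil {
		panic(err)
	}

	// Bind to 127.0.0.1:8080 (placeholder port).
	addr := syscall.SockaddrInet4{Port: 8080, Addr: [4]byte{127, 0, 0, 1}}
	if err := syscall.Bind(fd, &addr); err != nil {
		panic(err)
	}
	fmt.Println("bound with SO_REUSEADDR set")
}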
259,860
24.08.2020 11:28:28
25,200
bae25d2a087d4c493b3335803dbfe53ab5505267
Update inotify documentation for gofer filesystem. We now allow hard links to be created within gofer fs (see github.com/google/gvisor/commit/f20e63e31b56784c596897e86f03441f9d05f567). Update the inotify documentation accordingly.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/gofer/gofer.go", "new_path": "pkg/sentry/fsimpl/gofer/gofer.go", "diff": "@@ -703,6 +703,13 @@ type dentry struct {\nlocks vfs.FileLocks\n// Inotify watches for this dentry.\n+ //\n+ // Note that inotify may behave unexpectedly in the presence of hard links,\n+ // because dentries corresponding to the same file have separate inotify\n+ // watches when they should share the same set. This is the case because it is\n+ // impossible for us to know for sure whether two dentries correspond to the\n+ // same underlying file (see the gofer filesystem section fo vfs/inotify.md for\n+ // a more in-depth discussion on this matter).\nwatches vfs.Watches\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/syscalls/linux/linux64.go", "new_path": "pkg/sentry/syscalls/linux/linux64.go", "diff": "@@ -305,9 +305,9 @@ var AMD64 = &kernel.SyscallTable{\n250: syscalls.Error(\"keyctl\", syserror.EACCES, \"Not available to user.\", nil),\n251: syscalls.CapError(\"ioprio_set\", linux.CAP_SYS_ADMIN, \"\", nil), // requires cap_sys_nice or cap_sys_admin (depending)\n252: syscalls.CapError(\"ioprio_get\", linux.CAP_SYS_ADMIN, \"\", nil), // requires cap_sys_nice or cap_sys_admin (depending)\n- 253: syscalls.PartiallySupported(\"inotify_init\", InotifyInit, \"inotify events are only available inside the sandbox.\", nil),\n- 254: syscalls.PartiallySupported(\"inotify_add_watch\", InotifyAddWatch, \"inotify events are only available inside the sandbox.\", nil),\n- 255: syscalls.PartiallySupported(\"inotify_rm_watch\", InotifyRmWatch, \"inotify events are only available inside the sandbox.\", nil),\n+ 253: syscalls.PartiallySupported(\"inotify_init\", InotifyInit, \"Inotify events are only available inside the sandbox. Hard links are treated as different watch targets in gofer fs.\", nil),\n+ 254: syscalls.PartiallySupported(\"inotify_add_watch\", InotifyAddWatch, \"Inotify events are only available inside the sandbox. Hard links are treated as different watch targets in gofer fs.\", nil),\n+ 255: syscalls.PartiallySupported(\"inotify_rm_watch\", InotifyRmWatch, \"Inotify events are only available inside the sandbox. Hard links are treated as different watch targets in gofer fs.\", nil),\n256: syscalls.CapError(\"migrate_pages\", linux.CAP_SYS_NICE, \"\", nil),\n257: syscalls.Supported(\"openat\", Openat),\n258: syscalls.Supported(\"mkdirat\", Mkdirat),\n@@ -346,7 +346,7 @@ var AMD64 = &kernel.SyscallTable{\n291: syscalls.Supported(\"epoll_create1\", EpollCreate1),\n292: syscalls.Supported(\"dup3\", Dup3),\n293: syscalls.Supported(\"pipe2\", Pipe2),\n- 294: syscalls.Supported(\"inotify_init1\", InotifyInit1),\n+ 294: syscalls.PartiallySupported(\"inotify_init1\", InotifyInit1, \"Inotify events are only available inside the sandbox. 
Hard links are treated as different watch targets in gofer fs.\", nil),\n295: syscalls.Supported(\"preadv\", Preadv),\n296: syscalls.Supported(\"pwritev\", Pwritev),\n297: syscalls.Supported(\"rt_tgsigqueueinfo\", RtTgsigqueueinfo),\n@@ -454,9 +454,9 @@ var ARM64 = &kernel.SyscallTable{\n23: syscalls.Supported(\"dup\", Dup),\n24: syscalls.Supported(\"dup3\", Dup3),\n25: syscalls.PartiallySupported(\"fcntl\", Fcntl, \"Not all options are supported.\", nil),\n- 26: syscalls.Supported(\"inotify_init1\", InotifyInit1),\n- 27: syscalls.PartiallySupported(\"inotify_add_watch\", InotifyAddWatch, \"inotify events are only available inside the sandbox.\", nil),\n- 28: syscalls.PartiallySupported(\"inotify_rm_watch\", InotifyRmWatch, \"inotify events are only available inside the sandbox.\", nil),\n+ 26: syscalls.PartiallySupported(\"inotify_init1\", InotifyInit1, \"Inotify events are only available inside the sandbox. Hard links are treated as different watch targets in gofer fs.\", nil),\n+ 27: syscalls.PartiallySupported(\"inotify_add_watch\", InotifyAddWatch, \"Inotify events are only available inside the sandbox. Hard links are treated as different watch targets in gofer fs.\", nil),\n+ 28: syscalls.PartiallySupported(\"inotify_rm_watch\", InotifyRmWatch, \"Inotify events are only available inside the sandbox. Hard links are treated as different watch targets in gofer fs.\", nil),\n29: syscalls.PartiallySupported(\"ioctl\", Ioctl, \"Only a few ioctls are implemented for backing devices and file systems.\", nil),\n30: syscalls.CapError(\"ioprio_set\", linux.CAP_SYS_ADMIN, \"\", nil), // requires cap_sys_nice or cap_sys_admin (depending)\n31: syscalls.CapError(\"ioprio_get\", linux.CAP_SYS_ADMIN, \"\", nil), // requires cap_sys_nice or cap_sys_admin (depending)\n" } ]
Go
Apache License 2.0
google/gvisor
Update inotify documentation for gofer filesystem. We now allow hard links to be created within gofer fs (see github.com/google/gvisor/commit/f20e63e31b56784c596897e86f03441f9d05f567). Update the inotify documentation accordingly. PiperOrigin-RevId: 328177485
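To make the documented caveat concrete, here is a small, hypothetical Go sketch (using golang.org/x/sys/unix; paths are placeholders and this is not gVisor code) that adds inotify watches through two hard links of the same file. On native Linux both names resolve to the same inode, so the two calls return the same watch descriptor; the gofer filesystem comment above explains why the sentry may instead treat the two dentries as distinct watch targets.

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	// Create a file and a hard link to it (placeholder paths).
	if err := os.WriteFile("/tmp/a", []byte("x"), 0644); err != nil {
		panic(err)
	}
	if err := os.Link("/tmp/a", "/tmp/b"); err != nil {
		panic(err)
	}

	fd, err := unix.InotifyInit1(0)
	if err != nil {
		panic(err)
	}
	defer unix.Close(fd)

	// Watch the same inode through both names.
	wd1, err := unix.InotifyAddWatch(fd, "/tmp/a", unix.IN_MODIFY)
	if err != nil {
		panic(err)
	}
	wd2, err := unix.InotifyAddWatch(fd, "/tmp/b", unix.IN_MODIFY)
	if err != nil {
		panic(err)
	}

	// On Linux inotify watches are per-inode, so wd1 == wd2 here.
	// Under gofer fs the two dentries may carry separate watch sets,
	// which is the behavior the documentation update describes.
	fmt.Println(wd1, wd2)
}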
259,881
24.08.2020 12:56:58
25,200
ab6c474210cca380e8e6504bb2368b077f50ceae
Bump build constraints to 1.17. This enables pre-release testing with 1.16. The intention is to replace these with a nogo check before the next release.
[ { "change_type": "MODIFY", "old_path": "pkg/procid/procid_amd64.s", "new_path": "pkg/procid/procid_amd64.s", "diff": "// +build amd64\n// +build go1.8\n-// +build !go1.16\n+// +build !go1.17\n#include \"textflag.h\"\n" }, { "change_type": "MODIFY", "old_path": "pkg/procid/procid_arm64.s", "new_path": "pkg/procid/procid_arm64.s", "diff": "// +build arm64\n// +build go1.8\n-// +build !go1.16\n+// +build !go1.17\n#include \"textflag.h\"\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/platform/kvm/bluepill_unsafe.go", "new_path": "pkg/sentry/platform/kvm/bluepill_unsafe.go", "diff": "// limitations under the License.\n// +build go1.12\n-// +build !go1.16\n+// +build !go1.17\n// Check go:linkname function signatures when updating Go version.\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/platform/kvm/machine_unsafe.go", "new_path": "pkg/sentry/platform/kvm/machine_unsafe.go", "diff": "// limitations under the License.\n// +build go1.12\n-// +build !go1.16\n+// +build !go1.17\n// Check go:linkname function signatures when updating Go version.\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/platform/ptrace/subprocess_unsafe.go", "new_path": "pkg/sentry/platform/ptrace/subprocess_unsafe.go", "diff": "// limitations under the License.\n// +build go1.12\n-// +build !go1.16\n+// +build !go1.17\n// Check go:linkname function signatures when updating Go version.\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/vfs/mount_unsafe.go", "new_path": "pkg/sentry/vfs/mount_unsafe.go", "diff": "// limitations under the License.\n// +build go1.12\n-// +build !go1.16\n+// +build !go1.17\n// Check go:linkname function signatures when updating Go version.\n" }, { "change_type": "MODIFY", "old_path": "pkg/sleep/sleep_unsafe.go", "new_path": "pkg/sleep/sleep_unsafe.go", "diff": "// limitations under the License.\n// +build go1.11\n-// +build !go1.16\n+// +build !go1.17\n// Check go:linkname function signatures when updating Go version.\n" }, { "change_type": "MODIFY", "old_path": "pkg/sync/memmove_unsafe.go", "new_path": "pkg/sync/memmove_unsafe.go", "diff": "// license that can be found in the LICENSE file.\n// +build go1.12\n-// +build !go1.16\n+// +build !go1.17\n// Check go:linkname function signatures when updating Go version.\n" }, { "change_type": "MODIFY", "old_path": "pkg/sync/mutex_unsafe.go", "new_path": "pkg/sync/mutex_unsafe.go", "diff": "// license that can be found in the LICENSE file.\n// +build go1.13\n-// +build !go1.16\n+// +build !go1.17\n// When updating the build constraint (above), check that syncMutex matches the\n// standard library sync.Mutex definition.\n" }, { "change_type": "MODIFY", "old_path": "pkg/sync/rwmutex_unsafe.go", "new_path": "pkg/sync/rwmutex_unsafe.go", "diff": "// license that can be found in the LICENSE file.\n// +build go1.13\n-// +build !go1.16\n+// +build !go1.17\n// Check go:linkname function signatures when updating Go version.\n" }, { "change_type": "MODIFY", "old_path": "pkg/syncevent/waiter_unsafe.go", "new_path": "pkg/syncevent/waiter_unsafe.go", "diff": "// limitations under the License.\n// +build go1.11\n-// +build !go1.16\n+// +build !go1.17\n// Check go:linkname function signatures when updating Go version.\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/link/rawfile/blockingpoll_yield_unsafe.go", "new_path": "pkg/tcpip/link/rawfile/blockingpoll_yield_unsafe.go", "diff": "// +build linux,amd64 linux,arm64\n// +build go1.12\n-// +build !go1.16\n+// +build !go1.17\n// Check go:linkname function signatures when 
updating Go version.\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/time_unsafe.go", "new_path": "pkg/tcpip/time_unsafe.go", "diff": "// limitations under the License.\n// +build go1.9\n-// +build !go1.16\n+// +build !go1.17\n// Check go:linkname function signatures when updating Go version.\n" } ]
Go
Apache License 2.0
google/gvisor
Bump build constraints to 1.17. This enables pre-release testing with 1.16. The intention is to replace these with a nogo check before the next release. PiperOrigin-RevId: 328193911
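For context, a Go build constraint is just a comment near the top of a file. The hypothetical file below (not one of the files in the diff) shows the shape of the change: the upper bound moves from !go1.16 to !go1.17, so the file keeps compiling under a Go 1.16 toolchain while still forcing a re-check of its version-specific assumptions before Go 1.17.

// Hypothetical file relying on Go-version-specific internals (for
// example go:linkname targets that must be re-checked on each release).

// +build go1.12
// +build !go1.17

package example

// Before this change the second constraint read "// +build !go1.16",
// which excluded the file when building with a Go 1.16 toolchain.
// Multiple +build lines are ANDed, so the file now builds for
// go1.12 <= version < go1.17.
const checkedAgainstGoInternals = true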
259,975
24.08.2020 13:50:56
25,200
2b0b5e25210ea61c85610404d0eb7dc9383fcd14
Remove go profiling flag from dockerutil. Go profiling was removed from runsc debug in a previous change.
[ { "change_type": "MODIFY", "old_path": "pkg/test/dockerutil/dockerutil.go", "new_path": "pkg/test/dockerutil/dockerutil.go", "diff": "@@ -60,7 +60,6 @@ var (\n// enabled for each run.\npprofBlock = flag.Bool(\"pprof-block\", false, \"enables block profiling with runsc debug\")\npprofCPU = flag.Bool(\"pprof-cpu\", false, \"enables CPU profiling with runsc debug\")\n- pprofGo = flag.Bool(\"pprof-go\", false, \"enables goroutine profiling with runsc debug\")\npprofHeap = flag.Bool(\"pprof-heap\", false, \"enables heap profiling with runsc debug\")\npprofMutex = flag.Bool(\"pprof-mutex\", false, \"enables mutex profiling with runsc debug\")\n)\n" }, { "change_type": "MODIFY", "old_path": "pkg/test/dockerutil/profile.go", "new_path": "pkg/test/dockerutil/profile.go", "diff": "@@ -63,7 +63,7 @@ type Pprof struct {\n// MakePprofFromFlags makes a Pprof profile from flags.\nfunc MakePprofFromFlags(c *Container) *Pprof {\n- if !(*pprofBlock || *pprofCPU || *pprofGo || *pprofHeap || *pprofMutex) {\n+ if !(*pprofBlock || *pprofCPU || *pprofHeap || *pprofMutex) {\nreturn nil\n}\nreturn &Pprof{\n" } ]
Go
Apache License 2.0
google/gvisor
Remove go profiling flag from dockerutil. Go profiling was removed from runsc debug in a previous change. PiperOrigin-RevId: 328203826
259,885
24.08.2020 20:04:12
25,200
4ad858a586fc560608d181b72ec78db0894bce48
Flush in fsimpl/gofer.regularFileFD.OnClose() if there are no dirty pages. This is closer to indistinguishable from VFS1 behavior.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/gofer/regular_file.go", "new_path": "pkg/sentry/fsimpl/gofer/regular_file.go", "diff": "@@ -56,10 +56,16 @@ func (fd *regularFileFD) OnClose(ctx context.Context) error {\nif !fd.vfsfd.IsWritable() {\nreturn nil\n}\n- // Skip flushing if writes may be buffered by the client, since (as with\n- // the VFS1 client) we don't flush buffered writes on close anyway.\n+ // Skip flushing if there are client-buffered writes, since (as with the\n+ // VFS1 client) we don't flush buffered writes on close anyway.\nd := fd.dentry()\n- if d.fs.opts.interop == InteropModeExclusive {\n+ if d.fs.opts.interop != InteropModeExclusive {\n+ return nil\n+ }\n+ d.dataMu.RLock()\n+ haveDirtyPages := !d.dirty.IsEmpty()\n+ d.dataMu.RUnlock()\n+ if haveDirtyPages {\nreturn nil\n}\nd.handleMu.RLock()\n" } ]
Go
Apache License 2.0
google/gvisor
Flush in fsimpl/gofer.regularFileFD.OnClose() if there are no dirty pages. This is closer to indistinguishable from VFS1 behavior. PiperOrigin-RevId: 328256068
259,860
25.08.2020 00:24:16
25,200
c61f6fcf6ab3178da3a297d3e3199379b188ce61
Fix deadlock in gofer direct IO. Fixes several java runtime tests: java/nio/channels/FileChannel/directio/ReadDirect.java java/nio/channels/FileChannel/directio/PreadDirect.java Updates
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/gofer/regular_file.go", "new_path": "pkg/sentry/fsimpl/gofer/regular_file.go", "diff": "@@ -123,6 +123,10 @@ func (fd *regularFileFD) PRead(ctx context.Context, dst usermem.IOSequence, offs\nreturn 0, io.EOF\n}\n+ var (\n+ n int64\n+ readErr error\n+ )\nif fd.vfsfd.StatusFlags()&linux.O_DIRECT != 0 {\n// Lock d.metadataMu for the rest of the read to prevent d.size from\n// changing.\n@@ -133,20 +137,25 @@ func (fd *regularFileFD) PRead(ctx context.Context, dst usermem.IOSequence, offs\nif err := d.writeback(ctx, offset, dst.NumBytes()); err != nil {\nreturn 0, err\n}\n- }\n-\nrw := getDentryReadWriter(ctx, d, offset)\n- if fd.vfsfd.StatusFlags()&linux.O_DIRECT != 0 {\n// Require the read to go to the remote file.\nrw.direct = true\n+ n, readErr = dst.CopyOutFrom(ctx, rw)\n+ putDentryReadWriter(rw)\n+ if d.fs.opts.interop != InteropModeShared {\n+ // Compare Linux's mm/filemap.c:do_generic_file_read() => file_accessed().\n+ d.touchAtimeLocked(fd.vfsfd.Mount())\n}\n- n, err := dst.CopyOutFrom(ctx, rw)\n+ } else {\n+ rw := getDentryReadWriter(ctx, d, offset)\n+ n, readErr = dst.CopyOutFrom(ctx, rw)\nputDentryReadWriter(rw)\nif d.fs.opts.interop != InteropModeShared {\n// Compare Linux's mm/filemap.c:do_generic_file_read() => file_accessed().\nd.touchAtime(fd.vfsfd.Mount())\n}\n- return n, err\n+ }\n+ return n, readErr\n}\n// Read implements vfs.FileDescriptionImpl.Read.\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/gofer/time.go", "new_path": "pkg/sentry/fsimpl/gofer/time.go", "diff": "@@ -52,6 +52,20 @@ func (d *dentry) touchAtime(mnt *vfs.Mount) {\nmnt.EndWrite()\n}\n+// Preconditions: d.metadataMu is locked. d.cachedMetadataAuthoritative() == true.\n+func (d *dentry) touchAtimeLocked(mnt *vfs.Mount) {\n+ if mnt.Flags.NoATime || mnt.ReadOnly() {\n+ return\n+ }\n+ if err := mnt.CheckBeginWrite(); err != nil {\n+ return\n+ }\n+ now := d.fs.clock.Now().Nanoseconds()\n+ atomic.StoreInt64(&d.atime, now)\n+ atomic.StoreUint32(&d.atimeDirty, 1)\n+ mnt.EndWrite()\n+}\n+\n// Preconditions:\n// * d.cachedMetadataAuthoritative() == true.\n// * The caller has successfully called vfs.Mount.CheckBeginWrite().\n" } ]
Go
Apache License 2.0
google/gvisor
Fix deadlock in gofer direct IO. Fixes several java runtime tests: java/nio/channels/FileChannel/directio/ReadDirect.java java/nio/channels/FileChannel/directio/PreadDirect.java Updates #3576. PiperOrigin-RevId: 328281849
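The fix above follows a common Go locking idiom: a caller that already holds a mutex must use a helper variant that assumes the lock is held, rather than one that acquires it, because sync.Mutex is not reentrant and re-acquiring it deadlocks. The following is a generic, hypothetical sketch of that pattern, not the gofer code itself; the type and field names are placeholders.

package main

import (
	"fmt"
	"sync"
)

type dentry struct {
	mu    sync.Mutex
	atime int64
}

// touchAtime takes the lock itself; safe only when mu is NOT held.
func (d *dentry) touchAtime(now int64) {
	d.mu.Lock()
	defer d.mu.Unlock()
	d.touchAtimeLocked(now)
}

// touchAtimeLocked is the variant for callers that already hold mu.
// Preconditions: d.mu must be locked.
func (d *dentry) touchAtimeLocked(now int64) {
	d.atime = now
}

func (d *dentry) read(now int64) {
	d.mu.Lock()
	defer d.mu.Unlock()
	// Calling d.touchAtime(now) here would deadlock, since sync.Mutex
	// cannot be acquired twice by the same goroutine. Use the *Locked
	// variant instead.
	d.touchAtimeLocked(now)
}

func main() {
	d := &dentry{}
	d.read(42)
	fmt.Println(d.atime)
}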
259,907
25.08.2020 09:21:59
25,200
46485f9d473b849e3780af8b757244dde3deacf9
[go-marshal] Support marshalling for structs with names starting with W. Due to how the marshallable interface implementation was generated, we could not marshal a struct whose name started with W because there was a naming collision between the parameter (w io.Writer) and the receiver (w *StructName). Used "writer" as the parameter name to avoid the collision.
[ { "change_type": "MODIFY", "old_path": "tools/go_marshal/gomarshal/generator_interfaces_struct.go", "new_path": "tools/go_marshal/gomarshal/generator_interfaces_struct.go", "diff": "@@ -400,13 +400,13 @@ func (g *interfaceGenerator) emitMarshallableForStruct(st *ast.StructType) {\ng.emit(\"// WriteTo implements io.WriterTo.WriteTo.\\n\")\ng.recordUsedImport(\"io\")\n- g.emit(\"func (%s *%s) WriteTo(w io.Writer) (int64, error) {\\n\", g.r, g.typeName())\n+ g.emit(\"func (%s *%s) WriteTo(writer io.Writer) (int64, error) {\\n\", g.r, g.typeName())\ng.inIndent(func() {\nfallback := func() {\ng.emit(\"// Type %s doesn't have a packed layout in memory, fall back to MarshalBytes.\\n\", g.typeName())\ng.emit(\"buf := make([]byte, %s.SizeBytes())\\n\", g.r)\ng.emit(\"%s.MarshalBytes(buf)\\n\", g.r)\n- g.emit(\"length, err := w.Write(buf)\\n\")\n+ g.emit(\"length, err := writer.Write(buf)\\n\")\ng.emit(\"return int64(length), err\\n\")\n}\nif thisPacked {\n@@ -421,7 +421,7 @@ func (g *interfaceGenerator) emitMarshallableForStruct(st *ast.StructType) {\n// Fast serialization.\ng.emitCastToByteSlice(g.r, \"buf\", fmt.Sprintf(\"%s.SizeBytes()\", g.r))\n- g.emit(\"length, err := w.Write(buf)\\n\")\n+ g.emit(\"length, err := writer.Write(buf)\\n\")\ng.emitKeepAlive(g.r)\ng.emit(\"return int64(length), err\\n\")\n} else {\n" } ]
Go
Apache License 2.0
google/gvisor
[go-marshal] Support marshalling for structs with names starting with W. Due to how the marshallable interface implementation was generated, we could not marshal a struct whose name started with W because there was a naming collision between the parameter (w io.Writer) and the receiver (w *StructName). Used "writer" as the parameter name to avoid the collision. PiperOrigin-RevId: 328343930
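The collision is easiest to see in a reduced, hypothetical version of the generated code (this is not actual go_marshal output, and WStat and its body are illustrative): when the receiver of the generated WriteTo method is named after the type's first letter, a type starting with W would produce `func (w *WStat) WriteTo(w io.Writer)`, which fails to compile with a duplicate-argument error. Renaming the parameter to "writer", as the change does, avoids the clash.

package main

import (
	"bytes"
	"fmt"
	"io"
)

type WStat struct {
	Dev uint64
}

// With a receiver named "w" (first letter of WStat), the parameter can
// no longer also be called "w"; using "writer" keeps the generated
// method compilable regardless of the type name.
func (w *WStat) WriteTo(writer io.Writer) (int64, error) {
	// Hypothetical stand-in for the generated MarshalBytes call:
	// little-endian encode the single field.
	buf := make([]byte, 8)
	for i := 0; i < 8; i++ {
		buf[i] = byte(w.Dev >> (8 * uint(i)))
	}
	n, err := writer.Write(buf)
	return int64(n), err
}

func main() {
	var out bytes.Buffer
	s := WStat{Dev: 42}
	n, err := s.WriteTo(&out)
	fmt.Println(n, err, out.Len())
}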
259,858
25.08.2020 12:16:31
25,200
b0c53f8475d14606ef82aeddfb2f742269c1b5b7
Add nogo support to go_binary and go_test targets. Updates
[ { "change_type": "MODIFY", "old_path": "pkg/cpuid/cpuid_parse_x86_test.go", "new_path": "pkg/cpuid/cpuid_parse_x86_test.go", "diff": "@@ -32,27 +32,27 @@ func kernelVersion() (int, int, error) {\nreturn 0, 0, err\n}\n- var r string\n+ var sb strings.Builder\nfor _, b := range u.Release {\nif b == 0 {\nbreak\n}\n- r += string(b)\n+ sb.WriteByte(byte(b))\n}\n- s := strings.Split(r, \".\")\n+ s := strings.Split(sb.String(), \".\")\nif len(s) < 2 {\n- return 0, 0, fmt.Errorf(\"kernel release missing major and minor component: %s\", r)\n+ return 0, 0, fmt.Errorf(\"kernel release missing major and minor component: %s\", sb.String())\n}\nmajor, err := strconv.Atoi(s[0])\nif err != nil {\n- return 0, 0, fmt.Errorf(\"error parsing major version %q in %q: %v\", s[0], r, err)\n+ return 0, 0, fmt.Errorf(\"error parsing major version %q in %q: %w\", s[0], sb.String(), err)\n}\nminor, err := strconv.Atoi(s[1])\nif err != nil {\n- return 0, 0, fmt.Errorf(\"error parsing minor version %q in %q: %v\", s[1], r, err)\n+ return 0, 0, fmt.Errorf(\"error parsing minor version %q in %q: %w\", s[1], sb.String(), err)\n}\nreturn major, minor, nil\n" }, { "change_type": "MODIFY", "old_path": "pkg/seccomp/BUILD", "new_path": "pkg/seccomp/BUILD", "diff": "@@ -10,6 +10,7 @@ go_binary(\n\"seccomp_test_victim_amd64.go\",\n\"seccomp_test_victim_arm64.go\",\n],\n+ nogo = False,\ndeps = [\":seccomp\"],\n)\n" }, { "change_type": "MODIFY", "old_path": "test/benchmarks/database/redis_test.go", "new_path": "test/benchmarks/database/redis_test.go", "diff": "@@ -84,12 +84,12 @@ func BenchmarkRedis(b *testing.B) {\nip, err := serverMachine.IPAddress()\nif err != nil {\n- b.Fatal(\"failed to get IP from server: %v\", err)\n+ b.Fatalf(\"failed to get IP from server: %v\", err)\n}\nserverPort, err := server.FindPort(ctx, port)\nif err != nil {\n- b.Fatal(\"failed to get IP from server: %v\", err)\n+ b.Fatalf(\"failed to get IP from server: %v\", err)\n}\nif err = harness.WaitUntilServing(ctx, clientMachine, ip, serverPort); err != nil {\n" }, { "change_type": "MODIFY", "old_path": "test/benchmarks/fs/bazel_test.go", "new_path": "test/benchmarks/fs/bazel_test.go", "diff": "@@ -73,7 +73,7 @@ func runBuildBenchmark(b *testing.B, image, workdir, target string) {\nif bm.tmpfs {\nif out, err := container.Exec(ctx, dockerutil.ExecOpts{},\n\"cp\", \"-r\", workdir, \"/tmp/.\"); err != nil {\n- b.Fatal(\"failed to copy directory: %v %s\", err, out)\n+ b.Fatalf(\"failed to copy directory: %v (%s)\", err, out)\n}\nworkdir = \"/tmp\" + workdir\n}\n" }, { "change_type": "MODIFY", "old_path": "test/benchmarks/network/node_test.go", "new_path": "test/benchmarks/network/node_test.go", "diff": "@@ -48,14 +48,14 @@ func runNode(b *testing.B, hey *tools.Hey) {\n// The machine to hold Redis and the Node Server.\nserverMachine, err := h.GetMachine()\nif err != nil {\n- b.Fatal(\"failed to get machine with: %v\", err)\n+ b.Fatalf(\"failed to get machine with: %v\", err)\n}\ndefer serverMachine.CleanUp()\n// The machine to run 'hey'.\nclientMachine, err := h.GetMachine()\nif err != nil {\n- b.Fatal(\"failed to get machine with: %v\", err)\n+ b.Fatalf(\"failed to get machine with: %v\", err)\n}\ndefer clientMachine.CleanUp()\n" }, { "change_type": "MODIFY", "old_path": "test/benchmarks/network/ruby_test.go", "new_path": "test/benchmarks/network/ruby_test.go", "diff": "@@ -47,14 +47,14 @@ func runRuby(b *testing.B, hey *tools.Hey) {\n// The machine to hold Redis and the Ruby Server.\nserverMachine, err := h.GetMachine()\nif err != nil {\n- b.Fatal(\"failed 
to get machine with: %v\", err)\n+ b.Fatalf(\"failed to get machine with: %v\", err)\n}\ndefer serverMachine.CleanUp()\n// The machine to run 'hey'.\nclientMachine, err := h.GetMachine()\nif err != nil {\n- b.Fatal(\"failed to get machine with: %v\", err)\n+ b.Fatalf(\"failed to get machine with: %v\", err)\n}\ndefer clientMachine.CleanUp()\nctx := context.Background()\n" }, { "change_type": "MODIFY", "old_path": "test/packetimpact/runner/defs.bzl", "new_path": "test/packetimpact/runner/defs.bzl", "diff": "@@ -125,6 +125,7 @@ def packetimpact_go_test(name, size = \"small\", pure = True, expect_native_failur\nname = testbench_binary,\nsize = size,\npure = pure,\n+ nogo = False, # FIXME(gvisor.dev/issue/3374): Not working with all build systems.\ntags = [\n\"local\",\n\"manual\",\n" }, { "change_type": "MODIFY", "old_path": "test/root/crictl_test.go", "new_path": "test/root/crictl_test.go", "diff": "@@ -418,7 +418,7 @@ func setup(t *testing.T, version string) (*criutil.Crictl, func(), error) {\n// care about the docker runtime name.\nconfig = v2Template\ndefault:\n- t.Fatalf(\"unknown version: %d\", version)\n+ t.Fatalf(\"unknown version: %s\", version)\n}\nt.Logf(\"Using config: %s\", config)\n" }, { "change_type": "MODIFY", "old_path": "test/runtimes/proctor/BUILD", "new_path": "test/runtimes/proctor/BUILD", "diff": "@@ -21,6 +21,7 @@ go_test(\nsize = \"small\",\nsrcs = [\"proctor_test.go\"],\nlibrary = \":proctor\",\n+ nogo = False, # FIXME(gvisor.dev/issue/3374): Not working with all build systems.\npure = True,\ndeps = [\n\"//pkg/test/testutil\",\n" }, { "change_type": "MODIFY", "old_path": "tools/bazeldefs/defs.bzl", "new_path": "tools/bazeldefs/defs.bzl", "diff": "@@ -87,13 +87,14 @@ def cc_binary(name, static = False, **kwargs):\n**kwargs\n)\n-def go_binary(name, static = False, pure = False, **kwargs):\n+def go_binary(name, static = False, pure = False, x_defs = None, **kwargs):\n\"\"\"Build a go binary.\nArgs:\nname: name of the target.\nstatic: build a static binary.\npure: build without cgo.\n+ x_defs: additional definitions.\n**kwargs: rest of the arguments are passed to _go_binary.\n\"\"\"\nif static:\n@@ -102,6 +103,7 @@ def go_binary(name, static = False, pure = False, **kwargs):\nkwargs[\"pure\"] = \"on\"\n_go_binary(\nname = name,\n+ x_defs = x_defs,\n**kwargs\n)\n@@ -151,6 +153,11 @@ def go_rule(rule, implementation, **kwargs):\ntoolchains = kwargs.get(\"toolchains\", []) + [\"@io_bazel_rules_go//go:toolchain\"]\nreturn rule(implementation, attrs = attrs, toolchains = toolchains, **kwargs)\n+def go_test_library(target):\n+ if hasattr(target.attr, \"embed\") and len(target.attr.embed) > 0:\n+ return target.attr.embed[0]\n+ return None\n+\ndef go_context(ctx):\ngo_ctx = _go_context(ctx)\nreturn struct(\n" }, { "change_type": "MODIFY", "old_path": "tools/defs.bzl", "new_path": "tools/defs.bzl", "diff": "@@ -27,7 +27,6 @@ gbenchmark = _gbenchmark\ngazelle = _gazelle\ngo_embed_data = _go_embed_data\ngo_path = _go_path\n-go_test = _go_test\ngtest = _gtest\ngrpcpp = _grpcpp\nloopback = _loopback\n@@ -45,17 +44,35 @@ vdso_linker_option = _vdso_linker_option\ndefault_platform = _default_platform\nplatforms = _platforms\n-def go_binary(name, **kwargs):\n+def go_binary(name, nogo = True, pure = False, static = False, x_defs = None, **kwargs):\n\"\"\"Wraps the standard go_binary.\nArgs:\nname: the rule name.\n+ nogo: enable nogo analysis.\n+ pure: build a pure Go (no CGo) binary.\n+ static: build a static binary.\n+ x_defs: additional linker definitions.\n**kwargs: standard go_binary 
arguments.\n\"\"\"\n_go_binary(\nname = name,\n+ pure = pure,\n+ static = static,\n+ x_defs = x_defs,\n**kwargs\n)\n+ if nogo:\n+ # Note that the nogo rule applies only for go_library and go_test\n+ # targets, therefore we construct a library from the binary sources.\n+ _go_library(\n+ name = name + \"_nogo_library\",\n+ **kwargs\n+ )\n+ nogo_test(\n+ name = name + \"_nogo\",\n+ deps = [\":\" + name + \"_nogo_library\"],\n+ )\ndef calculate_sets(srcs):\n\"\"\"Calculates special Go sets for templates.\n@@ -119,6 +136,7 @@ def go_library(name, srcs, deps = [], imports = [], stateify = True, marshal = F\nstateify: whether statify is enabled (default: true).\nmarshal: whether marshal is enabled (default: false).\nmarshal_debug: whether the gomarshal tools emits debugging output (default: false).\n+ nogo: enable nogo analysis.\n**kwargs: standard go_library arguments.\n\"\"\"\nall_srcs = srcs\n@@ -202,6 +220,24 @@ def go_library(name, srcs, deps = [], imports = [], stateify = True, marshal = F\n**kwargs\n)\n+def go_test(name, nogo = True, **kwargs):\n+ \"\"\"Wraps the standard go_test.\n+\n+ Args:\n+ name: the rule name.\n+ nogo: enable nogo analysis.\n+ **kwargs: standard go_test arguments.\n+ \"\"\"\n+ _go_test(\n+ name = name,\n+ **kwargs\n+ )\n+ if nogo:\n+ nogo_test(\n+ name = name + \"_nogo\",\n+ deps = [\":\" + name],\n+ )\n+\ndef proto_library(name, srcs, deps = None, has_services = 0, **kwargs):\n\"\"\"Wraps the standard proto_library.\n" }, { "change_type": "MODIFY", "old_path": "tools/issue_reviver/BUILD", "new_path": "tools/issue_reviver/BUILD", "diff": "@@ -5,6 +5,7 @@ package(licenses = [\"notice\"])\ngo_binary(\nname = \"issue_reviver\",\nsrcs = [\"main.go\"],\n+ nogo = False,\ndeps = [\n\"//tools/issue_reviver/github\",\n\"//tools/issue_reviver/reviver\",\n" }, { "change_type": "MODIFY", "old_path": "tools/issue_reviver/github/BUILD", "new_path": "tools/issue_reviver/github/BUILD", "diff": "@@ -21,4 +21,5 @@ go_test(\nsize = \"small\",\nsrcs = [\"github_test.go\"],\nlibrary = \":github\",\n+ nogo = False,\n)\n" }, { "change_type": "MODIFY", "old_path": "tools/nogo/check/BUILD", "new_path": "tools/nogo/check/BUILD", "diff": "@@ -7,6 +7,7 @@ package(licenses = [\"notice\"])\ngo_binary(\nname = \"check\",\nsrcs = [\"main.go\"],\n+ nogo = False,\nvisibility = [\"//visibility:public\"],\ndeps = [\"//tools/nogo\"],\n)\n" }, { "change_type": "MODIFY", "old_path": "tools/nogo/defs.bzl", "new_path": "tools/nogo/defs.bzl", "diff": "\"\"\"Nogo rules.\"\"\"\n-load(\"//tools/bazeldefs:defs.bzl\", \"go_context\", \"go_importpath\", \"go_rule\")\n+load(\"//tools/bazeldefs:defs.bzl\", \"go_context\", \"go_importpath\", \"go_rule\", \"go_test_library\")\n# NogoInfo is the serialized set of package facts for a nogo analysis.\n#\n@@ -8,10 +8,13 @@ load(\"//tools/bazeldefs:defs.bzl\", \"go_context\", \"go_importpath\", \"go_rule\")\n# with the source files as input. Note however, that the individual nogo rules\n# are simply stubs that enter into the shadow dependency tree (the \"aspect\").\nNogoInfo = provider(\n+ \"information for nogo analysis\",\nfields = {\n\"facts\": \"serialized package facts\",\n\"importpath\": \"package import path\",\n\"binaries\": \"package binary files\",\n+ \"srcs\": \"original source files (for go_test support)\",\n+ \"deps\": \"original deps (for go_test support)\",\n},\n)\n@@ -21,16 +24,29 @@ def _nogo_aspect_impl(target, ctx):\n# All work is done in the shadow properties for go rules. 
For a proto\n# library, we simply skip the analysis portion but still need to return a\n# valid NogoInfo to reference the generated binary.\n- if ctx.rule.kind == \"go_library\":\n+ if ctx.rule.kind in (\"go_library\", \"go_binary\", \"go_test\", \"go_tool_library\"):\nsrcs = ctx.rule.files.srcs\n- elif ctx.rule.kind == \"go_proto_library\" or ctx.rule.kind == \"go_wrap_cc\":\n+ deps = ctx.rule.attr.deps\n+ elif ctx.rule.kind in (\"go_proto_library\", \"go_wrap_cc\"):\nsrcs = []\n+ deps = ctx.rule.attr.deps\nelse:\nreturn [NogoInfo()]\n- go_ctx = go_context(ctx)\n+ # If we're using the \"library\" attribute, then we need to aggregate the\n+ # original library sources and dependencies into this target to perform\n+ # proper type analysis.\n+ if ctx.rule.kind == \"go_test\":\n+ library = go_test_library(ctx.rule)\n+ if library != None:\n+ info = library[NogoInfo]\n+ if hasattr(info, \"srcs\"):\n+ srcs = srcs + info.srcs\n+ if hasattr(info, \"deps\"):\n+ deps = deps + info.deps\n# Construct the Go environment from the go_ctx.env dictionary.\n+ go_ctx = go_context(ctx)\nenv_prefix = \" \".join([\"%s=%s\" % (key, value) for (key, value) in go_ctx.env.items()])\n# Start with all target files and srcs as input.\n@@ -41,6 +57,13 @@ def _nogo_aspect_impl(target, ctx):\n# to cleanly allow us redirect stdout to the actual output file. Perhaps\n# I'm missing something here, but the intermediate script does work.\nbinaries = target.files.to_list()\n+ objfiles = [f for f in binaries if f.path.endswith(\".a\")]\n+ if len(objfiles) > 0:\n+ # Prefer the .a files for go_library targets.\n+ target_objfile = objfiles[0]\n+ else:\n+ # Use the raw binary for go_binary and go_test targets.\n+ target_objfile = binaries[0]\ndisasm_file = ctx.actions.declare_file(target.label.name + \".out\")\ndumper = ctx.actions.declare_file(\"%s-dumper\" % ctx.label.name)\nctx.actions.write(dumper, \"\\n\".join([\n@@ -48,12 +71,12 @@ def _nogo_aspect_impl(target, ctx):\n\"%s %s tool objdump %s > %s\\n\" % (\nenv_prefix,\ngo_ctx.go.path,\n- [f.path for f in binaries if f.path.endswith(\".a\")][0],\n+ target_objfile.path,\ndisasm_file.path,\n),\n]), is_executable = True)\nctx.actions.run(\n- inputs = binaries,\n+ inputs = [target_objfile],\noutputs = [disasm_file],\ntools = go_ctx.runfiles,\nmnemonic = \"GoObjdump\",\n@@ -63,6 +86,14 @@ def _nogo_aspect_impl(target, ctx):\ninputs.append(disasm_file)\n# Extract the importpath for this package.\n+ if ctx.rule.kind == \"go_test\":\n+ # If this is a test, then it will not be imported by anything else.\n+ # We can safely set the importapth to just \"test\". 
Note that this\n+ # is necessary if the library also imports the core library (in\n+ # addition to including the sources directly), which happens in\n+ # some complex cases (seccomp_victim).\n+ importpath = \"test\"\n+ else:\nimportpath = go_importpath(target)\n# The nogo tool requires a configfile serialized in JSON format to do its\n@@ -84,7 +115,7 @@ def _nogo_aspect_impl(target, ctx):\n)\n# Collect all info from shadow dependencies.\n- for dep in ctx.rule.attr.deps:\n+ for dep in deps:\n# There will be no file attribute set for all transitive dependencies\n# that are not go_library or go_binary rules, such as a proto rules.\n# This is handled by the ctx.rule.kind check above.\n@@ -126,12 +157,18 @@ def _nogo_aspect_impl(target, ctx):\nfacts = facts,\nimportpath = importpath,\nbinaries = binaries,\n+ srcs = srcs,\n+ deps = deps,\n)]\nnogo_aspect = go_rule(\naspect,\nimplementation = _nogo_aspect_impl,\n- attr_aspects = [\"deps\"],\n+ attr_aspects = [\n+ \"deps\",\n+ \"library\",\n+ \"embed\",\n+ ],\nattrs = {\n\"_nogo\": attr.label(\ndefault = \"//tools/nogo/check:check\",\n@@ -171,6 +208,10 @@ _nogo_test = rule(\ntest = True,\n)\n-def nogo_test(**kwargs):\n+def nogo_test(name, **kwargs):\ntags = kwargs.pop(\"tags\", []) + [\"nogo\"]\n- _nogo_test(tags = tags, **kwargs)\n+ _nogo_test(\n+ name = name,\n+ tags = tags,\n+ **kwargs\n+ )\n" } ]
Go
Apache License 2.0
google/gvisor
Add nogo support to go_binary and go_test targets. Updates #3374 PiperOrigin-RevId: 328378700
259,858
25.08.2020 12:21:37
25,200
3b2e50f539457556c33ea635b401a4f52e6a5274
Include shim in individual released binaries. The debian rules are also moved to the top-level, since they apply to binaries outside the //runsc directory. Fixes
[ { "change_type": "MODIFY", "old_path": "Makefile", "new_path": "Makefile", "diff": "@@ -114,7 +114,7 @@ runsc: ## Builds the runsc binary.\n.PHONY: runsc\ndebian: ## Builds the debian packages.\n- @$(call submake,build OPTIONS=\"-c opt\" TARGETS=\"//runsc:runsc-debian\")\n+ @$(call submake,build OPTIONS=\"-c opt\" TARGETS=\"//debian:debian\")\n.PHONY: debian\nsmoke-tests: ## Runs a simple smoke test after build runsc.\n@@ -301,8 +301,10 @@ $(RELEASE_KEY):\nrelease: $(RELEASE_KEY) ## Builds a release.\n@mkdir -p $(RELEASE_ROOT)\n@T=$$(mktemp -d /tmp/release.XXXXXX); \\\n- $(call submake,copy TARGETS=\"runsc\" DESTINATION=$$T) && \\\n- $(call submake,copy TARGETS=\"runsc:runsc-debian\" DESTINATION=$$T) && \\\n+ $(call submake,copy TARGETS=\"//runsc:runsc\" DESTINATION=$$T) && \\\n+ $(call submake,copy TARGETS=\"//shim/v1:gvisor-containerd-shim\" DESTINATION=$$T) && \\\n+ $(call submake,copy TARGETS=\"//shim/v2:containerd-shim-runsc-v1\" DESTINATION=$$T) && \\\n+ $(call submake,copy TARGETS=\"//debian:debian\" DESTINATION=$$T) && \\\nNIGHTLY=$(RELEASE_NIGHTLY) tools/make_release.sh $(RELEASE_KEY) $(RELEASE_ROOT) $$T/*; \\\nrc=$$?; rm -rf $$T; exit $$rc\n.PHONY: release\n" }, { "change_type": "ADD", "old_path": null, "new_path": "debian/BUILD", "diff": "+load(\"//tools:defs.bzl\", \"pkg_deb\", \"pkg_tar\")\n+\n+package(licenses = [\"notice\"])\n+\n+pkg_tar(\n+ name = \"debian-bin\",\n+ srcs = [\n+ \"//runsc\",\n+ \"//shim/v1:gvisor-containerd-shim\",\n+ \"//shim/v2:containerd-shim-runsc-v1\",\n+ ],\n+ mode = \"0755\",\n+ package_dir = \"/usr/bin\",\n+)\n+\n+pkg_tar(\n+ name = \"debian-data\",\n+ extension = \"tar.gz\",\n+ deps = [\n+ \":debian-bin\",\n+ \"//shim:config\",\n+ ],\n+)\n+\n+genrule(\n+ name = \"debian-version\",\n+ # Note that runsc must appear in the srcs parameter and not the tools\n+ # parameter, otherwise it will not be stamped. This is reasonable, as tools\n+ # may be encoded differently in the build graph (cached more aggressively\n+ # because they are assumes to be hermetic).\n+ srcs = [\"//runsc\"],\n+ outs = [\"version.txt\"],\n+ # Note that the little dance here is necessary because files in the $(SRCS)\n+ # attribute are not executable by default, and we can't touch in place.\n+ cmd = \"cp $(location //runsc:runsc) $(@D)/runsc && \\\n+ chmod a+x $(@D)/runsc && \\\n+ $(@D)/runsc -version | grep version | sed 's/^[^0-9]*//' > $@ && \\\n+ rm -f $(@D)/runsc\",\n+ stamp = 1,\n+)\n+\n+pkg_deb(\n+ name = \"debian\",\n+ architecture = \"amd64\",\n+ data = \":debian-data\",\n+ # Note that the description_file will be flatten (all newlines removed),\n+ # and therefore it is kept to a simple one-line description. 
The expected\n+ # format for debian packages is \"short summary\\nLonger explanation of\n+ # tool.\" and this is impossible with the flattening.\n+ description_file = \"description\",\n+ homepage = \"https://gvisor.dev/\",\n+ maintainer = \"The gVisor Authors <[email protected]>\",\n+ package = \"runsc\",\n+ postinst = \"postinst.sh\",\n+ version_file = \":version.txt\",\n+ visibility = [\n+ \"//visibility:public\",\n+ ],\n+)\n" }, { "change_type": "RENAME", "old_path": "runsc/debian/description", "new_path": "debian/description", "diff": "" }, { "change_type": "RENAME", "old_path": "runsc/debian/postinst.sh", "new_path": "debian/postinst.sh", "diff": "" }, { "change_type": "MODIFY", "old_path": "runsc/BUILD", "new_path": "runsc/BUILD", "diff": "-load(\"//tools:defs.bzl\", \"go_binary\", \"pkg_deb\", \"pkg_tar\")\n+load(\"//tools:defs.bzl\", \"go_binary\")\npackage(licenses = [\"notice\"])\n@@ -61,62 +61,6 @@ go_binary(\n],\n)\n-pkg_tar(\n- name = \"debian-bin\",\n- srcs = [\n- \":runsc\",\n- \"//shim/v1:gvisor-containerd-shim\",\n- \"//shim/v2:containerd-shim-runsc-v1\",\n- ],\n- mode = \"0755\",\n- package_dir = \"/usr/bin\",\n-)\n-\n-pkg_tar(\n- name = \"debian-data\",\n- extension = \"tar.gz\",\n- deps = [\n- \":debian-bin\",\n- \"//shim:config\",\n- ],\n-)\n-\n-genrule(\n- name = \"deb-version\",\n- # Note that runsc must appear in the srcs parameter and not the tools\n- # parameter, otherwise it will not be stamped. This is reasonable, as tools\n- # may be encoded differently in the build graph (cached more aggressively\n- # because they are assumes to be hermetic).\n- srcs = [\":runsc\"],\n- outs = [\"version.txt\"],\n- # Note that the little dance here is necessary because files in the $(SRCS)\n- # attribute are not executable by default, and we can't touch in place.\n- cmd = \"cp $(location :runsc) $(@D)/runsc && \\\n- chmod a+x $(@D)/runsc && \\\n- $(@D)/runsc -version | grep version | sed 's/^[^0-9]*//' > $@ && \\\n- rm -f $(@D)/runsc\",\n- stamp = 1,\n-)\n-\n-pkg_deb(\n- name = \"runsc-debian\",\n- architecture = \"amd64\",\n- data = \":debian-data\",\n- # Note that the description_file will be flatten (all newlines removed),\n- # and therefore it is kept to a simple one-line description. The expected\n- # format for debian packages is \"short summary\\nLonger explanation of\n- # tool.\" and this is impossible with the flattening.\n- description_file = \"debian/description\",\n- homepage = \"https://gvisor.dev/\",\n- maintainer = \"The gVisor Authors <[email protected]>\",\n- package = \"runsc\",\n- postinst = \"debian/postinst.sh\",\n- version_file = \":version.txt\",\n- visibility = [\n- \"//visibility:public\",\n- ],\n-)\n-\nsh_test(\nname = \"version_test\",\nsize = \"small\",\n" }, { "change_type": "MODIFY", "old_path": "shim/BUILD", "new_path": "shim/BUILD", "diff": "@@ -10,6 +10,6 @@ pkg_tar(\nmode = \"0644\",\npackage_dir = \"/etc/containerd\",\nvisibility = [\n- \"//runsc:__pkg__\",\n+ \"//visibility:public\",\n],\n)\n" } ]
Go
Apache License 2.0
google/gvisor
Include shim in individual released binaries. The debian rules are also moved to the top-level, since they apply to binaries outside the //runsc directory. Fixes #3665 PiperOrigin-RevId: 328379709
259,891
25.08.2020 13:41:23
25,200
3cba0a41d9839d5c93dd2d7deb27de5254412e96
remove iptables sockopt special cases: iptables sockopts were kludged into an unnecessary check; this properly relegates them to the {get,set}SockOptIP functions.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/socket/netstack/netstack.go", "new_path": "pkg/sentry/socket/netstack/netstack.go", "diff": "@@ -983,53 +983,12 @@ func (s *SocketOperations) GetSockOpt(t *kernel.Task, level, name int, outPtr us\nreturn &val, nil\n}\n- if s.skType == linux.SOCK_RAW && level == linux.IPPROTO_IP {\n- switch name {\n- case linux.IPT_SO_GET_INFO:\n- if outLen < linux.SizeOfIPTGetinfo {\n- return nil, syserr.ErrInvalidArgument\n- }\n- if s.family != linux.AF_INET {\n- return nil, syserr.ErrInvalidArgument\n- }\n-\n- stack := inet.StackFromContext(t)\n- if stack == nil {\n- return nil, syserr.ErrNoDevice\n- }\n- info, err := netfilter.GetInfo(t, stack.(*Stack).Stack, outPtr)\n- if err != nil {\n- return nil, err\n- }\n- return &info, nil\n-\n- case linux.IPT_SO_GET_ENTRIES:\n- if outLen < linux.SizeOfIPTGetEntries {\n- return nil, syserr.ErrInvalidArgument\n- }\n- if s.family != linux.AF_INET {\n- return nil, syserr.ErrInvalidArgument\n- }\n-\n- stack := inet.StackFromContext(t)\n- if stack == nil {\n- return nil, syserr.ErrNoDevice\n- }\n- entries, err := netfilter.GetEntries4(t, stack.(*Stack).Stack, outPtr, outLen)\n- if err != nil {\n- return nil, err\n- }\n- return &entries, nil\n-\n- }\n- }\n-\n- return GetSockOpt(t, s, s.Endpoint, s.family, s.skType, level, name, outLen)\n+ return GetSockOpt(t, s, s.Endpoint, s.family, s.skType, level, name, outPtr, outLen)\n}\n// GetSockOpt can be used to implement the linux syscall getsockopt(2) for\n// sockets backed by a commonEndpoint.\n-func GetSockOpt(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, family int, skType linux.SockType, level, name, outLen int) (marshal.Marshallable, *syserr.Error) {\n+func GetSockOpt(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, family int, skType linux.SockType, level, name int, outPtr usermem.Addr, outLen int) (marshal.Marshallable, *syserr.Error) {\nswitch level {\ncase linux.SOL_SOCKET:\nreturn getSockOptSocket(t, s, ep, family, skType, name, outLen)\n@@ -1041,7 +1000,7 @@ func GetSockOpt(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, family in\nreturn getSockOptIPv6(t, ep, name, outLen)\ncase linux.SOL_IP:\n- return getSockOptIP(t, ep, name, outLen, family)\n+ return getSockOptIP(t, s, ep, name, outPtr, outLen, family)\ncase linux.SOL_UDP,\nlinux.SOL_ICMPV6,\n@@ -1560,7 +1519,7 @@ func getSockOptIPv6(t *kernel.Task, ep commonEndpoint, name, outLen int) (marsha\n}\n// getSockOptIP implements GetSockOpt when level is SOL_IP.\n-func getSockOptIP(t *kernel.Task, ep commonEndpoint, name, outLen int, family int) (marshal.Marshallable, *syserr.Error) {\n+func getSockOptIP(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, name int, outPtr usermem.Addr, outLen int, family int) (marshal.Marshallable, *syserr.Error) {\nswitch name {\ncase linux.IP_TTL:\nif outLen < sizeOfInt32 {\n@@ -1676,6 +1635,46 @@ func getSockOptIP(t *kernel.Task, ep commonEndpoint, name, outLen int, family in\na, _ := ConvertAddress(linux.AF_INET, tcpip.FullAddress(v))\nreturn a.(*linux.SockAddrInet), nil\n+ case linux.IPT_SO_GET_INFO:\n+ if outLen < linux.SizeOfIPTGetinfo {\n+ return nil, syserr.ErrInvalidArgument\n+ }\n+\n+ // Only valid for raw IPv4 sockets.\n+ if family, skType, _ := s.Type(); family != linux.AF_INET || skType != linux.SOCK_RAW {\n+ return nil, syserr.ErrProtocolNotAvailable\n+ }\n+\n+ stack := inet.StackFromContext(t)\n+ if stack == nil {\n+ return nil, syserr.ErrNoDevice\n+ }\n+ info, err := netfilter.GetInfo(t, stack.(*Stack).Stack, outPtr)\n+ if err != nil {\n+ 
return nil, err\n+ }\n+ return &info, nil\n+\n+ case linux.IPT_SO_GET_ENTRIES:\n+ if outLen < linux.SizeOfIPTGetEntries {\n+ return nil, syserr.ErrInvalidArgument\n+ }\n+\n+ // Only valid for raw IPv4 sockets.\n+ if family, skType, _ := s.Type(); family != linux.AF_INET || skType != linux.SOCK_RAW {\n+ return nil, syserr.ErrProtocolNotAvailable\n+ }\n+\n+ stack := inet.StackFromContext(t)\n+ if stack == nil {\n+ return nil, syserr.ErrNoDevice\n+ }\n+ entries, err := netfilter.GetEntries4(t, stack.(*Stack).Stack, outPtr, outLen)\n+ if err != nil {\n+ return nil, err\n+ }\n+ return &entries, nil\n+\ndefault:\nemitUnimplementedEventIP(t, name)\n}\n@@ -1709,29 +1708,6 @@ func (s *SocketOperations) SetSockOpt(t *kernel.Task, level int, name int, optVa\nreturn nil\n}\n- if s.skType == linux.SOCK_RAW && level == linux.SOL_IP {\n- switch name {\n- case linux.IPT_SO_SET_REPLACE:\n- if len(optVal) < linux.SizeOfIPTReplace {\n- return syserr.ErrInvalidArgument\n- }\n- if s.family != linux.AF_INET {\n- return syserr.ErrInvalidArgument\n- }\n-\n- stack := inet.StackFromContext(t)\n- if stack == nil {\n- return syserr.ErrNoDevice\n- }\n- // Stack must be a netstack stack.\n- return netfilter.SetEntries(stack.(*Stack).Stack, optVal)\n-\n- case linux.IPT_SO_SET_ADD_COUNTERS:\n- // TODO(gvisor.dev/issue/170): Counter support.\n- return nil\n- }\n- }\n-\nreturn SetSockOpt(t, s, s.Endpoint, level, name, optVal)\n}\n@@ -1749,7 +1725,7 @@ func SetSockOpt(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, level int\nreturn setSockOptIPv6(t, ep, name, optVal)\ncase linux.SOL_IP:\n- return setSockOptIP(t, ep, name, optVal)\n+ return setSockOptIP(t, s, ep, name, optVal)\ncase linux.SOL_UDP,\nlinux.SOL_ICMPV6,\n@@ -2160,7 +2136,7 @@ func parseIntOrChar(buf []byte) (int32, *syserr.Error) {\n}\n// setSockOptIP implements SetSockOpt when level is SOL_IP.\n-func setSockOptIP(t *kernel.Task, ep commonEndpoint, name int, optVal []byte) *syserr.Error {\n+func setSockOptIP(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, name int, optVal []byte) *syserr.Error {\nswitch name {\ncase linux.IP_MULTICAST_TTL:\nv, err := parseIntOrChar(optVal)\n@@ -2280,6 +2256,27 @@ func setSockOptIP(t *kernel.Task, ep commonEndpoint, name int, optVal []byte) *s\n}\nreturn syserr.TranslateNetstackError(ep.SetSockOptBool(tcpip.IPHdrIncludedOption, v != 0))\n+ case linux.IPT_SO_SET_REPLACE:\n+ if len(optVal) < linux.SizeOfIPTReplace {\n+ return syserr.ErrInvalidArgument\n+ }\n+\n+ // Only valid for raw IPv4 sockets.\n+ if family, skType, _ := s.Type(); family != linux.AF_INET || skType != linux.SOCK_RAW {\n+ return syserr.ErrProtocolNotAvailable\n+ }\n+\n+ stack := inet.StackFromContext(t)\n+ if stack == nil {\n+ return syserr.ErrNoDevice\n+ }\n+ // Stack must be a netstack stack.\n+ return netfilter.SetEntries(stack.(*Stack).Stack, optVal)\n+\n+ case linux.IPT_SO_SET_ADD_COUNTERS:\n+ // TODO(gvisor.dev/issue/170): Counter support.\n+ return nil\n+\ncase linux.IP_ADD_SOURCE_MEMBERSHIP,\nlinux.IP_BIND_ADDRESS_NO_PORT,\nlinux.IP_BLOCK_SOURCE,\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/socket/netstack/netstack_vfs2.go", "new_path": "pkg/sentry/socket/netstack/netstack_vfs2.go", "diff": "@@ -21,10 +21,8 @@ import (\n\"gvisor.dev/gvisor/pkg/sentry/arch\"\nfslock \"gvisor.dev/gvisor/pkg/sentry/fs/lock\"\n\"gvisor.dev/gvisor/pkg/sentry/fsimpl/sockfs\"\n- \"gvisor.dev/gvisor/pkg/sentry/inet\"\n\"gvisor.dev/gvisor/pkg/sentry/kernel\"\n\"gvisor.dev/gvisor/pkg/sentry/socket\"\n- 
\"gvisor.dev/gvisor/pkg/sentry/socket/netfilter\"\n\"gvisor.dev/gvisor/pkg/sentry/vfs\"\n\"gvisor.dev/gvisor/pkg/syserr\"\n\"gvisor.dev/gvisor/pkg/syserror\"\n@@ -233,48 +231,7 @@ func (s *SocketVFS2) GetSockOpt(t *kernel.Task, level, name int, outPtr usermem.\nreturn &val, nil\n}\n- if s.skType == linux.SOCK_RAW && level == linux.IPPROTO_IP {\n- switch name {\n- case linux.IPT_SO_GET_INFO:\n- if outLen < linux.SizeOfIPTGetinfo {\n- return nil, syserr.ErrInvalidArgument\n- }\n- if s.family != linux.AF_INET {\n- return nil, syserr.ErrInvalidArgument\n- }\n-\n- stack := inet.StackFromContext(t)\n- if stack == nil {\n- return nil, syserr.ErrNoDevice\n- }\n- info, err := netfilter.GetInfo(t, stack.(*Stack).Stack, outPtr)\n- if err != nil {\n- return nil, err\n- }\n- return &info, nil\n-\n- case linux.IPT_SO_GET_ENTRIES:\n- if outLen < linux.SizeOfIPTGetEntries {\n- return nil, syserr.ErrInvalidArgument\n- }\n- if s.family != linux.AF_INET {\n- return nil, syserr.ErrInvalidArgument\n- }\n-\n- stack := inet.StackFromContext(t)\n- if stack == nil {\n- return nil, syserr.ErrNoDevice\n- }\n- entries, err := netfilter.GetEntries4(t, stack.(*Stack).Stack, outPtr, outLen)\n- if err != nil {\n- return nil, err\n- }\n- return &entries, nil\n-\n- }\n- }\n-\n- return GetSockOpt(t, s, s.Endpoint, s.family, s.skType, level, name, outLen)\n+ return GetSockOpt(t, s, s.Endpoint, s.family, s.skType, level, name, outPtr, outLen)\n}\n// SetSockOpt implements the linux syscall setsockopt(2) for sockets backed by\n@@ -304,29 +261,6 @@ func (s *SocketVFS2) SetSockOpt(t *kernel.Task, level int, name int, optVal []by\nreturn nil\n}\n- if s.skType == linux.SOCK_RAW && level == linux.SOL_IP {\n- switch name {\n- case linux.IPT_SO_SET_REPLACE:\n- if len(optVal) < linux.SizeOfIPTReplace {\n- return syserr.ErrInvalidArgument\n- }\n- if s.family != linux.AF_INET {\n- return syserr.ErrInvalidArgument\n- }\n-\n- stack := inet.StackFromContext(t)\n- if stack == nil {\n- return syserr.ErrNoDevice\n- }\n- // Stack must be a netstack stack.\n- return netfilter.SetEntries(stack.(*Stack).Stack, optVal)\n-\n- case linux.IPT_SO_SET_ADD_COUNTERS:\n- // TODO(gvisor.dev/issue/170): Counter support.\n- return nil\n- }\n- }\n-\nreturn SetSockOpt(t, s, s.Endpoint, level, name, optVal)\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/socket/unix/unix.go", "new_path": "pkg/sentry/socket/unix/unix.go", "diff": "@@ -194,7 +194,7 @@ func (s *SocketOperations) Ioctl(ctx context.Context, _ *fs.File, io usermem.IO,\n// GetSockOpt implements the linux syscall getsockopt(2) for sockets backed by\n// a transport.Endpoint.\nfunc (s *SocketOperations) GetSockOpt(t *kernel.Task, level, name int, outPtr usermem.Addr, outLen int) (marshal.Marshallable, *syserr.Error) {\n- return netstack.GetSockOpt(t, s, s.ep, linux.AF_UNIX, s.ep.Type(), level, name, outLen)\n+ return netstack.GetSockOpt(t, s, s.ep, linux.AF_UNIX, s.ep.Type(), level, name, outPtr, outLen)\n}\n// Listen implements the linux syscall listen(2) for sockets backed by\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/socket/unix/unix_vfs2.go", "new_path": "pkg/sentry/socket/unix/unix_vfs2.go", "diff": "@@ -91,7 +91,7 @@ func NewFileDescription(ep transport.Endpoint, stype linux.SockType, flags uint3\n// GetSockOpt implements the linux syscall getsockopt(2) for sockets backed by\n// a transport.Endpoint.\nfunc (s *SocketVFS2) GetSockOpt(t *kernel.Task, level, name int, outPtr usermem.Addr, outLen int) (marshal.Marshallable, *syserr.Error) {\n- return netstack.GetSockOpt(t, s, 
s.ep, linux.AF_UNIX, s.ep.Type(), level, name, outLen)\n+ return netstack.GetSockOpt(t, s, s.ep, linux.AF_UNIX, s.ep.Type(), level, name, outPtr, outLen)\n}\n// blockingAccept implements a blocking version of accept(2), that is, if no\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/iptables.cc", "new_path": "test/syscalls/linux/iptables.cc", "diff": "@@ -67,12 +67,43 @@ TEST(IPTablesBasic, FailSockoptNonRaw) {\nstruct ipt_getinfo info = {};\nsnprintf(info.name, XT_TABLE_MAXNAMELEN, \"%s\", kNatTablename);\nsocklen_t info_size = sizeof(info);\n- EXPECT_THAT(getsockopt(sock, IPPROTO_IP, IPT_SO_GET_INFO, &info, &info_size),\n+ EXPECT_THAT(getsockopt(sock, SOL_IP, IPT_SO_GET_INFO, &info, &info_size),\nSyscallFailsWithErrno(ENOPROTOOPT));\nASSERT_THAT(close(sock), SyscallSucceeds());\n}\n+TEST(IPTablesBasic, GetInfoErrorPrecedence) {\n+ SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));\n+\n+ int sock;\n+ ASSERT_THAT(sock = socket(AF_INET, SOCK_DGRAM, 0), SyscallSucceeds());\n+\n+ // When using the wrong type of socket and a too-short optlen, we should get\n+ // EINVAL.\n+ struct ipt_getinfo info = {};\n+ snprintf(info.name, XT_TABLE_MAXNAMELEN, \"%s\", kNatTablename);\n+ socklen_t info_size = sizeof(info) - 1;\n+ ASSERT_THAT(getsockopt(sock, SOL_IP, IPT_SO_GET_INFO, &info, &info_size),\n+ SyscallFailsWithErrno(EINVAL));\n+}\n+\n+TEST(IPTablesBasic, GetEntriesErrorPrecedence) {\n+ SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));\n+\n+ int sock;\n+ ASSERT_THAT(sock = socket(AF_INET, SOCK_DGRAM, 0), SyscallSucceeds());\n+\n+ // When using the wrong type of socket and a too-short optlen, we should get\n+ // EINVAL.\n+ struct ipt_get_entries entries = {};\n+ socklen_t entries_size = sizeof(struct ipt_get_entries) - 1;\n+ snprintf(entries.name, XT_TABLE_MAXNAMELEN, \"%s\", kNatTablename);\n+ ASSERT_THAT(\n+ getsockopt(sock, SOL_IP, IPT_SO_GET_ENTRIES, &entries, &entries_size),\n+ SyscallFailsWithErrno(EINVAL));\n+}\n+\n// Fixture for iptables tests.\nclass IPTablesTest : public ::testing::Test {\nprotected:\n@@ -112,7 +143,7 @@ TEST_F(IPTablesTest, InitialState) {\nstruct ipt_getinfo info = {};\nsnprintf(info.name, XT_TABLE_MAXNAMELEN, \"%s\", kNatTablename);\nsocklen_t info_size = sizeof(info);\n- ASSERT_THAT(getsockopt(s_, IPPROTO_IP, IPT_SO_GET_INFO, &info, &info_size),\n+ ASSERT_THAT(getsockopt(s_, SOL_IP, IPT_SO_GET_INFO, &info, &info_size),\nSyscallSucceeds());\n// The nat table supports PREROUTING, and OUTPUT.\n@@ -148,7 +179,7 @@ TEST_F(IPTablesTest, InitialState) {\nsnprintf(entries->name, XT_TABLE_MAXNAMELEN, \"%s\", kNatTablename);\nentries->size = info.size;\nASSERT_THAT(\n- getsockopt(s_, IPPROTO_IP, IPT_SO_GET_ENTRIES, entries, &entries_size),\n+ getsockopt(s_, SOL_IP, IPT_SO_GET_ENTRIES, entries, &entries_size),\nSyscallSucceeds());\n// Verify the name and size.\n" } ]
Go
Apache License 2.0
google/gvisor
remove iptables sockopt special cases iptables sockopts were kludged into an unnecessary check, this properly relegates them to the {get,set}SockOptIP functions. PiperOrigin-RevId: 328395135
259,858
25.08.2020 14:28:03
25,200
e65089029922b8a56bde8a82e4add1556204beaf
Provide --secret-keyring parameter (for newer gpg).
[ { "change_type": "MODIFY", "old_path": "tools/make_apt.sh", "new_path": "tools/make_apt.sh", "diff": "@@ -64,8 +64,8 @@ trap cleanup EXIT\n# is not found. This isn't actually a failure for us, because we don't require\n# the public (this may be stored separately). The second import will succeed\n# because, in reality, the first import succeeded and it's a no-op.\n-gpg --no-default-keyring --keyring \"${keyring}\" --import \"${private_key}\" || \\\n- gpg --no-default-keyring --keyring \"${keyring}\" --import \"${private_key}\"\n+gpg --no-default-keyring --keyring \"${keyring}\" --secret-keyring \"${keyring}\" --import \"${private_key}\" || \\\n+ gpg --no-default-keyring --keyring \"${keyring}\" --secret-keyring \"${keyring}\" --import \"${private_key}\"\n# Copy the packages into the root.\nfor pkg in \"$@\"; do\n" } ]
Go
Apache License 2.0
google/gvisor
Provide --secret-keyring parameter (for newer gpg). PiperOrigin-RevId: 328403914
259,885
25.08.2020 14:59:59
25,200
bee07a2d68dedd49fa5790803fb2f7076323a616
Link to PHP bug for disabled disk space tests.
[ { "change_type": "MODIFY", "old_path": "test/runtimes/exclude_php7.3.6.csv", "new_path": "test/runtimes/exclude_php7.3.6.csv", "diff": "@@ -13,13 +13,13 @@ ext/session/tests/session_set_save_handler_class_018.phpt,,\next/session/tests/session_set_save_handler_iface_003.phpt,,\next/session/tests/session_set_save_handler_sid_001.phpt,,\next/session/tests/session_set_save_handler_variation4.phpt,,\n-ext/standard/tests/file/disk.phpt,,Test bug\n-ext/standard/tests/file/disk_free_space_basic.phpt,,Test bug\n-ext/standard/tests/file/disk_free_space_error.phpt,,Test bug\n-ext/standard/tests/file/disk_free_space_variation.phpt,,Test bug\n-ext/standard/tests/file/disk_total_space_basic.phpt,,Test bug\n-ext/standard/tests/file/disk_total_space_error.phpt,,Test bug\n-ext/standard/tests/file/disk_total_space_variation.phpt,,Test bug\n+ext/standard/tests/file/disk.phpt,https://bugs.php.net/bug.php?id=80018,\n+ext/standard/tests/file/disk_free_space_basic.phpt,https://bugs.php.net/bug.php?id=80018,\n+ext/standard/tests/file/disk_free_space_error.phpt,https://bugs.php.net/bug.php?id=80018,\n+ext/standard/tests/file/disk_free_space_variation.phpt,https://bugs.php.net/bug.php?id=80018,\n+ext/standard/tests/file/disk_total_space_basic.phpt,https://bugs.php.net/bug.php?id=80018,\n+ext/standard/tests/file/disk_total_space_error.phpt,https://bugs.php.net/bug.php?id=80018,\n+ext/standard/tests/file/disk_total_space_variation.phpt,https://bugs.php.net/bug.php?id=80018,\next/standard/tests/file/fopen_variation19.phpt,b/162894964,\next/standard/tests/file/lstat_stat_variation14.phpt,,Flaky\next/standard/tests/file/php_fd_wrapper_01.phpt,,\n" } ]
Go
Apache License 2.0
google/gvisor
Link to PHP bug for disabled disk space tests. PiperOrigin-RevId: 328410399
259,907
25.08.2020 15:26:54
25,200
430487c9e7cb4425def8605c6730aa4a168b000d
[go-marshal] Enable auto-marshalling for host tty.
[ { "change_type": "MODIFY", "old_path": "pkg/abi/linux/tty.go", "new_path": "pkg/abi/linux/tty.go", "diff": "@@ -23,6 +23,8 @@ const (\n)\n// Winsize is struct winsize, defined in uapi/asm-generic/termios.h.\n+//\n+// +marshal\ntype Winsize struct {\nRow uint16\nCol uint16\n@@ -31,6 +33,8 @@ type Winsize struct {\n}\n// Termios is struct termios, defined in uapi/asm-generic/termbits.h.\n+//\n+// +marshal\ntype Termios struct {\nInputFlags uint32\nOutputFlags uint32\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fs/host/BUILD", "new_path": "pkg/sentry/fs/host/BUILD", "diff": "@@ -55,6 +55,7 @@ go_library(\n\"//pkg/unet\",\n\"//pkg/usermem\",\n\"//pkg/waiter\",\n+ \"//tools/go_marshal/primitive\",\n],\n)\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fs/host/tty.go", "new_path": "pkg/sentry/fs/host/tty.go", "diff": "@@ -24,6 +24,7 @@ import (\n\"gvisor.dev/gvisor/pkg/sync\"\n\"gvisor.dev/gvisor/pkg/syserror\"\n\"gvisor.dev/gvisor/pkg/usermem\"\n+ \"gvisor.dev/gvisor/tools/go_marshal/primitive\"\n)\n// LINT.IfChange\n@@ -123,6 +124,11 @@ func (t *TTYFileOperations) Release(ctx context.Context) {\n// Ioctl implements fs.FileOperations.Ioctl.\nfunc (t *TTYFileOperations) Ioctl(ctx context.Context, _ *fs.File, io usermem.IO, args arch.SyscallArguments) (uintptr, error) {\n+ task := kernel.TaskFromContext(ctx)\n+ if task == nil {\n+ return 0, syserror.ENOTTY\n+ }\n+\n// Ignore arg[0]. This is the real FD:\nfd := t.fileOperations.iops.fileState.FD()\nioctl := args[1].Uint64()\n@@ -132,9 +138,7 @@ func (t *TTYFileOperations) Ioctl(ctx context.Context, _ *fs.File, io usermem.IO\nif err != nil {\nreturn 0, err\n}\n- _, err = usermem.CopyObjectOut(ctx, io, args[2].Pointer(), termios, usermem.IOOpts{\n- AddressSpaceActive: true,\n- })\n+ _, err = termios.CopyOut(task, args[2].Pointer())\nreturn 0, err\ncase linux.TCSETS, linux.TCSETSW, linux.TCSETSF:\n@@ -146,9 +150,7 @@ func (t *TTYFileOperations) Ioctl(ctx context.Context, _ *fs.File, io usermem.IO\n}\nvar termios linux.Termios\n- if _, err := usermem.CopyObjectIn(ctx, io, args[2].Pointer(), &termios, usermem.IOOpts{\n- AddressSpaceActive: true,\n- }); err != nil {\n+ if _, err := termios.CopyIn(task, args[2].Pointer()); err != nil {\nreturn 0, err\n}\nerr := ioctlSetTermios(fd, ioctl, &termios)\n@@ -173,10 +175,8 @@ func (t *TTYFileOperations) Ioctl(ctx context.Context, _ *fs.File, io usermem.IO\n// Map the ProcessGroup into a ProcessGroupID in the task's PID\n// namespace.\n- pgID := pidns.IDOfProcessGroup(t.fgProcessGroup)\n- _, err := usermem.CopyObjectOut(ctx, io, args[2].Pointer(), &pgID, usermem.IOOpts{\n- AddressSpaceActive: true,\n- })\n+ pgID := primitive.Int32(pidns.IDOfProcessGroup(t.fgProcessGroup))\n+ _, err := pgID.CopyOut(task, args[2].Pointer())\nreturn 0, err\ncase linux.TIOCSPGRP:\n@@ -184,11 +184,6 @@ func (t *TTYFileOperations) Ioctl(ctx context.Context, _ *fs.File, io usermem.IO\n// Equivalent to tcsetpgrp(fd, *argp).\n// Set the foreground process group ID of this terminal.\n- task := kernel.TaskFromContext(ctx)\n- if task == nil {\n- return 0, syserror.ENOTTY\n- }\n-\nt.mu.Lock()\ndefer t.mu.Unlock()\n@@ -208,12 +203,11 @@ func (t *TTYFileOperations) Ioctl(ctx context.Context, _ *fs.File, io usermem.IO\nreturn 0, syserror.ENOTTY\n}\n- var pgID kernel.ProcessGroupID\n- if _, err := usermem.CopyObjectIn(ctx, io, args[2].Pointer(), &pgID, usermem.IOOpts{\n- AddressSpaceActive: true,\n- }); err != nil {\n+ var pgIDP primitive.Int32\n+ if _, err := pgIDP.CopyIn(task, args[2].Pointer()); err != nil {\nreturn 0, 
err\n}\n+ pgID := kernel.ProcessGroupID(pgIDP)\n// pgID must be non-negative.\nif pgID < 0 {\n@@ -242,9 +236,7 @@ func (t *TTYFileOperations) Ioctl(ctx context.Context, _ *fs.File, io usermem.IO\nif err != nil {\nreturn 0, err\n}\n- _, err = usermem.CopyObjectOut(ctx, io, args[2].Pointer(), winsize, usermem.IOOpts{\n- AddressSpaceActive: true,\n- })\n+ _, err = winsize.CopyOut(task, args[2].Pointer())\nreturn 0, err\ncase linux.TIOCSWINSZ:\n@@ -255,9 +247,7 @@ func (t *TTYFileOperations) Ioctl(ctx context.Context, _ *fs.File, io usermem.IO\n// background ones) can set the winsize.\nvar winsize linux.Winsize\n- if _, err := usermem.CopyObjectIn(ctx, io, args[2].Pointer(), &winsize, usermem.IOOpts{\n- AddressSpaceActive: true,\n- }); err != nil {\n+ if _, err := winsize.CopyIn(task, args[2].Pointer()); err != nil {\nreturn 0, err\n}\nerr := ioctlSetWinsize(fd, &winsize)\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/host/BUILD", "new_path": "pkg/sentry/fsimpl/host/BUILD", "diff": "@@ -72,6 +72,7 @@ go_library(\n\"//pkg/unet\",\n\"//pkg/usermem\",\n\"//pkg/waiter\",\n+ \"//tools/go_marshal/primitive\",\n\"@org_golang_x_sys//unix:go_default_library\",\n],\n)\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/host/tty.go", "new_path": "pkg/sentry/fsimpl/host/tty.go", "diff": "@@ -25,6 +25,7 @@ import (\n\"gvisor.dev/gvisor/pkg/sync\"\n\"gvisor.dev/gvisor/pkg/syserror\"\n\"gvisor.dev/gvisor/pkg/usermem\"\n+ \"gvisor.dev/gvisor/tools/go_marshal/primitive\"\n)\n// TTYFileDescription implements vfs.FileDescriptionImpl for a host file\n@@ -143,6 +144,11 @@ func (t *TTYFileDescription) Write(ctx context.Context, src usermem.IOSequence,\n// Ioctl implements vfs.FileDescriptionImpl.\nfunc (t *TTYFileDescription) Ioctl(ctx context.Context, io usermem.IO, args arch.SyscallArguments) (uintptr, error) {\n+ task := kernel.TaskFromContext(ctx)\n+ if task == nil {\n+ return 0, syserror.ENOTTY\n+ }\n+\n// Ignore arg[0]. 
This is the real FD:\nfd := t.inode.hostFD\nioctl := args[1].Uint64()\n@@ -152,9 +158,7 @@ func (t *TTYFileDescription) Ioctl(ctx context.Context, io usermem.IO, args arch\nif err != nil {\nreturn 0, err\n}\n- _, err = usermem.CopyObjectOut(ctx, io, args[2].Pointer(), termios, usermem.IOOpts{\n- AddressSpaceActive: true,\n- })\n+ _, err = termios.CopyOut(task, args[2].Pointer())\nreturn 0, err\ncase linux.TCSETS, linux.TCSETSW, linux.TCSETSF:\n@@ -166,9 +170,7 @@ func (t *TTYFileDescription) Ioctl(ctx context.Context, io usermem.IO, args arch\n}\nvar termios linux.Termios\n- if _, err := usermem.CopyObjectIn(ctx, io, args[2].Pointer(), &termios, usermem.IOOpts{\n- AddressSpaceActive: true,\n- }); err != nil {\n+ if _, err := termios.CopyIn(task, args[2].Pointer()); err != nil {\nreturn 0, err\n}\nerr := ioctlSetTermios(fd, ioctl, &termios)\n@@ -192,10 +194,8 @@ func (t *TTYFileDescription) Ioctl(ctx context.Context, io usermem.IO, args arch\ndefer t.mu.Unlock()\n// Map the ProcessGroup into a ProcessGroupID in the task's PID namespace.\n- pgID := pidns.IDOfProcessGroup(t.fgProcessGroup)\n- _, err := usermem.CopyObjectOut(ctx, io, args[2].Pointer(), &pgID, usermem.IOOpts{\n- AddressSpaceActive: true,\n- })\n+ pgID := primitive.Int32(pidns.IDOfProcessGroup(t.fgProcessGroup))\n+ _, err := pgID.CopyOut(task, args[2].Pointer())\nreturn 0, err\ncase linux.TIOCSPGRP:\n@@ -203,11 +203,6 @@ func (t *TTYFileDescription) Ioctl(ctx context.Context, io usermem.IO, args arch\n// Equivalent to tcsetpgrp(fd, *argp).\n// Set the foreground process group ID of this terminal.\n- task := kernel.TaskFromContext(ctx)\n- if task == nil {\n- return 0, syserror.ENOTTY\n- }\n-\nt.mu.Lock()\ndefer t.mu.Unlock()\n@@ -226,12 +221,11 @@ func (t *TTYFileDescription) Ioctl(ctx context.Context, io usermem.IO, args arch\nreturn 0, syserror.ENOTTY\n}\n- var pgID kernel.ProcessGroupID\n- if _, err := usermem.CopyObjectIn(ctx, io, args[2].Pointer(), &pgID, usermem.IOOpts{\n- AddressSpaceActive: true,\n- }); err != nil {\n+ var pgIDP primitive.Int32\n+ if _, err := pgIDP.CopyIn(task, args[2].Pointer()); err != nil {\nreturn 0, err\n}\n+ pgID := kernel.ProcessGroupID(pgIDP)\n// pgID must be non-negative.\nif pgID < 0 {\n@@ -260,9 +254,7 @@ func (t *TTYFileDescription) Ioctl(ctx context.Context, io usermem.IO, args arch\nif err != nil {\nreturn 0, err\n}\n- _, err = usermem.CopyObjectOut(ctx, io, args[2].Pointer(), winsize, usermem.IOOpts{\n- AddressSpaceActive: true,\n- })\n+ _, err = winsize.CopyOut(task, args[2].Pointer())\nreturn 0, err\ncase linux.TIOCSWINSZ:\n@@ -273,9 +265,7 @@ func (t *TTYFileDescription) Ioctl(ctx context.Context, io usermem.IO, args arch\n// set the winsize.\nvar winsize linux.Winsize\n- if _, err := usermem.CopyObjectIn(ctx, io, args[2].Pointer(), &winsize, usermem.IOOpts{\n- AddressSpaceActive: true,\n- }); err != nil {\n+ if _, err := winsize.CopyIn(task, args[2].Pointer()); err != nil {\nreturn 0, err\n}\nerr := ioctlSetWinsize(fd, &winsize)\n" } ]
Go
Apache License 2.0
google/gvisor
[go-marshal] Enable auto-marshalling for host tty. PiperOrigin-RevId: 328415633
259,964
25.08.2020 16:13:39
25,200
70a7a3ac704a47ec50525d06438ba4983da3af8b
Only send an ICMP error message if UDP checksum is valid. Test: - TestV4UnknownDestination - TestV6UnknownDestination
[ { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/udp/endpoint.go", "new_path": "pkg/tcpip/transport/udp/endpoint.go", "diff": "@@ -1366,6 +1366,22 @@ func (e *endpoint) Readiness(mask waiter.EventMask) waiter.EventMask {\nreturn result\n}\n+// verifyChecksum verifies the checksum unless RX checksum offload is enabled.\n+// On IPv4, UDP checksum is optional, and a zero value means the transmitter\n+// omitted the checksum generation (RFC768).\n+// On IPv6, UDP checksum is not optional (RFC2460 Section 8.1).\n+func verifyChecksum(r *stack.Route, hdr header.UDP, pkt *stack.PacketBuffer) bool {\n+ if r.Capabilities()&stack.CapabilityRXChecksumOffload == 0 &&\n+ (hdr.Checksum() != 0 || r.NetProto == header.IPv6ProtocolNumber) {\n+ xsum := r.PseudoHeaderChecksum(ProtocolNumber, hdr.Length())\n+ for _, v := range pkt.Data.Views() {\n+ xsum = header.Checksum(v, xsum)\n+ }\n+ return hdr.CalculateChecksum(xsum) == 0xffff\n+ }\n+ return true\n+}\n+\n// HandlePacket is called by the stack when new packets arrive to this transport\n// endpoint.\nfunc (e *endpoint) HandlePacket(r *stack.Route, id stack.TransportEndpointID, pkt *stack.PacketBuffer) {\n@@ -1387,23 +1403,12 @@ func (e *endpoint) HandlePacket(r *stack.Route, id stack.TransportEndpointID, pk\nreturn\n}\n- // Verify checksum unless RX checksum offload is enabled.\n- // On IPv4, UDP checksum is optional, and a zero value means\n- // the transmitter omitted the checksum generation (RFC768).\n- // On IPv6, UDP checksum is not optional (RFC2460 Section 8.1).\n- if r.Capabilities()&stack.CapabilityRXChecksumOffload == 0 &&\n- (hdr.Checksum() != 0 || r.NetProto == header.IPv6ProtocolNumber) {\n- xsum := r.PseudoHeaderChecksum(ProtocolNumber, hdr.Length())\n- for _, v := range pkt.Data.Views() {\n- xsum = header.Checksum(v, xsum)\n- }\n- if hdr.CalculateChecksum(xsum) != 0xffff {\n+ if !verifyChecksum(r, hdr, pkt) {\n// Checksum Error.\ne.stack.Stats().UDP.ChecksumErrors.Increment()\ne.stats.ReceiveErrors.ChecksumErrors.Increment()\nreturn\n}\n- }\ne.stack.Stats().UDP.PacketsReceived.Increment()\ne.stats.PacketsReceived.Increment()\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/udp/protocol.go", "new_path": "pkg/tcpip/transport/udp/protocol.go", "diff": "@@ -88,7 +88,12 @@ func (p *protocol) HandleUnknownDestinationPacket(r *stack.Route, id stack.Trans\nr.Stack().Stats().UDP.MalformedPacketsReceived.Increment()\nreturn true\n}\n- // TODO(b/129426613): only send an ICMP message if UDP checksum is valid.\n+\n+ if !verifyChecksum(r, hdr, pkt) {\n+ // Checksum Error.\n+ r.Stack().Stats().UDP.ChecksumErrors.Increment()\n+ return true\n+ }\n// Only send ICMP error if the address is not a multicast/broadcast\n// v4/v6 address or the source is not the unspecified address.\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/udp/udp_test.go", "new_path": "pkg/tcpip/transport/udp/udp_test.go", "diff": "@@ -403,18 +403,35 @@ func (c *testContext) getPacketAndVerify(flow testFlow, checkers ...checker.Netw\n}\n// injectPacket creates a packet of the given flow and with the given payload,\n-// and injects it into the link endpoint.\n-func (c *testContext) injectPacket(flow testFlow, payload []byte) {\n+// and injects it into the link endpoint. 
If badChecksum is true, the packet has\n+// a bad checksum in the UDP header.\n+func (c *testContext) injectPacket(flow testFlow, payload []byte, badChecksum bool) {\nc.t.Helper()\nh := flow.header4Tuple(incoming)\nif flow.isV4() {\nbuf := c.buildV4Packet(payload, &h)\n+ if badChecksum {\n+ // Invalidate the UDP header checksum field, taking care to avoid\n+ // overflow to zero, which would disable checksum validation.\n+ for u := header.UDP(buf[header.IPv4MinimumSize:]); ; {\n+ u.SetChecksum(u.Checksum() + 1)\n+ if u.Checksum() != 0 {\n+ break\n+ }\n+ }\n+ }\nc.linkEP.InjectInbound(ipv4.ProtocolNumber, stack.NewPacketBuffer(stack.PacketBufferOptions{\nData: buf.ToVectorisedView(),\n}))\n} else {\nbuf := c.buildV6Packet(payload, &h)\n+ if badChecksum {\n+ // Invalidate the UDP header checksum field (Unlike IPv4, zero is\n+ // a valid checksum value for IPv6 so no need to avoid it).\n+ u := header.UDP(buf[header.IPv6MinimumSize:])\n+ u.SetChecksum(u.Checksum() + 1)\n+ }\nc.linkEP.InjectInbound(ipv6.ProtocolNumber, stack.NewPacketBuffer(stack.PacketBufferOptions{\nData: buf.ToVectorisedView(),\n}))\n@@ -569,7 +586,7 @@ func testReadInternal(c *testContext, flow testFlow, packetShouldBeDropped, expe\nc.t.Helper()\npayload := newPayload()\n- c.injectPacket(flow, payload)\n+ c.injectPacket(flow, payload, false)\n// Try to receive the data.\nwe, ch := waiter.NewChannelEntry(nil)\n@@ -925,7 +942,7 @@ func TestReadFromMulticastStats(t *testing.T) {\n}\npayload := newPayload()\n- c.injectPacket(flow, payload)\n+ c.injectPacket(flow, payload, false)\nvar want uint64 = 0\nif flow.isReverseMulticast() {\n@@ -1727,21 +1744,33 @@ func TestV4UnknownDestination(t *testing.T) {\n// so that the final generated IPv4 packet is larger than\n// header.IPv4MinimumProcessableDatagramSize.\nlargePayload bool\n+ // badChecksum if true, will set an invalid checksum in the\n+ // header.\n+ badChecksum bool\n}{\n- {unicastV4, true, false},\n- {unicastV4, true, true},\n- {multicastV4, false, false},\n- {multicastV4, false, true},\n- {broadcast, false, false},\n- {broadcast, false, true},\n- }\n+ {unicastV4, true, false, false},\n+ {unicastV4, true, true, false},\n+ {unicastV4, false, false, true},\n+ {unicastV4, false, true, true},\n+ {multicastV4, false, false, false},\n+ {multicastV4, false, true, false},\n+ {broadcast, false, false, false},\n+ {broadcast, false, true, false},\n+ }\n+ checksumErrors := uint64(0)\nfor _, tc := range testCases {\n- t.Run(fmt.Sprintf(\"flow:%s icmpRequired:%t largePayload:%t\", tc.flow, tc.icmpRequired, tc.largePayload), func(t *testing.T) {\n+ t.Run(fmt.Sprintf(\"flow:%s icmpRequired:%t largePayload:%t badChecksum:%t\", tc.flow, tc.icmpRequired, tc.largePayload, tc.badChecksum), func(t *testing.T) {\npayload := newPayload()\nif tc.largePayload {\npayload = newMinPayload(576)\n}\n- c.injectPacket(tc.flow, payload)\n+ c.injectPacket(tc.flow, payload, tc.badChecksum)\n+ if tc.badChecksum {\n+ checksumErrors++\n+ if got, want := c.s.Stats().UDP.ChecksumErrors.Value(), checksumErrors; got != want {\n+ t.Fatalf(\"got stats.UDP.ChecksumErrors.Value() = %d, want = %d\", got, want)\n+ }\n+ }\nif !tc.icmpRequired {\nctx, cancel := context.WithTimeout(context.Background(), time.Second)\ndefer cancel()\n@@ -1806,19 +1835,31 @@ func TestV6UnknownDestination(t *testing.T) {\n// largePayload if true will result in a payload large enough to\n// create an IPv6 packet > header.IPv6MinimumMTU bytes.\nlargePayload bool\n+ // badChecksum if true, will set an invalid checksum in the\n+ // header.\n+ 
badChecksum bool\n}{\n- {unicastV6, true, false},\n- {unicastV6, true, true},\n- {multicastV6, false, false},\n- {multicastV6, false, true},\n- }\n+ {unicastV6, true, false, false},\n+ {unicastV6, true, true, false},\n+ {unicastV6, false, false, true},\n+ {unicastV6, false, true, true},\n+ {multicastV6, false, false, false},\n+ {multicastV6, false, true, false},\n+ }\n+ checksumErrors := uint64(0)\nfor _, tc := range testCases {\n- t.Run(fmt.Sprintf(\"flow:%s icmpRequired:%t largePayload:%t\", tc.flow, tc.icmpRequired, tc.largePayload), func(t *testing.T) {\n+ t.Run(fmt.Sprintf(\"flow:%s icmpRequired:%t largePayload:%t badChecksum:%t\", tc.flow, tc.icmpRequired, tc.largePayload, tc.badChecksum), func(t *testing.T) {\npayload := newPayload()\nif tc.largePayload {\npayload = newMinPayload(1280)\n}\n- c.injectPacket(tc.flow, payload)\n+ c.injectPacket(tc.flow, payload, tc.badChecksum)\n+ if tc.badChecksum {\n+ checksumErrors++\n+ if got, want := c.s.Stats().UDP.ChecksumErrors.Value(), checksumErrors; got != want {\n+ t.Fatalf(\"got stats.UDP.ChecksumErrors.Value() = %d, want = %d\", got, want)\n+ }\n+ }\nif !tc.icmpRequired {\nctx, cancel := context.WithTimeout(context.Background(), time.Second)\ndefer cancel()\n@@ -1953,34 +1994,21 @@ func TestShortHeader(t *testing.T) {\n}\n}\n-// TestIncrementChecksumErrorsV4 verifies if a checksum error is detected,\n+// TestBadChecksumErrors verifies if a checksum error is detected,\n// global and endpoint stats are incremented.\n-func TestIncrementChecksumErrorsV4(t *testing.T) {\n+func TestBadChecksumErrors(t *testing.T) {\n+ for _, flow := range []testFlow{unicastV4, unicastV6} {\nc := newDualTestContext(t, defaultMTU)\ndefer c.cleanup()\n- c.createEndpoint(ipv4.ProtocolNumber)\n+ c.createEndpoint(flow.sockProto())\n// Bind to wildcard.\nif err := c.ep.Bind(tcpip.FullAddress{Port: stackPort}); err != nil {\nc.t.Fatalf(\"Bind failed: %s\", err)\n}\npayload := newPayload()\n- h := unicastV4.header4Tuple(incoming)\n- buf := c.buildV4Packet(payload, &h)\n-\n- // Invalidate the UDP header checksum field, taking care to avoid\n- // overflow to zero, which would disable checksum validation.\n- for u := header.UDP(buf[header.IPv4MinimumSize:]); ; {\n- u.SetChecksum(u.Checksum() + 1)\n- if u.Checksum() != 0 {\n- break\n- }\n- }\n-\n- c.linkEP.InjectInbound(ipv4.ProtocolNumber, stack.NewPacketBuffer(stack.PacketBufferOptions{\n- Data: buf.ToVectorisedView(),\n- }))\n+ c.injectPacket(flow, payload, true /* badChecksum */)\nconst want = 1\nif got := c.s.Stats().UDP.ChecksumErrors.Value(); got != want {\n@@ -1990,38 +2018,6 @@ func TestIncrementChecksumErrorsV4(t *testing.T) {\nt.Errorf(\"got EP Stats.ReceiveErrors.ChecksumErrors stats = %d, want = %d\", got, want)\n}\n}\n-\n-// TestIncrementChecksumErrorsV6 verifies if a checksum error is detected,\n-// global and endpoint stats are incremented.\n-func TestIncrementChecksumErrorsV6(t *testing.T) {\n- c := newDualTestContext(t, defaultMTU)\n- defer c.cleanup()\n-\n- c.createEndpoint(ipv6.ProtocolNumber)\n- // Bind to wildcard.\n- if err := c.ep.Bind(tcpip.FullAddress{Port: stackPort}); err != nil {\n- c.t.Fatalf(\"Bind failed: %s\", err)\n- }\n-\n- payload := newPayload()\n- h := unicastV6.header4Tuple(incoming)\n- buf := c.buildV6Packet(payload, &h)\n-\n- // Invalidate the UDP header checksum field.\n- u := header.UDP(buf[header.IPv6MinimumSize:])\n- u.SetChecksum(u.Checksum() + 1)\n-\n- c.linkEP.InjectInbound(ipv6.ProtocolNumber, stack.NewPacketBuffer(stack.PacketBufferOptions{\n- Data: 
buf.ToVectorisedView(),\n- }))\n-\n- const want = 1\n- if got := c.s.Stats().UDP.ChecksumErrors.Value(); got != want {\n- t.Errorf(\"got stats.UDP.ChecksumErrors.Value() = %d, want = %d\", got, want)\n- }\n- if got := c.ep.Stats().(*tcpip.TransportEndpointStats).ReceiveErrors.ChecksumErrors.Value(); got != want {\n- t.Errorf(\"got EP Stats.ReceiveErrors.ChecksumErrors stats = %d, want = %d\", got, want)\n- }\n}\n// TestPayloadModifiedV4 verifies if a checksum error is detected,\n" } ]
Go
Apache License 2.0
google/gvisor
Only send an ICMP error message if UDP checksum is valid. Test: - TestV4UnknownDestination - TestV6UnknownDestination PiperOrigin-RevId: 328424137
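The record above hinges on UDP checksum semantics: a zero checksum is an allowed omission on IPv4 but never on IPv6. Below is a minimal, self-contained sketch of that check, assuming the pseudo-header and the UDP header plus payload arrive as plain byte slices; the names are illustrative and not the netstack API.

```go
package main

import "fmt"

// checksum computes the 16-bit one's complement sum used by UDP/IP,
// folding carries back into the low 16 bits.
func checksum(data []byte, initial uint32) uint32 {
	sum := initial
	for i := 0; i+1 < len(data); i += 2 {
		sum += uint32(data[i])<<8 | uint32(data[i+1])
	}
	if len(data)%2 == 1 {
		sum += uint32(data[len(data)-1]) << 8
	}
	for sum>>16 != 0 {
		sum = (sum >> 16) + (sum & 0xffff)
	}
	return sum
}

// verifyUDPChecksum mirrors the semantics in the commit: a zero checksum is
// accepted on IPv4 (the transmitter omitted it), but never on IPv6. For a
// valid packet, summing the pseudo-header and the UDP header plus payload
// (with the transmitted checksum still in place) must yield 0xffff.
func verifyUDPChecksum(isIPv6 bool, hdrChecksum uint16, pseudoHdr, udpHdrAndPayload []byte) bool {
	if hdrChecksum == 0 && !isIPv6 {
		return true // IPv4 allows an omitted checksum.
	}
	sum := checksum(udpHdrAndPayload, checksum(pseudoHdr, 0))
	return sum == 0xffff
}

func main() {
	// IPv4 packet with an omitted checksum is accepted without computation.
	fmt.Println(verifyUDPChecksum(false, 0, nil, nil)) // true
}
```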
259,885
25.08.2020 16:38:07
25,200
247dcd62d436943ad0bf8455c2be22bc36da6637
Return non-zero size for tmpfs statfs(2). This does not implement accepting or enforcing any size limit, which will be more complex and has performance implications; it just returns a fixed non-zero size. Updates
[ { "change_type": "MODIFY", "old_path": "pkg/abi/linux/fs.go", "new_path": "pkg/abi/linux/fs.go", "diff": "@@ -44,17 +44,18 @@ type Statfs struct {\n// Type is one of the filesystem magic values, defined above.\nType uint64\n- // BlockSize is the data block size.\n+ // BlockSize is the optimal transfer block size in bytes.\nBlockSize int64\n- // Blocks is the number of data blocks in use.\n+ // Blocks is the maximum number of data blocks the filesystem may store, in\n+ // units of BlockSize.\nBlocks uint64\n- // BlocksFree is the number of free blocks.\n+ // BlocksFree is the number of free data blocks, in units of BlockSize.\nBlocksFree uint64\n- // BlocksAvailable is the number of blocks free for use by\n- // unprivileged users.\n+ // BlocksAvailable is the number of data blocks free for use by\n+ // unprivileged users, in units of BlockSize.\nBlocksAvailable uint64\n// Files is the number of used file nodes on the filesystem.\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fs/tmpfs/tmpfs.go", "new_path": "pkg/sentry/fs/tmpfs/tmpfs.go", "diff": "package tmpfs\nimport (\n+ \"math\"\n+\n\"gvisor.dev/gvisor/pkg/abi/linux\"\n\"gvisor.dev/gvisor/pkg/context\"\n\"gvisor.dev/gvisor/pkg/sentry/fs\"\n@@ -32,9 +34,15 @@ import (\nvar fsInfo = fs.Info{\nType: linux.TMPFS_MAGIC,\n+ // tmpfs currently does not support configurable size limits. In Linux,\n+ // such a tmpfs mount will return f_blocks == f_bfree == f_bavail == 0 from\n+ // statfs(2). However, many applications treat this as having a size limit\n+ // of 0. To work around this, claim to have a very large but non-zero size,\n+ // chosen to ensure that BlockSize * Blocks does not overflow int64 (which\n+ // applications may also handle incorrectly).\n// TODO(b/29637826): allow configuring a tmpfs size and enforce it.\n- TotalBlocks: 0,\n- FreeBlocks: 0,\n+ TotalBlocks: math.MaxInt64 / usermem.PageSize,\n+ FreeBlocks: math.MaxInt64 / usermem.PageSize,\n}\n// rename implements fs.InodeOperations.Rename for tmpfs nodes.\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/tmpfs/filesystem.go", "new_path": "pkg/sentry/fsimpl/tmpfs/filesystem.go", "diff": "@@ -25,7 +25,6 @@ import (\n\"gvisor.dev/gvisor/pkg/sentry/socket/unix/transport\"\n\"gvisor.dev/gvisor/pkg/sentry/vfs\"\n\"gvisor.dev/gvisor/pkg/syserror\"\n- \"gvisor.dev/gvisor/pkg/usermem\"\n)\n// Sync implements vfs.FilesystemImpl.Sync.\n@@ -706,16 +705,7 @@ func (fs *filesystem) StatFSAt(ctx context.Context, rp *vfs.ResolvingPath) (linu\nif _, err := resolveLocked(ctx, rp); err != nil {\nreturn linux.Statfs{}, err\n}\n- statfs := linux.Statfs{\n- Type: linux.TMPFS_MAGIC,\n- BlockSize: usermem.PageSize,\n- FragmentSize: usermem.PageSize,\n- NameLength: linux.NAME_MAX,\n- // TODO(b/29637826): Allow configuring a tmpfs size and enforce it.\n- Blocks: 0,\n- BlocksFree: 0,\n- }\n- return statfs, nil\n+ return globalStatfs, nil\n}\n// SymlinkAt implements vfs.FilesystemImpl.SymlinkAt.\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/tmpfs/tmpfs.go", "new_path": "pkg/sentry/fsimpl/tmpfs/tmpfs.go", "diff": "@@ -201,6 +201,25 @@ func (fs *filesystem) Release(ctx context.Context) {\nfs.vfsfs.VirtualFilesystem().PutAnonBlockDevMinor(fs.devMinor)\n}\n+// immutable\n+var globalStatfs = linux.Statfs{\n+ Type: linux.TMPFS_MAGIC,\n+ BlockSize: usermem.PageSize,\n+ FragmentSize: usermem.PageSize,\n+ NameLength: linux.NAME_MAX,\n+\n+ // tmpfs currently does not support configurable size limits. 
In Linux,\n+ // such a tmpfs mount will return f_blocks == f_bfree == f_bavail == 0 from\n+ // statfs(2). However, many applications treat this as having a size limit\n+ // of 0. To work around this, claim to have a very large but non-zero size,\n+ // chosen to ensure that BlockSize * Blocks does not overflow int64 (which\n+ // applications may also handle incorrectly).\n+ // TODO(b/29637826): allow configuring a tmpfs size and enforce it.\n+ Blocks: math.MaxInt64 / usermem.PageSize,\n+ BlocksFree: math.MaxInt64 / usermem.PageSize,\n+ BlocksAvailable: math.MaxInt64 / usermem.PageSize,\n+}\n+\n// dentry implements vfs.DentryImpl.\ntype dentry struct {\nvfsd vfs.Dentry\n@@ -698,6 +717,11 @@ func (fd *fileDescription) SetStat(ctx context.Context, opts vfs.SetStatOptions)\nreturn nil\n}\n+// StatFS implements vfs.FileDescriptionImpl.StatFS.\n+func (fd *fileDescription) StatFS(ctx context.Context) (linux.Statfs, error) {\n+ return globalStatfs, nil\n+}\n+\n// Listxattr implements vfs.FileDescriptionImpl.Listxattr.\nfunc (fd *fileDescription) Listxattr(ctx context.Context, size uint64) ([]string, error) {\nreturn fd.inode().listxattr(size)\n" } ]
Go
Apache License 2.0
google/gvisor
Return non-zero size for tmpfs statfs(2). This does not implement accepting or enforcing any size limit, which will be more complex and has performance implications; it just returns a fixed non-zero size. Updates #1936 PiperOrigin-RevId: 328428588
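The size claimed in the commit above is deliberately MaxInt64 / PageSize blocks so that multiplying block size by block count cannot overflow int64. A quick standalone check of that arithmetic, with the page size hard-coded to 4096 purely for illustration:

```go
package main

import (
	"fmt"
	"math"
)

func main() {
	const pageSize = 4096 // illustrative; gVisor uses usermem.PageSize
	blocks := int64(math.MaxInt64 / pageSize)

	// Applications commonly compute total bytes as f_bsize * f_blocks, so the
	// product must stay within int64. Since MaxInt64/pageSize rounds down,
	// the product is at most MaxInt64.
	fmt.Printf("blocks=%d totalBytes=%d headroom=%d\n",
		blocks, blocks*pageSize, math.MaxInt64-blocks*pageSize)
}
```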
259,922
21.08.2020 13:48:49
-32,400
03fb0dbf4ce6c6f7f5b8a0c2da7e0df49e1444b5
use is-active instead of status
[ { "change_type": "MODIFY", "old_path": "debian/postinst.sh", "new_path": "debian/postinst.sh", "diff": "@@ -21,7 +21,7 @@ fi\n# Update docker configuration.\nif [ -f /etc/docker/daemon.json ]; then\nrunsc install\n- if systemctl status docker 2>/dev/null; then\n+ if systemctl is-active -q docker; then\nsystemctl restart docker || echo \"unable to restart docker; you must do so manually.\" >&2\nfi\nfi\n" } ]
Go
Apache License 2.0
google/gvisor
use is-active instead of status
259,975
25.08.2020 22:01:00
25,200
ebf5293374bc94e01eb58ebe29a1e53aa404d3a7
Fix SocketPairTest and BadSocketPairTest in opensource.
[ { "change_type": "MODIFY", "old_path": "test/syscalls/linux/socket_inet_loopback.cc", "new_path": "test/syscalls/linux/socket_inet_loopback.cc", "diff": "@@ -97,11 +97,13 @@ TEST(BadSocketPairArgs, ValidateErrForBadCallsToSocketPair) {\nASSERT_THAT(socketpair(AF_INET6, 0, 0, fd),\nSyscallFailsWithErrno(ESOCKTNOSUPPORT));\n- // Invalid AF will return ENOAFSUPPORT.\n+ // Invalid AF will return ENOAFSUPPORT or EPERM.\nASSERT_THAT(socketpair(AF_MAX, 0, 0, fd),\n- SyscallFailsWithErrno(EAFNOSUPPORT));\n+ ::testing::AnyOf(SyscallFailsWithErrno(EAFNOSUPPORT),\n+ SyscallFailsWithErrno(EPERM)));\nASSERT_THAT(socketpair(8675309, 0, 0, fd),\n- SyscallFailsWithErrno(EAFNOSUPPORT));\n+ ::testing::AnyOf(SyscallFailsWithErrno(EAFNOSUPPORT),\n+ SyscallFailsWithErrno(EPERM)));\n}\nenum class Operation {\n@@ -116,7 +118,8 @@ std::string OperationToString(Operation operation) {\nreturn \"Bind\";\ncase Operation::Connect:\nreturn \"Connect\";\n- case Operation::SendTo:\n+ // Operation::SendTo is the default.\n+ default:\nreturn \"SendTo\";\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/socket_ip_udp_generic.cc", "new_path": "test/syscalls/linux/socket_ip_udp_generic.cc", "diff": "@@ -435,8 +435,10 @@ TEST_P(UDPSocketPairTest, TOSRecvMismatch) {\n// Test that an IPv4 socket does not support the IPv6 TClass option.\nTEST_P(UDPSocketPairTest, TClassRecvMismatch) {\n- // This should only test AF_INET sockets for the mismatch behavior.\n- SKIP_IF(GetParam().domain != AF_INET);\n+ // This should only test AF_INET6 sockets for the mismatch behavior.\n+ SKIP_IF(GetParam().domain != AF_INET6);\n+ // IPV6_RECVTCLASS is only valid for SOCK_DGRAM and SOCK_RAW.\n+ SKIP_IF(GetParam().type != SOCK_DGRAM | GetParam().type != SOCK_RAW);\nauto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());\n" } ]
Go
Apache License 2.0
google/gvisor
Fix SocketPairTest and BadSocketPairTest in opensource. PiperOrigin-RevId: 328467152
260,004
27.08.2020 13:43:47
25,200
a5f1e742601aa0281cb2a170a97160dc35220ec3
Skip IPv6UDPUnboundSocketNetlinkTest on native linux ...while we figure out of we want to consider the loopback interface bound to all IPs in an assigned IPv6 subnet, or not (to maintain compatibility with Linux).
[ { "change_type": "MODIFY", "old_path": "test/syscalls/linux/socket_ipv6_udp_unbound_netlink.cc", "new_path": "test/syscalls/linux/socket_ipv6_udp_unbound_netlink.cc", "diff": "@@ -26,7 +26,10 @@ namespace testing {\n// Checks that the loopback interface considers itself bound to all IPs in an\n// associated subnet.\nTEST_P(IPv6UDPUnboundSocketNetlinkTest, JoinSubnet) {\n- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_ADMIN)));\n+ // TODO(b/166440211): Only run this test on gvisor or remove if the loopback\n+ // interface should not consider itself bound to all IPs in an IPv6 subnet.\n+ SKIP_IF(!IsRunningOnGvisor() ||\n+ !ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_ADMIN)));\n// Add an IP address to the loopback interface.\nLink loopback_link = ASSERT_NO_ERRNO_AND_VALUE(LoopbackLink());\n" } ]
Go
Apache License 2.0
google/gvisor
Skip IPv6UDPUnboundSocketNetlinkTest on native linux ...while we figure out if we want to consider the loopback interface bound to all IPs in an assigned IPv6 subnet, or not (to maintain compatibility with Linux). PiperOrigin-RevId: 328807974
259,975
27.08.2020 14:09:25
25,200
26c588f06368c21130bb356c6088b76ea715d1ef
Fix BadSocketPair for open source. BadSocketPair test will return several errnos (EPERM, ESOCKTNOSUPPORT, EAFNOSUPPORT) meaning the test is just too specific. Checking the syscall fails is appropriate.
[ { "change_type": "MODIFY", "old_path": "test/syscalls/linux/socket_inet_loopback.cc", "new_path": "test/syscalls/linux/socket_inet_loopback.cc", "diff": "@@ -97,13 +97,9 @@ TEST(BadSocketPairArgs, ValidateErrForBadCallsToSocketPair) {\nASSERT_THAT(socketpair(AF_INET6, 0, 0, fd),\nSyscallFailsWithErrno(ESOCKTNOSUPPORT));\n- // Invalid AF will return ENOAFSUPPORT or EPERM.\n- ASSERT_THAT(socketpair(AF_MAX, 0, 0, fd),\n- ::testing::AnyOf(SyscallFailsWithErrno(EAFNOSUPPORT),\n- SyscallFailsWithErrno(EPERM)));\n- ASSERT_THAT(socketpair(8675309, 0, 0, fd),\n- ::testing::AnyOf(SyscallFailsWithErrno(EAFNOSUPPORT),\n- SyscallFailsWithErrno(EPERM)));\n+ // Invalid AF will fail.\n+ ASSERT_THAT(socketpair(AF_MAX, 0, 0, fd), SyscallFails());\n+ ASSERT_THAT(socketpair(8675309, 0, 0, fd), SyscallFails());\n}\nenum class Operation {\n" } ]
Go
Apache License 2.0
google/gvisor
Fix BadSocketPair for open source. BadSocketPair test will return several errnos (EPERM, ESOCKTNOSUPPORT, EAFNOSUPPORT) meaning the test is just too specific. Checking the syscall fails is appropriate. PiperOrigin-RevId: 328813071
259,907
27.08.2020 16:28:36
25,200
57877b420caa02bf4c60004c7b434ceef8603b26
[go-marshal] Support for usermem.IOOpts.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/kernel/task_usermem.go", "new_path": "pkg/sentry/kernel/task_usermem.go", "diff": "@@ -301,3 +301,30 @@ func (t *Task) IovecsIOSequence(addr usermem.Addr, iovcnt int, opts usermem.IOOp\nOpts: opts,\n}, nil\n}\n+\n+// CopyContextWithOpts wraps a task to allow copying memory to and from the\n+// task memory with user specified usermem.IOOpts.\n+type CopyContextWithOpts struct {\n+ *Task\n+ opts usermem.IOOpts\n+}\n+\n+// AsCopyContextWithOpts wraps the task and returns it as CopyContextWithOpts.\n+func (t *Task) AsCopyContextWithOpts(opts usermem.IOOpts) *CopyContextWithOpts {\n+ return &CopyContextWithOpts{t, opts}\n+}\n+\n+// CopyInString copies a string in from the task's memory.\n+func (t *CopyContextWithOpts) CopyInString(addr usermem.Addr, maxLen int) (string, error) {\n+ return usermem.CopyStringIn(t, t.MemoryManager(), addr, maxLen, t.opts)\n+}\n+\n+// CopyInBytes copies task memory into dst from an IO context.\n+func (t *CopyContextWithOpts) CopyInBytes(addr usermem.Addr, dst []byte) (int, error) {\n+ return t.MemoryManager().CopyIn(t, addr, dst, t.opts)\n+}\n+\n+// CopyOutBytes copies src into task memoryfrom an IO context.\n+func (t *CopyContextWithOpts) CopyOutBytes(addr usermem.Addr, src []byte) (int, error) {\n+ return t.MemoryManager().CopyOut(t, addr, src, t.opts)\n+}\n" } ]
Go
Apache License 2.0
google/gvisor
[go-marshal] Support for usermem.IOOpts. PiperOrigin-RevId: 328839759
259,853
27.08.2020 16:50:11
25,200
dc008fbbccf69deba55fd7649424bc568f766596
unix: return ECONNREFUSED if a socket file exists but a socket isn't bound to it
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/gofer/filesystem.go", "new_path": "pkg/sentry/fsimpl/gofer/filesystem.go", "diff": "@@ -1512,8 +1512,10 @@ func (fs *filesystem) BoundEndpointAt(ctx context.Context, rp *vfs.ResolvingPath\npath: opts.Addr,\n}, nil\n}\n+ if d.endpoint != nil {\nreturn d.endpoint, nil\n}\n+ }\nreturn nil, syserror.ECONNREFUSED\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/tmpfs/filesystem.go", "new_path": "pkg/sentry/fsimpl/tmpfs/filesystem.go", "diff": "@@ -783,6 +783,9 @@ func (fs *filesystem) BoundEndpointAt(ctx context.Context, rp *vfs.ResolvingPath\n}\nswitch impl := d.inode.impl.(type) {\ncase *socketFile:\n+ if impl.ep == nil {\n+ return nil, syserror.ECONNREFUSED\n+ }\nreturn impl.ep, nil\ndefault:\nreturn nil, syserror.ECONNREFUSED\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/mknod.cc", "new_path": "test/syscalls/linux/mknod.cc", "diff": "#include <errno.h>\n#include <fcntl.h>\n+#include <sys/socket.h>\n#include <sys/stat.h>\n#include <sys/types.h>\n#include <sys/un.h>\n@@ -103,6 +104,24 @@ TEST(MknodTest, UnimplementedTypesReturnError) {\nASSERT_THAT(mknod(path.c_str(), S_IFBLK, 0), SyscallFailsWithErrno(EPERM));\n}\n+TEST(MknodTest, Socket) {\n+ ASSERT_THAT(chdir(GetAbsoluteTestTmpdir().c_str()), SyscallSucceeds());\n+\n+ SKIP_IF(IsRunningOnGvisor() && IsRunningWithVFS1());\n+\n+ ASSERT_THAT(mknod(\"./file0\", S_IFSOCK | S_IRUSR | S_IWUSR, 0),\n+ SyscallSucceeds());\n+\n+ int sk;\n+ ASSERT_THAT(sk = socket(AF_UNIX, SOCK_SEQPACKET, 0), SyscallSucceeds());\n+ FileDescriptor fd(sk);\n+\n+ struct sockaddr_un addr = {.sun_family = AF_UNIX};\n+ absl::SNPrintF(addr.sun_path, sizeof(addr.sun_path), \"./file0\");\n+ ASSERT_THAT(connect(sk, (struct sockaddr *)&addr, sizeof(addr)),\n+ SyscallFailsWithErrno(ECONNREFUSED));\n+}\n+\nTEST(MknodTest, Fifo) {\nconst std::string fifo = NewTempAbsPath();\nASSERT_THAT(mknod(fifo.c_str(), S_IFIFO | S_IRUSR | S_IWUSR, 0),\n" } ]
Go
Apache License 2.0
google/gvisor
unix: return ECONNREFUSED if a socket file exists but a socket isn't bound to it PiperOrigin-RevId: 328843560
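A small host-side reproduction of the behavior exercised by the commit's new test, sketched in Go rather than the C++ test framework. It assumes a Linux host, uses golang.org/x/sys/unix, and the socket path is made up:

```go
package main

import (
	"fmt"
	"net"
	"os"
	"path/filepath"

	"golang.org/x/sys/unix"
)

func main() {
	path := filepath.Join(os.TempDir(), "demo.sock")
	defer os.Remove(path)

	// Create a socket file with no endpoint bound to it (mknod with S_IFSOCK).
	if err := unix.Mknod(path, unix.S_IFSOCK|0600, 0); err != nil {
		fmt.Println("mknod:", err)
		return
	}

	// Connecting should fail with "connection refused", since nothing is
	// listening on the socket file.
	conn, err := net.Dial("unix", path)
	if err == nil {
		conn.Close()
	}
	fmt.Println(err)
}
```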
259,907
27.08.2020 19:25:23
25,200
421e35020bbca240d8f1cb5a2a3efd39750c4589
[go-marshal] Enable auto-marshalling for tundev.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/devices/tundev/tundev.go", "new_path": "pkg/sentry/devices/tundev/tundev.go", "diff": "@@ -64,12 +64,13 @@ func (fd *tunFD) Ioctl(ctx context.Context, uio usermem.IO, args arch.SyscallArg\nrequest := args[1].Uint()\ndata := args[2].Pointer()\n- switch request {\n- case linux.TUNSETIFF:\nt := kernel.TaskFromContext(ctx)\nif t == nil {\npanic(\"Ioctl should be called from a task context\")\n}\n+\n+ switch request {\n+ case linux.TUNSETIFF:\nif !t.HasCapability(linux.CAP_NET_ADMIN) {\nreturn 0, syserror.EPERM\n}\n@@ -79,9 +80,7 @@ func (fd *tunFD) Ioctl(ctx context.Context, uio usermem.IO, args arch.SyscallArg\n}\nvar req linux.IFReq\n- if _, err := usermem.CopyObjectIn(ctx, uio, data, &req, usermem.IOOpts{\n- AddressSpaceActive: true,\n- }); err != nil {\n+ if _, err := req.CopyIn(t, data); err != nil {\nreturn 0, err\n}\nflags := usermem.ByteOrder.Uint16(req.Data[:])\n@@ -97,9 +96,7 @@ func (fd *tunFD) Ioctl(ctx context.Context, uio usermem.IO, args arch.SyscallArg\nflags := fd.device.Flags() | linux.IFF_NOFILTER\nusermem.ByteOrder.PutUint16(req.Data[:], flags)\n- _, err := usermem.CopyObjectOut(ctx, uio, data, &req, usermem.IOOpts{\n- AddressSpaceActive: true,\n- })\n+ _, err := req.CopyOut(t, data)\nreturn 0, err\ndefault:\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fs/dev/net_tun.go", "new_path": "pkg/sentry/fs/dev/net_tun.go", "diff": "@@ -89,12 +89,13 @@ func (fops *netTunFileOperations) Ioctl(ctx context.Context, file *fs.File, io u\nrequest := args[1].Uint()\ndata := args[2].Pointer()\n- switch request {\n- case linux.TUNSETIFF:\nt := kernel.TaskFromContext(ctx)\nif t == nil {\npanic(\"Ioctl should be called from a task context\")\n}\n+\n+ switch request {\n+ case linux.TUNSETIFF:\nif !t.HasCapability(linux.CAP_NET_ADMIN) {\nreturn 0, syserror.EPERM\n}\n@@ -104,9 +105,7 @@ func (fops *netTunFileOperations) Ioctl(ctx context.Context, file *fs.File, io u\n}\nvar req linux.IFReq\n- if _, err := usermem.CopyObjectIn(ctx, io, data, &req, usermem.IOOpts{\n- AddressSpaceActive: true,\n- }); err != nil {\n+ if _, err := req.CopyIn(t, data); err != nil {\nreturn 0, err\n}\nflags := usermem.ByteOrder.Uint16(req.Data[:])\n@@ -122,9 +121,7 @@ func (fops *netTunFileOperations) Ioctl(ctx context.Context, file *fs.File, io u\nflags := fops.device.Flags() | linux.IFF_NOFILTER\nusermem.ByteOrder.PutUint16(req.Data[:], flags)\n- _, err := usermem.CopyObjectOut(ctx, io, data, &req, usermem.IOOpts{\n- AddressSpaceActive: true,\n- })\n+ _, err := req.CopyOut(t, data)\nreturn 0, err\ndefault:\n" } ]
Go
Apache License 2.0
google/gvisor
[go-marshal] Enable auto-marshalling for tundev. PiperOrigin-RevId: 328863725
260,004
28.08.2020 05:06:50
25,200
8ae0ab722c2e5d1bacb96fbac4baa25b49dadd3a
Use a single NetworkEndpoint per address This change was already done as of but conflicted with that change and it was missed in reviews. This change fixes the conflict.
[ { "change_type": "MODIFY", "old_path": "pkg/tcpip/stack/nic.go", "new_path": "pkg/tcpip/stack/nic.go", "diff": "@@ -137,6 +137,7 @@ func newNIC(stack *Stack, id tcpip.NICID, name string, ep LinkEndpoint, ctx NICC\nnic.mu.ndp.initializeTempAddrState()\n// Check for Neighbor Unreachability Detection support.\n+ var nud NUDHandler\nif ep.Capabilities()&CapabilityResolutionRequired != 0 && len(stack.linkAddrResolvers) != 0 && stack.useNeighborCache {\nrng := rand.New(rand.NewSource(stack.clock.NowNanoseconds()))\nnic.neigh = &neighborCache{\n@@ -144,16 +145,24 @@ func newNIC(stack *Stack, id tcpip.NICID, name string, ep LinkEndpoint, ctx NICC\nstate: NewNUDState(stack.nudConfigs, rng),\ncache: make(map[tcpip.Address]*neighborEntry, neighborCacheSize),\n}\n+\n+ // An interface value that holds a nil pointer but non-nil type is not the\n+ // same as the nil interface. Because of this, nud must only be assignd if\n+ // nic.neigh is non-nil since a nil reference to a neighborCache is not\n+ // valid.\n+ //\n+ // See https://golang.org/doc/faq#nil_error for more information.\n+ nud = nic.neigh\n}\n- // Register supported packet endpoint protocols.\n+ // Register supported packet and network endpoint protocols.\nfor _, netProto := range header.Ethertypes {\nnic.mu.packetEPs[netProto] = []PacketEndpoint{}\n}\nfor _, netProto := range stack.networkProtocols {\nnetNum := netProto.Number()\nnic.mu.packetEPs[netNum] = nil\n- nic.networkEndpoints[netNum] = netProto.NewEndpoint(id, stack, nic.neigh, nic, ep, stack)\n+ nic.networkEndpoints[netNum] = netProto.NewEndpoint(id, stack, nud, nic, ep, stack)\n}\nnic.linkEP.Attach(nic)\n@@ -819,24 +828,11 @@ func (n *NIC) addAddressLocked(protocolAddress tcpip.ProtocolAddress, peb Primar\n}\n}\n- netProto, ok := n.stack.networkProtocols[protocolAddress.Protocol]\n+ ep, ok := n.networkEndpoints[protocolAddress.Protocol]\nif !ok {\nreturn nil, tcpip.ErrUnknownProtocol\n}\n- var nud NUDHandler\n- if n.neigh != nil {\n- // An interface value that holds a nil concrete value is itself non-nil.\n- // For this reason, n.neigh cannot be passed directly to NewEndpoint so\n- // NetworkEndpoints don't confuse it for non-nil.\n- //\n- // See https://golang.org/doc/faq#nil_error for more information.\n- nud = n.neigh\n- }\n-\n- // Create the new network endpoint.\n- ep := netProto.NewEndpoint(n.id, n.stack, nud, n, n.linkEP, n.stack)\n-\nisIPv6Unicast := protocolAddress.Protocol == header.IPv6ProtocolNumber && header.IsV6UnicastAddress(protocolAddress.AddressWithPrefix.Address)\n// If the address is an IPv6 address and it is a permanent address,\n" } ]
Go
Apache License 2.0
google/gvisor
Use a single NetworkEndpoint per address This change was already done as of https://github.com/google/gvisor/commit/1736b2208f but https://github.com/google/gvisor/commit/a174aa7597 conflicted with that change and it was missed in reviews. This change fixes the conflict. PiperOrigin-RevId: 328920372
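The comment carried in the diff above relies on a general Go property: an interface holding a typed nil pointer does not compare equal to a nil interface. A self-contained illustration with made-up stand-in types (not the real NUDHandler or neighborCache):

```go
package main

import "fmt"

// handler stands in for an interface like NUDHandler in the diff above.
type handler interface {
	handle()
}

// cache stands in for a concrete type like *neighborCache; a nil *cache
// still satisfies handler.
type cache struct{}

func (*cache) handle() {}

func main() {
	var c *cache // nil pointer

	var direct handler = c    // interface now has a non-nil type and a nil value
	fmt.Println(direct == nil) // false: callees cannot detect the missing cache

	var guarded handler
	if c != nil { // assign only when the pointer is valid, as the commit does
		guarded = c
	}
	fmt.Println(guarded == nil) // true
}
```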
259,891
28.08.2020 10:33:44
25,200
b3ff31d041c9455614a2a9f2a7be10afb6613357
fix panic when calling SO_ORIGINAL_DST without initializing iptables Reported-by:
[ { "change_type": "MODIFY", "old_path": "pkg/tcpip/stack/iptables.go", "new_path": "pkg/tcpip/stack/iptables.go", "diff": "@@ -427,5 +427,10 @@ func (it *IPTables) checkRule(hook Hook, pkt *PacketBuffer, table Table, ruleIdx\n// OriginalDst returns the original destination of redirected connections. It\n// returns an error if the connection doesn't exist or isn't redirected.\nfunc (it *IPTables) OriginalDst(epID TransportEndpointID) (tcpip.Address, uint16, *tcpip.Error) {\n+ it.mu.RLock()\n+ defer it.mu.RUnlock()\n+ if !it.modified {\n+ return \"\", 0, tcpip.ErrNotConnected\n+ }\nreturn it.connections.originalDst(epID)\n}\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/iptables.cc", "new_path": "test/syscalls/linux/iptables.cc", "diff": "@@ -104,6 +104,19 @@ TEST(IPTablesBasic, GetEntriesErrorPrecedence) {\nSyscallFailsWithErrno(EINVAL));\n}\n+TEST(IPTablesBasic, OriginalDstErrors) {\n+ SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));\n+\n+ int sock;\n+ ASSERT_THAT(sock = socket(AF_INET, SOCK_STREAM, 0), SyscallSucceeds());\n+\n+ // Sockets not affected by NAT should fail to find an original destination.\n+ struct sockaddr_in addr = {};\n+ socklen_t addr_len = sizeof(addr);\n+ EXPECT_THAT(getsockopt(sock, SOL_IP, SO_ORIGINAL_DST, &addr, &addr_len),\n+ SyscallFailsWithErrno(ENOTCONN));\n+}\n+\n// Fixture for iptables tests.\nclass IPTablesTest : public ::testing::Test {\nprotected:\n" } ]
Go
Apache License 2.0
google/gvisor
fix panic when calling SO_ORIGINAL_DST without initializing iptables Reported-by: [email protected] PiperOrigin-RevId: 328963899
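The fix above guards the lookup behind a flag recording whether iptables was ever modified, taken under a read lock. A generic sketch of that guard pattern, with hypothetical types and errors standing in for netstack's:

```go
package main

import (
	"errors"
	"fmt"
	"sync"
)

var errNotConnected = errors.New("endpoint is not connected")

type table struct {
	mu       sync.RWMutex
	modified bool              // set the first time rules are installed
	origDst  map[string]string // connection ID -> original destination
}

// originalDst refuses to touch the possibly uninitialized connection-tracking
// state unless the tables were ever modified, avoiding the panic the commit
// describes.
func (t *table) originalDst(id string) (string, error) {
	t.mu.RLock()
	defer t.mu.RUnlock()
	if !t.modified {
		return "", errNotConnected
	}
	dst, ok := t.origDst[id]
	if !ok {
		return "", errNotConnected
	}
	return dst, nil
}

func main() {
	var t table
	_, err := t.originalDst("flow-1")
	fmt.Println(err) // endpoint is not connected
}
```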
259,860
28.08.2020 11:26:25
25,200
8b9cb36d1c74f71da5bc70b73330291f1df298ad
Fix EOF handling for splice. Also, add corresponding EOF tests for splice/sendfile. Discovered by syzkaller.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/syscalls/linux/vfs2/splice.go", "new_path": "pkg/sentry/syscalls/linux/vfs2/splice.go", "diff": "@@ -141,9 +141,14 @@ func Splice(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscal\ninOffset += n\n}\ndefault:\n- panic(\"not possible\")\n+ panic(\"at least one end of splice must be a pipe\")\n}\n+ if n == 0 && err == io.EOF {\n+ // We reached the end of the file. Eat the error and exit the loop.\n+ err = nil\n+ break\n+ }\nif n != 0 || err != syserror.ErrWouldBlock || nonBlock {\nbreak\n}\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/sendfile.cc", "new_path": "test/syscalls/linux/sendfile.cc", "diff": "@@ -533,6 +533,22 @@ TEST(SendFileTest, SendPipeWouldBlock) {\nSyscallFailsWithErrno(EWOULDBLOCK));\n}\n+TEST(SendFileTest, SendPipeEOF) {\n+ // Create and open an empty input file.\n+ const TempPath in_file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());\n+ const FileDescriptor inf =\n+ ASSERT_NO_ERRNO_AND_VALUE(Open(in_file.path(), O_RDONLY));\n+\n+ // Setup the output named pipe.\n+ int fds[2];\n+ ASSERT_THAT(pipe2(fds, O_NONBLOCK), SyscallSucceeds());\n+ const FileDescriptor rfd(fds[0]);\n+ const FileDescriptor wfd(fds[1]);\n+\n+ EXPECT_THAT(sendfile(wfd.get(), inf.get(), nullptr, 123),\n+ SyscallSucceedsWithValue(0));\n+}\n+\nTEST(SendFileTest, SendPipeBlocks) {\n// Create temp file.\nconstexpr char kData[] =\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/splice.cc", "new_path": "test/syscalls/linux/splice.cc", "diff": "@@ -298,6 +298,23 @@ TEST(SpliceTest, ToPipe) {\nEXPECT_EQ(memcmp(rbuf.data(), buf.data(), buf.size()), 0);\n}\n+TEST(SpliceTest, ToPipeEOF) {\n+ // Create and open an empty input file.\n+ const TempPath in_file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());\n+ const FileDescriptor in_fd =\n+ ASSERT_NO_ERRNO_AND_VALUE(Open(in_file.path(), O_RDONLY));\n+\n+ // Create a new pipe.\n+ int fds[2];\n+ ASSERT_THAT(pipe(fds), SyscallSucceeds());\n+ const FileDescriptor rfd(fds[0]);\n+ const FileDescriptor wfd(fds[1]);\n+\n+ // Splice from the empty file to the pipe.\n+ EXPECT_THAT(splice(in_fd.get(), nullptr, wfd.get(), nullptr, 123, 0),\n+ SyscallSucceedsWithValue(0));\n+}\n+\nTEST(SpliceTest, ToPipeOffset) {\n// Open the input file.\nconst TempPath in_file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());\n" } ]
Go
Apache License 2.0
google/gvisor
Fix EOF handling for splice. Also, add corresponding EOF tests for splice/sendfile. Discovered by syzkaller. PiperOrigin-RevId: 328975990
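The loop change in the commit above treats a zero-byte transfer that ends with io.EOF as a normal end of file rather than retrying or surfacing an error. A standalone sketch of that termination logic; copyOnce and errWouldBlock are illustrative names, not the sentry's:

```go
package main

import (
	"errors"
	"fmt"
	"io"
	"strings"
)

// errWouldBlock stands in for syserror.ErrWouldBlock.
var errWouldBlock = errors.New("operation would block")

// copyOnce mimics the splice loop's termination logic for a single transfer.
func copyOnce(src io.Reader, dst io.Writer, nonBlock bool) (int64, error) {
	for {
		n, err := io.CopyN(dst, src, 123)
		if n == 0 && err == io.EOF {
			// End of the source: report success with zero bytes copied,
			// matching splice(2) returning 0 at EOF instead of an error.
			return 0, nil
		}
		if n != 0 || err != errWouldBlock || nonBlock {
			return n, err
		}
		// A blocking caller would wait for readiness here and retry; with
		// ordinary io.Readers this branch is never reached.
	}
}

func main() {
	n, err := copyOnce(strings.NewReader(""), io.Discard, true)
	fmt.Println(n, err) // 0 <nil>
}
```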
260,003
31.08.2020 12:01:46
25,200
ba25485d96833b3852c2fbbca508414b3b96d430
stateify: Bring back struct field and type names in pretty print
[ { "change_type": "MODIFY", "old_path": "pkg/state/pretty/pretty.go", "new_path": "pkg/state/pretty/pretty.go", "diff": "@@ -26,12 +26,17 @@ import (\n\"gvisor.dev/gvisor/pkg/state/wire\"\n)\n-func formatRef(x *wire.Ref, graph uint64, html bool) string {\n+type printer struct {\n+ html bool\n+ typeSpecs map[string]*wire.Type\n+}\n+\n+func (p *printer) formatRef(x *wire.Ref, graph uint64) string {\nbaseRef := fmt.Sprintf(\"g%dr%d\", graph, x.Root)\nfullRef := baseRef\nif len(x.Dots) > 0 {\n// See wire.Ref; Type valid if Dots non-zero.\n- typ, _ := formatType(x.Type, graph, html)\n+ typ, _ := p.formatType(x.Type, graph)\nvar buf strings.Builder\nbuf.WriteString(\"(*\")\nbuf.WriteString(typ)\n@@ -51,34 +56,40 @@ func formatRef(x *wire.Ref, graph uint64, html bool) string {\nbuf.WriteString(\")\")\nfullRef = buf.String()\n}\n- if html {\n+ if p.html {\nreturn fmt.Sprintf(\"<a href=\\\"#%s\\\">%s</a>\", baseRef, fullRef)\n}\nreturn fullRef\n}\n-func formatType(t wire.TypeSpec, graph uint64, html bool) (string, bool) {\n+func (p *printer) formatType(t wire.TypeSpec, graph uint64) (string, bool) {\nswitch x := t.(type) {\ncase wire.TypeID:\n- base := fmt.Sprintf(\"g%dt%d\", graph, x)\n- if html {\n- return fmt.Sprintf(\"<a href=\\\"#%s\\\">%s</a>\", base, base), true\n+ tag := fmt.Sprintf(\"g%dt%d\", graph, x)\n+ desc := tag\n+ if spec, ok := p.typeSpecs[tag]; ok {\n+ desc += fmt.Sprintf(\"=%s\", spec.Name)\n+ } else {\n+ desc += \"!missing-type-spec\"\n+ }\n+ if p.html {\n+ return fmt.Sprintf(\"<a href=\\\"#%s\\\">%s</a>\", tag, desc), true\n}\n- return fmt.Sprintf(\"%s\", base), true\n+ return desc, true\ncase wire.TypeSpecNil:\nreturn \"\", false // Only nil type.\ncase *wire.TypeSpecPointer:\n- element, _ := formatType(x.Type, graph, html)\n+ element, _ := p.formatType(x.Type, graph)\nreturn fmt.Sprintf(\"(*%s)\", element), true\ncase *wire.TypeSpecArray:\n- element, _ := formatType(x.Type, graph, html)\n+ element, _ := p.formatType(x.Type, graph)\nreturn fmt.Sprintf(\"[%d](%s)\", x.Count, element), true\ncase *wire.TypeSpecSlice:\n- element, _ := formatType(x.Type, graph, html)\n+ element, _ := p.formatType(x.Type, graph)\nreturn fmt.Sprintf(\"([]%s)\", element), true\ncase *wire.TypeSpecMap:\n- key, _ := formatType(x.Key, graph, html)\n- value, _ := formatType(x.Value, graph, html)\n+ key, _ := p.formatType(x.Key, graph)\n+ value, _ := p.formatType(x.Value, graph)\nreturn fmt.Sprintf(\"(map[%s]%s)\", key, value), true\ndefault:\npanic(fmt.Sprintf(\"unreachable: unknown type %T\", t))\n@@ -87,7 +98,7 @@ func formatType(t wire.TypeSpec, graph uint64, html bool) (string, bool) {\n// format formats a single object, for pretty-printing. 
It also returns whether\n// the value is a non-zero value.\n-func format(graph uint64, depth int, encoded wire.Object, html bool) (string, bool) {\n+func (p *printer) format(graph uint64, depth int, encoded wire.Object) (string, bool) {\nswitch x := encoded.(type) {\ncase wire.Nil:\nreturn \"nil\", false\n@@ -98,7 +109,7 @@ func format(graph uint64, depth int, encoded wire.Object, html bool) (string, bo\ncase *wire.Complex128:\nreturn fmt.Sprintf(\"%f+%fi\", real(*x), imag(*x)), *x != 0.0\ncase *wire.Ref:\n- return formatRef(x, graph, html), x.Root != 0\n+ return p.formatRef(x, graph), x.Root != 0\ncase *wire.Type:\ntabs := \"\\n\" + strings.Repeat(\"\\t\", depth)\nitems := make([]string, 0, len(x.Fields)+2)\n@@ -109,7 +120,7 @@ func format(graph uint64, depth int, encoded wire.Object, html bool) (string, bo\nitems = append(items, \"}\")\nreturn strings.Join(items, tabs), true // No zero value.\ncase *wire.Slice:\n- return fmt.Sprintf(\"%s{len:%d,cap:%d}\", formatRef(&x.Ref, graph, html), x.Length, x.Capacity), x.Capacity != 0\n+ return fmt.Sprintf(\"%s{len:%d,cap:%d}\", p.formatRef(&x.Ref, graph), x.Length, x.Capacity), x.Capacity != 0\ncase *wire.Array:\nif len(x.Contents) == 0 {\nreturn \"[]\", false\n@@ -119,7 +130,7 @@ func format(graph uint64, depth int, encoded wire.Object, html bool) (string, bo\nitems = append(items, \"[\")\ntabs := \"\\n\" + strings.Repeat(\"\\t\", depth)\nfor i := 0; i < len(x.Contents); i++ {\n- item, ok := format(graph, depth+1, x.Contents[i], html)\n+ item, ok := p.format(graph, depth+1, x.Contents[i])\nif !ok {\nzeros = append(zeros, fmt.Sprintf(\"\\t%s,\", item))\ncontinue\n@@ -136,7 +147,9 @@ func format(graph uint64, depth int, encoded wire.Object, html bool) (string, bo\nitems = append(items, \"]\")\nreturn strings.Join(items, tabs), len(zeros) < len(x.Contents)\ncase *wire.Struct:\n- typ, _ := formatType(x.TypeID, graph, html)\n+ tag := fmt.Sprintf(\"g%dt%d\", graph, x.TypeID)\n+ spec, _ := p.typeSpecs[tag]\n+ typ, _ := p.formatType(x.TypeID, graph)\nif x.Fields() == 0 {\nreturn fmt.Sprintf(\"struct[%s]{}\", typ), false\n}\n@@ -145,9 +158,15 @@ func format(graph uint64, depth int, encoded wire.Object, html bool) (string, bo\ntabs := \"\\n\" + strings.Repeat(\"\\t\", depth)\nallZero := true\nfor i := 0; i < x.Fields(); i++ {\n- element, ok := format(graph, depth+1, *x.Field(i), html)\n+ var name string\n+ if spec != nil && i < len(spec.Fields) {\n+ name = spec.Fields[i]\n+ } else {\n+ name = fmt.Sprintf(\"%d\", i)\n+ }\n+ element, ok := p.format(graph, depth+1, *x.Field(i))\nallZero = allZero && !ok\n- items = append(items, fmt.Sprintf(\"\\t%d: %s,\", i, element))\n+ items = append(items, fmt.Sprintf(\"\\t%s: %s,\", name, element))\n}\nitems = append(items, \"}\")\nreturn strings.Join(items, tabs), !allZero\n@@ -159,15 +178,15 @@ func format(graph uint64, depth int, encoded wire.Object, html bool) (string, bo\nitems = append(items, \"map{\")\ntabs := \"\\n\" + strings.Repeat(\"\\t\", depth)\nfor i := 0; i < len(x.Keys); i++ {\n- key, _ := format(graph, depth+1, x.Keys[i], html)\n- value, _ := format(graph, depth+1, x.Values[i], html)\n+ key, _ := p.format(graph, depth+1, x.Keys[i])\n+ value, _ := p.format(graph, depth+1, x.Values[i])\nitems = append(items, fmt.Sprintf(\"\\t%s: %s,\", key, value))\n}\nitems = append(items, \"}\")\nreturn strings.Join(items, tabs), true\ncase *wire.Interface:\n- typ, typOk := formatType(x.Type, graph, html)\n- element, elementOk := format(graph, depth+1, x.Value, html)\n+ typ, typOk := p.formatType(x.Type, graph)\n+ 
element, elementOk := p.format(graph, depth+1, x.Value)\nreturn fmt.Sprintf(\"interface[%s]{%s}\", typ, element), typOk || elementOk\ndefault:\n// Must be a primitive; use reflection.\n@@ -176,11 +195,11 @@ func format(graph uint64, depth int, encoded wire.Object, html bool) (string, bo\n}\n// printStream is the basic print implementation.\n-func printStream(w io.Writer, r wire.Reader, html bool) (err error) {\n+func (p *printer) printStream(w io.Writer, r wire.Reader) (err error) {\n// current graph ID.\nvar graph uint64\n- if html {\n+ if p.html {\nfmt.Fprintf(w, \"<pre>\")\ndefer fmt.Fprintf(w, \"</pre>\")\n}\n@@ -195,6 +214,8 @@ func printStream(w io.Writer, r wire.Reader, html bool) (err error) {\n}\n}()\n+ p.typeSpecs = make(map[string]*wire.Type)\n+\nfor {\n// Find the first object to begin generation.\nlength, object, err := state.ReadHeader(r)\n@@ -222,18 +243,19 @@ func printStream(w io.Writer, r wire.Reader, html bool) (err error) {\n// loop in decode.go. But we don't register type information,\n// etc. and just print the raw structures.\nvar (\n- oid uint64 = 1\ntid uint64 = 1\n+ objects []wire.Object\n)\n- for oid <= length {\n+ for oid := uint64(1); oid <= length; {\n// Unmarshal the object.\nencoded := wire.Load(r)\n// Is this a type?\n- if _, ok := encoded.(*wire.Type); ok {\n- str, _ := format(graph, 0, encoded, html)\n+ if typ, ok := encoded.(*wire.Type); ok {\n+ str, _ := p.format(graph, 0, encoded)\ntag := fmt.Sprintf(\"g%dt%d\", graph, tid)\n- if html {\n+ p.typeSpecs[tag] = typ\n+ if p.html {\n// See below.\ntag = fmt.Sprintf(\"<a name=\\\"%s\\\">%s</a><a href=\\\"#%s\\\">&#9875;</a>\", tag, tag, tag)\n}\n@@ -244,17 +266,24 @@ func printStream(w io.Writer, r wire.Reader, html bool) (err error) {\ncontinue\n}\n+ // Otherwise, it is a node.\n+ objects = append(objects, encoded)\n+ oid++\n+ }\n+\n+ for i, encoded := range objects {\n+ // oid starts at 1.\n+ oid := i + 1\n// Format the node.\n- str, _ := format(graph, 0, encoded, html)\n+ str, _ := p.format(graph, 0, encoded)\ntag := fmt.Sprintf(\"g%dr%d\", graph, oid)\n- if html {\n+ if p.html {\n// Create a little tag with an anchor next to it for linking.\ntag = fmt.Sprintf(\"<a name=\\\"%s\\\">%s</a><a href=\\\"#%s\\\">&#9875;</a>\", tag, tag, tag)\n}\nif _, err := fmt.Fprintf(w, \"%s = %s\\n\", tag, str); err != nil {\nreturn err\n}\n- oid++\n}\n}\n@@ -263,10 +292,10 @@ func printStream(w io.Writer, r wire.Reader, html bool) (err error) {\n// PrintText reads the stream from r and prints text to w.\nfunc PrintText(w io.Writer, r wire.Reader) error {\n- return printStream(w, r, false /* html */)\n+ return (&printer{}).printStream(w, r)\n}\n// PrintHTML reads the stream from r and prints html to w.\nfunc PrintHTML(w io.Writer, r wire.Reader) error {\n- return printStream(w, r, true /* html */)\n+ return (&printer{html: true}).printStream(w, r)\n}\n" } ]
Go
Apache License 2.0
google/gvisor
stateify: Bring back struct field and type names in pretty print PiperOrigin-RevId: 329349158
259,885
31.08.2020 13:55:18
25,200
6cdfa4fee06dfe4d37b79a8426906c99fe294964
Don't use read-only host FD for writable gofer dentries in VFS2. As documented for gofer.dentry.hostFD.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/gofer/gofer.go", "new_path": "pkg/sentry/fsimpl/gofer/gofer.go", "diff": "@@ -1472,8 +1472,9 @@ func (d *dentry) ensureSharedHandle(ctx context.Context, read, write, trunc bool\nreturn err\n}\n- if d.hostFD < 0 && openReadable && h.fd >= 0 {\n- // We have no existing FD; use the new FD for at least reading.\n+ if d.hostFD < 0 && h.fd >= 0 && openReadable && (d.writeFile.isNil() || openWritable) {\n+ // We have no existing FD, and the new FD meets the requirements\n+ // for d.hostFD, so start using it.\nd.hostFD = h.fd\n} else if d.hostFD >= 0 && d.writeFile.isNil() && openWritable {\n// We have an existing read-only FD, but the file has just been\n" } ]
Go
Apache License 2.0
google/gvisor
Don't use read-only host FD for writable gofer dentries in VFS2. As documented for gofer.dentry.hostFD. PiperOrigin-RevId: 329372319
259,975
31.08.2020 17:15:14
25,200
67484384935fa814e978f08dfa0f0bdbddbbd371
Fix bug in bazel build benchmark.
[ { "change_type": "MODIFY", "old_path": "test/benchmarks/fs/bazel_test.go", "new_path": "test/benchmarks/fs/bazel_test.go", "diff": "@@ -62,7 +62,7 @@ func runBuildBenchmark(b *testing.B, image, workdir, target string) {\ncontainer := machine.GetContainer(ctx, b)\ndefer container.CleanUp(ctx)\n- // Start a container and sleep by an order of b.N.\n+ // Start a container and sleep.\nif err := container.Spawn(ctx, dockerutil.RunOpts{\nImage: image,\n}, \"sleep\", fmt.Sprintf(\"%d\", 1000000)); err != nil {\n@@ -70,12 +70,13 @@ func runBuildBenchmark(b *testing.B, image, workdir, target string) {\n}\n// If we are running on a tmpfs, copy to /tmp which is a tmpfs.\n+ prefix := \"\"\nif bm.tmpfs {\nif out, err := container.Exec(ctx, dockerutil.ExecOpts{},\n\"cp\", \"-r\", workdir, \"/tmp/.\"); err != nil {\nb.Fatalf(\"failed to copy directory: %v (%s)\", err, out)\n}\n- workdir = \"/tmp\" + workdir\n+ prefix = \"/tmp\"\n}\n// Restart profiles after the copy.\n@@ -94,7 +95,7 @@ func runBuildBenchmark(b *testing.B, image, workdir, target string) {\nb.StartTimer()\ngot, err := container.Exec(ctx, dockerutil.ExecOpts{\n- WorkDir: workdir,\n+ WorkDir: prefix + workdir,\n}, \"bazel\", \"build\", \"-c\", \"opt\", target)\nif err != nil {\nb.Fatalf(\"build failed with: %v\", err)\n@@ -107,7 +108,7 @@ func runBuildBenchmark(b *testing.B, image, workdir, target string) {\n}\n// Clean bazel in case we use b.N.\n_, err = container.Exec(ctx, dockerutil.ExecOpts{\n- WorkDir: workdir,\n+ WorkDir: prefix + workdir,\n}, \"bazel\", \"clean\")\nif err != nil {\nb.Fatalf(\"build failed with: %v\", err)\n" } ]
Go
Apache License 2.0
google/gvisor
Fix bug in bazel build benchmark. PiperOrigin-RevId: 329409802
259,905
01.09.2020 14:55:57
-28,800
66ee7c0e98a98d4046a23b85af42bc68b5ab6b13
Dup stdio FDs for VFS2 when starting a child container Currently the stdio FDs are not dupped and will be closed unexpectedly in VFS2 when starting a child container. This patch fixes this issue. Fixes:
[ { "change_type": "MODIFY", "old_path": "runsc/boot/loader.go", "new_path": "runsc/boot/loader.go", "diff": "@@ -689,9 +689,18 @@ func (l *Loader) startContainer(spec *specs.Spec, conf *config.Config, cid strin\nreturn fmt.Errorf(\"creating new process: %v\", err)\n}\n- // setupContainerFS() dups stdioFDs, so we don't need to dup them here.\n+ // VFS1 dups stdioFDs, so we don't need to dup them here. VFS2 takes\n+ // ownership of the passed FDs, and we need to dup them here.\nfor _, f := range files[:3] {\n+ if !kernel.VFS2Enabled {\ninfo.stdioFDs = append(info.stdioFDs, int(f.Fd()))\n+ } else {\n+ fd, err := unix.Dup(int(f.Fd()))\n+ if err != nil {\n+ return fmt.Errorf(\"failed to dup file: %v\", err)\n+ }\n+ info.stdioFDs = append(info.stdioFDs, fd)\n+ }\n}\n// Can't take ownership away from os.File. dup them to get a new FDs.\n" } ]
Go
Apache License 2.0
google/gvisor
Dup stdio FDs for VFS2 when starting a child container Currently the stdio FDs are not dupped and will be closed unexpectedly in VFS2 when starting a child container. This patch fixes this issue. Fixes: #3821 Signed-off-by: Tiwei Bie <[email protected]>
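For illustration, a minimal sketch of the FD-duplication pattern described in the message above: a duplicate descriptor is created with unix.Dup so that a component taking ownership of the duplicate cannot invalidate the caller's original descriptor. The dupFD helper and the use of os.Stdin are assumptions made for this sketch only, not gVisor code.

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

// dupFD returns a fresh descriptor referring to the same open file as f.
// The duplicate can be handed to code that takes ownership (and eventually
// closes it) without invalidating f's own descriptor.
func dupFD(f *os.File) (int, error) {
	fd, err := unix.Dup(int(f.Fd()))
	if err != nil {
		return -1, fmt.Errorf("failed to dup file: %v", err)
	}
	return fd, nil
}

func main() {
	fd, err := dupFD(os.Stdin)
	if err != nil {
		panic(err)
	}
	// The new owner of fd may close it at any time; os.Stdin stays valid.
	defer unix.Close(fd)
	fmt.Println("duplicated stdin as fd", fd)
}

Closing either descriptor independently leaves the other usable, which is the ownership split the commit relies on.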
259,884
01.09.2020 01:26:10
25,200
f4be726fde31dac926052b03f5dc666fd4dd4783
Use 1080p background image. This makes the background image on the top page 1/3 as big and allows it to load in roughly half the time.
[ { "change_type": "MODIFY", "old_path": "website/_sass/front.scss", "new_path": "website/_sass/front.scss", "diff": ".jumbotron {\n- background-image: url(/assets/images/background.jpg);\n+ background-image: url(/assets/images/background_1080p.jpg);\nbackground-position: center;\nbackground-repeat: no-repeat;\nbackground-size: cover;\n" }, { "change_type": "ADD", "old_path": "website/assets/images/background_1080p.jpg", "new_path": "website/assets/images/background_1080p.jpg", "diff": "Binary files /dev/null and b/website/assets/images/background_1080p.jpg differ\n" } ]
Go
Apache License 2.0
google/gvisor
Use 1080p background image. This makes the background image on the top page 1/3 as big and allows it to load in roughly half the time. PiperOrigin-RevId: 329462030
259,905
01.09.2020 16:01:22
-28,800
d5f20209cc34a5357486a2f495bfe30b5d6e2056
fsimpl/host: fix the order of removing FD notifier FD notifier should be removed before we close the FD, otherwise there will be a race condition in which another FD with the same value is opened and added before the existing FD notifier is removed. Fixes:
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/host/socket.go", "new_path": "pkg/sentry/fsimpl/host/socket.go", "diff": "@@ -348,10 +348,10 @@ func (e *SCMConnectedEndpoint) Init() error {\nfunc (e *SCMConnectedEndpoint) Release(ctx context.Context) {\ne.DecRef(func() {\ne.mu.Lock()\n+ fdnotifier.RemoveFD(int32(e.fd))\nif err := syscall.Close(e.fd); err != nil {\nlog.Warningf(\"Failed to close host fd %d: %v\", err)\n}\n- fdnotifier.RemoveFD(int32(e.fd))\ne.destroyLocked()\ne.mu.Unlock()\n})\n" } ]
Go
Apache License 2.0
google/gvisor
fsimpl/host: fix the order of removing FD notifier FD notifier should be removed before we close the FD, otherwise there will be race condition that another FD which has the same value is opened and added before the existing FD notifier is removed. Fixes: #3823 Signed-off-by: Tiwei Bie <[email protected]>
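For illustration, a minimal stdlib-only sketch of the ordering rule stated in the message above: an FD must be deregistered from a notifier-style registry before it is closed, otherwise a concurrently opened file can be assigned the same FD number, get registered, and then be deregistered by mistake. The registry type below is a stand-in assumption, not gVisor's fdnotifier package.

package main

import (
	"fmt"
	"sync"
	"syscall"
)

// registry is a stand-in for an epoll-style FD notifier: it tracks the FDs
// currently being watched.
type registry struct {
	mu  sync.Mutex
	fds map[int32]struct{}
}

func (r *registry) add(fd int32) {
	r.mu.Lock()
	defer r.mu.Unlock()
	r.fds[fd] = struct{}{}
}

func (r *registry) remove(fd int32) {
	r.mu.Lock()
	defer r.mu.Unlock()
	delete(r.fds, fd)
}

// releaseFD deregisters fd before closing it. If the two steps were swapped,
// another goroutine could open a file, be assigned the same fd number, and
// add it to the registry in the window between close and remove; the remove
// would then strip the newcomer's registration.
func releaseFD(r *registry, fd int32) error {
	r.remove(fd)
	return syscall.Close(int(fd))
}

func main() {
	r := &registry{fds: make(map[int32]struct{})}
	p := make([]int, 2)
	if err := syscall.Pipe(p); err != nil {
		panic(err)
	}
	r.add(int32(p[0]))
	if err := releaseFD(r, int32(p[0])); err != nil {
		fmt.Println("close failed:", err)
	}
	syscall.Close(p[1])
	fmt.Println("released fd", p[0])
}

The same reasoning applies to any table keyed by FD numbers: the kernel is free to recycle a number the moment close returns.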
259,907
01.09.2020 12:59:49
25,200
723fb5c1164872fd1de418d794aa1e9eced8fd94
[go-marshal] Enable auto-marshalling for fs/tty.
[ { "change_type": "MODIFY", "old_path": "pkg/abi/linux/tty.go", "new_path": "pkg/abi/linux/tty.go", "diff": "@@ -341,6 +341,7 @@ var DefaultSlaveTermios = KernelTermios{\n// include/uapi/asm-generic/termios.h.\n//\n// +stateify savable\n+// +marshal\ntype WindowSize struct {\nRows uint16\nCols uint16\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fs/tty/BUILD", "new_path": "pkg/sentry/fs/tty/BUILD", "diff": "@@ -31,6 +31,7 @@ go_library(\n\"//pkg/syserror\",\n\"//pkg/usermem\",\n\"//pkg/waiter\",\n+ \"//tools/go_marshal/primitive\",\n],\n)\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fs/tty/line_discipline.go", "new_path": "pkg/sentry/fs/tty/line_discipline.go", "diff": "@@ -21,6 +21,7 @@ import (\n\"gvisor.dev/gvisor/pkg/abi/linux\"\n\"gvisor.dev/gvisor/pkg/context\"\n\"gvisor.dev/gvisor/pkg/sentry/arch\"\n+ \"gvisor.dev/gvisor/pkg/sentry/kernel\"\n\"gvisor.dev/gvisor/pkg/sync\"\n\"gvisor.dev/gvisor/pkg/syserror\"\n\"gvisor.dev/gvisor/pkg/usermem\"\n@@ -115,27 +116,23 @@ func newLineDiscipline(termios linux.KernelTermios) *lineDiscipline {\n}\n// getTermios gets the linux.Termios for the tty.\n-func (l *lineDiscipline) getTermios(ctx context.Context, io usermem.IO, args arch.SyscallArguments) (uintptr, error) {\n+func (l *lineDiscipline) getTermios(task *kernel.Task, args arch.SyscallArguments) (uintptr, error) {\nl.termiosMu.RLock()\ndefer l.termiosMu.RUnlock()\n// We must copy a Termios struct, not KernelTermios.\nt := l.termios.ToTermios()\n- _, err := usermem.CopyObjectOut(ctx, io, args[2].Pointer(), t, usermem.IOOpts{\n- AddressSpaceActive: true,\n- })\n+ _, err := t.CopyOut(task, args[2].Pointer())\nreturn 0, err\n}\n// setTermios sets a linux.Termios for the tty.\n-func (l *lineDiscipline) setTermios(ctx context.Context, io usermem.IO, args arch.SyscallArguments) (uintptr, error) {\n+func (l *lineDiscipline) setTermios(task *kernel.Task, args arch.SyscallArguments) (uintptr, error) {\nl.termiosMu.Lock()\ndefer l.termiosMu.Unlock()\noldCanonEnabled := l.termios.LEnabled(linux.ICANON)\n// We must copy a Termios struct, not KernelTermios.\nvar t linux.Termios\n- _, err := usermem.CopyObjectIn(ctx, io, args[2].Pointer(), &t, usermem.IOOpts{\n- AddressSpaceActive: true,\n- })\n+ _, err := t.CopyIn(task, args[2].Pointer())\nl.termios.FromTermios(t)\n// If canonical mode is turned off, move bytes from inQueue's wait\n@@ -152,21 +149,17 @@ func (l *lineDiscipline) setTermios(ctx context.Context, io usermem.IO, args arc\nreturn 0, err\n}\n-func (l *lineDiscipline) windowSize(ctx context.Context, io usermem.IO, args arch.SyscallArguments) error {\n+func (l *lineDiscipline) windowSize(t *kernel.Task, args arch.SyscallArguments) error {\nl.sizeMu.Lock()\ndefer l.sizeMu.Unlock()\n- _, err := usermem.CopyObjectOut(ctx, io, args[2].Pointer(), l.size, usermem.IOOpts{\n- AddressSpaceActive: true,\n- })\n+ _, err := l.size.CopyOut(t, args[2].Pointer())\nreturn err\n}\n-func (l *lineDiscipline) setWindowSize(ctx context.Context, io usermem.IO, args arch.SyscallArguments) error {\n+func (l *lineDiscipline) setWindowSize(t *kernel.Task, args arch.SyscallArguments) error {\nl.sizeMu.Lock()\ndefer l.sizeMu.Unlock()\n- _, err := usermem.CopyObjectIn(ctx, io, args[2].Pointer(), &l.size, usermem.IOOpts{\n- AddressSpaceActive: true,\n- })\n+ _, err := l.size.CopyIn(t, args[2].Pointer())\nreturn err\n}\n@@ -182,8 +175,8 @@ func (l *lineDiscipline) slaveReadiness() waiter.EventMask {\nreturn l.outQueue.writeReadiness(&l.termios) | l.inQueue.readReadiness(&l.termios)\n}\n-func (l 
*lineDiscipline) inputQueueReadSize(ctx context.Context, io usermem.IO, args arch.SyscallArguments) error {\n- return l.inQueue.readableSize(ctx, io, args)\n+func (l *lineDiscipline) inputQueueReadSize(t *kernel.Task, args arch.SyscallArguments) error {\n+ return l.inQueue.readableSize(t, args)\n}\nfunc (l *lineDiscipline) inputQueueRead(ctx context.Context, dst usermem.IOSequence) (int64, error) {\n@@ -217,8 +210,8 @@ func (l *lineDiscipline) inputQueueWrite(ctx context.Context, src usermem.IOSequ\nreturn 0, syserror.ErrWouldBlock\n}\n-func (l *lineDiscipline) outputQueueReadSize(ctx context.Context, io usermem.IO, args arch.SyscallArguments) error {\n- return l.outQueue.readableSize(ctx, io, args)\n+func (l *lineDiscipline) outputQueueReadSize(t *kernel.Task, args arch.SyscallArguments) error {\n+ return l.outQueue.readableSize(t, args)\n}\nfunc (l *lineDiscipline) outputQueueRead(ctx context.Context, dst usermem.IOSequence) (int64, error) {\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fs/tty/master.go", "new_path": "pkg/sentry/fs/tty/master.go", "diff": "@@ -20,10 +20,12 @@ import (\n\"gvisor.dev/gvisor/pkg/sentry/arch\"\n\"gvisor.dev/gvisor/pkg/sentry/fs\"\n\"gvisor.dev/gvisor/pkg/sentry/fs/fsutil\"\n+ \"gvisor.dev/gvisor/pkg/sentry/kernel\"\n\"gvisor.dev/gvisor/pkg/sentry/unimpl\"\n\"gvisor.dev/gvisor/pkg/syserror\"\n\"gvisor.dev/gvisor/pkg/usermem\"\n\"gvisor.dev/gvisor/pkg/waiter\"\n+ \"gvisor.dev/gvisor/tools/go_marshal/primitive\"\n)\n// LINT.IfChange\n@@ -152,46 +154,51 @@ func (mf *masterFileOperations) Write(ctx context.Context, _ *fs.File, src userm\n// Ioctl implements fs.FileOperations.Ioctl.\nfunc (mf *masterFileOperations) Ioctl(ctx context.Context, _ *fs.File, io usermem.IO, args arch.SyscallArguments) (uintptr, error) {\n+ t := kernel.TaskFromContext(ctx)\n+ if t == nil {\n+ // ioctl(2) may only be called from a task goroutine.\n+ return 0, syserror.ENOTTY\n+ }\n+\nswitch cmd := args[1].Uint(); cmd {\ncase linux.FIONREAD: // linux.FIONREAD == linux.TIOCINQ\n// Get the number of bytes in the output queue read buffer.\n- return 0, mf.t.ld.outputQueueReadSize(ctx, io, args)\n+ return 0, mf.t.ld.outputQueueReadSize(t, args)\ncase linux.TCGETS:\n// N.B. TCGETS on the master actually returns the configuration\n// of the slave end.\n- return mf.t.ld.getTermios(ctx, io, args)\n+ return mf.t.ld.getTermios(t, args)\ncase linux.TCSETS:\n// N.B. TCSETS on the master actually affects the configuration\n// of the slave end.\n- return mf.t.ld.setTermios(ctx, io, args)\n+ return mf.t.ld.setTermios(t, args)\ncase linux.TCSETSW:\n// TODO(b/29356795): This should drain the output queue first.\n- return mf.t.ld.setTermios(ctx, io, args)\n+ return mf.t.ld.setTermios(t, args)\ncase linux.TIOCGPTN:\n- _, err := usermem.CopyObjectOut(ctx, io, args[2].Pointer(), uint32(mf.t.n), usermem.IOOpts{\n- AddressSpaceActive: true,\n- })\n+ nP := primitive.Uint32(mf.t.n)\n+ _, err := nP.CopyOut(t, args[2].Pointer())\nreturn 0, err\ncase linux.TIOCSPTLCK:\n// TODO(b/29356795): Implement pty locking. 
For now just pretend we do.\nreturn 0, nil\ncase linux.TIOCGWINSZ:\n- return 0, mf.t.ld.windowSize(ctx, io, args)\n+ return 0, mf.t.ld.windowSize(t, args)\ncase linux.TIOCSWINSZ:\n- return 0, mf.t.ld.setWindowSize(ctx, io, args)\n+ return 0, mf.t.ld.setWindowSize(t, args)\ncase linux.TIOCSCTTY:\n// Make the given terminal the controlling terminal of the\n// calling process.\n- return 0, mf.t.setControllingTTY(ctx, io, args, true /* isMaster */)\n+ return 0, mf.t.setControllingTTY(ctx, args, true /* isMaster */)\ncase linux.TIOCNOTTY:\n// Release this process's controlling terminal.\n- return 0, mf.t.releaseControllingTTY(ctx, io, args, true /* isMaster */)\n+ return 0, mf.t.releaseControllingTTY(ctx, args, true /* isMaster */)\ncase linux.TIOCGPGRP:\n// Get the foreground process group.\n- return mf.t.foregroundProcessGroup(ctx, io, args, true /* isMaster */)\n+ return mf.t.foregroundProcessGroup(ctx, args, true /* isMaster */)\ncase linux.TIOCSPGRP:\n// Set the foreground process group.\n- return mf.t.setForegroundProcessGroup(ctx, io, args, true /* isMaster */)\n+ return mf.t.setForegroundProcessGroup(ctx, args, true /* isMaster */)\ndefault:\nmaybeEmitUnimplementedEvent(ctx, cmd)\nreturn 0, syserror.ENOTTY\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fs/tty/queue.go", "new_path": "pkg/sentry/fs/tty/queue.go", "diff": "@@ -19,10 +19,12 @@ import (\n\"gvisor.dev/gvisor/pkg/context\"\n\"gvisor.dev/gvisor/pkg/safemem\"\n\"gvisor.dev/gvisor/pkg/sentry/arch\"\n+ \"gvisor.dev/gvisor/pkg/sentry/kernel\"\n\"gvisor.dev/gvisor/pkg/sync\"\n\"gvisor.dev/gvisor/pkg/syserror\"\n\"gvisor.dev/gvisor/pkg/usermem\"\n\"gvisor.dev/gvisor/pkg/waiter\"\n+ \"gvisor.dev/gvisor/tools/go_marshal/primitive\"\n)\n// LINT.IfChange\n@@ -85,17 +87,15 @@ func (q *queue) writeReadiness(t *linux.KernelTermios) waiter.EventMask {\n}\n// readableSize writes the number of readable bytes to userspace.\n-func (q *queue) readableSize(ctx context.Context, io usermem.IO, args arch.SyscallArguments) error {\n+func (q *queue) readableSize(t *kernel.Task, args arch.SyscallArguments) error {\nq.mu.Lock()\ndefer q.mu.Unlock()\n- var size int32\n+ size := primitive.Int32(0)\nif q.readable {\n- size = int32(len(q.readBuf))\n+ size = primitive.Int32(len(q.readBuf))\n}\n- _, err := usermem.CopyObjectOut(ctx, io, args[2].Pointer(), size, usermem.IOOpts{\n- AddressSpaceActive: true,\n- })\n+ _, err := size.CopyOut(t, args[2].Pointer())\nreturn err\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fs/tty/slave.go", "new_path": "pkg/sentry/fs/tty/slave.go", "diff": "@@ -20,9 +20,11 @@ import (\n\"gvisor.dev/gvisor/pkg/sentry/arch\"\n\"gvisor.dev/gvisor/pkg/sentry/fs\"\n\"gvisor.dev/gvisor/pkg/sentry/fs/fsutil\"\n+ \"gvisor.dev/gvisor/pkg/sentry/kernel\"\n\"gvisor.dev/gvisor/pkg/syserror\"\n\"gvisor.dev/gvisor/pkg/usermem\"\n\"gvisor.dev/gvisor/pkg/waiter\"\n+ \"gvisor.dev/gvisor/tools/go_marshal/primitive\"\n)\n// LINT.IfChange\n@@ -136,39 +138,44 @@ func (sf *slaveFileOperations) Write(ctx context.Context, _ *fs.File, src userme\n// Ioctl implements fs.FileOperations.Ioctl.\nfunc (sf *slaveFileOperations) Ioctl(ctx context.Context, _ *fs.File, io usermem.IO, args arch.SyscallArguments) (uintptr, error) {\n+ t := kernel.TaskFromContext(ctx)\n+ if t == nil {\n+ // ioctl(2) may only be called from a task goroutine.\n+ return 0, syserror.ENOTTY\n+ }\n+\nswitch cmd := args[1].Uint(); cmd {\ncase linux.FIONREAD: // linux.FIONREAD == linux.TIOCINQ\n// Get the number of bytes in the input queue read buffer.\n- return 0, 
sf.si.t.ld.inputQueueReadSize(ctx, io, args)\n+ return 0, sf.si.t.ld.inputQueueReadSize(t, args)\ncase linux.TCGETS:\n- return sf.si.t.ld.getTermios(ctx, io, args)\n+ return sf.si.t.ld.getTermios(t, args)\ncase linux.TCSETS:\n- return sf.si.t.ld.setTermios(ctx, io, args)\n+ return sf.si.t.ld.setTermios(t, args)\ncase linux.TCSETSW:\n// TODO(b/29356795): This should drain the output queue first.\n- return sf.si.t.ld.setTermios(ctx, io, args)\n+ return sf.si.t.ld.setTermios(t, args)\ncase linux.TIOCGPTN:\n- _, err := usermem.CopyObjectOut(ctx, io, args[2].Pointer(), uint32(sf.si.t.n), usermem.IOOpts{\n- AddressSpaceActive: true,\n- })\n+ nP := primitive.Uint32(sf.si.t.n)\n+ _, err := nP.CopyOut(t, args[2].Pointer())\nreturn 0, err\ncase linux.TIOCGWINSZ:\n- return 0, sf.si.t.ld.windowSize(ctx, io, args)\n+ return 0, sf.si.t.ld.windowSize(t, args)\ncase linux.TIOCSWINSZ:\n- return 0, sf.si.t.ld.setWindowSize(ctx, io, args)\n+ return 0, sf.si.t.ld.setWindowSize(t, args)\ncase linux.TIOCSCTTY:\n// Make the given terminal the controlling terminal of the\n// calling process.\n- return 0, sf.si.t.setControllingTTY(ctx, io, args, false /* isMaster */)\n+ return 0, sf.si.t.setControllingTTY(ctx, args, false /* isMaster */)\ncase linux.TIOCNOTTY:\n// Release this process's controlling terminal.\n- return 0, sf.si.t.releaseControllingTTY(ctx, io, args, false /* isMaster */)\n+ return 0, sf.si.t.releaseControllingTTY(ctx, args, false /* isMaster */)\ncase linux.TIOCGPGRP:\n// Get the foreground process group.\n- return sf.si.t.foregroundProcessGroup(ctx, io, args, false /* isMaster */)\n+ return sf.si.t.foregroundProcessGroup(ctx, args, false /* isMaster */)\ncase linux.TIOCSPGRP:\n// Set the foreground process group.\n- return sf.si.t.setForegroundProcessGroup(ctx, io, args, false /* isMaster */)\n+ return sf.si.t.setForegroundProcessGroup(ctx, args, false /* isMaster */)\ndefault:\nmaybeEmitUnimplementedEvent(ctx, cmd)\nreturn 0, syserror.ENOTTY\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fs/tty/terminal.go", "new_path": "pkg/sentry/fs/tty/terminal.go", "diff": "@@ -20,7 +20,7 @@ import (\n\"gvisor.dev/gvisor/pkg/refs\"\n\"gvisor.dev/gvisor/pkg/sentry/arch\"\n\"gvisor.dev/gvisor/pkg/sentry/kernel\"\n- \"gvisor.dev/gvisor/pkg/usermem\"\n+ \"gvisor.dev/gvisor/tools/go_marshal/primitive\"\n)\n// LINT.IfChange\n@@ -64,7 +64,7 @@ func newTerminal(ctx context.Context, d *dirInodeOperations, n uint32) *Terminal\n// setControllingTTY makes tm the controlling terminal of the calling thread\n// group.\n-func (tm *Terminal) setControllingTTY(ctx context.Context, io usermem.IO, args arch.SyscallArguments, isMaster bool) error {\n+func (tm *Terminal) setControllingTTY(ctx context.Context, args arch.SyscallArguments, isMaster bool) error {\ntask := kernel.TaskFromContext(ctx)\nif task == nil {\npanic(\"setControllingTTY must be called from a task context\")\n@@ -75,7 +75,7 @@ func (tm *Terminal) setControllingTTY(ctx context.Context, io usermem.IO, args a\n// releaseControllingTTY removes tm as the controlling terminal of the calling\n// thread group.\n-func (tm *Terminal) releaseControllingTTY(ctx context.Context, io usermem.IO, args arch.SyscallArguments, isMaster bool) error {\n+func (tm *Terminal) releaseControllingTTY(ctx context.Context, args arch.SyscallArguments, isMaster bool) error {\ntask := kernel.TaskFromContext(ctx)\nif task == nil {\npanic(\"releaseControllingTTY must be called from a task context\")\n@@ -85,7 +85,7 @@ func (tm *Terminal) releaseControllingTTY(ctx context.Context, io 
usermem.IO, ar\n}\n// foregroundProcessGroup gets the process group ID of tm's foreground process.\n-func (tm *Terminal) foregroundProcessGroup(ctx context.Context, io usermem.IO, args arch.SyscallArguments, isMaster bool) (uintptr, error) {\n+func (tm *Terminal) foregroundProcessGroup(ctx context.Context, args arch.SyscallArguments, isMaster bool) (uintptr, error) {\ntask := kernel.TaskFromContext(ctx)\nif task == nil {\npanic(\"foregroundProcessGroup must be called from a task context\")\n@@ -97,24 +97,21 @@ func (tm *Terminal) foregroundProcessGroup(ctx context.Context, io usermem.IO, a\n}\n// Write it out to *arg.\n- _, err = usermem.CopyObjectOut(ctx, io, args[2].Pointer(), int32(ret), usermem.IOOpts{\n- AddressSpaceActive: true,\n- })\n+ retP := primitive.Int32(ret)\n+ _, err = retP.CopyOut(task, args[2].Pointer())\nreturn 0, err\n}\n// foregroundProcessGroup sets tm's foreground process.\n-func (tm *Terminal) setForegroundProcessGroup(ctx context.Context, io usermem.IO, args arch.SyscallArguments, isMaster bool) (uintptr, error) {\n+func (tm *Terminal) setForegroundProcessGroup(ctx context.Context, args arch.SyscallArguments, isMaster bool) (uintptr, error) {\ntask := kernel.TaskFromContext(ctx)\nif task == nil {\npanic(\"setForegroundProcessGroup must be called from a task context\")\n}\n// Read in the process group ID.\n- var pgid int32\n- if _, err := usermem.CopyObjectIn(ctx, io, args[2].Pointer(), &pgid, usermem.IOOpts{\n- AddressSpaceActive: true,\n- }); err != nil {\n+ var pgid primitive.Int32\n+ if _, err := pgid.CopyIn(task, args[2].Pointer()); err != nil {\nreturn 0, err\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/devpts/BUILD", "new_path": "pkg/sentry/fsimpl/devpts/BUILD", "diff": "@@ -43,6 +43,8 @@ go_library(\n\"//pkg/syserror\",\n\"//pkg/usermem\",\n\"//pkg/waiter\",\n+ \"//tools/go_marshal/marshal\",\n+ \"//tools/go_marshal/primitive\",\n],\n)\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/devpts/line_discipline.go", "new_path": "pkg/sentry/fsimpl/devpts/line_discipline.go", "diff": "@@ -21,6 +21,7 @@ import (\n\"gvisor.dev/gvisor/pkg/abi/linux\"\n\"gvisor.dev/gvisor/pkg/context\"\n\"gvisor.dev/gvisor/pkg/sentry/arch\"\n+ \"gvisor.dev/gvisor/pkg/sentry/kernel\"\n\"gvisor.dev/gvisor/pkg/sync\"\n\"gvisor.dev/gvisor/pkg/syserror\"\n\"gvisor.dev/gvisor/pkg/usermem\"\n@@ -113,27 +114,23 @@ func newLineDiscipline(termios linux.KernelTermios) *lineDiscipline {\n}\n// getTermios gets the linux.Termios for the tty.\n-func (l *lineDiscipline) getTermios(ctx context.Context, io usermem.IO, args arch.SyscallArguments) (uintptr, error) {\n+func (l *lineDiscipline) getTermios(task *kernel.Task, args arch.SyscallArguments) (uintptr, error) {\nl.termiosMu.RLock()\ndefer l.termiosMu.RUnlock()\n// We must copy a Termios struct, not KernelTermios.\nt := l.termios.ToTermios()\n- _, err := usermem.CopyObjectOut(ctx, io, args[2].Pointer(), t, usermem.IOOpts{\n- AddressSpaceActive: true,\n- })\n+ _, err := t.CopyOut(task, args[2].Pointer())\nreturn 0, err\n}\n// setTermios sets a linux.Termios for the tty.\n-func (l *lineDiscipline) setTermios(ctx context.Context, io usermem.IO, args arch.SyscallArguments) (uintptr, error) {\n+func (l *lineDiscipline) setTermios(task *kernel.Task, args arch.SyscallArguments) (uintptr, error) {\nl.termiosMu.Lock()\ndefer l.termiosMu.Unlock()\noldCanonEnabled := l.termios.LEnabled(linux.ICANON)\n// We must copy a Termios struct, not KernelTermios.\nvar t linux.Termios\n- _, err := usermem.CopyObjectIn(ctx, io, 
args[2].Pointer(), &t, usermem.IOOpts{\n- AddressSpaceActive: true,\n- })\n+ _, err := t.CopyIn(task, args[2].Pointer())\nl.termios.FromTermios(t)\n// If canonical mode is turned off, move bytes from inQueue's wait\n@@ -150,21 +147,17 @@ func (l *lineDiscipline) setTermios(ctx context.Context, io usermem.IO, args arc\nreturn 0, err\n}\n-func (l *lineDiscipline) windowSize(ctx context.Context, io usermem.IO, args arch.SyscallArguments) error {\n+func (l *lineDiscipline) windowSize(t *kernel.Task, args arch.SyscallArguments) error {\nl.sizeMu.Lock()\ndefer l.sizeMu.Unlock()\n- _, err := usermem.CopyObjectOut(ctx, io, args[2].Pointer(), l.size, usermem.IOOpts{\n- AddressSpaceActive: true,\n- })\n+ _, err := l.size.CopyOut(t, args[2].Pointer())\nreturn err\n}\n-func (l *lineDiscipline) setWindowSize(ctx context.Context, io usermem.IO, args arch.SyscallArguments) error {\n+func (l *lineDiscipline) setWindowSize(t *kernel.Task, args arch.SyscallArguments) error {\nl.sizeMu.Lock()\ndefer l.sizeMu.Unlock()\n- _, err := usermem.CopyObjectIn(ctx, io, args[2].Pointer(), &l.size, usermem.IOOpts{\n- AddressSpaceActive: true,\n- })\n+ _, err := l.size.CopyIn(t, args[2].Pointer())\nreturn err\n}\n@@ -180,8 +173,8 @@ func (l *lineDiscipline) slaveReadiness() waiter.EventMask {\nreturn l.outQueue.writeReadiness(&l.termios) | l.inQueue.readReadiness(&l.termios)\n}\n-func (l *lineDiscipline) inputQueueReadSize(ctx context.Context, io usermem.IO, args arch.SyscallArguments) error {\n- return l.inQueue.readableSize(ctx, io, args)\n+func (l *lineDiscipline) inputQueueReadSize(t *kernel.Task, io usermem.IO, args arch.SyscallArguments) error {\n+ return l.inQueue.readableSize(t, io, args)\n}\nfunc (l *lineDiscipline) inputQueueRead(ctx context.Context, dst usermem.IOSequence) (int64, error) {\n@@ -215,8 +208,8 @@ func (l *lineDiscipline) inputQueueWrite(ctx context.Context, src usermem.IOSequ\nreturn 0, syserror.ErrWouldBlock\n}\n-func (l *lineDiscipline) outputQueueReadSize(ctx context.Context, io usermem.IO, args arch.SyscallArguments) error {\n- return l.outQueue.readableSize(ctx, io, args)\n+func (l *lineDiscipline) outputQueueReadSize(t *kernel.Task, io usermem.IO, args arch.SyscallArguments) error {\n+ return l.outQueue.readableSize(t, io, args)\n}\nfunc (l *lineDiscipline) outputQueueRead(ctx context.Context, dst usermem.IOSequence) (int64, error) {\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/devpts/master.go", "new_path": "pkg/sentry/fsimpl/devpts/master.go", "diff": "@@ -20,12 +20,14 @@ import (\n\"gvisor.dev/gvisor/pkg/sentry/arch\"\nfslock \"gvisor.dev/gvisor/pkg/sentry/fs/lock\"\n\"gvisor.dev/gvisor/pkg/sentry/fsimpl/kernfs\"\n+ \"gvisor.dev/gvisor/pkg/sentry/kernel\"\n\"gvisor.dev/gvisor/pkg/sentry/kernel/auth\"\n\"gvisor.dev/gvisor/pkg/sentry/unimpl\"\n\"gvisor.dev/gvisor/pkg/sentry/vfs\"\n\"gvisor.dev/gvisor/pkg/syserror\"\n\"gvisor.dev/gvisor/pkg/usermem\"\n\"gvisor.dev/gvisor/pkg/waiter\"\n+ \"gvisor.dev/gvisor/tools/go_marshal/primitive\"\n)\n// masterInode is the inode for the master end of the Terminal.\n@@ -131,46 +133,51 @@ func (mfd *masterFileDescription) Write(ctx context.Context, src usermem.IOSeque\n// Ioctl implements vfs.FileDescriptionImpl.Ioctl.\nfunc (mfd *masterFileDescription) Ioctl(ctx context.Context, io usermem.IO, args arch.SyscallArguments) (uintptr, error) {\n+ t := kernel.TaskFromContext(ctx)\n+ if t == nil {\n+ // ioctl(2) may only be called from a task goroutine.\n+ return 0, syserror.ENOTTY\n+ }\n+\nswitch cmd := args[1].Uint(); cmd {\ncase 
linux.FIONREAD: // linux.FIONREAD == linux.TIOCINQ\n// Get the number of bytes in the output queue read buffer.\n- return 0, mfd.t.ld.outputQueueReadSize(ctx, io, args)\n+ return 0, mfd.t.ld.outputQueueReadSize(t, io, args)\ncase linux.TCGETS:\n// N.B. TCGETS on the master actually returns the configuration\n// of the slave end.\n- return mfd.t.ld.getTermios(ctx, io, args)\n+ return mfd.t.ld.getTermios(t, args)\ncase linux.TCSETS:\n// N.B. TCSETS on the master actually affects the configuration\n// of the slave end.\n- return mfd.t.ld.setTermios(ctx, io, args)\n+ return mfd.t.ld.setTermios(t, args)\ncase linux.TCSETSW:\n// TODO(b/29356795): This should drain the output queue first.\n- return mfd.t.ld.setTermios(ctx, io, args)\n+ return mfd.t.ld.setTermios(t, args)\ncase linux.TIOCGPTN:\n- _, err := usermem.CopyObjectOut(ctx, io, args[2].Pointer(), uint32(mfd.t.n), usermem.IOOpts{\n- AddressSpaceActive: true,\n- })\n+ nP := primitive.Uint32(mfd.t.n)\n+ _, err := nP.CopyOut(t, args[2].Pointer())\nreturn 0, err\ncase linux.TIOCSPTLCK:\n// TODO(b/29356795): Implement pty locking. For now just pretend we do.\nreturn 0, nil\ncase linux.TIOCGWINSZ:\n- return 0, mfd.t.ld.windowSize(ctx, io, args)\n+ return 0, mfd.t.ld.windowSize(t, args)\ncase linux.TIOCSWINSZ:\n- return 0, mfd.t.ld.setWindowSize(ctx, io, args)\n+ return 0, mfd.t.ld.setWindowSize(t, args)\ncase linux.TIOCSCTTY:\n// Make the given terminal the controlling terminal of the\n// calling process.\n- return 0, mfd.t.setControllingTTY(ctx, io, args, true /* isMaster */)\n+ return 0, mfd.t.setControllingTTY(ctx, args, true /* isMaster */)\ncase linux.TIOCNOTTY:\n// Release this process's controlling terminal.\n- return 0, mfd.t.releaseControllingTTY(ctx, io, args, true /* isMaster */)\n+ return 0, mfd.t.releaseControllingTTY(ctx, args, true /* isMaster */)\ncase linux.TIOCGPGRP:\n// Get the foreground process group.\n- return mfd.t.foregroundProcessGroup(ctx, io, args, true /* isMaster */)\n+ return mfd.t.foregroundProcessGroup(ctx, args, true /* isMaster */)\ncase linux.TIOCSPGRP:\n// Set the foreground process group.\n- return mfd.t.setForegroundProcessGroup(ctx, io, args, true /* isMaster */)\n+ return mfd.t.setForegroundProcessGroup(ctx, args, true /* isMaster */)\ndefault:\nmaybeEmitUnimplementedEvent(ctx, cmd)\nreturn 0, syserror.ENOTTY\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/devpts/queue.go", "new_path": "pkg/sentry/fsimpl/devpts/queue.go", "diff": "@@ -19,10 +19,12 @@ import (\n\"gvisor.dev/gvisor/pkg/context\"\n\"gvisor.dev/gvisor/pkg/safemem\"\n\"gvisor.dev/gvisor/pkg/sentry/arch\"\n+ \"gvisor.dev/gvisor/pkg/sentry/kernel\"\n\"gvisor.dev/gvisor/pkg/sync\"\n\"gvisor.dev/gvisor/pkg/syserror\"\n\"gvisor.dev/gvisor/pkg/usermem\"\n\"gvisor.dev/gvisor/pkg/waiter\"\n+ \"gvisor.dev/gvisor/tools/go_marshal/primitive\"\n)\n// waitBufMaxBytes is the maximum size of a wait buffer. 
It is based on\n@@ -83,17 +85,15 @@ func (q *queue) writeReadiness(t *linux.KernelTermios) waiter.EventMask {\n}\n// readableSize writes the number of readable bytes to userspace.\n-func (q *queue) readableSize(ctx context.Context, io usermem.IO, args arch.SyscallArguments) error {\n+func (q *queue) readableSize(t *kernel.Task, io usermem.IO, args arch.SyscallArguments) error {\nq.mu.Lock()\ndefer q.mu.Unlock()\n- var size int32\n+ size := primitive.Int32(0)\nif q.readable {\n- size = int32(len(q.readBuf))\n+ size = primitive.Int32(len(q.readBuf))\n}\n- _, err := usermem.CopyObjectOut(ctx, io, args[2].Pointer(), size, usermem.IOOpts{\n- AddressSpaceActive: true,\n- })\n+ _, err := size.CopyOut(t, args[2].Pointer())\nreturn err\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/devpts/slave.go", "new_path": "pkg/sentry/fsimpl/devpts/slave.go", "diff": "@@ -20,11 +20,13 @@ import (\n\"gvisor.dev/gvisor/pkg/sentry/arch\"\nfslock \"gvisor.dev/gvisor/pkg/sentry/fs/lock\"\n\"gvisor.dev/gvisor/pkg/sentry/fsimpl/kernfs\"\n+ \"gvisor.dev/gvisor/pkg/sentry/kernel\"\n\"gvisor.dev/gvisor/pkg/sentry/kernel/auth\"\n\"gvisor.dev/gvisor/pkg/sentry/vfs\"\n\"gvisor.dev/gvisor/pkg/syserror\"\n\"gvisor.dev/gvisor/pkg/usermem\"\n\"gvisor.dev/gvisor/pkg/waiter\"\n+ \"gvisor.dev/gvisor/tools/go_marshal/primitive\"\n)\n// slaveInode is the inode for the slave end of the Terminal.\n@@ -135,39 +137,44 @@ func (sfd *slaveFileDescription) Write(ctx context.Context, src usermem.IOSequen\n// Ioctl implements vfs.FileDescriptionImpl.Ioctl.\nfunc (sfd *slaveFileDescription) Ioctl(ctx context.Context, io usermem.IO, args arch.SyscallArguments) (uintptr, error) {\n+ t := kernel.TaskFromContext(ctx)\n+ if t == nil {\n+ // ioctl(2) may only be called from a task goroutine.\n+ return 0, syserror.ENOTTY\n+ }\n+\nswitch cmd := args[1].Uint(); cmd {\ncase linux.FIONREAD: // linux.FIONREAD == linux.TIOCINQ\n// Get the number of bytes in the input queue read buffer.\n- return 0, sfd.inode.t.ld.inputQueueReadSize(ctx, io, args)\n+ return 0, sfd.inode.t.ld.inputQueueReadSize(t, io, args)\ncase linux.TCGETS:\n- return sfd.inode.t.ld.getTermios(ctx, io, args)\n+ return sfd.inode.t.ld.getTermios(t, args)\ncase linux.TCSETS:\n- return sfd.inode.t.ld.setTermios(ctx, io, args)\n+ return sfd.inode.t.ld.setTermios(t, args)\ncase linux.TCSETSW:\n// TODO(b/29356795): This should drain the output queue first.\n- return sfd.inode.t.ld.setTermios(ctx, io, args)\n+ return sfd.inode.t.ld.setTermios(t, args)\ncase linux.TIOCGPTN:\n- _, err := usermem.CopyObjectOut(ctx, io, args[2].Pointer(), uint32(sfd.inode.t.n), usermem.IOOpts{\n- AddressSpaceActive: true,\n- })\n+ nP := primitive.Uint32(sfd.inode.t.n)\n+ _, err := nP.CopyOut(t, args[2].Pointer())\nreturn 0, err\ncase linux.TIOCGWINSZ:\n- return 0, sfd.inode.t.ld.windowSize(ctx, io, args)\n+ return 0, sfd.inode.t.ld.windowSize(t, args)\ncase linux.TIOCSWINSZ:\n- return 0, sfd.inode.t.ld.setWindowSize(ctx, io, args)\n+ return 0, sfd.inode.t.ld.setWindowSize(t, args)\ncase linux.TIOCSCTTY:\n// Make the given terminal the controlling terminal of the\n// calling process.\n- return 0, sfd.inode.t.setControllingTTY(ctx, io, args, false /* isMaster */)\n+ return 0, sfd.inode.t.setControllingTTY(ctx, args, false /* isMaster */)\ncase linux.TIOCNOTTY:\n// Release this process's controlling terminal.\n- return 0, sfd.inode.t.releaseControllingTTY(ctx, io, args, false /* isMaster */)\n+ return 0, sfd.inode.t.releaseControllingTTY(ctx, args, false /* isMaster */)\ncase linux.TIOCGPGRP:\n// 
Get the foreground process group.\n- return sfd.inode.t.foregroundProcessGroup(ctx, io, args, false /* isMaster */)\n+ return sfd.inode.t.foregroundProcessGroup(ctx, args, false /* isMaster */)\ncase linux.TIOCSPGRP:\n// Set the foreground process group.\n- return sfd.inode.t.setForegroundProcessGroup(ctx, io, args, false /* isMaster */)\n+ return sfd.inode.t.setForegroundProcessGroup(ctx, args, false /* isMaster */)\ndefault:\nmaybeEmitUnimplementedEvent(ctx, cmd)\nreturn 0, syserror.ENOTTY\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/devpts/terminal.go", "new_path": "pkg/sentry/fsimpl/devpts/terminal.go", "diff": "@@ -19,7 +19,7 @@ import (\n\"gvisor.dev/gvisor/pkg/context\"\n\"gvisor.dev/gvisor/pkg/sentry/arch\"\n\"gvisor.dev/gvisor/pkg/sentry/kernel\"\n- \"gvisor.dev/gvisor/pkg/usermem\"\n+ \"gvisor.dev/gvisor/tools/go_marshal/primitive\"\n)\n// Terminal is a pseudoterminal.\n@@ -54,7 +54,7 @@ func newTerminal(n uint32) *Terminal {\n// setControllingTTY makes tm the controlling terminal of the calling thread\n// group.\n-func (tm *Terminal) setControllingTTY(ctx context.Context, io usermem.IO, args arch.SyscallArguments, isMaster bool) error {\n+func (tm *Terminal) setControllingTTY(ctx context.Context, args arch.SyscallArguments, isMaster bool) error {\ntask := kernel.TaskFromContext(ctx)\nif task == nil {\npanic(\"setControllingTTY must be called from a task context\")\n@@ -65,7 +65,7 @@ func (tm *Terminal) setControllingTTY(ctx context.Context, io usermem.IO, args a\n// releaseControllingTTY removes tm as the controlling terminal of the calling\n// thread group.\n-func (tm *Terminal) releaseControllingTTY(ctx context.Context, io usermem.IO, args arch.SyscallArguments, isMaster bool) error {\n+func (tm *Terminal) releaseControllingTTY(ctx context.Context, args arch.SyscallArguments, isMaster bool) error {\ntask := kernel.TaskFromContext(ctx)\nif task == nil {\npanic(\"releaseControllingTTY must be called from a task context\")\n@@ -75,7 +75,7 @@ func (tm *Terminal) releaseControllingTTY(ctx context.Context, io usermem.IO, ar\n}\n// foregroundProcessGroup gets the process group ID of tm's foreground process.\n-func (tm *Terminal) foregroundProcessGroup(ctx context.Context, io usermem.IO, args arch.SyscallArguments, isMaster bool) (uintptr, error) {\n+func (tm *Terminal) foregroundProcessGroup(ctx context.Context, args arch.SyscallArguments, isMaster bool) (uintptr, error) {\ntask := kernel.TaskFromContext(ctx)\nif task == nil {\npanic(\"foregroundProcessGroup must be called from a task context\")\n@@ -87,24 +87,21 @@ func (tm *Terminal) foregroundProcessGroup(ctx context.Context, io usermem.IO, a\n}\n// Write it out to *arg.\n- _, err = usermem.CopyObjectOut(ctx, io, args[2].Pointer(), int32(ret), usermem.IOOpts{\n- AddressSpaceActive: true,\n- })\n+ retP := primitive.Int32(ret)\n+ _, err = retP.CopyOut(task, args[2].Pointer())\nreturn 0, err\n}\n// foregroundProcessGroup sets tm's foreground process.\n-func (tm *Terminal) setForegroundProcessGroup(ctx context.Context, io usermem.IO, args arch.SyscallArguments, isMaster bool) (uintptr, error) {\n+func (tm *Terminal) setForegroundProcessGroup(ctx context.Context, args arch.SyscallArguments, isMaster bool) (uintptr, error) {\ntask := kernel.TaskFromContext(ctx)\nif task == nil {\npanic(\"setForegroundProcessGroup must be called from a task context\")\n}\n// Read in the process group ID.\n- var pgid int32\n- if _, err := usermem.CopyObjectIn(ctx, io, args[2].Pointer(), &pgid, usermem.IOOpts{\n- AddressSpaceActive: 
true,\n- }); err != nil {\n+ var pgid primitive.Int32\n+ if _, err := pgid.CopyIn(task, args[2].Pointer()); err != nil {\nreturn 0, err\n}\n" } ]
Go
Apache License 2.0
google/gvisor
[go-marshal] Enable auto-marshalling for fs/tty. PiperOrigin-RevId: 329564614
259,896
01.09.2020 13:38:44
25,200
04c284f8c2015b801c929325a6304e601eb94e56
Fix panic when calling dup2().
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/kernel/fd_table.go", "new_path": "pkg/sentry/kernel/fd_table.go", "diff": "@@ -112,7 +112,7 @@ func (f *FDTable) loadDescriptorTable(m map[int32]descriptor) {\nctx := context.Background()\nf.init() // Initialize table.\nfor fd, d := range m {\n- f.setAll(fd, d.file, d.fileVFS2, d.flags)\n+ f.setAll(ctx, fd, d.file, d.fileVFS2, d.flags)\n// Note that we do _not_ need to acquire a extra table reference here. The\n// table reference will already be accounted for in the file, so we drop the\n@@ -127,7 +127,7 @@ func (f *FDTable) loadDescriptorTable(m map[int32]descriptor) {\n}\n// drop drops the table reference.\n-func (f *FDTable) drop(file *fs.File) {\n+func (f *FDTable) drop(ctx context.Context, file *fs.File) {\n// Release locks.\nfile.Dirent.Inode.LockCtx.Posix.UnlockRegion(f, lock.LockRange{0, lock.LockEOF})\n@@ -145,14 +145,13 @@ func (f *FDTable) drop(file *fs.File) {\nd.InotifyEvent(ev, 0)\n// Drop the table reference.\n- file.DecRef(context.Background())\n+ file.DecRef(ctx)\n}\n// dropVFS2 drops the table reference.\n-func (f *FDTable) dropVFS2(file *vfs.FileDescription) {\n+func (f *FDTable) dropVFS2(ctx context.Context, file *vfs.FileDescription) {\n// Release any POSIX lock possibly held by the FDTable. Range {0, 0} means the\n// entire file.\n- ctx := context.Background()\nerr := file.UnlockPOSIX(ctx, f, 0, 0, linux.SEEK_SET)\nif err != nil && err != syserror.ENOLCK {\npanic(fmt.Sprintf(\"UnlockPOSIX failed: %v\", err))\n@@ -289,7 +288,7 @@ func (f *FDTable) NewFDs(ctx context.Context, fd int32, files []*fs.File, flags\n// Install all entries.\nfor i := fd; i < end && len(fds) < len(files); i++ {\nif d, _, _ := f.get(i); d == nil {\n- f.set(i, files[len(fds)], flags) // Set the descriptor.\n+ f.set(ctx, i, files[len(fds)], flags) // Set the descriptor.\nfds = append(fds, i) // Record the file descriptor.\n}\n}\n@@ -297,7 +296,7 @@ func (f *FDTable) NewFDs(ctx context.Context, fd int32, files []*fs.File, flags\n// Failure? Unwind existing FDs.\nif len(fds) < len(files) {\nfor _, i := range fds {\n- f.set(i, nil, FDFlags{}) // Zap entry.\n+ f.set(ctx, i, nil, FDFlags{}) // Zap entry.\n}\nreturn nil, syscall.EMFILE\n}\n@@ -344,7 +343,7 @@ func (f *FDTable) NewFDsVFS2(ctx context.Context, fd int32, files []*vfs.FileDes\n// Install all entries.\nfor i := fd; i < end && len(fds) < len(files); i++ {\nif d, _, _ := f.getVFS2(i); d == nil {\n- f.setVFS2(i, files[len(fds)], flags) // Set the descriptor.\n+ f.setVFS2(ctx, i, files[len(fds)], flags) // Set the descriptor.\nfds = append(fds, i) // Record the file descriptor.\n}\n}\n@@ -352,7 +351,7 @@ func (f *FDTable) NewFDsVFS2(ctx context.Context, fd int32, files []*vfs.FileDes\n// Failure? 
Unwind existing FDs.\nif len(fds) < len(files) {\nfor _, i := range fds {\n- f.setVFS2(i, nil, FDFlags{}) // Zap entry.\n+ f.setVFS2(ctx, i, nil, FDFlags{}) // Zap entry.\n}\nreturn nil, syscall.EMFILE\n}\n@@ -397,7 +396,7 @@ func (f *FDTable) NewFDVFS2(ctx context.Context, minfd int32, file *vfs.FileDesc\n}\nfor fd < end {\nif d, _, _ := f.getVFS2(fd); d == nil {\n- f.setVFS2(fd, file, flags)\n+ f.setVFS2(ctx, fd, file, flags)\nif fd == f.next {\n// Update next search start position.\nf.next = fd + 1\n@@ -439,14 +438,14 @@ func (f *FDTable) newFDAt(ctx context.Context, fd int32, file *fs.File, fileVFS2\n// Install the entry.\nf.mu.Lock()\ndefer f.mu.Unlock()\n- f.setAll(fd, file, fileVFS2, flags)\n+ f.setAll(ctx, fd, file, fileVFS2, flags)\nreturn nil\n}\n// SetFlags sets the flags for the given file descriptor.\n//\n// True is returned iff flags were changed.\n-func (f *FDTable) SetFlags(fd int32, flags FDFlags) error {\n+func (f *FDTable) SetFlags(ctx context.Context, fd int32, flags FDFlags) error {\nif fd < 0 {\n// Don't accept negative FDs.\nreturn syscall.EBADF\n@@ -462,14 +461,14 @@ func (f *FDTable) SetFlags(fd int32, flags FDFlags) error {\n}\n// Update the flags.\n- f.set(fd, file, flags)\n+ f.set(ctx, fd, file, flags)\nreturn nil\n}\n// SetFlagsVFS2 sets the flags for the given file descriptor.\n//\n// True is returned iff flags were changed.\n-func (f *FDTable) SetFlagsVFS2(fd int32, flags FDFlags) error {\n+func (f *FDTable) SetFlagsVFS2(ctx context.Context, fd int32, flags FDFlags) error {\nif fd < 0 {\n// Don't accept negative FDs.\nreturn syscall.EBADF\n@@ -485,7 +484,7 @@ func (f *FDTable) SetFlagsVFS2(fd int32, flags FDFlags) error {\n}\n// Update the flags.\n- f.setVFS2(fd, file, flags)\n+ f.setVFS2(ctx, fd, file, flags)\nreturn nil\n}\n@@ -584,9 +583,9 @@ func (f *FDTable) Fork(ctx context.Context) *FDTable {\n// reference for the clone. We don't need anything else.\nswitch {\ncase file != nil:\n- clone.set(fd, file, flags)\n+ clone.set(ctx, fd, file, flags)\ncase fileVFS2 != nil:\n- clone.setVFS2(fd, fileVFS2, flags)\n+ clone.setVFS2(ctx, fd, fileVFS2, flags)\n}\n})\nreturn clone\n@@ -595,7 +594,7 @@ func (f *FDTable) Fork(ctx context.Context) *FDTable {\n// Remove removes an FD from and returns a non-file iff successful.\n//\n// N.B. 
Callers are required to use DecRef when they are done.\n-func (f *FDTable) Remove(fd int32) (*fs.File, *vfs.FileDescription) {\n+func (f *FDTable) Remove(ctx context.Context, fd int32) (*fs.File, *vfs.FileDescription) {\nif fd < 0 {\nreturn nil, nil\n}\n@@ -618,7 +617,7 @@ func (f *FDTable) Remove(fd int32) (*fs.File, *vfs.FileDescription) {\norig2.IncRef()\n}\nif orig != nil || orig2 != nil {\n- f.setAll(fd, nil, nil, FDFlags{}) // Zap entry.\n+ f.setAll(ctx, fd, nil, nil, FDFlags{}) // Zap entry.\n}\nreturn orig, orig2\n}\n@@ -630,7 +629,7 @@ func (f *FDTable) RemoveIf(ctx context.Context, cond func(*fs.File, *vfs.FileDes\nf.forEach(ctx, func(fd int32, file *fs.File, fileVFS2 *vfs.FileDescription, flags FDFlags) {\nif cond(file, fileVFS2, flags) {\n- f.set(fd, nil, FDFlags{}) // Clear from table.\n+ f.set(ctx, fd, nil, FDFlags{}) // Clear from table.\n// Update current available position.\nif fd < f.next {\nf.next = fd\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/kernel/fd_table_test.go", "new_path": "pkg/sentry/kernel/fd_table_test.go", "diff": "@@ -72,7 +72,7 @@ func TestFDTableMany(t *testing.T) {\n}\ni := int32(2)\n- fdTable.Remove(i)\n+ fdTable.Remove(ctx, i)\nif fds, err := fdTable.NewFDs(ctx, 0, []*fs.File{file}, FDFlags{}); err != nil || fds[0] != i {\nt.Fatalf(\"Allocated %v FDs but wanted to allocate %v: %v\", i, maxFD, err)\n}\n@@ -93,7 +93,7 @@ func TestFDTableOverLimit(t *testing.T) {\nt.Fatalf(\"fdTable.NewFDs(maxFD-3, {f,f,f}): got %v, wanted nil\", err)\n} else {\nfor _, fd := range fds {\n- fdTable.Remove(fd)\n+ fdTable.Remove(ctx, fd)\n}\n}\n@@ -150,13 +150,13 @@ func TestFDTable(t *testing.T) {\nt.Fatalf(\"fdTable.Get(2): got a %v, wanted nil\", ref)\n}\n- ref, _ := fdTable.Remove(1)\n+ ref, _ := fdTable.Remove(ctx, 1)\nif ref == nil {\nt.Fatalf(\"fdTable.Remove(1) for an existing FD: failed, want success\")\n}\nref.DecRef(ctx)\n- if ref, _ := fdTable.Remove(1); ref != nil {\n+ if ref, _ := fdTable.Remove(ctx, 1); ref != nil {\nt.Fatalf(\"r.Remove(1) for a removed FD: got success, want failure\")\n}\n})\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/kernel/fd_table_unsafe.go", "new_path": "pkg/sentry/kernel/fd_table_unsafe.go", "diff": "@@ -18,6 +18,7 @@ import (\n\"sync/atomic\"\n\"unsafe\"\n+ \"gvisor.dev/gvisor/pkg/context\"\n\"gvisor.dev/gvisor/pkg/sentry/fs\"\n\"gvisor.dev/gvisor/pkg/sentry/vfs\"\n)\n@@ -84,8 +85,8 @@ func (f *FDTable) getAll(fd int32) (*fs.File, *vfs.FileDescription, FDFlags, boo\n// reference needed by the table iff the file is different.\n//\n// Precondition: mu must be held.\n-func (f *FDTable) set(fd int32, file *fs.File, flags FDFlags) {\n- f.setAll(fd, file, nil, flags)\n+func (f *FDTable) set(ctx context.Context, fd int32, file *fs.File, flags FDFlags) {\n+ f.setAll(ctx, fd, file, nil, flags)\n}\n// setVFS2 sets an entry.\n@@ -94,8 +95,8 @@ func (f *FDTable) set(fd int32, file *fs.File, flags FDFlags) {\n// reference needed by the table iff the file is different.\n//\n// Precondition: mu must be held.\n-func (f *FDTable) setVFS2(fd int32, file *vfs.FileDescription, flags FDFlags) {\n- f.setAll(fd, nil, file, flags)\n+func (f *FDTable) setVFS2(ctx context.Context, fd int32, file *vfs.FileDescription, flags FDFlags) {\n+ f.setAll(ctx, fd, nil, file, flags)\n}\n// setAll sets an entry.\n@@ -104,7 +105,7 @@ func (f *FDTable) setVFS2(fd int32, file *vfs.FileDescription, flags FDFlags) {\n// reference needed by the table iff the file is different.\n//\n// Precondition: mu must be held.\n-func (f *FDTable) setAll(fd int32, 
file *fs.File, fileVFS2 *vfs.FileDescription, flags FDFlags) {\n+func (f *FDTable) setAll(ctx context.Context, fd int32, file *fs.File, fileVFS2 *vfs.FileDescription, flags FDFlags) {\nif file != nil && fileVFS2 != nil {\npanic(\"VFS1 and VFS2 files set\")\n}\n@@ -152,11 +153,11 @@ func (f *FDTable) setAll(fd int32, file *fs.File, fileVFS2 *vfs.FileDescription,\nswitch {\ncase orig.file != nil:\nif desc == nil || desc.file != orig.file {\n- f.drop(orig.file)\n+ f.drop(ctx, orig.file)\n}\ncase orig.fileVFS2 != nil:\nif desc == nil || desc.fileVFS2 != orig.fileVFS2 {\n- f.dropVFS2(orig.fileVFS2)\n+ f.dropVFS2(ctx, orig.fileVFS2)\n}\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/syscalls/linux/sys_file.go", "new_path": "pkg/sentry/syscalls/linux/sys_file.go", "diff": "@@ -601,12 +601,12 @@ func Ioctl(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall\n// Shared flags between file and socket.\nswitch request {\ncase linux.FIONCLEX:\n- t.FDTable().SetFlags(fd, kernel.FDFlags{\n+ t.FDTable().SetFlags(t, fd, kernel.FDFlags{\nCloseOnExec: false,\n})\nreturn 0, nil, nil\ncase linux.FIOCLEX:\n- t.FDTable().SetFlags(fd, kernel.FDFlags{\n+ t.FDTable().SetFlags(t, fd, kernel.FDFlags{\nCloseOnExec: true,\n})\nreturn 0, nil, nil\n@@ -787,7 +787,7 @@ func Close(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall\n// Note that Remove provides a reference on the file that we may use to\n// flush. It is still active until we drop the final reference below\n// (and other reference-holding operations complete).\n- file, _ := t.FDTable().Remove(fd)\n+ file, _ := t.FDTable().Remove(t, fd)\nif file == nil {\nreturn 0, nil, syserror.EBADF\n}\n@@ -941,7 +941,7 @@ func Fcntl(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall\nreturn uintptr(flags.ToLinuxFDFlags()), nil, nil\ncase linux.F_SETFD:\nflags := args[2].Uint()\n- err := t.FDTable().SetFlags(fd, kernel.FDFlags{\n+ err := t.FDTable().SetFlags(t, fd, kernel.FDFlags{\nCloseOnExec: flags&linux.FD_CLOEXEC != 0,\n})\nreturn 0, nil, err\n@@ -1154,6 +1154,10 @@ func Fadvise64(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sys\nreturn 0, nil, nil\n}\n+// LINT.ThenChange(vfs2/fd.go)\n+\n+// LINT.IfChange\n+\nfunc mkdirAt(t *kernel.Task, dirFD int32, addr usermem.Addr, mode linux.FileMode) error {\npath, _, err := copyInPath(t, addr, false /* allowEmpty */)\nif err != nil {\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/syscalls/linux/sys_pipe.go", "new_path": "pkg/sentry/syscalls/linux/sys_pipe.go", "diff": "@@ -48,7 +48,7 @@ func pipe2(t *kernel.Task, addr usermem.Addr, flags uint) (uintptr, error) {\nif _, err := t.CopyOut(addr, fds); err != nil {\nfor _, fd := range fds {\n- if file, _ := t.FDTable().Remove(fd); file != nil {\n+ if file, _ := t.FDTable().Remove(t, fd); file != nil {\nfile.DecRef(t)\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/syscalls/linux/sys_socket.go", "new_path": "pkg/sentry/syscalls/linux/sys_socket.go", "diff": "@@ -249,7 +249,7 @@ func SocketPair(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sy\n// Copy the file descriptors out.\nif _, err := t.CopyOut(socks, fds); err != nil {\nfor _, fd := range fds {\n- if file, _ := t.FDTable().Remove(fd); file != nil {\n+ if file, _ := t.FDTable().Remove(t, fd); file != nil {\nfile.DecRef(t)\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/syscalls/linux/vfs2/fd.go", "new_path": "pkg/sentry/syscalls/linux/vfs2/fd.go", "diff": "@@ -34,7 +34,7 @@ func Close(t 
*kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall\n// Note that Remove provides a reference on the file that we may use to\n// flush. It is still active until we drop the final reference below\n// (and other reference-holding operations complete).\n- _, file := t.FDTable().Remove(fd)\n+ _, file := t.FDTable().Remove(t, fd)\nif file == nil {\nreturn 0, nil, syserror.EBADF\n}\n@@ -137,7 +137,7 @@ func Fcntl(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall\nreturn uintptr(flags.ToLinuxFDFlags()), nil, nil\ncase linux.F_SETFD:\nflags := args[2].Uint()\n- err := t.FDTable().SetFlagsVFS2(fd, kernel.FDFlags{\n+ err := t.FDTable().SetFlagsVFS2(t, fd, kernel.FDFlags{\nCloseOnExec: flags&linux.FD_CLOEXEC != 0,\n})\nreturn 0, nil, err\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/syscalls/linux/vfs2/ioctl.go", "new_path": "pkg/sentry/syscalls/linux/vfs2/ioctl.go", "diff": "@@ -34,13 +34,13 @@ func Ioctl(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall\n// Handle ioctls that apply to all FDs.\nswitch args[1].Int() {\ncase linux.FIONCLEX:\n- t.FDTable().SetFlagsVFS2(fd, kernel.FDFlags{\n+ t.FDTable().SetFlagsVFS2(t, fd, kernel.FDFlags{\nCloseOnExec: false,\n})\nreturn 0, nil, nil\ncase linux.FIOCLEX:\n- t.FDTable().SetFlagsVFS2(fd, kernel.FDFlags{\n+ t.FDTable().SetFlagsVFS2(t, fd, kernel.FDFlags{\nCloseOnExec: true,\n})\nreturn 0, nil, nil\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/syscalls/linux/vfs2/pipe.go", "new_path": "pkg/sentry/syscalls/linux/vfs2/pipe.go", "diff": "@@ -53,7 +53,7 @@ func pipe2(t *kernel.Task, addr usermem.Addr, flags int32) error {\n}\nif _, err := t.CopyOut(addr, fds); err != nil {\nfor _, fd := range fds {\n- if _, file := t.FDTable().Remove(fd); file != nil {\n+ if _, file := t.FDTable().Remove(t, fd); file != nil {\nfile.DecRef(t)\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/syscalls/linux/vfs2/socket.go", "new_path": "pkg/sentry/syscalls/linux/vfs2/socket.go", "diff": "@@ -252,7 +252,7 @@ func SocketPair(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sy\nif _, err := t.CopyOut(addr, fds); err != nil {\nfor _, fd := range fds {\n- if _, file := t.FDTable().Remove(fd); file != nil {\n+ if _, file := t.FDTable().Remove(t, fd); file != nil {\nfile.DecRef(t)\n}\n}\n" } ]
Go
Apache License 2.0
google/gvisor
Fix panic when calling dup2(). PiperOrigin-RevId: 329572337
259,860
01.09.2020 17:19:26
25,200
c67d8ece09f44900951c5e20d9c551b9759c7aba
Test opening file handles with different permissions. These were problematic for vfs2 gofers before correctly implementing separate read/write handles.
[ { "change_type": "MODIFY", "old_path": "test/syscalls/linux/open_create.cc", "new_path": "test/syscalls/linux/open_create.cc", "diff": "@@ -88,21 +88,21 @@ TEST(CreateTest, CreateExclusively) {\nSyscallFailsWithErrno(EEXIST));\n}\n-TEST(CreateTeast, CreatWithOTrunc) {\n+TEST(CreateTest, CreatWithOTrunc) {\nstd::string dirpath = JoinPath(GetAbsoluteTestTmpdir(), \"truncd\");\nASSERT_THAT(mkdir(dirpath.c_str(), 0777), SyscallSucceeds());\nASSERT_THAT(open(dirpath.c_str(), O_CREAT | O_TRUNC, 0666),\nSyscallFailsWithErrno(EISDIR));\n}\n-TEST(CreateTeast, CreatDirWithOTruncAndReadOnly) {\n+TEST(CreateTest, CreatDirWithOTruncAndReadOnly) {\nstd::string dirpath = JoinPath(GetAbsoluteTestTmpdir(), \"truncd\");\nASSERT_THAT(mkdir(dirpath.c_str(), 0777), SyscallSucceeds());\nASSERT_THAT(open(dirpath.c_str(), O_CREAT | O_TRUNC | O_RDONLY, 0666),\nSyscallFailsWithErrno(EISDIR));\n}\n-TEST(CreateTeast, CreatFileWithOTruncAndReadOnly) {\n+TEST(CreateTest, CreatFileWithOTruncAndReadOnly) {\nstd::string dirpath = JoinPath(GetAbsoluteTestTmpdir(), \"truncfile\");\nint dirfd;\nASSERT_THAT(dirfd = open(dirpath.c_str(), O_RDWR | O_CREAT, 0666),\n@@ -149,6 +149,116 @@ TEST(CreateTest, OpenCreateROThenRW) {\nEXPECT_THAT(WriteFd(fd2.get(), &c, 1), SyscallSucceedsWithValue(1));\n}\n+TEST(CreateTest, ChmodReadToWriteBetweenOpens_NoRandomSave) {\n+ // Make sure we don't have CAP_DAC_OVERRIDE, since that allows the user to\n+ // override file read/write permissions. CAP_DAC_READ_SEARCH needs to be\n+ // cleared for the same reason.\n+ ASSERT_NO_ERRNO(SetCapability(CAP_DAC_OVERRIDE, false));\n+ ASSERT_NO_ERRNO(SetCapability(CAP_DAC_READ_SEARCH, false));\n+\n+ const TempPath file =\n+ ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileMode(0400));\n+\n+ const FileDescriptor rfd =\n+ ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_RDONLY));\n+\n+ // Cannot restore after making permissions more restrictive.\n+ const DisableSave ds;\n+ ASSERT_THAT(fchmod(rfd.get(), 0200), SyscallSucceeds());\n+\n+ EXPECT_THAT(open(file.path().c_str(), O_RDONLY),\n+ SyscallFailsWithErrno(EACCES));\n+\n+ const FileDescriptor wfd =\n+ ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_WRONLY));\n+\n+ char c = 'x';\n+ EXPECT_THAT(write(wfd.get(), &c, 1), SyscallSucceedsWithValue(1));\n+ c = 0;\n+ EXPECT_THAT(read(rfd.get(), &c, 1), SyscallSucceedsWithValue(1));\n+ EXPECT_EQ(c, 'x');\n+}\n+\n+TEST(CreateTest, ChmodWriteToReadBetweenOpens_NoRandomSave) {\n+ // Make sure we don't have CAP_DAC_OVERRIDE, since that allows the user to\n+ // override file read/write permissions.\n+ ASSERT_NO_ERRNO(SetCapability(CAP_DAC_OVERRIDE, false));\n+\n+ const TempPath file =\n+ ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileMode(0200));\n+\n+ const FileDescriptor wfd =\n+ ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_WRONLY));\n+\n+ // Cannot restore after making permissions more restrictive.\n+ const DisableSave ds;\n+ ASSERT_THAT(fchmod(wfd.get(), 0400), SyscallSucceeds());\n+\n+ EXPECT_THAT(open(file.path().c_str(), O_WRONLY),\n+ SyscallFailsWithErrno(EACCES));\n+\n+ const FileDescriptor rfd =\n+ ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_RDONLY));\n+\n+ char c = 'x';\n+ EXPECT_THAT(write(wfd.get(), &c, 1), SyscallSucceedsWithValue(1));\n+ c = 0;\n+ EXPECT_THAT(read(rfd.get(), &c, 1), SyscallSucceedsWithValue(1));\n+ EXPECT_EQ(c, 'x');\n+}\n+\n+TEST(CreateTest, CreateWithReadFlagNotAllowedByMode_NoRandomSave) {\n+ // The only time we can open a file with flags forbidden by its permissions\n+ // is when we are creating the file. 
We cannot re-open with the same flags,\n+ // so we cannot restore an fd obtained from such an operation.\n+ const DisableSave ds;\n+\n+ // Make sure we don't have CAP_DAC_OVERRIDE, since that allows the user to\n+ // override file read/write permissions. CAP_DAC_READ_SEARCH needs to be\n+ // cleared for the same reason.\n+ ASSERT_NO_ERRNO(SetCapability(CAP_DAC_OVERRIDE, false));\n+ ASSERT_NO_ERRNO(SetCapability(CAP_DAC_READ_SEARCH, false));\n+\n+ // Create and open a file with read flag but without read permissions.\n+ const std::string path = NewTempAbsPath();\n+ const FileDescriptor rfd =\n+ ASSERT_NO_ERRNO_AND_VALUE(Open(path, O_CREAT | O_RDONLY, 0222));\n+\n+ EXPECT_THAT(open(path.c_str(), O_RDONLY), SyscallFailsWithErrno(EACCES));\n+ const FileDescriptor wfd = ASSERT_NO_ERRNO_AND_VALUE(Open(path, O_WRONLY));\n+\n+ char c = 'x';\n+ EXPECT_THAT(write(wfd.get(), &c, 1), SyscallSucceedsWithValue(1));\n+ c = 0;\n+ EXPECT_THAT(read(rfd.get(), &c, 1), SyscallSucceedsWithValue(1));\n+ EXPECT_EQ(c, 'x');\n+}\n+\n+TEST(CreateTest, CreateWithWriteFlagNotAllowedByMode_NoRandomSave) {\n+ // The only time we can open a file with flags forbidden by its permissions\n+ // is when we are creating the file. We cannot re-open with the same flags,\n+ // so we cannot restore an fd obtained from such an operation.\n+ const DisableSave ds;\n+\n+ // Make sure we don't have CAP_DAC_OVERRIDE, since that allows the user to\n+ // override file read/write permissions.\n+ ASSERT_NO_ERRNO(SetCapability(CAP_DAC_OVERRIDE, false));\n+\n+ // Create and open a file with write flag but without write permissions.\n+ const std::string path = NewTempAbsPath();\n+ const FileDescriptor wfd =\n+ ASSERT_NO_ERRNO_AND_VALUE(Open(path, O_CREAT | O_WRONLY, 0444));\n+\n+ EXPECT_THAT(open(path.c_str(), O_WRONLY), SyscallFailsWithErrno(EACCES));\n+ const FileDescriptor rfd = ASSERT_NO_ERRNO_AND_VALUE(Open(path, O_RDONLY));\n+\n+ char c = 'x';\n+ EXPECT_THAT(write(wfd.get(), &c, 1), SyscallSucceedsWithValue(1));\n+ c = 0;\n+ EXPECT_THAT(read(rfd.get(), &c, 1), SyscallSucceedsWithValue(1));\n+ EXPECT_EQ(c, 'x');\n+}\n+\n} // namespace\n} // namespace testing\n" } ]
Go
Apache License 2.0
google/gvisor
Test opening file handles with different permissions. These were problematic for vfs2 gofers before correctly implementing separate read/write handles. PiperOrigin-RevId: 329613261
259,992
01.09.2020 19:20:37
25,200
37a217aca4a128d66a6dd4969375ea81bd879ac7
Implement setattr+clunk in 9P This is to cover the common pattern: open->read/write->close, where SetAttr needs to be called to update atime/mtime before the file is closed. Benchmark results: BM_OpenReadClose/10240 CPU setattr+clunk: 63783 ns VFS2: 68109 ns VFS1: 72507 ns Updates
[ { "change_type": "MODIFY", "old_path": "pkg/p9/client_file.go", "new_path": "pkg/p9/client_file.go", "diff": "@@ -54,6 +54,8 @@ func (c *Client) newFile(fid FID) *clientFile {\n//\n// This proxies all of the interfaces found in file.go.\ntype clientFile struct {\n+ DisallowServerCalls\n+\n// client is the originating client.\nclient *Client\n@@ -283,6 +285,39 @@ func (c *clientFile) Close() error {\nreturn nil\n}\n+// SetAttrClose implements File.SetAttrClose.\n+func (c *clientFile) SetAttrClose(valid SetAttrMask, attr SetAttr) error {\n+ if !versionSupportsTsetattrclunk(c.client.version) {\n+ setAttrErr := c.SetAttr(valid, attr)\n+\n+ // Try to close file even in case of failure above. Since the state of the\n+ // file is unknown to the caller, it will not attempt to close the file\n+ // again.\n+ if err := c.Close(); err != nil {\n+ return err\n+ }\n+\n+ return setAttrErr\n+ }\n+\n+ // Avoid double close.\n+ if !atomic.CompareAndSwapUint32(&c.closed, 0, 1) {\n+ return syscall.EBADF\n+ }\n+\n+ // Send the message.\n+ if err := c.client.sendRecv(&Tsetattrclunk{FID: c.fid, Valid: valid, SetAttr: attr}, &Rsetattrclunk{}); err != nil {\n+ // If an error occurred, we toss away the FID. This isn't ideal,\n+ // but I'm not sure what else makes sense in this context.\n+ log.Warningf(\"Tsetattrclunk failed, losing FID %v: %v\", c.fid, err)\n+ return err\n+ }\n+\n+ // Return the FID to the pool.\n+ c.client.fidPool.Put(uint64(c.fid))\n+ return nil\n+}\n+\n// Open implements File.Open.\nfunc (c *clientFile) Open(flags OpenFlags) (*fd.FD, QID, uint32, error) {\nif atomic.LoadUint32(&c.closed) != 0 {\n@@ -681,6 +716,3 @@ func (c *clientFile) Flush() error {\nreturn c.client.sendRecv(&Tflushf{FID: c.fid}, &Rflushf{})\n}\n-\n-// Renamed implements File.Renamed.\n-func (c *clientFile) Renamed(newDir File, newName string) {}\n" }, { "change_type": "MODIFY", "old_path": "pkg/p9/file.go", "new_path": "pkg/p9/file.go", "diff": "@@ -135,6 +135,14 @@ type File interface {\n// On the server, Close has no concurrency guarantee.\nClose() error\n+ // SetAttrClose is the equivalent of calling SetAttr() followed by Close().\n+ // This can be used to set file times before closing the file in a single\n+ // operation.\n+ //\n+ // On the server, SetAttr has a write concurrency guarantee.\n+ // On the server, Close has no concurrency guarantee.\n+ SetAttrClose(valid SetAttrMask, attr SetAttr) error\n+\n// Open must be called prior to using Read, Write or Readdir. 
Once Open\n// is called, some operations, such as Walk, will no longer work.\n//\n@@ -286,3 +294,19 @@ type DefaultWalkGetAttr struct{}\nfunc (DefaultWalkGetAttr) WalkGetAttr([]string) ([]QID, File, AttrMask, Attr, error) {\nreturn nil, nil, AttrMask{}, Attr{}, syscall.ENOSYS\n}\n+\n+// DisallowClientCalls panics if a client-only function is called.\n+type DisallowClientCalls struct{}\n+\n+// SetAttrClose implements File.SetAttrClose.\n+func (DisallowClientCalls) SetAttrClose(SetAttrMask, SetAttr) error {\n+ panic(\"SetAttrClose should not be called on the server\")\n+}\n+\n+// DisallowServerCalls panics if a server-only function is called.\n+type DisallowServerCalls struct{}\n+\n+// Renamed implements File.Renamed.\n+func (*clientFile) Renamed(File, string) {\n+ panic(\"Renamed should not be called on the client\")\n+}\n" }, { "change_type": "MODIFY", "old_path": "pkg/p9/handlers.go", "new_path": "pkg/p9/handlers.go", "diff": "@@ -123,6 +123,37 @@ func (t *Tclunk) handle(cs *connState) message {\nreturn &Rclunk{}\n}\n+func (t *Tsetattrclunk) handle(cs *connState) message {\n+ ref, ok := cs.LookupFID(t.FID)\n+ if !ok {\n+ return newErr(syscall.EBADF)\n+ }\n+ defer ref.DecRef()\n+\n+ setAttrErr := ref.safelyWrite(func() error {\n+ // We don't allow setattr on files that have been deleted.\n+ // This might be technically incorrect, as it's possible that\n+ // there were multiple links and you can still change the\n+ // corresponding inode information.\n+ if ref.isDeleted() {\n+ return syscall.EINVAL\n+ }\n+\n+ // Set the attributes.\n+ return ref.file.SetAttr(t.Valid, t.SetAttr)\n+ })\n+\n+ // Try to delete FID even in case of failure above. Since the state of the\n+ // file is unknown to the caller, it will not attempt to close the file again.\n+ if !cs.DeleteFID(t.FID) {\n+ return newErr(syscall.EBADF)\n+ }\n+ if setAttrErr != nil {\n+ return newErr(setAttrErr)\n+ }\n+ return &Rsetattrclunk{}\n+}\n+\n// handle implements handler.handle.\nfunc (t *Tremove) handle(cs *connState) message {\nref, ok := cs.LookupFID(t.FID)\n" }, { "change_type": "MODIFY", "old_path": "pkg/p9/messages.go", "new_path": "pkg/p9/messages.go", "diff": "@@ -317,6 +317,64 @@ func (r *Rclunk) String() string {\nreturn \"Rclunk{}\"\n}\n+// Tsetattrclunk is a setattr+close request.\n+type Tsetattrclunk struct {\n+ // FID is the FID to change.\n+ FID FID\n+\n+ // Valid is the set of bits which will be used.\n+ Valid SetAttrMask\n+\n+ // SetAttr is the set request.\n+ SetAttr SetAttr\n+}\n+\n+// decode implements encoder.decode.\n+func (t *Tsetattrclunk) decode(b *buffer) {\n+ t.FID = b.ReadFID()\n+ t.Valid.decode(b)\n+ t.SetAttr.decode(b)\n+}\n+\n+// encode implements encoder.encode.\n+func (t *Tsetattrclunk) encode(b *buffer) {\n+ b.WriteFID(t.FID)\n+ t.Valid.encode(b)\n+ t.SetAttr.encode(b)\n+}\n+\n+// Type implements message.Type.\n+func (*Tsetattrclunk) Type() MsgType {\n+ return MsgTsetattrclunk\n+}\n+\n+// String implements fmt.Stringer.\n+func (t *Tsetattrclunk) String() string {\n+ return fmt.Sprintf(\"Tsetattrclunk{FID: %d, Valid: %v, SetAttr: %s}\", t.FID, t.Valid, t.SetAttr)\n+}\n+\n+// Rsetattrclunk is a setattr+close response.\n+type Rsetattrclunk struct {\n+}\n+\n+// decode implements encoder.decode.\n+func (*Rsetattrclunk) decode(*buffer) {\n+}\n+\n+// encode implements encoder.encode.\n+func (*Rsetattrclunk) encode(*buffer) {\n+}\n+\n+// Type implements message.Type.\n+func (*Rsetattrclunk) Type() MsgType {\n+ return MsgRsetattrclunk\n+}\n+\n+// String implements fmt.Stringer.\n+func (r *Rsetattrclunk) 
String() string {\n+ return \"Rsetattrclunk{}\"\n+}\n+\n// Tremove is a remove request.\n//\n// This will eventually be replaced by Tunlinkat.\n@@ -2657,6 +2715,8 @@ func init() {\nmsgRegistry.register(MsgRlconnect, func() message { return &Rlconnect{} })\nmsgRegistry.register(MsgTallocate, func() message { return &Tallocate{} })\nmsgRegistry.register(MsgRallocate, func() message { return &Rallocate{} })\n+ msgRegistry.register(MsgTsetattrclunk, func() message { return &Tsetattrclunk{} })\n+ msgRegistry.register(MsgRsetattrclunk, func() message { return &Rsetattrclunk{} })\nmsgRegistry.register(MsgTchannel, func() message { return &Tchannel{} })\nmsgRegistry.register(MsgRchannel, func() message { return &Rchannel{} })\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/p9/messages_test.go", "new_path": "pkg/p9/messages_test.go", "diff": "@@ -376,6 +376,30 @@ func TestEncodeDecode(t *testing.T) {\n&Rumknod{\nRmknod{QID: QID{Type: 1}},\n},\n+ &Tsetattrclunk{\n+ FID: 1,\n+ Valid: SetAttrMask{\n+ Permissions: true,\n+ UID: true,\n+ GID: true,\n+ Size: true,\n+ ATime: true,\n+ MTime: true,\n+ CTime: true,\n+ ATimeNotSystemTime: true,\n+ MTimeNotSystemTime: true,\n+ },\n+ SetAttr: SetAttr{\n+ Permissions: 1,\n+ UID: 2,\n+ GID: 3,\n+ Size: 4,\n+ ATimeSeconds: 5,\n+ ATimeNanoSeconds: 6,\n+ MTimeSeconds: 7,\n+ MTimeNanoSeconds: 8,\n+ },\n+ },\n}\nfor _, enc := range objs {\n" }, { "change_type": "MODIFY", "old_path": "pkg/p9/p9.go", "new_path": "pkg/p9/p9.go", "diff": "@@ -316,85 +316,87 @@ type MsgType uint8\n// MsgType declarations.\nconst (\nMsgTlerror MsgType = 6\n- MsgRlerror = 7\n- MsgTstatfs = 8\n- MsgRstatfs = 9\n- MsgTlopen = 12\n- MsgRlopen = 13\n- MsgTlcreate = 14\n- MsgRlcreate = 15\n- MsgTsymlink = 16\n- MsgRsymlink = 17\n- MsgTmknod = 18\n- MsgRmknod = 19\n- MsgTrename = 20\n- MsgRrename = 21\n- MsgTreadlink = 22\n- MsgRreadlink = 23\n- MsgTgetattr = 24\n- MsgRgetattr = 25\n- MsgTsetattr = 26\n- MsgRsetattr = 27\n- MsgTlistxattr = 28\n- MsgRlistxattr = 29\n- MsgTxattrwalk = 30\n- MsgRxattrwalk = 31\n- MsgTxattrcreate = 32\n- MsgRxattrcreate = 33\n- MsgTgetxattr = 34\n- MsgRgetxattr = 35\n- MsgTsetxattr = 36\n- MsgRsetxattr = 37\n- MsgTremovexattr = 38\n- MsgRremovexattr = 39\n- MsgTreaddir = 40\n- MsgRreaddir = 41\n- MsgTfsync = 50\n- MsgRfsync = 51\n- MsgTlink = 70\n- MsgRlink = 71\n- MsgTmkdir = 72\n- MsgRmkdir = 73\n- MsgTrenameat = 74\n- MsgRrenameat = 75\n- MsgTunlinkat = 76\n- MsgRunlinkat = 77\n- MsgTversion = 100\n- MsgRversion = 101\n- MsgTauth = 102\n- MsgRauth = 103\n- MsgTattach = 104\n- MsgRattach = 105\n- MsgTflush = 108\n- MsgRflush = 109\n- MsgTwalk = 110\n- MsgRwalk = 111\n- MsgTread = 116\n- MsgRread = 117\n- MsgTwrite = 118\n- MsgRwrite = 119\n- MsgTclunk = 120\n- MsgRclunk = 121\n- MsgTremove = 122\n- MsgRremove = 123\n- MsgTflushf = 124\n- MsgRflushf = 125\n- MsgTwalkgetattr = 126\n- MsgRwalkgetattr = 127\n- MsgTucreate = 128\n- MsgRucreate = 129\n- MsgTumkdir = 130\n- MsgRumkdir = 131\n- MsgTumknod = 132\n- MsgRumknod = 133\n- MsgTusymlink = 134\n- MsgRusymlink = 135\n- MsgTlconnect = 136\n- MsgRlconnect = 137\n- MsgTallocate = 138\n- MsgRallocate = 139\n- MsgTchannel = 250\n- MsgRchannel = 251\n+ MsgRlerror MsgType = 7\n+ MsgTstatfs MsgType = 8\n+ MsgRstatfs MsgType = 9\n+ MsgTlopen MsgType = 12\n+ MsgRlopen MsgType = 13\n+ MsgTlcreate MsgType = 14\n+ MsgRlcreate MsgType = 15\n+ MsgTsymlink MsgType = 16\n+ MsgRsymlink MsgType = 17\n+ MsgTmknod MsgType = 18\n+ MsgRmknod MsgType = 19\n+ MsgTrename MsgType = 20\n+ MsgRrename MsgType = 21\n+ MsgTreadlink MsgType = 
22\n+ MsgRreadlink MsgType = 23\n+ MsgTgetattr MsgType = 24\n+ MsgRgetattr MsgType = 25\n+ MsgTsetattr MsgType = 26\n+ MsgRsetattr MsgType = 27\n+ MsgTlistxattr MsgType = 28\n+ MsgRlistxattr MsgType = 29\n+ MsgTxattrwalk MsgType = 30\n+ MsgRxattrwalk MsgType = 31\n+ MsgTxattrcreate MsgType = 32\n+ MsgRxattrcreate MsgType = 33\n+ MsgTgetxattr MsgType = 34\n+ MsgRgetxattr MsgType = 35\n+ MsgTsetxattr MsgType = 36\n+ MsgRsetxattr MsgType = 37\n+ MsgTremovexattr MsgType = 38\n+ MsgRremovexattr MsgType = 39\n+ MsgTreaddir MsgType = 40\n+ MsgRreaddir MsgType = 41\n+ MsgTfsync MsgType = 50\n+ MsgRfsync MsgType = 51\n+ MsgTlink MsgType = 70\n+ MsgRlink MsgType = 71\n+ MsgTmkdir MsgType = 72\n+ MsgRmkdir MsgType = 73\n+ MsgTrenameat MsgType = 74\n+ MsgRrenameat MsgType = 75\n+ MsgTunlinkat MsgType = 76\n+ MsgRunlinkat MsgType = 77\n+ MsgTversion MsgType = 100\n+ MsgRversion MsgType = 101\n+ MsgTauth MsgType = 102\n+ MsgRauth MsgType = 103\n+ MsgTattach MsgType = 104\n+ MsgRattach MsgType = 105\n+ MsgTflush MsgType = 108\n+ MsgRflush MsgType = 109\n+ MsgTwalk MsgType = 110\n+ MsgRwalk MsgType = 111\n+ MsgTread MsgType = 116\n+ MsgRread MsgType = 117\n+ MsgTwrite MsgType = 118\n+ MsgRwrite MsgType = 119\n+ MsgTclunk MsgType = 120\n+ MsgRclunk MsgType = 121\n+ MsgTremove MsgType = 122\n+ MsgRremove MsgType = 123\n+ MsgTflushf MsgType = 124\n+ MsgRflushf MsgType = 125\n+ MsgTwalkgetattr MsgType = 126\n+ MsgRwalkgetattr MsgType = 127\n+ MsgTucreate MsgType = 128\n+ MsgRucreate MsgType = 129\n+ MsgTumkdir MsgType = 130\n+ MsgRumkdir MsgType = 131\n+ MsgTumknod MsgType = 132\n+ MsgRumknod MsgType = 133\n+ MsgTusymlink MsgType = 134\n+ MsgRusymlink MsgType = 135\n+ MsgTlconnect MsgType = 136\n+ MsgRlconnect MsgType = 137\n+ MsgTallocate MsgType = 138\n+ MsgRallocate MsgType = 139\n+ MsgTsetattrclunk MsgType = 140\n+ MsgRsetattrclunk MsgType = 141\n+ MsgTchannel MsgType = 250\n+ MsgRchannel MsgType = 251\n)\n// QIDType represents the file type for QIDs.\n" }, { "change_type": "MODIFY", "old_path": "pkg/p9/p9test/client_test.go", "new_path": "pkg/p9/p9test/client_test.go", "diff": "@@ -1225,22 +1225,31 @@ func TestOpen(t *testing.T) {\nfunc TestClose(t *testing.T) {\ntype closeTest struct {\nname string\n- closeFn func(backend *Mock, f p9.File)\n+ closeFn func(backend *Mock, f p9.File) error\n}\ncases := []closeTest{\n{\nname: \"close\",\n- closeFn: func(_ *Mock, f p9.File) {\n- f.Close()\n+ closeFn: func(_ *Mock, f p9.File) error {\n+ return f.Close()\n},\n},\n{\nname: \"remove\",\n- closeFn: func(backend *Mock, f p9.File) {\n+ closeFn: func(backend *Mock, f p9.File) error {\n// Allow the rename call in the parent, automatically translated.\nbackend.parent.EXPECT().UnlinkAt(gomock.Any(), gomock.Any()).Times(1)\n- f.(deprecatedRemover).Remove()\n+ return f.(deprecatedRemover).Remove()\n+ },\n+ },\n+ {\n+ name: \"setAttrClose\",\n+ closeFn: func(backend *Mock, f p9.File) error {\n+ valid := p9.SetAttrMask{ATime: true}\n+ attr := p9.SetAttr{ATimeSeconds: 1, ATimeNanoSeconds: 2}\n+ backend.EXPECT().SetAttr(valid, attr).Times(1)\n+ return f.SetAttrClose(valid, attr)\n},\n},\n}\n@@ -1258,7 +1267,9 @@ func TestClose(t *testing.T) {\n_, backend, f := walkHelper(h, name, root)\n// Close via the prescribed method.\n- tc.closeFn(backend, f)\n+ if err := tc.closeFn(backend, f); err != nil {\n+ t.Fatalf(\"closeFn failed: %v\", err)\n+ }\n// Everything should fail with EBADF.\nif _, _, err := f.Walk(nil); err != syscall.EBADF {\n" }, { "change_type": "MODIFY", "old_path": "pkg/p9/version.go", "new_path": 
"pkg/p9/version.go", "diff": "@@ -26,7 +26,7 @@ const (\n//\n// Clients are expected to start requesting this version number and\n// to continuously decrement it until a Tversion request succeeds.\n- highestSupportedVersion uint32 = 11\n+ highestSupportedVersion uint32 = 12\n// lowestSupportedVersion is the lowest supported version X in a\n// version string of the format 9P2000.L.Google.X.\n@@ -173,3 +173,9 @@ func versionSupportsGetSetXattr(v uint32) bool {\nfunc versionSupportsListRemoveXattr(v uint32) bool {\nreturn v >= 11\n}\n+\n+// versionSupportsTsetattrclunk returns true if version v supports\n+// the Tsetattrclunk message.\n+func versionSupportsTsetattrclunk(v uint32) bool {\n+ return v >= 12\n+}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/gofer/gofer.go", "new_path": "pkg/sentry/fsimpl/gofer/gofer.go", "diff": "@@ -1300,30 +1300,36 @@ func (d *dentry) destroyLocked(ctx context.Context) {\nd.handleMu.Unlock()\nif !d.file.isNil() {\n+ valid := p9.SetAttrMask{}\n+ attr := p9.SetAttr{}\nif !d.isDeleted() {\n// Write dirty timestamps back to the remote filesystem.\n- atimeDirty := atomic.LoadUint32(&d.atimeDirty) != 0\n- mtimeDirty := atomic.LoadUint32(&d.mtimeDirty) != 0\n- if atimeDirty || mtimeDirty {\n+ if atomic.LoadUint32(&d.atimeDirty) != 0 {\n+ valid.ATime = true\n+ valid.ATimeNotSystemTime = true\natime := atomic.LoadInt64(&d.atime)\n+ attr.ATimeSeconds = uint64(atime / 1e9)\n+ attr.ATimeNanoSeconds = uint64(atime % 1e9)\n+ }\n+ if atomic.LoadUint32(&d.mtimeDirty) != 0 {\n+ valid.MTime = true\n+ valid.MTimeNotSystemTime = true\nmtime := atomic.LoadInt64(&d.mtime)\n- if err := d.file.setAttr(ctx, p9.SetAttrMask{\n- ATime: atimeDirty,\n- ATimeNotSystemTime: atimeDirty,\n- MTime: mtimeDirty,\n- MTimeNotSystemTime: mtimeDirty,\n- }, p9.SetAttr{\n- ATimeSeconds: uint64(atime / 1e9),\n- ATimeNanoSeconds: uint64(atime % 1e9),\n- MTimeSeconds: uint64(mtime / 1e9),\n- MTimeNanoSeconds: uint64(mtime % 1e9),\n- }); err != nil {\n- log.Warningf(\"gofer.dentry.destroyLocked: failed to write dirty timestamps back: %v\", err)\n+ attr.MTimeSeconds = uint64(mtime / 1e9)\n+ attr.MTimeNanoSeconds = uint64(mtime % 1e9)\n}\n}\n+\n+ // Check if attributes need to be changed before closing the file.\n+ if valid.ATime || valid.MTime {\n+ if err := d.file.setAttrClose(ctx, valid, attr); err != nil {\n+ log.Warningf(\"gofer.dentry.destroyLocked: failed to close file with write dirty timestamps: %v\", err)\n+ }\n+ } else if err := d.file.close(ctx); err != nil {\n+ log.Warningf(\"gofer.dentry.destroyLocked: failed to close file: %v\", err)\n}\n- d.file.close(ctx)\nd.file = p9file{}\n+\n// Remove d from the set of syncable dentries.\nd.fs.syncMu.Lock()\ndelete(d.fs.syncableDentries, d)\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/gofer/p9file.go", "new_path": "pkg/sentry/fsimpl/gofer/p9file.go", "diff": "@@ -127,6 +127,13 @@ func (f p9file) close(ctx context.Context) error {\nreturn err\n}\n+func (f p9file) setAttrClose(ctx context.Context, valid p9.SetAttrMask, attr p9.SetAttr) error {\n+ ctx.UninterruptibleSleepStart(false)\n+ err := f.file.SetAttrClose(valid, attr)\n+ ctx.UninterruptibleSleepFinish(false)\n+ return err\n+}\n+\nfunc (f p9file) open(ctx context.Context, flags p9.OpenFlags) (*fd.FD, p9.QID, uint32, error) {\nctx.UninterruptibleSleepStart(false)\nfdobj, qid, iounit, err := f.file.Open(flags)\n" }, { "change_type": "MODIFY", "old_path": "runsc/fsgofer/fsgofer.go", "new_path": "runsc/fsgofer/fsgofer.go", "diff": "@@ -181,6 +181,8 @@ func (a 
*attachPoint) makeQID(stat unix.Stat_t) p9.QID {\n// The few exceptions where it cannot be done are: utimensat on symlinks, and\n// Connect() for the socket address.\ntype localFile struct {\n+ p9.DisallowClientCalls\n+\n// attachPoint is the attachPoint that serves this localFile.\nattachPoint *attachPoint\n" }, { "change_type": "MODIFY", "old_path": "test/perf/linux/BUILD", "new_path": "test/perf/linux/BUILD", "diff": "@@ -354,3 +354,19 @@ cc_binary(\n\"//test/util:test_util\",\n],\n)\n+\n+cc_binary(\n+ name = \"open_read_close_benchmark\",\n+ testonly = 1,\n+ srcs = [\n+ \"open_read_close_benchmark.cc\",\n+ ],\n+ deps = [\n+ gbenchmark,\n+ gtest,\n+ \"//test/util:fs_util\",\n+ \"//test/util:logging\",\n+ \"//test/util:temp_path\",\n+ \"//test/util:test_main\",\n+ ],\n+)\n" }, { "change_type": "ADD", "old_path": null, "new_path": "test/perf/linux/open_read_close_benchmark.cc", "diff": "+// Copyright 2020 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+#include <fcntl.h>\n+#include <stdlib.h>\n+#include <unistd.h>\n+\n+#include <memory>\n+#include <string>\n+#include <vector>\n+\n+#include \"gtest/gtest.h\"\n+#include \"benchmark/benchmark.h\"\n+#include \"test/util/fs_util.h\"\n+#include \"test/util/logging.h\"\n+#include \"test/util/temp_path.h\"\n+\n+namespace gvisor {\n+namespace testing {\n+\n+namespace {\n+\n+void BM_OpenReadClose(benchmark::State& state) {\n+ const int size = state.range(0);\n+ std::vector<TempPath> cache;\n+ for (int i = 0; i < size; i++) {\n+ auto path = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith(\n+ GetAbsoluteTestTmpdir(), \"some content\", 0644));\n+ cache.emplace_back(std::move(path));\n+ }\n+\n+ char buf[1];\n+ unsigned int seed = 1;\n+ for (auto _ : state) {\n+ const int chosen = rand_r(&seed) % size;\n+ int fd = open(cache[chosen].path().c_str(), O_RDONLY);\n+ TEST_CHECK(fd != -1);\n+ TEST_CHECK(read(fd, buf, 1) == 1);\n+ close(fd);\n+ }\n+}\n+\n+// Gofer dentry cache is 1000 by default. Go over it to force files to be closed\n+// for real.\n+BENCHMARK(BM_OpenReadClose)->Range(1000, 16384)->UseRealTime();\n+\n+} // namespace\n+\n+} // namespace testing\n+} // namespace gvisor\n" } ]
Go
Apache License 2.0
google/gvisor
Implement setattr+clunk in 9P This is to cover the common pattern: open->read/write->close, where SetAttr needs to be called to update atime/mtime before the file is closed. Benchmark results: BM_OpenReadClose/10240 CPU setattr+clunk: 63783 ns VFS2: 68109 ns VFS1: 72507 ns Updates #1198 PiperOrigin-RevId: 329628461
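The commit recorded above removes one round trip from the open→read/write→close pattern by folding SetAttr and Clunk into a single 9P message, falling back to two messages for peers that negotiated an older protocol version. The Go sketch below only illustrates that version-gated fallback; the conn type, its methods, and the version constant are hypothetical stand-ins, not the p9 package API.

```go
package main

import "fmt"

// conn is a hypothetical client connection that negotiated a protocol
// version with its peer.
type conn struct {
	version uint32
}

// setAttr and clunk stand in for the two separate RPCs.
func (c *conn) setAttr(attr string) error { fmt.Println("Tsetattr", attr); return nil }
func (c *conn) clunk() error              { fmt.Println("Tclunk"); return nil }

// setAttrClunk stands in for the combined RPC.
func (c *conn) setAttrClunk(attr string) error { fmt.Println("Tsetattrclunk", attr); return nil }

// closeWithAttrs updates attributes and closes the file, using the combined
// message when the negotiated version supports it and falling back to two
// messages otherwise.
func (c *conn) closeWithAttrs(attr string) error {
	const combinedSince = 12 // hypothetical version that introduced the combined message
	if c.version >= combinedSince {
		return c.setAttrClunk(attr)
	}
	// Older peer: send both messages, but always attempt the close even if
	// the attribute update fails, since the caller will not retry it.
	setErr := c.setAttr(attr)
	if err := c.clunk(); err != nil {
		return err
	}
	return setErr
}

func main() {
	(&conn{version: 12}).closeWithAttrs("mtime=now") // one round trip
	(&conn{version: 11}).closeWithAttrs("mtime=now") // two round trips
}
```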
259,975
01.09.2020 21:01:33
25,200
563f28b7d56c11a418856f1defeb64e72beb0e9a
Fix statfs test for opensource.
[ { "change_type": "MODIFY", "old_path": "test/syscalls/linux/statfs.cc", "new_path": "test/syscalls/linux/statfs.cc", "diff": "@@ -43,9 +43,6 @@ TEST(StatfsTest, InternalTmpfs) {\nstruct statfs st;\nEXPECT_THAT(statfs(temp_file.path().c_str(), &st), SyscallSucceeds());\n- // Note: We could be an overlay or goferfs on some configurations.\n- EXPECT_TRUE(st.f_type == TMPFS_MAGIC || st.f_type == OVERLAYFS_SUPER_MAGIC ||\n- st.f_type == V9FS_MAGIC);\n}\nTEST(StatfsTest, InternalDevShm) {\n" } ]
Go
Apache License 2.0
google/gvisor
Fix statfs test for opensource. PiperOrigin-RevId: 329638946
259,907
02.09.2020 11:11:23
25,200
8ab08cdc01e2e4b36a050ffc3acad682aebaa7b5
[runtime tests] Exclude flaky nodejs test
[ { "change_type": "MODIFY", "old_path": "test/runtimes/exclude_nodejs12.4.0.csv", "new_path": "test/runtimes/exclude_nodejs12.4.0.csv", "diff": "@@ -11,8 +11,9 @@ parallel/test-dgram-socket-buffer-size.js,b/68847921,\nparallel/test-dns-channel-timeout.js,b/161893056,\nparallel/test-fs-access.js,,\nparallel/test-fs-watchfile.js,,Flaky - File already exists error\n-parallel/test-fs-write-stream.js,,Flaky\n-parallel/test-fs-write-stream-throw-type-error.js,b/110226209,\n+parallel/test-fs-write-stream.js,b/166819807,Flaky\n+parallel/test-fs-write-stream-double-close,b/166819807,Flaky\n+parallel/test-fs-write-stream-throw-type-error.js,b/166819807,Flaky\nparallel/test-http-writable-true-after-close.js,,Flaky - Mismatched <anonymous> function calls. Expected exactly 1 actual 2\nparallel/test-os.js,b/63997097,\nparallel/test-net-server-listen-options.js,,Flaky - EADDRINUSE\n" } ]
Go
Apache License 2.0
google/gvisor
[runtime tests] Exclude flaky nodejs test PiperOrigin-RevId: 329749191
259,907
02.09.2020 15:39:51
25,200
0ca0d8e0110d284120497569dca1b85d3ec227fe
[vfs] Fix error handling in overlayfs OpenAt. Updates
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/overlay/filesystem.go", "new_path": "pkg/sentry/fsimpl/overlay/filesystem.go", "diff": "@@ -743,6 +743,9 @@ func (fs *filesystem) OpenAt(ctx context.Context, rp *vfs.ResolvingPath, opts vf\nstart := rp.Start().Impl().(*dentry)\nif rp.Done() {\n+ if mayCreate && rp.MustBeDir() {\n+ return nil, syserror.EISDIR\n+ }\nif mustCreate {\nreturn nil, syserror.EEXIST\n}\n@@ -766,6 +769,10 @@ afterTrailingSymlink:\nif err := parent.checkPermissions(rp.Credentials(), vfs.MayExec); err != nil {\nreturn nil, err\n}\n+ // Reject attempts to open directories with O_CREAT.\n+ if mayCreate && rp.MustBeDir() {\n+ return nil, syserror.EISDIR\n+ }\n// Determine whether or not we need to create a file.\nparent.dirMu.Lock()\nchild, err := fs.stepLocked(ctx, rp, parent, false /* mayFollowSymlinks */, &ds)\n@@ -774,12 +781,11 @@ afterTrailingSymlink:\nparent.dirMu.Unlock()\nreturn fd, err\n}\n- if err != nil {\nparent.dirMu.Unlock()\n+ if err != nil {\nreturn nil, err\n}\n// Open existing child or follow symlink.\n- parent.dirMu.Unlock()\nif mustCreate {\nreturn nil, syserror.EEXIST\n}\n@@ -794,6 +800,9 @@ afterTrailingSymlink:\nstart = parent\ngoto afterTrailingSymlink\n}\n+ if rp.MustBeDir() && !child.isDir() {\n+ return nil, syserror.ENOTDIR\n+ }\nif mayWrite {\nif err := child.copyUpLocked(ctx); err != nil {\nreturn nil, err\n" } ]
Go
Apache License 2.0
google/gvisor
[vfs] Fix error handling in overlayfs OpenAt. Updates #1199 PiperOrigin-RevId: 329802274
259,907
02.09.2020 17:56:30
25,200
1fec861939e393f637979e9ee5ef1a253d06c89d
[vfs] Implement xattr for overlayfs.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/fs/inode_overlay.go", "new_path": "pkg/sentry/fs/inode_overlay.go", "diff": "@@ -16,7 +16,6 @@ package fs\nimport (\n\"fmt\"\n- \"strings\"\n\"gvisor.dev/gvisor/pkg/abi/linux\"\n\"gvisor.dev/gvisor/pkg/context\"\n@@ -539,7 +538,7 @@ func overlayGetXattr(ctx context.Context, o *overlayEntry, name string, size uin\n// Don't forward the value of the extended attribute if it would\n// unexpectedly change the behavior of a wrapping overlay layer.\n- if strings.HasPrefix(XattrOverlayPrefix, name) {\n+ if isXattrOverlay(name) {\nreturn \"\", syserror.ENODATA\n}\n@@ -555,7 +554,7 @@ func overlayGetXattr(ctx context.Context, o *overlayEntry, name string, size uin\nfunc overlaySetxattr(ctx context.Context, o *overlayEntry, d *Dirent, name, value string, flags uint32) error {\n// Don't allow changes to overlay xattrs through a setxattr syscall.\n- if strings.HasPrefix(XattrOverlayPrefix, name) {\n+ if isXattrOverlay(name) {\nreturn syserror.EPERM\n}\n@@ -578,7 +577,7 @@ func overlayListXattr(ctx context.Context, o *overlayEntry, size uint64) (map[st\nfor name := range names {\n// Same as overlayGetXattr, we shouldn't forward along\n// overlay attributes.\n- if strings.HasPrefix(XattrOverlayPrefix, name) {\n+ if isXattrOverlay(name) {\ndelete(names, name)\n}\n}\n@@ -587,7 +586,7 @@ func overlayListXattr(ctx context.Context, o *overlayEntry, size uint64) (map[st\nfunc overlayRemoveXattr(ctx context.Context, o *overlayEntry, d *Dirent, name string) error {\n// Don't allow changes to overlay xattrs through a removexattr syscall.\n- if strings.HasPrefix(XattrOverlayPrefix, name) {\n+ if isXattrOverlay(name) {\nreturn syserror.EPERM\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/gofer/gofer.go", "new_path": "pkg/sentry/fsimpl/gofer/gofer.go", "diff": "@@ -1067,6 +1067,21 @@ func (d *dentry) checkPermissions(creds *auth.Credentials, ats vfs.AccessTypes)\nreturn vfs.GenericCheckPermissions(creds, ats, linux.FileMode(atomic.LoadUint32(&d.mode)), auth.KUID(atomic.LoadUint32(&d.uid)), auth.KGID(atomic.LoadUint32(&d.gid)))\n}\n+func (d *dentry) checkXattrPermissions(creds *auth.Credentials, name string, ats vfs.AccessTypes) error {\n+ // We only support xattrs prefixed with \"user.\" (see b/148380782). Currently,\n+ // there is no need to expose any other xattrs through a gofer.\n+ if !strings.HasPrefix(name, linux.XATTR_USER_PREFIX) {\n+ return syserror.EOPNOTSUPP\n+ }\n+ mode := linux.FileMode(atomic.LoadUint32(&d.mode))\n+ kuid := auth.KUID(atomic.LoadUint32(&d.uid))\n+ kgid := auth.KGID(atomic.LoadUint32(&d.gid))\n+ if err := vfs.GenericCheckPermissions(creds, ats, mode, kuid, kgid); err != nil {\n+ return err\n+ }\n+ return vfs.CheckXattrPermissions(creds, ats, mode, kuid, name)\n+}\n+\nfunc (d *dentry) mayDelete(creds *auth.Credentials, child *dentry) error {\nreturn vfs.CheckDeleteSticky(creds, linux.FileMode(atomic.LoadUint32(&d.mode)), auth.KUID(atomic.LoadUint32(&child.uid)))\n}\n@@ -1357,8 +1372,6 @@ func (d *dentry) setDeleted() {\natomic.StoreUint32(&d.deleted, 1)\n}\n-// We only support xattrs prefixed with \"user.\" (see b/148380782). 
Currently,\n-// there is no need to expose any other xattrs through a gofer.\nfunc (d *dentry) listxattr(ctx context.Context, creds *auth.Credentials, size uint64) ([]string, error) {\nif d.file.isNil() || !d.userXattrSupported() {\nreturn nil, nil\n@@ -1369,6 +1382,7 @@ func (d *dentry) listxattr(ctx context.Context, creds *auth.Credentials, size ui\n}\nxattrs := make([]string, 0, len(xattrMap))\nfor x := range xattrMap {\n+ // We only support xattrs in the user.* namespace.\nif strings.HasPrefix(x, linux.XATTR_USER_PREFIX) {\nxattrs = append(xattrs, x)\n}\n@@ -1380,15 +1394,9 @@ func (d *dentry) getxattr(ctx context.Context, creds *auth.Credentials, opts *vf\nif d.file.isNil() {\nreturn \"\", syserror.ENODATA\n}\n- if err := d.checkPermissions(creds, vfs.MayRead); err != nil {\n+ if err := d.checkXattrPermissions(creds, opts.Name, vfs.MayRead); err != nil {\nreturn \"\", err\n}\n- if !strings.HasPrefix(opts.Name, linux.XATTR_USER_PREFIX) {\n- return \"\", syserror.EOPNOTSUPP\n- }\n- if !d.userXattrSupported() {\n- return \"\", syserror.ENODATA\n- }\nreturn d.file.getXattr(ctx, opts.Name, opts.Size)\n}\n@@ -1396,15 +1404,9 @@ func (d *dentry) setxattr(ctx context.Context, creds *auth.Credentials, opts *vf\nif d.file.isNil() {\nreturn syserror.EPERM\n}\n- if err := d.checkPermissions(creds, vfs.MayWrite); err != nil {\n+ if err := d.checkXattrPermissions(creds, opts.Name, vfs.MayWrite); err != nil {\nreturn err\n}\n- if !strings.HasPrefix(opts.Name, linux.XATTR_USER_PREFIX) {\n- return syserror.EOPNOTSUPP\n- }\n- if !d.userXattrSupported() {\n- return syserror.EPERM\n- }\nreturn d.file.setXattr(ctx, opts.Name, opts.Value, opts.Flags)\n}\n@@ -1412,15 +1414,9 @@ func (d *dentry) removexattr(ctx context.Context, creds *auth.Credentials, name\nif d.file.isNil() {\nreturn syserror.EPERM\n}\n- if err := d.checkPermissions(creds, vfs.MayWrite); err != nil {\n+ if err := d.checkXattrPermissions(creds, name, vfs.MayWrite); err != nil {\nreturn err\n}\n- if !strings.HasPrefix(name, linux.XATTR_USER_PREFIX) {\n- return syserror.EOPNOTSUPP\n- }\n- if !d.userXattrSupported() {\n- return syserror.EPERM\n- }\nreturn d.file.removeXattr(ctx, name)\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/overlay/copy_up.go", "new_path": "pkg/sentry/fsimpl/overlay/copy_up.go", "diff": "@@ -91,6 +91,10 @@ func (d *dentry) copyUpLocked(ctx context.Context) error {\nif err != nil {\nctx.Warningf(\"Unrecoverable overlayfs inconsistency: failed to delete upper layer file after copy-up error: %v\", err)\n}\n+ if d.upperVD.Ok() {\n+ d.upperVD.DecRef(ctx)\n+ d.upperVD = vfs.VirtualDentry{}\n+ }\n}\nswitch ftype {\ncase linux.S_IFREG:\n@@ -234,7 +238,10 @@ func (d *dentry) copyUpLocked(ctx context.Context) error {\npanic(fmt.Sprintf(\"unexpected file type %o\", ftype))\n}\n- // TODO(gvisor.dev/issue/1199): copy up xattrs\n+ if err := d.copyXattrsLocked(ctx); err != nil {\n+ cleanupUndoCopyUp()\n+ return err\n+ }\n// Update the dentry's device and inode numbers (except for directories,\n// for which these remain overlay-assigned).\n@@ -246,14 +253,10 @@ func (d *dentry) copyUpLocked(ctx context.Context) error {\nMask: linux.STATX_INO,\n})\nif err != nil {\n- d.upperVD.DecRef(ctx)\n- d.upperVD = vfs.VirtualDentry{}\ncleanupUndoCopyUp()\nreturn err\n}\nif upperStat.Mask&linux.STATX_INO == 0 {\n- d.upperVD.DecRef(ctx)\n- d.upperVD = vfs.VirtualDentry{}\ncleanupUndoCopyUp()\nreturn syserror.EREMOTE\n}\n@@ -265,3 +268,42 @@ func (d *dentry) copyUpLocked(ctx context.Context) error 
{\natomic.StoreUint32(&d.copiedUp, 1)\nreturn nil\n}\n+\n+// copyXattrsLocked copies a subset of lower's extended attributes to upper.\n+// Attributes that configure an overlay in the lower are not copied up.\n+//\n+// Preconditions: d.copyMu must be locked for writing.\n+func (d *dentry) copyXattrsLocked(ctx context.Context) error {\n+ vfsObj := d.fs.vfsfs.VirtualFilesystem()\n+ lowerPop := &vfs.PathOperation{Root: d.lowerVDs[0], Start: d.lowerVDs[0]}\n+ upperPop := &vfs.PathOperation{Root: d.upperVD, Start: d.upperVD}\n+\n+ lowerXattrs, err := vfsObj.ListxattrAt(ctx, d.fs.creds, lowerPop, 0)\n+ if err != nil {\n+ if err == syserror.EOPNOTSUPP {\n+ // There are no guarantees as to the contents of lowerXattrs.\n+ return nil\n+ }\n+ ctx.Warningf(\"failed to copy up xattrs because ListxattrAt failed: %v\", err)\n+ return err\n+ }\n+\n+ for _, name := range lowerXattrs {\n+ // Do not copy up overlay attributes.\n+ if isOverlayXattr(name) {\n+ continue\n+ }\n+\n+ value, err := vfsObj.GetxattrAt(ctx, d.fs.creds, lowerPop, &vfs.GetxattrOptions{Name: name, Size: 0})\n+ if err != nil {\n+ ctx.Warningf(\"failed to copy up xattrs because GetxattrAt failed: %v\", err)\n+ return err\n+ }\n+\n+ if err := vfsObj.SetxattrAt(ctx, d.fs.creds, upperPop, &vfs.SetxattrOptions{Name: name, Value: value}); err != nil {\n+ ctx.Warningf(\"failed to copy up xattrs because SetxattrAt failed: %v\", err)\n+ return err\n+ }\n+ }\n+ return nil\n+}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/overlay/filesystem.go", "new_path": "pkg/sentry/fsimpl/overlay/filesystem.go", "diff": "package overlay\nimport (\n+ \"strings\"\n\"sync/atomic\"\n\"gvisor.dev/gvisor/pkg/abi/linux\"\n@@ -27,10 +28,15 @@ import (\n\"gvisor.dev/gvisor/pkg/syserror\"\n)\n+// _OVL_XATTR_PREFIX is an extended attribute key prefix to identify overlayfs\n+// attributes.\n+// Linux: fs/overlayfs/overlayfs.h:OVL_XATTR_PREFIX\n+const _OVL_XATTR_PREFIX = linux.XATTR_TRUSTED_PREFIX + \"overlay.\"\n+\n// _OVL_XATTR_OPAQUE is an extended attribute key whose value is set to \"y\" for\n// opaque directories.\n// Linux: fs/overlayfs/overlayfs.h:OVL_XATTR_OPAQUE\n-const _OVL_XATTR_OPAQUE = linux.XATTR_TRUSTED_PREFIX + \"overlay.opaque\"\n+const _OVL_XATTR_OPAQUE = _OVL_XATTR_PREFIX + \"opaque\"\nfunc isWhiteout(stat *linux.Statx) bool {\nreturn stat.Mode&linux.S_IFMT == linux.S_IFCHR && stat.RdevMajor == 0 && stat.RdevMinor == 0\n@@ -1347,18 +1353,42 @@ func (fs *filesystem) UnlinkAt(ctx context.Context, rp *vfs.ResolvingPath) error\nreturn nil\n}\n+// isOverlayXattr returns whether the given extended attribute configures the\n+// overlay.\n+func isOverlayXattr(name string) bool {\n+ return strings.HasPrefix(name, _OVL_XATTR_PREFIX)\n+}\n+\n// ListxattrAt implements vfs.FilesystemImpl.ListxattrAt.\nfunc (fs *filesystem) ListxattrAt(ctx context.Context, rp *vfs.ResolvingPath, size uint64) ([]string, error) {\nvar ds *[]*dentry\nfs.renameMu.RLock()\ndefer fs.renameMuRUnlockAndCheckDrop(ctx, &ds)\n- _, err := fs.resolveLocked(ctx, rp, &ds)\n+ d, err := fs.resolveLocked(ctx, rp, &ds)\nif err != nil {\nreturn nil, err\n}\n- // TODO(gvisor.dev/issue/1199): Linux overlayfs actually allows listxattr,\n- // but not any other xattr syscalls. 
For now we just reject all of them.\n- return nil, syserror.ENOTSUP\n+\n+ return fs.listXattr(ctx, d, size)\n+}\n+\n+func (fs *filesystem) listXattr(ctx context.Context, d *dentry, size uint64) ([]string, error) {\n+ vfsObj := d.fs.vfsfs.VirtualFilesystem()\n+ top := d.topLayer()\n+ names, err := vfsObj.ListxattrAt(ctx, fs.creds, &vfs.PathOperation{Root: top, Start: top}, size)\n+ if err != nil {\n+ return nil, err\n+ }\n+\n+ // Filter out all overlay attributes.\n+ n := 0\n+ for _, name := range names {\n+ if !isOverlayXattr(name) {\n+ names[n] = name\n+ n++\n+ }\n+ }\n+ return names[:n], err\n}\n// GetxattrAt implements vfs.FilesystemImpl.GetxattrAt.\n@@ -1366,11 +1396,29 @@ func (fs *filesystem) GetxattrAt(ctx context.Context, rp *vfs.ResolvingPath, opt\nvar ds *[]*dentry\nfs.renameMu.RLock()\ndefer fs.renameMuRUnlockAndCheckDrop(ctx, &ds)\n- _, err := fs.resolveLocked(ctx, rp, &ds)\n+ d, err := fs.resolveLocked(ctx, rp, &ds)\nif err != nil {\nreturn \"\", err\n}\n- return \"\", syserror.ENOTSUP\n+\n+ return fs.getXattr(ctx, d, rp.Credentials(), &opts)\n+}\n+\n+func (fs *filesystem) getXattr(ctx context.Context, d *dentry, creds *auth.Credentials, opts *vfs.GetxattrOptions) (string, error) {\n+ if err := d.checkXattrPermissions(creds, opts.Name, vfs.MayRead); err != nil {\n+ return \"\", err\n+ }\n+\n+ // Return EOPNOTSUPP when fetching an overlay attribute.\n+ // See fs/overlayfs/super.c:ovl_own_xattr_get().\n+ if isOverlayXattr(opts.Name) {\n+ return \"\", syserror.EOPNOTSUPP\n+ }\n+\n+ // Analogous to fs/overlayfs/super.c:ovl_other_xattr_get().\n+ vfsObj := d.fs.vfsfs.VirtualFilesystem()\n+ top := d.topLayer()\n+ return vfsObj.GetxattrAt(ctx, fs.creds, &vfs.PathOperation{Root: top, Start: top}, opts)\n}\n// SetxattrAt implements vfs.FilesystemImpl.SetxattrAt.\n@@ -1378,11 +1426,36 @@ func (fs *filesystem) SetxattrAt(ctx context.Context, rp *vfs.ResolvingPath, opt\nvar ds *[]*dentry\nfs.renameMu.RLock()\ndefer fs.renameMuRUnlockAndCheckDrop(ctx, &ds)\n- _, err := fs.resolveLocked(ctx, rp, &ds)\n+ d, err := fs.resolveLocked(ctx, rp, &ds)\nif err != nil {\nreturn err\n}\n- return syserror.ENOTSUP\n+\n+ return fs.setXattrLocked(ctx, d, rp.Mount(), rp.Credentials(), &opts)\n+}\n+\n+// Precondition: fs.renameMu must be locked.\n+func (fs *filesystem) setXattrLocked(ctx context.Context, d *dentry, mnt *vfs.Mount, creds *auth.Credentials, opts *vfs.SetxattrOptions) error {\n+ if err := d.checkXattrPermissions(creds, opts.Name, vfs.MayWrite); err != nil {\n+ return err\n+ }\n+\n+ // Return EOPNOTSUPP when setting an overlay attribute.\n+ // See fs/overlayfs/super.c:ovl_own_xattr_set().\n+ if isOverlayXattr(opts.Name) {\n+ return syserror.EOPNOTSUPP\n+ }\n+\n+ // Analogous to fs/overlayfs/super.c:ovl_other_xattr_set().\n+ if err := mnt.CheckBeginWrite(); err != nil {\n+ return err\n+ }\n+ defer mnt.EndWrite()\n+ if err := d.copyUpLocked(ctx); err != nil {\n+ return err\n+ }\n+ vfsObj := d.fs.vfsfs.VirtualFilesystem()\n+ return vfsObj.SetxattrAt(ctx, fs.creds, &vfs.PathOperation{Root: d.upperVD, Start: d.upperVD}, opts)\n}\n// RemovexattrAt implements vfs.FilesystemImpl.RemovexattrAt.\n@@ -1390,11 +1463,36 @@ func (fs *filesystem) RemovexattrAt(ctx context.Context, rp *vfs.ResolvingPath,\nvar ds *[]*dentry\nfs.renameMu.RLock()\ndefer fs.renameMuRUnlockAndCheckDrop(ctx, &ds)\n- _, err := fs.resolveLocked(ctx, rp, &ds)\n+ d, err := fs.resolveLocked(ctx, rp, &ds)\nif err != nil {\nreturn err\n}\n- return syserror.ENOTSUP\n+\n+ return fs.removeXattrLocked(ctx, d, rp.Mount(), rp.Credentials(), 
name)\n+}\n+\n+// Precondition: fs.renameMu must be locked.\n+func (fs *filesystem) removeXattrLocked(ctx context.Context, d *dentry, mnt *vfs.Mount, creds *auth.Credentials, name string) error {\n+ if err := d.checkXattrPermissions(creds, name, vfs.MayWrite); err != nil {\n+ return err\n+ }\n+\n+ // Like SetxattrAt, return EOPNOTSUPP when removing an overlay attribute.\n+ // Linux passes the remove request to xattr_handler->set.\n+ // See fs/xattr.c:vfs_removexattr().\n+ if isOverlayXattr(name) {\n+ return syserror.EOPNOTSUPP\n+ }\n+\n+ if err := mnt.CheckBeginWrite(); err != nil {\n+ return err\n+ }\n+ defer mnt.EndWrite()\n+ if err := d.copyUpLocked(ctx); err != nil {\n+ return err\n+ }\n+ vfsObj := d.fs.vfsfs.VirtualFilesystem()\n+ return vfsObj.RemovexattrAt(ctx, fs.creds, &vfs.PathOperation{Root: d.upperVD, Start: d.upperVD}, name)\n}\n// PrependPath implements vfs.FilesystemImpl.PrependPath.\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/overlay/overlay.go", "new_path": "pkg/sentry/fsimpl/overlay/overlay.go", "diff": "@@ -570,6 +570,16 @@ func (d *dentry) checkPermissions(creds *auth.Credentials, ats vfs.AccessTypes)\nreturn vfs.GenericCheckPermissions(creds, ats, linux.FileMode(atomic.LoadUint32(&d.mode)), auth.KUID(atomic.LoadUint32(&d.uid)), auth.KGID(atomic.LoadUint32(&d.gid)))\n}\n+func (d *dentry) checkXattrPermissions(creds *auth.Credentials, name string, ats vfs.AccessTypes) error {\n+ mode := linux.FileMode(atomic.LoadUint32(&d.mode))\n+ kuid := auth.KUID(atomic.LoadUint32(&d.uid))\n+ kgid := auth.KGID(atomic.LoadUint32(&d.gid))\n+ if err := vfs.GenericCheckPermissions(creds, ats, mode, kuid, kgid); err != nil {\n+ return err\n+ }\n+ return vfs.CheckXattrPermissions(creds, ats, mode, kuid, name)\n+}\n+\n// statInternalMask is the set of stat fields that is set by\n// dentry.statInternalTo().\nconst statInternalMask = linux.STATX_TYPE | linux.STATX_MODE | linux.STATX_UID | linux.STATX_GID | linux.STATX_INO\n@@ -622,6 +632,32 @@ func (fd *fileDescription) dentry() *dentry {\nreturn fd.vfsfd.Dentry().Impl().(*dentry)\n}\n+// Listxattr implements vfs.FileDescriptionImpl.Listxattr.\n+func (fd *fileDescription) Listxattr(ctx context.Context, size uint64) ([]string, error) {\n+ return fd.filesystem().listXattr(ctx, fd.dentry(), size)\n+}\n+\n+// Getxattr implements vfs.FileDescriptionImpl.Getxattr.\n+func (fd *fileDescription) Getxattr(ctx context.Context, opts vfs.GetxattrOptions) (string, error) {\n+ return fd.filesystem().getXattr(ctx, fd.dentry(), auth.CredentialsFromContext(ctx), &opts)\n+}\n+\n+// Setxattr implements vfs.FileDescriptionImpl.Setxattr.\n+func (fd *fileDescription) Setxattr(ctx context.Context, opts vfs.SetxattrOptions) error {\n+ fs := fd.filesystem()\n+ fs.renameMu.RLock()\n+ defer fs.renameMu.RUnlock()\n+ return fs.setXattrLocked(ctx, fd.dentry(), fd.vfsfd.Mount(), auth.CredentialsFromContext(ctx), &opts)\n+}\n+\n+// Removexattr implements vfs.FileDescriptionImpl.Removexattr.\n+func (fd *fileDescription) Removexattr(ctx context.Context, name string) error {\n+ fs := fd.filesystem()\n+ fs.renameMu.RLock()\n+ defer fs.renameMu.RUnlock()\n+ return fs.removeXattrLocked(ctx, fd.dentry(), fd.vfsfd.Mount(), auth.CredentialsFromContext(ctx), name)\n+}\n+\n// LockPOSIX implements vfs.FileDescriptionImpl.LockPOSIX.\nfunc (fd *fileDescription) LockPOSIX(ctx context.Context, uid fslock.UniqueID, t fslock.LockType, start, length uint64, whence int16, block fslock.Blocker) error {\nreturn fd.Locks().LockPOSIX(ctx, &fd.vfsfd, uid, t, start, length, whence, 
block)\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/tmpfs/tmpfs.go", "new_path": "pkg/sentry/fsimpl/tmpfs/tmpfs.go", "diff": "@@ -652,44 +652,18 @@ func (i *inode) removexattr(creds *auth.Credentials, name string) error {\n}\nfunc (i *inode) checkXattrPermissions(creds *auth.Credentials, name string, ats vfs.AccessTypes) error {\n- switch {\n- case ats&vfs.MayRead == vfs.MayRead:\n- if err := i.checkPermissions(creds, vfs.MayRead); err != nil {\n- return err\n+ // We currently only support extended attributes in the user.* and\n+ // trusted.* namespaces. See b/148380782.\n+ if !strings.HasPrefix(name, linux.XATTR_USER_PREFIX) && !strings.HasPrefix(name, linux.XATTR_TRUSTED_PREFIX) {\n+ return syserror.EOPNOTSUPP\n}\n- case ats&vfs.MayWrite == vfs.MayWrite:\n- if err := i.checkPermissions(creds, vfs.MayWrite); err != nil {\n+ mode := linux.FileMode(atomic.LoadUint32(&i.mode))\n+ kuid := auth.KUID(atomic.LoadUint32(&i.uid))\n+ kgid := auth.KGID(atomic.LoadUint32(&i.gid))\n+ if err := vfs.GenericCheckPermissions(creds, ats, mode, kuid, kgid); err != nil {\nreturn err\n}\n- default:\n- panic(fmt.Sprintf(\"checkXattrPermissions called with impossible AccessTypes: %v\", ats))\n- }\n-\n- switch {\n- case strings.HasPrefix(name, linux.XATTR_TRUSTED_PREFIX):\n- // The trusted.* namespace can only be accessed by privileged\n- // users.\n- if creds.HasCapability(linux.CAP_SYS_ADMIN) {\n- return nil\n- }\n- if ats&vfs.MayWrite == vfs.MayWrite {\n- return syserror.EPERM\n- }\n- return syserror.ENODATA\n- case strings.HasPrefix(name, linux.XATTR_USER_PREFIX):\n- // Extended attributes in the user.* namespace are only\n- // supported for regular files and directories.\n- filetype := linux.S_IFMT & atomic.LoadUint32(&i.mode)\n- if filetype == linux.S_IFREG || filetype == linux.S_IFDIR {\n- return nil\n- }\n- if ats&vfs.MayWrite == vfs.MayWrite {\n- return syserror.EPERM\n- }\n- return syserror.ENODATA\n-\n- }\n- return syserror.EOPNOTSUPP\n+ return vfs.CheckXattrPermissions(creds, ats, mode, kuid, name)\n}\n// fileDescription is embedded by tmpfs implementations of\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/vfs/permissions.go", "new_path": "pkg/sentry/vfs/permissions.go", "diff": "@@ -16,6 +16,7 @@ package vfs\nimport (\n\"math\"\n+ \"strings\"\n\"gvisor.dev/gvisor/pkg/abi/linux\"\n\"gvisor.dev/gvisor/pkg/context\"\n@@ -284,3 +285,40 @@ func CheckLimit(ctx context.Context, offset, size int64) (int64, error) {\n}\nreturn size, nil\n}\n+\n+// CheckXattrPermissions checks permissions for extended attribute access.\n+// This is analogous to fs/xattr.c:xattr_permission(). Some key differences:\n+// * Does not check for read-only filesystem property.\n+// * Does not check inode immutability or append only mode. In both cases EPERM\n+// must be returned by filesystem implementations.\n+// * Does not do inode permission checks. 
Filesystem implementations should\n+// handle inode permission checks as they may differ across implementations.\n+func CheckXattrPermissions(creds *auth.Credentials, ats AccessTypes, mode linux.FileMode, kuid auth.KUID, name string) error {\n+ switch {\n+ case strings.HasPrefix(name, linux.XATTR_TRUSTED_PREFIX):\n+ // The trusted.* namespace can only be accessed by privileged\n+ // users.\n+ if creds.HasCapability(linux.CAP_SYS_ADMIN) {\n+ return nil\n+ }\n+ if ats.MayWrite() {\n+ return syserror.EPERM\n+ }\n+ return syserror.ENODATA\n+ case strings.HasPrefix(name, linux.XATTR_USER_PREFIX):\n+ // In the user.* namespace, only regular files and directories can have\n+ // extended attributes. For sticky directories, only the owner and\n+ // privileged users can write attributes.\n+ filetype := mode.FileType()\n+ if filetype != linux.ModeRegular && filetype != linux.ModeDirectory {\n+ if ats.MayWrite() {\n+ return syserror.EPERM\n+ }\n+ return syserror.ENODATA\n+ }\n+ if filetype == linux.ModeDirectory && mode&linux.ModeSticky != 0 && ats.MayWrite() && !CanActAsOwner(creds, kuid) {\n+ return syserror.EPERM\n+ }\n+ }\n+ return nil\n+}\n" } ]
Go
Apache License 2.0
google/gvisor
[vfs] Implement xattr for overlayfs. PiperOrigin-RevId: 329825497
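The overlayfs change above lists and copies up extended attributes while hiding anything in the overlay-internal trusted.overlay. namespace. The sketch below shows that prefix-filtering idea on plain string maps under assumed names; it does not use the VFS types from the diff.

```go
package main

import (
	"fmt"
	"strings"
)

// overlayXattrPrefix marks attributes that configure the overlay itself and
// must never be exposed to, or copied on behalf of, the user.
const overlayXattrPrefix = "trusted.overlay."

// copyUserXattrs copies every attribute from lower to upper except the
// overlay-internal ones, mirroring the copy-up filtering in the diff.
func copyUserXattrs(lower map[string]string) map[string]string {
	upper := make(map[string]string, len(lower))
	for name, value := range lower {
		if strings.HasPrefix(name, overlayXattrPrefix) {
			continue // internal to the overlay; do not copy up
		}
		upper[name] = value
	}
	return upper
}

func main() {
	lower := map[string]string{
		"user.comment":           "hello",
		"trusted.overlay.opaque": "y",
	}
	fmt.Println(copyUserXattrs(lower)) // only user.comment survives
}
```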
259,884
02.09.2020 19:37:06
25,200
a8c174c04751799514ec2e89111e7d03ddf68b70
Update version in cni tutorial Update the cniVersion used in the CNI tutorial so that it works with containerd 1.2. Containerd 1.2 includes a version of the cri plugin (release/1.2) that, in turn, includes a version of the cni library (0.6.0) that only supports up to 0.3.1.
[ { "change_type": "MODIFY", "old_path": "g3doc/user_guide/tutorials/cni.md", "new_path": "g3doc/user_guide/tutorials/cni.md", "diff": "@@ -47,7 +47,7 @@ sudo mkdir -p /etc/cni/net.d\nsudo sh -c 'cat > /etc/cni/net.d/10-bridge.conf << EOF\n{\n- \"cniVersion\": \"0.4.0\",\n+ \"cniVersion\": \"0.3.1\",\n\"name\": \"mynet\",\n\"type\": \"bridge\",\n\"bridge\": \"cni0\",\n@@ -65,7 +65,7 @@ EOF'\nsudo sh -c 'cat > /etc/cni/net.d/99-loopback.conf << EOF\n{\n- \"cniVersion\": \"0.4.0\",\n+ \"cniVersion\": \"0.3.1\",\n\"name\": \"lo\",\n\"type\": \"loopback\"\n}\n" } ]
Go
Apache License 2.0
google/gvisor
Update version in cni tutorial Update the cniVersion used in the CNI tutorial so that it works with containerd 1.2. Containerd 1.2 includes a version of the cri plugin (release/1.2) that, in turn, includes a version of the cni library (0.6.0) that only supports up to 0.3.1. https://github.com/containernetworking/cni/blob/v0.6.0/pkg/version/version.go#L38 PiperOrigin-RevId: 329837188
259,962
03.09.2020 17:34:56
25,200
805861ca37c0a6302ebc91f25dfa99618da137e2
Use fine-grained mutex for stack.cleanupEndpoints. stack.cleanupEndpoints is protected by the stack.mu, but that can cause contention as the stack mutex is already acquired in a lot of hot paths during new endpoint creation/cleanup, etc. Moving this to a fine-grained mutex should reduce contention on the stack.mu.
[ { "change_type": "MODIFY", "old_path": "pkg/tcpip/stack/stack.go", "new_path": "pkg/tcpip/stack/stack.go", "diff": "@@ -418,6 +418,9 @@ type Stack struct {\nmu sync.RWMutex\nnics map[tcpip.NICID]*NIC\nforwarding bool\n+\n+ // cleanupEndpointsMu protects cleanupEndpoints.\n+ cleanupEndpointsMu sync.Mutex\ncleanupEndpoints map[TransportEndpoint]struct{}\n// route is the route table passed in by the user via SetRouteTable(),\n@@ -1528,10 +1531,9 @@ func (s *Stack) UnregisterTransportEndpoint(nicID tcpip.NICID, netProtos []tcpip\n// StartTransportEndpointCleanup removes the endpoint with the given id from\n// the stack transport dispatcher. It also transitions it to the cleanup stage.\nfunc (s *Stack) StartTransportEndpointCleanup(nicID tcpip.NICID, netProtos []tcpip.NetworkProtocolNumber, protocol tcpip.TransportProtocolNumber, id TransportEndpointID, ep TransportEndpoint, flags ports.Flags, bindToDevice tcpip.NICID) {\n- s.mu.Lock()\n- defer s.mu.Unlock()\n-\n+ s.cleanupEndpointsMu.Lock()\ns.cleanupEndpoints[ep] = struct{}{}\n+ s.cleanupEndpointsMu.Unlock()\ns.demux.unregisterEndpoint(netProtos, protocol, id, ep, flags, bindToDevice)\n}\n@@ -1539,9 +1541,9 @@ func (s *Stack) StartTransportEndpointCleanup(nicID tcpip.NICID, netProtos []tcp\n// CompleteTransportEndpointCleanup removes the endpoint from the cleanup\n// stage.\nfunc (s *Stack) CompleteTransportEndpointCleanup(ep TransportEndpoint) {\n- s.mu.Lock()\n+ s.cleanupEndpointsMu.Lock()\ndelete(s.cleanupEndpoints, ep)\n- s.mu.Unlock()\n+ s.cleanupEndpointsMu.Unlock()\n}\n// FindTransportEndpoint finds an endpoint that most closely matches the provided\n@@ -1584,23 +1586,23 @@ func (s *Stack) RegisteredEndpoints() []TransportEndpoint {\n// CleanupEndpoints returns endpoints currently in the cleanup state.\nfunc (s *Stack) CleanupEndpoints() []TransportEndpoint {\n- s.mu.Lock()\n+ s.cleanupEndpointsMu.Lock()\nes := make([]TransportEndpoint, 0, len(s.cleanupEndpoints))\nfor e := range s.cleanupEndpoints {\nes = append(es, e)\n}\n- s.mu.Unlock()\n+ s.cleanupEndpointsMu.Unlock()\nreturn es\n}\n// RestoreCleanupEndpoints adds endpoints to cleanup tracking. This is useful\n// for restoring a stack after a save.\nfunc (s *Stack) RestoreCleanupEndpoints(es []TransportEndpoint) {\n- s.mu.Lock()\n+ s.cleanupEndpointsMu.Lock()\nfor _, e := range es {\ns.cleanupEndpoints[e] = struct{}{}\n}\n- s.mu.Unlock()\n+ s.cleanupEndpointsMu.Unlock()\n}\n// Close closes all currently registered transport endpoints.\n" } ]
Go
Apache License 2.0
google/gvisor
Use fine-grained mutex for stack.cleanupEndpoints. stack.cleanupEndpoints is protected by the stack.mu, but that can cause contention as the stack mutex is already acquired in a lot of hot paths during new endpoint creation/cleanup, etc. Moving this to a fine-grained mutex should reduce contention on the stack.mu. PiperOrigin-RevId: 330026151
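The commit above splits the cleanup bookkeeping out from under the stack-wide mutex so that adding and removing cleanup entries no longer contends with hot paths holding the main lock. The following sketch illustrates that lock-splitting pattern with hypothetical stack and endpoint types; it is not the netstack code.

```go
package main

import (
	"fmt"
	"sync"
)

type endpoint struct{ id int }

// stack keeps its hot state behind a coarse RWMutex and moves the rarely
// related cleanup set behind its own small mutex, so updating the set does
// not serialize against the hot paths that hold mu.
type stack struct {
	mu   sync.RWMutex
	nics map[int]string // hot state, guarded by mu

	cleanupMu        sync.Mutex
	cleanupEndpoints map[*endpoint]struct{} // guarded by cleanupMu only
}

func (s *stack) startCleanup(ep *endpoint) {
	s.cleanupMu.Lock()
	s.cleanupEndpoints[ep] = struct{}{}
	s.cleanupMu.Unlock()
}

func (s *stack) finishCleanup(ep *endpoint) {
	s.cleanupMu.Lock()
	delete(s.cleanupEndpoints, ep)
	s.cleanupMu.Unlock()
}

func main() {
	s := &stack{
		nics:             map[int]string{1: "eth0"},
		cleanupEndpoints: make(map[*endpoint]struct{}),
	}
	ep := &endpoint{id: 7}
	s.startCleanup(ep)
	s.finishCleanup(ep)
	fmt.Println("pending cleanups:", len(s.cleanupEndpoints))
}
```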
259,907
03.09.2020 21:42:49
25,200
b6d6a120d07ec0d1b3c0a3abc9e78b6e0bbd3966
Fix the release workflow.
[ { "change_type": "MODIFY", "old_path": "tools/make_apt.sh", "new_path": "tools/make_apt.sh", "diff": "@@ -54,18 +54,21 @@ declare -r release=\"${root}/dists/${suite}\"\nmkdir -p \"${release}\"\n# Create a temporary keyring, and ensure it is cleaned up.\n+# Using separate homedir allows us to install apt repositories multiple times\n+# using the same key. This is a limitation in GnuPG pre-2.1.\ndeclare -r keyring=$(mktemp /tmp/keyringXXXXXX.gpg)\n+declare -r homedir=$(mktemp -d /tmp/homedirXXXXXX)\ncleanup() {\n- rm -f \"${keyring}\"\n+ rm -rf \"${keyring}\" \"${homedir}\"\n}\ntrap cleanup EXIT\n# We attempt the import twice because the first one will fail if the public key\n# is not found. This isn't actually a failure for us, because we don't require\n-# the public (this may be stored separately). The second import will succeed\n+# the public key (this may be stored separately). The second import will succeed\n# because, in reality, the first import succeeded and it's a no-op.\n-gpg --no-default-keyring --keyring \"${keyring}\" --secret-keyring \"${keyring}\" --import \"${private_key}\" || \\\n- gpg --no-default-keyring --keyring \"${keyring}\" --secret-keyring \"${keyring}\" --import \"${private_key}\"\n+gpg --no-default-keyring --keyring \"${keyring}\" --homedir \"${homedir}\" --import \"${private_key}\" || \\\n+ gpg --no-default-keyring --keyring \"${keyring}\" --homedir \"${homedir}\" --import \"${private_key}\"\n# Copy the packages into the root.\nfor pkg in \"$@\"; do\n" } ]
Go
Apache License 2.0
google/gvisor
Fix the release workflow. PiperOrigin-RevId: 330049242
259,860
03.09.2020 23:29:13
25,200
c564293b65eefcf1342023694e4aae82314de014
Adjust input file offset when sendfile only completes a partial write. Fixes
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/syscalls/linux/vfs2/BUILD", "new_path": "pkg/sentry/syscalls/linux/vfs2/BUILD", "diff": "@@ -44,6 +44,7 @@ go_library(\n\"//pkg/context\",\n\"//pkg/fspath\",\n\"//pkg/gohacks\",\n+ \"//pkg/log\",\n\"//pkg/sentry/arch\",\n\"//pkg/sentry/fs/lock\",\n\"//pkg/sentry/fsbridge\",\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/syscalls/linux/vfs2/splice.go", "new_path": "pkg/sentry/syscalls/linux/vfs2/splice.go", "diff": "@@ -18,6 +18,7 @@ import (\n\"io\"\n\"gvisor.dev/gvisor/pkg/abi/linux\"\n+ \"gvisor.dev/gvisor/pkg/log\"\n\"gvisor.dev/gvisor/pkg/sentry/arch\"\n\"gvisor.dev/gvisor/pkg/sentry/kernel\"\n\"gvisor.dev/gvisor/pkg/sentry/kernel/pipe\"\n@@ -390,16 +391,21 @@ func Sendfile(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc\nerr = dw.waitForOut(t)\n}\nif err != nil {\n- // We didn't complete the write. Only\n- // report the bytes that were actually\n- // written, and rewind the offset.\n+ // We didn't complete the write. Only report the bytes that were actually\n+ // written, and rewind offsets as needed.\nnotWritten := int64(len(wbuf))\nn -= notWritten\n- if offset != -1 {\n- // TODO(gvisor.dev/issue/3779): The inFile offset will be incorrect if we\n- // roll back, because it has already been advanced by the full amount.\n- // Merely seeking on inFile does not work, because there may be concurrent\n- // file operations.\n+ if offset == -1 {\n+ // We modified the offset of the input file itself during the read\n+ // operation. Rewind it.\n+ if _, seekErr := inFile.Seek(t, -notWritten, linux.SEEK_CUR); seekErr != nil {\n+ // Log the error but don't return it, since the write has already\n+ // completed successfully.\n+ log.Warningf(\"failed to roll back input file offset: %v\", seekErr)\n+ }\n+ } else {\n+ // The sendfile call was provided an offset parameter that should be\n+ // adjusted to reflect the number of bytes sent. Rewind it.\noffset -= notWritten\n}\nbreak\n" } ]
Go
Apache License 2.0
google/gvisor
Adjust input file offset when sendfile only completes a partial write. Fixes #3779. PiperOrigin-RevId: 330057268
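The fix above rewinds the input file's offset by however many bytes were read but never written when a sendfile-style transfer ends early. The sketch below reproduces that bookkeeping with io.ReadSeeker and io.Writer; the shortWriter helper and copyChunk function are illustrative assumptions, not the sentry's splice code.

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
)

// shortWriter accepts at most cap bytes, standing in for a socket whose
// buffer filled up mid-transfer.
type shortWriter struct {
	buf bytes.Buffer
	cap int
}

func (w *shortWriter) Write(p []byte) (int, error) {
	if w.buf.Len()+len(p) > w.cap {
		p = p[:w.cap-w.buf.Len()]
		n, _ := w.buf.Write(p)
		return n, io.ErrShortWrite
	}
	return w.buf.Write(p)
}

// copyChunk reads one chunk from src and writes it to dst. If the write is
// short, it seeks src back by the unwritten amount so the caller sees an
// input offset that matches the bytes actually sent.
func copyChunk(dst io.Writer, src io.ReadSeeker, chunk int) (int64, error) {
	buf := make([]byte, chunk)
	nr, err := src.Read(buf)
	if nr == 0 {
		return 0, err
	}
	nw, werr := dst.Write(buf[:nr])
	if notWritten := int64(nr - nw); notWritten > 0 {
		// Rewind the input offset past the bytes that were never sent.
		if _, serr := src.Seek(-notWritten, io.SeekCurrent); serr != nil {
			return int64(nw), serr
		}
	}
	return int64(nw), werr
}

func main() {
	src := strings.NewReader("0123456789")
	dst := &shortWriter{cap: 4}
	n, err := copyChunk(dst, src, 10)
	pos, _ := src.Seek(0, io.SeekCurrent)
	fmt.Println(n, err, "next offset:", pos) // 4 short write next offset: 4
}
```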
259,905
08.09.2020 15:50:29
-28,800
ceab2e21de0856c403dc4e37d7f3b68ee970f3d5
Fix the use after nil check on args.MountNamespaceVFS2 The args.MountNamespaceVFS2 is used again after the nil check; instead, mntnsVFS2, which holds the expected reference, should be used. This patch fixes this issue. Fixes:
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/kernel/kernel.go", "new_path": "pkg/sentry/kernel/kernel.go", "diff": "@@ -888,17 +888,18 @@ func (k *Kernel) CreateProcess(args CreateProcessArgs) (*ThreadGroup, ThreadID,\nopener fsbridge.Lookup\nfsContext *FSContext\nmntns *fs.MountNamespace\n+ mntnsVFS2 *vfs.MountNamespace\n)\nif VFS2Enabled {\n- mntnsVFS2 := args.MountNamespaceVFS2\n+ mntnsVFS2 = args.MountNamespaceVFS2\nif mntnsVFS2 == nil {\n// MountNamespaceVFS2 adds a reference to the namespace, which is\n// transferred to the new process.\nmntnsVFS2 = k.globalInit.Leader().MountNamespaceVFS2()\n}\n// Get the root directory from the MountNamespace.\n- root := args.MountNamespaceVFS2.Root()\n+ root := mntnsVFS2.Root()\n// The call to newFSContext below will take a reference on root, so we\n// don't need to hold this one.\ndefer root.DecRef(ctx)\n@@ -1008,7 +1009,7 @@ func (k *Kernel) CreateProcess(args CreateProcessArgs) (*ThreadGroup, ThreadID,\nUTSNamespace: args.UTSNamespace,\nIPCNamespace: args.IPCNamespace,\nAbstractSocketNamespace: args.AbstractSocketNamespace,\n- MountNamespaceVFS2: args.MountNamespaceVFS2,\n+ MountNamespaceVFS2: mntnsVFS2,\nContainerID: args.ContainerID,\n}\nt, err := k.tasks.NewTask(config)\n" } ]
Go
Apache License 2.0
google/gvisor
Fix the use after nil check on args.MountNamespaceVFS2 The args.MountNamespaceVFS2 is used again after the nil check; instead, mntnsVFS2, which holds the expected reference, should be used. This patch fixes this issue. Fixes: #3855 Signed-off-by: Tiwei Bie <[email protected]>
259,962
08.09.2020 12:29:26
25,200
38cdb0579b698d67abad4f7f6ba18a84eef66bd7
Fix data race in tcp.GetSockOpt. e.ID can't be read without holding e.mu. GetSockOpt was reading e.ID when looking up OriginalDst without holding e.mu.
[ { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/tcp/endpoint.go", "new_path": "pkg/tcpip/transport/tcp/endpoint.go", "diff": "@@ -2019,8 +2019,10 @@ func (e *endpoint) GetSockOpt(opt tcpip.GettableSocketOption) *tcpip.Error {\ne.UnlockUser()\ncase *tcpip.OriginalDestinationOption:\n+ e.LockUser()\nipt := e.stack.IPTables()\naddr, port, err := ipt.OriginalDst(e.ID)\n+ e.UnlockUser()\nif err != nil {\nreturn err\n}\n" } ]
Go
Apache License 2.0
google/gvisor
Fix data race in tcp.GetSockOpt. e.ID can't be read without holding e.mu. GetSockOpt was reading e.ID when looking up OriginalDst without holding e.mu. PiperOrigin-RevId: 330562293
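The race above came from reading the endpoint ID without the mutex that its writers hold. The sketch below is a generic illustration of taking the lock around such a read; the endpoint type, its fields, and the lookup callback are hypothetical rather than the tcpip API.

```go
package main

import (
	"fmt"
	"sync"
)

type endpointID struct{ localPort, remotePort uint16 }

type endpoint struct {
	mu sync.Mutex
	id endpointID // written during connect/accept while holding mu
}

// originalDst reads e.id under e.mu, matching the locking discipline of the
// writers. Reading e.id without the lock would be a data race whenever a
// concurrent connect updates it.
func (e *endpoint) originalDst(lookup func(endpointID) string) string {
	e.mu.Lock()
	id := e.id
	e.mu.Unlock()
	return lookup(id)
}

func main() {
	e := &endpoint{id: endpointID{localPort: 8080, remotePort: 443}}
	dst := e.originalDst(func(id endpointID) string {
		return fmt.Sprintf("10.0.0.1:%d", id.remotePort)
	})
	fmt.Println(dst)
}
```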
259,907
08.09.2020 14:40:57
25,200
682c0edcdcdfbf26be5d0d6efa2319f6f5259df2
[vfs] overlayfs: decref VD when not using it. overlay/filesystem.go:lookupLocked() did not DecRef the VD on some error paths when it would not end up saving or using the VD.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/overlay/filesystem.go", "new_path": "pkg/sentry/fsimpl/overlay/filesystem.go", "diff": "@@ -211,6 +211,7 @@ func (fs *filesystem) lookupLocked(ctx context.Context, parent *dentry, name str\nlookupErr = err\nreturn false\n}\n+ defer childVD.DecRef(ctx)\nmask := uint32(linux.STATX_TYPE)\nif !existsOnAnyLayer {\n@@ -249,6 +250,7 @@ func (fs *filesystem) lookupLocked(ctx context.Context, parent *dentry, name str\n}\n// Update child to include this layer.\n+ childVD.IncRef()\nif isUpper {\nchild.upperVD = childVD\nchild.copiedUp = 1\n" } ]
Go
Apache License 2.0
google/gvisor
[vfs] overlayfs: decref VD when not using it. overlay/filesystem.go:lookupLocked() did not DecRef the VD on some error paths when it would not end up saving or using the VD. PiperOrigin-RevId: 330589742
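The reference-counting discipline behind this fix — balance the lookup's reference with a deferred DecRef on every return path, and take an explicit IncRef only for the copy that is kept — is sketched below with a toy ref type; the names are hypothetical and not the VFS VirtualDentry API.

```go
package main

import "fmt"

// ref is a toy reference-counted handle; the type and its methods are
// hypothetical, not the VFS VirtualDentry API.
type ref struct {
	name  string
	count int
}

func (r *ref) IncRef() { r.count++ }
func (r *ref) DecRef() { r.count-- }

// lookup pins r for the duration of the call and balances that pin with a
// deferred DecRef on every return path. Only a copy that the caller keeps
// gets its own extra IncRef.
func lookup(r *ref, keep bool) *ref {
	r.IncRef()
	defer r.DecRef() // always balances the IncRef above, even on early return

	if !keep {
		return nil // discarded: no reference leaks
	}
	r.IncRef() // the kept handle owns its own reference
	return r
}

func main() {
	r := &ref{name: "childVD"}
	lookup(r, false)
	fmt.Println("after discard:", r.count) // 0
	kept := lookup(r, true)
	fmt.Println("after keep:", r.count, kept.name) // 1 childVD
}
```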
259,885
08.09.2020 18:31:17
25,200
8d3551da6a41c30fa9c04a5b0cef712c089d5d04
Implement synthetic mountpoints for kernfs.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/kernfs/BUILD", "new_path": "pkg/sentry/fsimpl/kernfs/BUILD", "diff": "@@ -83,6 +83,7 @@ go_library(\n\"slot_list.go\",\n\"static_directory_refs.go\",\n\"symlink.go\",\n+ \"synthetic_directory.go\",\n],\nvisibility = [\"//pkg/sentry:internal\"],\ndeps = [\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/kernfs/filesystem.go", "new_path": "pkg/sentry/fsimpl/kernfs/filesystem.go", "diff": "@@ -360,8 +360,11 @@ func (fs *Filesystem) MkdirAt(ctx context.Context, rp *vfs.ResolvingPath, opts v\ndefer rp.Mount().EndWrite()\nchildVFSD, err := parentInode.NewDir(ctx, pc, opts)\nif err != nil {\n+ if !opts.ForSyntheticMountpoint || err == syserror.EEXIST {\nreturn err\n}\n+ childVFSD = newSyntheticDirectory(rp.Credentials(), opts.Mode)\n+ }\nparentVFSD.Impl().(*Dentry).InsertChild(pc, childVFSD.Impl().(*Dentry))\nreturn nil\n}\n" }, { "change_type": "ADD", "old_path": null, "new_path": "pkg/sentry/fsimpl/kernfs/synthetic_directory.go", "diff": "+// Copyright 2019 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package kernfs\n+\n+import (\n+ \"fmt\"\n+\n+ \"gvisor.dev/gvisor/pkg/abi/linux\"\n+ \"gvisor.dev/gvisor/pkg/context\"\n+ \"gvisor.dev/gvisor/pkg/sentry/kernel/auth\"\n+ \"gvisor.dev/gvisor/pkg/sentry/vfs\"\n+ \"gvisor.dev/gvisor/pkg/syserror\"\n+)\n+\n+// syntheticDirectory implements kernfs.Inode for a directory created by\n+// MkdirAt(ForSyntheticMountpoint=true).\n+//\n+// +stateify savable\n+type syntheticDirectory struct {\n+ InodeAttrs\n+ InodeNoStatFS\n+ InodeNoopRefCount\n+ InodeNoDynamicLookup\n+ InodeNotSymlink\n+ OrderedChildren\n+\n+ locks vfs.FileLocks\n+}\n+\n+var _ Inode = (*syntheticDirectory)(nil)\n+\n+func newSyntheticDirectory(creds *auth.Credentials, perm linux.FileMode) *vfs.Dentry {\n+ inode := &syntheticDirectory{}\n+ inode.Init(creds, 0 /* devMajor */, 0 /* devMinor */, 0 /* ino */, perm)\n+ d := &Dentry{}\n+ d.Init(inode)\n+ return &d.vfsd\n+}\n+\n+func (dir *syntheticDirectory) Init(creds *auth.Credentials, devMajor, devMinor uint32, ino uint64, perm linux.FileMode) {\n+ if perm&^linux.PermissionsMask != 0 {\n+ panic(fmt.Sprintf(\"perm contains non-permission bits: %#o\", perm))\n+ }\n+ dir.InodeAttrs.Init(creds, devMajor, devMinor, ino, linux.S_IFDIR|perm)\n+ dir.OrderedChildren.Init(OrderedChildrenOptions{\n+ Writable: true,\n+ })\n+}\n+\n+// Open implements Inode.Open.\n+func (dir *syntheticDirectory) Open(ctx context.Context, rp *vfs.ResolvingPath, vfsd *vfs.Dentry, opts vfs.OpenOptions) (*vfs.FileDescription, error) {\n+ fd, err := NewGenericDirectoryFD(rp.Mount(), vfsd, &dir.OrderedChildren, &dir.locks, &opts, GenericDirectoryFDOptions{})\n+ if err != nil {\n+ return nil, err\n+ }\n+ return &fd.vfsfd, nil\n+}\n+\n+// NewFile implements Inode.NewFile.\n+func (dir *syntheticDirectory) NewFile(ctx context.Context, name string, opts vfs.OpenOptions) (*vfs.Dentry, error) {\n+ return nil, syserror.EPERM\n+}\n+\n+// NewDir implements 
Inode.NewDir.\n+func (dir *syntheticDirectory) NewDir(ctx context.Context, name string, opts vfs.MkdirOptions) (*vfs.Dentry, error) {\n+ if !opts.ForSyntheticMountpoint {\n+ return nil, syserror.EPERM\n+ }\n+ subdird := newSyntheticDirectory(auth.CredentialsFromContext(ctx), opts.Mode&linux.PermissionsMask)\n+ if err := dir.OrderedChildren.Insert(name, subdird); err != nil {\n+ subdird.DecRef(ctx)\n+ return nil, err\n+ }\n+ return subdird, nil\n+}\n+\n+// NewLink implements Inode.NewLink.\n+func (dir *syntheticDirectory) NewLink(ctx context.Context, name string, target Inode) (*vfs.Dentry, error) {\n+ return nil, syserror.EPERM\n+}\n+\n+// NewSymlink implements Inode.NewSymlink.\n+func (dir *syntheticDirectory) NewSymlink(ctx context.Context, name, target string) (*vfs.Dentry, error) {\n+ return nil, syserror.EPERM\n+}\n+\n+// NewNode implements Inode.NewNode.\n+func (dir *syntheticDirectory) NewNode(ctx context.Context, name string, opts vfs.MknodOptions) (*vfs.Dentry, error) {\n+ return nil, syserror.EPERM\n+}\n" } ]
Go
Apache License 2.0
google/gvisor
Implement synthetic mountpoints for kernfs. PiperOrigin-RevId: 330629897
259,884
08.09.2020 21:57:28
25,200
00479af515289f42a63ab14cec128ab030120b38
Add a Docker Compose tutorial Adds a Docker Compose tutorial to the website that shows how to start a Wordpress site and includes information about how to get DNS working. Fixes
[ { "change_type": "MODIFY", "old_path": "g3doc/user_guide/tutorials/BUILD", "new_path": "g3doc/user_guide/tutorials/BUILD", "diff": "@@ -14,6 +14,15 @@ doc(\nweight = \"10\",\n)\n+doc(\n+ name = \"docker_compose\",\n+ src = \"docker-compose.md\",\n+ category = \"User Guide\",\n+ permalink = \"/docs/tutorials/docker-compose/\",\n+ subcategory = \"Tutorials\",\n+ weight = \"20\",\n+)\n+\ndoc(\nname = \"kubernetes\",\nsrc = \"kubernetes.md\",\n@@ -24,7 +33,7 @@ doc(\n],\npermalink = \"/docs/tutorials/kubernetes/\",\nsubcategory = \"Tutorials\",\n- weight = \"20\",\n+ weight = \"30\",\n)\ndoc(\n@@ -33,5 +42,5 @@ doc(\ncategory = \"User Guide\",\npermalink = \"/docs/tutorials/cni/\",\nsubcategory = \"Tutorials\",\n- weight = \"30\",\n+ weight = \"40\",\n)\n" }, { "change_type": "ADD", "old_path": null, "new_path": "g3doc/user_guide/tutorials/docker-compose.md", "diff": "+# Wordpress with Docker Compose\n+\n+This page shows you how to deploy a sample [WordPress][wordpress] site using\n+[Docker Compose][docker-compose].\n+\n+### Before you begin\n+\n+[Follow these instructions][docker-install] to install runsc with Docker. This\n+document assumes that Docker and Docker Compose are installed and the runtime\n+name chosen for gVisor is `runsc`.\n+\n+### Configuration\n+\n+We'll start by creating the `docker-compose.yaml` file to specify our services.\n+We will specify two services, a `wordpress` service for the Wordpress Apache\n+server, and a `db` service for MySQL. We will configure Wordpress to connect to\n+MySQL via the `db` service host name.\n+\n+> **Note:** Docker Compose uses it's own network by default and allows services\n+> to communicate using their service name. Docker Compose does this by setting\n+> up a DNS server at IP address 127.0.0.11 and configuring containers to use it\n+> via [resolv.conf][resolv.conf]. This IP is not addressable inside a gVisor\n+> sandbox so it's important that we set the DNS IP address to the alternative\n+> `8.8.8.8` and use a network that allows routing to it. See\n+> [Networking in Compose][compose-networking] for more details.\n+\n+> **Note:** The `runtime` field was removed from services in the 3.x version of\n+> the API in versions of docker-compose < 1.27.0. You will need to write your\n+> `docker-compose.yaml` file using the 2.x format or use docker-compose >=\n+> 1.27.0. See this [issue](https://github.com/docker/compose/issues/6239) for\n+> more details.\n+\n+```yaml\n+version: '2.3'\n+\n+services:\n+ db:\n+ image: mysql:5.7\n+ volumes:\n+ - db_data:/var/lib/mysql\n+ restart: always\n+ environment:\n+ MYSQL_ROOT_PASSWORD: somewordpress\n+ MYSQL_DATABASE: wordpress\n+ MYSQL_USER: wordpress\n+ MYSQL_PASSWORD: wordpress\n+ # All services must be on the same network to communicate.\n+ network_mode: \"bridge\"\n+\n+ wordpress:\n+ depends_on:\n+ - db\n+ # When using the \"bridge\" network specify links.\n+ links:\n+ - db\n+ image: wordpress:latest\n+ ports:\n+ - \"8080:80\"\n+ restart: always\n+ environment:\n+ WORDPRESS_DB_HOST: db:3306\n+ WORDPRESS_DB_USER: wordpress\n+ WORDPRESS_DB_PASSWORD: wordpress\n+ WORDPRESS_DB_NAME: wordpress\n+ # Specify the dns address if needed.\n+ dns:\n+ - 8.8.8.8\n+ # All services must be on the same network to communicate.\n+ network_mode: \"bridge\"\n+ # Specify the runtime used by Docker. 
Must be set up in\n+ # /etc/docker/daemon.json.\n+ runtime: \"runsc\"\n+\n+volumes:\n+ db_data: {}\n+```\n+\n+Once you have a `docker-compose.yaml` in the current directory you can start the\n+containers:\n+\n+```bash\n+docker-compose up\n+```\n+\n+Once the containers have started you can access wordpress at\n+http://localhost:8080.\n+\n+Congrats! You now how a working wordpress site up and running using Docker\n+Compose.\n+\n+### What's next\n+\n+Learn how to deploy [WordPress with Kubernetes][wordpress-k8s].\n+\n+[docker-compose]: https://docs.docker.com/compose/\n+[docker-install]: ../quick_start/docker.md\n+[wordpress]: https://wordpress.com/\n+[resolv.conf]: https://man7.org/linux/man-pages/man5/resolv.conf.5.html\n+[wordpress-k8s]: kubernetes.md\n+[compose-networking]: https://docs.docker.com/compose/networking/\n" }, { "change_type": "MODIFY", "old_path": "g3doc/user_guide/tutorials/docker.md", "new_path": "g3doc/user_guide/tutorials/docker.md", "diff": "@@ -60,9 +60,11 @@ Congratulations! You have just deployed a WordPress site using Docker.\n### What's next\n-[Learn how to deploy WordPress with Kubernetes][wordpress-k8s].\n+Learn how to deploy WordPress with [Kubernetes][wordpress-k8s] or\n+[Docker Compose][wordpress-compose].\n[docker]: https://www.docker.com/\n-[docker-install]: /docs/user_guide/quick_start/docker/\n+[docker-install]: ../quick_start/docker.md\n[wordpress]: https://wordpress.com/\n-[wordpress-k8s]: /docs/tutorials/kubernetes/\n+[wordpress-k8s]: kubernetes.md\n+[wordpress-compose]: docker-compose.md\n" }, { "change_type": "MODIFY", "old_path": "website/BUILD", "new_path": "website/BUILD", "diff": "@@ -157,6 +157,7 @@ docs(\n\"//g3doc/user_guide/quick_start:oci\",\n\"//g3doc/user_guide/tutorials:cni\",\n\"//g3doc/user_guide/tutorials:docker\",\n+ \"//g3doc/user_guide/tutorials:docker_compose\",\n\"//g3doc/user_guide/tutorials:kubernetes\",\n],\n)\n" } ]
Go
Apache License 2.0
google/gvisor
Add a Docker Compose tutorial Adds a Docker Compose tutorial to the website that shows how to start a Wordpress site and includes information about how to get DNS working. Fixes #115 PiperOrigin-RevId: 330652842
259,884
09.09.2020 09:06:26
25,200
26439f9a43a6a23bd9fbc64f2b43ca843ad25d99
Add syntax highlighting to website Adds a syntax highlighting theme css so that code snippets are highlighted properly.
[ { "change_type": "MODIFY", "old_path": "images/jekyll/Dockerfile", "new_path": "images/jekyll/Dockerfile", "diff": "FROM jekyll/jekyll:4.0.0\nUSER root\n+\nRUN gem install \\\nhtml-proofer:3.10.2 \\\nnokogiri:1.10.1 \\\n@@ -10,5 +11,9 @@ RUN gem install \\\njekyll-relative-links:0.6.1 \\\njekyll-feed:0.13.0 \\\njekyll-sitemap:1.4.0\n+\n+# checks.rb is used with html-proofer for presubmit checks.\nCOPY checks.rb /checks.rb\n-CMD [\"/usr/gem/gems/jekyll-4.0.0/exe/jekyll\", \"build\", \"-t\", \"-s\", \"/input\", \"-d\", \"/output\"]\n+\n+COPY build.sh /build.sh\n+CMD [\"/build.sh\"]\n" }, { "change_type": "ADD", "old_path": null, "new_path": "images/jekyll/build.sh", "diff": "+#!/bin/bash\n+\n+# Copyright 2020 The gVisor Authors.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+set -euxo pipefail\n+\n+# Generate the syntax highlighting css file.\n+/usr/gem/bin/rougify style github >/input/_sass/syntax.css\n+/usr/gem/bin/jekyll build -t -s /input -d /output\n" }, { "change_type": "MODIFY", "old_path": "website/css/main.scss", "new_path": "website/css/main.scss", "diff": "-@import 'style.scss';\n-@import 'front.scss';\n-@import 'navbar.scss';\n-@import 'sidebar.scss';\n-@import 'footer.scss';\n+// The main style sheet for gvisor.dev\n+\n+// NOTE: Do not include file extensions to import .sass and .css files seamlessly.\n+@import \"style\";\n+@import \"front\";\n+@import \"navbar\";\n+@import \"sidebar\";\n+@import \"footer\";\n+// syntax is generated by rougify.\n+@import \"syntax\";\n" } ]
Go
Apache License 2.0
google/gvisor
Add syntax highlighting to website Adds a syntax highlighting theme css so that code snippets are highlighted properly. PiperOrigin-RevId: 330733737
259,884
09.09.2020 10:08:06
25,200
fb281eea75799d662790cdf86ce0d55dc56b7470
Fix formatting for Kubernetes tutorial
[ { "change_type": "MODIFY", "old_path": "g3doc/user_guide/tutorials/kubernetes.md", "new_path": "g3doc/user_guide/tutorials/kubernetes.md", "diff": "@@ -23,12 +23,12 @@ gcloud beta container node-pools create sandbox-pool --cluster=${CLUSTER_NAME} -\nIf you prefer to use the console, select your cluster and select the **ADD NODE\nPOOL** button:\n-![+ ADD NODE POOL](./node-pool-button.png)\n+![+ ADD NODE POOL](node-pool-button.png)\nThen select the **Image type** with **Containerd** and select **Enable sandbox\nwith gVisor** option. Select other options as you like:\n-![+ NODE POOL](./add-node-pool.png)\n+![+ NODE POOL](add-node-pool.png)\n### Check that gVisor is enabled\n@@ -57,47 +57,149 @@ curl -LO https://k8s.io/examples/application/wordpress/mysql-deployment.yaml\nAdd a **spec.template.spec.runtimeClassName** set to **gvisor** to both files,\nas shown below:\n-**wordpress-deployment.yaml:** ```yaml apiVersion: v1 kind: Service metadata:\n-name: wordpress labels: app: wordpress spec: ports: - port: 80 selector: app:\n-wordpress tier: frontend\n-\n-## type: LoadBalancer\n-\n-apiVersion: v1 kind: PersistentVolumeClaim metadata: name: wp-pv-claim labels:\n-app: wordpress spec: accessModes: - ReadWriteOnce resources: requests:\n-\n-## storage: 20Gi\n-\n-apiVersion: apps/v1 kind: Deployment metadata: name: wordpress labels: app:\n-wordpress spec: selector: matchLabels: app: wordpress tier: frontend strategy:\n-type: Recreate template: metadata: labels: app: wordpress tier: frontend spec:\n-runtimeClassName: gvisor # ADD THIS LINE containers: - image:\n-wordpress:4.8-apache name: wordpress env: - name: WORDPRESS_DB_HOST value:\n-wordpress-mysql - name: WORDPRESS_DB_PASSWORD valueFrom: secretKeyRef: name:\n-mysql-pass key: password ports: - containerPort: 80 name: wordpress\n-volumeMounts: - name: wordpress-persistent-storage mountPath: /var/www/html\n-volumes: - name: wordpress-persistent-storage persistentVolumeClaim: claimName:\n-wp-pv-claim ```\n-\n-**mysql-deployment.yaml:** ```yaml apiVersion: v1 kind: Service metadata: name:\n-wordpress-mysql labels: app: wordpress spec: ports: - port: 3306 selector: app:\n-wordpress tier: mysql\n-\n-## clusterIP: None\n-\n-apiVersion: v1 kind: PersistentVolumeClaim metadata: name: mysql-pv-claim\n-labels: app: wordpress spec: accessModes: - ReadWriteOnce resources: requests:\n-\n-## storage: 20Gi\n+**wordpress-deployment.yaml:**\n+\n+```yaml\n+apiVersion: v1\n+kind: Service\n+metadata:\n+ name: wordpress\n+ labels:\n+ app: wordpress\n+spec:\n+ ports:\n+ - port: 80\n+ selector:\n+ app: wordpress\n+ tier: frontend\n+ type: LoadBalancer\n+---\n+apiVersion: v1\n+kind: PersistentVolumeClaim\n+metadata:\n+ name: wp-pv-claim\n+ labels:\n+ app: wordpress\n+spec:\n+ accessModes:\n+ - ReadWriteOnce\n+ resources:\n+ requests:\n+ storage: 20Gi\n+---\n+apiVersion: apps/v1\n+kind: Deployment\n+metadata:\n+ name: wordpress\n+ labels:\n+ app: wordpress\n+spec:\n+ selector:\n+ matchLabels:\n+ app: wordpress\n+ tier: frontend\n+ strategy:\n+ type: Recreate\n+ template:\n+ metadata:\n+ labels:\n+ app: wordpress\n+ tier: frontend\n+ spec:\n+ runtimeClassName: gvisor # ADD THIS LINE\n+ containers:\n+ - image: wordpress:4.8-apache\n+ name: wordpress\n+ env:\n+ - name: WORDPRESS_DB_HOST\n+ value: wordpress-mysql\n+ - name: WORDPRESS_DB_PASSWORD\n+ valueFrom:\n+ secretKeyRef:\n+ name: mysql-pass\n+ key: password\n+ ports:\n+ - containerPort: 80\n+ name: wordpress\n+ volumeMounts:\n+ - name: wordpress-persistent-storage\n+ mountPath: /var/www/html\n+ volumes:\n+ - name: 
wordpress-persistent-storage\n+ persistentVolumeClaim:\n+ claimName: wp-pv-claim\n+```\n-apiVersion: apps/v1 kind: Deployment metadata: name: wordpress-mysql labels:\n-app: wordpress spec: selector: matchLabels: app: wordpress tier: mysql strategy:\n-type: Recreate template: metadata: labels: app: wordpress tier: mysql spec:\n-runtimeClassName: gvisor # ADD THIS LINE containers: - image: mysql:5.6 name:\n-mysql env: - name: MYSQL_ROOT_PASSWORD valueFrom: secretKeyRef: name: mysql-pass\n-key: password ports: - containerPort: 3306 name: mysql volumeMounts: - name:\n-mysql-persistent-storage mountPath: /var/lib/mysql volumes: - name:\n-mysql-persistent-storage persistentVolumeClaim: claimName: mysql-pv-claim ```\n+**mysql-deployment.yaml:**\n+\n+```yaml\n+apiVersion: v1\n+kind: Service\n+metadata:\n+ name: wordpress-mysql\n+ labels:\n+ app: wordpress\n+spec:\n+ ports:\n+ - port: 3306\n+ selector:\n+ app: wordpress\n+ tier: mysql\n+ clusterIP: None\n+---\n+apiVersion: v1\n+kind: PersistentVolumeClaim\n+metadata:\n+ name: mysql-pv-claim\n+ labels:\n+ app: wordpress\n+spec:\n+ accessModes:\n+ - ReadWriteOnce\n+ resources:\n+ requests:\n+ storage: 20Gi\n+---\n+apiVersion: apps/v1\n+kind: Deployment\n+metadata:\n+ name: wordpress-mysql\n+ labels:\n+ app: wordpress\n+spec:\n+ selector:\n+ matchLabels:\n+ app: wordpress\n+ tier: mysql\n+ strategy:\n+ type: Recreate\n+ template:\n+ metadata:\n+ labels:\n+ app: wordpress\n+ tier: mysql\n+ spec:\n+ runtimeClassName: gvisor # ADD THIS LINE\n+ containers:\n+ - image: mysql:5.6\n+ name: mysql\n+ env:\n+ - name: MYSQL_ROOT_PASSWORD\n+ valueFrom:\n+ secretKeyRef:\n+ name: mysql-pass\n+ key: password\n+ ports:\n+ - containerPort: 3306\n+ name: mysql\n+ volumeMounts:\n+ - name: mysql-persistent-storage\n+ mountPath: /var/lib/mysql\n+ volumes:\n+ - name: mysql-persistent-storage\n+ persistentVolumeClaim:\n+ claimName: mysql-pv-claim\n+```\nNote that apart from `runtimeClassName: gvisor`, nothing else about the\nDeployment has is changed.\n" } ]
Go
Apache License 2.0
google/gvisor
Fix formatting for Kubernetes tutorial PiperOrigin-RevId: 330745430
259,853
09.09.2020 10:38:48
25,200
27897621da066fb7612781ae435d2bb6b7759433
github: run actions for feature branches
[ { "change_type": "MODIFY", "old_path": ".github/workflows/build.yml", "new_path": ".github/workflows/build.yml", "diff": "@@ -3,9 +3,11 @@ on:\npush:\nbranches:\n- master\n+ - feature/**\npull_request:\nbranches:\n- master\n+ - feature/**\njobs:\ndefault:\n" }, { "change_type": "MODIFY", "old_path": ".github/workflows/go.yml", "new_path": ".github/workflows/go.yml", "diff": "@@ -3,9 +3,11 @@ on:\npush:\nbranches:\n- master\n+ - feature/**\npull_request:\nbranches:\n- master\n+ - feature/**\njobs:\ngenerate:\n" } ]
Go
Apache License 2.0
google/gvisor
github: run actions for feature branches Signed-off-by: Andrei Vagin <[email protected]>
259,885
09.09.2020 12:47:24
25,200
f3172c3a11dda8aa21bbaf51b7f63196592cf1da
Don't sched_setaffinity in ptrace platform.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/platform/ptrace/BUILD", "new_path": "pkg/sentry/platform/ptrace/BUILD", "diff": "@@ -30,7 +30,6 @@ go_library(\n\"//pkg/safecopy\",\n\"//pkg/seccomp\",\n\"//pkg/sentry/arch\",\n- \"//pkg/sentry/hostcpu\",\n\"//pkg/sentry/memmap\",\n\"//pkg/sentry/platform\",\n\"//pkg/sentry/platform/interrupt\",\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/platform/ptrace/filters.go", "new_path": "pkg/sentry/platform/ptrace/filters.go", "diff": "@@ -25,7 +25,6 @@ import (\nfunc (*PTrace) SyscallFilters() seccomp.SyscallRules {\nreturn seccomp.SyscallRules{\nunix.SYS_GETCPU: {},\n- unix.SYS_SCHED_SETAFFINITY: {},\nsyscall.SYS_PTRACE: {},\nsyscall.SYS_TGKILL: {},\nsyscall.SYS_WAIT4: {},\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/platform/ptrace/subprocess.go", "new_path": "pkg/sentry/platform/ptrace/subprocess.go", "diff": "@@ -518,11 +518,6 @@ func (s *subprocess) switchToApp(c *context, ac arch.Context) bool {\n}\ndefer c.interrupt.Disable()\n- // Ensure that the CPU set is bound appropriately; this makes the\n- // emulation below several times faster, presumably by avoiding\n- // interprocessor wakeups and by simplifying the schedule.\n- t.bind()\n-\n// Set registers.\nif err := t.setRegs(regs); err != nil {\npanic(fmt.Sprintf(\"ptrace set regs (%+v) failed: %v\", regs, err))\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/platform/ptrace/subprocess_linux_unsafe.go", "new_path": "pkg/sentry/platform/ptrace/subprocess_linux_unsafe.go", "diff": "package ptrace\nimport (\n- \"sync/atomic\"\n\"syscall\"\n\"unsafe\"\n- \"golang.org/x/sys/unix\"\n\"gvisor.dev/gvisor/pkg/abi/linux\"\n- \"gvisor.dev/gvisor/pkg/sentry/hostcpu\"\n- \"gvisor.dev/gvisor/pkg/sync\"\n)\n-// maskPool contains reusable CPU masks for setting affinity. Unfortunately,\n-// runtime.NumCPU doesn't actually record the number of CPUs on the system, it\n-// just records the number of CPUs available in the scheduler affinity set at\n-// startup. This may a) change over time and b) gives a number far lower than\n-// the maximum indexable CPU. To prevent lots of allocation in the hot path, we\n-// use a pool to store large masks that we can reuse during bind.\n-var maskPool = sync.Pool{\n- New: func() interface{} {\n- const maxCPUs = 1024 // Not a hard limit; see below.\n- return make([]uintptr, maxCPUs/64)\n- },\n-}\n-\n// unmaskAllSignals unmasks all signals on the current thread.\n//\n//go:nosplit\n@@ -49,47 +32,3 @@ func unmaskAllSignals() syscall.Errno {\n_, _, errno := syscall.RawSyscall6(syscall.SYS_RT_SIGPROCMASK, linux.SIG_SETMASK, uintptr(unsafe.Pointer(&set)), 0, linux.SignalSetSize, 0, 0)\nreturn errno\n}\n-\n-// setCPU sets the CPU affinity.\n-func (t *thread) setCPU(cpu uint32) error {\n- mask := maskPool.Get().([]uintptr)\n- n := int(cpu / 64)\n- v := uintptr(1 << uintptr(cpu%64))\n- if n >= len(mask) {\n- // See maskPool note above. We've actually exceeded the number\n- // of available cores. Grow the mask and return it.\n- mask = make([]uintptr, n+1)\n- }\n- mask[n] |= v\n- if _, _, errno := syscall.RawSyscall(\n- unix.SYS_SCHED_SETAFFINITY,\n- uintptr(t.tid),\n- uintptr(len(mask)*8),\n- uintptr(unsafe.Pointer(&mask[0]))); errno != 0 {\n- return errno\n- }\n- mask[n] &^= v\n- maskPool.Put(mask)\n- return nil\n-}\n-\n-// bind attempts to ensure that the thread is on the same CPU as the current\n-// thread. 
This provides no guarantees as it is fundamentally a racy operation:\n-// CPU sets may change and we may be rescheduled in the middle of this\n-// operation. As a result, no failures are reported.\n-//\n-// Precondition: the current runtime thread should be locked.\n-func (t *thread) bind() {\n- currentCPU := hostcpu.GetCPU()\n-\n- if oldCPU := atomic.SwapUint32(&t.cpu, currentCPU); oldCPU != currentCPU {\n- // Set the affinity on the thread and save the CPU for next\n- // round; we don't expect CPUs to bounce around too frequently.\n- //\n- // (It's worth noting that we could move CPUs between this point\n- // and when the tracee finishes executing. But that would be\n- // roughly the status quo anyways -- we're just maximizing our\n- // chances of colocation, not guaranteeing it.)\n- t.setCPU(currentCPU)\n- }\n-}\n" } ]
Go
Apache License 2.0
google/gvisor
Don't sched_setaffinity in ptrace platform. PiperOrigin-RevId: 330777900
259,885
09.09.2020 18:38:02
25,200
644ac7b6bcf547cb44da6f298c92de4b3806c426
Unlock VFS.mountMu before FilesystemImpl calls for /proc/[pid]/{mounts,mountinfo}. Also move VFS.MakeSyntheticMountpoint() (which is a utility wrapper around VFS.MkdirAllAt(), itself a utility wrapper around VFS.MkdirAt()) to not be in the middle of the implementation of these proc files. Fixes
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/vfs/mount.go", "new_path": "pkg/sentry/vfs/mount.go", "diff": "@@ -18,14 +18,12 @@ import (\n\"bytes\"\n\"fmt\"\n\"math\"\n- \"path\"\n\"sort\"\n\"strings\"\n\"sync/atomic\"\n\"gvisor.dev/gvisor/pkg/abi/linux\"\n\"gvisor.dev/gvisor/pkg/context\"\n- \"gvisor.dev/gvisor/pkg/fspath\"\n\"gvisor.dev/gvisor/pkg/sentry/kernel/auth\"\n\"gvisor.dev/gvisor/pkg/syserror\"\n)\n@@ -740,11 +738,23 @@ func (mntns *MountNamespace) Root() VirtualDentry {\n//\n// Preconditions: taskRootDir.Ok().\nfunc (vfs *VirtualFilesystem) GenerateProcMounts(ctx context.Context, taskRootDir VirtualDentry, buf *bytes.Buffer) {\n- vfs.mountMu.Lock()\n- defer vfs.mountMu.Unlock()\nrootMnt := taskRootDir.mount\n+\n+ vfs.mountMu.Lock()\nmounts := rootMnt.submountsLocked()\n+ // Take a reference on mounts since we need to drop vfs.mountMu before\n+ // calling vfs.PathnameReachable() (=> FilesystemImpl.PrependPath()).\n+ for _, mnt := range mounts {\n+ mnt.IncRef()\n+ }\n+ vfs.mountMu.Unlock()\n+ defer func() {\n+ for _, mnt := range mounts {\n+ mnt.DecRef(ctx)\n+ }\n+ }()\nsort.Slice(mounts, func(i, j int) bool { return mounts[i].ID < mounts[j].ID })\n+\nfor _, mnt := range mounts {\n// Get the path to this mount relative to task root.\nmntRootVD := VirtualDentry{\n@@ -755,7 +765,7 @@ func (vfs *VirtualFilesystem) GenerateProcMounts(ctx context.Context, taskRootDi\nif err != nil {\n// For some reason we didn't get a path. Log a warning\n// and run with empty path.\n- ctx.Warningf(\"Error getting pathname for mount root %+v: %v\", mnt.root, err)\n+ ctx.Warningf(\"VFS.GenerateProcMounts: error getting pathname for mount root %+v: %v\", mnt.root, err)\npath = \"\"\n}\nif path == \"\" {\n@@ -789,11 +799,25 @@ func (vfs *VirtualFilesystem) GenerateProcMounts(ctx context.Context, taskRootDi\n//\n// Preconditions: taskRootDir.Ok().\nfunc (vfs *VirtualFilesystem) GenerateProcMountInfo(ctx context.Context, taskRootDir VirtualDentry, buf *bytes.Buffer) {\n- vfs.mountMu.Lock()\n- defer vfs.mountMu.Unlock()\nrootMnt := taskRootDir.mount\n+\n+ vfs.mountMu.Lock()\nmounts := rootMnt.submountsLocked()\n+ // Take a reference on mounts since we need to drop vfs.mountMu before\n+ // calling vfs.PathnameReachable() (=> FilesystemImpl.PrependPath()) or\n+ // vfs.StatAt() (=> FilesystemImpl.StatAt()).\n+ for _, mnt := range mounts {\n+ mnt.IncRef()\n+ }\n+ vfs.mountMu.Unlock()\n+ defer func() {\n+ for _, mnt := range mounts {\n+ mnt.DecRef(ctx)\n+ }\n+ }()\nsort.Slice(mounts, func(i, j int) bool { return mounts[i].ID < mounts[j].ID })\n+\n+ creds := auth.CredentialsFromContext(ctx)\nfor _, mnt := range mounts {\n// Get the path to this mount relative to task root.\nmntRootVD := VirtualDentry{\n@@ -804,7 +828,7 @@ func (vfs *VirtualFilesystem) GenerateProcMountInfo(ctx context.Context, taskRoo\nif err != nil {\n// For some reason we didn't get a path. Log a warning\n// and run with empty path.\n- ctx.Warningf(\"Error getting pathname for mount root %+v: %v\", mnt.root, err)\n+ ctx.Warningf(\"VFS.GenerateProcMountInfo: error getting pathname for mount root %+v: %v\", mnt.root, err)\npath = \"\"\n}\nif path == \"\" {\n@@ -817,9 +841,10 @@ func (vfs *VirtualFilesystem) GenerateProcMountInfo(ctx context.Context, taskRoo\nRoot: mntRootVD,\nStart: mntRootVD,\n}\n- statx, err := vfs.StatAt(ctx, auth.NewAnonymousCredentials(), pop, &StatOptions{})\n+ statx, err := vfs.StatAt(ctx, creds, pop, &StatOptions{})\nif err != nil {\n// Well that's not good. 
Ignore this mount.\n+ ctx.Warningf(\"VFS.GenerateProcMountInfo: failed to stat mount root %+v: %v\", mnt.root, err)\nbreak\n}\n@@ -831,6 +856,9 @@ func (vfs *VirtualFilesystem) GenerateProcMountInfo(ctx context.Context, taskRoo\nfmt.Fprintf(buf, \"%d \", mnt.ID)\n// (2) Parent ID (or this ID if there is no parent).\n+ // Note that even if the call to mnt.parent() races with Mount\n+ // destruction (which is possible since we're not holding vfs.mountMu),\n+ // its Mount.ID will still be valid.\npID := mnt.ID\nif p := mnt.parent(); p != nil {\npID = p.ID\n@@ -879,30 +907,6 @@ func (vfs *VirtualFilesystem) GenerateProcMountInfo(ctx context.Context, taskRoo\n}\n}\n-// MakeSyntheticMountpoint creates parent directories of target if they do not\n-// exist and attempts to create a directory for the mountpoint. If a\n-// non-directory file already exists there then we allow it.\n-func (vfs *VirtualFilesystem) MakeSyntheticMountpoint(ctx context.Context, target string, root VirtualDentry, creds *auth.Credentials) error {\n- mkdirOpts := &MkdirOptions{Mode: 0777, ForSyntheticMountpoint: true}\n-\n- // Make sure the parent directory of target exists.\n- if err := vfs.MkdirAllAt(ctx, path.Dir(target), root, creds, mkdirOpts); err != nil {\n- return fmt.Errorf(\"failed to create parent directory of mountpoint %q: %w\", target, err)\n- }\n-\n- // Attempt to mkdir the final component. If a file (of any type) exists\n- // then we let allow mounting on top of that because we do not require the\n- // target to be an existing directory, unlike Linux mount(2).\n- if err := vfs.MkdirAt(ctx, creds, &PathOperation{\n- Root: root,\n- Start: root,\n- Path: fspath.Parse(target),\n- }, mkdirOpts); err != nil && err != syserror.EEXIST {\n- return fmt.Errorf(\"failed to create mountpoint %q: %w\", target, err)\n- }\n- return nil\n-}\n-\n// manglePath replaces ' ', '\\t', '\\n', and '\\\\' with their octal equivalents.\n// See Linux fs/seq_file.c:mangle_path.\nfunc manglePath(p string) string {\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/vfs/vfs.go", "new_path": "pkg/sentry/vfs/vfs.go", "diff": "@@ -819,6 +819,30 @@ func (vfs *VirtualFilesystem) MkdirAllAt(ctx context.Context, currentPath string\nreturn nil\n}\n+// MakeSyntheticMountpoint creates parent directories of target if they do not\n+// exist and attempts to create a directory for the mountpoint. If a\n+// non-directory file already exists there then we allow it.\n+func (vfs *VirtualFilesystem) MakeSyntheticMountpoint(ctx context.Context, target string, root VirtualDentry, creds *auth.Credentials) error {\n+ mkdirOpts := &MkdirOptions{Mode: 0777, ForSyntheticMountpoint: true}\n+\n+ // Make sure the parent directory of target exists.\n+ if err := vfs.MkdirAllAt(ctx, path.Dir(target), root, creds, mkdirOpts); err != nil {\n+ return fmt.Errorf(\"failed to create parent directory of mountpoint %q: %w\", target, err)\n+ }\n+\n+ // Attempt to mkdir the final component. 
If a file (of any type) exists\n+ // then we let allow mounting on top of that because we do not require the\n+ // target to be an existing directory, unlike Linux mount(2).\n+ if err := vfs.MkdirAt(ctx, creds, &PathOperation{\n+ Root: root,\n+ Start: root,\n+ Path: fspath.Parse(target),\n+ }, mkdirOpts); err != nil && err != syserror.EEXIST {\n+ return fmt.Errorf(\"failed to create mountpoint %q: %w\", target, err)\n+ }\n+ return nil\n+}\n+\n// A VirtualDentry represents a node in a VFS tree, by combining a Dentry\n// (which represents a node in a Filesystem's tree) and a Mount (which\n// represents the Filesystem's position in a VFS mount tree).\n" } ]
Go
Apache License 2.0
google/gvisor
Unlock VFS.mountMu before FilesystemImpl calls for /proc/[pid]/{mounts,mountinfo}. Also move VFS.MakeSyntheticMountpoint() (which is a utility wrapper around VFS.MkdirAllAt(), itself a utility wrapper around VFS.MkdirAt()) to not be in the middle of the implementation of these proc files. Fixes #3878 PiperOrigin-RevId: 330843106
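The restructuring described above follows a common shape: pin and snapshot shared state while the lock is held, release the lock, then do the slow per-item work. A minimal sketch of that shape, using hypothetical mount and vfs types rather than the real VFS ones:

```go
package main

import (
	"fmt"
	"sync"
)

// mount and vfs are hypothetical, simplified types, not the real VFS ones.
type mount struct {
	id   int
	refs int
}

func (m *mount) IncRef() { m.refs++ }
func (m *mount) DecRef() { m.refs-- }

type vfs struct {
	mu     sync.Mutex // protects mounts
	mounts []*mount
}

// generateMounts snapshots and pins the mount list while mu is held, drops
// mu, and only then runs the slow per-mount work, so that work is free to
// acquire other locks without deadlocking against mu.
func (v *vfs) generateMounts() {
	v.mu.Lock()
	snapshot := append([]*mount(nil), v.mounts...)
	for _, m := range snapshot {
		m.IncRef()
	}
	v.mu.Unlock()
	defer func() {
		for _, m := range snapshot {
			m.DecRef()
		}
	}()

	for _, m := range snapshot {
		// Stand-in for the slow path (path resolution, stat): runs unlocked.
		fmt.Println("mount", m.id)
	}
}

func main() {
	v := &vfs{mounts: []*mount{{id: 1}, {id: 2}}}
	v.generateMounts()
}
```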
259,884
09.09.2020 20:11:16
25,200
1ab097b08fc16d67b90f094a4316883c289ef77f
Add note about kubeadm to the FAQ Fixes
[ { "change_type": "MODIFY", "old_path": "g3doc/user_guide/FAQ.md", "new_path": "g3doc/user_guide/FAQ.md", "diff": "@@ -96,6 +96,30 @@ containerd.\nSee [issue #1765](https://gvisor.dev/issue/1765) for more details.\n+### I'm getting an error like `RuntimeHandler \"runsc\" not supported` {#runtime-handler}\n+\n+This error indicates that the Kubernetes CRI runtime was not set up to handle\n+`runsc` as a runtime handler. Please ensure that containerd configuration has\n+been created properly and containerd has been restarted. See the\n+[containerd quick start](containerd/quick_start.md) for more details.\n+\n+If you have ensured that containerd has been set up properly and you used\n+kubeadm to create your cluster please check if Docker is also installed on that\n+system. Kubeadm prefers using Docker if both Docker and containerd are\n+installed.\n+\n+Please recreate your cluster and set the `--cni-socket` option on kubeadm\n+commands. For example:\n+\n+```bash\n+kubeadm init --cni-socket=/var/run/containerd/containerd.sock` ...\n+```\n+\n+To fix an existing cluster edit the `/var/lib/kubelet/kubeadm-flags.env` file\n+and set the `--container-runtime` flag to `remote` and set the\n+`--container-runtime-endpoint` flag to point to the containerd socket. e.g.\n+`/var/run/containerd/containerd.sock`.\n+\n### My container cannot resolve another container's name when using Docker user defined bridge {#docker-bridge}\nThis is normally indicated by errors like `bad address 'container-name'` when\n" } ]
Go
Apache License 2.0
google/gvisor
Add note about kubeadm to the FAQ Fixes #3277 PiperOrigin-RevId: 330853338
259,907
10.09.2020 10:38:19
25,200
50c99a86d1c6807c67cdc52102b1fc570426669f
[vfs] Disable nlink tests for overlayfs. Overlayfs intentionally does not compute nlink for directories (because it can be really expensive). Linux returns 1, VFS2 returns 2 and VFS1 actually calculates the correct value.
[ { "change_type": "MODIFY", "old_path": "test/syscalls/linux/stat.cc", "new_path": "test/syscalls/linux/stat.cc", "diff": "@@ -97,6 +97,11 @@ TEST_F(StatTest, FstatatSymlink) {\n}\nTEST_F(StatTest, Nlinks) {\n+ // Skip this test if we are testing overlayfs because overlayfs does not\n+ // (intentionally) return the correct nlink value for directories.\n+ // See fs/overlayfs/inode.c:ovl_getattr().\n+ SKIP_IF(ASSERT_NO_ERRNO_AND_VALUE(IsOverlayfs(GetAbsoluteTestTmpdir())));\n+\nTempPath basedir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());\n// Directory is initially empty, it should contain 2 links (one from itself,\n@@ -432,6 +437,11 @@ TEST_F(StatTest, ZeroLinksOpenFdRegularFileChild_NoRandomSave) {\n// Test link counts with a directory as the child.\nTEST_F(StatTest, LinkCountsWithDirChild) {\n+ // Skip this test if we are testing overlayfs because overlayfs does not\n+ // (intentionally) return the correct nlink value for directories.\n+ // See fs/overlayfs/inode.c:ovl_getattr().\n+ SKIP_IF(ASSERT_NO_ERRNO_AND_VALUE(IsOverlayfs(GetAbsoluteTestTmpdir())));\n+\nconst TempPath dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());\n// Before a child is added the two links are \".\" and the link from the parent.\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/statfs.cc", "new_path": "test/syscalls/linux/statfs.cc", "diff": "@@ -27,10 +27,6 @@ namespace testing {\nnamespace {\n-// From linux/magic.h. For some reason, not defined in the headers for some\n-// build environments.\n-#define OVERLAYFS_SUPER_MAGIC 0x794c7630\n-\nTEST(StatfsTest, CannotStatBadPath) {\nauto temp_file = NewTempAbsPathInDir(\"/tmp\");\n" }, { "change_type": "MODIFY", "old_path": "test/util/fs_util.cc", "new_path": "test/util/fs_util.cc", "diff": "@@ -649,5 +649,19 @@ PosixErrorOr<bool> IsTmpfs(const std::string& path) {\n}\n#endif // __linux__\n+PosixErrorOr<bool> IsOverlayfs(const std::string& path) {\n+ struct statfs stat;\n+ if (statfs(path.c_str(), &stat)) {\n+ if (errno == ENOENT) {\n+ // Nothing at path, don't raise this as an error. Instead, just report no\n+ // overlayfs at path.\n+ return false;\n+ }\n+ return PosixError(errno,\n+ absl::StrFormat(\"statfs(\\\"%s\\\", %#p)\", path, &stat));\n+ }\n+ return stat.f_type == OVERLAYFS_SUPER_MAGIC;\n+}\n+\n} // namespace testing\n} // namespace gvisor\n" }, { "change_type": "MODIFY", "old_path": "test/util/fs_util.h", "new_path": "test/util/fs_util.h", "diff": "@@ -38,6 +38,10 @@ constexpr int kOLargeFile = 00400000;\n#error \"Unknown architecture\"\n#endif\n+// From linux/magic.h. For some reason, not defined in the headers for some\n+// build environments.\n+#define OVERLAYFS_SUPER_MAGIC 0x794c7630\n+\n// Returns a status or the current working directory.\nPosixErrorOr<std::string> GetCWD();\n@@ -184,6 +188,9 @@ PosixErrorOr<std::string> ProcessExePath(int pid);\nPosixErrorOr<bool> IsTmpfs(const std::string& path);\n#endif // __linux__\n+// IsOverlayfs returns true if the file at path is backed by overlayfs.\n+PosixErrorOr<bool> IsOverlayfs(const std::string& path);\n+\nnamespace internal {\n// Not part of the public API.\nstd::string JoinPathImpl(std::initializer_list<absl::string_view> paths);\n" } ]
Go
Apache License 2.0
google/gvisor
[vfs] Disable nlink tests for overlayfs. Overlayfs intentionally does not compute nlink for directories (because it can be really expensive). Linux returns 1, VFS2 returns 2 and VFS1 actually calculates the correct value. PiperOrigin-RevId: 330967139
259,907
10.09.2020 11:40:46
25,200
14e0eb6e0f58da34246c85ec6aa2b4a9beabc63e
[vfs] Add vfs2 runtime tests.
[ { "change_type": "MODIFY", "old_path": "Makefile", "new_path": "Makefile", "diff": "@@ -157,6 +157,10 @@ syscall-tests: syscall-ptrace-tests syscall-kvm-tests syscall-native-tests\n@$(call submake,install-test-runtime)\n@$(call submake,test-runtime OPTIONS=\"--test_timeout=10800\" TARGETS=\"//test/runtimes:$*\")\n+%-runtime-tests_vfs2: load-runtimes_%\n+ @$(call submake,install-test-runtime RUNTIME=\"vfs2\" ARGS=\"--vfs2\")\n+ @$(call submake,test-runtime RUNTIME=\"vfs2\" OPTIONS=\"--test_timeout=10800\" TARGETS=\"//test/runtimes:$*\")\n+\ndo-tests: runsc\n@$(call submake,run TARGETS=\"//runsc\" ARGS=\"--rootless do true\")\n@$(call submake,run TARGETS=\"//runsc\" ARGS=\"--rootless -network=none do true\")\n" } ]
Go
Apache License 2.0
google/gvisor
[vfs] Add vfs2 runtime tests. PiperOrigin-RevId: 330981912
259,907
10.09.2020 16:48:03
25,200
365545855f7713236d77d3e263ad09ebffa85bb2
[vfs] Disable inode number equality check for overlayfs. Overlayfs does not persist a directory's inode number even while it is mounted. See fs/overlayfs/inode.c:ovl_map_dev_ino(). VFS2 generates a new inode number for directories on every lookup.
[ { "change_type": "MODIFY", "old_path": "test/syscalls/linux/mount.cc", "new_path": "test/syscalls/linux/mount.cc", "diff": "@@ -147,8 +147,15 @@ TEST(MountTest, UmountDetach) {\n// Unmount the tmpfs.\nmount.Release()();\n+ // Only check for inode number equality if the directory is not in overlayfs.\n+ // If xino option is not enabled and if all overlayfs layers do not belong to\n+ // the same filesystem then \"the value of st_ino for directory objects may not\n+ // be persistent and could change even while the overlay filesystem is\n+ // mounted.\" -- Documentation/filesystems/overlayfs.txt\n+ if (!ASSERT_NO_ERRNO_AND_VALUE(IsOverlayfs(dir.path()))) {\nconst struct stat after2 = ASSERT_NO_ERRNO_AND_VALUE(Stat(dir.path()));\nEXPECT_EQ(before.st_ino, after2.st_ino);\n+ }\n// Can still read file after unmounting.\nstd::vector<char> buf(sizeof(kContents));\n@@ -213,9 +220,16 @@ TEST(MountTest, MountTmpfs) {\n}\n// Now that dir is unmounted again, we should have the old inode back.\n+ // Only check for inode number equality if the directory is not in overlayfs.\n+ // If xino option is not enabled and if all overlayfs layers do not belong to\n+ // the same filesystem then \"the value of st_ino for directory objects may not\n+ // be persistent and could change even while the overlay filesystem is\n+ // mounted.\" -- Documentation/filesystems/overlayfs.txt\n+ if (!ASSERT_NO_ERRNO_AND_VALUE(IsOverlayfs(dir.path()))) {\nconst struct stat after = ASSERT_NO_ERRNO_AND_VALUE(Stat(dir.path()));\nEXPECT_EQ(before.st_ino, after.st_ino);\n}\n+}\nTEST(MountTest, MountTmpfsMagicValIgnored) {\nSKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_ADMIN)));\n" } ]
Go
Apache License 2.0
google/gvisor
[vfs] Disable inode number equality check for overlayfs. Overlayfs does not persist a directory's inode number even while it is mounted. See fs/overlayfs/inode.c:ovl_map_dev_ino(). VFS2 generates a new inode number for directories on every lookup. PiperOrigin-RevId: 331045037
259,962
11.09.2020 11:53:54
25,200
831ab2dd993e834933e0b23310cd616dbc0551ad
Fix host unix socket to not swallow EOF incorrectly. Fixes an error where, for a host-backed unix dgram socket with a receive buffer larger than the host send buffer size, we would end up swallowing EOF from the recvmsg syscall, causing read() to block forever.
[ { "change_type": "MODIFY", "old_path": "pkg/safemem/BUILD", "new_path": "pkg/safemem/BUILD", "diff": "@@ -11,9 +11,7 @@ go_library(\n\"seq_unsafe.go\",\n],\nvisibility = [\"//:sandbox\"],\n- deps = [\n- \"//pkg/safecopy\",\n- ],\n+ deps = [\"//pkg/safecopy\"],\n)\ngo_test(\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fs/host/socket_unsafe.go", "new_path": "pkg/sentry/fs/host/socket_unsafe.go", "diff": "@@ -65,10 +65,10 @@ func fdReadVec(fd int, bufs [][]byte, control []byte, peek bool, maxlen int64) (\ncontrolTrunc = msg.Flags&syscall.MSG_CTRUNC == syscall.MSG_CTRUNC\nif n > length {\n- return length, n, msg.Controllen, controlTrunc, err\n+ return length, n, msg.Controllen, controlTrunc, nil\n}\n- return n, n, msg.Controllen, controlTrunc, err\n+ return n, n, msg.Controllen, controlTrunc, nil\n}\n// fdWriteVec sends from bufs to fd.\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/host/socket_unsafe.go", "new_path": "pkg/sentry/fsimpl/host/socket_unsafe.go", "diff": "@@ -63,10 +63,10 @@ func fdReadVec(fd int, bufs [][]byte, control []byte, peek bool, maxlen int64) (\ncontrolTrunc = msg.Flags&syscall.MSG_CTRUNC == syscall.MSG_CTRUNC\nif n > length {\n- return length, n, msg.Controllen, controlTrunc, err\n+ return length, n, msg.Controllen, controlTrunc, nil\n}\n- return n, n, msg.Controllen, controlTrunc, err\n+ return n, n, msg.Controllen, controlTrunc, nil\n}\n// fdWriteVec sends from bufs to fd.\n" } ]
Go
Apache License 2.0
google/gvisor
Fix host unix socket to not swallow EOF incorrectly. Fixes an error where, for a host-backed unix dgram socket with a receive buffer larger than the host send buffer size, we would end up swallowing EOF from the recvmsg syscall, causing read() to block forever. PiperOrigin-RevId: 331192810
259,885
11.09.2020 13:00:02
25,200
9a5635eb179f3a21c51e912aa87277025d7950e9
Implement copy-up-coherent mmap for VFS2 overlayfs. This is very similar to copy-up-coherent mmap in the VFS1 overlay, with the minor wrinkle that there is no fs.InodeOperations.Mappable(). Updates
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/overlay/copy_up.go", "new_path": "pkg/sentry/fsimpl/overlay/copy_up.go", "diff": "@@ -23,6 +23,7 @@ import (\n\"gvisor.dev/gvisor/pkg/context\"\n\"gvisor.dev/gvisor/pkg/fspath\"\n\"gvisor.dev/gvisor/pkg/sentry/kernel/auth\"\n+ \"gvisor.dev/gvisor/pkg/sentry/memmap\"\n\"gvisor.dev/gvisor/pkg/sentry/vfs\"\n\"gvisor.dev/gvisor/pkg/syserror\"\n\"gvisor.dev/gvisor/pkg/usermem\"\n@@ -81,6 +82,8 @@ func (d *dentry) copyUpLocked(ctx context.Context) error {\nStart: d.parent.upperVD,\nPath: fspath.Parse(d.name),\n}\n+ // Used during copy-up of memory-mapped regular files.\n+ var mmapOpts *memmap.MMapOpts\ncleanupUndoCopyUp := func() {\nvar err error\nif ftype == linux.S_IFDIR {\n@@ -136,6 +139,25 @@ func (d *dentry) copyUpLocked(ctx context.Context) error {\nbreak\n}\n}\n+ d.mapsMu.Lock()\n+ defer d.mapsMu.Unlock()\n+ if d.wrappedMappable != nil {\n+ // We may have memory mappings of the file on the lower layer.\n+ // Switch to mapping the file on the upper layer instead.\n+ mmapOpts = &memmap.MMapOpts{\n+ Perms: usermem.ReadWrite,\n+ MaxPerms: usermem.ReadWrite,\n+ }\n+ if err := newFD.ConfigureMMap(ctx, mmapOpts); err != nil {\n+ cleanupUndoCopyUp()\n+ return err\n+ }\n+ if mmapOpts.MappingIdentity != nil {\n+ mmapOpts.MappingIdentity.DecRef(ctx)\n+ }\n+ // Don't actually switch Mappables until the end of copy-up; see\n+ // below for why.\n+ }\nif err := newFD.SetStat(ctx, vfs.SetStatOptions{\nStat: linux.Statx{\nMask: linux.STATX_UID | linux.STATX_GID,\n@@ -265,6 +287,62 @@ func (d *dentry) copyUpLocked(ctx context.Context) error {\natomic.StoreUint64(&d.ino, upperStat.Ino)\n}\n+ if mmapOpts != nil && mmapOpts.Mappable != nil {\n+ // Note that if mmapOpts != nil, then d.mapsMu is locked for writing\n+ // (from the S_IFREG path above).\n+\n+ // Propagate mappings of d to the new Mappable. Remember which mappings\n+ // we added so we can remove them on failure.\n+ upperMappable := mmapOpts.Mappable\n+ allAdded := make(map[memmap.MappableRange]memmap.MappingsOfRange)\n+ for seg := d.lowerMappings.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {\n+ added := make(memmap.MappingsOfRange)\n+ for m := range seg.Value() {\n+ if err := upperMappable.AddMapping(ctx, m.MappingSpace, m.AddrRange, seg.Start(), m.Writable); err != nil {\n+ for m := range added {\n+ upperMappable.RemoveMapping(ctx, m.MappingSpace, m.AddrRange, seg.Start(), m.Writable)\n+ }\n+ for mr, mappings := range allAdded {\n+ for m := range mappings {\n+ upperMappable.RemoveMapping(ctx, m.MappingSpace, m.AddrRange, mr.Start, m.Writable)\n+ }\n+ }\n+ return err\n+ }\n+ added[m] = struct{}{}\n+ }\n+ allAdded[seg.Range()] = added\n+ }\n+\n+ // Switch to the new Mappable. 
We do this at the end of copy-up\n+ // because:\n+ //\n+ // - We need to switch Mappables (by changing d.wrappedMappable) before\n+ // invalidating Translations from the old Mappable (to pick up\n+ // Translations from the new one).\n+ //\n+ // - We need to lock d.dataMu while changing d.wrappedMappable, but\n+ // must invalidate Translations with d.dataMu unlocked (due to lock\n+ // ordering).\n+ //\n+ // - Consequently, once we unlock d.dataMu, other threads may\n+ // immediately observe the new (copied-up) Mappable, which we want to\n+ // delay until copy-up is guaranteed to succeed.\n+ d.dataMu.Lock()\n+ lowerMappable := d.wrappedMappable\n+ d.wrappedMappable = upperMappable\n+ d.dataMu.Unlock()\n+ d.lowerMappings.InvalidateAll(memmap.InvalidateOpts{})\n+\n+ // Remove mappings from the old Mappable.\n+ for seg := d.lowerMappings.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {\n+ for m := range seg.Value() {\n+ lowerMappable.RemoveMapping(ctx, m.MappingSpace, m.AddrRange, seg.Start(), m.Writable)\n+ }\n+ }\n+ d.lowerMappings.RemoveAll()\n+ }\n+\natomic.StoreUint32(&d.copiedUp, 1)\nreturn nil\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/overlay/non_directory.go", "new_path": "pkg/sentry/fsimpl/overlay/non_directory.go", "diff": "@@ -23,6 +23,7 @@ import (\n\"gvisor.dev/gvisor/pkg/sentry/memmap\"\n\"gvisor.dev/gvisor/pkg/sentry/vfs\"\n\"gvisor.dev/gvisor/pkg/sync\"\n+ \"gvisor.dev/gvisor/pkg/syserror\"\n\"gvisor.dev/gvisor/pkg/usermem\"\n)\n@@ -256,10 +257,105 @@ func (fd *nonDirectoryFD) Sync(ctx context.Context) error {\n// ConfigureMMap implements vfs.FileDescriptionImpl.ConfigureMMap.\nfunc (fd *nonDirectoryFD) ConfigureMMap(ctx context.Context, opts *memmap.MMapOpts) error {\n- wrappedFD, err := fd.getCurrentFD(ctx)\n+ if err := fd.ensureMappable(ctx, opts); err != nil {\n+ return err\n+ }\n+ return vfs.GenericConfigureMMap(&fd.vfsfd, fd.dentry(), opts)\n+}\n+\n+// ensureMappable ensures that fd.dentry().wrappedMappable is not nil.\n+func (fd *nonDirectoryFD) ensureMappable(ctx context.Context, opts *memmap.MMapOpts) error {\n+ d := fd.dentry()\n+\n+ // Fast path if we already have a Mappable for the current top layer.\n+ if atomic.LoadUint32(&d.isMappable) != 0 {\n+ return nil\n+ }\n+\n+ // Only permit mmap of regular files, since other file types may have\n+ // unpredictable behavior when mmapped (e.g. 
/dev/zero).\n+ if atomic.LoadUint32(&d.mode)&linux.S_IFMT != linux.S_IFREG {\n+ return syserror.ENODEV\n+ }\n+\n+ // Get a Mappable for the current top layer.\n+ fd.mu.Lock()\n+ defer fd.mu.Unlock()\n+ d.copyMu.RLock()\n+ defer d.copyMu.RUnlock()\n+ if atomic.LoadUint32(&d.isMappable) != 0 {\n+ return nil\n+ }\n+ wrappedFD, err := fd.currentFDLocked(ctx)\nif err != nil {\nreturn err\n}\n- defer wrappedFD.DecRef(ctx)\n- return wrappedFD.ConfigureMMap(ctx, opts)\n+ if err := wrappedFD.ConfigureMMap(ctx, opts); err != nil {\n+ return err\n+ }\n+ if opts.MappingIdentity != nil {\n+ opts.MappingIdentity.DecRef(ctx)\n+ opts.MappingIdentity = nil\n+ }\n+ // Use this Mappable for all mappings of this layer (unless we raced with\n+ // another call to ensureMappable).\n+ d.mapsMu.Lock()\n+ defer d.mapsMu.Unlock()\n+ d.dataMu.Lock()\n+ defer d.dataMu.Unlock()\n+ if d.wrappedMappable == nil {\n+ d.wrappedMappable = opts.Mappable\n+ atomic.StoreUint32(&d.isMappable, 1)\n+ }\n+ return nil\n+}\n+\n+// AddMapping implements memmap.Mappable.AddMapping.\n+func (d *dentry) AddMapping(ctx context.Context, ms memmap.MappingSpace, ar usermem.AddrRange, offset uint64, writable bool) error {\n+ d.mapsMu.Lock()\n+ defer d.mapsMu.Unlock()\n+ if err := d.wrappedMappable.AddMapping(ctx, ms, ar, offset, writable); err != nil {\n+ return err\n+ }\n+ if !d.isCopiedUp() {\n+ d.lowerMappings.AddMapping(ms, ar, offset, writable)\n+ }\n+ return nil\n+}\n+\n+// RemoveMapping implements memmap.Mappable.RemoveMapping.\n+func (d *dentry) RemoveMapping(ctx context.Context, ms memmap.MappingSpace, ar usermem.AddrRange, offset uint64, writable bool) {\n+ d.mapsMu.Lock()\n+ defer d.mapsMu.Unlock()\n+ d.wrappedMappable.RemoveMapping(ctx, ms, ar, offset, writable)\n+ if !d.isCopiedUp() {\n+ d.lowerMappings.RemoveMapping(ms, ar, offset, writable)\n+ }\n+}\n+\n+// CopyMapping implements memmap.Mappable.CopyMapping.\n+func (d *dentry) CopyMapping(ctx context.Context, ms memmap.MappingSpace, srcAR, dstAR usermem.AddrRange, offset uint64, writable bool) error {\n+ d.mapsMu.Lock()\n+ defer d.mapsMu.Unlock()\n+ if err := d.wrappedMappable.CopyMapping(ctx, ms, srcAR, dstAR, offset, writable); err != nil {\n+ return err\n+ }\n+ if !d.isCopiedUp() {\n+ d.lowerMappings.AddMapping(ms, dstAR, offset, writable)\n+ }\n+ return nil\n+}\n+\n+// Translate implements memmap.Mappable.Translate.\n+func (d *dentry) Translate(ctx context.Context, required, optional memmap.MappableRange, at usermem.AccessType) ([]memmap.Translation, error) {\n+ d.dataMu.RLock()\n+ defer d.dataMu.RUnlock()\n+ return d.wrappedMappable.Translate(ctx, required, optional, at)\n+}\n+\n+// InvalidateUnsavable implements memmap.Mappable.InvalidateUnsavable.\n+func (d *dentry) InvalidateUnsavable(ctx context.Context) error {\n+ d.mapsMu.Lock()\n+ defer d.mapsMu.Unlock()\n+ return d.wrappedMappable.InvalidateUnsavable(ctx)\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/overlay/overlay.go", "new_path": "pkg/sentry/fsimpl/overlay/overlay.go", "diff": "// filesystem.renameMu\n// dentry.dirMu\n// dentry.copyMu\n+// *** \"memmap.Mappable locks\" below this point\n+// dentry.mapsMu\n+// *** \"memmap.Mappable locks taken by Translate\" below this point\n+// dentry.dataMu\n//\n// Locking dentry.dirMu in multiple dentries requires that parent dentries are\n// locked before child dentries, and that filesystem.renameMu is locked to\n@@ -37,6 +41,7 @@ import (\n\"gvisor.dev/gvisor/pkg/fspath\"\nfslock 
\"gvisor.dev/gvisor/pkg/sentry/fs/lock\"\n\"gvisor.dev/gvisor/pkg/sentry/kernel/auth\"\n+ \"gvisor.dev/gvisor/pkg/sentry/memmap\"\n\"gvisor.dev/gvisor/pkg/sentry/vfs\"\n\"gvisor.dev/gvisor/pkg/sync\"\n\"gvisor.dev/gvisor/pkg/syserror\"\n@@ -419,6 +424,35 @@ type dentry struct {\ndevMinor uint32\nino uint64\n+ // If this dentry represents a regular file, then:\n+ //\n+ // - mapsMu is used to synchronize between copy-up and memmap.Mappable\n+ // methods on dentry preceding mm.MemoryManager.activeMu in the lock order.\n+ //\n+ // - dataMu is used to synchronize between copy-up and\n+ // dentry.(memmap.Mappable).Translate.\n+ //\n+ // - lowerMappings tracks memory mappings of the file. lowerMappings is\n+ // used to invalidate mappings of the lower layer when the file is copied\n+ // up to ensure that they remain coherent with subsequent writes to the\n+ // file. (Note that, as of this writing, Linux overlayfs does not do this;\n+ // this feature is a gVisor extension.) lowerMappings is protected by\n+ // mapsMu.\n+ //\n+ // - If this dentry is copied-up, then wrappedMappable is the Mappable\n+ // obtained from a call to the current top layer's\n+ // FileDescription.ConfigureMMap(). Once wrappedMappable becomes non-nil\n+ // (from a call to nonDirectoryFD.ensureMappable()), it cannot become nil.\n+ // wrappedMappable is protected by mapsMu and dataMu.\n+ //\n+ // - isMappable is non-zero iff wrappedMappable is non-nil. isMappable is\n+ // accessed using atomic memory operations.\n+ mapsMu sync.Mutex\n+ lowerMappings memmap.MappingSet\n+ dataMu sync.RWMutex\n+ wrappedMappable memmap.Mappable\n+ isMappable uint32\n+\nlocks vfs.FileLocks\n}\n" } ]
Go
Apache License 2.0
google/gvisor
Implement copy-up-coherent mmap for VFS2 overlayfs. This is very similar to copy-up-coherent mmap in the VFS1 overlay, with the minor wrinkle that there is no fs.InodeOperations.Mappable(). Updates #1199 PiperOrigin-RevId: 331206314
259,881
11.09.2020 13:00:30
25,200
490e5c83bd647639e36a9f7e871af33fa64b9bb1
Make nogo more robust to a variety of stdlib layouts.
[ { "change_type": "MODIFY", "old_path": "tools/nogo/nogo.go", "new_path": "tools/nogo/nogo.go", "diff": "@@ -202,29 +202,41 @@ func checkStdlib(config *stdlibConfig, ac map[*analysis.Analyzer]matcher) ([]str\nconfig.Srcs[i] = path.Clean(config.Srcs[i])\n}\n- // Calculate the root directory.\n- longestPrefix := path.Dir(config.Srcs[0])\n- for _, file := range config.Srcs[1:] {\n- for i := 0; i < len(file) && i < len(longestPrefix); i++ {\n- if file[i] != longestPrefix[i] {\n- // Truncate here; will stop the loop.\n- longestPrefix = longestPrefix[:i]\n- break\n- }\n- }\n+ // Calculate the root source directory. This is always a directory\n+ // named 'src', of which we simply take the first we find. This is a\n+ // bit fragile, but works for all currently known Go source\n+ // configurations.\n+ //\n+ // Note that there may be extra files outside of the root source\n+ // directory; we simply ignore those.\n+ rootSrcPrefix := \"\"\n+ for _, file := range config.Srcs {\n+ const src = \"/src/\"\n+ i := strings.Index(file, src)\n+ if i == -1 {\n+ // Superfluous file.\n+ continue\n}\n- if len(longestPrefix) > 0 && longestPrefix[len(longestPrefix)-1] != '/' {\n- longestPrefix += \"/\"\n+\n+ // Index of first character after /src/.\n+ i += len(src)\n+ rootSrcPrefix = file[:i]\n+ break\n}\n// Aggregate all files by directory.\npackages := make(map[string]*packageConfig)\nfor _, file := range config.Srcs {\n+ if !strings.HasPrefix(file, rootSrcPrefix) {\n+ // Superflouous file.\n+ continue\n+ }\n+\nd := path.Dir(file)\n- if len(longestPrefix) >= len(d) {\n+ if len(rootSrcPrefix) >= len(d) {\ncontinue // Not a file.\n}\n- pkg := path.Dir(file)[len(longestPrefix):]\n+ pkg := d[len(rootSrcPrefix):]\n// Skip cmd packages and obvious test files: see above.\nif strings.HasPrefix(pkg, \"cmd/\") || strings.HasSuffix(file, \"_test.go\") {\ncontinue\n@@ -303,6 +315,11 @@ func checkStdlib(config *stdlibConfig, ac map[*analysis.Analyzer]matcher) ([]str\ncheckOne(pkg)\n}\n+ // Sanity check.\n+ if len(stdlibFacts) == 0 {\n+ return nil, nil, fmt.Errorf(\"no stdlib facts found: misconfiguration?\")\n+ }\n+\n// Write out all findings.\nfactData, err := json.Marshal(stdlibFacts)\nif err != nil {\n" } ]
Go
Apache License 2.0
google/gvisor
Make nogo more robust to a variety of stdlib layouts. PiperOrigin-RevId: 331206424
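The key idea in this change — anchor on the first "/src/" path component rather than computing a longest common prefix that a stray file can break — can be tried in a small standalone program. The file paths below are made up for illustration:

```go
package main

import (
	"fmt"
	"strings"
)

// rootSrcPrefix returns the path prefix up to and including the first "/src/"
// component found in any of the given files, instead of computing the longest
// common prefix (which a single stray file outside the tree can break).
func rootSrcPrefix(files []string) string {
	const src = "/src/"
	for _, f := range files {
		if i := strings.Index(f, src); i != -1 {
			return f[:i+len(src)]
		}
	}
	return ""
}

func main() {
	// Made-up paths for illustration.
	files := []string{
		"/tmp/extra/README",        // no /src/ component: skipped
		"/goroot/src/fmt/print.go", // anchors the prefix
		"/goroot/src/strings/index.go",
	}
	prefix := rootSrcPrefix(files)
	fmt.Println(prefix)                               // /goroot/src/
	fmt.Println(strings.TrimPrefix(files[2], prefix)) // strings/index.go
}
```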
259,933
11.09.2020 13:26:33
25,200
325f7036b051a705d5ae595d2f3c351084262532
Use correct test device name in Fuchsia packetimpact Packetimpact on Fuchsia was formerly using the Linux test device name. This change fixes that.
[ { "change_type": "MODIFY", "old_path": "test/packetimpact/runner/dut.go", "new_path": "test/packetimpact/runner/dut.go", "diff": "@@ -171,11 +171,8 @@ func TestWithDUT(ctx context.Context, t *testing.T, mkDevice func(*dockerutil.Co\n}},\n}\n- // Add ctrlNet as eth1 and testNet as eth2.\n- const testNetDev = \"eth2\"\n-\ndevice := mkDevice(dut)\n- remoteIPv6, remoteMAC, dutDeviceID := device.Prepare(ctx, t, runOpts, ctrlNet, testNet, containerAddr)\n+ remoteIPv6, remoteMAC, dutDeviceID, testNetDev := device.Prepare(ctx, t, runOpts, ctrlNet, testNet, containerAddr)\n// Create the Docker container for the testbench.\ntestbench := dockerutil.MakeNativeContainer(ctx, logger(\"testbench\"))\n@@ -282,8 +279,8 @@ func TestWithDUT(ctx context.Context, t *testing.T, mkDevice func(*dockerutil.Co\n// DUT describes how to setup/teardown the dut for packetimpact tests.\ntype DUT interface {\n// Prepare prepares the dut, starts posix_server and returns the IPv6, MAC\n- // address and the interface ID for the testNet on DUT.\n- Prepare(ctx context.Context, t *testing.T, runOpts dockerutil.RunOpts, ctrlNet, testNet *dockerutil.Network, containerAddr net.IP) (net.IP, net.HardwareAddr, uint32)\n+ // address, the interface ID, and the interface name for the testNet on DUT.\n+ Prepare(ctx context.Context, t *testing.T, runOpts dockerutil.RunOpts, ctrlNet, testNet *dockerutil.Network, containerAddr net.IP) (net.IP, net.HardwareAddr, uint32, string)\n// Logs retrieves the logs from the dut.\nLogs(ctx context.Context) (string, error)\n}\n@@ -301,7 +298,7 @@ func NewDockerDUT(c *dockerutil.Container) DUT {\n}\n// Prepare implements DUT.Prepare.\n-func (dut *DockerDUT) Prepare(ctx context.Context, t *testing.T, runOpts dockerutil.RunOpts, ctrlNet, testNet *dockerutil.Network, containerAddr net.IP) (net.IP, net.HardwareAddr, uint32) {\n+func (dut *DockerDUT) Prepare(ctx context.Context, t *testing.T, runOpts dockerutil.RunOpts, ctrlNet, testNet *dockerutil.Network, containerAddr net.IP) (net.IP, net.HardwareAddr, uint32, string) {\nconst containerPosixServerBinary = \"/packetimpact/posix_server\"\ndut.c.CopyFiles(&runOpts, \"/packetimpact\", \"test/packetimpact/dut/posix_server\")\n@@ -345,7 +342,9 @@ func (dut *DockerDUT) Prepare(ctx context.Context, t *testing.T, runOpts dockeru\nt.Fatalf(\"unable to set IPv6 address on container %s\", dut.c.Name)\n}\n}\n- return remoteIPv6, dutDeviceInfo.MAC, dutDeviceInfo.ID\n+ const testNetDev = \"eth2\"\n+\n+ return remoteIPv6, dutDeviceInfo.MAC, dutDeviceInfo.ID, testNetDev\n}\n// Logs implements DUT.Logs.\n" } ]
Go
Apache License 2.0
google/gvisor
Use correct test device name in Fuchsia packetimpact Packetimpact on Fuchsia was formerly using the Linux test device name. This change fixes that. PiperOrigin-RevId: 331211518
259,964
12.09.2020 23:19:34
25,200
b6ca96b9b9e71ba57653cf9d3ef0767771949378
Cap reassembled IPv6 packets at 65535 octets. IPv4 can accept 65536-octet reassembled packets. Test: ipv4_test.TestInvalidFragments, ipv4_test.TestReceiveFragments, ipv6.TestInvalidIPv6Fragments, ipv6.TestReceiveIPv6Fragments. Fixes
[ { "change_type": "MODIFY", "old_path": "pkg/tcpip/header/ipv4.go", "new_path": "pkg/tcpip/header/ipv4.go", "diff": "@@ -88,6 +88,16 @@ const (\n// units, the header cannot exceed 15*4 = 60 bytes.\nIPv4MaximumHeaderSize = 60\n+ // IPv4MaximumPayloadSize is the maximum size of a valid IPv4 payload.\n+ //\n+ // Linux limits this to 65,515 octets (the max IP datagram size - the IPv4\n+ // header size). But RFC 791 section 3.2 discusses the design of the IPv4\n+ // fragment \"allows 2**13 = 8192 fragments of 8 octets each for a total of\n+ // 65,536 octets. Note that this is consistent with the the datagram total\n+ // length field (of course, the header is counted in the total length and not\n+ // in the fragments).\"\n+ IPv4MaximumPayloadSize = 65536\n+\n// MinIPFragmentPayloadSize is the minimum number of payload bytes that\n// the first fragment must carry when an IPv4 packet is fragmented.\nMinIPFragmentPayloadSize = 8\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/header/ipv6.go", "new_path": "pkg/tcpip/header/ipv6.go", "diff": "@@ -74,6 +74,10 @@ const (\n// IPv6AddressSize is the size, in bytes, of an IPv6 address.\nIPv6AddressSize = 16\n+ // IPv6MaximumPayloadSize is the maximum size of a valid IPv6 payload per\n+ // RFC 8200 Section 4.5.\n+ IPv6MaximumPayloadSize = 65535\n+\n// IPv6ProtocolNumber is IPv6's network protocol number.\nIPv6ProtocolNumber tcpip.NetworkProtocolNumber = 0x86dd\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/header/udp.go", "new_path": "pkg/tcpip/header/udp.go", "diff": "@@ -16,6 +16,7 @@ package header\nimport (\n\"encoding/binary\"\n+ \"math\"\n\"gvisor.dev/gvisor/pkg/tcpip\"\n)\n@@ -55,6 +56,10 @@ const (\n// UDPMinimumSize is the minimum size of a valid UDP packet.\nUDPMinimumSize = 8\n+ // UDPMaximumSize is the maximum size of a valid UDP packet. The length field\n+ // in the UDP header is 16 bits as per RFC 768.\n+ UDPMaximumSize = math.MaxUint16\n+\n// UDPProtocolNumber is UDP's transport protocol number.\nUDPProtocolNumber tcpip.TransportProtocolNumber = 17\n)\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/network/ipv4/ipv4.go", "new_path": "pkg/tcpip/network/ipv4/ipv4.go", "diff": "@@ -404,11 +404,15 @@ func (e *endpoint) HandlePacket(r *stack.Route, pkt *stack.PacketBuffer) {\nreturn\n}\n// The packet is a fragment, let's try to reassemble it.\n- last := h.FragmentOffset() + uint16(pkt.Data.Size()) - 1\n- // Drop the packet if the fragmentOffset is incorrect. i.e the\n- // combination of fragmentOffset and pkt.Data.size() causes a\n- // wrap around resulting in last being less than the offset.\n- if last < h.FragmentOffset() {\n+ start := h.FragmentOffset()\n+ // Drop the fragment if the size of the reassembled payload would exceed the\n+ // maximum payload size.\n+ //\n+ // Note that this addition doesn't overflow even on 32bit architecture\n+ // because pkt.Data.Size() should not exceed 65535 (the max IP datagram\n+ // size). 
Otherwise the packet would've been rejected as invalid before\n+ // reaching here.\n+ if int(start)+pkt.Data.Size() > header.IPv4MaximumPayloadSize {\nr.Stats().IP.MalformedPacketsReceived.Increment()\nr.Stats().IP.MalformedFragmentsReceived.Increment()\nreturn\n@@ -425,8 +429,8 @@ func (e *endpoint) HandlePacket(r *stack.Route, pkt *stack.PacketBuffer) {\nID: uint32(h.ID()),\nProtocol: proto,\n},\n- h.FragmentOffset(),\n- last,\n+ start,\n+ start+uint16(pkt.Data.Size())-1,\nh.More(),\nproto,\npkt.Data,\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/network/ipv4/ipv4_test.go", "new_path": "pkg/tcpip/network/ipv4/ipv4_test.go", "diff": "@@ -372,115 +372,308 @@ func TestFragmentationErrors(t *testing.T) {\n}\nfunc TestInvalidFragments(t *testing.T) {\n+ const (\n+ nicID = 1\n+ linkAddr = tcpip.LinkAddress(\"\\x0a\\x0b\\x0c\\x0d\\x0e\\x0e\")\n+ addr1 = \"\\x0a\\x00\\x00\\x01\"\n+ addr2 = \"\\x0a\\x00\\x00\\x02\"\n+ tos = 0\n+ ident = 1\n+ ttl = 48\n+ protocol = 6\n+ )\n+\n+ payloadGen := func(payloadLen int) []byte {\n+ payload := make([]byte, payloadLen)\n+ for i := 0; i < len(payload); i++ {\n+ payload[i] = 0x30\n+ }\n+ return payload\n+ }\n+\n+ type fragmentData struct {\n+ ipv4fields header.IPv4Fields\n+ payload []byte\n+ autoChecksum bool // if true, the Checksum field will be overwritten.\n+ }\n+\n// These packets have both IHL and TotalLength set to 0.\n- testCases := []struct {\n+ tests := []struct {\nname string\n- packets [][]byte\n+ fragments []fragmentData\nwantMalformedIPPackets uint64\nwantMalformedFragments uint64\n}{\n{\n- \"ihl_totallen_zero_valid_frag_offset\",\n- [][]byte{\n- {0x40, 0x30, 0x00, 0x00, 0x6c, 0x74, 0x7d, 0x30, 0x30, 0x30, 0x30, 0x30, 0x39, 0x32, 0x39, 0x33, 0xff, 0xff, 0xff, 0xff, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30},\n+ name: \"IHL and TotalLength zero, FragmentOffset non-zero\",\n+ fragments: []fragmentData{\n+ {\n+ ipv4fields: header.IPv4Fields{\n+ IHL: 0,\n+ TOS: tos,\n+ TotalLength: 0,\n+ ID: ident,\n+ Flags: header.IPv4FlagDontFragment | header.IPv4FlagMoreFragments,\n+ FragmentOffset: 59776,\n+ TTL: ttl,\n+ Protocol: protocol,\n+ SrcAddr: addr1,\n+ DstAddr: addr2,\n+ },\n+ payload: payloadGen(12),\n+ autoChecksum: true,\n},\n- 1,\n- 0,\n},\n+ wantMalformedIPPackets: 1,\n+ wantMalformedFragments: 0,\n+ },\n+ {\n+ name: \"IHL and TotalLength zero, FragmentOffset zero\",\n+ fragments: []fragmentData{\n{\n- \"ihl_totallen_zero_invalid_frag_offset\",\n- [][]byte{\n- {0x40, 0x30, 0x00, 0x00, 0x6c, 0x74, 0x20, 0x00, 0x30, 0x30, 0x30, 0x30, 0x39, 0x32, 0x39, 0x33, 0xff, 0xff, 0xff, 0xff, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30},\n+ ipv4fields: header.IPv4Fields{\n+ IHL: 0,\n+ TOS: tos,\n+ TotalLength: 0,\n+ ID: ident,\n+ Flags: header.IPv4FlagMoreFragments,\n+ FragmentOffset: 0,\n+ TTL: ttl,\n+ Protocol: protocol,\n+ SrcAddr: addr1,\n+ DstAddr: addr2,\n+ },\n+ payload: payloadGen(12),\n+ autoChecksum: true,\n+ },\n},\n- 1,\n- 0,\n+ wantMalformedIPPackets: 1,\n+ wantMalformedFragments: 0,\n},\n{\n- // Total Length of 37(20 bytes IP header + 17 bytes of\n- // payload)\n- // Frag Offset of 0x1ffe = 8190*8 = 65520\n- // Leading to the fragment end to be past 65535.\n- \"ihl_totallen_valid_invalid_frag_offset_1\",\n- [][]byte{\n- {0x45, 0x30, 0x00, 0x25, 0x6c, 0x74, 0x1f, 0xfe, 0x30, 0x30, 0x30, 0x30, 0x39, 0x32, 0x39, 0x33, 0xff, 0xff, 0xff, 0xff, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30},\n+ // Payload 17 octets and Fragment offset 
65520\n+ // Leading to the fragment end to be past 65536.\n+ name: \"fragment ends past 65536\",\n+ fragments: []fragmentData{\n+ {\n+ ipv4fields: header.IPv4Fields{\n+ IHL: header.IPv4MinimumSize,\n+ TOS: tos,\n+ TotalLength: header.IPv4MinimumSize + 17,\n+ ID: ident,\n+ Flags: 0,\n+ FragmentOffset: 65520,\n+ TTL: ttl,\n+ Protocol: protocol,\n+ SrcAddr: addr1,\n+ DstAddr: addr2,\n+ },\n+ payload: payloadGen(17),\n+ autoChecksum: true,\n+ },\n},\n- 1,\n- 1,\n+ wantMalformedIPPackets: 1,\n+ wantMalformedFragments: 1,\n},\n- // The following 3 tests were found by running a fuzzer and were\n- // triggering a panic in the IPv4 reassembler code.\n{\n- \"ihl_less_than_ipv4_minimum_size_1\",\n- [][]byte{\n- {0x42, 0x30, 0x0, 0x30, 0x30, 0x40, 0x0, 0xf3, 0x30, 0x1, 0x30, 0x30, 0x73, 0x73, 0x69, 0x6e, 0xff, 0xff, 0xff, 0xff, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30},\n- {0x42, 0x30, 0x0, 0x8, 0x30, 0x40, 0x20, 0x0, 0x30, 0x1, 0x30, 0x30, 0x73, 0x73, 0x69, 0x6e, 0xff, 0xff, 0xff, 0xff, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30},\n+ // Payload 16 octets and fragment offset 65520\n+ // Leading to the fragment end to be exactly 65536.\n+ name: \"fragment ends exactly at 65536\",\n+ fragments: []fragmentData{\n+ {\n+ ipv4fields: header.IPv4Fields{\n+ IHL: header.IPv4MinimumSize,\n+ TOS: tos,\n+ TotalLength: header.IPv4MinimumSize + 16,\n+ ID: ident,\n+ Flags: 0,\n+ FragmentOffset: 65520,\n+ TTL: ttl,\n+ Protocol: protocol,\n+ SrcAddr: addr1,\n+ DstAddr: addr2,\n+ },\n+ payload: payloadGen(16),\n+ autoChecksum: true,\n+ },\n},\n- 2,\n- 0,\n+ wantMalformedIPPackets: 0,\n+ wantMalformedFragments: 0,\n},\n{\n- \"ihl_less_than_ipv4_minimum_size_2\",\n- [][]byte{\n- {0x42, 0x30, 0x0, 0x30, 0x30, 0x40, 0xb3, 0x12, 0x30, 0x6, 0x30, 0x30, 0x73, 0x73, 0x69, 0x6e, 0xff, 0xff, 0xff, 0xff, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30},\n- {0x42, 0x30, 0x0, 0x8, 0x30, 0x40, 0x20, 0x0, 0x30, 0x6, 0x30, 0x30, 0x73, 0x73, 0x69, 0x6e, 0xff, 0xff, 0xff, 0xff, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30},\n+ name: \"IHL less than IPv4 minimum size\",\n+ fragments: []fragmentData{\n+ {\n+ ipv4fields: header.IPv4Fields{\n+ IHL: header.IPv4MinimumSize - 12,\n+ TOS: tos,\n+ TotalLength: header.IPv4MinimumSize + 28,\n+ ID: ident,\n+ Flags: 0,\n+ FragmentOffset: 1944,\n+ TTL: ttl,\n+ Protocol: protocol,\n+ SrcAddr: addr1,\n+ DstAddr: addr2,\n+ },\n+ payload: payloadGen(28),\n+ autoChecksum: true,\n+ },\n+ {\n+ ipv4fields: header.IPv4Fields{\n+ IHL: header.IPv4MinimumSize - 12,\n+ TOS: tos,\n+ TotalLength: header.IPv4MinimumSize - 12,\n+ ID: ident,\n+ Flags: header.IPv4FlagMoreFragments,\n+ FragmentOffset: 0,\n+ TTL: ttl,\n+ Protocol: protocol,\n+ SrcAddr: addr1,\n+ DstAddr: addr2,\n+ },\n+ payload: payloadGen(28),\n+ autoChecksum: true,\n},\n- 2,\n- 0,\n+ },\n+ wantMalformedIPPackets: 2,\n+ wantMalformedFragments: 0,\n+ },\n+ {\n+ name: \"fragment with short TotalLength and extra payload\",\n+ fragments: []fragmentData{\n+ {\n+ ipv4fields: header.IPv4Fields{\n+ IHL: header.IPv4MinimumSize + 4,\n+ TOS: tos,\n+ TotalLength: 
header.IPv4MinimumSize + 28,\n+ ID: ident,\n+ Flags: 0,\n+ FragmentOffset: 28816,\n+ TTL: ttl,\n+ Protocol: protocol,\n+ SrcAddr: addr1,\n+ DstAddr: addr2,\n+ },\n+ payload: payloadGen(28),\n+ autoChecksum: true,\n},\n{\n- \"ihl_less_than_ipv4_minimum_size_3\",\n- [][]byte{\n- {0x42, 0x30, 0x0, 0x30, 0x30, 0x40, 0xb3, 0x30, 0x30, 0x6, 0x30, 0x30, 0x73, 0x73, 0x69, 0x6e, 0xff, 0xff, 0xff, 0xff, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30},\n- {0x42, 0x30, 0x0, 0x8, 0x30, 0x40, 0x20, 0x0, 0x30, 0x6, 0x30, 0x30, 0x73, 0x73, 0x69, 0x6e, 0xff, 0xff, 0xff, 0xff, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30},\n+ ipv4fields: header.IPv4Fields{\n+ IHL: header.IPv4MinimumSize + 4,\n+ TOS: tos,\n+ TotalLength: header.IPv4MinimumSize + 4,\n+ ID: ident,\n+ Flags: header.IPv4FlagMoreFragments,\n+ FragmentOffset: 0,\n+ TTL: ttl,\n+ Protocol: protocol,\n+ SrcAddr: addr1,\n+ DstAddr: addr2,\n},\n- 2,\n- 0,\n+ payload: payloadGen(28),\n+ autoChecksum: true,\n},\n+ },\n+ wantMalformedIPPackets: 1,\n+ wantMalformedFragments: 1,\n+ },\n+ {\n+ name: \"multiple fragments with More Fragments flag set to false\",\n+ fragments: []fragmentData{\n{\n- \"fragment_with_short_total_len_extra_payload\",\n- [][]byte{\n- {0x46, 0x30, 0x00, 0x30, 0x30, 0x40, 0x0e, 0x12, 0x30, 0x06, 0x30, 0x30, 0x73, 0x73, 0x69, 0x6e, 0xff, 0xff, 0xff, 0xff, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30},\n- {0x46, 0x30, 0x00, 0x18, 0x30, 0x40, 0x20, 0x00, 0x30, 0x06, 0x30, 0x30, 0x73, 0x73, 0x69, 0x6e, 0xff, 0xff, 0xff, 0xff, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30},\n+ ipv4fields: header.IPv4Fields{\n+ IHL: header.IPv4MinimumSize,\n+ TOS: tos,\n+ TotalLength: header.IPv4MinimumSize + 8,\n+ ID: ident,\n+ Flags: 0,\n+ FragmentOffset: 128,\n+ TTL: ttl,\n+ Protocol: protocol,\n+ SrcAddr: addr1,\n+ DstAddr: addr2,\n+ },\n+ payload: payloadGen(8),\n+ autoChecksum: true,\n},\n- 1,\n- 1,\n+ {\n+ ipv4fields: header.IPv4Fields{\n+ IHL: header.IPv4MinimumSize,\n+ TOS: tos,\n+ TotalLength: header.IPv4MinimumSize + 8,\n+ ID: ident,\n+ Flags: 0,\n+ FragmentOffset: 8,\n+ TTL: ttl,\n+ Protocol: protocol,\n+ SrcAddr: addr1,\n+ DstAddr: addr2,\n+ },\n+ payload: payloadGen(8),\n+ autoChecksum: true,\n},\n{\n- \"multiple_fragments_with_more_fragments_set_to_false\",\n- [][]byte{\n- {0x45, 0x00, 0x00, 0x1c, 0x30, 0x40, 0x00, 0x10, 0x00, 0x06, 0x34, 0x69, 0x73, 0x73, 0x69, 0x6e, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},\n- {0x45, 0x00, 0x00, 0x1c, 0x30, 0x40, 0x00, 0x01, 0x61, 0x06, 0x34, 0x69, 0x73, 0x73, 0x69, 0x6e, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},\n- {0x45, 0x00, 0x00, 0x1c, 0x30, 0x40, 0x20, 0x00, 0x00, 0x06, 0x34, 0x1e, 0x73, 0x73, 0x69, 0x6e, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},\n+ ipv4fields: header.IPv4Fields{\n+ IHL: header.IPv4MinimumSize,\n+ TOS: tos,\n+ TotalLength: header.IPv4MinimumSize + 8,\n+ ID: ident,\n+ Flags: header.IPv4FlagMoreFragments,\n+ FragmentOffset: 0,\n+ TTL: ttl,\n+ Protocol: protocol,\n+ SrcAddr: addr1,\n+ DstAddr: addr2,\n+ },\n+ payload: 
payloadGen(8),\n+ autoChecksum: true,\n},\n- 1,\n- 1,\n+ },\n+ wantMalformedIPPackets: 1,\n+ wantMalformedFragments: 1,\n},\n}\n- for _, tc := range testCases {\n- t.Run(tc.name, func(t *testing.T) {\n- const nicID tcpip.NICID = 42\n+ for _, test := range tests {\n+ t.Run(test.name, func(t *testing.T) {\n+\ns := stack.New(stack.Options{\nNetworkProtocols: []stack.NetworkProtocol{\nipv4.NewProtocol(),\n},\n})\n+ e := channel.New(0, 1500, linkAddr)\n+ if err := s.CreateNIC(nicID, e); err != nil {\n+ t.Fatalf(\"CreateNIC(%d, _) = %s\", nicID, err)\n+ }\n+ if err := s.AddAddress(nicID, ipv4.ProtocolNumber, addr2); err != nil {\n+ t.Fatalf(\"AddAddress(%d, %d, %s) = %s\", nicID, header.IPv4ProtocolNumber, addr2, err)\n+ }\n+\n+ for _, f := range test.fragments {\n+ pktSize := header.IPv4MinimumSize + len(f.payload)\n+ hdr := buffer.NewPrependable(pktSize)\n- var linkAddr = tcpip.LinkAddress([]byte{0x30, 0x30, 0x30, 0x30, 0x30, 0x30})\n- var remoteLinkAddr = tcpip.LinkAddress([]byte{0x30, 0x30, 0x30, 0x30, 0x30, 0x31})\n- ep := channel.New(10, 1500, linkAddr)\n- s.CreateNIC(nicID, sniffer.New(ep))\n+ ip := header.IPv4(hdr.Prepend(pktSize))\n+ ip.Encode(&f.ipv4fields)\n+ copy(ip[header.IPv4MinimumSize:], f.payload)\n+\n+ if f.autoChecksum {\n+ ip.SetChecksum(0)\n+ ip.SetChecksum(^ip.CalculateChecksum())\n+ }\n- for _, pkt := range tc.packets {\n- ep.InjectLinkAddr(header.IPv4ProtocolNumber, remoteLinkAddr, stack.NewPacketBuffer(stack.PacketBufferOptions{\n- Data: buffer.NewVectorisedView(len(pkt), []buffer.View{pkt}),\n+ vv := hdr.View().ToVectorisedView()\n+ e.InjectInbound(header.IPv4ProtocolNumber, stack.NewPacketBuffer(stack.PacketBufferOptions{\n+ Data: vv,\n}))\n}\n- if got, want := s.Stats().IP.MalformedPacketsReceived.Value(), tc.wantMalformedIPPackets; got != want {\n+ if got, want := s.Stats().IP.MalformedPacketsReceived.Value(), test.wantMalformedIPPackets; got != want {\nt.Errorf(\"incorrect Stats.IP.MalformedPacketsReceived, got: %d, want: %d\", got, want)\n}\n- if got, want := s.Stats().IP.MalformedFragmentsReceived.Value(), tc.wantMalformedFragments; got != want {\n+ if got, want := s.Stats().IP.MalformedFragmentsReceived.Value(), test.wantMalformedFragments; got != want {\nt.Errorf(\"incorrect Stats.IP.MalformedFragmentsReceived, got: %d, want: %d\", got, want)\n}\n})\n@@ -534,6 +727,9 @@ func TestReceiveFragments(t *testing.T) {\n// the fragment block size of 8 (RFC 791 section 3.1 page 14).\nipv4Payload3Addr1ToAddr2 := udpGen(127, 3, addr1, addr2)\nudpPayload3Addr1ToAddr2 := ipv4Payload3Addr1ToAddr2[header.UDPMinimumSize:]\n+ // Used to test the max reassembled payload length (65,535 octets).\n+ ipv4Payload4Addr1ToAddr2 := udpGen(header.UDPMaximumSize-header.UDPMinimumSize, 4, addr1, addr2)\n+ udpPayload4Addr1ToAddr2 := ipv4Payload4Addr1ToAddr2[header.UDPMinimumSize:]\ntype fragmentData struct {\nsrcAddr tcpip.Address\n@@ -827,6 +1023,28 @@ func TestReceiveFragments(t *testing.T) {\n},\nexpectedPayloads: nil,\n},\n+ {\n+ name: \"Two fragments reassembled into a maximum UDP packet\",\n+ fragments: []fragmentData{\n+ {\n+ srcAddr: addr1,\n+ dstAddr: addr2,\n+ id: 1,\n+ flags: header.IPv4FlagMoreFragments,\n+ fragmentOffset: 0,\n+ payload: ipv4Payload4Addr1ToAddr2[:65512],\n+ },\n+ {\n+ srcAddr: addr1,\n+ dstAddr: addr2,\n+ id: 1,\n+ flags: 0,\n+ fragmentOffset: 65512,\n+ payload: ipv4Payload4Addr1ToAddr2[65512:],\n+ },\n+ },\n+ expectedPayloads: [][]byte{udpPayload4Addr1ToAddr2},\n+ },\n}\nfor _, test := range tests {\n" }, { "change_type": "MODIFY", "old_path": 
"pkg/tcpip/network/ipv6/ipv6.go", "new_path": "pkg/tcpip/network/ipv6/ipv6.go", "diff": "@@ -311,12 +311,10 @@ func (e *endpoint) HandlePacket(r *stack.Route, pkt *stack.PacketBuffer) {\n// The packet is a fragment, let's try to reassemble it.\nstart := extHdr.FragmentOffset() * header.IPv6FragmentExtHdrFragmentOffsetBytesPerUnit\n- last := start + uint16(fragmentPayloadLen) - 1\n- // Drop the packet if the fragmentOffset is incorrect. i.e the\n- // combination of fragmentOffset and pkt.Data.size() causes a\n- // wrap around resulting in last being less than the offset.\n- if last < start {\n+ // Drop the fragment if the size of the reassembled payload would exceed\n+ // the maximum payload size.\n+ if int(start)+fragmentPayloadLen > header.IPv6MaximumPayloadSize {\nr.Stats().IP.MalformedPacketsReceived.Increment()\nr.Stats().IP.MalformedFragmentsReceived.Increment()\nreturn\n@@ -333,7 +331,7 @@ func (e *endpoint) HandlePacket(r *stack.Route, pkt *stack.PacketBuffer) {\nID: extHdr.ID(),\n},\nstart,\n- last,\n+ start+uint16(fragmentPayloadLen)-1,\nextHdr.More(),\nuint8(rawPayload.Identifier),\nrawPayload.Buf,\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/network/ipv6/ipv6_test.go", "new_path": "pkg/tcpip/network/ipv6/ipv6_test.go", "diff": "package ipv6\nimport (\n+ \"math\"\n\"testing\"\n\"github.com/google/go-cmp/cmp\"\n@@ -687,6 +688,7 @@ func TestReceiveIPv6Fragments(t *testing.T) {\n// Used to test cases where the fragment blocks are not a multiple of\n// the fragment block size of 8 (RFC 8200 section 4.5).\nudpPayload3Length = 127\n+ udpPayload4Length = header.IPv6MaximumPayloadSize - header.UDPMinimumSize\nfragmentExtHdrLen = 8\n// Note, not all routing extension headers will be 8 bytes but this test\n// uses 8 byte routing extension headers for most sub tests.\n@@ -731,6 +733,10 @@ func TestReceiveIPv6Fragments(t *testing.T) {\nudpPayload3Addr1ToAddr2 := udpPayload3Addr1ToAddr2Buf[:]\nipv6Payload3Addr1ToAddr2 := udpGen(udpPayload3Addr1ToAddr2, 3, addr1, addr2)\n+ var udpPayload4Addr1ToAddr2Buf [udpPayload4Length]byte\n+ udpPayload4Addr1ToAddr2 := udpPayload4Addr1ToAddr2Buf[:]\n+ ipv6Payload4Addr1ToAddr2 := udpGen(udpPayload4Addr1ToAddr2, 4, addr1, addr2)\n+\ntests := []struct {\nname string\nexpectedPayload []byte\n@@ -1019,6 +1025,44 @@ func TestReceiveIPv6Fragments(t *testing.T) {\n},\nexpectedPayloads: nil,\n},\n+ {\n+ name: \"Two fragments reassembled into a maximum UDP packet\",\n+ fragments: []fragmentData{\n+ {\n+ srcAddr: addr1,\n+ dstAddr: addr2,\n+ nextHdr: fragmentExtHdrID,\n+ data: buffer.NewVectorisedView(\n+ fragmentExtHdrLen+65520,\n+ []buffer.View{\n+ // Fragment extension header.\n+ //\n+ // Fragment offset = 0, More = true, ID = 1\n+ buffer.View([]byte{uint8(header.UDPProtocolNumber), 0, 0, 1, 0, 0, 0, 1}),\n+\n+ ipv6Payload4Addr1ToAddr2[:65520],\n+ },\n+ ),\n+ },\n+ {\n+ srcAddr: addr1,\n+ dstAddr: addr2,\n+ nextHdr: fragmentExtHdrID,\n+ data: buffer.NewVectorisedView(\n+ fragmentExtHdrLen+len(ipv6Payload4Addr1ToAddr2)-65520,\n+ []buffer.View{\n+ // Fragment extension header.\n+ //\n+ // Fragment offset = 8190, More = false, ID = 1\n+ buffer.View([]byte{uint8(header.UDPProtocolNumber), 0, 255, 240, 0, 0, 0, 1}),\n+\n+ ipv6Payload4Addr1ToAddr2[65520:],\n+ },\n+ ),\n+ },\n+ },\n+ expectedPayloads: [][]byte{udpPayload4Addr1ToAddr2},\n+ },\n{\nname: \"Two fragments with per-fragment routing header with zero segments left\",\nfragments: []fragmentData{\n@@ -1572,3 +1616,96 @@ func TestReceiveIPv6Fragments(t *testing.T) {\n})\n}\n}\n+\n+func 
TestInvalidIPv6Fragments(t *testing.T) {\n+ const (\n+ nicID = 1\n+ fragmentExtHdrLen = 8\n+ )\n+\n+ payloadGen := func(payloadLen int) []byte {\n+ payload := make([]byte, payloadLen)\n+ for i := 0; i < len(payload); i++ {\n+ payload[i] = 0x30\n+ }\n+ return payload\n+ }\n+\n+ tests := []struct {\n+ name string\n+ fragments []fragmentData\n+ wantMalformedIPPackets uint64\n+ wantMalformedFragments uint64\n+ }{\n+ {\n+ name: \"fragments reassembled into a payload exceeding the max IPv6 payload size\",\n+ fragments: []fragmentData{\n+ {\n+ srcAddr: addr1,\n+ dstAddr: addr2,\n+ nextHdr: fragmentExtHdrID,\n+ data: buffer.NewVectorisedView(\n+ fragmentExtHdrLen+(header.IPv6MaximumPayloadSize+1)-16,\n+ []buffer.View{\n+ // Fragment extension header.\n+ // Fragment offset = 8190, More = false, ID = 1\n+ buffer.View([]byte{uint8(header.UDPProtocolNumber), 0,\n+ ((header.IPv6MaximumPayloadSize + 1) - 16) >> 8,\n+ ((header.IPv6MaximumPayloadSize + 1) - 16) & math.MaxUint8,\n+ 0, 0, 0, 1}),\n+ // Payload length = 16\n+ payloadGen(16),\n+ },\n+ ),\n+ },\n+ },\n+ wantMalformedIPPackets: 1,\n+ wantMalformedFragments: 1,\n+ },\n+ }\n+\n+ for _, test := range tests {\n+ t.Run(test.name, func(t *testing.T) {\n+ s := stack.New(stack.Options{\n+ NetworkProtocols: []stack.NetworkProtocol{\n+ NewProtocol(),\n+ },\n+ })\n+ e := channel.New(0, 1500, linkAddr1)\n+ if err := s.CreateNIC(nicID, e); err != nil {\n+ t.Fatalf(\"CreateNIC(%d, _) = %s\", nicID, err)\n+ }\n+ if err := s.AddAddress(nicID, ProtocolNumber, addr2); err != nil {\n+ t.Fatalf(\"AddAddress(%d, %d, %s) = %s\", nicID, ProtocolNumber, addr2, err)\n+ }\n+\n+ for _, f := range test.fragments {\n+ hdr := buffer.NewPrependable(header.IPv6MinimumSize)\n+\n+ // Serialize IPv6 fixed header.\n+ ip := header.IPv6(hdr.Prepend(header.IPv6MinimumSize))\n+ ip.Encode(&header.IPv6Fields{\n+ PayloadLength: uint16(f.data.Size()),\n+ NextHeader: f.nextHdr,\n+ HopLimit: 255,\n+ SrcAddr: f.srcAddr,\n+ DstAddr: f.dstAddr,\n+ })\n+\n+ vv := hdr.View().ToVectorisedView()\n+ vv.Append(f.data)\n+\n+ e.InjectInbound(ProtocolNumber, stack.NewPacketBuffer(stack.PacketBufferOptions{\n+ Data: vv,\n+ }))\n+ }\n+\n+ if got, want := s.Stats().IP.MalformedPacketsReceived.Value(), test.wantMalformedIPPackets; got != want {\n+ t.Errorf(\"got Stats.IP.MalformedPacketsReceived = %d, want = %d\", got, want)\n+ }\n+ if got, want := s.Stats().IP.MalformedFragmentsReceived.Value(), test.wantMalformedFragments; got != want {\n+ t.Errorf(\"got Stats.IP.MalformedFragmentsReceived = %d, want = %d\", got, want)\n+ }\n+ })\n+ }\n+}\n" } ]
Go
Apache License 2.0
google/gvisor
Cap reassembled IPv6 packets at 65535 octets IPv4 can accept 65536-octet reassembled packets. Test: - ipv4_test.TestInvalidFragments - ipv4_test.TestReceiveFragments - ipv6.TestInvalidIPv6Fragments - ipv6.TestReceiveIPv6Fragments Fixes #3770 PiperOrigin-RevId: 331382977
259,891
14.09.2020 11:08:56
25,200
833ceb0f14485ee11719b52a42087dfac14de856
Fix modprobe dependency. The modprobe command only takes 1 module per invocation. The second module name is being passed as a module parameter.
[ { "change_type": "MODIFY", "old_path": "Makefile", "new_path": "Makefile", "diff": "@@ -211,7 +211,8 @@ packetdrill-tests: load-packetdrill\n.PHONY: packetdrill-tests\npacketimpact-tests: load-packetimpact\n- @sudo modprobe iptable_filter ip6table_filter\n+ @sudo modprobe iptable_filter\n+ @sudo modprobe ip6table_filter\n@$(call submake,install-test-runtime RUNTIME=\"packetimpact\")\n@$(call submake,test-runtime OPTIONS=\"--jobs=HOST_CPUS*3 --local_test_jobs=HOST_CPUS*3\" RUNTIME=\"packetimpact\" TARGETS=\"$(shell $(MAKE) query TARGETS='attr(tags, packetimpact, tests(//...))')\")\n.PHONY: packetimpact-tests\n" } ]
Go
Apache License 2.0
google/gvisor
Fix modprobe dependency The modprobe command only takes 1 module per invocation. The second module name is being passed as a module parameter. PiperOrigin-RevId: 331585765
260,023
14.09.2020 14:31:02
25,200
05d2ebee5e4ebc31cd71f6064ca433a58692be76
Test RST handling in TIME_WAIT. The gVisor stack ignores RSTs when in TIME_WAIT, which is not the default Linux behavior. Add a packetimpact test to cover this. Also update code comments to reflect the rationale for the current gVisor behavior.
[ { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/tcp/rcv.go", "new_path": "pkg/tcpip/transport/tcp/rcv.go", "diff": "@@ -436,6 +436,13 @@ func (r *receiver) handleTimeWaitSegment(s *segment) (resetTimeWait bool, newSyn\n// Just silently drop any RST packets in TIME_WAIT. We do not support\n// TIME_WAIT assasination as a result we confirm w/ fix 1 as described\n// in https://tools.ietf.org/html/rfc1337#section-3.\n+ //\n+ // This behavior overrides RFC793 page 70 where we transition to CLOSED\n+ // on receiving RST, which is also default Linux behavior.\n+ // On Linux the RST can be ignored by setting sysctl net.ipv4.tcp_rfc1337.\n+ //\n+ // As we do not yet support PAWS, we are being conservative in ignoring\n+ // RSTs by default.\nif s.flagIsSet(header.TCPFlagRst) {\nreturn false, false\n}\n" }, { "change_type": "MODIFY", "old_path": "test/packetimpact/tests/BUILD", "new_path": "test/packetimpact/tests/BUILD", "diff": "@@ -259,6 +259,18 @@ packetimpact_go_test(\n],\n)\n+packetimpact_go_test(\n+ name = \"tcp_timewait_reset\",\n+ srcs = [\"tcp_timewait_reset_test.go\"],\n+ # TODO(b/168523247): Fix netstack then remove the line below.\n+ expect_netstack_failure = True,\n+ deps = [\n+ \"//pkg/tcpip/header\",\n+ \"//test/packetimpact/testbench\",\n+ \"@org_golang_x_sys//unix:go_default_library\",\n+ ],\n+)\n+\npacketimpact_go_test(\nname = \"icmpv6_param_problem\",\nsrcs = [\"icmpv6_param_problem_test.go\"],\n" }, { "change_type": "ADD", "old_path": null, "new_path": "test/packetimpact/tests/tcp_timewait_reset_test.go", "diff": "+// Copyright 2020 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package tcp_timewait_reset_test\n+\n+import (\n+ \"flag\"\n+ \"testing\"\n+ \"time\"\n+\n+ \"golang.org/x/sys/unix\"\n+ \"gvisor.dev/gvisor/pkg/tcpip/header\"\n+ \"gvisor.dev/gvisor/test/packetimpact/testbench\"\n+)\n+\n+func init() {\n+ testbench.RegisterFlags(flag.CommandLine)\n+}\n+\n+// TestTimeWaitReset tests handling of RST when in TIME_WAIT state.\n+func TestTimeWaitReset(t *testing.T) {\n+ dut := testbench.NewDUT(t)\n+ defer dut.TearDown()\n+ listenFD, remotePort := dut.CreateListener(t, unix.SOCK_STREAM, unix.IPPROTO_TCP, 1 /*backlog*/)\n+ defer dut.Close(t, listenFD)\n+ conn := testbench.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})\n+ defer conn.Close(t)\n+\n+ conn.Connect(t)\n+ acceptFD, _ := dut.Accept(t, listenFD)\n+\n+ // Trigger active close.\n+ dut.Close(t, acceptFD)\n+\n+ _, err := conn.Expect(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagFin | header.TCPFlagAck)}, time.Second)\n+ if err != nil {\n+ t.Fatalf(\"expected a FIN: %s\", err)\n+ }\n+ conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)})\n+ // Send a FIN, DUT should transition to TIME_WAIT from FIN_WAIT2.\n+ conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagFin | header.TCPFlagAck)})\n+ if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)}, time.Second); 
err != nil {\n+ t.Fatalf(\"expected an ACK for our FIN: %s\", err)\n+ }\n+\n+ // Send a RST, the DUT should transition to CLOSED from TIME_WAIT.\n+ // This is the default Linux behavior, it can be changed to ignore RSTs via\n+ // sysctl net.ipv4.tcp_rfc1337.\n+ conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagRst)})\n+\n+ conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)})\n+ // The DUT should reply with RST to our ACK as the state should have\n+ // transitioned to CLOSED.\n+ if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagRst)}, time.Second); err != nil {\n+ t.Fatalf(\"expected a RST: %s\", err)\n+ }\n+}\n" } ]
Go
Apache License 2.0
google/gvisor
Test RST handling in TIME_WAIT. gVisor stack ignores RSTs when in TIME_WAIT which is not the default Linux behavior. Add a packetimpact test to test the same. Also update code comments to reflect the rationale for the current gVisor behavior. PiperOrigin-RevId: 331629879
259,992
14.09.2020 16:02:07
25,200
52ffeb2d6475fa2e427daf246abdd32e4825f3f8
Add note about gofer link(2) limitation
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/syscalls/linux/linux64.go", "new_path": "pkg/sentry/syscalls/linux/linux64.go", "diff": "@@ -138,7 +138,7 @@ var AMD64 = &kernel.SyscallTable{\n83: syscalls.Supported(\"mkdir\", Mkdir),\n84: syscalls.Supported(\"rmdir\", Rmdir),\n85: syscalls.Supported(\"creat\", Creat),\n- 86: syscalls.Supported(\"link\", Link),\n+ 86: syscalls.PartiallySupported(\"link\", Link, \"Limited support with Gofer. Link count and linked files may get out of sync because gVisor is not aware of external hardlinks.\", nil),\n87: syscalls.Supported(\"unlink\", Unlink),\n88: syscalls.Supported(\"symlink\", Symlink),\n89: syscalls.Supported(\"readlink\", Readlink),\n@@ -317,7 +317,7 @@ var AMD64 = &kernel.SyscallTable{\n262: syscalls.Supported(\"fstatat\", Fstatat),\n263: syscalls.Supported(\"unlinkat\", Unlinkat),\n264: syscalls.Supported(\"renameat\", Renameat),\n- 265: syscalls.Supported(\"linkat\", Linkat),\n+ 265: syscalls.PartiallySupported(\"linkat\", Linkat, \"See link(2).\", nil),\n266: syscalls.Supported(\"symlinkat\", Symlinkat),\n267: syscalls.Supported(\"readlinkat\", Readlinkat),\n268: syscalls.Supported(\"fchmodat\", Fchmodat),\n" } ]
Go
Apache License 2.0
google/gvisor
Add note about gofer link(2) limitation PiperOrigin-RevId: 331648296
259,885
15.09.2020 13:17:26
25,200
86b31a80777dd906b275fb4b21888e93836f8a9b
RFC: design for a 9P replacement. Tentatively `lisafs` (LInux SAndbox FileSystem).
[ { "change_type": "ADD", "old_path": null, "new_path": "pkg/lisafs/README.md", "diff": "+# Replacing 9P\n+\n+## Background\n+\n+The Linux filesystem model consists of the following key aspects (modulo mounts,\n+which are outside the scope of this discussion):\n+\n+- A `struct inode` represents a \"filesystem object\", such as a directory or a\n+ regular file. \"Filesystem object\" is most precisely defined by the practical\n+ properties of an inode, such as an immutable type (regular file, directory,\n+ symbolic link, etc.) and its independence from the path originally used to\n+ obtain it.\n+\n+- A `struct dentry` represents a node in a filesystem tree. Semantically, each\n+ dentry is immutably associated with an inode representing the filesystem\n+ object at that position. (Linux implements optimizations involving reuse of\n+ unreferenced dentries, which allows their associated inodes to change, but\n+ this is outside the scope of this discussion.)\n+\n+- A `struct file` represents an open file description (hereafter FD) and is\n+ needed to perform I/O. Each FD is immutably associated with the dentry\n+ through which it was opened.\n+\n+The current gVisor virtual filesystem implementation (hereafter VFS1) closely\n+imitates the Linux design:\n+\n+- `struct inode` => `fs.Inode`\n+\n+- `struct dentry` => `fs.Dirent`\n+\n+- `struct file` => `fs.File`\n+\n+gVisor accesses most external filesystems through a variant of the 9P2000.L\n+protocol, including extensions for performance (`walkgetattr`) and for features\n+not supported by vanilla 9P2000.L (`flushf`, `lconnect`). The 9P protocol family\n+is inode-based; 9P fids represent a file (equivalently \"file system object\"),\n+and the protocol is structured around alternatively obtaining fids to represent\n+files (with `walk` and, in gVisor, `walkgetattr`) and performing operations on\n+those fids.\n+\n+In the sections below, a **shared** filesystem is a filesystem that is *mutably*\n+accessible by multiple concurrent clients, such that a **non-shared** filesystem\n+is a filesystem that is either read-only or accessible by only a single client.\n+\n+## Problems\n+\n+### Serialization of Path Component RPCs\n+\n+Broadly speaking, VFS1 traverses each path component in a pathname, alternating\n+between verifying that each traversed dentry represents an inode that represents\n+a searchable directory and moving to the next dentry in the path.\n+\n+In the context of a remote filesystem, the structure of this traversal means\n+that - modulo caching - a path involving N components requires at least N-1\n+*sequential* RPCs to obtain metadata for intermediate directories, incurring\n+significant latency. (In vanilla 9P2000.L, 2(N-1) RPCs are required: N-1 `walk`\n+and N-1 `getattr`. We added the `walkgetattr` RPC to reduce this overhead.) On\n+non-shared filesystems, this overhead is primarily significant during\n+application startup; caching mitigates much of this overhead at steady state. On\n+shared filesystems, where correct caching requires revalidation (requiring RPCs\n+for each revalidated directory anyway), this overhead is consistently ruinous.\n+\n+### Inefficient RPCs\n+\n+9P is not exceptionally economical with RPCs in general. 
In addition to the\n+issue described above:\n+\n+- Opening an existing file in 9P involves at least 2 RPCs: `walk` to produce\n+ an unopened fid representing the file, and `lopen` to open the fid.\n+\n+- Creating a file also involves at least 2 RPCs: `walk` to produce an unopened\n+ fid representing the parent directory, and `lcreate` to create the file and\n+ convert the fid to an open fid representing the created file. In practice,\n+ both the Linux and gVisor 9P clients expect to have an unopened fid for the\n+ created file (necessitating an additional `walk`), as well as attributes for\n+ the created file (necessitating an additional `getattr`), for a total of 4\n+ RPCs. (In a shared filesystem, where whether a file already exists can\n+ change between RPCs, a correct implementation of `open(O_CREAT)` would have\n+ to alternate between these two paths (plus `clunk`ing the temporary fid\n+ between alternations, since the nature of the `fid` differs between the two\n+ paths). Neither Linux nor gVisor implement the required alternation, so\n+ `open(O_CREAT)` without `O_EXCL` can spuriously fail with `EEXIST` on both.)\n+\n+- Closing (`clunk`ing) a fid requires an RPC. VFS1 issues this RPC\n+ asynchronously in an attempt to reduce critical path latency, but scheduling\n+ overhead makes this not clearly advantageous in practice.\n+\n+- `read` and `readdir` can return partial reads without a way to indicate EOF,\n+ necessitating an additional final read to detect EOF.\n+\n+- Operations that affect filesystem state do not consistently return updated\n+ filesystem state. In gVisor, the client implementation attempts to handle\n+ this by tracking what it thinks updated state \"should\" be; this is complex,\n+ and especially brittle for timestamps (which are often not arbitrarily\n+ settable). In Linux, the client implemtation invalidates cached metadata\n+ whenever it performs such an operation, and reloads it when a dentry\n+ corresponding to an inode with no valid cached metadata is revalidated; this\n+ is simple, but necessitates an additional `getattr`.\n+\n+### Dentry/Inode Ambiguity\n+\n+As noted above, 9P's documentation tends to imply that unopened fids represent\n+an inode. In practice, most filesystem APIs present very limited interfaces for\n+working with inodes at best, such that the interpretation of unopened fids\n+varies:\n+\n+- Linux's 9P client associates unopened fids with (dentry, uid) pairs. When\n+ caching is enabled, it also associates each inode with the first fid opened\n+ writably that references that inode, in order to support page cache\n+ writeback.\n+\n+- gVisor's 9P client associates unopened fids with inodes, and also caches\n+ opened fids in inodes in a manner similar to Linux.\n+\n+- The runsc fsgofer associates unopened fids with both \"dentries\" (host\n+ filesystem paths) and \"inodes\" (host file descriptors); which is used\n+ depends on the operation invoked on the fid.\n+\n+For non-shared filesystems, this confusion has resulted in correctness issues\n+that are (in gVisor) currently handled by a number of coarse-grained locks that\n+serialize renames with all other filesystem operations. 
For shared filesystems,\n+this means inconsistent behavior in the presence of concurrent mutation.\n+\n+## Design\n+\n+Almost all Linux filesystem syscalls describe filesystem resources in one of two\n+ways:\n+\n+- Path-based: A filesystem position is described by a combination of a\n+ starting position and a sequence of path components relative to that\n+ position, where the starting position is one of:\n+\n+ - The VFS root (defined by mount namespace and chroot), for absolute paths\n+\n+ - The VFS position of an existing FD, for relative paths passed to `*at`\n+ syscalls (e.g. `statat`)\n+\n+ - The current working directory, for relative paths passed to non-`*at`\n+ syscalls and `*at` syscalls with `AT_FDCWD`\n+\n+- File-description-based: A filesystem object is described by an existing FD,\n+ passed to a `f*` syscall (e.g. `fstat`).\n+\n+Many of our issues with 9P arise from its (and VFS') interposition of a model\n+based on inodes between the filesystem syscall API and filesystem\n+implementations. We propose to replace 9P with a protocol that does not feature\n+inodes at all, and instead closely follows the filesystem syscall API by\n+featuring only path-based and FD-based operations, with minimal deviations as\n+necessary to ameliorate deficiencies in the syscall interface (see below). This\n+approach addresses the issues described above:\n+\n+- Even on shared filesystems, most application filesystem syscalls are\n+ translated to a single RPC (possibly excepting special cases described\n+ below), which is a logical lower bound.\n+\n+- The behavior of application syscalls on shared filesystems is\n+ straightforwardly predictable: path-based syscalls are translated to\n+ path-based RPCs, which will re-lookup the file at that path, and FD-based\n+ syscalls are translated to FD-based RPCs, which use an existing open file\n+ without performing another lookup. (This is at least true on gofers that\n+ proxy the host local filesystem; other filesystems that lack support for\n+ e.g. certain operations on FDs may have different behavior, but this\n+ divergence is at least still predictable and inherent to the underlying\n+ filesystem implementation.)\n+\n+Note that this approach is only feasible in gVisor's next-generation virtual\n+filesystem (VFS2), which does not assume the existence of inodes and allows the\n+remote filesystem client to translate whole path-based syscalls into RPCs. Thus\n+one of the unavoidable tradeoffs associated with such a protocol vs. 9P is the\n+inability to construct a Linux client that is performance-competitive with\n+gVisor.\n+\n+### File Permissions\n+\n+Many filesystem operations are side-effectual, such that file permissions must\n+be checked before such operations take effect. The simplest approach to file\n+permission checking is for the sentry to obtain permissions from the remote\n+filesystem, then apply permission checks in the sentry before performing the\n+application-requested operation. However, this requires an additional RPC per\n+application syscall (which can't be mitigated by caching on shared filesystems).\n+Alternatively, we may delegate file permission checking to gofers. In general,\n+file permission checks depend on the following properties of the accessor:\n+\n+- Filesystem UID/GID\n+\n+- Supplementary GIDs\n+\n+- Effective capabilities in the accessor's user namespace (i.e. 
the accessor's\n+ effective capability set)\n+\n+- All UIDs and GIDs mapped in the accessor's user namespace (which determine\n+ if the accessor's capabilities apply to accessed files)\n+\n+We may choose to delay implementation of file permission checking delegation,\n+although this is potentially costly since it doubles the number of required RPCs\n+for most operations on shared filesystems. We may also consider compromise\n+options, such as only delegating file permission checks for accessors in the\n+root user namespace.\n+\n+### Symbolic Links\n+\n+gVisor usually interprets symbolic link targets in its VFS rather than on the\n+filesystem containing the symbolic link; thus e.g. a symlink to\n+\"/proc/self/maps\" on a remote filesystem resolves to said file in the sentry's\n+procfs rather than the host's. This implies that:\n+\n+- Remote filesystem servers that proxy filesystems supporting symlinks must\n+ check if each path component is a symlink during path traversal.\n+\n+- Absolute symlinks require that the sentry restart the operation at its\n+ contextual VFS root (which is task-specific and may not be on a remote\n+ filesystem at all), so if a remote filesystem server encounters an absolute\n+ symlink during path traversal on behalf of a path-based operation, it must\n+ terminate path traversal and return the symlink target.\n+\n+- Relative symlinks begin target resolution in the parent directory of the\n+ symlink, so in theory most relative symlinks can be handled automatically\n+ during the path traversal that encounters the symlink, provided that said\n+ traversal is supplied with the number of remaining symlinks before `ELOOP`.\n+ However, the new path traversed by the symlink target may cross VFS mount\n+ boundaries, such that it's only safe for remote filesystem servers to\n+ speculatively follow relative symlinks for side-effect-free operations such\n+ as `stat` (where the sentry can simply ignore results that are inapplicable\n+ due to crossing mount boundaries). We may choose to delay implementation of\n+ this feature, at the cost of an additional RPC per relative symlink (note\n+ that even if the symlink target crosses a mount boundary, the sentry will\n+ need to `stat` the path to the mount boundary to confirm that each traversed\n+ component is an accessible directory); until it is implemented, relative\n+ symlinks may be handled like absolute symlinks, by terminating path\n+ traversal and returning the symlink target.\n+\n+The possibility of symlinks (and the possibility of a compromised sentry) means\n+that the sentry may issue RPCs with paths that, in the absence of symlinks,\n+would traverse beyond the root of the remote filesystem. For example, the sentry\n+may issue an RPC with a path like \"/foo/../..\", on the premise that if \"/foo\" is\n+a symlink then the resulting path may be elsewhere on the remote filesystem. To\n+handle this, path traversal must also track its current depth below the remote\n+filesystem root, and terminate path traversal if it would ascend beyond this\n+point.\n+\n+### Path Traversal\n+\n+Since path-based VFS operations will translate to path-based RPCs, filesystem\n+servers will need to handle path traversal. From the perspective of a given\n+filesystem implementation in the server, there are two basic approaches to path\n+traversal:\n+\n+- Inode-walk: For each path component, obtain a handle to the underlying\n+ filesystem object (e.g. 
with `open(O_PATH)`), check if that object is a\n+ symlink (as described above) and that that object is accessible by the\n+ caller (e.g. with `fstat()`), then continue to the next path component (e.g.\n+ with `openat()`). This ensures that the checked filesystem object is the one\n+ used to obtain the next object in the traversal, which is intuitively\n+ appealing. However, while this approach works for host local filesystems, it\n+ requires features that are not widely supported by other filesystems.\n+\n+- Path-walk: For each path component, use a path-based operation to determine\n+ if the filesystem object currently referred to by that path component is a\n+ symlink / is accessible. This is highly portable, but suffers from quadratic\n+ behavior (at the level of the underlying filesystem implementation, the\n+ first path component will be traversed a number of times equal to the number\n+ of path components in the path).\n+\n+The implementation should support either option by delegating path traversal to\n+filesystem implementations within the server (like VFS and the remote filesystem\n+protocol itself), as inode-walking is still safe, efficient, amenable to FD\n+caching, and implementable on non-shared host local filesystems (a sufficiently\n+common case as to be worth considering in the design).\n+\n+Both approaches are susceptible to race conditions that may permit sandboxed\n+filesystem escapes:\n+\n+- Under inode-walk, a malicious application may cause a directory to be moved\n+ (with `rename`) during path traversal, such that the filesystem\n+ implementation incorrectly determines whether subsequent inodes are located\n+ in paths that should be visible to sandboxed applications.\n+\n+- Under path-walk, a malicious application may cause a non-symlink file to be\n+ replaced with a symlink during path traversal, such that following path\n+ operations will incorrectly follow the symlink.\n+\n+Both race conditions can, to some extent, be mitigated in filesystem server\n+implementations by synchronizing path traversal with the hazardous operations in\n+question. However, shared filesystems are frequently used to share data between\n+sandboxed and unsandboxed applications in a controlled way, and in some cases a\n+malicious sandboxed application may be able to take advantage of a hazardous\n+filesystem operation performed by an unsandboxed application. In some cases,\n+filesystem features may be available to ensure safety even in such cases (e.g.\n+[the new openat2() syscall](https://man7.org/linux/man-pages/man2/openat2.2.html)),\n+but it is not clear how to solve this problem in general. (Note that this issue\n+is not specific to our design; rather, it is a fundamental limitation of\n+filesystem sandboxing.)\n+\n+### Filesystem Multiplexing\n+\n+A given sentry may need to access multiple distinct remote filesystems (e.g.\n+different volumes for a given container). In many cases, there is no advantage\n+to serving these filesystems from distinct filesystem servers, or accessing them\n+through distinct connections (factors such as maximum RPC concurrency should be\n+based on available host resources). Therefore, the protocol should support\n+multiplexing of distinct filesystem trees within a single session. 
9P supports\n+this by allowing multiple calls to the `attach` RPC to produce fids representing\n+distinct filesystem trees, but this is somewhat clunky; we propose a much\n+simpler mechanism wherein each message that conveys a path also conveys a\n+numeric filesystem ID that identifies a filesystem tree.\n+\n+## Alternatives Considered\n+\n+### Additional Extensions to 9P\n+\n+There are at least three conceptual aspects to 9P:\n+\n+- Wire format: messages with a 4-byte little-endian size prefix, strings with\n+ a 2-byte little-endian size prefix, etc. Whether the wire format is worth\n+ retaining is unclear; in particular, it's unclear that the 9P wire format\n+ has a significant advantage over protobufs, which are substantially easier\n+ to extend. Note that the official Go protobuf implementation is widely known\n+ to suffer from a significant number of performance deficiencies, so if we\n+ choose to switch to protobuf, we may need to use an alternative toolchain\n+ such as `gogo/protobuf` (which is also widely used in the Go ecosystem, e.g.\n+ by Kubernetes).\n+\n+- Filesystem model: fids, qids, etc. Discarding this is one of the motivations\n+ for this proposal.\n+\n+- RPCs: Twalk, Tlopen, etc. In addition to previously-described\n+ inefficiencies, most of these are dependent on the filesystem model and\n+ therefore must be discarded.\n+\n+### FUSE\n+\n+The FUSE (Filesystem in Userspace) protocol is frequently used to provide\n+arbitrary userspace filesystem implementations to a host Linux kernel.\n+Unfortunately, FUSE is also inode-based, and therefore doesn't address any of\n+the problems we have with 9P.\n+\n+### virtio-fs\n+\n+virtio-fs is an ongoing project aimed at improving Linux VM filesystem\n+performance when accessing Linux host filesystems (vs. virtio-9p). In brief, it\n+is based on:\n+\n+- Using a FUSE client in the guest that communicates over virtio with a FUSE\n+ server in the host.\n+\n+- Using DAX to map the host page cache into the guest.\n+\n+- Using a file metadata table in shared memory to avoid VM exits for metadata\n+ updates.\n+\n+None of these improvements seem applicable to gVisor:\n+\n+- As explained above, FUSE is still inode-based, so it is still susceptible to\n+ most of the problems we have with 9P.\n+\n+- Our use of host file descriptors already allows us to leverage the host page\n+ cache for file contents.\n+\n+- Our need for shared filesystem coherence is usually based on a user\n+ requirement that an out-of-sandbox filesystem mutation is guaranteed to be\n+ visible by all subsequent observations from within the sandbox, or vice\n+ versa; it's not clear that this can be guaranteed without a synchronous\n+ signaling mechanism like an RPC.\n" } ]
Go
Apache License 2.0
google/gvisor
RFC: design for a 9P replacement Tentatively `lisafs` (LInux SAndbox FileSystem). PiperOrigin-RevId: 331839246
259,896
15.09.2020 13:39:47
25,200
7f89a26e18edfe4335eaab965fb7a03eaebb2682
Release FDTable lock before dropping the fds. This is needed for SO_LINGER, where close() is blocked for the linger timeout; holding the FDTable lock for the entire timeout would prevent us from creating or deleting other fds. We have to release the lock and then drop the fds.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/kernel/fd_table.go", "new_path": "pkg/sentry/kernel/fd_table.go", "diff": "@@ -113,7 +113,9 @@ func (f *FDTable) loadDescriptorTable(m map[int32]descriptor) {\nf.init() // Initialize table.\nf.used = 0\nfor fd, d := range m {\n- f.setAll(ctx, fd, d.file, d.fileVFS2, d.flags)\n+ if file, fileVFS2 := f.setAll(ctx, fd, d.file, d.fileVFS2, d.flags); file != nil || fileVFS2 != nil {\n+ panic(\"VFS1 or VFS2 files set\")\n+ }\n// Note that we do _not_ need to acquire a extra table reference here. The\n// table reference will already be accounted for in the file, so we drop the\n@@ -273,7 +275,6 @@ func (f *FDTable) NewFDs(ctx context.Context, fd int32, files []*fs.File, flags\n}\nf.mu.Lock()\n- defer f.mu.Unlock()\n// From f.next to find available fd.\nif fd < f.next {\n@@ -283,7 +284,8 @@ func (f *FDTable) NewFDs(ctx context.Context, fd int32, files []*fs.File, flags\n// Install all entries.\nfor i := fd; i < end && len(fds) < len(files); i++ {\nif d, _, _ := f.get(i); d == nil {\n- f.set(ctx, i, files[len(fds)], flags) // Set the descriptor.\n+ // Set the descriptor.\n+ f.set(ctx, i, files[len(fds)], flags)\nfds = append(fds, i) // Record the file descriptor.\n}\n}\n@@ -291,7 +293,16 @@ func (f *FDTable) NewFDs(ctx context.Context, fd int32, files []*fs.File, flags\n// Failure? Unwind existing FDs.\nif len(fds) < len(files) {\nfor _, i := range fds {\n- f.set(ctx, i, nil, FDFlags{}) // Zap entry.\n+ f.set(ctx, i, nil, FDFlags{})\n+ }\n+ f.mu.Unlock()\n+\n+ // Drop the reference taken by the call to f.set() that\n+ // originally installed the file. Don't call f.drop()\n+ // (generating inotify events, etc.) since the file should\n+ // appear to have never been inserted into f.\n+ for _, file := range files[:len(fds)] {\n+ file.DecRef(ctx)\n}\nreturn nil, syscall.EMFILE\n}\n@@ -301,6 +312,7 @@ func (f *FDTable) NewFDs(ctx context.Context, fd int32, files []*fs.File, flags\nf.next = fds[len(fds)-1] + 1\n}\n+ f.mu.Unlock()\nreturn fds, nil\n}\n@@ -328,7 +340,6 @@ func (f *FDTable) NewFDsVFS2(ctx context.Context, fd int32, files []*vfs.FileDes\n}\nf.mu.Lock()\n- defer f.mu.Unlock()\n// From f.next to find available fd.\nif fd < f.next {\n@@ -338,7 +349,8 @@ func (f *FDTable) NewFDsVFS2(ctx context.Context, fd int32, files []*vfs.FileDes\n// Install all entries.\nfor i := fd; i < end && len(fds) < len(files); i++ {\nif d, _, _ := f.getVFS2(i); d == nil {\n- f.setVFS2(ctx, i, files[len(fds)], flags) // Set the descriptor.\n+ // Set the descriptor.\n+ f.setVFS2(ctx, i, files[len(fds)], flags)\nfds = append(fds, i) // Record the file descriptor.\n}\n}\n@@ -346,7 +358,16 @@ func (f *FDTable) NewFDsVFS2(ctx context.Context, fd int32, files []*vfs.FileDes\n// Failure? Unwind existing FDs.\nif len(fds) < len(files) {\nfor _, i := range fds {\n- f.setVFS2(ctx, i, nil, FDFlags{}) // Zap entry.\n+ f.setVFS2(ctx, i, nil, FDFlags{})\n+ }\n+ f.mu.Unlock()\n+\n+ // Drop the reference taken by the call to f.setVFS2() that\n+ // originally installed the file. Don't call f.dropVFS2()\n+ // (generating inotify events, etc.) 
since the file should\n+ // appear to have never been inserted into f.\n+ for _, file := range files[:len(fds)] {\n+ file.DecRef(ctx)\n}\nreturn nil, syscall.EMFILE\n}\n@@ -356,6 +377,7 @@ func (f *FDTable) NewFDsVFS2(ctx context.Context, fd int32, files []*vfs.FileDes\nf.next = fds[len(fds)-1] + 1\n}\n+ f.mu.Unlock()\nreturn fds, nil\n}\n@@ -407,34 +429,49 @@ func (f *FDTable) NewFDVFS2(ctx context.Context, minfd int32, file *vfs.FileDesc\n// reference for that FD, the ref count for that existing reference is\n// decremented.\nfunc (f *FDTable) NewFDAt(ctx context.Context, fd int32, file *fs.File, flags FDFlags) error {\n- return f.newFDAt(ctx, fd, file, nil, flags)\n+ df, _, err := f.newFDAt(ctx, fd, file, nil, flags)\n+ if err != nil {\n+ return err\n+ }\n+ if df != nil {\n+ f.drop(ctx, df)\n+ }\n+ return nil\n}\n// NewFDAtVFS2 sets the file reference for the given FD. If there is an active\n// reference for that FD, the ref count for that existing reference is\n// decremented.\nfunc (f *FDTable) NewFDAtVFS2(ctx context.Context, fd int32, file *vfs.FileDescription, flags FDFlags) error {\n- return f.newFDAt(ctx, fd, nil, file, flags)\n+ _, dfVFS2, err := f.newFDAt(ctx, fd, nil, file, flags)\n+ if err != nil {\n+ return err\n+ }\n+ if dfVFS2 != nil {\n+ f.dropVFS2(ctx, dfVFS2)\n+ }\n+ return nil\n}\n-func (f *FDTable) newFDAt(ctx context.Context, fd int32, file *fs.File, fileVFS2 *vfs.FileDescription, flags FDFlags) error {\n+func (f *FDTable) newFDAt(ctx context.Context, fd int32, file *fs.File, fileVFS2 *vfs.FileDescription, flags FDFlags) (*fs.File, *vfs.FileDescription, error) {\nif fd < 0 {\n// Don't accept negative FDs.\n- return syscall.EBADF\n+ return nil, nil, syscall.EBADF\n}\n// Check the limit for the provided file.\nif limitSet := limits.FromContext(ctx); limitSet != nil {\nif lim := limitSet.Get(limits.NumberOfFiles); lim.Cur != limits.Infinity && uint64(fd) >= lim.Cur {\n- return syscall.EMFILE\n+ return nil, nil, syscall.EMFILE\n}\n}\n// Install the entry.\nf.mu.Lock()\ndefer f.mu.Unlock()\n- f.setAll(ctx, fd, file, fileVFS2, flags)\n- return nil\n+\n+ df, dfVFS2 := f.setAll(ctx, fd, file, fileVFS2, flags)\n+ return df, dfVFS2, nil\n}\n// SetFlags sets the flags for the given file descriptor.\n@@ -552,11 +589,8 @@ func (f *FDTable) Fork(ctx context.Context) *FDTable {\nf.forEach(ctx, func(fd int32, file *fs.File, fileVFS2 *vfs.FileDescription, flags FDFlags) {\n// The set function here will acquire an appropriate table\n// reference for the clone. 
We don't need anything else.\n- switch {\n- case file != nil:\n- clone.set(ctx, fd, file, flags)\n- case fileVFS2 != nil:\n- clone.setVFS2(ctx, fd, fileVFS2, flags)\n+ if df, dfVFS2 := clone.setAll(ctx, fd, file, fileVFS2, flags); df != nil || dfVFS2 != nil {\n+ panic(\"VFS1 or VFS2 files set\")\n}\n})\nreturn clone\n@@ -571,7 +605,6 @@ func (f *FDTable) Remove(ctx context.Context, fd int32) (*fs.File, *vfs.FileDesc\n}\nf.mu.Lock()\n- defer f.mu.Unlock()\n// Update current available position.\nif fd < f.next {\n@@ -587,24 +620,51 @@ func (f *FDTable) Remove(ctx context.Context, fd int32) (*fs.File, *vfs.FileDesc\ncase orig2 != nil:\norig2.IncRef()\n}\n+\nif orig != nil || orig2 != nil {\n- f.setAll(ctx, fd, nil, nil, FDFlags{}) // Zap entry.\n+ orig, orig2 = f.setAll(ctx, fd, nil, nil, FDFlags{}) // Zap entry.\n+ }\n+ f.mu.Unlock()\n+\n+ if orig != nil {\n+ f.drop(ctx, orig)\n}\n+ if orig2 != nil {\n+ f.dropVFS2(ctx, orig2)\n+ }\n+\nreturn orig, orig2\n}\n// RemoveIf removes all FDs where cond is true.\nfunc (f *FDTable) RemoveIf(ctx context.Context, cond func(*fs.File, *vfs.FileDescription, FDFlags) bool) {\n- f.mu.Lock()\n- defer f.mu.Unlock()\n+ // TODO(gvisor.dev/issue/1624): Remove fs.File slice.\n+ var files []*fs.File\n+ var filesVFS2 []*vfs.FileDescription\n+ f.mu.Lock()\nf.forEach(ctx, func(fd int32, file *fs.File, fileVFS2 *vfs.FileDescription, flags FDFlags) {\nif cond(file, fileVFS2, flags) {\n- f.set(ctx, fd, nil, FDFlags{}) // Clear from table.\n+ df, dfVFS2 := f.setAll(ctx, fd, nil, nil, FDFlags{}) // Clear from table.\n+ if df != nil {\n+ files = append(files, df)\n+ }\n+ if dfVFS2 != nil {\n+ filesVFS2 = append(filesVFS2, dfVFS2)\n+ }\n// Update current available position.\nif fd < f.next {\nf.next = fd\n}\n}\n})\n+ f.mu.Unlock()\n+\n+ for _, file := range files {\n+ f.drop(ctx, file)\n+ }\n+\n+ for _, file := range filesVFS2 {\n+ f.dropVFS2(ctx, file)\n+ }\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/kernel/fd_table_unsafe.go", "new_path": "pkg/sentry/kernel/fd_table_unsafe.go", "diff": "@@ -86,33 +86,30 @@ func (f *FDTable) CurrentMaxFDs() int {\nreturn len(slice)\n}\n-// set sets an entry.\n-//\n-// This handles accounting changes, as well as acquiring and releasing the\n-// reference needed by the table iff the file is different.\n+// set sets an entry for VFS1, refer to setAll().\n//\n// Precondition: mu must be held.\n-func (f *FDTable) set(ctx context.Context, fd int32, file *fs.File, flags FDFlags) {\n- f.setAll(ctx, fd, file, nil, flags)\n+func (f *FDTable) set(ctx context.Context, fd int32, file *fs.File, flags FDFlags) *fs.File {\n+ dropFile, _ := f.setAll(ctx, fd, file, nil, flags)\n+ return dropFile\n}\n-// setVFS2 sets an entry.\n-//\n-// This handles accounting changes, as well as acquiring and releasing the\n-// reference needed by the table iff the file is different.\n+// setVFS2 sets an entry for VFS2, refer to setAll().\n//\n// Precondition: mu must be held.\n-func (f *FDTable) setVFS2(ctx context.Context, fd int32, file *vfs.FileDescription, flags FDFlags) {\n- f.setAll(ctx, fd, nil, file, flags)\n+func (f *FDTable) setVFS2(ctx context.Context, fd int32, file *vfs.FileDescription, flags FDFlags) *vfs.FileDescription {\n+ _, dropFile := f.setAll(ctx, fd, nil, file, flags)\n+ return dropFile\n}\n-// setAll sets an entry.\n-//\n-// This handles accounting changes, as well as acquiring and releasing the\n-// reference needed by the table iff the file is different.\n+// setAll sets the file description referred to by fd to file/fileVFS2. 
If\n+// file/fileVFS2 are non-nil, it takes a reference on them. If setAll replaces\n+// an existing file description, it returns it with the FDTable's reference\n+// transferred to the caller, which must call f.drop/dropVFS2() on the returned\n+// file after unlocking f.mu.\n//\n// Precondition: mu must be held.\n-func (f *FDTable) setAll(ctx context.Context, fd int32, file *fs.File, fileVFS2 *vfs.FileDescription, flags FDFlags) {\n+func (f *FDTable) setAll(ctx context.Context, fd int32, file *fs.File, fileVFS2 *vfs.FileDescription, flags FDFlags) (*fs.File, *vfs.FileDescription) {\nif file != nil && fileVFS2 != nil {\npanic(\"VFS1 and VFS2 files set\")\n}\n@@ -155,25 +152,25 @@ func (f *FDTable) setAll(ctx context.Context, fd int32, file *fs.File, fileVFS2\n}\n}\n- // Drop the table reference.\n+ // Adjust used.\n+ switch {\n+ case orig == nil && desc != nil:\n+ atomic.AddInt32(&f.used, 1)\n+ case orig != nil && desc == nil:\n+ atomic.AddInt32(&f.used, -1)\n+ }\n+\nif orig != nil {\nswitch {\ncase orig.file != nil:\nif desc == nil || desc.file != orig.file {\n- f.drop(ctx, orig.file)\n+ return orig.file, nil\n}\ncase orig.fileVFS2 != nil:\nif desc == nil || desc.fileVFS2 != orig.fileVFS2 {\n- f.dropVFS2(ctx, orig.fileVFS2)\n- }\n+ return nil, orig.fileVFS2\n}\n}\n-\n- // Adjust used.\n- switch {\n- case orig == nil && desc != nil:\n- atomic.AddInt32(&f.used, 1)\n- case orig != nil && desc == nil:\n- atomic.AddInt32(&f.used, -1)\n}\n+ return nil, nil\n}\n" } ]
Go
Apache License 2.0
google/gvisor
Release FDTable lock before dropping the fds. This is needed for SO_LINGER, where close() is blocked for the linger timeout, and holding the FDTable lock for the entire timeout would not allow us to create/delete other fds. We have to release the lock and then drop the fds. PiperOrigin-RevId: 331844185
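The locking pattern this commit adopts can be sketched on its own: collect the files to drop while holding the table lock, release the lock, and only then run the potentially blocking drop step (such as a close() that lingers). The table and removeIf names below are hypothetical stand-ins for illustration, not the real FDTable API.

package main

import (
	"fmt"
	"sync"
)

// table is a hypothetical stand-in for an FD table: a mutex-protected map
// from descriptor numbers to files.
type table struct {
	mu    sync.Mutex
	files map[int]string
}

// removeIf deletes every matching entry while holding the lock, but defers
// the potentially slow drop step until after the lock is released, mirroring
// the pattern described in the commit above.
func (t *table) removeIf(cond func(string) bool, drop func(string)) {
	var dropped []string

	t.mu.Lock()
	for fd, f := range t.files {
		if cond(f) {
			delete(t.files, fd)
			dropped = append(dropped, f)
		}
	}
	t.mu.Unlock() // release before any blocking work, e.g. a lingering close

	for _, f := range dropped {
		drop(f)
	}
}

func main() {
	t := &table{files: map[int]string{0: "socket", 1: "pipe"}}
	t.removeIf(
		func(f string) bool { return f == "socket" },
		func(f string) { fmt.Println("dropped", f) },
	)
}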
259,951
15.09.2020 14:47:34
25,200
72a30b11486b48394fa0edca500b80e4ca83b10c
Move reusable IPv4 test code into a testutil module and refactor it. The refactor aims to simplify the package by replacing the Go channel with a PacketBuffer slice. This code will be reused by tests for IPv6 fragmentation.
[ { "change_type": "MODIFY", "old_path": "pkg/tcpip/network/ipv4/BUILD", "new_path": "pkg/tcpip/network/ipv4/BUILD", "diff": "@@ -30,6 +30,7 @@ go_test(\n\"//pkg/tcpip/link/channel\",\n\"//pkg/tcpip/link/sniffer\",\n\"//pkg/tcpip/network/ipv4\",\n+ \"//pkg/tcpip/network/testutil\",\n\"//pkg/tcpip/stack\",\n\"//pkg/tcpip/transport/tcp\",\n\"//pkg/tcpip/transport/udp\",\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/network/ipv4/ipv4_test.go", "new_path": "pkg/tcpip/network/ipv4/ipv4_test.go", "diff": "@@ -17,8 +17,6 @@ package ipv4_test\nimport (\n\"bytes\"\n\"encoding/hex\"\n- \"fmt\"\n- \"math/rand\"\n\"testing\"\n\"github.com/google/go-cmp/cmp\"\n@@ -28,6 +26,7 @@ import (\n\"gvisor.dev/gvisor/pkg/tcpip/link/channel\"\n\"gvisor.dev/gvisor/pkg/tcpip/link/sniffer\"\n\"gvisor.dev/gvisor/pkg/tcpip/network/ipv4\"\n+ \"gvisor.dev/gvisor/pkg/tcpip/network/testutil\"\n\"gvisor.dev/gvisor/pkg/tcpip/stack\"\n\"gvisor.dev/gvisor/pkg/tcpip/transport/tcp\"\n\"gvisor.dev/gvisor/pkg/tcpip/transport/udp\"\n@@ -92,31 +91,6 @@ func TestExcludeBroadcast(t *testing.T) {\n})\n}\n-// makeRandPkt generates a randomize packet. hdrLength indicates how much\n-// data should already be in the header before WritePacket. extraLength\n-// indicates how much extra space should be in the header. The payload is made\n-// from many Views of the sizes listed in viewSizes.\n-func makeRandPkt(hdrLength int, extraLength int, viewSizes []int) *stack.PacketBuffer {\n- var views []buffer.View\n- totalLength := 0\n- for _, s := range viewSizes {\n- newView := buffer.NewView(s)\n- rand.Read(newView)\n- views = append(views, newView)\n- totalLength += s\n- }\n-\n- pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{\n- ReserveHeaderBytes: hdrLength + extraLength,\n- Data: buffer.NewVectorisedView(totalLength, views),\n- })\n- pkt.NetworkProtocolNumber = header.IPv4ProtocolNumber\n- if _, err := rand.Read(pkt.TransportHeader().Push(hdrLength)); err != nil {\n- panic(fmt.Sprintf(\"rand.Read: %s\", err))\n- }\n- return pkt\n-}\n-\n// comparePayloads compared the contents of all the packets against the contents\n// of the source packet.\nfunc compareFragments(t *testing.T, packets []*stack.PacketBuffer, sourcePacketInfo *stack.PacketBuffer, mtu uint32) {\n@@ -186,63 +160,19 @@ func compareFragments(t *testing.T, packets []*stack.PacketBuffer, sourcePacketI\n}\n}\n-type errorChannel struct {\n- *channel.Endpoint\n- Ch chan *stack.PacketBuffer\n- packetCollectorErrors []*tcpip.Error\n-}\n-\n-// newErrorChannel creates a new errorChannel endpoint. 
Each call to WritePacket\n-// will return successive errors from packetCollectorErrors until the list is\n-// empty and then return nil each time.\n-func newErrorChannel(size int, mtu uint32, linkAddr tcpip.LinkAddress, packetCollectorErrors []*tcpip.Error) *errorChannel {\n- return &errorChannel{\n- Endpoint: channel.New(size, mtu, linkAddr),\n- Ch: make(chan *stack.PacketBuffer, size),\n- packetCollectorErrors: packetCollectorErrors,\n- }\n-}\n-\n-// Drain removes all outbound packets from the channel and counts them.\n-func (e *errorChannel) Drain() int {\n- c := 0\n- for {\n- select {\n- case <-e.Ch:\n- c++\n- default:\n- return c\n- }\n- }\n-}\n-\n-// WritePacket stores outbound packets into the channel.\n-func (e *errorChannel) WritePacket(r *stack.Route, gso *stack.GSO, protocol tcpip.NetworkProtocolNumber, pkt *stack.PacketBuffer) *tcpip.Error {\n- select {\n- case e.Ch <- pkt:\n- default:\n- }\n-\n- nextError := (*tcpip.Error)(nil)\n- if len(e.packetCollectorErrors) > 0 {\n- nextError = e.packetCollectorErrors[0]\n- e.packetCollectorErrors = e.packetCollectorErrors[1:]\n- }\n- return nextError\n-}\n-\n-type context struct {\n+type testRoute struct {\nstack.Route\n- linkEP *errorChannel\n+\n+ linkEP *testutil.TestEndpoint\n}\n-func buildContext(t *testing.T, packetCollectorErrors []*tcpip.Error, mtu uint32) context {\n+func buildTestRoute(t *testing.T, ep *channel.Endpoint, packetCollectorErrors []*tcpip.Error) testRoute {\n// Make the packet and write it.\ns := stack.New(stack.Options{\nNetworkProtocols: []stack.NetworkProtocol{ipv4.NewProtocol()},\n})\n- ep := newErrorChannel(100 /* Enough for all tests. */, mtu, \"\", packetCollectorErrors)\n- s.CreateNIC(1, ep)\n+ testEP := testutil.NewTestEndpoint(ep, packetCollectorErrors)\n+ s.CreateNIC(1, testEP)\nconst (\nsrc = \"\\x10\\x00\\x00\\x01\"\ndst = \"\\x10\\x00\\x00\\x02\"\n@@ -262,9 +192,12 @@ func buildContext(t *testing.T, packetCollectorErrors []*tcpip.Error, mtu uint32\nif err != nil {\nt.Fatalf(\"s.FindRoute got %v, want %v\", err, nil)\n}\n- return context{\n+ t.Cleanup(func() {\n+ testEP.Close()\n+ })\n+ return testRoute{\nRoute: r,\n- linkEP: ep,\n+ linkEP: testEP,\n}\n}\n@@ -277,8 +210,8 @@ func TestFragmentation(t *testing.T) {\ndescription string\nmtu uint32\ngso *stack.GSO\n- hdrLength int\n- extraLength int\n+ transportHeaderLength int\n+ extraHeaderReserveLength int\npayloadViewsSizes []int\nexpectedFrags int\n}{\n@@ -295,10 +228,10 @@ func TestFragmentation(t *testing.T) {\nfor _, ft := range fragTests {\nt.Run(ft.description, func(t *testing.T) {\n- pkt := makeRandPkt(ft.hdrLength, ft.extraLength, ft.payloadViewsSizes)\n+ r := buildTestRoute(t, channel.New(0, ft.mtu, \"\"), nil)\n+ pkt := testutil.MakeRandPkt(ft.transportHeaderLength, ft.extraHeaderReserveLength, ft.payloadViewsSizes, header.IPv4ProtocolNumber)\nsource := pkt.Clone()\n- c := buildContext(t, nil, ft.mtu)\n- err := c.Route.WritePacket(ft.gso, stack.NetworkHeaderParams{\n+ err := r.WritePacket(ft.gso, stack.NetworkHeaderParams{\nProtocol: tcp.ProtocolNumber,\nTTL: 42,\nTOS: stack.DefaultTOS,\n@@ -307,24 +240,13 @@ func TestFragmentation(t *testing.T) {\nt.Errorf(\"err got %v, want %v\", err, nil)\n}\n- var results []*stack.PacketBuffer\n- L:\n- for {\n- select {\n- case pi := <-c.linkEP.Ch:\n- results = append(results, pi)\n- default:\n- break L\n- }\n- }\n-\n- if got, want := len(results), ft.expectedFrags; got != want {\n- t.Errorf(\"len(result) got %d, want %d\", got, want)\n+ if got, want := len(r.linkEP.WrittenPackets), ft.expectedFrags; got 
!= want {\n+ t.Errorf(\"len(r.linkEP.WrittenPackets) got %d, want %d\", got, want)\n}\n- if got, want := len(results), int(c.Route.Stats().IP.PacketsSent.Value()); got != want {\n- t.Errorf(\"no errors yet len(result) got %d, want %d\", got, want)\n+ if got, want := len(r.linkEP.WrittenPackets), int(r.Stats().IP.PacketsSent.Value()); got != want {\n+ t.Errorf(\"no errors yet len(r.linkEP.WrittenPackets) got %d, want %d\", got, want)\n}\n- compareFragments(t, results, source, ft.mtu)\n+ compareFragments(t, r.linkEP.WrittenPackets, source, ft.mtu)\n})\n}\n}\n@@ -335,21 +257,21 @@ func TestFragmentationErrors(t *testing.T) {\nfragTests := []struct {\ndescription string\nmtu uint32\n- hdrLength int\n+ transportHeaderLength int\npayloadViewsSizes []int\npacketCollectorErrors []*tcpip.Error\n}{\n{\"NoFrag\", 2000, 0, []int{1000}, []*tcpip.Error{tcpip.ErrAborted}},\n{\"ErrorOnFirstFrag\", 500, 0, []int{1000}, []*tcpip.Error{tcpip.ErrAborted}},\n{\"ErrorOnSecondFrag\", 500, 0, []int{1000}, []*tcpip.Error{nil, tcpip.ErrAborted}},\n- {\"ErrorOnFirstFragMTUSmallerThanHdr\", 500, 1000, []int{500}, []*tcpip.Error{tcpip.ErrAborted}},\n+ {\"ErrorOnFirstFragMTUSmallerThanHeader\", 500, 1000, []int{500}, []*tcpip.Error{tcpip.ErrAborted}},\n}\nfor _, ft := range fragTests {\nt.Run(ft.description, func(t *testing.T) {\n- pkt := makeRandPkt(ft.hdrLength, header.IPv4MinimumSize, ft.payloadViewsSizes)\n- c := buildContext(t, ft.packetCollectorErrors, ft.mtu)\n- err := c.Route.WritePacket(&stack.GSO{}, stack.NetworkHeaderParams{\n+ r := buildTestRoute(t, channel.New(0, ft.mtu, \"\"), ft.packetCollectorErrors)\n+ pkt := testutil.MakeRandPkt(ft.transportHeaderLength, header.IPv4MinimumSize, ft.payloadViewsSizes, header.IPv4ProtocolNumber)\n+ err := r.WritePacket(&stack.GSO{}, stack.NetworkHeaderParams{\nProtocol: tcp.ProtocolNumber,\nTTL: 42,\nTOS: stack.DefaultTOS,\n@@ -364,7 +286,7 @@ func TestFragmentationErrors(t *testing.T) {\nif got, want := err, ft.packetCollectorErrors[len(ft.packetCollectorErrors)-1]; got != want {\nt.Errorf(\"err got %v, want %v\", got, want)\n}\n- if got, want := c.linkEP.Drain(), int(c.Route.Stats().IP.PacketsSent.Value())+1; err != nil && got != want {\n+ if got, want := len(r.linkEP.WrittenPackets), int(r.Stats().IP.PacketsSent.Value())+1; err != nil && got != want {\nt.Errorf(\"after linkEP error len(result) got %d, want %d\", got, want)\n}\n})\n" }, { "change_type": "ADD", "old_path": null, "new_path": "pkg/tcpip/network/testutil/BUILD", "diff": "+load(\"//tools:defs.bzl\", \"go_library\")\n+\n+package(licenses = [\"notice\"])\n+\n+go_library(\n+ name = \"testutil\",\n+ srcs = [\n+ \"testutil.go\",\n+ ],\n+ visibility = [\"//pkg/tcpip/network/ipv4:__pkg__\"],\n+ deps = [\n+ \"//pkg/tcpip\",\n+ \"//pkg/tcpip/buffer\",\n+ \"//pkg/tcpip/link/channel\",\n+ \"//pkg/tcpip/stack\",\n+ ],\n+)\n" }, { "change_type": "ADD", "old_path": null, "new_path": "pkg/tcpip/network/testutil/testutil.go", "diff": "+// Copyright 2020 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// 
limitations under the License.\n+\n+// Package testutil defines types and functions used to test Network Layer\n+// functionality such as IP fragmentation.\n+package testutil\n+\n+import (\n+ \"fmt\"\n+ \"math/rand\"\n+\n+ \"gvisor.dev/gvisor/pkg/tcpip\"\n+ \"gvisor.dev/gvisor/pkg/tcpip/buffer\"\n+ \"gvisor.dev/gvisor/pkg/tcpip/link/channel\"\n+ \"gvisor.dev/gvisor/pkg/tcpip/stack\"\n+)\n+\n+// TestEndpoint is an endpoint used for testing, it stores packets written to it\n+// and can mock errors.\n+type TestEndpoint struct {\n+ *channel.Endpoint\n+\n+ // WrittenPackets is where we store packets written via WritePacket().\n+ WrittenPackets []*stack.PacketBuffer\n+\n+ packetCollectorErrors []*tcpip.Error\n+}\n+\n+// NewTestEndpoint creates a new TestEndpoint endpoint.\n+//\n+// packetCollectorErrors can be used to set error values and each call to\n+// WritePacket will remove the first one from the slice and return it until\n+// the slice is empty - at that point it will return nil every time.\n+func NewTestEndpoint(ep *channel.Endpoint, packetCollectorErrors []*tcpip.Error) *TestEndpoint {\n+ return &TestEndpoint{\n+ Endpoint: ep,\n+ WrittenPackets: make([]*stack.PacketBuffer, 0),\n+ packetCollectorErrors: packetCollectorErrors,\n+ }\n+}\n+\n+// WritePacket stores outbound packets and may return an error if one was\n+// injected.\n+func (e *TestEndpoint) WritePacket(_ *stack.Route, _ *stack.GSO, _ tcpip.NetworkProtocolNumber, pkt *stack.PacketBuffer) *tcpip.Error {\n+ e.WrittenPackets = append(e.WrittenPackets, pkt)\n+\n+ if len(e.packetCollectorErrors) > 0 {\n+ nextError := e.packetCollectorErrors[0]\n+ e.packetCollectorErrors = e.packetCollectorErrors[1:]\n+ return nextError\n+ }\n+\n+ return nil\n+}\n+\n+// MakeRandPkt generates a randomized packet. transportHeaderLength indicates\n+// how many random bytes will be copied in the Transport Header.\n+// extraHeaderReserveLength indicates how much extra space will be reserved for\n+// the other headers. The payload is made from Views of the sizes listed in\n+// viewSizes.\n+func MakeRandPkt(transportHeaderLength int, extraHeaderReserveLength int, viewSizes []int, proto tcpip.NetworkProtocolNumber) *stack.PacketBuffer {\n+ var views buffer.VectorisedView\n+\n+ for _, s := range viewSizes {\n+ newView := buffer.NewView(s)\n+ if _, err := rand.Read(newView); err != nil {\n+ panic(fmt.Sprintf(\"rand.Read: %s\", err))\n+ }\n+ views.AppendView(newView)\n+ }\n+\n+ pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{\n+ ReserveHeaderBytes: transportHeaderLength + extraHeaderReserveLength,\n+ Data: views,\n+ })\n+ pkt.NetworkProtocolNumber = proto\n+ if _, err := rand.Read(pkt.TransportHeader().Push(transportHeaderLength)); err != nil {\n+ panic(fmt.Sprintf(\"rand.Read: %s\", err))\n+ }\n+ return pkt\n+}\n" } ]
Go
Apache License 2.0
google/gvisor
Move reusable IPv4 test code into a testutil module and refactor it. The refactor aims to simplify the package by replacing the Go channel with a PacketBuffer slice. This code will be reused by tests for IPv6 fragmentation. PiperOrigin-RevId: 331860411
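The core of the refactor — swapping a channel-based collector for a slice of written packets plus a queue of injected errors — can be shown with a stripped-down test double. recordingEndpoint and its WritePacket method are made-up names for illustration, not the actual testutil.TestEndpoint.

package main

import (
	"errors"
	"fmt"
)

// recordingEndpoint records everything written to it and returns injected
// errors in order, the same idea as the TestEndpoint added by the diff.
type recordingEndpoint struct {
	Written []string
	errs    []error
}

// WritePacket appends the packet to Written and pops the next injected
// error, if any.
func (e *recordingEndpoint) WritePacket(pkt string) error {
	e.Written = append(e.Written, pkt)
	if len(e.errs) > 0 {
		err := e.errs[0]
		e.errs = e.errs[1:]
		return err
	}
	return nil
}

func main() {
	ep := &recordingEndpoint{errs: []error{nil, errors.New("aborted")}}
	fmt.Println(ep.WritePacket("frag-1")) // <nil>
	fmt.Println(ep.WritePacket("frag-2")) // aborted
	fmt.Println(len(ep.Written))          // 2: tests can inspect the slice directly
}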
259,860
15.09.2020 14:50:45
25,200
a004f0d082458ce98114b59a3e5d41c2e74541d6
Support setting STATX_SIZE for kernfs.InodeAttrs. Make setting STATX_SIZE a no-op, if it is valid for the given permissions and file type. Also update proc tests, which were overfitted before. Fixes #3842. Updates #1193.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/kernfs/inode_impl_util.go", "new_path": "pkg/sentry/fsimpl/kernfs/inode_impl_util.go", "diff": "@@ -259,9 +259,19 @@ func (a *InodeAttrs) SetStat(ctx context.Context, fs *vfs.Filesystem, creds *aut\nif opts.Stat.Mask == 0 {\nreturn nil\n}\n- if opts.Stat.Mask&^(linux.STATX_MODE|linux.STATX_UID|linux.STATX_GID) != 0 {\n+\n+ // Note that not all fields are modifiable. For example, the file type and\n+ // inode numbers are immutable after node creation. Setting the size is often\n+ // allowed by kernfs files but does not do anything. If some other behavior is\n+ // needed, the embedder should consider extending SetStat.\n+ //\n+ // TODO(gvisor.dev/issue/1193): Implement other stat fields like timestamps.\n+ if opts.Stat.Mask&^(linux.STATX_MODE|linux.STATX_UID|linux.STATX_GID|linux.STATX_SIZE) != 0 {\nreturn syserror.EPERM\n}\n+ if opts.Stat.Mask&linux.STATX_SIZE != 0 && a.Mode().IsDir() {\n+ return syserror.EISDIR\n+ }\nif err := vfs.CheckSetStat(ctx, creds, &opts, a.Mode(), auth.KUID(atomic.LoadUint32(&a.uid)), auth.KGID(atomic.LoadUint32(&a.gid))); err != nil {\nreturn err\n}\n@@ -284,13 +294,6 @@ func (a *InodeAttrs) SetStat(ctx context.Context, fs *vfs.Filesystem, creds *aut\natomic.StoreUint32(&a.gid, stat.GID)\n}\n- // Note that not all fields are modifiable. For example, the file type and\n- // inode numbers are immutable after node creation.\n-\n- // TODO(gvisor.dev/issue/1193): Implement other stat fields like timestamps.\n- // Also, STATX_SIZE will need some special handling, because read-only static\n- // files should return EIO for truncate operations.\n-\nreturn nil\n}\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/proc.cc", "new_path": "test/syscalls/linux/proc.cc", "diff": "@@ -797,17 +797,12 @@ TEST(ProcCpuinfo, DeniesWriteNonRoot) {\nconstexpr int kNobody = 65534;\nEXPECT_THAT(syscall(SYS_setuid, kNobody), SyscallSucceeds());\nEXPECT_THAT(open(\"/proc/cpuinfo\", O_WRONLY), SyscallFailsWithErrno(EACCES));\n- // TODO(gvisor.dev/issue/1193): Properly support setting size attributes in\n- // kernfs.\n- if (!IsRunningOnGvisor() || IsRunningWithVFS1()) {\n- EXPECT_THAT(truncate(\"/proc/cpuinfo\", 123),\n- SyscallFailsWithErrno(EACCES));\n- }\n+ EXPECT_THAT(truncate(\"/proc/cpuinfo\", 123), SyscallFailsWithErrno(EACCES));\n});\n}\n// With root privileges, it is possible to open /proc/cpuinfo with write mode,\n-// but all write operations will return EIO.\n+// but all write operations should fail.\nTEST(ProcCpuinfo, DeniesWriteRoot) {\n// VFS1 does not behave differently for root/non-root.\nSKIP_IF(IsRunningWithVFS1());\n@@ -816,16 +811,10 @@ TEST(ProcCpuinfo, DeniesWriteRoot) {\nint fd;\nEXPECT_THAT(fd = open(\"/proc/cpuinfo\", O_WRONLY), SyscallSucceeds());\nif (fd > 0) {\n- EXPECT_THAT(write(fd, \"x\", 1), SyscallFailsWithErrno(EIO));\n- EXPECT_THAT(pwrite(fd, \"x\", 1, 123), SyscallFailsWithErrno(EIO));\n- }\n- // TODO(gvisor.dev/issue/1193): Properly support setting size attributes in\n- // kernfs.\n- if (!IsRunningOnGvisor() || IsRunningWithVFS1()) {\n- if (fd > 0) {\n- EXPECT_THAT(ftruncate(fd, 123), SyscallFailsWithErrno(EIO));\n- }\n- EXPECT_THAT(truncate(\"/proc/cpuinfo\", 123), SyscallFailsWithErrno(EIO));\n+ // Truncate is not tested--it may succeed on some kernels without doing\n+ // anything.\n+ EXPECT_THAT(write(fd, \"x\", 1), SyscallFails());\n+ EXPECT_THAT(pwrite(fd, \"x\", 1, 123), SyscallFails());\n}\n}\n" } ]
Go
Apache License 2.0
google/gvisor
Support setting STATX_SIZE for kernfs.InodeAttrs. Make setting STATX_SIZE a no-op, if it is valid for the given permissions and file type. Also update proc tests, which were overfitted before. Fixes #3842. Updates #1193. PiperOrigin-RevId: 331861087
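A minimal sketch of the mask checks this change adds to SetStat, using invented constant and error names in place of the linux package's STATX_* values and errno types: unsupported fields are rejected, truncating a directory fails, and a size-only change on a regular file is accepted (and then ignored).

package main

import (
	"errors"
	"fmt"
)

// Invented stand-ins for the STATX_* mask bits referenced by the diff.
const (
	statxMode uint32 = 1 << iota
	statxUID
	statxGID
	statxSize
)

var (
	errPerm  = errors.New("EPERM")
	errIsDir = errors.New("EISDIR")
)

// checkSetStatMask mirrors the checks in the diff: any bit outside the
// modifiable set is rejected, and STATX_SIZE on a directory is an error.
func checkSetStatMask(mask uint32, isDir bool) error {
	if mask&^(statxMode|statxUID|statxGID|statxSize) != 0 {
		return errPerm
	}
	if mask&statxSize != 0 && isDir {
		return errIsDir
	}
	return nil
}

func main() {
	fmt.Println(checkSetStatMask(statxSize, false)) // <nil>: truncate allowed, but a no-op
	fmt.Println(checkSetStatMask(statxSize, true))  // EISDIR
	fmt.Println(checkSetStatMask(1<<10, false))     // EPERM: field is not modifiable
}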
259,885
15.09.2020 15:12:50
25,200
456c6c33e104e7421a5a815bba69d46b289ae724
Invert dependency between the context and amutex packages. This is to allow the syserror package to depend on the context package in a future change.
[ { "change_type": "MODIFY", "old_path": "pkg/amutex/BUILD", "new_path": "pkg/amutex/BUILD", "diff": "@@ -6,7 +6,10 @@ go_library(\nname = \"amutex\",\nsrcs = [\"amutex.go\"],\nvisibility = [\"//:sandbox\"],\n- deps = [\"//pkg/syserror\"],\n+ deps = [\n+ \"//pkg/context\",\n+ \"//pkg/syserror\",\n+ ],\n)\ngo_test(\n" }, { "change_type": "MODIFY", "old_path": "pkg/amutex/amutex.go", "new_path": "pkg/amutex/amutex.go", "diff": "@@ -19,41 +19,17 @@ package amutex\nimport (\n\"sync/atomic\"\n+ \"gvisor.dev/gvisor/pkg/context\"\n\"gvisor.dev/gvisor/pkg/syserror\"\n)\n// Sleeper must be implemented by users of the abortable mutex to allow for\n// cancellation of waits.\n-type Sleeper interface {\n- // SleepStart is called by the AbortableMutex.Lock() function when the\n- // mutex is contended and the goroutine is about to sleep.\n- //\n- // A channel can be returned that causes the sleep to be canceled if\n- // it's readable. If no cancellation is desired, nil can be returned.\n- SleepStart() <-chan struct{}\n-\n- // SleepFinish is called by AbortableMutex.Lock() once a contended mutex\n- // is acquired or the wait is aborted.\n- SleepFinish(success bool)\n-\n- // Interrupted returns true if the wait is aborted.\n- Interrupted() bool\n-}\n+type Sleeper = context.ChannelSleeper\n// NoopSleeper is a stateless no-op implementation of Sleeper for anonymous\n// embedding in other types that do not support cancelation.\n-type NoopSleeper struct{}\n-\n-// SleepStart implements Sleeper.SleepStart.\n-func (NoopSleeper) SleepStart() <-chan struct{} {\n- return nil\n-}\n-\n-// SleepFinish implements Sleeper.SleepFinish.\n-func (NoopSleeper) SleepFinish(success bool) {}\n-\n-// Interrupted implements Sleeper.Interrupted.\n-func (NoopSleeper) Interrupted() bool { return false }\n+type NoopSleeper = context.Context\n// Block blocks until either receiving from ch succeeds (in which case it\n// returns nil) or sleeper is interrupted (in which case it returns\n" }, { "change_type": "MODIFY", "old_path": "pkg/context/BUILD", "new_path": "pkg/context/BUILD", "diff": "@@ -7,7 +7,6 @@ go_library(\nsrcs = [\"context.go\"],\nvisibility = [\"//:sandbox\"],\ndeps = [\n- \"//pkg/amutex\",\n\"//pkg/log\",\n],\n)\n" }, { "change_type": "MODIFY", "old_path": "pkg/context/context.go", "new_path": "pkg/context/context.go", "diff": "@@ -26,7 +26,6 @@ import (\n\"context\"\n\"time\"\n- \"gvisor.dev/gvisor/pkg/amutex\"\n\"gvisor.dev/gvisor/pkg/log\"\n)\n@@ -68,9 +67,10 @@ func ThreadGroupIDFromContext(ctx Context) (tgid int32, ok bool) {\n// In both cases, values extracted from the Context should be used instead.\ntype Context interface {\nlog.Logger\n- amutex.Sleeper\ncontext.Context\n+ ChannelSleeper\n+\n// UninterruptibleSleepStart indicates the beginning of an uninterruptible\n// sleep state (equivalent to Linux's TASK_UNINTERRUPTIBLE). If deactivate\n// is true and the Context represents a Task, the Task's AddressSpace is\n@@ -85,29 +85,60 @@ type Context interface {\nUninterruptibleSleepFinish(activate bool)\n}\n-// NoopSleeper is a noop implementation of amutex.Sleeper and UninterruptibleSleep\n-// methods for anonymous embedding in other types that do not implement sleeps.\n-type NoopSleeper struct {\n- amutex.NoopSleeper\n+// A ChannelSleeper represents a goroutine that may sleep interruptibly, where\n+// interruption is indicated by a channel becoming readable.\n+type ChannelSleeper interface {\n+ // SleepStart is called before going to sleep interruptibly. 
If SleepStart\n+ // returns a non-nil channel and that channel becomes ready for receiving\n+ // while the goroutine is sleeping, the goroutine should be woken, and\n+ // SleepFinish(false) should be called. Otherwise, SleepFinish(true) should\n+ // be called after the goroutine stops sleeping.\n+ SleepStart() <-chan struct{}\n+\n+ // SleepFinish is called after an interruptibly-sleeping goroutine stops\n+ // sleeping, as documented by SleepStart.\n+ SleepFinish(success bool)\n+\n+ // Interrupted returns true if the channel returned by SleepStart is\n+ // ready for receiving.\n+ Interrupted() bool\n+}\n+\n+// NoopSleeper is a noop implementation of ChannelSleeper and\n+// Context.UninterruptibleSleep* methods for anonymous embedding in other types\n+// that do not implement special behavior around sleeps.\n+type NoopSleeper struct{}\n+\n+// SleepStart implements ChannelSleeper.SleepStart.\n+func (NoopSleeper) SleepStart() <-chan struct{} {\n+ return nil\n+}\n+\n+// SleepFinish implements ChannelSleeper.SleepFinish.\n+func (NoopSleeper) SleepFinish(success bool) {}\n+\n+// Interrupted implements ChannelSleeper.Interrupted.\n+func (NoopSleeper) Interrupted() bool {\n+ return false\n}\n-// UninterruptibleSleepStart does nothing.\n-func (NoopSleeper) UninterruptibleSleepStart(bool) {}\n+// UninterruptibleSleepStart implements Context.UninterruptibleSleepStart.\n+func (NoopSleeper) UninterruptibleSleepStart(deactivate bool) {}\n-// UninterruptibleSleepFinish does nothing.\n-func (NoopSleeper) UninterruptibleSleepFinish(bool) {}\n+// UninterruptibleSleepFinish implements Context.UninterruptibleSleepFinish.\n+func (NoopSleeper) UninterruptibleSleepFinish(activate bool) {}\n-// Deadline returns zero values, meaning no deadline.\n+// Deadline implements context.Context.Deadline.\nfunc (NoopSleeper) Deadline() (time.Time, bool) {\nreturn time.Time{}, false\n}\n-// Done returns nil.\n+// Done implements context.Context.Done.\nfunc (NoopSleeper) Done() <-chan struct{} {\nreturn nil\n}\n-// Err returns nil.\n+// Err returns context.Context.Err.\nfunc (NoopSleeper) Err() error {\nreturn nil\n}\n" } ]
Go
Apache License 2.0
google/gvisor
Invert dependency between the context and amutex packages. This is to allow the syserror package to depend on the context package in a future change. PiperOrigin-RevId: 331866252
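The inversion works by moving the interface definition into the lower-level package and leaving a type alias behind under the old name, so existing callers keep compiling. The single-file sketch below uses invented names to show the aliasing trick; the real change aliases amutex.Sleeper and amutex.NoopSleeper to types in the context package.

package main

import "fmt"

// sleeper stands in for the interface after the move (context.ChannelSleeper
// in the diff).
type sleeper interface {
	SleepStart() <-chan struct{}
	SleepFinish(success bool)
	Interrupted() bool
}

// legacySleeper keeps the old name alive as a type alias (amutex.Sleeper in
// the diff), so code written against the old name still compiles.
type legacySleeper = sleeper

// noopSleeper is a trivial implementation, similar in spirit to NoopSleeper.
type noopSleeper struct{}

func (noopSleeper) SleepStart() <-chan struct{} { return nil }
func (noopSleeper) SleepFinish(success bool)    {}
func (noopSleeper) Interrupted() bool           { return false }

func main() {
	var s legacySleeper = noopSleeper{}
	fmt.Println(s.Interrupted()) // false; old and new names denote the same type
}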
260,001
15.09.2020 19:00:14
25,200
cb2e3c946a376097f695118384cf3a147905aa18
Implement gvisor verity fs ioctl with GETFLAGS
[ { "change_type": "MODIFY", "old_path": "pkg/abi/linux/ioctl.go", "new_path": "pkg/abi/linux/ioctl.go", "diff": "@@ -113,6 +113,12 @@ const (\n_IOC_DIRSHIFT = _IOC_SIZESHIFT + _IOC_SIZEBITS\n)\n+// Constants from uapi/linux/fs.h.\n+const (\n+ FS_IOC_GETFLAGS = 2147771905\n+ FS_VERITY_FL = 1048576\n+)\n+\n// Constants from uapi/linux/fsverity.h.\nconst (\nFS_IOC_ENABLE_VERITY = 1082156677\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/verity/BUILD", "new_path": "pkg/sentry/fsimpl/verity/BUILD", "diff": "@@ -16,6 +16,7 @@ go_library(\n\"//pkg/merkletree\",\n\"//pkg/sentry/arch\",\n\"//pkg/sentry/fs/lock\",\n+ \"//pkg/sentry/kernel\",\n\"//pkg/sentry/kernel/auth\",\n\"//pkg/sentry/socket/unix/transport\",\n\"//pkg/sentry/vfs\",\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/verity/verity.go", "new_path": "pkg/sentry/fsimpl/verity/verity.go", "diff": "@@ -32,6 +32,7 @@ import (\n\"gvisor.dev/gvisor/pkg/merkletree\"\n\"gvisor.dev/gvisor/pkg/sentry/arch\"\nfslock \"gvisor.dev/gvisor/pkg/sentry/fs/lock\"\n+ \"gvisor.dev/gvisor/pkg/sentry/kernel\"\n\"gvisor.dev/gvisor/pkg/sentry/kernel/auth\"\n\"gvisor.dev/gvisor/pkg/sentry/vfs\"\n\"gvisor.dev/gvisor/pkg/sync\"\n@@ -589,11 +590,25 @@ func (fd *fileDescription) enableVerity(ctx context.Context, uio usermem.IO, arg\nreturn 0, nil\n}\n+func (fd *fileDescription) getFlags(ctx context.Context, uio usermem.IO, args arch.SyscallArguments) (uintptr, error) {\n+ f := int32(0)\n+\n+ // All enabled files should store a root hash. This flag is not settable\n+ // via FS_IOC_SETFLAGS.\n+ if len(fd.d.rootHash) != 0 {\n+ f |= linux.FS_VERITY_FL\n+ }\n+ _, err := kernel.TaskFromContext(ctx).CopyOut(args[2].Pointer(), f)\n+ return 0, err\n+}\n+\n// Ioctl implements vfs.FileDescriptionImpl.Ioctl.\nfunc (fd *fileDescription) Ioctl(ctx context.Context, uio usermem.IO, args arch.SyscallArguments) (uintptr, error) {\nswitch cmd := args[1].Uint(); cmd {\ncase linux.FS_IOC_ENABLE_VERITY:\nreturn fd.enableVerity(ctx, uio, args)\n+ case linux.FS_IOC_GETFLAGS:\n+ return fd.getFlags(ctx, uio, args)\ndefault:\nreturn fd.lowerFD.Ioctl(ctx, uio, args)\n}\n" } ]
Go
Apache License 2.0
google/gvisor
Implement gvisor verity fs ioctl with GETFLAGS. PiperOrigin-RevId: 331905347
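A small sketch of the flag computation this commit wires into Ioctl for FS_IOC_GETFLAGS, with lower-cased stand-ins for the linux constants; it illustrates the logic only and is not the verity filesystem's actual method.

package main

import "fmt"

// Invented stand-ins for the linux package constants named in the diff.
const (
	fsIocGetflags = 2147771905 // FS_IOC_GETFLAGS
	fsVerityFl    = 1048576    // FS_VERITY_FL
)

// getFlags reports FS_VERITY_FL only when the file already stores a root
// hash; the flag is not settable by the caller.
func getFlags(rootHash []byte) int32 {
	var f int32
	if len(rootHash) != 0 {
		f |= fsVerityFl
	}
	return f
}

func main() {
	fmt.Println(getFlags(nil))                        // 0: verity not enabled yet
	fmt.Println(getFlags([]byte{0xab}) == fsVerityFl) // true once a root hash exists
}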