author (int64, 658 to 755k) | date (string, 19 chars) | timezone (int64, -46,800 to 43.2k) | hash (string, 40 chars) | message (string, 5 to 490 chars) | mods (list) | language (20 classes) | license (3 classes) | repo (string, 5 to 68 chars) | original_message (string, 12 to 491 chars)
---|---|---|---|---|---|---|---|---|---|
259,992 | 01.10.2019 13:49:35 | 25,200 | 739f53fc17e3e3ed82dc2bc920f49fa91738a437 | Add runsc logs to kokoro artifacts | [
{
"change_type": "MODIFY",
"old_path": "kokoro/docker_tests.cfg",
"new_path": "kokoro/docker_tests.cfg",
"diff": "@@ -5,5 +5,6 @@ action {\nregex: \"**/sponge_log.xml\"\nregex: \"**/sponge_log.log\"\nregex: \"**/outputs.zip\"\n+ regex: \"**/runsc_logs_*.tar.gz\"\n}\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "kokoro/hostnet_tests.cfg",
"new_path": "kokoro/hostnet_tests.cfg",
"diff": "@@ -5,5 +5,6 @@ action {\nregex: \"**/sponge_log.xml\"\nregex: \"**/sponge_log.log\"\nregex: \"**/outputs.zip\"\n+ regex: \"**/runsc_logs_*.tar.gz\"\n}\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "kokoro/kvm_tests.cfg",
"new_path": "kokoro/kvm_tests.cfg",
"diff": "@@ -5,5 +5,6 @@ action {\nregex: \"**/sponge_log.xml\"\nregex: \"**/sponge_log.log\"\nregex: \"**/outputs.zip\"\n+ regex: \"**/runsc_logs_*.tar.gz\"\n}\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "kokoro/overlay_tests.cfg",
"new_path": "kokoro/overlay_tests.cfg",
"diff": "@@ -5,5 +5,6 @@ action {\nregex: \"**/sponge_log.xml\"\nregex: \"**/sponge_log.log\"\nregex: \"**/outputs.zip\"\n+ regex: \"**/runsc_logs_*.tar.gz\"\n}\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "kokoro/root_tests.cfg",
"new_path": "kokoro/root_tests.cfg",
"diff": "@@ -5,5 +5,6 @@ action {\nregex: \"**/sponge_log.xml\"\nregex: \"**/sponge_log.log\"\nregex: \"**/outputs.zip\"\n+ regex: \"**/runsc_logs_*.tar.gz\"\n}\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "scripts/common_bazel.sh",
"new_path": "scripts/common_bazel.sh",
"diff": "@@ -79,9 +79,16 @@ function collect_logs() {\n# Collect sentry logs, if any.\nif [[ -v RUNSC_LOGS_DIR ]] && [[ -d \"${RUNSC_LOGS_DIR}\" ]]; then\n- local -r logs=$(ls \"${RUNSC_LOGS_DIR}\")\n+ # Check if the directory is empty or not (only the first line it needed).\n+ local -r logs=$(ls \"${RUNSC_LOGS_DIR}\" | head -n1)\nif [[ \"${logs}\" ]]; then\n- tar --create --gzip --file=\"${KOKORO_ARTIFACTS_DIR}/${RUNTIME}.tar.gz\" -C \"${RUNSC_LOGS_DIR}\" .\n+ local -r archive=runsc_logs_\"${RUNTIME}\".tar.gz\n+ if [[ -v KOKORO_BUILD_ARTIFACTS_SUBDIR ]]; then\n+ echo \"runsc logs will be uploaded to:\"\n+ echo \" gsutil cp gs://gvisor/logs/${KOKORO_BUILD_ARTIFACTS_SUBDIR}/${archive} /tmp\"\n+ echo \" https://storage.cloud.google.com/gvisor/logs/${KOKORO_BUILD_ARTIFACTS_SUBDIR}/${archive}\"\n+ fi\n+ tar --create --gzip --file=\"${KOKORO_ARTIFACTS_DIR}/${archive}\" -C \"${RUNSC_LOGS_DIR}\" .\nfi\nfi\nfi\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add runsc logs to kokoro artifacts
PiperOrigin-RevId: 272286122 |
259,881 | 01.10.2019 15:41:32 | 25,200 | 0d483985c57a2d001039d17bd198e2eca0f4ff7f | Include AT_SECURE in the aux vector
gVisor does not currently implement the functionality that would result in
AT_SECURE = 1, but Linux includes AT_SECURE = 0 in the normal case, so we
should do the same. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/task_identity.go",
"new_path": "pkg/sentry/kernel/task_identity.go",
"diff": "@@ -465,8 +465,8 @@ func (t *Task) SetKeepCaps(k bool) {\n// disables the features we don't support anyway, is always set. This\n// drastically simplifies this function.\n//\n-// - We don't implement AT_SECURE, because no_new_privs always being set means\n-// that the conditions that require AT_SECURE never arise. (Compare Linux's\n+// - We don't set AT_SECURE = 1, because no_new_privs always being set means\n+// that the conditions that require AT_SECURE = 1 never arise. (Compare Linux's\n// security/commoncap.c:cap_bprm_set_creds() and cap_bprm_secureexec().)\n//\n// - We don't check for CAP_SYS_ADMIN in prctl(PR_SET_SECCOMP), since\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/loader/loader.go",
"new_path": "pkg/sentry/loader/loader.go",
"diff": "@@ -308,6 +308,9 @@ func Load(ctx context.Context, m *mm.MemoryManager, mounts *fs.MountNamespace, r\narch.AuxEntry{linux.AT_EUID, usermem.Addr(c.EffectiveKUID.In(c.UserNamespace).OrOverflow())},\narch.AuxEntry{linux.AT_GID, usermem.Addr(c.RealKGID.In(c.UserNamespace).OrOverflow())},\narch.AuxEntry{linux.AT_EGID, usermem.Addr(c.EffectiveKGID.In(c.UserNamespace).OrOverflow())},\n+ // The conditions that require AT_SECURE = 1 never arise. See\n+ // kernel.Task.updateCredsForExecLocked.\n+ arch.AuxEntry{linux.AT_SECURE, 0},\narch.AuxEntry{linux.AT_CLKTCK, linux.CLOCKS_PER_SEC},\narch.AuxEntry{linux.AT_EXECFN, execfn},\narch.AuxEntry{linux.AT_RANDOM, random},\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/proc.cc",
"new_path": "test/syscalls/linux/proc.cc",
"diff": "@@ -440,6 +440,11 @@ TEST(ProcSelfAuxv, EntryPresence) {\nEXPECT_EQ(auxv_entries.count(AT_PHENT), 1);\nEXPECT_EQ(auxv_entries.count(AT_PHNUM), 1);\nEXPECT_EQ(auxv_entries.count(AT_BASE), 1);\n+ EXPECT_EQ(auxv_entries.count(AT_UID), 1);\n+ EXPECT_EQ(auxv_entries.count(AT_EUID), 1);\n+ EXPECT_EQ(auxv_entries.count(AT_GID), 1);\n+ EXPECT_EQ(auxv_entries.count(AT_EGID), 1);\n+ EXPECT_EQ(auxv_entries.count(AT_SECURE), 1);\nEXPECT_EQ(auxv_entries.count(AT_CLKTCK), 1);\nEXPECT_EQ(auxv_entries.count(AT_RANDOM), 1);\nEXPECT_EQ(auxv_entries.count(AT_EXECFN), 1);\n"
}
] | Go | Apache License 2.0 | google/gvisor | Include AT_SECURE in the aux vector
gVisor does not currently implement the functionality that would result in
AT_SECURE = 1, but Linux includes AT_SECURE = 0 in the normal case, so we
should do the same.
PiperOrigin-RevId: 272311488 |
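As background for the commit above, AT_SECURE can be observed from userspace via getauxval(3) or /proc/self/auxv. A minimal Go sketch (not part of the change) that scans the aux vector, assuming a 64-bit little-endian Linux target and the standard tag value AT_SECURE = 23 from <linux/auxvec.h>:

```go
package main

import (
	"encoding/binary"
	"fmt"
	"os"
)

// AT_SECURE is tag 23 in <linux/auxvec.h>; AT_NULL (0) terminates the vector.
const (
	atNull   = 0
	atSecure = 23
)

func main() {
	// /proc/self/auxv is a sequence of (tag, value) pairs of native words;
	// this sketch assumes 8-byte little-endian words.
	buf, err := os.ReadFile("/proc/self/auxv")
	if err != nil {
		fmt.Println("read auxv:", err)
		return
	}
	for i := 0; i+16 <= len(buf); i += 16 {
		tag := binary.LittleEndian.Uint64(buf[i : i+8])
		val := binary.LittleEndian.Uint64(buf[i+8 : i+16])
		if tag == atNull {
			break
		}
		if tag == atSecure {
			fmt.Println("AT_SECURE =", val) // 0 in the normal, non-setuid case.
			return
		}
	}
	fmt.Println("AT_SECURE not present in the aux vector")
}
```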
259,853 | 01.10.2019 16:24:19 | 25,200 | 29207cef141983647816b2270fe6419e56630c64 | runsc: remove todo from the build file
b/135475885 was fixed by cl/271434565. | [
{
"change_type": "MODIFY",
"old_path": "runsc/BUILD",
"new_path": "runsc/BUILD",
"diff": "@@ -91,11 +91,6 @@ pkg_deb(\nmaintainer = \"The gVisor Authors <[email protected]>\",\npackage = \"runsc\",\npostinst = \"debian/postinst.sh\",\n- tags = [\n- # TODO(b/135475885): pkg_deb requires python2:\n- # https://github.com/bazelbuild/bazel/issues/8443\n- \"manual\",\n- ],\nversion_file = \":version.txt\",\nvisibility = [\n\"//visibility:public\",\n"
}
] | Go | Apache License 2.0 | google/gvisor | runsc: remove todo from the build file
b/135475885 was fixed by cl/271434565.
PiperOrigin-RevId: 272320178 |
259,881 | 01.10.2019 16:44:27 | 25,200 | 03ce4dd86c9acd6b6148f68d5d2cf025d8c254bb | Remove extra --rm | [
{
"change_type": "MODIFY",
"old_path": "scripts/dev.sh",
"new_path": "scripts/dev.sh",
"diff": "@@ -58,7 +58,7 @@ if [[ ${REFRESH} -eq 0 ]]; then\necho\necho \"Runtimes ${RUNTIME} and ${RUNTIME}-d (debug enabled) setup.\"\necho \"Use --runtime=\"${RUNTIME}\" with your Docker command.\"\n- echo \" docker run --rm --runtime=\"${RUNTIME}\" --rm hello-world\"\n+ echo \" docker run --rm --runtime=\"${RUNTIME}\" hello-world\"\necho\necho \"If you rebuild, use $0 --refresh.\"\n"
}
] | Go | Apache License 2.0 | google/gvisor | Remove extra --rm
PiperOrigin-RevId: 272324038 |
259,881 | 02.10.2019 14:00:32 | 25,200 | 61e40819d9db05aecb8b36a46d529bd8fe425dcf | Sanity test that open(2) on a UDS fails
Spoiler alert: it doesn't. | [
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/BUILD",
"new_path": "test/syscalls/linux/BUILD",
"diff": "@@ -333,6 +333,7 @@ cc_binary(\nlinkstatic = 1,\ndeps = [\n\":socket_test_util\",\n+ \"//test/util:file_descriptor\",\n\"//test/util:test_main\",\n\"//test/util:test_util\",\n\"@com_google_googletest//:gtest\",\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/socket.cc",
"new_path": "test/syscalls/linux/socket.cc",
"diff": "#include \"gtest/gtest.h\"\n#include \"test/syscalls/linux/socket_test_util.h\"\n+#include \"test/util/file_descriptor.h\"\n#include \"test/util/test_util.h\"\nnamespace gvisor {\n@@ -57,5 +58,28 @@ TEST(SocketTest, ProtocolInet) {\n}\n}\n+using SocketOpenTest = ::testing::TestWithParam<int>;\n+\n+// UDS cannot be opened.\n+TEST_P(SocketOpenTest, Unix) {\n+ // FIXME(b/142001530): Open incorrectly succeeds on gVisor.\n+ SKIP_IF(IsRunningOnGvisor());\n+\n+ FileDescriptor bound =\n+ ASSERT_NO_ERRNO_AND_VALUE(Socket(AF_UNIX, SOCK_STREAM, PF_UNIX));\n+\n+ struct sockaddr_un addr =\n+ ASSERT_NO_ERRNO_AND_VALUE(UniqueUnixAddr(/*abstract=*/false, AF_UNIX));\n+\n+ ASSERT_THAT(bind(bound.get(), reinterpret_cast<struct sockaddr*>(&addr),\n+ sizeof(addr)),\n+ SyscallSucceeds());\n+\n+ EXPECT_THAT(open(addr.sun_path, GetParam()), SyscallFailsWithErrno(ENXIO));\n+}\n+\n+INSTANTIATE_TEST_SUITE_P(OpenModes, SocketOpenTest,\n+ ::testing::Values(O_RDONLY, O_RDWR));\n+\n} // namespace testing\n} // namespace gvisor\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/socket_test_util.h",
"new_path": "test/syscalls/linux/socket_test_util.h",
"diff": "@@ -83,6 +83,8 @@ inline ssize_t SendFd(int fd, void* buf, size_t count, int flags) {\ncount);\n}\n+PosixErrorOr<struct sockaddr_un> UniqueUnixAddr(bool abstract, int domain);\n+\n// A Creator<T> is a function that attempts to create and return a new T. (This\n// is copy/pasted from cloud/gvisor/api/sandbox_util.h and is just duplicated\n// here for clarity.)\n"
}
] | Go | Apache License 2.0 | google/gvisor | Sanity test that open(2) on a UDS fails
Spoiler alert: it doesn't.
PiperOrigin-RevId: 272513529 |
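The behavior the new test checks is that open(2) on a bound Unix-domain socket path fails with ENXIO on Linux (per the FIXME in the diff, it incorrectly succeeded on gVisor at the time). A short Go sketch reproduces the Linux side; the socket path and program structure here are illustrative only:

```go
package main

import (
	"errors"
	"fmt"
	"net"
	"os"
	"path/filepath"
	"syscall"
)

func main() {
	path := filepath.Join(os.TempDir(), "uds-open-example.sock")
	os.Remove(path) // Ignore the error; just make sure the name is free.

	// Create and bind a Unix-domain stream socket at path.
	l, err := net.Listen("unix", path)
	if err != nil {
		panic(err)
	}
	defer l.Close()

	// On Linux, open(2) on a socket file fails with ENXIO.
	_, err = os.OpenFile(path, os.O_RDONLY, 0)
	fmt.Printf("open error: %v (ENXIO: %t)\n", err, errors.Is(err, syscall.ENXIO))
}
```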
259,853 | 03.10.2019 13:35:24 | 25,200 | db218fdfcf16b664c990f02c94ed89ac2c1ad314 | Don't report partialResult errors from sendfile
The input file descriptor is always a regular file, so sendfile can't lose any
data if it is unable to write it all to the output file descriptor.
Reported-by: | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/syscalls/linux/sys_splice.go",
"new_path": "pkg/sentry/syscalls/linux/sys_splice.go",
"diff": "@@ -159,9 +159,14 @@ func Sendfile(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc\n}, outFile.Flags().NonBlocking)\n}\n+ // Sendfile can't lose any data because inFD is always a regual file.\n+ if n != 0 {\n+ err = nil\n+ }\n+\n// We can only pass a single file to handleIOError, so pick inFile\n// arbitrarily. This is used only for debugging purposes.\n- return uintptr(n), nil, handleIOError(t, n != 0, err, kernel.ERESTARTSYS, \"sendfile\", inFile)\n+ return uintptr(n), nil, handleIOError(t, false, err, kernel.ERESTARTSYS, \"sendfile\", inFile)\n}\n// Splice implements splice(2).\n@@ -305,6 +310,11 @@ func Tee(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallCo\nDup: true,\n}, nonBlock)\n+ // Tee doesn't change a state of inFD, so it can't lose any data.\n+ if n != 0 {\n+ err = nil\n+ }\n+\n// See above; inFile is chosen arbitrarily here.\n- return uintptr(n), nil, handleIOError(t, n != 0, err, kernel.ERESTARTSYS, \"tee\", inFile)\n+ return uintptr(n), nil, handleIOError(t, false, err, kernel.ERESTARTSYS, \"tee\", inFile)\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/BUILD",
"new_path": "test/syscalls/linux/BUILD",
"diff": "@@ -1905,6 +1905,7 @@ cc_binary(\nsrcs = [\"sendfile.cc\"],\nlinkstatic = 1,\ndeps = [\n+ \"//test/util:eventfd_util\",\n\"//test/util:file_descriptor\",\n\"//test/util:temp_path\",\n\"//test/util:test_main\",\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/sendfile.cc",
"new_path": "test/syscalls/linux/sendfile.cc",
"diff": "// limitations under the License.\n#include <fcntl.h>\n+#include <sys/eventfd.h>\n#include <sys/sendfile.h>\n#include <unistd.h>\n#include \"absl/strings/string_view.h\"\n#include \"absl/time/clock.h\"\n#include \"absl/time/time.h\"\n+#include \"test/util/eventfd_util.h\"\n#include \"test/util/file_descriptor.h\"\n#include \"test/util/temp_path.h\"\n#include \"test/util/test_util.h\"\n@@ -511,6 +513,23 @@ TEST(SendFileTest, SendPipeBlocks) {\nSyscallSucceedsWithValue(kDataSize));\n}\n+TEST(SendFileTest, SendToSpecialFile) {\n+ // Create temp file.\n+ const TempPath in_file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith(\n+ GetAbsoluteTestTmpdir(), \"\", TempPath::kDefaultFileMode));\n+\n+ const FileDescriptor inf =\n+ ASSERT_NO_ERRNO_AND_VALUE(Open(in_file.path(), O_RDWR));\n+ constexpr int kSize = 0x7ff;\n+ ASSERT_THAT(ftruncate(inf.get(), kSize), SyscallSucceeds());\n+\n+ auto eventfd = ASSERT_NO_ERRNO_AND_VALUE(NewEventFD());\n+\n+ // eventfd can accept a number of bytes which is a multiple of 8.\n+ EXPECT_THAT(sendfile(eventfd.get(), inf.get(), nullptr, 0xfffff),\n+ SyscallSucceedsWithValue(kSize & (~7)));\n+}\n+\n} // namespace\n} // namespace testing\n"
}
] | Go | Apache License 2.0 | google/gvisor | Don't report partialResult errors from sendfile
The input file descriptor is always a regular file, so sendfile can't lose any
data if it is unable to write it all to the output file descriptor.
Reported-by: [email protected]
PiperOrigin-RevId: 272730357 |
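A caller-side sketch of the property the change relies on: with a regular input file, a short sendfile(2) count is not data loss, because the offset advances only by the bytes actually written, so the caller can simply retry from there. The sketch uses golang.org/x/sys/unix and is illustrative, not the sentry implementation:

```go
package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

// copyFileTo copies size bytes from the regular file in to dst using
// sendfile(2), resuming after short counts. With an explicit offset the
// kernel advances off only by the bytes actually written to dst, so a short
// count never means lost data; the next call picks up where this one stopped.
func copyFileTo(dst, in, size int) error {
	var off int64
	for off < int64(size) {
		n, err := unix.Sendfile(dst, in, &off, size-int(off))
		if err == unix.EINTR {
			continue // Interrupted; retry from the updated offset.
		}
		if err != nil {
			return err
		}
		if n == 0 {
			return nil // Input exhausted before size bytes.
		}
	}
	return nil
}

func check(err error) {
	if err != nil {
		panic(err)
	}
}

func main() {
	src, err := os.CreateTemp("", "sendfile-src-*")
	check(err)
	defer os.Remove(src.Name())
	_, err = src.WriteString("hello, sendfile")
	check(err)

	dst, err := os.CreateTemp("", "sendfile-dst-*")
	check(err)
	defer os.Remove(dst.Name())

	check(copyFileTo(int(dst.Fd()), int(src.Fd()), len("hello, sendfile")))
	fmt.Println("copied", len("hello, sendfile"), "bytes")
}
```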
259,891 | 04.10.2019 14:19:08 | 25,200 | 7ef1c44a7fe027d60c92b44515655a612d40d034 | Change linux.FileMode from uint to uint16, and update VFS to use FileMode.
In Linux (include/linux/types.h), mode_t is an unsigned short. | [
{
"change_type": "MODIFY",
"old_path": "pkg/abi/linux/file.go",
"new_path": "pkg/abi/linux/file.go",
"diff": "@@ -271,7 +271,7 @@ type Statx struct {\n}\n// FileMode represents a mode_t.\n-type FileMode uint\n+type FileMode uint16\n// Permissions returns just the permission bits.\nfunc (m FileMode) Permissions() FileMode {\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/memfs/directory.go",
"new_path": "pkg/sentry/fsimpl/memfs/directory.go",
"diff": "@@ -32,7 +32,7 @@ type directory struct {\nchildList dentryList\n}\n-func (fs *filesystem) newDirectory(creds *auth.Credentials, mode uint16) *inode {\n+func (fs *filesystem) newDirectory(creds *auth.Credentials, mode linux.FileMode) *inode {\ndir := &directory{}\ndir.inode.init(dir, fs, creds, mode)\ndir.inode.nlink = 2 // from \".\" and parent directory or \"..\" for root\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/memfs/memfs.go",
"new_path": "pkg/sentry/fsimpl/memfs/memfs.go",
"diff": "@@ -137,7 +137,7 @@ type inode struct {\nimpl interface{} // immutable\n}\n-func (i *inode) init(impl interface{}, fs *filesystem, creds *auth.Credentials, mode uint16) {\n+func (i *inode) init(impl interface{}, fs *filesystem, creds *auth.Credentials, mode linux.FileMode) {\ni.refs = 1\ni.mode = uint32(mode)\ni.uid = uint32(creds.EffectiveKUID)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/memfs/regular_file.go",
"new_path": "pkg/sentry/fsimpl/memfs/regular_file.go",
"diff": "@@ -37,7 +37,7 @@ type regularFile struct {\ndataLen int64\n}\n-func (fs *filesystem) newRegularFile(creds *auth.Credentials, mode uint16) *inode {\n+func (fs *filesystem) newRegularFile(creds *auth.Credentials, mode linux.FileMode) *inode {\nfile := ®ularFile{}\nfile.inode.init(file, fs, creds, mode)\nfile.inode.nlink = 1 // from parent directory\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/vfs/options.go",
"new_path": "pkg/sentry/vfs/options.go",
"diff": "@@ -31,14 +31,14 @@ type GetDentryOptions struct {\n// FilesystemImpl.MkdirAt().\ntype MkdirOptions struct {\n// Mode is the file mode bits for the created directory.\n- Mode uint16\n+ Mode linux.FileMode\n}\n// MknodOptions contains options to VirtualFilesystem.MknodAt() and\n// FilesystemImpl.MknodAt().\ntype MknodOptions struct {\n// Mode is the file type and mode bits for the created file.\n- Mode uint16\n+ Mode linux.FileMode\n// If Mode specifies a character or block device special file, DevMajor and\n// DevMinor are the major and minor device numbers for the created device.\n@@ -61,7 +61,7 @@ type OpenOptions struct {\n// If FilesystemImpl.OpenAt() creates a file, Mode is the file mode for the\n// created file.\n- Mode uint16\n+ Mode linux.FileMode\n}\n// ReadOptions contains options to FileDescription.PRead(),\n"
}
] | Go | Apache License 2.0 | google/gvisor | Change linux.FileMode from uint to uint16, and update VFS to use FileMode.
In Linux (include/linux/types.h), mode_t is an unsigned short.
PiperOrigin-RevId: 272956350 |
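A standalone sketch of why 16 bits suffice for mode_t: the file-type bits (S_IFMT = 0o170000) and the permission/setuid/setgid/sticky bits (0o7777) all fit below bit 16. The helper names below are illustrative simplifications, not gVisor's exact API:

```go
package main

import "fmt"

// FileMode mirrors Linux's mode_t, which is an unsigned short: the type bits
// (S_IFMT = 0o170000) and the permission/suid/sgid/sticky bits (0o7777) all
// fit in 16 bits, so uint16 is wide enough.
type FileMode uint16

const (
	permissionsMask FileMode = 0o7777
	typeMask        FileMode = 0o170000 // S_IFMT

	modeRegular   FileMode = 0o100000 // S_IFREG
	modeDirectory FileMode = 0o040000 // S_IFDIR
)

// Permissions returns just the permission, setuid/setgid and sticky bits.
func (m FileMode) Permissions() FileMode { return m & permissionsMask }

// FileType returns just the file-type bits.
func (m FileMode) FileType() FileMode { return m & typeMask }

func main() {
	m := modeRegular | 0o644
	fmt.Printf("type=%#o perms=%#o\n", m.FileType(), m.Permissions()) // type=0100000 perms=0644
}
```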
259,885 | 04.10.2019 14:55:11 | 25,200 | b941e357615a7b0e04dbf6535cafacfbb4b7e276 | Return EIO from p9 if flipcall.Endpoint.Connect() fails.
Also ensure that all flipcall transport errors not returned by p9 (converted to
EIO by the client, or dropped on the floor by channel server goroutines) are
logged. | [
{
"change_type": "MODIFY",
"old_path": "pkg/p9/client.go",
"new_path": "pkg/p9/client.go",
"diff": "@@ -505,12 +505,27 @@ func (c *Client) sendRecvChannel(t message, r message) error {\nch.active = false\nc.channelsMu.Unlock()\nc.channelsWg.Done()\n- return err\n+ // Map all transport errors to EIO, but ensure that the real error\n+ // is logged.\n+ log.Warningf(\"p9.Client.sendRecvChannel: flipcall.Endpoint.Connect: %v\", err)\n+ return syscall.EIO\n}\n}\n- // Send the message.\n- err := ch.sendRecv(c, t, r)\n+ // Send the request and receive the server's response.\n+ rsz, err := ch.send(t)\n+ if err != nil {\n+ // See above.\n+ c.channelsMu.Lock()\n+ ch.active = false\n+ c.channelsMu.Unlock()\n+ c.channelsWg.Done()\n+ log.Warningf(\"p9.Client.sendRecvChannel: p9.channel.send: %v\", err)\n+ return syscall.EIO\n+ }\n+\n+ // Parse the server's response.\n+ _, retErr := ch.recv(r, rsz)\n// Release the channel.\nc.channelsMu.Lock()\n@@ -523,7 +538,7 @@ func (c *Client) sendRecvChannel(t message, r message) error {\nc.channelsMu.Unlock()\nc.channelsWg.Done()\n- return err\n+ return retErr\n}\n// Version returns the negotiated 9P2000.L.Google version number.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/p9/server.go",
"new_path": "pkg/p9/server.go",
"diff": "@@ -452,7 +452,9 @@ func (cs *connState) initializeChannels() (err error) {\ncs.channelWg.Add(1)\ngo func() { // S/R-SAFE: Server side.\ndefer cs.channelWg.Done()\n- res.service(cs)\n+ if err := res.service(cs); err != nil {\n+ log.Warningf(\"p9.channel.service: %v\", err)\n+ }\n}()\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/p9/transport_flipcall.go",
"new_path": "pkg/p9/transport_flipcall.go",
"diff": "@@ -132,7 +132,7 @@ func (ch *channel) send(m message) (uint32, error) {\nif filer, ok := m.(filer); ok {\nif f := filer.FilePayload(); f != nil {\nif err := ch.fds.SendFD(f.FD()); err != nil {\n- return 0, syscall.EIO // Map everything to EIO.\n+ return 0, err\n}\nf.Close() // Per sendRecvLegacy.\nsentFD = true // To mark below.\n@@ -162,15 +162,7 @@ func (ch *channel) send(m message) (uint32, error) {\n}\n// Perform the one-shot communication.\n- n, err := ch.data.SendRecv(ssz)\n- if err != nil {\n- if n > 0 {\n- return n, nil\n- }\n- return 0, syscall.EIO // See above.\n- }\n-\n- return n, nil\n+ return ch.data.SendRecv(ssz)\n}\n// recv decodes a message that exists on the channel.\n@@ -249,15 +241,3 @@ func (ch *channel) recv(r message, rsz uint32) (message, error) {\nreturn r, nil\n}\n-\n-// sendRecv sends the given message over the channel.\n-//\n-// This is used by the client.\n-func (ch *channel) sendRecv(c *Client, m, r message) error {\n- rsz, err := ch.send(m)\n- if err != nil {\n- return err\n- }\n- _, err = ch.recv(r, rsz)\n- return err\n-}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Return EIO from p9 if flipcall.Endpoint.Connect() fails.
Also ensure that all flipcall transport errors not returned by p9 (converted to
EIO by the client, or dropped on the floor by channel server goroutines) are
logged.
PiperOrigin-RevId: 272963663 |
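The convention adopted here, returning EIO to callers for transport failures while logging the underlying error so it is not silently dropped, sketched generically (all names below are made up for illustration):

```go
package main

import (
	"errors"
	"fmt"
	"log"
	"syscall"
)

// sendRecv is a stand-in for a transport round trip that can fail for
// reasons the caller cannot act on (e.g. a broken endpoint).
func sendRecv() error {
	return errors.New("endpoint shutdown: connection reset")
}

// doRequest shows the convention: callers only ever see EIO for transport
// failures, while the real cause still reaches the logs for debugging.
func doRequest() error {
	if err := sendRecv(); err != nil {
		log.Printf("transport error (returned to caller as EIO): %v", err)
		return syscall.EIO
	}
	return nil
}

func main() {
	fmt.Println(doRequest()) // input/output error
}
```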
259,884 | 06.10.2019 21:06:53 | 25,200 | 5ac2cc54918c480bd40ec3f05c9ce93a2d7afa99 | Add SECURITY.md.
Adds minimal security policy info to SECURITY.md. This allows Github to
advertise the security policy doc for the repo.
See:
See: | [
{
"change_type": "MODIFY",
"old_path": "README.md",
"new_path": "README.md",
"diff": "@@ -133,11 +133,9 @@ The [gvisor-users mailing list][gvisor-users-list] and\n[gvisor-dev mailing list][gvisor-dev-list] are good starting points for\nquestions and discussion.\n-## Security\n+## Security Policy\n-Sensitive security-related questions, comments and disclosures can be sent to\n-the [gvisor-security mailing list][gvisor-security-list]. The full security\n-disclosure policy is defined in the [community][community] repository.\n+See [SECURITY.md](SECURITY.md).\n## Contributing\n@@ -147,7 +145,6 @@ See [Contributing.md](CONTRIBUTING.md).\n[community]: https://gvisor.googlesource.com/community\n[docker]: https://www.docker.com\n[git]: https://git-scm.com\n-[gvisor-security-list]: https://groups.google.com/forum/#!forum/gvisor-security\n[gvisor-users-list]: https://groups.google.com/forum/#!forum/gvisor-users\n[gvisor-dev-list]: https://groups.google.com/forum/#!forum/gvisor-dev\n[oci]: https://www.opencontainers.org\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "SECURITY.md",
"diff": "+# Security and Vulnerability Reporting\n+\n+Sensitive security-related questions, comments, and reports should be sent to\n+the [gvisor-security mailing list][gvisor-security-list]. You should receive a\n+prompt response, typically within 48 hours.\n+\n+Policies for security list access, vulnerability embargo, and vulnerability\n+disclosure are outlined in the [community][community] repository.\n+\n+[community]: https://gvisor.googlesource.com/community\n+[gvisor-security-list]: https://groups.google.com/forum/#!forum/gvisor-security\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add SECURITY.md.
Adds minimal security policy info to SECURITY.md. This allows Github to
advertise the security policy doc for the repo.
See: https://github.blog/changelog/2019-05-23-security-policy/
See: https://help.github.com/en/articles/adding-a-security-policy-to-your-repository
PiperOrigin-RevId: 273214306 |
259,891 | 07.10.2019 13:39:18 | 25,200 | 6a9823794975d2401ae1bda3937a63de959192ab | Rename epsocket to netstack. | [
{
"change_type": "RENAME",
"old_path": "pkg/sentry/socket/epsocket/BUILD",
"new_path": "pkg/sentry/socket/netstack/BUILD",
"diff": "@@ -3,15 +3,15 @@ package(licenses = [\"notice\"])\nload(\"//tools/go_stateify:defs.bzl\", \"go_library\")\ngo_library(\n- name = \"epsocket\",\n+ name = \"netstack\",\nsrcs = [\n\"device.go\",\n- \"epsocket.go\",\n+ \"netstack.go\",\n\"provider.go\",\n\"save_restore.go\",\n\"stack.go\",\n],\n- importpath = \"gvisor.dev/gvisor/pkg/sentry/socket/epsocket\",\n+ importpath = \"gvisor.dev/gvisor/pkg/sentry/socket/netstack\",\nvisibility = [\n\"//pkg/sentry:internal\",\n],\n"
},
{
"change_type": "RENAME",
"old_path": "pkg/sentry/socket/epsocket/device.go",
"new_path": "pkg/sentry/socket/netstack/device.go",
"diff": "// See the License for the specific language governing permissions and\n// limitations under the License.\n-package epsocket\n+package netstack\nimport \"gvisor.dev/gvisor/pkg/sentry/device\"\n-// epsocketDevice is the endpoint socket virtual device.\n-var epsocketDevice = device.NewAnonDevice()\n+// netstackDevice is the endpoint socket virtual device.\n+var netstackDevice = device.NewAnonDevice()\n"
},
{
"change_type": "RENAME",
"old_path": "pkg/sentry/socket/epsocket/epsocket.go",
"new_path": "pkg/sentry/socket/netstack/netstack.go",
"diff": "// See the License for the specific language governing permissions and\n// limitations under the License.\n-// Package epsocket provides an implementation of the socket.Socket interface\n+// Package netstack provides an implementation of the socket.Socket interface\n// that is backed by a tcpip.Endpoint.\n//\n// It does not depend on any particular endpoint implementation, and thus can\n// Lock ordering: netstack => mm: ioSequencePayload copies user memory inside\n// tcpip.Endpoint.Write(). Netstack is allowed to (and does) hold locks during\n// this operation.\n-package epsocket\n+package netstack\nimport (\n\"bytes\"\n@@ -176,7 +176,7 @@ var Metrics = tcpip.Stats{\nconst sizeOfInt32 int = 4\n-var errStackType = syserr.New(\"expected but did not receive an epsocket.Stack\", linux.EINVAL)\n+var errStackType = syserr.New(\"expected but did not receive a netstack.Stack\", linux.EINVAL)\n// ntohs converts a 16-bit number from network byte order to host byte order. It\n// assumes that the host is little endian.\n@@ -262,8 +262,8 @@ type SocketOperations struct {\n// valid when timestampValid is true. It is protected by readMu.\ntimestampNS int64\n- // sockOptInq corresponds to TCP_INQ. It is implemented on the epsocket\n- // level, because it takes into account data from readView.\n+ // sockOptInq corresponds to TCP_INQ. It is implemented at this level\n+ // because it takes into account data from readView.\nsockOptInq bool\n}\n@@ -275,7 +275,7 @@ func New(t *kernel.Task, family int, skType linux.SockType, protocol int, queue\n}\n}\n- dirent := socket.NewDirent(t, epsocketDevice)\n+ dirent := socket.NewDirent(t, netstackDevice)\ndefer dirent.DecRef()\nreturn fs.NewFile(t, dirent, fs.FileFlags{Read: true, Write: true, NonSeekable: true}, &SocketOperations{\nQueue: queue,\n@@ -760,7 +760,7 @@ func (s *SocketOperations) Shutdown(t *kernel.Task, how int) *syserr.Error {\n// tcpip.Endpoint.\nfunc (s *SocketOperations) GetSockOpt(t *kernel.Task, level, name int, outPtr usermem.Addr, outLen int) (interface{}, *syserr.Error) {\n// TODO(b/78348848): Unlike other socket options, SO_TIMESTAMP is\n- // implemented specifically for epsocket.SocketOperations rather than\n+ // implemented specifically for netstack.SocketOperations rather than\n// commonEndpoint. commonEndpoint should be extended to support socket\n// options where the implementation is not shared, as unix sockets need\n// their own support for SO_TIMESTAMP.\n@@ -1229,7 +1229,7 @@ func getSockOptIP(t *kernel.Task, ep commonEndpoint, name, outLen int) (interfac\n// tcpip.Endpoint.\nfunc (s *SocketOperations) SetSockOpt(t *kernel.Task, level int, name int, optVal []byte) *syserr.Error {\n// TODO(b/78348848): Unlike other socket options, SO_TIMESTAMP is\n- // implemented specifically for epsocket.SocketOperations rather than\n+ // implemented specifically for netstack.SocketOperations rather than\n// commonEndpoint. 
commonEndpoint should be extended to support socket\n// options where the implementation is not shared, as unix sockets need\n// their own support for SO_TIMESTAMP.\n@@ -2235,7 +2235,7 @@ func (s *SocketOperations) SendMsg(t *kernel.Task, src usermem.IOSequence, to []\n// Ioctl implements fs.FileOperations.Ioctl.\nfunc (s *SocketOperations) Ioctl(ctx context.Context, _ *fs.File, io usermem.IO, args arch.SyscallArguments) (uintptr, error) {\n- // SIOCGSTAMP is implemented by epsocket rather than all commonEndpoint\n+ // SIOCGSTAMP is implemented by netstack rather than all commonEndpoint\n// sockets.\n// TODO(b/78348848): Add a commonEndpoint method to support SIOCGSTAMP.\nswitch args[1].Int() {\n@@ -2538,7 +2538,7 @@ func ifconfIoctl(ctx context.Context, io usermem.IO, ifc *linux.IFConf) error {\n// Flag values and meanings are described in greater detail in netdevice(7) in\n// the SIOCGIFFLAGS section.\nfunc interfaceStatusFlags(stack inet.Stack, name string) (uint32, *syserr.Error) {\n- // epsocket should only ever be passed an epsocket.Stack.\n+ // We should only ever be passed a netstack.Stack.\nepstack, ok := stack.(*Stack)\nif !ok {\nreturn 0, errStackType\n"
},
{
"change_type": "RENAME",
"old_path": "pkg/sentry/socket/epsocket/provider.go",
"new_path": "pkg/sentry/socket/netstack/provider.go",
"diff": "// See the License for the specific language governing permissions and\n// limitations under the License.\n-package epsocket\n+package netstack\nimport (\n\"syscall\"\n"
},
{
"change_type": "RENAME",
"old_path": "pkg/sentry/socket/epsocket/save_restore.go",
"new_path": "pkg/sentry/socket/netstack/save_restore.go",
"diff": "// See the License for the specific language governing permissions and\n// limitations under the License.\n-package epsocket\n+package netstack\nimport (\n\"gvisor.dev/gvisor/pkg/tcpip/stack\"\n"
},
{
"change_type": "RENAME",
"old_path": "pkg/sentry/socket/epsocket/stack.go",
"new_path": "pkg/sentry/socket/netstack/stack.go",
"diff": "// See the License for the specific language governing permissions and\n// limitations under the License.\n-package epsocket\n+package netstack\nimport (\n\"gvisor.dev/gvisor/pkg/abi/linux\"\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/unix/BUILD",
"new_path": "pkg/sentry/socket/unix/BUILD",
"diff": "@@ -24,7 +24,7 @@ go_library(\n\"//pkg/sentry/safemem\",\n\"//pkg/sentry/socket\",\n\"//pkg/sentry/socket/control\",\n- \"//pkg/sentry/socket/epsocket\",\n+ \"//pkg/sentry/socket/netstack\",\n\"//pkg/sentry/socket/unix/transport\",\n\"//pkg/sentry/usermem\",\n\"//pkg/syserr\",\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/unix/unix.go",
"new_path": "pkg/sentry/socket/unix/unix.go",
"diff": "@@ -31,7 +31,7 @@ import (\nktime \"gvisor.dev/gvisor/pkg/sentry/kernel/time\"\n\"gvisor.dev/gvisor/pkg/sentry/socket\"\n\"gvisor.dev/gvisor/pkg/sentry/socket/control\"\n- \"gvisor.dev/gvisor/pkg/sentry/socket/epsocket\"\n+ \"gvisor.dev/gvisor/pkg/sentry/socket/netstack\"\n\"gvisor.dev/gvisor/pkg/sentry/socket/unix/transport\"\n\"gvisor.dev/gvisor/pkg/sentry/usermem\"\n\"gvisor.dev/gvisor/pkg/syserr\"\n@@ -40,8 +40,8 @@ import (\n\"gvisor.dev/gvisor/pkg/waiter\"\n)\n-// SocketOperations is a Unix socket. It is similar to an epsocket, except it\n-// is backed by a transport.Endpoint instead of a tcpip.Endpoint.\n+// SocketOperations is a Unix socket. It is similar to a netstack socket,\n+// except it is backed by a transport.Endpoint instead of a tcpip.Endpoint.\n//\n// +stateify savable\ntype SocketOperations struct {\n@@ -116,7 +116,7 @@ func (s *SocketOperations) Endpoint() transport.Endpoint {\n// extractPath extracts and validates the address.\nfunc extractPath(sockaddr []byte) (string, *syserr.Error) {\n- addr, _, err := epsocket.AddressAndFamily(linux.AF_UNIX, sockaddr, true /* strict */)\n+ addr, _, err := netstack.AddressAndFamily(linux.AF_UNIX, sockaddr, true /* strict */)\nif err != nil {\nreturn \"\", err\n}\n@@ -143,7 +143,7 @@ func (s *SocketOperations) GetPeerName(t *kernel.Task) (linux.SockAddr, uint32,\nreturn nil, 0, syserr.TranslateNetstackError(err)\n}\n- a, l := epsocket.ConvertAddress(linux.AF_UNIX, addr)\n+ a, l := netstack.ConvertAddress(linux.AF_UNIX, addr)\nreturn a, l, nil\n}\n@@ -155,19 +155,19 @@ func (s *SocketOperations) GetSockName(t *kernel.Task) (linux.SockAddr, uint32,\nreturn nil, 0, syserr.TranslateNetstackError(err)\n}\n- a, l := epsocket.ConvertAddress(linux.AF_UNIX, addr)\n+ a, l := netstack.ConvertAddress(linux.AF_UNIX, addr)\nreturn a, l, nil\n}\n// Ioctl implements fs.FileOperations.Ioctl.\nfunc (s *SocketOperations) Ioctl(ctx context.Context, _ *fs.File, io usermem.IO, args arch.SyscallArguments) (uintptr, error) {\n- return epsocket.Ioctl(ctx, s.ep, io, args)\n+ return netstack.Ioctl(ctx, s.ep, io, args)\n}\n// GetSockOpt implements the linux syscall getsockopt(2) for sockets backed by\n// a transport.Endpoint.\nfunc (s *SocketOperations) GetSockOpt(t *kernel.Task, level, name int, outPtr usermem.Addr, outLen int) (interface{}, *syserr.Error) {\n- return epsocket.GetSockOpt(t, s, s.ep, linux.AF_UNIX, s.ep.Type(), level, name, outLen)\n+ return netstack.GetSockOpt(t, s, s.ep, linux.AF_UNIX, s.ep.Type(), level, name, outLen)\n}\n// Listen implements the linux syscall listen(2) for sockets backed by\n@@ -474,13 +474,13 @@ func (s *SocketOperations) EventUnregister(e *waiter.Entry) {\n// SetSockOpt implements the linux syscall setsockopt(2) for sockets backed by\n// a transport.Endpoint.\nfunc (s *SocketOperations) SetSockOpt(t *kernel.Task, level int, name int, optVal []byte) *syserr.Error {\n- return epsocket.SetSockOpt(t, s, s.ep, level, name, optVal)\n+ return netstack.SetSockOpt(t, s, s.ep, level, name, optVal)\n}\n// Shutdown implements the linux syscall shutdown(2) for sockets backed by\n// a transport.Endpoint.\nfunc (s *SocketOperations) Shutdown(t *kernel.Task, how int) *syserr.Error {\n- f, err := epsocket.ConvertShutdown(how)\n+ f, err := netstack.ConvertShutdown(how)\nif err != nil {\nreturn err\n}\n@@ -546,7 +546,7 @@ func (s *SocketOperations) RecvMsg(t *kernel.Task, dst usermem.IOSequence, flags\nvar from linux.SockAddr\nvar fromLen uint32\nif r.From != nil && len([]byte(r.From.Addr)) != 0 {\n- from, fromLen = 
epsocket.ConvertAddress(linux.AF_UNIX, *r.From)\n+ from, fromLen = netstack.ConvertAddress(linux.AF_UNIX, *r.From)\n}\nif r.ControlTrunc {\n@@ -581,7 +581,7 @@ func (s *SocketOperations) RecvMsg(t *kernel.Task, dst usermem.IOSequence, flags\nvar from linux.SockAddr\nvar fromLen uint32\nif r.From != nil {\n- from, fromLen = epsocket.ConvertAddress(linux.AF_UNIX, *r.From)\n+ from, fromLen = netstack.ConvertAddress(linux.AF_UNIX, *r.From)\n}\nif r.ControlTrunc {\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/strace/BUILD",
"new_path": "pkg/sentry/strace/BUILD",
"diff": "@@ -32,8 +32,8 @@ go_library(\n\"//pkg/sentry/arch\",\n\"//pkg/sentry/kernel\",\n\"//pkg/sentry/socket/control\",\n- \"//pkg/sentry/socket/epsocket\",\n\"//pkg/sentry/socket/netlink\",\n+ \"//pkg/sentry/socket/netstack\",\n\"//pkg/sentry/syscalls/linux\",\n\"//pkg/sentry/usermem\",\n],\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/strace/socket.go",
"new_path": "pkg/sentry/strace/socket.go",
"diff": "@@ -23,8 +23,8 @@ import (\n\"gvisor.dev/gvisor/pkg/binary\"\n\"gvisor.dev/gvisor/pkg/sentry/kernel\"\n\"gvisor.dev/gvisor/pkg/sentry/socket/control\"\n- \"gvisor.dev/gvisor/pkg/sentry/socket/epsocket\"\n\"gvisor.dev/gvisor/pkg/sentry/socket/netlink\"\n+ \"gvisor.dev/gvisor/pkg/sentry/socket/netstack\"\nslinux \"gvisor.dev/gvisor/pkg/sentry/syscalls/linux\"\n\"gvisor.dev/gvisor/pkg/sentry/usermem\"\n)\n@@ -332,7 +332,7 @@ func sockAddr(t *kernel.Task, addr usermem.Addr, length uint32) string {\nswitch family {\ncase linux.AF_INET, linux.AF_INET6, linux.AF_UNIX:\n- fa, _, err := epsocket.AddressAndFamily(int(family), b, true /* strict */)\n+ fa, _, err := netstack.AddressAndFamily(int(family), b, true /* strict */)\nif err != nil {\nreturn fmt.Sprintf(\"%#x {Family: %s, error extracting address: %v}\", addr, familyStr, err)\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/boot/BUILD",
"new_path": "runsc/boot/BUILD",
"diff": "@@ -57,10 +57,10 @@ go_library(\n\"//pkg/sentry/pgalloc\",\n\"//pkg/sentry/platform\",\n\"//pkg/sentry/sighandling\",\n- \"//pkg/sentry/socket/epsocket\",\n\"//pkg/sentry/socket/hostinet\",\n\"//pkg/sentry/socket/netlink\",\n\"//pkg/sentry/socket/netlink/route\",\n+ \"//pkg/sentry/socket/netstack\",\n\"//pkg/sentry/socket/unix\",\n\"//pkg/sentry/state\",\n\"//pkg/sentry/strace\",\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/boot/controller.go",
"new_path": "runsc/boot/controller.go",
"diff": "@@ -27,7 +27,7 @@ import (\n\"gvisor.dev/gvisor/pkg/sentry/control\"\n\"gvisor.dev/gvisor/pkg/sentry/fs\"\n\"gvisor.dev/gvisor/pkg/sentry/kernel\"\n- \"gvisor.dev/gvisor/pkg/sentry/socket/epsocket\"\n+ \"gvisor.dev/gvisor/pkg/sentry/socket/netstack\"\n\"gvisor.dev/gvisor/pkg/sentry/state\"\n\"gvisor.dev/gvisor/pkg/sentry/time\"\n\"gvisor.dev/gvisor/pkg/sentry/watchdog\"\n@@ -142,7 +142,7 @@ func newController(fd int, l *Loader) (*controller, error) {\n}\nsrv.Register(manager)\n- if eps, ok := l.k.NetworkStack().(*epsocket.Stack); ok {\n+ if eps, ok := l.k.NetworkStack().(*netstack.Stack); ok {\nnet := &Network{\nStack: eps.Stack,\n}\n@@ -355,7 +355,7 @@ func (cm *containerManager) Restore(o *RestoreOpts, _ *struct{}) error {\nfs.SetRestoreEnvironment(*renv)\n// Prepare to load from the state file.\n- if eps, ok := networkStack.(*epsocket.Stack); ok {\n+ if eps, ok := networkStack.(*netstack.Stack); ok {\nstack.StackFromEnv = eps.Stack // FIXME(b/36201077)\n}\ninfo, err := specFile.Stat()\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/boot/loader.go",
"new_path": "runsc/boot/loader.go",
"diff": "@@ -62,10 +62,10 @@ import (\n\"gvisor.dev/gvisor/runsc/specutils\"\n// Include supported socket providers.\n- \"gvisor.dev/gvisor/pkg/sentry/socket/epsocket\"\n\"gvisor.dev/gvisor/pkg/sentry/socket/hostinet\"\n_ \"gvisor.dev/gvisor/pkg/sentry/socket/netlink\"\n_ \"gvisor.dev/gvisor/pkg/sentry/socket/netlink/route\"\n+ \"gvisor.dev/gvisor/pkg/sentry/socket/netstack\"\n_ \"gvisor.dev/gvisor/pkg/sentry/socket/unix\"\n)\n@@ -914,11 +914,11 @@ func newEmptyNetworkStack(conf *Config, clock tcpip.Clock) (inet.Stack, error) {\n// NetworkNone sets up loopback using netstack.\nnetProtos := []stack.NetworkProtocol{ipv4.NewProtocol(), ipv6.NewProtocol(), arp.NewProtocol()}\ntransProtos := []stack.TransportProtocol{tcp.NewProtocol(), udp.NewProtocol(), icmp.NewProtocol4()}\n- s := epsocket.Stack{stack.New(stack.Options{\n+ s := netstack.Stack{stack.New(stack.Options{\nNetworkProtocols: netProtos,\nTransportProtocols: transProtos,\nClock: clock,\n- Stats: epsocket.Metrics,\n+ Stats: netstack.Metrics,\nHandleLocal: true,\n// Enable raw sockets for users with sufficient\n// privileges.\n"
}
] | Go | Apache License 2.0 | google/gvisor | Rename epsocket to netstack.
PiperOrigin-RevId: 273365058 |
259,884 | 07.10.2019 15:54:13 | 25,200 | da9e18f24dfdd776d58e0d5bf6345449af724923 | Add tests for $HOME
Adds two tests. One to make sure that $HOME is set when starting a container
via 'docker run' and one to make sure that $HOME is set for each container in a
multi-container sandbox.
Issue | [
{
"change_type": "MODIFY",
"old_path": "runsc/criutil/criutil.go",
"new_path": "runsc/criutil/criutil.go",
"diff": "@@ -157,13 +157,55 @@ func (cc *Crictl) RmPod(podID string) error {\nreturn err\n}\n-// StartPodAndContainer pulls an image, then starts a sandbox and container in\n-// that sandbox. It returns the pod ID and container ID.\n-func (cc *Crictl) StartPodAndContainer(image, sbSpec, contSpec string) (string, string, error) {\n+// StartContainer pulls the given image ands starts the container in the\n+// sandbox with the given podID.\n+func (cc *Crictl) StartContainer(podID, image, sbSpec, contSpec string) (string, error) {\n+ // Write the specs to files that can be read by crictl.\n+ sbSpecFile, err := testutil.WriteTmpFile(\"sbSpec\", sbSpec)\n+ if err != nil {\n+ return \"\", fmt.Errorf(\"failed to write sandbox spec: %v\", err)\n+ }\n+ contSpecFile, err := testutil.WriteTmpFile(\"contSpec\", contSpec)\n+ if err != nil {\n+ return \"\", fmt.Errorf(\"failed to write container spec: %v\", err)\n+ }\n+\n+ return cc.startContainer(podID, image, sbSpecFile, contSpecFile)\n+}\n+\n+func (cc *Crictl) startContainer(podID, image, sbSpecFile, contSpecFile string) (string, error) {\nif err := cc.Pull(image); err != nil {\n- return \"\", \"\", fmt.Errorf(\"failed to pull %s: %v\", image, err)\n+ return \"\", fmt.Errorf(\"failed to pull %s: %v\", image, err)\n+ }\n+\n+ contID, err := cc.Create(podID, contSpecFile, sbSpecFile)\n+ if err != nil {\n+ return \"\", fmt.Errorf(\"failed to create container in pod %q: %v\", podID, err)\n+ }\n+\n+ if _, err := cc.Start(contID); err != nil {\n+ return \"\", fmt.Errorf(\"failed to start container %q in pod %q: %v\", contID, podID, err)\n+ }\n+\n+ return contID, nil\n}\n+// StopContainer stops and deletes the container with the given container ID.\n+func (cc *Crictl) StopContainer(contID string) error {\n+ if err := cc.Stop(contID); err != nil {\n+ return fmt.Errorf(\"failed to stop container %q: %v\", contID, err)\n+ }\n+\n+ if err := cc.Rm(contID); err != nil {\n+ return fmt.Errorf(\"failed to remove container %q: %v\", contID, err)\n+ }\n+\n+ return nil\n+}\n+\n+// StartPodAndContainer pulls an image, then starts a sandbox and container in\n+// that sandbox. 
It returns the pod ID and container ID.\n+func (cc *Crictl) StartPodAndContainer(image, sbSpec, contSpec string) (string, string, error) {\n// Write the specs to files that can be read by crictl.\nsbSpecFile, err := testutil.WriteTmpFile(\"sbSpec\", sbSpec)\nif err != nil {\n@@ -179,28 +221,17 @@ func (cc *Crictl) StartPodAndContainer(image, sbSpec, contSpec string) (string,\nreturn \"\", \"\", err\n}\n- contID, err := cc.Create(podID, contSpecFile, sbSpecFile)\n- if err != nil {\n- return \"\", \"\", fmt.Errorf(\"failed to create container in pod %q: %v\", podID, err)\n- }\n+ contID, err := cc.startContainer(podID, image, sbSpecFile, contSpecFile)\n- if _, err := cc.Start(contID); err != nil {\n- return \"\", \"\", fmt.Errorf(\"failed to start container %q in pod %q: %v\", contID, podID, err)\n- }\n-\n- return podID, contID, nil\n+ return podID, contID, err\n}\n// StopPodAndContainer stops a container and pod.\nfunc (cc *Crictl) StopPodAndContainer(podID, contID string) error {\n- if err := cc.Stop(contID); err != nil {\n+ if err := cc.StopContainer(contID); err != nil {\nreturn fmt.Errorf(\"failed to stop container %q in pod %q: %v\", contID, podID, err)\n}\n- if err := cc.Rm(contID); err != nil {\n- return fmt.Errorf(\"failed to remove container %q in pod %q: %v\", contID, podID, err)\n- }\n-\nif err := cc.StopPod(podID); err != nil {\nreturn fmt.Errorf(\"failed to stop pod %q: %v\", podID, err)\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "test/e2e/exec_test.go",
"new_path": "test/e2e/exec_test.go",
"diff": "@@ -208,8 +208,27 @@ func TestExecEnv(t *testing.T) {\nif err != nil {\nt.Fatalf(\"docker exec failed: %v\", err)\n}\n- if want := \"BAR\"; !strings.Contains(got, want) {\n- t.Errorf(\"wanted exec output to contain %q, got %q\", want, got)\n+ if got, want := strings.TrimSpace(got), \"BAR\"; got != want {\n+ t.Errorf(\"bad output from 'docker exec'. Got %q; Want %q.\", got, want)\n+ }\n+}\n+\n+// TestRunEnvHasHome tests that run always has HOME environment set.\n+func TestRunEnvHasHome(t *testing.T) {\n+ // Base alpine image does not have any environment variables set.\n+ if err := dockerutil.Pull(\"alpine\"); err != nil {\n+ t.Fatalf(\"docker pull failed: %v\", err)\n+ }\n+ d := dockerutil.MakeDocker(\"run-env-test\")\n+\n+ // Exec \"echo $HOME\". The 'bin' user's home dir is '/bin'.\n+ got, err := d.RunFg(\"--user\", \"bin\", \"alpine\", \"/bin/sh\", \"-c\", \"echo $HOME\")\n+ if err != nil {\n+ t.Fatalf(\"docker run failed: %v\", err)\n+ }\n+ defer d.CleanUp()\n+ if got, want := strings.TrimSpace(got), \"/bin\"; got != want {\n+ t.Errorf(\"bad output from 'docker run'. Got %q; Want %q.\", got, want)\n}\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "test/root/crictl_test.go",
"new_path": "test/root/crictl_test.go",
"diff": "@@ -126,6 +126,59 @@ func TestMountOverSymlinks(t *testing.T) {\n}\n}\n+// TestHomeDir tests that the HOME environment variable is set for\n+// multi-containers.\n+func TestHomeDir(t *testing.T) {\n+ // Setup containerd and crictl.\n+ crictl, cleanup, err := setup(t)\n+ if err != nil {\n+ t.Fatalf(\"failed to setup crictl: %v\", err)\n+ }\n+ defer cleanup()\n+ contSpec := testdata.SimpleSpec(\"root\", \"k8s.gcr.io/busybox\", []string{\"sleep\", \"1000\"})\n+ podID, contID, err := crictl.StartPodAndContainer(\"k8s.gcr.io/busybox\", testdata.Sandbox, contSpec)\n+ if err != nil {\n+ t.Fatal(err)\n+ }\n+\n+ t.Run(\"root container\", func(t *testing.T) {\n+ out, err := crictl.Exec(contID, \"sh\", \"-c\", \"echo $HOME\")\n+ if err != nil {\n+ t.Fatal(err)\n+ }\n+ if got, want := strings.TrimSpace(string(out)), \"/root\"; got != want {\n+ t.Fatalf(\"Home directory invalid. Got %q, Want : %q\", got, want)\n+ }\n+ })\n+\n+ t.Run(\"sub-container\", func(t *testing.T) {\n+ // Create a sub container in the same pod.\n+ subContSpec := testdata.SimpleSpec(\"subcontainer\", \"k8s.gcr.io/busybox\", []string{\"sleep\", \"1000\"})\n+ subContID, err := crictl.StartContainer(podID, \"k8s.gcr.io/busybox\", testdata.Sandbox, subContSpec)\n+ if err != nil {\n+ t.Fatal(err)\n+ }\n+\n+ out, err := crictl.Exec(subContID, \"sh\", \"-c\", \"echo $HOME\")\n+ if err != nil {\n+ t.Fatal(err)\n+ }\n+ if got, want := strings.TrimSpace(string(out)), \"/root\"; got != want {\n+ t.Fatalf(\"Home directory invalid. Got %q, Want: %q\", got, want)\n+ }\n+\n+ if err := crictl.StopContainer(subContID); err != nil {\n+ t.Fatal(err)\n+ }\n+ })\n+\n+ // Stop everything.\n+ if err := crictl.StopPodAndContainer(podID, contID); err != nil {\n+ t.Fatal(err)\n+ }\n+\n+}\n+\n// setup sets up before a test. Specifically it:\n// * Creates directories and a socket for containerd to utilize.\n// * Runs containerd and waits for it to reach a \"ready\" state for testing.\n"
},
{
"change_type": "MODIFY",
"old_path": "test/root/testdata/BUILD",
"new_path": "test/root/testdata/BUILD",
"diff": "@@ -10,6 +10,7 @@ go_library(\n\"httpd.go\",\n\"httpd_mount_paths.go\",\n\"sandbox.go\",\n+ \"simple.go\",\n],\nimportpath = \"gvisor.dev/gvisor/test/root/testdata\",\nvisibility = [\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "test/root/testdata/simple.go",
"diff": "+// Copyright 2018 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package testdata\n+\n+import (\n+ \"encoding/json\"\n+ \"fmt\"\n+)\n+\n+// SimpleSpec returns a JSON config for a simple container that runs the\n+// specified command in the specified image.\n+func SimpleSpec(name, image string, cmd []string) string {\n+ cmds, err := json.Marshal(cmd)\n+ if err != nil {\n+ // This shouldn't happen.\n+ panic(err)\n+ }\n+ return fmt.Sprintf(`\n+{\n+ \"metadata\": {\n+ \"name\": %q\n+ },\n+ \"image\": {\n+ \"image\": %q\n+ },\n+ \"command\": %s\n+ }\n+`, name, image, cmds)\n+}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add tests for $HOME
Adds two tests. One to make sure that $HOME is set when starting a container
via 'docker run' and one to make sure that $HOME is set for each container in a
multi-container sandbox.
Issue #701
PiperOrigin-RevId: 273395763 |
259,891 | 07.10.2019 18:14:52 | 25,200 | 1de0cf3563502c1460964fc2fc9dca1ee447449a | Remove unnecessary context parameter for new pipes. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/tmpfs/tmpfs.go",
"new_path": "pkg/sentry/fs/tmpfs/tmpfs.go",
"diff": "@@ -324,7 +324,7 @@ type Fifo struct {\n// NewFifo creates a new named pipe.\nfunc NewFifo(ctx context.Context, owner fs.FileOwner, perms fs.FilePermissions, msrc *fs.MountSource) *fs.Inode {\n// First create a pipe.\n- p := pipe.NewPipe(ctx, true /* isNamed */, pipe.DefaultPipeSize, usermem.PageSize)\n+ p := pipe.NewPipe(true /* isNamed */, pipe.DefaultPipeSize, usermem.PageSize)\n// Build pipe InodeOperations.\niops := pipe.NewInodeOperations(ctx, perms, p)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/pipe/node_test.go",
"new_path": "pkg/sentry/kernel/pipe/node_test.go",
"diff": "@@ -85,11 +85,11 @@ func testOpen(ctx context.Context, t *testing.T, n fs.InodeOperations, flags fs.\n}\nfunc newNamedPipe(t *testing.T) *Pipe {\n- return NewPipe(contexttest.Context(t), true, DefaultPipeSize, usermem.PageSize)\n+ return NewPipe(true, DefaultPipeSize, usermem.PageSize)\n}\nfunc newAnonPipe(t *testing.T) *Pipe {\n- return NewPipe(contexttest.Context(t), false, DefaultPipeSize, usermem.PageSize)\n+ return NewPipe(false, DefaultPipeSize, usermem.PageSize)\n}\n// assertRecvBlocks ensures that a recv attempt on c blocks for at least\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/pipe/pipe.go",
"new_path": "pkg/sentry/kernel/pipe/pipe.go",
"diff": "@@ -98,7 +98,7 @@ type Pipe struct {\n// NewPipe initializes and returns a pipe.\n//\n// N.B. The size and atomicIOBytes will be bounded.\n-func NewPipe(ctx context.Context, isNamed bool, sizeBytes, atomicIOBytes int64) *Pipe {\n+func NewPipe(isNamed bool, sizeBytes, atomicIOBytes int64) *Pipe {\nif sizeBytes < MinimumPipeSize {\nsizeBytes = MinimumPipeSize\n}\n@@ -121,7 +121,7 @@ func NewPipe(ctx context.Context, isNamed bool, sizeBytes, atomicIOBytes int64)\n// NewConnectedPipe initializes a pipe and returns a pair of objects\n// representing the read and write ends of the pipe.\nfunc NewConnectedPipe(ctx context.Context, sizeBytes, atomicIOBytes int64) (*fs.File, *fs.File) {\n- p := NewPipe(ctx, false /* isNamed */, sizeBytes, atomicIOBytes)\n+ p := NewPipe(false /* isNamed */, sizeBytes, atomicIOBytes)\n// Build an fs.Dirent for the pipe which will be shared by both\n// returned files.\n"
}
] | Go | Apache License 2.0 | google/gvisor | Remove unnecessary context parameter for new pipes.
PiperOrigin-RevId: 273421634 |
259,992 | 08.10.2019 13:34:46 | 25,200 | b9cdbc26bc676caeda1fdc1b30956888116a12be | Ignore mount options that are not supported in shared mounts
Options that do not change mount behavior inside the Sentry are
irrelevant and should not be used when looking for possible
incompatibilities between master and slave mounts. | [
{
"change_type": "MODIFY",
"old_path": "runsc/boot/fs.go",
"new_path": "runsc/boot/fs.go",
"diff": "@@ -64,6 +64,9 @@ const (\nnonefs = \"none\"\n)\n+// tmpfs has some extra supported options that we must pass through.\n+var tmpfsAllowedOptions = []string{\"mode\", \"uid\", \"gid\"}\n+\nfunc addOverlay(ctx context.Context, conf *Config, lower *fs.Inode, name string, lowerFlags fs.MountSourceFlags) (*fs.Inode, error) {\n// Upper layer uses the same flags as lower, but it must be read-write.\nupperFlags := lowerFlags\n@@ -172,27 +175,25 @@ func p9MountOptions(fd int, fa FileAccessType) []string {\nfunc parseAndFilterOptions(opts []string, allowedKeys ...string) ([]string, error) {\nvar out []string\nfor _, o := range opts {\n- kv := strings.Split(o, \"=\")\n- switch len(kv) {\n- case 1:\n- if specutils.ContainsStr(allowedKeys, o) {\n- out = append(out, o)\n- continue\n+ ok, err := parseMountOption(o, allowedKeys...)\n+ if err != nil {\n+ return nil, err\n}\n- log.Warningf(\"ignoring unsupported key %q\", kv)\n- case 2:\n- if specutils.ContainsStr(allowedKeys, kv[0]) {\n+ if ok {\nout = append(out, o)\n- continue\n- }\n- log.Warningf(\"ignoring unsupported key %q\", kv[0])\n- default:\n- return nil, fmt.Errorf(\"invalid option %q\", o)\n}\n}\nreturn out, nil\n}\n+func parseMountOption(opt string, allowedKeys ...string) (bool, error) {\n+ kv := strings.SplitN(opt, \"=\", 3)\n+ if len(kv) > 2 {\n+ return false, fmt.Errorf(\"invalid option %q\", opt)\n+ }\n+ return specutils.ContainsStr(allowedKeys, kv[0]), nil\n+}\n+\n// mountDevice returns a device string based on the fs type and target\n// of the mount.\nfunc mountDevice(m specs.Mount) string {\n@@ -207,6 +208,8 @@ func mountDevice(m specs.Mount) string {\nfunc mountFlags(opts []string) fs.MountSourceFlags {\nmf := fs.MountSourceFlags{}\n+ // Note: changes to supported options must be reflected in\n+ // isSupportedMountFlag() as well.\nfor _, o := range opts {\nswitch o {\ncase \"rw\":\n@@ -224,6 +227,18 @@ func mountFlags(opts []string) fs.MountSourceFlags {\nreturn mf\n}\n+func isSupportedMountFlag(fstype, opt string) bool {\n+ switch opt {\n+ case \"rw\", \"ro\", \"noatime\", \"noexec\":\n+ return true\n+ }\n+ if fstype == tmpfs {\n+ ok, err := parseMountOption(opt, tmpfsAllowedOptions...)\n+ return ok && err == nil\n+ }\n+ return false\n+}\n+\nfunc mustFindFilesystem(name string) fs.Filesystem {\nfs, ok := fs.FindFilesystem(name)\nif !ok {\n@@ -427,6 +442,39 @@ func (m *mountHint) isSupported() bool {\nreturn m.mount.Type == tmpfs && m.share == pod\n}\n+// checkCompatible verifies that shared mount is compatible with master.\n+// For now enforce that all options are the same. Once bind mount is properly\n+// supported, then we should ensure the master is less restrictive than the\n+// container, e.g. 
master can be 'rw' while container mounts as 'ro'.\n+func (m *mountHint) checkCompatible(mount specs.Mount) error {\n+ // Remove options that don't affect to mount's behavior.\n+ masterOpts := filterUnsupportedOptions(m.mount)\n+ slaveOpts := filterUnsupportedOptions(mount)\n+\n+ if len(masterOpts) != len(slaveOpts) {\n+ return fmt.Errorf(\"mount options in annotations differ from container mount, annotation: %s, mount: %s\", masterOpts, slaveOpts)\n+ }\n+\n+ sort.Strings(masterOpts)\n+ sort.Strings(slaveOpts)\n+ for i, opt := range masterOpts {\n+ if opt != slaveOpts[i] {\n+ return fmt.Errorf(\"mount options in annotations differ from container mount, annotation: %s, mount: %s\", masterOpts, slaveOpts)\n+ }\n+ }\n+ return nil\n+}\n+\n+func filterUnsupportedOptions(mount specs.Mount) []string {\n+ rv := make([]string, 0, len(mount.Options))\n+ for _, o := range mount.Options {\n+ if isSupportedMountFlag(mount.Type, o) {\n+ rv = append(rv, o)\n+ }\n+ }\n+ return rv\n+}\n+\n// podMountHints contains a collection of mountHints for the pod.\ntype podMountHints struct {\nmounts map[string]*mountHint\n@@ -699,9 +747,7 @@ func (c *containerMounter) getMountNameAndOptions(conf *Config, m specs.Mount) (\nfsName = sysfs\ncase tmpfs:\nfsName = m.Type\n-\n- // tmpfs has some extra supported options that we must pass through.\n- opts, err = parseAndFilterOptions(m.Options, \"mode\", \"uid\", \"gid\")\n+ opts, err = parseAndFilterOptions(m.Options, tmpfsAllowedOptions...)\ncase bind:\nfd := c.fds.remove()\n@@ -786,17 +832,8 @@ func (c *containerMounter) mountSubmount(ctx context.Context, conf *Config, mns\n// mountSharedSubmount binds mount to a previously mounted volume that is shared\n// among containers in the same pod.\nfunc (c *containerMounter) mountSharedSubmount(ctx context.Context, mns *fs.MountNamespace, root *fs.Dirent, mount specs.Mount, source *mountHint) error {\n- // For now enforce that all options are the same. Once bind mount is properly\n- // supported, then we should ensure the master is less restrictive than the\n- // container, e.g. master can be 'rw' while container mounts as 'ro'.\n- if len(mount.Options) != len(source.mount.Options) {\n- return fmt.Errorf(\"mount options in annotations differ from container mount, annotation: %s, mount: %s\", source.mount.Options, mount.Options)\n- }\n- sort.Strings(mount.Options)\n- for i, opt := range mount.Options {\n- if opt != source.mount.Options[i] {\n- return fmt.Errorf(\"mount options in annotations differ from container mount, annotation: %s, mount: %s\", source.mount.Options, mount.Options)\n- }\n+ if err := source.checkCompatible(mount); err != nil {\n+ return err\n}\nmaxTraversals := uint(0)\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/container/multi_container_test.go",
"new_path": "runsc/container/multi_container_test.go",
"diff": "@@ -1297,6 +1297,53 @@ func TestMultiContainerSharedMountRestart(t *testing.T) {\n}\n}\n+// Test that unsupported pod mounts options are ignored when matching master and\n+// slave mounts.\n+func TestMultiContainerSharedMountUnsupportedOptions(t *testing.T) {\n+ conf := testutil.TestConfig()\n+ t.Logf(\"Running test with conf: %+v\", conf)\n+\n+ // Setup the containers.\n+ sleep := []string{\"/bin/sleep\", \"100\"}\n+ podSpec, ids := createSpecs(sleep, sleep)\n+ mnt0 := specs.Mount{\n+ Destination: \"/mydir/test\",\n+ Source: \"/some/dir\",\n+ Type: \"tmpfs\",\n+ Options: []string{\"rw\", \"rbind\", \"relatime\"},\n+ }\n+ podSpec[0].Mounts = append(podSpec[0].Mounts, mnt0)\n+\n+ mnt1 := mnt0\n+ mnt1.Destination = \"/mydir2/test2\"\n+ mnt1.Options = []string{\"rw\", \"nosuid\"}\n+ podSpec[1].Mounts = append(podSpec[1].Mounts, mnt1)\n+\n+ createSharedMount(mnt0, \"test-mount\", podSpec...)\n+\n+ containers, cleanup, err := startContainers(conf, podSpec, ids)\n+ if err != nil {\n+ t.Fatalf(\"error starting containers: %v\", err)\n+ }\n+ defer cleanup()\n+\n+ execs := []execDesc{\n+ {\n+ c: containers[0],\n+ cmd: []string{\"/usr/bin/test\", \"-d\", mnt0.Destination},\n+ desc: \"directory is mounted in container0\",\n+ },\n+ {\n+ c: containers[1],\n+ cmd: []string{\"/usr/bin/test\", \"-d\", mnt1.Destination},\n+ desc: \"directory is mounted in container1\",\n+ },\n+ }\n+ if err := execMany(execs); err != nil {\n+ t.Fatal(err.Error())\n+ }\n+}\n+\n// Test that one container can send an FD to another container, even though\n// they have distinct MountNamespaces.\nfunc TestMultiContainerMultiRootCanHandleFDs(t *testing.T) {\n"
}
] | Go | Apache License 2.0 | google/gvisor | Ignore mount options that are not supported in shared mounts
Options that do not change mount behavior inside the Sentry are
irrelevant and should not be used when looking for possible
incompatibilities between master and slave mounts.
PiperOrigin-RevId: 273593486 |
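A self-contained sketch of the comparison rule: drop options that do not change mount behavior inside the sandbox before matching a shared (master) mount against a container mount, so that rw,rbind,relatime and rw,nosuid both reduce to rw and are considered compatible, as in the new test. The option whitelist and helpers below are simplified stand-ins (the real code also admits tmpfs mode/uid/gid options):

```go
package main

import (
	"fmt"
	"sort"
)

// supported lists options that actually affect mount behavior inside the
// sandbox; everything else (rbind, relatime, nosuid, ...) is ignored when
// deciding whether two mounts of the same shared volume are compatible.
var supported = map[string]bool{"rw": true, "ro": true, "noatime": true, "noexec": true}

// filter keeps only the supported options, sorted for stable comparison.
func filter(opts []string) []string {
	var out []string
	for _, o := range opts {
		if supported[o] {
			out = append(out, o)
		}
	}
	sort.Strings(out)
	return out
}

// compatible reports whether two option lists agree once the irrelevant
// options have been stripped.
func compatible(master, slave []string) bool {
	m, s := filter(master), filter(slave)
	if len(m) != len(s) {
		return false
	}
	for i := range m {
		if m[i] != s[i] {
			return false
		}
	}
	return true
}

func main() {
	master := []string{"rw", "rbind", "relatime"}
	slave := []string{"rw", "nosuid"}
	fmt.Println(compatible(master, slave)) // true: both reduce to ["rw"]
}
```

With this rule, the incompatibility error is only raised when options the sentry actually honors differ between the pod-level mount and the container mount.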
259,974 | 29.08.2019 10:10:26 | 0 | ebbf2b7fbdcda80d158b72276786a4a1dcad664a | Enable pkg/atomicbitops support on arm64. | [
{
"change_type": "MODIFY",
"old_path": "pkg/atomicbitops/BUILD",
"new_path": "pkg/atomicbitops/BUILD",
"diff": "@@ -8,6 +8,7 @@ go_library(\nsrcs = [\n\"atomic_bitops.go\",\n\"atomic_bitops_amd64.s\",\n+ \"atomic_bitops_arm64.s\",\n\"atomic_bitops_common.go\",\n],\nimportpath = \"gvisor.dev/gvisor/pkg/atomicbitops\",\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/atomicbitops/atomic_bitops.go",
"new_path": "pkg/atomicbitops/atomic_bitops.go",
"diff": "// See the License for the specific language governing permissions and\n// limitations under the License.\n-// +build amd64\n+// +build amd64 arm64\n// Package atomicbitops provides basic bitwise operations in an atomic way.\n// The implementation on amd64 leverages the LOCK prefix directly instead of\n-// relying on the generic cas primitives.\n+// relying on the generic cas primitives, and the arm64 leverages the LDAXR\n+// and STLXR pair primitives.\n//\n// WARNING: the bitwise ops provided in this package doesn't imply any memory\n// ordering. Using them to construct locks must employ proper memory barriers.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/atomicbitops/atomic_bitops_common.go",
"new_path": "pkg/atomicbitops/atomic_bitops_common.go",
"diff": "// See the License for the specific language governing permissions and\n// limitations under the License.\n-// +build !amd64\n+// +build !amd64,!arm64\npackage atomicbitops\n"
}
] | Go | Apache License 2.0 | google/gvisor | Enable pkg/atomicbitops support on arm64.
Signed-off-by: Haibo Xu <[email protected]>
Change-Id: I1646aaa6f07b5ec31c39c318b70f48693fe59a7c |
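For reference, the portable fallback kept in atomic_bitops_common.go for other architectures amounts to compare-and-swap loops. A rough stand-alone illustration of that idea in plain Go (not the package's actual code, which uses dedicated LDAXR/STLXR pairs on arm64 and LOCK-prefixed instructions on amd64):

package main

import (
    "fmt"
    "sync/atomic"
)

// andUint32 atomically applies *addr &= mask via a CAS loop.
func andUint32(addr *uint32, mask uint32) {
    for {
        old := atomic.LoadUint32(addr)
        if atomic.CompareAndSwapUint32(addr, old, old&mask) {
            return
        }
    }
}

// orUint32 atomically applies *addr |= mask via a CAS loop.
func orUint32(addr *uint32, mask uint32) {
    for {
        old := atomic.LoadUint32(addr)
        if atomic.CompareAndSwapUint32(addr, old, old|mask) {
            return
        }
    }
}

func main() {
    var v uint32 = 0b1010
    orUint32(&v, 0b0101)  // v == 0b1111
    andUint32(&v, 0b0110) // v == 0b0110
    fmt.Printf("%04b\n", v)
}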
259,858 | 10.10.2019 12:45:34 | 25,200 | f8b18593198cf7ca1adfca19d846e66080b07942 | Fix signalfd polling.
The signalfd descriptors otherwise always show as available. This can lead
programs to spin, assuming they are looking to see what signals are pending.
Updates | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/signalfd/signalfd.go",
"new_path": "pkg/sentry/kernel/signalfd/signalfd.go",
"diff": "@@ -121,7 +121,10 @@ func (s *SignalOperations) Read(ctx context.Context, _ *fs.File, dst usermem.IOS\n// Readiness implements waiter.Waitable.Readiness.\nfunc (s *SignalOperations) Readiness(mask waiter.EventMask) waiter.EventMask {\n- return mask & waiter.EventIn\n+ if mask&waiter.EventIn != 0 && s.target.PendingSignals()&s.Mask() != 0 {\n+ return waiter.EventIn // Pending signals.\n+ }\n+ return 0\n}\n// EventRegister implements waiter.Waitable.EventRegister.\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/signalfd.cc",
"new_path": "test/syscalls/linux/signalfd.cc",
"diff": "@@ -312,6 +312,23 @@ TEST(Signalfd, KillStillKills) {\nEXPECT_EXIT(tgkill(getpid(), gettid(), SIGKILL), KilledBySignal(SIGKILL), \"\");\n}\n+TEST(Signalfd, Ppoll) {\n+ sigset_t mask;\n+ sigemptyset(&mask);\n+ sigaddset(&mask, SIGKILL);\n+ FileDescriptor fd =\n+ ASSERT_NO_ERRNO_AND_VALUE(NewSignalFD(&mask, SFD_CLOEXEC));\n+\n+ // Ensure that the given ppoll blocks.\n+ struct pollfd pfd = {};\n+ pfd.fd = fd.get();\n+ pfd.events = POLLIN;\n+ struct timespec timeout = {};\n+ timeout.tv_sec = 1;\n+ EXPECT_THAT(RetryEINTR(ppoll)(&pfd, 1, &timeout, &mask),\n+ SyscallSucceedsWithValue(0));\n+}\n+\n} // namespace\n} // namespace testing\n"
}
] | Go | Apache License 2.0 | google/gvisor | Fix signalfd polling.
The signalfd descriptors otherwise always show as available. This can lead
programs to spin, assuming they are looking to see what signals are pending.
Updates #139
PiperOrigin-RevId: 274017890 |
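The essence of the fix: readiness has to be derived from the intersection of the pending-signal set and the signalfd mask instead of being reported unconditionally. A toy model of that check, with plain integers standing in for the sentry's waiter and signal-set types:

package main

import "fmt"

const eventIn = uint64(0x1) // readable, standing in for waiter.EventIn

// readiness mirrors the fixed logic: report EventIn only when at least one
// pending signal is also present in the signalfd mask.
func readiness(pending, mask, events uint64) uint64 {
    if events&eventIn != 0 && pending&mask != 0 {
        return eventIn
    }
    return 0
}

func main() {
    sigusr1 := uint64(1) << 9 // bit for signal 10 (SIGUSR1); sigsets use bit n-1
    fmt.Println(readiness(0, sigusr1, eventIn))       // 0: poll/ppoll blocks
    fmt.Println(readiness(sigusr1, sigusr1, eventIn)) // 1: readable
}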
259,881 | 10.10.2019 13:39:55 | 25,200 | a5170fd825efbea0550137b5979f7bd08398aa55 | Allow rt_sigreturn in runsc gofer
rt_sigreturn is required for signal handling (e.g., SIGSEGV for nil-pointer
dereference). Before this, nil-pointer dereferences cause a syscall violation
instead of a panic. | [
{
"change_type": "MODIFY",
"old_path": "runsc/fsgofer/filter/config.go",
"new_path": "runsc/fsgofer/filter/config.go",
"diff": "@@ -177,6 +177,7 @@ var allowedSyscalls = seccomp.SyscallRules{\nsyscall.SYS_RENAMEAT: {},\nsyscall.SYS_RESTART_SYSCALL: {},\nsyscall.SYS_RT_SIGPROCMASK: {},\n+ syscall.SYS_RT_SIGRETURN: {},\nsyscall.SYS_SCHED_YIELD: {},\nsyscall.SYS_SENDMSG: []seccomp.Rule{\n// Used by fdchannel.Endpoint.SendFD().\n"
}
] | Go | Apache License 2.0 | google/gvisor | Allow rt_sigreturn in runsc gofer
rt_sigreturn is required for signal handling (e.g., SIGSEGV for nil-pointer
dereference). Before this, nil-pointer dereferences cause a syscall violation
instead of a panic.
PiperOrigin-RevId: 274028767 |
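Purely illustrative (the gofer compiles a seccomp.SyscallRules map into a BPF filter rather than consulting a Go map at runtime): the shape of an allowlist that must include rt_sigreturn so signal handlers — for example the one that turns a SIGSEGV from a nil-pointer dereference into a Go panic — can return. Linux-only, since it uses syscall.SYS_* constants.

package main

import (
    "fmt"
    "syscall"
)

// allowed is a toy allowlist keyed by syscall number.
var allowed = map[uintptr]bool{
    syscall.SYS_READ:         true,
    syscall.SYS_WRITE:        true,
    syscall.SYS_RT_SIGRETURN: true, // needed to return from signal handlers
}

func check(nr uintptr) error {
    if !allowed[nr] {
        return fmt.Errorf("syscall %d not allowed", nr)
    }
    return nil
}

func main() {
    fmt.Println(check(syscall.SYS_RT_SIGRETURN)) // <nil>
}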
259,884 | 10.10.2019 14:41:41 | 25,200 | 065339193e4309a8c771ba88058c3b2d96c07e78 | Update TODO for OCI seccomp support. | [
{
"change_type": "MODIFY",
"old_path": "runsc/specutils/specutils.go",
"new_path": "runsc/specutils/specutils.go",
"diff": "@@ -92,7 +92,7 @@ func ValidateSpec(spec *specs.Spec) error {\nlog.Warningf(\"AppArmor profile %q is being ignored\", spec.Process.ApparmorProfile)\n}\n- // TODO(b/72226747): Apply seccomp to application inside sandbox.\n+ // TODO(gvisor.dev/issue/510): Apply seccomp to application inside sandbox.\nif spec.Linux != nil && spec.Linux.Seccomp != nil {\nlog.Warningf(\"Seccomp spec is being ignored\")\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Update TODO for OCI seccomp support.
PiperOrigin-RevId: 274042343 |
259,962 | 10.10.2019 15:13:39 | 25,200 | c7e901f47a09eaac56bd4813227edff016fa6bff | Fix bugs in fragment handling.
Strengthen the header.IPv4.IsValid check to correctly check
for IHL/TotalLength fields. Also add a check to make sure
fragmentOffsets + size of the fragment do not cause a wrap
around for the end of the fragment. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/netstack/netstack.go",
"new_path": "pkg/sentry/socket/netstack/netstack.go",
"diff": "@@ -142,6 +142,8 @@ var Metrics = tcpip.Stats{\nPacketsDelivered: mustCreateMetric(\"/netstack/ip/packets_delivered\", \"Total number of incoming IP packets that are successfully delivered to the transport layer via HandlePacket.\"),\nPacketsSent: mustCreateMetric(\"/netstack/ip/packets_sent\", \"Total number of IP packets sent via WritePacket.\"),\nOutgoingPacketErrors: mustCreateMetric(\"/netstack/ip/outgoing_packet_errors\", \"Total number of IP packets which failed to write to a link-layer endpoint.\"),\n+ MalformedPacketsReceived: mustCreateMetric(\"/netstack/ip/malformed_packets_received\", \"Total number of IP packets which failed IP header validation checks.\"),\n+ MalformedFragmentsReceived: mustCreateMetric(\"/netstack/ip/malformed_fragments_received\", \"Total number of IP fragments which failed IP fragment validation checks.\"),\n},\nTCP: tcpip.TCPStats{\nActiveConnectionOpenings: mustCreateMetric(\"/netstack/tcp/active_connection_openings\", \"Number of connections opened successfully via Connect.\"),\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/header/ipv4.go",
"new_path": "pkg/tcpip/header/ipv4.go",
"diff": "@@ -284,7 +284,7 @@ func (b IPv4) IsValid(pktSize int) bool {\nhlen := int(b.HeaderLength())\ntlen := int(b.TotalLength())\n- if hlen > tlen || tlen > pktSize {\n+ if hlen < IPv4MinimumSize || hlen > tlen || tlen > pktSize {\nreturn false\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/ipv4/ipv4.go",
"new_path": "pkg/tcpip/network/ipv4/ipv4.go",
"diff": "@@ -296,6 +296,7 @@ func (e *endpoint) HandlePacket(r *stack.Route, vv buffer.VectorisedView) {\nheaderView := vv.First()\nh := header.IPv4(headerView)\nif !h.IsValid(vv.Size()) {\n+ r.Stats().IP.MalformedPacketsReceived.Increment()\nreturn\n}\n@@ -306,8 +307,23 @@ func (e *endpoint) HandlePacket(r *stack.Route, vv buffer.VectorisedView) {\nmore := (h.Flags() & header.IPv4FlagMoreFragments) != 0\nif more || h.FragmentOffset() != 0 {\n+ if vv.Size() == 0 {\n+ // Drop the packet as it's marked as a fragment but has\n+ // no payload.\n+ r.Stats().IP.MalformedPacketsReceived.Increment()\n+ r.Stats().IP.MalformedFragmentsReceived.Increment()\n+ return\n+ }\n// The packet is a fragment, let's try to reassemble it.\nlast := h.FragmentOffset() + uint16(vv.Size()) - 1\n+ // Drop the packet if the fragmentOffset is incorrect. i.e the\n+ // combination of fragmentOffset and vv.size() causes a wrap\n+ // around resulting in last being less than the offset.\n+ if last < h.FragmentOffset() {\n+ r.Stats().IP.MalformedPacketsReceived.Increment()\n+ r.Stats().IP.MalformedFragmentsReceived.Increment()\n+ return\n+ }\nvar ready bool\nvv, ready = e.fragmentation.Process(hash.IPv4FragmentHash(h), h.FragmentOffset(), last, more, vv)\nif !ready {\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/ipv4/ipv4_test.go",
"new_path": "pkg/tcpip/network/ipv4/ipv4_test.go",
"diff": "@@ -366,3 +366,107 @@ func TestFragmentationErrors(t *testing.T) {\n})\n}\n}\n+\n+func TestInvalidFragments(t *testing.T) {\n+ // These packets have both IHL and TotalLength set to 0.\n+ testCases := []struct {\n+ name string\n+ packets [][]byte\n+ wantMalformedIPPackets uint64\n+ wantMalformedFragments uint64\n+ }{\n+ {\n+ \"ihl_totallen_zero_valid_frag_offset\",\n+ [][]byte{\n+ {0x40, 0x30, 0x00, 0x00, 0x6c, 0x74, 0x7d, 0x30, 0x30, 0x30, 0x30, 0x30, 0x39, 0x32, 0x39, 0x33, 0xff, 0xff, 0xff, 0xff, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30},\n+ },\n+ 1,\n+ 0,\n+ },\n+ {\n+ \"ihl_totallen_zero_invalid_frag_offset\",\n+ [][]byte{\n+ {0x40, 0x30, 0x00, 0x00, 0x6c, 0x74, 0x20, 0x00, 0x30, 0x30, 0x30, 0x30, 0x39, 0x32, 0x39, 0x33, 0xff, 0xff, 0xff, 0xff, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30},\n+ },\n+ 1,\n+ 0,\n+ },\n+ {\n+ // Total Length of 37(20 bytes IP header + 17 bytes of\n+ // payload)\n+ // Frag Offset of 0x1ffe = 8190*8 = 65520\n+ // Leading to the fragment end to be past 65535.\n+ \"ihl_totallen_valid_invalid_frag_offset_1\",\n+ [][]byte{\n+ {0x45, 0x30, 0x00, 0x25, 0x6c, 0x74, 0x1f, 0xfe, 0x30, 0x30, 0x30, 0x30, 0x39, 0x32, 0x39, 0x33, 0xff, 0xff, 0xff, 0xff, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30},\n+ },\n+ 1,\n+ 1,\n+ },\n+ // The following 3 tests were found by running a fuzzer and were\n+ // triggering a panic in the IPv4 reassembler code.\n+ {\n+ \"ihl_less_than_ipv4_minimum_size_1\",\n+ [][]byte{\n+ {0x42, 0x30, 0x0, 0x30, 0x30, 0x40, 0x0, 0xf3, 0x30, 0x1, 0x30, 0x30, 0x73, 0x73, 0x69, 0x6e, 0xff, 0xff, 0xff, 0xff, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30},\n+ {0x42, 0x30, 0x0, 0x8, 0x30, 0x40, 0x20, 0x0, 0x30, 0x1, 0x30, 0x30, 0x73, 0x73, 0x69, 0x6e, 0xff, 0xff, 0xff, 0xff, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30},\n+ },\n+ 2,\n+ 0,\n+ },\n+ {\n+ \"ihl_less_than_ipv4_minimum_size_2\",\n+ [][]byte{\n+ {0x42, 0x30, 0x0, 0x30, 0x30, 0x40, 0xb3, 0x12, 0x30, 0x6, 0x30, 0x30, 0x73, 0x73, 0x69, 0x6e, 0xff, 0xff, 0xff, 0xff, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30},\n+ {0x42, 0x30, 0x0, 0x8, 0x30, 0x40, 0x20, 0x0, 0x30, 0x6, 0x30, 0x30, 0x73, 0x73, 0x69, 0x6e, 0xff, 0xff, 0xff, 0xff, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30},\n+ },\n+ 2,\n+ 0,\n+ },\n+ {\n+ \"ihl_less_than_ipv4_minimum_size_3\",\n+ [][]byte{\n+ {0x42, 0x30, 0x0, 0x30, 0x30, 0x40, 0xb3, 0x30, 0x30, 0x6, 0x30, 0x30, 0x73, 0x73, 0x69, 0x6e, 0xff, 0xff, 0xff, 0xff, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30},\n+ {0x42, 0x30, 0x0, 0x8, 0x30, 0x40, 0x20, 0x0, 0x30, 0x6, 0x30, 0x30, 0x73, 0x73, 0x69, 0x6e, 0xff, 0xff, 0xff, 0xff, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30},\n+ },\n+ 2,\n+ 0,\n+ },\n+ {\n+ 
\"fragment_with_short_total_len_extra_payload\",\n+ [][]byte{\n+ {0x46, 0x30, 0x00, 0x30, 0x30, 0x40, 0x0e, 0x12, 0x30, 0x06, 0x30, 0x30, 0x73, 0x73, 0x69, 0x6e, 0xff, 0xff, 0xff, 0xff, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30},\n+ {0x46, 0x30, 0x00, 0x18, 0x30, 0x40, 0x20, 0x00, 0x30, 0x06, 0x30, 0x30, 0x73, 0x73, 0x69, 0x6e, 0xff, 0xff, 0xff, 0xff, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30},\n+ },\n+ 1,\n+ 1,\n+ },\n+ }\n+\n+ for _, tc := range testCases {\n+ t.Run(tc.name, func(t *testing.T) {\n+ const nicid tcpip.NICID = 42\n+ s := stack.New(stack.Options{\n+ NetworkProtocols: []stack.NetworkProtocol{\n+ ipv4.NewProtocol(),\n+ },\n+ })\n+\n+ var linkAddr = tcpip.LinkAddress([]byte{0x30, 0x30, 0x30, 0x30, 0x30, 0x30})\n+ var remoteLinkAddr = tcpip.LinkAddress([]byte{0x30, 0x30, 0x30, 0x30, 0x30, 0x31})\n+ ep := channel.New(10, 1500, linkAddr)\n+ s.CreateNIC(nicid, sniffer.New(ep))\n+\n+ for _, pkt := range tc.packets {\n+ ep.InjectLinkAddr(header.IPv4ProtocolNumber, remoteLinkAddr, buffer.NewVectorisedView(len(pkt), []buffer.View{pkt}))\n+ }\n+\n+ if got, want := s.Stats().IP.MalformedPacketsReceived.Value(), tc.wantMalformedIPPackets; got != want {\n+ t.Errorf(\"incorrect Stats.IP.MalformedPacketsReceived, got: %d, want: %d\", got, want)\n+ }\n+ if got, want := s.Stats().IP.MalformedFragmentsReceived.Value(), tc.wantMalformedFragments; got != want {\n+ t.Errorf(\"incorrect Stats.IP.MalformedFragmentsReceived, got: %d, want: %d\", got, want)\n+ }\n+ })\n+ }\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/tcpip.go",
"new_path": "pkg/tcpip/tcpip.go",
"diff": "@@ -853,6 +853,14 @@ type IPStats struct {\n// OutgoingPacketErrors is the total number of IP packets which failed\n// to write to a link-layer endpoint.\nOutgoingPacketErrors *StatCounter\n+\n+ // MalformedPacketsReceived is the total number of IP Packets that were\n+ // dropped due to the IP packet header failing validation checks.\n+ MalformedPacketsReceived *StatCounter\n+\n+ // MalformedFragmentsReceived is the total number of IP Fragments that were\n+ // dropped due to the fragment failing validation checks.\n+ MalformedFragmentsReceived *StatCounter\n}\n// TCPStats collects TCP-specific stats.\n"
}
] | Go | Apache License 2.0 | google/gvisor | Fix bugs in fragment handling.
Strengthen the header.IPv4.IsValid check to correctly check
for IHL/TotalLength fields. Also add a check to make sure
fragmentOffsets + size of the fragment do not cause a wrap
around for the end of the fragment.
PiperOrigin-RevId: 274049313 |
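The two validations added by this commit, restated over plain integers rather than the header/buffer types (a sketch, not the netstack code itself):

package main

import "fmt"

const ipv4MinimumSize = 20

// headerValid mirrors the strengthened IsValid check: IHL must be at least
// the minimum IPv4 header size, must not exceed the total length, and the
// total length must fit inside the received packet.
func headerValid(hlen, tlen, pktSize int) bool {
    return hlen >= ipv4MinimumSize && hlen <= tlen && tlen <= pktSize
}

// fragmentEndOK drops empty fragments and rejects fragments whose offset
// plus payload size wraps the 16-bit "last byte" computation.
func fragmentEndOK(offset uint16, payload int) bool {
    if payload == 0 {
        return false
    }
    last := offset + uint16(payload) - 1
    return last >= offset
}

func main() {
    fmt.Println(headerValid(0, 0, 32))     // false: IHL/TotalLength of zero
    fmt.Println(fragmentEndOK(8190*8, 17)) // false: 65520+17-1 wraps past 65535
    fmt.Println(fragmentEndOK(0, 17))      // true
}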
259,962 | 14.10.2019 12:49:50 | 25,200 | a2964259702cc4ed3f10d2b8e352a36ef26f0215 | Use a different fanoutID for each new fdbased endpoint. | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/fdbased/endpoint.go",
"new_path": "pkg/tcpip/link/fdbased/endpoint.go",
"diff": "@@ -178,6 +178,14 @@ type Options struct {\nRXChecksumOffload bool\n}\n+// fanoutID is used for AF_PACKET based endpoints to enable PACKET_FANOUT\n+// support in the host kernel. This allows us to use multiple FD's to receive\n+// from the same underlying NIC. The fanoutID needs to be the same for a given\n+// set of FD's that point to the same NIC. Trying to set the PACKET_FANOUT\n+// option for an FD with a fanoutID already in use by another FD for a different\n+// NIC will return an EINVAL.\n+var fanoutID = 1\n+\n// New creates a new fd-based endpoint.\n//\n// Makes fd non-blocking, but does not take ownership of fd, which must remain\n@@ -245,6 +253,10 @@ func New(opts *Options) (stack.LinkEndpoint, error) {\ne.inboundDispatchers = append(e.inboundDispatchers, inboundDispatcher)\n}\n+ // Increment fanoutID to ensure that we don't re-use the same fanoutID for\n+ // the next endpoint.\n+ fanoutID++\n+\nreturn e, nil\n}\n@@ -265,7 +277,6 @@ func createInboundDispatcher(e *endpoint, fd int, isSocket bool) (linkDispatcher\ncase *unix.SockaddrLinklayer:\n// enable PACKET_FANOUT mode is the underlying socket is\n// of type AF_PACKET.\n- const fanoutID = 1\nconst fanoutType = 0x8000 // PACKET_FANOUT_HASH | PACKET_FANOUT_FLAG_DEFRAG\nfanoutArg := fanoutID | fanoutType<<16\nif err := syscall.SetsockoptInt(fd, syscall.SOL_PACKET, unix.PACKET_FANOUT, fanoutArg); err != nil {\n"
}
] | Go | Apache License 2.0 | google/gvisor | Use a different fanoutID for each new fdbased endpoint.
PiperOrigin-RevId: 274638272 |
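For context, the host-side configuration involved, pulled into a small Linux-only helper that mirrors the endpoint code above: every fd that joins the same fanout group id shares received packets for that NIC, and reusing an id for a different NIC makes the setsockopt fail with EINVAL. Creating the AF_PACKET socket needs CAP_NET_RAW, so the demo just prints the error otherwise.

package main

import (
    "fmt"
    "syscall"

    "golang.org/x/sys/unix"
)

// joinFanout puts an AF_PACKET socket into fanout group id so the kernel
// spreads received packets across every fd in that group.
func joinFanout(fd, id int) error {
    const fanoutType = 0x8000 // PACKET_FANOUT_HASH | PACKET_FANOUT_FLAG_DEFRAG
    arg := id | fanoutType<<16
    return syscall.SetsockoptInt(fd, syscall.SOL_PACKET, unix.PACKET_FANOUT, arg)
}

// htons converts to network byte order for the AF_PACKET protocol argument.
func htons(v uint16) uint16 { return v<<8 | v>>8 }

func main() {
    fd, err := syscall.Socket(syscall.AF_PACKET, syscall.SOCK_RAW, int(htons(syscall.ETH_P_ALL)))
    if err != nil {
        fmt.Println("socket:", err) // likely EPERM without CAP_NET_RAW
        return
    }
    defer syscall.Close(fd)
    fmt.Println(joinFanout(fd, 1))
}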
259,891 | 14.10.2019 15:20:35 | 25,200 | 2302afb53d5d0a6714438649f0ab0a2c020af288 | Reorder BUILD license and load functions in netstack. | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/BUILD",
"new_path": "pkg/tcpip/BUILD",
"diff": "load(\"@io_bazel_rules_go//go:def.bzl\", \"go_test\")\n+load(\"//tools/go_stateify:defs.bzl\", \"go_library\")\npackage(licenses = [\"notice\"])\n-load(\"//tools/go_stateify:defs.bzl\", \"go_library\")\n-\ngo_library(\nname = \"tcpip\",\nsrcs = [\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/buffer/BUILD",
"new_path": "pkg/tcpip/buffer/BUILD",
"diff": "load(\"@io_bazel_rules_go//go:def.bzl\", \"go_test\")\n+load(\"//tools/go_stateify:defs.bzl\", \"go_library\")\npackage(licenses = [\"notice\"])\n-load(\"//tools/go_stateify:defs.bzl\", \"go_library\")\n-\ngo_library(\nname = \"buffer\",\nsrcs = [\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/header/BUILD",
"new_path": "pkg/tcpip/header/BUILD",
"diff": "load(\"@io_bazel_rules_go//go:def.bzl\", \"go_test\")\n+load(\"//tools/go_stateify:defs.bzl\", \"go_library\")\npackage(licenses = [\"notice\"])\n-load(\"//tools/go_stateify:defs.bzl\", \"go_library\")\n-\ngo_library(\nname = \"header\",\nsrcs = [\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/iptables/BUILD",
"new_path": "pkg/tcpip/iptables/BUILD",
"diff": "-package(licenses = [\"notice\"])\n-\nload(\"//tools/go_stateify:defs.bzl\", \"go_library\")\n+package(licenses = [\"notice\"])\n+\ngo_library(\nname = \"iptables\",\nsrcs = [\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/fragmentation/BUILD",
"new_path": "pkg/tcpip/network/fragmentation/BUILD",
"diff": "load(\"@io_bazel_rules_go//go:def.bzl\", \"go_test\")\n-\n-package(licenses = [\"notice\"])\n-\nload(\"//tools/go_generics:defs.bzl\", \"go_template_instance\")\nload(\"//tools/go_stateify:defs.bzl\", \"go_library\")\n+package(licenses = [\"notice\"])\n+\ngo_template_instance(\nname = \"reassembler_list\",\nout = \"reassembler_list.go\",\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/seqnum/BUILD",
"new_path": "pkg/tcpip/seqnum/BUILD",
"diff": "-package(licenses = [\"notice\"])\n-\nload(\"//tools/go_stateify:defs.bzl\", \"go_library\")\n+package(licenses = [\"notice\"])\n+\ngo_library(\nname = \"seqnum\",\nsrcs = [\"seqnum.go\"],\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/BUILD",
"new_path": "pkg/tcpip/stack/BUILD",
"diff": "load(\"@io_bazel_rules_go//go:def.bzl\", \"go_test\")\n-\n-package(licenses = [\"notice\"])\n-\nload(\"//tools/go_generics:defs.bzl\", \"go_template_instance\")\nload(\"//tools/go_stateify:defs.bzl\", \"go_library\")\n+package(licenses = [\"notice\"])\n+\ngo_template_instance(\nname = \"linkaddrentry_list\",\nout = \"linkaddrentry_list.go\",\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/icmp/BUILD",
"new_path": "pkg/tcpip/transport/icmp/BUILD",
"diff": "-package(licenses = [\"notice\"])\n-\nload(\"//tools/go_generics:defs.bzl\", \"go_template_instance\")\nload(\"//tools/go_stateify:defs.bzl\", \"go_library\")\n+package(licenses = [\"notice\"])\n+\ngo_template_instance(\nname = \"icmp_packet_list\",\nout = \"icmp_packet_list.go\",\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/raw/BUILD",
"new_path": "pkg/tcpip/transport/raw/BUILD",
"diff": "-package(licenses = [\"notice\"])\n-\nload(\"//tools/go_generics:defs.bzl\", \"go_template_instance\")\nload(\"//tools/go_stateify:defs.bzl\", \"go_library\")\n+package(licenses = [\"notice\"])\n+\ngo_template_instance(\nname = \"packet_list\",\nout = \"packet_list.go\",\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/BUILD",
"new_path": "pkg/tcpip/transport/tcp/BUILD",
"diff": "load(\"@io_bazel_rules_go//go:def.bzl\", \"go_test\")\n-\n-package(licenses = [\"notice\"])\n-\nload(\"//tools/go_generics:defs.bzl\", \"go_template_instance\")\nload(\"//tools/go_stateify:defs.bzl\", \"go_library\")\n+package(licenses = [\"notice\"])\n+\ngo_template_instance(\nname = \"tcp_segment_list\",\nout = \"tcp_segment_list.go\",\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/udp/BUILD",
"new_path": "pkg/tcpip/transport/udp/BUILD",
"diff": "load(\"@io_bazel_rules_go//go:def.bzl\", \"go_test\")\n-\n-package(licenses = [\"notice\"])\n-\nload(\"//tools/go_generics:defs.bzl\", \"go_template_instance\")\nload(\"//tools/go_stateify:defs.bzl\", \"go_library\")\n+package(licenses = [\"notice\"])\n+\ngo_template_instance(\nname = \"udp_packet_list\",\nout = \"udp_packet_list.go\",\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/waiter/BUILD",
"new_path": "pkg/waiter/BUILD",
"diff": "load(\"@io_bazel_rules_go//go:def.bzl\", \"go_test\")\n-\n-package(licenses = [\"notice\"])\n-\nload(\"//tools/go_generics:defs.bzl\", \"go_template_instance\")\nload(\"//tools/go_stateify:defs.bzl\", \"go_library\")\n+package(licenses = [\"notice\"])\n+\ngo_template_instance(\nname = \"waiter_list\",\nout = \"waiter_list.go\",\n"
}
] | Go | Apache License 2.0 | google/gvisor | Reorder BUILD license and load functions in netstack.
PiperOrigin-RevId: 274672346 |
259,883 | 02.04.2019 17:13:00 | -28,800 | e3d4a6773923a884986aaa4bb272431ce27764e2 | support /proc/net/snmp
This proc file contains statistics according to [1].
[1] | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/proc/net.go",
"new_path": "pkg/sentry/fs/proc/net.go",
"diff": "@@ -18,6 +18,7 @@ import (\n\"bytes\"\n\"fmt\"\n\"io\"\n+ \"reflect\"\n\"time\"\n\"gvisor.dev/gvisor/pkg/abi/linux\"\n@@ -33,6 +34,7 @@ import (\n\"gvisor.dev/gvisor/pkg/sentry/socket/unix\"\n\"gvisor.dev/gvisor/pkg/sentry/socket/unix/transport\"\n\"gvisor.dev/gvisor/pkg/sentry/usermem\"\n+ \"gvisor.dev/gvisor/pkg/syserror\"\n)\n// newNet creates a new proc net entry.\n@@ -41,6 +43,7 @@ func (p *proc) newNetDir(ctx context.Context, k *kernel.Kernel, msrc *fs.MountSo\nif s := p.k.NetworkStack(); s != nil {\ncontents = map[string]*fs.Inode{\n\"dev\": seqfile.NewSeqFileInode(ctx, &netDev{s: s}, msrc),\n+ \"snmp\": seqfile.NewSeqFileInode(ctx, &netSnmp{s: s}, msrc),\n// The following files are simple stubs until they are\n// implemented in netstack, if the file contains a\n@@ -195,6 +198,118 @@ func (n *netDev) ReadSeqFileData(ctx context.Context, h seqfile.SeqHandle) ([]se\nreturn data, 0\n}\n+// netSnmp implements seqfile.SeqSource for /proc/net/snmp.\n+//\n+// +stateify savable\n+type netSnmp struct {\n+ s inet.Stack\n+}\n+\n+// NeedsUpdate implements seqfile.SeqSource.NeedsUpdate.\n+func (n *netSnmp) NeedsUpdate(generation int64) bool {\n+ return true\n+}\n+\n+type snmpLine struct {\n+ prefix string\n+ header string\n+}\n+\n+var snmp = []snmpLine{\n+ {\n+ prefix: \"Ip\",\n+ header: \"Forwarding DefaultTTL InReceives InHdrErrors InAddrErrors ForwDatagrams InUnknownProtos InDiscards InDelivers OutRequests OutDiscards OutNoRoutes ReasmTimeout ReasmReqds ReasmOKs ReasmFails FragOKs FragFails FragCreates\",\n+ },\n+ {\n+ prefix: \"Icmp\",\n+ header: \"InMsgs InErrors InCsumErrors InDestUnreachs InTimeExcds InParmProbs InSrcQuenchs InRedirects InEchos InEchoReps InTimestamps InTimestampReps InAddrMasks InAddrMaskReps OutMsgs OutErrors OutDestUnreachs OutTimeExcds OutParmProbs OutSrcQuenchs OutRedirects OutEchos OutEchoReps OutTimestamps OutTimestampReps OutAddrMasks OutAddrMaskReps\",\n+ },\n+ {\n+ prefix: \"IcmpMsg\",\n+ },\n+ {\n+ prefix: \"Tcp\",\n+ header: \"RtoAlgorithm RtoMin RtoMax MaxConn ActiveOpens PassiveOpens AttemptFails EstabResets CurrEstab InSegs OutSegs RetransSegs InErrs OutRsts InCsumErrors\",\n+ },\n+ {\n+ prefix: \"Udp\",\n+ header: \"InDatagrams NoPorts InErrors OutDatagrams RcvbufErrors SndbufErrors InCsumErrors IgnoredMulti\",\n+ },\n+ {\n+ prefix: \"UdpLite\",\n+ header: \"InDatagrams NoPorts InErrors OutDatagrams RcvbufErrors SndbufErrors InCsumErrors IgnoredMulti\",\n+ },\n+}\n+\n+func toSlice(a interface{}) []uint64 {\n+ v := reflect.Indirect(reflect.ValueOf(a))\n+ return v.Slice(0, v.Len()).Interface().([]uint64)\n+}\n+\n+func sprintSlice(s []uint64) string {\n+ if len(s) == 0 {\n+ return \"\"\n+ }\n+ r := fmt.Sprint(s)\n+ return r[1 : len(r)-1] // Remove \"[]\" introduced by fmt of slice.\n+}\n+\n+// ReadSeqFileData implements seqfile.SeqSource.ReadSeqFileData. 
See Linux's\n+// net/core/net-procfs.c:dev_seq_show.\n+func (n *netSnmp) ReadSeqFileData(ctx context.Context, h seqfile.SeqHandle) ([]seqfile.SeqData, int64) {\n+ if h != nil {\n+ return nil, 0\n+ }\n+\n+ contents := make([]string, 0, len(snmp)*2)\n+ types := []interface{}{\n+ &inet.StatSNMPIP{},\n+ &inet.StatSNMPICMP{},\n+ nil, // TODO(gvisor.dev/issue/628): Support IcmpMsg stats.\n+ &inet.StatSNMPTCP{},\n+ &inet.StatSNMPUDP{},\n+ &inet.StatSNMPUDPLite{},\n+ }\n+ for i, stat := range types {\n+ line := snmp[i]\n+ if stat == nil {\n+ contents = append(\n+ contents,\n+ fmt.Sprintf(\"%s:\\n\", line.prefix),\n+ fmt.Sprintf(\"%s:\\n\", line.prefix),\n+ )\n+ continue\n+ }\n+ if err := n.s.Statistics(stat, line.prefix); err != nil {\n+ if err == syserror.EOPNOTSUPP {\n+ log.Infof(\"Failed to retrieve %s of /proc/net/snmp: %v\", line.prefix, err)\n+ } else {\n+ log.Warningf(\"Failed to retrieve %s of /proc/net/snmp: %v\", line.prefix, err)\n+ }\n+ }\n+ var values string\n+ if line.prefix == \"Tcp\" {\n+ tcp := stat.(*inet.StatSNMPTCP)\n+ // \"Tcp\" needs special processing because MaxConn is signed. RFC 2012.\n+ values = fmt.Sprintf(\"%s %d %s\", sprintSlice(tcp[:3]), int64(tcp[3]), sprintSlice(tcp[4:]))\n+ } else {\n+ values = sprintSlice(toSlice(stat))\n+ }\n+ contents = append(\n+ contents,\n+ fmt.Sprintf(\"%s: %s\\n\", line.prefix, line.header),\n+ fmt.Sprintf(\"%s: %s\\n\", line.prefix, values),\n+ )\n+ }\n+\n+ data := make([]seqfile.SeqData, 0, len(snmp)*2)\n+ for _, l := range contents {\n+ data = append(data, seqfile.SeqData{Buf: []byte(l), Handle: (*netSnmp)(nil)})\n+ }\n+\n+ return data, 0\n+}\n+\n// netUnix implements seqfile.SeqSource for /proc/net/unix.\n//\n// +stateify savable\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/inet/inet.go",
"new_path": "pkg/sentry/inet/inet.go",
"diff": "@@ -153,3 +153,23 @@ type Route struct {\n// GatewayAddr is the route gateway address (RTA_GATEWAY).\nGatewayAddr []byte\n}\n+\n+// Below SNMP metrics are from Linux/usr/include/linux/snmp.h.\n+\n+// StatSNMPIP describes Ip line of /proc/net/snmp.\n+type StatSNMPIP [19]uint64\n+\n+// StatSNMPICMP describes Icmp line of /proc/net/snmp.\n+type StatSNMPICMP [27]uint64\n+\n+// StatSNMPICMPMSG describes IcmpMsg line of /proc/net/snmp.\n+type StatSNMPICMPMSG [512]uint64\n+\n+// StatSNMPTCP describes Tcp line of /proc/net/snmp.\n+type StatSNMPTCP [15]uint64\n+\n+// StatSNMPUDP describes Udp line of /proc/net/snmp.\n+type StatSNMPUDP [8]uint64\n+\n+// StatSNMPUDPLite describes UdpLite line of /proc/net/snmp.\n+type StatSNMPUDPLite [8]uint64\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/BUILD",
"new_path": "test/syscalls/linux/BUILD",
"diff": "@@ -1552,6 +1552,7 @@ cc_binary(\nsrcs = [\"proc_net.cc\"],\nlinkstatic = 1,\ndeps = [\n+ \":socket_test_util\",\n\"//test/util:capability_util\",\n\"//test/util:file_descriptor\",\n\"//test/util:fs_util\",\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/proc_net.cc",
"new_path": "test/syscalls/linux/proc_net.cc",
"diff": "// See the License for the specific language governing permissions and\n// limitations under the License.\n-#include \"gtest/gtest.h\"\n+#include <arpa/inet.h>\n+#include <errno.h>\n+#include <netinet/in.h>\n+#include <sys/types.h>\n+#include <sys/socket.h>\n+#include <sys/syscall.h>\n+\n+#include \"absl/strings/str_split.h\"\n#include \"gtest/gtest.h\"\n#include \"test/util/capability_util.h\"\n+#include \"test/syscalls/linux/socket_test_util.h\"\n#include \"test/util/file_descriptor.h\"\n#include \"test/util/fs_util.h\"\n#include \"test/util/test_util.h\"\n@@ -57,6 +65,209 @@ TEST(ProcSysNetIpv4Sack, CanReadAndWrite) {\nEXPECT_EQ(buf, to_write);\n}\n+PosixErrorOr<uint64_t> GetSNMPMetricFromProc(const std::string snmp,\n+ const std::string &type,\n+ const std::string &item) {\n+ std::vector<std::string> snmp_vec = absl::StrSplit(snmp, '\\n');\n+\n+ // /proc/net/snmp prints a line of headers followed by a line of metrics.\n+ // Only search the headers.\n+ for (unsigned i = 0; i < snmp_vec.size(); i = i + 2) {\n+ if (!absl::StartsWith(snmp_vec[i], type)) continue;\n+\n+ std::vector<std::string> fields =\n+ absl::StrSplit(snmp_vec[i], ' ', absl::SkipWhitespace());\n+\n+ EXPECT_TRUE((i + 1) < snmp_vec.size());\n+ std::vector<std::string> values =\n+ absl::StrSplit(snmp_vec[i + 1], ' ', absl::SkipWhitespace());\n+\n+ EXPECT_TRUE(!fields.empty() && fields.size() == values.size());\n+\n+ // Metrics start at the first index.\n+ for (unsigned j = 1; j < fields.size(); j++) {\n+ if (fields[j] == item) {\n+ uint64_t val;\n+ if (!absl::SimpleAtoi(values[j], &val)) {\n+ return PosixError(EINVAL,\n+ absl::StrCat(\"field is not a number: \", values[j]));\n+ }\n+\n+ return val;\n+ }\n+ }\n+ }\n+ // We should never get here.\n+ return PosixError(\n+ EINVAL, absl::StrCat(\"failed to find \", type, \"/\", item, \" in:\", snmp));\n+}\n+\n+TEST(ProcNetSnmp, TcpReset) {\n+ // TODO(gvisor.dev/issue/866): epsocket metrics are not savable.\n+ const DisableSave ds;\n+\n+ uint64_t oldAttemptFails;\n+ uint64_t oldActiveOpens;\n+ uint64_t oldOutRsts;\n+ auto snmp = ASSERT_NO_ERRNO_AND_VALUE(GetContents(\"/proc/net/snmp\"));\n+ oldActiveOpens = ASSERT_NO_ERRNO_AND_VALUE(GetSNMPMetricFromProc(snmp, \"Tcp\", \"ActiveOpens\"));\n+ oldOutRsts = ASSERT_NO_ERRNO_AND_VALUE(GetSNMPMetricFromProc(snmp, \"Tcp\", \"OutRsts\"));\n+ oldAttemptFails = ASSERT_NO_ERRNO_AND_VALUE(GetSNMPMetricFromProc(snmp, \"Tcp\", \"AttemptFails\"));\n+\n+ FileDescriptor s = ASSERT_NO_ERRNO_AND_VALUE(Socket(AF_INET, SOCK_STREAM, 0));\n+\n+ struct sockaddr_in sin = {\n+ .sin_family = AF_INET,\n+ .sin_port = htons(1234),\n+ };\n+ sin.sin_addr.s_addr = inet_addr(\"127.0.0.1\");\n+ ASSERT_THAT(connect(s.get(), (struct sockaddr *)&sin, sizeof(sin)),\n+ SyscallFailsWithErrno(ECONNREFUSED));\n+\n+ uint64_t newAttemptFails;\n+ uint64_t newActiveOpens;\n+ uint64_t newOutRsts;\n+ snmp = ASSERT_NO_ERRNO_AND_VALUE(GetContents(\"/proc/net/snmp\"));\n+ newActiveOpens = ASSERT_NO_ERRNO_AND_VALUE(GetSNMPMetricFromProc(snmp, \"Tcp\", \"ActiveOpens\"));\n+ newOutRsts = ASSERT_NO_ERRNO_AND_VALUE(GetSNMPMetricFromProc(snmp, \"Tcp\", \"OutRsts\"));\n+ newAttemptFails = ASSERT_NO_ERRNO_AND_VALUE(GetSNMPMetricFromProc(snmp, \"Tcp\", \"AttemptFails\"));\n+\n+ EXPECT_EQ(oldActiveOpens, newActiveOpens - 1);\n+ EXPECT_EQ(oldOutRsts, newOutRsts - 1);\n+ EXPECT_EQ(oldAttemptFails, newAttemptFails - 1);\n+}\n+\n+TEST(ProcNetSnmp, TcpEstab) {\n+ // TODO(gvisor.dev/issue/866): epsocket metrics are not savable.\n+ const DisableSave ds;\n+\n+ uint64_t oldEstabResets;\n+ 
uint64_t oldActiveOpens;\n+ uint64_t oldPassiveOpens;\n+ uint64_t oldCurrEstab;\n+ auto snmp = ASSERT_NO_ERRNO_AND_VALUE(GetContents(\"/proc/net/snmp\"));\n+ oldActiveOpens = ASSERT_NO_ERRNO_AND_VALUE(GetSNMPMetricFromProc(snmp, \"Tcp\", \"ActiveOpens\"));\n+ oldPassiveOpens = ASSERT_NO_ERRNO_AND_VALUE(GetSNMPMetricFromProc(snmp, \"Tcp\", \"PassiveOpens\"));\n+ oldCurrEstab = ASSERT_NO_ERRNO_AND_VALUE(GetSNMPMetricFromProc(snmp, \"Tcp\", \"CurrEstab\"));\n+ oldEstabResets = ASSERT_NO_ERRNO_AND_VALUE(GetSNMPMetricFromProc(snmp, \"Tcp\", \"EstabResets\"));\n+\n+ FileDescriptor s_listen =\n+ ASSERT_NO_ERRNO_AND_VALUE(Socket(AF_INET, SOCK_STREAM, 0));\n+\n+ struct sockaddr_in sin = {\n+ .sin_family = AF_INET,\n+ .sin_port = htons(1234),\n+ };\n+ sin.sin_addr.s_addr = inet_addr(\"127.0.0.1\");\n+ ASSERT_THAT(bind(s_listen.get(), (struct sockaddr *)&sin, sizeof(sin)),\n+ SyscallSucceeds());\n+ ASSERT_THAT(listen(s_listen.get(), 1), SyscallSucceeds());\n+\n+ FileDescriptor s_connect =\n+ ASSERT_NO_ERRNO_AND_VALUE(Socket(AF_INET, SOCK_STREAM, 0));\n+ ASSERT_THAT(connect(s_connect.get(), (struct sockaddr *)&sin, sizeof(sin)),\n+ SyscallSucceeds());\n+\n+ auto s_accept =\n+ ASSERT_NO_ERRNO_AND_VALUE(Accept(s_listen.get(), nullptr, nullptr));\n+\n+ uint64_t newEstabResets;\n+ uint64_t newActiveOpens;\n+ uint64_t newPassiveOpens;\n+ uint64_t newCurrEstab;\n+ snmp = ASSERT_NO_ERRNO_AND_VALUE(GetContents(\"/proc/net/snmp\"));\n+ newActiveOpens = ASSERT_NO_ERRNO_AND_VALUE(GetSNMPMetricFromProc(snmp, \"Tcp\", \"ActiveOpens\"));\n+ newPassiveOpens = ASSERT_NO_ERRNO_AND_VALUE(GetSNMPMetricFromProc(snmp, \"Tcp\", \"PassiveOpens\"));\n+ newCurrEstab = ASSERT_NO_ERRNO_AND_VALUE(GetSNMPMetricFromProc(snmp, \"Tcp\", \"CurrEstab\"));\n+\n+ EXPECT_EQ(oldActiveOpens, newActiveOpens - 1);\n+ EXPECT_EQ(oldPassiveOpens, newPassiveOpens - 1);\n+ EXPECT_EQ(oldCurrEstab, newCurrEstab - 2);\n+\n+ ASSERT_THAT(send(s_connect.get(), \"a\", 1, 0), SyscallSucceedsWithValue(1));\n+\n+ s_accept.reset(-1);\n+ s_connect.reset(-1);\n+\n+ snmp = ASSERT_NO_ERRNO_AND_VALUE(GetContents(\"/proc/net/snmp\"));\n+ newCurrEstab = ASSERT_NO_ERRNO_AND_VALUE(GetSNMPMetricFromProc(snmp, \"Tcp\", \"CurrEstab\"));\n+ newEstabResets = ASSERT_NO_ERRNO_AND_VALUE(GetSNMPMetricFromProc(snmp, \"Tcp\", \"EstabResets\"));\n+\n+ EXPECT_EQ(oldCurrEstab, newCurrEstab);\n+ EXPECT_EQ(oldEstabResets, newEstabResets - 2);\n+}\n+\n+TEST(ProcNetSnmp, UdpNoPorts) {\n+ // TODO(gvisor.dev/issue/866): epsocket metrics are not savable.\n+ const DisableSave ds;\n+\n+ uint64_t oldOutDatagrams;\n+ uint64_t oldNoPorts;\n+ auto snmp = ASSERT_NO_ERRNO_AND_VALUE(GetContents(\"/proc/net/snmp\"));\n+ oldOutDatagrams = ASSERT_NO_ERRNO_AND_VALUE(GetSNMPMetricFromProc(snmp, \"Udp\", \"OutDatagrams\"));\n+ oldNoPorts = ASSERT_NO_ERRNO_AND_VALUE(GetSNMPMetricFromProc(snmp, \"Udp\", \"NoPorts\"));\n+\n+ FileDescriptor s =\n+ ASSERT_NO_ERRNO_AND_VALUE(Socket(AF_INET, SOCK_DGRAM, 0));\n+\n+ struct sockaddr_in sin = {\n+ .sin_family = AF_INET,\n+ .sin_port = htons(1234),\n+ };\n+ sin.sin_addr.s_addr = inet_addr(\"127.0.0.1\");\n+ ASSERT_THAT(sendto(s.get(), \"a\", 1, 0, (struct sockaddr *)&sin, sizeof(sin)),\n+ SyscallSucceedsWithValue(1));\n+\n+ uint64_t newOutDatagrams;\n+ uint64_t newNoPorts;\n+ snmp = ASSERT_NO_ERRNO_AND_VALUE(GetContents(\"/proc/net/snmp\"));\n+ newOutDatagrams = ASSERT_NO_ERRNO_AND_VALUE(GetSNMPMetricFromProc(snmp, \"Udp\", \"OutDatagrams\"));\n+ newNoPorts = ASSERT_NO_ERRNO_AND_VALUE(GetSNMPMetricFromProc(snmp, \"Udp\", \"NoPorts\"));\n+\n+ 
EXPECT_EQ(oldOutDatagrams, newOutDatagrams - 1);\n+ EXPECT_EQ(oldNoPorts, newNoPorts - 1);\n+}\n+\n+TEST(ProcNetSnmp, UdpIn) {\n+ // TODO(gvisor.dev/issue/866): epsocket metrics are not savable.\n+ const DisableSave ds;\n+\n+ uint64_t oldOutDatagrams;\n+ uint64_t oldInDatagrams;\n+ auto snmp = ASSERT_NO_ERRNO_AND_VALUE(GetContents(\"/proc/net/snmp\"));\n+ oldOutDatagrams = ASSERT_NO_ERRNO_AND_VALUE(GetSNMPMetricFromProc(snmp, \"Udp\", \"OutDatagrams\"));\n+ oldInDatagrams = ASSERT_NO_ERRNO_AND_VALUE(GetSNMPMetricFromProc(snmp, \"Udp\", \"InDatagrams\"));\n+\n+ FileDescriptor server =\n+ ASSERT_NO_ERRNO_AND_VALUE(Socket(AF_INET, SOCK_DGRAM, 0));\n+\n+ struct sockaddr_in sin = {\n+ .sin_family = AF_INET,\n+ .sin_port = htons(1234),\n+ };\n+ sin.sin_addr.s_addr = inet_addr(\"127.0.0.1\");\n+ ASSERT_THAT(bind(server.get(), (struct sockaddr *)&sin, sizeof(sin)),\n+ SyscallSucceeds());\n+\n+ FileDescriptor client =\n+ ASSERT_NO_ERRNO_AND_VALUE(Socket(AF_INET, SOCK_DGRAM, 0));\n+ ASSERT_THAT(sendto(client.get(), \"a\", 1, 0, (struct sockaddr *)&sin,\n+ sizeof(sin)), SyscallSucceedsWithValue(1));\n+\n+ char buf[128];\n+ ASSERT_THAT(recvfrom(server.get(), buf, sizeof(buf), 0, NULL, NULL),\n+ SyscallSucceedsWithValue(1));\n+\n+ uint64_t newOutDatagrams;\n+ uint64_t newInDatagrams;\n+ snmp = ASSERT_NO_ERRNO_AND_VALUE(GetContents(\"/proc/net/snmp\"));\n+ newOutDatagrams = ASSERT_NO_ERRNO_AND_VALUE(GetSNMPMetricFromProc(snmp, \"Udp\", \"OutDatagrams\"));\n+ newInDatagrams = ASSERT_NO_ERRNO_AND_VALUE(GetSNMPMetricFromProc(snmp, \"Udp\", \"InDatagrams\"));\n+\n+ EXPECT_EQ(oldOutDatagrams, newOutDatagrams - 1);\n+ EXPECT_EQ(oldInDatagrams, newInDatagrams - 1);\n+}\n+\n} // namespace\n} // namespace testing\n} // namespace gvisor\n"
}
] | Go | Apache License 2.0 | google/gvisor | support /proc/net/snmp
This proc file contains statistics according to [1].
[1] https://tools.ietf.org/html/rfc2013
Signed-off-by: Jianfeng Tan <[email protected]>
Change-Id: I9662132085edd8a7783d356ce4237d7ac0800d94 |
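A stand-alone sketch of the output format this commit produces: /proc/net/snmp emits two lines per protocol prefix, one with the field names and one with the values, shown here for a Udp counter array shaped like inet.StatSNMPUDP (simplified; no reflection or stack plumbing):

package main

import (
    "fmt"
    "strings"
)

// statSNMPUDP mirrors inet.StatSNMPUDP: a fixed-size array of counters in
// the field order of the Udp line.
type statSNMPUDP [8]uint64

// snmpLines renders the header line and the value line for the Udp prefix.
func snmpLines(header string, stat statSNMPUDP) string {
    vals := make([]string, len(stat))
    for i, v := range stat {
        vals[i] = fmt.Sprintf("%d", v)
    }
    return fmt.Sprintf("Udp: %s\nUdp: %s\n", header, strings.Join(vals, " "))
}

func main() {
    header := "InDatagrams NoPorts InErrors OutDatagrams RcvbufErrors SndbufErrors InCsumErrors IgnoredMulti"
    fmt.Print(snmpLines(header, statSNMPUDP{42, 1, 0, 37, 0, 0, 0, 0}))
}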
259,883 | 18.04.2019 11:41:13 | -28,800 | b94505ecc020e63a7e5cab0f1bb5ea898ea05ec5 | support /proc/net/route
This proc file reports routing information to applications inside the
container. | [
{
"change_type": "MODIFY",
"old_path": "pkg/abi/linux/netlink_route.go",
"new_path": "pkg/abi/linux/netlink_route.go",
"diff": "@@ -325,3 +325,9 @@ const (\nRTA_SPORT = 28\nRTA_DPORT = 29\n)\n+\n+// Route flags, from include/uapi/linux/route.h.\n+const (\n+ RTF_GATEWAY = 0x2\n+ RTF_UP = 0x1\n+)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/proc/BUILD",
"new_path": "pkg/sentry/fs/proc/BUILD",
"diff": "@@ -53,6 +53,7 @@ go_library(\n\"//pkg/sentry/usage\",\n\"//pkg/sentry/usermem\",\n\"//pkg/syserror\",\n+ \"//pkg/tcpip/header\",\n\"//pkg/waiter\",\n],\n)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/proc/net.go",
"new_path": "pkg/sentry/fs/proc/net.go",
"diff": "@@ -35,6 +35,7 @@ import (\n\"gvisor.dev/gvisor/pkg/sentry/socket/unix/transport\"\n\"gvisor.dev/gvisor/pkg/sentry/usermem\"\n\"gvisor.dev/gvisor/pkg/syserror\"\n+ \"gvisor.dev/gvisor/pkg/tcpip/header\"\n)\n// newNet creates a new proc net entry.\n@@ -60,7 +61,7 @@ func (p *proc) newNetDir(ctx context.Context, k *kernel.Kernel, msrc *fs.MountSo\n// (ClockGetres returns 1ns resolution).\n\"psched\": newStaticProcInode(ctx, msrc, []byte(fmt.Sprintf(\"%08x %08x %08x %08x\\n\", uint64(time.Microsecond/time.Nanosecond), 64, 1000000, uint64(time.Second/time.Nanosecond)))),\n\"ptype\": newStaticProcInode(ctx, msrc, []byte(\"Type Device Function\")),\n- \"route\": newStaticProcInode(ctx, msrc, []byte(\"Iface Destination Gateway Flags RefCnt Use Metric Mask MTU Window IRTT\")),\n+ \"route\": seqfile.NewSeqFileInode(ctx, &netRoute{s: s}, msrc),\n\"tcp\": seqfile.NewSeqFileInode(ctx, &netTCP{k: k}, msrc),\n\"udp\": seqfile.NewSeqFileInode(ctx, &netUDP{k: k}, msrc),\n\"unix\": seqfile.NewSeqFileInode(ctx, &netUnix{k: k}, msrc),\n@@ -310,6 +311,81 @@ func (n *netSnmp) ReadSeqFileData(ctx context.Context, h seqfile.SeqHandle) ([]s\nreturn data, 0\n}\n+// netRoute implements seqfile.SeqSource for /proc/net/route.\n+//\n+// +stateify savable\n+type netRoute struct {\n+ s inet.Stack\n+}\n+\n+// NeedsUpdate implements seqfile.SeqSource.NeedsUpdate.\n+func (n *netRoute) NeedsUpdate(generation int64) bool {\n+ return true\n+}\n+\n+// ReadSeqFileData implements seqfile.SeqSource.ReadSeqFileData.\n+// See Linux's net/ipv4/fib_trie.c:fib_route_seq_show.\n+func (n *netRoute) ReadSeqFileData(ctx context.Context, h seqfile.SeqHandle) ([]seqfile.SeqData, int64) {\n+ if h != nil {\n+ return nil, 0\n+ }\n+\n+ interfaces := n.s.Interfaces()\n+ contents := []string{\"Iface\\tDestination\\tGateway\\tFlags\\tRefCnt\\tUse\\tMetric\\tMask\\tMTU\\tWindow\\tIRTT\"}\n+ for _, rt := range n.s.RouteTable() {\n+ // /proc/net/route only includes ipv4 routes.\n+ if rt.Family != linux.AF_INET {\n+ continue\n+ }\n+\n+ // /proc/net/route does not include broadcast or multicast routes.\n+ if rt.Type == linux.RTN_BROADCAST || rt.Type == linux.RTN_MULTICAST {\n+ continue\n+ }\n+\n+ iface, ok := interfaces[rt.OutputInterface]\n+ if !ok || iface.Name == \"lo\" {\n+ continue\n+ }\n+\n+ var (\n+ gw uint32\n+ prefix uint32\n+ flags = linux.RTF_UP\n+ )\n+ if len(rt.GatewayAddr) == header.IPv4AddressSize {\n+ flags |= linux.RTF_GATEWAY\n+ gw = usermem.ByteOrder.Uint32(rt.GatewayAddr)\n+ }\n+ if len(rt.DstAddr) == header.IPv4AddressSize {\n+ prefix = usermem.ByteOrder.Uint32(rt.DstAddr)\n+ }\n+ l := fmt.Sprintf(\n+ \"%s\\t%08X\\t%08X\\t%04X\\t%d\\t%d\\t%d\\t%08X\\t%d\\t%d\\t%d\",\n+ iface.Name,\n+ prefix,\n+ gw,\n+ flags,\n+ 0, // RefCnt.\n+ 0, // Use.\n+ 0, // Metric.\n+ (uint32(1)<<rt.DstLen)-1,\n+ 0, // MTU.\n+ 0, // Window.\n+ 0, // RTT.\n+ )\n+ contents = append(contents, l)\n+ }\n+\n+ var data []seqfile.SeqData\n+ for _, l := range contents {\n+ l = fmt.Sprintf(\"%-127s\\n\", l)\n+ data = append(data, seqfile.SeqData{Buf: []byte(l), Handle: (*netRoute)(nil)})\n+ }\n+\n+ return data, 0\n+}\n+\n// netUnix implements seqfile.SeqSource for /proc/net/unix.\n//\n// +stateify savable\n"
}
] | Go | Apache License 2.0 | google/gvisor | support /proc/net/route
This proc file reports routing information to applications inside the
container.
Signed-off-by: Jianfeng Tan <[email protected]>
Change-Id: I498e47f8c4c185419befbb42d849d0b099ec71f3 |
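A condensed illustration of the line format used above: /proc/net/route prints destination, gateway and mask as 8-digit hex in host byte order, flags as RTF_UP optionally or'd with RTF_GATEWAY, and pads each line to 127 characters. The zero columns (RefCnt, Use, Metric, MTU, Window, IRTT) are hard-coded as in the commit; the sample gateway value assumes a little-endian host.

package main

import "fmt"

// Route flag bits from include/uapi/linux/route.h.
const (
    rtfUp      = 0x1
    rtfGateway = 0x2
)

// routeLine formats one /proc/net/route entry.
func routeLine(iface string, dst, gw uint32, dstLen uint) string {
    flags := rtfUp
    if gw != 0 {
        flags |= rtfGateway
    }
    l := fmt.Sprintf("%s\t%08X\t%08X\t%04X\t%d\t%d\t%d\t%08X\t%d\t%d\t%d",
        iface, dst, gw, flags, 0, 0, 0, (uint32(1)<<dstLen)-1, 0, 0, 0)
    return fmt.Sprintf("%-127s\n", l)
}

func main() {
    // Default route via 10.0.0.1 on eth0 (0x0100000A when the address bytes
    // are read in little-endian host order).
    fmt.Print(routeLine("eth0", 0, 0x0100000A, 0))
}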
259,883 | 16.05.2019 16:21:46 | -28,800 | dd7d1f825d2f6464b61287b3a324c13139b0d661 | hostinet: support /proc/net/snmp and /proc/net/dev
For hostinet, we inherit the data from host procfs. To do that, we
cache the fds for these files for later reads.
Fixes | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/hostinet/stack.go",
"new_path": "pkg/sentry/socket/hostinet/stack.go",
"diff": "@@ -16,8 +16,11 @@ package hostinet\nimport (\n\"fmt\"\n+ \"io\"\n\"io/ioutil\"\n\"os\"\n+ \"reflect\"\n+ \"strconv\"\n\"strings\"\n\"syscall\"\n@@ -26,6 +29,7 @@ import (\n\"gvisor.dev/gvisor/pkg/sentry/context\"\n\"gvisor.dev/gvisor/pkg/sentry/inet\"\n\"gvisor.dev/gvisor/pkg/sentry/usermem\"\n+ \"gvisor.dev/gvisor/pkg/syserr\"\n\"gvisor.dev/gvisor/pkg/syserror\"\n)\n@@ -51,6 +55,8 @@ type Stack struct {\ntcpRecvBufSize inet.TCPBufferSize\ntcpSendBufSize inet.TCPBufferSize\ntcpSACKEnabled bool\n+ netDevFile *os.File\n+ netSNMPFile *os.File\n}\n// NewStack returns an empty Stack containing no configuration.\n@@ -98,6 +104,18 @@ func (s *Stack) Configure() error {\nlog.Warningf(\"Failed to read if TCP SACK if enabled, setting to true\")\n}\n+ if f, err := os.Open(\"/proc/net/dev\"); err != nil {\n+ log.Warningf(\"Failed to open /proc/net/dev: %v\", err)\n+ } else {\n+ s.netDevFile = f\n+ }\n+\n+ if f, err := os.Open(\"/proc/net/snmp\"); err != nil {\n+ log.Warningf(\"Failed to open /proc/net/snmp: %v\", err)\n+ } else {\n+ s.netSNMPFile = f\n+ }\n+\nreturn nil\n}\n@@ -326,9 +344,95 @@ func (s *Stack) SetTCPSACKEnabled(enabled bool) error {\nreturn syserror.EACCES\n}\n+// getLine reads one line from proc file, with specified prefix.\n+// The last argument, withHeader, specifies if it contains line header.\n+func getLine(f *os.File, prefix string, withHeader bool) string {\n+ data := make([]byte, 4096)\n+\n+ if _, err := f.Seek(0, 0); err != nil {\n+ return \"\"\n+ }\n+\n+ if _, err := io.ReadFull(f, data); err != io.ErrUnexpectedEOF {\n+ return \"\"\n+ }\n+\n+ prefix = prefix + \":\"\n+ lines := strings.Split(string(data), \"\\n\")\n+ for _, l := range lines {\n+ l = strings.TrimSpace(l)\n+ if strings.HasPrefix(l, prefix) {\n+ if withHeader {\n+ withHeader = false\n+ continue\n+ }\n+ return l\n+ }\n+ }\n+ return \"\"\n+}\n+\n+func toSlice(i interface{}) []uint64 {\n+ v := reflect.Indirect(reflect.ValueOf(i))\n+ return v.Slice(0, v.Len()).Interface().([]uint64)\n+}\n+\n// Statistics implements inet.Stack.Statistics.\nfunc (s *Stack) Statistics(stat interface{}, arg string) error {\n- return syserror.EOPNOTSUPP\n+ var (\n+ snmpTCP bool\n+ rawLine string\n+ sliceStat []uint64\n+ )\n+\n+ switch stat.(type) {\n+ case *inet.StatDev:\n+ if s.netDevFile == nil {\n+ return fmt.Errorf(\"/proc/net/dev is not opened for hostinet\")\n+ }\n+ rawLine = getLine(s.netDevFile, arg, false /* with no header */)\n+ case *inet.StatSNMPIP, *inet.StatSNMPICMP, *inet.StatSNMPICMPMSG, *inet.StatSNMPTCP, *inet.StatSNMPUDP, *inet.StatSNMPUDPLite:\n+ if s.netSNMPFile == nil {\n+ return fmt.Errorf(\"/proc/net/snmp is not opened for hostinet\")\n+ }\n+ rawLine = getLine(s.netSNMPFile, arg, true)\n+ default:\n+ return syserr.ErrEndpointOperation.ToError()\n+ }\n+\n+ if rawLine == \"\" {\n+ return fmt.Errorf(\"Failed to get raw line\")\n+ }\n+\n+ parts := strings.SplitN(rawLine, \":\", 2)\n+ if len(parts) != 2 {\n+ return fmt.Errorf(\"Failed to get prefix from: %q\", rawLine)\n+ }\n+\n+ sliceStat = toSlice(stat)\n+ fields := strings.Fields(strings.TrimSpace(parts[1]))\n+ if len(fields) != len(sliceStat) {\n+ return fmt.Errorf(\"Failed to parse fields: %q\", rawLine)\n+ }\n+ if _, ok := stat.(*inet.StatSNMPTCP); ok {\n+ snmpTCP = true\n+ }\n+ for i := 0; i < len(sliceStat); i++ {\n+ var err error\n+ if snmpTCP && i == 3 {\n+ var tmp int64\n+ // MaxConn field is signed, RFC 2012.\n+ tmp, err = strconv.ParseInt(fields[i], 10, 64)\n+ sliceStat[i] = uint64(tmp) // Convert back to int before use.\n+ } else {\n+ 
sliceStat[i], err = strconv.ParseUint(fields[i], 10, 64)\n+ }\n+ if err != nil {\n+ return fmt.Errorf(\"Failed to parse field %d from: %q, %v\", i, rawLine, err)\n+ }\n+ }\n+\n+ return nil\n}\n// RouteTable implements inet.Stack.RouteTable.\n"
}
] | Go | Apache License 2.0 | google/gvisor | hostinet: support /proc/net/snmp and /proc/net/dev
For hostinet, we inherit the data from host procfs. To do that, we
cache the fds for these files for later reads.
Fixes #506
Signed-off-by: Jianfeng Tan <[email protected]>
Change-Id: I2f81215477455b9c59acf67e33f5b9af28ee0165 |
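The parsing step, extracted into a small self-contained function for illustration (the commit's getLine/seek handling and reflection into the stat structs are omitted): split the matched line on ':', then parse whitespace-separated fields, treating Tcp's MaxConn (index 3) as signed per RFC 2012.

package main

import (
    "fmt"
    "strconv"
    "strings"
)

// parseSNMPLine turns one value line of /proc/net/snmp (already matched by
// prefix) into counters; MaxConn is parsed as int64 and stored as uint64.
func parseSNMPLine(raw string, isTCP bool) ([]uint64, error) {
    parts := strings.SplitN(raw, ":", 2)
    if len(parts) != 2 {
        return nil, fmt.Errorf("no prefix in %q", raw)
    }
    fields := strings.Fields(strings.TrimSpace(parts[1]))
    out := make([]uint64, len(fields))
    for i, f := range fields {
        if isTCP && i == 3 {
            v, err := strconv.ParseInt(f, 10, 64)
            if err != nil {
                return nil, err
            }
            out[i] = uint64(v)
            continue
        }
        v, err := strconv.ParseUint(f, 10, 64)
        if err != nil {
            return nil, err
        }
        out[i] = v
    }
    return out, nil
}

func main() {
    line := "Tcp: 1 200 120000 -1 10 7 0 0 3 100 90 2 0 1 0"
    stats, err := parseSNMPLine(line, true)
    fmt.Println(stats, err)
}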
259,883 | 29.08.2019 16:23:11 | 0 | aee2c93366f451b9cc0a62430185749556fc3900 | netstack: add counters for tcp CurrEstab and EstabResets | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/netstack/netstack.go",
"new_path": "pkg/sentry/socket/netstack/netstack.go",
"diff": "@@ -148,6 +148,8 @@ var Metrics = tcpip.Stats{\nTCP: tcpip.TCPStats{\nActiveConnectionOpenings: mustCreateMetric(\"/netstack/tcp/active_connection_openings\", \"Number of connections opened successfully via Connect.\"),\nPassiveConnectionOpenings: mustCreateMetric(\"/netstack/tcp/passive_connection_openings\", \"Number of connections opened successfully via Listen.\"),\n+ CurrentEstablished: mustCreateMetric(\"/netstack/tcp/current_established\", \"Number of connections in either ESTABLISHED or CLOSE-WAIT state now.\"),\n+ EstablishedResets: mustCreateMetric(\"/netstack/tcp/established_resets\", \"Number of times TCP connections have made a direct transition to the CLOSED state from either the ESTABLISHED state or the CLOSE-WAIT state\"),\nListenOverflowSynDrop: mustCreateMetric(\"/netstack/tcp/listen_overflow_syn_drop\", \"Number of times the listen queue overflowed and a SYN was dropped.\"),\nListenOverflowAckDrop: mustCreateMetric(\"/netstack/tcp/listen_overflow_ack_drop\", \"Number of times the listen queue overflowed and the final ACK in the handshake was dropped.\"),\nListenOverflowSynCookieSent: mustCreateMetric(\"/netstack/tcp/listen_overflow_syn_cookie_sent\", \"Number of times a SYN cookie was sent.\"),\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/tcpip.go",
"new_path": "pkg/tcpip/tcpip.go",
"diff": "@@ -673,6 +673,11 @@ func (s *StatCounter) Increment() {\ns.IncrementBy(1)\n}\n+// Decrement minuses one to the counter.\n+func (s *StatCounter) Decrement() {\n+ s.IncrementBy(^uint64(0))\n+}\n+\n// Value returns the current value of the counter.\nfunc (s *StatCounter) Value() uint64 {\nreturn atomic.LoadUint64(&s.count)\n@@ -881,6 +886,15 @@ type TCPStats struct {\n// successfully via Listen.\nPassiveConnectionOpenings *StatCounter\n+ // CurrentEstablished is the number of TCP connections for which the\n+ // current state is either ESTABLISHED or CLOSE-WAIT.\n+ CurrentEstablished *StatCounter\n+\n+ // EstablishedResets is the number of times TCP connections have made\n+ // a direct transition to the CLOSED state from either the\n+ // ESTABLISHED state or the CLOSE-WAIT state.\n+ EstablishedResets *StatCounter\n+\n// ListenOverflowSynDrop is the number of times the listen queue overflowed\n// and a SYN was dropped.\nListenOverflowSynDrop *StatCounter\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/accept.go",
"new_path": "pkg/tcpip/transport/tcp/accept.go",
"diff": "@@ -297,7 +297,10 @@ func (l *listenContext) createEndpointAndPerformHandshake(s *segment, opts *head\nreturn nil, err\n}\nep.mu.Lock()\n+ if ep.state != StateEstablished {\n+ ep.stack.Stats().TCP.CurrentEstablished.Increment()\nep.state = StateEstablished\n+ }\nep.mu.Unlock()\n// Update the receive window scaling. We can't do it before the\n@@ -519,6 +522,7 @@ func (e *endpoint) handleListenSegment(ctx *listenContext, s *segment) {\nn.tsOffset = 0\n// Switch state to connected.\n+ n.stack.Stats().TCP.CurrentEstablished.Increment()\nn.state = StateEstablished\n// Do the delivery in a separate goroutine so\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/connect.go",
"new_path": "pkg/tcpip/transport/tcp/connect.go",
"diff": "@@ -754,6 +754,10 @@ func (e *endpoint) handleClose() *tcpip.Error {\nfunc (e *endpoint) resetConnectionLocked(err *tcpip.Error) {\n// Only send a reset if the connection is being aborted for a reason\n// other than receiving a reset.\n+ if e.state == StateEstablished || e.state == StateCloseWait {\n+ e.stack.Stats().TCP.EstablishedResets.Increment()\n+ e.stack.Stats().TCP.CurrentEstablished.Decrement()\n+ }\ne.state = StateError\ne.HardError = err\nif err != tcpip.ErrConnectionReset {\n@@ -924,6 +928,10 @@ func (e *endpoint) protocolMainLoop(handshake bool) *tcpip.Error {\ne.lastErrorMu.Unlock()\ne.mu.Lock()\n+ if e.state == StateEstablished || e.state == StateCloseWait {\n+ e.stack.Stats().TCP.EstablishedResets.Increment()\n+ e.stack.Stats().TCP.CurrentEstablished.Decrement()\n+ }\ne.state = StateError\ne.HardError = err\n@@ -954,7 +962,10 @@ func (e *endpoint) protocolMainLoop(handshake bool) *tcpip.Error {\n// Tell waiters that the endpoint is connected and writable.\ne.mu.Lock()\n+ if e.state != StateEstablished {\n+ e.stack.Stats().TCP.CurrentEstablished.Increment()\ne.state = StateEstablished\n+ }\ndrained := e.drainDone != nil\ne.mu.Unlock()\nif drained {\n@@ -1115,6 +1126,10 @@ func (e *endpoint) protocolMainLoop(handshake bool) *tcpip.Error {\n// Mark endpoint as closed.\ne.mu.Lock()\nif e.state != StateError {\n+ if e.state == StateEstablished || e.state == StateCloseWait {\n+ e.stack.Stats().TCP.EstablishedResets.Increment()\n+ e.stack.Stats().TCP.CurrentEstablished.Decrement()\n+ }\ne.state = StateClose\n}\n// Lock released below.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/endpoint.go",
"new_path": "pkg/tcpip/transport/tcp/endpoint.go",
"diff": "@@ -1729,6 +1729,7 @@ func (e *endpoint) connect(addr tcpip.FullAddress, handshake bool, run bool) *tc\ne.segmentQueue.mu.Unlock()\ne.snd.updateMaxPayloadSize(int(e.route.MTU()), 0)\ne.state = StateEstablished\n+ e.stack.Stats().TCP.CurrentEstablished.Increment()\n}\nif run {\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/snd.go",
"new_path": "pkg/tcpip/transport/tcp/snd.go",
"diff": "@@ -674,6 +674,7 @@ func (s *sender) maybeSendSegment(seg *segment, limit int, end seqnum.Value) (se\ndefault:\ns.ep.state = StateFinWait1\n}\n+ s.ep.stack.Stats().TCP.CurrentEstablished.Decrement()\ns.ep.mu.Unlock()\n} else {\n// We're sending a non-FIN segment.\n"
}
] | Go | Apache License 2.0 | google/gvisor | netstack: add counters for tcp CurrEstab and EstabResets
Signed-off-by: Jianfeng Tan <[email protected]> |
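The new Decrement relies on unsigned wrap-around: adding ^uint64(0), i.e. 2^64-1, is the same as subtracting one. A stripped-down StatCounter showing the trick:

package main

import (
    "fmt"
    "sync/atomic"
)

// statCounter is a minimal version of tcpip.StatCounter.
type statCounter struct {
    count uint64
}

func (s *statCounter) IncrementBy(v uint64) { atomic.AddUint64(&s.count, v) }
func (s *statCounter) Increment()           { s.IncrementBy(1) }

// Decrement adds 2^64-1, which subtracts one in modular arithmetic.
func (s *statCounter) Decrement() { s.IncrementBy(^uint64(0)) }

func (s *statCounter) Value() uint64 { return atomic.LoadUint64(&s.count) }

func main() {
    var c statCounter
    c.Increment() // e.g. a connection reaches ESTABLISHED
    c.Increment()
    c.Decrement() // e.g. a connection leaves ESTABLISHED
    fmt.Println(c.Value()) // 1
}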
259,883 | 20.05.2019 11:26:10 | 0 | d277bfba2702b319d8336b65429cf8775661ea2f | epsocket: support /proc/net/snmp
Netstack has its own stats, we use this to fill /proc/net/snmp.
Note that some metrics are not recorded in Netstack, which will be shown
as 0 in the proc file. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/netstack/stack.go",
"new_path": "pkg/sentry/socket/netstack/stack.go",
"diff": "@@ -144,8 +144,99 @@ func (s *Stack) SetTCPSACKEnabled(enabled bool) error {\n// Statistics implements inet.Stack.Statistics.\nfunc (s *Stack) Statistics(stat interface{}, arg string) error {\n+ switch stats := stat.(type) {\n+ case *inet.StatSNMPIP:\n+ ip := Metrics.IP\n+ *stats = inet.StatSNMPIP{\n+ 0, // TODO(gvisor.dev/issue/969): Support Ip/Forwarding.\n+ 0, // TODO(gvisor.dev/issue/969): Support Ip/DefaultTTL.\n+ ip.PacketsReceived.Value(), // InReceives.\n+ 0, // TODO(gvisor.dev/issue/969): Support Ip/InHdrErrors.\n+ ip.InvalidAddressesReceived.Value(), // InAddrErrors.\n+ 0, // TODO(gvisor.dev/issue/969): Support Ip/ForwDatagrams.\n+ 0, // TODO(gvisor.dev/issue/969): Support Ip/InUnknownProtos.\n+ 0, // TODO(gvisor.dev/issue/969): Support Ip/InDiscards.\n+ ip.PacketsDelivered.Value(), // InDelivers.\n+ ip.PacketsSent.Value(), // OutRequests.\n+ ip.OutgoingPacketErrors.Value(), // OutDiscards.\n+ 0, // TODO(gvisor.dev/issue/969): Support Ip/OutNoRoutes.\n+ 0, // TODO(gvisor.dev/issue/969): Support Ip/ReasmTimeout.\n+ 0, // TODO(gvisor.dev/issue/969): Support Ip/ReasmReqds.\n+ 0, // TODO(gvisor.dev/issue/969): Support Ip/ReasmOKs.\n+ 0, // TODO(gvisor.dev/issue/969): Support Ip/ReasmFails.\n+ 0, // TODO(gvisor.dev/issue/969): Support Ip/FragOKs.\n+ 0, // TODO(gvisor.dev/issue/969): Support Ip/FragFails.\n+ 0, // TODO(gvisor.dev/issue/969): Support Ip/FragCreates.\n+ }\n+ case *inet.StatSNMPICMP:\n+ in := Metrics.ICMP.V4PacketsReceived.ICMPv4PacketStats\n+ out := Metrics.ICMP.V4PacketsSent.ICMPv4PacketStats\n+ *stats = inet.StatSNMPICMP{\n+ 0, // TODO(gvisor.dev/issue/969): Support Icmp/InMsgs.\n+ Metrics.ICMP.V4PacketsSent.Dropped.Value(), // InErrors.\n+ 0, // TODO(gvisor.dev/issue/969): Support Icmp/InCsumErrors.\n+ in.DstUnreachable.Value(), // InDestUnreachs.\n+ in.TimeExceeded.Value(), // InTimeExcds.\n+ in.ParamProblem.Value(), // InParmProbs.\n+ in.SrcQuench.Value(), // InSrcQuenchs.\n+ in.Redirect.Value(), // InRedirects.\n+ in.Echo.Value(), // InEchos.\n+ in.EchoReply.Value(), // InEchoReps.\n+ in.Timestamp.Value(), // InTimestamps.\n+ in.TimestampReply.Value(), // InTimestampReps.\n+ in.InfoRequest.Value(), // InAddrMasks.\n+ in.InfoReply.Value(), // InAddrMaskReps.\n+ 0, // TODO(gvisor.dev/issue/969): Support Icmp/OutMsgs.\n+ Metrics.ICMP.V4PacketsReceived.Invalid.Value(), // OutErrors.\n+ out.DstUnreachable.Value(), // OutDestUnreachs.\n+ out.TimeExceeded.Value(), // OutTimeExcds.\n+ out.ParamProblem.Value(), // OutParmProbs.\n+ out.SrcQuench.Value(), // OutSrcQuenchs.\n+ out.Redirect.Value(), // OutRedirects.\n+ out.Echo.Value(), // OutEchos.\n+ out.EchoReply.Value(), // OutEchoReps.\n+ out.Timestamp.Value(), // OutTimestamps.\n+ out.TimestampReply.Value(), // OutTimestampReps.\n+ out.InfoRequest.Value(), // OutAddrMasks.\n+ out.InfoReply.Value(), // OutAddrMaskReps.\n+ }\n+ case *inet.StatSNMPTCP:\n+ tcp := Metrics.TCP\n+ // RFC 2012 (updates 1213): SNMPv2-MIB-TCP.\n+ *stats = inet.StatSNMPTCP{\n+ 1, // RtoAlgorithm.\n+ 200, // RtoMin.\n+ 120000, // RtoMax.\n+ (1<<64 - 1), // MaxConn.\n+ tcp.ActiveConnectionOpenings.Value(), // ActiveOpens.\n+ tcp.PassiveConnectionOpenings.Value(), // PassiveOpens.\n+ tcp.FailedConnectionAttempts.Value(), // AttemptFails.\n+ tcp.EstablishedResets.Value(), // EstabResets.\n+ tcp.CurrentEstablished.Value(), // CurrEstab.\n+ tcp.ValidSegmentsReceived.Value(), // InSegs.\n+ tcp.SegmentsSent.Value(), // OutSegs.\n+ tcp.Retransmits.Value(), // RetransSegs.\n+ tcp.InvalidSegmentsReceived.Value(), // InErrs.\n+ tcp.ResetsSent.Value(), 
// OutRsts.\n+ tcp.ChecksumErrors.Value(), // InCsumErrors.\n+ }\n+ case *inet.StatSNMPUDP:\n+ udp := Metrics.UDP\n+ *stats = inet.StatSNMPUDP{\n+ udp.PacketsReceived.Value(), // InDatagrams.\n+ udp.UnknownPortErrors.Value(), // NoPorts.\n+ 0, // TODO(gvisor.dev/issue/969): Support Udp/InErrors.\n+ udp.PacketsSent.Value(), // OutDatagrams.\n+ udp.ReceiveBufferErrors.Value(), // RcvbufErrors.\n+ 0, // TODO(gvisor.dev/issue/969): Support Udp/SndbufErrors.\n+ 0, // TODO(gvisor.dev/issue/969): Support Udp/InCsumErrors.\n+ 0, // TODO(gvisor.dev/issue/969): Support Udp/IgnoredMulti.\n+ }\n+ default:\nreturn syserr.ErrEndpointOperation.ToError()\n}\n+ return nil\n+}\n// RouteTable implements inet.Stack.RouteTable.\nfunc (s *Stack) RouteTable() []inet.Route {\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/accept.go",
"new_path": "pkg/tcpip/transport/tcp/accept.go",
"diff": "@@ -297,10 +297,8 @@ func (l *listenContext) createEndpointAndPerformHandshake(s *segment, opts *head\nreturn nil, err\n}\nep.mu.Lock()\n- if ep.state != StateEstablished {\nep.stack.Stats().TCP.CurrentEstablished.Increment()\nep.state = StateEstablished\n- }\nep.mu.Unlock()\n// Update the receive window scaling. We can't do it before the\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/connect.go",
"new_path": "pkg/tcpip/transport/tcp/connect.go",
"diff": "@@ -928,10 +928,8 @@ func (e *endpoint) protocolMainLoop(handshake bool) *tcpip.Error {\ne.lastErrorMu.Unlock()\ne.mu.Lock()\n- if e.state == StateEstablished || e.state == StateCloseWait {\ne.stack.Stats().TCP.EstablishedResets.Increment()\ne.stack.Stats().TCP.CurrentEstablished.Decrement()\n- }\ne.state = StateError\ne.HardError = err\n@@ -1126,10 +1124,8 @@ func (e *endpoint) protocolMainLoop(handshake bool) *tcpip.Error {\n// Mark endpoint as closed.\ne.mu.Lock()\nif e.state != StateError {\n- if e.state == StateEstablished || e.state == StateCloseWait {\ne.stack.Stats().TCP.EstablishedResets.Increment()\ne.stack.Stats().TCP.CurrentEstablished.Decrement()\n- }\ne.state = StateClose\n}\n// Lock released below.\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/proc_net.cc",
"new_path": "test/syscalls/linux/proc_net.cc",
"diff": "#include <arpa/inet.h>\n#include <errno.h>\n#include <netinet/in.h>\n+#include <poll.h>\n#include <sys/types.h>\n#include <sys/socket.h>\n#include <sys/syscall.h>\n#include \"absl/strings/str_split.h\"\n+#include \"absl/time/clock.h\"\n+#include \"absl/time/time.h\"\n#include \"gtest/gtest.h\"\n#include \"test/util/capability_util.h\"\n#include \"test/syscalls/linux/socket_test_util.h\"\n@@ -184,11 +187,31 @@ TEST(ProcNetSnmp, TcpEstab) {\nEXPECT_EQ(oldPassiveOpens, newPassiveOpens - 1);\nEXPECT_EQ(oldCurrEstab, newCurrEstab - 2);\n+ // Send 1 byte from client to server.\nASSERT_THAT(send(s_connect.get(), \"a\", 1, 0), SyscallSucceedsWithValue(1));\n+ constexpr int kPollTimeoutMs = 20000; // Wait up to 20 seconds for the data.\n+\n+ // Wait until server-side fd sees the data on its side but don't read it.\n+ struct pollfd poll_fd = {s_accept.get(), POLLIN, 0};\n+ ASSERT_THAT(RetryEINTR(poll)(&poll_fd, 1, kPollTimeoutMs),\n+ SyscallSucceedsWithValue(1));\n+\n+ // Now close server-side fd without reading the data which leads to a RST\n+ // packet sent to client side.\ns_accept.reset(-1);\n+\n+ // Wait until client-side fd sees RST packet.\n+ struct pollfd poll_fd1 = {s_connect.get(), POLLIN, 0};\n+ ASSERT_THAT(RetryEINTR(poll)(&poll_fd1, 1, kPollTimeoutMs),\n+ SyscallSucceedsWithValue(1));\n+\n+ // Now close client-side fd.\ns_connect.reset(-1);\n+ // Wait until the process of the netstack.\n+ absl::SleepFor(absl::Seconds(1.0));\n+\nsnmp = ASSERT_NO_ERRNO_AND_VALUE(GetContents(\"/proc/net/snmp\"));\nnewCurrEstab = ASSERT_NO_ERRNO_AND_VALUE(GetSNMPMetricFromProc(snmp, \"Tcp\", \"CurrEstab\"));\nnewEstabResets = ASSERT_NO_ERRNO_AND_VALUE(GetSNMPMetricFromProc(snmp, \"Tcp\", \"EstabResets\"));\n"
}
] | Go | Apache License 2.0 | google/gvisor | epsocket: support /proc/net/snmp
Netstack has its own stats; we use these to fill /proc/net/snmp.
Note that some metrics are not recorded in Netstack, so they will be shown
as 0 in the proc file.
Signed-off-by: Jianfeng Tan <[email protected]>
Change-Id: Ie0089184507d16f49bc0057b4b0482094417ebe1 |
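As background for the change above: each line of /proc/net/snmp is a protocol name followed by a fixed-order list of counters, which is what the new inet.StatSNMP* structs feed. Below is a minimal, self-contained Go sketch of rendering a Tcp: line; tcpCounters and formatTCPLine are hypothetical names used only for illustration (the leading RtoAlgorithm/RtoMin/RtoMax/MaxConn columns are hard-coded placeholders), not the sentry's actual formatting code.

package main

import "fmt"

// tcpCounters holds a few of the counters that back the Tcp: line in
// /proc/net/snmp. Illustrative only; the change above populates
// inet.StatSNMPTCP and the proc file code does the real formatting.
type tcpCounters struct {
    ActiveOpens, PassiveOpens, AttemptFails, EstabResets, CurrEstab uint64
    InSegs, OutSegs, RetransSegs, InErrs, OutRsts                   uint64
}

// formatTCPLine renders the counters in the order /proc/net/snmp expects,
// after four fixed columns shown here with placeholder values.
func formatTCPLine(c tcpCounters) string {
    return fmt.Sprintf("Tcp: 1 200 120000 -1 %d %d %d %d %d %d %d %d %d %d",
        c.ActiveOpens, c.PassiveOpens, c.AttemptFails, c.EstabResets,
        c.CurrEstab, c.InSegs, c.OutSegs, c.RetransSegs, c.InErrs, c.OutRsts)
}

func main() {
    fmt.Println(formatTCPLine(tcpCounters{ActiveOpens: 3, CurrEstab: 1, InSegs: 42, OutSegs: 40}))
}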
259,885 | 15.10.2019 18:39:16 | 25,200 | 0457a4c4cb67537b09f47e58764f22434dbeff02 | Minor vfs.FileDescriptionImpl fixes.
Pass context.Context to OnClose().
Pass memmap.MMapOpts to ConfigureMMap() by pointer so that implementations
can actually mutate it as required. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/ext/directory.go",
"new_path": "pkg/sentry/fsimpl/ext/directory.go",
"diff": "@@ -301,8 +301,8 @@ func (fd *directoryFD) Seek(ctx context.Context, offset int64, whence int32) (in\nreturn offset, nil\n}\n-// IterDirents implements vfs.FileDescriptionImpl.IterDirents.\n-func (fd *directoryFD) ConfigureMMap(ctx context.Context, opts memmap.MMapOpts) error {\n+// ConfigureMMap implements vfs.FileDescriptionImpl.ConfigureMMap.\n+func (fd *directoryFD) ConfigureMMap(ctx context.Context, opts *memmap.MMapOpts) error {\n// mmap(2) specifies that EACCESS should be returned for non-regular file fds.\nreturn syserror.EACCES\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/ext/file_description.go",
"new_path": "pkg/sentry/fsimpl/ext/file_description.go",
"diff": "@@ -43,9 +43,6 @@ func (fd *fileDescription) inode() *inode {\nreturn fd.vfsfd.VirtualDentry().Dentry().Impl().(*dentry).inode\n}\n-// OnClose implements vfs.FileDescriptionImpl.OnClose.\n-func (fd *fileDescription) OnClose() error { return nil }\n-\n// StatusFlags implements vfs.FileDescriptionImpl.StatusFlags.\nfunc (fd *fileDescription) StatusFlags(ctx context.Context) (uint32, error) {\nreturn fd.flags, nil\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/ext/regular_file.go",
"new_path": "pkg/sentry/fsimpl/ext/regular_file.go",
"diff": "@@ -152,8 +152,8 @@ func (fd *regularFileFD) Seek(ctx context.Context, offset int64, whence int32) (\nreturn offset, nil\n}\n-// IterDirents implements vfs.FileDescriptionImpl.IterDirents.\n-func (fd *regularFileFD) ConfigureMMap(ctx context.Context, opts memmap.MMapOpts) error {\n+// ConfigureMMap implements vfs.FileDescriptionImpl.ConfigureMMap.\n+func (fd *regularFileFD) ConfigureMMap(ctx context.Context, opts *memmap.MMapOpts) error {\n// TODO(b/134676337): Implement mmap(2).\nreturn syserror.ENODEV\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/ext/symlink.go",
"new_path": "pkg/sentry/fsimpl/ext/symlink.go",
"diff": "@@ -105,7 +105,7 @@ func (fd *symlinkFD) Seek(ctx context.Context, offset int64, whence int32) (int6\nreturn 0, syserror.EBADF\n}\n-// IterDirents implements vfs.FileDescriptionImpl.IterDirents.\n-func (fd *symlinkFD) ConfigureMMap(ctx context.Context, opts memmap.MMapOpts) error {\n+// ConfigureMMap implements vfs.FileDescriptionImpl.ConfigureMMap.\n+func (fd *symlinkFD) ConfigureMMap(ctx context.Context, opts *memmap.MMapOpts) error {\nreturn syserror.EBADF\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/vfs/file_description.go",
"new_path": "pkg/sentry/vfs/file_description.go",
"diff": "@@ -102,7 +102,7 @@ type FileDescriptionImpl interface {\n// OnClose is called when a file descriptor representing the\n// FileDescription is closed. Note that returning a non-nil error does not\n// prevent the file descriptor from being closed.\n- OnClose() error\n+ OnClose(ctx context.Context) error\n// StatusFlags returns file description status flags, as for\n// fcntl(F_GETFL).\n@@ -180,7 +180,7 @@ type FileDescriptionImpl interface {\n// ConfigureMMap mutates opts to implement mmap(2) for the file. Most\n// implementations that support memory mapping can call\n// GenericConfigureMMap with the appropriate memmap.Mappable.\n- ConfigureMMap(ctx context.Context, opts memmap.MMapOpts) error\n+ ConfigureMMap(ctx context.Context, opts *memmap.MMapOpts) error\n// Ioctl implements the ioctl(2) syscall.\nIoctl(ctx context.Context, uio usermem.IO, args arch.SyscallArguments) (uintptr, error)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/vfs/file_description_impl_util.go",
"new_path": "pkg/sentry/vfs/file_description_impl_util.go",
"diff": "@@ -45,7 +45,7 @@ type FileDescriptionDefaultImpl struct{}\n// OnClose implements FileDescriptionImpl.OnClose analogously to\n// file_operations::flush == NULL in Linux.\n-func (FileDescriptionDefaultImpl) OnClose() error {\n+func (FileDescriptionDefaultImpl) OnClose(ctx context.Context) error {\nreturn nil\n}\n@@ -117,7 +117,7 @@ func (FileDescriptionDefaultImpl) Sync(ctx context.Context) error {\n// ConfigureMMap implements FileDescriptionImpl.ConfigureMMap analogously to\n// file_operations::mmap == NULL in Linux.\n-func (FileDescriptionDefaultImpl) ConfigureMMap(ctx context.Context, opts memmap.MMapOpts) error {\n+func (FileDescriptionDefaultImpl) ConfigureMMap(ctx context.Context, opts *memmap.MMapOpts) error {\nreturn syserror.ENODEV\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Minor vfs.FileDescriptionImpl fixes.
- Pass context.Context to OnClose().
- Pass memmap.MMapOpts to ConfigureMMap() by pointer so that implementations
can actually mutate it as required.
PiperOrigin-RevId: 274934967 |
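Why the pointer matters for ConfigureMMap: the implementation is expected to fill in fields of the caller's memmap.MMapOpts, and with a value parameter those writes land on a copy and are silently lost. A minimal sketch under that assumption, using a hypothetical mmapOpts type rather than the real one:

package main

import "fmt"

// mmapOpts stands in for memmap.MMapOpts; only one illustrative field is shown.
type mmapOpts struct {
    Mappable string
}

// configureByValue receives a copy, so its write is never seen by the caller.
func configureByValue(opts mmapOpts) { opts.Mappable = "regular file" }

// configureByPointer mutates the caller's struct, which is what
// ConfigureMMap implementations need to be able to do.
func configureByPointer(opts *mmapOpts) { opts.Mappable = "regular file" }

func main() {
    var o mmapOpts
    configureByValue(o)
    fmt.Printf("by value:   %+v\n", o) // Mappable is still empty.
    configureByPointer(&o)
    fmt.Printf("by pointer: %+v\n", o) // Mappable is now set.
}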
259,881 | 16.10.2019 13:23:44 | 25,200 | de9a8e0eb7d66e75c4367be3437c4eb36a080f67 | Remove death from exec test names
These aren't actually death tests in the GUnit sense, i.e., they don't call
EXPECT_EXIT or EXPECT_DEATH. | [
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/exec.cc",
"new_path": "test/syscalls/linux/exec.cc",
"diff": "@@ -140,57 +140,57 @@ void CheckOutput(const std::string& filename, const ExecveArray& argv,\nEXPECT_TRUE(absl::StrContains(output, expect_stderr)) << output;\n}\n-TEST(ExecDeathTest, EmptyPath) {\n+TEST(ExecTest, EmptyPath) {\nint execve_errno;\nASSERT_NO_ERRNO_AND_VALUE(ForkAndExec(\"\", {}, {}, nullptr, &execve_errno));\nEXPECT_EQ(execve_errno, ENOENT);\n}\n-TEST(ExecDeathTest, Basic) {\n+TEST(ExecTest, Basic) {\nCheckOutput(WorkloadPath(kBasicWorkload), {WorkloadPath(kBasicWorkload)}, {},\nArgEnvExitStatus(0, 0),\nabsl::StrCat(WorkloadPath(kBasicWorkload), \"\\n\"));\n}\n-TEST(ExecDeathTest, OneArg) {\n+TEST(ExecTest, OneArg) {\nCheckOutput(WorkloadPath(kBasicWorkload), {WorkloadPath(kBasicWorkload), \"1\"},\n{}, ArgEnvExitStatus(1, 0),\nabsl::StrCat(WorkloadPath(kBasicWorkload), \"\\n1\\n\"));\n}\n-TEST(ExecDeathTest, FiveArg) {\n+TEST(ExecTest, FiveArg) {\nCheckOutput(WorkloadPath(kBasicWorkload),\n{WorkloadPath(kBasicWorkload), \"1\", \"2\", \"3\", \"4\", \"5\"}, {},\nArgEnvExitStatus(5, 0),\nabsl::StrCat(WorkloadPath(kBasicWorkload), \"\\n1\\n2\\n3\\n4\\n5\\n\"));\n}\n-TEST(ExecDeathTest, OneEnv) {\n+TEST(ExecTest, OneEnv) {\nCheckOutput(WorkloadPath(kBasicWorkload), {WorkloadPath(kBasicWorkload)},\n{\"1\"}, ArgEnvExitStatus(0, 1),\nabsl::StrCat(WorkloadPath(kBasicWorkload), \"\\n1\\n\"));\n}\n-TEST(ExecDeathTest, FiveEnv) {\n+TEST(ExecTest, FiveEnv) {\nCheckOutput(WorkloadPath(kBasicWorkload), {WorkloadPath(kBasicWorkload)},\n{\"1\", \"2\", \"3\", \"4\", \"5\"}, ArgEnvExitStatus(0, 5),\nabsl::StrCat(WorkloadPath(kBasicWorkload), \"\\n1\\n2\\n3\\n4\\n5\\n\"));\n}\n-TEST(ExecDeathTest, OneArgOneEnv) {\n+TEST(ExecTest, OneArgOneEnv) {\nCheckOutput(WorkloadPath(kBasicWorkload),\n{WorkloadPath(kBasicWorkload), \"arg\"}, {\"env\"},\nArgEnvExitStatus(1, 1),\nabsl::StrCat(WorkloadPath(kBasicWorkload), \"\\narg\\nenv\\n\"));\n}\n-TEST(ExecDeathTest, InterpreterScript) {\n+TEST(ExecTest, InterpreterScript) {\nCheckOutput(WorkloadPath(kExitScript), {WorkloadPath(kExitScript), \"25\"}, {},\nArgEnvExitStatus(25, 0), \"\");\n}\n// Everything after the path in the interpreter script is a single argument.\n-TEST(ExecDeathTest, InterpreterScriptArgSplit) {\n+TEST(ExecTest, InterpreterScriptArgSplit) {\n// Symlink through /tmp to ensure the path is short enough.\nTempPath link = ASSERT_NO_ERRNO_AND_VALUE(\nTempPath::CreateSymlinkTo(\"/tmp\", WorkloadPath(kBasicWorkload)));\n@@ -204,7 +204,7 @@ TEST(ExecDeathTest, InterpreterScriptArgSplit) {\n}\n// Original argv[0] is replaced with the script path.\n-TEST(ExecDeathTest, InterpreterScriptArgvZero) {\n+TEST(ExecTest, InterpreterScriptArgvZero) {\n// Symlink through /tmp to ensure the path is short enough.\nTempPath link = ASSERT_NO_ERRNO_AND_VALUE(\nTempPath::CreateSymlinkTo(\"/tmp\", WorkloadPath(kBasicWorkload)));\n@@ -218,7 +218,7 @@ TEST(ExecDeathTest, InterpreterScriptArgvZero) {\n// Original argv[0] is replaced with the script path, exactly as passed to\n// execve.\n-TEST(ExecDeathTest, InterpreterScriptArgvZeroRelative) {\n+TEST(ExecTest, InterpreterScriptArgvZeroRelative) {\n// Symlink through /tmp to ensure the path is short enough.\nTempPath link = ASSERT_NO_ERRNO_AND_VALUE(\nTempPath::CreateSymlinkTo(\"/tmp\", WorkloadPath(kBasicWorkload)));\n@@ -235,7 +235,7 @@ TEST(ExecDeathTest, InterpreterScriptArgvZeroRelative) {\n}\n// argv[0] is added as the script path, even if there was none.\n-TEST(ExecDeathTest, InterpreterScriptArgvZeroAdded) {\n+TEST(ExecTest, InterpreterScriptArgvZeroAdded) {\n// Symlink through /tmp to ensure the 
path is short enough.\nTempPath link = ASSERT_NO_ERRNO_AND_VALUE(\nTempPath::CreateSymlinkTo(\"/tmp\", WorkloadPath(kBasicWorkload)));\n@@ -248,7 +248,7 @@ TEST(ExecDeathTest, InterpreterScriptArgvZeroAdded) {\n}\n// A NUL byte in the script line ends parsing.\n-TEST(ExecDeathTest, InterpreterScriptArgNUL) {\n+TEST(ExecTest, InterpreterScriptArgNUL) {\n// Symlink through /tmp to ensure the path is short enough.\nTempPath link = ASSERT_NO_ERRNO_AND_VALUE(\nTempPath::CreateSymlinkTo(\"/tmp\", WorkloadPath(kBasicWorkload)));\n@@ -263,7 +263,7 @@ TEST(ExecDeathTest, InterpreterScriptArgNUL) {\n}\n// Trailing whitespace following interpreter path is ignored.\n-TEST(ExecDeathTest, InterpreterScriptTrailingWhitespace) {\n+TEST(ExecTest, InterpreterScriptTrailingWhitespace) {\n// Symlink through /tmp to ensure the path is short enough.\nTempPath link = ASSERT_NO_ERRNO_AND_VALUE(\nTempPath::CreateSymlinkTo(\"/tmp\", WorkloadPath(kBasicWorkload)));\n@@ -276,7 +276,7 @@ TEST(ExecDeathTest, InterpreterScriptTrailingWhitespace) {\n}\n// Multiple whitespace characters between interpreter and arg allowed.\n-TEST(ExecDeathTest, InterpreterScriptArgWhitespace) {\n+TEST(ExecTest, InterpreterScriptArgWhitespace) {\n// Symlink through /tmp to ensure the path is short enough.\nTempPath link = ASSERT_NO_ERRNO_AND_VALUE(\nTempPath::CreateSymlinkTo(\"/tmp\", WorkloadPath(kBasicWorkload)));\n@@ -288,7 +288,7 @@ TEST(ExecDeathTest, InterpreterScriptArgWhitespace) {\nabsl::StrCat(link.path(), \"\\nfoo\\n\", script.path(), \"\\n\"));\n}\n-TEST(ExecDeathTest, InterpreterScriptNoPath) {\n+TEST(ExecTest, InterpreterScriptNoPath) {\nTempPath script = ASSERT_NO_ERRNO_AND_VALUE(\nTempPath::CreateFileWith(GetAbsoluteTestTmpdir(), \"#!\", 0755));\n@@ -299,7 +299,7 @@ TEST(ExecDeathTest, InterpreterScriptNoPath) {\n}\n// AT_EXECFN is the path passed to execve.\n-TEST(ExecDeathTest, ExecFn) {\n+TEST(ExecTest, ExecFn) {\n// Symlink through /tmp to ensure the path is short enough.\nTempPath link = ASSERT_NO_ERRNO_AND_VALUE(\nTempPath::CreateSymlinkTo(\"/tmp\", WorkloadPath(kStateWorkload)));\n@@ -318,14 +318,14 @@ TEST(ExecDeathTest, ExecFn) {\nabsl::StrCat(script_relative, \"\\n\"));\n}\n-TEST(ExecDeathTest, ExecName) {\n+TEST(ExecTest, ExecName) {\nstd::string path = WorkloadPath(kStateWorkload);\nCheckOutput(path, {path, \"PrintExecName\"}, {}, ArgEnvExitStatus(0, 0),\nabsl::StrCat(Basename(path).substr(0, 15), \"\\n\"));\n}\n-TEST(ExecDeathTest, ExecNameScript) {\n+TEST(ExecTest, ExecNameScript) {\n// Symlink through /tmp to ensure the path is short enough.\nTempPath link = ASSERT_NO_ERRNO_AND_VALUE(\nTempPath::CreateSymlinkTo(\"/tmp\", WorkloadPath(kStateWorkload)));\n@@ -341,14 +341,14 @@ TEST(ExecDeathTest, ExecNameScript) {\n}\n// execve may be called by a multithreaded process.\n-TEST(ExecDeathTest, WithSiblingThread) {\n+TEST(ExecTest, WithSiblingThread) {\nCheckOutput(\"/proc/self/exe\", {\"/proc/self/exe\", kExecWithThread}, {},\nW_EXITCODE(42, 0), \"\");\n}\n// execve may be called from a thread other than the leader of a multithreaded\n// process.\n-TEST(ExecDeathTest, FromSiblingThread) {\n+TEST(ExecTest, FromSiblingThread) {\nCheckOutput(\"/proc/self/exe\", {\"/proc/self/exe\", kExecFromThread}, {},\nW_EXITCODE(42, 0), \"\");\n}\n@@ -376,7 +376,7 @@ void SignalHandler(int signo) {\n// Signal handlers are reset on execve(2), unless they have default or ignored\n// disposition.\n-TEST(ExecStateDeathTest, HandlerReset) {\n+TEST(ExecStateTest, HandlerReset) {\nstruct sigaction sa;\nsa.sa_handler = 
SignalHandler;\nASSERT_THAT(sigaction(SIGUSR1, &sa, nullptr), SyscallSucceeds());\n@@ -392,7 +392,7 @@ TEST(ExecStateDeathTest, HandlerReset) {\n}\n// Ignored signal dispositions are not reset.\n-TEST(ExecStateDeathTest, IgnorePreserved) {\n+TEST(ExecStateTest, IgnorePreserved) {\nstruct sigaction sa;\nsa.sa_handler = SIG_IGN;\nASSERT_THAT(sigaction(SIGUSR1, &sa, nullptr), SyscallSucceeds());\n@@ -408,7 +408,7 @@ TEST(ExecStateDeathTest, IgnorePreserved) {\n}\n// Signal masks are not reset on exec\n-TEST(ExecStateDeathTest, SignalMask) {\n+TEST(ExecStateTest, SignalMask) {\nsigset_t s;\nsigemptyset(&s);\nsigaddset(&s, SIGUSR1);\n@@ -425,7 +425,7 @@ TEST(ExecStateDeathTest, SignalMask) {\n// itimers persist across execve.\n// N.B. Timers created with timer_create(2) should not be preserved!\n-TEST(ExecStateDeathTest, ItimerPreserved) {\n+TEST(ExecStateTest, ItimerPreserved) {\n// The fork in ForkAndExec clears itimers, so only set them up after fork.\nauto setup_itimer = [] {\n// Ignore SIGALRM, as we don't actually care about timer\n"
}
] | Go | Apache License 2.0 | google/gvisor | Remove death from exec test names
These aren't actually death tests in the GUnit sense, i.e., they don't call
EXPECT_EXIT or EXPECT_DEATH.
PiperOrigin-RevId: 275099957 |
259,881 | 16.10.2019 14:28:21 | 25,200 | bbdcf44ebbf1e001a134c9412719891f150befd6 | Fix syscall changes lost in rebase
These syscalls were changed in the amd64 file around the time the arm64 PR was
sent out, so their changes got lost.
Updates | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/syscalls/linux/linux64_arm64.go",
"new_path": "pkg/sentry/syscalls/linux/linux64_arm64.go",
"diff": "@@ -107,10 +107,10 @@ var ARM64 = &kernel.SyscallTable{\n71: syscalls.Supported(\"sendfile\", Sendfile),\n72: syscalls.Supported(\"pselect\", Pselect),\n73: syscalls.Supported(\"ppoll\", Ppoll),\n- 74: syscalls.ErrorWithEvent(\"signalfd4\", syserror.ENOSYS, \"\", []string{\"gvisor.dev/issue/139\"}), // TODO(b/19846426)\n+ 74: syscalls.PartiallySupported(\"signalfd4\", Signalfd4, \"Semantics are slightly different.\", []string{\"gvisor.dev/issue/139\"}),\n75: syscalls.ErrorWithEvent(\"vmsplice\", syserror.ENOSYS, \"\", []string{\"gvisor.dev/issue/138\"}), // TODO(b/29354098)\n76: syscalls.PartiallySupported(\"splice\", Splice, \"Stub implementation.\", []string{\"gvisor.dev/issue/138\"}), // TODO(b/29354098)\n- 77: syscalls.ErrorWithEvent(\"tee\", syserror.ENOSYS, \"\", []string{\"gvisor.dev/issue/138\"}), // TODO(b/29354098)\n+ 77: syscalls.Supported(\"tee\", Tee),\n78: syscalls.Supported(\"readlinkat\", Readlinkat),\n80: syscalls.Supported(\"fstat\", Fstat),\n81: syscalls.PartiallySupported(\"sync\", Sync, \"Full data flush is not guaranteed at this time.\", nil),\n@@ -245,7 +245,7 @@ var ARM64 = &kernel.SyscallTable{\n210: syscalls.PartiallySupported(\"shutdown\", Shutdown, \"Not all flags and control messages are supported.\", nil),\n211: syscalls.Supported(\"sendmsg\", SendMsg),\n212: syscalls.PartiallySupported(\"recvmsg\", RecvMsg, \"Not all flags and control messages are supported.\", nil),\n- 213: syscalls.ErrorWithEvent(\"readahead\", syserror.ENOSYS, \"\", []string{\"gvisor.dev/issue/261\"}), // TODO(b/29351341)\n+ 213: syscalls.Supported(\"readahead\", Readahead),\n214: syscalls.Supported(\"brk\", Brk),\n215: syscalls.Supported(\"munmap\", Munmap),\n216: syscalls.Supported(\"mremap\", Mremap),\n"
}
] | Go | Apache License 2.0 | google/gvisor | Fix syscall changes lost in rebase
These syscalls were changed in the amd64 file around the time the arm64 PR was
sent out, so their changes got lost.
Updates #63
PiperOrigin-RevId: 275114194 |
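For context, the table being patched is essentially a map from syscall number to an annotated implementation, and the fix restores three arm64 entries to the state the amd64 table already had. A rough, self-contained sketch of that shape follows; entry, supported and partiallySupported are simplified stand-ins, not the real kernel.SyscallTable or syscalls helpers.

package main

import "fmt"

// entry is a simplified stand-in for a syscall table entry.
type entry struct {
    name    string
    support string
}

func supported(name string) entry          { return entry{name, "supported"} }
func partiallySupported(name string) entry { return entry{name, "partially supported"} }

// arm64Entries sketches the numbering restored by the commit above.
var arm64Entries = map[uintptr]entry{
    74:  partiallySupported("signalfd4"),
    77:  supported("tee"),
    213: supported("readahead"),
}

func main() {
    for nr, e := range arm64Entries {
        fmt.Printf("syscall %d (%s): %s\n", nr, e.name, e.support)
    }
}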
260,004 | 17.10.2019 13:05:03 | 25,200 | 962aa235de4e614147dd00b55967614e93ba2660 | NDP Neighbor Solicitations sent during DAD must have an IP hop limit of 255
NDP Neighbor Solicitations sent during Duplicate Address Detection must have an
IP hop limit of 255, as all NDP Neighbor Solicitations should have.
Test: Test that DAD messages have the IPv6 hop limit field set to 255. | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/header/icmpv6.go",
"new_path": "pkg/tcpip/header/icmpv6.go",
"diff": "@@ -80,6 +80,13 @@ const (\n// icmpv6SequenceOffset is the offset of the sequence field\n// in a ICMPv6 Echo Request/Reply message.\nicmpv6SequenceOffset = 6\n+\n+ // NDPHopLimit is the expected IP hop limit value of 255 for received\n+ // NDP packets, as per RFC 4861 sections 4.1 - 4.5, 6.1.1, 6.1.2, 7.1.1,\n+ // 7.1.2 and 8.1. If the hop limit value is not 255, nodes MUST silently\n+ // drop the NDP packet. All outgoing NDP packets must use this value for\n+ // its IP hop limit field.\n+ NDPHopLimit = 255\n)\n// ICMPv6Type is the ICMP type field described in RFC 4443 and friends.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/ipv6/icmp.go",
"new_path": "pkg/tcpip/network/ipv6/icmp.go",
"diff": "@@ -21,15 +21,6 @@ import (\n\"gvisor.dev/gvisor/pkg/tcpip/stack\"\n)\n-const (\n- // ndpHopLimit is the expected IP hop limit value of 255 for received\n- // NDP packets, as per RFC 4861 sections 4.1 - 4.5, 6.1.1, 6.1.2, 7.1.1,\n- // 7.1.2 and 8.1. If the hop limit value is not 255, nodes MUST silently\n- // drop the NDP packet. All outgoing NDP packets must use this value for\n- // its IP hop limit field.\n- ndpHopLimit = 255\n-)\n-\n// handleControl handles the case when an ICMP packet contains the headers of\n// the original packet that caused the ICMP one to be sent. This information is\n// used to find out which transport endpoint must be notified about the ICMP\n@@ -90,7 +81,7 @@ func (e *endpoint) handleICMP(r *stack.Route, netHeader buffer.View, vv buffer.V\nheader.ICMPv6RouterSolicit,\nheader.ICMPv6RouterAdvert,\nheader.ICMPv6RedirectMsg:\n- if iph.HopLimit() != ndpHopLimit {\n+ if iph.HopLimit() != header.NDPHopLimit {\nreceived.Invalid.Increment()\nreturn\n}\n@@ -217,7 +208,7 @@ func (e *endpoint) handleICMP(r *stack.Route, netHeader buffer.View, vv buffer.V\n//\n// The IP Hop Limit field has a value of 255, i.e., the packet\n// could not possibly have been forwarded by a router.\n- if err := r.WritePacket(nil /* gso */, hdr, buffer.VectorisedView{}, stack.NetworkHeaderParams{Protocol: header.ICMPv6ProtocolNumber, TTL: ndpHopLimit, TOS: stack.DefaultTOS}); err != nil {\n+ if err := r.WritePacket(nil /* gso */, hdr, buffer.VectorisedView{}, stack.NetworkHeaderParams{Protocol: header.ICMPv6ProtocolNumber, TTL: header.NDPHopLimit, TOS: stack.DefaultTOS}); err != nil {\nsent.Dropped.Increment()\nreturn\n}\n@@ -359,7 +350,7 @@ func (*protocol) LinkAddressRequest(addr, localAddr tcpip.Address, linkEP stack.\nip.Encode(&header.IPv6Fields{\nPayloadLength: length,\nNextHeader: uint8(header.ICMPv6ProtocolNumber),\n- HopLimit: ndpHopLimit,\n+ HopLimit: header.NDPHopLimit,\nSrcAddr: r.LocalAddress,\nDstAddr: r.RemoteAddress,\n})\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/ipv6/icmp_test.go",
"new_path": "pkg/tcpip/network/ipv6/icmp_test.go",
"diff": "@@ -143,7 +143,7 @@ func TestICMPCounts(t *testing.T) {\nip.Encode(&header.IPv6Fields{\nPayloadLength: uint16(payloadLength),\nNextHeader: uint8(header.ICMPv6ProtocolNumber),\n- HopLimit: ndpHopLimit,\n+ HopLimit: header.NDPHopLimit,\nSrcAddr: r.LocalAddress,\nDstAddr: r.RemoteAddress,\n})\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/ipv6/ndp_test.go",
"new_path": "pkg/tcpip/network/ipv6/ndp_test.go",
"diff": "@@ -150,7 +150,7 @@ func TestHopLimitValidation(t *testing.T) {\n// Receive the NDP packet with an invalid hop limit\n// value.\n- handleIPv6Payload(hdr, ndpHopLimit-1, ep, &r)\n+ handleIPv6Payload(hdr, header.NDPHopLimit-1, ep, &r)\n// Invalid count should have increased.\nif got := invalid.Value(); got != 1 {\n@@ -164,7 +164,7 @@ func TestHopLimitValidation(t *testing.T) {\n}\n// Receive the NDP packet with a valid hop limit value.\n- handleIPv6Payload(hdr, ndpHopLimit, ep, &r)\n+ handleIPv6Payload(hdr, header.NDPHopLimit, ep, &r)\n// Rx count of NDP packet of type typ.typ should have\n// increased.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/ndp.go",
"new_path": "pkg/tcpip/stack/ndp.go",
"diff": "@@ -239,7 +239,7 @@ func (ndp *ndpState) doDuplicateAddressDetection(n *NIC, addr tcpip.Address, rem\npkt.SetChecksum(header.ICMPv6Checksum(pkt, r.LocalAddress, r.RemoteAddress, buffer.VectorisedView{}))\nsent := r.Stats().ICMP.V6PacketsSent\n- if err := r.WritePacket(nil, hdr, buffer.VectorisedView{}, NetworkHeaderParams{Protocol: header.ICMPv6ProtocolNumber, TTL: r.DefaultTTL(), TOS: DefaultTOS}); err != nil {\n+ if err := r.WritePacket(nil, hdr, buffer.VectorisedView{}, NetworkHeaderParams{Protocol: header.ICMPv6ProtocolNumber, TTL: header.NDPHopLimit, TOS: DefaultTOS}); err != nil {\nsent.Dropped.Increment()\nreturn false, err\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/ndp_test.go",
"new_path": "pkg/tcpip/stack/ndp_test.go",
"diff": "@@ -173,6 +173,7 @@ func TestDADResolve(t *testing.T) {\n// Check NDP packet.\nchecker.IPv6(t, p.Header.ToVectorisedView().First(),\n+ checker.TTL(header.NDPHopLimit),\nchecker.NDPNS(\nchecker.NDPNSTargetAddress(addr1)))\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | NDP Neighbor Solicitations sent during DAD must have an IP hop limit of 255
NDP Neighbor Solicitations sent during Duplicate Address Detection must have an
IP hop limit of 255, as all NDP Neighbor Solicitations should have.
Test: Test that DAD messages have the IPv6 hop limit field set to 255.
PiperOrigin-RevId: 275321680 |
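The 255 hop-limit requirement is a locality check: a packet that still carries the maximum hop limit cannot have been forwarded by a router, so NDP messages are sent with it and received packets without it are dropped. A minimal sketch of that check, independent of the netstack header types used in the diff:

package main

import "fmt"

// ndpHopLimit mirrors header.NDPHopLimit from the change above: all NDP
// packets must be sent with, and received with, an IPv6 hop limit of 255.
const ndpHopLimit = 255

// validNDPHopLimit reports whether a received NDP packet should be
// processed. Anything below 255 may have crossed a router and is silently
// dropped (RFC 4861 sections 4.1-4.5, 6.1, 7.1 and 8.1).
func validNDPHopLimit(hopLimit uint8) bool {
    return hopLimit == ndpHopLimit
}

func main() {
    fmt.Println(validNDPHopLimit(255)) // true: process the packet.
    fmt.Println(validNDPHopLimit(64))  // false: drop it.
}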
259,891 | 17.10.2019 13:08:27 | 25,200 | dfdbdf14fa101e850bb3361f91da6362b98d11d0 | Refactor pipe to support VFS2.
* Pulls common functionality (IO and locking on open) into pipe_util.go.
* Adds pipe/vfs.go, which implements a subset of vfs.FileDescriptionImpl.
A subsequent change will add support for pipes in memfs. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/pipe/BUILD",
"new_path": "pkg/sentry/kernel/pipe/BUILD",
"diff": "@@ -24,8 +24,10 @@ go_library(\n\"device.go\",\n\"node.go\",\n\"pipe.go\",\n+ \"pipe_util.go\",\n\"reader.go\",\n\"reader_writer.go\",\n+ \"vfs.go\",\n\"writer.go\",\n],\nimportpath = \"gvisor.dev/gvisor/pkg/sentry/kernel/pipe\",\n@@ -40,6 +42,7 @@ go_library(\n\"//pkg/sentry/fs/fsutil\",\n\"//pkg/sentry/safemem\",\n\"//pkg/sentry/usermem\",\n+ \"//pkg/sentry/vfs\",\n\"//pkg/syserror\",\n\"//pkg/waiter\",\n],\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/pipe/node.go",
"new_path": "pkg/sentry/kernel/pipe/node.go",
"diff": "@@ -18,7 +18,6 @@ import (\n\"sync\"\n\"gvisor.dev/gvisor/pkg/abi/linux\"\n- \"gvisor.dev/gvisor/pkg/amutex\"\n\"gvisor.dev/gvisor/pkg/sentry/context\"\n\"gvisor.dev/gvisor/pkg/sentry/fs\"\n\"gvisor.dev/gvisor/pkg/sentry/fs/fsutil\"\n@@ -91,10 +90,10 @@ func (i *inodeOperations) GetFile(ctx context.Context, d *fs.Dirent, flags fs.Fi\nswitch {\ncase flags.Read && !flags.Write: // O_RDONLY.\nr := i.p.Open(ctx, d, flags)\n- i.newHandleLocked(&i.rWakeup)\n+ newHandleLocked(&i.rWakeup)\nif i.p.isNamed && !flags.NonBlocking && !i.p.HasWriters() {\n- if !i.waitFor(&i.wWakeup, ctx) {\n+ if !waitFor(&i.mu, &i.wWakeup, ctx) {\nr.DecRef()\nreturn nil, syserror.ErrInterrupted\n}\n@@ -107,7 +106,7 @@ func (i *inodeOperations) GetFile(ctx context.Context, d *fs.Dirent, flags fs.Fi\ncase flags.Write && !flags.Read: // O_WRONLY.\nw := i.p.Open(ctx, d, flags)\n- i.newHandleLocked(&i.wWakeup)\n+ newHandleLocked(&i.wWakeup)\nif i.p.isNamed && !i.p.HasReaders() {\n// On a nonblocking, write-only open, the open fails with ENXIO if the\n@@ -117,7 +116,7 @@ func (i *inodeOperations) GetFile(ctx context.Context, d *fs.Dirent, flags fs.Fi\nreturn nil, syserror.ENXIO\n}\n- if !i.waitFor(&i.rWakeup, ctx) {\n+ if !waitFor(&i.mu, &i.rWakeup, ctx) {\nw.DecRef()\nreturn nil, syserror.ErrInterrupted\n}\n@@ -127,8 +126,8 @@ func (i *inodeOperations) GetFile(ctx context.Context, d *fs.Dirent, flags fs.Fi\ncase flags.Read && flags.Write: // O_RDWR.\n// Pipes opened for read-write always succeeds without blocking.\nrw := i.p.Open(ctx, d, flags)\n- i.newHandleLocked(&i.rWakeup)\n- i.newHandleLocked(&i.wWakeup)\n+ newHandleLocked(&i.rWakeup)\n+ newHandleLocked(&i.wWakeup)\nreturn rw, nil\ndefault:\n@@ -136,65 +135,6 @@ func (i *inodeOperations) GetFile(ctx context.Context, d *fs.Dirent, flags fs.Fi\n}\n}\n-// waitFor blocks until the underlying pipe has at least one reader/writer is\n-// announced via 'wakeupChan', or until 'sleeper' is cancelled. Any call to this\n-// function will block for either readers or writers, depending on where\n-// 'wakeupChan' points.\n-//\n-// f.mu must be held by the caller. waitFor returns with f.mu held, but it will\n-// drop f.mu before blocking for any reader/writers.\n-func (i *inodeOperations) waitFor(wakeupChan *chan struct{}, sleeper amutex.Sleeper) bool {\n- // Ideally this function would simply use a condition variable. However, the\n- // wait needs to be interruptible via 'sleeper', so we must sychronize via a\n- // channel. The synchronization below relies on the fact that closing a\n- // channel unblocks all receives on the channel.\n-\n- // Does an appropriate wakeup channel already exist? If not, create a new\n- // one. This is all done under f.mu to avoid races.\n- if *wakeupChan == nil {\n- *wakeupChan = make(chan struct{})\n- }\n-\n- // Grab a local reference to the wakeup channel since it may disappear as\n- // soon as we drop f.mu.\n- wakeup := *wakeupChan\n-\n- // Drop the lock and prepare to sleep.\n- i.mu.Unlock()\n- cancel := sleeper.SleepStart()\n-\n- // Wait for either a new reader/write to be signalled via 'wakeup', or\n- // for the sleep to be cancelled.\n- select {\n- case <-wakeup:\n- sleeper.SleepFinish(true)\n- case <-cancel:\n- sleeper.SleepFinish(false)\n- }\n-\n- // Take the lock and check if we were woken. 
If we were woken and\n- // interrupted, the former takes priority.\n- i.mu.Lock()\n- select {\n- case <-wakeup:\n- return true\n- default:\n- return false\n- }\n-}\n-\n-// newHandleLocked signals a new pipe reader or writer depending on where\n-// 'wakeupChan' points. This unblocks any corresponding reader or writer\n-// waiting for the other end of the channel to be opened, see Fifo.waitFor.\n-//\n-// i.mu must be held.\n-func (*inodeOperations) newHandleLocked(wakeupChan *chan struct{}) {\n- if *wakeupChan != nil {\n- close(*wakeupChan)\n- *wakeupChan = nil\n- }\n-}\n-\nfunc (*inodeOperations) Allocate(_ context.Context, _ *fs.Inode, _, _ int64) error {\nreturn syserror.EPIPE\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/pipe/pipe.go",
"new_path": "pkg/sentry/kernel/pipe/pipe.go",
"diff": "@@ -111,11 +111,27 @@ func NewPipe(isNamed bool, sizeBytes, atomicIOBytes int64) *Pipe {\nif atomicIOBytes > sizeBytes {\natomicIOBytes = sizeBytes\n}\n- return &Pipe{\n- isNamed: isNamed,\n- max: sizeBytes,\n- atomicIOBytes: atomicIOBytes,\n+ var p Pipe\n+ initPipe(&p, isNamed, sizeBytes, atomicIOBytes)\n+ return &p\n}\n+\n+func initPipe(pipe *Pipe, isNamed bool, sizeBytes, atomicIOBytes int64) {\n+ if sizeBytes < MinimumPipeSize {\n+ sizeBytes = MinimumPipeSize\n+ }\n+ if sizeBytes > MaximumPipeSize {\n+ sizeBytes = MaximumPipeSize\n+ }\n+ if atomicIOBytes <= 0 {\n+ atomicIOBytes = 1\n+ }\n+ if atomicIOBytes > sizeBytes {\n+ atomicIOBytes = sizeBytes\n+ }\n+ pipe.isNamed = isNamed\n+ pipe.max = sizeBytes\n+ pipe.atomicIOBytes = atomicIOBytes\n}\n// NewConnectedPipe initializes a pipe and returns a pair of objects\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "pkg/sentry/kernel/pipe/pipe_util.go",
"diff": "+// Copyright 2019 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package pipe\n+\n+import (\n+ \"io\"\n+ \"math\"\n+ \"sync\"\n+ \"syscall\"\n+\n+ \"gvisor.dev/gvisor/pkg/abi/linux\"\n+ \"gvisor.dev/gvisor/pkg/amutex\"\n+ \"gvisor.dev/gvisor/pkg/sentry/arch\"\n+ \"gvisor.dev/gvisor/pkg/sentry/context\"\n+ \"gvisor.dev/gvisor/pkg/sentry/usermem\"\n+ \"gvisor.dev/gvisor/pkg/waiter\"\n+)\n+\n+// This file contains Pipe file functionality that is tied to neither VFS nor\n+// the old fs architecture.\n+\n+// Release cleans up the pipe's state.\n+func (p *Pipe) Release() {\n+ p.rClose()\n+ p.wClose()\n+\n+ // Wake up readers and writers.\n+ p.Notify(waiter.EventIn | waiter.EventOut)\n+}\n+\n+// Read reads from the Pipe into dst.\n+func (p *Pipe) Read(ctx context.Context, dst usermem.IOSequence) (int64, error) {\n+ n, err := p.read(ctx, readOps{\n+ left: func() int64 {\n+ return dst.NumBytes()\n+ },\n+ limit: func(l int64) {\n+ dst = dst.TakeFirst64(l)\n+ },\n+ read: func(buf *buffer) (int64, error) {\n+ n, err := dst.CopyOutFrom(ctx, buf)\n+ dst = dst.DropFirst64(n)\n+ return n, err\n+ },\n+ })\n+ if n > 0 {\n+ p.Notify(waiter.EventOut)\n+ }\n+ return n, err\n+}\n+\n+// WriteTo writes to w from the Pipe.\n+func (p *Pipe) WriteTo(ctx context.Context, w io.Writer, count int64, dup bool) (int64, error) {\n+ ops := readOps{\n+ left: func() int64 {\n+ return count\n+ },\n+ limit: func(l int64) {\n+ count = l\n+ },\n+ read: func(buf *buffer) (int64, error) {\n+ n, err := buf.ReadToWriter(w, count, dup)\n+ count -= n\n+ return n, err\n+ },\n+ }\n+ if dup {\n+ // There is no notification for dup operations.\n+ return p.dup(ctx, ops)\n+ }\n+ n, err := p.read(ctx, ops)\n+ if n > 0 {\n+ p.Notify(waiter.EventOut)\n+ }\n+ return n, err\n+}\n+\n+// Write writes to the Pipe from src.\n+func (p *Pipe) Write(ctx context.Context, src usermem.IOSequence) (int64, error) {\n+ n, err := p.write(ctx, writeOps{\n+ left: func() int64 {\n+ return src.NumBytes()\n+ },\n+ limit: func(l int64) {\n+ src = src.TakeFirst64(l)\n+ },\n+ write: func(buf *buffer) (int64, error) {\n+ n, err := src.CopyInTo(ctx, buf)\n+ src = src.DropFirst64(n)\n+ return n, err\n+ },\n+ })\n+ if n > 0 {\n+ p.Notify(waiter.EventIn)\n+ }\n+ return n, err\n+}\n+\n+// ReadFrom reads from r to the Pipe.\n+func (p *Pipe) ReadFrom(ctx context.Context, r io.Reader, count int64) (int64, error) {\n+ n, err := p.write(ctx, writeOps{\n+ left: func() int64 {\n+ return count\n+ },\n+ limit: func(l int64) {\n+ count = l\n+ },\n+ write: func(buf *buffer) (int64, error) {\n+ n, err := buf.WriteFromReader(r, count)\n+ count -= n\n+ return n, err\n+ },\n+ })\n+ if n > 0 {\n+ p.Notify(waiter.EventIn)\n+ }\n+ return n, err\n+}\n+\n+// Readiness returns the ready events in the underlying pipe.\n+func (p *Pipe) Readiness(mask waiter.EventMask) waiter.EventMask {\n+ return p.rwReadiness() & mask\n+}\n+\n+// Ioctl implements ioctls on the Pipe.\n+func (p *Pipe) Ioctl(ctx context.Context, io usermem.IO, args 
arch.SyscallArguments) (uintptr, error) {\n+ // Switch on ioctl request.\n+ switch int(args[1].Int()) {\n+ case linux.FIONREAD:\n+ v := p.queued()\n+ if v > math.MaxInt32 {\n+ v = math.MaxInt32 // Silently truncate.\n+ }\n+ // Copy result to user-space.\n+ _, err := usermem.CopyObjectOut(ctx, io, args[2].Pointer(), int32(v), usermem.IOOpts{\n+ AddressSpaceActive: true,\n+ })\n+ return 0, err\n+ default:\n+ return 0, syscall.ENOTTY\n+ }\n+}\n+\n+// waitFor blocks until the underlying pipe has at least one reader/writer is\n+// announced via 'wakeupChan', or until 'sleeper' is cancelled. Any call to this\n+// function will block for either readers or writers, depending on where\n+// 'wakeupChan' points.\n+//\n+// mu must be held by the caller. waitFor returns with mu held, but it will\n+// drop mu before blocking for any reader/writers.\n+func waitFor(mu *sync.Mutex, wakeupChan *chan struct{}, sleeper amutex.Sleeper) bool {\n+ // Ideally this function would simply use a condition variable. However, the\n+ // wait needs to be interruptible via 'sleeper', so we must sychronize via a\n+ // channel. The synchronization below relies on the fact that closing a\n+ // channel unblocks all receives on the channel.\n+\n+ // Does an appropriate wakeup channel already exist? If not, create a new\n+ // one. This is all done under f.mu to avoid races.\n+ if *wakeupChan == nil {\n+ *wakeupChan = make(chan struct{})\n+ }\n+\n+ // Grab a local reference to the wakeup channel since it may disappear as\n+ // soon as we drop f.mu.\n+ wakeup := *wakeupChan\n+\n+ // Drop the lock and prepare to sleep.\n+ mu.Unlock()\n+ cancel := sleeper.SleepStart()\n+\n+ // Wait for either a new reader/write to be signalled via 'wakeup', or\n+ // for the sleep to be cancelled.\n+ select {\n+ case <-wakeup:\n+ sleeper.SleepFinish(true)\n+ case <-cancel:\n+ sleeper.SleepFinish(false)\n+ }\n+\n+ // Take the lock and check if we were woken. If we were woken and\n+ // interrupted, the former takes priority.\n+ mu.Lock()\n+ select {\n+ case <-wakeup:\n+ return true\n+ default:\n+ return false\n+ }\n+}\n+\n+// newHandleLocked signals a new pipe reader or writer depending on where\n+// 'wakeupChan' points. This unblocks any corresponding reader or writer\n+// waiting for the other end of the channel to be opened, see Fifo.waitFor.\n+//\n+// Precondition: the mutex protecting wakeupChan must be held.\n+func newHandleLocked(wakeupChan *chan struct{}) {\n+ if *wakeupChan != nil {\n+ close(*wakeupChan)\n+ *wakeupChan = nil\n+ }\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/pipe/reader_writer.go",
"new_path": "pkg/sentry/kernel/pipe/reader_writer.go",
"diff": "@@ -16,16 +16,12 @@ package pipe\nimport (\n\"io\"\n- \"math\"\n- \"syscall\"\n- \"gvisor.dev/gvisor/pkg/abi/linux\"\n\"gvisor.dev/gvisor/pkg/sentry/arch\"\n\"gvisor.dev/gvisor/pkg/sentry/context\"\n\"gvisor.dev/gvisor/pkg/sentry/fs\"\n\"gvisor.dev/gvisor/pkg/sentry/fs/fsutil\"\n\"gvisor.dev/gvisor/pkg/sentry/usermem\"\n- \"gvisor.dev/gvisor/pkg/waiter\"\n)\n// ReaderWriter satisfies the FileOperations interface and services both\n@@ -45,124 +41,27 @@ type ReaderWriter struct {\n*Pipe\n}\n-// Release implements fs.FileOperations.Release.\n-func (rw *ReaderWriter) Release() {\n- rw.Pipe.rClose()\n- rw.Pipe.wClose()\n-\n- // Wake up readers and writers.\n- rw.Pipe.Notify(waiter.EventIn | waiter.EventOut)\n-}\n-\n// Read implements fs.FileOperations.Read.\nfunc (rw *ReaderWriter) Read(ctx context.Context, _ *fs.File, dst usermem.IOSequence, _ int64) (int64, error) {\n- n, err := rw.Pipe.read(ctx, readOps{\n- left: func() int64 {\n- return dst.NumBytes()\n- },\n- limit: func(l int64) {\n- dst = dst.TakeFirst64(l)\n- },\n- read: func(buf *buffer) (int64, error) {\n- n, err := dst.CopyOutFrom(ctx, buf)\n- dst = dst.DropFirst64(n)\n- return n, err\n- },\n- })\n- if n > 0 {\n- rw.Pipe.Notify(waiter.EventOut)\n- }\n- return n, err\n+ return rw.Pipe.Read(ctx, dst)\n}\n// WriteTo implements fs.FileOperations.WriteTo.\nfunc (rw *ReaderWriter) WriteTo(ctx context.Context, _ *fs.File, w io.Writer, count int64, dup bool) (int64, error) {\n- ops := readOps{\n- left: func() int64 {\n- return count\n- },\n- limit: func(l int64) {\n- count = l\n- },\n- read: func(buf *buffer) (int64, error) {\n- n, err := buf.ReadToWriter(w, count, dup)\n- count -= n\n- return n, err\n- },\n- }\n- if dup {\n- // There is no notification for dup operations.\n- return rw.Pipe.dup(ctx, ops)\n- }\n- n, err := rw.Pipe.read(ctx, ops)\n- if n > 0 {\n- rw.Pipe.Notify(waiter.EventOut)\n- }\n- return n, err\n+ return rw.Pipe.WriteTo(ctx, w, count, dup)\n}\n// Write implements fs.FileOperations.Write.\nfunc (rw *ReaderWriter) Write(ctx context.Context, _ *fs.File, src usermem.IOSequence, _ int64) (int64, error) {\n- n, err := rw.Pipe.write(ctx, writeOps{\n- left: func() int64 {\n- return src.NumBytes()\n- },\n- limit: func(l int64) {\n- src = src.TakeFirst64(l)\n- },\n- write: func(buf *buffer) (int64, error) {\n- n, err := src.CopyInTo(ctx, buf)\n- src = src.DropFirst64(n)\n- return n, err\n- },\n- })\n- if n > 0 {\n- rw.Pipe.Notify(waiter.EventIn)\n- }\n- return n, err\n+ return rw.Pipe.Write(ctx, src)\n}\n// ReadFrom implements fs.FileOperations.WriteTo.\nfunc (rw *ReaderWriter) ReadFrom(ctx context.Context, _ *fs.File, r io.Reader, count int64) (int64, error) {\n- n, err := rw.Pipe.write(ctx, writeOps{\n- left: func() int64 {\n- return count\n- },\n- limit: func(l int64) {\n- count = l\n- },\n- write: func(buf *buffer) (int64, error) {\n- n, err := buf.WriteFromReader(r, count)\n- count -= n\n- return n, err\n- },\n- })\n- if n > 0 {\n- rw.Pipe.Notify(waiter.EventIn)\n- }\n- return n, err\n-}\n-\n-// Readiness returns the ready events in the underlying pipe.\n-func (rw *ReaderWriter) Readiness(mask waiter.EventMask) waiter.EventMask {\n- return rw.Pipe.rwReadiness() & mask\n+ return rw.Pipe.ReadFrom(ctx, r, count)\n}\n// Ioctl implements fs.FileOperations.Ioctl.\nfunc (rw *ReaderWriter) Ioctl(ctx context.Context, _ *fs.File, io usermem.IO, args arch.SyscallArguments) (uintptr, error) {\n- // Switch on ioctl request.\n- switch int(args[1].Int()) {\n- case linux.FIONREAD:\n- v := rw.queued()\n- if v > math.MaxInt32 {\n- v = 
math.MaxInt32 // Silently truncate.\n- }\n- // Copy result to user-space.\n- _, err := usermem.CopyObjectOut(ctx, io, args[2].Pointer(), int32(v), usermem.IOOpts{\n- AddressSpaceActive: true,\n- })\n- return 0, err\n- default:\n- return 0, syscall.ENOTTY\n- }\n+ return rw.Pipe.Ioctl(ctx, io, args)\n}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "pkg/sentry/kernel/pipe/vfs.go",
"diff": "+// Copyright 2019 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package pipe\n+\n+import (\n+ \"sync\"\n+\n+ \"gvisor.dev/gvisor/pkg/abi/linux\"\n+ \"gvisor.dev/gvisor/pkg/sentry/arch\"\n+ \"gvisor.dev/gvisor/pkg/sentry/context\"\n+ \"gvisor.dev/gvisor/pkg/sentry/usermem\"\n+ \"gvisor.dev/gvisor/pkg/sentry/vfs\"\n+ \"gvisor.dev/gvisor/pkg/syserror\"\n+ \"gvisor.dev/gvisor/pkg/waiter\"\n+)\n+\n+// This file contains types enabling the pipe package to be used with the vfs\n+// package.\n+\n+// VFSPipe represents the actual pipe, analagous to an inode. VFSPipes should\n+// not be copied.\n+type VFSPipe struct {\n+ // mu protects the fields below.\n+ mu sync.Mutex `state:\"nosave\"`\n+\n+ // pipe is the underlying pipe.\n+ pipe Pipe\n+\n+ // Channels for synchronizing the creation of new readers and writers\n+ // of this fifo. See waitFor and newHandleLocked.\n+ //\n+ // These are not saved/restored because all waiters are unblocked on\n+ // save, and either automatically restart (via ERESTARTSYS) or return\n+ // EINTR on resume. On restarts via ERESTARTSYS, the appropriate\n+ // channel will be recreated.\n+ rWakeup chan struct{} `state:\"nosave\"`\n+ wWakeup chan struct{} `state:\"nosave\"`\n+}\n+\n+// NewVFSPipe returns an initialized VFSPipe.\n+func NewVFSPipe(sizeBytes, atomicIOBytes int64) *VFSPipe {\n+ var vp VFSPipe\n+ initPipe(&vp.pipe, true /* isNamed */, sizeBytes, atomicIOBytes)\n+ return &vp\n+}\n+\n+// NewVFSPipeFD opens a named pipe. Named pipes have special blocking semantics\n+// during open:\n+//\n+// \"Normally, opening the FIFO blocks until the other end is opened also. A\n+// process can open a FIFO in nonblocking mode. In this case, opening for\n+// read-only will succeed even if no-one has opened on the write side yet,\n+// opening for write-only will fail with ENXIO (no such device or address)\n+// unless the other end has already been opened. Under Linux, opening a FIFO\n+// for read and write will succeed both in blocking and nonblocking mode. POSIX\n+// leaves this behavior undefined. 
This can be used to open a FIFO for writing\n+// while there are no readers available.\" - fifo(7)\n+func (vp *VFSPipe) NewVFSPipeFD(ctx context.Context, rp *vfs.ResolvingPath, vfsd *vfs.Dentry, vfsfd *vfs.FileDescription, flags uint32) (*VFSPipeFD, error) {\n+ vp.mu.Lock()\n+ defer vp.mu.Unlock()\n+\n+ readable := vfs.MayReadFileWithOpenFlags(flags)\n+ writable := vfs.MayWriteFileWithOpenFlags(flags)\n+ if !readable && !writable {\n+ return nil, syserror.EINVAL\n+ }\n+\n+ vfd, err := vp.open(rp, vfsd, vfsfd, flags)\n+ if err != nil {\n+ return nil, err\n+ }\n+\n+ switch {\n+ case readable && writable:\n+ // Pipes opened for read-write always succeed without blocking.\n+ newHandleLocked(&vp.rWakeup)\n+ newHandleLocked(&vp.wWakeup)\n+\n+ case readable:\n+ newHandleLocked(&vp.rWakeup)\n+ // If this pipe is being opened as nonblocking and there's no\n+ // writer, we have to wait for a writer to open the other end.\n+ if flags&linux.O_NONBLOCK == 0 && !vp.pipe.HasWriters() && !waitFor(&vp.mu, &vp.wWakeup, ctx) {\n+ return nil, syserror.EINTR\n+ }\n+\n+ case writable:\n+ newHandleLocked(&vp.wWakeup)\n+\n+ if !vp.pipe.HasReaders() {\n+ // Nonblocking, write-only opens fail with ENXIO when\n+ // the read side isn't open yet.\n+ if flags&linux.O_NONBLOCK != 0 {\n+ return nil, syserror.ENXIO\n+ }\n+ // Wait for a reader to open the other end.\n+ if !waitFor(&vp.mu, &vp.rWakeup, ctx) {\n+ return nil, syserror.EINTR\n+ }\n+ }\n+\n+ default:\n+ panic(\"invalid pipe flags: must be readable, writable, or both\")\n+ }\n+\n+ return vfd, nil\n+}\n+\n+// Preconditions: vp.mu must be held.\n+func (vp *VFSPipe) open(rp *vfs.ResolvingPath, vfsd *vfs.Dentry, vfsfd *vfs.FileDescription, flags uint32) (*VFSPipeFD, error) {\n+ var fd VFSPipeFD\n+ fd.flags = flags\n+ fd.readable = vfs.MayReadFileWithOpenFlags(flags)\n+ fd.writable = vfs.MayWriteFileWithOpenFlags(flags)\n+ fd.vfsfd = vfsfd\n+ fd.pipe = &vp.pipe\n+ if fd.writable {\n+ // The corresponding Mount.EndWrite() is in VFSPipe.Release().\n+ if err := rp.Mount().CheckBeginWrite(); err != nil {\n+ return nil, err\n+ }\n+ }\n+\n+ switch {\n+ case fd.readable && fd.writable:\n+ vp.pipe.rOpen()\n+ vp.pipe.wOpen()\n+ case fd.readable:\n+ vp.pipe.rOpen()\n+ case fd.writable:\n+ vp.pipe.wOpen()\n+ default:\n+ panic(\"invalid pipe flags: must be readable, writable, or both\")\n+ }\n+\n+ return &fd, nil\n+}\n+\n+// VFSPipeFD implements a subset of vfs.FileDescriptionImpl for pipes. 
It is\n+// expected that filesystesm will use this in a struct implementing\n+// vfs.FileDescriptionImpl.\n+type VFSPipeFD struct {\n+ pipe *Pipe\n+ flags uint32\n+ readable bool\n+ writable bool\n+ vfsfd *vfs.FileDescription\n+}\n+\n+// Release implements vfs.FileDescriptionImpl.Release.\n+func (fd *VFSPipeFD) Release() {\n+ var event waiter.EventMask\n+ if fd.readable {\n+ fd.pipe.rClose()\n+ event |= waiter.EventIn\n+ }\n+ if fd.writable {\n+ fd.pipe.wClose()\n+ event |= waiter.EventOut\n+ }\n+ if event == 0 {\n+ panic(\"invalid pipe flags: must be readable, writable, or both\")\n+ }\n+\n+ if fd.writable {\n+ fd.vfsfd.VirtualDentry().Mount().EndWrite()\n+ }\n+\n+ fd.pipe.Notify(event)\n+}\n+\n+// OnClose implements vfs.FileDescriptionImpl.OnClose.\n+func (fd *VFSPipeFD) OnClose() error {\n+ return nil\n+}\n+\n+// PRead implements vfs.FileDescriptionImpl.PRead.\n+func (fd *VFSPipeFD) PRead(_ context.Context, _ usermem.IOSequence, _ int64, _ vfs.ReadOptions) (int64, error) {\n+ return 0, syserror.ESPIPE\n+}\n+\n+// Read implements vfs.FileDescriptionImpl.Read.\n+func (fd *VFSPipeFD) Read(ctx context.Context, dst usermem.IOSequence, _ vfs.ReadOptions) (int64, error) {\n+ if !fd.readable {\n+ return 0, syserror.EINVAL\n+ }\n+\n+ return fd.pipe.Read(ctx, dst)\n+}\n+\n+// PWrite implements vfs.FileDescriptionImpl.PWrite.\n+func (fd *VFSPipeFD) PWrite(_ context.Context, _ usermem.IOSequence, _ int64, _ vfs.WriteOptions) (int64, error) {\n+ return 0, syserror.ESPIPE\n+}\n+\n+// Write implements vfs.FileDescriptionImpl.Write.\n+func (fd *VFSPipeFD) Write(ctx context.Context, src usermem.IOSequence, _ vfs.WriteOptions) (int64, error) {\n+ if !fd.writable {\n+ return 0, syserror.EINVAL\n+ }\n+\n+ return fd.pipe.Write(ctx, src)\n+}\n+\n+// Ioctl implements vfs.FileDescriptionImpl.Ioctl.\n+func (fd *VFSPipeFD) Ioctl(ctx context.Context, uio usermem.IO, args arch.SyscallArguments) (uintptr, error) {\n+ return fd.pipe.Ioctl(ctx, uio, args)\n+}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Refactor pipe to support VFS2.
* Pulls common functionality (IO and locking on open) into pipe_util.go.
* Adds pipe/vfs.go, which implements a subset of vfs.FileDescriptionImpl.
A subsequent change will add support for pipes in memfs.
PiperOrigin-RevId: 275322385 |
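The blocking-open behaviour factored into pipe_util.go comes down to one pattern: publish a wakeup channel under the lock, drop the lock, then select on either that channel or a cancellation signal. Below is a trimmed-down, self-contained sketch of that pattern with a plain channel standing in for the sleeper; the real helper also rechecks the wakeup channel after relocking so a wakeup wins over an interruption.

package main

import (
    "fmt"
    "sync"
    "time"
)

// waitFor blocks until wakeupChan is closed (the other end of the pipe was
// opened) or cancel fires, releasing mu while blocked and reacquiring it
// before returning. It mirrors the shape of pipe_util.go's waitFor, minus
// the amutex.Sleeper plumbing and the post-wakeup recheck.
func waitFor(mu *sync.Mutex, wakeupChan *chan struct{}, cancel <-chan struct{}) bool {
    if *wakeupChan == nil {
        *wakeupChan = make(chan struct{})
    }
    wakeup := *wakeupChan

    mu.Unlock()
    defer mu.Lock()

    select {
    case <-wakeup:
        return true
    case <-cancel:
        return false
    }
}

func main() {
    var mu sync.Mutex
    var writerArrived chan struct{}
    cancel := make(chan struct{})

    mu.Lock()
    // A "writer" opens the other end a little later; closing the channel is
    // the equivalent of newHandleLocked in the pipe package.
    go func() {
        time.Sleep(10 * time.Millisecond)
        mu.Lock()
        if writerArrived != nil {
            close(writerArrived)
            writerArrived = nil
        }
        mu.Unlock()
    }()

    ok := waitFor(&mu, &writerArrived, cancel)
    mu.Unlock()
    fmt.Println("writer arrived:", ok)
}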
259,858 | 12.09.2019 00:26:09 | 25,200 | 015a1b57d6b701cb8b687fb70aa9a5dbcb1edc25 | Add apt-based instructions. | [
{
"change_type": "DELETE",
"old_path": "content/docs/includes/index.md",
"new_path": null,
"diff": "-+++\n-headless = true\n-+++\n"
},
{
"change_type": "DELETE",
"old_path": "content/docs/includes/install_gvisor.md",
"new_path": null,
"diff": "-The easiest way to get `runsc` is from the [latest nightly\n-build][latest-nightly]. After you download the binary, check it against the\n-SHA512 [checksum file][latest-hash].\n-\n-Older builds can also be found here (note that some days may not have releases\n-due to failing builds):\n-\n- `https://storage.googleapis.com/gvisor/releases/nightly/${yyyy-mm-dd}/runsc`\n-\n-With corresponding SHA512 checksums here:\n-\n- `https://storage.googleapis.com/gvisor/releases/nightly/${yyyy-mm-dd}/runsc.sha512`\n-\n-**It is important to copy this binary to a location that is accessible to all\n-users, and ensure it is executable by all users**, since `runsc` executes itself\n-as user `nobody` to avoid unnecessary privileges. The `/usr/local/bin` directory is\n-a good place to put the `runsc` binary.\n-\n-```bash\n-(\n- set -e\n- if [ -e runsc ]; then rm runsc; fi\n- wget https://storage.googleapis.com/gvisor/releases/nightly/latest/runsc\n- if [ -e runsc.sha512 ]; then rm runsc.sha512; fi\n- wget https://storage.googleapis.com/gvisor/releases/nightly/latest/runsc.sha512\n- sha512sum -c runsc.sha512\n- sudo mv runsc /usr/local/bin\n- sudo chown root:root /usr/local/bin/runsc\n- sudo chmod 0755 /usr/local/bin/runsc\n-)\n-```\n-\n-[latest-nightly]: https://storage.googleapis.com/gvisor/releases/nightly/latest/runsc\n-\n-[latest-hash]: https://storage.googleapis.com/gvisor/releases/nightly/latest/runsc.sha512\n-\n-[oci]: https://www.opencontainers.org\n"
},
{
"change_type": "MODIFY",
"old_path": "content/docs/user_guide/_index.md",
"new_path": "content/docs/user_guide/_index.md",
"diff": "@@ -3,10 +3,5 @@ title = \"User Guide\"\nweight = 10\n+++\n-gVisor can be used with Docker, Kubernetes, or directly using `runsc` with crafted OCI\n-spec for your container. Use the links below to see detailed instructions for each\n-of them:\n-\n- * [Docker](./docker/): The quickest and easiest way to get started.\n- * [Kubernetes](./kubernetes/): Isolate Pods in your K8s cluster with gVisor.\n- * [OCI Quick Start](./oci/): Expert mode. Customize gVisor for your environment.\n+Get started with either [installation instructions](./install/) or\n+[quick start guides](./quick_start).\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "content/docs/user_guide/install.md",
"diff": "++++\n+title = \"Installation\"\n+weight = 20\n++++\n+\n+> Note: gVisor supports only x86\\_64 and requires Linux {{< required_linux >}}\n+> ([older Linux][old-linux]).\n+\n+## Versions\n+\n+The `runsc` binaries and repositories are available in multiple versions and\n+release channels. First, you should pick the version you'd like to install. For\n+experimentation, the nightly release is recommended. For production use, the\n+latest release is recommended.\n+\n+After selecting an appropriate release channel from the options below, proceed\n+to the preferred installation mechanism: manual or from an `apt` repository.\n+\n+### Nightly\n+\n+Nightly releases are built most nights from the master branch, and are available\n+at the following URL:\n+\n+ `https://storage.googleapis.com/gvisor/releases/nightly/latest`\n+\n+Specific nightly releases can be found at:\n+\n+ `https://storage.googleapis.com/gvisor/releases/nightly/${yyyy-mm-dd}`\n+\n+Note that a release may not be available for every day.\n+\n+To use a nightly release, use one of the above URLs for `URL` in the manual\n+instructions below. For `apt`, use `nightly` for `DIST` below.\n+\n+### Latest release\n+\n+The latest official release is available at the following URL:\n+\n+ `https://storage.googleapis.com/gvisor/releases/release/latest`\n+\n+To use the latest release, use the above URL for `URL` in the manual\n+instructions below. For `apt`, use `latest` for `DIST` below.\n+\n+### Specific release\n+\n+A given release release is available at the following URL:\n+\n+ `https://storage.googleapis.com/gvisor/releases/release/${yyyymmdd}`\n+\n+See the [releases][releases] page for information about specific releases.\n+\n+This will include point updates for the release, if required. To use a specific\n+release, use the above URL for `URL` in the manual instructions below. For\n+`apt`, use `${yyyymmdd}` for `DIST` below.\n+\n+### Point release\n+\n+A given point release is available at the following URL:\n+\n+ `https://storage.googleapis.com/gvisor/releases/release/${yyyymmdd}.${rc}`\n+\n+Unlike the specific release above, which may include updates, this release will\n+not change. To use a specific point release, use the above URL for `URL` in the\n+manual instructions below. 
For apt, use `${yyyymmdd}.${rc}` for `DIST` below.\n+\n+## From an `apt` repository\n+\n+First, appropriate dependencies must be installed to allow `apt` to install\n+packages via https:\n+\n+```bash\n+sudo apt-get update && \\\n+sudo apt-get install -y \\\n+ apt-transport-https \\\n+ ca-certificates \\\n+ curl \\\n+ gnupg-agent \\\n+ software-properties-common\n+```\n+\n+Next, the key used to sign archives should be added to your `apt` keychain:\n+\n+```bash\n+curl -fsSL https://gvisor.dev/archive.key | sudo apt-key add -\n+```\n+\n+Based on the release type, you will need to substitute `${DIST}` below, using\n+one of:\n+\n+ * `nightly`: For all nightly releases.\n+ * `latest`: For the latest release.\n+ * `${yyyymmdd}`: For specific releases.\n+ * `${yyyymmdd}.${rc}`: For a specific point release.\n+\n+The repository for the release you wish to install should be added:\n+\n+```bash\n+sudo add-apt-repository \\\n+ \"deb https://storage.googleapis.com/gvisor/releases\" \\\n+ \"${DIST}\" \\\n+ main\n+```\n+\n+For example, to install the latest official release, you can use:\n+\n+```bash\n+sudo add-apt-repository \\\n+ \"deb https://storage.googleapis.com/gvisor/releases\" \\\n+ latest \\\n+ main\n+```\n+\n+Now the runsc package can be installed:\n+\n+```bash\n+sudo apt-get update && sudo apt-get install -y runsc\n+```\n+\n+If you have Docker installed, it will be automatically configured.\n+\n+## Manually\n+\n+After selecting an appropriate `URL` above, you can download `runsc` directly\n+from `${URL}/runsc` ([latest][latest-nightly]) and a checksum hash from\n+`${URL}/runsc.sha512` ([latest][latest-hash]).\n+\n+For example, this binary can be downloaded, validated, and placed in an\n+appropriate location by running:\n+\n+```bash\n+(\n+ set -e\n+ wget ${URL}/runsc\n+ wget ${URL/runsc.sha512\n+ sha512sum -c runsc.sha512\n+ sudo mv runsc /usr/local/bin\n+ sudo chown root:root /usr/local/bin/runsc\n+ sudo chmod 0755 /usr/local/bin/runsc\n+)\n+```\n+\n+**It is important to copy this binary to a location that is accessible to all\n+users, and ensure it is executable by all users**, since `runsc` executes itself\n+as user `nobody` to avoid unnecessary privileges. The `/usr/local/bin` directory\n+is a good place to put the `runsc` binary.\n+\n+After installation, the`runsc` binary comes with an `install` command that can\n+optionally automatically configure Docker:\n+\n+```bash\n+runsc install\n+```\n+\n+[latest-nightly]: https://storage.googleapis.com/gvisor/releases/nightly/latest/runsc\n+\n+[latest-hash]: https://storage.googleapis.com/gvisor/releases/nightly/latest/runsc.sha512\n+\n+[oci]: https://www.opencontainers.org\n+\n+[old-linux]: /docs/user_guide/networking/#gso\n+\n+[releases]: https://github.com/google/gvisor/releases\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "content/docs/user_guide/quick_start/_index.md",
"diff": "++++\n+title = \"Quick Start\"\n+weight = 10\n++++\n+\n+gVisor can be used with Docker, Kubernetes, or directly using `runsc` with\n+crafted OCI spec for your container. Use the links below to see detailed\n+instructions for each of them:\n+\n+ * [Docker](./docker/): The quickest and easiest way to get started.\n+ * [Kubernetes](./kubernetes/): Isolate Pods in your K8s cluster with gVisor.\n+ * [OCI](./oci/): Expert mode. Customize gVisor for your environment.\n"
},
{
"change_type": "RENAME",
"old_path": "content/docs/user_guide/docker.md",
"new_path": "content/docs/user_guide/quick_start/docker.md",
"diff": "+++\n-title = \"Docker Quick Start\"\n+title = \"Docker\"\nweight = 10\n+++\n+\n+> Note: This guide requires Docker version 17.09.0 or greater. Refer to the\n+> [Docker documentation][docker] for how to install it.\n+\nThis guide will help you quickly get started running Docker containers using\ngVisor.\n## Install gVisor\n-> Note: gVisor supports only x86\\_64 and requires Linux {{< required_linux >}}\n-> ([older Linux][old-linux]).\n+First, install gVisor using the [install instructions][install].\n-{{% readfile file=\"docs/includes/install_gvisor.md\" markdown=\"true\" %}}\n+If you use the `apt` repository or the `automated` install, then you can skip\n+the next section and proceed straight to running a container.\n## Configuring Docker\n-> Note: This guide requires Docker version 17.09.0 or greater. Refer to the\n-> [Docker documentation][docker] for how to install it.\n-\nFirst you will need to configure Docker to use `runsc` by adding a runtime\nentry to your Docker configuration (`/etc/docker/daemon.json`). You may have to\ncreate this file if it does not exist. Also, some Docker versions also require\n@@ -88,5 +89,5 @@ Next, look at the different options available for gVisor: [platform](../platform\n[network](../networking/), [filesystem](../filesystem/).\n[docker]: https://docs.docker.com/install/\n-[old-linux]: /docs/user_guide/networking/#gso\n+\n[storage-driver]: https://docs.docker.com/engine/reference/commandline/dockerd/#daemon-storage-driver\n"
},
{
"change_type": "RENAME",
"old_path": "content/docs/user_guide/kubernetes.md",
"new_path": "content/docs/user_guide/quick_start/kubernetes.md",
"diff": "title = \"Kubernetes\"\nweight = 20\n+++\n+\ngVisor can be used to run Kubernetes pods and has several integration points\nwith Kubernetes.\n"
},
{
"change_type": "RENAME",
"old_path": "content/docs/user_guide/oci.md",
"new_path": "content/docs/user_guide/quick_start/oci.md",
"diff": "+++\n-title = \"OCI Quick Start\"\n+title = \"OCI\"\nweight = 30\n+++\n+\nThis guide will quickly get you started running your first gVisor sandbox\ncontainer using the runtime directly with the default platform.\n## Install gVisor\n-> Note: gVisor supports only x86\\_64 and requires Linux {{< required_linux >}}\n-> ([older Linux][old-linux]).\n-\n-{{% readfile file=\"docs/includes/install_gvisor.md\" markdown=\"true\" %}}\n+First, install gVisor using the [install instructions][install].\n## Run an OCI compatible container\n@@ -48,4 +46,5 @@ sudo runsc run hello\nNext try [running gVisor using Docker](../docker/).\n[oci]: https://opencontainers.org/\n-[old-linux]: /docs/user_guide/networking/#gso\n+\n+[install]: /docs/user_guide/install\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "static/archive.key",
"diff": "+-----BEGIN PGP PUBLIC KEY BLOCK-----\n+\n+mQINBF0meAYBEACcBYPOSBiKtid+qTQlbgKGPxUYt0cNZiQqWXylhYUT4PuNlNx5\n+s+sBLFvNTpdTrXMmZ8NkekyjD1HardWvebvJT4u+Ho/9jUr4rP71cNwNtocz/w8G\n+DsUXSLgH8SDkq6xw0L+5eGc78BBg9cOeBeFBm3UPgxTBXS9Zevoi2w1lzSxkXvjx\n+cGzltzMZfPXERljgLzp9AAfhg/2ouqVQm37fY+P/NDzFMJ1XHPIIp9KJl/prBVud\n+jJJteFZ5sgL6MwjBQq2kw+q2Jb8Zfjl0BeXDgGMN5M5lGhX2wTfiMbfo7KWyzRnB\n+RpSP3BxlLqYeQUuLG5Yx8z3oA3uBkuKaFOKvXtiScxmGM/+Ri2YM3m66imwDhtmP\n+AKwTPI3Re4gWWOffglMVSv2sUAY32XZ74yXjY1VhK3bN3WFUPGrgQx4X7GP0A1Te\n+lzqkT3VSMXieImTASosK5L5Q8rryvgCeI9tQLn9EpYFCtU3LXvVgTreGNEEjMOnL\n+dR7yOU+Fs775stn6ucqmdYarx7CvKUrNAhgEeHMonLe1cjYScF7NfLO1GIrQKJR2\n+DE0f+uJZ52inOkO8ufh3WVQJSYszuS3HCY7w5oj1aP38k/y9zZdZvVvwAWZaiqBQ\n+iwjVs6Kub76VVZZhRDf4iYs8k1Zh64nXdfQt250d8U5yMPF3wIJ+c1yhxwARAQAB\n+tCpUaGUgZ1Zpc29yIEF1dGhvcnMgPGd2aXNvci1ib3RAZ29vZ2xlLmNvbT6JAlQE\n+EwEKAD4WIQRvHfheOnHCSRjnJ9VvxtVU4yvZQwUCXSZ4BgIbAwUJA8JnAAULCQgH\n+AgYVCgkICwIEFgIDAQIeAQIXgAAKCRBvxtVU4yvZQ5WFD/9VZXMW5I2rKV+2gTHT\n+CsW74kZVi1VFdAVYiUJZXw2jJNtcg3xdgBcscYPyecyka/6TS2q7q2fOGAzCZkcR\n+e3lLzkGAngMlZ7PdHAE0PDMNFaeMZW0dxNH68vn7AiA1y2XwENnxVec7iXQH6aX5\n+xUNg2OCiv5f6DJItHc/Q4SvFUi8QK7TT/GYE1RJXVJlLqfO6y4V8SeqfM+FHpHZM\n+gzrwdTgsNiEm4lMjWcgb2Ib4i2JUVAjIRPfcpysiV5E7c3SPXyu4bOovKKlbhiJ1\n+Q1M9M0zHik34Kjf4YNO1EW936j7Msd181CJt5Bl9XvlhPb8gey/ygpIvcicLx6M5\n+lRJTy4z1TtkmtZ7E8EbJZWoPTaHlA6hoMtGeE35j3vMZN1qZYaYt26eFOxxhh7PA\n+J0h1lS7T2O8u1c2JKhKvajtdmbqbJgI8FRhVsMoVBnqDK5aE9MOAso36OibfweEL\n+8iV2z8JnBpWtbbUEaWro4knPtbLJbQFvXVietm3cFsbGg+DMIwI6x6HcU91IEFYI\n+Sv4orK7xgLuM+f6dxo/Wel3ht18dg3x3krBLALTYBidRfnQYYR3sTfLquB8b5WaY\n+o829L2Bop9GBygdLevkHHN5It6q8CVpn0H5HEJMNaDOX1LcPbf0CKwkkAVCBd9YZ\n+eAX38ds9LliK7XPXdC4c+zEkGA==\n+=x8TG\n+-----END PGP PUBLIC KEY BLOCK-----\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add apt-based instructions. |
259,884 | 18.10.2019 00:32:14 | 14,400 | b5ccc29b9506b2fc4e5dd24a53e589ef4e334280 | Fixed some review issues
Fixed some issues that came up during review.
Update section titles to have a full phrase
Fix ${URL}
Have short install script clean up after itself so it can be re-run.
Small wording changes. | [
{
"change_type": "MODIFY",
"old_path": "content/docs/user_guide/install.md",
"new_path": "content/docs/user_guide/install.md",
"diff": "@@ -9,7 +9,7 @@ weight = 20\n## Versions\nThe `runsc` binaries and repositories are available in multiple versions and\n-release channels. First, you should pick the version you'd like to install. For\n+release channels. You should pick the version you'd like to install. For\nexperimentation, the nightly release is recommended. For production use, the\nlatest release is recommended.\n@@ -63,7 +63,7 @@ Unlike the specific release above, which may include updates, this release will\nnot change. To use a specific point release, use the above URL for `URL` in the\nmanual instructions below. For apt, use `${yyyymmdd}.${rc}` for `DIST` below.\n-## From an `apt` repository\n+## Install from an `apt` repository\nFirst, appropriate dependencies must be installed to allow `apt` to install\npackages via https:\n@@ -118,7 +118,7 @@ sudo apt-get update && sudo apt-get install -y runsc\nIf you have Docker installed, it will be automatically configured.\n-## Manually\n+## Install manually\nAfter selecting an appropriate `URL` above, you can download `runsc` directly\nfrom `${URL}/runsc` ([latest][latest-nightly]) and a checksum hash from\n@@ -131,8 +131,9 @@ appropriate location by running:\n(\nset -e\nwget ${URL}/runsc\n- wget ${URL/runsc.sha512\n+ wget ${URL}/runsc.sha512\nsha512sum -c runsc.sha512\n+ rm -f runsc.sha512\nsudo mv runsc /usr/local/bin\nsudo chown root:root /usr/local/bin/runsc\nsudo chmod 0755 /usr/local/bin/runsc\n"
}
] | Go | Apache License 2.0 | google/gvisor | Fixed some review issues
Fixed some issues that came up during review.
- Update section titles to have a full phrase
- Fix ${URL}
- Have short install script clean up after itself so it can be re-run.
- Small wording changes. |
259,853 | 18.10.2019 13:39:12 | 25,200 | 4c7f849b252d43b0d684aeb08e0d54d717fdd7de | test: use a bigger buffer to fill a socket
Otherwise we need to do a lot of system calls and the cooperative_save tests run
slowly. | [
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/socket_unix_non_stream.cc",
"new_path": "test/syscalls/linux/socket_unix_non_stream.cc",
"diff": "@@ -231,11 +231,21 @@ TEST_P(UnixNonStreamSocketPairTest, SendTimeout) {\nsetsockopt(sockets->first_fd(), SOL_SOCKET, SO_SNDTIMEO, &tv, sizeof(tv)),\nSyscallSucceeds());\n- char buf[100] = {};\n+ const int buf_size = 5 * kPageSize;\n+ EXPECT_THAT(setsockopt(sockets->first_fd(), SOL_SOCKET, SO_SNDBUF, &buf_size,\n+ sizeof(buf_size)),\n+ SyscallSucceeds());\n+ EXPECT_THAT(setsockopt(sockets->second_fd(), SOL_SOCKET, SO_RCVBUF, &buf_size,\n+ sizeof(buf_size)),\n+ SyscallSucceeds());\n+\n+ // The buffer size should be big enough to avoid many iterations in the next\n+ // loop. Otherwise, this will slow down cooperative_save tests.\n+ std::vector<char> buf(kPageSize);\nfor (;;) {\nint ret;\nASSERT_THAT(\n- ret = RetryEINTR(send)(sockets->first_fd(), buf, sizeof(buf), 0),\n+ ret = RetryEINTR(send)(sockets->first_fd(), buf.data(), buf.size(), 0),\n::testing::AnyOf(SyscallSucceeds(), SyscallFailsWithErrno(EAGAIN)));\nif (ret == -1) {\nbreak;\n"
}
] | Go | Apache License 2.0 | google/gvisor | test: use a bigger buffer to fill a socket
Otherwise we need to do a lot of system calls and the cooperative_save tests run
slowly.
PiperOrigin-RevId: 275536957 |
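For illustration, here is a minimal standalone Go sketch of the same idea as the test change above — shrink both socket buffers, then fill the connection with page-sized writes until the kernel pushes back. It is not part of the commit: it assumes Linux, uses golang.org/x/sys/unix, and substitutes a nonblocking socketpair plus EAGAIN for the test's SO_SNDTIMEO-based loop; `pageSize` and the helper layout are illustrative.

```go
package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

const pageSize = 4096 // assumed x86-64 page size, mirroring kPageSize in the test

func main() {
	// Datagram unix socketpair, nonblocking so a full buffer surfaces as EAGAIN.
	fds, err := unix.Socketpair(unix.AF_UNIX, unix.SOCK_DGRAM|unix.SOCK_NONBLOCK, 0)
	if err != nil {
		panic(err)
	}
	// Keep the buffers small (5 pages, as in the updated test) so only a
	// handful of writes are needed to fill the socket.
	unix.SetsockoptInt(fds[0], unix.SOL_SOCKET, unix.SO_SNDBUF, 5*pageSize)
	unix.SetsockoptInt(fds[1], unix.SOL_SOCKET, unix.SO_RCVBUF, 5*pageSize)

	buf := make([]byte, pageSize) // one page per write instead of a 100-byte buffer
	writes := 0
	for {
		if _, err := unix.Write(fds[0], buf); err != nil {
			if err == unix.EAGAIN {
				break // socket is full
			}
			panic(err)
		}
		writes++
	}
	fmt.Println("socket filled after", writes, "writes")
}
```

The commit makes the same trade-off: fewer, larger writes reach the "socket full" condition in far fewer syscalls, which matters when every syscall is amplified by the cooperative save/restore cycling in those tests.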
260,023 | 18.10.2019 15:58:40 | 25,200 | 487d3b2358239254e757f4ce3b0cd31615d80e1b | Fix typo while initializing protocol for UDP endpoints.
Fixes | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/udp/endpoint.go",
"new_path": "pkg/tcpip/transport/udp/endpoint.go",
"diff": "@@ -140,7 +140,7 @@ func newEndpoint(s *stack.Stack, netProto tcpip.NetworkProtocolNumber, waiterQue\nstack: s,\nTransportEndpointInfo: stack.TransportEndpointInfo{\nNetProto: netProto,\n- TransProto: header.TCPProtocolNumber,\n+ TransProto: header.UDPProtocolNumber,\n},\nwaiterQueue: waiterQueue,\n// RFC 1075 section 5.4 recommends a TTL of 1 for membership\n"
}
] | Go | Apache License 2.0 | google/gvisor | Fix typo while initializing protocol for UDP endpoints.
Fixes #763
PiperOrigin-RevId: 275563222 |
259,992 | 18.10.2019 16:13:04 | 25,200 | 74044f2cca498ed3144baccc5d3b7af534829966 | Add more instructions to test/README.md | [
{
"change_type": "MODIFY",
"old_path": "test/README.md",
"new_path": "test/README.md",
"diff": "@@ -10,9 +10,31 @@ they may need extra setup in the test machine and extra configuration to run.\nfunctionality.\n- **image:** basic end to end test for popular images. These require the same\nsetup as integration tests.\n-- **root:** tests that require to be run as root.\n+- **root:** tests that require to be run as root. These require the same setup\n+ as integration tests.\n- **util:** utilities library to support the tests.\nFor the above noted cases, the relevant runtime must be installed via `runsc\n-install` before running. This is handled automatically by the test scripts in\n-the `kokoro` directory.\n+install` before running. Just note that they require specific configuration to\n+work. This is handled automatically by the test scripts in the `scripts`\n+directory and they can be used to run tests locally on your machine. They are\n+also used to run these tests in `kokoro`.\n+\n+**Example:**\n+\n+To run image and integration tests, run:\n+\n+`./scripts/docker_test.sh`\n+\n+To run root tests, run:\n+\n+`./scripts/root_test.sh`\n+\n+There are a few other interesting variations for image and integration tests:\n+\n+* overlay: sets writable overlay inside the sentry\n+* hostnet: configures host network pass-thru, instead of netstack\n+* kvm: runsc the test using the KVM platform, instead of ptrace\n+\n+The test will build runsc, configure it with your local docker, restart\n+`dockerd`, and run tests. The location for runsc logs is printed to the output.\n"
},
{
"change_type": "MODIFY",
"old_path": "test/e2e/exec_test.go",
"new_path": "test/e2e/exec_test.go",
"diff": "@@ -109,7 +109,7 @@ func TestExecPrivileged(t *testing.T) {\nt.Logf(\"Exec CapEff: %v\", got)\nwant := fmt.Sprintf(\"CapEff:\\t%016x\\n\", specutils.AllCapabilitiesUint64()&^bits.MaskOf64(int(linux.CAP_NET_RAW)))\nif got != want {\n- t.Errorf(\"wrong capabilities, got: %q, want: %q\", got, want)\n+ t.Errorf(\"Wrong capabilities, got: %q, want: %q. Make sure runsc is not using '--net-raw'\", got, want)\n}\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add more instructions to test/README.md
PiperOrigin-RevId: 275565958 |
259,891 | 19.10.2019 11:48:09 | 25,200 | 652f7b1d0fef6f522baaed091d4820a48014092c | Add support for pipes in VFS2. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/memfs/BUILD",
"new_path": "pkg/sentry/fsimpl/memfs/BUILD",
"diff": "@@ -24,14 +24,18 @@ go_library(\n\"directory.go\",\n\"filesystem.go\",\n\"memfs.go\",\n+ \"named_pipe.go\",\n\"regular_file.go\",\n\"symlink.go\",\n],\nimportpath = \"gvisor.dev/gvisor/pkg/sentry/fsimpl/memfs\",\ndeps = [\n\"//pkg/abi/linux\",\n+ \"//pkg/amutex\",\n+ \"//pkg/sentry/arch\",\n\"//pkg/sentry/context\",\n\"//pkg/sentry/kernel/auth\",\n+ \"//pkg/sentry/kernel/pipe\",\n\"//pkg/sentry/usermem\",\n\"//pkg/sentry/vfs\",\n\"//pkg/syserror\",\n@@ -54,3 +58,19 @@ go_test(\n\"//pkg/syserror\",\n],\n)\n+\n+go_test(\n+ name = \"memfs_test\",\n+ size = \"small\",\n+ srcs = [\"pipe_test.go\"],\n+ embed = [\":memfs\"],\n+ deps = [\n+ \"//pkg/abi/linux\",\n+ \"//pkg/sentry/context\",\n+ \"//pkg/sentry/context/contexttest\",\n+ \"//pkg/sentry/kernel/auth\",\n+ \"//pkg/sentry/usermem\",\n+ \"//pkg/sentry/vfs\",\n+ \"//pkg/syserror\",\n+ ],\n+)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/memfs/filesystem.go",
"new_path": "pkg/sentry/fsimpl/memfs/filesystem.go",
"diff": "@@ -233,7 +233,7 @@ func (fs *filesystem) MknodAt(ctx context.Context, rp *vfs.ResolvingPath, opts v\nif err != nil {\nreturn err\n}\n- _, err = checkCreateLocked(rp, parentVFSD, parentInode)\n+ pc, err := checkCreateLocked(rp, parentVFSD, parentInode)\nif err != nil {\nreturn err\n}\n@@ -241,8 +241,40 @@ func (fs *filesystem) MknodAt(ctx context.Context, rp *vfs.ResolvingPath, opts v\nreturn err\n}\ndefer rp.Mount().EndWrite()\n- // TODO: actually implement mknod\n+\n+ switch opts.Mode.FileType() {\n+ case 0:\n+ // \"Zero file type is equivalent to type S_IFREG.\" - mknod(2)\n+ fallthrough\n+ case linux.ModeRegular:\n+ // TODO(b/138862511): Implement.\n+ return syserror.EINVAL\n+\n+ case linux.ModeNamedPipe:\n+ child := fs.newDentry(fs.newNamedPipe(rp.Credentials(), opts.Mode))\n+ parentVFSD.InsertChild(&child.vfsd, pc)\n+ parentInode.impl.(*directory).childList.PushBack(child)\n+ return nil\n+\n+ case linux.ModeSocket:\n+ // TODO(b/138862511): Implement.\n+ return syserror.EINVAL\n+\n+ case linux.ModeCharacterDevice:\n+ fallthrough\n+ case linux.ModeBlockDevice:\n+ // TODO(b/72101894): We don't support creating block or character\n+ // devices at the moment.\n+ //\n+ // When we start supporting block and character devices, we'll\n+ // need to check for CAP_MKNOD here.\nreturn syserror.EPERM\n+\n+ default:\n+ // \"EINVAL - mode requested creation of something other than a\n+ // regular file, device special file, FIFO or socket.\" - mknod(2)\n+ return syserror.EINVAL\n+ }\n}\n// OpenAt implements vfs.FilesystemImpl.OpenAt.\n@@ -250,8 +282,9 @@ func (fs *filesystem) OpenAt(ctx context.Context, rp *vfs.ResolvingPath, opts vf\n// Filter out flags that are not supported by memfs. O_DIRECTORY and\n// O_NOFOLLOW have no effect here (they're handled by VFS by setting\n// appropriate bits in rp), but are returned by\n- // FileDescriptionImpl.StatusFlags().\n- opts.Flags &= linux.O_ACCMODE | linux.O_CREAT | linux.O_EXCL | linux.O_TRUNC | linux.O_DIRECTORY | linux.O_NOFOLLOW\n+ // FileDescriptionImpl.StatusFlags(). 
O_NONBLOCK is supported only by\n+ // pipes.\n+ opts.Flags &= linux.O_ACCMODE | linux.O_CREAT | linux.O_EXCL | linux.O_TRUNC | linux.O_DIRECTORY | linux.O_NOFOLLOW | linux.O_NONBLOCK\nif opts.Flags&linux.O_CREAT == 0 {\nfs.mu.RLock()\n@@ -260,7 +293,7 @@ func (fs *filesystem) OpenAt(ctx context.Context, rp *vfs.ResolvingPath, opts vf\nif err != nil {\nreturn nil, err\n}\n- return inode.open(rp, vfsd, opts.Flags, false)\n+ return inode.open(ctx, rp, vfsd, opts.Flags, false)\n}\nmustCreate := opts.Flags&linux.O_EXCL != 0\n@@ -275,7 +308,7 @@ func (fs *filesystem) OpenAt(ctx context.Context, rp *vfs.ResolvingPath, opts vf\nif mustCreate {\nreturn nil, syserror.EEXIST\n}\n- return inode.open(rp, vfsd, opts.Flags, false)\n+ return inode.open(ctx, rp, vfsd, opts.Flags, false)\n}\nafterTrailingSymlink:\n// Walk to the parent directory of the last path component.\n@@ -320,7 +353,7 @@ afterTrailingSymlink:\nchild := fs.newDentry(childInode)\nvfsd.InsertChild(&child.vfsd, pc)\ninode.impl.(*directory).childList.PushBack(child)\n- return childInode.open(rp, &child.vfsd, opts.Flags, true)\n+ return childInode.open(ctx, rp, &child.vfsd, opts.Flags, true)\n}\n// Open existing file or follow symlink.\nif mustCreate {\n@@ -336,10 +369,10 @@ afterTrailingSymlink:\n// symlink target.\ngoto afterTrailingSymlink\n}\n- return childInode.open(rp, childVFSD, opts.Flags, false)\n+ return childInode.open(ctx, rp, childVFSD, opts.Flags, false)\n}\n-func (i *inode) open(rp *vfs.ResolvingPath, vfsd *vfs.Dentry, flags uint32, afterCreate bool) (*vfs.FileDescription, error) {\n+func (i *inode) open(ctx context.Context, rp *vfs.ResolvingPath, vfsd *vfs.Dentry, flags uint32, afterCreate bool) (*vfs.FileDescription, error) {\nats := vfs.AccessTypesForOpenFlags(flags)\nif !afterCreate {\nif err := i.checkPermissions(rp.Credentials(), ats, i.isDir()); err != nil {\n@@ -378,6 +411,8 @@ func (i *inode) open(rp *vfs.ResolvingPath, vfsd *vfs.Dentry, flags uint32, afte\ncase *symlink:\n// Can't open symlinks without O_PATH (which is unimplemented).\nreturn nil, syserror.ELOOP\n+ case *namedPipe:\n+ return newNamedPipeFD(ctx, impl, rp, vfsd, flags)\ndefault:\npanic(fmt.Sprintf(\"unknown inode type: %T\", i.impl))\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/memfs/memfs.go",
"new_path": "pkg/sentry/fsimpl/memfs/memfs.go",
"diff": "@@ -227,6 +227,8 @@ func (i *inode) statTo(stat *linux.Statx) {\nstat.Mask |= linux.STATX_SIZE | linux.STATX_BLOCKS\nstat.Size = uint64(len(impl.target))\nstat.Blocks = allocatedBlocksForSize(stat.Size)\n+ case *namedPipe:\n+ stat.Mode |= linux.S_IFIFO\ndefault:\npanic(fmt.Sprintf(\"unknown inode type: %T\", i.impl))\n}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "pkg/sentry/fsimpl/memfs/named_pipe.go",
"diff": "+// Copyright 2019 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package memfs\n+\n+import (\n+ \"gvisor.dev/gvisor/pkg/abi/linux\"\n+ \"gvisor.dev/gvisor/pkg/sentry/context\"\n+ \"gvisor.dev/gvisor/pkg/sentry/kernel/auth\"\n+ \"gvisor.dev/gvisor/pkg/sentry/kernel/pipe\"\n+ \"gvisor.dev/gvisor/pkg/sentry/usermem\"\n+ \"gvisor.dev/gvisor/pkg/sentry/vfs\"\n+)\n+\n+type namedPipe struct {\n+ inode inode\n+\n+ pipe *pipe.VFSPipe\n+}\n+\n+// Preconditions:\n+// * fs.mu must be locked.\n+// * rp.Mount().CheckBeginWrite() has been called successfully.\n+func (fs *filesystem) newNamedPipe(creds *auth.Credentials, mode linux.FileMode) *inode {\n+ file := &namedPipe{pipe: pipe.NewVFSPipe(pipe.DefaultPipeSize, usermem.PageSize)}\n+ file.inode.init(file, fs, creds, mode)\n+ file.inode.nlink = 1 // Only the parent has a link.\n+ return &file.inode\n+}\n+\n+// namedPipeFD implements vfs.FileDescriptionImpl. Methods are implemented\n+// entirely via struct embedding.\n+type namedPipeFD struct {\n+ fileDescription\n+\n+ *pipe.VFSPipeFD\n+}\n+\n+func newNamedPipeFD(ctx context.Context, np *namedPipe, rp *vfs.ResolvingPath, vfsd *vfs.Dentry, flags uint32) (*vfs.FileDescription, error) {\n+ var err error\n+ var fd namedPipeFD\n+ fd.VFSPipeFD, err = np.pipe.NewVFSPipeFD(ctx, rp, vfsd, &fd.vfsfd, flags)\n+ if err != nil {\n+ return nil, err\n+ }\n+ fd.vfsfd.Init(&fd, rp.Mount(), vfsd)\n+ return &fd.vfsfd, nil\n+}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "pkg/sentry/fsimpl/memfs/pipe_test.go",
"diff": "+// Copyright 2019 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package memfs\n+\n+import (\n+ \"bytes\"\n+ \"testing\"\n+\n+ \"gvisor.dev/gvisor/pkg/abi/linux\"\n+ \"gvisor.dev/gvisor/pkg/sentry/context\"\n+ \"gvisor.dev/gvisor/pkg/sentry/context/contexttest\"\n+ \"gvisor.dev/gvisor/pkg/sentry/kernel/auth\"\n+ \"gvisor.dev/gvisor/pkg/sentry/usermem\"\n+ \"gvisor.dev/gvisor/pkg/sentry/vfs\"\n+ \"gvisor.dev/gvisor/pkg/syserror\"\n+)\n+\n+const fileName = \"mypipe\"\n+\n+func TestSeparateFDs(t *testing.T) {\n+ ctx, creds, vfsObj, root := setup(t)\n+ defer root.DecRef()\n+\n+ // Open the read side. This is done in a concurrently because opening\n+ // One end the pipe blocks until the other end is opened.\n+ pop := vfs.PathOperation{\n+ Root: root,\n+ Start: root,\n+ Pathname: fileName,\n+ FollowFinalSymlink: true,\n+ }\n+ rfdchan := make(chan *vfs.FileDescription)\n+ go func() {\n+ openOpts := vfs.OpenOptions{Flags: linux.O_RDONLY}\n+ rfd, _ := vfsObj.OpenAt(ctx, creds, &pop, &openOpts)\n+ rfdchan <- rfd\n+ }()\n+\n+ // Open the write side.\n+ openOpts := vfs.OpenOptions{Flags: linux.O_WRONLY}\n+ wfd, err := vfsObj.OpenAt(ctx, creds, &pop, &openOpts)\n+ if err != nil {\n+ t.Fatalf(\"failed to open pipe for writing %q: %v\", fileName, err)\n+ }\n+ defer wfd.DecRef()\n+\n+ rfd, ok := <-rfdchan\n+ if !ok {\n+ t.Fatalf(\"failed to open pipe for reading %q\", fileName)\n+ }\n+ defer rfd.DecRef()\n+\n+ const msg = \"vamos azul\"\n+ checkEmpty(ctx, t, rfd)\n+ checkWrite(ctx, t, wfd, msg)\n+ checkRead(ctx, t, rfd, msg)\n+}\n+\n+func TestNonblockingRead(t *testing.T) {\n+ ctx, creds, vfsObj, root := setup(t)\n+ defer root.DecRef()\n+\n+ // Open the read side as nonblocking.\n+ pop := vfs.PathOperation{\n+ Root: root,\n+ Start: root,\n+ Pathname: fileName,\n+ FollowFinalSymlink: true,\n+ }\n+ openOpts := vfs.OpenOptions{Flags: linux.O_RDONLY | linux.O_NONBLOCK}\n+ rfd, err := vfsObj.OpenAt(ctx, creds, &pop, &openOpts)\n+ if err != nil {\n+ t.Fatalf(\"failed to open pipe for reading %q: %v\", fileName, err)\n+ }\n+ defer rfd.DecRef()\n+\n+ // Open the write side.\n+ openOpts = vfs.OpenOptions{Flags: linux.O_WRONLY}\n+ wfd, err := vfsObj.OpenAt(ctx, creds, &pop, &openOpts)\n+ if err != nil {\n+ t.Fatalf(\"failed to open pipe for writing %q: %v\", fileName, err)\n+ }\n+ defer wfd.DecRef()\n+\n+ const msg = \"geh blau\"\n+ checkEmpty(ctx, t, rfd)\n+ checkWrite(ctx, t, wfd, msg)\n+ checkRead(ctx, t, rfd, msg)\n+}\n+\n+func TestNonblockingWriteError(t *testing.T) {\n+ ctx, creds, vfsObj, root := setup(t)\n+ defer root.DecRef()\n+\n+ // Open the write side as nonblocking, which should return ENXIO.\n+ pop := vfs.PathOperation{\n+ Root: root,\n+ Start: root,\n+ Pathname: fileName,\n+ FollowFinalSymlink: true,\n+ }\n+ openOpts := vfs.OpenOptions{Flags: linux.O_WRONLY | linux.O_NONBLOCK}\n+ _, err := vfsObj.OpenAt(ctx, creds, &pop, &openOpts)\n+ if err != syserror.ENXIO {\n+ t.Fatalf(\"expected ENXIO, but got error: %v\", err)\n+ }\n+}\n+\n+func 
TestSingleFD(t *testing.T) {\n+ ctx, creds, vfsObj, root := setup(t)\n+ defer root.DecRef()\n+\n+ // Open the pipe as readable and writable.\n+ pop := vfs.PathOperation{\n+ Root: root,\n+ Start: root,\n+ Pathname: fileName,\n+ FollowFinalSymlink: true,\n+ }\n+ openOpts := vfs.OpenOptions{Flags: linux.O_RDWR}\n+ fd, err := vfsObj.OpenAt(ctx, creds, &pop, &openOpts)\n+ if err != nil {\n+ t.Fatalf(\"failed to open pipe for writing %q: %v\", fileName, err)\n+ }\n+ defer fd.DecRef()\n+\n+ const msg = \"forza blu\"\n+ checkEmpty(ctx, t, fd)\n+ checkWrite(ctx, t, fd, msg)\n+ checkRead(ctx, t, fd, msg)\n+}\n+\n+// setup creates a VFS with a pipe in the root directory at path fileName. The\n+// returned VirtualDentry must be DecRef()'d be the caller. It calls t.Fatal\n+// upon failure.\n+func setup(t *testing.T) (context.Context, *auth.Credentials, *vfs.VirtualFilesystem, vfs.VirtualDentry) {\n+ ctx := contexttest.Context(t)\n+ creds := auth.CredentialsFromContext(ctx)\n+\n+ // Create VFS.\n+ vfsObj := vfs.New()\n+ vfsObj.MustRegisterFilesystemType(\"memfs\", FilesystemType{})\n+ mntns, err := vfsObj.NewMountNamespace(ctx, creds, \"\", \"memfs\", &vfs.NewFilesystemOptions{})\n+ if err != nil {\n+ t.Fatalf(\"failed to create tmpfs root mount: %v\", err)\n+ }\n+\n+ // Create the pipe.\n+ root := mntns.Root()\n+ pop := vfs.PathOperation{\n+ Root: root,\n+ Start: root,\n+ Pathname: fileName,\n+ FollowFinalSymlink: true,\n+ }\n+ mknodOpts := vfs.MknodOptions{Mode: linux.ModeNamedPipe | 0644}\n+ if err := vfsObj.MknodAt(ctx, creds, &pop, &mknodOpts); err != nil {\n+ t.Fatalf(\"failed to create file %q: %v\", fileName, err)\n+ }\n+\n+ // Sanity check: the file pipe exists and has the correct mode.\n+ stat, err := vfsObj.StatAt(ctx, creds, &vfs.PathOperation{\n+ Root: root,\n+ Start: root,\n+ Pathname: fileName,\n+ FollowFinalSymlink: true,\n+ }, &vfs.StatOptions{})\n+ if err != nil {\n+ t.Fatalf(\"stat(%q) failed: %v\", fileName, err)\n+ }\n+ if stat.Mode&^linux.S_IFMT != 0644 {\n+ t.Errorf(\"got wrong permissions (%0o)\", stat.Mode)\n+ }\n+ if stat.Mode&linux.S_IFMT != linux.ModeNamedPipe {\n+ t.Errorf(\"got wrong file type (%0o)\", stat.Mode)\n+ }\n+\n+ return ctx, creds, vfsObj, root\n+}\n+\n+// checkEmpty calls t.Fatal if the pipe in fd is not empty.\n+func checkEmpty(ctx context.Context, t *testing.T, fd *vfs.FileDescription) {\n+ readData := make([]byte, 1)\n+ dst := usermem.BytesIOSequence(readData)\n+ bytesRead, err := fd.Impl().Read(ctx, dst, vfs.ReadOptions{})\n+ if err != syserror.ErrWouldBlock {\n+ t.Fatalf(\"expected ErrWouldBlock reading from empty pipe %q, but got: %v\", fileName, err)\n+ }\n+ if bytesRead != 0 {\n+ t.Fatalf(\"expected to read 0 bytes, but got %d\", bytesRead)\n+ }\n+}\n+\n+// checkWrite calls t.Fatal if it fails to write all of msg to fd.\n+func checkWrite(ctx context.Context, t *testing.T, fd *vfs.FileDescription, msg string) {\n+ writeData := []byte(msg)\n+ src := usermem.BytesIOSequence(writeData)\n+ bytesWritten, err := fd.Impl().Write(ctx, src, vfs.WriteOptions{})\n+ if err != nil {\n+ t.Fatalf(\"error writing to pipe %q: %v\", fileName, err)\n+ }\n+ if bytesWritten != int64(len(writeData)) {\n+ t.Fatalf(\"expected to write %d bytes, but wrote %d\", len(writeData), bytesWritten)\n+ }\n+}\n+\n+// checkRead calls t.Fatal if it fails to read msg from fd.\n+func checkRead(ctx context.Context, t *testing.T, fd *vfs.FileDescription, msg string) {\n+ readData := make([]byte, len(msg))\n+ dst := usermem.BytesIOSequence(readData)\n+ bytesRead, err := fd.Impl().Read(ctx, dst, 
vfs.ReadOptions{})\n+ if err != nil {\n+ t.Fatalf(\"error reading from pipe %q: %v\", fileName, err)\n+ }\n+ if bytesRead != int64(len(msg)) {\n+ t.Fatalf(\"expected to read %d bytes, but got %d\", len(msg), bytesRead)\n+ }\n+ if !bytes.Equal(readData, []byte(msg)) {\n+ t.Fatalf(\"expected to read %q from pipe, but got %q\", msg, string(readData))\n+ }\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/pipe/vfs.go",
"new_path": "pkg/sentry/kernel/pipe/vfs.go",
"diff": "@@ -182,7 +182,7 @@ func (fd *VFSPipeFD) Release() {\n}\n// OnClose implements vfs.FileDescriptionImpl.OnClose.\n-func (fd *VFSPipeFD) OnClose() error {\n+func (fd *VFSPipeFD) OnClose(_ context.Context) error {\nreturn nil\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/vfs/syscalls.go",
"new_path": "pkg/sentry/vfs/syscalls.go",
"diff": "@@ -96,6 +96,26 @@ func (vfs *VirtualFilesystem) MkdirAt(ctx context.Context, creds *auth.Credentia\n}\n}\n+// MknodAt creates a file of the given mode at the given path. It returns an\n+// error from the syserror package.\n+func (vfs *VirtualFilesystem) MknodAt(ctx context.Context, creds *auth.Credentials, pop *PathOperation, opts *MknodOptions) error {\n+ rp, err := vfs.getResolvingPath(creds, pop)\n+ if err != nil {\n+ return nil\n+ }\n+ for {\n+ if err = rp.mount.fs.impl.MknodAt(ctx, rp, *opts); err == nil {\n+ vfs.putResolvingPath(rp)\n+ return nil\n+ }\n+ // Handle mount traversals.\n+ if !rp.handleError(err) {\n+ vfs.putResolvingPath(rp)\n+ return err\n+ }\n+ }\n+}\n+\n// OpenAt returns a FileDescription providing access to the file at the given\n// path. A reference is taken on the returned FileDescription.\nfunc (vfs *VirtualFilesystem) OpenAt(ctx context.Context, creds *auth.Credentials, pop *PathOperation, opts *OpenOptions) (*FileDescription, error) {\n@@ -198,8 +218,6 @@ func (fd *FileDescription) SetStatusFlags(ctx context.Context, flags uint32) err\n//\n// - VFS.LinkAt()\n//\n-// - VFS.MknodAt()\n-//\n// - VFS.ReadlinkAt()\n//\n// - VFS.RenameAt()\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add support for pipes in VFS2.
PiperOrigin-RevId: 275650307 |
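The new tests above exercise classic FIFO semantics through VFS2's MknodAt/OpenAt. As a rough illustration of those same semantics — a blocking open of one end waits for the peer, and a nonblocking write-only open with no reader fails with ENXIO — here is a small standalone Go sketch. It is an assumption-laden stand-in: it runs against the host kernel with mkfifo via golang.org/x/sys/unix rather than through the sentry's memfs/VFS2 code, and the file names are arbitrary.

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"

	"golang.org/x/sys/unix"
)

func main() {
	dir, err := os.MkdirTemp("", "fifo-demo")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)
	path := filepath.Join(dir, "mypipe")

	// Equivalent of mknod(path, S_IFIFO|0644) — the case memfs' MknodAt now handles.
	if err := unix.Mkfifo(path, 0644); err != nil {
		panic(err)
	}

	// Nonblocking write-only open with no reader yet: expect ENXIO,
	// matching TestNonblockingWriteError above.
	if _, err := unix.Open(path, unix.O_WRONLY|unix.O_NONBLOCK, 0); err != unix.ENXIO {
		panic(fmt.Sprintf("expected ENXIO, got %v", err))
	}

	// A blocking open of one end waits for the other, so open the read side
	// concurrently before opening the writer (compare TestSeparateFDs).
	rc := make(chan *os.File)
	go func() {
		r, err := os.OpenFile(path, os.O_RDONLY, 0)
		if err != nil {
			panic(err)
		}
		rc <- r
	}()
	w, err := os.OpenFile(path, os.O_WRONLY, 0)
	if err != nil {
		panic(err)
	}
	r := <-rc

	w.WriteString("vamos azul")
	buf := make([]byte, 16)
	n, _ := r.Read(buf)
	fmt.Println(string(buf[:n])) // "vamos azul"
}
```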
259,851 | 20.10.2019 05:19:58 | -19,080 | 53e921eb38e38867363f2203d2f883dadf3b5a2d | spell corrected | [
{
"change_type": "MODIFY",
"old_path": "content/docs/user_guide/checkpoint_restore.md",
"new_path": "content/docs/user_guide/checkpoint_restore.md",
"diff": "@@ -19,7 +19,7 @@ the directory path within which the checkpoint state-file will be created. The\nfile will be called `checkpoint.img` and necessary directories will be created\nif they do not yet exist.\n-> Note: Two checkpoints cannot be saved to the save directory; every image-path\n+> Note: Two checkpoints cannot be saved to the same directory; every image-path\nprovided must be unique.\n```bash\n"
}
] | Go | Apache License 2.0 | google/gvisor | spell corrected |
259,860 | 21.10.2019 14:53:44 | 25,200 | 0b569b7caebc2b7785732ed6cb8248cf0568783f | Add basic implementation of execveat syscall and associated tests.
Allow file descriptors of directories as well as AT_FDCWD. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/syscalls/linux/linux64_amd64.go",
"new_path": "pkg/sentry/syscalls/linux/linux64_amd64.go",
"diff": "@@ -362,7 +362,7 @@ var AMD64 = &kernel.SyscallTable{\n319: syscalls.Supported(\"memfd_create\", MemfdCreate),\n320: syscalls.CapError(\"kexec_file_load\", linux.CAP_SYS_BOOT, \"\", nil),\n321: syscalls.CapError(\"bpf\", linux.CAP_SYS_ADMIN, \"\", nil),\n- 322: syscalls.ErrorWithEvent(\"execveat\", syserror.ENOSYS, \"\", []string{\"gvisor.dev/issue/265\"}), // TODO(b/118901836)\n+ 322: syscalls.PartiallySupported(\"execveat\", Execveat, \"No support for AT_EMPTY_PATH, AT_SYMLINK_FOLLOW.\", nil),\n323: syscalls.ErrorWithEvent(\"userfaultfd\", syserror.ENOSYS, \"\", []string{\"gvisor.dev/issue/266\"}), // TODO(b/118906345)\n324: syscalls.ErrorWithEvent(\"membarrier\", syserror.ENOSYS, \"\", []string{\"gvisor.dev/issue/267\"}), // TODO(b/118904897)\n325: syscalls.PartiallySupported(\"mlock2\", Mlock2, \"Stub implementation. The sandbox lacks appropriate permissions.\", nil),\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/syscalls/linux/sys_thread.go",
"new_path": "pkg/sentry/syscalls/linux/sys_thread.go",
"diff": "package linux\nimport (\n+ \"path\"\n\"syscall\"\n\"gvisor.dev/gvisor/pkg/abi/linux\"\n\"gvisor.dev/gvisor/pkg/sentry/arch\"\n+ \"gvisor.dev/gvisor/pkg/sentry/fs\"\n\"gvisor.dev/gvisor/pkg/sentry/kernel\"\n\"gvisor.dev/gvisor/pkg/sentry/kernel/sched\"\n\"gvisor.dev/gvisor/pkg/sentry/usermem\"\n@@ -67,8 +69,22 @@ func Execve(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscal\nargvAddr := args[1].Pointer()\nenvvAddr := args[2].Pointer()\n- // Extract our arguments.\n- filename, err := t.CopyInString(filenameAddr, linux.PATH_MAX)\n+ return execveat(t, linux.AT_FDCWD, filenameAddr, argvAddr, envvAddr, 0)\n+}\n+\n+// Execveat implements linux syscall execveat(2).\n+func Execveat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {\n+ dirFD := args[0].Int()\n+ pathnameAddr := args[1].Pointer()\n+ argvAddr := args[2].Pointer()\n+ envvAddr := args[3].Pointer()\n+ flags := args[4].Int()\n+\n+ return execveat(t, dirFD, pathnameAddr, argvAddr, envvAddr, flags)\n+}\n+\n+func execveat(t *kernel.Task, dirFD int32, pathnameAddr, argvAddr, envvAddr usermem.Addr, flags int32) (uintptr, *kernel.SyscallControl, error) {\n+ pathname, err := t.CopyInString(pathnameAddr, linux.PATH_MAX)\nif err != nil {\nreturn 0, nil, err\n}\n@@ -89,14 +105,38 @@ func Execve(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscal\n}\n}\n+ if flags != 0 {\n+ // TODO(b/128449944): Handle AT_EMPTY_PATH and AT_SYMLINK_NOFOLLOW.\n+ t.Kernel().EmitUnimplementedEvent(t)\n+ return 0, nil, syserror.ENOSYS\n+ }\n+\nroot := t.FSContext().RootDirectory()\ndefer root.DecRef()\n- wd := t.FSContext().WorkingDirectory()\n+\n+ var wd *fs.Dirent\n+ if dirFD == linux.AT_FDCWD || path.IsAbs(pathname) {\n+ // If pathname is absolute, LoadTaskImage() will ignore the wd.\n+ wd = t.FSContext().WorkingDirectory()\n+ } else {\n+ // Need to extract the given FD.\n+ f := t.GetFile(dirFD)\n+ if f == nil {\n+ return 0, nil, syserror.EBADF\n+ }\n+ defer f.DecRef()\n+\n+ wd = f.Dirent\n+ wd.IncRef()\n+ if !fs.IsDir(wd.Inode.StableAttr) {\n+ return 0, nil, syserror.ENOTDIR\n+ }\n+ }\ndefer wd.DecRef()\n// Load the new TaskContext.\nmaxTraversals := uint(linux.MaxSymlinkTraversals)\n- tc, se := t.Kernel().LoadTaskImage(t, t.MountNamespace(), root, wd, &maxTraversals, filename, nil, argv, envv, t.Arch().FeatureSet())\n+ tc, se := t.Kernel().LoadTaskImage(t, t.MountNamespace(), root, wd, &maxTraversals, pathname, nil, argv, envv, t.Arch().FeatureSet())\nif se != nil {\nreturn 0, nil, se.ToError()\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/BUILD",
"new_path": "test/syscalls/linux/BUILD",
"diff": "@@ -670,6 +670,7 @@ cc_binary(\n\"//test/util:thread_util\",\n\"@com_google_absl//absl/strings\",\n\"@com_google_absl//absl/synchronization\",\n+ \"@com_google_absl//absl/types:optional\",\n\"@com_google_googletest//:gtest\",\n],\n)\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/exec.cc",
"new_path": "test/syscalls/linux/exec.cc",
"diff": "#include \"absl/strings/str_split.h\"\n#include \"absl/strings/string_view.h\"\n#include \"absl/synchronization/mutex.h\"\n+#include \"absl/types/optional.h\"\n#include \"test/util/file_descriptor.h\"\n#include \"test/util/fs_util.h\"\n#include \"test/util/multiprocess_util.h\"\n@@ -68,11 +69,12 @@ constexpr char kExit42[] = \"--exec_exit_42\";\nconstexpr char kExecWithThread[] = \"--exec_exec_with_thread\";\nconstexpr char kExecFromThread[] = \"--exec_exec_from_thread\";\n-// Runs filename with argv and checks that the exit status is expect_status and\n-// that stderr contains expect_stderr.\n-void CheckOutput(const std::string& filename, const ExecveArray& argv,\n- const ExecveArray& envv, int expect_status,\n- const std::string& expect_stderr) {\n+// Runs file specified by dirfd and pathname with argv and checks that the exit\n+// status is expect_status and that stderr contains expect_stderr.\n+void CheckExecHelper(const absl::optional<int32_t> dirfd,\n+ const std::string& pathname, const ExecveArray& argv,\n+ const ExecveArray& envv, const int flags,\n+ int expect_status, const std::string& expect_stderr) {\nint pipe_fds[2];\nASSERT_THAT(pipe2(pipe_fds, O_CLOEXEC), SyscallSucceeds());\n@@ -110,8 +112,15 @@ void CheckOutput(const std::string& filename, const ExecveArray& argv,\n// CloexecEventfd depend on that not happening.\n};\n- auto kill = ASSERT_NO_ERRNO_AND_VALUE(\n- ForkAndExec(filename, argv, envv, remap_stderr, &child, &execve_errno));\n+ Cleanup kill;\n+ if (dirfd.has_value()) {\n+ kill = ASSERT_NO_ERRNO_AND_VALUE(ForkAndExecveat(*dirfd, pathname, argv,\n+ envv, flags, remap_stderr,\n+ &child, &execve_errno));\n+ } else {\n+ kill = ASSERT_NO_ERRNO_AND_VALUE(\n+ ForkAndExec(pathname, argv, envv, remap_stderr, &child, &execve_errno));\n+ }\nASSERT_EQ(0, execve_errno);\n@@ -140,6 +149,21 @@ void CheckOutput(const std::string& filename, const ExecveArray& argv,\nEXPECT_TRUE(absl::StrContains(output, expect_stderr)) << output;\n}\n+void CheckExec(const std::string& filename, const ExecveArray& argv,\n+ const ExecveArray& envv, int expect_status,\n+ const std::string& expect_stderr) {\n+ CheckExecHelper(/*dirfd=*/absl::optional<int32_t>(), filename, argv, envv,\n+ /*flags=*/0, expect_status, expect_stderr);\n+}\n+\n+void CheckExecveat(const int32_t dirfd, const std::string& pathname,\n+ const ExecveArray& argv, const ExecveArray& envv,\n+ const int flags, int expect_status,\n+ const std::string& expect_stderr) {\n+ CheckExecHelper(absl::optional<int32_t>(dirfd), pathname, argv, envv, flags,\n+ expect_status, expect_stderr);\n+}\n+\nTEST(ExecTest, EmptyPath) {\nint execve_errno;\nASSERT_NO_ERRNO_AND_VALUE(ForkAndExec(\"\", {}, {}, nullptr, &execve_errno));\n@@ -147,45 +171,44 @@ TEST(ExecTest, EmptyPath) {\n}\nTEST(ExecTest, Basic) {\n- CheckOutput(WorkloadPath(kBasicWorkload), {WorkloadPath(kBasicWorkload)}, {},\n+ CheckExec(WorkloadPath(kBasicWorkload), {WorkloadPath(kBasicWorkload)}, {},\nArgEnvExitStatus(0, 0),\nabsl::StrCat(WorkloadPath(kBasicWorkload), \"\\n\"));\n}\nTEST(ExecTest, OneArg) {\n- CheckOutput(WorkloadPath(kBasicWorkload), {WorkloadPath(kBasicWorkload), \"1\"},\n+ CheckExec(WorkloadPath(kBasicWorkload), {WorkloadPath(kBasicWorkload), \"1\"},\n{}, ArgEnvExitStatus(1, 0),\nabsl::StrCat(WorkloadPath(kBasicWorkload), \"\\n1\\n\"));\n}\nTEST(ExecTest, FiveArg) {\n- CheckOutput(WorkloadPath(kBasicWorkload),\n+ CheckExec(WorkloadPath(kBasicWorkload),\n{WorkloadPath(kBasicWorkload), \"1\", \"2\", \"3\", \"4\", \"5\"}, {},\nArgEnvExitStatus(5, 
0),\nabsl::StrCat(WorkloadPath(kBasicWorkload), \"\\n1\\n2\\n3\\n4\\n5\\n\"));\n}\nTEST(ExecTest, OneEnv) {\n- CheckOutput(WorkloadPath(kBasicWorkload), {WorkloadPath(kBasicWorkload)},\n- {\"1\"}, ArgEnvExitStatus(0, 1),\n+ CheckExec(WorkloadPath(kBasicWorkload), {WorkloadPath(kBasicWorkload)}, {\"1\"},\n+ ArgEnvExitStatus(0, 1),\nabsl::StrCat(WorkloadPath(kBasicWorkload), \"\\n1\\n\"));\n}\nTEST(ExecTest, FiveEnv) {\n- CheckOutput(WorkloadPath(kBasicWorkload), {WorkloadPath(kBasicWorkload)},\n+ CheckExec(WorkloadPath(kBasicWorkload), {WorkloadPath(kBasicWorkload)},\n{\"1\", \"2\", \"3\", \"4\", \"5\"}, ArgEnvExitStatus(0, 5),\nabsl::StrCat(WorkloadPath(kBasicWorkload), \"\\n1\\n2\\n3\\n4\\n5\\n\"));\n}\nTEST(ExecTest, OneArgOneEnv) {\n- CheckOutput(WorkloadPath(kBasicWorkload),\n- {WorkloadPath(kBasicWorkload), \"arg\"}, {\"env\"},\n- ArgEnvExitStatus(1, 1),\n+ CheckExec(WorkloadPath(kBasicWorkload), {WorkloadPath(kBasicWorkload), \"arg\"},\n+ {\"env\"}, ArgEnvExitStatus(1, 1),\nabsl::StrCat(WorkloadPath(kBasicWorkload), \"\\narg\\nenv\\n\"));\n}\nTEST(ExecTest, InterpreterScript) {\n- CheckOutput(WorkloadPath(kExitScript), {WorkloadPath(kExitScript), \"25\"}, {},\n+ CheckExec(WorkloadPath(kExitScript), {WorkloadPath(kExitScript), \"25\"}, {},\nArgEnvExitStatus(25, 0), \"\");\n}\n@@ -199,7 +222,7 @@ TEST(ExecTest, InterpreterScriptArgSplit) {\nGetAbsoluteTestTmpdir(), absl::StrCat(\"#!\", link.path(), \" foo bar\"),\n0755));\n- CheckOutput(script.path(), {script.path()}, {}, ArgEnvExitStatus(2, 0),\n+ CheckExec(script.path(), {script.path()}, {}, ArgEnvExitStatus(2, 0),\nabsl::StrCat(link.path(), \"\\nfoo bar\\n\", script.path(), \"\\n\"));\n}\n@@ -212,7 +235,7 @@ TEST(ExecTest, InterpreterScriptArgvZero) {\nTempPath script = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith(\nGetAbsoluteTestTmpdir(), absl::StrCat(\"#!\", link.path()), 0755));\n- CheckOutput(script.path(), {\"REPLACED\"}, {}, ArgEnvExitStatus(1, 0),\n+ CheckExec(script.path(), {\"REPLACED\"}, {}, ArgEnvExitStatus(1, 0),\nabsl::StrCat(link.path(), \"\\n\", script.path(), \"\\n\"));\n}\n@@ -230,7 +253,7 @@ TEST(ExecTest, InterpreterScriptArgvZeroRelative) {\nauto script_relative =\nASSERT_NO_ERRNO_AND_VALUE(GetRelativePath(cwd, script.path()));\n- CheckOutput(script_relative, {\"REPLACED\"}, {}, ArgEnvExitStatus(1, 0),\n+ CheckExec(script_relative, {\"REPLACED\"}, {}, ArgEnvExitStatus(1, 0),\nabsl::StrCat(link.path(), \"\\n\", script_relative, \"\\n\"));\n}\n@@ -243,7 +266,7 @@ TEST(ExecTest, InterpreterScriptArgvZeroAdded) {\nTempPath script = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith(\nGetAbsoluteTestTmpdir(), absl::StrCat(\"#!\", link.path()), 0755));\n- CheckOutput(script.path(), {}, {}, ArgEnvExitStatus(1, 0),\n+ CheckExec(script.path(), {}, {}, ArgEnvExitStatus(1, 0),\nabsl::StrCat(link.path(), \"\\n\", script.path(), \"\\n\"));\n}\n@@ -258,7 +281,7 @@ TEST(ExecTest, InterpreterScriptArgNUL) {\nabsl::StrCat(\"#!\", link.path(), \" foo\", std::string(1, '\\0'), \"bar\"),\n0755));\n- CheckOutput(script.path(), {script.path()}, {}, ArgEnvExitStatus(2, 0),\n+ CheckExec(script.path(), {script.path()}, {}, ArgEnvExitStatus(2, 0),\nabsl::StrCat(link.path(), \"\\nfoo\\n\", script.path(), \"\\n\"));\n}\n@@ -271,7 +294,7 @@ TEST(ExecTest, InterpreterScriptTrailingWhitespace) {\nTempPath script = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith(\nGetAbsoluteTestTmpdir(), absl::StrCat(\"#!\", link.path(), \" \"), 0755));\n- CheckOutput(script.path(), {script.path()}, {}, ArgEnvExitStatus(1, 0),\n+ 
CheckExec(script.path(), {script.path()}, {}, ArgEnvExitStatus(1, 0),\nabsl::StrCat(link.path(), \"\\n\", script.path(), \"\\n\"));\n}\n@@ -284,7 +307,7 @@ TEST(ExecTest, InterpreterScriptArgWhitespace) {\nTempPath script = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith(\nGetAbsoluteTestTmpdir(), absl::StrCat(\"#!\", link.path(), \" foo\"), 0755));\n- CheckOutput(script.path(), {script.path()}, {}, ArgEnvExitStatus(2, 0),\n+ CheckExec(script.path(), {script.path()}, {}, ArgEnvExitStatus(2, 0),\nabsl::StrCat(link.path(), \"\\nfoo\\n\", script.path(), \"\\n\"));\n}\n@@ -314,14 +337,14 @@ TEST(ExecTest, ExecFn) {\nauto script_relative =\nASSERT_NO_ERRNO_AND_VALUE(GetRelativePath(cwd, script.path()));\n- CheckOutput(script_relative, {script_relative}, {}, ArgEnvExitStatus(0, 0),\n+ CheckExec(script_relative, {script_relative}, {}, ArgEnvExitStatus(0, 0),\nabsl::StrCat(script_relative, \"\\n\"));\n}\nTEST(ExecTest, ExecName) {\nstd::string path = WorkloadPath(kStateWorkload);\n- CheckOutput(path, {path, \"PrintExecName\"}, {}, ArgEnvExitStatus(0, 0),\n+ CheckExec(path, {path, \"PrintExecName\"}, {}, ArgEnvExitStatus(0, 0),\nabsl::StrCat(Basename(path).substr(0, 15), \"\\n\"));\n}\n@@ -336,20 +359,20 @@ TEST(ExecTest, ExecNameScript) {\nstd::string script_path = script.path();\n- CheckOutput(script_path, {script_path}, {}, ArgEnvExitStatus(0, 0),\n+ CheckExec(script_path, {script_path}, {}, ArgEnvExitStatus(0, 0),\nabsl::StrCat(Basename(script_path).substr(0, 15), \"\\n\"));\n}\n// execve may be called by a multithreaded process.\nTEST(ExecTest, WithSiblingThread) {\n- CheckOutput(\"/proc/self/exe\", {\"/proc/self/exe\", kExecWithThread}, {},\n+ CheckExec(\"/proc/self/exe\", {\"/proc/self/exe\", kExecWithThread}, {},\nW_EXITCODE(42, 0), \"\");\n}\n// execve may be called from a thread other than the leader of a multithreaded\n// process.\nTEST(ExecTest, FromSiblingThread) {\n- CheckOutput(\"/proc/self/exe\", {\"/proc/self/exe\", kExecFromThread}, {},\n+ CheckExec(\"/proc/self/exe\", {\"/proc/self/exe\", kExecFromThread}, {},\nW_EXITCODE(42, 0), \"\");\n}\n@@ -388,7 +411,7 @@ TEST(ExecStateTest, HandlerReset) {\nabsl::StrCat(absl::Hex(reinterpret_cast<uintptr_t>(SIG_DFL))),\n};\n- CheckOutput(WorkloadPath(kStateWorkload), args, {}, W_EXITCODE(0, 0), \"\");\n+ CheckExec(WorkloadPath(kStateWorkload), args, {}, W_EXITCODE(0, 0), \"\");\n}\n// Ignored signal dispositions are not reset.\n@@ -404,7 +427,7 @@ TEST(ExecStateTest, IgnorePreserved) {\nabsl::StrCat(absl::Hex(reinterpret_cast<uintptr_t>(SIG_IGN))),\n};\n- CheckOutput(WorkloadPath(kStateWorkload), args, {}, W_EXITCODE(0, 0), \"\");\n+ CheckExec(WorkloadPath(kStateWorkload), args, {}, W_EXITCODE(0, 0), \"\");\n}\n// Signal masks are not reset on exec\n@@ -420,7 +443,7 @@ TEST(ExecStateTest, SignalMask) {\nabsl::StrCat(SIGUSR1),\n};\n- CheckOutput(WorkloadPath(kStateWorkload), args, {}, W_EXITCODE(0, 0), \"\");\n+ CheckExec(WorkloadPath(kStateWorkload), args, {}, W_EXITCODE(0, 0), \"\");\n}\n// itimers persist across execve.\n@@ -472,7 +495,7 @@ TEST(ExecStateTest, ItimerPreserved) {\nTEST(ProcSelfExe, ChangesAcrossExecve) {\n// See exec_proc_exe_workload for more details. 
We simply\n// assert that the /proc/self/exe link changes across execve.\n- CheckOutput(WorkloadPath(kProcExeWorkload),\n+ CheckExec(WorkloadPath(kProcExeWorkload),\n{WorkloadPath(kProcExeWorkload),\nASSERT_NO_ERRNO_AND_VALUE(ProcessExePath(getpid()))},\n{}, W_EXITCODE(0, 0), \"\");\n@@ -484,7 +507,7 @@ TEST(ExecTest, CloexecNormalFile) {\nconst FileDescriptor fd_closed_on_exec =\nASSERT_NO_ERRNO_AND_VALUE(Open(tempFile.path(), O_RDONLY | O_CLOEXEC));\n- CheckOutput(WorkloadPath(kAssertClosedWorkload),\n+ CheckExec(WorkloadPath(kAssertClosedWorkload),\n{WorkloadPath(kAssertClosedWorkload),\nabsl::StrCat(fd_closed_on_exec.get())},\n{}, W_EXITCODE(0, 0), \"\");\n@@ -494,7 +517,7 @@ TEST(ExecTest, CloexecNormalFile) {\nconst FileDescriptor fd_open_on_exec =\nASSERT_NO_ERRNO_AND_VALUE(Open(tempFile.path(), O_RDONLY));\n- CheckOutput(WorkloadPath(kAssertClosedWorkload),\n+ CheckExec(WorkloadPath(kAssertClosedWorkload),\n{WorkloadPath(kAssertClosedWorkload),\nabsl::StrCat(fd_open_on_exec.get())},\n{}, W_EXITCODE(2, 0), \"\");\n@@ -505,11 +528,42 @@ TEST(ExecTest, CloexecEventfd) {\nASSERT_THAT(efd = eventfd(0, EFD_CLOEXEC), SyscallSucceeds());\nFileDescriptor fd(efd);\n- CheckOutput(WorkloadPath(kAssertClosedWorkload),\n+ CheckExec(WorkloadPath(kAssertClosedWorkload),\n{WorkloadPath(kAssertClosedWorkload), absl::StrCat(fd.get())}, {},\nW_EXITCODE(0, 0), \"\");\n}\n+TEST(ExecveatTest, BasicWithFDCWD) {\n+ std::string path = WorkloadPath(kBasicWorkload);\n+ CheckExecveat(AT_FDCWD, path, {path}, {}, /*flags=*/0, ArgEnvExitStatus(0, 0),\n+ absl::StrCat(path, \"\\n\"));\n+}\n+\n+TEST(ExecveatTest, Basic) {\n+ std::string absolute_path = WorkloadPath(kBasicWorkload);\n+ std::string parent_dir = std::string(Dirname(absolute_path));\n+ std::string relative_path = std::string(Basename(absolute_path));\n+ const FileDescriptor dirfd =\n+ ASSERT_NO_ERRNO_AND_VALUE(Open(parent_dir, O_DIRECTORY));\n+\n+ CheckExecveat(dirfd.get(), relative_path, {absolute_path}, {}, /*flags=*/0,\n+ ArgEnvExitStatus(0, 0), absl::StrCat(absolute_path, \"\\n\"));\n+}\n+\n+TEST(ExecveatTest, AbsolutePathWithFDCWD) {\n+ std::string path = WorkloadPath(kBasicWorkload);\n+ CheckExecveat(AT_FDCWD, path, {path}, {}, ArgEnvExitStatus(0, 0), 0,\n+ absl::StrCat(path, \"\\n\"));\n+}\n+\n+TEST(ExecveatTest, AbsolutePath) {\n+ std::string path = WorkloadPath(kBasicWorkload);\n+ // File descriptor should be ignored when an absolute path is given.\n+ const int32_t badFD = -1;\n+ CheckExecveat(badFD, path, {path}, {}, ArgEnvExitStatus(0, 0), 0,\n+ absl::StrCat(path, \"\\n\"));\n+}\n+\n// Priority consistent across calls to execve()\nTEST(GetpriorityTest, ExecveMaintainsPriority) {\nint prio = 16;\n@@ -522,9 +576,8 @@ TEST(GetpriorityTest, ExecveMaintainsPriority) {\n// Program run (priority_execve) will exit(X) where\n// X=getpriority(PRIO_PROCESS,0). Check that this exit value is prio.\n- CheckOutput(WorkloadPath(kPriorityWorkload),\n- {WorkloadPath(kPriorityWorkload)}, {},\n- W_EXITCODE(expected_exit_code, 0), \"\");\n+ CheckExec(WorkloadPath(kPriorityWorkload), {WorkloadPath(kPriorityWorkload)},\n+ {}, W_EXITCODE(expected_exit_code, 0), \"\");\n}\nvoid ExecWithThread() {\n"
},
{
"change_type": "MODIFY",
"old_path": "test/util/multiprocess_util.cc",
"new_path": "test/util/multiprocess_util.cc",
"diff": "#include \"test/util/multiprocess_util.h\"\n+#include <asm/unistd.h>\n#include <errno.h>\n#include <fcntl.h>\n#include <signal.h>\nnamespace gvisor {\nnamespace testing {\n-PosixErrorOr<Cleanup> ForkAndExec(const std::string& filename,\n- const ExecveArray& argv,\n- const ExecveArray& envv,\n- const std::function<void()>& fn, pid_t* child,\n- int* execve_errno) {\n+namespace {\n+\n+// exec_fn wraps a variant of the exec family, e.g. execve or execveat.\n+PosixErrorOr<Cleanup> ForkAndExecHelper(const std::function<void()>& exec_fn,\n+ const std::function<void()>& fn,\n+ pid_t* child, int* execve_errno) {\nint pfds[2];\nint ret = pipe2(pfds, O_CLOEXEC);\nif (ret < 0) {\n@@ -76,7 +78,9 @@ PosixErrorOr<Cleanup> ForkAndExec(const std::string& filename,\nfn();\n}\n- execve(filename.c_str(), argv.get(), envv.get());\n+ // Call variant of exec function.\n+ exec_fn();\n+\nint error = errno;\nif (WriteFd(pfds[1], &error, sizeof(error)) != sizeof(error)) {\n// We can't do much if the write fails, but we can at least exit with a\n@@ -116,6 +120,36 @@ PosixErrorOr<Cleanup> ForkAndExec(const std::string& filename,\nreturn std::move(cleanup);\n}\n+} // namespace\n+\n+PosixErrorOr<Cleanup> ForkAndExec(const std::string& filename,\n+ const ExecveArray& argv,\n+ const ExecveArray& envv,\n+ const std::function<void()>& fn, pid_t* child,\n+ int* execve_errno) {\n+ char* const* argv_data = argv.get();\n+ char* const* envv_data = envv.get();\n+ const std::function<void()> exec_fn = [=] {\n+ execve(filename.c_str(), argv_data, envv_data);\n+ };\n+ return ForkAndExecHelper(exec_fn, fn, child, execve_errno);\n+}\n+\n+PosixErrorOr<Cleanup> ForkAndExecveat(const int32_t dirfd,\n+ const std::string& pathname,\n+ const ExecveArray& argv,\n+ const ExecveArray& envv, const int flags,\n+ const std::function<void()>& fn,\n+ pid_t* child, int* execve_errno) {\n+ char* const* argv_data = argv.get();\n+ char* const* envv_data = envv.get();\n+ const std::function<void()> exec_fn = [=] {\n+ syscall(__NR_execveat, dirfd, pathname.c_str(), argv_data, envv_data,\n+ flags);\n+ };\n+ return ForkAndExecHelper(exec_fn, fn, child, execve_errno);\n+}\n+\nPosixErrorOr<int> InForkedProcess(const std::function<void()>& fn) {\npid_t pid = fork();\nif (pid == 0) {\n"
},
{
"change_type": "MODIFY",
"old_path": "test/util/multiprocess_util.h",
"new_path": "test/util/multiprocess_util.h",
"diff": "@@ -102,6 +102,13 @@ inline PosixErrorOr<Cleanup> ForkAndExec(const std::string& filename,\nreturn ForkAndExec(filename, argv, envv, [] {}, child, execve_errno);\n}\n+// Equivalent to ForkAndExec, except using dirfd and flags with execveat.\n+PosixErrorOr<Cleanup> ForkAndExecveat(int32_t dirfd, const std::string& pathname,\n+ const ExecveArray& argv,\n+ const ExecveArray& envv, int flags,\n+ const std::function<void()>& fn,\n+ pid_t* child, int* execve_errno);\n+\n// Calls fn in a forked subprocess and returns the exit status of the\n// subprocess.\n//\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add basic implementation of execveat syscall and associated tests.
Allow file descriptors of directories as well as AT_FDCWD.
PiperOrigin-RevId: 275929668 |
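To make the calling convention concrete, below is a small, hedged Go sketch that invokes execveat(2) directly — a directory FD plus a relative path, with flags left at 0 since the handler above still returns ENOSYS for AT_EMPTY_PATH and AT_SYMLINK_NOFOLLOW. The helper name and error handling are illustrative assumptions; the raw syscall number 322 is the x86_64 table entry added above, and only the standard library syscall package is used.

```go
package main

import (
	"syscall"
	"unsafe"
)

const sysExecveat = 322 // x86_64 syscall number, as registered in linux64_amd64.go

// execveat is an illustrative wrapper; on success it never returns.
func execveat(dirfd int, path string, argv, envv []string, flags int) error {
	p, err := syscall.BytePtrFromString(path)
	if err != nil {
		return err
	}
	argvp, err := syscall.SlicePtrFromStrings(argv) // NULL-terminated pointer slice
	if err != nil {
		return err
	}
	envvp, err := syscall.SlicePtrFromStrings(envv)
	if err != nil {
		return err
	}
	_, _, errno := syscall.Syscall6(sysExecveat,
		uintptr(dirfd),
		uintptr(unsafe.Pointer(p)),
		uintptr(unsafe.Pointer(&argvp[0])),
		uintptr(unsafe.Pointer(&envvp[0])),
		uintptr(flags), 0)
	if errno != 0 {
		return errno
	}
	return nil
}

func main() {
	// Open the directory containing the target, then exec a path relative to it,
	// which is the dirFD branch the new handler resolves via t.GetFile(dirFD).
	dirfd, err := syscall.Open("/bin", syscall.O_RDONLY|syscall.O_DIRECTORY, 0)
	if err != nil {
		panic(err)
	}
	panic(execveat(dirfd, "true", []string{"true"}, nil, 0))
}
```

Passing AT_FDCWD (or an absolute path with any FD) exercises the other branch, where the handler falls back to the task's working directory.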
260,004 | 22.10.2019 13:53:01 | 25,200 | c356fe2ebb4aa2ca2bf0afc88a8af96f3c61bb25 | Respect new PrimaryEndpointBehavior when an address gets promoted to permanent
This change makes sure that when an address which is already known by a NIC and
has kind = permanentExpired gets promoted to permanent, the new
PrimaryEndpointBehavior is respected. | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/BUILD",
"new_path": "pkg/tcpip/stack/BUILD",
"diff": "@@ -73,6 +73,7 @@ go_test(\n\"//pkg/tcpip/transport/icmp\",\n\"//pkg/tcpip/transport/udp\",\n\"//pkg/waiter\",\n+ \"@com_github_google_go-cmp//cmp:go_default_library\",\n],\n)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/nic.go",
"new_path": "pkg/tcpip/stack/nic.go",
"diff": "@@ -335,9 +335,31 @@ func (n *NIC) addPermanentAddressLocked(protocolAddress tcpip.ProtocolAddress, p\n// The NIC already have a permanent endpoint with that address.\nreturn nil, tcpip.ErrDuplicateAddress\ncase permanentExpired, temporary:\n- // Promote the endpoint to become permanent.\n+ // Promote the endpoint to become permanent and respect\n+ // the new peb.\nif ref.tryIncRef() {\nref.setKind(permanent)\n+\n+ refs := n.primary[ref.protocol]\n+ for i, r := range refs {\n+ if r == ref {\n+ switch peb {\n+ case CanBePrimaryEndpoint:\n+ return ref, nil\n+ case FirstPrimaryEndpoint:\n+ if i == 0 {\n+ return ref, nil\n+ }\n+ n.primary[r.protocol] = append(refs[:i], refs[i+1:]...)\n+ case NeverPrimaryEndpoint:\n+ n.primary[r.protocol] = append(refs[:i], refs[i+1:]...)\n+ return ref, nil\n+ }\n+ }\n+ }\n+\n+ n.insertPrimaryEndpointLocked(ref, peb)\n+\nreturn ref, nil\n}\n// tryIncRef failing means the endpoint is scheduled to be removed once\n@@ -406,12 +428,7 @@ func (n *NIC) addAddressLocked(protocolAddress tcpip.ProtocolAddress, peb Primar\nn.endpoints[id] = ref\n- switch peb {\n- case CanBePrimaryEndpoint:\n- n.primary[protocolAddress.Protocol] = append(n.primary[protocolAddress.Protocol], ref)\n- case FirstPrimaryEndpoint:\n- n.primary[protocolAddress.Protocol] = append([]*referencedNetworkEndpoint{ref}, n.primary[protocolAddress.Protocol]...)\n- }\n+ n.insertPrimaryEndpointLocked(ref, peb)\n// If we are adding a tentative IPv6 address, start DAD.\nif isIPv6Unicast && kind == permanentTentative {\n@@ -533,6 +550,19 @@ func (n *NIC) AddressRanges() []tcpip.Subnet {\nreturn append(sns, n.addressRanges...)\n}\n+// insertPrimaryEndpointLocked adds r to n's primary endpoint list as required\n+// by peb.\n+//\n+// n MUST be locked.\n+func (n *NIC) insertPrimaryEndpointLocked(r *referencedNetworkEndpoint, peb PrimaryEndpointBehavior) {\n+ switch peb {\n+ case CanBePrimaryEndpoint:\n+ n.primary[r.protocol] = append(n.primary[r.protocol], r)\n+ case FirstPrimaryEndpoint:\n+ n.primary[r.protocol] = append([]*referencedNetworkEndpoint{r}, n.primary[r.protocol]...)\n+ }\n+}\n+\nfunc (n *NIC) removeEndpointLocked(r *referencedNetworkEndpoint) {\nid := *r.ep.ID()\n@@ -550,9 +580,10 @@ func (n *NIC) removeEndpointLocked(r *referencedNetworkEndpoint) {\n}\ndelete(n.endpoints, id)\n- for i, ref := range n.primary[r.protocol] {\n+ refs := n.primary[r.protocol]\n+ for i, ref := range refs {\nif ref == r {\n- n.primary[r.protocol] = append(n.primary[r.protocol][:i], n.primary[r.protocol][i+1:]...)\n+ n.primary[r.protocol] = append(refs[:i], refs[i+1:]...)\nbreak\n}\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/stack_test.go",
"new_path": "pkg/tcpip/stack/stack_test.go",
"diff": "@@ -26,6 +26,7 @@ import (\n\"testing\"\n\"time\"\n+ \"github.com/google/go-cmp/cmp\"\n\"gvisor.dev/gvisor/pkg/tcpip\"\n\"gvisor.dev/gvisor/pkg/tcpip/buffer\"\n\"gvisor.dev/gvisor/pkg/tcpip/header\"\n@@ -2013,3 +2014,139 @@ func TestNICAutoGenAddrDoesDAD(t *testing.T) {\nt.Fatalf(\"got stack.GetMainNICAddress(_, _) = %s, want = %s\", addr, want)\n}\n}\n+\n+// TestNewPEB tests that a new PrimaryEndpointBehavior value (peb) is respected\n+// when an address's kind gets \"promoted\" to permanent from permanentExpired.\n+func TestNewPEBOnPromotionToPermanent(t *testing.T) {\n+ pebs := []stack.PrimaryEndpointBehavior{\n+ stack.NeverPrimaryEndpoint,\n+ stack.CanBePrimaryEndpoint,\n+ stack.FirstPrimaryEndpoint,\n+ }\n+\n+ for _, pi := range pebs {\n+ for _, ps := range pebs {\n+ t.Run(fmt.Sprintf(\"%d-to-%d\", pi, ps), func(t *testing.T) {\n+ s := stack.New(stack.Options{\n+ NetworkProtocols: []stack.NetworkProtocol{fakeNetFactory()},\n+ })\n+ ep1 := channel.New(10, defaultMTU, \"\")\n+ if err := s.CreateNIC(1, ep1); err != nil {\n+ t.Fatal(\"CreateNIC failed:\", err)\n+ }\n+\n+ // Add a permanent address with initial\n+ // PrimaryEndpointBehavior (peb), pi. If pi is\n+ // NeverPrimaryEndpoint, the address should not\n+ // be returned by a call to GetMainNICAddress;\n+ // else, it should.\n+ if err := s.AddAddressWithOptions(1, fakeNetNumber, \"\\x01\", pi); err != nil {\n+ t.Fatal(\"AddAddressWithOptions failed:\", err)\n+ }\n+ addr, err := s.GetMainNICAddress(1, fakeNetNumber)\n+ if err != nil {\n+ t.Fatal(\"s.GetMainNICAddress failed:\", err)\n+ }\n+ if pi == stack.NeverPrimaryEndpoint {\n+ if want := (tcpip.AddressWithPrefix{}); addr != want {\n+ t.Fatalf(\"got GetMainNICAddress = %s, want = %s\", addr, want)\n+\n+ }\n+ } else if addr.Address != \"\\x01\" {\n+ t.Fatalf(\"got GetMainNICAddress = %s, want = 1\", addr.Address)\n+ }\n+\n+ {\n+ subnet, err := tcpip.NewSubnet(\"\\x00\", \"\\x00\")\n+ if err != nil {\n+ t.Fatalf(\"NewSubnet failed:\", err)\n+ }\n+ s.SetRouteTable([]tcpip.Route{{Destination: subnet, Gateway: \"\\x00\", NIC: 1}})\n+ }\n+\n+ // Take a route through the address so its ref\n+ // count gets incremented and does not actually\n+ // get deleted when RemoveAddress is called\n+ // below. 
This is because we want to test that a\n+ // new peb is respected when an address gets\n+ // \"promoted\" to permanent from a\n+ // permanentExpired kind.\n+ r, err := s.FindRoute(1, \"\\x01\", \"\\x02\", fakeNetNumber, false)\n+ if err != nil {\n+ t.Fatal(\"FindRoute failed:\", err)\n+ }\n+ defer r.Release()\n+ if err := s.RemoveAddress(1, \"\\x01\"); err != nil {\n+ t.Fatalf(\"RemoveAddress failed:\", err)\n+ }\n+\n+ //\n+ // At this point, the address should still be\n+ // known by the NIC, but have its\n+ // kind = permanentExpired.\n+ //\n+\n+ // Add some other address with peb set to\n+ // FirstPrimaryEndpoint.\n+ if err := s.AddAddressWithOptions(1, fakeNetNumber, \"\\x03\", stack.FirstPrimaryEndpoint); err != nil {\n+ t.Fatal(\"AddAddressWithOptions failed:\", err)\n+\n+ }\n+\n+ // Add back the address we removed earlier and\n+ // make sure the new peb was respected.\n+ // (The address should just be promoted now).\n+ if err := s.AddAddressWithOptions(1, fakeNetNumber, \"\\x01\", ps); err != nil {\n+ t.Fatal(\"AddAddressWithOptions failed:\", err)\n+ }\n+ var primaryAddrs []tcpip.Address\n+ for _, pa := range s.NICInfo()[1].ProtocolAddresses {\n+ primaryAddrs = append(primaryAddrs, pa.AddressWithPrefix.Address)\n+ }\n+ var expectedList []tcpip.Address\n+ switch ps {\n+ case stack.FirstPrimaryEndpoint:\n+ expectedList = []tcpip.Address{\n+ \"\\x01\",\n+ \"\\x03\",\n+ }\n+ case stack.CanBePrimaryEndpoint:\n+ expectedList = []tcpip.Address{\n+ \"\\x03\",\n+ \"\\x01\",\n+ }\n+ case stack.NeverPrimaryEndpoint:\n+ expectedList = []tcpip.Address{\n+ \"\\x03\",\n+ }\n+ }\n+ if !cmp.Equal(primaryAddrs, expectedList) {\n+ t.Fatalf(\"got NIC's primary addresses = %v, want = %v\", primaryAddrs, expectedList)\n+ }\n+\n+ // Once we remove the other address, if the new\n+ // peb, ps, was NeverPrimaryEndpoint, no address\n+ // should be returned by a call to\n+ // GetMainNICAddress; else, our original address\n+ // should be returned.\n+ if err := s.RemoveAddress(1, \"\\x03\"); err != nil {\n+ t.Fatalf(\"RemoveAddress failed:\", err)\n+ }\n+ addr, err = s.GetMainNICAddress(1, fakeNetNumber)\n+ if err != nil {\n+ t.Fatal(\"s.GetMainNICAddress failed:\", err)\n+ }\n+ if ps == stack.NeverPrimaryEndpoint {\n+ if want := (tcpip.AddressWithPrefix{}); addr != want {\n+ t.Fatalf(\"got GetMainNICAddress = %s, want = %s\", addr, want)\n+\n+ }\n+ } else {\n+ if addr.Address != \"\\x01\" {\n+ t.Fatalf(\"got GetMainNICAddress = %s, want = 1\", addr.Address)\n+ }\n+ }\n+ })\n+ }\n+ }\n+}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Respect new PrimaryEndpointBehavior when an address gets promoted to permanent
This change makes sure that when an address which is already known by a NIC and
has kind = permanentExpired gets promoted to permanent, the new
PrimaryEndpointBehavior is respected.
PiperOrigin-RevId: 276136317 |
260,004 | 22.10.2019 14:40:27 | 25,200 | 515e0558d4f8f7c890e72bdaf4c8b41b31cd270c | Add a type to represent the NDP Router Advertisement message.
This change is in preparation for NDP Router Discovery where the stack will need
to handle NDP Router Advertisements.
Tests: Test that given an NDP Router Advertisement buffer (the body of an ICMPv6
packet), correct values are returned by the field getters. | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/header/BUILD",
"new_path": "pkg/tcpip/header/BUILD",
"diff": "@@ -19,6 +19,7 @@ go_library(\n\"ndp_neighbor_advert.go\",\n\"ndp_neighbor_solicit.go\",\n\"ndp_options.go\",\n+ \"ndp_router_advert.go\",\n\"tcp.go\",\n\"udp.go\",\n],\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/header/ndp_neighbor_advert.go",
"new_path": "pkg/tcpip/header/ndp_neighbor_advert.go",
"diff": "@@ -18,6 +18,8 @@ import \"gvisor.dev/gvisor/pkg/tcpip\"\n// NDPNeighborAdvert is an NDP Neighbor Advertisement message. It will\n// only contain the body of an ICMPv6 packet.\n+//\n+// See RFC 4861 section 4.4 for more details.\ntype NDPNeighborAdvert []byte\nconst (\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/header/ndp_neighbor_solicit.go",
"new_path": "pkg/tcpip/header/ndp_neighbor_solicit.go",
"diff": "@@ -18,6 +18,8 @@ import \"gvisor.dev/gvisor/pkg/tcpip\"\n// NDPNeighborSolicit is an NDP Neighbor Solicitation message. It will only\n// contain the body of an ICMPv6 packet.\n+//\n+// See RFC 4861 section 4.3 for more details.\ntype NDPNeighborSolicit []byte\nconst (\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "pkg/tcpip/header/ndp_router_advert.go",
"diff": "+// Copyright 2019 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package header\n+\n+import (\n+ \"encoding/binary\"\n+ \"time\"\n+)\n+\n+// NDPRouterAdvert is an NDP Router Advertisement message. It will only contain\n+// the body of an ICMPv6 packet.\n+//\n+// See RFC 4861 section 4.2 for more details.\n+type NDPRouterAdvert []byte\n+\n+const (\n+ // NDPRAMinimumSize is the minimum size of a valid NDP Router\n+ // Advertisement message (body of an ICMPv6 packet).\n+ NDPRAMinimumSize = 12\n+\n+ // ndpRACurrHopLimitOffset is the byte of the Curr Hop Limit field\n+ // within an NDPRouterAdvert.\n+ ndpRACurrHopLimitOffset = 0\n+\n+ // ndpRAFlagsOffset is the byte with the NDP RA bit-fields/flags\n+ // within an NDPRouterAdvert.\n+ ndpRAFlagsOffset = 1\n+\n+ // ndpRAManagedAddrConfFlagMask is the mask of the Managed Address\n+ // Configuration flag within the bit-field/flags byte of an\n+ // NDPRouterAdvert.\n+ ndpRAManagedAddrConfFlagMask = (1 << 7)\n+\n+ // ndpRAOtherConfFlagMask is the mask of the Other Configuration flag\n+ // within the bit-field/flags byte of an NDPRouterAdvert.\n+ ndpRAOtherConfFlagMask = (1 << 6)\n+\n+ // ndpRARouterLifetimeOffset is the start of the 2-byte Router Lifetime\n+ // field within an NDPRouterAdvert.\n+ ndpRARouterLifetimeOffset = 2\n+\n+ // ndpRAReachableTimeOffset is the start of the 4-byte Reachable Time\n+ // field within an NDPRouterAdvert.\n+ ndpRAReachableTimeOffset = 4\n+\n+ // ndpRARetransTimerOffset is the start of the 4-byte Retrans Timer\n+ // field within an NDPRouterAdvert.\n+ ndpRARetransTimerOffset = 8\n+\n+ // ndpRAOptionsOffset is the start of the NDP options in an\n+ // NDPRouterAdvert.\n+ ndpRAOptionsOffset = 12\n+)\n+\n+// CurrHopLimit returns the value of the Curr Hop Limit field.\n+func (b NDPRouterAdvert) CurrHopLimit() uint8 {\n+ return b[ndpRACurrHopLimitOffset]\n+}\n+\n+// ManagedAddrConfFlag returns the value of the Managed Address Configuration\n+// flag.\n+func (b NDPRouterAdvert) ManagedAddrConfFlag() bool {\n+ return b[ndpRAFlagsOffset]&ndpRAManagedAddrConfFlagMask != 0\n+}\n+\n+// OtherConfFlag returns the value of the Other Configuration flag.\n+func (b NDPRouterAdvert) OtherConfFlag() bool {\n+ return b[ndpRAFlagsOffset]&ndpRAOtherConfFlagMask != 0\n+}\n+\n+// RouterLifetime returns the lifetime associated with the default router. A\n+// value of 0 means the source of the Router Advertisement is not a default\n+// router and SHOULD NOT appear on the default router list. 
Note, a value of 0\n+// only means that the router should not be used as a default router, it does\n+// not apply to other information contained in the Router Advertisement.\n+func (b NDPRouterAdvert) RouterLifetime() time.Duration {\n+ // The field is the time in seconds, as per RFC 4861 section 4.2.\n+ return time.Second * time.Duration(binary.BigEndian.Uint16(b[ndpRARouterLifetimeOffset:]))\n+}\n+\n+// ReachableTime returns the time that a node assumes a neighbor is reachable\n+// after having received a reachability confirmation. A value of 0 means\n+// that it is unspecified by the source of the Router Advertisement message.\n+func (b NDPRouterAdvert) ReachableTime() time.Duration {\n+ // The field is the time in milliseconds, as per RFC 4861 section 4.2.\n+ return time.Millisecond * time.Duration(binary.BigEndian.Uint32(b[ndpRAReachableTimeOffset:]))\n+}\n+\n+// RetransTimer returns the time between retransmitted Neighbor Solicitation\n+// messages. A value of 0 means that it is unspecified by the source of the\n+// Router Advertisement message.\n+func (b NDPRouterAdvert) RetransTimer() time.Duration {\n+ // The field is the time in milliseconds, as per RFC 4861 section 4.2.\n+ return time.Millisecond * time.Duration(binary.BigEndian.Uint32(b[ndpRARetransTimerOffset:]))\n+}\n+\n+// Options returns an NDPOptions of the the options body.\n+func (b NDPRouterAdvert) Options() NDPOptions {\n+ return NDPOptions(b[ndpRAOptionsOffset:])\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/header/ndp_test.go",
"new_path": "pkg/tcpip/header/ndp_test.go",
"diff": "@@ -17,6 +17,7 @@ package header\nimport (\n\"bytes\"\n\"testing\"\n+ \"time\"\n\"gvisor.dev/gvisor/pkg/tcpip\"\n)\n@@ -117,6 +118,40 @@ func TestNDPNeighborAdvert(t *testing.T) {\n}\n}\n+func TestNDPRouterAdvert(t *testing.T) {\n+ b := []byte{\n+ 64, 128, 1, 2,\n+ 3, 4, 5, 6,\n+ 7, 8, 9, 10,\n+ }\n+\n+ ra := NDPRouterAdvert(b)\n+\n+ if got := ra.CurrHopLimit(); got != 64 {\n+ t.Fatalf(\"got ra.CurrHopLimit = %d, want = 64\", got)\n+ }\n+\n+ if got := ra.ManagedAddrConfFlag(); !got {\n+ t.Fatalf(\"got ManagedAddrConfFlag = false, want = true\")\n+ }\n+\n+ if got := ra.OtherConfFlag(); got {\n+ t.Fatalf(\"got OtherConfFlag = true, want = false\")\n+ }\n+\n+ if got, want := ra.RouterLifetime(), time.Second*258; got != want {\n+ t.Fatalf(\"got ra.RouterLifetime = %d, want = %d\", got, want)\n+ }\n+\n+ if got, want := ra.ReachableTime(), time.Millisecond*50595078; got != want {\n+ t.Fatalf(\"got ra.ReachableTime = %d, want = %d\", got, want)\n+ }\n+\n+ if got, want := ra.RetransTimer(), time.Millisecond*117967114; got != want {\n+ t.Fatalf(\"got ra.RetransTimer = %d, want = %d\", got, want)\n+ }\n+}\n+\n// TestNDPTargetLinkLayerAddressOptionSerialize tests serializing a\n// NDPTargetLinkLayerAddressOption.\nfunc TestNDPTargetLinkLayerAddressOptionSerialize(t *testing.T) {\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add a type to represent the NDP Router Advertisement message.
This change is in preparation for NDP Router Discovery where the stack will need
to handle NDP Router Advertisements.
Tests: Test that given an NDP Router Advertisement buffer (the body of an ICMPv6
packet), correct values are returned by the field getters.
PiperOrigin-RevId: 276146817 |
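The field getters introduced in this commit are easiest to see in use. The following is a small illustrative sketch, not code from the repository: it parses the same 12-byte Router Advertisement body used in the test above and assumes the gvisor.dev/gvisor module path that the surrounding diffs import.

```go
package main

import (
	"fmt"
	"time"

	"gvisor.dev/gvisor/pkg/tcpip/header"
)

func main() {
	// Same sample Router Advertisement body as the test in this commit:
	// Curr Hop Limit = 64, flags = 0x80 (Managed set, Other clear),
	// Router Lifetime = 0x0102 seconds, Reachable Time = 0x03040506 ms,
	// Retrans Timer = 0x0708090a ms, no options.
	b := []byte{
		64, 128, 1, 2,
		3, 4, 5, 6,
		7, 8, 9, 10,
	}

	if len(b) < header.NDPRAMinimumSize {
		panic("buffer too short for a Router Advertisement body")
	}

	ra := header.NDPRouterAdvert(b)
	fmt.Println(ra.CurrHopLimit())                               // 64
	fmt.Println(ra.ManagedAddrConfFlag(), ra.OtherConfFlag())    // true false
	fmt.Println(ra.RouterLifetime() == 258*time.Second)          // true
	fmt.Println(ra.ReachableTime() == 50595078*time.Millisecond) // true
	fmt.Println(ra.RetransTimer() == 117967114*time.Millisecond) // true
}
```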
259,853 | 22.10.2019 14:55:54 | 25,200 | e63ff6d923bb7ec74b837e6b00df41e4d805e70a | platform/ptrace: exit without panic if a stub process has been killed by SIGKILL
SIGKILL can be sent only by a user or the OOM-killer. In both cases, we don't
need to panic. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/ptrace/subprocess.go",
"new_path": "pkg/sentry/platform/ptrace/subprocess.go",
"diff": "@@ -327,6 +327,19 @@ func (t *thread) dumpAndPanic(message string) {\npanic(message)\n}\n+func (t *thread) unexpectedStubExit() {\n+ msg, err := t.getEventMessage()\n+ status := syscall.WaitStatus(msg)\n+ if status.Signaled() && status.Signal() == syscall.SIGKILL {\n+ // SIGKILL can be only sent by an user or OOM-killer. In both\n+ // these cases, we don't need to panic. There is no reasons to\n+ // think that something wrong in gVisor.\n+ log.Warningf(\"The ptrace stub process %v has been killed by SIGKILL.\", t.tgid)\n+ syscall.Kill(os.Getpid(), syscall.SIGKILL)\n+ }\n+ t.dumpAndPanic(fmt.Sprintf(\"wait failed: the process %d:%d exited: %x (err %v)\", t.tgid, t.tid, msg, err))\n+}\n+\n// wait waits for a stop event.\n//\n// Precondition: outcome is a valid waitOutcome.\n@@ -355,8 +368,7 @@ func (t *thread) wait(outcome waitOutcome) syscall.Signal {\n}\nif stopSig == syscall.SIGTRAP {\nif status.TrapCause() == syscall.PTRACE_EVENT_EXIT {\n- msg, err := t.getEventMessage()\n- t.dumpAndPanic(fmt.Sprintf(\"wait failed: the process %d:%d exited: %x (err %v)\", t.tgid, t.tid, msg, err))\n+ t.unexpectedStubExit()\n}\n// Re-encode the trap cause the way it's expected.\nreturn stopSig | syscall.Signal(status.TrapCause()<<8)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/ptrace/subprocess_linux.go",
"new_path": "pkg/sentry/platform/ptrace/subprocess_linux.go",
"diff": "@@ -129,6 +129,9 @@ func createStub() (*thread, error) {\n// transitively) will be killed as well. It's simply not possible to\n// safely handle a single stub getting killed: the exact state of\n// execution is unknown and not recoverable.\n+ //\n+ // In addition, we set the PTRACE_O_TRACEEXIT option to log more\n+ // information about a stub process when it receives a fatal signal.\nreturn attachedThread(uintptr(syscall.SIGKILL)|syscall.CLONE_FILES, defaultAction)\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | platform/ptrace: exit without panic if a stub process has been killed by SIGKILL
SIGKILL can be sent only by a user or the OOM-killer. In both cases, we don't
need to panic.
PiperOrigin-RevId: 276150120 |
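The behavior described above hinges on decoding the event message from the PTRACE_EVENT_EXIT stop as a wait status and checking for SIGKILL. Below is a minimal standalone sketch of that check (Linux semantics, standard library only; the helper name is illustrative, not taken from the repository).

```go
package main

import (
	"fmt"
	"syscall"
)

// killedBySIGKILL reports whether a raw wait status (for example the event
// message read at a PTRACE_EVENT_EXIT stop) describes a process terminated
// by SIGKILL, as opposed to a normal exit or another fatal signal.
func killedBySIGKILL(msg uint64) bool {
	status := syscall.WaitStatus(msg)
	return status.Signaled() && status.Signal() == syscall.SIGKILL
}

func main() {
	fmt.Println(killedBySIGKILL(0x09))   // true: terminated by signal 9 (SIGKILL)
	fmt.Println(killedBySIGKILL(0x0b))   // false: terminated by SIGSEGV, not SIGKILL
	fmt.Println(killedBySIGKILL(0x0100)) // false: normal exit with status 1
}
```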
259,884 | 22.10.2019 16:14:42 | 25,200 | ebe8001724a6965cc9723604b38f42563d284a6a | Update const names to be Go style. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/syscalls/linux/linux64.go",
"new_path": "pkg/sentry/syscalls/linux/linux64.go",
"diff": "package linux\nconst (\n- _LINUX_SYSNAME = \"Linux\"\n- _LINUX_RELEASE = \"4.4.0\"\n- _LINUX_VERSION = \"#1 SMP Sun Jan 10 15:06:54 PST 2016\"\n+ // LinuxSysname is the OS name advertised by gVisor.\n+ LinuxSysname = \"Linux\"\n+\n+ // LinuxRelease is the Linux release version number advertised by gVisor.\n+ LinuxRelease = \"4.4.0\"\n+\n+ // LinuxVersion is the version info advertised by gVisor.\n+ LinuxVersion = \"#1 SMP Sun Jan 10 15:06:54 PST 2016\"\n)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/syscalls/linux/linux64_amd64.go",
"new_path": "pkg/sentry/syscalls/linux/linux64_amd64.go",
"diff": "@@ -34,9 +34,9 @@ var AMD64 = &kernel.SyscallTable{\n// guides the interface provided by this syscall table. The build\n// version is that for a clean build with default kernel config, at 5\n// minutes after v4.4 was tagged.\n- Sysname: _LINUX_SYSNAME,\n- Release: _LINUX_RELEASE,\n- Version: _LINUX_VERSION,\n+ Sysname: LinuxSysname,\n+ Release: LinuxRelease,\n+ Version: LinuxVersion,\n},\nAuditNumber: linux.AUDIT_ARCH_X86_64,\nTable: map[uintptr]kernel.Syscall{\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/syscalls/linux/linux64_arm64.go",
"new_path": "pkg/sentry/syscalls/linux/linux64_arm64.go",
"diff": "@@ -30,9 +30,9 @@ var ARM64 = &kernel.SyscallTable{\nOS: abi.Linux,\nArch: arch.ARM64,\nVersion: kernel.Version{\n- Sysname: _LINUX_SYSNAME,\n- Release: _LINUX_RELEASE,\n- Version: _LINUX_VERSION,\n+ Sysname: LinuxSysname,\n+ Release: LinuxRelease,\n+ Version: LinuxVersion,\n},\nAuditNumber: linux.AUDIT_ARCH_AARCH64,\nTable: map[uintptr]kernel.Syscall{\n"
}
] | Go | Apache License 2.0 | google/gvisor | Update const names to be Go style.
PiperOrigin-RevId: 276165962 |
259,881 | 23.10.2019 12:58:40 | 25,200 | c0065e296f6e840ec1f6797fb0fd55cde0fff785 | Remove comparison between signed and unsigned int
Some compilers don't like the comparison between int and size_t. Remove it.
The other changes are minor style cleanups. | [
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/sendfile_socket.cc",
"new_path": "test/syscalls/linux/sendfile_socket.cc",
"diff": "@@ -185,7 +185,7 @@ TEST_P(SendFileTest, Shutdown) {\n// Create a socket.\nstd::tuple<int, int> fds = ASSERT_NO_ERRNO_AND_VALUE(Sockets());\nconst FileDescriptor client(std::get<0>(fds));\n- FileDescriptor server(std::get<1>(fds)); // non-const, released below.\n+ FileDescriptor server(std::get<1>(fds)); // non-const, reset below.\n// If this is a TCP socket, then turn off linger.\nif (GetParam() == AF_INET) {\n@@ -210,14 +210,14 @@ TEST_P(SendFileTest, Shutdown) {\n// checking the contents (other tests do that), so we just re-use the same\n// buffer as above.\nScopedThread t([&]() {\n- int done = 0;\n+ size_t done = 0;\nwhile (done < data.size()) {\n- int n = read(server.get(), data.data(), data.size());\n+ int n = RetryEINTR(read)(server.get(), data.data(), data.size());\nASSERT_THAT(n, SyscallSucceeds());\ndone += n;\n}\n// Close the server side socket.\n- ASSERT_THAT(close(server.release()), SyscallSucceeds());\n+ server.reset();\n});\n// Continuously stream from the file to the socket. Note we do not assert\n"
}
] | Go | Apache License 2.0 | google/gvisor | Remove comparison between signed and unsigned int
Some compilers don't like the comparison between int and size_t. Remove it.
The other changes are minor style cleanups.
PiperOrigin-RevId: 276333450 |
259,958 | 23.10.2019 14:26:23 | 25,200 | fbe6b50d5628bc3d522f87eee2abcc5a923df420 | Keep minimal available fd to accelerate fd allocation
Use fd.next to store the iteration start position, which can be used to accelerate allocating new FDs.
Also add the corresponding gtest benchmark to measure performance.
COPYBARA_INTEGRATE_REVIEW=https://github.com/google/gvisor/pull/758 from DarcySail:master | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/fd_table.go",
"new_path": "pkg/sentry/kernel/fd_table.go",
"diff": "@@ -81,6 +81,9 @@ type FDTable struct {\n// mu protects below.\nmu sync.Mutex `state:\"nosave\"`\n+ // next is start position to find fd.\n+ next int32\n+\n// used contains the number of non-nil entries. It must be accessed\n// atomically. It may be read atomically without holding mu (but not\n// written).\n@@ -226,6 +229,11 @@ func (f *FDTable) NewFDs(ctx context.Context, fd int32, files []*fs.File, flags\nf.mu.Lock()\ndefer f.mu.Unlock()\n+ // From f.next to find available fd.\n+ if fd < f.next {\n+ fd = f.next\n+ }\n+\n// Install all entries.\nfor i := fd; i < end && len(fds) < len(files); i++ {\nif d, _, _ := f.get(i); d == nil {\n@@ -242,6 +250,11 @@ func (f *FDTable) NewFDs(ctx context.Context, fd int32, files []*fs.File, flags\nreturn nil, syscall.EMFILE\n}\n+ if fd == f.next {\n+ // Update next search start position.\n+ f.next = fds[len(fds)-1] + 1\n+ }\n+\nreturn fds, nil\n}\n@@ -361,6 +374,11 @@ func (f *FDTable) Remove(fd int32) *fs.File {\nf.mu.Lock()\ndefer f.mu.Unlock()\n+ // Update current available position.\n+ if fd < f.next {\n+ f.next = fd\n+ }\n+\norig, _, _ := f.get(fd)\nif orig != nil {\norig.IncRef() // Reference for caller.\n@@ -377,6 +395,10 @@ func (f *FDTable) RemoveIf(cond func(*fs.File, FDFlags) bool) {\nf.forEach(func(fd int32, file *fs.File, flags FDFlags) {\nif cond(file, flags) {\nf.set(fd, nil, FDFlags{}) // Clear from table.\n+ // Update current available position.\n+ if fd < f.next {\n+ f.next = fd\n+ }\n}\n})\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/fd_table_test.go",
"new_path": "pkg/sentry/kernel/fd_table_test.go",
"diff": "@@ -70,6 +70,42 @@ func TestFDTableMany(t *testing.T) {\nif err := fdTable.NewFDAt(ctx, 1, file, FDFlags{}); err != nil {\nt.Fatalf(\"fdTable.NewFDAt(1, r, FDFlags{}): got %v, wanted nil\", err)\n}\n+\n+ i := int32(2)\n+ fdTable.Remove(i)\n+ if fds, err := fdTable.NewFDs(ctx, 0, []*fs.File{file}, FDFlags{}); err != nil || fds[0] != i {\n+ t.Fatalf(\"Allocated %v FDs but wanted to allocate %v: %v\", i, maxFD, err)\n+ }\n+ })\n+}\n+\n+func TestFDTableOverLimit(t *testing.T) {\n+ runTest(t, func(ctx context.Context, fdTable *FDTable, file *fs.File, _ *limits.LimitSet) {\n+ if _, err := fdTable.NewFDs(ctx, maxFD, []*fs.File{file}, FDFlags{}); err == nil {\n+ t.Fatalf(\"fdTable.NewFDs(maxFD, f): got nil, wanted error\")\n+ }\n+\n+ if _, err := fdTable.NewFDs(ctx, maxFD-2, []*fs.File{file, file, file}, FDFlags{}); err == nil {\n+ t.Fatalf(\"fdTable.NewFDs(maxFD-2, {f,f,f}): got nil, wanted error\")\n+ }\n+\n+ if fds, err := fdTable.NewFDs(ctx, maxFD-3, []*fs.File{file, file, file}, FDFlags{}); err != nil {\n+ t.Fatalf(\"fdTable.NewFDs(maxFD-3, {f,f,f}): got %v, wanted nil\", err)\n+ } else {\n+ for _, fd := range fds {\n+ fdTable.Remove(fd)\n+ }\n+ }\n+\n+ if fds, err := fdTable.NewFDs(ctx, maxFD-1, []*fs.File{file}, FDFlags{}); err != nil || fds[0] != maxFD-1 {\n+ t.Fatalf(\"fdTable.NewFDAt(1, r, FDFlags{}): got %v, wanted nil\", err)\n+ }\n+\n+ if fds, err := fdTable.NewFDs(ctx, 0, []*fs.File{file}, FDFlags{}); err != nil {\n+ t.Fatalf(\"Adding an FD to a resized map: got %v, want nil\", err)\n+ } else if len(fds) != 1 || fds[0] != 0 {\n+ t.Fatalf(\"Added an FD to a resized map: got %v, want {1}\", fds)\n+ }\n})\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Keep minimal available fd to accelerate fd allocation
Use fd.next to store the iteration start position, which can be used to accelerate allocating new FDs.
Also add the corresponding gtest benchmark to measure performance.
@tanjianfeng
COPYBARA_INTEGRATE_REVIEW=https://github.com/google/gvisor/pull/758 from DarcySail:master 96685ec7886dfe1a64988406831d3bc002b438cc
PiperOrigin-RevId: 276351250 |
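The commit message above only describes the optimization in words. The following is a small self-contained illustration of the same "remember the lowest possibly-free descriptor" idea, independent of the gVisor FDTable type; all names here are made up for the example and are not from the repository.

```go
package main

import "fmt"

// fdAllocator is an illustrative stand-in for an FD table: it tracks which
// descriptors are in use and remembers next, the lowest descriptor that
// might be free, so allocation does not rescan from 0 every time.
type fdAllocator struct {
	used map[int]bool
	next int
}

func newFDAllocator() *fdAllocator {
	return &fdAllocator{used: make(map[int]bool)}
}

// alloc returns the lowest free descriptor >= min, starting the scan at
// next when that is higher (mirroring the fd < f.next check in the diff).
func (a *fdAllocator) alloc(min int) int {
	fd := min
	if fd < a.next {
		fd = a.next
	}
	for a.used[fd] {
		fd++
	}
	a.used[fd] = true
	if fd == a.next {
		a.next = fd + 1 // advance the search start position
	}
	return fd
}

// free releases fd and, if it is below next, moves the search start back
// so the hole can be reused by the next alloc call.
func (a *fdAllocator) free(fd int) {
	delete(a.used, fd)
	if fd < a.next {
		a.next = fd
	}
}

func main() {
	a := newFDAllocator()
	fmt.Println(a.alloc(0)) // 0
	fmt.Println(a.alloc(0)) // 1
	fmt.Println(a.alloc(0)) // 2
	a.free(1)
	fmt.Println(a.alloc(0)) // 1 (reuses the freed slot)
	fmt.Println(a.alloc(0)) // 3
}
```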
259,891 | 23.10.2019 17:20:07 | 25,200 | 072af49059a1818e0e06188be81fe425363acf55 | Add check for proper settings to AF_PACKET tests.
As in packet_socket_raw.cc, we should check that certain proc files are set
correctly. | [
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/packet_socket.cc",
"new_path": "test/syscalls/linux/packet_socket.cc",
"diff": "@@ -130,6 +130,20 @@ void CookedPacketTest::SetUp() {\nGTEST_SKIP();\n}\n+ if (!IsRunningOnGvisor()) {\n+ FileDescriptor acceptLocal = ASSERT_NO_ERRNO_AND_VALUE(\n+ Open(\"/proc/sys/net/ipv4/conf/lo/accept_local\", O_RDONLY));\n+ FileDescriptor routeLocalnet = ASSERT_NO_ERRNO_AND_VALUE(\n+ Open(\"/proc/sys/net/ipv4/conf/lo/route_localnet\", O_RDONLY));\n+ char enabled;\n+ ASSERT_THAT(read(acceptLocal.get(), &enabled, 1),\n+ SyscallSucceedsWithValue(1));\n+ ASSERT_EQ(enabled, '1');\n+ ASSERT_THAT(read(routeLocalnet.get(), &enabled, 1),\n+ SyscallSucceedsWithValue(1));\n+ ASSERT_EQ(enabled, '1');\n+ }\n+\nASSERT_THAT(socket_ = socket(AF_PACKET, SOCK_DGRAM, htons(GetParam())),\nSyscallSucceeds());\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add check for proper settings to AF_PACKET tests.
As in packet_socket_raw.cc, we should check that certain proc files are set
correctly.
PiperOrigin-RevId: 276384534 |
259,884 | 18.10.2019 02:40:54 | 14,400 | 514f16b290229cc1105cdaf8102fee59b1365aee | Fix links on website. | [
{
"change_type": "MODIFY",
"old_path": "Makefile",
"new_path": "Makefile",
"diff": "@@ -63,6 +63,10 @@ compatibility-docs: bin/generate-syscall-docs upstream/gvisor/bazel-bin/runsc/li\n./upstream/gvisor/bazel-bin/runsc/linux_amd64_pure_stripped/runsc help syscalls -o json | ./bin/generate-syscall-docs -out ./content/docs/user_guide/compatibility/\n.PHONY: compatibility-docs\n+check: website\n+ docker run -v $(shell pwd)/public:/public gcr.io/gvisor-website/html-proofer:3.10.2 htmlproofer --disable-external --check-html public/static\n+.PHONY: check\n+\n# Run a local content development server. Redirects will not be supported.\ndevserver: all-upstream compatibility-docs\n$(HUGO) server -FD --port 8080\n"
},
{
"change_type": "MODIFY",
"old_path": "cmd/gvisor-website/main.go",
"new_path": "cmd/gvisor-website/main.go",
"diff": "@@ -38,9 +38,16 @@ var redirects = map[string]string{\n// Redirects to compatibility docs.\n\"/c\": \"/docs/user_guide/compatibility/\",\n\"/c/linux/amd64\": \"/docs/user_guide/compatibility/linux/amd64/\",\n- // Redirect for old url\n- \"/docs/user_guide/compatibility/amd64\": \"/docs/user_guide/compatibility/linux/amd64/\",\n+\n+ // Redirect for old urls\n\"/docs/user_guide/compatibility/amd64/\": \"/docs/user_guide/compatibility/linux/amd64/\",\n+ \"/docs/user_guide/compatibility/amd64\": \"/docs/user_guide/compatibility/linux/amd64/\",\n+ \"/docs/user_guide/kubernetes/\": \"/docs/user_guide/quick_start/kubernetes/\",\n+ \"/docs/user_guide/kubernetes\": \"/docs/user_guide/quick_start/kubernetes/\",\n+ \"/docs/user_guide/oci/\": \"/docs/user_guide/quick_start/oci/\",\n+ \"/docs/user_guide/oci\": \"/docs/user_guide/quick_start/oci/\",\n+ \"/docs/user_guide/docker/\": \"/docs/user_guide/quick_start/docker/\",\n+ \"/docs/user_guide/docker\": \"/docs/user_guide/quick_start/docker/\",\n// Deprecated, but links continue to work.\n\"/cl\": \"https://gvisor-review.googlesource.com\",\n"
},
{
"change_type": "MODIFY",
"old_path": "content/_index.html",
"new_path": "content/_index.html",
"diff": "@@ -7,7 +7,7 @@ description = \"A container sandbox runtime focused on security, efficiency, and\n{{< blocks/cover image_anchor=\"top\" height=\"auto\" color=\"primary\" title=\"gVisor\" >}}\n<div class=\"mx-auto\">\n<p class=\"lead\">A container sandbox runtime focused on <strong>security</strong>, <strong>efficiency</strong>, and <strong>ease of use</strong>.</p>\n- <a class=\"btn btn-lg btn-primary mr-3 mb-4\" href=\"./docs/user_guide/docker/\">Quick Start<i class=\"fas fa-arrow-alt-circle-right ml-2\"></i></a>\n+ <a class=\"btn btn-lg btn-primary mr-3 mb-4\" href=\"./docs/user_guide/quick_start/docker/\">Quick Start<i class=\"fas fa-arrow-alt-circle-right ml-2\"></i></a>\n<a class=\"btn btn-lg btn-secondary mr-3 mb-4\" href=\"https://github.com/google/gvisor\" rel=\"noopener\">GitHub <i class=\"fab fa-github ml-2\"></i></a>\n</div>\n{{< /blocks/cover >}}\n"
},
{
"change_type": "MODIFY",
"old_path": "content/docs/_index.md",
"new_path": "content/docs/_index.md",
"diff": "@@ -14,7 +14,7 @@ gVisor takes a distinct approach to container sandboxing and makes a different\nset of technical trade-offs compared to existing sandbox technologies, thus\nproviding new tools and ideas for the container security landscape.\n-Check out the [gVisor Quick Start](./user_guide/docker/) to get started\n+Check out the [gVisor Quick Start](./user_guide/quick_start/docker/) to get started\nusing gVisor.\n## How this documentation is organized\n"
},
{
"change_type": "MODIFY",
"old_path": "content/docs/architecture_guide/performance.md",
"new_path": "content/docs/architecture_guide/performance.md",
"diff": "@@ -175,7 +175,7 @@ similarly loads a number of modules and binds an HTTP server.\n> Note: most of the time overhead above is associated Docker itself. This is\n> evident with the empty `runc` benchmark. To avoid these costs with `runsc`,\n> you may also consider using `runsc do` mode or invoking the [OCI\n-> runtime](../../user_guide/oci/) directly.\n+> runtime](../../user_guide/quick_start/oci/) directly.\n## Network\n"
},
{
"change_type": "MODIFY",
"old_path": "content/docs/tutorials/docker.md",
"new_path": "content/docs/tutorials/docker.md",
"diff": "@@ -68,6 +68,6 @@ Congratulations! You have just deployed a WordPress site using Docker.\n[Learn how to deploy WordPress with Kubernetes][wordpress-k8s].\n[docker]: https://www.docker.com/\n-[docker-install]: /docs/user_guide/docker/\n+[docker-install]: /docs/user_guide/quick_start/docker/\n[wordpress]: https://wordpress.com/\n[wordpress-k8s]: /docs/tutorials/kubernetes/\n"
},
{
"change_type": "MODIFY",
"old_path": "content/docs/user_guide/debugging.md",
"new_path": "content/docs/user_guide/debugging.md",
"diff": "@@ -4,7 +4,7 @@ weight = 120\n+++\nTo enable debug and system call logging, add the `runtimeArgs` below to your\n-[Docker](../docker/) configuration (`/etc/docker/daemon.json`):\n+[Docker](../quick_start/docker/) configuration (`/etc/docker/daemon.json`):\n```json\n{\n"
},
{
"change_type": "MODIFY",
"old_path": "content/docs/user_guide/quick_start/docker.md",
"new_path": "content/docs/user_guide/quick_start/docker.md",
"diff": "@@ -85,9 +85,12 @@ $ docker run --runtime=runsc -it ubuntu dmesg\nNote that this is easily replicated by an attacker so applications should never\nuse `dmesg` to verify the runtime in a security sensitive context.\n-Next, look at the different options available for gVisor: [platform](../platforms/),\n-[network](../networking/), [filesystem](../filesystem/).\n+Next, look at the different options available for gVisor: [platform][platforms],\n+[network][networking], [filesystem][filesystem].\n[docker]: https://docs.docker.com/install/\n-\n[storage-driver]: https://docs.docker.com/engine/reference/commandline/dockerd/#daemon-storage-driver\n+\n+[filesystem]: /docs/user_guide/filesystem/\n+[networking]: /docs/user_guide/networking/\n+[platforms]: /docs/user_guide/platforms/\n"
}
] | Go | Apache License 2.0 | google/gvisor | Fix links on website. |
259,884 | 18.10.2019 03:05:17 | 14,400 | a9a28c9e4de60a4ec355cb51bdac0f618c208ce1 | Add Google Analytics tracking. | [
{
"change_type": "MODIFY",
"old_path": "config.toml",
"new_path": "config.toml",
"diff": "@@ -61,9 +61,7 @@ anchor = \"smart\"\n[services]\n[services.googleAnalytics]\n-# Comment out the next line to disable GA tracking. Also disables the feature described in [params.ui.feedback].\n-# TODO: Add analytics\n-# id = \"UA-00000000-0\"\n+id = \"UA-150193582-1\"\n# Everything below this are Site Params\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add Google Analytics tracking. |
259,971 | 24.10.2019 13:43:10 | -39,600 | cf240fdf731ff631a37e1a35e6b4e4f2c4203a2d | Fix install link
Link to install docs is currently missing from the Docker quickstart page. | [
{
"change_type": "MODIFY",
"old_path": "content/docs/user_guide/quick_start/docker.md",
"new_path": "content/docs/user_guide/quick_start/docker.md",
"diff": "@@ -90,7 +90,7 @@ Next, look at the different options available for gVisor: [platform][platforms],\n[docker]: https://docs.docker.com/install/\n[storage-driver]: https://docs.docker.com/engine/reference/commandline/dockerd/#daemon-storage-driver\n-\n+[install]: /docs/user_guide/install/\n[filesystem]: /docs/user_guide/filesystem/\n[networking]: /docs/user_guide/networking/\n[platforms]: /docs/user_guide/platforms/\n"
}
] | Go | Apache License 2.0 | google/gvisor | Fix install link
Link to install docs is currently missing from the Docker quickstart page. |
259,860 | 23.10.2019 22:21:33 | 25,200 | 7ca50236c42ad1b1aa19951815d03b62c0c722ed | Handle AT_EMPTY_PATH flag in execveat. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/syscalls/linux/linux64_amd64.go",
"new_path": "pkg/sentry/syscalls/linux/linux64_amd64.go",
"diff": "@@ -362,7 +362,7 @@ var AMD64 = &kernel.SyscallTable{\n319: syscalls.Supported(\"memfd_create\", MemfdCreate),\n320: syscalls.CapError(\"kexec_file_load\", linux.CAP_SYS_BOOT, \"\", nil),\n321: syscalls.CapError(\"bpf\", linux.CAP_SYS_ADMIN, \"\", nil),\n- 322: syscalls.PartiallySupported(\"execveat\", Execveat, \"No support for AT_EMPTY_PATH, AT_SYMLINK_FOLLOW.\", nil),\n+ 322: syscalls.PartiallySupported(\"execveat\", Execveat, \"No support for AT_SYMLINK_FOLLOW.\", nil),\n323: syscalls.ErrorWithEvent(\"userfaultfd\", syserror.ENOSYS, \"\", []string{\"gvisor.dev/issue/266\"}), // TODO(b/118906345)\n324: syscalls.ErrorWithEvent(\"membarrier\", syserror.ENOSYS, \"\", []string{\"gvisor.dev/issue/267\"}), // TODO(b/118904897)\n325: syscalls.PartiallySupported(\"mlock2\", Mlock2, \"Stub implementation. The sandbox lacks appropriate permissions.\", nil),\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/syscalls/linux/sys_thread.go",
"new_path": "pkg/sentry/syscalls/linux/sys_thread.go",
"diff": "@@ -105,18 +105,26 @@ func execveat(t *kernel.Task, dirFD int32, pathnameAddr, argvAddr, envvAddr user\n}\n}\n- if flags != 0 {\n- // TODO(b/128449944): Handle AT_EMPTY_PATH and AT_SYMLINK_NOFOLLOW.\n+ if flags&linux.AT_SYMLINK_NOFOLLOW != 0 {\n+ // TODO(b/128449944): Handle AT_SYMLINK_NOFOLLOW.\nt.Kernel().EmitUnimplementedEvent(t)\nreturn 0, nil, syserror.ENOSYS\n}\n+ atEmptyPath := flags&linux.AT_EMPTY_PATH != 0\n+ if !atEmptyPath && len(pathname) == 0 {\n+ return 0, nil, syserror.ENOENT\n+ }\n+\nroot := t.FSContext().RootDirectory()\ndefer root.DecRef()\nvar wd *fs.Dirent\n+ var executable *fs.File\nif dirFD == linux.AT_FDCWD || path.IsAbs(pathname) {\n- // If pathname is absolute, LoadTaskImage() will ignore the wd.\n+ // Even if the pathname is absolute, we may still need the wd\n+ // for interpreter scripts if the path of the interpreter is\n+ // relative.\nwd = t.FSContext().WorkingDirectory()\n} else {\n// Need to extract the given FD.\n@@ -126,17 +134,23 @@ func execveat(t *kernel.Task, dirFD int32, pathnameAddr, argvAddr, envvAddr user\n}\ndefer f.DecRef()\n+ if atEmptyPath && len(pathname) == 0 {\n+ executable = f\n+ } else {\nwd = f.Dirent\nwd.IncRef()\nif !fs.IsDir(wd.Inode.StableAttr) {\nreturn 0, nil, syserror.ENOTDIR\n}\n}\n+ }\n+ if wd != nil {\ndefer wd.DecRef()\n+ }\n// Load the new TaskContext.\nmaxTraversals := uint(linux.MaxSymlinkTraversals)\n- tc, se := t.Kernel().LoadTaskImage(t, t.MountNamespace(), root, wd, &maxTraversals, pathname, nil, argv, envv, t.Arch().FeatureSet())\n+ tc, se := t.Kernel().LoadTaskImage(t, t.MountNamespace(), root, wd, &maxTraversals, pathname, executable, argv, envv, t.Arch().FeatureSet())\nif se != nil {\nreturn 0, nil, se.ToError()\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/exec.cc",
"new_path": "test/syscalls/linux/exec.cc",
"diff": "@@ -550,6 +550,18 @@ TEST(ExecveatTest, Basic) {\nArgEnvExitStatus(0, 0), absl::StrCat(absolute_path, \"\\n\"));\n}\n+TEST(ExecveatTest, FDNotADirectory) {\n+ std::string absolute_path = WorkloadPath(kBasicWorkload);\n+ std::string relative_path = std::string(Basename(absolute_path));\n+ const FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(Open(absolute_path, 0));\n+\n+ int execve_errno;\n+ ASSERT_NO_ERRNO_AND_VALUE(ForkAndExecveat(fd.get(), relative_path,\n+ {absolute_path}, {}, /*flags=*/0,\n+ /*child=*/nullptr, &execve_errno));\n+ EXPECT_EQ(execve_errno, ENOTDIR);\n+}\n+\nTEST(ExecveatTest, AbsolutePathWithFDCWD) {\nstd::string path = WorkloadPath(kBasicWorkload);\nCheckExecveat(AT_FDCWD, path, {path}, {}, ArgEnvExitStatus(0, 0), 0,\n@@ -564,6 +576,56 @@ TEST(ExecveatTest, AbsolutePath) {\nabsl::StrCat(path, \"\\n\"));\n}\n+TEST(ExecveatTest, EmptyPathBasic) {\n+ std::string path = WorkloadPath(kBasicWorkload);\n+ const FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(Open(path, O_PATH));\n+\n+ CheckExecveat(fd.get(), \"\", {path}, {}, AT_EMPTY_PATH, ArgEnvExitStatus(0, 0),\n+ absl::StrCat(path, \"\\n\"));\n+}\n+\n+TEST(ExecveatTest, EmptyPathWithDirFD) {\n+ std::string path = WorkloadPath(kBasicWorkload);\n+ std::string parent_dir = std::string(Dirname(path));\n+ const FileDescriptor dirfd =\n+ ASSERT_NO_ERRNO_AND_VALUE(Open(parent_dir, O_DIRECTORY));\n+\n+ int execve_errno;\n+ ASSERT_NO_ERRNO_AND_VALUE(ForkAndExecveat(dirfd.get(), \"\", {path}, {},\n+ AT_EMPTY_PATH,\n+ /*child=*/nullptr, &execve_errno));\n+ EXPECT_EQ(execve_errno, EACCES);\n+}\n+\n+TEST(ExecveatTest, EmptyPathWithoutEmptyPathFlag) {\n+ std::string path = WorkloadPath(kBasicWorkload);\n+ const FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(Open(path, O_PATH));\n+\n+ int execve_errno;\n+ ASSERT_NO_ERRNO_AND_VALUE(ForkAndExecveat(\n+ fd.get(), \"\", {path}, {}, /*flags=*/0, /*child=*/nullptr, &execve_errno));\n+ EXPECT_EQ(execve_errno, ENOENT);\n+}\n+\n+TEST(ExecveatTest, AbsolutePathWithEmptyPathFlag) {\n+ std::string path = WorkloadPath(kBasicWorkload);\n+ const FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(Open(path, O_PATH));\n+\n+ CheckExecveat(fd.get(), path, {path}, {}, AT_EMPTY_PATH,\n+ ArgEnvExitStatus(0, 0), absl::StrCat(path, \"\\n\"));\n+}\n+\n+TEST(ExecveatTest, RelativePathWithEmptyPathFlag) {\n+ std::string absolute_path = WorkloadPath(kBasicWorkload);\n+ std::string parent_dir = std::string(Dirname(absolute_path));\n+ std::string relative_path = std::string(Basename(absolute_path));\n+ const FileDescriptor dirfd =\n+ ASSERT_NO_ERRNO_AND_VALUE(Open(parent_dir, O_DIRECTORY));\n+\n+ CheckExecveat(dirfd.get(), relative_path, {absolute_path}, {}, AT_EMPTY_PATH,\n+ ArgEnvExitStatus(0, 0), absl::StrCat(absolute_path, \"\\n\"));\n+}\n+\n// Priority consistent across calls to execve()\nTEST(GetpriorityTest, ExecveMaintainsPriority) {\nint prio = 16;\n"
},
{
"change_type": "MODIFY",
"old_path": "test/util/multiprocess_util.h",
"new_path": "test/util/multiprocess_util.h",
"diff": "@@ -109,6 +109,15 @@ PosixErrorOr<Cleanup> ForkAndExecveat(int32_t dirfd, const std::string& pathname\nconst std::function<void()>& fn,\npid_t* child, int* execve_errno);\n+inline PosixErrorOr<Cleanup> ForkAndExecveat(int32_t dirfd,\n+ const std::string& pathname,\n+ const ExecveArray& argv,\n+ const ExecveArray& envv, int flags,\n+ pid_t* child, int* execve_errno) {\n+ return ForkAndExecveat(\n+ dirfd, pathname, argv, envv, flags, [] {}, child, execve_errno);\n+}\n+\n// Calls fn in a forked subprocess and returns the exit status of the\n// subprocess.\n//\n"
}
] | Go | Apache License 2.0 | google/gvisor | Handle AT_EMPTY_PATH flag in execveat.
PiperOrigin-RevId: 276419967 |
259,858 | 23.10.2019 22:59:45 | 25,200 | 6b0f111d96b08e8fdc59400c3fe76fedfd57b0c4 | Temporarily remove apt instructions
The built repository is not working. This will be updated when the release file is being generated correctly. | [
{
"change_type": "MODIFY",
"old_path": "content/docs/user_guide/install.md",
"new_path": "content/docs/user_guide/install.md",
"diff": "@@ -13,9 +13,13 @@ release channels. You should pick the version you'd like to install. For\nexperimentation, the nightly release is recommended. For production use, the\nlatest release is recommended.\n+<!--\n+\nAfter selecting an appropriate release channel from the options below, proceed\nto the preferred installation mechanism: manual or from an `apt` repository.\n+ -->\n+\n### Nightly\nNightly releases are built most nights from the master branch, and are available\n@@ -29,18 +33,26 @@ Specific nightly releases can be found at:\nNote that a release may not be available for every day.\n+<!--\n+\nTo use a nightly release, use one of the above URLs for `URL` in the manual\ninstructions below. For `apt`, use `nightly` for `DIST` below.\n+ -->\n+\n### Latest release\nThe latest official release is available at the following URL:\n`https://storage.googleapis.com/gvisor/releases/release/latest`\n+<!--\n+\nTo use the latest release, use the above URL for `URL` in the manual\ninstructions below. For `apt`, use `latest` for `DIST` below.\n+ -->\n+\n### Specific release\nA given release release is available at the following URL:\n@@ -49,21 +61,29 @@ A given release release is available at the following URL:\nSee the [releases][releases] page for information about specific releases.\n+<!--\n+\nThis will include point updates for the release, if required. To use a specific\nrelease, use the above URL for `URL` in the manual instructions below. For\n`apt`, use `${yyyymmdd}` for `DIST` below.\n+ -->\n+\n### Point release\nA given point release is available at the following URL:\n`https://storage.googleapis.com/gvisor/releases/release/${yyyymmdd}.${rc}`\n+<!--\n+\nUnlike the specific release above, which may include updates, this release will\nnot change. To use a specific point release, use the above URL for `URL` in the\nmanual instructions below. For apt, use `${yyyymmdd}.${rc}` for `DIST` below.\n-## Install from an `apt` repository\n+ -->\n+\n+<!-- Install from an `apt` repository\nFirst, appropriate dependencies must be installed to allow `apt` to install\npackages via https:\n@@ -118,6 +138,8 @@ sudo apt-get update && sudo apt-get install -y runsc\nIf you have Docker installed, it will be automatically configured.\n+-->\n+\n## Install manually\nAfter selecting an appropriate `URL` above, you can download `runsc` directly\n"
}
] | Go | Apache License 2.0 | google/gvisor | Temporarily remove apt instructions
The built repository is not working. This will be updated when the release file is being generated correctly. |
259,860 | 24.10.2019 01:44:03 | 25,200 | d9fd5363409facbc5cf04b85b3b0e7dade085dd9 | Handle AT_SYMLINK_NOFOLLOW flag for execveat. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/kernel.go",
"new_path": "pkg/sentry/kernel/kernel.go",
"diff": "@@ -805,7 +805,7 @@ func (k *Kernel) CreateProcess(args CreateProcessArgs) (*ThreadGroup, ThreadID,\n// Create a fresh task context.\nremainingTraversals = uint(args.MaxSymlinkTraversals)\n- tc, se := k.LoadTaskImage(ctx, mounts, root, wd, &remainingTraversals, args.Filename, args.File, args.Argv, args.Envv, k.featureSet)\n+ tc, se := k.LoadTaskImage(ctx, mounts, root, wd, &remainingTraversals, args.Filename, args.File, args.Argv, args.Envv, true /*resolveFinal*/, k.featureSet)\nif se != nil {\nreturn nil, 0, errors.New(se.String())\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/task_context.go",
"new_path": "pkg/sentry/kernel/task_context.go",
"diff": "@@ -145,7 +145,7 @@ func (t *Task) Stack() *arch.Stack {\n// * argv: Binary argv\n// * envv: Binary envv\n// * fs: Binary FeatureSet\n-func (k *Kernel) LoadTaskImage(ctx context.Context, mounts *fs.MountNamespace, root, wd *fs.Dirent, maxTraversals *uint, filename string, file *fs.File, argv, envv []string, fs *cpuid.FeatureSet) (*TaskContext, *syserr.Error) {\n+func (k *Kernel) LoadTaskImage(ctx context.Context, mounts *fs.MountNamespace, root, wd *fs.Dirent, maxTraversals *uint, filename string, file *fs.File, argv, envv []string, resolveFinal bool, fs *cpuid.FeatureSet) (*TaskContext, *syserr.Error) {\n// If File is not nil, we should load that instead of resolving filename.\nif file != nil {\nfilename = file.MappedName(ctx)\n@@ -155,7 +155,7 @@ func (k *Kernel) LoadTaskImage(ctx context.Context, mounts *fs.MountNamespace, r\nm := mm.NewMemoryManager(k, k)\ndefer m.DecUsers(ctx)\n- os, ac, name, err := loader.Load(ctx, m, mounts, root, wd, maxTraversals, fs, filename, file, argv, envv, k.extraAuxv, k.vdso)\n+ os, ac, name, err := loader.Load(ctx, m, mounts, root, wd, maxTraversals, fs, filename, file, argv, envv, resolveFinal, k.extraAuxv, k.vdso)\nif err != nil {\nreturn nil, err\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/loader/elf.go",
"new_path": "pkg/sentry/loader/elf.go",
"diff": "@@ -640,7 +640,7 @@ func loadELF(ctx context.Context, m *mm.MemoryManager, mounts *fs.MountNamespace\nvar interp loadedELF\nif bin.interpreter != \"\" {\n- d, i, err := openPath(ctx, mounts, root, wd, maxTraversals, bin.interpreter)\n+ d, i, err := openPath(ctx, mounts, root, wd, maxTraversals, bin.interpreter, true /*resolveFinal*/)\nif err != nil {\nctx.Infof(\"Error opening interpreter %s: %v\", bin.interpreter, err)\nreturn loadedELF{}, nil, err\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/loader/loader.go",
"new_path": "pkg/sentry/loader/loader.go",
"diff": "@@ -57,13 +57,19 @@ func readFull(ctx context.Context, f *fs.File, dst usermem.IOSequence, offset in\n// installed in the Task FDTable. The caller takes ownership of both.\n//\n// name must be a readable, executable, regular file.\n-func openPath(ctx context.Context, mm *fs.MountNamespace, root, wd *fs.Dirent, maxTraversals *uint, name string) (*fs.Dirent, *fs.File, error) {\n+func openPath(ctx context.Context, mounts *fs.MountNamespace, root, wd *fs.Dirent, maxTraversals *uint, name string, resolveFinal bool) (*fs.Dirent, *fs.File, error) {\n+ var err error\nif name == \"\" {\nctx.Infof(\"cannot open empty name\")\nreturn nil, nil, syserror.ENOENT\n}\n- d, err := mm.FindInode(ctx, root, wd, name, maxTraversals)\n+ var d *fs.Dirent\n+ if resolveFinal {\n+ d, err = mounts.FindInode(ctx, root, wd, name, maxTraversals)\n+ } else {\n+ d, err = mounts.FindLink(ctx, root, wd, name, maxTraversals)\n+ }\nif err != nil {\nreturn nil, nil, err\n}\n@@ -71,10 +77,13 @@ func openPath(ctx context.Context, mm *fs.MountNamespace, root, wd *fs.Dirent, m\n// Open file will take a reference to Dirent, so destroy this one.\ndefer d.DecRef()\n+ if !resolveFinal && fs.IsSymlink(d.Inode.StableAttr) {\n+ return nil, nil, syserror.ELOOP\n+ }\n+\nreturn openFile(ctx, nil, d, name)\n}\n-// openFile performs checks on a file to be executed. If provided a *fs.File,\n// openFile takes that file's Dirent and performs checks on it. If provided a\n// *fs.Dirent and not a *fs.File, it creates a *fs.File object from the Dirent's\n// Inode and performs checks on that.\n@@ -181,7 +190,7 @@ const (\n// * arch.Context matching the binary arch\n// * fs.Dirent of the binary file\n// * Possibly updated argv\n-func loadBinary(ctx context.Context, m *mm.MemoryManager, mounts *fs.MountNamespace, root, wd *fs.Dirent, remainingTraversals *uint, features *cpuid.FeatureSet, filename string, passedFile *fs.File, argv []string) (loadedELF, arch.Context, *fs.Dirent, []string, error) {\n+func loadBinary(ctx context.Context, m *mm.MemoryManager, mounts *fs.MountNamespace, root, wd *fs.Dirent, remainingTraversals *uint, features *cpuid.FeatureSet, filename string, passedFile *fs.File, argv []string, resolveFinal bool) (loadedELF, arch.Context, *fs.Dirent, []string, error) {\nfor i := 0; i < maxLoaderAttempts; i++ {\nvar (\nd *fs.Dirent\n@@ -189,8 +198,7 @@ func loadBinary(ctx context.Context, m *mm.MemoryManager, mounts *fs.MountNamesp\nerr error\n)\nif passedFile == nil {\n- d, f, err = openPath(ctx, mounts, root, wd, remainingTraversals, filename)\n-\n+ d, f, err = openPath(ctx, mounts, root, wd, remainingTraversals, filename, resolveFinal)\n} else {\nd, f, err = openFile(ctx, passedFile, nil, \"\")\n// Set to nil in case we loop on a Interpreter Script.\n@@ -255,9 +263,9 @@ func loadBinary(ctx context.Context, m *mm.MemoryManager, mounts *fs.MountNamesp\n// Preconditions:\n// * The Task MemoryManager is empty.\n// * Load is called on the Task goroutine.\n-func Load(ctx context.Context, m *mm.MemoryManager, mounts *fs.MountNamespace, root, wd *fs.Dirent, maxTraversals *uint, fs *cpuid.FeatureSet, filename string, file *fs.File, argv, envv []string, extraAuxv []arch.AuxEntry, vdso *VDSO) (abi.OS, arch.Context, string, *syserr.Error) {\n+func Load(ctx context.Context, m *mm.MemoryManager, mounts *fs.MountNamespace, root, wd *fs.Dirent, maxTraversals *uint, fs *cpuid.FeatureSet, filename string, file *fs.File, argv, envv []string, resolveFinal bool, extraAuxv []arch.AuxEntry, vdso *VDSO) (abi.OS, arch.Context, string, *syserr.Error) 
{\n// Load the binary itself.\n- loaded, ac, d, argv, err := loadBinary(ctx, m, mounts, root, wd, maxTraversals, fs, filename, file, argv)\n+ loaded, ac, d, argv, err := loadBinary(ctx, m, mounts, root, wd, maxTraversals, fs, filename, file, argv, resolveFinal)\nif err != nil {\nreturn 0, nil, \"\", syserr.NewDynamic(fmt.Sprintf(\"Failed to load %s: %v\", filename, err), syserr.FromError(err).ToLinux())\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/syscalls/linux/linux64_amd64.go",
"new_path": "pkg/sentry/syscalls/linux/linux64_amd64.go",
"diff": "@@ -362,7 +362,7 @@ var AMD64 = &kernel.SyscallTable{\n319: syscalls.Supported(\"memfd_create\", MemfdCreate),\n320: syscalls.CapError(\"kexec_file_load\", linux.CAP_SYS_BOOT, \"\", nil),\n321: syscalls.CapError(\"bpf\", linux.CAP_SYS_ADMIN, \"\", nil),\n- 322: syscalls.PartiallySupported(\"execveat\", Execveat, \"No support for AT_SYMLINK_FOLLOW.\", nil),\n+ 322: syscalls.Supported(\"execveat\", Execveat),\n323: syscalls.ErrorWithEvent(\"userfaultfd\", syserror.ENOSYS, \"\", []string{\"gvisor.dev/issue/266\"}), // TODO(b/118906345)\n324: syscalls.ErrorWithEvent(\"membarrier\", syserror.ENOSYS, \"\", []string{\"gvisor.dev/issue/267\"}), // TODO(b/118904897)\n325: syscalls.PartiallySupported(\"mlock2\", Mlock2, \"Stub implementation. The sandbox lacks appropriate permissions.\", nil),\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/syscalls/linux/sys_thread.go",
"new_path": "pkg/sentry/syscalls/linux/sys_thread.go",
"diff": "@@ -105,16 +105,14 @@ func execveat(t *kernel.Task, dirFD int32, pathnameAddr, argvAddr, envvAddr user\n}\n}\n- if flags&linux.AT_SYMLINK_NOFOLLOW != 0 {\n- // TODO(b/128449944): Handle AT_SYMLINK_NOFOLLOW.\n- t.Kernel().EmitUnimplementedEvent(t)\n- return 0, nil, syserror.ENOSYS\n+ if flags&^(linux.AT_EMPTY_PATH|linux.AT_SYMLINK_NOFOLLOW) != 0 {\n+ return 0, nil, syserror.EINVAL\n}\n-\natEmptyPath := flags&linux.AT_EMPTY_PATH != 0\nif !atEmptyPath && len(pathname) == 0 {\nreturn 0, nil, syserror.ENOENT\n}\n+ resolveFinal := flags&linux.AT_SYMLINK_NOFOLLOW == 0\nroot := t.FSContext().RootDirectory()\ndefer root.DecRef()\n@@ -150,7 +148,7 @@ func execveat(t *kernel.Task, dirFD int32, pathnameAddr, argvAddr, envvAddr user\n// Load the new TaskContext.\nmaxTraversals := uint(linux.MaxSymlinkTraversals)\n- tc, se := t.Kernel().LoadTaskImage(t, t.MountNamespace(), root, wd, &maxTraversals, pathname, executable, argv, envv, t.Arch().FeatureSet())\n+ tc, se := t.Kernel().LoadTaskImage(t, t.MountNamespace(), root, wd, &maxTraversals, pathname, executable, argv, envv, resolveFinal, t.Arch().FeatureSet())\nif se != nil {\nreturn 0, nil, se.ToError()\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/exec.cc",
"new_path": "test/syscalls/linux/exec.cc",
"diff": "@@ -542,23 +542,23 @@ TEST(ExecveatTest, BasicWithFDCWD) {\nTEST(ExecveatTest, Basic) {\nstd::string absolute_path = WorkloadPath(kBasicWorkload);\nstd::string parent_dir = std::string(Dirname(absolute_path));\n- std::string relative_path = std::string(Basename(absolute_path));\n+ std::string base = std::string(Basename(absolute_path));\nconst FileDescriptor dirfd =\nASSERT_NO_ERRNO_AND_VALUE(Open(parent_dir, O_DIRECTORY));\n- CheckExecveat(dirfd.get(), relative_path, {absolute_path}, {}, /*flags=*/0,\n+ CheckExecveat(dirfd.get(), base, {absolute_path}, {}, /*flags=*/0,\nArgEnvExitStatus(0, 0), absl::StrCat(absolute_path, \"\\n\"));\n}\nTEST(ExecveatTest, FDNotADirectory) {\nstd::string absolute_path = WorkloadPath(kBasicWorkload);\n- std::string relative_path = std::string(Basename(absolute_path));\n+ std::string base = std::string(Basename(absolute_path));\nconst FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(Open(absolute_path, 0));\nint execve_errno;\n- ASSERT_NO_ERRNO_AND_VALUE(ForkAndExecveat(fd.get(), relative_path,\n- {absolute_path}, {}, /*flags=*/0,\n- /*child=*/nullptr, &execve_errno));\n+ ASSERT_NO_ERRNO_AND_VALUE(ForkAndExecveat(fd.get(), base, {absolute_path}, {},\n+ /*flags=*/0, /*child=*/nullptr,\n+ &execve_errno));\nEXPECT_EQ(execve_errno, ENOTDIR);\n}\n@@ -618,14 +618,77 @@ TEST(ExecveatTest, AbsolutePathWithEmptyPathFlag) {\nTEST(ExecveatTest, RelativePathWithEmptyPathFlag) {\nstd::string absolute_path = WorkloadPath(kBasicWorkload);\nstd::string parent_dir = std::string(Dirname(absolute_path));\n- std::string relative_path = std::string(Basename(absolute_path));\n+ std::string base = std::string(Basename(absolute_path));\nconst FileDescriptor dirfd =\nASSERT_NO_ERRNO_AND_VALUE(Open(parent_dir, O_DIRECTORY));\n- CheckExecveat(dirfd.get(), relative_path, {absolute_path}, {}, AT_EMPTY_PATH,\n+ CheckExecveat(dirfd.get(), base, {absolute_path}, {}, AT_EMPTY_PATH,\nArgEnvExitStatus(0, 0), absl::StrCat(absolute_path, \"\\n\"));\n}\n+TEST(ExecveatTest, SymlinkNoFollowWithRelativePath) {\n+ std::string parent_dir = \"/tmp\";\n+ TempPath link = ASSERT_NO_ERRNO_AND_VALUE(\n+ TempPath::CreateSymlinkTo(parent_dir, WorkloadPath(kBasicWorkload)));\n+ const FileDescriptor dirfd =\n+ ASSERT_NO_ERRNO_AND_VALUE(Open(parent_dir, O_DIRECTORY));\n+ std::string base = std::string(Basename(link.path()));\n+\n+ int execve_errno;\n+ ASSERT_NO_ERRNO_AND_VALUE(ForkAndExecveat(dirfd.get(), base, {base}, {},\n+ AT_SYMLINK_NOFOLLOW,\n+ /*child=*/nullptr, &execve_errno));\n+ EXPECT_EQ(execve_errno, ELOOP);\n+}\n+\n+TEST(ExecveatTest, SymlinkNoFollowWithAbsolutePath) {\n+ std::string parent_dir = \"/tmp\";\n+ TempPath link = ASSERT_NO_ERRNO_AND_VALUE(\n+ TempPath::CreateSymlinkTo(parent_dir, WorkloadPath(kBasicWorkload)));\n+ std::string path = link.path();\n+\n+ int execve_errno;\n+ ASSERT_NO_ERRNO_AND_VALUE(ForkAndExecveat(AT_FDCWD, path, {path}, {},\n+ AT_SYMLINK_NOFOLLOW,\n+ /*child=*/nullptr, &execve_errno));\n+ EXPECT_EQ(execve_errno, ELOOP);\n+}\n+\n+TEST(ExecveatTest, SymlinkNoFollowAndEmptyPath) {\n+ TempPath link = ASSERT_NO_ERRNO_AND_VALUE(\n+ TempPath::CreateSymlinkTo(\"/tmp\", WorkloadPath(kBasicWorkload)));\n+ std::string path = link.path();\n+ const FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(Open(path, 0));\n+\n+ CheckExecveat(fd.get(), \"\", {path}, {}, AT_EMPTY_PATH | AT_SYMLINK_NOFOLLOW,\n+ ArgEnvExitStatus(0, 0), absl::StrCat(path, \"\\n\"));\n+}\n+\n+TEST(ExecveatTest, SymlinkNoFollowIgnoreSymlinkAncestor) {\n+ TempPath parent_link =\n+ 
ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateSymlinkTo(\"/tmp\", \"/bin\"));\n+ std::string path_with_symlink = JoinPath(parent_link.path(), \"echo\");\n+\n+ CheckExecveat(AT_FDCWD, path_with_symlink, {path_with_symlink}, {},\n+ AT_SYMLINK_NOFOLLOW, ArgEnvExitStatus(0, 0), \"\");\n+}\n+\n+TEST(ExecveatTest, SymlinkNoFollowWithNormalFile) {\n+ const FileDescriptor dirfd =\n+ ASSERT_NO_ERRNO_AND_VALUE(Open(\"/bin\", O_DIRECTORY));\n+\n+ CheckExecveat(dirfd.get(), \"echo\", {\"echo\"}, {}, AT_SYMLINK_NOFOLLOW,\n+ ArgEnvExitStatus(0, 0), \"\");\n+}\n+\n+TEST(ExecveatTest, InvalidFlags) {\n+ int execve_errno;\n+ ASSERT_NO_ERRNO_AND_VALUE(ForkAndExecveat(\n+ /*dirfd=*/-1, \"\", {}, {}, /*flags=*/0xFFFF, /*child=*/nullptr,\n+ &execve_errno));\n+ EXPECT_EQ(execve_errno, EINVAL);\n+}\n+\n// Priority consistent across calls to execve()\nTEST(GetpriorityTest, ExecveMaintainsPriority) {\nint prio = 16;\n"
}
] | Go | Apache License 2.0 | google/gvisor | Handle AT_SYMLINK_NOFOLLOW flag for execveat.
PiperOrigin-RevId: 276441249 |
259,891 | 24.10.2019 14:08:03 | 25,200 | 9d81dd355953f44e6cfdbe1f010bac430171970d | Change tcpdump status to working | [
{
"change_type": "MODIFY",
"old_path": "content/docs/user_guide/compatibility/_index.md",
"new_path": "content/docs/user_guide/compatibility/_index.md",
"diff": "@@ -79,7 +79,7 @@ Most common utilities work. Note that:\n| sshd | Partially working. Job control [in progress](https://github.com/google/gvisor/issues/154) |\n| strace | Working |\n| tar | Working |\n-| tcpdump | [In progress](https://github.com/google/gvisor/issues/173) |\n+| tcpdump | Working |\n| top | Working |\n| uptime | Working |\n| vim | Working |\n"
}
] | Go | Apache License 2.0 | google/gvisor | Change tcpdump status to working
https://github.com/google/gvisor/commit/12235d533ae5c8b4b03c6d7f24cae785a2d6ec22 |
260,004 | 24.10.2019 15:18:43 | 25,200 | e50a1f5739adc9bcb74456d365959ae718ff2197 | Remove the amss field from tcpip.tcp.handshake as it was unused
The amss field in the tcpip.tcp.handshake was not used anywhere. Removed it to
not cause confusion with the amss field in the tcpip.tcp.endpoint struct, which
was documented to be used (and is actually being used) for the same purpose. | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/connect.go",
"new_path": "pkg/tcpip/transport/tcp/connect.go",
"diff": "@@ -78,9 +78,6 @@ type handshake struct {\n// mss is the maximum segment size received from the peer.\nmss uint16\n- // amss is the maximum segment size advertised by us to the peer.\n- amss uint16\n-\n// sndWndScale is the send window scale, as defined in RFC 1323. A\n// negative value means no scaling is supported by the peer.\nsndWndScale int\n"
}
] | Go | Apache License 2.0 | google/gvisor | Remove the amss field from tcpip.tcp.handshake as it was unused
The amss field in the tcpip.tcp.handshake was not used anywhere. Removed it to
not cause confusion with the amss field in the tcpip.tcp.endpoint struct, which
was documented to be used (and is actually being used) for the same purpose.
PiperOrigin-RevId: 276577088 |
259,992 | 24.10.2019 16:35:29 | 25,200 | e8ba10c0085d404378ce649e018624b93cf4aa65 | Fix early deletion of rootDir
container.startContainers() cannot be called twice in a test
(e.g. TestMultiContainerLoadSandbox) because the cleanup
function deletes the rootDir, together with information from
all other containers that may exist. | [
{
"change_type": "MODIFY",
"old_path": "runsc/container/container_test.go",
"new_path": "runsc/container/container_test.go",
"diff": "@@ -1548,7 +1548,8 @@ func TestAbbreviatedIDs(t *testing.T) {\n}\ndefer os.RemoveAll(rootDir)\n- conf := testutil.TestConfigWithRoot(rootDir)\n+ conf := testutil.TestConfig()\n+ conf.RootDir = rootDir\ncids := []string{\n\"foo-\" + testutil.UniqueContainerID(),\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/container/multi_container_test.go",
"new_path": "runsc/container/multi_container_test.go",
"diff": "@@ -60,13 +60,8 @@ func createSpecs(cmds ...[]string) ([]*specs.Spec, []string) {\n}\nfunc startContainers(conf *boot.Config, specs []*specs.Spec, ids []string) ([]*Container, func(), error) {\n- // Setup root dir if one hasn't been provided.\nif len(conf.RootDir) == 0 {\n- rootDir, err := testutil.SetupRootDir()\n- if err != nil {\n- return nil, nil, fmt.Errorf(\"error creating root dir: %v\", err)\n- }\n- conf.RootDir = rootDir\n+ panic(\"conf.RootDir not set. Call testutil.SetupRootDir() to set.\")\n}\nvar containers []*Container\n@@ -78,7 +73,6 @@ func startContainers(conf *boot.Config, specs []*specs.Spec, ids []string) ([]*C\nfor _, b := range bundles {\nos.RemoveAll(b)\n}\n- os.RemoveAll(conf.RootDir)\n}\nfor i, spec := range specs {\nbundleDir, err := testutil.SetupBundleDir(spec)\n@@ -144,6 +138,13 @@ func TestMultiContainerSanity(t *testing.T) {\nfor _, conf := range configs(all...) {\nt.Logf(\"Running test with conf: %+v\", conf)\n+ rootDir, err := testutil.SetupRootDir()\n+ if err != nil {\n+ t.Fatalf(\"error creating root dir: %v\", err)\n+ }\n+ defer os.RemoveAll(rootDir)\n+ conf.RootDir = rootDir\n+\n// Setup the containers.\nsleep := []string{\"sleep\", \"100\"}\nspecs, ids := createSpecs(sleep, sleep)\n@@ -175,6 +176,13 @@ func TestMultiPIDNS(t *testing.T) {\nfor _, conf := range configs(all...) {\nt.Logf(\"Running test with conf: %+v\", conf)\n+ rootDir, err := testutil.SetupRootDir()\n+ if err != nil {\n+ t.Fatalf(\"error creating root dir: %v\", err)\n+ }\n+ defer os.RemoveAll(rootDir)\n+ conf.RootDir = rootDir\n+\n// Setup the containers.\nsleep := []string{\"sleep\", \"100\"}\ntestSpecs, ids := createSpecs(sleep, sleep)\n@@ -213,6 +221,13 @@ func TestMultiPIDNSPath(t *testing.T) {\nfor _, conf := range configs(all...) {\nt.Logf(\"Running test with conf: %+v\", conf)\n+ rootDir, err := testutil.SetupRootDir()\n+ if err != nil {\n+ t.Fatalf(\"error creating root dir: %v\", err)\n+ }\n+ defer os.RemoveAll(rootDir)\n+ conf.RootDir = rootDir\n+\n// Setup the containers.\nsleep := []string{\"sleep\", \"100\"}\ntestSpecs, ids := createSpecs(sleep, sleep, sleep)\n@@ -268,13 +283,21 @@ func TestMultiPIDNSPath(t *testing.T) {\n}\nfunc TestMultiContainerWait(t *testing.T) {\n+ rootDir, err := testutil.SetupRootDir()\n+ if err != nil {\n+ t.Fatalf(\"error creating root dir: %v\", err)\n+ }\n+ defer os.RemoveAll(rootDir)\n+\n+ conf := testutil.TestConfig()\n+ conf.RootDir = rootDir\n+\n// The first container should run the entire duration of the test.\ncmd1 := []string{\"sleep\", \"100\"}\n// We'll wait on the second container, which is much shorter lived.\ncmd2 := []string{\"sleep\", \"1\"}\nspecs, ids := createSpecs(cmd1, cmd2)\n- conf := testutil.TestConfig()\ncontainers, cleanup, err := startContainers(conf, specs, ids)\nif err != nil {\nt.Fatalf(\"error starting containers: %v\", err)\n@@ -344,12 +367,14 @@ func TestExecWait(t *testing.T) {\n}\ndefer os.RemoveAll(rootDir)\n+ conf := testutil.TestConfig()\n+ conf.RootDir = rootDir\n+\n// The first container should run the entire duration of the test.\ncmd1 := []string{\"sleep\", \"100\"}\n// We'll wait on the second container, which is much shorter lived.\ncmd2 := []string{\"sleep\", \"1\"}\nspecs, ids := createSpecs(cmd1, cmd2)\n- conf := testutil.TestConfig()\ncontainers, cleanup, err := startContainers(conf, specs, ids)\nif err != nil {\nt.Fatalf(\"error starting containers: %v\", err)\n@@ -432,7 +457,15 @@ func TestMultiContainerMount(t *testing.T) {\n})\n// Setup the containers.\n+ rootDir, err := 
testutil.SetupRootDir()\n+ if err != nil {\n+ t.Fatalf(\"error creating root dir: %v\", err)\n+ }\n+ defer os.RemoveAll(rootDir)\n+\nconf := testutil.TestConfig()\n+ conf.RootDir = rootDir\n+\ncontainers, cleanup, err := startContainers(conf, sps, ids)\nif err != nil {\nt.Fatalf(\"error starting containers: %v\", err)\n@@ -454,6 +487,13 @@ func TestMultiContainerSignal(t *testing.T) {\nfor _, conf := range configs(all...) {\nt.Logf(\"Running test with conf: %+v\", conf)\n+ rootDir, err := testutil.SetupRootDir()\n+ if err != nil {\n+ t.Fatalf(\"error creating root dir: %v\", err)\n+ }\n+ defer os.RemoveAll(rootDir)\n+ conf.RootDir = rootDir\n+\n// Setup the containers.\nsleep := []string{\"sleep\", \"100\"}\nspecs, ids := createSpecs(sleep, sleep)\n@@ -548,6 +588,13 @@ func TestMultiContainerDestroy(t *testing.T) {\nfor _, conf := range configs(all...) {\nt.Logf(\"Running test with conf: %+v\", conf)\n+ rootDir, err := testutil.SetupRootDir()\n+ if err != nil {\n+ t.Fatalf(\"error creating root dir: %v\", err)\n+ }\n+ defer os.RemoveAll(rootDir)\n+ conf.RootDir = rootDir\n+\n// First container will remain intact while the second container is killed.\npodSpecs, ids := createSpecs(\n[]string{\"sleep\", \"100\"},\n@@ -599,13 +646,21 @@ func TestMultiContainerDestroy(t *testing.T) {\n}\nfunc TestMultiContainerProcesses(t *testing.T) {\n+ rootDir, err := testutil.SetupRootDir()\n+ if err != nil {\n+ t.Fatalf(\"error creating root dir: %v\", err)\n+ }\n+ defer os.RemoveAll(rootDir)\n+\n+ conf := testutil.TestConfig()\n+ conf.RootDir = rootDir\n+\n// Note: use curly braces to keep 'sh' process around. Otherwise, shell\n// will just execve into 'sleep' and both containers will look the\n// same.\nspecs, ids := createSpecs(\n[]string{\"sleep\", \"100\"},\n[]string{\"sh\", \"-c\", \"{ sleep 100; }\"})\n- conf := testutil.TestConfig()\ncontainers, cleanup, err := startContainers(conf, specs, ids)\nif err != nil {\nt.Fatalf(\"error starting containers: %v\", err)\n@@ -650,6 +705,15 @@ func TestMultiContainerProcesses(t *testing.T) {\n// TestMultiContainerKillAll checks that all process that belong to a container\n// are killed when SIGKILL is sent to *all* processes in that container.\nfunc TestMultiContainerKillAll(t *testing.T) {\n+ rootDir, err := testutil.SetupRootDir()\n+ if err != nil {\n+ t.Fatalf(\"error creating root dir: %v\", err)\n+ }\n+ defer os.RemoveAll(rootDir)\n+\n+ conf := testutil.TestConfig()\n+ conf.RootDir = rootDir\n+\nfor _, tc := range []struct {\nkillContainer bool\n}{\n@@ -665,7 +729,6 @@ func TestMultiContainerKillAll(t *testing.T) {\nspecs, ids := createSpecs(\n[]string{app, \"task-tree\", \"--depth=2\", \"--width=2\"},\n[]string{app, \"task-tree\", \"--depth=4\", \"--width=2\"})\n- conf := testutil.TestConfig()\ncontainers, cleanup, err := startContainers(conf, specs, ids)\nif err != nil {\nt.Fatalf(\"error starting containers: %v\", err)\n@@ -739,19 +802,13 @@ func TestMultiContainerDestroyNotStarted(t *testing.T) {\nspecs, ids := createSpecs(\n[]string{\"/bin/sleep\", \"100\"},\n[]string{\"/bin/sleep\", \"100\"})\n- rootDir, err := testutil.SetupRootDir()\n- if err != nil {\n- t.Fatalf(\"error creating root dir: %v\", err)\n- }\n- defer os.RemoveAll(rootDir)\n- conf := testutil.TestConfigWithRoot(rootDir)\n-\n- // Create and start root container.\n- rootBundleDir, err := testutil.SetupBundleDir(specs[0])\n+ conf := testutil.TestConfig()\n+ rootDir, rootBundleDir, err := testutil.SetupContainer(specs[0], conf)\nif err != nil {\nt.Fatalf(\"error setting up container: 
%v\", err)\n}\n+ defer os.RemoveAll(rootDir)\ndefer os.RemoveAll(rootBundleDir)\nrootArgs := Args{\n@@ -800,19 +857,12 @@ func TestMultiContainerDestroyStarting(t *testing.T) {\n}\nspecs, ids := createSpecs(cmds...)\n- rootDir, err := testutil.SetupRootDir()\n- if err != nil {\n- t.Fatalf(\"error creating root dir: %v\", err)\n- }\n- defer os.RemoveAll(rootDir)\n-\n- conf := testutil.TestConfigWithRoot(rootDir)\n-\n- // Create and start root container.\n- rootBundleDir, err := testutil.SetupBundleDir(specs[0])\n+ conf := testutil.TestConfig()\n+ rootDir, rootBundleDir, err := testutil.SetupContainer(specs[0], conf)\nif err != nil {\nt.Fatalf(\"error setting up container: %v\", err)\n}\n+ defer os.RemoveAll(rootDir)\ndefer os.RemoveAll(rootBundleDir)\nrootArgs := Args{\n@@ -886,9 +936,17 @@ func TestMultiContainerDifferentFilesystems(t *testing.T) {\nscript := fmt.Sprintf(\"if [ -f %q ]; then exit 1; else touch %q; fi\", filename, filename)\ncmd := []string{\"sh\", \"-c\", script}\n+ rootDir, err := testutil.SetupRootDir()\n+ if err != nil {\n+ t.Fatalf(\"error creating root dir: %v\", err)\n+ }\n+ defer os.RemoveAll(rootDir)\n+\n+ conf := testutil.TestConfig()\n+ conf.RootDir = rootDir\n+\n// Make sure overlay is enabled, and none of the root filesystems are\n// read-only, otherwise we won't be able to create the file.\n- conf := testutil.TestConfig()\nconf.Overlay = true\nspecs, ids := createSpecs(cmdRoot, cmd, cmd)\nfor _, s := range specs {\n@@ -941,26 +999,21 @@ func TestMultiContainerContainerDestroyStress(t *testing.T) {\n}\nallSpecs, allIDs := createSpecs(cmds...)\n- rootDir, err := testutil.SetupRootDir()\n- if err != nil {\n- t.Fatalf(\"error creating root dir: %v\", err)\n- }\n- defer os.RemoveAll(rootDir)\n-\n// Split up the specs and IDs.\nrootSpec := allSpecs[0]\nrootID := allIDs[0]\nchildrenSpecs := allSpecs[1:]\nchildrenIDs := allIDs[1:]\n- bundleDir, err := testutil.SetupBundleDir(rootSpec)\n+ conf := testutil.TestConfig()\n+ rootDir, bundleDir, err := testutil.SetupContainer(rootSpec, conf)\nif err != nil {\n- t.Fatalf(\"error setting up bundle dir: %v\", err)\n+ t.Fatalf(\"error setting up container: %v\", err)\n}\n+ defer os.RemoveAll(rootDir)\ndefer os.RemoveAll(bundleDir)\n// Start root container.\n- conf := testutil.TestConfigWithRoot(rootDir)\nrootArgs := Args{\nID: rootID,\nSpec: rootSpec,\n@@ -1029,6 +1082,13 @@ func TestMultiContainerSharedMount(t *testing.T) {\nfor _, conf := range configs(all...) {\nt.Logf(\"Running test with conf: %+v\", conf)\n+ rootDir, err := testutil.SetupRootDir()\n+ if err != nil {\n+ t.Fatalf(\"error creating root dir: %v\", err)\n+ }\n+ defer os.RemoveAll(rootDir)\n+ conf.RootDir = rootDir\n+\n// Setup the containers.\nsleep := []string{\"sleep\", \"100\"}\npodSpec, ids := createSpecs(sleep, sleep)\n@@ -1137,6 +1197,13 @@ func TestMultiContainerSharedMountReadonly(t *testing.T) {\nfor _, conf := range configs(all...) {\nt.Logf(\"Running test with conf: %+v\", conf)\n+ rootDir, err := testutil.SetupRootDir()\n+ if err != nil {\n+ t.Fatalf(\"error creating root dir: %v\", err)\n+ }\n+ defer os.RemoveAll(rootDir)\n+ conf.RootDir = rootDir\n+\n// Setup the containers.\nsleep := []string{\"sleep\", \"100\"}\npodSpec, ids := createSpecs(sleep, sleep)\n@@ -1197,6 +1264,13 @@ func TestMultiContainerSharedMountRestart(t *testing.T) {\nfor _, conf := range configs(all...) 
{\nt.Logf(\"Running test with conf: %+v\", conf)\n+ rootDir, err := testutil.SetupRootDir()\n+ if err != nil {\n+ t.Fatalf(\"error creating root dir: %v\", err)\n+ }\n+ defer os.RemoveAll(rootDir)\n+ conf.RootDir = rootDir\n+\n// Setup the containers.\nsleep := []string{\"sleep\", \"100\"}\npodSpec, ids := createSpecs(sleep, sleep)\n@@ -1300,8 +1374,14 @@ func TestMultiContainerSharedMountRestart(t *testing.T) {\n// Test that unsupported pod mounts options are ignored when matching master and\n// slave mounts.\nfunc TestMultiContainerSharedMountUnsupportedOptions(t *testing.T) {\n+ rootDir, err := testutil.SetupRootDir()\n+ if err != nil {\n+ t.Fatalf(\"error creating root dir: %v\", err)\n+ }\n+ defer os.RemoveAll(rootDir)\n+\nconf := testutil.TestConfig()\n- t.Logf(\"Running test with conf: %+v\", conf)\n+ conf.RootDir = rootDir\n// Setup the containers.\nsleep := []string{\"/bin/sleep\", \"100\"}\n@@ -1376,6 +1456,15 @@ func TestMultiContainerMultiRootCanHandleFDs(t *testing.T) {\nType: \"tmpfs\",\n}\n+ rootDir, err := testutil.SetupRootDir()\n+ if err != nil {\n+ t.Fatalf(\"error creating root dir: %v\", err)\n+ }\n+ defer os.RemoveAll(rootDir)\n+\n+ conf := testutil.TestConfig()\n+ conf.RootDir = rootDir\n+\n// Create the specs.\nspecs, ids := createSpecs(\n[]string{\"sleep\", \"1000\"},\n@@ -1386,7 +1475,6 @@ func TestMultiContainerMultiRootCanHandleFDs(t *testing.T) {\nspecs[1].Mounts = append(specs[2].Mounts, sharedMnt, writeableMnt)\nspecs[2].Mounts = append(specs[1].Mounts, sharedMnt)\n- conf := testutil.TestConfig()\ncontainers, cleanup, err := startContainers(conf, specs, ids)\nif err != nil {\nt.Fatalf(\"error starting containers: %v\", err)\n@@ -1405,9 +1493,17 @@ func TestMultiContainerMultiRootCanHandleFDs(t *testing.T) {\n// Test that container is destroyed when Gofer is killed.\nfunc TestMultiContainerGoferKilled(t *testing.T) {\n+ rootDir, err := testutil.SetupRootDir()\n+ if err != nil {\n+ t.Fatalf(\"error creating root dir: %v\", err)\n+ }\n+ defer os.RemoveAll(rootDir)\n+\n+ conf := testutil.TestConfig()\n+ conf.RootDir = rootDir\n+\nsleep := []string{\"sleep\", \"100\"}\nspecs, ids := createSpecs(sleep, sleep, sleep)\n- conf := testutil.TestConfig()\ncontainers, cleanup, err := startContainers(conf, specs, ids)\nif err != nil {\nt.Fatalf(\"error starting containers: %v\", err)\n@@ -1483,7 +1579,15 @@ func TestMultiContainerGoferKilled(t *testing.T) {\nfunc TestMultiContainerLoadSandbox(t *testing.T) {\nsleep := []string{\"sleep\", \"100\"}\nspecs, ids := createSpecs(sleep, sleep, sleep)\n+\n+ rootDir, err := testutil.SetupRootDir()\n+ if err != nil {\n+ t.Fatalf(\"error creating root dir: %v\", err)\n+ }\n+ defer os.RemoveAll(rootDir)\n+\nconf := testutil.TestConfig()\n+ conf.RootDir = rootDir\n// Create containers for the sandbox.\nwants, cleanup, err := startContainers(conf, specs, ids)\n@@ -1576,7 +1680,15 @@ func TestMultiContainerRunNonRoot(t *testing.T) {\nType: \"bind\",\n})\n+ rootDir, err := testutil.SetupRootDir()\n+ if err != nil {\n+ t.Fatalf(\"error creating root dir: %v\", err)\n+ }\n+ defer os.RemoveAll(rootDir)\n+\nconf := testutil.TestConfig()\n+ conf.RootDir = rootDir\n+\npod, cleanup, err := startContainers(conf, podSpecs, ids)\nif err != nil {\nt.Fatalf(\"error starting containers: %v\", err)\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/testutil/testutil.go",
"new_path": "runsc/testutil/testutil.go",
"diff": "@@ -151,13 +151,6 @@ func TestConfig() *boot.Config {\n}\n}\n-// TestConfigWithRoot returns the default configuration to use in tests.\n-func TestConfigWithRoot(rootDir string) *boot.Config {\n- conf := TestConfig()\n- conf.RootDir = rootDir\n- return conf\n-}\n-\n// NewSpecWithArgs creates a simple spec with the given args suitable for use\n// in tests.\nfunc NewSpecWithArgs(args ...string) *specs.Spec {\n"
},
{
"change_type": "MODIFY",
"old_path": "test/root/oom_score_adj_test.go",
"new_path": "test/root/oom_score_adj_test.go",
"diff": "@@ -40,6 +40,15 @@ var (\n// TestOOMScoreAdjSingle tests that oom_score_adj is set properly in a\n// single container sandbox.\nfunc TestOOMScoreAdjSingle(t *testing.T) {\n+ rootDir, err := testutil.SetupRootDir()\n+ if err != nil {\n+ t.Fatalf(\"error creating root dir: %v\", err)\n+ }\n+ defer os.RemoveAll(rootDir)\n+\n+ conf := testutil.TestConfig()\n+ conf.RootDir = rootDir\n+\nppid, err := specutils.GetParentPid(os.Getpid())\nif err != nil {\nt.Fatalf(\"getting parent pid: %v\", err)\n@@ -84,7 +93,6 @@ func TestOOMScoreAdjSingle(t *testing.T) {\ns := testutil.NewSpecWithArgs(\"sleep\", \"1000\")\ns.Process.OOMScoreAdj = testCase.OOMScoreAdj\n- conf := testutil.TestConfig()\ncontainers, cleanup, err := startContainers(conf, []*specs.Spec{s}, []string{id})\nif err != nil {\nt.Fatalf(\"error starting containers: %v\", err)\n@@ -123,6 +131,15 @@ func TestOOMScoreAdjSingle(t *testing.T) {\n// TestOOMScoreAdjMulti tests that oom_score_adj is set properly in a\n// multi-container sandbox.\nfunc TestOOMScoreAdjMulti(t *testing.T) {\n+ rootDir, err := testutil.SetupRootDir()\n+ if err != nil {\n+ t.Fatalf(\"error creating root dir: %v\", err)\n+ }\n+ defer os.RemoveAll(rootDir)\n+\n+ conf := testutil.TestConfig()\n+ conf.RootDir = rootDir\n+\nppid, err := specutils.GetParentPid(os.Getpid())\nif err != nil {\nt.Fatalf(\"getting parent pid: %v\", err)\n@@ -240,7 +257,6 @@ func TestOOMScoreAdjMulti(t *testing.T) {\n}\n}\n- conf := testutil.TestConfig()\ncontainers, cleanup, err := startContainers(conf, specs, ids)\nif err != nil {\nt.Fatalf(\"error starting containers: %v\", err)\n@@ -327,13 +343,8 @@ func createSpecs(cmds ...[]string) ([]*specs.Spec, []string) {\n}\nfunc startContainers(conf *boot.Config, specs []*specs.Spec, ids []string) ([]*container.Container, func(), error) {\n- // Setup root dir if one hasn't been provided.\nif len(conf.RootDir) == 0 {\n- rootDir, err := testutil.SetupRootDir()\n- if err != nil {\n- return nil, nil, fmt.Errorf(\"error creating root dir: %v\", err)\n- }\n- conf.RootDir = rootDir\n+ panic(\"conf.RootDir not set. Call testutil.SetupRootDir() to set.\")\n}\nvar containers []*container.Container\n@@ -345,7 +356,6 @@ func startContainers(conf *boot.Config, specs []*specs.Spec, ids []string) ([]*c\nfor _, b := range bundles {\nos.RemoveAll(b)\n}\n- os.RemoveAll(conf.RootDir)\n}\nfor i, spec := range specs {\nbundleDir, err := testutil.SetupBundleDir(spec)\n"
}
] | Go | Apache License 2.0 | google/gvisor | Fix early deletion of rootDir
container.startContainers() cannot be called twice in a test
(e.g. TestMultiContainerLoadSandbox) because the cleanup
function deletes the rootDir, together with information from
all other containers that may exist.
PiperOrigin-RevId: 276591806 |
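The pattern this commit standardizes is that each test owns its root directory and wires it into the config before calling startContainers. Below is a minimal sketch of that setup using the helpers visible in the diff (testutil.SetupRootDir, testutil.TestConfig, createSpecs, startContainers); the test name and body are illustrative and it assumes the imports already present in multi_container_test.go.

```go
func TestTwoSleepContainers(t *testing.T) {
	// Each test creates and owns its root directory; startContainers no
	// longer creates one, and its cleanup no longer removes it, so state
	// from other containers in the same sandbox is preserved.
	rootDir, err := testutil.SetupRootDir()
	if err != nil {
		t.Fatalf("error creating root dir: %v", err)
	}
	defer os.RemoveAll(rootDir)

	conf := testutil.TestConfig()
	conf.RootDir = rootDir // startContainers panics if this is left unset.

	specs, ids := createSpecs([]string{"sleep", "100"}, []string{"sleep", "100"})
	containers, cleanup, err := startContainers(conf, specs, ids)
	if err != nil {
		t.Fatalf("error starting containers: %v", err)
	}
	defer cleanup()
	_ = containers
}
```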
260,004 | 24.10.2019 16:51:41 | 25,200 | 27e896f2905eea612855b1c92d9b43ebaa09cbf3 | Add a type to represent the NDP Prefix Information option.
This change is in preparation for NDP Prefix Discovery and SLAAC where the stack
will need to handle NDP Prefix Information options.
Tests: Test that given an NDP Prefix Information option buffer, correct values
are returned by the field getters. | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/header/ndp_options.go",
"new_path": "pkg/tcpip/header/ndp_options.go",
"diff": "package header\nimport (\n+ \"encoding/binary\"\n+ \"time\"\n+\n\"gvisor.dev/gvisor/pkg/tcpip\"\n)\n@@ -27,6 +30,65 @@ const (\n// Link Layer Option for an Ethernet address.\nndpTargetEthernetLinkLayerAddressSize = 8\n+ // ndpPrefixInformationType is the type of the Prefix Information\n+ // option, as per RFC 4861 section 4.6.2.\n+ ndpPrefixInformationType = 3\n+\n+ // ndpPrefixInformationLength is the expected length, in bytes, of the\n+ // body of an NDP Prefix Information option, as per RFC 4861 section\n+ // 4.6.2 which specifies that the Length field is 4. Given this, the\n+ // expected length, in bytes, is 30 becuase 4 * lengthByteUnits (8) - 2\n+ // (Type & Length) = 30.\n+ ndpPrefixInformationLength = 30\n+\n+ // ndpPrefixInformationPrefixLengthOffset is the offset of the Prefix\n+ // Length field within an NDPPrefixInformation.\n+ ndpPrefixInformationPrefixLengthOffset = 0\n+\n+ // ndpPrefixInformationFlagsOffset is the offset of the flags byte\n+ // within an NDPPrefixInformation.\n+ ndpPrefixInformationFlagsOffset = 1\n+\n+ // ndpPrefixInformationOnLinkFlagMask is the mask of the On-Link Flag\n+ // field in the flags byte within an NDPPrefixInformation.\n+ ndpPrefixInformationOnLinkFlagMask = (1 << 7)\n+\n+ // ndpPrefixInformationAutoAddrConfFlagMask is the mask of the\n+ // Autonomous Address-Configuration flag field in the flags byte within\n+ // an NDPPrefixInformation.\n+ ndpPrefixInformationAutoAddrConfFlagMask = (1 << 6)\n+\n+ // ndpPrefixInformationReserved1FlagsMask is the mask of the Reserved1\n+ // field in the flags byte within an NDPPrefixInformation.\n+ ndpPrefixInformationReserved1FlagsMask = 63\n+\n+ // ndpPrefixInformationValidLifetimeOffset is the start of the 4-byte\n+ // Valid Lifetime field within an NDPPrefixInformation.\n+ ndpPrefixInformationValidLifetimeOffset = 2\n+\n+ // ndpPrefixInformationPreferredLifetimeOffset is the start of the\n+ // 4-byte Preferred Lifetime field within an NDPPrefixInformation.\n+ ndpPrefixInformationPreferredLifetimeOffset = 6\n+\n+ // ndpPrefixInformationReserved2Offset is the start of the 4-byte\n+ // Reserved2 field within an NDPPrefixInformation.\n+ ndpPrefixInformationReserved2Offset = 10\n+\n+ // ndpPrefixInformationReserved2Length is the length of the Reserved2\n+ // field.\n+ //\n+ // It is 4 bytes.\n+ ndpPrefixInformationReserved2Length = 4\n+\n+ // ndpPrefixInformationPrefixOffset is the start of the Prefix field\n+ // within an NDPPrefixInformation.\n+ ndpPrefixInformationPrefixOffset = 14\n+\n+ // NDPPrefixInformationInfiniteLifetime is a value that represents\n+ // infinity for the Valid and Preferred Lifetime fields in a NDP Prefix\n+ // Information option. Its value is (2^32 - 1)s = 4294967295s\n+ NDPPrefixInformationInfiniteLifetime = time.Second * 4294967295\n+\n// lengthByteUnits is the multiplier factor for the Length field of an\n// NDP option. 
That is, the length field for NDP options is in units of\n// 8 octets, as per RFC 4861 section 4.6.\n@@ -154,6 +216,9 @@ func (b NDPOptionsSerializer) Length() int {\n// NDPTargetLinkLayerAddressOption is the NDP Target Link Layer Option\n// as defined by RFC 4861 section 4.6.1.\n+//\n+// It is the first X bytes following the NDP option's Type and Length field\n+// where X is the value in Length multiplied by lengthByteUnits - 2 bytes.\ntype NDPTargetLinkLayerAddressOption tcpip.LinkAddress\n// Type implements ndpOption.Type.\n@@ -170,3 +235,102 @@ func (o NDPTargetLinkLayerAddressOption) Length() int {\nfunc (o NDPTargetLinkLayerAddressOption) serializeInto(b []byte) int {\nreturn copy(b, o)\n}\n+\n+// NDPPrefixInformation is the NDP Prefix Information option as defined by\n+// RFC 4861 section 4.6.2.\n+//\n+// The length, in bytes, of a valid NDP Prefix Information option body MUST be\n+// ndpPrefixInformationLength bytes.\n+type NDPPrefixInformation []byte\n+\n+// Type implements ndpOption.Type.\n+func (o NDPPrefixInformation) Type() uint8 {\n+ return ndpPrefixInformationType\n+}\n+\n+// Length implements ndpOption.Length.\n+func (o NDPPrefixInformation) Length() int {\n+ return ndpPrefixInformationLength\n+}\n+\n+// serializeInto implements ndpOption.serializeInto.\n+func (o NDPPrefixInformation) serializeInto(b []byte) int {\n+ used := copy(b, o)\n+\n+ // Zero out the Reserved1 field.\n+ b[ndpPrefixInformationFlagsOffset] &^= ndpPrefixInformationReserved1FlagsMask\n+\n+ // Zero out the Reserved2 field.\n+ reserved2 := b[ndpPrefixInformationReserved2Offset:][:ndpPrefixInformationReserved2Length]\n+ for i := range reserved2 {\n+ reserved2[i] = 0\n+ }\n+\n+ return used\n+}\n+\n+// PrefixLength returns the value in the number of leading bits in the Prefix\n+// that are valid.\n+//\n+// Valid values are in the range [0, 128], but o may not always contain valid\n+// values. It is up to the caller to valdiate the Prefix Information option.\n+func (o NDPPrefixInformation) PrefixLength() uint8 {\n+ return o[ndpPrefixInformationPrefixLengthOffset]\n+}\n+\n+// OnLinkFlag returns true of the prefix is considered on-link. On-link means\n+// that a forwarding node is not needed to send packets to other nodes on the\n+// same prefix.\n+//\n+// Note, when this function returns false, no statement is made about the\n+// on-link property of a prefix. That is, if OnLinkFlag returns false, the\n+// caller MUST NOT conclude that the prefix is off-link and MUST NOT update any\n+// previously stored state for this prefix about its on-link status.\n+func (o NDPPrefixInformation) OnLinkFlag() bool {\n+ return o[ndpPrefixInformationFlagsOffset]&ndpPrefixInformationOnLinkFlagMask != 0\n+}\n+\n+// AutonomousAddressConfigurationFlag returns true if the prefix can be used for\n+// Stateless Address Auto-Configuration (as specified in RFC 4862).\n+func (o NDPPrefixInformation) AutonomousAddressConfigurationFlag() bool {\n+ return o[ndpPrefixInformationFlagsOffset]&ndpPrefixInformationAutoAddrConfFlagMask != 0\n+}\n+\n+// ValidLifetime returns the length of time that the prefix is valid for the\n+// purpose of on-link determination. 
This value is relative to the send time of\n+// the packet that the Prefix Information option was present in.\n+//\n+// Note, a value of 0 implies the prefix should not be considered as on-link,\n+// and a value of infinity/forever is represented by\n+// NDPPrefixInformationInfiniteLifetime.\n+func (o NDPPrefixInformation) ValidLifetime() time.Duration {\n+ // The field is the time in seconds, as per RFC 4861 section 4.6.2.\n+ return time.Second * time.Duration(binary.BigEndian.Uint32(o[ndpPrefixInformationValidLifetimeOffset:]))\n+}\n+\n+// PreferredLifetime returns the length of time that an address generated from\n+// the prefix via Stateless Address Auto-Configuration remains preferred. This\n+// value is relative to the send time of the packet that the Prefix Information\n+// option was present in.\n+//\n+// Note, a value of 0 implies that addresses generated from the prefix should\n+// no longer remain preferred, and a value of infinity is represented by\n+// NDPPrefixInformationInfiniteLifetime.\n+//\n+// Also note that the value of this field MUST NOT exceed the Valid Lifetime\n+// field to avoid preferring addresses that are no longer valid, for the\n+// purpose of Stateless Address Auto-Configuration.\n+func (o NDPPrefixInformation) PreferredLifetime() time.Duration {\n+ // The field is the time in seconds, as per RFC 4861 section 4.6.2.\n+ return time.Second * time.Duration(binary.BigEndian.Uint32(o[ndpPrefixInformationPreferredLifetimeOffset:]))\n+}\n+\n+// Prefix returns an IPv6 address or a prefix of an IPv6 address. The Prefix\n+// Length field (see NDPPrefixInformation.PrefixLength) contains the number\n+// of valid leading bits in the prefix.\n+//\n+// Hosts SHOULD ignore an NDP Prefix Information option where the Prefix field\n+// holds the link-local prefix (fe80::).\n+func (o NDPPrefixInformation) Prefix() tcpip.Address {\n+ return tcpip.Address(o[ndpPrefixInformationPrefixOffset:][:IPv6AddressSize])\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/header/ndp_test.go",
"new_path": "pkg/tcpip/header/ndp_test.go",
"diff": "@@ -197,3 +197,74 @@ func TestNDPTargetLinkLayerAddressOptionSerialize(t *testing.T) {\n})\n}\n}\n+\n+// TestNDPPrefixInformationOption tests the field getters and serialization of a\n+// NDPPrefixInformation.\n+func TestNDPPrefixInformationOption(t *testing.T) {\n+ b := []byte{\n+ 43, 127,\n+ 1, 2, 3, 4,\n+ 5, 6, 7, 8,\n+ 5, 5, 5, 5,\n+ 9, 10, 11, 12,\n+ 13, 14, 15, 16,\n+ 17, 18, 19, 20,\n+ 21, 22, 23, 24,\n+ }\n+\n+ targetBuf := []byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}\n+ opts := NDPOptions(targetBuf)\n+ serializer := NDPOptionsSerializer{\n+ NDPPrefixInformation(b),\n+ }\n+ opts.Serialize(serializer)\n+ expectedBuf := []byte{\n+ 3, 4, 43, 64,\n+ 1, 2, 3, 4,\n+ 5, 6, 7, 8,\n+ 0, 0, 0, 0,\n+ 9, 10, 11, 12,\n+ 13, 14, 15, 16,\n+ 17, 18, 19, 20,\n+ 21, 22, 23, 24,\n+ }\n+ if !bytes.Equal(targetBuf, expectedBuf) {\n+ t.Fatalf(\"got targetBuf = %x, want = %x\", targetBuf, expectedBuf)\n+ }\n+\n+ // First two bytes are the Type and Length fields, which are not part of\n+ // the option body.\n+ pi := NDPPrefixInformation(targetBuf[2:])\n+\n+ if got := pi.Type(); got != 3 {\n+ t.Fatalf(\"got Type = %d, want = 3\", got)\n+ }\n+\n+ if got := pi.Length(); got != 30 {\n+ t.Fatalf(\"got Length = %d, want = 30\", got)\n+ }\n+\n+ if got := pi.PrefixLength(); got != 43 {\n+ t.Fatalf(\"got PrefixLength = %d, want = 43\", got)\n+ }\n+\n+ if pi.OnLinkFlag() {\n+ t.Fatalf(\"got OnLinkFlag = true, want = false\")\n+ }\n+\n+ if !pi.AutonomousAddressConfigurationFlag() {\n+ t.Fatalf(\"got AutonomousAddressConfigurationFlag = false, want = true\")\n+ }\n+\n+ if got, want := pi.ValidLifetime(), 16909060*time.Second; got != want {\n+ t.Fatalf(\"got ValidLifetime = %d, want = %d\", got, want)\n+ }\n+\n+ if got, want := pi.PreferredLifetime(), 84281096*time.Second; got != want {\n+ t.Fatalf(\"got PreferredLifetime = %d, want = %d\", got, want)\n+ }\n+\n+ if got, want := pi.Prefix(), tcpip.Address(\"\\x09\\x0a\\x0b\\x0c\\x0d\\x0e\\x0f\\x10\\x11\\x12\\x13\\x14\\x15\\x16\\x17\\x18\"); got != want {\n+ t.Fatalf(\"got Prefix = %s, want = %s\", got, want)\n+ }\n+}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add a type to represent the NDP Prefix Information option.
This change is in preparation for NDP Prefix Discovery and SLAAC where the stack
will need to handle NDP Prefix Information options.
Tests: Test that given an NDP Prefix Information option buffer, correct values
are returned by the field getters.
PiperOrigin-RevId: 276594592 |
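As a rough usage sketch of the accessors introduced above: the 30-byte option body below is hand-built with illustrative values (prefix length 64, autonomous flag set, 2001:db8::, one-hour valid lifetime), and only the getters shown in the diff are relied upon.

```go
package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/tcpip/header"
)

func main() {
	// 30-byte Prefix Information option body (the Type and Length octets
	// are not part of an NDPPrefixInformation).
	body := []byte{
		64,   // Prefix Length = 64
		0x40, // Flags: on-link = 0, autonomous = 1
		0x00, 0x00, 0x0e, 0x10, // Valid Lifetime = 3600s
		0x00, 0x00, 0x07, 0x08, // Preferred Lifetime = 1800s
		0x00, 0x00, 0x00, 0x00, // Reserved2
		0x20, 0x01, 0x0d, 0xb8, // Prefix = 2001:db8::
		0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00,
	}

	pi := header.NDPPrefixInformation(body)
	fmt.Println("prefix length:", pi.PrefixLength())
	fmt.Println("on-link:", pi.OnLinkFlag(), "autonomous:", pi.AutonomousAddressConfigurationFlag())
	fmt.Println("valid:", pi.ValidLifetime(), "preferred:", pi.PreferredLifetime())
	fmt.Printf("prefix bytes: %x\n", []byte(pi.Prefix()))
}
```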
259,853 | 25.10.2019 10:47:49 | 25,200 | fd598912bee1965c32dee1a5933678ed34e768bc | platform/ptrace: use tgkill instead of kill
The syscall filters don't allow kill, just tgkill. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/ptrace/subprocess.go",
"new_path": "pkg/sentry/platform/ptrace/subprocess.go",
"diff": "@@ -335,7 +335,8 @@ func (t *thread) unexpectedStubExit() {\n// these cases, we don't need to panic. There is no reasons to\n// think that something wrong in gVisor.\nlog.Warningf(\"The ptrace stub process %v has been killed by SIGKILL.\", t.tgid)\n- syscall.Kill(os.Getpid(), syscall.SIGKILL)\n+ pid := os.Getpid()\n+ syscall.Tgkill(pid, pid, syscall.Signal(syscall.SIGKILL))\n}\nt.dumpAndPanic(fmt.Sprintf(\"wait failed: the process %d:%d exited: %x (err %v)\", t.tgid, t.tid, msg, err))\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | platform/ptrace: use tgkill instead of kill
The syscall filters don't allow kill, just tgkill.
PiperOrigin-RevId: 276718421 |
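The substitution works because tgkill(2) addresses an explicit (tgid, tid) pair, which the sandbox's seccomp filter already allows, whereas kill(2) does not appear in the filter. A standalone sketch of the call shape follows; it sends signal 0 (an existence check) rather than SIGKILL, purely for illustration, and is Linux-only.

```go
package main

import (
	"fmt"
	"os"
	"syscall"
)

func main() {
	// kill(2) is not permitted by the sandbox's syscall filter, but
	// tgkill(2) is; directing the signal at (tgid, tid) of the current
	// process is the allowed equivalent of kill(getpid(), sig).
	pid := os.Getpid()
	if err := syscall.Tgkill(pid, pid, syscall.Signal(0)); err != nil {
		fmt.Println("tgkill:", err)
	} else {
		fmt.Println("tgkill succeeded")
	}
}
```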
259,854 | 25.10.2019 13:14:02 | 25,200 | 8f029b3f823342e43d23e2a238bc599596bdca24 | Convert DelayOption to the newer/faster SockOpt int type.
DelayOption is set on all new endpoints in gVisor. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/netstack/netstack.go",
"new_path": "pkg/sentry/socket/netstack/netstack.go",
"diff": "@@ -281,7 +281,7 @@ type SocketOperations struct {\n// New creates a new endpoint socket.\nfunc New(t *kernel.Task, family int, skType linux.SockType, protocol int, queue *waiter.Queue, endpoint tcpip.Endpoint) (*fs.File, *syserr.Error) {\nif skType == linux.SOCK_STREAM {\n- if err := endpoint.SetSockOpt(tcpip.DelayOption(1)); err != nil {\n+ if err := endpoint.SetSockOptInt(tcpip.DelayOption, 1); err != nil {\nreturn nil, syserr.TranslateNetstackError(err)\n}\n}\n@@ -1055,8 +1055,8 @@ func getSockOptTCP(t *kernel.Task, ep commonEndpoint, name, outLen int) (interfa\nreturn nil, syserr.ErrInvalidArgument\n}\n- var v tcpip.DelayOption\n- if err := ep.GetSockOpt(&v); err != nil {\n+ v, err := ep.GetSockOptInt(tcpip.DelayOption)\n+ if err != nil {\nreturn nil, syserr.TranslateNetstackError(err)\n}\n@@ -1497,11 +1497,11 @@ func setSockOptTCP(t *kernel.Task, ep commonEndpoint, name int, optVal []byte) *\n}\nv := usermem.ByteOrder.Uint32(optVal)\n- var o tcpip.DelayOption\n+ var o int\nif v == 0 {\no = 1\n}\n- return syserr.TranslateNetstackError(ep.SetSockOpt(o))\n+ return syserr.TranslateNetstackError(ep.SetSockOptInt(tcpip.DelayOption, o))\ncase linux.TCP_CORK:\nif len(optVal) < sizeOfInt32 {\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/tcpip.go",
"new_path": "pkg/tcpip/tcpip.go",
"diff": "@@ -489,6 +489,11 @@ const (\n// number of unread bytes in the output buffer should be returned.\nSendQueueSizeOption\n+ // DelayOption is used by SetSockOpt/GetSockOpt to specify if data\n+ // should be sent out immediately by the transport protocol. For TCP,\n+ // it determines if the Nagle algorithm is on or off.\n+ DelayOption\n+\n// TODO(b/137664753): convert all int socket options to be handled via\n// GetSockOptInt.\n)\n@@ -501,11 +506,6 @@ type ErrorOption struct{}\n// socket is to be restricted to sending and receiving IPv6 packets only.\ntype V6OnlyOption int\n-// DelayOption is used by SetSockOpt/GetSockOpt to specify if data should be\n-// sent out immediately by the transport protocol. For TCP, it determines if the\n-// Nagle algorithm is on or off.\n-type DelayOption int\n-\n// CorkOption is used by SetSockOpt/GetSockOpt to specify if data should be\n// held until segments are full by the TCP transport protocol.\ntype CorkOption int\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/endpoint.go",
"new_path": "pkg/tcpip/transport/tcp/endpoint.go",
"diff": "@@ -1133,16 +1133,6 @@ func (e *endpoint) SetSockOptInt(opt tcpip.SockOpt, v int) *tcpip.Error {\ne.sndBufMu.Unlock()\nreturn nil\n- default:\n- return nil\n- }\n-}\n-\n-// SetSockOpt sets a socket option.\n-func (e *endpoint) SetSockOpt(opt interface{}) *tcpip.Error {\n- // Lower 2 bits represents ECN bits. RFC 3168, section 23.1\n- const inetECNMask = 3\n- switch v := opt.(type) {\ncase tcpip.DelayOption:\nif v == 0 {\natomic.StoreUint32(&e.delay, 0)\n@@ -1154,6 +1144,16 @@ func (e *endpoint) SetSockOpt(opt interface{}) *tcpip.Error {\n}\nreturn nil\n+ default:\n+ return nil\n+ }\n+}\n+\n+// SetSockOpt sets a socket option.\n+func (e *endpoint) SetSockOpt(opt interface{}) *tcpip.Error {\n+ // Lower 2 bits represents ECN bits. RFC 3168, section 23.1\n+ const inetECNMask = 3\n+ switch v := opt.(type) {\ncase tcpip.CorkOption:\nif v == 0 {\natomic.StoreUint32(&e.cork, 0)\n@@ -1345,6 +1345,7 @@ func (e *endpoint) GetSockOptInt(opt tcpip.SockOpt) (int, *tcpip.Error) {\nswitch opt {\ncase tcpip.ReceiveQueueSizeOption:\nreturn e.readyReceiveSize()\n+\ncase tcpip.SendBufferSizeOption:\ne.sndBufMu.Lock()\nv := e.sndBufSize\n@@ -1357,9 +1358,17 @@ func (e *endpoint) GetSockOptInt(opt tcpip.SockOpt) (int, *tcpip.Error) {\ne.rcvListMu.Unlock()\nreturn v, nil\n+ case tcpip.DelayOption:\n+ var o int\n+ if v := atomic.LoadUint32(&e.delay); v != 0 {\n+ o = 1\n}\n+ return o, nil\n+\n+ default:\nreturn -1, tcpip.ErrUnknownProtocolOption\n}\n+}\n// GetSockOpt implements tcpip.Endpoint.GetSockOpt.\nfunc (e *endpoint) GetSockOpt(opt interface{}) *tcpip.Error {\n@@ -1379,13 +1388,6 @@ func (e *endpoint) GetSockOpt(opt interface{}) *tcpip.Error {\n*o = header.TCPDefaultMSS\nreturn nil\n- case *tcpip.DelayOption:\n- *o = 0\n- if v := atomic.LoadUint32(&e.delay); v != 0 {\n- *o = 1\n- }\n- return nil\n-\ncase *tcpip.CorkOption:\n*o = 0\nif v := atomic.LoadUint32(&e.cork); v != 0 {\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/tcp_test.go",
"new_path": "pkg/tcpip/transport/tcp/tcp_test.go",
"diff": "@@ -1623,7 +1623,7 @@ func TestDelay(t *testing.T) {\nc.CreateConnected(789, 30000, -1 /* epRcvBuf */)\n- c.EP.SetSockOpt(tcpip.DelayOption(1))\n+ c.EP.SetSockOptInt(tcpip.DelayOption, 1)\nvar allData []byte\nfor i, data := range [][]byte{{0}, {1, 2, 3, 4}, {5, 6, 7}, {8, 9}, {10}, {11}} {\n@@ -1671,7 +1671,7 @@ func TestUndelay(t *testing.T) {\nc.CreateConnected(789, 30000, -1 /* epRcvBuf */)\n- c.EP.SetSockOpt(tcpip.DelayOption(1))\n+ c.EP.SetSockOptInt(tcpip.DelayOption, 1)\nallData := [][]byte{{0}, {1, 2, 3}}\nfor i, data := range allData {\n@@ -1704,7 +1704,7 @@ func TestUndelay(t *testing.T) {\n// Check that we don't get the second packet yet.\nc.CheckNoPacketTimeout(\"delayed second packet transmitted\", 100*time.Millisecond)\n- c.EP.SetSockOpt(tcpip.DelayOption(0))\n+ c.EP.SetSockOptInt(tcpip.DelayOption, 0)\n// Check that data is received.\nsecond := c.GetPacket()\n@@ -1741,7 +1741,7 @@ func TestMSSNotDelayed(t *testing.T) {\nfn func(tcpip.Endpoint)\n}{\n{\"no-op\", func(tcpip.Endpoint) {}},\n- {\"delay\", func(ep tcpip.Endpoint) { ep.SetSockOpt(tcpip.DelayOption(1)) }},\n+ {\"delay\", func(ep tcpip.Endpoint) { ep.SetSockOptInt(tcpip.DelayOption, 1) }},\n{\"cork\", func(ep tcpip.Endpoint) { ep.SetSockOpt(tcpip.CorkOption(1)) }},\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Convert DelayOption to the newer/faster SockOpt int type.
DelayOption is set on all new endpoints in gVisor.
PiperOrigin-RevId: 276746791 |
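With DelayOption now an integer option, callers toggle Nagle through SetSockOptInt/GetSockOptInt rather than the typed SetSockOpt path. The hypothetical helper package below (not part of the tree) sketches the new call shape against the tcpip.Endpoint interface.

```go
package nagleopt

import "gvisor.dev/gvisor/pkg/tcpip"

// SetNagle enables or disables Nagle's algorithm on a tcpip endpoint via
// the integer socket-option path.
func SetNagle(ep tcpip.Endpoint, enabled bool) *tcpip.Error {
	v := 0
	if enabled {
		// A non-zero DelayOption delays small segments (Nagle on); note
		// that this is the inverse sense of the TCP_NODELAY sockopt.
		v = 1
	}
	return ep.SetSockOptInt(tcpip.DelayOption, v)
}

// NagleEnabled reports whether the endpoint currently delays small segments.
func NagleEnabled(ep tcpip.Endpoint) (bool, *tcpip.Error) {
	v, err := ep.GetSockOptInt(tcpip.DelayOption)
	if err != nil {
		return false, err
	}
	return v != 0, nil
}
```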
260,004 | 25.10.2019 16:05:31 | 25,200 | 5a421058a07477e23f6ca23bb510894419224080 | Validate the checksum for incoming ICMPv6 packets
This change validates the ICMPv6 checksum field before further processing an
ICMPv6 packet.
Tests: Unit tests to make sure that only ICMPv6 packets with a valid checksum
are accepted/processed. Existing tests using checker.ICMPv6 now also check the
ICMPv6 checksum field. | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/checker/BUILD",
"new_path": "pkg/tcpip/checker/BUILD",
"diff": "@@ -10,6 +10,7 @@ go_library(\nvisibility = [\"//visibility:public\"],\ndeps = [\n\"//pkg/tcpip\",\n+ \"//pkg/tcpip/buffer\",\n\"//pkg/tcpip/header\",\n\"//pkg/tcpip/seqnum\",\n],\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/checker/checker.go",
"new_path": "pkg/tcpip/checker/checker.go",
"diff": "@@ -22,6 +22,7 @@ import (\n\"testing\"\n\"gvisor.dev/gvisor/pkg/tcpip\"\n+ \"gvisor.dev/gvisor/pkg/tcpip/buffer\"\n\"gvisor.dev/gvisor/pkg/tcpip/header\"\n\"gvisor.dev/gvisor/pkg/tcpip/seqnum\"\n)\n@@ -639,6 +640,8 @@ func ICMPv4Code(want byte) TransportChecker {\n// ICMPv6 creates a checker that checks that the transport protocol is ICMPv6 and\n// potentially additional ICMPv6 header fields.\n+//\n+// ICMPv6 will validate the checksum field before calling checkers.\nfunc ICMPv6(checkers ...TransportChecker) NetworkChecker {\nreturn func(t *testing.T, h []header.Network) {\nt.Helper()\n@@ -650,6 +653,10 @@ func ICMPv6(checkers ...TransportChecker) NetworkChecker {\n}\nicmp := header.ICMPv6(last.Payload())\n+ if got, want := icmp.Checksum(), header.ICMPv6Checksum(icmp, last.SourceAddress(), last.DestinationAddress(), buffer.VectorisedView{}); got != want {\n+ t.Fatalf(\"Bad ICMPv6 checksum; got %d, want %d\", got, want)\n+ }\n+\nfor _, f := range checkers {\nf(t, icmp)\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/header/icmpv6.go",
"new_path": "pkg/tcpip/header/icmpv6.go",
"diff": "@@ -132,7 +132,7 @@ func (b ICMPv6) Checksum() uint16 {\nreturn binary.BigEndian.Uint16(b[icmpv6ChecksumOffset:])\n}\n-// SetChecksum calculates and sets the ICMP checksum field.\n+// SetChecksum sets the ICMP checksum field.\nfunc (b ICMPv6) SetChecksum(checksum uint16) {\nbinary.BigEndian.PutUint16(b[icmpv6ChecksumOffset:], checksum)\n}\n@@ -197,7 +197,7 @@ func (b ICMPv6) Payload() []byte {\nreturn b[ICMPv6PayloadOffset:]\n}\n-// ICMPv6Checksum calculates the ICMP checksum over the provided ICMP header,\n+// ICMPv6Checksum calculates the ICMP checksum over the provided ICMPv6 header,\n// IPv6 src/dst addresses and the payload.\nfunc ICMPv6Checksum(h ICMPv6, src, dst tcpip.Address, vv buffer.VectorisedView) uint16 {\n// Calculate the IPv6 pseudo-header upper-layer checksum.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/ip_test.go",
"new_path": "pkg/tcpip/network/ip_test.go",
"diff": "@@ -519,6 +519,7 @@ func TestIPv6ReceiveControl(t *testing.T) {\nnewUint16 := func(v uint16) *uint16 { return &v }\nconst mtu = 0xffff\n+ const outerSrcAddr = \"\\x0a\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\xaa\"\ncases := []struct {\nname string\nexpectedCount int\n@@ -570,7 +571,7 @@ func TestIPv6ReceiveControl(t *testing.T) {\nPayloadLength: uint16(len(view) - header.IPv6MinimumSize - c.trunc),\nNextHeader: uint8(header.ICMPv6ProtocolNumber),\nHopLimit: 20,\n- SrcAddr: \"\\x0a\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\xaa\",\n+ SrcAddr: outerSrcAddr,\nDstAddr: localIpv6Addr,\n})\n@@ -618,6 +619,10 @@ func TestIPv6ReceiveControl(t *testing.T) {\no.extra = c.expectedExtra\nvv := view[:len(view)-c.trunc].ToVectorisedView()\n+\n+ // Set ICMPv6 checksum.\n+ icmp.SetChecksum(header.ICMPv6Checksum(icmp, outerSrcAddr, localIpv6Addr, buffer.VectorisedView{}))\n+\nep.HandlePacket(&r, vv)\nif want := c.expectedCount; o.controlCalls != want {\nt.Fatalf(\"Bad number of control calls for %q case: got %v, want %v\", c.name, o.controlCalls, want)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/ipv6/icmp.go",
"new_path": "pkg/tcpip/network/ipv6/icmp.go",
"diff": "@@ -72,6 +72,18 @@ func (e *endpoint) handleICMP(r *stack.Route, netHeader buffer.View, vv buffer.V\nh := header.ICMPv6(v)\niph := header.IPv6(netHeader)\n+ // Validate ICMPv6 checksum before processing the packet.\n+ //\n+ // Only the first view in vv is accounted for by h. To account for the\n+ // rest of vv, a shallow copy is made and the first view is removed.\n+ // This copy is used as extra payload during the checksum calculation.\n+ payload := vv\n+ payload.RemoveFirst()\n+ if got, want := h.Checksum(), header.ICMPv6Checksum(h, iph.SourceAddress(), iph.DestinationAddress(), payload); got != want {\n+ received.Invalid.Increment()\n+ return\n+ }\n+\n// As per RFC 4861 sections 4.1 - 4.5, 6.1.1, 6.1.2, 7.1.1, 7.1.2 and\n// 8.1, nodes MUST silently drop NDP packets where the Hop Limit field\n// in the IPv6 header is not set to 255.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/ipv6/icmp_test.go",
"new_path": "pkg/tcpip/network/ipv6/icmp_test.go",
"diff": "@@ -30,7 +30,7 @@ import (\n)\nconst (\n- linkAddr0 = tcpip.LinkAddress(\"\\x01\\x02\\x03\\x04\\x05\\x06\")\n+ linkAddr0 = tcpip.LinkAddress(\"\\x02\\x02\\x03\\x04\\x05\\x06\")\nlinkAddr1 = tcpip.LinkAddress(\"\\x0a\\x0b\\x0c\\x0d\\x0e\\x0f\")\n)\n@@ -359,3 +359,533 @@ func TestLinkResolution(t *testing.T) {\nrouteICMPv6Packet(t, args, nil)\n}\n}\n+\n+func TestICMPChecksumValidationSimple(t *testing.T) {\n+ types := []struct {\n+ name string\n+ typ header.ICMPv6Type\n+ size int\n+ statCounter func(tcpip.ICMPv6ReceivedPacketStats) *tcpip.StatCounter\n+ }{\n+ {\n+ \"DstUnreachable\",\n+ header.ICMPv6DstUnreachable,\n+ header.ICMPv6DstUnreachableMinimumSize,\n+ func(stats tcpip.ICMPv6ReceivedPacketStats) *tcpip.StatCounter {\n+ return stats.DstUnreachable\n+ },\n+ },\n+ {\n+ \"PacketTooBig\",\n+ header.ICMPv6PacketTooBig,\n+ header.ICMPv6PacketTooBigMinimumSize,\n+ func(stats tcpip.ICMPv6ReceivedPacketStats) *tcpip.StatCounter {\n+ return stats.PacketTooBig\n+ },\n+ },\n+ {\n+ \"TimeExceeded\",\n+ header.ICMPv6TimeExceeded,\n+ header.ICMPv6MinimumSize,\n+ func(stats tcpip.ICMPv6ReceivedPacketStats) *tcpip.StatCounter {\n+ return stats.TimeExceeded\n+ },\n+ },\n+ {\n+ \"ParamProblem\",\n+ header.ICMPv6ParamProblem,\n+ header.ICMPv6MinimumSize,\n+ func(stats tcpip.ICMPv6ReceivedPacketStats) *tcpip.StatCounter {\n+ return stats.ParamProblem\n+ },\n+ },\n+ {\n+ \"EchoRequest\",\n+ header.ICMPv6EchoRequest,\n+ header.ICMPv6EchoMinimumSize,\n+ func(stats tcpip.ICMPv6ReceivedPacketStats) *tcpip.StatCounter {\n+ return stats.EchoRequest\n+ },\n+ },\n+ {\n+ \"EchoReply\",\n+ header.ICMPv6EchoReply,\n+ header.ICMPv6EchoMinimumSize,\n+ func(stats tcpip.ICMPv6ReceivedPacketStats) *tcpip.StatCounter {\n+ return stats.EchoReply\n+ },\n+ },\n+ {\n+ \"RouterSolicit\",\n+ header.ICMPv6RouterSolicit,\n+ header.ICMPv6MinimumSize,\n+ func(stats tcpip.ICMPv6ReceivedPacketStats) *tcpip.StatCounter {\n+ return stats.RouterSolicit\n+ },\n+ },\n+ {\n+ \"RouterAdvert\",\n+ header.ICMPv6RouterAdvert,\n+ header.ICMPv6MinimumSize,\n+ func(stats tcpip.ICMPv6ReceivedPacketStats) *tcpip.StatCounter {\n+ return stats.RouterAdvert\n+ },\n+ },\n+ {\n+ \"NeighborSolicit\",\n+ header.ICMPv6NeighborSolicit,\n+ header.ICMPv6NeighborSolicitMinimumSize,\n+ func(stats tcpip.ICMPv6ReceivedPacketStats) *tcpip.StatCounter {\n+ return stats.NeighborSolicit\n+ },\n+ },\n+ {\n+ \"NeighborAdvert\",\n+ header.ICMPv6NeighborAdvert,\n+ header.ICMPv6NeighborAdvertSize,\n+ func(stats tcpip.ICMPv6ReceivedPacketStats) *tcpip.StatCounter {\n+ return stats.NeighborAdvert\n+ },\n+ },\n+ {\n+ \"RedirectMsg\",\n+ header.ICMPv6RedirectMsg,\n+ header.ICMPv6MinimumSize,\n+ func(stats tcpip.ICMPv6ReceivedPacketStats) *tcpip.StatCounter {\n+ return stats.RedirectMsg\n+ },\n+ },\n+ }\n+\n+ for _, typ := range types {\n+ t.Run(typ.name, func(t *testing.T) {\n+ e := channel.New(10, 1280, linkAddr0)\n+ s := stack.New(stack.Options{\n+ NetworkProtocols: []stack.NetworkProtocol{NewProtocol()},\n+ })\n+ if err := s.CreateNIC(1, e); err != nil {\n+ t.Fatalf(\"CreateNIC(_) = %s\", err)\n+ }\n+\n+ if err := s.AddAddress(1, ProtocolNumber, lladdr0); err != nil {\n+ t.Fatalf(\"AddAddress(_, %d, %s) = %s\", ProtocolNumber, lladdr0, err)\n+ }\n+ {\n+ subnet, err := tcpip.NewSubnet(lladdr1, tcpip.AddressMask(strings.Repeat(\"\\xff\", len(lladdr1))))\n+ if err != nil {\n+ t.Fatal(err)\n+ }\n+ s.SetRouteTable(\n+ []tcpip.Route{{\n+ Destination: subnet,\n+ NIC: 1,\n+ }},\n+ )\n+ }\n+\n+ handleIPv6Payload := func(typ header.ICMPv6Type, size int, checksum bool) {\n+ 
hdr := buffer.NewPrependable(header.IPv6MinimumSize + size)\n+ pkt := header.ICMPv6(hdr.Prepend(size))\n+ pkt.SetType(typ)\n+ if checksum {\n+ pkt.SetChecksum(header.ICMPv6Checksum(pkt, lladdr1, lladdr0, buffer.VectorisedView{}))\n+ }\n+ ip := header.IPv6(hdr.Prepend(header.IPv6MinimumSize))\n+ ip.Encode(&header.IPv6Fields{\n+ PayloadLength: uint16(size),\n+ NextHeader: uint8(header.ICMPv6ProtocolNumber),\n+ HopLimit: header.NDPHopLimit,\n+ SrcAddr: lladdr1,\n+ DstAddr: lladdr0,\n+ })\n+ e.Inject(ProtocolNumber, hdr.View().ToVectorisedView())\n+ }\n+\n+ stats := s.Stats().ICMP.V6PacketsReceived\n+ invalid := stats.Invalid\n+ typStat := typ.statCounter(stats)\n+\n+ // Initial stat counts should be 0.\n+ if got := invalid.Value(); got != 0 {\n+ t.Fatalf(\"got invalid = %d, want = 0\", got)\n+ }\n+ if got := typStat.Value(); got != 0 {\n+ t.Fatalf(\"got %s = %d, want = 0\", typ.name, got)\n+ }\n+\n+ // Without setting checksum, the incoming packet should\n+ // be invalid.\n+ handleIPv6Payload(typ.typ, typ.size, false)\n+ if got := invalid.Value(); got != 1 {\n+ t.Fatalf(\"got invalid = %d, want = 1\", got)\n+ }\n+ // Rx count of type typ.typ should not have increased.\n+ if got := typStat.Value(); got != 0 {\n+ t.Fatalf(\"got %s = %d, want = 0\", typ.name, got)\n+ }\n+\n+ // When checksum is set, it should be received.\n+ handleIPv6Payload(typ.typ, typ.size, true)\n+ if got := typStat.Value(); got != 1 {\n+ t.Fatalf(\"got %s = %d, want = 1\", typ.name, got)\n+ }\n+ // Invalid count should not have increased again.\n+ if got := invalid.Value(); got != 1 {\n+ t.Fatalf(\"got invalid = %d, want = 1\", got)\n+ }\n+ })\n+ }\n+}\n+\n+func TestICMPChecksumValidationWithPayload(t *testing.T) {\n+ const simpleBodySize = 64\n+ simpleBody := func(view buffer.View) {\n+ for i := 0; i < simpleBodySize; i++ {\n+ view[i] = uint8(i)\n+ }\n+ }\n+\n+ const errorICMPBodySize = header.IPv6MinimumSize + simpleBodySize\n+ errorICMPBody := func(view buffer.View) {\n+ ip := header.IPv6(view)\n+ ip.Encode(&header.IPv6Fields{\n+ PayloadLength: simpleBodySize,\n+ NextHeader: 10,\n+ HopLimit: 20,\n+ SrcAddr: lladdr0,\n+ DstAddr: lladdr1,\n+ })\n+ simpleBody(view[header.IPv6MinimumSize:])\n+ }\n+\n+ types := []struct {\n+ name string\n+ typ header.ICMPv6Type\n+ size int\n+ statCounter func(tcpip.ICMPv6ReceivedPacketStats) *tcpip.StatCounter\n+ payloadSize int\n+ payload func(buffer.View)\n+ }{\n+ {\n+ \"DstUnreachable\",\n+ header.ICMPv6DstUnreachable,\n+ header.ICMPv6DstUnreachableMinimumSize,\n+ func(stats tcpip.ICMPv6ReceivedPacketStats) *tcpip.StatCounter {\n+ return stats.DstUnreachable\n+ },\n+ errorICMPBodySize,\n+ errorICMPBody,\n+ },\n+ {\n+ \"PacketTooBig\",\n+ header.ICMPv6PacketTooBig,\n+ header.ICMPv6PacketTooBigMinimumSize,\n+ func(stats tcpip.ICMPv6ReceivedPacketStats) *tcpip.StatCounter {\n+ return stats.PacketTooBig\n+ },\n+ errorICMPBodySize,\n+ errorICMPBody,\n+ },\n+ {\n+ \"TimeExceeded\",\n+ header.ICMPv6TimeExceeded,\n+ header.ICMPv6MinimumSize,\n+ func(stats tcpip.ICMPv6ReceivedPacketStats) *tcpip.StatCounter {\n+ return stats.TimeExceeded\n+ },\n+ errorICMPBodySize,\n+ errorICMPBody,\n+ },\n+ {\n+ \"ParamProblem\",\n+ header.ICMPv6ParamProblem,\n+ header.ICMPv6MinimumSize,\n+ func(stats tcpip.ICMPv6ReceivedPacketStats) *tcpip.StatCounter {\n+ return stats.ParamProblem\n+ },\n+ errorICMPBodySize,\n+ errorICMPBody,\n+ },\n+ {\n+ \"EchoRequest\",\n+ header.ICMPv6EchoRequest,\n+ header.ICMPv6EchoMinimumSize,\n+ func(stats tcpip.ICMPv6ReceivedPacketStats) *tcpip.StatCounter {\n+ return 
stats.EchoRequest\n+ },\n+ simpleBodySize,\n+ simpleBody,\n+ },\n+ {\n+ \"EchoReply\",\n+ header.ICMPv6EchoReply,\n+ header.ICMPv6EchoMinimumSize,\n+ func(stats tcpip.ICMPv6ReceivedPacketStats) *tcpip.StatCounter {\n+ return stats.EchoReply\n+ },\n+ simpleBodySize,\n+ simpleBody,\n+ },\n+ }\n+\n+ for _, typ := range types {\n+ t.Run(typ.name, func(t *testing.T) {\n+ e := channel.New(10, 1280, linkAddr0)\n+ s := stack.New(stack.Options{\n+ NetworkProtocols: []stack.NetworkProtocol{NewProtocol()},\n+ })\n+ if err := s.CreateNIC(1, e); err != nil {\n+ t.Fatalf(\"CreateNIC(_) = %s\", err)\n+ }\n+\n+ if err := s.AddAddress(1, ProtocolNumber, lladdr0); err != nil {\n+ t.Fatalf(\"AddAddress(_, %d, %s) = %s\", ProtocolNumber, lladdr0, err)\n+ }\n+ {\n+ subnet, err := tcpip.NewSubnet(lladdr1, tcpip.AddressMask(strings.Repeat(\"\\xff\", len(lladdr1))))\n+ if err != nil {\n+ t.Fatal(err)\n+ }\n+ s.SetRouteTable(\n+ []tcpip.Route{{\n+ Destination: subnet,\n+ NIC: 1,\n+ }},\n+ )\n+ }\n+\n+ handleIPv6Payload := func(typ header.ICMPv6Type, size, payloadSize int, payloadFn func(buffer.View), checksum bool) {\n+ icmpSize := size + payloadSize\n+ hdr := buffer.NewPrependable(header.IPv6MinimumSize + icmpSize)\n+ pkt := header.ICMPv6(hdr.Prepend(icmpSize))\n+ pkt.SetType(typ)\n+ payloadFn(pkt.Payload())\n+\n+ if checksum {\n+ pkt.SetChecksum(header.ICMPv6Checksum(pkt, lladdr1, lladdr0, buffer.VectorisedView{}))\n+ }\n+\n+ ip := header.IPv6(hdr.Prepend(header.IPv6MinimumSize))\n+ ip.Encode(&header.IPv6Fields{\n+ PayloadLength: uint16(icmpSize),\n+ NextHeader: uint8(header.ICMPv6ProtocolNumber),\n+ HopLimit: header.NDPHopLimit,\n+ SrcAddr: lladdr1,\n+ DstAddr: lladdr0,\n+ })\n+ e.Inject(ProtocolNumber, hdr.View().ToVectorisedView())\n+ }\n+\n+ stats := s.Stats().ICMP.V6PacketsReceived\n+ invalid := stats.Invalid\n+ typStat := typ.statCounter(stats)\n+\n+ // Initial stat counts should be 0.\n+ if got := invalid.Value(); got != 0 {\n+ t.Fatalf(\"got invalid = %d, want = 0\", got)\n+ }\n+ if got := typStat.Value(); got != 0 {\n+ t.Fatalf(\"got %s = %d, want = 0\", typ.name, got)\n+ }\n+\n+ // Without setting checksum, the incoming packet should\n+ // be invalid.\n+ handleIPv6Payload(typ.typ, typ.size, typ.payloadSize, typ.payload, false)\n+ if got := invalid.Value(); got != 1 {\n+ t.Fatalf(\"got invalid = %d, want = 1\", got)\n+ }\n+ // Rx count of type typ.typ should not have increased.\n+ if got := typStat.Value(); got != 0 {\n+ t.Fatalf(\"got %s = %d, want = 0\", typ.name, got)\n+ }\n+\n+ // When checksum is set, it should be received.\n+ handleIPv6Payload(typ.typ, typ.size, typ.payloadSize, typ.payload, true)\n+ if got := typStat.Value(); got != 1 {\n+ t.Fatalf(\"got %s = %d, want = 1\", typ.name, got)\n+ }\n+ // Invalid count should not have increased again.\n+ if got := invalid.Value(); got != 1 {\n+ t.Fatalf(\"got invalid = %d, want = 1\", got)\n+ }\n+ })\n+ }\n+}\n+\n+func TestICMPChecksumValidationWithPayloadMultipleViews(t *testing.T) {\n+ const simpleBodySize = 64\n+ simpleBody := func(view buffer.View) {\n+ for i := 0; i < simpleBodySize; i++ {\n+ view[i] = uint8(i)\n+ }\n+ }\n+\n+ const errorICMPBodySize = header.IPv6MinimumSize + simpleBodySize\n+ errorICMPBody := func(view buffer.View) {\n+ ip := header.IPv6(view)\n+ ip.Encode(&header.IPv6Fields{\n+ PayloadLength: simpleBodySize,\n+ NextHeader: 10,\n+ HopLimit: 20,\n+ SrcAddr: lladdr0,\n+ DstAddr: lladdr1,\n+ })\n+ simpleBody(view[header.IPv6MinimumSize:])\n+ }\n+\n+ types := []struct {\n+ name string\n+ typ header.ICMPv6Type\n+ size int\n+ 
statCounter func(tcpip.ICMPv6ReceivedPacketStats) *tcpip.StatCounter\n+ payloadSize int\n+ payload func(buffer.View)\n+ }{\n+ {\n+ \"DstUnreachable\",\n+ header.ICMPv6DstUnreachable,\n+ header.ICMPv6DstUnreachableMinimumSize,\n+ func(stats tcpip.ICMPv6ReceivedPacketStats) *tcpip.StatCounter {\n+ return stats.DstUnreachable\n+ },\n+ errorICMPBodySize,\n+ errorICMPBody,\n+ },\n+ {\n+ \"PacketTooBig\",\n+ header.ICMPv6PacketTooBig,\n+ header.ICMPv6PacketTooBigMinimumSize,\n+ func(stats tcpip.ICMPv6ReceivedPacketStats) *tcpip.StatCounter {\n+ return stats.PacketTooBig\n+ },\n+ errorICMPBodySize,\n+ errorICMPBody,\n+ },\n+ {\n+ \"TimeExceeded\",\n+ header.ICMPv6TimeExceeded,\n+ header.ICMPv6MinimumSize,\n+ func(stats tcpip.ICMPv6ReceivedPacketStats) *tcpip.StatCounter {\n+ return stats.TimeExceeded\n+ },\n+ errorICMPBodySize,\n+ errorICMPBody,\n+ },\n+ {\n+ \"ParamProblem\",\n+ header.ICMPv6ParamProblem,\n+ header.ICMPv6MinimumSize,\n+ func(stats tcpip.ICMPv6ReceivedPacketStats) *tcpip.StatCounter {\n+ return stats.ParamProblem\n+ },\n+ errorICMPBodySize,\n+ errorICMPBody,\n+ },\n+ {\n+ \"EchoRequest\",\n+ header.ICMPv6EchoRequest,\n+ header.ICMPv6EchoMinimumSize,\n+ func(stats tcpip.ICMPv6ReceivedPacketStats) *tcpip.StatCounter {\n+ return stats.EchoRequest\n+ },\n+ simpleBodySize,\n+ simpleBody,\n+ },\n+ {\n+ \"EchoReply\",\n+ header.ICMPv6EchoReply,\n+ header.ICMPv6EchoMinimumSize,\n+ func(stats tcpip.ICMPv6ReceivedPacketStats) *tcpip.StatCounter {\n+ return stats.EchoReply\n+ },\n+ simpleBodySize,\n+ simpleBody,\n+ },\n+ }\n+\n+ for _, typ := range types {\n+ t.Run(typ.name, func(t *testing.T) {\n+ e := channel.New(10, 1280, linkAddr0)\n+ s := stack.New(stack.Options{\n+ NetworkProtocols: []stack.NetworkProtocol{NewProtocol()},\n+ })\n+ if err := s.CreateNIC(1, e); err != nil {\n+ t.Fatalf(\"CreateNIC(_) = %s\", err)\n+ }\n+\n+ if err := s.AddAddress(1, ProtocolNumber, lladdr0); err != nil {\n+ t.Fatalf(\"AddAddress(_, %d, %s) = %s\", ProtocolNumber, lladdr0, err)\n+ }\n+ {\n+ subnet, err := tcpip.NewSubnet(lladdr1, tcpip.AddressMask(strings.Repeat(\"\\xff\", len(lladdr1))))\n+ if err != nil {\n+ t.Fatal(err)\n+ }\n+ s.SetRouteTable(\n+ []tcpip.Route{{\n+ Destination: subnet,\n+ NIC: 1,\n+ }},\n+ )\n+ }\n+\n+ handleIPv6Payload := func(typ header.ICMPv6Type, size, payloadSize int, payloadFn func(buffer.View), checksum bool) {\n+ hdr := buffer.NewPrependable(header.IPv6MinimumSize + size)\n+ pkt := header.ICMPv6(hdr.Prepend(size))\n+ pkt.SetType(typ)\n+\n+ payload := buffer.NewView(payloadSize)\n+ payloadFn(payload)\n+\n+ if checksum {\n+ pkt.SetChecksum(header.ICMPv6Checksum(pkt, lladdr1, lladdr0, payload.ToVectorisedView()))\n+ }\n+\n+ ip := header.IPv6(hdr.Prepend(header.IPv6MinimumSize))\n+ ip.Encode(&header.IPv6Fields{\n+ PayloadLength: uint16(size + payloadSize),\n+ NextHeader: uint8(header.ICMPv6ProtocolNumber),\n+ HopLimit: header.NDPHopLimit,\n+ SrcAddr: lladdr1,\n+ DstAddr: lladdr0,\n+ })\n+ e.Inject(ProtocolNumber,\n+ buffer.NewVectorisedView(header.IPv6MinimumSize+size+payloadSize,\n+ []buffer.View{hdr.View(), payload}))\n+ }\n+\n+ stats := s.Stats().ICMP.V6PacketsReceived\n+ invalid := stats.Invalid\n+ typStat := typ.statCounter(stats)\n+\n+ // Initial stat counts should be 0.\n+ if got := invalid.Value(); got != 0 {\n+ t.Fatalf(\"got invalid = %d, want = 0\", got)\n+ }\n+ if got := typStat.Value(); got != 0 {\n+ t.Fatalf(\"got %s = %d, want = 0\", typ.name, got)\n+ }\n+\n+ // Without setting checksum, the incoming packet should\n+ // be invalid.\n+ handleIPv6Payload(typ.typ, 
typ.size, typ.payloadSize, typ.payload, false)\n+ if got := invalid.Value(); got != 1 {\n+ t.Fatalf(\"got invalid = %d, want = 1\", got)\n+ }\n+ // Rx count of type typ.typ should not have increased.\n+ if got := typStat.Value(); got != 0 {\n+ t.Fatalf(\"got %s = %d, want = 0\", typ.name, got)\n+ }\n+\n+ // When checksum is set, it should be received.\n+ handleIPv6Payload(typ.typ, typ.size, typ.payloadSize, typ.payload, true)\n+ if got := typStat.Value(); got != 1 {\n+ t.Fatalf(\"got %s = %d, want = 1\", typ.name, got)\n+ }\n+ // Invalid count should not have increased again.\n+ if got := invalid.Value(); got != 1 {\n+ t.Fatalf(\"got invalid = %d, want = 1\", got)\n+ }\n+ })\n+ }\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/icmp/endpoint.go",
"new_path": "pkg/tcpip/transport/icmp/endpoint.go",
"diff": "@@ -445,13 +445,13 @@ func send6(r *stack.Route, ident uint16, data buffer.View, ttl uint8) *tcpip.Err\nreturn tcpip.ErrInvalidEndpointState\n}\n- icmpv6.SetChecksum(0)\n- icmpv6.SetChecksum(^header.Checksum(icmpv6, header.Checksum(data, 0)))\n+ dataVV := data.ToVectorisedView()\n+ icmpv6.SetChecksum(header.ICMPv6Checksum(icmpv6, r.LocalAddress, r.RemoteAddress, dataVV))\nif ttl == 0 {\nttl = r.DefaultTTL()\n}\n- return r.WritePacket(nil /* gso */, hdr, data.ToVectorisedView(), stack.NetworkHeaderParams{Protocol: header.ICMPv6ProtocolNumber, TTL: ttl, TOS: stack.DefaultTOS})\n+ return r.WritePacket(nil /* gso */, hdr, dataVV, stack.NetworkHeaderParams{Protocol: header.ICMPv6ProtocolNumber, TTL: ttl, TOS: stack.DefaultTOS})\n}\nfunc (e *endpoint) checkV4Mapped(addr *tcpip.FullAddress, allowMismatch bool) (tcpip.NetworkProtocolNumber, *tcpip.Error) {\n"
}
] | Go | Apache License 2.0 | google/gvisor | Validate the checksum for incoming ICMPv6 packets
This change validates the ICMPv6 checksum field before further processing an
ICMPv6 packet.
Tests: Unit tests to make sure that only ICMPv6 packets with a valid checksum
are accepted/processed. Existing tests using checker.ICMPv6 now also check the
ICMPv6 checksum field.
PiperOrigin-RevId: 276779148 |
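The receive-path check added in icmp.go recomputes header.ICMPv6Checksum over the IPv6 pseudo-header plus payload and compares it with the stored field. Below is a minimal standalone sketch of the same round trip on an Echo Request, with illustrative ::1/::2 addresses and an empty payload.

```go
package main

import (
	"fmt"
	"strings"

	"gvisor.dev/gvisor/pkg/tcpip"
	"gvisor.dev/gvisor/pkg/tcpip/buffer"
	"gvisor.dev/gvisor/pkg/tcpip/header"
)

func main() {
	src := tcpip.Address(strings.Repeat("\x00", 15) + "\x01") // ::1, illustrative
	dst := tcpip.Address(strings.Repeat("\x00", 15) + "\x02") // ::2, illustrative

	// Build a minimal Echo Request and stamp its checksum the way the
	// send path does.
	pkt := header.ICMPv6(make([]byte, header.ICMPv6EchoMinimumSize))
	pkt.SetType(header.ICMPv6EchoRequest)
	pkt.SetChecksum(header.ICMPv6Checksum(pkt, src, dst, buffer.VectorisedView{}))

	// Receive-side check mirroring icmp.go: recompute over the same
	// pseudo-header and (empty) payload and compare with the stored field.
	want := header.ICMPv6Checksum(pkt, src, dst, buffer.VectorisedView{})
	fmt.Println("checksum valid:", pkt.Checksum() == want)
}
```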
259,860 | 25.10.2019 22:31:35 | 25,200 | 1c480abc39b9957606ff8bf125a5c253ad8a76cb | Aggregate arguments for loading executables into a single struct.
This change simplifies the function signatures of functions related to loading
executables, such as LoadTaskImage, Load, loadBinary. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/kernel.go",
"new_path": "pkg/sentry/kernel/kernel.go",
"diff": "@@ -804,8 +804,20 @@ func (k *Kernel) CreateProcess(args CreateProcessArgs) (*ThreadGroup, ThreadID,\n// Create a fresh task context.\nremainingTraversals = uint(args.MaxSymlinkTraversals)\n-\n- tc, se := k.LoadTaskImage(ctx, mounts, root, wd, &remainingTraversals, args.Filename, args.File, args.Argv, args.Envv, true /*resolveFinal*/, k.featureSet)\n+ loadArgs := loader.LoadArgs{\n+ Mounts: mounts,\n+ Root: root,\n+ WorkingDirectory: wd,\n+ RemainingTraversals: &remainingTraversals,\n+ ResolveFinal: true,\n+ Filename: args.Filename,\n+ File: args.File,\n+ Argv: args.Argv,\n+ Envv: args.Envv,\n+ Features: k.featureSet,\n+ }\n+\n+ tc, se := k.LoadTaskImage(ctx, loadArgs)\nif se != nil {\nreturn nil, 0, errors.New(se.String())\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/task_context.go",
"new_path": "pkg/sentry/kernel/task_context.go",
"diff": "@@ -18,10 +18,8 @@ import (\n\"fmt\"\n\"gvisor.dev/gvisor/pkg/abi/linux\"\n- \"gvisor.dev/gvisor/pkg/cpuid\"\n\"gvisor.dev/gvisor/pkg/sentry/arch\"\n\"gvisor.dev/gvisor/pkg/sentry/context\"\n- \"gvisor.dev/gvisor/pkg/sentry/fs\"\n\"gvisor.dev/gvisor/pkg/sentry/kernel/futex\"\n\"gvisor.dev/gvisor/pkg/sentry/loader\"\n\"gvisor.dev/gvisor/pkg/sentry/mm\"\n@@ -132,30 +130,21 @@ func (t *Task) Stack() *arch.Stack {\nreturn &arch.Stack{t.Arch(), t.MemoryManager(), usermem.Addr(t.Arch().Stack())}\n}\n-// LoadTaskImage loads filename into a new TaskContext.\n+// LoadTaskImage loads a specified file into a new TaskContext.\n//\n-// It takes several arguments:\n-// * mounts: MountNamespace to lookup filename in\n-// * root: Root to lookup filename under\n-// * wd: Working directory to lookup filename under\n-// * maxTraversals: maximum number of symlinks to follow\n-// * filename: path to binary to load\n-// * file: an open fs.File object of the binary to load. If set,\n-// file will be loaded and not filename.\n-// * argv: Binary argv\n-// * envv: Binary envv\n-// * fs: Binary FeatureSet\n-func (k *Kernel) LoadTaskImage(ctx context.Context, mounts *fs.MountNamespace, root, wd *fs.Dirent, maxTraversals *uint, filename string, file *fs.File, argv, envv []string, resolveFinal bool, fs *cpuid.FeatureSet) (*TaskContext, *syserr.Error) {\n- // If File is not nil, we should load that instead of resolving filename.\n- if file != nil {\n- filename = file.MappedName(ctx)\n+// args.MemoryManager does not need to be set by the caller.\n+func (k *Kernel) LoadTaskImage(ctx context.Context, args loader.LoadArgs) (*TaskContext, *syserr.Error) {\n+ // If File is not nil, we should load that instead of resolving Filename.\n+ if args.File != nil {\n+ args.Filename = args.File.MappedName(ctx)\n}\n// Prepare a new user address space to load into.\nm := mm.NewMemoryManager(k, k)\ndefer m.DecUsers(ctx)\n+ args.MemoryManager = m\n- os, ac, name, err := loader.Load(ctx, m, mounts, root, wd, maxTraversals, fs, filename, file, argv, envv, resolveFinal, k.extraAuxv, k.vdso)\n+ os, ac, name, err := loader.Load(ctx, args, k.extraAuxv, k.vdso)\nif err != nil {\nreturn nil, err\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/loader/elf.go",
"new_path": "pkg/sentry/loader/elf.go",
"diff": "@@ -624,15 +624,15 @@ func loadInterpreterELF(ctx context.Context, m *mm.MemoryManager, f *fs.File, in\nreturn loadParsedELF(ctx, m, f, info, 0)\n}\n-// loadELF loads f into the Task address space.\n+// loadELF loads args.File into the Task address space.\n//\n// If loadELF returns ErrSwitchFile it should be called again with the returned\n// path and argv.\n//\n// Preconditions:\n-// * f is an ELF file\n-func loadELF(ctx context.Context, m *mm.MemoryManager, mounts *fs.MountNamespace, root, wd *fs.Dirent, maxTraversals *uint, fs *cpuid.FeatureSet, f *fs.File) (loadedELF, arch.Context, error) {\n- bin, ac, err := loadInitialELF(ctx, m, fs, f)\n+// * args.File is an ELF file\n+func loadELF(ctx context.Context, args LoadArgs) (loadedELF, arch.Context, error) {\n+ bin, ac, err := loadInitialELF(ctx, args.MemoryManager, args.Features, args.File)\nif err != nil {\nctx.Infof(\"Error loading binary: %v\", err)\nreturn loadedELF{}, nil, err\n@@ -640,7 +640,12 @@ func loadELF(ctx context.Context, m *mm.MemoryManager, mounts *fs.MountNamespace\nvar interp loadedELF\nif bin.interpreter != \"\" {\n- d, i, err := openPath(ctx, mounts, root, wd, maxTraversals, bin.interpreter, true /*resolveFinal*/)\n+ // Even if we do not allow the final link of the script to be\n+ // resolved, the interpreter should still be resolved if it is\n+ // a symlink.\n+ args.ResolveFinal = true\n+ args.Filename = bin.interpreter\n+ d, i, err := openPath(ctx, args)\nif err != nil {\nctx.Infof(\"Error opening interpreter %s: %v\", bin.interpreter, err)\nreturn loadedELF{}, nil, err\n@@ -649,7 +654,7 @@ func loadELF(ctx context.Context, m *mm.MemoryManager, mounts *fs.MountNamespace\n// We don't need the Dirent.\nd.DecRef()\n- interp, err = loadInterpreterELF(ctx, m, i, bin)\n+ interp, err = loadInterpreterELF(ctx, args.MemoryManager, i, bin)\nif err != nil {\nctx.Infof(\"Error loading interpreter: %v\", err)\nreturn loadedELF{}, nil, err\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/loader/loader.go",
"new_path": "pkg/sentry/loader/loader.go",
"diff": "// See the License for the specific language governing permissions and\n// limitations under the License.\n-// Package loader loads a binary into a MemoryManager.\n+// Package loader loads an executable file into a MemoryManager.\npackage loader\nimport (\n@@ -35,6 +35,48 @@ import (\n\"gvisor.dev/gvisor/pkg/syserror\"\n)\n+// LoadArgs holds specifications for an executable file to be loaded.\n+type LoadArgs struct {\n+ // MemoryManager is the memory manager to load the executable into.\n+ MemoryManager *mm.MemoryManager\n+\n+ // Mounts is the mount namespace in which to look up Filename.\n+ Mounts *fs.MountNamespace\n+\n+ // Root is the root directory under which to look up Filename.\n+ Root *fs.Dirent\n+\n+ // WorkingDirectory is the working directory under which to look up\n+ // Filename.\n+ WorkingDirectory *fs.Dirent\n+\n+ // RemainingTraversals is the maximum number of symlinks to follow to\n+ // resolve Filename. This counter is passed by reference to keep it\n+ // updated throughout the call stack.\n+ RemainingTraversals *uint\n+\n+ // ResolveFinal indicates whether the final link of Filename should be\n+ // resolved, if it is a symlink.\n+ ResolveFinal bool\n+\n+ // Filename is the path for the executable.\n+ Filename string\n+\n+ // File is an open fs.File object of the executable. If File is not\n+ // nil, then File will be loaded and Filename will be ignored.\n+ File *fs.File\n+\n+ // Argv is the vector of arguments to pass to the executable.\n+ Argv []string\n+\n+ // Envv is the vector of environment variables to pass to the\n+ // executable.\n+ Envv []string\n+\n+ // Features specifies the CPU feature set for the executable.\n+ Features *cpuid.FeatureSet\n+}\n+\n// readFull behaves like io.ReadFull for an *fs.File.\nfunc readFull(ctx context.Context, f *fs.File, dst usermem.IOSequence, offset int64) (int64, error) {\nvar total int64\n@@ -51,24 +93,24 @@ func readFull(ctx context.Context, f *fs.File, dst usermem.IOSequence, offset in\nreturn total, nil\n}\n-// openPath opens name for loading.\n+// openPath opens args.Filename for loading.\n//\n-// openPath returns the fs.Dirent and an *fs.File for name, which is not\n-// installed in the Task FDTable. The caller takes ownership of both.\n+// openPath returns the fs.Dirent and an *fs.File for args.Filename, which is\n+// not installed in the Task FDTable. 
The caller takes ownership of both.\n//\n-// name must be a readable, executable, regular file.\n-func openPath(ctx context.Context, mounts *fs.MountNamespace, root, wd *fs.Dirent, maxTraversals *uint, name string, resolveFinal bool) (*fs.Dirent, *fs.File, error) {\n+// args.Filename must be a readable, executable, regular file.\n+func openPath(ctx context.Context, args LoadArgs) (*fs.Dirent, *fs.File, error) {\nvar err error\n- if name == \"\" {\n+ if args.Filename == \"\" {\nctx.Infof(\"cannot open empty name\")\nreturn nil, nil, syserror.ENOENT\n}\nvar d *fs.Dirent\n- if resolveFinal {\n- d, err = mounts.FindInode(ctx, root, wd, name, maxTraversals)\n+ if args.ResolveFinal {\n+ d, err = args.Mounts.FindInode(ctx, args.Root, args.WorkingDirectory, args.Filename, args.RemainingTraversals)\n} else {\n- d, err = mounts.FindLink(ctx, root, wd, name, maxTraversals)\n+ d, err = args.Mounts.FindLink(ctx, args.Root, args.WorkingDirectory, args.Filename, args.RemainingTraversals)\n}\nif err != nil {\nreturn nil, nil, err\n@@ -77,11 +119,11 @@ func openPath(ctx context.Context, mounts *fs.MountNamespace, root, wd *fs.Diren\n// Open file will take a reference to Dirent, so destroy this one.\ndefer d.DecRef()\n- if !resolveFinal && fs.IsSymlink(d.Inode.StableAttr) {\n+ if !args.ResolveFinal && fs.IsSymlink(d.Inode.StableAttr) {\nreturn nil, nil, syserror.ELOOP\n}\n- return openFile(ctx, nil, d, name)\n+ return openFile(ctx, nil, d, args.Filename)\n}\n// openFile takes that file's Dirent and performs checks on it. If provided a\n@@ -182,34 +224,33 @@ const (\nmaxLoaderAttempts = 6\n)\n-// loadBinary loads a binary that is pointed to by \"file\". If nil, the path\n-// \"filename\" is resolved and loaded.\n+// loadExecutable loads an executable that is pointed to by args.File. If nil,\n+// the path args.Filename is resolved and loaded. 
If the executable is an\n+// interpreter script rather than an ELF, the binary of the corresponding\n+// interpreter will be loaded.\n//\n// It returns:\n// * loadedELF, description of the loaded binary\n// * arch.Context matching the binary arch\n// * fs.Dirent of the binary file\n-// * Possibly updated argv\n-func loadBinary(ctx context.Context, m *mm.MemoryManager, mounts *fs.MountNamespace, root, wd *fs.Dirent, remainingTraversals *uint, features *cpuid.FeatureSet, filename string, passedFile *fs.File, argv []string, resolveFinal bool) (loadedELF, arch.Context, *fs.Dirent, []string, error) {\n+// * Possibly updated args.Argv\n+func loadExecutable(ctx context.Context, args LoadArgs) (loadedELF, arch.Context, *fs.Dirent, []string, error) {\nfor i := 0; i < maxLoaderAttempts; i++ {\nvar (\nd *fs.Dirent\n- f *fs.File\nerr error\n)\n- if passedFile == nil {\n- d, f, err = openPath(ctx, mounts, root, wd, remainingTraversals, filename, resolveFinal)\n+ if args.File == nil {\n+ d, args.File, err = openPath(ctx, args)\n} else {\n- d, f, err = openFile(ctx, passedFile, nil, \"\")\n- // Set to nil in case we loop on a Interpreter Script.\n- passedFile = nil\n+ d, args.File, err = openFile(ctx, args.File, nil, \"\")\n}\nif err != nil {\n- ctx.Infof(\"Error opening %s: %v\", filename, err)\n+ ctx.Infof(\"Error opening %s: %v\", args.Filename, err)\nreturn loadedELF{}, nil, nil, nil, err\n}\n- defer f.DecRef()\n+ defer args.File.DecRef()\n// We will return d in the successful case, but defer a DecRef\n// for intermediate loops and failure cases.\ndefer d.DecRef()\n@@ -217,9 +258,9 @@ func loadBinary(ctx context.Context, m *mm.MemoryManager, mounts *fs.MountNamesp\n// Check the header. Is this an ELF or interpreter script?\nvar hdr [4]uint8\n// N.B. We assume that reading from a regular file cannot block.\n- _, err = readFull(ctx, f, usermem.BytesIOSequence(hdr[:]), 0)\n- // Allow unexpected EOF, as a valid executable could be only three\n- // bytes (e.g., #!a).\n+ _, err = readFull(ctx, args.File, usermem.BytesIOSequence(hdr[:]), 0)\n+ // Allow unexpected EOF, as a valid executable could be only\n+ // three bytes (e.g., #!a).\nif err != nil && err != io.ErrUnexpectedEOF {\nif err == io.EOF {\nerr = syserror.ENOEXEC\n@@ -229,33 +270,33 @@ func loadBinary(ctx context.Context, m *mm.MemoryManager, mounts *fs.MountNamesp\nswitch {\ncase bytes.Equal(hdr[:], []byte(elfMagic)):\n- loaded, ac, err := loadELF(ctx, m, mounts, root, wd, remainingTraversals, features, f)\n+ loaded, ac, err := loadELF(ctx, args)\nif err != nil {\nctx.Infof(\"Error loading ELF: %v\", err)\nreturn loadedELF{}, nil, nil, nil, err\n}\n// An ELF is always terminal. Hold on to d.\nd.IncRef()\n- return loaded, ac, d, argv, err\n+ return loaded, ac, d, args.Argv, err\ncase bytes.Equal(hdr[:2], []byte(interpreterScriptMagic)):\n- newpath, newargv, err := parseInterpreterScript(ctx, filename, f, argv)\n+ args.Filename, args.Argv, err = parseInterpreterScript(ctx, args.Filename, args.File, args.Argv)\nif err != nil {\nctx.Infof(\"Error loading interpreter script: %v\", err)\nreturn loadedELF{}, nil, nil, nil, err\n}\n- filename = newpath\n- argv = newargv\ndefault:\nctx.Infof(\"Unknown magic: %v\", hdr)\nreturn loadedELF{}, nil, nil, nil, syserror.ENOEXEC\n}\n+ // Set to nil in case we loop on a Interpreter Script.\n+ args.File = nil\n}\nreturn loadedELF{}, nil, nil, nil, syserror.ELOOP\n}\n-// Load loads \"file\" into a MemoryManager. 
If file is nil, the path \"filename\"\n-// is resolved and loaded instead.\n+// Load loads args.File into a MemoryManager. If args.File is nil, the path\n+// args.Filename is resolved and loaded instead.\n//\n// If Load returns ErrSwitchFile it should be called again with the returned\n// path and argv.\n@@ -263,37 +304,37 @@ func loadBinary(ctx context.Context, m *mm.MemoryManager, mounts *fs.MountNamesp\n// Preconditions:\n// * The Task MemoryManager is empty.\n// * Load is called on the Task goroutine.\n-func Load(ctx context.Context, m *mm.MemoryManager, mounts *fs.MountNamespace, root, wd *fs.Dirent, maxTraversals *uint, fs *cpuid.FeatureSet, filename string, file *fs.File, argv, envv []string, resolveFinal bool, extraAuxv []arch.AuxEntry, vdso *VDSO) (abi.OS, arch.Context, string, *syserr.Error) {\n- // Load the binary itself.\n- loaded, ac, d, argv, err := loadBinary(ctx, m, mounts, root, wd, maxTraversals, fs, filename, file, argv, resolveFinal)\n+func Load(ctx context.Context, args LoadArgs, extraAuxv []arch.AuxEntry, vdso *VDSO) (abi.OS, arch.Context, string, *syserr.Error) {\n+ // Load the executable itself.\n+ loaded, ac, d, newArgv, err := loadExecutable(ctx, args)\nif err != nil {\n- return 0, nil, \"\", syserr.NewDynamic(fmt.Sprintf(\"Failed to load %s: %v\", filename, err), syserr.FromError(err).ToLinux())\n+ return 0, nil, \"\", syserr.NewDynamic(fmt.Sprintf(\"Failed to load %s: %v\", args.Filename, err), syserr.FromError(err).ToLinux())\n}\ndefer d.DecRef()\n// Load the VDSO.\n- vdsoAddr, err := loadVDSO(ctx, m, vdso, loaded)\n+ vdsoAddr, err := loadVDSO(ctx, args.MemoryManager, vdso, loaded)\nif err != nil {\nreturn 0, nil, \"\", syserr.NewDynamic(fmt.Sprintf(\"Error loading VDSO: %v\", err), syserr.FromError(err).ToLinux())\n}\n// Setup the heap. brk starts at the next page after the end of the\n- // binary. Userspace can assume that the remainer of the page after\n+ // executable. 
Userspace can assume that the remainer of the page after\n// loaded.end is available for its use.\ne, ok := loaded.end.RoundUp()\nif !ok {\nreturn 0, nil, \"\", syserr.NewDynamic(fmt.Sprintf(\"brk overflows: %#x\", loaded.end), linux.ENOEXEC)\n}\n- m.BrkSetup(ctx, e)\n+ args.MemoryManager.BrkSetup(ctx, e)\n// Allocate our stack.\n- stack, err := allocStack(ctx, m, ac)\n+ stack, err := allocStack(ctx, args.MemoryManager, ac)\nif err != nil {\nreturn 0, nil, \"\", syserr.NewDynamic(fmt.Sprintf(\"Failed to allocate stack: %v\", err), syserr.FromError(err).ToLinux())\n}\n// Push the original filename to the stack, for AT_EXECFN.\n- execfn, err := stack.Push(filename)\n+ execfn, err := stack.Push(args.Filename)\nif err != nil {\nreturn 0, nil, \"\", syserr.NewDynamic(fmt.Sprintf(\"Failed to push exec filename: %v\", err), syserr.FromError(err).ToLinux())\n}\n@@ -327,11 +368,12 @@ func Load(ctx context.Context, m *mm.MemoryManager, mounts *fs.MountNamespace, r\n}...)\nauxv = append(auxv, extraAuxv...)\n- sl, err := stack.Load(argv, envv, auxv)\n+ sl, err := stack.Load(newArgv, args.Envv, auxv)\nif err != nil {\nreturn 0, nil, \"\", syserr.NewDynamic(fmt.Sprintf(\"Failed to load stack: %v\", err), syserr.FromError(err).ToLinux())\n}\n+ m := args.MemoryManager\nm.SetArgvStart(sl.ArgvStart)\nm.SetArgvEnd(sl.ArgvEnd)\nm.SetEnvvStart(sl.EnvvStart)\n@@ -342,7 +384,7 @@ func Load(ctx context.Context, m *mm.MemoryManager, mounts *fs.MountNamespace, r\nac.SetIP(uintptr(loaded.entry))\nac.SetStack(uintptr(stack.Bottom))\n- name := path.Base(filename)\n+ name := path.Base(args.Filename)\nif len(name) > linux.TASK_COMM_LEN-1 {\nname = name[:linux.TASK_COMM_LEN-1]\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/syscalls/linux/BUILD",
"new_path": "pkg/sentry/syscalls/linux/BUILD",
"diff": "@@ -79,6 +79,7 @@ go_library(\n\"//pkg/sentry/kernel/signalfd\",\n\"//pkg/sentry/kernel/time\",\n\"//pkg/sentry/limits\",\n+ \"//pkg/sentry/loader\",\n\"//pkg/sentry/memmap\",\n\"//pkg/sentry/mm\",\n\"//pkg/sentry/safemem\",\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/syscalls/linux/sys_thread.go",
"new_path": "pkg/sentry/syscalls/linux/sys_thread.go",
"diff": "@@ -23,6 +23,7 @@ import (\n\"gvisor.dev/gvisor/pkg/sentry/fs\"\n\"gvisor.dev/gvisor/pkg/sentry/kernel\"\n\"gvisor.dev/gvisor/pkg/sentry/kernel/sched\"\n+ \"gvisor.dev/gvisor/pkg/sentry/loader\"\n\"gvisor.dev/gvisor/pkg/sentry/usermem\"\n\"gvisor.dev/gvisor/pkg/syserror\"\n)\n@@ -147,8 +148,21 @@ func execveat(t *kernel.Task, dirFD int32, pathnameAddr, argvAddr, envvAddr user\n}\n// Load the new TaskContext.\n- maxTraversals := uint(linux.MaxSymlinkTraversals)\n- tc, se := t.Kernel().LoadTaskImage(t, t.MountNamespace(), root, wd, &maxTraversals, pathname, executable, argv, envv, resolveFinal, t.Arch().FeatureSet())\n+ remainingTraversals := uint(linux.MaxSymlinkTraversals)\n+ loadArgs := loader.LoadArgs{\n+ Mounts: t.MountNamespace(),\n+ Root: root,\n+ WorkingDirectory: wd,\n+ RemainingTraversals: &remainingTraversals,\n+ ResolveFinal: resolveFinal,\n+ Filename: pathname,\n+ File: executable,\n+ Argv: argv,\n+ Envv: envv,\n+ Features: t.Arch().FeatureSet(),\n+ }\n+\n+ tc, se := t.Kernel().LoadTaskImage(t, loadArgs)\nif se != nil {\nreturn 0, nil, se.ToError()\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Aggregate arguments for loading executables into a single struct.
This change simplifies the function signatures of functions related to loading
executables, such as LoadTaskImage, Load, loadBinary.
PiperOrigin-RevId: 276821187 |
259,884 | 28.10.2019 01:32:30 | 14,400 | c73410c0bda98fa617d82cf423ce96ee5e6af6a4 | Comment out unused references | [
{
"change_type": "MODIFY",
"old_path": "content/docs/user_guide/install.md",
"new_path": "content/docs/user_guide/install.md",
"diff": "@@ -164,7 +164,9 @@ optionally automatically configure Docker:\nrunsc install\n```\n+[old-linux]: /docs/user_guide/networking/#gso\n+<!--\n[latest-nightly]: https://storage.googleapis.com/gvisor/releases/nightly/latest/runsc\n[latest-hash]: https://storage.googleapis.com/gvisor/releases/nightly/latest/runsc.sha512\n-[old-linux]: /docs/user_guide/networking/#gso\n[releases]: https://github.com/google/gvisor/releases\n+-->\n"
}
] | Go | Apache License 2.0 | google/gvisor | Comment out unused references |
259,974 | 24.10.2019 03:18:18 | 0 | dec831b4939a6332cac5d186a604ff2cbbcaf7af | Cast the Stat_t.Nlink to uint64 on arm64.
Since the syscall.Stat_t.Nlink is defined as different types on
amd64 and arm64 (uint64 and uint32 respectively), we need to cast
them to a unified uint64 type in gVisor code. | [
{
"change_type": "MODIFY",
"old_path": "pkg/p9/p9.go",
"new_path": "pkg/p9/p9.go",
"diff": "@@ -814,7 +814,7 @@ func StatToAttr(s *syscall.Stat_t, req AttrMask) (Attr, AttrMask) {\nattr.Mode = FileMode(s.Mode)\n}\nif req.NLink {\n- attr.NLink = s.Nlink\n+ attr.NLink = uint64(s.Nlink)\n}\nif req.UID {\nattr.UID = UID(s.Uid)\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/fsgofer/fsgofer.go",
"new_path": "runsc/fsgofer/fsgofer.go",
"diff": "@@ -601,7 +601,7 @@ func (l *localFile) GetAttr(_ p9.AttrMask) (p9.QID, p9.AttrMask, p9.Attr, error)\nMode: p9.FileMode(stat.Mode),\nUID: p9.UID(stat.Uid),\nGID: p9.GID(stat.Gid),\n- NLink: stat.Nlink,\n+ NLink: uint64(stat.Nlink),\nRDev: stat.Rdev,\nSize: uint64(stat.Size),\nBlockSize: uint64(stat.Blksize),\n"
}
] | Go | Apache License 2.0 | google/gvisor | Cast the Stat_t.Nlink to uint64 on arm64.
Since the syscall.Stat_t.Nlink is defined as different types on
amd64 and arm64 (uint64 and uint32 respectively), we need to cast
them to a unified uint64 type in gVisor code.
Signed-off-by: Haibo Xu <[email protected]>
Change-Id: I7542b99b195c708f3fc49b1cbe6adebdd2f6e96b |
259,881 | 28.10.2019 10:18:55 | 25,200 | 198f1cddb82d46570ae63cb704b4a1b88cf0de1f | Update comment
FDTable.GetFile doesn't exist. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/task.go",
"new_path": "pkg/sentry/kernel/task.go",
"diff": "@@ -709,9 +709,9 @@ func (t *Task) FDTable() *FDTable {\nreturn t.fdTable\n}\n-// GetFile is a convenience wrapper t.FDTable().GetFile.\n+// GetFile is a convenience wrapper t.FDTable().Get.\n//\n-// Precondition: same as FDTable.\n+// Precondition: same as FDTable.Get.\nfunc (t *Task) GetFile(fd int32) *fs.File {\nf, _ := t.fdTable.Get(fd)\nreturn f\n"
}
] | Go | Apache License 2.0 | google/gvisor | Update comment
FDTable.GetFile doesn't exist.
PiperOrigin-RevId: 277089842 |
259,992 | 28.10.2019 18:48:35 | 25,200 | dbeaf9d4dbeea4cde670c3d07a78b56a45fa8f21 | Deflake TestCheckpointRestore | [
{
"change_type": "MODIFY",
"old_path": "test/e2e/integration_test.go",
"new_path": "test/e2e/integration_test.go",
"diff": "@@ -175,6 +175,9 @@ func TestCheckpointRestore(t *testing.T) {\nt.Fatal(err)\n}\n+ // TODO(b/143498576): Remove after github.com/moby/moby/issues/38963 is fixed.\n+ time.Sleep(1 * time.Second)\n+\nif err := d.Restore(\"test\"); err != nil {\nt.Fatal(\"docker restore failed:\", err)\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Deflake TestCheckpointRestore
PiperOrigin-RevId: 277189064 |
259,860 | 29.10.2019 10:03:18 | 25,200 | 29273b03842a85bce8314799348231520ceb6e9c | Disallow execveat on interpreter scripts with fd opened with O_CLOEXEC.
When an interpreter script is opened with O_CLOEXEC and the resulting fd is
passed into execveat, an ENOENT error should occur (the script would otherwise
be inaccessible to the interpreter). This matches the actual behavior of
Linux's execveat. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/kernel.go",
"new_path": "pkg/sentry/kernel/kernel.go",
"diff": "@@ -812,6 +812,7 @@ func (k *Kernel) CreateProcess(args CreateProcessArgs) (*ThreadGroup, ThreadID,\nResolveFinal: true,\nFilename: args.Filename,\nFile: args.File,\n+ CloseOnExec: false,\nArgv: args.Argv,\nEnvv: args.Envv,\nFeatures: k.featureSet,\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/loader/loader.go",
"new_path": "pkg/sentry/loader/loader.go",
"diff": "@@ -66,6 +66,12 @@ type LoadArgs struct {\n// nil, then File will be loaded and Filename will be ignored.\nFile *fs.File\n+ // CloseOnExec indicates that the executable (or one of its parent\n+ // directories) was opened with O_CLOEXEC. If the executable is an\n+ // interpreter script, then cause an ENOENT error to occur, since the\n+ // script would otherwise be inaccessible to the interpreter.\n+ CloseOnExec bool\n+\n// Argv is the vector of arguments to pass to the executable.\nArgv []string\n@@ -279,6 +285,9 @@ func loadExecutable(ctx context.Context, args LoadArgs) (loadedELF, arch.Context\nd.IncRef()\nreturn loaded, ac, d, args.Argv, err\ncase bytes.Equal(hdr[:2], []byte(interpreterScriptMagic)):\n+ if args.CloseOnExec {\n+ return loadedELF{}, nil, nil, nil, syserror.ENOENT\n+ }\nargs.Filename, args.Argv, err = parseInterpreterScript(ctx, args.Filename, args.File, args.Argv)\nif err != nil {\nctx.Infof(\"Error loading interpreter script: %v\", err)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/syscalls/linux/sys_thread.go",
"new_path": "pkg/sentry/syscalls/linux/sys_thread.go",
"diff": "@@ -120,6 +120,7 @@ func execveat(t *kernel.Task, dirFD int32, pathnameAddr, argvAddr, envvAddr user\nvar wd *fs.Dirent\nvar executable *fs.File\n+ var closeOnExec bool\nif dirFD == linux.AT_FDCWD || path.IsAbs(pathname) {\n// Even if the pathname is absolute, we may still need the wd\n// for interpreter scripts if the path of the interpreter is\n@@ -127,11 +128,12 @@ func execveat(t *kernel.Task, dirFD int32, pathnameAddr, argvAddr, envvAddr user\nwd = t.FSContext().WorkingDirectory()\n} else {\n// Need to extract the given FD.\n- f := t.GetFile(dirFD)\n+ f, fdFlags := t.FDTable().Get(dirFD)\nif f == nil {\nreturn 0, nil, syserror.EBADF\n}\ndefer f.DecRef()\n+ closeOnExec = fdFlags.CloseOnExec\nif atEmptyPath && len(pathname) == 0 {\nexecutable = f\n@@ -157,6 +159,7 @@ func execveat(t *kernel.Task, dirFD int32, pathnameAddr, argvAddr, envvAddr user\nResolveFinal: resolveFinal,\nFilename: pathname,\nFile: executable,\n+ CloseOnExec: closeOnExec,\nArgv: argv,\nEnvv: envv,\nFeatures: t.Arch().FeatureSet(),\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/exec.cc",
"new_path": "test/syscalls/linux/exec.cc",
"diff": "@@ -681,6 +681,39 @@ TEST(ExecveatTest, SymlinkNoFollowWithNormalFile) {\nArgEnvExitStatus(0, 0), \"\");\n}\n+TEST(ExecveatTest, BasicWithCloexecFD) {\n+ std::string path = WorkloadPath(kBasicWorkload);\n+ const FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(Open(path, O_CLOEXEC));\n+\n+ CheckExecveat(fd.get(), \"\", {path}, {}, AT_SYMLINK_NOFOLLOW | AT_EMPTY_PATH,\n+ ArgEnvExitStatus(0, 0), absl::StrCat(path, \"\\n\"));\n+}\n+\n+TEST(ExecveatTest, InterpreterScriptWithCloexecFD) {\n+ std::string path = WorkloadPath(kExitScript);\n+ const FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(Open(path, O_CLOEXEC));\n+\n+ int execve_errno;\n+ ASSERT_NO_ERRNO_AND_VALUE(ForkAndExecveat(fd.get(), \"\", {path}, {},\n+ AT_EMPTY_PATH, /*child=*/nullptr,\n+ &execve_errno));\n+ EXPECT_EQ(execve_errno, ENOENT);\n+}\n+\n+TEST(ExecveatTest, InterpreterScriptWithCloexecDirFD) {\n+ std::string absolute_path = WorkloadPath(kExitScript);\n+ std::string parent_dir = std::string(Dirname(absolute_path));\n+ std::string base = std::string(Basename(absolute_path));\n+ const FileDescriptor dirfd =\n+ ASSERT_NO_ERRNO_AND_VALUE(Open(parent_dir, O_CLOEXEC | O_DIRECTORY));\n+\n+ int execve_errno;\n+ ASSERT_NO_ERRNO_AND_VALUE(ForkAndExecveat(dirfd.get(), base, {base}, {},\n+ /*flags=*/0, /*child=*/nullptr,\n+ &execve_errno));\n+ EXPECT_EQ(execve_errno, ENOENT);\n+}\n+\nTEST(ExecveatTest, InvalidFlags) {\nint execve_errno;\nASSERT_NO_ERRNO_AND_VALUE(ForkAndExecveat(\n"
}
] | Go | Apache License 2.0 | google/gvisor | Disallow execveat on interpreter scripts with fd opened with O_CLOEXEC.
When an interpreter script is opened with O_CLOEXEC and the resulting fd is
passed into execveat, an ENOENT error should occur (the script would otherwise
be inaccessible to the interpreter). This matches the actual behavior of
Linux's execveat.
PiperOrigin-RevId: 277306680 |
259,854 | 29.10.2019 11:19:04 | 25,200 | 7d80e85835fbe47b2395eedf287cf902ed78599a | Allow waiting for Endpoint worker goroutines to finish.
Updates | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/registration.go",
"new_path": "pkg/tcpip/stack/registration.go",
"diff": "@@ -67,6 +67,20 @@ type TransportEndpoint interface {\n// HandleControlPacket is called by the stack when new control (e.g.,\n// ICMP) packets arrive to this transport endpoint.\nHandleControlPacket(id TransportEndpointID, typ ControlType, extra uint32, vv buffer.VectorisedView)\n+\n+ // Close puts the endpoint in a closed state and frees all resources\n+ // associated with it. This cleanup may happen asynchronously. Wait can\n+ // be used to block on this asynchronous cleanup.\n+ Close()\n+\n+ // Wait waits for any worker goroutines owned by the endpoint to stop.\n+ //\n+ // An endpoint can be requested to stop its worker goroutines by calling\n+ // its Close method.\n+ //\n+ // Wait will not block if the endpoint hasn't started any goroutines\n+ // yet, even if it might later.\n+ Wait()\n}\n// RawTransportEndpoint is the interface that needs to be implemented by raw\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/transport_demuxer.go",
"new_path": "pkg/tcpip/stack/transport_demuxer.go",
"diff": "@@ -240,6 +240,26 @@ func (ep *multiPortEndpoint) handlePacketAll(r *Route, id TransportEndpointID, v\nep.mu.RUnlock() // Don't use defer for performance reasons.\n}\n+// Close implements stack.TransportEndpoint.Close.\n+func (ep *multiPortEndpoint) Close() {\n+ ep.mu.RLock()\n+ eps := append([]TransportEndpoint(nil), ep.endpointsArr...)\n+ ep.mu.RUnlock()\n+ for _, e := range eps {\n+ e.Close()\n+ }\n+}\n+\n+// Wait implements stack.TransportEndpoint.Wait.\n+func (ep *multiPortEndpoint) Wait() {\n+ ep.mu.RLock()\n+ eps := append([]TransportEndpoint(nil), ep.endpointsArr...)\n+ ep.mu.RUnlock()\n+ for _, e := range eps {\n+ e.Wait()\n+ }\n+}\n+\n// singleRegisterEndpoint tries to add an endpoint to the multiPortEndpoint\n// list. The list might be empty already.\nfunc (ep *multiPortEndpoint) singleRegisterEndpoint(t TransportEndpoint, reusePort bool) *tcpip.Error {\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/transport_test.go",
"new_path": "pkg/tcpip/stack/transport_test.go",
"diff": "@@ -225,8 +225,9 @@ func (f *fakeTransportEndpoint) IPTables() (iptables.IPTables, error) {\nreturn iptables.IPTables{}, nil\n}\n-func (f *fakeTransportEndpoint) Resume(*stack.Stack) {\n-}\n+func (f *fakeTransportEndpoint) Resume(*stack.Stack) {}\n+\n+func (f *fakeTransportEndpoint) Wait() {}\ntype fakeTransportGoodOption bool\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/icmp/endpoint.go",
"new_path": "pkg/tcpip/transport/icmp/endpoint.go",
"diff": "@@ -798,3 +798,6 @@ func (e *endpoint) Info() tcpip.EndpointInfo {\nfunc (e *endpoint) Stats() tcpip.EndpointStats {\nreturn &e.stats\n}\n+\n+// Wait implements stack.TransportEndpoint.Wait.\n+func (*endpoint) Wait() {}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/raw/endpoint.go",
"new_path": "pkg/tcpip/transport/raw/endpoint.go",
"diff": "@@ -641,3 +641,6 @@ func (e *endpoint) Info() tcpip.EndpointInfo {\nfunc (e *endpoint) Stats() tcpip.EndpointStats {\nreturn &e.stats\n}\n+\n+// Wait implements stack.TransportEndpoint.Wait.\n+func (*endpoint) Wait() {}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/endpoint.go",
"new_path": "pkg/tcpip/transport/tcp/endpoint.go",
"diff": "@@ -2399,6 +2399,22 @@ func (e *endpoint) Stats() tcpip.EndpointStats {\nreturn &e.stats\n}\n+// Wait implements stack.TransportEndpoint.Wait.\n+func (e *endpoint) Wait() {\n+ waitEntry, notifyCh := waiter.NewChannelEntry(nil)\n+ e.waiterQueue.EventRegister(&waitEntry, waiter.EventHUp)\n+ defer e.waiterQueue.EventUnregister(&waitEntry)\n+ for {\n+ e.mu.Lock()\n+ running := e.workerRunning\n+ e.mu.Unlock()\n+ if !running {\n+ break\n+ }\n+ <-notifyCh\n+ }\n+}\n+\nfunc mssForRoute(r *stack.Route) uint16 {\n// TODO(b/143359391): Respect TCP Min and Max size.\nreturn uint16(r.MTU() - header.TCPMinimumSize)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/udp/endpoint.go",
"new_path": "pkg/tcpip/transport/udp/endpoint.go",
"diff": "@@ -1234,6 +1234,9 @@ func (e *endpoint) Stats() tcpip.EndpointStats {\nreturn &e.stats\n}\n+// Wait implements tcpip.Endpoint.Wait.\n+func (*endpoint) Wait() {}\n+\nfunc isBroadcastOrMulticast(a tcpip.Address) bool {\nreturn a == header.IPv4Broadcast || header.IsV4MulticastAddress(a) || header.IsV6MulticastAddress(a)\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Allow waiting for Endpoint worker goroutines to finish.
Updates #837
PiperOrigin-RevId: 277325162 |
259,962 | 29.10.2019 12:15:33 | 25,200 | 392c56149531c82ef3c07e2899939c0d63f0980b | Fix PollWithFullBufferBlocks.
Set the snd/rcv buffer sizes so that the test is deterministic and runs in a
reasonable amount of time. It also ensures that we disable any auto-tuning of
the send/receive buffer which may happen. | [
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/tcp_socket.cc",
"new_path": "test/syscalls/linux/tcp_socket.cc",
"diff": "@@ -394,8 +394,15 @@ TEST_P(TcpSocketTest, PollWithFullBufferBlocks) {\nsizeof(tcp_nodelay_flag)),\nSyscallSucceeds());\n+ // Set a 256KB send/receive buffer.\n+ int buf_sz = 1 << 18;\n+ EXPECT_THAT(setsockopt(t_, SOL_SOCKET, SO_RCVBUF, &buf_sz, sizeof(buf_sz)),\n+ SyscallSucceedsWithValue(0));\n+ EXPECT_THAT(setsockopt(s_, SOL_SOCKET, SO_SNDBUF, &buf_sz, sizeof(buf_sz)),\n+ SyscallSucceedsWithValue(0));\n+\n// Create a large buffer that will be used for sending.\n- std::vector<char> buf(10 * sendbuf_size_);\n+ std::vector<char> buf(1 << 16);\n// Write until we receive an error.\nwhile (RetryEINTR(send)(s_, buf.data(), buf.size(), 0) != -1) {\n"
}
] | Go | Apache License 2.0 | google/gvisor | Fix PollWithFullBufferBlocks.
Set the snd/rcv buffer sizes so that the test is deterministic and runs in a
reasonable amount of time. It also ensures that we disable any auto-tuning of
the send/receive buffer which may happen.
PiperOrigin-RevId: 277337232 |
259,860 | 29.10.2019 12:50:03 | 25,200 | 2e00771d5abb3d821703965953c2b21ef7c20911 | Refactor logic for loadExecutable.
Separate the handling of filenames and *fs.File objects in a more explicit way
for the sake of clarity. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/loader/loader.go",
"new_path": "pkg/sentry/loader/loader.go",
"diff": "@@ -20,6 +20,7 @@ import (\n\"fmt\"\n\"io\"\n\"path\"\n+ \"strings\"\n\"gvisor.dev/gvisor/pkg/abi\"\n\"gvisor.dev/gvisor/pkg/abi/linux\"\n@@ -99,20 +100,20 @@ func readFull(ctx context.Context, f *fs.File, dst usermem.IOSequence, offset in\nreturn total, nil\n}\n-// openPath opens args.Filename for loading.\n+// openPath opens args.Filename and checks that it is valid for loading.\n//\n-// openPath returns the fs.Dirent and an *fs.File for args.Filename, which is\n-// not installed in the Task FDTable. The caller takes ownership of both.\n+// openPath returns an *fs.Dirent and *fs.File for args.Filename, which is not\n+// installed in the Task FDTable. The caller takes ownership of both.\n//\n// args.Filename must be a readable, executable, regular file.\nfunc openPath(ctx context.Context, args LoadArgs) (*fs.Dirent, *fs.File, error) {\n- var err error\nif args.Filename == \"\" {\nctx.Infof(\"cannot open empty name\")\nreturn nil, nil, syserror.ENOENT\n}\nvar d *fs.Dirent\n+ var err error\nif args.ResolveFinal {\nd, err = args.Mounts.FindInode(ctx, args.Root, args.WorkingDirectory, args.Filename, args.RemainingTraversals)\n} else {\n@@ -121,67 +122,60 @@ func openPath(ctx context.Context, args LoadArgs) (*fs.Dirent, *fs.File, error)\nif err != nil {\nreturn nil, nil, err\n}\n-\n- // Open file will take a reference to Dirent, so destroy this one.\n+ // Defer a DecRef for the sake of failure cases.\ndefer d.DecRef()\nif !args.ResolveFinal && fs.IsSymlink(d.Inode.StableAttr) {\nreturn nil, nil, syserror.ELOOP\n}\n- return openFile(ctx, nil, d, args.Filename)\n+ if err := checkPermission(ctx, d); err != nil {\n+ return nil, nil, err\n}\n-// openFile takes that file's Dirent and performs checks on it. If provided a\n-// *fs.Dirent and not a *fs.File, it creates a *fs.File object from the Dirent's\n-// Inode and performs checks on that.\n-//\n-// openFile returns an *fs.File and *fs.Dirent, and the caller takes ownership\n-// of both.\n+ // If they claim it's a directory, then make sure.\n//\n-// \"dirent\" and \"file\" must not both be nil and point to a readable, executable, regular file.\n-func openFile(ctx context.Context, file *fs.File, dirent *fs.Dirent, name string) (*fs.Dirent, *fs.File, error) {\n- // file and dirent must not be nil.\n- if dirent == nil && file == nil {\n- ctx.Infof(\"dirent and file cannot both be nil.\")\n- return nil, nil, syserror.ENOENT\n+ // N.B. 
we reject directories below, but we must first reject\n+ // non-directories passed as directories.\n+ if strings.HasSuffix(args.Filename, \"/\") && !fs.IsDir(d.Inode.StableAttr) {\n+ return nil, nil, syserror.ENOTDIR\n}\n- if file != nil {\n- dirent = file.Dirent\n+ if err := checkIsRegularFile(ctx, d, args.Filename); err != nil {\n+ return nil, nil, err\n}\n- // Perform permissions checks on the file.\n- if err := checkFile(ctx, dirent, name); err != nil {\n+ f, err := d.Inode.GetFile(ctx, d, fs.FileFlags{Read: true})\n+ if err != nil {\nreturn nil, nil, err\n}\n+ // Defer a DecRef for the sake of failure cases.\n+ defer f.DecRef()\n- if file == nil {\n- var ferr error\n- if file, ferr = dirent.Inode.GetFile(ctx, dirent, fs.FileFlags{Read: true}); ferr != nil {\n- return nil, nil, ferr\n+ if err := checkPread(ctx, f, args.Filename); err != nil {\n+ return nil, nil, err\n}\n- } else {\n- // GetFile takes a reference to the created file, so make one in the case\n- // that the file reference already existed.\n- file.IncRef()\n+\n+ d.IncRef()\n+ f.IncRef()\n+ return d, f, err\n}\n- // We must be able to read at arbitrary offsets.\n- if !file.Flags().Pread {\n- file.DecRef()\n- ctx.Infof(\"%s cannot be read at an offset: %+v\", file.MappedName(ctx), file.Flags())\n- return nil, nil, syserror.EACCES\n+// checkFile performs checks on a file to be executed.\n+func checkFile(ctx context.Context, f *fs.File, filename string) error {\n+ if err := checkPermission(ctx, f.Dirent); err != nil {\n+ return err\n+ }\n+\n+ if err := checkIsRegularFile(ctx, f.Dirent, filename); err != nil {\n+ return err\n}\n- // Grab reference for caller.\n- dirent.IncRef()\n- return dirent, file, nil\n+ return checkPread(ctx, f, filename)\n}\n-// checkFile performs file permissions checks for binaries called in openPath\n-// and openFile\n-func checkFile(ctx context.Context, d *fs.Dirent, name string) error {\n+// checkPermission checks whether the file is readable and executable.\n+func checkPermission(ctx context.Context, d *fs.Dirent) error {\nperms := fs.PermMask{\n// TODO(gvisor.dev/issue/160): Linux requires only execute\n// permission, not read. However, our backing filesystems may\n@@ -192,26 +186,26 @@ func checkFile(ctx context.Context, d *fs.Dirent, name string) error {\nRead: true,\nExecute: true,\n}\n- if err := d.Inode.CheckPermission(ctx, perms); err != nil {\n- return err\n+ return d.Inode.CheckPermission(ctx, perms)\n}\n- // If they claim it's a directory, then make sure.\n- //\n- // N.B. 
we reject directories below, but we must first reject\n- // non-directories passed as directories.\n- if len(name) > 0 && name[len(name)-1] == '/' && !fs.IsDir(d.Inode.StableAttr) {\n- return syserror.ENOTDIR\n+// checkIsRegularFile prevents us from trying to execute a directory, pipe, etc.\n+func checkIsRegularFile(ctx context.Context, d *fs.Dirent, filename string) error {\n+ attr := d.Inode.StableAttr\n+ if !fs.IsRegular(attr) {\n+ ctx.Infof(\"%s is not regular: %v\", filename, attr)\n+ return syserror.EACCES\n+ }\n+ return nil\n}\n- // No exec-ing directories, pipes, etc!\n- if !fs.IsRegular(d.Inode.StableAttr) {\n- ctx.Infof(\"%s is not regular: %v\", name, d.Inode.StableAttr)\n+// checkPread checks whether we can read the file at arbitrary offsets.\n+func checkPread(ctx context.Context, f *fs.File, filename string) error {\n+ if !f.Flags().Pread {\n+ ctx.Infof(\"%s cannot be read at an offset: %+v\", filename, f.Flags())\nreturn syserror.EACCES\n}\n-\nreturn nil\n-\n}\n// allocStack allocates and maps a stack in to any available part of the address space.\n@@ -248,25 +242,31 @@ func loadExecutable(ctx context.Context, args LoadArgs) (loadedELF, arch.Context\n)\nif args.File == nil {\nd, args.File, err = openPath(ctx, args)\n+ // We will return d in the successful case, but defer a DecRef for the\n+ // sake of intermediate loops and failure cases.\n+ if d != nil {\n+ defer d.DecRef()\n+ }\n+ if args.File != nil {\n+ defer args.File.DecRef()\n+ }\n} else {\n- d, args.File, err = openFile(ctx, args.File, nil, \"\")\n+ d = args.File.Dirent\n+ d.IncRef()\n+ defer d.DecRef()\n+ err = checkFile(ctx, args.File, args.Filename)\n}\n-\nif err != nil {\nctx.Infof(\"Error opening %s: %v\", args.Filename, err)\nreturn loadedELF{}, nil, nil, nil, err\n}\n- defer args.File.DecRef()\n- // We will return d in the successful case, but defer a DecRef\n- // for intermediate loops and failure cases.\n- defer d.DecRef()\n// Check the header. Is this an ELF or interpreter script?\nvar hdr [4]uint8\n// N.B. We assume that reading from a regular file cannot block.\n_, err = readFull(ctx, args.File, usermem.BytesIOSequence(hdr[:]), 0)\n- // Allow unexpected EOF, as a valid executable could be only\n- // three bytes (e.g., #!a).\n+ // Allow unexpected EOF, as a valid executable could be only three bytes\n+ // (e.g., #!a).\nif err != nil && err != io.ErrUnexpectedEOF {\nif err == io.EOF {\nerr = syserror.ENOEXEC\n"
}
] | Go | Apache License 2.0 | google/gvisor | Refactor logic for loadExecutable.
Separate the handling of filenames and *fs.File objects in a more explicit way
for the sake of clarity.
PiperOrigin-RevId: 277344203 |
259,881 | 29.10.2019 13:17:01 | 25,200 | c0b8fd4b6a9fcb595f3200577b93d07737cfaacd | Update build tags to allow Go 1.14
Currently there are no ABI changes. We should check again closer to release. | [
{
"change_type": "MODIFY",
"old_path": "pkg/procid/procid_amd64.s",
"new_path": "pkg/procid/procid_amd64.s",
"diff": "// +build amd64\n// +build go1.8\n-// +build !go1.14\n+// +build !go1.15\n#include \"textflag.h\"\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/procid/procid_arm64.s",
"new_path": "pkg/procid/procid_arm64.s",
"diff": "// +build arm64\n// +build go1.8\n-// +build !go1.14\n+// +build !go1.15\n#include \"textflag.h\"\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/kvm/bluepill_unsafe.go",
"new_path": "pkg/sentry/platform/kvm/bluepill_unsafe.go",
"diff": "// limitations under the License.\n// +build go1.12\n-// +build !go1.14\n+// +build !go1.15\n// Check go:linkname function signatures when updating Go version.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/kvm/machine_unsafe.go",
"new_path": "pkg/sentry/platform/kvm/machine_unsafe.go",
"diff": "// limitations under the License.\n// +build go1.12\n-// +build !go1.14\n+// +build !go1.15\n// Check go:linkname function signatures when updating Go version.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/ptrace/subprocess_unsafe.go",
"new_path": "pkg/sentry/platform/ptrace/subprocess_unsafe.go",
"diff": "// limitations under the License.\n// +build go1.12\n-// +build !go1.14\n+// +build !go1.15\n// Check go:linkname function signatures when updating Go version.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/vfs/mount_unsafe.go",
"new_path": "pkg/sentry/vfs/mount_unsafe.go",
"diff": "// limitations under the License.\n// +build go1.12\n-// +build !go1.14\n+// +build !go1.15\n// Check go:linkname function signatures when updating Go version.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sleep/sleep_unsafe.go",
"new_path": "pkg/sleep/sleep_unsafe.go",
"diff": "// limitations under the License.\n// +build go1.11\n-// +build !go1.14\n+// +build !go1.15\n// Check go:linkname function signatures when updating Go version.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/rawfile/blockingpoll_yield_unsafe.go",
"new_path": "pkg/tcpip/link/rawfile/blockingpoll_yield_unsafe.go",
"diff": "// +build linux,amd64 linux,arm64\n// +build go1.12\n-// +build !go1.14\n+// +build !go1.15\n// Check go:linkname function signatures when updating Go version.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/time_unsafe.go",
"new_path": "pkg/tcpip/time_unsafe.go",
"diff": "// limitations under the License.\n// +build go1.9\n-// +build !go1.14\n+// +build !go1.15\n// Check go:linkname function signatures when updating Go version.\n"
},
{
"change_type": "MODIFY",
"old_path": "third_party/gvsync/downgradable_rwmutex_1_13_unsafe.go",
"new_path": "third_party/gvsync/downgradable_rwmutex_1_13_unsafe.go",
"diff": "// license that can be found in the LICENSE file.\n// +build go1.13\n-// +build !go1.14\n+// +build !go1.15\n// Check go:linkname function signatures when updating Go version.\n"
},
{
"change_type": "MODIFY",
"old_path": "third_party/gvsync/downgradable_rwmutex_unsafe.go",
"new_path": "third_party/gvsync/downgradable_rwmutex_unsafe.go",
"diff": "// license that can be found in the LICENSE file.\n// +build go1.12\n-// +build !go1.14\n+// +build !go1.15\n// Check go:linkname function signatures when updating Go version.\n"
},
{
"change_type": "MODIFY",
"old_path": "third_party/gvsync/memmove_unsafe.go",
"new_path": "third_party/gvsync/memmove_unsafe.go",
"diff": "// license that can be found in the LICENSE file.\n// +build go1.12\n-// +build !go1.14\n+// +build !go1.15\n// Check go:linkname function signatures when updating Go version.\n"
}
] | Go | Apache License 2.0 | google/gvisor | Update build tags to allow Go 1.14
Currently there are no ABI changes. We should check again closer to release.
PiperOrigin-RevId: 277349744 |
259,860 | 29.10.2019 13:58:20 | 25,200 | d7f5e823e24501c33a377ee6c73210b00bf3d89f | Fix grammar in comment.
Missing "for". | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/task.go",
"new_path": "pkg/sentry/kernel/task.go",
"diff": "@@ -709,7 +709,7 @@ func (t *Task) FDTable() *FDTable {\nreturn t.fdTable\n}\n-// GetFile is a convenience wrapper t.FDTable().Get.\n+// GetFile is a convenience wrapper for t.FDTable().Get.\n//\n// Precondition: same as FDTable.Get.\nfunc (t *Task) GetFile(fd int32) *fs.File {\n"
}
] | Go | Apache License 2.0 | google/gvisor | Fix grammar in comment.
Missing "for".
PiperOrigin-RevId: 277358513 |
259,854 | 29.10.2019 16:13:43 | 25,200 | a2c51efe3669f0380042b2375eae79e403d3680c | Add endpoint tracking to the stack.
In the future this will replace DanglingEndpoints. DanglingEndpoints must be
kept for now due to issues with save/restore.
This is arguably a cleaner design and allows the stack to know which transport
endpoints might still be using its link endpoints.
Updates | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/inet/BUILD",
"new_path": "pkg/sentry/inet/BUILD",
"diff": "@@ -13,5 +13,8 @@ go_library(\n\"test_stack.go\",\n],\nimportpath = \"gvisor.dev/gvisor/pkg/sentry/inet\",\n- deps = [\"//pkg/sentry/context\"],\n+ deps = [\n+ \"//pkg/sentry/context\",\n+ \"//pkg/tcpip/stack\",\n+ ],\n)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/inet/inet.go",
"new_path": "pkg/sentry/inet/inet.go",
"diff": "// Package inet defines semantics for IP stacks.\npackage inet\n+import \"gvisor.dev/gvisor/pkg/tcpip/stack\"\n+\n// Stack represents a TCP/IP stack.\ntype Stack interface {\n// Interfaces returns all network interfaces as a mapping from interface\n@@ -58,6 +60,16 @@ type Stack interface {\n// Resume restarts the network stack after restore.\nResume()\n+\n+ // RegisteredEndpoints returns all endpoints which are currently registered.\n+ RegisteredEndpoints() []stack.TransportEndpoint\n+\n+ // CleanupEndpoints returns endpoints currently in the cleanup state.\n+ CleanupEndpoints() []stack.TransportEndpoint\n+\n+ // RestoreCleanupEndpoints adds endpoints to cleanup tracking. This is useful\n+ // for restoring a stack after a save.\n+ RestoreCleanupEndpoints([]stack.TransportEndpoint)\n}\n// Interface contains information about a network interface.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/inet/test_stack.go",
"new_path": "pkg/sentry/inet/test_stack.go",
"diff": "package inet\n+import \"gvisor.dev/gvisor/pkg/tcpip/stack\"\n+\n// TestStack is a dummy implementation of Stack for tests.\ntype TestStack struct {\nInterfacesMap map[int32]Interface\n@@ -94,5 +96,17 @@ func (s *TestStack) RouteTable() []Route {\n}\n// Resume implements Stack.Resume.\n-func (s *TestStack) Resume() {\n+func (s *TestStack) Resume() {}\n+\n+// RegisteredEndpoints implements inet.Stack.RegisteredEndpoints.\n+func (s *TestStack) RegisteredEndpoints() []stack.TransportEndpoint {\n+ return nil\n}\n+\n+// CleanupEndpoints implements inet.Stack.CleanupEndpoints.\n+func (s *TestStack) CleanupEndpoints() []stack.TransportEndpoint {\n+ return nil\n+}\n+\n+// RestoreCleanupEndpoints implements inet.Stack.RestoreCleanupEndpoints.\n+func (s *TestStack) RestoreCleanupEndpoints([]stack.TransportEndpoint) {}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/hostinet/BUILD",
"new_path": "pkg/sentry/socket/hostinet/BUILD",
"diff": "@@ -32,6 +32,7 @@ go_library(\n\"//pkg/sentry/usermem\",\n\"//pkg/syserr\",\n\"//pkg/syserror\",\n+ \"//pkg/tcpip/stack\",\n\"//pkg/waiter\",\n],\n)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/hostinet/stack.go",
"new_path": "pkg/sentry/socket/hostinet/stack.go",
"diff": "@@ -31,6 +31,7 @@ import (\n\"gvisor.dev/gvisor/pkg/sentry/usermem\"\n\"gvisor.dev/gvisor/pkg/syserr\"\n\"gvisor.dev/gvisor/pkg/syserror\"\n+ \"gvisor.dev/gvisor/pkg/tcpip/stack\"\n)\nvar defaultRecvBufSize = inet.TCPBufferSize{\n@@ -442,3 +443,12 @@ func (s *Stack) RouteTable() []inet.Route {\n// Resume implements inet.Stack.Resume.\nfunc (s *Stack) Resume() {}\n+\n+// RegisteredEndpoints implements inet.Stack.RegisteredEndpoints.\n+func (s *Stack) RegisteredEndpoints() []stack.TransportEndpoint { return nil }\n+\n+// CleanupEndpoints implements inet.Stack.CleanupEndpoints.\n+func (s *Stack) CleanupEndpoints() []stack.TransportEndpoint { return nil }\n+\n+// RestoreCleanupEndpoints implements inet.Stack.RestoreCleanupEndpoints.\n+func (s *Stack) RestoreCleanupEndpoints([]stack.TransportEndpoint) {}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/netstack/stack.go",
"new_path": "pkg/sentry/socket/netstack/stack.go",
"diff": "@@ -291,3 +291,18 @@ func (s *Stack) FillDefaultIPTables() {\nfunc (s *Stack) Resume() {\ns.Stack.Resume()\n}\n+\n+// RegisteredEndpoints implements inet.Stack.RegisteredEndpoints.\n+func (s *Stack) RegisteredEndpoints() []stack.TransportEndpoint {\n+ return s.Stack.RegisteredEndpoints()\n+}\n+\n+// CleanupEndpoints implements inet.Stack.CleanupEndpoints.\n+func (s *Stack) CleanupEndpoints() []stack.TransportEndpoint {\n+ return s.Stack.CleanupEndpoints()\n+}\n+\n+// RestoreCleanupEndpoints implements inet.Stack.RestoreCleanupEndpoints.\n+func (s *Stack) RestoreCleanupEndpoints(es []stack.TransportEndpoint) {\n+ s.Stack.RestoreCleanupEndpoints(es)\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/rpcinet/BUILD",
"new_path": "pkg/sentry/socket/rpcinet/BUILD",
"diff": "@@ -37,6 +37,7 @@ go_library(\n\"//pkg/syserror\",\n\"//pkg/tcpip\",\n\"//pkg/tcpip/buffer\",\n+ \"//pkg/tcpip/stack\",\n\"//pkg/unet\",\n\"//pkg/waiter\",\n],\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/rpcinet/stack.go",
"new_path": "pkg/sentry/socket/rpcinet/stack.go",
"diff": "@@ -23,6 +23,7 @@ import (\n\"gvisor.dev/gvisor/pkg/sentry/socket/rpcinet/conn\"\n\"gvisor.dev/gvisor/pkg/sentry/socket/rpcinet/notifier\"\n\"gvisor.dev/gvisor/pkg/syserr\"\n+ \"gvisor.dev/gvisor/pkg/tcpip/stack\"\n\"gvisor.dev/gvisor/pkg/unet\"\n)\n@@ -165,3 +166,12 @@ func (s *Stack) RouteTable() []inet.Route {\n// Resume implements inet.Stack.Resume.\nfunc (s *Stack) Resume() {}\n+\n+// RegisteredEndpoints implements inet.Stack.RegisteredEndpoints.\n+func (s *Stack) RegisteredEndpoints() []stack.TransportEndpoint { return nil }\n+\n+// CleanupEndpoints implements inet.Stack.CleanupEndpoints.\n+func (s *Stack) CleanupEndpoints() []stack.TransportEndpoint { return nil }\n+\n+// RestoreCleanupEndpoints implements inet.Stack.RestoreCleanupEndpoints.\n+func (s *Stack) RestoreCleanupEndpoints([]stack.TransportEndpoint) {}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/stack.go",
"new_path": "pkg/tcpip/stack/stack.go",
"diff": "@@ -364,6 +364,7 @@ type Stack struct {\nmu sync.RWMutex\nnics map[tcpip.NICID]*NIC\nforwarding bool\n+ cleanupEndpoints map[TransportEndpoint]struct{}\n// route is the route table passed in by the user via SetRouteTable(),\n// it is used by FindRoute() to build a route for a specific\n@@ -513,6 +514,7 @@ func New(opts Options) *Stack {\nnetworkProtocols: make(map[tcpip.NetworkProtocolNumber]NetworkProtocol),\nlinkAddrResolvers: make(map[tcpip.NetworkProtocolNumber]LinkAddressResolver),\nnics: make(map[tcpip.NICID]*NIC),\n+ cleanupEndpoints: make(map[TransportEndpoint]struct{}),\nlinkAddrCache: newLinkAddrCache(ageLimit, resolutionTimeout, resolutionAttempts),\nPortManager: ports.NewPortManager(),\nclock: clock,\n@@ -1136,6 +1138,25 @@ func (s *Stack) UnregisterTransportEndpoint(nicID tcpip.NICID, netProtos []tcpip\ns.demux.unregisterEndpoint(netProtos, protocol, id, ep, bindToDevice)\n}\n+// StartTransportEndpointCleanup removes the endpoint with the given id from\n+// the stack transport dispatcher. It also transitions it to the cleanup stage.\n+func (s *Stack) StartTransportEndpointCleanup(nicID tcpip.NICID, netProtos []tcpip.NetworkProtocolNumber, protocol tcpip.TransportProtocolNumber, id TransportEndpointID, ep TransportEndpoint, bindToDevice tcpip.NICID) {\n+ s.mu.Lock()\n+ defer s.mu.Unlock()\n+\n+ s.cleanupEndpoints[ep] = struct{}{}\n+\n+ s.demux.unregisterEndpoint(netProtos, protocol, id, ep, bindToDevice)\n+}\n+\n+// CompleteTransportEndpointCleanup removes the endpoint from the cleanup\n+// stage.\n+func (s *Stack) CompleteTransportEndpointCleanup(ep TransportEndpoint) {\n+ s.mu.Lock()\n+ delete(s.cleanupEndpoints, ep)\n+ s.mu.Unlock()\n+}\n+\n// RegisterRawTransportEndpoint registers the given endpoint with the stack\n// transport dispatcher. Received packets that match the provided transport\n// protocol will be delivered to the given endpoint.\n@@ -1157,6 +1178,38 @@ func (s *Stack) RegisterRestoredEndpoint(e ResumableEndpoint) {\ns.mu.Unlock()\n}\n+// RegisteredEndpoints returns all endpoints which are currently registered.\n+func (s *Stack) RegisteredEndpoints() []TransportEndpoint {\n+ s.mu.Lock()\n+ defer s.mu.Unlock()\n+ var es []TransportEndpoint\n+ for _, e := range s.demux.protocol {\n+ es = append(es, e.transportEndpoints()...)\n+ }\n+ return es\n+}\n+\n+// CleanupEndpoints returns endpoints currently in the cleanup state.\n+func (s *Stack) CleanupEndpoints() []TransportEndpoint {\n+ s.mu.Lock()\n+ es := make([]TransportEndpoint, 0, len(s.cleanupEndpoints))\n+ for e := range s.cleanupEndpoints {\n+ es = append(es, e)\n+ }\n+ s.mu.Unlock()\n+ return es\n+}\n+\n+// RestoreCleanupEndpoints adds endpoints to cleanup tracking. This is useful\n+// for restoring a stack after a save.\n+func (s *Stack) RestoreCleanupEndpoints(es []TransportEndpoint) {\n+ s.mu.Lock()\n+ for _, e := range es {\n+ s.cleanupEndpoints[e] = struct{}{}\n+ }\n+ s.mu.Unlock()\n+}\n+\n// Resume restarts the stack after a restore. This must be called after the\n// entire system has been restored.\nfunc (s *Stack) Resume() {\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/transport_demuxer.go",
"new_path": "pkg/tcpip/stack/transport_demuxer.go",
"diff": "@@ -41,6 +41,31 @@ type transportEndpoints struct {\nrawEndpoints []RawTransportEndpoint\n}\n+// unregisterEndpoint unregisters the endpoint with the given id such that it\n+// won't receive any more packets.\n+func (eps *transportEndpoints) unregisterEndpoint(id TransportEndpointID, ep TransportEndpoint, bindToDevice tcpip.NICID) {\n+ eps.mu.Lock()\n+ defer eps.mu.Unlock()\n+ epsByNic, ok := eps.endpoints[id]\n+ if !ok {\n+ return\n+ }\n+ if !epsByNic.unregisterEndpoint(bindToDevice, ep) {\n+ return\n+ }\n+ delete(eps.endpoints, id)\n+}\n+\n+func (eps *transportEndpoints) transportEndpoints() []TransportEndpoint {\n+ eps.mu.RLock()\n+ defer eps.mu.RUnlock()\n+ es := make([]TransportEndpoint, 0, len(eps.endpoints))\n+ for _, e := range eps.endpoints {\n+ es = append(es, e.transportEndpoints()...)\n+ }\n+ return es\n+}\n+\ntype endpointsByNic struct {\nmu sync.RWMutex\nendpoints map[tcpip.NICID]*multiPortEndpoint\n@@ -48,6 +73,16 @@ type endpointsByNic struct {\nseed uint32\n}\n+func (epsByNic *endpointsByNic) transportEndpoints() []TransportEndpoint {\n+ epsByNic.mu.RLock()\n+ defer epsByNic.mu.RUnlock()\n+ var eps []TransportEndpoint\n+ for _, ep := range epsByNic.endpoints {\n+ eps = append(eps, ep.transportEndpoints()...)\n+ }\n+ return eps\n+}\n+\n// HandlePacket is called by the stack when new packets arrive to this transport\n// endpoint.\nfunc (epsByNic *endpointsByNic) handlePacket(r *Route, id TransportEndpointID, vv buffer.VectorisedView) {\n@@ -127,21 +162,6 @@ func (epsByNic *endpointsByNic) unregisterEndpoint(bindToDevice tcpip.NICID, t T\nreturn len(epsByNic.endpoints) == 0\n}\n-// unregisterEndpoint unregisters the endpoint with the given id such that it\n-// won't receive any more packets.\n-func (eps *transportEndpoints) unregisterEndpoint(id TransportEndpointID, ep TransportEndpoint, bindToDevice tcpip.NICID) {\n- eps.mu.Lock()\n- defer eps.mu.Unlock()\n- epsByNic, ok := eps.endpoints[id]\n- if !ok {\n- return\n- }\n- if !epsByNic.unregisterEndpoint(bindToDevice, ep) {\n- return\n- }\n- delete(eps.endpoints, id)\n-}\n-\n// transportDemuxer demultiplexes packets targeted at a transport endpoint\n// (i.e., after they've been parsed by the network layer). It does two levels\n// of demultiplexing: first based on the network and transport protocols, then\n@@ -183,14 +203,27 @@ func (d *transportDemuxer) registerEndpoint(netProtos []tcpip.NetworkProtocolNum\n// multiPortEndpoint is a container for TransportEndpoints which are bound to\n// the same pair of address and port. endpointsArr always has at least one\n// element.\n+//\n+// FIXME(gvisor.dev/issue/873): Restore this properly. Currently, we just save\n+// this to ensure that the underlying endpoints get saved/restored, but not not\n+// use the restored copy.\n+//\n+// +stateify savable\ntype multiPortEndpoint struct {\n- mu sync.RWMutex\n+ mu sync.RWMutex `state:\"nosave\"`\nendpointsArr []TransportEndpoint\nendpointsMap map[TransportEndpoint]int\n// reuse indicates if more than one endpoint is allowed.\nreuse bool\n}\n+func (ep *multiPortEndpoint) transportEndpoints() []TransportEndpoint {\n+ ep.mu.RLock()\n+ eps := append([]TransportEndpoint(nil), ep.endpointsArr...)\n+ ep.mu.RUnlock()\n+ return eps\n+}\n+\n// reciprocalScale scales a value into range [0, n).\n//\n// This is similar to val % n, but faster.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/transport_test.go",
"new_path": "pkg/tcpip/stack/transport_test.go",
"diff": "@@ -218,8 +218,7 @@ func (f *fakeTransportEndpoint) State() uint32 {\nreturn 0\n}\n-func (f *fakeTransportEndpoint) ModerateRecvBuf(copied int) {\n-}\n+func (f *fakeTransportEndpoint) ModerateRecvBuf(copied int) {}\nfunc (f *fakeTransportEndpoint) IPTables() (iptables.IPTables, error) {\nreturn iptables.IPTables{}, nil\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/endpoint.go",
"new_path": "pkg/tcpip/transport/tcp/endpoint.go",
"diff": "@@ -686,7 +686,7 @@ func (e *endpoint) Close() {\n// in Listen() when trying to register.\nif e.state == StateListen && e.isPortReserved {\nif e.isRegistered {\n- e.stack.UnregisterTransportEndpoint(e.boundNICID, e.effectiveNetProtos, ProtocolNumber, e.ID, e, e.bindToDevice)\n+ e.stack.StartTransportEndpointCleanup(e.boundNICID, e.effectiveNetProtos, ProtocolNumber, e.ID, e, e.bindToDevice)\ne.isRegistered = false\n}\n@@ -747,7 +747,7 @@ func (e *endpoint) cleanupLocked() {\ne.workerCleanup = false\nif e.isRegistered {\n- e.stack.UnregisterTransportEndpoint(e.boundNICID, e.effectiveNetProtos, ProtocolNumber, e.ID, e, e.bindToDevice)\n+ e.stack.StartTransportEndpointCleanup(e.boundNICID, e.effectiveNetProtos, ProtocolNumber, e.ID, e, e.bindToDevice)\ne.isRegistered = false\n}\n@@ -757,6 +757,7 @@ func (e *endpoint) cleanupLocked() {\n}\ne.route.Release()\n+ e.stack.CompleteTransportEndpointCleanup(e)\ntcpip.DeleteDanglingEndpoint(e)\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/endpoint_state.go",
"new_path": "pkg/tcpip/transport/tcp/endpoint_state.go",
"diff": "@@ -193,8 +193,10 @@ func (e *endpoint) Resume(s *stack.Stack) {\nif len(e.BindAddr) == 0 {\ne.BindAddr = e.ID.LocalAddress\n}\n- if err := e.Bind(tcpip.FullAddress{Addr: e.BindAddr, Port: e.ID.LocalPort}); err != nil {\n- panic(\"endpoint binding failed: \" + err.String())\n+ addr := e.BindAddr\n+ port := e.ID.LocalPort\n+ if err := e.Bind(tcpip.FullAddress{Addr: addr, Port: port}); err != nil {\n+ panic(fmt.Sprintf(\"endpoint binding [%v]:%d failed: %v\", addr, port, err))\n}\n}\n@@ -265,6 +267,7 @@ func (e *endpoint) Resume(s *stack.Stack) {\n}\nfallthrough\ncase StateError:\n+ e.stack.CompleteTransportEndpointCleanup(e)\ntcpip.DeleteDanglingEndpoint(e)\n}\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add endpoint tracking to the stack.
In the future this will replace DanglingEndpoints. DanglingEndpoints must be
kept for now due to issues with save/restore.
This is arguably a cleaner design and allows the stack to know which transport
endpoints might still be using its link endpoints.
Updates #837
PiperOrigin-RevId: 277386633 |
259,854 | 29.10.2019 17:21:01 | 25,200 | dc21c5ca16dbc43755185ffdf53764c7bb4c3a12 | Add Close and Wait methods to stack.
Link endpoints still don't have a unified way to be requested to stop.
Updates | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/stack.go",
"new_path": "pkg/tcpip/stack/stack.go",
"diff": "@@ -1210,6 +1210,37 @@ func (s *Stack) RestoreCleanupEndpoints(es []TransportEndpoint) {\ns.mu.Unlock()\n}\n+// Close closes all currently registered transport endpoints.\n+//\n+// Endpoints created or modified during this call may not get closed.\n+func (s *Stack) Close() {\n+ for _, e := range s.RegisteredEndpoints() {\n+ e.Close()\n+ }\n+}\n+\n+// Wait waits for all transport and link endpoints to halt their worker\n+// goroutines.\n+//\n+// Endpoints created or modified during this call may not get waited on.\n+//\n+// Note that link endpoints must be stopped via an implementation specific\n+// mechanism.\n+func (s *Stack) Wait() {\n+ for _, e := range s.RegisteredEndpoints() {\n+ e.Wait()\n+ }\n+ for _, e := range s.CleanupEndpoints() {\n+ e.Wait()\n+ }\n+\n+ s.mu.RLock()\n+ defer s.mu.RUnlock()\n+ for _, n := range s.nics {\n+ n.linkEP.Wait()\n+ }\n+}\n+\n// Resume restarts the stack after a restore. This must be called after the\n// entire system has been restored.\nfunc (s *Stack) Resume() {\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add Close and Wait methods to stack.
Link endpoints still don't have a unified way to be requested to stop.
Updates #837
PiperOrigin-RevId: 277398952 |
259,974 | 30.10.2019 03:06:34 | 0 | 80d0db274ef88f4c53d2d08df52c0f9c58ca53ac | Enable runsc/fsgofer support on arm64.
newfstatat() syscall is not supported on arm64, so we resort
to using the fstatat() syscall. | [
{
"change_type": "MODIFY",
"old_path": "runsc/fsgofer/BUILD",
"new_path": "runsc/fsgofer/BUILD",
"diff": "@@ -6,6 +6,8 @@ go_library(\nname = \"fsgofer\",\nsrcs = [\n\"fsgofer.go\",\n+ \"fsgofer_amd64_unsafe.go\",\n+ \"fsgofer_arm64_unsafe.go\",\n\"fsgofer_unsafe.go\",\n],\nimportpath = \"gvisor.dev/gvisor/runsc/fsgofer\",\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/fsgofer/filter/BUILD",
"new_path": "runsc/fsgofer/filter/BUILD",
"diff": "@@ -6,6 +6,8 @@ go_library(\nname = \"filter\",\nsrcs = [\n\"config.go\",\n+ \"config_amd64.go\",\n+ \"config_arm64.go\",\n\"extra_filters.go\",\n\"extra_filters_msan.go\",\n\"extra_filters_race.go\",\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/fsgofer/filter/config.go",
"new_path": "runsc/fsgofer/filter/config.go",
"diff": "@@ -26,10 +26,6 @@ import (\n// allowedSyscalls is the set of syscalls executed by the gofer.\nvar allowedSyscalls = seccomp.SyscallRules{\nsyscall.SYS_ACCEPT: {},\n- syscall.SYS_ARCH_PRCTL: []seccomp.Rule{\n- {seccomp.AllowValue(linux.ARCH_GET_FS)},\n- {seccomp.AllowValue(linux.ARCH_SET_FS)},\n- },\nsyscall.SYS_CLOCK_GETTIME: {},\nsyscall.SYS_CLONE: []seccomp.Rule{\n{\n@@ -155,7 +151,6 @@ var allowedSyscalls = seccomp.SyscallRules{\nsyscall.SYS_MPROTECT: {},\nsyscall.SYS_MUNMAP: {},\nsyscall.SYS_NANOSLEEP: {},\n- syscall.SYS_NEWFSTATAT: {},\nsyscall.SYS_OPENAT: {},\nsyscall.SYS_PPOLL: {},\nsyscall.SYS_PREAD64: {},\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "runsc/fsgofer/filter/config_amd64.go",
"diff": "+// Copyright 2019 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+// +build amd64\n+\n+package filter\n+\n+import (\n+ \"syscall\"\n+\n+ \"gvisor.dev/gvisor/pkg/abi/linux\"\n+ \"gvisor.dev/gvisor/pkg/seccomp\"\n+)\n+\n+func init() {\n+ allowedSyscalls[syscall.SYS_ARCH_PRCTL] = []seccomp.Rule{\n+ {seccomp.AllowValue(linux.ARCH_GET_FS)},\n+ {seccomp.AllowValue(linux.ARCH_SET_FS)},\n+ }\n+\n+ allowedSyscalls[syscall.SYS_NEWFSTATAT] = []seccomp.Rule{}\n+}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "runsc/fsgofer/filter/config_arm64.go",
"diff": "+// Copyright 2019 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+// +build arm64\n+\n+package filter\n+\n+import (\n+ \"syscall\"\n+\n+ \"gvisor.dev/gvisor/pkg/seccomp\"\n+)\n+\n+func init() {\n+ allowedSyscalls[syscall.SYS_FSTATAT] = []seccomp.Rule{}\n+}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "runsc/fsgofer/fsgofer_amd64_unsafe.go",
"diff": "+// Copyright 2019 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+// +build amd64\n+\n+package fsgofer\n+\n+import (\n+ \"syscall\"\n+ \"unsafe\"\n+\n+ \"gvisor.dev/gvisor/pkg/abi/linux\"\n+ \"gvisor.dev/gvisor/pkg/syserr\"\n+)\n+\n+func statAt(dirFd int, name string) (syscall.Stat_t, error) {\n+ nameBytes, err := syscall.BytePtrFromString(name)\n+ if err != nil {\n+ return syscall.Stat_t{}, err\n+ }\n+ namePtr := unsafe.Pointer(nameBytes)\n+\n+ var stat syscall.Stat_t\n+ statPtr := unsafe.Pointer(&stat)\n+\n+ if _, _, errno := syscall.Syscall6(\n+ syscall.SYS_NEWFSTATAT,\n+ uintptr(dirFd),\n+ uintptr(namePtr),\n+ uintptr(statPtr),\n+ linux.AT_SYMLINK_NOFOLLOW,\n+ 0,\n+ 0); errno != 0 {\n+\n+ return syscall.Stat_t{}, syserr.FromHost(errno).ToError()\n+ }\n+ return stat, nil\n+}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "runsc/fsgofer/fsgofer_arm64_unsafe.go",
"diff": "+// Copyright 2019 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+// +build arm64\n+\n+package fsgofer\n+\n+import (\n+ \"syscall\"\n+ \"unsafe\"\n+\n+ \"gvisor.dev/gvisor/pkg/abi/linux\"\n+ \"gvisor.dev/gvisor/pkg/syserr\"\n+)\n+\n+func statAt(dirFd int, name string) (syscall.Stat_t, error) {\n+ nameBytes, err := syscall.BytePtrFromString(name)\n+ if err != nil {\n+ return syscall.Stat_t{}, err\n+ }\n+ namePtr := unsafe.Pointer(nameBytes)\n+\n+ var stat syscall.Stat_t\n+ statPtr := unsafe.Pointer(&stat)\n+\n+ if _, _, errno := syscall.Syscall6(\n+ syscall.SYS_FSTATAT,\n+ uintptr(dirFd),\n+ uintptr(namePtr),\n+ uintptr(statPtr),\n+ linux.AT_SYMLINK_NOFOLLOW,\n+ 0,\n+ 0); errno != 0 {\n+\n+ return syscall.Stat_t{}, syserr.FromHost(errno).ToError()\n+ }\n+ return stat, nil\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/fsgofer/fsgofer_unsafe.go",
"new_path": "runsc/fsgofer/fsgofer_unsafe.go",
"diff": "@@ -18,34 +18,9 @@ import (\n\"syscall\"\n\"unsafe\"\n- \"gvisor.dev/gvisor/pkg/abi/linux\"\n\"gvisor.dev/gvisor/pkg/syserr\"\n)\n-func statAt(dirFd int, name string) (syscall.Stat_t, error) {\n- nameBytes, err := syscall.BytePtrFromString(name)\n- if err != nil {\n- return syscall.Stat_t{}, err\n- }\n- namePtr := unsafe.Pointer(nameBytes)\n-\n- var stat syscall.Stat_t\n- statPtr := unsafe.Pointer(&stat)\n-\n- if _, _, errno := syscall.Syscall6(\n- syscall.SYS_NEWFSTATAT,\n- uintptr(dirFd),\n- uintptr(namePtr),\n- uintptr(statPtr),\n- linux.AT_SYMLINK_NOFOLLOW,\n- 0,\n- 0); errno != 0 {\n-\n- return syscall.Stat_t{}, syserr.FromHost(errno).ToError()\n- }\n- return stat, nil\n-}\n-\nfunc utimensat(dirFd int, name string, times [2]syscall.Timespec, flags int) error {\n// utimensat(2) doesn't accept empty name, instead name must be nil to make it\n// operate directly on 'dirFd' unlike other *at syscalls.\n"
}
] | Go | Apache License 2.0 | google/gvisor | Enable runsc/fsgofer support on arm64.
newfstatat() syscall is not supported on arm64, so we resort
to use the fstatat() syscall.
Signed-off-by: Haibo Xu <[email protected]>
Change-Id: I9e89d46c5ec9ae07db201c9da5b6dda9bfd2eaf0 |
259,860 | 30.10.2019 13:29:56 | 25,200 | 8bc7b8dba2dcc339ab5bd1b05c83f74a6211a7d0 | Clean up typos in test names. | [
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/socket_ip_tcp_generic.cc",
"new_path": "test/syscalls/linux/socket_ip_tcp_generic.cc",
"diff": "namespace gvisor {\nnamespace testing {\n-TEST_P(TCPSocketPairTest, TcpInfoSucceedes) {\n+TEST_P(TCPSocketPairTest, TcpInfoSucceeds) {\nauto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());\nstruct tcp_info opt = {};\n@@ -39,7 +39,7 @@ TEST_P(TCPSocketPairTest, TcpInfoSucceedes) {\nSyscallSucceeds());\n}\n-TEST_P(TCPSocketPairTest, ShortTcpInfoSucceedes) {\n+TEST_P(TCPSocketPairTest, ShortTcpInfoSucceeds) {\nauto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());\nstruct tcp_info opt = {};\n@@ -48,7 +48,7 @@ TEST_P(TCPSocketPairTest, ShortTcpInfoSucceedes) {\nSyscallSucceeds());\n}\n-TEST_P(TCPSocketPairTest, ZeroTcpInfoSucceedes) {\n+TEST_P(TCPSocketPairTest, ZeroTcpInfoSucceeds) {\nauto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());\nstruct tcp_info opt = {};\n"
}
] | Go | Apache License 2.0 | google/gvisor | Clean up typos in test names.
PiperOrigin-RevId: 277572791 |
259,853 | 30.10.2019 15:32:20 | 25,200 | db37483cb6acf55b66132d534bb734f09555b1cf | Store endpoints inside multiPortEndpoint in a sorted order
It is required to guarantee the same order of endpoints after save/restore. | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/registration.go",
"new_path": "pkg/tcpip/stack/registration.go",
"diff": "@@ -60,6 +60,9 @@ const (\n// TransportEndpoint is the interface that needs to be implemented by transport\n// protocol (e.g., tcp, udp) endpoints that can handle packets.\ntype TransportEndpoint interface {\n+ // UniqueID returns an unique ID for this transport endpoint.\n+ UniqueID() uint64\n+\n// HandlePacket is called by the stack when new packets arrive to\n// this transport endpoint.\nHandlePacket(r *Route, id TransportEndpointID, vv buffer.VectorisedView)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/stack.go",
"new_path": "pkg/tcpip/stack/stack.go",
"diff": "@@ -22,6 +22,7 @@ package stack\nimport (\n\"encoding/binary\"\n\"sync\"\n+ \"sync/atomic\"\n\"time\"\n\"golang.org/x/time/rate\"\n@@ -344,6 +345,13 @@ type ResumableEndpoint interface {\nResume(*Stack)\n}\n+// uniqueIDGenerator is a default unique ID generator.\n+type uniqueIDGenerator uint64\n+\n+func (u *uniqueIDGenerator) UniqueID() uint64 {\n+ return atomic.AddUint64((*uint64)(u), 1)\n+}\n+\n// Stack is a networking stack, with all supported protocols, NICs, and route\n// table.\ntype Stack struct {\n@@ -411,6 +419,14 @@ type Stack struct {\n// ndpDisp is the NDP event dispatcher that is used to send the netstack\n// integrator NDP related events.\nndpDisp NDPDispatcher\n+\n+ // uniqueIDGenerator is a generator of unique identifiers.\n+ uniqueIDGenerator UniqueID\n+}\n+\n+// UniqueID is an abstract generator of unique identifiers.\n+type UniqueID interface {\n+ UniqueID() uint64\n}\n// Options contains optional Stack configuration.\n@@ -434,6 +450,9 @@ type Options struct {\n// stack (false).\nHandleLocal bool\n+ // UniqueID is an optional generator of unique identifiers.\n+ UniqueID UniqueID\n+\n// NDPConfigs is the default NDP configurations used by interfaces.\n//\n// By default, NDPConfigs will have a zero value for its\n@@ -506,6 +525,10 @@ func New(opts Options) *Stack {\nclock = &tcpip.StdClock{}\n}\n+ if opts.UniqueID == nil {\n+ opts.UniqueID = new(uniqueIDGenerator)\n+ }\n+\n// Make sure opts.NDPConfigs contains valid values only.\nopts.NDPConfigs.validate()\n@@ -524,6 +547,7 @@ func New(opts Options) *Stack {\nportSeed: generateRandUint32(),\nndpConfigs: opts.NDPConfigs,\nautoGenIPv6LinkLocal: opts.AutoGenIPv6LinkLocal,\n+ uniqueIDGenerator: opts.UniqueID,\nndpDisp: opts.NDPDisp,\n}\n@@ -551,6 +575,11 @@ func New(opts Options) *Stack {\nreturn s\n}\n+// UniqueID returns a unique identifier.\n+func (s *Stack) UniqueID() uint64 {\n+ return s.uniqueIDGenerator.UniqueID()\n+}\n+\n// SetNetworkProtocolOption allows configuring individual protocol level\n// options. This method returns an error if the protocol is not supported or\n// option is not supported by the protocol implementation or the provided value\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/transport_demuxer.go",
"new_path": "pkg/tcpip/stack/transport_demuxer.go",
"diff": "@@ -17,6 +17,7 @@ package stack\nimport (\n\"fmt\"\n\"math/rand\"\n+ \"sort\"\n\"sync\"\n\"gvisor.dev/gvisor/pkg/tcpip\"\n@@ -310,6 +311,15 @@ func (ep *multiPortEndpoint) singleRegisterEndpoint(t TransportEndpoint, reusePo\n// endpointsMap. This will allow us to remove endpoint from the array fast.\nep.endpointsMap[t] = len(ep.endpointsArr)\nep.endpointsArr = append(ep.endpointsArr, t)\n+\n+ // ep.endpointsArr is sorted by endpoint unique IDs, so that endpoints\n+ // can be restored in the same order.\n+ sort.Slice(ep.endpointsArr, func(i, j int) bool {\n+ return ep.endpointsArr[i].UniqueID() < ep.endpointsArr[j].UniqueID()\n+ })\n+ for i, e := range ep.endpointsArr {\n+ ep.endpointsMap[e] = i\n+ }\nreturn nil\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/transport_test.go",
"new_path": "pkg/tcpip/stack/transport_test.go",
"diff": "@@ -43,6 +43,7 @@ type fakeTransportEndpoint struct {\nproto *fakeTransportProtocol\npeerAddr tcpip.Address\nroute stack.Route\n+ uniqueID uint64\n// acceptQueue is non-nil iff bound.\nacceptQueue []fakeTransportEndpoint\n@@ -56,8 +57,8 @@ func (f *fakeTransportEndpoint) Stats() tcpip.EndpointStats {\nreturn nil\n}\n-func newFakeTransportEndpoint(s *stack.Stack, proto *fakeTransportProtocol, netProto tcpip.NetworkProtocolNumber) tcpip.Endpoint {\n- return &fakeTransportEndpoint{stack: s, TransportEndpointInfo: stack.TransportEndpointInfo{NetProto: netProto}, proto: proto}\n+func newFakeTransportEndpoint(s *stack.Stack, proto *fakeTransportProtocol, netProto tcpip.NetworkProtocolNumber, uniqueID uint64) tcpip.Endpoint {\n+ return &fakeTransportEndpoint{stack: s, TransportEndpointInfo: stack.TransportEndpointInfo{NetProto: netProto}, proto: proto, uniqueID: uniqueID}\n}\nfunc (f *fakeTransportEndpoint) Close() {\n@@ -144,6 +145,10 @@ func (f *fakeTransportEndpoint) Connect(addr tcpip.FullAddress) *tcpip.Error {\nreturn nil\n}\n+func (f *fakeTransportEndpoint) UniqueID() uint64 {\n+ return f.uniqueID\n+}\n+\nfunc (f *fakeTransportEndpoint) ConnectEndpoint(e tcpip.Endpoint) *tcpip.Error {\nreturn nil\n}\n@@ -251,7 +256,7 @@ func (*fakeTransportProtocol) Number() tcpip.TransportProtocolNumber {\n}\nfunc (f *fakeTransportProtocol) NewEndpoint(stack *stack.Stack, netProto tcpip.NetworkProtocolNumber, _ *waiter.Queue) (tcpip.Endpoint, *tcpip.Error) {\n- return newFakeTransportEndpoint(stack, f, netProto), nil\n+ return newFakeTransportEndpoint(stack, f, netProto, stack.UniqueID()), nil\n}\nfunc (f *fakeTransportProtocol) NewRawEndpoint(stack *stack.Stack, netProto tcpip.NetworkProtocolNumber, _ *waiter.Queue) (tcpip.Endpoint, *tcpip.Error) {\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/icmp/endpoint.go",
"new_path": "pkg/tcpip/transport/icmp/endpoint.go",
"diff": "@@ -58,6 +58,7 @@ type endpoint struct {\n// immutable.\nstack *stack.Stack `state:\"manual\"`\nwaiterQueue *waiter.Queue\n+ uniqueID uint64\n// The following fields are used to manage the receive queue, and are\n// protected by rcvMu.\n@@ -90,9 +91,15 @@ func newEndpoint(s *stack.Stack, netProto tcpip.NetworkProtocolNumber, transProt\nrcvBufSizeMax: 32 * 1024,\nsndBufSize: 32 * 1024,\nstate: stateInitial,\n+ uniqueID: s.UniqueID(),\n}, nil\n}\n+// UniqueID implements stack.TransportEndpoint.UniqueID.\n+func (e *endpoint) UniqueID() uint64 {\n+ return e.uniqueID\n+}\n+\n// Close puts the endpoint in a closed state and frees all resources\n// associated with it.\nfunc (e *endpoint) Close() {\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/endpoint.go",
"new_path": "pkg/tcpip/transport/tcp/endpoint.go",
"diff": "@@ -287,6 +287,7 @@ type endpoint struct {\n// change throughout the lifetime of the endpoint.\nstack *stack.Stack `state:\"manual\"`\nwaiterQueue *waiter.Queue `state:\"wait\"`\n+ uniqueID uint64\n// lastError represents the last error that the endpoint reported;\n// access to it is protected by the following mutex.\n@@ -504,6 +505,11 @@ type endpoint struct {\nstats Stats `state:\"nosave\"`\n}\n+// UniqueID implements stack.TransportEndpoint.UniqueID.\n+func (e *endpoint) UniqueID() uint64 {\n+ return e.uniqueID\n+}\n+\n// calculateAdvertisedMSS calculates the MSS to advertise.\n//\n// If userMSS is non-zero and is not greater than the maximum possible MSS for\n@@ -565,6 +571,7 @@ func newEndpoint(s *stack.Stack, netProto tcpip.NetworkProtocolNumber, waiterQue\ninterval: 75 * time.Second,\ncount: 9,\n},\n+ uniqueID: s.UniqueID(),\n}\nvar ss SendBufferSizeOption\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/udp/endpoint.go",
"new_path": "pkg/tcpip/transport/udp/endpoint.go",
"diff": "@@ -80,6 +80,7 @@ type endpoint struct {\n// change throughout the lifetime of the endpoint.\nstack *stack.Stack `state:\"manual\"`\nwaiterQueue *waiter.Queue\n+ uniqueID uint64\n// The following fields are used to manage the receive queue, and are\n// protected by rcvMu.\n@@ -160,9 +161,15 @@ func newEndpoint(s *stack.Stack, netProto tcpip.NetworkProtocolNumber, waiterQue\nrcvBufSizeMax: 32 * 1024,\nsndBufSize: 32 * 1024,\nstate: StateInitial,\n+ uniqueID: s.UniqueID(),\n}\n}\n+// UniqueID implements stack.TransportEndpoint.UniqueID.\n+func (e *endpoint) UniqueID() uint64 {\n+ return e.uniqueID\n+}\n+\n// Close puts the endpoint in a closed state and frees all resources\n// associated with it.\nfunc (e *endpoint) Close() {\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/boot/loader.go",
"new_path": "runsc/boot/loader.go",
"diff": "@@ -232,7 +232,7 @@ func New(args Args) (*Loader, error) {\n// this point. Netns is configured before Run() is called. Netstack is\n// configured using a control uRPC message. Host network is configured inside\n// Run().\n- networkStack, err := newEmptyNetworkStack(args.Conf, k)\n+ networkStack, err := newEmptyNetworkStack(args.Conf, k, k)\nif err != nil {\nreturn nil, fmt.Errorf(\"creating network: %v\", err)\n}\n@@ -905,7 +905,7 @@ func (l *Loader) WaitExit() kernel.ExitStatus {\nreturn l.k.GlobalInit().ExitStatus()\n}\n-func newEmptyNetworkStack(conf *Config, clock tcpip.Clock) (inet.Stack, error) {\n+func newEmptyNetworkStack(conf *Config, clock tcpip.Clock, uniqueID stack.UniqueID) (inet.Stack, error) {\nswitch conf.Network {\ncase NetworkHost:\nreturn hostinet.NewStack(), nil\n@@ -923,6 +923,7 @@ func newEmptyNetworkStack(conf *Config, clock tcpip.Clock) (inet.Stack, error) {\n// Enable raw sockets for users with sufficient\n// privileges.\nRawFactory: raw.EndpointFactory{},\n+ UniqueID: uniqueID,\n})}\n// Enable SACK Recovery.\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/socket_inet_loopback.cc",
"new_path": "test/syscalls/linux/socket_inet_loopback.cc",
"diff": "#include <netinet/in.h>\n#include <poll.h>\n#include <string.h>\n+#include <sys/epoll.h>\n#include <sys/socket.h>\n#include <atomic>\n@@ -516,6 +517,112 @@ TEST_P(SocketInetReusePortTest, UdpPortReuseMultiThread) {\nEquivalentWithin((kConnectAttempts / kThreadCount), 0.10));\n}\n+TEST_P(SocketInetReusePortTest, UdpPortReuseMultiThreadShort) {\n+ auto const& param = GetParam();\n+\n+ TestAddress const& listener = param.listener;\n+ TestAddress const& connector = param.connector;\n+ sockaddr_storage listen_addr = listener.addr;\n+ sockaddr_storage conn_addr = connector.addr;\n+ constexpr int kThreadCount = 3;\n+\n+ // TODO(b/141211329): endpointsByNic.seed has to be saved/restored.\n+ const DisableSave ds141211329;\n+\n+ // Create listening sockets.\n+ FileDescriptor listener_fds[kThreadCount];\n+ for (int i = 0; i < kThreadCount; i++) {\n+ listener_fds[i] =\n+ ASSERT_NO_ERRNO_AND_VALUE(Socket(listener.family(), SOCK_DGRAM, 0));\n+ int fd = listener_fds[i].get();\n+\n+ ASSERT_THAT(setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &kSockOptOn,\n+ sizeof(kSockOptOn)),\n+ SyscallSucceeds());\n+ ASSERT_THAT(\n+ bind(fd, reinterpret_cast<sockaddr*>(&listen_addr), listener.addr_len),\n+ SyscallSucceeds());\n+\n+ // On the first bind we need to determine which port was bound.\n+ if (i != 0) {\n+ continue;\n+ }\n+\n+ // Get the port bound by the listening socket.\n+ socklen_t addrlen = listener.addr_len;\n+ ASSERT_THAT(\n+ getsockname(listener_fds[0].get(),\n+ reinterpret_cast<sockaddr*>(&listen_addr), &addrlen),\n+ SyscallSucceeds());\n+ uint16_t const port =\n+ ASSERT_NO_ERRNO_AND_VALUE(AddrPort(listener.family(), listen_addr));\n+ ASSERT_NO_ERRNO(SetAddrPort(listener.family(), &listen_addr, port));\n+ ASSERT_NO_ERRNO(SetAddrPort(connector.family(), &conn_addr, port));\n+ }\n+\n+ constexpr int kConnectAttempts = 10;\n+ FileDescriptor client_fds[kConnectAttempts];\n+\n+ // Do the first run without save/restore.\n+ DisableSave ds;\n+ for (int i = 0; i < kConnectAttempts; i++) {\n+ client_fds[i] =\n+ ASSERT_NO_ERRNO_AND_VALUE(Socket(connector.family(), SOCK_DGRAM, 0));\n+ EXPECT_THAT(RetryEINTR(sendto)(client_fds[i].get(), &i, sizeof(i), 0,\n+ reinterpret_cast<sockaddr*>(&conn_addr),\n+ connector.addr_len),\n+ SyscallSucceedsWithValue(sizeof(i)));\n+ }\n+ ds.reset();\n+\n+ // Check that a mapping of client and server sockets has\n+ // not been change after save/restore.\n+ for (int i = 0; i < kConnectAttempts; i++) {\n+ EXPECT_THAT(RetryEINTR(sendto)(client_fds[i].get(), &i, sizeof(i), 0,\n+ reinterpret_cast<sockaddr*>(&conn_addr),\n+ connector.addr_len),\n+ SyscallSucceedsWithValue(sizeof(i)));\n+ }\n+\n+ int epollfd;\n+ ASSERT_THAT(epollfd = epoll_create1(0), SyscallSucceeds());\n+\n+ for (int i = 0; i < kThreadCount; i++) {\n+ int fd = listener_fds[i].get();\n+ struct epoll_event ev;\n+ ev.data.fd = fd;\n+ ev.events = EPOLLIN;\n+ ASSERT_THAT(epoll_ctl(epollfd, EPOLL_CTL_ADD, fd, &ev), SyscallSucceeds());\n+ }\n+\n+ std::map<uint16_t, int> portToFD;\n+\n+ for (int i = 0; i < kConnectAttempts * 2; i++) {\n+ struct sockaddr_storage addr = {};\n+ socklen_t addrlen = sizeof(addr);\n+ struct epoll_event ev;\n+ int data, fd;\n+\n+ ASSERT_THAT(epoll_wait(epollfd, &ev, 1, -1), SyscallSucceedsWithValue(1));\n+\n+ fd = ev.data.fd;\n+ EXPECT_THAT(RetryEINTR(recvfrom)(fd, &data, sizeof(data), 0,\n+ reinterpret_cast<struct sockaddr*>(&addr),\n+ &addrlen),\n+ SyscallSucceedsWithValue(sizeof(data)));\n+ uint16_t const port =\n+ ASSERT_NO_ERRNO_AND_VALUE(AddrPort(connector.family(), addr));\n+ auto prev_port = 
portToFD.find(port);\n+ // Check that all packets from one client have been delivered to the same\n+ // server socket.\n+ if (prev_port == portToFD.end()) {\n+ portToFD[port] = ev.data.fd;\n+ } else {\n+ EXPECT_EQ(portToFD[port], ev.data.fd);\n+ }\n+ }\n+}\n+\nINSTANTIATE_TEST_SUITE_P(\nAll, SocketInetReusePortTest,\n::testing::Values(\n"
}
] | Go | Apache License 2.0 | google/gvisor | Store endpoints inside multiPortEndpoint in a sorted order
It is required to guarantee the same order of endpoints after save/restore.
PiperOrigin-RevId: 277598665 |
259,847 | 30.10.2019 16:14:30 | 25,200 | df125c986948fbbae2bc30de33213e2095762a86 | Add Kokoro config for new runtime tests | [
{
"change_type": "ADD",
"old_path": null,
"new_path": "kokoro/runtime_tests.cfg",
"diff": "+build_file: \"repo/scripts/runtime_tests.sh\"\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "scripts/runtime_tests.sh",
"diff": "+#!/bin/bash\n+\n+# Copyright 2019 The gVisor Authors.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+source $(dirname $0)/common.sh\n+\n+if [ ! -v RUNTIME ]; then\n+ echo 'Must set $RUNTIME' >&2\n+ exit 1\n+fi\n+\n+install_runsc_for_test runtimes\n+test_runsc \"//test/runtimes:${RUNTIME}_test\"\n"
},
{
"change_type": "MODIFY",
"old_path": "test/runtimes/BUILD",
"new_path": "test/runtimes/BUILD",
"diff": "@@ -16,32 +16,32 @@ go_binary(\n)\nruntime_test(\n+ name = \"go1.12\",\nblacklist_file = \"blacklist_go1.12.csv\",\n- image = \"gcr.io/gvisor-presubmit/go1.12\",\nlang = \"go\",\n)\nruntime_test(\n+ name = \"java11\",\nblacklist_file = \"blacklist_java11.csv\",\n- image = \"gcr.io/gvisor-presubmit/java11\",\nlang = \"java\",\n)\nruntime_test(\n+ name = \"nodejs12.4.0\",\nblacklist_file = \"blacklist_nodejs12.4.0.csv\",\n- image = \"gcr.io/gvisor-presubmit/nodejs12.4.0\",\nlang = \"nodejs\",\n)\nruntime_test(\n+ name = \"php7.3.6\",\nblacklist_file = \"blacklist_php7.3.6.csv\",\n- image = \"gcr.io/gvisor-presubmit/php7.3.6\",\nlang = \"php\",\n)\nruntime_test(\n+ name = \"python3.7.3\",\nblacklist_file = \"blacklist_python3.7.3.csv\",\n- image = \"gcr.io/gvisor-presubmit/python3.7.3\",\nlang = \"python\",\n)\n"
},
{
"change_type": "MODIFY",
"old_path": "test/runtimes/build_defs.bzl",
"new_path": "test/runtimes/build_defs.bzl",
"diff": "load(\"@io_bazel_rules_go//go:def.bzl\", \"go_test\")\n-# runtime_test is a macro that will create targets to run the given test target\n-# with different runtime options.\ndef runtime_test(\n+ name,\nlang,\n- image,\n+ image_repo = \"gcr.io/gvisor-presubmit\",\n+ image_name = None,\n+ blacklist_file = None,\nshard_count = 50,\n- size = \"enormous\",\n- blacklist_file = \"\"):\n+ size = \"enormous\"):\n+ \"\"\"Generates sh_test and blacklist test targets for a given runtime.\n+\n+ Args:\n+ name: The name of the runtime being tested. Typically, the lang + version.\n+ This is used in the names of the generated test targets.\n+ lang: The language being tested.\n+ image_repo: The docker repository containing the proctor image to run.\n+ i.e., the prefix to the fully qualified docker image id.\n+ image_name: The name of the image in the image_repo.\n+ Defaults to the test name.\n+ blacklist_file: A test blacklist to pass to the runtime test's runner.\n+ shard_count: See Bazel common test attributes.\n+ size: See Bazel common test attributes.\n+ \"\"\"\n+ if image_name == None:\n+ image_name = name\nargs = [\n\"--lang\",\nlang,\n\"--image\",\n- image,\n+ \"/\".join([image_repo, image_name]),\n]\ndata = [\n\":runner\",\n]\n- if blacklist_file != \"\":\n+ if blacklist_file:\nargs += [\"--blacklist_file\", \"test/runtimes/\" + blacklist_file]\ndata += [blacklist_file]\n# Add a test that the blacklist parses correctly.\n- blacklist_test(lang, blacklist_file)\n+ blacklist_test(name, blacklist_file)\nsh_test(\n- name = lang + \"_test\",\n+ name = name + \"_test\",\nsrcs = [\"runner.sh\"],\nargs = args,\ndata = data,\n@@ -35,15 +51,14 @@ def runtime_test(\nshard_count = shard_count,\ntags = [\n# Requires docker and runsc to be configured before the test runs.\n- \"manual\",\n\"local\",\n],\n)\n-def blacklist_test(lang, blacklist_file):\n+def blacklist_test(name, blacklist_file):\n\"\"\"Test that a blacklist parses correctly.\"\"\"\ngo_test(\n- name = lang + \"_blacklist_test\",\n+ name = name + \"_blacklist_test\",\nembed = [\":runner\"],\nsrcs = [\"blacklist_test.go\"],\nargs = [\"--blacklist_file\", \"test/runtimes/\" + blacklist_file],\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add Kokoro config for new runtime tests
PiperOrigin-RevId: 277607217 |
259,891 | 30.10.2019 17:00:29 | 25,200 | 3246040447c6d0a08cc12c5721480c06f77f5dfe | Deep copy dispatcher views.
When VectorisedViews were passed up the stack from packet_dispatchers, we were
passing a sub-slice of the dispatcher's views fields. The dispatchers then
immediately set those views to nil.
This wasn't caught before because every implementer copied the data in these
views before returning. | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/fdbased/endpoint_test.go",
"new_path": "pkg/tcpip/link/fdbased/endpoint_test.go",
"diff": "@@ -45,7 +45,7 @@ const (\ntype packetInfo struct {\nraddr tcpip.LinkAddress\nproto tcpip.NetworkProtocolNumber\n- contents buffer.View\n+ contents buffer.VectorisedView\nlinkHeader buffer.View\n}\n@@ -94,7 +94,7 @@ func (c *context) cleanup() {\n}\nfunc (c *context) DeliverNetworkPacket(linkEP stack.LinkEndpoint, remote tcpip.LinkAddress, local tcpip.LinkAddress, protocol tcpip.NetworkProtocolNumber, vv buffer.VectorisedView, linkHeader buffer.View) {\n- c.ch <- packetInfo{remote, protocol, vv.ToView(), linkHeader}\n+ c.ch <- packetInfo{remote, protocol, vv, linkHeader}\n}\nfunc TestNoEthernetProperties(t *testing.T) {\n@@ -319,13 +319,17 @@ func TestDeliverPacket(t *testing.T) {\nwant := packetInfo{\nraddr: raddr,\nproto: proto,\n- contents: b,\n+ contents: buffer.View(b).ToVectorisedView(),\nlinkHeader: buffer.View(hdr),\n}\nif !eth {\nwant.proto = header.IPv4ProtocolNumber\nwant.raddr = \"\"\n}\n+ // want.contents will be a single view,\n+ // so make pi do the same for the\n+ // DeepEqual check.\n+ pi.contents = pi.contents.ToView().ToVectorisedView()\nif !reflect.DeepEqual(want, pi) {\nt.Fatalf(\"Unexpected received packet: %+v, want %+v\", pi, want)\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/fdbased/packet_dispatchers.go",
"new_path": "pkg/tcpip/link/fdbased/packet_dispatchers.go",
"diff": "@@ -139,7 +139,7 @@ func (d *readVDispatcher) dispatch() (bool, *tcpip.Error) {\n}\nused := d.capViews(n, BufConfig)\n- vv := buffer.NewVectorisedView(n, d.views[:used])\n+ vv := buffer.NewVectorisedView(n, append([]buffer.View(nil), d.views[:used]...))\nvv.TrimFront(d.e.hdrSize)\nd.e.dispatcher.DeliverNetworkPacket(d.e, remote, local, p, vv, buffer.View(eth))\n@@ -293,7 +293,7 @@ func (d *recvMMsgDispatcher) dispatch() (bool, *tcpip.Error) {\n}\nused := d.capViews(k, int(n), BufConfig)\n- vv := buffer.NewVectorisedView(int(n), d.views[k][:used])\n+ vv := buffer.NewVectorisedView(int(n), append([]buffer.View(nil), d.views[k][:used]...))\nvv.TrimFront(d.e.hdrSize)\nd.e.dispatcher.DeliverNetworkPacket(d.e, remote, local, p, vv, buffer.View(eth))\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/nic.go",
"new_path": "pkg/tcpip/stack/nic.go",
"diff": "@@ -762,7 +762,7 @@ func (n *NIC) DeliverNetworkPacket(linkEP LinkEndpoint, remote, local tcpip.Link\n}\nn.mu.RUnlock()\nfor _, ep := range packetEPs {\n- ep.HandlePacket(n.id, local, protocol, vv, linkHeader)\n+ ep.HandlePacket(n.id, local, protocol, vv.Clone(nil), linkHeader)\n}\nif netProto.Number() == header.IPv4ProtocolNumber || netProto.Number() == header.IPv6ProtocolNumber {\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/registration.go",
"new_path": "pkg/tcpip/stack/registration.go",
"diff": "@@ -65,10 +65,14 @@ type TransportEndpoint interface {\n// HandlePacket is called by the stack when new packets arrive to\n// this transport endpoint.\n+ //\n+ // HandlePacket takes ownership of vv.\nHandlePacket(r *Route, id TransportEndpointID, vv buffer.VectorisedView)\n// HandleControlPacket is called by the stack when new control (e.g.,\n// ICMP) packets arrive to this transport endpoint.\n+ //\n+ // HandleControlPacket takes ownership of vv.\nHandleControlPacket(id TransportEndpointID, typ ControlType, extra uint32, vv buffer.VectorisedView)\n// Close puts the endpoint in a closed state and frees all resources\n@@ -94,6 +98,8 @@ type RawTransportEndpoint interface {\n// HandlePacket is called by the stack when new packets arrive to\n// this transport endpoint. The packet contains all data from the link\n// layer up.\n+ //\n+ // HandlePacket takes ownership of packet and netHeader.\nHandlePacket(r *Route, netHeader buffer.View, packet buffer.VectorisedView)\n}\n@@ -110,6 +116,8 @@ type PacketEndpoint interface {\n//\n// linkHeader may have a length of 0, in which case the PacketEndpoint\n// should construct its own ethernet header for applications.\n+ //\n+ // HandlePacket takes ownership of packet and linkHeader.\nHandlePacket(nicid tcpip.NICID, addr tcpip.LinkAddress, netProto tcpip.NetworkProtocolNumber, packet buffer.VectorisedView, linkHeader buffer.View)\n}\n@@ -160,10 +168,14 @@ type TransportDispatcher interface {\n// DeliverTransportPacket delivers packets to the appropriate\n// transport protocol endpoint. It also returns the network layer\n// header for the enpoint to inspect or pass up the stack.\n+ //\n+ // DeliverTransportPacket takes ownership of vv and netHeader.\nDeliverTransportPacket(r *Route, protocol tcpip.TransportProtocolNumber, netHeader buffer.View, vv buffer.VectorisedView)\n// DeliverTransportControlPacket delivers control packets to the\n// appropriate transport protocol endpoint.\n+ //\n+ // DeliverTransportControlPacket takes ownership of vv.\nDeliverTransportControlPacket(local, remote tcpip.Address, net tcpip.NetworkProtocolNumber, trans tcpip.TransportProtocolNumber, typ ControlType, extra uint32, vv buffer.VectorisedView)\n}\n@@ -237,6 +249,8 @@ type NetworkEndpoint interface {\n// HandlePacket is called by the link layer when new packets arrive to\n// this network endpoint.\n+ //\n+ // HandlePacket takes ownership of vv.\nHandlePacket(r *Route, vv buffer.VectorisedView)\n// Close is called when the endpoint is reomved from a stack.\n@@ -282,6 +296,8 @@ type NetworkDispatcher interface {\n// DeliverNetworkPacket finds the appropriate network protocol endpoint\n// and hands the packet over for further processing. linkHeader may have\n// length 0 when the caller does not have ethernet data.\n+ //\n+ // DeliverNetworkPacket takes ownership of vv and linkHeader.\nDeliverNetworkPacket(linkEP LinkEndpoint, remote, local tcpip.LinkAddress, protocol tcpip.NetworkProtocolNumber, vv buffer.VectorisedView, linkHeader buffer.View)\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/icmp/endpoint.go",
"new_path": "pkg/tcpip/transport/icmp/endpoint.go",
"diff": "@@ -31,9 +31,6 @@ type icmpPacket struct {\nsenderAddress tcpip.FullAddress\ndata buffer.VectorisedView `state:\".(buffer.VectorisedView)\"`\ntimestamp int64\n- // views is used as buffer for data when its length is large\n- // enough to store a VectorisedView.\n- views [8]buffer.View `state:\"nosave\"`\n}\ntype endpointState int\n@@ -767,7 +764,7 @@ func (e *endpoint) HandlePacket(r *stack.Route, id stack.TransportEndpointID, vv\n},\n}\n- pkt.data = vv.Clone(pkt.views[:])\n+ pkt.data = vv\ne.rcvList.PushBack(pkt)\ne.rcvBufSize += pkt.data.Size()\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/packet/endpoint.go",
"new_path": "pkg/tcpip/transport/packet/endpoint.go",
"diff": "@@ -41,10 +41,6 @@ type packet struct {\n// data holds the actual packet data, including any headers and\n// payload.\ndata buffer.VectorisedView `state:\".(buffer.VectorisedView)\"`\n- // views is pre-allocated space to back data. As long as the packet is\n- // made up of fewer than 8 buffer.Views, no extra allocation is\n- // necessary to store packet data.\n- views [8]buffer.View `state:\"nosave\"`\n// timestampNS is the unix time at which the packet was received.\ntimestampNS int64\n// senderAddr is the network address of the sender.\n@@ -310,7 +306,7 @@ func (ep *endpoint) HandlePacket(nicid tcpip.NICID, localAddr tcpip.LinkAddress,\nif ep.cooked {\n// Cooked packets can simply be queued.\n- packet.data = vv.Clone(packet.views[:])\n+ packet.data = vv\n} else {\n// Raw packets need their ethernet headers prepended before\n// queueing.\n@@ -328,7 +324,7 @@ func (ep *endpoint) HandlePacket(nicid tcpip.NICID, localAddr tcpip.LinkAddress,\n}\ncombinedVV := buffer.View(ethHeader).ToVectorisedView()\ncombinedVV.Append(vv)\n- packet.data = combinedVV.Clone(packet.views[:])\n+ packet.data = combinedVV\n}\npacket.timestampNS = ep.stack.NowNanoseconds()\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/raw/endpoint.go",
"new_path": "pkg/tcpip/transport/raw/endpoint.go",
"diff": "@@ -42,10 +42,6 @@ type rawPacket struct {\n// data holds the actual packet data, including any headers and\n// payload.\ndata buffer.VectorisedView `state:\".(buffer.VectorisedView)\"`\n- // views is pre-allocated space to back data. As long as the packet is\n- // made up of fewer than 8 buffer.Views, no extra allocation is\n- // necessary to store packet data.\n- views [8]buffer.View `state:\"nosave\"`\n// timestampNS is the unix time at which the packet was received.\ntimestampNS int64\n// senderAddr is the network address of the sender.\n@@ -609,7 +605,7 @@ func (e *endpoint) HandlePacket(route *stack.Route, netHeader buffer.View, vv bu\ncombinedVV := netHeader.ToVectorisedView()\ncombinedVV.Append(vv)\n- pkt.data = combinedVV.Clone(pkt.views[:])\n+ pkt.data = combinedVV\npkt.timestampNS = e.stack.NowNanoseconds()\ne.rcvList.PushBack(pkt)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/udp/endpoint.go",
"new_path": "pkg/tcpip/transport/udp/endpoint.go",
"diff": "@@ -31,9 +31,6 @@ type udpPacket struct {\nsenderAddress tcpip.FullAddress\ndata buffer.VectorisedView `state:\".(buffer.VectorisedView)\"`\ntimestamp int64\n- // views is used as buffer for data when its length is large\n- // enough to store a VectorisedView.\n- views [8]buffer.View `state:\"nosave\"`\n}\n// EndpointState represents the state of a UDP endpoint.\n@@ -1202,7 +1199,7 @@ func (e *endpoint) HandlePacket(r *stack.Route, id stack.TransportEndpointID, vv\nPort: hdr.SourcePort(),\n},\n}\n- pkt.data = vv.Clone(pkt.views[:])\n+ pkt.data = vv\ne.rcvList.PushBack(pkt)\ne.rcvBufSize += vv.Size()\n"
}
] | Go | Apache License 2.0 | google/gvisor | Deep copy dispatcher views.
When VectorisedViews were passed up the stack from packet_dispatchers, we were
passing a sub-slice of the dispatcher's views fields. The dispatchers then
immediately set those views to nil.
This wasn't caught before because every implementer copied the data in these
views before returning.
PiperOrigin-RevId: 277615351 |
259,847 | 31.10.2019 11:25:19 | 25,200 | 7dcfcd53e4f3f0e1384ac42eacf2622a57d1b37c | Fix overloaded use of $RUNTIME.
Turns out we use $RUNTIME in scripts/common.sh to give a name to the runsc
runtime used by the tests. | [
{
"change_type": "MODIFY",
"old_path": "scripts/runtime_tests.sh",
"new_path": "scripts/runtime_tests.sh",
"diff": "source $(dirname $0)/common.sh\n-if [ ! -v RUNTIME ]; then\n- echo 'Must set $RUNTIME' >&2\n+if [ ! -v RUNTIME_TEST_NAME ]; then\n+ echo 'Must set $RUNTIME_TEST_NAME' >&2\nexit 1\nfi\ninstall_runsc_for_test runtimes\n-test_runsc \"//test/runtimes:${RUNTIME}_test\"\n+test_runsc \"//test/runtimes:${RUNTIME_TEST_NAME}_test\"\n"
}
] | Go | Apache License 2.0 | google/gvisor | Fix overloaded use of $RUNTIME.
Turns out we use $RUNTIME in scripts/common.sh to give a name to the runsc
runtime used by the tests.
PiperOrigin-RevId: 277764383 |
259,853 | 31.10.2019 12:27:46 | 25,200 | f7dbddaf77a6059c2f5a441d068a39219fe593bd | platform/kvm: calll sigtimedwait with zero timeout
sigtimedwait is used to check pending signals and
it should not block. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/kvm/bluepill_unsafe.go",
"new_path": "pkg/sentry/platform/kvm/bluepill_unsafe.go",
"diff": "@@ -80,13 +80,17 @@ func bluepillHandler(context unsafe.Pointer) {\n// interrupted KVM. Since we're in a signal handler\n// currently, all signals are masked and the signal\n// must have been delivered directly to this thread.\n+ timeout := syscall.Timespec{}\nsig, _, errno := syscall.RawSyscall6(\nsyscall.SYS_RT_SIGTIMEDWAIT,\nuintptr(unsafe.Pointer(&bounceSignalMask)),\n0, // siginfo.\n- 0, // timeout.\n+ uintptr(unsafe.Pointer(&timeout)), // timeout.\n8, // sigset size.\n0, 0)\n+ if errno == syscall.EAGAIN {\n+ continue\n+ }\nif errno != 0 {\nthrow(\"error waiting for pending signal\")\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | platform/kvm: calll sigtimedwait with zero timeout
sigtimedwait is used to check pending signals and
it should not block.
PiperOrigin-RevId: 277777269 |
259,858 | 31.10.2019 12:51:50 | 25,200 | fe2e0764ac600fe19a3d87069a58d7463a5223ab | Add LICENSE and AUTHORS to the go branch.
Also, construct the README directly so that edits can be made. | [
{
"change_type": "MODIFY",
"old_path": "tools/go_branch.sh",
"new_path": "tools/go_branch.sh",
"diff": "set -eo pipefail\n# Discovery the package name from the go.mod file.\n-declare -r gomod=\"$(pwd)/go.mod\"\n-declare -r module=$(cat \"${gomod}\" | grep -E \"^module\" | cut -d' ' -f2)\n-declare -r gosum=\"$(pwd)/go.sum\"\n+declare -r module=$(cat go.mod | grep -E \"^module\" | cut -d' ' -f2)\n+declare -r origpwd=$(pwd)\n+declare -r othersrc=(\"go.mod\" \"go.sum\" \"AUTHORS\" \"LICENSE\")\n# Check that gopath has been built.\ndeclare -r gopath_dir=\"$(pwd)/bazel-bin/gopath/src/${module}\"\n@@ -65,10 +65,22 @@ git checkout -b go \"${go_branch}\"\ngit merge --no-commit --strategy ours ${head} || \\\ngit merge --allow-unrelated-histories --no-commit --strategy ours ${head}\n-# Sync the entire gopath_dir and go.mod.\n-rsync --recursive --verbose --delete --exclude .git --exclude README.md -L \"${gopath_dir}/\" .\n-cp \"${gomod}\" .\n-cp \"${gosum}\" .\n+# Sync the entire gopath_dir.\n+rsync --recursive --verbose --delete --exclude .git -L \"${gopath_dir}/\" .\n+\n+# Add additional files.\n+for file in \"${othersrc[@]}\"; do\n+ cp \"${origpwd}\"/\"${file}\" .\n+done\n+\n+# Construct a new README.md.\n+cat > README.md <<EOF\n+# gVisor\n+\n+This branch is a synthetic branch, containing only Go sources, that is\n+compatible with standard Go tools. See the `master` branch for authoritative\n+sources and tests.\n+EOF\n# There are a few solitary files that can get left behind due to the way bazel\n# constructs the gopath target. Note that we don't find all Go files here\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add LICENSE and AUTHORS to the go branch.
Also, construct the README directly so that edits can be made.
PiperOrigin-RevId: 277782095 |
259,884 | 31.10.2019 17:37:54 | 25,200 | 36837c4ad3f3c840791379db81d02b60d918c0f5 | Add systemd-cgroup flag option.
Adds a systemd-cgroup flag option that prints an error letting the user know
that systemd cgroups are not supported and points them to the relevant issue.
Issue | [
{
"change_type": "MODIFY",
"old_path": "runsc/main.go",
"new_path": "runsc/main.go",
"diff": "@@ -46,6 +46,8 @@ var (\nlogFormat = flag.String(\"log-format\", \"text\", \"log format: text (default), json, or json-k8s.\")\ndebug = flag.Bool(\"debug\", false, \"enable debug logging.\")\nshowVersion = flag.Bool(\"version\", false, \"show version and exit.\")\n+ // TODO(gvisor.dev/issue/193): support systemd cgroups\n+ systemdCgroup = flag.Bool(\"systemd-cgroup\", false, \"Use systemd for cgroups. NOT SUPPORTED.\")\n// These flags are unique to runsc, and are used to configure parts of the\n// system that are not covered by the runtime spec.\n@@ -136,6 +138,12 @@ func main() {\nos.Exit(0)\n}\n+ // TODO(gvisor.dev/issue/193): support systemd cgroups\n+ if *systemdCgroup {\n+ fmt.Fprintln(os.Stderr, \"systemd cgroup flag passed, but systemd cgroups not supported. See gvisor.dev/issue/193\")\n+ os.Exit(1)\n+ }\n+\nvar errorLogger io.Writer\nif *logFD > -1 {\nerrorLogger = os.NewFile(uintptr(*logFD), \"error log file\")\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add systemd-cgroup flag option.
Adds a systemd-cgroup flag option that prints an error letting the user know
that systemd cgroups are not supported and points them to the relevant issue.
Issue #193
PiperOrigin-RevId: 277837162 |
259,858 | 31.10.2019 18:02:04 | 25,200 | a99d3479a84ca86843e500dbdf58db0af389b536 | Add context to state. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/context/context.go",
"new_path": "pkg/sentry/context/context.go",
"diff": "// See the License for the specific language governing permissions and\n// limitations under the License.\n-// Package context defines the sentry's Context type.\n+// Package context defines an internal context type.\n+//\n+// The given Context conforms to the standard Go context, but mandates\n+// additional methods that are specific to the kernel internals. Note however,\n+// that the Context described by this package carries additional constraints\n+// regarding concurrent access and retaining beyond the scope of a call.\n+//\n+// See the Context type for complete details.\npackage context\nimport (\n+ \"context\"\n+ \"time\"\n+\n\"gvisor.dev/gvisor/pkg/amutex\"\n\"gvisor.dev/gvisor/pkg/log\"\n)\n@@ -59,6 +69,7 @@ func ThreadGroupIDFromContext(ctx Context) (tgid int32, ok bool) {\ntype Context interface {\nlog.Logger\namutex.Sleeper\n+ context.Context\n// UninterruptibleSleepStart indicates the beginning of an uninterruptible\n// sleep state (equivalent to Linux's TASK_UNINTERRUPTIBLE). If deactivate\n@@ -72,19 +83,36 @@ type Context interface {\n// AddressSpace is activated. Normally activate is the same value as the\n// deactivate parameter passed to UninterruptibleSleepStart.\nUninterruptibleSleepFinish(activate bool)\n+}\n- // Value returns the value associated with this Context for key, or nil if\n- // no value is associated with key. Successive calls to Value with the same\n- // key returns the same result.\n- //\n- // A key identifies a specific value in a Context. Functions that wish to\n- // retrieve values from Context typically allocate a key in a global\n- // variable then use that key as the argument to Context.Value. A key can\n- // be any type that supports equality; packages should define keys as an\n- // unexported type to avoid collisions.\n- Value(key interface{}) interface{}\n+// NoopSleeper is a noop implementation of amutex.Sleeper and UninterruptibleSleep\n+// methods for anonymous embedding in other types that do not implement sleeps.\n+type NoopSleeper struct {\n+ amutex.NoopSleeper\n+}\n+\n+// UninterruptibleSleepStart does nothing.\n+func (NoopSleeper) UninterruptibleSleepStart(bool) {}\n+\n+// UninterruptibleSleepFinish does nothing.\n+func (NoopSleeper) UninterruptibleSleepFinish(bool) {}\n+\n+// Deadline returns zero values, meaning no deadline.\n+func (NoopSleeper) Deadline() (time.Time, bool) {\n+ return time.Time{}, false\n}\n+// Done returns nil.\n+func (NoopSleeper) Done() <-chan struct{} {\n+ return nil\n+}\n+\n+// Err returns nil.\n+func (NoopSleeper) Err() error {\n+ return nil\n+}\n+\n+// logContext implements basic logging.\ntype logContext struct {\nlog.Logger\nNoopSleeper\n@@ -95,19 +123,6 @@ func (logContext) Value(key interface{}) interface{} {\nreturn nil\n}\n-// NoopSleeper is a noop implementation of amutex.Sleeper and\n-// Context.UninterruptibleSleep* methods for anonymous embedding in other types\n-// that do not want to notify kernel.Task about sleeps.\n-type NoopSleeper struct {\n- amutex.NoopSleeper\n-}\n-\n-// UninterruptibleSleepStart does nothing.\n-func (NoopSleeper) UninterruptibleSleepStart(bool) {}\n-\n-// UninterruptibleSleepFinish does nothing.\n-func (NoopSleeper) UninterruptibleSleepFinish(bool) {}\n-\n// bgContext is the context returned by context.Background.\nvar bgContext = &logContext{Logger: log.Log()}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/context.go",
"new_path": "pkg/sentry/kernel/context.go",
"diff": "package kernel\nimport (\n+ \"time\"\n+\n\"gvisor.dev/gvisor/pkg/log\"\n\"gvisor.dev/gvisor/pkg/sentry/context\"\n)\n@@ -97,6 +99,21 @@ func TaskFromContext(ctx context.Context) *Task {\nreturn nil\n}\n+// Deadline implements context.Context.Deadline.\n+func (*Task) Deadline() (time.Time, bool) {\n+ return time.Time{}, false\n+}\n+\n+// Done implements context.Context.Done.\n+func (*Task) Done() <-chan struct{} {\n+ return nil\n+}\n+\n+// Err implements context.Context.Err.\n+func (*Task) Err() error {\n+ return nil\n+}\n+\n// AsyncContext returns a context.Context that may be used by goroutines that\n// do work on behalf of t and therefore share its contextual values, but are\n// not t's task goroutine (e.g. asynchronous I/O).\n@@ -129,6 +146,21 @@ func (ctx taskAsyncContext) IsLogging(level log.Level) bool {\nreturn ctx.t.IsLogging(level)\n}\n+// Deadline implements context.Context.Deadline.\n+func (ctx taskAsyncContext) Deadline() (time.Time, bool) {\n+ return ctx.t.Deadline()\n+}\n+\n+// Done implements context.Context.Done.\n+func (ctx taskAsyncContext) Done() <-chan struct{} {\n+ return ctx.t.Done()\n+}\n+\n+// Err implements context.Context.Err.\n+func (ctx taskAsyncContext) Err() error {\n+ return ctx.t.Err()\n+}\n+\n// Value implements context.Context.Value.\nfunc (ctx taskAsyncContext) Value(key interface{}) interface{} {\nreturn ctx.t.Value(key)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/kernel.go",
"new_path": "pkg/sentry/kernel/kernel.go",
"diff": "@@ -391,7 +391,7 @@ func (k *Kernel) SaveTo(w io.Writer) error {\n//\n// N.B. This will also be saved along with the full kernel save below.\ncpuidStart := time.Now()\n- if err := state.Save(w, k.FeatureSet(), nil); err != nil {\n+ if err := state.Save(k.SupervisorContext(), w, k.FeatureSet(), nil); err != nil {\nreturn err\n}\nlog.Infof(\"CPUID save took [%s].\", time.Since(cpuidStart))\n@@ -399,7 +399,7 @@ func (k *Kernel) SaveTo(w io.Writer) error {\n// Save the kernel state.\nkernelStart := time.Now()\nvar stats state.Stats\n- if err := state.Save(w, k, &stats); err != nil {\n+ if err := state.Save(k.SupervisorContext(), w, k, &stats); err != nil {\nreturn err\n}\nlog.Infof(\"Kernel save stats: %s\", &stats)\n@@ -407,7 +407,7 @@ func (k *Kernel) SaveTo(w io.Writer) error {\n// Save the memory file's state.\nmemoryStart := time.Now()\n- if err := k.mf.SaveTo(w); err != nil {\n+ if err := k.mf.SaveTo(k.SupervisorContext(), w); err != nil {\nreturn err\n}\nlog.Infof(\"Memory save took [%s].\", time.Since(memoryStart))\n@@ -542,7 +542,7 @@ func (k *Kernel) LoadFrom(r io.Reader, net inet.Stack, clocks sentrytime.Clocks)\n// don't need to explicitly install it in the Kernel.\ncpuidStart := time.Now()\nvar features cpuid.FeatureSet\n- if err := state.Load(r, &features, nil); err != nil {\n+ if err := state.Load(k.SupervisorContext(), r, &features, nil); err != nil {\nreturn err\n}\nlog.Infof(\"CPUID load took [%s].\", time.Since(cpuidStart))\n@@ -558,7 +558,7 @@ func (k *Kernel) LoadFrom(r io.Reader, net inet.Stack, clocks sentrytime.Clocks)\n// Load the kernel state.\nkernelStart := time.Now()\nvar stats state.Stats\n- if err := state.Load(r, k, &stats); err != nil {\n+ if err := state.Load(k.SupervisorContext(), r, k, &stats); err != nil {\nreturn err\n}\nlog.Infof(\"Kernel load stats: %s\", &stats)\n@@ -566,7 +566,7 @@ func (k *Kernel) LoadFrom(r io.Reader, net inet.Stack, clocks sentrytime.Clocks)\n// Load the memory file's state.\nmemoryStart := time.Now()\n- if err := k.mf.LoadFrom(r); err != nil {\n+ if err := k.mf.LoadFrom(k.SupervisorContext(), r); err != nil {\nreturn err\n}\nlog.Infof(\"Memory load took [%s].\", time.Since(memoryStart))\n@@ -1322,6 +1322,7 @@ func (k *Kernel) ListSockets() []*SocketEntry {\nreturn socks\n}\n+// supervisorContext is a privileged context.\ntype supervisorContext struct {\ncontext.NoopSleeper\nlog.Logger\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/pgalloc/save_restore.go",
"new_path": "pkg/sentry/pgalloc/save_restore.go",
"diff": "@@ -16,6 +16,7 @@ package pgalloc\nimport (\n\"bytes\"\n+ \"context\"\n\"fmt\"\n\"io\"\n\"runtime\"\n@@ -29,7 +30,7 @@ import (\n)\n// SaveTo writes f's state to the given stream.\n-func (f *MemoryFile) SaveTo(w io.Writer) error {\n+func (f *MemoryFile) SaveTo(ctx context.Context, w io.Writer) error {\n// Wait for reclaim.\nf.mu.Lock()\ndefer f.mu.Unlock()\n@@ -78,10 +79,10 @@ func (f *MemoryFile) SaveTo(w io.Writer) error {\n}\n// Save metadata.\n- if err := state.Save(w, &f.fileSize, nil); err != nil {\n+ if err := state.Save(ctx, w, &f.fileSize, nil); err != nil {\nreturn err\n}\n- if err := state.Save(w, &f.usage, nil); err != nil {\n+ if err := state.Save(ctx, w, &f.usage, nil); err != nil {\nreturn err\n}\n@@ -114,9 +115,9 @@ func (f *MemoryFile) SaveTo(w io.Writer) error {\n}\n// LoadFrom loads MemoryFile state from the given stream.\n-func (f *MemoryFile) LoadFrom(r io.Reader) error {\n+func (f *MemoryFile) LoadFrom(ctx context.Context, r io.Reader) error {\n// Load metadata.\n- if err := state.Load(r, &f.fileSize, nil); err != nil {\n+ if err := state.Load(ctx, r, &f.fileSize, nil); err != nil {\nreturn err\n}\nif err := f.file.Truncate(f.fileSize); err != nil {\n@@ -124,7 +125,7 @@ func (f *MemoryFile) LoadFrom(r io.Reader) error {\n}\nnewMappings := make([]uintptr, f.fileSize>>chunkShift)\nf.mappings.Store(newMappings)\n- if err := state.Load(r, &f.usage, nil); err != nil {\n+ if err := state.Load(ctx, r, &f.usage, nil); err != nil {\nreturn err\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/state/decode.go",
"new_path": "pkg/state/decode.go",
"diff": "@@ -16,6 +16,7 @@ package state\nimport (\n\"bytes\"\n+ \"context\"\n\"encoding/binary\"\n\"errors\"\n\"fmt\"\n@@ -133,6 +134,9 @@ func (os *objectState) findCycle() []*objectState {\n// to ensure that all callbacks are executed, otherwise the callback graph was\n// not acyclic.\ntype decodeState struct {\n+ // ctx is the decode context.\n+ ctx context.Context\n+\n// objectByID is the set of objects in progress.\nobjectsByID map[uint64]*objectState\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/state/encode.go",
"new_path": "pkg/state/encode.go",
"diff": "@@ -16,6 +16,7 @@ package state\nimport (\n\"container/list\"\n+ \"context\"\n\"encoding/binary\"\n\"fmt\"\n\"io\"\n@@ -38,6 +39,9 @@ type queuedObject struct {\n// The encoding process is a breadth-first traversal of the object graph. The\n// inherent races and dependencies are much simpler than the decode case.\ntype encodeState struct {\n+ // ctx is the encode context.\n+ ctx context.Context\n+\n// lastID is the last object ID.\n//\n// See idsByObject for context. Because of the special zero encoding\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/state/map.go",
"new_path": "pkg/state/map.go",
"diff": "package state\nimport (\n+ \"context\"\n\"fmt\"\n\"reflect\"\n\"sort\"\n@@ -219,3 +220,13 @@ func (m Map) AfterLoad(fn func()) {\n// data dependencies have been cleared.\nm.os.callbacks = append(m.os.callbacks, fn)\n}\n+\n+// Context returns the current context object.\n+func (m Map) Context() context.Context {\n+ if m.es != nil {\n+ return m.es.ctx\n+ } else if m.ds != nil {\n+ return m.ds.ctx\n+ }\n+ return context.Background() // No context.\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/state/state.go",
"new_path": "pkg/state/state.go",
"diff": "package state\nimport (\n+ \"context\"\n\"fmt\"\n\"io\"\n\"reflect\"\n@@ -86,9 +87,10 @@ func UnwrapErrState(err error) error {\n}\n// Save saves the given object state.\n-func Save(w io.Writer, rootPtr interface{}, stats *Stats) error {\n+func Save(ctx context.Context, w io.Writer, rootPtr interface{}, stats *Stats) error {\n// Create the encoding state.\nes := &encodeState{\n+ ctx: ctx,\nidsByObject: make(map[uintptr]uint64),\nw: w,\nstats: stats,\n@@ -101,9 +103,10 @@ func Save(w io.Writer, rootPtr interface{}, stats *Stats) error {\n}\n// Load loads a checkpoint.\n-func Load(r io.Reader, rootPtr interface{}, stats *Stats) error {\n+func Load(ctx context.Context, r io.Reader, rootPtr interface{}, stats *Stats) error {\n// Create the decoding state.\nds := &decodeState{\n+ ctx: ctx,\nobjectsByID: make(map[uint64]*objectState),\ndeferred: make(map[uint64]*pb.Object),\nr: r,\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/state/state_test.go",
"new_path": "pkg/state/state_test.go",
"diff": "@@ -16,6 +16,7 @@ package state\nimport (\n\"bytes\"\n+ \"context\"\n\"io/ioutil\"\n\"math\"\n\"reflect\"\n@@ -46,7 +47,7 @@ func runTest(t *testing.T, tests []TestCase) {\nsaveBuffer := &bytes.Buffer{}\nsaveObjectPtr := reflect.New(reflect.TypeOf(root))\nsaveObjectPtr.Elem().Set(reflect.ValueOf(root))\n- if err := Save(saveBuffer, saveObjectPtr.Interface(), nil); err != nil && !test.Fail {\n+ if err := Save(context.Background(), saveBuffer, saveObjectPtr.Interface(), nil); err != nil && !test.Fail {\nt.Errorf(\" FAIL: Save failed unexpectedly: %v\", err)\ncontinue\n} else if err != nil {\n@@ -56,7 +57,7 @@ func runTest(t *testing.T, tests []TestCase) {\n// Load a new copy of the object.\nloadObjectPtr := reflect.New(reflect.TypeOf(root))\n- if err := Load(bytes.NewReader(saveBuffer.Bytes()), loadObjectPtr.Interface(), nil); err != nil && !test.Fail {\n+ if err := Load(context.Background(), bytes.NewReader(saveBuffer.Bytes()), loadObjectPtr.Interface(), nil); err != nil && !test.Fail {\nt.Errorf(\" FAIL: Load failed unexpectedly: %v\", err)\ncontinue\n} else if err != nil {\n@@ -624,7 +625,7 @@ func BenchmarkEncoding(b *testing.B) {\nbs := buildObject(b.N)\nvar stats Stats\nb.StartTimer()\n- if err := Save(ioutil.Discard, bs, &stats); err != nil {\n+ if err := Save(context.Background(), ioutil.Discard, bs, &stats); err != nil {\nb.Errorf(\"save failed: %v\", err)\n}\nb.StopTimer()\n@@ -638,12 +639,12 @@ func BenchmarkDecoding(b *testing.B) {\nbs := buildObject(b.N)\nvar newBS benchStruct\nbuf := &bytes.Buffer{}\n- if err := Save(buf, bs, nil); err != nil {\n+ if err := Save(context.Background(), buf, bs, nil); err != nil {\nb.Errorf(\"save failed: %v\", err)\n}\nvar stats Stats\nb.StartTimer()\n- if err := Load(buf, &newBS, &stats); err != nil {\n+ if err := Load(context.Background(), buf, &newBS, &stats); err != nil {\nb.Errorf(\"load failed: %v\", err)\n}\nb.StopTimer()\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add context to state.
PiperOrigin-RevId: 277840416 |
259,853 | 01.11.2019 11:21:06 | 25,200 | af6af2c34131c4ec5e3195be99c1deb6a2669c06 | tests: don't use ASSERT_THAT after fork | [
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/semaphore.cc",
"new_path": "test/syscalls/linux/semaphore.cc",
"diff": "@@ -447,9 +447,8 @@ TEST(SemaphoreTest, SemCtlGetPidFork) {\nconst pid_t child_pid = fork();\nif (child_pid == 0) {\n- ASSERT_THAT(semctl(sem.get(), 0, SETVAL, 1), SyscallSucceeds());\n- ASSERT_THAT(semctl(sem.get(), 0, GETPID),\n- SyscallSucceedsWithValue(getpid()));\n+ TEST_PCHECK(semctl(sem.get(), 0, SETVAL, 1) == 0);\n+ TEST_PCHECK(semctl(sem.get(), 0, GETPID) == getpid());\n_exit(0);\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | tests: don't use ASSERT_THAT after fork
PiperOrigin-RevId: 277965624 |
259,885 | 01.11.2019 11:43:33 | 25,200 | 5694bd080e0e95ba18cbf77038f450fe33b9f8df | Don't log "p9.channel.service: flipcall connection shutdown".
This gets quite spammy, especially in tests. | [
{
"change_type": "MODIFY",
"old_path": "pkg/flipcall/ctrl_futex.go",
"new_path": "pkg/flipcall/ctrl_futex.go",
"diff": "@@ -113,7 +113,7 @@ func (ep *Endpoint) enterFutexWait() error {\nreturn nil\ncase epsBlocked | epsShutdown:\natomic.AddInt32(&ep.ctrl.state, -epsBlocked)\n- return shutdownError{}\n+ return ShutdownError{}\ndefault:\n// Most likely due to ep.enterFutexWait() being called concurrently\n// from multiple goroutines.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/flipcall/flipcall.go",
"new_path": "pkg/flipcall/flipcall.go",
"diff": "@@ -136,8 +136,8 @@ func (ep *Endpoint) unmapPacket() {\n// Shutdown causes concurrent and future calls to ep.Connect(), ep.SendRecv(),\n// ep.RecvFirst(), and ep.SendLast(), as well as the same calls in the peer\n-// Endpoint, to unblock and return errors. It does not wait for concurrent\n-// calls to return. Successive calls to Shutdown have no effect.\n+// Endpoint, to unblock and return ShutdownErrors. It does not wait for\n+// concurrent calls to return. Successive calls to Shutdown have no effect.\n//\n// Shutdown is the only Endpoint method that may be called concurrently with\n// other methods on the same Endpoint.\n@@ -154,10 +154,12 @@ func (ep *Endpoint) isShutdownLocally() bool {\nreturn atomic.LoadUint32(&ep.shutdown) != 0\n}\n-type shutdownError struct{}\n+// ShutdownError is returned by most Endpoint methods after Endpoint.Shutdown()\n+// has been called.\n+type ShutdownError struct{}\n// Error implements error.Error.\n-func (shutdownError) Error() string {\n+func (ShutdownError) Error() string {\nreturn \"flipcall connection shutdown\"\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/flipcall/futex_linux.go",
"new_path": "pkg/flipcall/futex_linux.go",
"diff": "@@ -61,7 +61,7 @@ func (ep *Endpoint) futexSwitchToPeer() error {\nif !atomic.CompareAndSwapUint32(ep.connState(), ep.activeState, ep.inactiveState) {\nswitch cs := atomic.LoadUint32(ep.connState()); cs {\ncase csShutdown:\n- return shutdownError{}\n+ return ShutdownError{}\ndefault:\nreturn fmt.Errorf(\"unexpected connection state before FUTEX_WAKE: %v\", cs)\n}\n@@ -81,14 +81,14 @@ func (ep *Endpoint) futexSwitchFromPeer() error {\nreturn nil\ncase ep.inactiveState:\nif ep.isShutdownLocally() {\n- return shutdownError{}\n+ return ShutdownError{}\n}\nif err := ep.futexWaitConnState(ep.inactiveState); err != nil {\nreturn fmt.Errorf(\"failed to FUTEX_WAIT for peer Endpoint: %v\", err)\n}\ncontinue\ncase csShutdown:\n- return shutdownError{}\n+ return ShutdownError{}\ndefault:\nreturn fmt.Errorf(\"unexpected connection state before FUTEX_WAIT: %v\", cs)\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/p9/server.go",
"new_path": "pkg/p9/server.go",
"diff": "@@ -453,8 +453,12 @@ func (cs *connState) initializeChannels() (err error) {\ngo func() { // S/R-SAFE: Server side.\ndefer cs.channelWg.Done()\nif err := res.service(cs); err != nil {\n+ // Don't log flipcall.ShutdownErrors, which we expect to be\n+ // returned during server shutdown.\n+ if _, ok := err.(flipcall.ShutdownError); !ok {\nlog.Warningf(\"p9.channel.service: %v\", err)\n}\n+ }\n}()\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Don't log "p9.channel.service: flipcall connection shutdown".
This gets quite spammy, especially in tests.
PiperOrigin-RevId: 277970468 |
259,881 | 04.11.2019 09:54:14 | 28,800 | 3b4f5445d03f7d2f170d68a8a4969b8acbad773e | Update membarrier bug
Updates | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/syscalls/linux/linux64_amd64.go",
"new_path": "pkg/sentry/syscalls/linux/linux64_amd64.go",
"diff": "@@ -364,7 +364,7 @@ var AMD64 = &kernel.SyscallTable{\n321: syscalls.CapError(\"bpf\", linux.CAP_SYS_ADMIN, \"\", nil),\n322: syscalls.Supported(\"execveat\", Execveat),\n323: syscalls.ErrorWithEvent(\"userfaultfd\", syserror.ENOSYS, \"\", []string{\"gvisor.dev/issue/266\"}), // TODO(b/118906345)\n- 324: syscalls.ErrorWithEvent(\"membarrier\", syserror.ENOSYS, \"\", []string{\"gvisor.dev/issue/267\"}), // TODO(b/118904897)\n+ 324: syscalls.ErrorWithEvent(\"membarrier\", syserror.ENOSYS, \"\", []string{\"gvisor.dev/issue/267\"}), // TODO(gvisor.dev/issue/267)\n325: syscalls.PartiallySupported(\"mlock2\", Mlock2, \"Stub implementation. The sandbox lacks appropriate permissions.\", nil),\n// Syscalls after 325 are \"backports\" from versions of Linux after 4.4.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/syscalls/linux/linux64_arm64.go",
"new_path": "pkg/sentry/syscalls/linux/linux64_arm64.go",
"diff": "@@ -297,7 +297,7 @@ var ARM64 = &kernel.SyscallTable{\n280: syscalls.CapError(\"bpf\", linux.CAP_SYS_ADMIN, \"\", nil),\n281: syscalls.ErrorWithEvent(\"execveat\", syserror.ENOSYS, \"\", []string{\"gvisor.dev/issue/265\"}), // TODO(b/118901836)\n282: syscalls.ErrorWithEvent(\"userfaultfd\", syserror.ENOSYS, \"\", []string{\"gvisor.dev/issue/266\"}), // TODO(b/118906345)\n- 283: syscalls.ErrorWithEvent(\"membarrier\", syserror.ENOSYS, \"\", []string{\"gvisor.dev/issue/267\"}), // TODO(b/118904897)\n+ 283: syscalls.ErrorWithEvent(\"membarrier\", syserror.ENOSYS, \"\", []string{\"gvisor.dev/issue/267\"}), // TODO(gvisor.dev/issue/267)\n284: syscalls.PartiallySupported(\"mlock2\", Mlock2, \"Stub implementation. The sandbox lacks appropriate permissions.\", nil),\n285: syscalls.ErrorWithEvent(\"copy_file_range\", syserror.ENOSYS, \"\", nil),\n286: syscalls.Supported(\"preadv2\", Preadv2),\n"
}
] | Go | Apache License 2.0 | google/gvisor | Update membarrier bug
Updates #267
PiperOrigin-RevId: 278402684 |
259,891 | 04.11.2019 10:56:13 | 28,800 | 4fdd69d681bb3abb68a043377a2fb0ec8a031d54 | Check that a file is a regular file with open(O_TRUNC).
It was possible to panic the sentry by opening a cache revalidating folder with
O_TRUNC|O_CREAT. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/inode_operations.go",
"new_path": "pkg/sentry/fs/inode_operations.go",
"diff": "@@ -221,6 +221,8 @@ type InodeOperations interface {\n// sys_ftruncate.\n//\n// Implementations need not check that length >= 0.\n+ //\n+ // Truncate must only be called on regular files.\nTruncate(ctx context.Context, inode *Inode, size int64) error\n// Allocate allows the caller to reserve disk space for the inode.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/syscalls/linux/sys_file.go",
"new_path": "pkg/sentry/syscalls/linux/sys_file.go",
"diff": "@@ -169,7 +169,7 @@ func openAt(t *kernel.Task, dirFD int32, addr usermem.Addr, flags uint) (fd uint\nif dirPath {\nreturn syserror.ENOTDIR\n}\n- if flags&linux.O_TRUNC != 0 {\n+ if flags&linux.O_TRUNC != 0 && fs.IsRegular(d.Inode.StableAttr) {\nif err := d.Inode.Truncate(t, d, 0); err != nil {\nreturn err\n}\n@@ -397,7 +397,7 @@ func createAt(t *kernel.Task, dirFD int32, addr usermem.Addr, flags uint, mode l\n}\n// Should we truncate the file?\n- if flags&linux.O_TRUNC != 0 {\n+ if flags&linux.O_TRUNC != 0 && fs.IsRegular(found.Inode.StableAttr) {\nif err := found.Inode.Truncate(t, found, 0); err != nil {\nreturn err\n}\n@@ -1483,7 +1483,7 @@ func Truncate(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc\nif fs.IsDir(d.Inode.StableAttr) {\nreturn syserror.EISDIR\n}\n- if !fs.IsFile(d.Inode.StableAttr) {\n+ if !fs.IsRegular(d.Inode.StableAttr) {\nreturn syserror.EINVAL\n}\n@@ -1523,7 +1523,7 @@ func Ftruncate(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sys\n// Note that this is different from truncate(2) above, where a\n// directory returns EISDIR.\n- if !fs.IsFile(file.Dirent.Inode.StableAttr) {\n+ if !fs.IsRegular(file.Dirent.Inode.StableAttr) {\nreturn 0, nil, syserror.EINVAL\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Check that a file is a regular file with open(O_TRUNC).
It was possible to panic the sentry by opening a cache revalidating folder with
O_TRUNC|O_CREAT.
PiperOrigin-RevId: 278417533 |
259,853 | 04.11.2019 15:59:11 | 28,800 | 493334f8b594eb1c2b0f5a6133dbedad4e0ecd32 | kokoro: run KVM syscall tests
We don't know how stable they are, so let's start with a warning. | [
{
"change_type": "ADD",
"old_path": null,
"new_path": "kokoro/syscall_kvm_tests.cfg",
"diff": "+build_file: \"repo/scripts/syscall_kvm_tests.sh\"\n+\n+action {\n+ define_artifacts {\n+ regex: \"**/sponge_log.xml\"\n+ regex: \"**/sponge_log.log\"\n+ regex: \"**/outputs.zip\"\n+ }\n+}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "scripts/syscall_kvm_tests.sh",
"diff": "+#!/bin/bash\n+\n+# Copyright 2019 The gVisor Authors.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+source $(dirname $0)/common.sh\n+\n+# TODO(b/112165693): \"test --test_tag_filters=runsc_kvm\" can be used\n+# when the \"manual\" tag will be removed for kvm tests.\n+test `bazel query \"attr(tags, runsc_kvm, tests(//test/syscalls/...))\"`\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/itimer.cc",
"new_path": "test/syscalls/linux/itimer.cc",
"diff": "@@ -267,6 +267,9 @@ int TestSIGPROFFairness(absl::Duration sleep) {\n// Random save/restore is disabled as it introduces additional latency and\n// unpredictable distribution patterns.\nTEST(ItimerTest, DeliversSIGPROFToThreadsRoughlyFairlyActive_NoRandomSave) {\n+ // TODO(b/143247272): CPU time accounting is inaccurate for the KVM platform.\n+ SKIP_IF(GvisorPlatform() == Platform::kKVM);\n+\npid_t child;\nint execve_errno;\nauto kill = ASSERT_NO_ERRNO_AND_VALUE(\n@@ -288,6 +291,9 @@ TEST(ItimerTest, DeliversSIGPROFToThreadsRoughlyFairlyActive_NoRandomSave) {\n// Random save/restore is disabled as it introduces additional latency and\n// unpredictable distribution patterns.\nTEST(ItimerTest, DeliversSIGPROFToThreadsRoughlyFairlyIdle_NoRandomSave) {\n+ // TODO(b/143247272): CPU time accounting is inaccurate for the KVM platform.\n+ SKIP_IF(GvisorPlatform() == Platform::kKVM);\n+\npid_t child;\nint execve_errno;\nauto kill = ASSERT_NO_ERRNO_AND_VALUE(\n"
}
] | Go | Apache License 2.0 | google/gvisor | kokoro: run KVM syscall tests
We don't know how stable they are, so let's start with a warning.
PiperOrigin-RevId: 278484186 |
259,853 | 05.11.2019 17:02:15 | 28,800 | 57f6dbc4be5c9c5416c9d3a442eacfb797e57e9c | test/root: check that memory accounting works as expected | [
{
"change_type": "MODIFY",
"old_path": "test/root/cgroup_test.go",
"new_path": "test/root/cgroup_test.go",
"diff": "@@ -24,6 +24,7 @@ import (\n\"strconv\"\n\"strings\"\n\"testing\"\n+ \"time\"\n\"gvisor.dev/gvisor/runsc/cgroup\"\n\"gvisor.dev/gvisor/runsc/dockerutil\"\n@@ -55,6 +56,59 @@ func verifyPid(pid int, path string) error {\nreturn fmt.Errorf(\"got: %s, want: %d\", gots, pid)\n}\n+// TestCgroup sets cgroup options and checks that cgroup was properly configured.\n+func TestMemCGroup(t *testing.T) {\n+ allocMemSize := 128 << 20\n+ if err := dockerutil.Pull(\"python\"); err != nil {\n+ t.Fatal(\"docker pull failed:\", err)\n+ }\n+ d := dockerutil.MakeDocker(\"memusage-test\")\n+\n+ // Start a new container and allocate the specified about of memory.\n+ args := []string{\n+ \"--memory=256MB\",\n+ \"python\",\n+ \"python\",\n+ \"-c\",\n+ fmt.Sprintf(\"import time; s = 'a' * %d; time.sleep(100)\", allocMemSize),\n+ }\n+ if err := d.Run(args...); err != nil {\n+ t.Fatal(\"docker create failed:\", err)\n+ }\n+ defer d.CleanUp()\n+\n+ gid, err := d.ID()\n+ if err != nil {\n+ t.Fatalf(\"Docker.ID() failed: %v\", err)\n+ }\n+ t.Logf(\"cgroup ID: %s\", gid)\n+\n+ path := filepath.Join(\"/sys/fs/cgroup/memory/docker\", gid, \"memory.usage_in_bytes\")\n+ memUsage := 0\n+\n+ // Wait when the container will allocate memory.\n+ start := time.Now()\n+ for time.Now().Sub(start) < 30*time.Second {\n+ outRaw, err := ioutil.ReadFile(path)\n+ if err != nil {\n+ t.Fatalf(\"failed to read %q: %v\", path, err)\n+ }\n+ out := strings.TrimSpace(string(outRaw))\n+ memUsage, err = strconv.Atoi(out)\n+ if err != nil {\n+ t.Fatalf(\"Atoi(%v): %v\", out, err)\n+ }\n+\n+ if memUsage > allocMemSize {\n+ return\n+ }\n+\n+ time.Sleep(100 * time.Millisecond)\n+ }\n+\n+ t.Fatalf(\"%vMB is less than %vMB: %v\", memUsage>>20, allocMemSize>>20)\n+}\n+\n// TestCgroup sets cgroup options and checks that cgroup was properly configured.\nfunc TestCgroup(t *testing.T) {\nif err := dockerutil.Pull(\"alpine\"); err != nil {\n"
}
] | Go | Apache License 2.0 | google/gvisor | test/root: check that memory accounting works as expected
PiperOrigin-RevId: 278739427 |
259,962 | 06.11.2019 22:28:41 | 28,800 | 3552691137284525a33d3de7e3c2d170da66c8ac | Fix data race in syscall_test_runner.go
Fixes | [
{
"change_type": "MODIFY",
"old_path": "test/syscalls/syscall_test_runner.go",
"new_path": "test/syscalls/syscall_test_runner.go",
"diff": "@@ -208,14 +208,15 @@ func runRunsc(tc gtest.TestCase, spec *specs.Spec) error {\n}\nlog.Warningf(\"%s: Got signal: %v\", name, s)\ndone := make(chan bool)\n- go func() {\n- dArgs := append(args, \"-alsologtostderr=true\", \"debug\", \"--stacks\", id)\n+ dArgs := append([]string{}, args...)\n+ dArgs = append(dArgs, \"-alsologtostderr=true\", \"debug\", \"--stacks\", id)\n+ go func(dArgs []string) {\ncmd := exec.Command(*runscPath, dArgs...)\ncmd.Stdout = os.Stdout\ncmd.Stderr = os.Stderr\ncmd.Run()\ndone <- true\n- }()\n+ }(dArgs)\ntimeout := time.After(3 * time.Second)\nselect {\n@@ -225,7 +226,7 @@ func runRunsc(tc gtest.TestCase, spec *specs.Spec) error {\n}\nlog.Warningf(\"Send SIGTERM to the sandbox process\")\n- dArgs := append(args, \"debug\",\n+ dArgs = append(args, \"debug\",\nfmt.Sprintf(\"--signal=%d\", syscall.SIGTERM),\nid)\ncmd = exec.Command(*runscPath, dArgs...)\n"
}
] | Go | Apache License 2.0 | google/gvisor | Fix data race in syscall_test_runner.go
Fixes #1140
PiperOrigin-RevId: 279012793 |
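The race fixed in this commit comes from Go's append reusing the spare capacity of a shared slice: both the goroutine and the main code path appended to the same `args` backing array. The following standalone Go sketch is illustrative only (flag values and variable names are made up, not taken from syscall_test_runner.go); it shows the racy pattern and the copy-based fix applied in the diff above.

```go
// Sketch of the slice-sharing race and its fix; the flags below are invented.
package main

import (
	"fmt"
	"sync"
)

func main() {
	// Shared slice with spare capacity, as left behind by earlier appends.
	args := make([]string, 0, 8)
	args = append(args, "--network=none", "--debug=true")

	// Racy form (roughly as in the original code): both appends may write
	// into the same backing array because len(args) < cap(args):
	//
	//   go func() { dArgs := append(args, "debug", "--stacks"); _ = dArgs }()
	//   dArgs := append(args, "debug", "--signal=15")

	// Fixed form: copy first so the goroutine owns an independent array.
	dArgs := append([]string{}, args...)
	dArgs = append(dArgs, "debug", "--stacks")

	var wg sync.WaitGroup
	wg.Add(1)
	go func(dArgs []string) {
		defer wg.Done()
		fmt.Println("goroutine:", dArgs)
	}(dArgs)

	// This append may still reuse args' backing array, but nothing else
	// reads that array concurrently anymore.
	sArgs := append(args, "debug", "--signal=15")
	fmt.Println("main:", sArgs)
	wg.Wait()
}
```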
259,962 | 06.11.2019 23:50:54 | 28,800 | 2326224a9652201938df2881be055ab352672587 | Fix yet another data race.
Fixes | [
{
"change_type": "MODIFY",
"old_path": "test/syscalls/syscall_test_runner.go",
"new_path": "test/syscalls/syscall_test_runner.go",
"diff": "@@ -229,7 +229,7 @@ func runRunsc(tc gtest.TestCase, spec *specs.Spec) error {\ndArgs = append(args, \"debug\",\nfmt.Sprintf(\"--signal=%d\", syscall.SIGTERM),\nid)\n- cmd = exec.Command(*runscPath, dArgs...)\n+ cmd := exec.Command(*runscPath, dArgs...)\ncmd.Stdout = os.Stdout\ncmd.Stderr = os.Stderr\ncmd.Run()\n"
}
] | Go | Apache License 2.0 | google/gvisor | Fix yet another data race.
Fixes #1140
PiperOrigin-RevId: 279020846 |
259,853 | 08.11.2019 14:07:34 | 28,800 | 50d6236111485acce0e728794c4f53884097ea7d | Update kokoro images to install junitparser
junitparser will be used to merge junit xml files. | [
{
"change_type": "MODIFY",
"old_path": "kokoro/ubuntu1604/40_kokoro.sh",
"new_path": "kokoro/ubuntu1604/40_kokoro.sh",
"diff": "@@ -23,7 +23,10 @@ declare -r ssh_public_keys=(\n)\n# Install dependencies.\n-apt-get update && apt-get install -y rsync coreutils python-psutil qemu-kvm\n+apt-get update && apt-get install -y rsync coreutils python-psutil qemu-kvm python-pip\n+\n+# junitparser is used to merge junit xml files.\n+pip install junitparser\n# We need a kbuilder user.\nif useradd -c \"kbuilder user\" -m -s /bin/bash kbuilder; then\n"
}
] | Go | Apache License 2.0 | google/gvisor | Update kokoro images to install junitparser
junitparser will be used to merge junit xml files.
PiperOrigin-RevId: 279387305 |
259,853 | 08.11.2019 15:43:47 | 28,800 | 14f4461f93a4c0014314a35a374ce07eec25636c | kokoro: update images to install zip | [
{
"change_type": "MODIFY",
"old_path": "kokoro/ubuntu1604/40_kokoro.sh",
"new_path": "kokoro/ubuntu1604/40_kokoro.sh",
"diff": "@@ -23,7 +23,7 @@ declare -r ssh_public_keys=(\n)\n# Install dependencies.\n-apt-get update && apt-get install -y rsync coreutils python-psutil qemu-kvm python-pip\n+apt-get update && apt-get install -y rsync coreutils python-psutil qemu-kvm python-pip zip\n# junitparser is used to merge junit xml files.\npip install junitparser\n"
}
] | Go | Apache License 2.0 | google/gvisor | kokoro: update images to install zip
PiperOrigin-RevId: 279406266 |
259,853 | 08.11.2019 16:39:25 | 28,800 | b91ad8fa0950c752ab08af7d08727d5a97b14b12 | test: merge log files of all shards for each test suite
This significantly speeds up the process of uploading these files
to sponge and resultstore by kokoro. | [
{
"change_type": "MODIFY",
"old_path": "scripts/common_bazel.sh",
"new_path": "scripts/common_bazel.sh",
"diff": "@@ -71,6 +71,13 @@ function run_as_root() {\nfunction collect_logs() {\n# Zip out everything into a convenient form.\nif [[ -v KOKORO_ARTIFACTS_DIR ]] && [[ -e bazel-testlogs ]]; then\n+ # Merge results files of all shards for each test suite.\n+ for d in `find -L \"bazel-testlogs\" -name 'shard_*_of_*' | xargs dirname | sort | uniq`; do\n+ junitparser merge `find $d -name test.xml` $d/test.xml\n+ cat $d/shard_*_of_*/test.log > $d/test.log\n+ ls -l $d/shard_*_of_*/outputs.zip && zip -r -1 $d/outputs.zip $d/shard_*_of_*/outputs.zip\n+ done\n+ find -L \"bazel-testlogs\" -name 'shard_*_of_*' | xargs rm -rf\n# Move test logs to Kokoro directory. tar is used to conveniently perform\n# renames while moving files.\nfind -L \"bazel-testlogs\" -name \"test.xml\" -o -name \"test.log\" -o -name \"outputs.zip\" |\n"
}
] | Go | Apache License 2.0 | google/gvisor | test: merge log files of all shards for each test suite
This significantly speeds up the process of uploading these files
to sponge and resultstore by kokoro.
PiperOrigin-RevId: 279416349 |
259,847 | 11.11.2019 14:41:44 | 28,800 | e09e7bf72f3e0208c7f557d9931407ee8729ebb2 | Add more extended features. | [
{
"change_type": "MODIFY",
"old_path": "pkg/cpuid/cpuid.go",
"new_path": "pkg/cpuid/cpuid.go",
"diff": "@@ -183,6 +183,33 @@ const (\nX86FeatureAVX512VBMI\nX86FeatureUMIP\nX86FeaturePKU\n+ X86FeatureOSPKE\n+ X86FeatureWAITPKG\n+ X86FeatureAVX512_VBMI2\n+ _ // ecx bit 7 is reserved\n+ X86FeatureGFNI\n+ X86FeatureVAES\n+ X86FeatureVPCLMULQDQ\n+ X86FeatureAVX512_VNNI\n+ X86FeatureAVX512_BITALG\n+ X86FeatureTME\n+ X86FeatureAVX512_VPOPCNTDQ\n+ _ // ecx bit 15 is reserved\n+ X86FeatureLA57\n+ // ecx bits 17-21 are reserved\n+ _\n+ _\n+ _\n+ _\n+ _\n+ X86FeatureRDPID\n+ // ecx bits 23-24 are reserved\n+ _\n+ _\n+ X86FeatureCLDEMOTE\n+ _ // ecx bit 26 is reserved\n+ X86FeatureMOVDIRI\n+ X86FeatureMOVDIR64B\n)\n// Block 4 constants are for xsave capabilities in CPUID.(EAX=0DH,ECX=01H):EAX.\n@@ -356,6 +383,21 @@ var x86FeatureStrings = map[Feature]string{\nX86FeatureAVX512VBMI: \"avx512vbmi\",\nX86FeatureUMIP: \"umip\",\nX86FeaturePKU: \"pku\",\n+ X86FeatureOSPKE: \"ospke\",\n+ X86FeatureWAITPKG: \"waitpkg\",\n+ X86FeatureAVX512_VBMI2: \"avx512_vbmi2\",\n+ X86FeatureGFNI: \"gfni\",\n+ X86FeatureVAES: \"vaes\",\n+ X86FeatureVPCLMULQDQ: \"vpclmulqdq\",\n+ X86FeatureAVX512_VNNI: \"avx512_vnni\",\n+ X86FeatureAVX512_BITALG: \"avx512_bitalg\",\n+ X86FeatureTME: \"tme\",\n+ X86FeatureAVX512_VPOPCNTDQ: \"avx512_vpopcntdq\",\n+ X86FeatureLA57: \"la57\",\n+ X86FeatureRDPID: \"rdpid\",\n+ X86FeatureCLDEMOTE: \"cldemote\",\n+ X86FeatureMOVDIRI: \"movdiri\",\n+ X86FeatureMOVDIR64B: \"movdir64b\",\n// Block 4.\nX86FeatureXSAVEOPT: \"xsaveopt\",\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add more extended features.
PiperOrigin-RevId: 279820435 |
259,962 | 11.11.2019 15:49:49 | 28,800 | 2b0e4dc6aa7fb8a3f619220b72537a8fff2f95b4 | Remove obsolete TODO. This is now fixed. | [
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/tcp_socket.cc",
"new_path": "test/syscalls/linux/tcp_socket.cc",
"diff": "@@ -425,6 +425,11 @@ TEST_P(TcpSocketTest, PollWithFullBufferBlocks) {\n}\n// The last error should have been EWOULDBLOCK.\nASSERT_EQ(errno, EWOULDBLOCK);\n+\n+ // Now polling on the FD with a timeout should return 0 corresponding to no\n+ // FDs ready.\n+ struct pollfd poll_fd = {s_, POLLOUT, 0};\n+ EXPECT_THAT(RetryEINTR(poll)(&poll_fd, 1, 10), SyscallSucceedsWithValue(0));\n}\nTEST_P(TcpSocketTest, MsgTrunc) {\n"
}
] | Go | Apache License 2.0 | google/gvisor | Remove obsolete TODO. This is now fixed.
PiperOrigin-RevId: 279835100 |
259,854 | 11.11.2019 18:34:28 | 28,800 | b82bd24f9495435cadd2713db829b19ce8fcce9d | Update ephemeral port reservation tests.
The existing tests which are disabled on gVisor are failing because we default
to SO_REUSEADDR being enabled for TCP sockets. Update the test comments.
Also add new tests for enabled SO_REUSEADDR. | [
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/socket_inet_loopback.cc",
"new_path": "test/syscalls/linux/socket_inet_loopback.cc",
"diff": "@@ -1156,9 +1156,8 @@ TEST_P(SocketMultiProtocolInetLoopbackTest, V6OnlyV6AnyReservesV6) {\nsockaddr_storage addr_dual = test_addr_dual.addr;\nconst FileDescriptor fd_dual = ASSERT_NO_ERRNO_AND_VALUE(\nSocket(test_addr_dual.family(), param.type, 0));\n- int one = 1;\n- EXPECT_THAT(\n- setsockopt(fd_dual.get(), IPPROTO_IPV6, IPV6_V6ONLY, &one, sizeof(one)),\n+ EXPECT_THAT(setsockopt(fd_dual.get(), IPPROTO_IPV6, IPV6_V6ONLY,\n+ &kSockOptOn, sizeof(kSockOptOn)),\nSyscallSucceeds());\nASSERT_THAT(bind(fd_dual.get(), reinterpret_cast<sockaddr*>(&addr_dual),\ntest_addr_dual.addr_len),\n@@ -1207,7 +1206,8 @@ TEST_P(SocketMultiProtocolInetLoopbackTest, V6OnlyV6AnyReservesV6) {\nTEST_P(SocketMultiProtocolInetLoopbackTest, V6EphemeralPortReserved) {\nauto const& param = GetParam();\n- // FIXME(b/114268588)\n+ // FIXME(b/76031995): Support disabling SO_REUSEADDR for TCP sockets and make\n+ // it disabled by default.\nSKIP_IF(IsRunningOnGvisor() && param.type == SOCK_STREAM);\nfor (int i = 0; true; i++) {\n@@ -1305,10 +1305,76 @@ TEST_P(SocketMultiProtocolInetLoopbackTest, V6EphemeralPortReserved) {\n}\n}\n+TEST_P(SocketMultiProtocolInetLoopbackTest, V6EphemeralPortReservedReuseAddr) {\n+ auto const& param = GetParam();\n+\n+ // FIXME(b/129164367): Support SO_REUSEADDR on UDP sockets.\n+ SKIP_IF(IsRunningOnGvisor() && param.type == SOCK_DGRAM);\n+\n+ // Bind the v6 loopback on a dual stack socket.\n+ TestAddress const& test_addr = V6Loopback();\n+ sockaddr_storage bound_addr = test_addr.addr;\n+ const FileDescriptor bound_fd =\n+ ASSERT_NO_ERRNO_AND_VALUE(Socket(test_addr.family(), param.type, 0));\n+ ASSERT_THAT(bind(bound_fd.get(), reinterpret_cast<sockaddr*>(&bound_addr),\n+ test_addr.addr_len),\n+ SyscallSucceeds());\n+ ASSERT_THAT(setsockopt(bound_fd.get(), SOL_SOCKET, SO_REUSEADDR, &kSockOptOn,\n+ sizeof(kSockOptOn)),\n+ SyscallSucceeds());\n+\n+ // Listen iff TCP.\n+ if (param.type == SOCK_STREAM) {\n+ ASSERT_THAT(listen(bound_fd.get(), SOMAXCONN), SyscallSucceeds());\n+ }\n+\n+ // Get the port that we bound.\n+ socklen_t bound_addr_len = test_addr.addr_len;\n+ ASSERT_THAT(\n+ getsockname(bound_fd.get(), reinterpret_cast<sockaddr*>(&bound_addr),\n+ &bound_addr_len),\n+ SyscallSucceeds());\n+\n+ // Connect to bind an ephemeral port.\n+ const FileDescriptor connected_fd =\n+ ASSERT_NO_ERRNO_AND_VALUE(Socket(test_addr.family(), param.type, 0));\n+ ASSERT_THAT(setsockopt(connected_fd.get(), SOL_SOCKET, SO_REUSEADDR,\n+ &kSockOptOn, sizeof(kSockOptOn)),\n+ SyscallSucceeds());\n+ ASSERT_THAT(connect(connected_fd.get(),\n+ reinterpret_cast<sockaddr*>(&bound_addr), bound_addr_len),\n+ SyscallSucceeds());\n+\n+ // Get the ephemeral port.\n+ sockaddr_storage connected_addr = {};\n+ socklen_t connected_addr_len = sizeof(connected_addr);\n+ ASSERT_THAT(getsockname(connected_fd.get(),\n+ reinterpret_cast<sockaddr*>(&connected_addr),\n+ &connected_addr_len),\n+ SyscallSucceeds());\n+ uint16_t const ephemeral_port =\n+ ASSERT_NO_ERRNO_AND_VALUE(AddrPort(test_addr.family(), connected_addr));\n+\n+ // Verify that we actually got an ephemeral port.\n+ ASSERT_NE(ephemeral_port, 0);\n+\n+ // Verify that the ephemeral port is not reserved.\n+ const FileDescriptor checking_fd =\n+ ASSERT_NO_ERRNO_AND_VALUE(Socket(test_addr.family(), param.type, 0));\n+ ASSERT_THAT(setsockopt(checking_fd.get(), SOL_SOCKET, SO_REUSEADDR,\n+ &kSockOptOn, sizeof(kSockOptOn)),\n+ SyscallSucceeds());\n+ EXPECT_THAT(\n+ bind(checking_fd.get(), reinterpret_cast<sockaddr*>(&connected_addr),\n+ connected_addr_len),\n+ 
SyscallSucceeds());\n+}\n+\nTEST_P(SocketMultiProtocolInetLoopbackTest, V4MappedEphemeralPortReserved) {\nauto const& param = GetParam();\n- // FIXME(b/114268588)\n+ // FIXME(b/76031995): Support disabling SO_REUSEADDR for TCP sockets and make\n+ // it disabled by default.\nSKIP_IF(IsRunningOnGvisor() && param.type == SOCK_STREAM);\nfor (int i = 0; true; i++) {\n@@ -1408,9 +1474,8 @@ TEST_P(SocketMultiProtocolInetLoopbackTest, V4MappedEphemeralPortReserved) {\n// v6-only socket.\nconst FileDescriptor fd_v6_only_any = ASSERT_NO_ERRNO_AND_VALUE(\nSocket(test_addr_v6_any.family(), param.type, 0));\n- int one = 1;\nEXPECT_THAT(setsockopt(fd_v6_only_any.get(), IPPROTO_IPV6, IPV6_V6ONLY,\n- &one, sizeof(one)),\n+ &kSockOptOn, sizeof(kSockOptOn)),\nSyscallSucceeds());\nret =\nbind(fd_v6_only_any.get(), reinterpret_cast<sockaddr*>(&addr_v6_any),\n@@ -1429,10 +1494,78 @@ TEST_P(SocketMultiProtocolInetLoopbackTest, V4MappedEphemeralPortReserved) {\n}\n}\n+TEST_P(SocketMultiProtocolInetLoopbackTest,\n+ V4MappedEphemeralPortReservedResueAddr) {\n+ auto const& param = GetParam();\n+\n+ // FIXME(b/129164367): Support SO_REUSEADDR on UDP sockets.\n+ SKIP_IF(IsRunningOnGvisor() && param.type == SOCK_DGRAM);\n+\n+ // Bind the v4 loopback on a dual stack socket.\n+ TestAddress const& test_addr = V4MappedLoopback();\n+ sockaddr_storage bound_addr = test_addr.addr;\n+ const FileDescriptor bound_fd =\n+ ASSERT_NO_ERRNO_AND_VALUE(Socket(test_addr.family(), param.type, 0));\n+ ASSERT_THAT(bind(bound_fd.get(), reinterpret_cast<sockaddr*>(&bound_addr),\n+ test_addr.addr_len),\n+ SyscallSucceeds());\n+\n+ ASSERT_THAT(setsockopt(bound_fd.get(), SOL_SOCKET, SO_REUSEADDR, &kSockOptOn,\n+ sizeof(kSockOptOn)),\n+ SyscallSucceeds());\n+\n+ // Listen iff TCP.\n+ if (param.type == SOCK_STREAM) {\n+ ASSERT_THAT(listen(bound_fd.get(), SOMAXCONN), SyscallSucceeds());\n+ }\n+\n+ // Get the port that we bound.\n+ socklen_t bound_addr_len = test_addr.addr_len;\n+ ASSERT_THAT(\n+ getsockname(bound_fd.get(), reinterpret_cast<sockaddr*>(&bound_addr),\n+ &bound_addr_len),\n+ SyscallSucceeds());\n+\n+ // Connect to bind an ephemeral port.\n+ const FileDescriptor connected_fd =\n+ ASSERT_NO_ERRNO_AND_VALUE(Socket(test_addr.family(), param.type, 0));\n+ ASSERT_THAT(setsockopt(connected_fd.get(), SOL_SOCKET, SO_REUSEADDR,\n+ &kSockOptOn, sizeof(kSockOptOn)),\n+ SyscallSucceeds());\n+ ASSERT_THAT(connect(connected_fd.get(),\n+ reinterpret_cast<sockaddr*>(&bound_addr), bound_addr_len),\n+ SyscallSucceeds());\n+\n+ // Get the ephemeral port.\n+ sockaddr_storage connected_addr = {};\n+ socklen_t connected_addr_len = sizeof(connected_addr);\n+ ASSERT_THAT(getsockname(connected_fd.get(),\n+ reinterpret_cast<sockaddr*>(&connected_addr),\n+ &connected_addr_len),\n+ SyscallSucceeds());\n+ uint16_t const ephemeral_port =\n+ ASSERT_NO_ERRNO_AND_VALUE(AddrPort(test_addr.family(), connected_addr));\n+\n+ // Verify that we actually got an ephemeral port.\n+ ASSERT_NE(ephemeral_port, 0);\n+\n+ // Verify that the ephemeral port is not reserved.\n+ const FileDescriptor checking_fd =\n+ ASSERT_NO_ERRNO_AND_VALUE(Socket(test_addr.family(), param.type, 0));\n+ ASSERT_THAT(setsockopt(checking_fd.get(), SOL_SOCKET, SO_REUSEADDR,\n+ &kSockOptOn, sizeof(kSockOptOn)),\n+ SyscallSucceeds());\n+ EXPECT_THAT(\n+ bind(checking_fd.get(), reinterpret_cast<sockaddr*>(&connected_addr),\n+ connected_addr_len),\n+ SyscallSucceeds());\n+}\n+\nTEST_P(SocketMultiProtocolInetLoopbackTest, V4EphemeralPortReserved) {\nauto const& param = GetParam();\n- // FIXME(b/114268588)\n+ 
// FIXME(b/76031995): Support disabling SO_REUSEADDR for TCP sockets and make\n+ // it disabled by default.\nSKIP_IF(IsRunningOnGvisor() && param.type == SOCK_STREAM);\nfor (int i = 0; true; i++) {\n@@ -1533,9 +1666,8 @@ TEST_P(SocketMultiProtocolInetLoopbackTest, V4EphemeralPortReserved) {\n// v6-only socket.\nconst FileDescriptor fd_v6_only_any = ASSERT_NO_ERRNO_AND_VALUE(\nSocket(test_addr_v6_any.family(), param.type, 0));\n- int one = 1;\nEXPECT_THAT(setsockopt(fd_v6_only_any.get(), IPPROTO_IPV6, IPV6_V6ONLY,\n- &one, sizeof(one)),\n+ &kSockOptOn, sizeof(kSockOptOn)),\nSyscallSucceeds());\nret =\nbind(fd_v6_only_any.get(), reinterpret_cast<sockaddr*>(&addr_v6_any),\n@@ -1554,6 +1686,75 @@ TEST_P(SocketMultiProtocolInetLoopbackTest, V4EphemeralPortReserved) {\n}\n}\n+TEST_P(SocketMultiProtocolInetLoopbackTest, V4EphemeralPortReservedReuseAddr) {\n+ auto const& param = GetParam();\n+\n+ // FIXME(b/129164367): Support SO_REUSEADDR on UDP sockets.\n+ SKIP_IF(IsRunningOnGvisor() && param.type == SOCK_DGRAM);\n+\n+ // Bind the v4 loopback on a v4 socket.\n+ TestAddress const& test_addr = V4Loopback();\n+ sockaddr_storage bound_addr = test_addr.addr;\n+ const FileDescriptor bound_fd =\n+ ASSERT_NO_ERRNO_AND_VALUE(Socket(test_addr.family(), param.type, 0));\n+\n+ ASSERT_THAT(setsockopt(bound_fd.get(), SOL_SOCKET, SO_REUSEADDR, &kSockOptOn,\n+ sizeof(kSockOptOn)),\n+ SyscallSucceeds());\n+\n+ ASSERT_THAT(bind(bound_fd.get(), reinterpret_cast<sockaddr*>(&bound_addr),\n+ test_addr.addr_len),\n+ SyscallSucceeds());\n+\n+ // Listen iff TCP.\n+ if (param.type == SOCK_STREAM) {\n+ ASSERT_THAT(listen(bound_fd.get(), SOMAXCONN), SyscallSucceeds());\n+ }\n+\n+ // Get the port that we bound.\n+ socklen_t bound_addr_len = test_addr.addr_len;\n+ ASSERT_THAT(\n+ getsockname(bound_fd.get(), reinterpret_cast<sockaddr*>(&bound_addr),\n+ &bound_addr_len),\n+ SyscallSucceeds());\n+\n+ // Connect to bind an ephemeral port.\n+ const FileDescriptor connected_fd =\n+ ASSERT_NO_ERRNO_AND_VALUE(Socket(test_addr.family(), param.type, 0));\n+\n+ ASSERT_THAT(setsockopt(connected_fd.get(), SOL_SOCKET, SO_REUSEADDR,\n+ &kSockOptOn, sizeof(kSockOptOn)),\n+ SyscallSucceeds());\n+\n+ ASSERT_THAT(connect(connected_fd.get(),\n+ reinterpret_cast<sockaddr*>(&bound_addr), bound_addr_len),\n+ SyscallSucceeds());\n+\n+ // Get the ephemeral port.\n+ sockaddr_storage connected_addr = {};\n+ socklen_t connected_addr_len = sizeof(connected_addr);\n+ ASSERT_THAT(getsockname(connected_fd.get(),\n+ reinterpret_cast<sockaddr*>(&connected_addr),\n+ &connected_addr_len),\n+ SyscallSucceeds());\n+ uint16_t const ephemeral_port =\n+ ASSERT_NO_ERRNO_AND_VALUE(AddrPort(test_addr.family(), connected_addr));\n+\n+ // Verify that we actually got an ephemeral port.\n+ ASSERT_NE(ephemeral_port, 0);\n+\n+ // Verify that the ephemeral port is not reserved.\n+ const FileDescriptor checking_fd =\n+ ASSERT_NO_ERRNO_AND_VALUE(Socket(test_addr.family(), param.type, 0));\n+ ASSERT_THAT(setsockopt(checking_fd.get(), SOL_SOCKET, SO_REUSEADDR,\n+ &kSockOptOn, sizeof(kSockOptOn)),\n+ SyscallSucceeds());\n+ EXPECT_THAT(\n+ bind(checking_fd.get(), reinterpret_cast<sockaddr*>(&connected_addr),\n+ connected_addr_len),\n+ SyscallSucceeds());\n+}\n+\nTEST_P(SocketMultiProtocolInetLoopbackTest, PortReuseTwoSockets) {\nauto const& param = GetParam();\nTestAddress const& test_addr = V4Loopback();\n"
}
] | Go | Apache License 2.0 | google/gvisor | Update ephemeral port reservation tests.
The existing tests which are disabled on gVisor are failing because we default
to SO_REUSEADDR being enabled for TCP sockets. Update the test comments.
Also add new tests for enabled SO_REUSEADDR.
PiperOrigin-RevId: 279862275 |
259,853 | 12.11.2019 12:00:16 | 28,800 | 548d65b2b6116beecb2aa782a0b5428fb20f89a0 | kokoro: correct a path to outputs.zip | [
{
"change_type": "MODIFY",
"old_path": "scripts/common_bazel.sh",
"new_path": "scripts/common_bazel.sh",
"diff": "@@ -75,7 +75,7 @@ function collect_logs() {\nfor d in `find -L \"bazel-testlogs\" -name 'shard_*_of_*' | xargs dirname | sort | uniq`; do\njunitparser merge `find $d -name test.xml` $d/test.xml\ncat $d/shard_*_of_*/test.log > $d/test.log\n- ls -l $d/shard_*_of_*/outputs.zip && zip -r -1 $d/outputs.zip $d/shard_*_of_*/outputs.zip\n+ ls -l $d/shard_*_of_*/test.outputs/outputs.zip && zip -r -1 $d/outputs.zip $d/shard_*_of_*/test.outputs/outputs.zip\ndone\nfind -L \"bazel-testlogs\" -name 'shard_*_of_*' | xargs rm -rf\n# Move test logs to Kokoro directory. tar is used to conveniently perform\n"
}
] | Go | Apache License 2.0 | google/gvisor | kokoro: correct a path to outputs.zip
PiperOrigin-RevId: 280021914 |
259,853 | 12.11.2019 15:58:41 | 28,800 | ca9cba66d2062811db9fa2b89a610f8eaa13fe99 | seccomp: introduce the GreaterThan rule type | [
{
"change_type": "MODIFY",
"old_path": "pkg/seccomp/seccomp.go",
"new_path": "pkg/seccomp/seccomp.go",
"diff": "@@ -199,6 +199,10 @@ func ruleViolationLabel(ruleSetIdx int, sysno uintptr, idx int) string {\nreturn fmt.Sprintf(\"ruleViolation_%v_%v_%v\", ruleSetIdx, sysno, idx)\n}\n+func ruleLabel(ruleSetIdx int, sysno uintptr, idx int, name string) string {\n+ return fmt.Sprintf(\"rule_%v_%v_%v_%v\", ruleSetIdx, sysno, idx, name)\n+}\n+\nfunc checkArgsLabel(sysno uintptr) string {\nreturn fmt.Sprintf(\"checkArgs_%v\", sysno)\n}\n@@ -223,6 +227,19 @@ func addSyscallArgsCheck(p *bpf.ProgramBuilder, rules []Rule, action linux.BPFAc\np.AddStmt(bpf.Ld|bpf.Abs|bpf.W, seccompDataOffsetArgHigh(i))\np.AddJumpFalseLabel(bpf.Jmp|bpf.Jeq|bpf.K, high, 0, ruleViolationLabel(ruleSetIdx, sysno, ruleidx))\nlabelled = true\n+ case GreaterThan:\n+ labelGood := fmt.Sprintf(\"gt%v\", i)\n+ high, low := uint32(a>>32), uint32(a)\n+ // assert arg_high < high\n+ p.AddStmt(bpf.Ld|bpf.Abs|bpf.W, seccompDataOffsetArgHigh(i))\n+ p.AddJumpFalseLabel(bpf.Jmp|bpf.Jge|bpf.K, high, 0, ruleViolationLabel(ruleSetIdx, sysno, ruleidx))\n+ // arg_high > high\n+ p.AddJumpFalseLabel(bpf.Jmp|bpf.Jeq|bpf.K, high, 0, ruleLabel(ruleSetIdx, sysno, ruleidx, labelGood))\n+ // arg_low < low\n+ p.AddStmt(bpf.Ld|bpf.Abs|bpf.W, seccompDataOffsetArgLow(i))\n+ p.AddJumpFalseLabel(bpf.Jmp|bpf.Jgt|bpf.K, low, 0, ruleViolationLabel(ruleSetIdx, sysno, ruleidx))\n+ p.AddLabel(ruleLabel(ruleSetIdx, sysno, ruleidx, labelGood))\n+ labelled = true\ndefault:\nreturn fmt.Errorf(\"unknown syscall rule type: %v\", reflect.TypeOf(a))\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/seccomp/seccomp_rules.go",
"new_path": "pkg/seccomp/seccomp_rules.go",
"diff": "@@ -49,6 +49,9 @@ func (a AllowAny) String() (s string) {\n// AllowValue specifies a value that needs to be strictly matched.\ntype AllowValue uintptr\n+// GreaterThan specifies a value that needs to be strictly smaller.\n+type GreaterThan uintptr\n+\nfunc (a AllowValue) String() (s string) {\nreturn fmt.Sprintf(\"%#x \", uintptr(a))\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/seccomp/seccomp_test.go",
"new_path": "pkg/seccomp/seccomp_test.go",
"diff": "@@ -340,6 +340,54 @@ func TestBasic(t *testing.T) {\n},\n},\n},\n+ {\n+ ruleSets: []RuleSet{\n+ {\n+ Rules: SyscallRules{\n+ 1: []Rule{\n+ {\n+ GreaterThan(0xf),\n+ GreaterThan(0xabcd000d),\n+ },\n+ },\n+ },\n+ Action: linux.SECCOMP_RET_ALLOW,\n+ },\n+ },\n+ defaultAction: linux.SECCOMP_RET_TRAP,\n+ specs: []spec{\n+ {\n+ desc: \"GreaterThan: Syscall argument allowed\",\n+ data: seccompData{nr: 1, arch: linux.AUDIT_ARCH_X86_64, args: [6]uint64{0x10, 0xffffffff}},\n+ want: linux.SECCOMP_RET_ALLOW,\n+ },\n+ {\n+ desc: \"GreaterThan: Syscall argument disallowed (equal)\",\n+ data: seccompData{nr: 1, arch: linux.AUDIT_ARCH_X86_64, args: [6]uint64{0xf, 0xffffffff}},\n+ want: linux.SECCOMP_RET_TRAP,\n+ },\n+ {\n+ desc: \"Syscall argument disallowed (smaller)\",\n+ data: seccompData{nr: 1, arch: linux.AUDIT_ARCH_X86_64, args: [6]uint64{0x0, 0xffffffff}},\n+ want: linux.SECCOMP_RET_TRAP,\n+ },\n+ {\n+ desc: \"GreaterThan2: Syscall argument allowed\",\n+ data: seccompData{nr: 1, arch: linux.AUDIT_ARCH_X86_64, args: [6]uint64{0x10, 0xfbcd000d}},\n+ want: linux.SECCOMP_RET_ALLOW,\n+ },\n+ {\n+ desc: \"GreaterThan2: Syscall argument disallowed (equal)\",\n+ data: seccompData{nr: 1, arch: linux.AUDIT_ARCH_X86_64, args: [6]uint64{0x10, 0xabcd000d}},\n+ want: linux.SECCOMP_RET_TRAP,\n+ },\n+ {\n+ desc: \"GreaterThan2: Syscall argument disallowed (smaller)\",\n+ data: seccompData{nr: 1, arch: linux.AUDIT_ARCH_X86_64, args: [6]uint64{0x10, 0xa000ffff}},\n+ want: linux.SECCOMP_RET_TRAP,\n+ },\n+ },\n+ },\n} {\ninstrs, err := BuildProgram(test.ruleSets, test.defaultAction)\nif err != nil {\n"
}
] | Go | Apache License 2.0 | google/gvisor | seccomp: introduce the GreaterThan rule type
PiperOrigin-RevId: 280075805 |
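Based on the rules and tests in the diff above, a caller would plug the new matcher into a rule set roughly as follows. This is only a sketch: the syscall, argument position, and threshold are hypothetical examples rather than rules from gVisor's real filters, and it assumes the RuleSet/BuildProgram API exercised by seccomp_test.go. Per the tests, GreaterThan matches when the argument is strictly greater than the given value, comparing the high and low 32-bit halves separately as the generated BPF shows.

```go
// Hedged usage sketch for seccomp.GreaterThan; the mmap length threshold
// below is an invented example, not a rule from gVisor's filters.
package main

import (
	"fmt"
	"syscall"

	"gvisor.dev/gvisor/pkg/abi/linux"
	"gvisor.dev/gvisor/pkg/seccomp"
)

func main() {
	rules := seccomp.SyscallRules{
		syscall.SYS_MMAP: []seccomp.Rule{
			{
				seccomp.AllowAny{},         // arg0 (addr): unconstrained
				seccomp.GreaterThan(0xfff), // arg1 (length): must be strictly > 0xfff
			},
		},
	}

	instrs, err := seccomp.BuildProgram([]seccomp.RuleSet{
		{Rules: rules, Action: linux.SECCOMP_RET_ALLOW},
	}, linux.SECCOMP_RET_TRAP)
	if err != nil {
		panic(err)
	}
	fmt.Printf("built %d BPF instructions\n", len(instrs))
}
```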
259,974 | 11.11.2019 08:20:18 | 0 | 05871a1cdc73e98df58f56841be23a4eac27225c | Enable runsc/boot support on arm64.
This patch also includes a minor change to replace syscall.Dup2
with syscall.Dup3 which was missed in a previous commit(ref a25a976). | [
{
"change_type": "MODIFY",
"old_path": "pkg/seccomp/seccomp_test_victim.go",
"new_path": "pkg/seccomp/seccomp_test_victim.go",
"diff": "@@ -38,7 +38,7 @@ func main() {\nsyscall.SYS_CLONE: {},\nsyscall.SYS_CLOSE: {},\nsyscall.SYS_DUP: {},\n- syscall.SYS_DUP2: {},\n+ syscall.SYS_DUP3: {},\nsyscall.SYS_EPOLL_CREATE1: {},\nsyscall.SYS_EPOLL_CTL: {},\nsyscall.SYS_EPOLL_WAIT: {},\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/gofer/inode.go",
"new_path": "pkg/sentry/fs/gofer/inode.go",
"diff": "@@ -268,7 +268,7 @@ func (i *inodeFileState) recreateReadHandles(ctx context.Context, writer *handle\n// operations on the old will see the new data. Then, make the new handle take\n// ownereship of the old FD and mark the old readHandle to not close the FD\n// when done.\n- if err := syscall.Dup2(h.Host.FD(), i.readHandles.Host.FD()); err != nil {\n+ if err := syscall.Dup3(h.Host.FD(), i.readHandles.Host.FD(), 0); err != nil {\nreturn err\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/boot/BUILD",
"new_path": "runsc/boot/BUILD",
"diff": "@@ -15,6 +15,8 @@ go_library(\n\"fs.go\",\n\"limits.go\",\n\"loader.go\",\n+ \"loader_amd64.go\",\n+ \"loader_arm64.go\",\n\"network.go\",\n\"pprof.go\",\n\"strace.go\",\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/boot/filter/BUILD",
"new_path": "runsc/boot/filter/BUILD",
"diff": "@@ -6,6 +6,8 @@ go_library(\nname = \"filter\",\nsrcs = [\n\"config.go\",\n+ \"config_amd64.go\",\n+ \"config_arm64.go\",\n\"extra_filters.go\",\n\"extra_filters_msan.go\",\n\"extra_filters_race.go\",\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/boot/filter/config.go",
"new_path": "runsc/boot/filter/config.go",
"diff": "@@ -26,10 +26,6 @@ import (\n// allowedSyscalls is the set of syscalls executed by the Sentry to the host OS.\nvar allowedSyscalls = seccomp.SyscallRules{\n- syscall.SYS_ARCH_PRCTL: []seccomp.Rule{\n- {seccomp.AllowValue(linux.ARCH_GET_FS)},\n- {seccomp.AllowValue(linux.ARCH_SET_FS)},\n- },\nsyscall.SYS_CLOCK_GETTIME: {},\nsyscall.SYS_CLONE: []seccomp.Rule{\n{\n@@ -44,7 +40,7 @@ var allowedSyscalls = seccomp.SyscallRules{\n},\nsyscall.SYS_CLOSE: {},\nsyscall.SYS_DUP: {},\n- syscall.SYS_DUP2: {},\n+ syscall.SYS_DUP3: {},\nsyscall.SYS_EPOLL_CREATE1: {},\nsyscall.SYS_EPOLL_CTL: {},\nsyscall.SYS_EPOLL_PWAIT: []seccomp.Rule{\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "runsc/boot/filter/config_amd64.go",
"diff": "+// Copyright 2019 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+// +build amd64\n+\n+package filter\n+\n+import (\n+ \"syscall\"\n+\n+ \"gvisor.dev/gvisor/pkg/abi/linux\"\n+ \"gvisor.dev/gvisor/pkg/seccomp\"\n+)\n+\n+func init() {\n+ allowedSyscalls[syscall.SYS_ARCH_PRCTL] = []seccomp.Rule{\n+ {seccomp.AllowValue(linux.ARCH_GET_FS)},\n+ {seccomp.AllowValue(linux.ARCH_SET_FS)},\n+ }\n+}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "runsc/boot/filter/config_arm64.go",
"diff": "+// Copyright 2019 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+// +build arm64\n+\n+package filter\n+\n+// Reserve for future customization.\n+func init() {\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/boot/loader.go",
"new_path": "runsc/boot/loader.go",
"diff": "@@ -43,7 +43,6 @@ import (\n\"gvisor.dev/gvisor/pkg/sentry/pgalloc\"\n\"gvisor.dev/gvisor/pkg/sentry/platform\"\n\"gvisor.dev/gvisor/pkg/sentry/sighandling\"\n- slinux \"gvisor.dev/gvisor/pkg/sentry/syscalls/linux\"\n\"gvisor.dev/gvisor/pkg/sentry/time\"\n\"gvisor.dev/gvisor/pkg/sentry/usage\"\n\"gvisor.dev/gvisor/pkg/sentry/watchdog\"\n@@ -147,9 +146,6 @@ type execProcess struct {\nfunc init() {\n// Initialize the random number generator.\nmrand.Seed(gtime.Now().UnixNano())\n-\n- // Register the global syscall table.\n- kernel.RegisterSyscallTable(slinux.AMD64)\n}\n// Args are the arguments for New().\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "runsc/boot/loader_amd64.go",
"diff": "+// Copyright 2019 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+// +build amd64\n+\n+// Package boot loads the kernel and runs a container.\n+package boot\n+\n+import (\n+ \"gvisor.dev/gvisor/pkg/sentry/kernel\"\n+ \"gvisor.dev/gvisor/pkg/sentry/syscalls/linux\"\n+)\n+\n+func init() {\n+ // Register the global syscall table.\n+ kernel.RegisterSyscallTable(linux.AMD64)\n+}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "runsc/boot/loader_arm64.go",
"diff": "+// Copyright 2019 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+// +build arm64\n+\n+// Package boot loads the kernel and runs a container.\n+package boot\n+\n+import (\n+ \"gvisor.dev/gvisor/pkg/sentry/kernel\"\n+ \"gvisor.dev/gvisor/pkg/sentry/syscalls/linux\"\n+)\n+\n+func init() {\n+ // Register the global syscall table.\n+ kernel.RegisterSyscallTable(linux.ARM64)\n+}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Enable runsc/boot support on arm64.
This patch also includes a minor change to replace syscall.Dup2
with syscall.Dup3 which was missed in a previous commit(ref a25a976).
Signed-off-by: Haibo Xu <[email protected]>
Change-Id: I00beb9cc492e44c762ebaa3750201c63c1f7c2f3 |
259,974 | 11.11.2019 09:15:45 | 0 | c5d9b5b8816e99507661e1d39ec51033fb69e212 | Enable sentry/fs/host support on arm64.
newfstatat() syscall is not supported on arm64, so we resort
to using the fstatat() syscall. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/host/BUILD",
"new_path": "pkg/sentry/fs/host/BUILD",
"diff": "@@ -21,6 +21,8 @@ go_library(\n\"socket_unsafe.go\",\n\"tty.go\",\n\"util.go\",\n+ \"util_amd64_unsafe.go\",\n+ \"util_arm64_unsafe.go\",\n\"util_unsafe.go\",\n],\nimportpath = \"gvisor.dev/gvisor/pkg/sentry/fs/host\",\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/host/util.go",
"new_path": "pkg/sentry/fs/host/util.go",
"diff": "@@ -155,7 +155,7 @@ func unstableAttr(mo *superOperations, s *syscall.Stat_t) fs.UnstableAttr {\nAccessTime: ktime.FromUnix(s.Atim.Sec, s.Atim.Nsec),\nModificationTime: ktime.FromUnix(s.Mtim.Sec, s.Mtim.Nsec),\nStatusChangeTime: ktime.FromUnix(s.Ctim.Sec, s.Ctim.Nsec),\n- Links: s.Nlink,\n+ Links: uint64(s.Nlink),\n}\n}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "pkg/sentry/fs/host/util_amd64_unsafe.go",
"diff": "+// Copyright 2019 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+// +build amd64\n+\n+package host\n+\n+import (\n+ \"syscall\"\n+ \"unsafe\"\n+)\n+\n+func fstatat(fd int, name string, flags int) (syscall.Stat_t, error) {\n+ var stat syscall.Stat_t\n+ namePtr, err := syscall.BytePtrFromString(name)\n+ if err != nil {\n+ return stat, err\n+ }\n+ _, _, errno := syscall.Syscall6(\n+ syscall.SYS_NEWFSTATAT,\n+ uintptr(fd),\n+ uintptr(unsafe.Pointer(namePtr)),\n+ uintptr(unsafe.Pointer(&stat)),\n+ uintptr(flags),\n+ 0, 0)\n+ if errno != 0 {\n+ return stat, errno\n+ }\n+ return stat, nil\n+}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "pkg/sentry/fs/host/util_arm64_unsafe.go",
"diff": "+// Copyright 2019 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+// +build arm64\n+\n+package host\n+\n+import (\n+ \"syscall\"\n+ \"unsafe\"\n+)\n+\n+func fstatat(fd int, name string, flags int) (syscall.Stat_t, error) {\n+ var stat syscall.Stat_t\n+ namePtr, err := syscall.BytePtrFromString(name)\n+ if err != nil {\n+ return stat, err\n+ }\n+ _, _, errno := syscall.Syscall6(\n+ syscall.SYS_FSTATAT,\n+ uintptr(fd),\n+ uintptr(unsafe.Pointer(namePtr)),\n+ uintptr(unsafe.Pointer(&stat)),\n+ uintptr(flags),\n+ 0, 0)\n+ if errno != 0 {\n+ return stat, errno\n+ }\n+ return stat, nil\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/host/util_unsafe.go",
"new_path": "pkg/sentry/fs/host/util_unsafe.go",
"diff": "@@ -116,22 +116,3 @@ func setTimestamps(fd int, ts fs.TimeSpec) error {\n}\nreturn nil\n}\n-\n-func fstatat(fd int, name string, flags int) (syscall.Stat_t, error) {\n- var stat syscall.Stat_t\n- namePtr, err := syscall.BytePtrFromString(name)\n- if err != nil {\n- return stat, err\n- }\n- _, _, errno := syscall.Syscall6(\n- syscall.SYS_NEWFSTATAT,\n- uintptr(fd),\n- uintptr(unsafe.Pointer(namePtr)),\n- uintptr(unsafe.Pointer(&stat)),\n- uintptr(flags),\n- 0, 0)\n- if errno != 0 {\n- return stat, errno\n- }\n- return stat, nil\n-}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Enable sentry/fs/host support on arm64.
newfstatat() syscall is not supported on arm64, so we resort
to using the fstatat() syscall.
Signed-off-by: Haibo Xu <[email protected]>
Change-Id: Iea95550ea53bcf85c01f7b3b95da70ad0952177d |
259,974 | 11.11.2019 09:42:04 | 0 | 1d8b7292d72ce93d465e4ded19237fb92c08bc56 | Fix some build errors on arm64.
Initialize the VDSO "os" and "arch" fields explicitly,
or the VDSO load process would fail on arm64 platform. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/ptrace_arm64.go",
"new_path": "pkg/sentry/kernel/ptrace_arm64.go",
"diff": "package kernel\nimport (\n- \"gvisor.dev/gvisor/pkg/abi/linux\"\n\"gvisor.dev/gvisor/pkg/sentry/usermem\"\n\"gvisor.dev/gvisor/pkg/syserror\"\n)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/loader/vdso.go",
"new_path": "pkg/sentry/loader/vdso.go",
"diff": "@@ -268,6 +268,8 @@ func PrepareVDSO(ctx context.Context, mfp pgalloc.MemoryFileProvider) (*VDSO, er\n// some applications may not be able to handle multiple [vdso]\n// hints.\nvdso: mm.NewSpecialMappable(\"\", mfp, vdso),\n+ os: info.os,\n+ arch: info.arch,\nphdrs: info.phdrs,\n}, nil\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Fix some build errors on arm64.
Initialize the VDSO "os" and "arch" fields explicitly,
or the VDSO load process would fail on arm64 platform.
Signed-off-by: Haibo Xu <[email protected]>
Change-Id: Ic6768df88e43cd7c7956eb630511672ae11ac52f |