| patch (string, length 17–31.2k) | y (int64, 1–1) | oldf (string, length 0–2.21M) | idx (int64, 1–1) | id (int64, 4.29k–68.4k) | msg (string, length 8–843) | proj (string, 212 classes) | lang (string, 9 classes) |
---|---|---|---|---|---|---|---|
@@ -227,8 +227,13 @@ AX_CODE_COVERAGE
AC_ARG_WITH([flux-security], AS_HELP_STRING([--with-flux-security],
[Build with flux-security]))
AS_IF([test "x$with_flux_security" = "xyes"], [
- PKG_CHECK_MODULES([FLUX_SECURITY], [flux-security], [], [])
+ PKG_CHECK_MODULES([FLUX_SECURITY], [flux-security],
+ [flux_sec_incdir=`$PKG_CONFIG --variable=includedir flux-security`],
+ [flux_sec_incdir=;])
+ AS_IF([test "x$flux_sec_incdir" = x],
+ [AC_MSG_ERROR([couldn't find flux-security or include directory])])
AC_DEFINE([HAVE_FLUX_SECURITY], [1], [Define flux-security is available])
+ AC_SUBST(FLUX_SECURITY_INCDIR, $flux_sec_incdir)
])
AM_CONDITIONAL([HAVE_FLUX_SECURITY], [test "x$with_flux_security" = "xyes"])
| 1 | ##
# Prologue
##
AC_INIT([flux-core],
m4_esyscmd([git describe --always | awk '/.*/ {sub(/^v/, ""); printf "%s",$1; exit}']))
AC_CONFIG_AUX_DIR([config])
AC_CONFIG_MACRO_DIR([config])
AC_CONFIG_SRCDIR([NEWS])
AC_CANONICAL_SYSTEM
##
# If runstatedir not explicitly set on command line, use '/run' as default
# N.B. runstatedir is not set at all in autoconf < 2.70.
##
if test "$runstatedir" = '${localstatedir}/run' || test -z "$runstatedir"; then
AC_SUBST([runstatedir],[/run])
fi
X_AC_EXPAND_INSTALL_DIRS
##
# Automake support
##
AM_INIT_AUTOMAKE([subdir-objects tar-pax])
AM_SILENT_RULES([yes])
AM_CONFIG_HEADER([config/config.h])
AM_MAINTAINER_MODE
AC_DEFINE([_GNU_SOURCE], 1,
[Define _GNU_SOURCE so that we get all necessary prototypes])
##
# Initialize pkg-config for PKG_CHECK_MODULES to avoid conditional issues
##
PKG_PROG_PKG_CONFIG
##
# Checks for programs
##
AC_PROG_CC_C99
AM_PROG_CC_C_O
AX_COMPILER_VENDOR
AS_CASE($ax_cv_c_compiler_vendor,
[clang | gnu], [
WARNING_CFLAGS="-Wall -Werror -Werror=missing-field-initializers -Wno-strict-aliasing -Wno-error=deprecated-declarations"
AC_SUBST([WARNING_CFLAGS])
]
)
AC_PROG_CXX
# Check compiler vendor for c++, need to temporarily update AC_LANG
AC_LANG_PUSH([C++])
AX_COMPILER_VENDOR
AC_LANG_POP
AS_CASE($ax_cv_cxx_compiler_vendor,
[clang | gnu], [
WARNING_CXXFLAGS=$WARNING_CFLAGS
AC_SUBST([WARNING_CXXFLAGS])
]
)
AX_CXX_COMPILE_STDCXX([11], [noext], [mandatory])
X_AC_ENABLE_SANITIZER
LT_INIT
AC_PROG_AWK
AC_ARG_ENABLE([docs],
AS_HELP_STRING([--disable-docs], [disable building docs]))
AS_IF([test "x$enable_docs" != "xno"], [
AC_CHECK_PROGS(ADOC, [a2x asciidoctor])
AS_IF([test "$ADOC" == "a2x"], [
ADOC_FORMAT_OPT="--format"
AC_SUBST([ADOC_FORMAT_OPT])
])
AS_IF([test "$ADOC" == "asciidoctor"], [
ADOC_FORMAT_OPT="--backend"
AC_SUBST([ADOC_FORMAT_OPT])
])
])
AM_CONDITIONAL([ENABLE_DOCS], [test -n "$ADOC"])
AC_CHECK_PROG(ASPELL,[aspell],[aspell])
##
# Checks for header files.
##
AC_HEADER_STDC
AC_CHECK_HEADERS( \
pthread.h \
getopt.h \
fcntl.h \
limits.h \
strings.h \
syslog.h \
unistd.h \
[sys/cdefs.h] \
[sys/param.h] \
stdarg.h \
locale.h \
xlocale.h \
endian.h \
inttypes.h \
)
##
# Checks for typedefs, structures, and compiler characteristics
##
AC_C_BIGENDIAN
AC_C_CONST
AC_TYPE_SIZE_T
AX_COMPILE_CHECK_SIZEOF(int)
AX_COMPILE_CHECK_SIZEOF(long)
AX_COMPILE_CHECK_SIZEOF(long long)
AX_COMPILE_CHECK_SIZEOF(uintptr_t, [#include <stdint.h>])
AX_COMPILE_CHECK_SIZEOF(size_t, [#include <stdint.h>])
##
# Checks for library functions
##
AC_CHECK_FUNCS( \
getopt_long \
vsnprintf \
vsscanf \
realloc \
strcasecmp \
strdup \
strerror \
snprintf \
vsnprintf \
vasprintf \
open \
vsyslog \
strncasecmp \
setlocale \
uselocale \
)
X_AC_CHECK_PTHREADS
X_AC_CHECK_COND_LIB(util, forkpty)
X_AC_CHECK_COND_LIB(rt, clock_gettime)
X_AC_CHECK_COND_LIB(dl, dlerror)
X_AC_MALLOC
AC_CHECK_LIB(m, floor)
AC_MSG_CHECKING(--enable-python argument)
AC_ARG_ENABLE(python,
[ --enable-python[=OPTS] Include Python bindings. [default=yes] [OPTS=no/yes]], ,
[enable_python="yes"])
AC_MSG_RESULT($enable_python)
if test "$enable_python" = "yes"; then
AX_PYTHON_DEVEL([>='2.7'])
AM_PATH_PYTHON([$ac_python_version])
if test "X$PYTHON" != "X"; then
# Flag for PYTHON_LDFLAGS workaround below.
if test -n "$PYTHON_LDFLAGS"; then
ac_python_ldflags_set_by_user=true
fi
AM_CHECK_PYMOD(cffi,
[cffi.__version_info__ >= (1,1)],
,
[AC_MSG_ERROR([could not find python module cffi, version 1.1+ required])]
)
AM_CHECK_PYMOD(six,
[StrictVersion(six.__version__) >= StrictVersion('1.9.0')],
,
[AC_MSG_ERROR([could not find python module six, version 1.9.0+ required])]
)
# Remove -L<path> from PYTHON_LDFLAGS if it is in a standard path
# (e.g. /usr/lib64). Placing a standard path earlier in the linker
# search can lead to linking problems.
#
# Logic below assumes only newer Python versions, protected by
    # above check for at least Python 2.7.
if test "$ac_python_ldflags_set_by_user" != "true"; then
AC_CHECK_LIB([$ac_python_library], [PyArg_ParseTuple],
[ac_python_in_ld_path=true])
if test "$ac_python_in_ld_path" = "true"; then
AC_MSG_NOTICE([Removing -L$ac_python_libdir from PYTHON_LDFLAGS])
PYTHON_LDFLAGS="-l$ac_python_library"
fi
fi
python_ok=yes
fi
AS_VAR_SET(fluxpydir, $pyexecdir/flux)
AC_SUBST(fluxpydir)
AS_VAR_SET(fluxsodir, $pyexecdir/_flux)
AC_SUBST(fluxsodir)
AS_VAR_SET(fluxpymoddir, $pyexecdir/flux/modules)
AC_SUBST(fluxpymoddir)
if test "$python_ok" != "yes"; then
AC_MSG_ERROR([could not configure python])
fi
fi
AM_CONDITIONAL([HAVE_PYTHON], [test "$enable_python" = yes])
AC_ARG_ENABLE([pylint],
[AS_HELP_STRING([--enable-pylint],
[Enable pylint checks of python bindings])],,
[enable_pylint="no"]
)
AS_IF([test "x$enable_pylint" = "xyes"], [
AC_CHECK_PROG(PYLINT,[pylint],[pylint])
AS_IF([test "x$PYLINT" != "xpylint"], [AC_MSG_ERROR([No pylint found in PATH])])
AM_CHECK_PYMOD(pylint,
[StrictVersion(pylint.__version__) >= StrictVersion('1.4.5')],
,
[AC_MSG_ERROR([could not find python module pylint, version 1.4.5+ required])]
)
])
AM_CONDITIONAL([ENABLE_PYLINT], [test "x$PYLINT" = "xpylint"])
AX_PROG_LUA([5.1],[5.3])
AX_LUA_HEADERS
AX_LUA_LIBS
X_AC_ZEROMQ
X_AC_MUNGE
X_AC_JANSSON
X_AC_YAMLCPP
PKG_CHECK_MODULES([HWLOC], [hwloc >= 1.11.1], [], [])
PKG_CHECK_MODULES([SQLITE], [sqlite3], [], [])
LX_FIND_MPI
AM_CONDITIONAL([HAVE_MPI], [test "$have_C_mpi" = yes])
AX_VALGRIND_H
AX_CODE_COVERAGE
AC_ARG_WITH([flux-security], AS_HELP_STRING([--with-flux-security],
[Build with flux-security]))
AS_IF([test "x$with_flux_security" = "xyes"], [
PKG_CHECK_MODULES([FLUX_SECURITY], [flux-security], [], [])
AC_DEFINE([HAVE_FLUX_SECURITY], [1], [Define flux-security is available])
])
AM_CONDITIONAL([HAVE_FLUX_SECURITY], [test "x$with_flux_security" = "xyes"])
AC_ARG_ENABLE(caliper,
[ --enable-caliper[=OPTS] Use caliper for profiling. [default=no] [OPTS=no/yes]], ,
[enable_caliper="no"])
if test "$enable_caliper" = "yes"; then
PKG_CHECK_MODULES([CALIPER], [caliper], [], [])
CFLAGS="${CFLAGS} ${CALIPER_CFLAGS} "
# Do not use CALIPER_LIBS, only link to libcaliper-stub
LIBS="${LIBS} $(pkg-config --libs-only-L caliper) -lcaliper-stub -lrt "
AC_DEFINE([HAVE_CALIPER], [1], [Define if you have libcaliper])
fi
##
# Check for systemd
##
RRA_WITH_SYSTEMD_UNITDIR
##
# Embedded libev
##
m4_include([src/common/libev/libev.m4])
AC_PKGCONFIG
##
# Project directories
##
AS_VAR_SET(fluxrcdir, $sysconfdir/flux)
AC_SUBST(fluxrcdir)
AS_VAR_SET(fluxrc1dir, $sysconfdir/flux/rc1.d)
AC_SUBST(fluxrc1dir)
AS_VAR_SET(fluxrc3dir, $sysconfdir/flux/rc3.d)
AC_SUBST(fluxrc3dir)
AS_VAR_SET(fluxcfdir, $sysconfdir/flux/conf.d)
AC_SUBST(fluxcfdir)
AS_VAR_SET(fluxlibexecdir, $libexecdir/flux)
AC_SUBST(fluxlibexecdir)
AS_VAR_SET(fluxcmddir, $libexecdir/flux/cmd)
AC_SUBST(fluxcmddir)
AS_VAR_SET(fluxlibdir, $libdir/flux)
AC_SUBST(fluxlibdir)
AS_VAR_SET(fluxmoddir, $libdir/flux/modules)
AC_SUBST(fluxmoddir)
AS_VAR_SET(fluxconnectordir, $libdir/flux/connectors)
AC_SUBST(fluxconnectordir)
AS_VAR_SET(fluxincludedir, $includedir/flux)
AC_SUBST(fluxincludedir)
AS_VAR_SET(fluxcoreincludedir, $includedir/flux/core)
AC_SUBST(fluxcoreincludedir)
adl_RECURSIVE_EVAL([$bindir], fluxbindir)
AS_VAR_SET(fluxbindir, $fluxbindir)
AC_SUBST(fluxbindir)
adl_RECURSIVE_EVAL([$luadir], fluxluadir)
AS_VAR_SET(fluxluadir, $fluxluadir)
AC_SUBST(fluxluadir)
##
# Macros to avoid repetition in Makefile.am's
##
fluxmod_ldflags="$san_ld_zdef_flag -avoid-version -export-symbols-regex '^mod_(main|name|service)\$\$' --disable-static -shared -export-dynamic"
AC_SUBST(fluxmod_ldflags)
fluxlib_ldflags="-shared -export-dynamic --disable-static $san_ld_zdef_flag"
AC_SUBST(fluxlib_ldflags)
##
# Epilogue
##
AC_CONFIG_FILES( \
Makefile \
src/Makefile \
src/common/Makefile \
src/common/libtap/Makefile \
src/common/liblsd/Makefile \
src/common/libutil/Makefile \
src/common/libev/Makefile \
src/common/libminilzo/Makefile \
src/common/libpmi/Makefile \
src/common/libflux/Makefile \
src/common/libkvs/Makefile \
src/common/libkz/Makefile \
src/common/libjsc/Makefile \
src/common/libjob/Makefile \
src/common/libzio/Makefile \
src/common/libsubprocess/Makefile \
src/common/libcompat/Makefile \
src/common/liboptparse/Makefile \
src/common/libidset/Makefile \
src/common/libjobspec/Makefile \
src/common/libjobspec/flux-jobspec.pc \
src/common/libtomlc99/Makefile \
src/bindings/Makefile \
src/bindings/lua/Makefile \
src/bindings/python/Makefile \
src/bindings/python/flux/Makefile \
src/bindings/python/flux/core/Makefile \
src/bindings/python/_flux/Makefile \
src/broker/Makefile \
src/cmd/Makefile \
src/connectors/Makefile \
src/connectors/local/Makefile \
src/connectors/shmem/Makefile \
src/connectors/loop/Makefile \
src/connectors/ssh/Makefile \
src/modules/Makefile \
src/modules/connector-local/Makefile \
src/modules/kvs/Makefile \
src/modules/kvs-watch/Makefile \
src/modules/content-sqlite/Makefile \
src/modules/barrier/Makefile \
src/modules/wreck/Makefile \
src/modules/resource-hwloc/Makefile \
src/modules/cron/Makefile \
src/modules/aggregator/Makefile \
src/modules/pymod/Makefile \
src/modules/userdb/Makefile \
src/modules/job-ingest/Makefile \
src/test/Makefile \
etc/Makefile \
etc/flux-core.pc \
etc/flux-pmi.pc \
etc/flux-optparse.pc \
etc/flux-idset.pc \
etc/flux.service \
doc/Makefile \
doc/man1/Makefile \
doc/man3/Makefile \
doc/man7/Makefile \
doc/test/Makefile \
t/Makefile \
t/fluxometer/conf.lua \
t/fluxometer/conf.lua.installed \
)
AC_CONFIG_LINKS([ \
t/fluxometer.lua:t/fluxometer.lua \
])
AC_OUTPUT
AS_IF([test "x$enable_docs" != "xno"], [
if test -z "$ADOC"; then
AC_MSG_WARN([No asciidoc formatter found. Manual pages will not be generated.])
fi
])
| 1 | 21,568 | Should this line set the value to `x` since that is checked below? | flux-framework-flux-core | c |
@@ -138,6 +138,14 @@ public interface DataFile {
*/
DataFile copy();
+ /**
+ * Copies this {@link DataFile data file} without file stats. Manifest readers can reuse data file instances; use
+ * this method to copy data without stats when collecting files.
+ *
+ * @return a copy of this data file
+ */
+ DataFile slimCopy();
+
/**
* @return List of recommended split locations, if applicable, null otherwise.
* When available, this information is used for planning scan tasks whose boundaries | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg;
import java.nio.ByteBuffer;
import java.util.List;
import java.util.Map;
import org.apache.iceberg.types.Types.BinaryType;
import org.apache.iceberg.types.Types.IntegerType;
import org.apache.iceberg.types.Types.ListType;
import org.apache.iceberg.types.Types.LongType;
import org.apache.iceberg.types.Types.MapType;
import org.apache.iceberg.types.Types.StringType;
import org.apache.iceberg.types.Types.StructType;
import static org.apache.iceberg.types.Types.NestedField.optional;
import static org.apache.iceberg.types.Types.NestedField.required;
/**
* Interface for files listed in a table manifest.
*/
public interface DataFile {
static StructType getType(StructType partitionType) {
// IDs start at 100 to leave room for changes to ManifestEntry
return StructType.of(
required(100, "file_path", StringType.get()),
required(101, "file_format", StringType.get()),
required(102, "partition", partitionType),
required(103, "record_count", LongType.get()),
required(104, "file_size_in_bytes", LongType.get()),
required(105, "block_size_in_bytes", LongType.get()),
optional(106, "file_ordinal", IntegerType.get()),
optional(107, "sort_columns", ListType.ofRequired(112, IntegerType.get())),
optional(108, "column_sizes", MapType.ofRequired(117, 118,
IntegerType.get(), LongType.get())),
optional(109, "value_counts", MapType.ofRequired(119, 120,
IntegerType.get(), LongType.get())),
optional(110, "null_value_counts", MapType.ofRequired(121, 122,
IntegerType.get(), LongType.get())),
optional(125, "lower_bounds", MapType.ofRequired(126, 127,
IntegerType.get(), BinaryType.get())),
optional(128, "upper_bounds", MapType.ofRequired(129, 130,
IntegerType.get(), BinaryType.get())),
optional(131, "key_metadata", BinaryType.get()),
optional(132, "split_offsets", ListType.ofRequired(133, LongType.get()))
// NEXT ID TO ASSIGN: 134
);
}
/**
* @return fully qualified path to the file, suitable for constructing a Hadoop Path
*/
CharSequence path();
/**
* @return format of the data file
*/
FileFormat format();
/**
* @return partition data for this file as a {@link StructLike}
*/
StructLike partition();
/**
* @return the number of top-level records in the data file
*/
long recordCount();
/**
* @return the data file size in bytes
*/
long fileSizeInBytes();
/**
* @return file ordinal if written in a global ordering, or null
*/
Integer fileOrdinal();
/**
* @return list of columns the file records are sorted by, or null
*/
List<Integer> sortColumns();
/**
* @return if collected, map from column ID to the size of the column in bytes, null otherwise
*/
Map<Integer, Long> columnSizes();
/**
* @return if collected, map from column ID to the count of its non-null values, null otherwise
*/
Map<Integer, Long> valueCounts();
/**
* @return if collected, map from column ID to its null value count, null otherwise
*/
Map<Integer, Long> nullValueCounts();
/**
* @return if collected, map from column ID to value lower bounds, null otherwise
*/
Map<Integer, ByteBuffer> lowerBounds();
/**
* @return if collected, map from column ID to value upper bounds, null otherwise
*/
Map<Integer, ByteBuffer> upperBounds();
/**
* @return metadata about how this file is encrypted, or null if the file is stored in plain
* text.
*/
ByteBuffer keyMetadata();
/**
* Copies this {@link DataFile data file}. Manifest readers can reuse data file instances; use
* this method to copy data when collecting files from tasks.
*
* @return a copy of this data file
*/
DataFile copy();
/**
* @return List of recommended split locations, if applicable, null otherwise.
* When available, this information is used for planning scan tasks whose boundaries
* are determined by these offsets. The returned list must be sorted in ascending order.
*/
List<Long> splitOffsets();
}
| 1 | 13,732 | I don't particularly love the terminology here. Why not simply add `copy(boolean stats)` or a copy with an enum to indicate what portions of the datafile to include? At some point we may want just some of the values (e.g. CBO may want counts, but not lower/upper bounds). Just a thought. | apache-iceberg | java |
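As a rough illustration of the alternatives floated in this comment, here is a minimal Java sketch; the `CopyStat` enum, both `copy` overloads, and the `DataFileCopySketch` name are hypothetical and are not part of Iceberg's actual `DataFile` interface:

```java
// Hypothetical sketch only: Iceberg's DataFile does not define these members.
public interface DataFileCopySketch {

  /** Portions of collected file stats a caller may want to keep in a copy. */
  enum CopyStat {
    COUNTS,  // column_sizes, value_counts, null_value_counts
    BOUNDS   // lower_bounds, upper_bounds
  }

  /** Boolean flavor: copy(true) keeps all stats, copy(false) drops them. */
  DataFileCopySketch copy(boolean withStats);

  /**
   * Enum flavor: keep only the requested stats, e.g.
   * copy(EnumSet.of(CopyStat.COUNTS)) for a cost-based optimizer (CBO) that
   * wants counts but not lower/upper bounds.
   */
  DataFileCopySketch copy(java.util.Set<CopyStat> statsToKeep);
}
```

Either shape keeps a single method name while letting manifest readers reuse instances and callers opt into only the stats they need.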
@@ -220,11 +220,12 @@ public final class QueryRequest {
public Builder parseAnnotationQuery(String annotationQuery) {
if (annotationQuery != null && !annotationQuery.isEmpty()) {
for (String ann : annotationQuery.split(" and ")) {
- if (ann.indexOf('=') == -1) {
+ int idx = ann.indexOf('=');
+ if (idx == -1) {
addAnnotation(ann);
} else {
String[] keyValue = ann.split("=");
- addBinaryAnnotation(keyValue[0], keyValue.length < 2 ? "" :keyValue[1]);
+ addBinaryAnnotation(ann.substring(0, idx), keyValue.length < 2 ? "" : ann.substring(idx+1));
}
}
} | 1 | /**
* Copyright 2015-2016 The OpenZipkin Authors
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package zipkin.storage;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import zipkin.Span;
import zipkin.internal.Nullable;
import static zipkin.Constants.CORE_ANNOTATIONS;
import static zipkin.internal.Util.checkArgument;
/**
* Invoking this request retrieves traces matching the below filters.
*
* <p> Results should be filtered against {@link #endTs}, subject to {@link #limit} and {@link
* #lookback}. For example, if endTs is 10:20 today, limit is 10, and lookback is 7 days, traces
* returned should be those nearest to 10:20 today, not 10:20 a week ago.
*
* <p> Time units of {@link #endTs} and {@link #lookback} are milliseconds as opposed to
* microseconds, the grain of {@link Span#timestamp}. Milliseconds is a more familiar and supported
* granularity for query, index and windowing functions.
*/
public final class QueryRequest {
/**
* When present, corresponds to {@link zipkin.Endpoint#serviceName} and constrains all other
* parameters.
*/
@Nullable
public final String serviceName;
/** When present, only include traces with this {@link zipkin.Span#name} */
@Nullable
public final String spanName;
/**
* Include traces whose {@link zipkin.Span#annotations} include a value in this set.
*
* <p> This is an AND condition against the set, as well against {@link #binaryAnnotations}
*/
public final List<String> annotations;
/**
* Include traces whose {@link zipkin.Span#binaryAnnotations} include a String whose key and
* value are an entry in this set.
*
* <p> This is an AND condition against the set, as well against {@link #annotations}
*/
public final Map<String, String> binaryAnnotations;
/**
* Only return traces whose {@link zipkin.Span#duration} is greater than or equal to
* minDuration microseconds.
*/
@Nullable
public final Long minDuration;
/**
* Only return traces whose {@link zipkin.Span#duration} is less than or equal to maxDuration
* microseconds. Only valid with {@link #minDuration}.
*/
@Nullable
public final Long maxDuration;
/**
* Only return traces where all {@link zipkin.Span#timestamp} are at or before this time in
* epoch milliseconds. Defaults to current time.
*/
public final long endTs;
/**
* Only return traces where all {@link zipkin.Span#timestamp} are at or after (endTs -
* lookback) in milliseconds. Defaults to endTs.
*/
public final long lookback;
/** Maximum number of traces to return. Defaults to 10 */
public final int limit;
/**
* Corresponds to query parameter "annotationQuery". Ex. "http.method=GET and error"
*
* @see QueryRequest.Builder#parseAnnotationQuery(String)
*/
@Nullable
public String toAnnotationQuery() {
StringBuilder annotationQuery = new StringBuilder();
for (Iterator<Map.Entry<String, String>> i = binaryAnnotations.entrySet().iterator();
i.hasNext(); ) {
Map.Entry<String, String> next = i.next();
annotationQuery.append(next.getKey()).append('=').append(next.getValue());
if (i.hasNext() || !annotations.isEmpty()) annotationQuery.append(" and ");
}
for (Iterator<String> i = annotations.iterator(); i.hasNext(); ) {
annotationQuery.append(i.next());
if (i.hasNext()) annotationQuery.append(" and ");
}
return annotationQuery.length() > 0 ? annotationQuery.toString() : null;
}
QueryRequest(
String serviceName,
String spanName,
List<String> annotations,
Map<String, String> binaryAnnotations,
Long minDuration,
Long maxDuration,
long endTs,
long lookback,
int limit) {
checkArgument(serviceName == null || !serviceName.isEmpty(), "serviceName was empty");
checkArgument(spanName == null || !spanName.isEmpty(), "spanName was empty");
checkArgument(endTs > 0, "endTs should be positive, in epoch microseconds: was %d", endTs);
checkArgument(limit > 0, "limit should be positive: was %d", limit);
this.serviceName = serviceName != null? serviceName.toLowerCase() : null;
this.spanName = spanName != null ? spanName.toLowerCase() : null;
this.annotations = annotations;
for (String annotation : annotations) {
checkArgument(!annotation.isEmpty(), "annotation was empty");
checkArgument(!CORE_ANNOTATIONS.contains(annotation),
"queries cannot be refined by core annotations: %s", annotation);
}
this.binaryAnnotations = binaryAnnotations;
for (Map.Entry<String, String> entry : binaryAnnotations.entrySet()) {
checkArgument(!entry.getKey().isEmpty(), "binary annotation key was empty");
checkArgument(!entry.getValue().isEmpty(),
"binary annotation value for %s was empty", entry.getKey());
}
if (minDuration != null) {
checkArgument(minDuration > 0, "minDuration must be a positive number of microseconds");
this.minDuration = minDuration;
if (maxDuration != null) {
checkArgument(maxDuration >= minDuration, "maxDuration should be >= minDuration");
this.maxDuration = maxDuration;
} else {
this.maxDuration = null;
}
} else {
checkArgument(maxDuration == null, "maxDuration is only valid with minDuration");
this.minDuration = this.maxDuration = null;
}
this.endTs = endTs;
this.lookback = lookback;
this.limit = limit;
}
public Builder toBuilder() {
return new Builder(this);
}
public static Builder builder() {
return new Builder();
}
public static final class Builder {
private String serviceName;
private String spanName;
private List<String> annotations = new LinkedList<String>();
private Map<String, String> binaryAnnotations = new LinkedHashMap<String, String>();
private Long minDuration;
private Long maxDuration;
private Long endTs;
private Long lookback;
private Integer limit;
Builder(){
}
Builder(QueryRequest source) {
this.serviceName = source.serviceName;
this.spanName = source.spanName;
this.annotations = source.annotations;
this.binaryAnnotations = source.binaryAnnotations;
this.minDuration = source.minDuration;
this.maxDuration = source.maxDuration;
this.endTs = source.endTs;
this.lookback = source.lookback;
this.limit = source.limit;
}
/** @see QueryRequest#serviceName */
public Builder serviceName(@Nullable String serviceName) {
this.serviceName = serviceName;
return this;
}
/**
* This ignores the reserved span name "all".
*
* @see QueryRequest#spanName
*/
public Builder spanName(@Nullable String spanName) {
this.spanName = "all".equals(spanName) ? null : spanName;
return this;
}
/**
* Corresponds to query parameter "annotationQuery". Ex. "http.method=GET and error"
*
* @see QueryRequest#toAnnotationQuery()
*/
public Builder parseAnnotationQuery(String annotationQuery) {
if (annotationQuery != null && !annotationQuery.isEmpty()) {
for (String ann : annotationQuery.split(" and ")) {
if (ann.indexOf('=') == -1) {
addAnnotation(ann);
} else {
String[] keyValue = ann.split("=");
addBinaryAnnotation(keyValue[0], keyValue.length < 2 ? "" :keyValue[1]);
}
}
}
return this;
}
/** @see QueryRequest#annotations */
public Builder addAnnotation(String annotation) {
this.annotations.add(annotation);
return this;
}
/** @see QueryRequest#binaryAnnotations */
public Builder addBinaryAnnotation(String key, String value) {
this.binaryAnnotations.put(key, value);
return this;
}
/** @see QueryRequest#minDuration */
public Builder minDuration(Long minDuration) {
this.minDuration = minDuration;
return this;
}
/** @see QueryRequest#maxDuration */
public Builder maxDuration(Long maxDuration) {
this.maxDuration = maxDuration;
return this;
}
/** @see QueryRequest#endTs */
public Builder endTs(Long endTs) {
this.endTs = endTs;
return this;
}
/** @see QueryRequest#lookback */
public Builder lookback(Long lookback) {
this.lookback = lookback;
return this;
}
/** @see QueryRequest#limit */
public Builder limit(Integer limit) {
this.limit = limit;
return this;
}
public QueryRequest build() {
long selectedEndTs = endTs == null ? System.currentTimeMillis() : endTs;
return new QueryRequest(
serviceName,
spanName,
annotations,
binaryAnnotations,
minDuration,
maxDuration,
selectedEndTs,
Math.min(lookback == null ? selectedEndTs : lookback, selectedEndTs),
limit == null ? 10 : limit);
}
}
@Override
public String toString() {
return "QueryRequest{"
+ "serviceName=" + serviceName + ", "
+ "spanName=" + spanName + ", "
+ "annotations=" + annotations + ", "
+ "binaryAnnotations=" + binaryAnnotations + ", "
+ "minDuration=" + minDuration + ", "
+ "maxDuration=" + maxDuration + ", "
+ "endTs=" + endTs + ", "
+ "lookback=" + lookback + ", "
+ "limit=" + limit
+ "}";
}
@Override
public boolean equals(Object o) {
if (o == this) {
return true;
}
if (o instanceof QueryRequest) {
QueryRequest that = (QueryRequest) o;
return ((this.serviceName == null) ? (that.serviceName == null) : this.serviceName.equals(that.serviceName))
&& ((this.spanName == null) ? (that.spanName == null) : this.spanName.equals(that.spanName))
&& ((this.annotations == null) ? (that.annotations == null) : this.annotations.equals(that.annotations))
&& ((this.binaryAnnotations == null) ? (that.binaryAnnotations == null) : this.binaryAnnotations.equals(that.binaryAnnotations))
&& ((this.minDuration == null) ? (that.minDuration == null) : this.minDuration.equals(that.minDuration))
&& ((this.maxDuration == null) ? (that.maxDuration == null) : this.maxDuration.equals(that.maxDuration))
&& (this.endTs == that.endTs)
&& (this.lookback == that.lookback)
&& (this.limit == that.limit);
}
return false;
}
@Override
public int hashCode() {
int h = 1;
h *= 1000003;
h ^= (serviceName == null) ? 0 : serviceName.hashCode();
h *= 1000003;
h ^= (spanName == null) ? 0 : spanName.hashCode();
h *= 1000003;
h ^= (annotations == null) ? 0 : annotations.hashCode();
h *= 1000003;
h ^= (binaryAnnotations == null) ? 0 : binaryAnnotations.hashCode();
h *= 1000003;
h ^= (minDuration == null) ? 0 : minDuration.hashCode();
h *= 1000003;
h ^= (maxDuration == null) ? 0 : maxDuration.hashCode();
h *= 1000003;
h ^= (endTs >>> 32) ^ endTs;
h *= 1000003;
h ^= (lookback >>> 32) ^ lookback;
h *= 1000003;
h ^= limit;
return h;
}
}
| 1 | 11,853 | You could probably remove keyValue and just compare idx vs ann.length (e.g. I think the goal here is to ensure it works with a value like `foo=`). | openzipkin-zipkin | java |
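To illustrate, a minimal standalone sketch of that simplification (the `AnnotationQuerySketch` class and its map-returning `parse` helper are illustrative only; zipkin's real logic lives in `QueryRequest.Builder.parseAnnotationQuery` and feeds the builder instead). Note that `ann.substring(idx + 1)` on a trailing `=` such as `foo=` already yields an empty string, so the `split()`/`keyValue` array and the length comparison can both go away:

```java
import java.util.LinkedHashMap;
import java.util.Map;

// Standalone illustration; not zipkin code.
public class AnnotationQuerySketch {

  static Map<String, String> parse(String annotationQuery) {
    Map<String, String> parsed = new LinkedHashMap<>();
    for (String ann : annotationQuery.split(" and ")) {
      int idx = ann.indexOf('=');
      if (idx == -1) {
        parsed.put(ann, null); // plain annotation, e.g. "error"
      } else {
        // "foo=" -> key "foo", value "": substring handles the empty value case
        parsed.put(ann.substring(0, idx), ann.substring(idx + 1));
      }
    }
    return parsed;
  }

  public static void main(String[] args) {
    // prints {http.method=GET, error=null, foo=}
    System.out.println(parse("http.method=GET and error and foo="));
  }
}
```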
@@ -94,8 +94,8 @@ exports.getMimeType = (item) => {
}
exports.getId = (item) => {
- if (item.file_type && item.file_type === 'TIMELINE') {
- return `${encodeURIComponent(item.meeting_id)}__TIMELINE`
+ if (item.file_type && item.file_type === 'CC') {
+ return `${encodeURIComponent(item.meeting_id)}__CC__${encodeURIComponent(item.recording_start)}`
} else if (item.file_type) {
return `${encodeURIComponent(item.meeting_id)}__${encodeURIComponent(item.id)}`
} | 1 | const moment = require('moment')
const MIMETYPES = {
MP4: 'video/mp4',
M4A: 'audio/mp4',
CHAT: 'text/plain',
TRANSCRIPT: 'text/vtt',
CC: 'text/vtt',
TIMELINE: 'application/json'
}
const EXT = {
MP4: 'mp4',
M4A: 'm4a',
CHAT: 'txt',
TRANSCRIPT: 'vtt',
CC: 'vtt',
TIMELINE: 'json'
}
const ICONS = {
MP4: 'video',
M4A: 'file',
CHAT: 'file',
TRANSCRIPT: 'file',
CC: 'file',
FOLDER: 'folder',
TIMELINE: 'file'
}
exports.getDateName = (start, end) => {
return `${start.format('YYYY-MM-DD')} - ${end.format('YYYY-MM-DD')}`
}
exports.getAccountCreationDate = (results) => {
return moment(results.created_at)
}
exports.getUserEmail = (results) => {
return results.email
}
exports.getDateFolderId = (start, end) => {
return `${start.format('YYYY-MM-DD')}_${end.format('YYYY-MM-DD')}`
}
exports.getDateFolderRequestPath = (start, end) => {
return `?from=${start.format('YYYY-MM-DD')}&to=${end.format('YYYY-MM-DD')}`
}
exports.getDateFolderModified = (end) => {
return end.format('YYYY-MM-DD')
}
exports.getDateNextPagePath = (start) => {
return `?cursor=${start.subtract(1, 'days').format('YYYY-MM-DD')}`
}
exports.getNextPagePath = (results) => {
if (results.next_page_token) {
return `?cursor=${results.next_page_token}&from=${results.from}&to=${results.to}`
}
return null
}
// we rely on the file_type attribute to differentiate a recording file from other items
exports.getIsFolder = (item) => {
return !item.file_type
}
exports.getItemName = (item) => {
const start = moment(item.start_time || item.recording_start)
.clone()
.format('YYYY-MM-DD, kk:mm')
if (item.file_type) {
const ext = EXT[item.file_type] ? `.${EXT[item.file_type]}` : ''
const itemType = item.recording_type ? ` - ${item.recording_type.split('_').join(' ')}` : ''
return `${start}${itemType} (${item.file_type.toLowerCase()})${ext}`
}
return `${item.topic} (${start})`
}
exports.getIcon = (item) => {
if (item.file_type) {
return ICONS[item.file_type]
}
return ICONS.FOLDER
}
exports.getMimeType = (item) => {
if (item.file_type) {
return MIMETYPES[item.file_type]
}
return null
}
exports.getId = (item) => {
if (item.file_type && item.file_type === 'TIMELINE') {
return `${encodeURIComponent(item.meeting_id)}__TIMELINE`
} else if (item.file_type) {
return `${encodeURIComponent(item.meeting_id)}__${encodeURIComponent(item.id)}`
}
return `${encodeURIComponent(item.uuid)}`
}
exports.getRequestPath = (item) => {
if (item.file_type && item.file_type === 'TIMELINE') {
return `${encodeURIComponent(item.meeting_id)}?recordingId=TIMELINE`
} else if (item.file_type) {
return `${encodeURIComponent(item.meeting_id)}?recordingId=${encodeURIComponent(item.id)}`
}
return `${encodeURIComponent(item.uuid)}`
}
exports.getStartDate = (item) => {
if (item.file_type === 'TIMELINE') {
return item.recording_start
}
return item.start_time
}
exports.getSize = (item) => {
if (item.file_type && item.file_type === 'TIMELINE') {
const maxExportFileSize = 1024 * 1024
return maxExportFileSize
} else if (item.file_type) {
return item.file_size
}
return item.total_size
}
| 1 | 13,493 | We do this to differentiate between the multiple CC files when the recording is stopped and restarted multiple times within a single meeting. | transloadit-uppy | js |
@@ -182,7 +182,6 @@ class KohaRest extends \VuFind\ILS\Driver\AbstractBase implements
*/
protected $itemStatusMappings = [
'Item::Held' => 'On Hold',
- 'Item::Lost' => 'Lost--Library Applied',
'Item::Waiting' => 'On Holdshelf',
];
| 1 | <?php
/**
* VuFind Driver for Koha, using REST API
*
* PHP version 7
*
* Copyright (C) The National Library of Finland 2016-2020.
* Copyright (C) Moravian Library 2019.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2,
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
* @category VuFind
* @package ILS_Drivers
* @author Bohdan Inhliziian <[email protected]>
* @author Ere Maijala <[email protected]>
* @author Josef Moravec <[email protected]>
* @license http://opensource.org/licenses/gpl-2.0.php GNU General Public License
* @link https://vufind.org/wiki/development:plugins:ils_drivers Wiki
*/
namespace VuFind\ILS\Driver;
use VuFind\Date\DateException;
use VuFind\Exception\ILS as ILSException;
use VuFind\View\Helper\Root\SafeMoneyFormat;
/**
* VuFind Driver for Koha, using REST API
*
* Minimum Koha Version: 20.05 + koha-plugin-rest-di REST API plugin from
* https://github.com/natlibfi/koha-plugin-rest-di
*
* @category VuFind
* @package ILS_Drivers
* @author Bohdan Inhliziian <[email protected]>
* @author Ere Maijala <[email protected]>
* @author Josef Moravec <[email protected]>
* @license http://opensource.org/licenses/gpl-2.0.php GNU General Public License
* @link https://vufind.org/wiki/development:plugins:ils_drivers Wiki
*/
class KohaRest extends \VuFind\ILS\Driver\AbstractBase implements
\VuFindHttp\HttpServiceAwareInterface,
\VuFind\I18n\Translator\TranslatorAwareInterface,
\Laminas\Log\LoggerAwareInterface
{
use \VuFindHttp\HttpServiceAwareTrait;
use \VuFind\I18n\Translator\TranslatorAwareTrait;
use \VuFind\Log\LoggerAwareTrait {
logError as error;
}
use \VuFind\ILS\Driver\CacheTrait;
/**
* Library prefix
*
* @var string
*/
protected $source = '';
/**
* Date converter object
*
* @var \VuFind\Date\Converter
*/
protected $dateConverter;
/**
* Factory function for constructing the SessionContainer.
*
* @var Callable
*/
protected $sessionFactory;
/**
* Money formatting view helper
*
* @var SafeMoneyFormat
*/
protected $safeMoneyFormat;
/**
* Session cache
*
* @var \Laminas\Session\Container
*/
protected $sessionCache;
/**
* Default pickup location
*
* @var string
*/
protected $defaultPickUpLocation;
/**
* Item status rankings. The lower the value, the more important the status.
*
* @var array
*/
protected $statusRankings = [
'Charged' => 1,
'On Hold' => 2
];
/**
* Mappings from fee (account line) types
*
* @var array
*/
protected $feeTypeMappings = [
'A' => 'Account',
'C' => 'Credit',
'Copie' => 'Copier Fee',
'F' => 'Overdue',
'FU' => 'Accrued Fine',
'L' => 'Lost Item Replacement',
'M' => 'Sundry',
'N' => 'New Card',
'ODUE' => 'Overdue',
'Res' => 'Hold Fee',
'HE' => 'Hold Expired',
'RENT' => 'Rental'
];
/**
* Mappings from renewal block reasons
*
* @var array
*/
protected $renewalBlockMappings = [
'too_soon' => 'Cannot renew yet',
'onsite_checkout' => 'Copy has special circulation',
'on_reserve' => 'renew_item_requested',
'too_many' => 'renew_item_limit',
'restriction' => 'Borrowing Block Message',
'overdue' => 'renew_item_overdue',
'cardlost' => 'renew_card_lost',
'gonenoaddress' => 'patron_status_address_missing',
'debarred' => 'patron_status_card_blocked',
'debt' => 'renew_debt'
];
/**
* Permanent renewal blocks
*
* @var array
*/
protected $permanentRenewalBlocks = [
'onsite_checkout',
'on_reserve',
'too_many'
];
/**
* Patron status mappings
*
* @var array
*/
protected $patronStatusMappings = [
'Hold::MaximumHoldsReached' => 'patron_status_maximum_requests',
'Patron::CardExpired' => 'patron_status_card_expired',
'Patron::DebarredOverdue' => 'patron_status_debarred_overdue',
'Patron::Debt' => 'patron_status_debt_limit_reached',
'Patron::DebtGuarantees' => 'patron_status_guarantees_debt_limit_reached',
'Patron::GoneNoAddress' => 'patron_status_address_missing',
];
/**
* Item status mappings
*
* @var array
*/
protected $itemStatusMappings = [
'Item::Held' => 'On Hold',
'Item::Lost' => 'Lost--Library Applied',
'Item::Waiting' => 'On Holdshelf',
];
/**
* Item status mapping methods used when the item status mappings above
* (or in the configuration file) don't contain a direct mapping.
*
* @var array
*/
protected $itemStatusMappingMethods = [
'Item::CheckedOut' => 'getStatusCodeItemCheckedOut',
'Item::NotForLoan' => 'getStatusCodeItemNotForLoan',
'Item::NotForLoanForcing' => 'getStatusCodeItemNotForLoan',
'Item::Transfer' => 'getStatusCodeItemTransfer',
'ItemType::NotForLoan' => 'getStatusCodeItemNotForLoan',
];
/**
* Whether to display home library instead of holding library
*
* @var bool
*/
protected $useHomeLibrary = false;
/**
* Whether to sort items by serial issue. Default is true.
*
* @var bool
*/
protected $sortItemsBySerialIssue;
/**
* Constructor
*
* @param \VuFind\Date\Converter $dateConverter Date converter object
* @param Callable $sessionFactory Factory function returning
* SessionContainer object
* @param SafeMoneyFormat $safeMoneyFormat Money formatting view helper
*/
public function __construct(\VuFind\Date\Converter $dateConverter,
$sessionFactory, SafeMoneyFormat $safeMoneyFormat
) {
$this->dateConverter = $dateConverter;
$this->sessionFactory = $sessionFactory;
$this->safeMoneyFormat = $safeMoneyFormat;
}
/**
* Initialize the driver.
*
* Validate configuration and perform all resource-intensive tasks needed to
* make the driver active.
*
* @throws ILSException
* @return void
*/
public function init()
{
// Validate config
$required = ['host'];
foreach ($required as $current) {
if (!isset($this->config['Catalog'][$current])) {
throw new ILSException("Missing Catalog/{$current} config setting.");
}
}
$this->defaultPickUpLocation
= isset($this->config['Holds']['defaultPickUpLocation'])
? $this->config['Holds']['defaultPickUpLocation']
: '';
if ($this->defaultPickUpLocation === 'user-selected') {
$this->defaultPickUpLocation = false;
}
if (!empty($this->config['StatusRankings'])) {
$this->statusRankings = array_merge(
$this->statusRankings, $this->config['StatusRankings']
);
}
if (!empty($this->config['FeeTypeMappings'])) {
$this->feeTypeMappings = array_merge(
$this->feeTypeMappings, $this->config['FeeTypeMappings']
);
}
if (!empty($this->config['PatronStatusMappings'])) {
$this->patronStatusMappings = array_merge(
$this->patronStatusMappings, $this->config['PatronStatusMappings']
);
}
if (!empty($this->config['ItemStatusMappings'])) {
$this->itemStatusMappings = array_merge(
$this->itemStatusMappings, $this->config['ItemStatusMappings']
);
}
$this->useHomeLibrary = !empty($this->config['Holdings']['useHomeLibrary']);
$this->sortItemsBySerialIssue
= $this->config['Holdings']['sortBySerialIssue'] ?? true;
// Init session cache for session-specific data
$namespace = md5($this->config['Catalog']['host']);
$factory = $this->sessionFactory;
$this->sessionCache = $factory($namespace);
}
/**
* Method to ensure uniform cache keys for cached VuFind objects.
*
* @param string|null $suffix Optional suffix that will get appended to the
* object class name calling getCacheKey()
*
* @return string
*/
protected function getCacheKey($suffix = null)
{
return 'KohaRest' . '-' . md5($this->config['Catalog']['host'] . $suffix);
}
/**
* Get Status
*
* This is responsible for retrieving the status information of a certain
* record.
*
* @param string $id The record id to retrieve the holdings for
*
* @return array An associative array with the following keys:
* id, availability (boolean), status, location, reserve, callnumber.
*/
public function getStatus($id)
{
return $this->getItemStatusesForBiblio($id);
}
/**
* Get Statuses
*
* This is responsible for retrieving the status information for a
* collection of records.
*
* @param array $ids The array of record ids to retrieve the status for
*
* @return mixed An array of getStatus() return values on success.
*/
public function getStatuses($ids)
{
$items = [];
foreach ($ids as $id) {
$items[] = $this->getItemStatusesForBiblio($id);
}
return $items;
}
/**
* Get Holding
*
* This is responsible for retrieving the holding information of a certain
* record.
*
* @param string $id The record id to retrieve the holdings for
* @param array $patron Patron data
* @param array $options Extra options
*
* @throws \VuFind\Exception\ILS
* @return array On success, an associative array with the following
* keys: id, availability (boolean), status, location, reserve, callnumber,
* duedate, number, barcode.
*
* @SuppressWarnings(PHPMD.UnusedFormalParameter)
*/
public function getHolding($id, array $patron = null, array $options = [])
{
return $this->getItemStatusesForBiblio($id, $patron);
}
/**
* Get Purchase History
*
* This is responsible for retrieving the acquisitions history data for the
* specific record (usually recently received issues of a serial).
*
* @param string $id The record id to retrieve the info for
*
* @return mixed An array with the acquisitions data on success.
*
* @SuppressWarnings(PHPMD.UnusedFormalParameter)
*/
public function getPurchaseHistory($id)
{
return [];
}
/**
* Get New Items
*
* Retrieve the IDs of items recently added to the catalog.
*
* @param int $page Page number of results to retrieve (counting starts at 1)
* @param int $limit The size of each page of results to retrieve
* @param int $daysOld The maximum age of records to retrieve in days (max. 30)
* @param int $fundId optional fund ID to use for limiting results (use a value
* returned by getFunds, or exclude for no limit); note that "fund" may be a
* misnomer - if funds are not an appropriate way to limit your new item
* results, you can return a different set of values from getFunds. The
* important thing is that this parameter supports an ID returned by getFunds,
* whatever that may mean.
*
* @return array Associative array with 'count' and 'results' keys
*
* @SuppressWarnings(PHPMD.UnusedFormalParameter)
*/
public function getNewItems($page, $limit, $daysOld, $fundId = null)
{
return ['count' => 0, 'results' => []];
}
/**
* Find Reserves
*
* Obtain information on course reserves.
*
* @param string $course ID from getCourses (empty string to match all)
* @param string $inst ID from getInstructors (empty string to match all)
* @param string $dept ID from getDepartments (empty string to match all)
*
* @return mixed An array of associative arrays representing reserve items.
*
* @SuppressWarnings(PHPMD.UnusedFormalParameter)
*/
public function findReserves($course, $inst, $dept)
{
return [];
}
/**
* Patron Login
*
* This is responsible for authenticating a patron against the catalog.
*
* @param string $username The patron username
* @param string $password The patron password
*
* @return mixed Associative array of patron info on successful login,
* null on unsuccessful login.
*
* @SuppressWarnings(PHPMD.UnusedFormalParameter)
*/
public function patronLogin($username, $password)
{
if (empty($username) || empty($password)) {
return null;
}
$result = $this->makeRequest(
[
'path' => 'v1/contrib/kohasuomi/auth/patrons/validation',
'json' => ['userid' => $username, 'password' => $password],
'method' => 'POST',
'errors' => true,
]
);
if (401 === $result['code'] || 403 === $result['code']) {
return null;
}
if (200 !== $result['code']) {
throw new ILSException('Problem with Koha REST API.');
}
$result = $result['data'];
return [
'id' => $result['patron_id'],
'firstname' => $result['firstname'],
'lastname' => $result['surname'],
'cat_username' => $username,
'cat_password' => $password,
'email' => $result['email'],
'major' => null,
'college' => null,
'home_library' => $result['library_id']
];
}
/**
* Check whether the patron is blocked from placing requests (holds/ILL/SRR).
*
* @param array $patron Patron data from patronLogin().
*
* @return mixed A boolean false if no blocks are in place and an array
* of block reasons if blocks are in place
*/
public function getRequestBlocks($patron)
{
return $this->getPatronBlocks($patron);
}
/**
* Check whether the patron has any blocks on their account.
*
* @param array $patron Patron data from patronLogin().
*
* @return mixed A boolean false if no blocks are in place and an array
* of block reasons if blocks are in place
*/
public function getAccountBlocks($patron)
{
return $this->getPatronBlocks($patron);
}
/**
* Get Patron Profile
*
* This is responsible for retrieving the profile for a specific patron.
*
* @param array $patron The patron array
*
* @throws ILSException
* @return array Array of the patron's profile data on success.
*/
public function getMyProfile($patron)
{
$result = $this->makeRequest(['v1', 'patrons', $patron['id']]);
if (200 !== $result['code']) {
throw new ILSException('Problem with Koha REST API.');
}
$result = $result['data'];
return [
'firstname' => $result['firstname'],
'lastname' => $result['surname'],
'phone' => $result['phone'],
'mobile_phone' => $result['mobile'],
'email' => $result['email'],
'address1' => $result['address'],
'address2' => $result['address2'],
'zip' => $result['postal_code'],
'city' => $result['city'],
'country' => $result['country'],
'expiration_date' => $this->convertDate($result['expiry_date'] ?? null)
];
}
/**
* Get Patron Transactions
*
* This is responsible for retrieving all transactions (i.e. checked out items)
* by a specific patron.
*
* @param array $patron The patron array from patronLogin
* @param array $params Parameters
*
* @throws DateException
* @throws ILSException
* @return array Array of the patron's transactions on success.
*/
public function getMyTransactions($patron, $params = [])
{
return $this->getTransactions($patron, $params, false);
}
/**
* Get Renew Details
*
* @param array $checkOutDetails An array of item data
*
* @return string Data for use in a form field
*/
public function getRenewDetails($checkOutDetails)
{
return $checkOutDetails['checkout_id'] . '|' . $checkOutDetails['item_id'];
}
/**
* Renew My Items
*
* Function for attempting to renew a patron's items. The data in
* $renewDetails['details'] is determined by getRenewDetails().
*
* @param array $renewDetails An array of data required for renewing items
* including the Patron ID and an array of renewal IDS
*
* @return array An array of renewal information keyed by item ID
*/
public function renewMyItems($renewDetails)
{
$finalResult = ['details' => []];
foreach ($renewDetails['details'] as $details) {
list($checkoutId, $itemId) = explode('|', $details);
$result = $this->makeRequest(
[
'path' => ['v1', 'checkouts', $checkoutId, 'renewal'],
'method' => 'POST'
]
);
if (201 === $result['code']) {
$newDate
= $this->convertDate($result['data']['due_date'] ?? null, true);
$finalResult['details'][$itemId] = [
'item_id' => $itemId,
'success' => true,
'new_date' => $newDate
];
} else {
$finalResult['details'][$itemId] = [
'item_id' => $itemId,
'success' => false
];
}
}
return $finalResult;
}
/**
* Get Patron Transaction History
*
* This is responsible for retrieving all historical transactions
* (i.e. checked out items)
* by a specific patron.
*
* @param array $patron The patron array from patronLogin
* @param array $params Parameters
*
* @throws DateException
* @throws ILSException
* @return array Array of the patron's transactions on success.
*/
public function getMyTransactionHistory($patron, $params)
{
return $this->getTransactions($patron, $params, true);
}
/**
* Get Patron Holds
*
* This is responsible for retrieving all holds by a specific patron.
*
* @param array $patron The patron array from patronLogin
*
* @throws DateException
* @throws ILSException
* @return array Array of the patron's holds on success.
*/
public function getMyHolds($patron)
{
$result = $this->makeRequest(
[
'path' => 'v1/holds',
'query' => [
'patron_id' => $patron['id'],
'_match' => 'exact'
]
]
);
$holds = [];
foreach ($result['data'] as $entry) {
$biblio = $this->getBiblio($entry['biblio_id']);
$frozen = false;
if (!empty($entry['suspended'])) {
$frozen = !empty($entry['suspend_until']) ? $entry['suspend_until']
: true;
}
$volume = '';
if ($entry['item_id'] ?? null) {
$item = $this->getItem($entry['item_id']);
$volume = $item['serial_issue_number'];
}
$holds[] = [
'id' => $entry['biblio_id'],
'item_id' => $entry['hold_id'],
'requestId' => $entry['hold_id'],
'location' => $this->getLibraryName(
$entry['pickup_library_id'] ?? null
),
'create' => $this->convertDate($entry['hold_date'] ?? null),
'expire' => $this->convertDate($entry['expiration_date'] ?? null),
'position' => $entry['priority'],
'available' => !empty($entry['waiting_date']),
'frozen' => $frozen,
'in_transit' => !empty($entry['status']) && $entry['status'] == 'T',
'title' => $this->getBiblioTitle($biblio),
'isbn' => $biblio['isbn'] ?? '',
'issn' => $biblio['issn'] ?? '',
'publication_year' => $biblio['copyright_date']
?? $biblio['publication_year'] ?? '',
'volume' => $volume,
];
}
return $holds;
}
/**
* Get Cancel Hold Details
*
 * Get required data for canceling a hold. This value is relayed to the
* cancelHolds function when the user attempts to cancel a hold.
*
* @param array $holdDetails An array of hold data
*
* @return string Data for use in a form field
*/
public function getCancelHoldDetails($holdDetails)
{
return $holdDetails['available'] || $holdDetails['in_transit'] ? ''
: $holdDetails['requestId'];
}
/**
* Cancel Holds
*
* Attempts to Cancel a hold. The data in $cancelDetails['details'] is determined
* by getCancelHoldDetails().
*
* @param array $cancelDetails An array of item and patron data
*
* @return array An array of data on each request including
* whether or not it was successful and a system message (if available)
*/
public function cancelHolds($cancelDetails)
{
$details = $cancelDetails['details'];
$count = 0;
$response = [];
foreach ($details as $holdId) {
$result = $this->makeRequest(
[
'path' => ['v1', 'holds', $holdId],
'method' => 'DELETE',
'errors' => true
]
);
if (200 === $result['code'] || 204 === $result['code']) {
$response[$holdId] = [
'success' => true,
'status' => 'hold_cancel_success'
];
++$count;
} else {
$response[$holdId] = [
'success' => false,
'status' => 'hold_cancel_fail',
'sysMessage' => false
];
}
}
return ['count' => $count, 'items' => $response];
}
/**
* Get Pick Up Locations
*
 * This is responsible for getting a list of valid library locations for
* holds / recall retrieval
*
* @param array $patron Patron information returned by the patronLogin
* method.
* @param array $holdDetails Optional array, only passed in when getting a list
* in the context of placing a hold; contains most of the same values passed to
* placeHold, minus the patron data. May be used to limit the pickup options
* or may be ignored. The driver must not add new options to the return array
* based on this data or other areas of VuFind may behave incorrectly.
*
* @throws ILSException
* @return array An array of associative arrays with locationID and
* locationDisplay keys
*
* @SuppressWarnings(PHPMD.UnusedFormalParameter)
*/
public function getPickUpLocations($patron = false, $holdDetails = null)
{
$bibId = $holdDetails['id'] ?? null;
$itemId = $holdDetails['item_id'] ?? false;
$requestType
= array_key_exists('StorageRetrievalRequest', $holdDetails ?? [])
? 'StorageRetrievalRequests' : 'Holds';
$included = null;
if ($bibId && 'Holds' === $requestType) {
// Collect library codes that are to be included
$level = !empty($holdDetails['level']) ? $holdDetails['level'] : 'title';
if ('copy' === $level && false === $itemId) {
return [];
}
if ('copy' === $level) {
$result = $this->makeRequest(
[
'path' => [
'v1', 'contrib', 'kohasuomi', 'availability', 'items',
$itemId, 'hold'
],
'query' => [
'patron_id' => (int)$patron['id'],
'query_pickup_locations' => 1
]
]
);
if (empty($result['data'])) {
return [];
}
$notes = $result['data']['availability']['notes'];
$included = $notes['Item::PickupLocations']['to_libraries'];
} else {
$result = $this->makeRequest(
[
'path' => [
'v1', 'contrib', 'kohasuomi', 'availability', 'biblios',
$bibId, 'hold'
],
'query' => [
'patron_id' => (int)$patron['id'],
'query_pickup_locations' => 1
]
]
);
if (empty($result['data'])) {
return [];
}
$notes = $result['data']['availability']['notes'];
$included = $notes['Biblio::PickupLocations']['to_libraries'];
}
}
$excluded = isset($this->config['Holds']['excludePickupLocations'])
? explode(':', $this->config['Holds']['excludePickupLocations']) : [];
$locations = [];
foreach ($this->getLibraries() as $library) {
$code = $library['library_id'];
if ((null === $included && !$library['pickup_location'])
|| in_array($code, $excluded)
|| (null !== $included && !in_array($code, $included))
) {
continue;
}
$locations[] = [
'locationID' => $code,
'locationDisplay' => $library['name']
];
}
// Do we need to sort pickup locations? If the setting is false, don't
// bother doing any more work. If it's not set at all, default to
// alphabetical order.
$orderSetting = isset($this->config['Holds']['pickUpLocationOrder'])
? $this->config['Holds']['pickUpLocationOrder'] : 'default';
if (count($locations) > 1 && !empty($orderSetting)) {
$locationOrder = $orderSetting === 'default'
? [] : array_flip(explode(':', $orderSetting));
$sortFunction = function ($a, $b) use ($locationOrder) {
$aLoc = $a['locationID'];
$bLoc = $b['locationID'];
if (isset($locationOrder[$aLoc])) {
if (isset($locationOrder[$bLoc])) {
return $locationOrder[$aLoc] - $locationOrder[$bLoc];
}
return -1;
}
if (isset($locationOrder[$bLoc])) {
return 1;
}
return strcasecmp($a['locationDisplay'], $b['locationDisplay']);
};
usort($locations, $sortFunction);
}
return $locations;
}
/**
* Get Default Pick Up Location
*
* Returns the default pick up location
*
* @param array $patron Patron information returned by the patronLogin
* method.
* @param array $holdDetails Optional array, only passed in when getting a list
* in the context of placing a hold; contains most of the same values passed to
* placeHold, minus the patron data. May be used to limit the pickup options
* or may be ignored.
*
* @return false|string The default pickup location for the patron or false
* if the user has to choose.
*
* @SuppressWarnings(PHPMD.UnusedFormalParameter)
*/
public function getDefaultPickUpLocation($patron = false, $holdDetails = null)
{
return $this->defaultPickUpLocation;
}
/**
* Check if request is valid
*
* This is responsible for determining if an item is requestable
*
* @param string $id The Bib ID
* @param array $data An Array of item data
* @param patron $patron An array of patron data
*
* @return mixed An array of data on the request including
* whether or not it is valid and a status message. Alternatively a boolean
* true if request is valid, false if not.
*/
public function checkRequestIsValid($id, $data, $patron)
{
if ($this->getPatronBlocks($patron)) {
return false;
}
$level = $data['level'] ?? 'copy';
if ('title' === $level) {
$result = $this->makeRequest(
[
'path' => [
'v1', 'contrib', 'kohasuomi', 'availability', 'biblios', $id,
'hold'
],
'query' => ['patron_id' => $patron['id']]
]
);
if (!empty($result['data']['availability']['available'])) {
return [
'valid' => true,
'status' => 'title_hold_place'
];
}
return [
'valid' => false,
'status' => $this->getHoldBlockReason($result['data'])
];
}
$result = $this->makeRequest(
[
'path' => [
'v1', 'contrib', 'kohasuomi', 'availability', 'items',
$data['item_id'], 'hold'
],
'query' => ['patron_id' => $patron['id']]
]
);
if (!empty($result['data']['availability']['available'])) {
return [
'valid' => true,
'status' => 'hold_place'
];
}
return [
'valid' => false,
'status' => $this->getHoldBlockReason($result['data'])
];
}
/**
* Place Hold
*
* Attempts to place a hold or recall on a particular item and returns
* an array with result details or throws an exception on failure of support
* classes
*
* @param array $holdDetails An array of item and patron data
*
* @throws ILSException
* @return mixed An array of data on the request including
* whether or not it was successful and a system message (if available)
*/
public function placeHold($holdDetails)
{
$patron = $holdDetails['patron'];
$level = isset($holdDetails['level']) && !empty($holdDetails['level'])
? $holdDetails['level'] : 'copy';
$pickUpLocation = !empty($holdDetails['pickUpLocation'])
? $holdDetails['pickUpLocation'] : $this->defaultPickUpLocation;
$itemId = $holdDetails['item_id'] ?? false;
$comment = $holdDetails['comment'] ?? '';
$bibId = $holdDetails['id'];
if ($level == 'copy' && empty($itemId)) {
throw new ILSException("Hold level is 'copy', but item ID is empty");
}
// Convert last interest date from Display Format to Koha's required format
try {
$lastInterestDate = $this->dateConverter->convertFromDisplayDate(
'Y-m-d', $holdDetails['requiredBy']
);
} catch (DateException $e) {
// Hold Date is invalid
return $this->holdError('hold_date_invalid');
}
try {
$checkTime = $this->dateConverter->convertFromDisplayDate(
'U', $holdDetails['requiredBy']
);
if (!is_numeric($checkTime)) {
throw new DateException('Result should be numeric');
}
} catch (DateException $e) {
throw new ILSException('Problem parsing required by date.');
}
if (time() > $checkTime) {
// Hold Date is in the past
return $this->holdError('hold_date_past');
}
// Make sure pickup location is valid
if (!$this->pickUpLocationIsValid($pickUpLocation, $patron, $holdDetails)) {
return $this->holdError('hold_invalid_pickup');
}
$request = [
'biblio_id' => (int)$bibId,
'patron_id' => (int)$patron['id'],
'pickup_library_id' => $pickUpLocation,
'notes' => $comment,
'expiration_date' => $lastInterestDate,
];
if ($level == 'copy') {
$request['item_id'] = (int)$itemId;
}
$result = $this->makeRequest(
[
'path' => 'v1/holds',
'json' => $request,
'method' => 'POST',
'errors' => true
]
);
if ($result['code'] >= 300) {
return $this->holdError($result['data']['error'] ?? 'hold_error_fail');
}
return ['success' => true];
}
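    // Illustrative sketch only (hypothetical values, not part of the driver):
    // a copy-level hold passed to placeHold() could be shaped roughly like this:
    //
    //   $holdDetails = [
    //       'patron'         => $patron,      // array from patronLogin()
    //       'id'             => '12345',      // bib ID
    //       'item_id'        => '67890',
    //       'level'          => 'copy',
    //       'pickUpLocation' => 'MPL',        // must pass pickUpLocationIsValid()
    //       'requiredBy'     => '12-31-2025', // display-format date
    //       'comment'        => 'Hold at the main desk, please',
    //   ];
    //   // On success the method returns ['success' => true].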
/**
* Get Patron Storage Retrieval Requests
*
* This is responsible for retrieving all article requests by a specific patron.
*
* @param array $patron The patron array from patronLogin
*
* @return array Array of the patron's storage retrieval requests.
*/
public function getMyStorageRetrievalRequests($patron)
{
$result = $this->makeRequest(
[
'v1', 'contrib', 'kohasuomi', 'patrons', $patron['id'],
'articlerequests'
]
);
if (empty($result)) {
return [];
}
$requests = [];
foreach ($result['data'] as $entry) {
// Article requests don't yet have a unified API mapping in Koha.
// Try to take into account existing and predicted field names.
$bibId = $entry['biblio_id'] ?? $entry['biblionumber'] ?? null;
$itemId = $entry['item_id'] ?? $entry['itemnumber'] ?? null;
$location = $entry['library_id'] ?? $entry['branchcode'] ?? null;
$title = '';
$volume = '';
if ($itemId) {
$item = $this->getItem($itemId);
$bibId = $item['biblio_id'];
$volume = $item['serial_issue_number'];
}
if (!empty($bibId)) {
$bib = $this->getBiblio($bibId);
$title = $this->getBiblioTitle($bib);
}
$requests[] = [
'id' => $bibId,
'item_id' => $entry['id'],
'location' => $location,
'create' => $this->convertDate($entry['created_on']),
'available' => $entry['status'] === 'COMPLETED',
'title' => $title,
'volume' => $volume,
];
}
return $requests;
}
/**
* Get Cancel Storage Retrieval Request (article request) Details
*
* @param array $details An array of item data
*
* @return string Data for use in a form field
*/
public function getCancelStorageRetrievalRequestDetails($details)
{
return $details['item_id'];
}
/**
* Cancel Storage Retrieval Requests (article requests)
*
* Attempts to Cancel an article request on a particular item. The
* data in $cancelDetails['details'] is determined by
* getCancelStorageRetrievalRequestDetails().
*
* @param array $cancelDetails An array of item and patron data
*
* @return array An array of data on each request including
* whether or not it was successful and a system message (if available)
*/
public function cancelStorageRetrievalRequests($cancelDetails)
{
$details = $cancelDetails['details'];
$patron = $cancelDetails['patron'];
$count = 0;
$response = [];
foreach ($details as $id) {
$result = $this->makeRequest(
[
'path' => [
'v1', 'contrib', 'kohasuomi', 'patrons', $patron['id'],
'articlerequests', $id
],
'method' => 'DELETE',
'errors' => true
]
);
if (200 !== $result['code']) {
$response[$id] = [
'success' => false,
'status' => 'storage_retrieval_request_cancel_fail',
'sysMessage' => false
];
} else {
$response[$id] = [
'success' => true,
'status' => 'storage_retrieval_request_cancel_success'
];
++$count;
}
}
return ['count' => $count, 'items' => $response];
}
/**
* Check if storage retrieval request is valid
*
* This is responsible for determining if an item is requestable
*
* @param string $id The Bib ID
* @param array $data An Array of item data
     * @param array  $patron An array of patron data
*
* @return bool True if request is valid, false if not
*/
public function checkStorageRetrievalRequestIsValid($id, $data, $patron)
{
if (!isset($this->config['StorageRetrievalRequests'])
|| $this->getPatronBlocks($patron)
) {
return false;
}
$level = $data['level'] ?? 'copy';
if ('title' === $level) {
$result = $this->makeRequest(
[
'path' => [
'v1', 'contrib', 'kohasuomi', 'availability', 'biblios', $id,
'articlerequest'
],
'query' => ['patron_id' => $patron['id']]
]
);
} else {
$result = $this->makeRequest(
[
'path' => [
'v1', 'contrib', 'kohasuomi', 'availability', 'items',
$data['item_id'], 'articlerequest'
],
'query' => ['patron_id' => $patron['id']]
]
);
}
return !empty($result['data']['availability']['available']);
}
/**
* Place Storage Retrieval Request (Call Slip)
*
* Attempts to place a call slip request on a particular item and returns
* an array with result details
*
* @param array $details An array of item and patron data
*
* @return mixed An array of data on the request including
* whether or not it was successful and a system message (if available)
*/
public function placeStorageRetrievalRequest($details)
{
$patron = $details['patron'];
$level = $details['level'] ?? 'copy';
$pickUpLocation = $details['pickUpLocation'] ?? null;
$itemId = $details['item_id'] ?? false;
$comment = $details['comment'] ?? '';
$bibId = $details['id'];
if ('copy' === $level && empty($itemId)) {
throw new ILSException("Request level is 'copy', but item ID is empty");
}
// Make sure pickup location is valid
if (null !== $pickUpLocation
&& !$this->pickUpLocationIsValid($pickUpLocation, $patron, $details)
) {
return [
'success' => false,
'sysMessage' => 'storage_retrieval_request_invalid_pickup'
];
}
$request = [
'biblio_id' => (int)$bibId,
'pickup_library_id' => $pickUpLocation,
'notes' => $comment,
'volume' => $details['volume'] ?? '',
'issue' => $details['issue'] ?? '',
'date' => $details['year'] ?? '',
];
if ($level == 'copy') {
$request['item_id'] = (int)$itemId;
}
$result = $this->makeRequest(
[
'path' => [
'v1', 'contrib', 'kohasuomi', 'patrons', $patron['id'],
'articlerequests'
],
'json' => $request,
'method' => 'POST',
'errors' => true
]
);
if ($result['code'] >= 300) {
$message = $result['data']['error']
?? 'storage_retrieval_request_error_fail';
return [
'success' => false,
'sysMessage' => $message
];
}
return [
'success' => true,
'status' => 'storage_retrieval_request_place_success'
];
}
/**
* Get Patron Fines
*
* This is responsible for retrieving all fines by a specific patron.
*
* @param array $patron The patron array from patronLogin
*
* @throws DateException
* @throws ILSException
* @return array Array of the patron's fines on success.
*/
public function getMyFines($patron)
{
// TODO: Make this use X-Koha-Embed when the endpoint allows
$result = $this->makeRequest(['v1', 'patrons', $patron['id'], 'account']);
$fines = [];
foreach ($result['data']['outstanding_debits']['lines'] ?? [] as $entry) {
$bibId = null;
if (!empty($entry['item_id'])) {
$item = $this->getItem($entry['item_id']);
if (!empty($item['biblio_id'])) {
$bibId = $item['biblio_id'];
}
}
$type = $entry['debit_type'];
$type = $this->translate($this->feeTypeMappings[$type] ?? $type);
if ($entry['description'] !== $type) {
$type .= ' - ' . $entry['description'];
}
$fine = [
'amount' => $entry['amount'] * 100,
'balance' => $entry['amount_outstanding'] * 100,
'fine' => $type,
'createdate' => $this->convertDate($entry['date'] ?? null),
'checkout' => '',
];
if (null !== $bibId) {
$fine['id'] = $bibId;
}
$fines[] = $fine;
}
return $fines;
}
/**
* Change Password
*
* Attempts to change patron password (PIN code)
*
* @param array $details An array of patron id and old and new password:
*
* 'patron' The patron array from patronLogin
* 'oldPassword' Old password
* 'newPassword' New password
*
* @return array An array of data on the request including
* whether or not it was successful and a system message (if available)
*/
public function changePassword($details)
{
$patron = $details['patron'];
$request = [
'password' => $details['newPassword'],
'password_2' => $details['newPassword']
];
$result = $this->makeRequest(
[
'path' => ['v1', 'patrons', $patron['id'], 'password'],
'json' => $request,
'method' => 'POST',
'errors' => true
]
);
if (200 !== $result['code']) {
if (400 === $result['code']) {
$message = 'password_error_invalid';
} else {
$message = 'An error has occurred';
}
return [
'success' => false, 'status' => $message
];
}
return ['success' => true, 'status' => 'change_password_ok'];
}
/**
* Public Function which retrieves renew, hold and cancel settings from the
* driver ini file.
*
* @param string $function The name of the feature to be checked
* @param array $params Optional feature-specific parameters (array)
*
* @return array An array with key-value pairs.
*
* @SuppressWarnings(PHPMD.UnusedFormalParameter)
*/
public function getConfig($function, $params = null)
{
if ('getMyTransactionHistory' === $function) {
if (empty($this->config['TransactionHistory']['enabled'])) {
return false;
}
$limit = $this->config['TransactionHistory']['max_page_size'] ?? 100;
return [
'max_results' => $limit,
'sort' => [
'-checkout_date' => 'sort_checkout_date_desc',
'+checkout_date' => 'sort_checkout_date_asc',
'-checkin_date' => 'sort_return_date_desc',
'+checkin_date' => 'sort_return_date_asc',
'-due_date' => 'sort_due_date_desc',
'+due_date' => 'sort_due_date_asc',
'+title' => 'sort_title'
],
'default_sort' => '-checkout_date'
];
} elseif ('getMyTransactions' === $function) {
$limit = $this->config['Loans']['max_page_size'] ?? 100;
return [
'max_results' => $limit,
'sort' => [
'-checkout_date' => 'sort_checkout_date_desc',
'+checkout_date' => 'sort_checkout_date_asc',
'-due_date' => 'sort_due_date_desc',
'+due_date' => 'sort_due_date_asc',
'+title' => 'sort_title'
],
'default_sort' => '+due_date'
];
}
return isset($this->config[$function])
? $this->config[$function] : false;
}
/**
* Helper method to determine whether or not a certain method can be
* called on this driver. Required method for any smart drivers.
*
* @param string $method The name of the called method.
* @param array $params Array of passed parameters
*
* @return bool True if the method can be called with the given parameters,
* false otherwise.
*
* @SuppressWarnings(PHPMD.UnusedFormalParameter)
*/
public function supportsMethod($method, $params)
{
// Special case: change password is only available if properly configured.
if ($method == 'changePassword') {
return isset($this->config['changePassword']);
}
return is_callable([$this, $method]);
}
/**
* Create a HTTP client
*
* @param string $url Request URL
*
* @return \Laminas\Http\Client
*/
protected function createHttpClient($url)
{
$client = $this->httpService->createClient($url);
if (isset($this->config['Http']['ssl_verify_peer_name'])
&& !$this->config['Http']['ssl_verify_peer_name']
) {
$adapter = $client->getAdapter();
if ($adapter instanceof \Laminas\Http\Client\Adapter\Socket) {
$context = $adapter->getStreamContext();
$res = stream_context_set_option(
$context, 'ssl', 'verify_peer_name', false
);
if (!$res) {
throw new \Exception('Unable to set sslverifypeername option');
}
} elseif ($adapter instanceof \Laminas\Http\Client\Adapter\Curl) {
$adapter->setCurlOption(CURLOPT_SSL_VERIFYHOST, false);
}
}
// Set timeout value
$timeout = isset($this->config['Catalog']['http_timeout'])
? $this->config['Catalog']['http_timeout'] : 30;
$client->setOptions(
['timeout' => $timeout, 'useragent' => 'VuFind', 'keepalive' => true]
);
// Set Accept header
$client->getRequest()->getHeaders()->addHeaderLine(
'Accept', 'application/json'
);
return $client;
}
/**
* Make Request
*
* Makes a request to the Koha REST API
*
     * @param array|string $request Either a path as a string or a non-keyed array
     * of path elements, or a keyed array of request parameters:
*
* path String or array of values to embed in the URL path. String is taken
* as is, array elements are url-encoded.
* query URL parameters (optional)
* method HTTP method (default is GET)
* form Form request params (optional)
* json JSON request as a PHP array (optional, only when form is not
* specified)
* headers Headers
* errors If true, return errors instead of raising an exception
*
* @return array
* @throws ILSException
*/
protected function makeRequest($request)
{
// Set up the request
$apiUrl = $this->config['Catalog']['host'] . '/';
// Handle the simple case of just a path in $request
if (is_string($request) || !isset($request['path'])) {
$request = [
'path' => $request
];
}
if (is_array($request['path'])) {
$apiUrl .= implode('/', array_map('urlencode', $request['path']));
} else {
$apiUrl .= $request['path'];
}
$client = $this->createHttpClient($apiUrl);
$client->getRequest()->getHeaders()
->addHeaderLine('Authorization', $this->getOAuth2Token());
// Add params
if (!empty($request['query'])) {
$client->setParameterGet($request['query']);
}
if (!empty($request['form'])) {
$client->setParameterPost($request['form']);
} elseif (!empty($request['json'])) {
$client->getRequest()->setContent(json_encode($request['json']));
$client->getRequest()->getHeaders()->addHeaderLine(
'Content-Type', 'application/json'
);
}
if (!empty($request['headers'])) {
$requestHeaders = $client->getRequest()->getHeaders();
foreach ($request['headers'] as $name => $value) {
$requestHeaders->addHeaderLine($name, [$value]);
}
}
// Send request and retrieve response
$method = $request['method'] ?? 'GET';
$startTime = microtime(true);
$client->setMethod($method);
try {
$response = $client->send();
} catch (\Exception $e) {
$this->logError(
"$method request for '$apiUrl' failed: " . $e->getMessage()
);
throw new ILSException('Problem with Koha REST API.');
}
// If we get a 401, we need to renew the access token and try again
if ($response->getStatusCode() == 401) {
$client->getRequest()->getHeaders()
->addHeaderLine('Authorization', $this->getOAuth2Token(true));
try {
$response = $client->send();
} catch (\Exception $e) {
$this->logError(
"$method request for '$apiUrl' failed: " . $e->getMessage()
);
throw new ILSException('Problem with Koha REST API.');
}
}
$result = $response->getBody();
$fullUrl = $apiUrl;
if ($method == 'GET') {
$fullUrl .= '?' . $client->getRequest()->getQuery()->toString();
}
$this->debug(
'[' . round(microtime(true) - $startTime, 4) . 's]'
. " $method request $fullUrl" . PHP_EOL . 'response: ' . PHP_EOL
. $result
);
// Handle errors as complete failures only if the API call didn't return
// valid JSON that the caller can handle
$decodedResult = json_decode($result, true);
if (empty($request['errors']) && !$response->isSuccess()
&& (null === $decodedResult || !empty($decodedResult['error']))
) {
$params = $method == 'GET'
? $client->getRequest()->getQuery()->toString()
: $client->getRequest()->getPost()->toString();
$this->logError(
"$method request for '$apiUrl' with params '$params' and contents '"
. $client->getRequest()->getContent() . "' failed: "
. $response->getStatusCode() . ': ' . $response->getReasonPhrase()
. ', response content: ' . $response->getBody()
);
throw new ILSException('Problem with Koha REST API.');
}
return [
'data' => $decodedResult,
'code' => (int)$response->getStatusCode(),
'headers' => $response->getHeaders()->toArray(),
];
}
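    // Illustrative sketch only (hypothetical call, not part of the driver):
    // the keyed-array form documented above is typically used like this:
    //
    //   $result = $this->makeRequest([
    //       'path'   => ['v1', 'patrons', $patron['id']],
    //       'query'  => ['query_blocks' => 1],
    //       'method' => 'GET',
    //       'errors' => true,
    //   ]);
    //   // $result['code'] holds the HTTP status, $result['data'] the decoded
    //   // JSON body and $result['headers'] the response headers.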
/**
* Get a new or cached OAuth2 token (type + token)
*
* @param bool $renew Force renewal of token
*
* @return string
*/
protected function getOAuth2Token($renew = false)
{
$cacheKey = 'oauth';
if (!$renew) {
$token = $this->getCachedData($cacheKey);
if ($token) {
return $token;
}
}
$url = $this->config['Catalog']['host'] . '/v1/oauth/token';
$client = $this->createHttpClient($url);
$client->setMethod('POST');
$client->getRequest()->getHeaders()->addHeaderLine(
'Content-Type', 'application/x-www-form-urlencoded'
);
$client->setParameterPost(
[
'client_id' => $this->config['Catalog']['clientId'],
'client_secret' => $this->config['Catalog']['clientSecret'],
'grant_type' => $this->config['Catalog']['grantType']
?? 'client_credentials'
]
);
try {
$response = $client->send();
} catch (\Exception $e) {
$this->logError(
"POST request for '$url' failed: " . $e->getMessage()
);
throw new ILSException('Problem with Koha REST API.');
}
if ($response->getStatusCode() != 200) {
$errorMessage = 'Error while getting OAuth2 access token (status code '
. $response->getStatusCode() . '): ' . $response->getContent();
$this->logError($errorMessage);
throw new ILSException('Problem with Koha REST API.');
}
$responseData = json_decode($response->getContent(), true);
if (empty($responseData['token_type'])
|| empty($responseData['access_token'])
) {
$this->logError(
'Did not receive OAuth2 token, response: '
. $response->getContent()
);
throw new ILSException('Problem with Koha REST API.');
}
$token = $responseData['token_type'] . ' '
. $responseData['access_token'];
$this->putCachedData($cacheKey, $token, $responseData['expires_in'] ?? null);
return $token;
}
/**
* Get Item Statuses
*
* This is responsible for retrieving the status information of a certain
* record.
*
* @param string $id The record id to retrieve the holdings for
* @param array $patron Patron information, if available
*
* @return array An associative array with the following keys:
* id, availability (boolean), status, location, reserve, callnumber.
*/
protected function getItemStatusesForBiblio($id, $patron = null)
{
$result = $this->makeRequest(
[
'path' => [
'v1', 'contrib', 'kohasuomi', 'availability', 'biblios', $id,
'search'
],
'errors' => true
]
);
if (404 == $result['code']) {
return [];
}
if (200 != $result['code']) {
throw new ILSException('Problem with Koha REST API.');
}
if (empty($result['data']['item_availabilities'])) {
return [];
}
$statuses = [];
foreach ($result['data']['item_availabilities'] as $i => $item) {
$avail = $item['availability'];
$available = $avail['available'];
$statusCodes = $this->getItemStatusCodes($item);
$status = $this->pickStatus($statusCodes);
if (isset($avail['unavailabilities']['Item::CheckedOut']['due_date'])) {
$duedate = $this->convertDate(
$avail['unavailabilities']['Item::CheckedOut']['due_date'],
true
);
} else {
$duedate = null;
}
$entry = [
'id' => $id,
'item_id' => $item['item_id'],
'location' => $this->getItemLocationName($item),
'availability' => $available,
'status' => $status,
'status_array' => $statusCodes,
'reserve' => 'N',
'callnumber' => $this->getItemCallNumber($item),
'duedate' => $duedate,
'number' => $item['serial_issue_number'],
'barcode' => $item['external_id'],
'sort' => $i,
'requests_placed' => max(
[$item['hold_queue_length'],
$result['data']['hold_queue_length']]
)
];
if (!empty($item['public_notes'])) {
$entry['item_notes'] = [$item['public_notes']];
}
if ($patron && $this->itemHoldAllowed($item)) {
$entry['is_holdable'] = true;
$entry['level'] = 'copy';
$entry['addLink'] = 'check';
} else {
$entry['is_holdable'] = false;
}
if ($patron && $this->itemArticleRequestAllowed($item)) {
$entry['storageRetrievalRequest'] = 'auto';
$entry['addStorageRetrievalRequestLink'] = 'check';
}
$statuses[] = $entry;
}
usort($statuses, [$this, 'statusSortFunction']);
return $statuses;
}
/**
* Get statuses for an item
*
* @param array $item Item from Koha
*
     * @return array Status array for the item
*/
protected function getItemStatusCodes($item)
{
$statuses = [];
if ($item['availability']['available']) {
$statuses[] = 'On Shelf';
} elseif (isset($item['availability']['unavailabilities'])) {
foreach ($item['availability']['unavailabilities'] as $code => $data) {
// If we have a direct mapping, use it:
if (isset($this->itemStatusMappings[$code])) {
$statuses[] = $this->itemStatusMappings[$code];
continue;
}
// Check for a mapping method for the unavailability reason:
if ($methodName = ($this->itemStatusMappingMethods[$code] ?? '')) {
$statuses[]
= call_user_func([$this, $methodName], $code, $data, $item);
} else {
if (!empty($data['code'])) {
$statuses[] = $data['code'];
} else {
$parts = explode('::', $code, 2);
if (isset($parts[1])) {
$statuses[] = $parts[1];
}
}
}
}
if (empty($statuses)) {
$statuses[] = 'Not Available';
}
} else {
$this->error(
"Unable to determine status for item: " . print_r($item, true)
);
}
if (empty($statuses)) {
$statuses[] = 'No information available';
}
return array_unique($statuses);
}
/**
* Get item status code for CheckedOut status
*
* @param string $code Status code
* @param array $data Status data
* @param array $item Item
*
* @return string
*
* @SuppressWarnings(PHPMD.UnusedFormalParameter)
*/
protected function getStatusCodeItemCheckedOut($code, $data, $item)
{
$overdue = false;
if (!empty($data['due_date'])) {
$duedate = $this->dateConverter->convert(
'Y-m-d',
'U',
$data['due_date']
);
$overdue = $duedate < time();
}
return $overdue ? 'Overdue' : 'Charged';
}
/**
* Get item status code for NotForLoan status
*
* @param string $code Status code
* @param array $data Status data
* @param array $item Item
*
* @return string
*
* @SuppressWarnings(PHPMD.UnusedFormalParameter)
*/
protected function getStatusCodeItemNotForLoan($code, $data, $item)
{
// NotForLoan is special: status has a library-specific
// status number. Allow mapping of different status numbers
// separately (e.g. Item::NotForLoan with status number 4
// is mapped with key Item::NotForLoan4):
$statusKey = $code . ($data['status'] ?? '-');
        // If the status key itself ends up being used as the status, replace ':'
        // in it, since ':' is the namespace separator in translatable strings:
return $this->itemStatusMappings[$statusKey]
?? $data['code'] ?? str_replace(':', '_', $statusKey);
}
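    // Illustrative sketch only (hypothetical configuration, not part of the
    // driver): with the key scheme described above, itemStatusMappings could
    // contain entries such as:
    //
    //   $this->itemStatusMappings = [
    //       'Item::NotForLoan4' => 'On Order',     // library-specific status 4
    //       'Item::NotForLoan-' => 'Not For Loan', // no status number reported
    //   ];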
/**
* Get item status code for Transfer status
*
* @param string $code Status code
* @param array $data Status data
* @param array $item Item
*
* @return string
*
* @SuppressWarnings(PHPMD.UnusedFormalParameter)
*/
protected function getStatusCodeItemTransfer($code, $data, $item)
{
$onHold = array_key_exists(
'Item::Held',
$item['availability']['notes'] ?? []
);
return $onHold ? 'In Transit On Hold' : 'In Transit';
}
/**
* Status item sort function
*
* @param array $a First status record to compare
* @param array $b Second status record to compare
*
* @return int
*/
protected function statusSortFunction($a, $b)
{
$result = strcmp($a['location'], $b['location']);
if (0 === $result && $this->sortItemsBySerialIssue) {
$result = strnatcmp($a['number'], $b['number']);
}
if (0 === $result) {
$result = $a['sort'] - $b['sort'];
}
return $result;
}
/**
* Check if an item is holdable
*
* @param array $item Item from Koha
*
* @return bool
*/
protected function itemHoldAllowed($item)
{
$unavail = $item['availability']['unavailabilities'] ?? [];
if (!isset($unavail['Hold::NotHoldable'])) {
return true;
}
return false;
}
/**
* Check if an article request can be placed on the item
*
* @param array $item Item from Koha
*
* @return bool
*/
protected function itemArticleRequestAllowed($item)
{
$unavail = $item['availability']['unavailabilities'] ?? [];
if (isset($unavail['ArticleRequest::NotAllowed'])) {
return false;
}
if (empty($this->config['StorageRetrievalRequests']['allow_checked_out'])
&& isset($unavail['Item::CheckedOut'])
) {
return false;
}
return true;
}
/**
* Protected support method to pick which status message to display when multiple
* options are present.
*
* @param array $statusArray Array of status messages to choose from.
*
* @throws ILSException
* @return string The best status message to display.
*/
protected function pickStatus($statusArray)
{
// Pick the first entry by default, then see if we can find a better match:
$status = $statusArray[0];
$rank = $this->getStatusRanking($status);
for ($x = 1; $x < count($statusArray); $x++) {
if ($this->getStatusRanking($statusArray[$x]) < $rank) {
$status = $statusArray[$x];
}
}
return $status;
}
/**
* Support method for pickStatus() -- get the ranking value of the specified
* status message.
*
* @param string $status Status message to look up
*
* @return int
*/
protected function getStatusRanking($status)
{
return isset($this->statusRankings[$status])
? $this->statusRankings[$status] : 32000;
}
/**
* Get libraries from cache or from the API
*
* @return array
*/
protected function getLibraries()
{
$cacheKey = 'libraries';
$libraries = $this->getCachedData($cacheKey);
if (null === $libraries) {
$result = $this->makeRequest('v1/libraries');
$libraries = [];
foreach ($result['data'] as $library) {
$libraries[$library['library_id']] = $library;
}
$this->putCachedData($cacheKey, $libraries, 3600);
}
return $libraries;
}
/**
* Get library name
*
* @param string $library Library ID
*
* @return string
*/
protected function getLibraryName($library)
{
$libraries = $this->getLibraries();
return $libraries[$library]['name'] ?? '';
}
/**
* Get patron's blocks, if any
*
* @param array $patron Patron
*
* @return mixed A boolean false if no blocks are in place and an array
* of block reasons if blocks are in place
*/
protected function getPatronBlocks($patron)
{
$patronId = $patron['id'];
$cacheId = "blocks|$patronId";
$blockReason = $this->getCachedData($cacheId);
if (null === $blockReason) {
$result = $this->makeRequest(
[
'path' => [
'v1', 'contrib', 'kohasuomi', 'patrons', $patron['id']
],
'query' => ['query_blocks' => 1]
]
);
$blockReason = [];
if (!empty($result['data']['blocks'])) {
$nonHoldBlock = false;
foreach ($result['data']['blocks'] as $reason => $details) {
if ($reason !== 'Hold::MaximumHoldsReached') {
$nonHoldBlock = true;
}
$description = $this->getPatronBlockReason($reason, $details);
if ($description) {
$blockReason[] = $description;
}
}
// Add the generic block message to the beginning if we have blocks
// other than hold block
if ($nonHoldBlock) {
array_unshift(
$blockReason, $this->translate('patron_status_card_blocked')
);
}
}
$this->putCachedData($cacheId, $blockReason);
}
return empty($blockReason) ? false : $blockReason;
}
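    // Illustrative sketch only (hypothetical result, not part of the driver):
    // with blocks present, the cached value is an array of translated reasons,
    // e.g.
    //
    //   ['Your library card has been blocked', 'Maximum number of holds reached'];
    //
    // and boolean false is returned when no blocks apply.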
/**
* Fetch an item record from Koha
*
* @param int $id Item id
*
* @return array|null
*/
protected function getItem($id)
{
$cacheId = "items|$id";
$item = $this->getCachedData($cacheId);
if (null === $item) {
$result = $this->makeRequest(['v1', 'items', $id]);
$item = $result['data'] ?? false;
$this->putCachedData($cacheId, $item, 300);
}
return $item ?: null;
}
/**
* Fetch a biblio record from Koha
*
* @param int $id Bib record id
*
* @return array|null
*/
protected function getBiblio($id)
{
static $cachedRecords = [];
if (!isset($cachedRecords[$id])) {
$result = $this->makeRequest(['v1', 'biblios', $id]);
$cachedRecords[$id] = $result['data'] ?? false;
}
return $cachedRecords[$id];
}
/**
* Is the selected pickup location valid for the hold?
*
* @param string $pickUpLocation Selected pickup location
* @param array $patron Patron information returned by the patronLogin
* method.
* @param array $holdDetails Details of hold being placed
*
* @return bool
*/
protected function pickUpLocationIsValid($pickUpLocation, $patron, $holdDetails)
{
$pickUpLibs = $this->getPickUpLocations($patron, $holdDetails);
foreach ($pickUpLibs as $location) {
if ($location['locationID'] == $pickUpLocation) {
return true;
}
}
return false;
}
/**
* Return a hold error message
*
* @param string $error Error message
*
* @return array
*/
protected function holdError($error)
{
switch ($error) {
case 'Hold cannot be placed. Reason: tooManyReserves':
case 'Hold cannot be placed. Reason: tooManyHoldsForThisRecord':
$error = 'hold_error_too_many_holds';
break;
case 'Hold cannot be placed. Reason: ageRestricted':
$error = 'hold_error_age_restricted';
break;
}
return [
'success' => false,
'sysMessage' => $error
];
}
/**
* Map a Koha renewal block reason code to a VuFind translation string
*
* @param string $reason Koha block code
*
* @return string
*/
protected function mapRenewalBlockReason($reason)
{
return isset($this->renewalBlockMappings[$reason])
? $this->renewalBlockMappings[$reason] : 'renew_item_no';
}
/**
* Return a location for a Koha item
*
* @param array $item Item
*
* @return string
*/
protected function getItemLocationName($item)
{
$libraryId = (!$this->useHomeLibrary && null !== $item['holding_library_id'])
? $item['holding_library_id'] : $item['home_library_id'];
$name = $this->translateLocation($libraryId);
if ($name === $libraryId) {
$libraries = $this->getLibraries();
$name = isset($libraries[$libraryId])
? $libraries[$libraryId]['name'] : $libraryId;
}
return $name;
}
/**
* Translate location name
*
* @param string $location Location code
* @param string $default Default value if translation is not available
*
* @return string
*/
protected function translateLocation($location, $default = null)
{
if (empty($location)) {
return null !== $default ? $default : '';
}
$prefix = 'location_';
return $this->translate(
"$prefix$location",
null,
null !== $default ? $default : $location
);
}
/**
* Return a call number for a Koha item
*
* @param array $item Item
*
* @return string
*/
protected function getItemCallNumber($item)
{
return $item['callnumber'];
}
/**
     * Get the reason why a hold cannot be placed
*
* @param array $result Hold check result
*
* @return string
*/
protected function getHoldBlockReason($result)
{
if (!empty($result['availability']['unavailabilities'])) {
foreach (array_keys($result['availability']['unavailabilities']) as $key
) {
switch ($key) {
case 'Biblio::NoAvailableItems':
return 'hold_error_not_holdable';
case 'Item::NotForLoan':
case 'Hold::NotAllowedInOPAC':
case 'Hold::ZeroHoldsAllowed':
case 'Hold::NotAllowedByLibrary':
case 'Hold::NotAllowedFromOtherLibraries':
case 'Item::Restricted':
case 'Hold::ItemLevelHoldNotAllowed':
return 'hold_error_item_not_holdable';
case 'Hold::MaximumHoldsForRecordReached':
case 'Hold::MaximumHoldsReached':
return 'hold_error_too_many_holds';
case 'Item::AlreadyHeldForThisPatron':
return 'hold_error_already_held';
case 'Hold::OnShelfNotAllowed':
return 'hold_error_on_shelf_blocked';
}
}
}
return 'hold_error_blocked';
}
/**
     * Converts the given sort key to the corresponding parameter value
     *
     * @param string $key     Key to convert
     * @param string $default Default value to return if no mapping exists
*
* @return string
*/
protected function getSortParamValue($key, $default = '')
{
$params = [
'checkout' => 'issuedate',
'return' => 'returndate',
'lastrenewed' => 'lastreneweddate',
'title' => 'title'
];
return $params[$key] ?? $default;
}
/**
* Get a complete title from all the title-related fields
*
* @param array $biblio Biblio record (or something with the correct fields)
*
* @return string
*/
protected function getBiblioTitle($biblio)
{
$title = [];
foreach (['title', 'subtitle', 'part_number', 'part_name'] as $field) {
$content = $biblio[$field] ?? '';
if ($content) {
$title[] = $content;
}
}
return implode(' ', $title);
}
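    // Illustrative sketch only (hypothetical record, not part of the driver):
    //
    //   $biblio = [
    //       'title'       => 'Foundations',
    //       'subtitle'    => 'A Primer',
    //       'part_number' => 'Vol. 2',
    //       'part_name'   => 'Methods',
    //   ];
    //   // getBiblioTitle($biblio) === 'Foundations A Primer Vol. 2 Methods'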
/**
* Convert a date to display format
*
* @param string $date Date
* @param bool $withTime Whether the date includes time
*
* @return string
*/
protected function convertDate($date, $withTime = false)
{
if (!$date) {
return '';
}
$createFormat = $withTime ? 'Y-m-d\TH:i:sP' : 'Y-m-d';
return $this->dateConverter->convertToDisplayDate($createFormat, $date);
}
/**
* Get Patron Transactions
*
* This is responsible for retrieving all transactions (i.e. checked-out items
* or checked-in items) by a specific patron.
*
* @param array $patron The patron array from patronLogin
* @param array $params Parameters
* @param bool $checkedIn Whether to list checked-in items
*
* @throws DateException
* @throws ILSException
* @return array Array of the patron's transactions on success.
*/
protected function getTransactions($patron, $params, $checkedIn)
{
$pageSize = $params['limit'] ?? 50;
$sort = $params['sort'] ?? '+due_date';
if ('+title' === $sort) {
$sort = '+title|+subtitle';
} elseif ('-title' === $sort) {
$sort = '-title|-subtitle';
}
$queryParams = [
'_order_by' => $sort,
'_page' => $params['page'] ?? 1,
'_per_page' => $pageSize
];
if ($checkedIn) {
$queryParams['checked_in'] = '1';
$arrayKey = 'transactions';
} else {
$arrayKey = 'records';
}
$result = $this->makeRequest(
[
'path' => [
'v1', 'contrib', 'kohasuomi', 'patrons', $patron['id'],
'checkouts'
],
'query' => $queryParams
]
);
if (200 !== $result['code']) {
throw new ILSException('Problem with Koha REST API.');
}
if (empty($result['data'])) {
return [
'count' => 0,
$arrayKey => []
];
}
$transactions = [];
foreach ($result['data'] as $entry) {
$dueStatus = false;
$now = time();
$dueTimeStamp = strtotime($entry['due_date']);
if (is_numeric($dueTimeStamp)) {
if ($now > $dueTimeStamp) {
$dueStatus = 'overdue';
} elseif ($now > $dueTimeStamp - (1 * 24 * 60 * 60)) {
$dueStatus = 'due';
}
}
$renewable = $entry['renewable'];
$renewals = $entry['renewals'];
$renewLimit = $entry['max_renewals'];
$message = '';
if (!$renewable && !$checkedIn) {
$message = $this->mapRenewalBlockReason(
$entry['renewability_blocks']
);
$permanent = in_array(
$entry['renewability_blocks'], $this->permanentRenewalBlocks
);
if ($permanent) {
$renewals = null;
$renewLimit = null;
}
}
$transaction = [
'id' => $entry['biblio_id'],
'checkout_id' => $entry['checkout_id'],
'item_id' => $entry['item_id'],
'barcode' => $entry['external_id'] ?? null,
'title' => $this->getBiblioTitle($entry),
'volume' => $entry['serial_issue_number'] ?? '',
'publication_year' => $entry['copyright_date']
?? $entry['publication_year'] ?? '',
'borrowingLocation' => $this->getLibraryName($entry['library_id']),
'checkoutDate' => $this->convertDate($entry['checkout_date']),
'duedate' => $this->convertDate($entry['due_date'], true),
'returnDate' => $this->convertDate($entry['checkin_date']),
'dueStatus' => $dueStatus,
'renew' => $renewals,
'renewLimit' => $renewLimit,
'renewable' => $renewable,
'message' => $message
];
$transactions[] = $transaction;
}
return [
'count' => $result['headers']['X-Total-Count'] ?? count($transactions),
$arrayKey => $transactions
];
}
/**
* Get a description for a block
*
* @param string $reason Koha block reason
* @param array $details Any details related to the reason
*
* @return string
*/
protected function getPatronBlockReason($reason, $details)
{
$params = [];
switch ($reason) {
case 'Hold::MaximumHoldsReached':
$params = [
'%%blockCount%%' => $details['current_hold_count'],
'%%blockLimit%%' => $details['max_holds_allowed']
];
break;
case 'Patron::Debt':
case 'Patron::DebtGuarantees':
$count = isset($details['current_outstanding'])
? $this->safeMoneyFormat->__invoke($details['current_outstanding'])
: '-';
$limit = isset($details['max_outstanding'])
? $this->safeMoneyFormat->__invoke($details['max_outstanding'])
: '-';
$params = [
'%%blockCount%%' => $count,
'%%blockLimit%%' => $limit,
];
break;
}
return $this->translate($this->patronStatusMappings[$reason] ?? '', $params);
}
}
| 1 | 30,250 | You've deleted this code but not added it anywhere else. Should this be used as my proposed example in the .ini file? Do we need a mechanism for configuring a global fallback string independent of the numeric codes? | vufind-org-vufind | php |
@@ -46,6 +46,9 @@ import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import com.google.protobuf.DescriptorProtos.FieldDescriptorProto.Type;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
import java.util.List;
import javax.annotation.Nullable; | 1 | /* Copyright 2016 Google Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.api.codegen.nodejs;
import com.google.api.codegen.ApiConfig;
import com.google.api.codegen.GapicContext;
import com.google.api.codegen.MethodConfig;
import com.google.api.codegen.transformer.ApiMethodTransformer;
import com.google.api.codegen.transformer.GrpcStubTransformer;
import com.google.api.codegen.transformer.MethodTransformerContext;
import com.google.api.codegen.transformer.ModelTypeTable;
import com.google.api.codegen.transformer.SurfaceTransformerContext;
import com.google.api.codegen.transformer.nodejs.NodeJSModelTypeNameConverter;
import com.google.api.codegen.transformer.nodejs.NodeJSSurfaceNamer;
import com.google.api.codegen.util.nodejs.NodeJSTypeTable;
import com.google.api.codegen.viewmodel.ApiMethodView;
import com.google.api.codegen.viewmodel.GrpcStubView;
import com.google.api.tools.framework.aspects.documentation.model.DocumentationUtil;
import com.google.api.tools.framework.aspects.documentation.model.ElementDocumentationAttribute;
import com.google.api.tools.framework.model.Field;
import com.google.api.tools.framework.model.Interface;
import com.google.api.tools.framework.model.MessageType;
import com.google.api.tools.framework.model.Method;
import com.google.api.tools.framework.model.Model;
import com.google.api.tools.framework.model.ProtoContainerElement;
import com.google.api.tools.framework.model.ProtoElement;
import com.google.api.tools.framework.model.ProtoFile;
import com.google.api.tools.framework.model.TypeRef;
import com.google.api.tools.framework.model.TypeRef.Cardinality;
import com.google.common.base.Splitter;
import com.google.common.base.Strings;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import com.google.protobuf.DescriptorProtos.FieldDescriptorProto.Type;
import java.util.List;
import javax.annotation.Nullable;
/**
* A GapicContext specialized for NodeJS.
*/
public class NodeJSGapicContext extends GapicContext implements NodeJSContext {
public NodeJSGapicContext(Model model, ApiConfig apiConfig) {
super(model, apiConfig);
}
// Snippet Helpers
// ===============
/**
* Return ApiMethodView for sample gen.
*
* NOTE: Temporary solution to use MVVM with just sample gen. This class
* will eventually go away when code gen also converts to MVVM.
*/
public ApiMethodView getApiMethodView(Interface service, Method method) {
SurfaceTransformerContext context = getSurfaceTransformerContextFromService(service);
MethodTransformerContext methodContext = context.asMethodContext(method);
ApiMethodTransformer apiMethodTransformer = new ApiMethodTransformer();
return apiMethodTransformer.generateDynamicLangApiMethod(methodContext);
}
/**
* Return GrpcStubViews for mixins.
*
* NOTE: Temporary solution to use MVVM with just sample gen. This class
* will eventually go away when code gen also converts to MVVM.
*/
public List<GrpcStubView> getStubs(Interface service) {
GrpcStubTransformer grpcStubTransformer = new GrpcStubTransformer();
SurfaceTransformerContext context = getSurfaceTransformerContextFromService(service);
return grpcStubTransformer.generateGrpcStubs(context);
}
private String getStubNameFor(Interface service, Method method) {
NodeJSSurfaceNamer namer = new NodeJSSurfaceNamer(getApiConfig().getPackageName());
String jsMethodName = namer.getApiMethodName(method);
for (GrpcStubView stub : getStubs(service)) {
for (String methodName : stub.methodNames()) {
if (jsMethodName.equals(methodName)) {
return stub.name();
}
}
}
throw new IllegalArgumentException(
"Method " + method.getFullName() + " cannot be found in the stubs");
}
private SurfaceTransformerContext getSurfaceTransformerContextFromService(Interface service) {
ModelTypeTable modelTypeTable =
new ModelTypeTable(
new NodeJSTypeTable(getApiConfig().getPackageName()),
new NodeJSModelTypeNameConverter(getApiConfig().getPackageName()));
return SurfaceTransformerContext.create(
service,
getApiConfig(),
modelTypeTable,
new NodeJSSurfaceNamer(getApiConfig().getPackageName()),
new NodeJSFeatureConfig());
}
public String filePath(ProtoFile file) {
return file.getSimpleName().replace(".proto", "_pb2.js");
}
/**
 * Return comment lines for a given proto element, extracted directly from the proto doc
*/
public List<String> defaultComments(ProtoElement element) {
if (!element.hasAttribute(ElementDocumentationAttribute.KEY)) {
return ImmutableList.<String>of();
}
return convertToCommentedBlock(
JSDocCommentFixer.jsdocify(DocumentationUtil.getScopedDescription(element)));
}
/**
* The package name of the grpc module for the API.
*/
public String grpcClientName(Interface service) {
return "grpc-" + service.getFile().getFullName().replace('.', '-');
}
public boolean isGcloud() {
return NodeJSUtils.isGcloud(getApiConfig());
}
/**
* The namespace (full package name) for the service.
*/
public String getNamespace(Interface service) {
String fullName = service.getFullName();
int slash = fullName.lastIndexOf('.');
return fullName.substring(0, slash);
}
/**
 * The module name for this vkit module. This assumes that the service's
 * full name will be in the format of 'google.some.apiname.version.ServiceName',
 * and extracts the 'apiname' and 'version' parts and combines them into
 * lower-camelcased style (like pubsubV1).
*/
public String getModuleName(Interface service) {
List<String> names = Splitter.on(".").splitToList(service.getFullName());
return names.get(names.size() - 3) + lowerUnderscoreToUpperCamel(names.get(names.size() - 2));
}
/**
* Returns the major version part in the API namespace. This assumes that the service's
* full name will be in the format of 'google.some.apiname.version.ServiceName', and
* extracts the 'version' part.
*/
public String getApiVersion(Interface service) {
List<String> names = Splitter.on(".").splitToList(service.getFullName());
return names.get(names.size() - 2);
}
/**
* Returns the filename for documenting messages.
*/
public String getDocFilename(ProtoFile file) {
String filePath = file.getSimpleName().replace(".proto", ".js");
if (isExternalFile(file)) {
filePath = filePath.replaceAll("/", "_");
} else {
int lastSlash = filePath.lastIndexOf('/');
if (lastSlash >= 0) {
filePath = filePath.substring(lastSlash + 1);
}
}
return "doc_" + filePath;
}
/**
* Returns true if the proto file is external to the current package.
 * Currently, it only checks the file path and considers the file external if
 * it is one of the well-known common protos.
*/
public boolean isExternalFile(ProtoFile file) {
String filePath = file.getSimpleName();
for (String commonPath : COMMON_PROTO_PATHS) {
if (filePath.startsWith(commonPath)) {
return true;
}
}
return false;
}
public String getFileURL(ProtoFile file) {
String filePath = file.getSimpleName();
if (filePath.startsWith("google/protobuf")) {
return "https://github.com/google/protobuf/blob/master/src/" + filePath;
} else {
return "https://github.com/googleapis/googleapis/blob/master/" + filePath;
}
}
/**
* Returns type information for a field in JSDoc style.
*/
private String fieldTypeCardinalityComment(Field field) {
TypeRef type = field.getType();
String cardinalityComment = "";
if (type.getCardinality() == Cardinality.REPEATED) {
if (type.isMap()) {
String keyType = jsTypeName(type.getMapKeyField().getType());
String valueType = jsTypeName(type.getMapValueField().getType());
return String.format("Object.<%s, %s>", keyType, valueType);
} else {
cardinalityComment = "[]";
}
}
String typeComment = jsTypeName(field.getType());
return String.format("%s%s", typeComment, cardinalityComment);
}
/**
* Returns a JSDoc comment string for the field as a parameter to a function.
*/
private String fieldParamComment(Field field, String paramComment, boolean isOptional) {
String commentType = fieldTypeCardinalityComment(field);
String fieldName = wrapIfKeywordOrBuiltIn(lowerUnderscoreToLowerCamel(field.getSimpleName()));
if (isOptional) {
fieldName = "options." + fieldName;
commentType = commentType + "=";
}
return fieldComment(
String.format("@param {%s} %s", commentType, fieldName), paramComment, field);
}
/**
* Returns a JSDoc comment string for the field as an attribute of a message.
*/
public List<String> fieldPropertyComment(Field field) {
String commentType = fieldTypeCardinalityComment(field);
String fieldName = wrapIfKeywordOrBuiltIn(field.getSimpleName());
return convertToCommentedBlock(
fieldComment(String.format("@property {%s} %s", commentType, fieldName), null, field));
}
private String fieldComment(String comment, String paramComment, Field field) {
if (paramComment == null) {
paramComment = DocumentationUtil.getScopedDescription(field);
}
if (!Strings.isNullOrEmpty(paramComment)) {
paramComment = JSDocCommentFixer.jsdocify(paramComment);
comment += "\n " + paramComment.replaceAll("(\\r?\\n)", "\n ");
}
if (field.getType().isMessage() && !field.getType().isMap()) {
if (!Strings.isNullOrEmpty(paramComment)) {
comment += "\n";
}
comment +=
"\n This object should have the same structure as "
+ linkForMessage(field.getType().getMessageType());
} else if (field.getType().isEnum()) {
if (!Strings.isNullOrEmpty(paramComment)) {
comment += "\n";
}
comment +=
"\n The number should be among the values of "
+ linkForMessage(field.getType().getEnumType());
}
return comment + "\n";
}
/**
* Return JSDoc callback comment and return type comment for the given method.
*/
@Nullable
private String returnTypeComment(Method method, MethodConfig config) {
if (config.isPageStreaming()) {
String callbackMessage =
"@param {function(?Error, ?"
+ jsTypeName(method.getOutputType())
+ ", ?"
+ jsTypeName(config.getPageStreaming().getResponseTokenField().getType())
+ ")=} callback\n"
+ " When specified, the results are not streamed but this callback\n"
+ " will be called with the response object representing "
+ linkForMessage(method.getOutputMessage())
+ ".\n"
+ " The third item will be set if the response contains the token for the further results\n"
+ " and can be reused to `pageToken` field in the options in the next request.";
TypeRef resourceType = config.getPageStreaming().getResourcesField().getType();
String resourceTypeName;
if (resourceType.isMessage()) {
resourceTypeName =
"an object representing\n " + linkForMessage(resourceType.getMessageType());
} else if (resourceType.isEnum()) {
resourceTypeName = "a number of\n " + linkForMessage(resourceType.getEnumType());
} else {
resourceTypeName = "a " + jsTypeName(resourceType);
}
return callbackMessage
+ "\n@returns {Stream|gax.EventEmitter}\n"
+ " An object stream which emits "
+ resourceTypeName
+ " on 'data' event.\n"
+ " When the callback is specified or streaming is suppressed through options,\n"
+ " it will return an event emitter to handle the call status and the callback\n"
+ " will be called with the response object.";
}
MessageType returnMessageType = method.getOutputMessage();
boolean isEmpty = returnMessageType.getFullName().equals("google.protobuf.Empty");
String classInfo = jsTypeName(method.getOutputType());
String callbackType =
isEmpty ? "function(?Error)" : String.format("function(?Error, ?%s)", classInfo);
String callbackMessage =
"@param {"
+ callbackType
+ "=} callback\n"
+ " The function which will be called with the result of the API call.";
if (!isEmpty) {
callbackMessage +=
"\n\n The second parameter to the callback is an object representing "
+ linkForMessage(returnMessageType);
}
String returnMessage =
"@returns {"
+ (config.isBundling() ? "gax.BundleEventEmitter" : "gax.EventEmitter")
+ "} - the event emitter to handle the call\n"
+ " status.";
if (config.isBundling()) {
returnMessage +=
" When isBundling: false is specified in the options, it still returns\n"
+ " a gax.BundleEventEmitter but the API is immediately invoked, so it behaves same\n"
+ " as a gax.EventEmitter does.";
}
return callbackMessage + "\n" + returnMessage;
}
/**
* Return the list of messages within element which should be documented in Node.JS.
*/
public ImmutableList<MessageType> filterDocumentingMessages(ProtoContainerElement element) {
ImmutableList.Builder<MessageType> builder = ImmutableList.builder();
for (MessageType msg : element.getMessages()) {
      // Map entries don't need to be documented in Node.JS because Object is used.
if (!msg.isMapEntry()) {
builder.add(msg);
}
}
return builder.build();
}
/**
 * Return comment lines for a given method, consisting of proto doc and parameter type
* documentation.
*/
public List<String> methodComments(Interface service, Method msg) {
MethodConfig config = getApiConfig().getInterfaceConfig(service).getMethodConfig(msg);
// Generate parameter types
StringBuilder paramTypesBuilder = new StringBuilder();
for (Field field : config.getRequiredFields()) {
paramTypesBuilder.append(fieldParamComment(field, null, false));
}
paramTypesBuilder.append(
"@param {Object=} options\n"
+ " Optional parameters. You can override the default settings for this call, e.g, timeout,\n"
+ " retries, paginations, etc. See [gax.CallOptions]{@link "
+ "https://googleapis.github.io/gax-nodejs/global.html#CallOptions} for the details.");
Iterable<Field> optionalParams = removePageTokenFromFields(config.getOptionalFields(), config);
if (optionalParams.iterator().hasNext()) {
paramTypesBuilder.append(
"\n\n In addition, options may contain the following optional parameters.\n");
for (Field field : optionalParams) {
if (config.isPageStreaming()
&& field.equals((config.getPageStreaming().getPageSizeField()))) {
paramTypesBuilder.append(
fieldParamComment(
field,
"The maximum number of resources contained in the underlying API\n"
+ "response. If page streaming is performed per-resource, this\n"
+ "parameter does not affect the return value. If page streaming is\n"
+ "performed per-page, this determines the maximum number of\n"
+ "resources in a page.",
true));
} else {
paramTypesBuilder.append(fieldParamComment(field, null, true));
}
}
}
String paramTypes = paramTypesBuilder.toString();
String returnType = returnTypeComment(msg, config);
// Generate comment contents
StringBuilder contentBuilder = new StringBuilder();
if (msg.hasAttribute(ElementDocumentationAttribute.KEY)) {
contentBuilder.append(
JSDocCommentFixer.jsdocify(DocumentationUtil.getScopedDescription(msg)));
if (!Strings.isNullOrEmpty(paramTypes)) {
contentBuilder.append("\n\n");
}
}
contentBuilder.append(paramTypes);
if (returnType != null) {
contentBuilder.append("\n" + returnType);
}
return convertToCommentedBlock(contentBuilder.toString());
}
/**
* Return a non-conflicting safe name if name is a JS reserved word.
*/
public String wrapIfKeywordOrBuiltIn(String name) {
if (KEYWORD_BUILT_IN_SET.contains(name)) {
return name + "_";
}
return name;
}
/**
 * Returns the name of the JS type for the given typeRef.
*/
public String jsTypeName(TypeRef typeRef) {
switch (typeRef.getKind()) {
case TYPE_MESSAGE:
return "Object";
case TYPE_ENUM:
return "number";
default:
{
String name = PRIMITIVE_TYPE_NAMES.get(typeRef.getKind());
if (!Strings.isNullOrEmpty(name)) {
return name;
}
throw new IllegalArgumentException("unknown type kind: " + typeRef.getKind());
}
}
}
/**
 * Returns the JS type name for arguejs parameter definitions.
*/
public String getFieldType(Field field) {
TypeRef typeRef = field.getType();
if (typeRef.isMap()) {
return "Object";
}
if (typeRef.getCardinality() == Cardinality.REPEATED) {
return "Array";
}
switch (typeRef.getKind()) {
case TYPE_MESSAGE:
return "Object";
case TYPE_BOOL:
return "Boolean";
case TYPE_STRING:
case TYPE_BYTES:
return "String";
default:
// Numeric types and enums.
return "Number";
}
}
/**
* Returns the JSDoc format of link to the element.
*/
public String linkForMessage(ProtoElement element) {
if (isExternalFile(element.getFile())) {
String fullName = element.getFullName();
return String.format("[%s]{@link external:\"%s\"}", fullName, fullName);
} else {
String simpleName = element.getSimpleName();
return String.format("[%s]{@link %s}", simpleName, simpleName);
}
}
/**
* Returns the JavaScript representation of the function to return the byte length.
*/
public String getByteLengthFunction(Interface service, Method method, TypeRef typeRef) {
switch (typeRef.getKind()) {
case TYPE_MESSAGE:
return "gax.createByteLengthFunction(grpcClients."
+ getStubNameFor(service, method)
+ "."
+ typeRef.getMessageType().getFullName()
+ ")";
case TYPE_STRING:
case TYPE_BYTES:
return "function(s) { return s.length; }";
default:
// There is no easy way to say the actual length of the numeric fields.
// For now throwing an exception.
throw new IllegalArgumentException(
"Can't determine the byte length function for " + typeRef.getKind());
}
}
/**
* Convert the content string into a commented block that can be directly printed out in the
* generated JS files.
*/
private List<String> convertToCommentedBlock(String content) {
if (Strings.isNullOrEmpty(content)) {
return ImmutableList.<String>of();
}
ImmutableList.Builder<String> builder = ImmutableList.builder();
for (String comment : Splitter.on("\n").splitToList(content)) {
builder.add(comment);
}
return builder.build();
}
// Constants
// =========
/**
 * A map from primitive types to their default values.
*/
private static final ImmutableMap<Type, String> DEFAULT_VALUE_MAP =
ImmutableMap.<Type, String>builder()
.put(Type.TYPE_BOOL, "false")
.put(Type.TYPE_DOUBLE, "0.0")
.put(Type.TYPE_FLOAT, "0.0")
.put(Type.TYPE_INT64, "0")
.put(Type.TYPE_UINT64, "0")
.put(Type.TYPE_SINT64, "0")
.put(Type.TYPE_FIXED64, "0")
.put(Type.TYPE_SFIXED64, "0")
.put(Type.TYPE_INT32, "0")
.put(Type.TYPE_UINT32, "0")
.put(Type.TYPE_SINT32, "0")
.put(Type.TYPE_FIXED32, "0")
.put(Type.TYPE_SFIXED32, "0")
.put(Type.TYPE_STRING, "\'\'")
.put(Type.TYPE_BYTES, "\'\'")
.build();
private static final ImmutableMap<Type, String> PRIMITIVE_TYPE_NAMES =
ImmutableMap.<Type, String>builder()
.put(Type.TYPE_BOOL, "boolean")
.put(Type.TYPE_DOUBLE, "number")
.put(Type.TYPE_FLOAT, "number")
.put(Type.TYPE_INT64, "number")
.put(Type.TYPE_UINT64, "number")
.put(Type.TYPE_SINT64, "number")
.put(Type.TYPE_FIXED64, "number")
.put(Type.TYPE_SFIXED64, "number")
.put(Type.TYPE_INT32, "number")
.put(Type.TYPE_UINT32, "number")
.put(Type.TYPE_SINT32, "number")
.put(Type.TYPE_FIXED32, "number")
.put(Type.TYPE_SFIXED32, "number")
.put(Type.TYPE_STRING, "string")
.put(Type.TYPE_BYTES, "string")
.build();
/**
* A set of ECMAScript 2016 reserved words. See
* https://tc39.github.io/ecma262/2016/#sec-reserved-words
*/
private static final ImmutableSet<String> KEYWORD_BUILT_IN_SET =
ImmutableSet.<String>builder()
.add(
"break",
"do",
"in",
"typeof",
"case",
"else",
"instanceof",
"var",
"catch",
"export",
"new",
"void",
"class",
"extends",
"return",
"while",
"const",
"finally",
"super",
"with",
"continue",
"for",
"switch",
"yield",
"debugger",
"function",
"this",
"default",
"if",
"throw",
"delete",
"import",
"try",
"let",
"static",
"enum",
"await",
"implements",
"package",
"protected",
"interface",
"private",
"public",
"null",
"true",
"false",
// common parameters passed to methods.
"options",
"callback",
// parameters used in CallOptions.
"timeout",
"retry",
"flattenPages",
"pageToken",
"isBundling")
.build();
private static final ImmutableSet<String> COMMON_PROTO_PATHS =
ImmutableSet.<String>builder()
.add(
"google/api",
"google/bytestream",
"google/logging/type",
"google/longrunning",
"google/protobuf",
"google/rpc",
"google/type")
.build();
}
| 1 | 18,069 | nit: strip away those imports? seems not used in the new code. | googleapis-gapic-generator | java |
@@ -25,8 +25,12 @@ import (
"os"
"time"
+ "github.com/ethereum/go-ethereum/common"
+
+ "github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethclient"
+ "github.com/mysteriumnetwork/node/blockchain/generated"
"github.com/mysteriumnetwork/payments/cli/helpers"
"github.com/mysteriumnetwork/payments/contracts/abigen"
"github.com/mysteriumnetwork/payments/mysttoken" | 1 | /*
* Copyright (C) 2017 The "MysteriumNetwork/node" Authors.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package main
import (
"context"
"flag"
"fmt"
"math/big"
"os"
"time"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/mysteriumnetwork/payments/cli/helpers"
"github.com/mysteriumnetwork/payments/contracts/abigen"
"github.com/mysteriumnetwork/payments/mysttoken"
)
func main() {
keyStoreDir := flag.String("keystore.directory", "", "Directory of keystore")
etherAddress := flag.String("ether.address", "", "Account inside keystore to use for deployment")
etherPassphrase := flag.String("ether.passphrase", "", "Passphrase for account unlocking")
ethRPC := flag.String("geth.url", "", "RPC url of ethereum client")
flag.Parse()
ks := helpers.GetKeystore(*keyStoreDir)
acc, err := helpers.GetUnlockedAcc(*etherAddress, *etherPassphrase, ks)
checkError("Unlock acc", err)
transactor := helpers.CreateNewKeystoreTransactor(ks, acc)
client, synced, err := helpers.LookupBackend(*ethRPC)
checkError("backend lookup", err)
<-synced
mystTokenAddress, tx, _, err := mysttoken.DeployMystToken(transactor, client)
checkError("Deploy token", err)
checkTxStatus(client, tx)
fmt.Println("Token: ", mystTokenAddress.String())
transactor.Nonce = big.NewInt(int64(tx.Nonce() + 1))
paymentsAddress, tx, _, err := abigen.DeployIdentityPromises(transactor, client, mystTokenAddress, big.NewInt(100))
checkError("Deploy payments", err)
checkTxStatus(client, tx)
fmt.Println("Payments: ", paymentsAddress.String())
}
func checkError(context string, err error) {
if err != nil {
fmt.Println("Error at:", context, "value:", err.Error())
os.Exit(1)
}
}
func checkTxStatus(client *ethclient.Client, tx *types.Transaction) {
	// wait for the transaction to be mined, for at most 10 seconds
for i := 0; i < 10; i++ {
_, pending, err := client.TransactionByHash(context.Background(), tx.Hash())
checkError("Get tx by hash", err)
if pending {
time.Sleep(1 * time.Second)
} else {
break
}
}
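	// If the transaction is still pending after ~10 seconds, the receipt lookup below is
	// expected to fail and checkError will terminate the program.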
receipt, err := client.TransactionReceipt(context.Background(), tx.Hash())
checkError("Fetch tx receipt", err)
if receipt.Status != 1 {
fmt.Println("Receipt status expected to be 1")
os.Exit(1)
}
}
| 1 | 14,587 | redundant whitespace :octocat: | mysteriumnetwork-node | go |
@@ -698,7 +698,8 @@ class TestCloudFormationSetStackPolicy(CloudFormationConnectionBase):
self.set_http_response(status_code=200)
api_response = self.service_connection.set_stack_policy('stack-id',
stack_policy_body='{}')
- self.assertEqual(api_response['Some'], 'content')
+ self.assertDictEqual(api_response, {'SetStackPolicyResult': {'Some': 'content'}})
+ self.assertIsInstance(api_response, dict)
self.assert_request_parameters({
'Action': 'SetStackPolicy',
'ContentType': 'JSON', | 1 | #!/usr/bin/env python
import unittest
from datetime import datetime
from mock import Mock
from tests.unit import AWSMockServiceTestCase
from boto.cloudformation.connection import CloudFormationConnection
from boto.exception import BotoServerError
from boto.compat import json
SAMPLE_TEMPLATE = r"""
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "Sample template",
"Parameters" : {
"KeyName" : {
"Description" : "key pair",
"Type" : "String"
}
},
"Resources" : {
"Ec2Instance" : {
"Type" : "AWS::EC2::Instance",
"Properties" : {
"KeyName" : { "Ref" : "KeyName" },
"ImageId" : "ami-7f418316",
"UserData" : { "Fn::Base64" : "80" }
}
}
},
"Outputs" : {
"InstanceId" : {
"Description" : "InstanceId of the newly created EC2 instance",
"Value" : { "Ref" : "Ec2Instance" }
}
}
"""
class CloudFormationConnectionBase(AWSMockServiceTestCase):
connection_class = CloudFormationConnection
def setUp(self):
super(CloudFormationConnectionBase, self).setUp()
self.stack_id = u'arn:aws:cloudformation:us-east-1:18:stack/Name/id'
class TestCloudFormationCreateStack(CloudFormationConnectionBase):
def default_body(self):
return json.dumps(
{u'CreateStackResponse':
{u'CreateStackResult': {u'StackId': self.stack_id},
u'ResponseMetadata': {u'RequestId': u'1'}}}).encode('utf-8')
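    # Note: default_body() is the canned payload that AWSMockServiceTestCase's
    # set_http_response() falls back to when no explicit body is given, i.e. the response
    # the mocked CloudFormation endpoint hands back to the connection under test.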
def test_create_stack_has_correct_request_params(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.create_stack(
'stack_name', template_url='http://url',
template_body=SAMPLE_TEMPLATE,
parameters=[('KeyName', 'myKeyName')],
tags={'TagKey': 'TagValue'},
notification_arns=['arn:notify1', 'arn:notify2'],
disable_rollback=True,
timeout_in_minutes=20, capabilities=['CAPABILITY_IAM']
)
self.assertEqual(api_response, self.stack_id)
# These are the parameters that are actually sent to the CloudFormation
# service.
self.assert_request_parameters({
'Action': 'CreateStack',
'Capabilities.member.1': 'CAPABILITY_IAM',
'ContentType': 'JSON',
'DisableRollback': 'true',
'NotificationARNs.member.1': 'arn:notify1',
'NotificationARNs.member.2': 'arn:notify2',
'Parameters.member.1.ParameterKey': 'KeyName',
'Parameters.member.1.ParameterValue': 'myKeyName',
'Tags.member.1.Key': 'TagKey',
'Tags.member.1.Value': 'TagValue',
'StackName': 'stack_name',
'Version': '2010-05-15',
'TimeoutInMinutes': 20,
'TemplateBody': SAMPLE_TEMPLATE,
'TemplateURL': 'http://url',
})
# The test_create_stack_has_correct_request_params verified all of the
# params needed when making a create_stack service call. The rest of the
# tests for create_stack only verify specific parts of the params sent
# to CloudFormation.
def test_create_stack_with_minimum_args(self):
# This will fail in practice, but the API docs only require stack_name.
self.set_http_response(status_code=200)
api_response = self.service_connection.create_stack('stack_name')
self.assertEqual(api_response, self.stack_id)
self.assert_request_parameters({
'Action': 'CreateStack',
'ContentType': 'JSON',
'DisableRollback': 'false',
'StackName': 'stack_name',
'Version': '2010-05-15',
})
def test_create_stack_fails(self):
self.set_http_response(status_code=400, reason='Bad Request',
body=b'{"Error": {"Code": 1, "Message": "Invalid arg."}}')
with self.assertRaisesRegexp(self.service_connection.ResponseError,
'Invalid arg.'):
api_response = self.service_connection.create_stack(
'stack_name', template_body=SAMPLE_TEMPLATE,
parameters=[('KeyName', 'myKeyName')])
def test_create_stack_fail_error(self):
self.set_http_response(status_code=400, reason='Bad Request',
body=b'{"RequestId": "abc", "Error": {"Code": 1, "Message": "Invalid arg."}}')
try:
api_response = self.service_connection.create_stack(
'stack_name', template_body=SAMPLE_TEMPLATE,
parameters=[('KeyName', 'myKeyName')])
except BotoServerError as e:
self.assertEqual('abc', e.request_id)
self.assertEqual(1, e.error_code)
self.assertEqual('Invalid arg.', e.message)
class TestCloudFormationUpdateStack(CloudFormationConnectionBase):
def default_body(self):
return json.dumps(
{u'UpdateStackResponse':
{u'UpdateStackResult': {u'StackId': self.stack_id},
u'ResponseMetadata': {u'RequestId': u'1'}}}).encode('utf-8')
def test_update_stack_all_args(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.update_stack(
'stack_name', template_url='http://url',
template_body=SAMPLE_TEMPLATE,
parameters=[('KeyName', 'myKeyName')],
tags={'TagKey': 'TagValue'},
notification_arns=['arn:notify1', 'arn:notify2'],
disable_rollback=True,
timeout_in_minutes=20
)
self.assert_request_parameters({
'Action': 'UpdateStack',
'ContentType': 'JSON',
'DisableRollback': 'true',
'NotificationARNs.member.1': 'arn:notify1',
'NotificationARNs.member.2': 'arn:notify2',
'Parameters.member.1.ParameterKey': 'KeyName',
'Parameters.member.1.ParameterValue': 'myKeyName',
'Tags.member.1.Key': 'TagKey',
'Tags.member.1.Value': 'TagValue',
'StackName': 'stack_name',
'Version': '2010-05-15',
'TimeoutInMinutes': 20,
'TemplateBody': SAMPLE_TEMPLATE,
'TemplateURL': 'http://url',
})
def test_update_stack_with_minimum_args(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.update_stack('stack_name')
self.assertEqual(api_response, self.stack_id)
self.assert_request_parameters({
'Action': 'UpdateStack',
'ContentType': 'JSON',
'DisableRollback': 'false',
'StackName': 'stack_name',
'Version': '2010-05-15',
})
def test_update_stack_fails(self):
self.set_http_response(status_code=400, reason='Bad Request',
body=b'Invalid arg.')
with self.assertRaises(self.service_connection.ResponseError):
api_response = self.service_connection.update_stack(
'stack_name', template_body=SAMPLE_TEMPLATE,
parameters=[('KeyName', 'myKeyName')])
class TestCloudFormationDeleteStack(CloudFormationConnectionBase):
def default_body(self):
return json.dumps(
{u'DeleteStackResponse':
{u'ResponseMetadata': {u'RequestId': u'1'}}}).encode('utf-8')
def test_delete_stack(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.delete_stack('stack_name')
self.assertEqual(api_response, json.loads(self.default_body().decode('utf-8')))
self.assert_request_parameters({
'Action': 'DeleteStack',
'ContentType': 'JSON',
'StackName': 'stack_name',
'Version': '2010-05-15',
})
def test_delete_stack_fails(self):
self.set_http_response(status_code=400)
with self.assertRaises(self.service_connection.ResponseError):
api_response = self.service_connection.delete_stack('stack_name')
class TestCloudFormationDescribeStackResource(CloudFormationConnectionBase):
def default_body(self):
return json.dumps('fake server response').encode('utf-8')
def test_describe_stack_resource(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.describe_stack_resource(
'stack_name', 'resource_id')
self.assertEqual(api_response, 'fake server response')
self.assert_request_parameters({
'Action': 'DescribeStackResource',
'ContentType': 'JSON',
'LogicalResourceId': 'resource_id',
'StackName': 'stack_name',
'Version': '2010-05-15',
})
def test_describe_stack_resource_fails(self):
self.set_http_response(status_code=400)
with self.assertRaises(self.service_connection.ResponseError):
api_response = self.service_connection.describe_stack_resource(
'stack_name', 'resource_id')
class TestCloudFormationGetTemplate(CloudFormationConnectionBase):
def default_body(self):
return json.dumps('fake server response').encode('utf-8')
def test_get_template(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.get_template('stack_name')
self.assertEqual(api_response, 'fake server response')
self.assert_request_parameters({
'Action': 'GetTemplate',
'ContentType': 'JSON',
'StackName': 'stack_name',
'Version': '2010-05-15',
})
def test_get_template_fails(self):
self.set_http_response(status_code=400)
with self.assertRaises(self.service_connection.ResponseError):
api_response = self.service_connection.get_template('stack_name')
class TestCloudFormationGetStackevents(CloudFormationConnectionBase):
def default_body(self):
return b"""
<DescribeStackEventsResult>
<StackEvents>
<member>
<EventId>Event-1-Id</EventId>
<StackId>arn:aws:cfn:us-east-1:1:stack</StackId>
<StackName>MyStack</StackName>
<LogicalResourceId>MyStack</LogicalResourceId>
<PhysicalResourceId>MyStack_One</PhysicalResourceId>
<ResourceType>AWS::CloudFormation::Stack</ResourceType>
<Timestamp>2010-07-27T22:26:28Z</Timestamp>
<ResourceStatus>CREATE_IN_PROGRESS</ResourceStatus>
<ResourceStatusReason>User initiated</ResourceStatusReason>
</member>
<member>
<EventId>Event-2-Id</EventId>
<StackId>arn:aws:cfn:us-east-1:1:stack</StackId>
<StackName>MyStack</StackName>
<LogicalResourceId>MySG1</LogicalResourceId>
<PhysicalResourceId>MyStack_SG1</PhysicalResourceId>
<ResourceType>AWS::SecurityGroup</ResourceType>
<Timestamp>2010-07-27T22:28:28Z</Timestamp>
<ResourceStatus>CREATE_COMPLETE</ResourceStatus>
</member>
</StackEvents>
</DescribeStackEventsResult>
"""
def test_describe_stack_events(self):
self.set_http_response(status_code=200)
first, second = self.service_connection.describe_stack_events('stack_name', next_token='next_token')
self.assertEqual(first.event_id, 'Event-1-Id')
self.assertEqual(first.logical_resource_id, 'MyStack')
self.assertEqual(first.physical_resource_id, 'MyStack_One')
self.assertEqual(first.resource_properties, None)
self.assertEqual(first.resource_status, 'CREATE_IN_PROGRESS')
self.assertEqual(first.resource_status_reason, 'User initiated')
self.assertEqual(first.resource_type, 'AWS::CloudFormation::Stack')
self.assertEqual(first.stack_id, 'arn:aws:cfn:us-east-1:1:stack')
self.assertEqual(first.stack_name, 'MyStack')
self.assertIsNotNone(first.timestamp)
self.assertEqual(second.event_id, 'Event-2-Id')
self.assertEqual(second.logical_resource_id, 'MySG1')
self.assertEqual(second.physical_resource_id, 'MyStack_SG1')
self.assertEqual(second.resource_properties, None)
self.assertEqual(second.resource_status, 'CREATE_COMPLETE')
self.assertEqual(second.resource_status_reason, None)
self.assertEqual(second.resource_type, 'AWS::SecurityGroup')
self.assertEqual(second.stack_id, 'arn:aws:cfn:us-east-1:1:stack')
self.assertEqual(second.stack_name, 'MyStack')
self.assertIsNotNone(second.timestamp)
self.assert_request_parameters({
'Action': 'DescribeStackEvents',
'NextToken': 'next_token',
'StackName': 'stack_name',
'Version': '2010-05-15',
})
class TestCloudFormationDescribeStackResources(CloudFormationConnectionBase):
def default_body(self):
return b"""
<DescribeStackResourcesResult>
<StackResources>
<member>
<StackId>arn:aws:cfn:us-east-1:1:stack</StackId>
<StackName>MyStack</StackName>
<LogicalResourceId>MyDBInstance</LogicalResourceId>
<PhysicalResourceId>MyStack_DB1</PhysicalResourceId>
<ResourceType>AWS::DBInstance</ResourceType>
<Timestamp>2010-07-27T22:27:28Z</Timestamp>
<ResourceStatus>CREATE_COMPLETE</ResourceStatus>
</member>
<member>
<StackId>arn:aws:cfn:us-east-1:1:stack</StackId>
<StackName>MyStack</StackName>
<LogicalResourceId>MyAutoScalingGroup</LogicalResourceId>
<PhysicalResourceId>MyStack_ASG1</PhysicalResourceId>
<ResourceType>AWS::AutoScalingGroup</ResourceType>
<Timestamp>2010-07-27T22:28:28Z</Timestamp>
<ResourceStatus>CREATE_IN_PROGRESS</ResourceStatus>
</member>
</StackResources>
</DescribeStackResourcesResult>
"""
def test_describe_stack_resources(self):
self.set_http_response(status_code=200)
first, second = self.service_connection.describe_stack_resources(
'stack_name', 'logical_resource_id', 'physical_resource_id')
self.assertEqual(first.description, None)
self.assertEqual(first.logical_resource_id, 'MyDBInstance')
self.assertEqual(first.physical_resource_id, 'MyStack_DB1')
self.assertEqual(first.resource_status, 'CREATE_COMPLETE')
self.assertEqual(first.resource_status_reason, None)
self.assertEqual(first.resource_type, 'AWS::DBInstance')
self.assertEqual(first.stack_id, 'arn:aws:cfn:us-east-1:1:stack')
self.assertEqual(first.stack_name, 'MyStack')
self.assertIsNotNone(first.timestamp)
self.assertEqual(second.description, None)
self.assertEqual(second.logical_resource_id, 'MyAutoScalingGroup')
self.assertEqual(second.physical_resource_id, 'MyStack_ASG1')
self.assertEqual(second.resource_status, 'CREATE_IN_PROGRESS')
self.assertEqual(second.resource_status_reason, None)
self.assertEqual(second.resource_type, 'AWS::AutoScalingGroup')
self.assertEqual(second.stack_id, 'arn:aws:cfn:us-east-1:1:stack')
self.assertEqual(second.stack_name, 'MyStack')
self.assertIsNotNone(second.timestamp)
self.assert_request_parameters({
'Action': 'DescribeStackResources',
'LogicalResourceId': 'logical_resource_id',
'PhysicalResourceId': 'physical_resource_id',
'StackName': 'stack_name',
'Version': '2010-05-15',
})
class TestCloudFormationDescribeStacks(CloudFormationConnectionBase):
def default_body(self):
return b"""
<DescribeStacksResponse>
<DescribeStacksResult>
<Stacks>
<member>
<StackId>arn:aws:cfn:us-east-1:1:stack</StackId>
<StackStatus>CREATE_COMPLETE</StackStatus>
<StackName>MyStack</StackName>
<StackStatusReason/>
<Description>My Description</Description>
<CreationTime>2012-05-16T22:55:31Z</CreationTime>
<Capabilities>
<member>CAPABILITY_IAM</member>
</Capabilities>
<NotificationARNs>
<member>arn:aws:sns:region-name:account-name:topic-name</member>
</NotificationARNs>
<DisableRollback>false</DisableRollback>
<Parameters>
<member>
<ParameterValue>MyValue</ParameterValue>
<ParameterKey>MyKey</ParameterKey>
</member>
</Parameters>
<Outputs>
<member>
<OutputValue>http://url/</OutputValue>
<Description>Server URL</Description>
<OutputKey>ServerURL</OutputKey>
</member>
</Outputs>
<Tags>
<member>
<Key>MyTagKey</Key>
<Value>MyTagValue</Value>
</member>
</Tags>
</member>
</Stacks>
</DescribeStacksResult>
<ResponseMetadata>
<RequestId>12345</RequestId>
</ResponseMetadata>
</DescribeStacksResponse>
"""
def test_describe_stacks(self):
self.set_http_response(status_code=200)
stacks = self.service_connection.describe_stacks('MyStack')
self.assertEqual(len(stacks), 1)
stack = stacks[0]
self.assertEqual(stack.creation_time,
datetime(2012, 5, 16, 22, 55, 31))
self.assertEqual(stack.description, 'My Description')
self.assertEqual(stack.disable_rollback, False)
self.assertEqual(stack.stack_id, 'arn:aws:cfn:us-east-1:1:stack')
self.assertEqual(stack.stack_status, 'CREATE_COMPLETE')
self.assertEqual(stack.stack_name, 'MyStack')
self.assertEqual(stack.stack_name_reason, None)
self.assertEqual(stack.timeout_in_minutes, None)
self.assertEqual(len(stack.outputs), 1)
self.assertEqual(stack.outputs[0].description, 'Server URL')
self.assertEqual(stack.outputs[0].key, 'ServerURL')
self.assertEqual(stack.outputs[0].value, 'http://url/')
self.assertEqual(len(stack.parameters), 1)
self.assertEqual(stack.parameters[0].key, 'MyKey')
self.assertEqual(stack.parameters[0].value, 'MyValue')
self.assertEqual(len(stack.capabilities), 1)
self.assertEqual(stack.capabilities[0].value, 'CAPABILITY_IAM')
self.assertEqual(len(stack.notification_arns), 1)
self.assertEqual(stack.notification_arns[0].value, 'arn:aws:sns:region-name:account-name:topic-name')
self.assertEqual(len(stack.tags), 1)
self.assertEqual(stack.tags['MyTagKey'], 'MyTagValue')
self.assert_request_parameters({
'Action': 'DescribeStacks',
'StackName': 'MyStack',
'Version': '2010-05-15',
})
class TestCloudFormationListStackResources(CloudFormationConnectionBase):
def default_body(self):
return b"""
<ListStackResourcesResponse>
<ListStackResourcesResult>
<StackResourceSummaries>
<member>
<ResourceStatus>CREATE_COMPLETE</ResourceStatus>
<LogicalResourceId>SampleDB</LogicalResourceId>
<LastUpdatedTime>2011-06-21T20:25:57Z</LastUpdatedTime>
<PhysicalResourceId>My-db-ycx</PhysicalResourceId>
<ResourceType>AWS::RDS::DBInstance</ResourceType>
</member>
<member>
<ResourceStatus>CREATE_COMPLETE</ResourceStatus>
<LogicalResourceId>CPUAlarmHigh</LogicalResourceId>
<LastUpdatedTime>2011-06-21T20:29:23Z</LastUpdatedTime>
<PhysicalResourceId>MyStack-CPUH-PF</PhysicalResourceId>
<ResourceType>AWS::CloudWatch::Alarm</ResourceType>
</member>
</StackResourceSummaries>
</ListStackResourcesResult>
<ResponseMetadata>
<RequestId>2d06e36c-ac1d-11e0-a958-f9382b6eb86b</RequestId>
</ResponseMetadata>
</ListStackResourcesResponse>
"""
def test_list_stack_resources(self):
self.set_http_response(status_code=200)
resources = self.service_connection.list_stack_resources('MyStack',
next_token='next_token')
self.assertEqual(len(resources), 2)
self.assertEqual(resources[0].last_updated_time,
datetime(2011, 6, 21, 20, 25, 57))
self.assertEqual(resources[0].logical_resource_id, 'SampleDB')
self.assertEqual(resources[0].physical_resource_id, 'My-db-ycx')
self.assertEqual(resources[0].resource_status, 'CREATE_COMPLETE')
self.assertEqual(resources[0].resource_status_reason, None)
self.assertEqual(resources[0].resource_type, 'AWS::RDS::DBInstance')
self.assertEqual(resources[1].last_updated_time,
datetime(2011, 6, 21, 20, 29, 23))
self.assertEqual(resources[1].logical_resource_id, 'CPUAlarmHigh')
self.assertEqual(resources[1].physical_resource_id, 'MyStack-CPUH-PF')
self.assertEqual(resources[1].resource_status, 'CREATE_COMPLETE')
self.assertEqual(resources[1].resource_status_reason, None)
self.assertEqual(resources[1].resource_type, 'AWS::CloudWatch::Alarm')
self.assert_request_parameters({
'Action': 'ListStackResources',
'NextToken': 'next_token',
'StackName': 'MyStack',
'Version': '2010-05-15',
})
class TestCloudFormationListStacks(CloudFormationConnectionBase):
def default_body(self):
return b"""
<ListStacksResponse>
<ListStacksResult>
<StackSummaries>
<member>
<StackId>arn:aws:cfn:us-east-1:1:stack/Test1/aa</StackId>
<StackStatus>CREATE_IN_PROGRESS</StackStatus>
<StackName>vpc1</StackName>
<CreationTime>2011-05-23T15:47:44Z</CreationTime>
<TemplateDescription>My Description.</TemplateDescription>
</member>
</StackSummaries>
</ListStacksResult>
</ListStacksResponse>
"""
def test_list_stacks(self):
self.set_http_response(status_code=200)
stacks = self.service_connection.list_stacks(['CREATE_IN_PROGRESS'],
next_token='next_token')
self.assertEqual(len(stacks), 1)
self.assertEqual(stacks[0].stack_id,
'arn:aws:cfn:us-east-1:1:stack/Test1/aa')
self.assertEqual(stacks[0].stack_status, 'CREATE_IN_PROGRESS')
self.assertEqual(stacks[0].stack_name, 'vpc1')
self.assertEqual(stacks[0].creation_time,
datetime(2011, 5, 23, 15, 47, 44))
self.assertEqual(stacks[0].deletion_time, None)
self.assertEqual(stacks[0].template_description, 'My Description.')
self.assert_request_parameters({
'Action': 'ListStacks',
'NextToken': 'next_token',
'StackStatusFilter.member.1': 'CREATE_IN_PROGRESS',
'Version': '2010-05-15',
})
class TestCloudFormationValidateTemplate(CloudFormationConnectionBase):
def default_body(self):
return b"""
<ValidateTemplateResponse xmlns="http://cloudformation.amazonaws.com/doc/2010-05-15/">
<ValidateTemplateResult>
<Description>My Description.</Description>
<Parameters>
<member>
<NoEcho>false</NoEcho>
<ParameterKey>InstanceType</ParameterKey>
<Description>Type of instance to launch</Description>
<DefaultValue>m1.small</DefaultValue>
</member>
<member>
<NoEcho>false</NoEcho>
<ParameterKey>KeyName</ParameterKey>
<Description>EC2 KeyPair</Description>
</member>
</Parameters>
<CapabilitiesReason>Reason</CapabilitiesReason>
<Capabilities>
<member>CAPABILITY_IAM</member>
</Capabilities>
</ValidateTemplateResult>
<ResponseMetadata>
<RequestId>0be7b6e8-e4a0-11e0-a5bd-9f8d5a7dbc91</RequestId>
</ResponseMetadata>
</ValidateTemplateResponse>
"""
def test_validate_template(self):
self.set_http_response(status_code=200)
template = self.service_connection.validate_template(template_body=SAMPLE_TEMPLATE,
template_url='http://url')
self.assertEqual(template.description, 'My Description.')
self.assertEqual(len(template.template_parameters), 2)
param1, param2 = template.template_parameters
self.assertEqual(param1.default_value, 'm1.small')
self.assertEqual(param1.description, 'Type of instance to launch')
self.assertEqual(param1.no_echo, True)
self.assertEqual(param1.parameter_key, 'InstanceType')
self.assertEqual(param2.default_value, None)
self.assertEqual(param2.description, 'EC2 KeyPair')
self.assertEqual(param2.no_echo, True)
self.assertEqual(param2.parameter_key, 'KeyName')
self.assertEqual(template.capabilities_reason, 'Reason')
self.assertEqual(len(template.capabilities), 1)
self.assertEqual(template.capabilities[0].value, 'CAPABILITY_IAM')
self.assert_request_parameters({
'Action': 'ValidateTemplate',
'TemplateBody': SAMPLE_TEMPLATE,
'TemplateURL': 'http://url',
'Version': '2010-05-15',
})
class TestCloudFormationCancelUpdateStack(CloudFormationConnectionBase):
def default_body(self):
return b"""<CancelUpdateStackResult/>"""
def test_cancel_update_stack(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.cancel_update_stack('stack_name')
self.assertEqual(api_response, True)
self.assert_request_parameters({
'Action': 'CancelUpdateStack',
'StackName': 'stack_name',
'Version': '2010-05-15',
})
class TestCloudFormationEstimateTemplateCost(CloudFormationConnectionBase):
def default_body(self):
return b"""
{
"EstimateTemplateCostResponse": {
"EstimateTemplateCostResult": {
"Url": "http://calculator.s3.amazonaws.com/calc5.html?key=cf-2e351785-e821-450c-9d58-625e1e1ebfb6"
}
}
}
"""
def test_estimate_template_cost(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.estimate_template_cost(
template_body='{}')
self.assertEqual(api_response,
'http://calculator.s3.amazonaws.com/calc5.html?key=cf-2e351785-e821-450c-9d58-625e1e1ebfb6')
self.assert_request_parameters({
'Action': 'EstimateTemplateCost',
'ContentType': 'JSON',
'TemplateBody': '{}',
'Version': '2010-05-15',
})
class TestCloudFormationGetStackPolicy(CloudFormationConnectionBase):
def default_body(self):
return b"""
{
"GetStackPolicyResponse": {
"GetStackPolicyResult": {
"StackPolicyBody": "{...}"
}
}
}
"""
def test_get_stack_policy(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.get_stack_policy('stack-id')
self.assertEqual(api_response, '{...}')
self.assert_request_parameters({
'Action': 'GetStackPolicy',
'ContentType': 'JSON',
'StackName': 'stack-id',
'Version': '2010-05-15',
})
class TestCloudFormationSetStackPolicy(CloudFormationConnectionBase):
def default_body(self):
return b"""
{
"SetStackPolicyResponse": {
"SetStackPolicyResult": {
"Some": "content"
}
}
}
"""
def test_set_stack_policy(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.set_stack_policy('stack-id',
stack_policy_body='{}')
self.assertEqual(api_response['Some'], 'content')
self.assert_request_parameters({
'Action': 'SetStackPolicy',
'ContentType': 'JSON',
'StackName': 'stack-id',
'StackPolicyBody': '{}',
'Version': '2010-05-15',
})
if __name__ == '__main__':
unittest.main()
| 1 | 10,928 | I don't believe that the `assertDictEqual` call can pass without `api_response` being a `dict`, so this second check isn't needed! | boto-boto | py |
@@ -213,8 +213,10 @@ static h2o_iovec_t *decode_string(h2o_mem_pool_t *pool, const uint8_t **src, con
if (len > src_end - *src)
return NULL;
ret = alloc_buf(pool, len * 2); /* max compression ratio is >= 0.5 */
- if ((ret->len = h2o_hpack_decode_huffman(ret->base, *src, len, is_header_name, err_desc)) == SIZE_MAX)
+ if ((ret->len = h2o_hpack_decode_huffman(ret->base, *src, len, is_header_name, err_desc)) == SIZE_MAX) {
+ h2o_mem_release_shared(ret);
return NULL;
+ }
ret->base[ret->len] = '\0';
} else {
if (len > src_end - *src) | 1 | /*
* Copyright (c) 2014-2016 DeNA Co., Ltd., Kazuho Oku, Fastly, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include "h2o/hpack.h"
#include "h2o/http2_common.h"
#define HEADER_TABLE_OFFSET 62
#define HEADER_TABLE_ENTRY_SIZE_OFFSET 32
#define STATUS_HEADER_MAX_SIZE 5
#define CONTENT_LENGTH_HEADER_MAX_SIZE \
(3 + sizeof(H2O_SIZE_T_LONGEST_STR) - 1) /* uses Literal Header Field without Indexing (RFC7541 6.2.2) */
#include "hpack_huffman_table.h"
static inline int value_is_part_of_static_table(const h2o_iovec_t *value)
{
return &h2o_hpack_static_table[0].value <= value &&
value <= &h2o_hpack_static_table[sizeof(h2o_hpack_static_table) / sizeof(h2o_hpack_static_table[0]) - 1].value;
}
static h2o_iovec_t *alloc_buf(h2o_mem_pool_t *pool, size_t len)
{
h2o_iovec_t *buf = h2o_mem_alloc_shared(pool, sizeof(h2o_iovec_t) + len + 1, NULL);
buf->base = (char *)buf + sizeof(h2o_iovec_t);
buf->len = len;
return buf;
}
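/* Note on alloc_buf: with a non-NULL pool, h2o_mem_alloc_shared links the ref-counted buffer
 * to that pool, so the buffer is released when the pool is disposed rather than by the caller.
 * The NULL-pool uses in do_encode_header below are different: there the header table owns the
 * reference and drops it in header_table_evict_one / h2o_hpack_dispose_header_table. */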
int64_t h2o_hpack_decode_int(const uint8_t **src, const uint8_t *src_end, unsigned prefix_bits)
{
uint64_t value;
unsigned shift;
uint8_t prefix_max = (1 << prefix_bits) - 1;
if (*src >= src_end)
return H2O_HTTP2_ERROR_INCOMPLETE;
value = *(*src)++ & prefix_max;
if (value != prefix_max)
return (int64_t)value;
    /* decode up to 8 octets (excluding the prefix), which are guaranteed not to cause overflow */
value = prefix_max;
for (shift = 0; shift < 56; shift += 7) {
if (*src == src_end)
return H2O_HTTP2_ERROR_INCOMPLETE;
value += (uint64_t)(**src & 127) << shift;
if ((*(*src)++ & 128) == 0)
return (int64_t)value;
}
/* handling the 9th octet */
if (*src == src_end)
return H2O_HTTP2_ERROR_INCOMPLETE;
if ((**src & 128) != 0)
return H2O_HTTP2_ERROR_COMPRESSION;
value += (uint64_t)(*(*src)++ & 127) << shift;
if (value > (uint64_t)INT64_MAX)
return H2O_HTTP2_ERROR_COMPRESSION;
return value;
}
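/* Worked example for h2o_hpack_decode_int (RFC 7541 C.1.2): with prefix_bits == 5, the octets
 * 0b00011111 0x9a 0x0a decode to 31 + (0x9a & 127) + ((0x0a & 127) << 7) = 31 + 26 + 1280 = 1337. */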
static char *huffdecode4(char *dst, uint8_t in, uint8_t *state, int *maybe_eos, uint8_t *seen_char_types)
{
const nghttp2_huff_decode *entry = huff_decode_table[*state] + in;
if ((entry->flags & NGHTTP2_HUFF_FAIL) != 0)
return NULL;
if ((entry->flags & NGHTTP2_HUFF_SYM) != 0) {
*dst++ = entry->sym;
*seen_char_types |= (entry->flags & NGHTTP2_HUFF_INVALID_CHARS);
}
*state = entry->state;
*maybe_eos = (entry->flags & NGHTTP2_HUFF_ACCEPTED) != 0;
return dst;
}
const char h2o_hpack_err_found_upper_case_in_header_name[] = "found an upper-case letter in header name";
const char h2o_hpack_soft_err_found_invalid_char_in_header_name[] = "found an invalid character in header name";
const char h2o_hpack_soft_err_found_invalid_char_in_header_value[] = "found an invalid character in header value";
size_t h2o_hpack_decode_huffman(char *_dst, const uint8_t *src, size_t len, int is_name, const char **err_desc)
{
char *dst = _dst;
const uint8_t *src_end = src + len;
uint8_t state = 0, seen_char_types = 0;
int maybe_eos = 1;
/* decode */
for (; src < src_end; src++) {
if ((dst = huffdecode4(dst, *src >> 4, &state, &maybe_eos, &seen_char_types)) == NULL)
return SIZE_MAX;
if ((dst = huffdecode4(dst, *src & 0xf, &state, &maybe_eos, &seen_char_types)) == NULL)
return SIZE_MAX;
}
if (!maybe_eos)
return SIZE_MAX;
/* validate */
if (is_name) {
if (dst == _dst)
return SIZE_MAX;
/* pseudo-headers are checked later in `decode_header` */
if ((seen_char_types & NGHTTP2_HUFF_INVALID_FOR_HEADER_NAME) != 0 && _dst[0] != ':') {
if ((seen_char_types & NGHTTP2_HUFF_UPPER_CASE_CHAR) != 0) {
*err_desc = h2o_hpack_err_found_upper_case_in_header_name;
return SIZE_MAX;
} else {
*err_desc = h2o_hpack_soft_err_found_invalid_char_in_header_name;
}
}
} else {
if ((seen_char_types & NGHTTP2_HUFF_INVALID_FOR_HEADER_VALUE) != 0)
*err_desc = h2o_hpack_soft_err_found_invalid_char_in_header_value;
}
return dst - _dst;
}
/* validate a header name against https://tools.ietf.org/html/rfc7230#section-3.2,
* in addition to that, we disallow upper case chars as well.
* This sets @err_desc for all invalid characters, but only returns true
* for upper case characters, this is because we return a protocol error
* in that case. */
int h2o_hpack_validate_header_name(const char *s, size_t len, const char **err_desc)
{
/* all printable chars, except upper case and separator characters */
static const char valid_h2_header_name_char[] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0-31 */
0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, /* 32-63 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, /* 64-95 */
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, /* 96-127 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 128-159 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 160-191 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 192-223 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 224-255 */
};
for (; len != 0; ++s, --len) {
unsigned char ch = (unsigned char)*s;
if (!valid_h2_header_name_char[ch]) {
if (ch - 'A' < 26U) {
*err_desc = h2o_hpack_err_found_upper_case_in_header_name;
return 0;
}
*err_desc = h2o_hpack_soft_err_found_invalid_char_in_header_name;
}
}
return 1;
}
/* validate a header value against https://tools.ietf.org/html/rfc7230#section-3.2 */
void h2o_hpack_validate_header_value(const char *s, size_t len, const char **err_desc)
{
/* all printable chars + horizontal tab */
static const char valid_h2_field_value_char[] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0-31 */
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 32-63 */
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 64-95 */
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, /* 96-127 */
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 128-159 */
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 160-191 */
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 192-223 */
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 224-255 */
};
for (; len != 0; ++s, --len) {
unsigned char ch = (unsigned char)*s;
if (!valid_h2_field_value_char[ch]) {
*err_desc = h2o_hpack_soft_err_found_invalid_char_in_header_value;
break;
}
}
}
static h2o_iovec_t *decode_string(h2o_mem_pool_t *pool, const uint8_t **src, const uint8_t *src_end, int is_header_name,
const char **err_desc)
{
h2o_iovec_t *ret;
int is_huffman;
int64_t len;
if (*src >= src_end)
return NULL;
is_huffman = (**src & 0x80) != 0;
if ((len = h2o_hpack_decode_int(src, src_end, 7)) < 0)
return NULL;
if (is_huffman) {
if (len > src_end - *src)
return NULL;
ret = alloc_buf(pool, len * 2); /* max compression ratio is >= 0.5 */
if ((ret->len = h2o_hpack_decode_huffman(ret->base, *src, len, is_header_name, err_desc)) == SIZE_MAX)
return NULL;
ret->base[ret->len] = '\0';
} else {
if (len > src_end - *src)
return NULL;
if (is_header_name) {
/* pseudo-headers are checked later in `decode_header` */
if (**src != (uint8_t)':' && !h2o_hpack_validate_header_name((char *)*src, len, err_desc))
return NULL;
} else {
h2o_hpack_validate_header_value((char *)*src, len, err_desc);
}
ret = alloc_buf(pool, len);
memcpy(ret->base, *src, len);
ret->base[len] = '\0';
}
*src += len;
return ret;
}
static void header_table_evict_one(h2o_hpack_header_table_t *table)
{
struct st_h2o_hpack_header_table_entry_t *entry;
assert(table->num_entries != 0);
entry = h2o_hpack_header_table_get(table, --table->num_entries);
table->hpack_size -= entry->name->len + entry->value->len + HEADER_TABLE_ENTRY_SIZE_OFFSET;
if (!h2o_iovec_is_token(entry->name))
h2o_mem_release_shared(entry->name);
if (!value_is_part_of_static_table(entry->value))
h2o_mem_release_shared(entry->value);
memset(entry, 0, sizeof(*entry));
}
static struct st_h2o_hpack_header_table_entry_t *header_table_add(h2o_hpack_header_table_t *table, size_t size_add,
size_t max_num_entries)
{
/* adjust the size */
while (table->num_entries != 0 && table->hpack_size + size_add > table->hpack_capacity)
header_table_evict_one(table);
while (max_num_entries <= table->num_entries)
header_table_evict_one(table);
if (table->num_entries == 0) {
assert(table->hpack_size == 0);
if (size_add > table->hpack_capacity)
return NULL;
}
table->hpack_size += size_add;
/* grow the entries if full */
if (table->num_entries == table->entry_capacity) {
size_t new_capacity = table->num_entries * 2;
if (new_capacity < 16)
new_capacity = 16;
struct st_h2o_hpack_header_table_entry_t *new_entries =
h2o_mem_alloc(new_capacity * sizeof(struct st_h2o_hpack_header_table_entry_t));
if (table->num_entries != 0) {
size_t src_index = table->entry_start_index, dst_index = 0;
do {
new_entries[dst_index] = table->entries[src_index];
++dst_index;
src_index = (src_index + 1) % table->entry_capacity;
} while (dst_index != table->num_entries);
}
memset(new_entries + table->num_entries, 0, sizeof(*new_entries) * (new_capacity - table->num_entries));
free(table->entries);
table->entries = new_entries;
table->entry_capacity = new_capacity;
table->entry_start_index = 0;
}
++table->num_entries;
table->entry_start_index = (table->entry_start_index + table->entry_capacity - 1) % table->entry_capacity;
return table->entries + table->entry_start_index;
}
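/* Note on header_table_add: the dynamic table is kept as a ring buffer; entry_start_index
 * always points at the most recently added entry, older entries follow it modulo
 * entry_capacity, and HPACK indexes are resolved relative to entry_start_index. */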
int h2o_hpack_decode_header(h2o_mem_pool_t *pool, void *_hpack_header_table, h2o_iovec_t **_name, h2o_iovec_t *_value,
const uint8_t **const src, const uint8_t *src_end, const char **err_desc)
{
h2o_hpack_header_table_t *hpack_header_table = _hpack_header_table;
h2o_iovec_t *name = NULL, *value = NULL;
int64_t index = 0;
int value_is_indexed = 0, do_index = 0;
Redo:
if (*src >= src_end)
return H2O_HTTP2_ERROR_COMPRESSION;
/* determine the mode and handle accordingly */
if (**src >= 128) {
/* indexed header field representation */
if ((index = h2o_hpack_decode_int(src, src_end, 7)) <= 0)
return H2O_HTTP2_ERROR_COMPRESSION;
value_is_indexed = 1;
} else if (**src >= 64) {
/* literal header field with incremental handling */
if (**src == 64) {
++*src;
} else if ((index = h2o_hpack_decode_int(src, src_end, 6)) <= 0) {
return H2O_HTTP2_ERROR_COMPRESSION;
}
do_index = 1;
} else if (**src < 32) {
/* literal header field without indexing / never indexed */
if ((**src & 0xf) == 0) {
++*src;
} else if ((index = h2o_hpack_decode_int(src, src_end, 4)) <= 0) {
return H2O_HTTP2_ERROR_COMPRESSION;
}
} else {
/* size update */
int64_t new_capacity;
if ((new_capacity = h2o_hpack_decode_int(src, src_end, 5)) < 0) {
return H2O_HTTP2_ERROR_COMPRESSION;
}
if (new_capacity > hpack_header_table->hpack_max_capacity) {
return H2O_HTTP2_ERROR_COMPRESSION;
}
hpack_header_table->hpack_capacity = (size_t)new_capacity;
while (hpack_header_table->num_entries != 0 && hpack_header_table->hpack_size > hpack_header_table->hpack_capacity) {
header_table_evict_one(hpack_header_table);
}
goto Redo;
}
/* determine the header */
if (index > 0) {
/* existing name (and value?) */
if (index < HEADER_TABLE_OFFSET) {
name = (h2o_iovec_t *)h2o_hpack_static_table[index - 1].name;
if (value_is_indexed)
value = (h2o_iovec_t *)&h2o_hpack_static_table[index - 1].value;
} else if (index - HEADER_TABLE_OFFSET < hpack_header_table->num_entries) {
struct st_h2o_hpack_header_table_entry_t *entry =
h2o_hpack_header_table_get(hpack_header_table, index - HEADER_TABLE_OFFSET);
*err_desc = entry->err_desc;
name = entry->name;
if (!h2o_iovec_is_token(name))
h2o_mem_link_shared(pool, name);
if (value_is_indexed) {
value = entry->value;
h2o_mem_link_shared(pool, value);
}
} else {
return H2O_HTTP2_ERROR_COMPRESSION;
}
} else {
/* non-existing name */
const h2o_token_t *name_token;
if ((name = decode_string(pool, src, src_end, 1, err_desc)) == NULL) {
if (*err_desc == h2o_hpack_err_found_upper_case_in_header_name)
return H2O_HTTP2_ERROR_PROTOCOL;
return H2O_HTTP2_ERROR_COMPRESSION;
}
if (*err_desc == NULL) {
/* predefined header names should be interned */
if ((name_token = h2o_lookup_token(name->base, name->len)) != NULL)
name = (h2o_iovec_t *)&name_token->buf;
}
}
/* determine the value (if necessary) */
if (!value_is_indexed) {
if ((value = decode_string(pool, src, src_end, 0, err_desc)) == NULL)
return H2O_HTTP2_ERROR_COMPRESSION;
}
/* add the decoded header to the header table if necessary */
if (do_index) {
struct st_h2o_hpack_header_table_entry_t *entry =
header_table_add(hpack_header_table, name->len + value->len + HEADER_TABLE_ENTRY_SIZE_OFFSET, SIZE_MAX);
if (entry != NULL) {
entry->err_desc = *err_desc;
entry->name = name;
if (!h2o_iovec_is_token(entry->name))
h2o_mem_addref_shared(entry->name);
entry->value = value;
if (!value_is_part_of_static_table(entry->value))
h2o_mem_addref_shared(entry->value);
}
}
*_name = name;
*_value = *value;
return *err_desc != NULL ? H2O_HTTP2_ERROR_INVALID_HEADER_CHAR : 0;
}
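/* The COMMON_CODE entries below are HPACK static-table indexes 8..14 (":status" paired with the
 * most frequent codes, RFC 7541 Appendix A), emitted as fully-indexed fields; any other status
 * uses static index 8 as the field name followed by a 3-byte literal value. */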
static uint8_t *encode_status(uint8_t *dst, int status)
{
/* see also: STATUS_HEADER_MAX_SIZE */
assert(100 <= status && status <= 999);
switch (status) {
#define COMMON_CODE(code, st) \
case st: \
*dst++ = 0x80 | code; \
break
COMMON_CODE(8, 200);
COMMON_CODE(9, 204);
COMMON_CODE(10, 206);
COMMON_CODE(11, 304);
COMMON_CODE(12, 400);
COMMON_CODE(13, 404);
COMMON_CODE(14, 500);
#undef COMMON_CODE
default:
/* use literal header field without indexing - indexed name */
*dst++ = 8;
*dst++ = 3;
sprintf((char *)dst, "%d", status);
dst += 3;
break;
}
return dst;
}
static uint8_t *encode_content_length(uint8_t *dst, size_t value)
{
char buf[32], *p = buf + sizeof(buf);
size_t l;
do {
*--p = '0' + value % 10;
} while ((value /= 10) != 0);
l = buf + sizeof(buf) - p;
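    /* 0x0f 0x0d == literal header field without indexing, indexed name 15 + 13 = 28
     * ("content-length" in the RFC 7541 static table), followed by the length-prefixed
     * decimal value */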
*dst++ = 0x0f;
*dst++ = 0x0d;
*dst++ = (uint8_t)l;
memcpy(dst, p, l);
dst += l;
return dst;
}
void h2o_hpack_dispose_header_table(h2o_hpack_header_table_t *header_table)
{
if (header_table->num_entries != 0) {
size_t index = header_table->entry_start_index;
do {
struct st_h2o_hpack_header_table_entry_t *entry = header_table->entries + index;
if (!h2o_iovec_is_token(entry->name))
h2o_mem_release_shared(entry->name);
if (!value_is_part_of_static_table(entry->value))
h2o_mem_release_shared(entry->value);
index = (index + 1) % header_table->entry_capacity;
} while (--header_table->num_entries != 0);
}
free(header_table->entries);
}
int h2o_hpack_parse_request(h2o_mem_pool_t *pool, h2o_hpack_decode_header_cb decode_cb, void *decode_ctx, h2o_iovec_t *method,
const h2o_url_scheme_t **scheme, h2o_iovec_t *authority, h2o_iovec_t *path, h2o_headers_t *headers,
int *pseudo_header_exists_map, size_t *content_length, h2o_cache_digests_t **digests,
const uint8_t *src, size_t len, const char **err_desc)
{
const uint8_t *src_end = src + len;
*content_length = SIZE_MAX;
while (src != src_end) {
h2o_iovec_t *name, value;
const char *decode_err = NULL;
int ret = decode_cb(pool, decode_ctx, &name, &value, &src, src_end, &decode_err);
if (ret != 0) {
if (ret == H2O_HTTP2_ERROR_INVALID_HEADER_CHAR) {
/* this is a soft error, we continue parsing, but register only the first error */
if (*err_desc == NULL) {
*err_desc = decode_err;
}
} else {
*err_desc = decode_err;
return ret;
}
}
if (name->base[0] == ':') {
if (pseudo_header_exists_map != NULL) {
/* FIXME validate the chars in the value (e.g. reject SP in path) */
if (name == &H2O_TOKEN_AUTHORITY->buf) {
if (authority->base != NULL)
return H2O_HTTP2_ERROR_PROTOCOL;
*authority = value;
*pseudo_header_exists_map |= H2O_HPACK_PARSE_HEADERS_AUTHORITY_EXISTS;
} else if (name == &H2O_TOKEN_METHOD->buf) {
if (method->base != NULL)
return H2O_HTTP2_ERROR_PROTOCOL;
*method = value;
*pseudo_header_exists_map |= H2O_HPACK_PARSE_HEADERS_METHOD_EXISTS;
} else if (name == &H2O_TOKEN_PATH->buf) {
if (path->base != NULL)
return H2O_HTTP2_ERROR_PROTOCOL;
if (value.len == 0)
return H2O_HTTP2_ERROR_PROTOCOL;
*path = value;
*pseudo_header_exists_map |= H2O_HPACK_PARSE_HEADERS_PATH_EXISTS;
} else if (name == &H2O_TOKEN_SCHEME->buf) {
if (*scheme != NULL)
return H2O_HTTP2_ERROR_PROTOCOL;
if (h2o_memis(value.base, value.len, H2O_STRLIT("https"))) {
*scheme = &H2O_URL_SCHEME_HTTPS;
} else {
                        /* draft-16 8.1.2.3 notes: ":scheme is not restricted to http and https schemed URIs" */
*scheme = &H2O_URL_SCHEME_HTTP;
}
*pseudo_header_exists_map |= H2O_HPACK_PARSE_HEADERS_SCHEME_EXISTS;
} else {
return H2O_HTTP2_ERROR_PROTOCOL;
}
} else {
return H2O_HTTP2_ERROR_PROTOCOL;
}
} else {
pseudo_header_exists_map = NULL;
if (h2o_iovec_is_token(name)) {
h2o_token_t *token = H2O_STRUCT_FROM_MEMBER(h2o_token_t, buf, name);
if (token == H2O_TOKEN_CONTENT_LENGTH) {
if ((*content_length = h2o_strtosize(value.base, value.len)) == SIZE_MAX)
return H2O_HTTP2_ERROR_PROTOCOL;
} else {
/* reject headers as defined in draft-16 8.1.2.2 */
if (token->flags.http2_should_reject) {
if (token == H2O_TOKEN_HOST) {
/* HTTP2 allows the use of host header (in place of :authority) */
if (authority->base == NULL)
*authority = value;
goto Next;
} else if (token == H2O_TOKEN_TE && h2o_lcstris(value.base, value.len, H2O_STRLIT("trailers"))) {
/* do not reject */
} else {
return H2O_HTTP2_ERROR_PROTOCOL;
}
}
if (token == H2O_TOKEN_CACHE_DIGEST && digests != NULL) {
/* TODO cache the decoded result in HPACK, as well as delay the decoding of the digest until being used */
h2o_cache_digests_load_header(digests, value.base, value.len);
}
h2o_add_header(pool, headers, token, NULL, value.base, value.len);
}
} else {
h2o_add_header_by_str(pool, headers, name->base, name->len, 0, NULL, value.base, value.len);
}
}
Next:;
}
if (*err_desc != NULL)
return H2O_HTTP2_ERROR_INVALID_HEADER_CHAR;
return 0;
}
int h2o_hpack_parse_response(h2o_mem_pool_t *pool, h2o_hpack_decode_header_cb decode_cb, void *decode_ctx, int *status,
h2o_headers_t *headers, const uint8_t *src, size_t len, const char **err_desc)
{
*status = 0;
const uint8_t *src_end = src + len;
/* the response MUST contain a :status header as the first element */
if (src == src_end)
return H2O_HTTP2_ERROR_PROTOCOL;
do {
h2o_iovec_t *name, value;
const char *decode_err = NULL;
int ret = decode_cb(pool, decode_ctx, &name, &value, &src, src_end, &decode_err);
if (ret != 0) {
if (ret == H2O_HTTP2_ERROR_INVALID_HEADER_CHAR) {
/* this is a soft error, we continue parsing, but register only the first error */
if (*err_desc == NULL) {
*err_desc = decode_err;
}
} else {
*err_desc = decode_err;
return ret;
}
}
if (name->base[0] == ':') {
if (name != &H2O_TOKEN_STATUS->buf)
return H2O_HTTP2_ERROR_PROTOCOL;
if (*status != 0)
return H2O_HTTP2_ERROR_PROTOCOL;
/* parse status */
if (value.len != 3)
return H2O_HTTP2_ERROR_PROTOCOL;
char *c = value.base;
#define PARSE_DIGIT(mul, min_digit) \
do { \
if (*c < '0' + (min_digit) || '9' < *c) \
return H2O_HTTP2_ERROR_PROTOCOL; \
*status += (*c - '0') * mul; \
++c; \
} while (0)
PARSE_DIGIT(100, 1);
PARSE_DIGIT(10, 0);
PARSE_DIGIT(1, 0);
#undef PARSE_DIGIT
} else {
if (*status == 0)
return H2O_HTTP2_ERROR_PROTOCOL;
if (h2o_iovec_is_token(name)) {
h2o_token_t *token = H2O_STRUCT_FROM_MEMBER(h2o_token_t, buf, name);
/* reject headers as defined in draft-16 8.1.2.2 */
if (token->flags.http2_should_reject)
return H2O_HTTP2_ERROR_PROTOCOL;
h2o_add_header(pool, headers, token, NULL, value.base, value.len);
} else {
h2o_add_header_by_str(pool, headers, name->base, name->len, 0, NULL, value.base, value.len);
}
}
} while (src != src_end);
if (*err_desc) {
return H2O_HTTP2_ERROR_INVALID_HEADER_CHAR;
}
return 0;
}
static inline int encode_int_is_onebyte(int64_t value, unsigned prefix_bits)
{
return value < (1 << prefix_bits) - 1;
}
uint8_t *h2o_hpack_encode_int(uint8_t *dst, int64_t value, unsigned prefix_bits)
{
if (encode_int_is_onebyte(value, prefix_bits)) {
*dst++ |= value;
} else {
/* see also: MAX_ENCODE_INT_LENGTH */
assert(value >= 0);
value -= (1 << prefix_bits) - 1;
*dst++ |= (1 << prefix_bits) - 1;
for (; value >= 128; value >>= 7) {
*dst++ = 0x80 | value;
}
*dst++ = value;
}
return dst;
}
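/* Note on h2o_hpack_encode_int: the first octet is OR-ed into *dst, so callers are expected to
 * have written the field-pattern bits (e.g. 0x80 for an indexed field, 0x40 for incremental
 * indexing) into *dst beforehand. */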
size_t h2o_hpack_encode_huffman(uint8_t *_dst, const uint8_t *src, size_t len)
{
uint8_t *dst = _dst, *dst_end = dst + len;
const uint8_t *src_end = src + len;
uint64_t bits = 0;
int bits_left = 40;
while (src != src_end) {
const nghttp2_huff_sym *sym = huff_sym_table + *src++;
bits |= (uint64_t)sym->code << (bits_left - sym->nbits);
bits_left -= sym->nbits;
while (bits_left <= 32) {
*dst++ = bits >> 32;
bits <<= 8;
bits_left += 8;
if (dst == dst_end) {
return SIZE_MAX;
}
}
}
if (bits_left != 40) {
bits |= ((uint64_t)1 << bits_left) - 1;
*dst++ = bits >> 32;
}
if (dst == dst_end) {
return SIZE_MAX;
}
return dst - _dst;
}
static size_t encode_as_is(uint8_t *dst, const char *s, size_t len)
{
uint8_t *start = dst;
*dst = '\0';
dst = h2o_hpack_encode_int(dst, len, 7);
memcpy(dst, s, len);
dst += len;
return dst - start;
}
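/* h2o_hpack_encode_string tries Huffman first; h2o_hpack_encode_huffman returns SIZE_MAX when
 * the encoded form would not be strictly shorter than the input, in which case the string is
 * emitted as-is via encode_as_is. */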
size_t h2o_hpack_encode_string(uint8_t *dst, const char *s, size_t len)
{
if (H2O_LIKELY(len != 0)) {
/* try to encode using huffman */
size_t hufflen = h2o_hpack_encode_huffman(dst + 1, (const uint8_t *)s, len);
if (H2O_LIKELY(hufflen != SIZE_MAX)) {
size_t head_len;
if (H2O_LIKELY(encode_int_is_onebyte((uint32_t)hufflen, 7))) {
dst[0] = (uint8_t)(0x80 | hufflen);
head_len = 1;
} else {
uint8_t head[8];
head[0] = '\x80';
head_len = h2o_hpack_encode_int(head, hufflen, 7) - head;
memmove(dst + head_len, dst + 1, hufflen);
memcpy(dst, head, head_len);
}
return head_len + hufflen;
}
}
return encode_as_is(dst, s, len);
}
static uint8_t *do_encode_header(h2o_hpack_header_table_t *header_table, uint8_t *dst, const h2o_iovec_t *name,
const h2o_iovec_t *value, int dont_compress)
{
int is_token = h2o_iovec_is_token(name);
int name_index = is_token ? ((const h2o_token_t *)name)->flags.http2_static_table_name_index : 0;
/* try to send as indexed */
{
size_t header_table_index = header_table->entry_start_index, n;
for (n = header_table->num_entries; n != 0; --n) {
struct st_h2o_hpack_header_table_entry_t *entry = header_table->entries + header_table_index;
if (is_token) {
if (name != entry->name)
goto Next;
} else {
if (!h2o_memis(name->base, name->len, entry->name->base, entry->name->len))
goto Next;
if (name_index == 0)
name_index = (int)(header_table->num_entries - n + HEADER_TABLE_OFFSET);
}
/* name matched! */
if (!h2o_memis(value->base, value->len, entry->value->base, entry->value->len))
goto Next;
/* name and value matched! */
*dst = 0x80;
dst = h2o_hpack_encode_int(dst, header_table->num_entries - n + HEADER_TABLE_OFFSET, 7);
return dst;
Next:
++header_table_index;
if (header_table_index == header_table->entry_capacity)
header_table_index = 0;
}
}
if (!dont_compress && is_token)
dont_compress = ((const h2o_token_t *)name)->flags.dont_compress;
if (dont_compress)
dont_compress = value->len < 20;
if (name_index != 0) {
/* literal header field with indexing (indexed name). */
if (dont_compress == 1) {
/* mark the field as 'never indexed' */
*dst = 0x10;
dst = h2o_hpack_encode_int(dst, name_index, 4);
} else {
*dst = 0x40;
dst = h2o_hpack_encode_int(dst, name_index, 6);
}
} else {
/* literal header field with indexing (new name) */
*dst++ = 0x40;
dst += h2o_hpack_encode_string(dst, name->base, name->len);
}
if (dont_compress == 1) {
/* bypass huffman encoding */
dst += encode_as_is(dst, value->base, value->len);
} else {
        /* add to header table (maximum number of entries in output header table is limited to 32 so that the search (see above)
           would not take too long) */
dst += h2o_hpack_encode_string(dst, value->base, value->len);
struct st_h2o_hpack_header_table_entry_t *entry =
header_table_add(header_table, name->len + value->len + HEADER_TABLE_ENTRY_SIZE_OFFSET, 32);
if (entry != NULL) {
if (is_token) {
entry->name = (h2o_iovec_t *)name;
} else {
entry->name = alloc_buf(NULL, name->len);
entry->name->base[name->len] = '\0';
memcpy(entry->name->base, name->base, name->len);
}
entry->value = alloc_buf(NULL, value->len);
entry->value->base[value->len] = '\0';
memcpy(entry->value->base, value->base, value->len);
}
}
return dst;
}
static uint8_t *encode_header(h2o_hpack_header_table_t *header_table, uint8_t *dst, const h2o_header_t *header)
{
return do_encode_header(header_table, dst, header->name, &header->value, header->flags.dont_compress);
}
static uint8_t *encode_header_token(h2o_hpack_header_table_t *header_table, uint8_t *dst, const h2o_token_t *token,
const h2o_iovec_t *value)
{
return do_encode_header(header_table, dst, &token->buf, value, token->flags.dont_compress);
}
static uint8_t *encode_method(h2o_hpack_header_table_t *header_table, uint8_t *dst, h2o_iovec_t value)
{
if (h2o_memis(value.base, value.len, H2O_STRLIT("GET"))) {
*dst++ = 0x82;
return dst;
}
if (h2o_memis(value.base, value.len, H2O_STRLIT("POST"))) {
*dst++ = 0x83;
return dst;
}
return encode_header_token(header_table, dst, H2O_TOKEN_METHOD, &value);
}
static uint8_t *encode_scheme(h2o_hpack_header_table_t *header_table, uint8_t *dst, const h2o_url_scheme_t *scheme)
{
if (scheme == &H2O_URL_SCHEME_HTTPS) {
*dst++ = 0x87;
return dst;
}
if (scheme == &H2O_URL_SCHEME_HTTP) {
*dst++ = 0x86;
return dst;
}
return encode_header_token(header_table, dst, H2O_TOKEN_SCHEME, &scheme->name);
}
static uint8_t *encode_path(h2o_hpack_header_table_t *header_table, uint8_t *dst, h2o_iovec_t value)
{
if (h2o_memis(value.base, value.len, H2O_STRLIT("/"))) {
*dst++ = 0x84;
return dst;
}
if (h2o_memis(value.base, value.len, H2O_STRLIT("/index.html"))) {
*dst++ = 0x85;
return dst;
}
return encode_header_token(header_table, dst, H2O_TOKEN_PATH, &value);
}
static uint8_t *encode_literal_header_without_indexing(uint8_t *dst, const h2o_iovec_t *name, const h2o_iovec_t *value)
{
/* literal header field without indexing / never indexed */
*dst++ = 0;
dst += h2o_hpack_encode_string(dst, name->base, name->len);
dst += h2o_hpack_encode_string(dst, value->base, value->len);
return dst;
}
static size_t calc_capacity(size_t name_len, size_t value_len)
{
return name_len + value_len + 1 + H2O_HPACK_ENCODE_INT_MAX_LENGTH * 2;
}
static size_t calc_headers_capacity(const h2o_header_t *headers, size_t num_headers)
{
const h2o_header_t *header;
size_t capacity = 0;
for (header = headers; num_headers != 0; ++header, --num_headers)
capacity += calc_capacity(header->name->len, header->value.len);
return capacity;
}
static void fixup_frame_headers(h2o_buffer_t **buf, size_t start_at, uint8_t type, uint32_t stream_id, size_t max_frame_size,
int flags)
{
    /* try to fit all data into a single frame, using the preallocated space for the frame header */
size_t payload_size = (*buf)->size - start_at - H2O_HTTP2_FRAME_HEADER_SIZE;
if (payload_size <= max_frame_size) {
h2o_http2_encode_frame_header((uint8_t *)((*buf)->bytes + start_at), payload_size, type,
H2O_HTTP2_FRAME_FLAG_END_HEADERS | flags, stream_id);
return;
}
/* need to setup continuation frames */
size_t off;
h2o_http2_encode_frame_header((uint8_t *)((*buf)->bytes + start_at), max_frame_size, type, flags, stream_id);
off = start_at + H2O_HTTP2_FRAME_HEADER_SIZE + max_frame_size;
while (1) {
size_t left = (*buf)->size - off;
h2o_buffer_reserve(buf, H2O_HTTP2_FRAME_HEADER_SIZE);
memmove((*buf)->bytes + off + H2O_HTTP2_FRAME_HEADER_SIZE, (*buf)->bytes + off, left);
(*buf)->size += H2O_HTTP2_FRAME_HEADER_SIZE;
if (left <= max_frame_size) {
h2o_http2_encode_frame_header((uint8_t *)((*buf)->bytes + off), left, H2O_HTTP2_FRAME_TYPE_CONTINUATION,
H2O_HTTP2_FRAME_FLAG_END_HEADERS, stream_id);
break;
} else {
h2o_http2_encode_frame_header((uint8_t *)((*buf)->bytes + off), max_frame_size, H2O_HTTP2_FRAME_TYPE_CONTINUATION, 0,
stream_id);
off += H2O_HTTP2_FRAME_HEADER_SIZE + max_frame_size;
}
}
}
void h2o_hpack_flatten_request(h2o_buffer_t **buf, h2o_hpack_header_table_t *header_table, uint32_t stream_id,
size_t max_frame_size, h2o_iovec_t method, h2o_url_t *url, const h2o_header_t *headers,
size_t num_headers, int is_end_stream)
{
size_t capacity = calc_headers_capacity(headers, num_headers);
capacity += H2O_HTTP2_FRAME_HEADER_SIZE;
capacity += calc_capacity(H2O_TOKEN_METHOD->buf.len, method.len);
capacity += calc_capacity(H2O_TOKEN_SCHEME->buf.len, url->scheme->name.len);
capacity += calc_capacity(H2O_TOKEN_AUTHORITY->buf.len, url->authority.len);
capacity += calc_capacity(H2O_TOKEN_PATH->buf.len, url->path.len);
size_t start_at = (*buf)->size;
uint8_t *dst = (void *)(h2o_buffer_reserve(buf, capacity).base + H2O_HTTP2_FRAME_HEADER_SIZE);
/* encode */
dst = encode_method(header_table, dst, method);
dst = encode_scheme(header_table, dst, url->scheme);
dst = encode_header_token(header_table, dst, H2O_TOKEN_AUTHORITY, &url->authority);
dst = encode_path(header_table, dst, url->path);
size_t i;
for (i = 0; i != num_headers; ++i) {
const h2o_header_t *header = headers + i;
if (header->name == &H2O_TOKEN_ACCEPT_ENCODING->buf &&
h2o_memis(header->value.base, header->value.len, H2O_STRLIT("gzip, deflate"))) {
*dst++ = 0x90;
} else {
dst = encode_header(header_table, dst, header);
}
}
(*buf)->size = (char *)dst - (*buf)->bytes;
/* setup the frame headers */
fixup_frame_headers(buf, start_at, H2O_HTTP2_FRAME_TYPE_HEADERS, stream_id, max_frame_size,
is_end_stream ? H2O_HTTP2_FRAME_FLAG_END_STREAM : 0);
}
void h2o_hpack_flatten_push_promise(h2o_buffer_t **buf, h2o_hpack_header_table_t *header_table, uint32_t stream_id,
size_t max_frame_size, const h2o_url_scheme_t *scheme, h2o_iovec_t authority,
h2o_iovec_t method, h2o_iovec_t path, const h2o_header_t *headers, size_t num_headers,
uint32_t parent_stream_id)
{
size_t capacity = calc_headers_capacity(headers, num_headers);
capacity += H2O_HTTP2_FRAME_HEADER_SIZE /* first frame header */
+ 4; /* promised stream id */
capacity += calc_capacity(H2O_TOKEN_METHOD->buf.len, method.len);
capacity += calc_capacity(H2O_TOKEN_SCHEME->buf.len, scheme->name.len);
capacity += calc_capacity(H2O_TOKEN_AUTHORITY->buf.len, authority.len);
capacity += calc_capacity(H2O_TOKEN_PATH->buf.len, path.len);
size_t start_at = (*buf)->size;
uint8_t *dst = (void *)(h2o_buffer_reserve(buf, capacity).base + H2O_HTTP2_FRAME_HEADER_SIZE);
/* encode */
dst = h2o_http2_encode32u(dst, stream_id);
dst = encode_method(header_table, dst, method);
dst = encode_scheme(header_table, dst, scheme);
dst = encode_header_token(header_table, dst, H2O_TOKEN_AUTHORITY, &authority);
dst = encode_path(header_table, dst, path);
size_t i;
for (i = 0; i != num_headers; ++i) {
const h2o_header_t *header = headers + i;
if (header->name == &H2O_TOKEN_ACCEPT_ENCODING->buf &&
h2o_memis(header->value.base, header->value.len, H2O_STRLIT("gzip, deflate"))) {
*dst++ = 0x90;
} else {
dst = encode_header(header_table, dst, header);
}
}
(*buf)->size = (char *)dst - (*buf)->bytes;
/* setup the frame headers */
fixup_frame_headers(buf, start_at, H2O_HTTP2_FRAME_TYPE_PUSH_PROMISE, parent_stream_id, max_frame_size, 0);
}
void h2o_hpack_flatten_response(h2o_buffer_t **buf, h2o_hpack_header_table_t *header_table, uint32_t stream_id,
size_t max_frame_size, int status, const h2o_header_t *headers, size_t num_headers,
const h2o_iovec_t *server_name, size_t content_length)
{
size_t capacity = calc_headers_capacity(headers, num_headers);
capacity += H2O_HTTP2_FRAME_HEADER_SIZE; /* for the first header */
capacity += STATUS_HEADER_MAX_SIZE; /* for :status: */
#ifndef H2O_UNITTEST
if (server_name != NULL && server_name->len) {
capacity += 5 + server_name->len; /* for Server: */
}
#endif
if (content_length != SIZE_MAX)
capacity += CONTENT_LENGTH_HEADER_MAX_SIZE; /* for content-length: UINT64_MAX (with huffman compression applied) */
size_t start_at = (*buf)->size;
uint8_t *dst = (void *)(h2o_buffer_reserve(buf, capacity).base + H2O_HTTP2_FRAME_HEADER_SIZE); /* skip frame header */
/* encode */
dst = encode_status(dst, status);
#ifndef H2O_UNITTEST
/* TODO keep some kind of reference to the indexed Server header, and reuse it */
if (server_name != NULL && server_name->len) {
dst = encode_header_token(header_table, dst, H2O_TOKEN_SERVER, server_name);
}
#endif
size_t i;
for (i = 0; i != num_headers; ++i)
dst = encode_header(header_table, dst, headers + i);
if (content_length != SIZE_MAX)
dst = encode_content_length(dst, content_length);
(*buf)->size = (char *)dst - (*buf)->bytes;
/* setup the frame headers */
fixup_frame_headers(buf, start_at, H2O_HTTP2_FRAME_TYPE_HEADERS, stream_id, max_frame_size, 0);
}
void h2o_hpack_flatten_trailers(h2o_buffer_t **buf, h2o_hpack_header_table_t *header_table, uint32_t stream_id,
size_t max_frame_size, const h2o_header_t *headers, size_t num_headers)
{
size_t capacity = calc_headers_capacity(headers, num_headers);
capacity += H2O_HTTP2_FRAME_HEADER_SIZE;
size_t start_at = (*buf)->size;
uint8_t *dst = (void *)(h2o_buffer_reserve(buf, capacity).base + H2O_HTTP2_FRAME_HEADER_SIZE); /* skip frame header */
size_t i;
for (i = 0; i != num_headers; ++i)
dst = encode_header(header_table, dst, headers + i);
(*buf)->size = (char *)dst - (*buf)->bytes;
/* setup the frame headers */
fixup_frame_headers(buf, start_at, H2O_HTTP2_FRAME_TYPE_HEADERS, stream_id, max_frame_size, H2O_HTTP2_FRAME_FLAG_END_STREAM);
}
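/*
 * Editor's note: the review comment attached to this change concerns memory that
 * is carved out of a pool. The sketch below is not h2o's pool API; it is a
 * self-contained toy arena in plain C that only illustrates the ownership rule
 * the reviewer describes: allocations taken from a pool are never free()d one by
 * one, they are all released together when the pool itself is disposed.
 */
#include <stdlib.h>

typedef struct toy_pool {
    char *base;
    size_t used, capacity;
} toy_pool_t;

static int toy_pool_init(toy_pool_t *pool, size_t capacity)
{
    pool->base = malloc(capacity);
    pool->used = 0;
    pool->capacity = capacity;
    return pool->base != NULL ? 0 : -1;
}

static void *toy_pool_alloc(toy_pool_t *pool, size_t size)
{
    if (pool->used + size > pool->capacity)
        return NULL; /* toy pool: no growth, the allocation simply fails */
    void *ret = pool->base + pool->used;
    pool->used += size;
    return ret; /* the caller must NOT call free() on this pointer */
}

static void toy_pool_dispose(toy_pool_t *pool)
{
    free(pool->base); /* everything allocated from the pool goes away at once */
    pool->base = NULL;
    pool->used = pool->capacity = 0;
}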
| 1 | 14,313 | Fuzzer did not like this change. Looks like I misunderstood how the pool works... if allocated from a pool we should never free it manually, right? I think I'll need to drop this patch. | h2o-h2o | c |
@@ -4,16 +4,13 @@ import javafx.beans.property.ObjectProperty;
import javafx.beans.property.SimpleObjectProperty;
import javafx.scene.control.Alert;
import javafx.scene.control.ButtonType;
-import javafx.scene.control.Label;
-import javafx.scene.layout.Region;
-import javafx.stage.Window;
import java.util.Optional;
/**
- * A confirm dialog with two callbacks
+ * An abstract class for confirm dialogs with two callbacks
*/
-public class ConfirmDialog extends Alert {
+public abstract class ConfirmDialog extends Alert {
/**
* Callback for {@link ButtonType#OK} button events
*/ | 1 | package org.phoenicis.javafx.dialogs;
import javafx.beans.property.ObjectProperty;
import javafx.beans.property.SimpleObjectProperty;
import javafx.scene.control.Alert;
import javafx.scene.control.ButtonType;
import javafx.scene.control.Label;
import javafx.scene.layout.Region;
import javafx.stage.Window;
import java.util.Optional;
/**
* A confirm dialog with two callbacks
*/
public class ConfirmDialog extends Alert {
/**
* Callback for {@link ButtonType#OK} button events
*/
private final ObjectProperty<Runnable> yesCallback;
/**
* Callback for other button events than {@link ButtonType#OK}
*/
private final ObjectProperty<Runnable> noCallback;
/**
* Constructor
*/
private ConfirmDialog() {
super(AlertType.CONFIRMATION);
this.yesCallback = new SimpleObjectProperty<>();
this.noCallback = new SimpleObjectProperty<>();
}
/**
* Create a new builder for the confirm dialog
*
* @return A new builder instance
*/
public static ConfirmDialogBuilder builder() {
return new ConfirmDialogBuilder();
}
/**
* Displays the {@link ConfirmDialog} and waits for a result.
* After receiving a result from the dialog call either the yes or no callback
*/
public void showAndCallback() {
ButtonType result = showAndWait().orElse(ButtonType.CANCEL);
if (result == ButtonType.OK) {
Optional.ofNullable(getYesCallback()).ifPresent(Runnable::run);
} else {
Optional.ofNullable(getNoCallback()).ifPresent(Runnable::run);
}
}
public Runnable getYesCallback() {
return yesCallback.get();
}
public ObjectProperty<Runnable> yesCallbackProperty() {
return yesCallback;
}
public void setYesCallback(Runnable yesCallback) {
this.yesCallback.set(yesCallback);
}
public Runnable getNoCallback() {
return noCallback.get();
}
public ObjectProperty<Runnable> noCallbackProperty() {
return noCallback;
}
public void setNoCallback(Runnable noCallback) {
this.noCallback.set(noCallback);
}
/**
* A builder class for {@link ConfirmDialog} instances
*/
public static class ConfirmDialogBuilder {
/**
* The title of the {@link ConfirmDialog}
*/
private String title;
/**
* The message of the {@link ConfirmDialog}
*/
private String message;
/**
* The success callback of the {@link ConfirmDialog}
*/
private Runnable yesCallback;
/**
* The failure callback of the {@link ConfirmDialog}
*/
private Runnable noCallback;
/**
* The owner window of the {@link ConfirmDialog}
*/
private Window owner;
/**
* The resizable status of the {@link ConfirmDialog}
*/
private boolean resizable;
public ConfirmDialogBuilder withTitle(String title) {
this.title = title;
return this;
}
public ConfirmDialogBuilder withMessage(String message) {
this.message = message;
return this;
}
public ConfirmDialogBuilder withYesCallback(Runnable yesCallback) {
this.yesCallback = yesCallback;
return this;
}
public ConfirmDialogBuilder withNoCallback(Runnable noCallback) {
this.noCallback = noCallback;
return this;
}
public ConfirmDialogBuilder withOwner(Window owner) {
this.owner = owner;
return this;
}
public ConfirmDialogBuilder withResizable(boolean resizable) {
this.resizable = resizable;
return this;
}
public ConfirmDialog build() {
final ConfirmDialog dialog = new ConfirmDialog();
dialog.initOwner(owner);
dialog.setTitle(title);
dialog.setHeaderText(title);
dialog.setContentText(message);
dialog.setYesCallback(yesCallback);
dialog.setNoCallback(noCallback);
dialog.setResizable(resizable);
dialog.getDialogPane().getChildren().stream()
.filter(node -> node instanceof Label)
.map(node -> (Label) node)
.forEach(label -> label.setMinHeight(Region.USE_PREF_SIZE));
return dialog;
}
}
}
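/*
 * Editor's note: the review question for this change asks whether shared dialog
 * state (title, owner, resizable) could already live in the common superclass.
 * The sketch below is a hypothetical illustration, not Phoenicis code: a small
 * abstract builder that keeps those common fields so concrete dialog builders
 * only need to add their own callbacks.
 */
abstract class AbstractDialogBuilderSketch<B extends AbstractDialogBuilderSketch<B>> {
    protected String title;
    protected javafx.stage.Window owner;
    protected boolean resizable;

    @SuppressWarnings("unchecked")
    protected B self() {
        return (B) this;
    }

    public B withTitle(String title) {
        this.title = title;
        return self();
    }

    public B withOwner(javafx.stage.Window owner) {
        this.owner = owner;
        return self();
    }

    public B withResizable(boolean resizable) {
        this.resizable = resizable;
        return self();
    }
}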
| 1 | 13,517 | Wouldn't it make sense to already have title, owner etc in this class? | PhoenicisOrg-phoenicis | java |
@@ -18,6 +18,19 @@ module Faker
def character
fetch('aqua_teen_hunger_force.character')
end
+
+ ##
+      # Produces a pearl of great ATHF wisdom
+ #
+ # @return [String]
+ #
+ # @example
+ # Faker::TvShows::AquaTeenHungerForce.quote #=> "Friendship ain't about trust. Friendship's about nunchucks."
+ #
+ # @faker.version 1.8.5
+ def quote
+ fetch('aqua_teen_hunger_force.quote')
+ end
end
end
end | 1 | # frozen_string_literal: true
module Faker
class TvShows
class AquaTeenHungerForce < Base
flexible :aqua_teen_hunger_force
class << self
##
# Produces a character from Aqua Teen Hunger Force.
#
# @return [String]
#
# @example
# Faker::TvShows::AquaTeenHungerForce.character #=> "Master Shake"
#
# @faker.version 1.8.5
def character
fetch('aqua_teen_hunger_force.character')
end
end
end
end
end
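# Editor's note: the review comment for this change asks that newly added
# generators carry the placeholder version tag rather than an existing release
# number. A hypothetical illustration of the suggested doc block (not the
# committed code):
#
#   ##
#   # Produces a pearl of great ATHF wisdom.
#   #
#   # @return [String]
#   #
#   # @example
#   #   Faker::TvShows::AquaTeenHungerForce.quote #=> "Friendship ain't about trust. Friendship's about nunchucks."
#   #
#   # @faker.version next
#   def quote
#     fetch('aqua_teen_hunger_force.quote')
#   end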
| 1 | 9,684 | New generators should have version `next` | faker-ruby-faker | rb |
@@ -204,6 +204,8 @@ public class ProcessBesuNodeRunner implements BesuNodeRunner {
params.add("--metrics-category");
params.add(((Enum<?>) category).name());
}
+ params.add("--metrics-protocol");
+ params.add(metricsConfiguration.getProtocol().name());
if (metricsConfiguration.isPushEnabled()) {
params.add("--metrics-push-enabled");
params.add("--metrics-push-host"); | 1 | /*
* Copyright ConsenSys AG.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.tests.acceptance.dsl.node;
import static com.google.common.base.Preconditions.checkState;
import static java.nio.charset.StandardCharsets.UTF_8;
import org.hyperledger.besu.cli.options.unstable.NetworkingOptions;
import org.hyperledger.besu.ethereum.api.jsonrpc.RpcApi;
import org.hyperledger.besu.ethereum.api.jsonrpc.RpcApis;
import org.hyperledger.besu.ethereum.permissioning.PermissioningConfiguration;
import org.hyperledger.besu.metrics.prometheus.MetricsConfiguration;
import org.hyperledger.besu.plugin.services.metrics.MetricCategory;
import org.hyperledger.besu.tests.acceptance.dsl.StaticNodesUtils;
import java.io.BufferedReader;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.PrintStream;
import java.lang.ProcessBuilder.Redirect;
import java.net.URI;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.ThreadContext;
public class ProcessBesuNodeRunner implements BesuNodeRunner {
private static final Logger LOG = LogManager.getLogger();
private static final Logger PROCESS_LOG =
LogManager.getLogger("org.hyperledger.besu.SubProcessLog");
private final Map<String, Process> besuProcesses = new HashMap<>();
private final ExecutorService outputProcessorExecutor = Executors.newCachedThreadPool();
private boolean capturingConsole;
private final ByteArrayOutputStream consoleContents = new ByteArrayOutputStream();
private final PrintStream consoleOut = new PrintStream(consoleContents);
ProcessBesuNodeRunner() {
Runtime.getRuntime().addShutdownHook(new Thread(this::shutdown));
}
@Override
public void startNode(final BesuNode node) {
final Path dataDir = node.homeDirectory();
final List<String> params = new ArrayList<>();
params.add("build/install/besu/bin/besu");
params.add("--data-path");
params.add(dataDir.toAbsolutePath().toString());
if (node.isDevMode()) {
params.add("--network");
params.add("DEV");
}
params.add("--sync-mode");
params.add("FULL");
params.add("--discovery-enabled");
params.add(Boolean.toString(node.isDiscoveryEnabled()));
params.add("--p2p-host");
params.add(node.p2pListenHost());
params.add("--p2p-port");
params.add("0");
if (node.getMiningParameters().isMiningEnabled()) {
params.add("--miner-enabled");
params.add("--miner-coinbase");
params.add(node.getMiningParameters().getCoinbase().get().toString());
params.add("--miner-stratum-port");
params.add(Integer.toString(node.getMiningParameters().getStratumPort()));
params.add("--miner-stratum-host");
params.add(node.getMiningParameters().getStratumNetworkInterface());
params.add("--min-gas-price");
params.add(
Integer.toString(node.getMiningParameters().getMinTransactionGasPrice().intValue()));
params.add("--Xminer-remote-sealers-limit");
params.add(Integer.toString(node.getMiningParameters().getRemoteSealersLimit()));
params.add("--Xminer-remote-sealers-hashrate-ttl");
params.add(Long.toString(node.getMiningParameters().getRemoteSealersTimeToLive()));
}
if (node.getMiningParameters().isStratumMiningEnabled()) {
params.add("--miner-stratum-enabled");
}
if (node.getPrivacyParameters().isEnabled()) {
params.add("--privacy-enabled");
params.add("--privacy-url");
params.add(node.getPrivacyParameters().getEnclaveUri().toString());
if (node.getPrivacyParameters().isMultiTenancyEnabled()) {
params.add("--privacy-multi-tenancy-enabled");
} else {
params.add("--privacy-public-key-file");
params.add(node.getPrivacyParameters().getEnclavePublicKeyFile().getAbsolutePath());
}
params.add("--privacy-marker-transaction-signing-key-file");
params.add(node.homeDirectory().resolve("key").toString());
if (node.getPrivacyParameters().isOnchainPrivacyGroupsEnabled()) {
params.add("--privacy-onchain-groups-enabled");
}
}
params.add("--bootnodes");
if (!node.getBootnodes().isEmpty()) {
params.add(node.getBootnodes().stream().map(URI::toString).collect(Collectors.joining(",")));
}
if (node.hasStaticNodes()) {
createStaticNodes(node);
}
if (node.isDnsEnabled()) {
params.add("--Xdns-enabled");
params.add("true");
params.add("--Xdns-update-enabled");
params.add("true");
}
if (node.isJsonRpcEnabled()) {
params.add("--rpc-http-enabled");
params.add("--rpc-http-host");
params.add(node.jsonRpcListenHost().get());
params.add("--rpc-http-port");
params.add(node.jsonRpcListenPort().map(Object::toString).get());
params.add("--rpc-http-api");
params.add(apiList(node.jsonRpcConfiguration().getRpcApis()));
if (node.jsonRpcConfiguration().isAuthenticationEnabled()) {
params.add("--rpc-http-authentication-enabled");
}
if (node.jsonRpcConfiguration().getAuthenticationCredentialsFile() != null) {
params.add("--rpc-http-authentication-credentials-file");
params.add(node.jsonRpcConfiguration().getAuthenticationCredentialsFile());
}
if (node.jsonRpcConfiguration().getAuthenticationPublicKeyFile() != null) {
params.add("--rpc-http-authentication-jwt-public-key-file");
params.add(node.jsonRpcConfiguration().getAuthenticationPublicKeyFile().getAbsolutePath());
}
}
if (node.wsRpcEnabled()) {
params.add("--rpc-ws-enabled");
params.add("--rpc-ws-host");
params.add(node.wsRpcListenHost().get());
params.add("--rpc-ws-port");
params.add(node.wsRpcListenPort().map(Object::toString).get());
params.add("--rpc-ws-api");
params.add(apiList(node.webSocketConfiguration().getRpcApis()));
if (node.webSocketConfiguration().isAuthenticationEnabled()) {
params.add("--rpc-ws-authentication-enabled");
}
if (node.webSocketConfiguration().getAuthenticationCredentialsFile() != null) {
params.add("--rpc-ws-authentication-credentials-file");
params.add(node.webSocketConfiguration().getAuthenticationCredentialsFile());
}
if (node.webSocketConfiguration().getAuthenticationPublicKeyFile() != null) {
params.add("--rpc-ws-authentication-jwt-public-key-file");
params.add(
node.webSocketConfiguration().getAuthenticationPublicKeyFile().getAbsolutePath());
}
}
if (node.isMetricsEnabled()) {
final MetricsConfiguration metricsConfiguration = node.getMetricsConfiguration();
params.add("--metrics-enabled");
params.add("--metrics-host");
params.add(metricsConfiguration.getHost());
params.add("--metrics-port");
params.add(Integer.toString(metricsConfiguration.getPort()));
for (final MetricCategory category : metricsConfiguration.getMetricCategories()) {
params.add("--metrics-category");
params.add(((Enum<?>) category).name());
}
if (metricsConfiguration.isPushEnabled()) {
params.add("--metrics-push-enabled");
params.add("--metrics-push-host");
params.add(metricsConfiguration.getPushHost());
params.add("--metrics-push-port");
params.add(Integer.toString(metricsConfiguration.getPushPort()));
params.add("--metrics-push-interval");
params.add(Integer.toString(metricsConfiguration.getPushInterval()));
params.add("--metrics-push-prometheus-job");
params.add(metricsConfiguration.getPrometheusJob());
}
}
node.getGenesisConfig()
.ifPresent(
genesis -> {
final Path genesisFile = createGenesisFile(node, genesis);
params.add("--genesis-file");
params.add(genesisFile.toAbsolutePath().toString());
});
if (!node.isP2pEnabled()) {
params.add("--p2p-enabled");
params.add("false");
} else {
final List<String> networkConfigParams =
NetworkingOptions.fromConfig(node.getNetworkingConfiguration()).getCLIOptions();
params.addAll(networkConfigParams);
}
if (node.isRevertReasonEnabled()) {
params.add("--revert-reason-enabled");
}
params.add("--Xsecp256k1-native-enabled=" + node.isSecp256k1Native());
params.add("--Xaltbn128-native-enabled=" + node.isAltbn128Native());
node.getPermissioningConfiguration()
.flatMap(PermissioningConfiguration::getLocalConfig)
.ifPresent(
permissioningConfiguration -> {
if (permissioningConfiguration.isNodeAllowlistEnabled()) {
params.add("--permissions-nodes-config-file-enabled");
}
if (permissioningConfiguration.getNodePermissioningConfigFilePath() != null) {
params.add("--permissions-nodes-config-file");
params.add(permissioningConfiguration.getNodePermissioningConfigFilePath());
}
if (permissioningConfiguration.isAccountAllowlistEnabled()) {
params.add("--permissions-accounts-config-file-enabled");
}
if (permissioningConfiguration.getAccountPermissioningConfigFilePath() != null) {
params.add("--permissions-accounts-config-file");
params.add(permissioningConfiguration.getAccountPermissioningConfigFilePath());
}
});
node.getPermissioningConfiguration()
.flatMap(PermissioningConfiguration::getSmartContractConfig)
.ifPresent(
permissioningConfiguration -> {
if (permissioningConfiguration.isSmartContractNodeAllowlistEnabled()) {
params.add("--permissions-nodes-contract-enabled");
}
if (permissioningConfiguration.getNodeSmartContractAddress() != null) {
params.add("--permissions-nodes-contract-address");
params.add(permissioningConfiguration.getNodeSmartContractAddress().toString());
}
if (permissioningConfiguration.isSmartContractAccountAllowlistEnabled()) {
params.add("--permissions-accounts-contract-enabled");
}
if (permissioningConfiguration.getAccountSmartContractAddress() != null) {
params.add("--permissions-accounts-contract-address");
params.add(permissioningConfiguration.getAccountSmartContractAddress().toString());
}
params.add("--permissions-nodes-contract-version");
params.add(
String.valueOf(
permissioningConfiguration.getNodeSmartContractInterfaceVersion()));
});
params.addAll(node.getExtraCLIOptions());
params.add("--key-value-storage");
params.add("rocksdb");
params.add("--auto-log-bloom-caching-enabled");
params.add("false");
final String level = System.getProperty("root.log.level");
if (level != null) {
params.add("--logging=" + level);
}
params.addAll(node.getRunCommand());
LOG.info("Creating besu process with params {}", params);
final ProcessBuilder processBuilder =
new ProcessBuilder(params)
.directory(new File(System.getProperty("user.dir")).getParentFile().getParentFile())
.redirectErrorStream(true)
.redirectInput(Redirect.INHERIT);
if (!node.getPlugins().isEmpty()) {
processBuilder
.environment()
.put(
"BESU_OPTS",
"-Dbesu.plugins.dir=" + dataDir.resolve("plugins").toAbsolutePath().toString());
}
// Use non-blocking randomness for acceptance tests
processBuilder
.environment()
.put(
"JAVA_OPTS",
"-Djava.security.properties="
+ "acceptance-tests/tests/build/resources/test/acceptanceTesting.security");
try {
checkState(
isNotAliveOrphan(node.getName()),
"A live process with name: %s, already exists. Cannot create another with the same name as it would orphan the first",
node.getName());
final Process process = processBuilder.start();
process.onExit().thenRun(() -> node.setExitCode(process.exitValue()));
outputProcessorExecutor.execute(() -> printOutput(node, process));
besuProcesses.put(node.getName(), process);
} catch (final IOException e) {
LOG.error("Error starting BesuNode process", e);
}
if (node.getRunCommand().isEmpty()) {
waitForFile(dataDir, "besu.ports");
waitForFile(dataDir, "besu.networks");
}
ThreadContext.remove("node");
}
private boolean isNotAliveOrphan(final String name) {
final Process orphan = besuProcesses.get(name);
return orphan == null || !orphan.isAlive();
}
private void printOutput(final BesuNode node, final Process process) {
try (final BufferedReader in =
new BufferedReader(new InputStreamReader(process.getInputStream(), UTF_8))) {
ThreadContext.put("node", node.getName());
String line = in.readLine();
while (line != null) {
// would be nice to pass up the log level of the incoming log line
PROCESS_LOG.info(line);
if (capturingConsole) {
consoleOut.println(line);
}
line = in.readLine();
}
} catch (final IOException e) {
if (besuProcesses.containsKey(node.getName())) {
LOG.error("Failed to read output from process for node " + node.getName(), e);
} else {
LOG.debug("Stdout from process {} closed", node.getName());
}
}
}
private Path createGenesisFile(final BesuNode node, final String genesisConfig) {
try {
final Path genesisFile = Files.createTempFile(node.homeDirectory(), "genesis", "");
genesisFile.toFile().deleteOnExit();
Files.write(genesisFile, genesisConfig.getBytes(UTF_8));
return genesisFile;
} catch (final IOException e) {
throw new IllegalStateException(e);
}
}
private void createStaticNodes(final BesuNode node) {
StaticNodesUtils.createStaticNodesFile(node.homeDirectory(), node.getStaticNodes());
}
private String apiList(final Collection<RpcApi> rpcApis) {
return rpcApis.stream().map(RpcApis::getValue).collect(Collectors.joining(","));
}
@Override
public void stopNode(final BesuNode node) {
node.stop();
if (besuProcesses.containsKey(node.getName())) {
killBesuProcess(node.getName());
} else {
LOG.error("There was a request to stop an unknown node: {}", node.getName());
}
}
@Override
public synchronized void shutdown() {
final Set<String> localMap = new HashSet<>(besuProcesses.keySet());
localMap.forEach(this::killBesuProcess);
outputProcessorExecutor.shutdown();
try {
if (!outputProcessorExecutor.awaitTermination(5, TimeUnit.SECONDS)) {
LOG.error("Output processor executor did not shutdown cleanly.");
}
} catch (final InterruptedException e) {
LOG.error("Interrupted while already shutting down", e);
Thread.currentThread().interrupt();
}
}
@Override
public boolean isActive(final String nodeName) {
final Process process = besuProcesses.get(nodeName);
return process != null && process.isAlive();
}
private void killBesuProcess(final String name) {
final Process process = besuProcesses.remove(name);
if (process == null) {
LOG.error("Process {} wasn't in our list, pid {}", name, process.pid());
return;
}
if (!process.isAlive()) {
LOG.info("Process {} already exited, pid {}", name, process.pid());
return;
}
LOG.info("Killing {} process, pid {}", name, process.pid());
process.destroy();
try {
process.waitFor(30, TimeUnit.SECONDS);
} catch (final InterruptedException e) {
LOG.warn("Wait for death of process {} was interrupted", name, e);
}
if (process.isAlive()) {
LOG.warn("Process {} still alive, destroying forcibly now, pid {}", name, process.pid());
try {
process.destroyForcibly().waitFor(30, TimeUnit.SECONDS);
} catch (final Exception e) {
// just die already
}
LOG.info("Process exited with code {}", process.exitValue());
}
}
@Override
public void startConsoleCapture() {
consoleContents.reset();
capturingConsole = true;
}
@Override
public String getConsoleContents() {
capturingConsole = false;
return consoleContents.toString(UTF_8);
}
}
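/*
 * Editor's note: the review comment on the accompanying diff suggests only
 * emitting the --metrics-protocol flag when metrics (or metrics push) are in
 * use. The sketch below is a self-contained, hypothetical illustration of that
 * guard using stand-in booleans instead of the real node and metrics objects.
 */
class MetricsProtocolFlagSketch {
  static java.util.List<String> metricsProtocolParams(
      final boolean metricsEnabled, final boolean pushEnabled, final String protocol) {
    final java.util.List<String> params = new java.util.ArrayList<>();
    if (metricsEnabled || pushEnabled) { // guard suggested in the review
      params.add("--metrics-protocol");
      params.add(protocol);
    }
    return params;
  }
}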
| 1 | 23,824 | Wrap these two lines inside an `if (node.isMetricsEnabled() || metricsConfiguration.isPushEnabled()) { ... }` | hyperledger-besu | java |
@@ -85,13 +85,14 @@ class FPN(BaseModule):
self.fp16_enabled = False
self.upsample_cfg = upsample_cfg.copy()
- if end_level == -1:
+ if end_level == -1 or end_level == self.num_ins:
+ # if end_level == len(inputs) or end_level == -1, extra level is allowed
self.backbone_end_level = self.num_ins
assert num_outs >= self.num_ins - start_level
else:
# if end_level < inputs, no extra level is allowed
self.backbone_end_level = end_level
- assert end_level <= len(in_channels)
+ assert end_level < len(in_channels)
assert num_outs == end_level - start_level
self.start_level = start_level
self.end_level = end_level | 1 | # Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from mmcv.runner import BaseModule, auto_fp16
from ..builder import NECKS
@NECKS.register_module()
class FPN(BaseModule):
r"""Feature Pyramid Network.
This is an implementation of paper `Feature Pyramid Networks for Object
Detection <https://arxiv.org/abs/1612.03144>`_.
Args:
in_channels (List[int]): Number of input channels per scale.
out_channels (int): Number of output channels (used at each scale)
num_outs (int): Number of output scales.
start_level (int): Index of the start input backbone level used to
build the feature pyramid. Default: 0.
end_level (int): Index of the end input backbone level (exclusive) to
build the feature pyramid. Default: -1, which means the last level.
add_extra_convs (bool | str): If bool, it decides whether to add conv
layers on top of the original feature maps. Default to False.
If True, it is equivalent to `add_extra_convs='on_input'`.
If str, it specifies the source feature map of the extra convs.
Only the following options are allowed
- 'on_input': Last feat map of neck inputs (i.e. backbone feature).
- 'on_lateral': Last feature map after lateral convs.
- 'on_output': The last output feature map after fpn convs.
relu_before_extra_convs (bool): Whether to apply relu before the extra
conv. Default: False.
no_norm_on_lateral (bool): Whether to apply norm on lateral.
Default: False.
conv_cfg (dict): Config dict for convolution layer. Default: None.
norm_cfg (dict): Config dict for normalization layer. Default: None.
act_cfg (str): Config dict for activation layer in ConvModule.
Default: None.
upsample_cfg (dict): Config dict for interpolate layer.
Default: `dict(mode='nearest')`
init_cfg (dict or list[dict], optional): Initialization config dict.
Example:
>>> import torch
>>> in_channels = [2, 3, 5, 7]
>>> scales = [340, 170, 84, 43]
>>> inputs = [torch.rand(1, c, s, s)
... for c, s in zip(in_channels, scales)]
>>> self = FPN(in_channels, 11, len(in_channels)).eval()
>>> outputs = self.forward(inputs)
>>> for i in range(len(outputs)):
... print(f'outputs[{i}].shape = {outputs[i].shape}')
outputs[0].shape = torch.Size([1, 11, 340, 340])
outputs[1].shape = torch.Size([1, 11, 170, 170])
outputs[2].shape = torch.Size([1, 11, 84, 84])
outputs[3].shape = torch.Size([1, 11, 43, 43])
"""
def __init__(self,
in_channels,
out_channels,
num_outs,
start_level=0,
end_level=-1,
add_extra_convs=False,
relu_before_extra_convs=False,
no_norm_on_lateral=False,
conv_cfg=None,
norm_cfg=None,
act_cfg=None,
upsample_cfg=dict(mode='nearest'),
init_cfg=dict(
type='Xavier', layer='Conv2d', distribution='uniform')):
super(FPN, self).__init__(init_cfg)
assert isinstance(in_channels, list)
self.in_channels = in_channels
self.out_channels = out_channels
self.num_ins = len(in_channels)
self.num_outs = num_outs
self.relu_before_extra_convs = relu_before_extra_convs
self.no_norm_on_lateral = no_norm_on_lateral
self.fp16_enabled = False
self.upsample_cfg = upsample_cfg.copy()
if end_level == -1:
self.backbone_end_level = self.num_ins
assert num_outs >= self.num_ins - start_level
else:
# if end_level < inputs, no extra level is allowed
self.backbone_end_level = end_level
assert end_level <= len(in_channels)
assert num_outs == end_level - start_level
self.start_level = start_level
self.end_level = end_level
self.add_extra_convs = add_extra_convs
assert isinstance(add_extra_convs, (str, bool))
if isinstance(add_extra_convs, str):
# Extra_convs_source choices: 'on_input', 'on_lateral', 'on_output'
assert add_extra_convs in ('on_input', 'on_lateral', 'on_output')
elif add_extra_convs: # True
self.add_extra_convs = 'on_input'
self.lateral_convs = nn.ModuleList()
self.fpn_convs = nn.ModuleList()
for i in range(self.start_level, self.backbone_end_level):
l_conv = ConvModule(
in_channels[i],
out_channels,
1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg if not self.no_norm_on_lateral else None,
act_cfg=act_cfg,
inplace=False)
fpn_conv = ConvModule(
out_channels,
out_channels,
3,
padding=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg,
inplace=False)
self.lateral_convs.append(l_conv)
self.fpn_convs.append(fpn_conv)
# add extra conv layers (e.g., RetinaNet)
extra_levels = num_outs - self.backbone_end_level + self.start_level
if self.add_extra_convs and extra_levels >= 1:
for i in range(extra_levels):
if i == 0 and self.add_extra_convs == 'on_input':
in_channels = self.in_channels[self.backbone_end_level - 1]
else:
in_channels = out_channels
extra_fpn_conv = ConvModule(
in_channels,
out_channels,
3,
stride=2,
padding=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg,
inplace=False)
self.fpn_convs.append(extra_fpn_conv)
@auto_fp16()
def forward(self, inputs):
"""Forward function."""
assert len(inputs) == len(self.in_channels)
# build laterals
laterals = [
lateral_conv(inputs[i + self.start_level])
for i, lateral_conv in enumerate(self.lateral_convs)
]
# build top-down path
used_backbone_levels = len(laterals)
for i in range(used_backbone_levels - 1, 0, -1):
# In some cases, fixing `scale factor` (e.g. 2) is preferred, but
# it cannot co-exist with `size` in `F.interpolate`.
if 'scale_factor' in self.upsample_cfg:
laterals[i - 1] += F.interpolate(laterals[i],
**self.upsample_cfg)
else:
prev_shape = laterals[i - 1].shape[2:]
laterals[i - 1] += F.interpolate(
laterals[i], size=prev_shape, **self.upsample_cfg)
# build outputs
# part 1: from original levels
outs = [
self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels)
]
# part 2: add extra levels
if self.num_outs > len(outs):
# use max pool to get more levels on top of outputs
# (e.g., Faster R-CNN, Mask R-CNN)
if not self.add_extra_convs:
for i in range(self.num_outs - used_backbone_levels):
outs.append(F.max_pool2d(outs[-1], 1, stride=2))
# add conv layers on top of original feature maps (RetinaNet)
else:
if self.add_extra_convs == 'on_input':
extra_source = inputs[self.backbone_end_level - 1]
elif self.add_extra_convs == 'on_lateral':
extra_source = laterals[-1]
elif self.add_extra_convs == 'on_output':
extra_source = outs[-1]
else:
raise NotImplementedError
outs.append(self.fpn_convs[used_backbone_levels](extra_source))
for i in range(used_backbone_levels + 1, self.num_outs):
if self.relu_before_extra_convs:
outs.append(self.fpn_convs[i](F.relu(outs[-1])))
else:
outs.append(self.fpn_convs[i](outs[-1]))
return tuple(outs)
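# Editor's note: the review discussion on the accompanying diff is about whether
# `end_level` is an exclusive bound or the index of the last backbone level to
# use. The stand-alone sketch below (not mmdetection code) shows the index
# arithmetic under the "inclusive index" reading suggested in the review.
def resolve_backbone_levels(num_ins, end_level=-1):
    """Return (backbone_end_level, extra_levels_allowed) for an FPN-like neck."""
    if end_level == -1 or end_level == num_ins - 1:
        # use every remaining backbone level; extra outputs may be stacked on top
        return num_ins, True
    # stop early: the exclusive bound is end_level + 1 and no extra level is allowed
    assert end_level < num_ins - 1
    return end_level + 1, False


assert resolve_backbone_levels(4) == (4, True)
assert resolve_backbone_levels(4, end_level=3) == (4, True)
assert resolve_backbone_levels(4, end_level=2) == (3, False)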
| 1 | 27,038 | if end_level == -1 or end_level == (self.num_ins - 1): for end_level is the Index of the end input backbone level. | open-mmlab-mmdetection | py |
@@ -33,6 +33,17 @@ namespace Nethermind.Store
public bool IsLeaf { get; }
public bool IsExtension => !IsLeaf;
+ public int Size
+ {
+ get
+ {
+ const int refSize = sizeof(long);
+ const int arrayOverhead = sizeof(long);
+ return refSize + Path.Length + arrayOverhead
+ + 4 /* aligned bools */;
+ }
+ }
+
public byte[] ToBytes()
{
byte[] output = new byte[Path.Length / 2 + 1]; | 1 | // Copyright (c) 2018 Demerzel Solutions Limited
// This file is part of the Nethermind library.
//
// The Nethermind library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The Nethermind library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the Nethermind. If not, see <http://www.gnu.org/licenses/>.
using System;
using System.Diagnostics;
using Nethermind.Core.Extensions;
namespace Nethermind.Store
{
public class HexPrefix
{
[DebuggerStepThrough]
public HexPrefix(bool isLeaf, params byte[] path)
{
IsLeaf = isLeaf;
Path = path;
}
public byte[] Path { get; private set; }
public bool IsLeaf { get; }
public bool IsExtension => !IsLeaf;
public byte[] ToBytes()
{
byte[] output = new byte[Path.Length / 2 + 1];
output[0] = (byte)(IsLeaf ? 0x20 : 0x000);
if (Path.Length % 2 != 0)
{
output[0] += (byte)(0x10 + Path[0]);
}
for (int i = 0; i < Path.Length - 1; i = i + 2)
{
output[i / 2 + 1] =
Path.Length % 2 == 0
? (byte)(16 * Path[i] + Path[i + 1])
: (byte)(16 * Path[i + 1] + Path[i + 2]);
}
return output;
}
public static HexPrefix FromBytes(Span<byte> bytes)
{
HexPrefix hexPrefix = new HexPrefix(bytes[0] >= 32);
bool isEven = (bytes[0] & 16) == 0;
int nibblesCount = bytes.Length * 2 - (isEven ? 2 : 1);
hexPrefix.Path = new byte[nibblesCount];
for (int i = 0; i < nibblesCount; i++)
{
hexPrefix.Path[i] =
isEven
? i % 2 == 0
? (byte)((bytes[1 + i / 2] & 240) / 16)
: (byte)(bytes[1 + i / 2] & 15)
: i % 2 == 0
? (byte)(bytes[i / 2] & 15)
: (byte)((bytes[1 + i / 2] & 240) / 16);
}
return hexPrefix;
}
public override string ToString()
{
return ToBytes().ToHexString(false);
}
}
}
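/*
 * Editor's note: the review question on the accompanying diff ("No CLR
 * overhead?") asks whether the proposed Size property should also count the
 * runtime's per-object cost. The sketch below is not Nethermind code; it is a
 * rough 64-bit estimate using commonly cited CLR figures as assumptions:
 * roughly 16 bytes of header per object, an extra 8 bytes for an array's
 * length field, and 8 bytes per reference.
 */
namespace Nethermind.Store.Sketches
{
    public static class ManagedSizeSketch
    {
        private const int ObjectHeader = 16;     // sync block + method table pointer (x64, assumption)
        private const int ArrayLengthField = 8;  // array length slot (assumption)
        private const int ReferenceSize = 8;

        public static int EstimateHexPrefixSize(int pathLength)
        {
            int pathArray = ObjectHeader + ArrayLengthField + pathLength; // the byte[] itself
            int instance = ObjectHeader + ReferenceSize /* Path reference */ + 4 /* padded bool */;
            return instance + pathArray;
        }
    }
}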
| 1 | 23,032 | No CLR overhead? | NethermindEth-nethermind | .cs |
@@ -414,6 +414,18 @@ func TestCreateInstanceValidateMachineType(t *testing.T) {
p := "project"
z := "zone"
+ _, c, err := daisyCompute.NewTestClient(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if r.Method == "GET" && r.URL.String() == "/project/zones/zone/machineTypes/mt?alt=json" {
+ fmt.Fprintln(w, `{}`)
+ } else {
+ w.WriteHeader(http.StatusBadRequest)
+ fmt.Fprintf(w, "bad request: %+v", r)
+ }
+ }))
+ if err != nil {
+ t.Fatalf("error creating test client: %v", err)
+ }
+
tests := []struct {
desc string
mt string | 1 | // Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package workflow
import (
"bytes"
"context"
"errors"
"fmt"
"log"
"net/http"
"path"
"reflect"
"sort"
"strings"
"testing"
"time"
daisyCompute "github.com/GoogleCloudPlatform/compute-image-tools/daisy/compute"
"github.com/kylelemons/godebug/pretty"
compute "google.golang.org/api/compute/v1"
)
func TestLogSerialOutput(t *testing.T) {
ctx := context.Background()
w := testWorkflow()
var get []string
_, c, err := daisyCompute.NewTestClient(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if r.Method == "GET" && strings.Contains(r.URL.String(), "serialPort?alt=json&port=1") {
if len(get) == 0 {
fmt.Fprintln(w, `{"Contents":"test","Start":"0"}`)
} else {
w.WriteHeader(http.StatusInternalServerError)
}
get = append(get, r.URL.String())
} else if r.Method == "GET" && strings.Contains(r.URL.String(), "serialPort?alt=json&port=2") {
w.WriteHeader(http.StatusInternalServerError)
fmt.Fprintln(w, "500 error")
} else if r.Method == "GET" && strings.Contains(r.URL.String(), "serialPort?alt=json&port=3") {
w.WriteHeader(http.StatusBadRequest)
fmt.Fprintln(w, "400 error")
} else if r.Method == "GET" && strings.Contains(r.URL.String(), fmt.Sprintf("/%s/zones/%s/instances/i1", testProject, testZone)) {
fmt.Fprintln(w, `{"Status":"TERMINATED","SelfLink":"link"}`)
} else if r.Method == "GET" && strings.Contains(r.URL.String(), fmt.Sprintf("/%s/zones/%s/instances/i2", testProject, testZone)) {
fmt.Fprintln(w, `{"Status":"RUNNING","SelfLink":"link"}`)
} else if r.Method == "GET" && strings.Contains(r.URL.String(), fmt.Sprintf("/%s/zones/%s/instances/i3", testProject, testZone)) {
w.WriteHeader(http.StatusBadRequest)
fmt.Fprintln(w, "test error")
} else {
w.WriteHeader(http.StatusBadRequest)
fmt.Fprintf(w, "bad request: %+v", r)
}
get = append(get, r.URL.String())
}))
if err != nil {
t.Fatal(err)
}
w.ComputeClient = c
w.bucket = "test-bucket"
instances[w].m = map[string]*resource{
"i1": {real: w.genName("i1"), link: "link"},
"i2": {real: w.genName("i2"), link: "link"},
"i3": {real: w.genName("i3"), link: "link"},
}
var buf bytes.Buffer
w.logger = log.New(&buf, "", 0)
tests := []struct {
test, want, name string
port int64
get []string // Test expected api call flow.
}{
{
"400 error but instance stopped",
"CreateInstances: streaming instance \"i1\" serial port 2 output to gs://test-bucket/i1-serial-port2.log\n",
"i1",
2,
[]string{"/test-project/zones/test-zone/instances/i1/serialPort?alt=json&port=2&start=0", "/test-project/zones/test-zone/instances/i1?alt=json"},
},
{
"400 error but instance running",
"CreateInstances: streaming instance \"i2\" serial port 3 output to gs://test-bucket/i2-serial-port3.log\nCreateInstances: instance \"i2\": error getting serial port: googleapi: got HTTP response code 400 with body: 400 error\n",
"i2",
3,
[]string{"/test-project/zones/test-zone/instances/i2/serialPort?alt=json&port=3&start=0", "/test-project/zones/test-zone/instances/i2?alt=json"},
},
{
"500 error but instance running",
"CreateInstances: streaming instance \"i2\" serial port 2 output to gs://test-bucket/i2-serial-port2.log\nCreateInstances: instance \"i2\": error getting serial port: googleapi: got HTTP response code 500 with body: 500 error\n",
"i2",
2,
[]string{"/test-project/zones/test-zone/instances/i2/serialPort?alt=json&port=2&start=0", "/test-project/zones/test-zone/instances/i2?alt=json", "/test-project/zones/test-zone/instances/i2/serialPort?alt=json&port=2&start=0", "/test-project/zones/test-zone/instances/i2?alt=json", "/test-project/zones/test-zone/instances/i2/serialPort?alt=json&port=2&start=0", "/test-project/zones/test-zone/instances/i2?alt=json", "/test-project/zones/test-zone/instances/i2/serialPort?alt=json&port=2&start=0", "/test-project/zones/test-zone/instances/i2?alt=json"},
},
{
"500 error but instance deleted",
"CreateInstances: streaming instance \"i4\" serial port 2 output to gs://test-bucket/i4-serial-port2.log\n",
"i4",
2,
[]string{"/test-project/zones/test-zone/instances/i4/serialPort?alt=json&port=2&start=0"},
},
{
"normal flow",
"CreateInstances: streaming instance \"i1\" serial port 1 output to gs://test-bucket/i1-serial-port1.log\n",
"i1",
1,
[]string{"/test-project/zones/test-zone/instances/i1/serialPort?alt=json&port=1&start=0", "/test-project/zones/test-zone/instances/i1/serialPort?alt=json&port=1&start=0", "/test-project/zones/test-zone/instances/i1/serialPort?alt=json&port=1&start=0", "/test-project/zones/test-zone/instances/i1/serialPort?alt=json&port=1&start=0", "/test-project/zones/test-zone/instances/i1?alt=json"},
},
}
for _, tt := range tests {
get = nil
buf.Reset()
logSerialOutput(ctx, w, tt.name, tt.port, 1*time.Microsecond)
if !reflect.DeepEqual(get, tt.get) {
t.Errorf("%s: got get calls: %q, want get calls: %q", tt.test, get, tt.get)
}
if buf.String() != tt.want {
t.Errorf("%s: got: %q, want: %q", tt.test, buf.String(), tt.want)
}
}
}
func TestCreateInstancePopulate(t *testing.T) {
ctx := context.Background()
w := testWorkflow()
desc := "desc"
defP := w.Project
defZ := w.Zone
defMT := fmt.Sprintf("projects/%s/zones/%s/machineTypes/n1-standard-1", defP, defZ)
defDM := "READ_WRITE"
defDs := []*compute.AttachedDisk{{Boot: true, Source: "foo", Mode: defDM}}
defNs := []*compute.NetworkInterface{{Network: "global/networks/default", AccessConfigs: []*compute.AccessConfig{{Type: "ONE_TO_ONE_NAT"}}}}
defMD := map[string]string{"daisy-sources-path": "gs://", "daisy-logs-path": "gs://", "daisy-outs-path": "gs://"}
defSs := []string{"https://www.googleapis.com/auth/devstorage.read_only"}
defSAs := []*compute.ServiceAccount{{Email: "default", Scopes: defSs}}
tests := []struct {
desc string
input, want *CreateInstance
shouldErr bool
}{
{
"defaults, non exact name case",
&CreateInstance{Instance: compute.Instance{Name: "foo", Description: desc, Disks: []*compute.AttachedDisk{{Source: "foo"}}}},
&CreateInstance{Instance: compute.Instance{Name: w.genName("foo"), Description: desc, Disks: defDs, MachineType: defMT, NetworkInterfaces: defNs, ServiceAccounts: defSAs}, Metadata: defMD, Scopes: defSs, Project: defP, Zone: defZ, daisyName: "foo"},
false,
},
{
"nondefault zone/project case",
&CreateInstance{Instance: compute.Instance{Name: "foo", Description: desc, Disks: []*compute.AttachedDisk{{Source: "foo"}}}, Project: "pfoo", Zone: "zfoo", ExactName: true},
&CreateInstance{Instance: compute.Instance{Name: "foo", Description: desc, Disks: []*compute.AttachedDisk{{Boot: true, Source: "foo", Mode: defDM}}, MachineType: "projects/pfoo/zones/zfoo/machineTypes/n1-standard-1", NetworkInterfaces: defNs, ServiceAccounts: defSAs}, Metadata: defMD, Scopes: defSs, Project: "pfoo", Zone: "zfoo", daisyName: "foo", ExactName: true},
false,
},
}
for _, tt := range tests {
s := &Step{w: w, CreateInstances: &CreateInstances{tt.input}}
err := s.CreateInstances.populate(ctx, s)
if tt.shouldErr {
if err == nil {
t.Errorf("%s: should have returned error but didn't", tt.desc)
}
} else if err != nil {
t.Errorf("%s: unexpected error: %v", tt.desc, err)
} else {
			tt.input.Instance.Metadata = nil // This is nondeterministic, but we can check tt.input.Metadata.
if diff := pretty.Compare(tt.input, tt.want); diff != "" {
t.Errorf("%s: CreateInstance not modified as expected: (-got +want)\n%s", tt.desc, diff)
}
}
}
}
func TestCreateInstancePopulateDisks(t *testing.T) {
w := testWorkflow()
tests := []struct {
desc string
ad, wantAd []*compute.AttachedDisk
}{
{"normal case", []*compute.AttachedDisk{{Source: "d1"}}, []*compute.AttachedDisk{{Boot: true, Source: "d1", Mode: "READ_WRITE"}}},
{"multiple disks case", []*compute.AttachedDisk{{Source: "d1"}, {Source: "d2"}}, []*compute.AttachedDisk{{Boot: true, Source: "d1", Mode: "READ_WRITE"}, {Boot: false, Source: "d2", Mode: "READ_WRITE"}}},
{"mode specified case", []*compute.AttachedDisk{{Source: "d1", Mode: "READ_ONLY"}}, []*compute.AttachedDisk{{Boot: true, Source: "d1", Mode: "READ_ONLY"}}},
}
for _, tt := range tests {
ci := CreateInstance{Instance: compute.Instance{Disks: tt.ad}}
err := ci.populateDisks(w)
if err != nil {
t.Errorf("%s: populateDisks returned an unexpected error: %v", tt.desc, err)
} else if diff := pretty.Compare(tt.ad, tt.wantAd); diff != "" {
t.Errorf("%s: AttachedDisks not modified as expected: (-got +want)\n%s", tt.desc, diff)
}
}
}
func TestCreateInstancePopulateMachineType(t *testing.T) {
tests := []struct {
desc, mt, wantMt string
shouldErr bool
}{
{"normal case", "mt", "projects/foo/zones/bar/machineTypes/mt", false},
{"expand case", "zones/bar/machineTypes/mt", "projects/foo/zones/bar/machineTypes/mt", false},
}
for _, tt := range tests {
ci := CreateInstance{Instance: compute.Instance{MachineType: tt.mt}, Project: "foo", Zone: "bar"}
err := ci.populateMachineType()
if tt.shouldErr && err == nil {
t.Errorf("%s: populateMachineType should have erred but didn't", tt.desc)
} else if !tt.shouldErr && err != nil {
t.Errorf("%s: populateMachineType returned an unexpected error: %v", tt.desc, err)
} else if err == nil && ci.MachineType != tt.wantMt {
t.Errorf("%s: MachineType not modified as expected: got: %q, want: %q", tt.desc, ci.MachineType, tt.wantMt)
}
}
}
func TestCreateInstancePopulateMetadata(t *testing.T) {
w := testWorkflow()
w.populate(context.Background())
w.Sources = map[string]string{"file": "foo/bar"}
filePath := "gs://" + path.Join(w.bucket, w.sourcesPath, "file")
baseMd := map[string]string{
"daisy-sources-path": "gs://" + path.Join(w.bucket, w.sourcesPath),
"daisy-logs-path": "gs://" + path.Join(w.bucket, w.logsPath),
"daisy-outs-path": "gs://" + path.Join(w.bucket, w.outsPath),
}
getWantMd := func(md map[string]string) *compute.Metadata {
for k, v := range baseMd {
md[k] = v
}
result := &compute.Metadata{}
for k, v := range md {
vCopy := v
result.Items = append(result.Items, &compute.MetadataItems{Key: k, Value: &vCopy})
}
return result
}
tests := []struct {
desc string
md map[string]string
startupScript string
wantMd *compute.Metadata
shouldErr bool
}{
{"defaults case", nil, "", getWantMd(map[string]string{}), false},
{"startup script case", nil, "file", getWantMd(map[string]string{"startup-script-url": filePath, "windows-startup-script-url": filePath}), false},
{"bad startup script case", nil, "foo", nil, true},
}
for _, tt := range tests {
ci := CreateInstance{Metadata: tt.md, StartupScript: tt.startupScript}
err := ci.populateMetadata(w)
if err == nil {
if tt.shouldErr {
t.Errorf("%s: populateMetadata should have erred but didn't", tt.desc)
} else {
compFactory := func(items []*compute.MetadataItems) func(i, j int) bool {
return func(i, j int) bool { return items[i].Key < items[j].Key }
}
sort.Slice(ci.Instance.Metadata.Items, compFactory(ci.Instance.Metadata.Items))
sort.Slice(tt.wantMd.Items, compFactory(tt.wantMd.Items))
if diff := pretty.Compare(ci.Instance.Metadata, tt.wantMd); diff != "" {
t.Errorf("%s: Metadata not modified as expected: (-got +want)\n%s", tt.desc, diff)
}
}
} else if !tt.shouldErr {
t.Errorf("%s: populateMetadata returned an unexpected error: %v", tt.desc, err)
}
}
}
func TestCreateInstancePopulateNetworks(t *testing.T) {
defaultAcs := []*compute.AccessConfig{{Type: "ONE_TO_ONE_NAT"}}
tests := []struct {
desc string
input, want []*compute.NetworkInterface
}{
{"default case", nil, []*compute.NetworkInterface{{Network: "global/networks/default", AccessConfigs: defaultAcs}}},
{"default AccessConfig case", []*compute.NetworkInterface{{Network: "global/networks/foo"}}, []*compute.NetworkInterface{{Network: "global/networks/foo", AccessConfigs: defaultAcs}}},
{"network URL resolution case", []*compute.NetworkInterface{{Network: "foo", AccessConfigs: []*compute.AccessConfig{}}}, []*compute.NetworkInterface{{Network: "global/networks/foo", AccessConfigs: []*compute.AccessConfig{}}}},
}
for _, tt := range tests {
ci := &CreateInstance{Instance: compute.Instance{NetworkInterfaces: tt.input}}
err := ci.populateNetworks()
if err != nil {
t.Errorf("%s: should have returned an error", tt.desc)
} else if diff := pretty.Compare(ci.NetworkInterfaces, tt.want); diff != "" {
t.Errorf("%s: NetworkInterfaces not modified as expected: (-got +want)\n%s", tt.desc, diff)
}
}
}
func TestCreateInstancePopulateScopes(t *testing.T) {
defaultScopes := []string{"https://www.googleapis.com/auth/devstorage.read_only"}
tests := []struct {
desc string
input []string
inputSas, want []*compute.ServiceAccount
shouldErr bool
}{
{"default case", nil, nil, []*compute.ServiceAccount{{Email: "default", Scopes: defaultScopes}}, false},
{"nondefault case", []string{"foo"}, nil, []*compute.ServiceAccount{{Email: "default", Scopes: []string{"foo"}}}, false},
{"service accounts override case", []string{"foo"}, []*compute.ServiceAccount{}, []*compute.ServiceAccount{}, false},
}
for _, tt := range tests {
ci := &CreateInstance{Scopes: tt.input, Instance: compute.Instance{ServiceAccounts: tt.inputSas}}
err := ci.populateScopes()
if err == nil {
if tt.shouldErr {
t.Errorf("%s: should have returned an error", tt.desc)
} else if diff := pretty.Compare(ci.ServiceAccounts, tt.want); diff != "" {
t.Errorf("%s: NetworkInterfaces not modified as expected: (-got +want)\n%s", tt.desc, diff)
}
} else if !tt.shouldErr {
t.Errorf("%s: unexpected error: %v", tt.desc, err)
}
}
}
func TestCreateInstancesRun(t *testing.T) {
ctx := context.Background()
var createErr error
w := testWorkflow()
w.ComputeClient.(*daisyCompute.TestClient).CreateInstanceFn = func(p, z string, i *compute.Instance) error {
i.SelfLink = "insertedLink"
return createErr
}
s := &Step{w: w}
w.Sources = map[string]string{"file": "gs://some/file"}
disks[w].m = map[string]*resource{
"d0": {real: w.genName("d0"), link: "diskLink0"},
}
// Good case: check disk link gets resolved. Check instance reference map updates.
i0 := &CreateInstance{daisyName: "i0", Instance: compute.Instance{Name: "realI0", MachineType: "foo-type", Disks: []*compute.AttachedDisk{{Source: "d0"}}}}
i1 := &CreateInstance{daisyName: "i1", Project: "foo", Zone: "bar", Instance: compute.Instance{Name: "realI1", MachineType: "foo-type", Disks: []*compute.AttachedDisk{{Source: "other"}}}}
ci := &CreateInstances{i0, i1}
if err := ci.run(ctx, s); err != nil {
t.Errorf("unexpected error running CreateInstances.run(): %v", err)
}
if i0.Disks[0].Source != disks[w].m["d0"].link {
t.Errorf("instance disk link did not resolve properly: want: %q, got: %q", disks[w].m["d0"].link, i0.Disks[0].Source)
}
if i1.Disks[0].Source != "other" {
t.Errorf("instance disk link did not resolve properly: want: %q, got: %q", "other", i1.Disks[0].Source)
}
// Bad case: compute client CreateInstance error. Check instance ref map doesn't update.
instances[w].m = map[string]*resource{}
createErr = errors.New("client error")
ci = &CreateInstances{
{daisyName: "i0", Instance: compute.Instance{Name: "realI0", MachineType: "foo-type", Disks: []*compute.AttachedDisk{{Source: "d0"}}}},
}
if err := ci.run(ctx, s); err != createErr {
t.Errorf("CreateInstances.run() should have return compute client error: %v != %v", err, createErr)
}
}
func TestCreateInstanceValidateDisks(t *testing.T) {
ctx := context.Background()
w := testWorkflow()
p := "p"
z := "z"
disks[w].m = map[string]*resource{"d": {link: fmt.Sprintf("projects/%s/zones/%s/disks/d", p, z)}}
m := "READ_WRITE"
tests := []struct {
desc string
ci *CreateInstance
shouldErr bool
}{
{"good case", &CreateInstance{Instance: compute.Instance{Name: "foo", Disks: []*compute.AttachedDisk{{Source: "d", Mode: m}}}, Project: p, Zone: z}, false},
{"good case 2", &CreateInstance{Instance: compute.Instance{Name: "foo", Disks: []*compute.AttachedDisk{{Source: "projects/p/zones/z/disks/d", Mode: m}}}, Project: p, Zone: z}, false},
{"no disks case", &CreateInstance{Instance: compute.Instance{Name: "foo"}}, true},
{"disk dne case", &CreateInstance{Instance: compute.Instance{Name: "foo", Disks: []*compute.AttachedDisk{{Source: "dne", Mode: m}}}}, true},
{"bad project case", &CreateInstance{Instance: compute.Instance{Name: "foo", Disks: []*compute.AttachedDisk{{Source: "projects/p2/zones/z/disks/d", Mode: m}}}, Project: p, Zone: z}, true},
{"bad zone case", &CreateInstance{Instance: compute.Instance{Name: "foo", Disks: []*compute.AttachedDisk{{Source: "zones/z2/disks/d", Mode: m}}}, Project: p, Zone: z}, true},
{"bad disk mode case", &CreateInstance{Instance: compute.Instance{Name: "foo", Disks: []*compute.AttachedDisk{{Source: "d", Mode: "bad mode!"}}}, Project: p, Zone: z}, true},
}
for _, tt := range tests {
s := &Step{w: w, CreateInstances: &CreateInstances{tt.ci}}
if err := tt.ci.validateDisks(ctx, s); tt.shouldErr && err == nil {
t.Errorf("%s: should have returned an error", tt.desc)
} else if !tt.shouldErr && err != nil {
t.Errorf("%s: unexpected error: %v", tt.desc, err)
}
}
}
func TestCreateInstanceValidateMachineType(t *testing.T) {
p := "project"
z := "zone"
tests := []struct {
desc string
mt string
shouldErr bool
}{
{"good case", fmt.Sprintf("projects/%s/zones/%s/machineTypes/mt", p, z), false},
{"good case 2", fmt.Sprintf("zones/%s/machineTypes/mt", z), false},
{"bad machine type case", "bad machine type!", true},
{"bad project case", fmt.Sprintf("projects/p2/zones/%s/machineTypes/mt", z), true},
{"bad zone case", fmt.Sprintf("projects/%s/zones/z2/machineTypes/mt", p), true},
{"bad zone case 2", "zones/z2/machineTypes/mt", true},
}
for _, tt := range tests {
ci := &CreateInstance{Instance: compute.Instance{MachineType: tt.mt}, Project: p, Zone: z}
if err := ci.validateMachineType(); tt.shouldErr && err == nil {
t.Errorf("%s: should have returned an error", tt.desc)
} else if !tt.shouldErr && err != nil {
t.Errorf("%s: unexpected error: %v", tt.desc, err)
}
}
}
func TestCreateInstanceValidateNetworks(t *testing.T) {
acs := []*compute.AccessConfig{{Type: "ONE_TO_ONE_NAT"}}
tests := []struct {
desc string
nis []*compute.NetworkInterface
shouldErr bool
}{
{"good case", []*compute.NetworkInterface{{Network: "global/networks/n", AccessConfigs: acs}}, false},
{"good case 2", []*compute.NetworkInterface{{Network: "projects/p/global/networks/n", AccessConfigs: acs}}, false},
{"bad name case", []*compute.NetworkInterface{{Network: "global/networks/bad!", AccessConfigs: acs}}, true},
{"bad project case", []*compute.NetworkInterface{{Network: "projects/bad-project/global/networks/n", AccessConfigs: acs}}, true},
}
for _, tt := range tests {
ci := &CreateInstance{Instance: compute.Instance{NetworkInterfaces: tt.nis}, Project: "p"}
if err := ci.validateNetworks(); tt.shouldErr && err == nil {
t.Errorf("%s: should have returned an error", tt.desc)
} else if !tt.shouldErr && err != nil {
t.Errorf("%s: unexpected error: %v", tt.desc, err)
}
}
}
func TestCreateInstancesValidate(t *testing.T) {
ctx := context.Background()
w := testWorkflow()
p := "p"
z := "z"
ad := []*compute.AttachedDisk{{Source: "d", Mode: "READ_WRITE"}}
mt := fmt.Sprintf("projects/%s/zones/%s/machineTypes/mt", p, z)
dCreator := &Step{name: "dCreator", w: w}
w.Steps["dCreator"] = dCreator
disks[w].registerCreation("d", &resource{link: fmt.Sprintf("projects/%s/zones/%s/disks/d", p, z)}, dCreator)
tests := []struct {
desc string
input *CreateInstance
shouldErr bool
}{
{"normal case", &CreateInstance{Instance: compute.Instance{Name: "foo", Disks: ad, MachineType: mt}, Project: p, Zone: z}, false},
{"bad dupe case", &CreateInstance{Instance: compute.Instance{Name: "foo", Disks: ad, MachineType: mt}, Project: p, Zone: z}, true},
{"bad name case", &CreateInstance{Instance: compute.Instance{Name: "bad!", Disks: ad, MachineType: mt}, Project: p, Zone: z}, true},
{"bad project case", &CreateInstance{Instance: compute.Instance{Name: "bar", Disks: ad, MachineType: mt}, Project: "bad!", Zone: z}, true},
{"bad zone case", &CreateInstance{Instance: compute.Instance{Name: "baz", Disks: ad, MachineType: mt}, Project: p, Zone: "bad!"}, true},
{"machine type validation fails case", &CreateInstance{Instance: compute.Instance{Name: "gaz", Disks: ad, MachineType: "bad machine type!"}, Project: p, Zone: z, daisyName: "gaz"}, true},
}
for _, tt := range tests {
s := &Step{name: tt.desc, w: w, CreateInstances: &CreateInstances{tt.input}}
w.Steps[tt.desc] = s
w.Dependencies[tt.desc] = []string{"dCreator"}
if err := s.CreateInstances.validate(ctx, s); tt.shouldErr && err == nil {
t.Errorf("%s: should have returned an error", tt.desc)
} else if !tt.shouldErr && err != nil {
t.Errorf("%s: unexpected error: %v", tt.desc, err)
}
}
}
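// Editor's note: the review comment for this change suggests faking the compute
// test client's getter methods rather than stubbing raw HTTP responses. The
// stand-alone sketch below shows the general "function field" fake pattern that
// this file already uses for CreateInstanceFn; the type and field names here
// are hypothetical and are not the real daisyCompute.TestClient API.
type fakeMachineTypeGetter struct {
	GetMachineTypeFn func(project, zone, name string) (string, error)
}

func (f *fakeMachineTypeGetter) GetMachineType(project, zone, name string) (string, error) {
	if f.GetMachineTypeFn != nil {
		return f.GetMachineTypeFn(project, zone, name) // test-provided behaviour
	}
	return name, nil // default canned response
}

func demoFakeMachineTypeGetter() error {
	f := &fakeMachineTypeGetter{
		GetMachineTypeFn: func(project, zone, name string) (string, error) {
			if name != "mt" {
				return "", fmt.Errorf("no machine type %q in zone %q", name, zone)
			}
			return name, nil
		},
	}
	_, err := f.GetMachineType("project", "zone", "bad-mt")
	return err // non-nil: the fake rejected the unknown machine type
}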
| 1 | 6,661 | Consider faking the TestClient's GetProject/GetZone/GetMachine methods. | GoogleCloudPlatform-compute-image-tools | go |
@@ -88,13 +88,15 @@ func AddHeader(key, value string) OutboundOption {
}
}
-// NewOutbound builds an HTTP outbound which sends requests to peers supplied
+// NewOutbound builds an HTTP outbound that sends requests to peers supplied
// by the given peer.Chooser. The URL template for used for the different
// peers may be customized using the URLTemplate option.
//
-// Peer Choosers used with the HTTP outbound MUST yield *hostport.Peer
-// objects. Also note that the Chooser MUST have started before Outbound.Start
-// is called.
+// The peer chooser and outbound must share the same transport, in this case
+// the HTTP transport.
+// The peer chooser must use the transport's RetainPeer to obtain peer
+// instances and return those peers to the outbound when it calls Choose.
+// The concrete peer type is private and intrinsic to the HTTP transport.
func (t *Transport) NewOutbound(chooser peer.Chooser, opts ...OutboundOption) *Outbound {
o := &Outbound{
once: sync.Once(), | 1 | // Copyright (c) 2017 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package http
import (
"context"
"fmt"
"io/ioutil"
"log"
"net/http"
"net/url"
"strings"
"time"
"go.uber.org/yarpc/api/peer"
"go.uber.org/yarpc/api/transport"
"go.uber.org/yarpc/internal/errors"
"go.uber.org/yarpc/internal/introspection"
"go.uber.org/yarpc/internal/sync"
peerchooser "go.uber.org/yarpc/peer"
"go.uber.org/yarpc/peer/hostport"
"github.com/opentracing/opentracing-go"
"github.com/opentracing/opentracing-go/ext"
)
// this ensures the HTTP outbound implements both transport.Outbound interfaces
var (
_ transport.UnaryOutbound = (*Outbound)(nil)
_ transport.OnewayOutbound = (*Outbound)(nil)
_ introspection.IntrospectableOutbound = (*Outbound)(nil)
)
var defaultURLTemplate, _ = url.Parse("http://localhost")
// OutboundOption customizes an HTTP Outbound.
type OutboundOption func(*Outbound)
func (OutboundOption) httpOption() {}
// URLTemplate specifies the URL this outbound makes requests to. For
// peer.Chooser-based outbounds, the peer (host:port) section of the URL may
// vary from call to call but the rest will remain unchanged. For single-peer
// outbounds, the URL will be used as-is.
func URLTemplate(template string) OutboundOption {
return func(o *Outbound) {
o.setURLTemplate(template)
}
}
// AddHeader specifies that an HTTP outbound should always include the given
// header in outgoing requests.
//
// httpTransport.NewOutbound(chooser, http.AddHeader("X-Token", "TOKEN"))
//
// Note that headers starting with "Rpc-" are reserved by YARPC. This function
// will panic if the header starts with "Rpc-".
func AddHeader(key, value string) OutboundOption {
if strings.HasPrefix(strings.ToLower(key), "rpc-") {
panic(fmt.Errorf(
"invalid header name %q: "+
`headers starting with "Rpc-" are reserved by YARPC`, key))
}
return func(o *Outbound) {
if o.headers == nil {
o.headers = make(http.Header)
}
o.headers.Add(key, value)
}
}
// NewOutbound builds an HTTP outbound which sends requests to peers supplied
// by the given peer.Chooser. The URL template for used for the different
// peers may be customized using the URLTemplate option.
//
// Peer Choosers used with the HTTP outbound MUST yield *hostport.Peer
// objects. Also note that the Chooser MUST have started before Outbound.Start
// is called.
func (t *Transport) NewOutbound(chooser peer.Chooser, opts ...OutboundOption) *Outbound {
o := &Outbound{
once: sync.Once(),
chooser: chooser,
urlTemplate: defaultURLTemplate,
tracer: t.tracer,
transport: t,
}
for _, opt := range opts {
opt(o)
}
return o
}
// NewOutbound builds an HTTP outbound which sends requests to peers supplied
// by the given peer.Chooser. The URL template for used for the different
// peers may be customized using the URLTemplate option.
//
// Peer Choosers used with the HTTP outbound MUST yield *hostport.Peer
// objects. Also note that the Chooser MUST have started before Outbound.Start
// is called.
func NewOutbound(chooser peer.Chooser, opts ...OutboundOption) *Outbound {
return NewTransport().NewOutbound(chooser, opts...)
}
// NewSingleOutbound builds an outbound which sends YARPC requests over HTTP
// to the specified URL.
//
// The URLTemplate option has no effect in this form.
func (t *Transport) NewSingleOutbound(uri string, opts ...OutboundOption) *Outbound {
parsedURL, err := url.Parse(uri)
if err != nil {
panic(err.Error())
}
chooser := peerchooser.NewSingle(hostport.PeerIdentifier(parsedURL.Host), t)
o := t.NewOutbound(chooser)
for _, opt := range opts {
opt(o)
}
o.setURLTemplate(uri)
return o
}
// Outbound sends YARPC requests over HTTP. It may be constructed using the
// NewOutbound function or the NewOutbound or NewSingleOutbound methods on the
// HTTP Transport. It is recommended that services use a single HTTP transport
// to construct all HTTP outbounds, ensuring efficient sharing of resources
// across the different outbounds.
type Outbound struct {
chooser peer.Chooser
urlTemplate *url.URL
tracer opentracing.Tracer
transport *Transport
// Headers to add to all outgoing requests.
headers http.Header
once sync.LifecycleOnce
}
// setURLTemplate configures an alternate URL template.
// The host:port portion of the URL template gets replaced by the chosen peer's
// identifier for each outbound request.
func (o *Outbound) setURLTemplate(URL string) {
parsedURL, err := url.Parse(URL)
if err != nil {
log.Fatalf("failed to configure HTTP outbound: invalid URL template %q: %s", URL, err)
}
o.urlTemplate = parsedURL
}
// Transports returns the outbound's HTTP transport.
func (o *Outbound) Transports() []transport.Transport {
return []transport.Transport{o.transport}
}
// Chooser returns the outbound's peer chooser.
func (o *Outbound) Chooser() peer.Chooser {
return o.chooser
}
// Start the HTTP outbound
func (o *Outbound) Start() error {
return o.once.Start(o.chooser.Start)
}
// Stop the HTTP outbound
func (o *Outbound) Stop() error {
return o.once.Stop(o.chooser.Stop)
}
// IsRunning returns whether the Outbound is running.
func (o *Outbound) IsRunning() bool {
return o.once.IsRunning()
}
// Call makes an HTTP request
func (o *Outbound) Call(ctx context.Context, treq *transport.Request) (*transport.Response, error) {
if err := o.once.WhenRunning(ctx); err != nil {
return nil, err
}
start := time.Now()
deadline, _ := ctx.Deadline()
ttl := deadline.Sub(start)
return o.call(ctx, treq, start, ttl)
}
// CallOneway makes a oneway request
func (o *Outbound) CallOneway(ctx context.Context, treq *transport.Request) (transport.Ack, error) {
if err := o.once.WhenRunning(ctx); err != nil {
return nil, err
}
start := time.Now()
var ttl time.Duration
_, err := o.call(ctx, treq, start, ttl)
if err != nil {
return nil, err
}
return time.Now(), nil
}
func (o *Outbound) call(ctx context.Context, treq *transport.Request, start time.Time, ttl time.Duration) (*transport.Response, error) {
p, onFinish, err := o.getPeerForRequest(ctx, treq)
if err != nil {
return nil, err
}
resp, err := o.callWithPeer(ctx, treq, start, ttl, p)
// Call the onFinish method right before returning (with the error from call with peer)
onFinish(err)
return resp, err
}
func (o *Outbound) callWithPeer(
ctx context.Context,
treq *transport.Request,
start time.Time,
ttl time.Duration,
p *hostport.Peer,
) (*transport.Response, error) {
req, err := o.createRequest(p, treq)
if err != nil {
return nil, err
}
req.Header = applicationHeaders.ToHTTPHeaders(treq.Headers, nil)
ctx, req, span, err := o.withOpentracingSpan(ctx, req, treq, start)
if err != nil {
return nil, err
}
defer span.Finish()
req = o.withCoreHeaders(req, treq, ttl)
client, err := o.getHTTPClient(p)
if err != nil {
return nil, err
}
response, err := client.Do(req.WithContext(ctx))
if err != nil {
// Workaround borrowed from ctxhttp until
// https://github.com/golang/go/issues/17711 is resolved.
select {
case <-ctx.Done():
err = ctx.Err()
default:
}
span.SetTag("error", true)
span.LogEvent(err.Error())
if err == context.DeadlineExceeded {
end := time.Now()
return nil, errors.ClientTimeoutError(treq.Service, treq.Procedure, end.Sub(start))
}
return nil, err
}
span.SetTag("http.status_code", response.StatusCode)
if response.StatusCode >= 200 && response.StatusCode < 300 {
appHeaders := applicationHeaders.FromHTTPHeaders(
response.Header, transport.NewHeaders())
appError := response.Header.Get(ApplicationStatusHeader) == ApplicationErrorStatus
return &transport.Response{
Headers: appHeaders,
Body: response.Body,
ApplicationError: appError,
}, nil
}
return nil, getErrFromResponse(response)
}
func (o *Outbound) getPeerForRequest(ctx context.Context, treq *transport.Request) (*hostport.Peer, func(error), error) {
p, onFinish, err := o.chooser.Choose(ctx, treq)
if err != nil {
return nil, nil, err
}
hpPeer, ok := p.(*hostport.Peer)
if !ok {
return nil, nil, peer.ErrInvalidPeerConversion{
Peer: p,
ExpectedType: "*hostport.Peer",
}
}
return hpPeer, onFinish, nil
}
func (o *Outbound) createRequest(p *hostport.Peer, treq *transport.Request) (*http.Request, error) {
newURL := *o.urlTemplate
newURL.Host = p.HostPort()
return http.NewRequest("POST", newURL.String(), treq.Body)
}
func (o *Outbound) withOpentracingSpan(ctx context.Context, req *http.Request, treq *transport.Request, start time.Time) (context.Context, *http.Request, opentracing.Span, error) {
// Apply HTTP Context headers for tracing and baggage carried by tracing.
tracer := o.tracer
var parent opentracing.SpanContext // ok to be nil
if parentSpan := opentracing.SpanFromContext(ctx); parentSpan != nil {
parent = parentSpan.Context()
}
span := tracer.StartSpan(
treq.Procedure,
opentracing.StartTime(start),
opentracing.ChildOf(parent),
opentracing.Tags{
"rpc.caller": treq.Caller,
"rpc.service": treq.Service,
"rpc.encoding": treq.Encoding,
"rpc.transport": "http",
},
)
ext.PeerService.Set(span, treq.Service)
ext.SpanKindRPCClient.Set(span)
ext.HTTPUrl.Set(span, req.URL.String())
ctx = opentracing.ContextWithSpan(ctx, span)
err := tracer.Inject(
span.Context(),
opentracing.HTTPHeaders,
opentracing.HTTPHeadersCarrier(req.Header),
)
return ctx, req, span, err
}
func (o *Outbound) withCoreHeaders(req *http.Request, treq *transport.Request, ttl time.Duration) *http.Request {
// Add default headers to all requests.
for k, vs := range o.headers {
for _, v := range vs {
req.Header.Add(k, v)
}
}
req.Header.Set(CallerHeader, treq.Caller)
req.Header.Set(ServiceHeader, treq.Service)
req.Header.Set(ProcedureHeader, treq.Procedure)
if ttl != 0 {
req.Header.Set(TTLMSHeader, fmt.Sprintf("%d", ttl/time.Millisecond))
}
if treq.ShardKey != "" {
req.Header.Set(ShardKeyHeader, treq.ShardKey)
}
if treq.RoutingKey != "" {
req.Header.Set(RoutingKeyHeader, treq.RoutingKey)
}
if treq.RoutingDelegate != "" {
req.Header.Set(RoutingDelegateHeader, treq.RoutingDelegate)
}
encoding := string(treq.Encoding)
if encoding != "" {
req.Header.Set(EncodingHeader, encoding)
}
return req
}
func (o *Outbound) getHTTPClient(p *hostport.Peer) (*http.Client, error) {
t, ok := p.Transport().(*Transport)
if !ok {
return nil, peer.ErrInvalidTransportConversion{
Transport: p.Transport(),
ExpectedType: "*http.Transport",
}
}
return t.client, nil
}
func getErrFromResponse(response *http.Response) error {
// TODO Behavior for 300-range status codes is undefined
contents, err := ioutil.ReadAll(response.Body)
if err != nil {
return err
}
if err := response.Body.Close(); err != nil {
return err
}
// Trim the trailing newline from HTTP error messages
message := strings.TrimSuffix(string(contents), "\n")
if response.StatusCode >= 400 && response.StatusCode < 500 {
return errors.RemoteBadRequestError(message)
}
if response.StatusCode == http.StatusGatewayTimeout {
return errors.RemoteTimeoutError(message)
}
return errors.RemoteUnexpectedError(message)
}
// Introspect returns basic status about this outbound.
func (o *Outbound) Introspect() introspection.OutboundStatus {
state := "Stopped"
if o.IsRunning() {
state = "Running"
}
var chooser introspection.ChooserStatus
if i, ok := o.chooser.(introspection.IntrospectableChooser); ok {
chooser = i.Introspect()
} else {
chooser = introspection.ChooserStatus{
Name: "Introspection not available",
}
}
return introspection.OutboundStatus{
Transport: "http",
Endpoint: o.urlTemplate.String(),
State: state,
Chooser: chooser,
}
}
| 1 | 14,278 | Returning a private type as part of the public API? Isn't this impossible for users to implement? | yarpc-yarpc-go | go |
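A rough illustration (with made-up names) of the pattern this comment questions: the constructor returns an unexported concrete type behind an exported interface, so callers can use peers but cannot construct or implement the concrete type themselves; that is the trade-off being pointed out:

package transportsketch

// Peer is the exported abstraction callers program against.
type Peer interface {
	Identifier() string
}

// hostPortPeer is unexported; only this package can create instances.
type hostPortPeer struct {
	addr string
}

func (p *hostPortPeer) Identifier() string { return p.addr }

// RetainPeer vends the private implementation through the public interface,
// mirroring how a transport would hand peers to a chooser.
func RetainPeer(addr string) Peer {
	return &hostPortPeer{addr: addr}
}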
@@ -422,9 +422,9 @@ std::string MolToSmiles(const ROMol &mol, bool doIsomericSmiles, bool doKekule,
if (canonical) {
if (tmol->hasProp("_canonicalRankingNumbers")) {
for (unsigned int i = 0; i < tmol->getNumAtoms(); ++i) {
- unsigned int rankNum;
- tmol->getAtomWithIdx(i)
- ->getPropIfPresent("_canonicalRankingNumber", rankNum);
+ unsigned int rankNum = 0;
+ tmol->getAtomWithIdx(i)->getPropIfPresent("_canonicalRankingNumber",
+ rankNum);
ranks[i] = rankNum;
}
} else { | 1 | // $Id$
//
// Copyright (C) 2002-2012 Greg Landrum and Rational Discovery LLC
//
// @@ All Rights Reserved @@
// This file is part of the RDKit.
// The contents are covered by the terms of the BSD license
// which is included in the file license.txt, found at the root
// of the RDKit source tree.
//
#include "SmilesWrite.h"
#include <GraphMol/RDKitBase.h>
#include <RDGeneral/types.h>
#include <GraphMol/Canon.h>
#include <GraphMol/new_canon.h>
#include <boost/lexical_cast.hpp>
#include <boost/foreach.hpp>
#include <boost/dynamic_bitset.hpp>
#include <sstream>
#include <map>
#include <list>
//#define VERBOSE_CANON 1
namespace RDKit {
namespace SmilesWrite {
const int atomicSmiles[] = {5, 6, 7, 8, 9, 15, 16, 17, 35, 53, -1};
bool inOrganicSubset(int atomicNumber) {
unsigned int idx = 0;
while (atomicSmiles[idx] < atomicNumber && atomicSmiles[idx] != -1) {
++idx;
}
if (atomicSmiles[idx] == atomicNumber) {
return true;
}
return false;
}
std::string GetAtomSmiles(const Atom *atom, bool doKekule, const Bond *bondIn,
bool allHsExplicit) {
RDUNUSED_PARAM(bondIn);
PRECONDITION(atom, "bad atom");
INT_VECT atomicSmilesVect(
atomicSmiles,
atomicSmiles + (sizeof(atomicSmiles) - 1) / sizeof(atomicSmiles[0]));
std::string res;
int fc = atom->getFormalCharge();
int num = atom->getAtomicNum();
int isotope = atom->getIsotope();
bool needsBracket = false;
std::string symb;
if (!atom->getPropIfPresent(common_properties::smilesSymbol, symb)) {
symb = PeriodicTable::getTable()->getElementSymbol(num);
}
// symb = atom->getSymbol();
std::string atString = "";
if (!allHsExplicit && inOrganicSubset(num)) {
// it's a member of the organic subset
// if(!doKekule && atom->getIsAromatic() && symb[0] < 'a') symb[0] -=
// ('A'-'a');
// -----
// figure out if we need to put a bracket around the atom,
// the conditions for this are:
// - formal charge specified
// - the atom has a nonstandard valence
// - chirality present and writing isomeric smiles
// - non-default isotope and writing isomeric smiles
// - atom-map information present
const INT_VECT &defaultVs = PeriodicTable::getTable()->getValenceList(num);
int totalValence = atom->getTotalValence();
bool nonStandard = false;
if (atom->getNumRadicalElectrons()) {
nonStandard = true;
} else if ((num == 7 || num == 15) && atom->getIsAromatic() &&
atom->getNumExplicitHs()) {
// another type of "nonstandard" valence is an aromatic N or P with
// explicit Hs indicated:
nonStandard = true;
} else {
nonStandard =
(totalValence != defaultVs.front() && atom->getTotalNumHs());
}
if (fc || nonStandard) {
needsBracket = true;
}
if (atom->getOwningMol().hasProp(common_properties::_doIsoSmiles)) {
if (atom->getChiralTag() != Atom::CHI_UNSPECIFIED &&
!atom->hasProp(common_properties::_brokenChirality)) {
switch (atom->getChiralTag()) {
case Atom::CHI_TETRAHEDRAL_CW:
atString = "@@";
break;
case Atom::CHI_TETRAHEDRAL_CCW:
atString = "@";
break;
default:
break;
}
needsBracket = true;
} else if (isotope) {
needsBracket = true;
}
}
if (atom->hasProp(common_properties::molAtomMapNumber)) {
needsBracket = true;
}
} else {
needsBracket = true;
}
if (needsBracket) res += "[";
if (isotope &&
atom->getOwningMol().hasProp(common_properties::_doIsoSmiles)) {
res += boost::lexical_cast<std::string>(isotope);
}
// this was originally only done for the organic subset,
// applying it to other atom-types is a fix for Issue 3152751:
if (!doKekule && atom->getIsAromatic() && symb[0] >= 'A' && symb[0] <= 'Z') {
symb[0] -= ('A' - 'a');
}
res += symb;
res += atString;
if (needsBracket) {
unsigned int totNumHs = atom->getTotalNumHs();
if (totNumHs > 0) {
res += "H";
if (totNumHs > 1) res += boost::lexical_cast<std::string>(totNumHs);
}
if (fc > 0) {
res += "+";
if (fc > 1) res += boost::lexical_cast<std::string>(fc);
} else if (fc < 0) {
if (fc < -1)
res += boost::lexical_cast<std::string>(fc);
else
res += "-";
}
int mapNum;
if (atom->getPropIfPresent(common_properties::molAtomMapNumber, mapNum)) {
res += ":";
res += boost::lexical_cast<std::string>(mapNum);
}
res += "]";
}
// If the atom has this property, the contained string will
// be inserted directly in the SMILES:
std::string label;
if (atom->getPropIfPresent(common_properties::_supplementalSmilesLabel,
label)) {
res += label;
}
return res;
}
std::string GetBondSmiles(const Bond *bond, int atomToLeftIdx, bool doKekule,
bool allBondsExplicit) {
PRECONDITION(bond, "bad bond");
if (atomToLeftIdx < 0) atomToLeftIdx = bond->getBeginAtomIdx();
std::string res = "";
bool aromatic = false;
if (!doKekule && (bond->getBondType() == Bond::SINGLE ||
bond->getBondType() == Bond::DOUBLE ||
bond->getBondType() == Bond::AROMATIC)) {
Atom *a1, *a2;
a1 = bond->getOwningMol().getAtomWithIdx(atomToLeftIdx);
a2 = bond->getOwningMol().getAtomWithIdx(
bond->getOtherAtomIdx(atomToLeftIdx));
if ((a1->getIsAromatic() && a2->getIsAromatic()) &&
(a1->getAtomicNum() || a2->getAtomicNum()))
aromatic = true;
}
Bond::BondDir dir = bond->getBondDir();
if (bond->hasProp(common_properties::_TraversalRingClosureBond)) {
// std::cerr<<"FLIP: "<<bond->getIdx()<<"
// "<<bond->getBeginAtomIdx()<<"-"<<bond->getEndAtomIdx()<<std::endl;
// if(dir==Bond::ENDDOWNRIGHT) dir=Bond::ENDUPRIGHT;
// else if(dir==Bond::ENDUPRIGHT) dir=Bond::ENDDOWNRIGHT;
bond->clearProp(common_properties::_TraversalRingClosureBond);
}
switch (bond->getBondType()) {
case Bond::SINGLE:
if (dir != Bond::NONE && dir != Bond::UNKNOWN) {
switch (dir) {
case Bond::ENDDOWNRIGHT:
if (bond->getOwningMol().hasProp(common_properties::_doIsoSmiles))
res = "\\";
break;
case Bond::ENDUPRIGHT:
if (bond->getOwningMol().hasProp(common_properties::_doIsoSmiles))
res = "/";
break;
default:
break;
}
} else {
// if the bond is marked as aromatic and the two atoms
// are aromatic, we need no marker (this arises in kekulized
// molecules).
// FIX: we should be able to dump kekulized smiles
// currently this is possible by removing all
// isAromatic flags, but there should maybe be another way
if (allBondsExplicit)
res = "-";
else if (aromatic && !bond->getIsAromatic())
res = "-";
}
break;
case Bond::DOUBLE:
// see note above
if (!aromatic || !bond->getIsAromatic()) res = "=";
break;
case Bond::TRIPLE:
res = "#";
break;
case Bond::AROMATIC:
if (dir != Bond::NONE && dir != Bond::UNKNOWN) {
switch (dir) {
case Bond::ENDDOWNRIGHT:
if (bond->getOwningMol().hasProp(common_properties::_doIsoSmiles))
res = "\\";
break;
case Bond::ENDUPRIGHT:
if (bond->getOwningMol().hasProp(common_properties::_doIsoSmiles))
res = "/";
break;
default:
break;
}
} else if (allBondsExplicit || !aromatic) {
res = ":";
}
break;
case Bond::DATIVE:
if (atomToLeftIdx >= 0 &&
bond->getBeginAtomIdx() == static_cast<unsigned int>(atomToLeftIdx))
res = ">";
else
res = "<";
break;
default:
res = "~";
}
return res;
}
std::string FragmentSmilesConstruct(
ROMol &mol, int atomIdx, std::vector<Canon::AtomColors> &colors,
const UINT_VECT &ranks, bool doKekule, bool canonical,
bool doIsomericSmiles, bool allBondsExplicit, bool allHsExplicit,
std::vector<unsigned int> &atomOrdering,
const boost::dynamic_bitset<> *bondsInPlay = 0,
const std::vector<std::string> *atomSymbols = 0,
const std::vector<std::string> *bondSymbols = 0) {
PRECONDITION(!bondsInPlay || bondsInPlay->size() >= mol.getNumBonds(),
"bad bondsInPlay");
PRECONDITION(!atomSymbols || atomSymbols->size() >= mol.getNumAtoms(),
"bad atomSymbols");
PRECONDITION(!bondSymbols || bondSymbols->size() >= mol.getNumBonds(),
"bad bondSymbols");
Canon::MolStack molStack;
// try to prevent excessive reallocation
molStack.reserve(mol.getNumAtoms() + mol.getNumBonds());
std::stringstream res;
std::map<int, int> ringClosureMap;
int ringIdx, closureVal;
if (!canonical) mol.setProp(common_properties::_StereochemDone, 1);
std::list<unsigned int> ringClosuresToErase;
Canon::canonicalizeFragment(mol, atomIdx, colors, ranks, molStack,
bondsInPlay, bondSymbols, doIsomericSmiles);
Bond *bond = 0;
BOOST_FOREACH (Canon::MolStackElem mSE, molStack) {
switch (mSE.type) {
case Canon::MOL_STACK_ATOM:
if (!ringClosuresToErase.empty()) {
BOOST_FOREACH (unsigned int rclosure, ringClosuresToErase) {
ringClosureMap.erase(rclosure);
}
ringClosuresToErase.clear();
}
// std::cout<<"\t\tAtom: "<<mSE.obj.atom->getIdx()<<std::endl;
if (!atomSymbols) {
res << GetAtomSmiles(mSE.obj.atom, doKekule, bond, allHsExplicit);
} else {
res << (*atomSymbols)[mSE.obj.atom->getIdx()];
}
atomOrdering.push_back(mSE.obj.atom->getIdx());
break;
case Canon::MOL_STACK_BOND:
bond = mSE.obj.bond;
// std::cout<<"\t\tBond: "<<bond->getIdx()<<std::endl;
if (!bondSymbols) {
res << GetBondSmiles(bond, mSE.number, doKekule, allBondsExplicit);
} else {
res << (*bondSymbols)[bond->getIdx()];
}
break;
case Canon::MOL_STACK_RING:
ringIdx = mSE.number;
// std::cout<<"\t\tRing: "<<ringIdx;
if (ringClosureMap.count(ringIdx)) {
// the index is already in the map ->
// we're closing a ring, so grab
// the index and then delete the value:
closureVal = ringClosureMap[ringIdx];
// ringClosureMap.erase(ringIdx);
ringClosuresToErase.push_back(ringIdx);
} else {
// we're opening a new ring, find the index for it:
closureVal = 1;
bool done = false;
// EFF: there's got to be a more efficient way to do this
while (!done) {
std::map<int, int>::iterator mapIt;
for (mapIt = ringClosureMap.begin(); mapIt != ringClosureMap.end();
mapIt++) {
if (mapIt->second == closureVal) break;
}
if (mapIt == ringClosureMap.end()) {
done = true;
} else {
closureVal += 1;
}
}
ringClosureMap[ringIdx] = closureVal;
}
if (closureVal >= 10) {
res << "%";
}
// std::cerr << " > " << closureVal <<std::endl;
res << closureVal;
break;
case Canon::MOL_STACK_BRANCH_OPEN:
res << "(";
break;
case Canon::MOL_STACK_BRANCH_CLOSE:
res << ")";
break;
default:
break;
}
}
return res.str();
}
} // end of namespace SmilesWrite
static bool SortBasedOnFirstElement(
const std::pair<std::string, std::vector<unsigned int> > &a,
const std::pair<std::string, std::vector<unsigned int> > &b) {
return a.first < b.first;
}
std::string MolToSmiles(const ROMol &mol, bool doIsomericSmiles, bool doKekule,
int rootedAtAtom, bool canonical, bool allBondsExplicit,
bool allHsExplicit) {
if (!mol.getNumAtoms()) return "";
PRECONDITION(rootedAtAtom < 0 ||
static_cast<unsigned int>(rootedAtAtom) < mol.getNumAtoms(),
"rootedAtomAtom must be less than the number of atoms");
std::vector<std::vector<int> > fragsMolAtomMapping;
std::vector<ROMOL_SPTR> mols =
MolOps::getMolFrags(mol, false, NULL, &fragsMolAtomMapping, false);
std::vector<std::string> vfragsmi;
// for(unsigned i=0; i<fragsMolAtomMapping.size(); i++){
// std::cout << i << ": ";
// for(unsigned j=0; j<fragsMolAtomMapping[i].size(); j++){
// std::cout << j <<"("<<fragsMolAtomMapping[i][j]<<") ";
// }
// std::cout << std::endl;
// }
std::vector<std::vector<RDKit::UINT> > allAtomOrdering;
for (unsigned i = 0; i < mols.size(); i++) {
ROMol *tmol = mols[i].get();
// update property cache
for (ROMol::AtomIterator atomIt = tmol->beginAtoms();
atomIt != tmol->endAtoms(); ++atomIt) {
(*atomIt)->updatePropertyCache(false);
}
// clean up the chirality on any atom that is marked as chiral,
// but that should not be:
if (doIsomericSmiles) {
tmol->setProp(common_properties::_doIsoSmiles, 1);
if (!mol.hasProp(common_properties::_StereochemDone)) {
MolOps::assignStereochemistry(*tmol, true);
}
}
#if 0
std::cout << "----------------------------" << std::endl;
std::cout << "MolToSmiles:"<< std::endl;
tmol->debugMol(std::cout);
std::cout << "----------------------------" << std::endl;
#endif
std::string res;
unsigned int nAtoms = tmol->getNumAtoms();
UINT_VECT ranks(nAtoms);
std::vector<unsigned int> atomOrdering;
if (canonical) {
if (tmol->hasProp("_canonicalRankingNumbers")) {
for (unsigned int i = 0; i < tmol->getNumAtoms(); ++i) {
unsigned int rankNum;
tmol->getAtomWithIdx(i)
->getPropIfPresent("_canonicalRankingNumber", rankNum);
ranks[i] = rankNum;
}
} else {
Canon::rankMolAtoms(*tmol, ranks, true, doIsomericSmiles,
doIsomericSmiles);
}
} else {
for (unsigned int i = 0; i < tmol->getNumAtoms(); ++i) ranks[i] = i;
}
#ifdef VERBOSE_CANON
for (unsigned int tmpI = 0; tmpI < ranks.size(); tmpI++) {
std::cout << tmpI << " " << ranks[tmpI] << " "
<< *(tmol.getAtomWithIdx(tmpI)) << std::endl;
}
#endif
std::vector<Canon::AtomColors> colors(nAtoms, Canon::WHITE_NODE);
std::vector<Canon::AtomColors>::iterator colorIt;
colorIt = colors.begin();
// loop to deal with the possibility that there might be disconnected
// fragments
while (colorIt != colors.end()) {
int nextAtomIdx = -1;
std::string subSmi;
// find the next atom for a traverse
if (rootedAtAtom >= 0) {
nextAtomIdx = rootedAtAtom;
rootedAtAtom = -1;
} else {
unsigned int nextRank = nAtoms + 1;
for (unsigned int i = 0; i < nAtoms; i++) {
if (colors[i] == Canon::WHITE_NODE && ranks[i] < nextRank) {
nextRank = ranks[i];
nextAtomIdx = i;
}
}
}
CHECK_INVARIANT(nextAtomIdx >= 0, "no start atom found");
subSmi = SmilesWrite::FragmentSmilesConstruct(
*tmol, nextAtomIdx, colors, ranks, doKekule, canonical,
doIsomericSmiles, allBondsExplicit, allHsExplicit, atomOrdering);
res += subSmi;
colorIt = std::find(colors.begin(), colors.end(), Canon::WHITE_NODE);
if (colorIt != colors.end()) {
res += ".";
}
}
vfragsmi.push_back(res);
for (std::vector<RDKit::UINT>::iterator vit = atomOrdering.begin();
vit != atomOrdering.end(); ++vit) {
*vit = fragsMolAtomMapping[i][*vit]; // Lookup the Id in the original
// molecule
}
allAtomOrdering.push_back(atomOrdering);
}
std::string result;
std::vector<unsigned int> flattenedAtomOrdering;
if (canonical) {
// Sort the vfragsmi, but also sort the atom order vectors into the same
// order
typedef std::pair<std::string, std::vector<unsigned int> > PairStrAndVec;
std::vector<PairStrAndVec> tmp(vfragsmi.size());
for (unsigned int ti = 0; ti < vfragsmi.size(); ++ti)
tmp[ti] = PairStrAndVec(vfragsmi[ti], allAtomOrdering[ti]);
std::sort(tmp.begin(), tmp.end(), SortBasedOnFirstElement);
for (unsigned int ti = 0; ti < vfragsmi.size(); ++ti) {
result += tmp[ti].first;
if (ti < vfragsmi.size() - 1) result += ".";
flattenedAtomOrdering.insert(flattenedAtomOrdering.end(),
tmp[ti].second.begin(),
tmp[ti].second.end());
}
} else { // Not canonical
for (unsigned int i = 0; i < allAtomOrdering.size(); ++i)
flattenedAtomOrdering.insert(flattenedAtomOrdering.end(),
allAtomOrdering[i].begin(),
allAtomOrdering[i].end());
for (unsigned i = 0; i < vfragsmi.size(); ++i) {
result += vfragsmi[i];
if (i < vfragsmi.size() - 1) {
result += ".";
}
}
}
mol.setProp(common_properties::_smilesAtomOutputOrder, flattenedAtomOrdering,
true);
return result;
} // end of MolToSmiles()
std::string MolFragmentToSmiles(const ROMol &mol,
const std::vector<int> &atomsToUse,
const std::vector<int> *bondsToUse,
const std::vector<std::string> *atomSymbols,
const std::vector<std::string> *bondSymbols,
bool doIsomericSmiles, bool doKekule,
int rootedAtAtom, bool canonical,
bool allBondsExplicit, bool allHsExplicit) {
PRECONDITION(atomsToUse.size(), "no atoms provided");
PRECONDITION(rootedAtAtom < 0 ||
static_cast<unsigned int>(rootedAtAtom) < mol.getNumAtoms(),
"rootedAtomAtom must be less than the number of atoms");
PRECONDITION(rootedAtAtom < 0 ||
std::find(atomsToUse.begin(), atomsToUse.end(),
rootedAtAtom) != atomsToUse.end(),
"rootedAtomAtom not found in atomsToUse");
PRECONDITION(!atomSymbols || atomSymbols->size() >= mol.getNumAtoms(),
"bad atomSymbols vector");
PRECONDITION(!bondSymbols || bondSymbols->size() >= mol.getNumBonds(),
"bad bondSymbols vector");
if (!mol.getNumAtoms()) return "";
ROMol tmol(mol, true);
if (doIsomericSmiles) {
tmol.setProp(common_properties::_doIsoSmiles, 1);
}
std::string res;
boost::dynamic_bitset<> atomsInPlay(mol.getNumAtoms(), 0);
BOOST_FOREACH (int aidx, atomsToUse) { atomsInPlay.set(aidx); }
// figure out which bonds are actually in play:
boost::dynamic_bitset<> bondsInPlay(mol.getNumBonds(), 0);
if (bondsToUse) {
BOOST_FOREACH (int bidx, *bondsToUse) { bondsInPlay.set(bidx); }
} else {
BOOST_FOREACH (int aidx, atomsToUse) {
ROMol::OEDGE_ITER beg, end;
boost::tie(beg, end) = mol.getAtomBonds(mol.getAtomWithIdx(aidx));
while (beg != end) {
const BOND_SPTR bond = mol[*beg];
if (atomsInPlay[bond->getOtherAtomIdx(aidx)])
bondsInPlay.set(bond->getIdx());
++beg;
}
}
}
// copy over the rings that only involve atoms/bonds in this fragment:
if (mol.getRingInfo()->isInitialized()) {
tmol.getRingInfo()->reset();
tmol.getRingInfo()->initialize();
for (unsigned int ridx = 0; ridx < mol.getRingInfo()->numRings(); ++ridx) {
      const INT_VECT &aring = mol.getRingInfo()->atomRings()[ridx];
const INT_VECT &bring = mol.getRingInfo()->bondRings()[ridx];
bool keepIt = true;
BOOST_FOREACH (int aidx, aring) {
if (!atomsInPlay[aidx]) {
keepIt = false;
break;
}
}
if (keepIt) {
BOOST_FOREACH (int bidx, bring) {
if (!bondsInPlay[bidx]) {
keepIt = false;
break;
}
}
}
if (keepIt) {
tmol.getRingInfo()->addRing(aring, bring);
}
}
}
if (tmol.needsUpdatePropertyCache()) {
for (ROMol::AtomIterator atIt = tmol.beginAtoms(); atIt != tmol.endAtoms();
atIt++) {
(*atIt)->updatePropertyCache(false);
}
}
UINT_VECT ranks(tmol.getNumAtoms());
std::vector<unsigned int> atomOrdering;
// clean up the chirality on any atom that is marked as chiral,
// but that should not be:
if (doIsomericSmiles) {
if (!mol.hasProp(common_properties::_StereochemDone)) {
MolOps::assignStereochemistry(tmol, true);
} else {
tmol.setProp(common_properties::_StereochemDone, 1);
// we need the CIP codes:
BOOST_FOREACH (int aidx, atomsToUse) {
const Atom *oAt = mol.getAtomWithIdx(aidx);
std::string cipCode;
if (oAt->getPropIfPresent(common_properties::_CIPCode, cipCode)) {
tmol.getAtomWithIdx(aidx)
->setProp(common_properties::_CIPCode, cipCode);
}
}
}
}
if (canonical) {
Canon::rankFragmentAtoms(tmol, ranks, atomsInPlay, bondsInPlay, atomSymbols,
true, doIsomericSmiles, doIsomericSmiles);
// MolOps::rankAtomsInFragment(tmol,ranks,atomsInPlay,bondsInPlay,atomSymbols,bondSymbols);
} else {
for (unsigned int i = 0; i < tmol.getNumAtoms(); ++i) ranks[i] = i;
}
#ifdef VERBOSE_CANON
for (unsigned int tmpI = 0; tmpI < ranks.size(); tmpI++) {
std::cout << tmpI << " " << ranks[tmpI] << " "
<< *(tmol.getAtomWithIdx(tmpI)) << std::endl;
}
#endif
std::vector<Canon::AtomColors> colors(tmol.getNumAtoms(), Canon::BLACK_NODE);
BOOST_FOREACH (int aidx, atomsToUse) { colors[aidx] = Canon::WHITE_NODE; }
std::vector<Canon::AtomColors>::iterator colorIt;
colorIt = colors.begin();
// loop to deal with the possibility that there might be disconnected
// fragments
while (colorIt != colors.end()) {
int nextAtomIdx = -1;
std::string subSmi;
// find the next atom for a traverse
if (rootedAtAtom >= 0) {
nextAtomIdx = rootedAtAtom;
rootedAtAtom = -1;
} else {
unsigned int nextRank = rdcast<unsigned int>(tmol.getNumAtoms()) + 1;
BOOST_FOREACH (int i, atomsToUse) {
if (colors[i] == Canon::WHITE_NODE && ranks[i] < nextRank) {
nextRank = ranks[i];
nextAtomIdx = i;
}
}
}
CHECK_INVARIANT(nextAtomIdx >= 0, "no start atom found");
subSmi = SmilesWrite::FragmentSmilesConstruct(
tmol, nextAtomIdx, colors, ranks, doKekule, canonical, doIsomericSmiles,
allBondsExplicit, allHsExplicit, atomOrdering, &bondsInPlay,
atomSymbols, bondSymbols);
res += subSmi;
colorIt = std::find(colors.begin(), colors.end(), Canon::WHITE_NODE);
if (colorIt != colors.end()) {
res += ".";
}
}
mol.setProp(common_properties::_smilesAtomOutputOrder, atomOrdering, true);
return res;
} // end of MolFragmentToSmiles()
}
| 1 | 14,855 | prefer common_properties::_canonicalRankingNumber if possible, should be quicker in general. | rdkit-rdkit | cpp |
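A small sketch of what the suggestion above amounts to, assuming a _canonicalRankingNumber constant exists (or is added) alongside the other common_properties names; reusing an interned std::string avoids building a temporary string for every atom in the loop:

#include <string>

// Hypothetical stand-in for a common_properties declaration; in RDKit such
// constants live with the other property-name strings.
namespace common_properties_sketch {
const std::string _canonicalRankingNumber = "_canonicalRankingNumber";
}

// The loop above would then pass the shared constant instead of a literal:
//   tmol->getAtomWithIdx(i)->getPropIfPresent(
//       common_properties_sketch::_canonicalRankingNumber, rankNum);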
@@ -52,6 +52,8 @@ class AnchorHead(nn.Module):
loss_weight=1.0),
loss_bbox=dict(
type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0),
+ loss_normalizer=-1,
+ loss_normalizer_momentum=0.9,
train_cfg=None,
test_cfg=None):
super(AnchorHead, self).__init__() | 1 | import torch
import torch.nn as nn
from mmcv.cnn import normal_init
from mmdet.core import (anchor_inside_flags, build_anchor_generator,
build_assigner, build_bbox_coder, build_sampler,
force_fp32, images_to_levels, multi_apply,
multiclass_nms, unmap)
from ..builder import HEADS, build_loss
@HEADS.register_module()
class AnchorHead(nn.Module):
"""Anchor-based head (RPN, RetinaNet, SSD, etc.).
Args:
num_classes (int): Number of categories excluding the background
category.
in_channels (int): Number of channels in the input feature map.
feat_channels (int): Number of hidden channels. Used in child classes.
anchor_generator (dict): Config dict for anchor generator
bbox_coder (dict): Config of bounding box coder.
reg_decoded_bbox (bool): If true, the regression loss would be
applied on decoded bounding boxes. Default: False
background_label (int | None): Label ID of background, set as 0 for
RPN and num_classes for other heads. It will automatically set as
num_classes if None is given.
loss_cls (dict): Config of classification loss.
loss_bbox (dict): Config of localization loss.
train_cfg (dict): Training config of anchor head.
test_cfg (dict): Testing config of anchor head.
""" # noqa: W605
def __init__(self,
num_classes,
in_channels,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
scales=[8, 16, 32],
ratios=[0.5, 1.0, 2.0],
strides=[4, 8, 16, 32, 64]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=(.0, .0, .0, .0),
target_stds=(1.0, 1.0, 1.0, 1.0)),
reg_decoded_bbox=False,
background_label=None,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=True,
loss_weight=1.0),
loss_bbox=dict(
type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0),
train_cfg=None,
test_cfg=None):
super(AnchorHead, self).__init__()
self.in_channels = in_channels
self.num_classes = num_classes
self.feat_channels = feat_channels
self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False)
# TODO better way to determine whether sample or not
self.sampling = loss_cls['type'] not in ['FocalLoss', 'GHMC']
if self.use_sigmoid_cls:
self.cls_out_channels = num_classes
else:
self.cls_out_channels = num_classes + 1
if self.cls_out_channels <= 0:
raise ValueError(f'num_classes={num_classes} is too small')
self.reg_decoded_bbox = reg_decoded_bbox
self.background_label = (
num_classes if background_label is None else background_label)
# background_label should be either 0 or num_classes
assert (self.background_label == 0
or self.background_label == num_classes)
self.bbox_coder = build_bbox_coder(bbox_coder)
self.loss_cls = build_loss(loss_cls)
self.loss_bbox = build_loss(loss_bbox)
self.train_cfg = train_cfg
self.test_cfg = test_cfg
if self.train_cfg:
self.assigner = build_assigner(self.train_cfg.assigner)
# use PseudoSampler when sampling is False
if self.sampling and hasattr(self.train_cfg, 'sampler'):
sampler_cfg = self.train_cfg.sampler
else:
sampler_cfg = dict(type='PseudoSampler')
self.sampler = build_sampler(sampler_cfg, context=self)
self.fp16_enabled = False
self.anchor_generator = build_anchor_generator(anchor_generator)
# usually the numbers of anchors for each level are the same
# except SSD detectors
self.num_anchors = self.anchor_generator.num_base_anchors[0]
self._init_layers()
def _init_layers(self):
self.conv_cls = nn.Conv2d(self.in_channels,
self.num_anchors * self.cls_out_channels, 1)
self.conv_reg = nn.Conv2d(self.in_channels, self.num_anchors * 4, 1)
def init_weights(self):
normal_init(self.conv_cls, std=0.01)
normal_init(self.conv_reg, std=0.01)
def forward_single(self, x):
cls_score = self.conv_cls(x)
bbox_pred = self.conv_reg(x)
return cls_score, bbox_pred
def forward(self, feats):
return multi_apply(self.forward_single, feats)
def get_anchors(self, featmap_sizes, img_metas, device='cuda'):
"""Get anchors according to feature map sizes.
Args:
featmap_sizes (list[tuple]): Multi-level feature map sizes.
img_metas (list[dict]): Image meta info.
device (torch.device | str): Device for returned tensors
Returns:
tuple:
anchor_list (list[Tensor]): Anchors of each image
valid_flag_list (list[Tensor]): Valid flags of each image
"""
num_imgs = len(img_metas)
# since feature map sizes of all images are the same, we only compute
# anchors for one time
multi_level_anchors = self.anchor_generator.grid_anchors(
featmap_sizes, device)
anchor_list = [multi_level_anchors for _ in range(num_imgs)]
# for each image, we compute valid flags of multi level anchors
valid_flag_list = []
for img_id, img_meta in enumerate(img_metas):
multi_level_flags = self.anchor_generator.valid_flags(
featmap_sizes, img_meta['pad_shape'], device)
valid_flag_list.append(multi_level_flags)
return anchor_list, valid_flag_list
def _get_targets_single(self,
flat_anchors,
valid_flags,
gt_bboxes,
gt_bboxes_ignore,
gt_labels,
img_meta,
label_channels=1,
unmap_outputs=True):
"""Compute regression and classification targets for anchors in
a single image.
Args:
flat_anchors (Tensor): Multi-level anchors of the image, which are
                concatenated into a single tensor of shape (num_anchors, 4)
valid_flags (Tensor): Multi level valid flags of the image,
which are concatenated into a single tensor of
shape (num_anchors,).
gt_bboxes (Tensor): Ground truth bboxes of the image,
shape (num_gts, 4).
            gt_bboxes_ignore (Tensor): Ground truth bboxes to be
                ignored, shape (num_ignored_gts, 4).
            gt_labels (Tensor): Ground truth labels of each box,
                shape (num_gts,).
            img_meta (dict): Meta info of the image.
label_channels (int): Channel of label.
unmap_outputs (bool): Whether to map outputs back to the original
set of anchors.
Returns:
tuple:
labels_list (list[Tensor]): Labels of each level
label_weights_list (list[Tensor]): Label weights of each level
bbox_targets_list (list[Tensor]): BBox targets of each level
bbox_weights_list (list[Tensor]): BBox weights of each level
num_total_pos (int): Number of positive samples in all images
num_total_neg (int): Number of negative samples in all images
"""
inside_flags = anchor_inside_flags(flat_anchors, valid_flags,
img_meta['img_shape'][:2],
self.train_cfg.allowed_border)
if not inside_flags.any():
return (None, ) * 6
# assign gt and sample anchors
anchors = flat_anchors[inside_flags, :]
assign_result = self.assigner.assign(
anchors, gt_bboxes, gt_bboxes_ignore,
None if self.sampling else gt_labels)
sampling_result = self.sampler.sample(assign_result, anchors,
gt_bboxes)
num_valid_anchors = anchors.shape[0]
bbox_targets = torch.zeros_like(anchors)
bbox_weights = torch.zeros_like(anchors)
labels = anchors.new_full((num_valid_anchors, ),
self.background_label,
dtype=torch.long)
label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float)
pos_inds = sampling_result.pos_inds
neg_inds = sampling_result.neg_inds
if len(pos_inds) > 0:
if not self.reg_decoded_bbox:
pos_bbox_targets = self.bbox_coder.encode(
sampling_result.pos_bboxes, sampling_result.pos_gt_bboxes)
else:
pos_bbox_targets = sampling_result.pos_gt_bboxes
bbox_targets[pos_inds, :] = pos_bbox_targets
bbox_weights[pos_inds, :] = 1.0
if gt_labels is None:
# only rpn gives gt_labels as None, this time FG is 1
labels[pos_inds] = 1
else:
labels[pos_inds] = gt_labels[
sampling_result.pos_assigned_gt_inds]
if self.train_cfg.pos_weight <= 0:
label_weights[pos_inds] = 1.0
else:
label_weights[pos_inds] = self.train_cfg.pos_weight
if len(neg_inds) > 0:
label_weights[neg_inds] = 1.0
# map up to original set of anchors
if unmap_outputs:
num_total_anchors = flat_anchors.size(0)
labels = unmap(
labels, num_total_anchors, inside_flags,
fill=self.num_classes) # fill bg label
label_weights = unmap(label_weights, num_total_anchors,
inside_flags)
bbox_targets = unmap(bbox_targets, num_total_anchors, inside_flags)
bbox_weights = unmap(bbox_weights, num_total_anchors, inside_flags)
return (labels, label_weights, bbox_targets, bbox_weights, pos_inds,
neg_inds)
def get_targets(self,
anchor_list,
valid_flag_list,
gt_bboxes_list,
img_metas,
gt_bboxes_ignore_list=None,
gt_labels_list=None,
label_channels=1,
unmap_outputs=True):
"""Compute regression and classification targets for anchors in
multiple images.
Args:
anchor_list (list[list[Tensor]]): Multi level anchors of each
image. The outer list indicates images, and the inner list
corresponds to feature levels of the image. Each element of
the inner list is a tensor of shape (num_anchors, 4).
valid_flag_list (list[list[Tensor]]): Multi level valid flags of
each image. The outer list indicates images, and the inner list
corresponds to feature levels of the image. Each element of
the inner list is a tensor of shape (num_anchors, )
gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image.
img_metas (list[dict]): Meta info of each image.
gt_bboxes_ignore_list (list[Tensor]): Ground truth bboxes to be
ignored.
gt_labels_list (list[Tensor]): Ground truth labels of each box.
label_channels (int): Channel of label.
unmap_outputs (bool): Whether to map outputs back to the original
set of anchors.
Returns:
tuple:
labels_list (list[Tensor]): Labels of each level
label_weights_list (list[Tensor]): Label weights of each level
bbox_targets_list (list[Tensor]): BBox targets of each level
bbox_weights_list (list[Tensor]): BBox weights of each level
num_total_pos (int): Number of positive samples in all images
num_total_neg (int): Number of negative samples in all images
additional_returns: This function enables user-defined returns from
`self._get_targets_single`. These returns are currently refined
to properties at each feature map (i.e. having HxW dimension).
The results will be concatenated after the end
"""
num_imgs = len(img_metas)
assert len(anchor_list) == len(valid_flag_list) == num_imgs
# anchor number of multi levels
num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]
# concat all level anchors to a single tensor
concat_anchor_list = []
concat_valid_flag_list = []
for i in range(num_imgs):
assert len(anchor_list[i]) == len(valid_flag_list[i])
concat_anchor_list.append(torch.cat(anchor_list[i]))
concat_valid_flag_list.append(torch.cat(valid_flag_list[i]))
# compute targets for each image
if gt_bboxes_ignore_list is None:
gt_bboxes_ignore_list = [None for _ in range(num_imgs)]
if gt_labels_list is None:
gt_labels_list = [None for _ in range(num_imgs)]
results = multi_apply(
self._get_targets_single,
concat_anchor_list,
concat_valid_flag_list,
gt_bboxes_list,
gt_bboxes_ignore_list,
gt_labels_list,
img_metas,
label_channels=label_channels,
unmap_outputs=unmap_outputs)
(all_labels, all_label_weights, all_bbox_targets, all_bbox_weights,
pos_inds_list, neg_inds_list) = results[:6]
rest_results = list(results[6:]) # user-added return values
# no valid anchors
if any([labels is None for labels in all_labels]):
return None
# sampled anchors of all images
num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list])
num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list])
# split targets to a list w.r.t. multiple levels
labels_list = images_to_levels(all_labels, num_level_anchors)
label_weights_list = images_to_levels(all_label_weights,
num_level_anchors)
bbox_targets_list = images_to_levels(all_bbox_targets,
num_level_anchors)
bbox_weights_list = images_to_levels(all_bbox_weights,
num_level_anchors)
for i, r in enumerate(rest_results): # user-added return values
rest_results[i] = images_to_levels(r, num_level_anchors)
return (labels_list, label_weights_list, bbox_targets_list,
bbox_weights_list, num_total_pos, num_total_neg) \
+ tuple(rest_results)
def loss_single(self, cls_score, bbox_pred, anchors, labels, label_weights,
bbox_targets, bbox_weights, num_total_samples):
# classification loss
labels = labels.reshape(-1)
label_weights = label_weights.reshape(-1)
cls_score = cls_score.permute(0, 2, 3,
1).reshape(-1, self.cls_out_channels)
loss_cls = self.loss_cls(
cls_score, labels, label_weights, avg_factor=num_total_samples)
# regression loss
bbox_targets = bbox_targets.reshape(-1, 4)
bbox_weights = bbox_weights.reshape(-1, 4)
bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4)
if self.reg_decoded_bbox:
anchors = anchors.reshape(-1, 4)
bbox_pred = self.bbox_coder.decode(anchors, bbox_pred)
loss_bbox = self.loss_bbox(
bbox_pred,
bbox_targets,
bbox_weights,
avg_factor=num_total_samples)
return loss_cls, loss_bbox
@force_fp32(apply_to=('cls_scores', 'bbox_preds'))
def loss(self,
cls_scores,
bbox_preds,
gt_bboxes,
gt_labels,
img_metas,
gt_bboxes_ignore=None):
featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
assert len(featmap_sizes) == self.anchor_generator.num_levels
device = cls_scores[0].device
anchor_list, valid_flag_list = self.get_anchors(
featmap_sizes, img_metas, device=device)
label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1
cls_reg_targets = self.get_targets(
anchor_list,
valid_flag_list,
gt_bboxes,
img_metas,
gt_bboxes_ignore_list=gt_bboxes_ignore,
gt_labels_list=gt_labels,
label_channels=label_channels)
if cls_reg_targets is None:
return None
(labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,
num_total_pos, num_total_neg) = cls_reg_targets
num_total_samples = (
num_total_pos + num_total_neg if self.sampling else num_total_pos)
# anchor number of multi levels
num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]
# concat all level anchors and flags to a single tensor
concat_anchor_list = []
for i in range(len(anchor_list)):
concat_anchor_list.append(torch.cat(anchor_list[i]))
all_anchor_list = images_to_levels(concat_anchor_list,
num_level_anchors)
losses_cls, losses_bbox = multi_apply(
self.loss_single,
cls_scores,
bbox_preds,
all_anchor_list,
labels_list,
label_weights_list,
bbox_targets_list,
bbox_weights_list,
num_total_samples=num_total_samples)
return dict(loss_cls=losses_cls, loss_bbox=losses_bbox)
@force_fp32(apply_to=('cls_scores', 'bbox_preds'))
def get_bboxes(self,
cls_scores,
bbox_preds,
img_metas,
cfg=None,
rescale=False):
"""
Transform network output for a batch into labeled boxes.
Args:
cls_scores (list[Tensor]): Box scores for each scale level
Has shape (N, num_anchors * num_classes, H, W)
bbox_preds (list[Tensor]): Box energies / deltas for each scale
level with shape (N, num_anchors * 4, H, W)
img_metas (list[dict]): Size / scale info for each image
cfg (mmcv.Config): Test / postprocessing configuration,
if None, test_cfg would be used
rescale (bool): If True, return boxes in original image space
Returns:
list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple.
The first item is an (n, 5) tensor, where the first 4 columns
are bounding box positions (tl_x, tl_y, br_x, br_y) and the
5-th column is a score between 0 and 1. The second item is a
(n,) tensor where each item is the class index of the
corresponding box.
Example:
>>> import mmcv
>>> self = AnchorHead(
>>> num_classes=9,
>>> in_channels=1,
>>> anchor_generator=dict(
>>> type='AnchorGenerator',
>>> scales=[8],
>>> ratios=[0.5, 1.0, 2.0],
>>> strides=[4,]))
>>> img_metas = [{'img_shape': (32, 32, 3), 'scale_factor': 1}]
>>> cfg = mmcv.Config(dict(
>>> score_thr=0.00,
>>> nms=dict(type='nms', iou_thr=1.0),
>>> max_per_img=10))
>>> feat = torch.rand(1, 1, 3, 3)
>>> cls_score, bbox_pred = self.forward_single(feat)
>>> # note the input lists are over different levels, not images
>>> cls_scores, bbox_preds = [cls_score], [bbox_pred]
>>> result_list = self.get_bboxes(cls_scores, bbox_preds,
>>> img_metas, cfg)
>>> det_bboxes, det_labels = result_list[0]
>>> assert len(result_list) == 1
>>> assert det_bboxes.shape[1] == 5
>>> assert len(det_bboxes) == len(det_labels) == cfg.max_per_img
"""
assert len(cls_scores) == len(bbox_preds)
num_levels = len(cls_scores)
device = cls_scores[0].device
featmap_sizes = [cls_scores[i].shape[-2:] for i in range(num_levels)]
mlvl_anchors = self.anchor_generator.grid_anchors(
featmap_sizes, device=device)
result_list = []
for img_id in range(len(img_metas)):
cls_score_list = [
cls_scores[i][img_id].detach() for i in range(num_levels)
]
bbox_pred_list = [
bbox_preds[i][img_id].detach() for i in range(num_levels)
]
img_shape = img_metas[img_id]['img_shape']
scale_factor = img_metas[img_id]['scale_factor']
proposals = self._get_bboxes_single(cls_score_list, bbox_pred_list,
mlvl_anchors, img_shape,
scale_factor, cfg, rescale)
result_list.append(proposals)
return result_list
def _get_bboxes_single(self,
cls_score_list,
bbox_pred_list,
mlvl_anchors,
img_shape,
scale_factor,
cfg,
rescale=False):
"""
Transform outputs for a single batch item into labeled boxes.
"""
cfg = self.test_cfg if cfg is None else cfg
assert len(cls_score_list) == len(bbox_pred_list) == len(mlvl_anchors)
mlvl_bboxes = []
mlvl_scores = []
for cls_score, bbox_pred, anchors in zip(cls_score_list,
bbox_pred_list, mlvl_anchors):
assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
cls_score = cls_score.permute(1, 2,
0).reshape(-1, self.cls_out_channels)
if self.use_sigmoid_cls:
scores = cls_score.sigmoid()
else:
scores = cls_score.softmax(-1)
bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4)
nms_pre = cfg.get('nms_pre', -1)
if nms_pre > 0 and scores.shape[0] > nms_pre:
# Get maximum scores for foreground classes.
if self.use_sigmoid_cls:
max_scores, _ = scores.max(dim=1)
else:
# remind that we set FG labels to [0, num_class-1]
# since mmdet v2.0
# BG cat_id: num_class
max_scores, _ = scores[:, :-1].max(dim=1)
_, topk_inds = max_scores.topk(nms_pre)
anchors = anchors[topk_inds, :]
bbox_pred = bbox_pred[topk_inds, :]
scores = scores[topk_inds, :]
bboxes = self.bbox_coder.decode(
anchors, bbox_pred, max_shape=img_shape)
mlvl_bboxes.append(bboxes)
mlvl_scores.append(scores)
mlvl_bboxes = torch.cat(mlvl_bboxes)
if rescale:
mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor)
mlvl_scores = torch.cat(mlvl_scores)
if self.use_sigmoid_cls:
# Add a dummy background class to the backend when using sigmoid
# remind that we set FG labels to [0, num_class-1] since mmdet v2.0
# BG cat_id: num_class
padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1)
mlvl_scores = torch.cat([mlvl_scores, padding], dim=1)
det_bboxes, det_labels = multiclass_nms(mlvl_bboxes, mlvl_scores,
cfg.score_thr, cfg.nms,
cfg.max_per_img)
return det_bboxes, det_labels
| 1 | 19,671 | The meaning of `loss_normalizer` and `loss_normalizer_momentum` should be reflected in docstring. | open-mmlab-mmdetection | py |
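A sketch of what documenting the two new options could cover, assuming loss_normalizer holds an exponential moving average (EMA) of the positive-sample count and that -1 means the EMA is disabled (both assumptions to be checked against the actual implementation):

class EMALossNormalizer:
    """Running estimate of the number of positive samples per batch.

    Args:
        init (float): Initial normalizer value. A value of -1 is assumed
            to disable the EMA and fall back to the per-batch count.
        momentum (float): EMA momentum in [0, 1); larger values make the
            normalizer change more slowly between batches.
    """

    def __init__(self, init=-1, momentum=0.9):
        self.value = init
        self.momentum = momentum

    def update(self, num_pos_in_batch):
        """Update the running estimate and return the value to divide by."""
        if self.value < 0:  # EMA disabled, use the raw batch count
            return max(num_pos_in_batch, 1)
        self.value = (self.momentum * self.value
                      + (1 - self.momentum) * num_pos_in_batch)
        return max(self.value, 1)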
@@ -87,11 +87,16 @@ static void init_async(h2o_multithread_queue_t *queue, h2o_loop_t *loop)
{
int fds[2];
+#ifndef _WIN32
if (cloexec_pipe(fds) != 0) {
perror("pipe");
abort();
}
fcntl(fds[1], F_SETFL, O_NONBLOCK);
+#else
+ u_long nonblock = 1;
+ ioctlsocket(fds[1], FIONBIO, &nonblock);
+#endif
queue->async.write = fds[1];
queue->async.read = h2o_evloop_socket_create(loop, fds[0], NULL, 0, 0);
queue->async.read->data = queue; | 1 | /*
* Copyright (c) 2015 DeNA Co., Ltd., Kazuho Oku
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <assert.h>
#include <pthread.h>
#include "cloexec.h"
#include "h2o/multithread.h"
struct st_h2o_multithread_queue_t {
#if H2O_USE_LIBUV
uv_async_t async;
#else
struct {
int write;
h2o_socket_t *read;
} async;
#endif
pthread_mutex_t mutex;
struct {
h2o_linklist_t active;
h2o_linklist_t inactive;
} receivers;
};
static void queue_cb(h2o_multithread_queue_t *queue)
{
pthread_mutex_lock(&queue->mutex);
while (!h2o_linklist_is_empty(&queue->receivers.active)) {
h2o_multithread_receiver_t *receiver =
H2O_STRUCT_FROM_MEMBER(h2o_multithread_receiver_t, _link, queue->receivers.active.next);
/* detach all the messages from the receiver */
h2o_linklist_t messages;
h2o_linklist_init_anchor(&messages);
h2o_linklist_insert_list(&messages, &receiver->_messages);
/* relink the receiver to the inactive list */
h2o_linklist_unlink(&receiver->_link);
h2o_linklist_insert(&queue->receivers.inactive, &receiver->_link);
/* dispatch the messages */
pthread_mutex_unlock(&queue->mutex);
receiver->cb(receiver, &messages);
assert(h2o_linklist_is_empty(&messages));
pthread_mutex_lock(&queue->mutex);
}
pthread_mutex_unlock(&queue->mutex);
}
#if H2O_USE_LIBUV
#else
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
static void on_read(h2o_socket_t *sock, int status)
{
if (status != 0) {
fprintf(stderr, "pipe error\n");
abort();
}
h2o_buffer_consume(&sock->input, sock->input->size);
queue_cb(sock->data);
}
static void init_async(h2o_multithread_queue_t *queue, h2o_loop_t *loop)
{
int fds[2];
if (cloexec_pipe(fds) != 0) {
perror("pipe");
abort();
}
fcntl(fds[1], F_SETFL, O_NONBLOCK);
queue->async.write = fds[1];
queue->async.read = h2o_evloop_socket_create(loop, fds[0], NULL, 0, 0);
queue->async.read->data = queue;
h2o_socket_read_start(queue->async.read, on_read);
}
#endif
h2o_multithread_queue_t *h2o_multithread_create_queue(h2o_loop_t *loop)
{
h2o_multithread_queue_t *queue = h2o_mem_alloc(sizeof(*queue));
*queue = (h2o_multithread_queue_t){};
#if H2O_USE_LIBUV
uv_async_init(loop, &queue->async, (void *)queue_cb);
#else
init_async(queue, loop);
#endif
pthread_mutex_init(&queue->mutex, NULL);
h2o_linklist_init_anchor(&queue->receivers.active);
h2o_linklist_init_anchor(&queue->receivers.inactive);
return queue;
}
void h2o_multithread_destroy_queue(h2o_multithread_queue_t *queue)
{
assert(h2o_linklist_is_empty(&queue->receivers.active));
assert(h2o_linklist_is_empty(&queue->receivers.inactive));
#if H2O_USE_LIBUV
uv_close((uv_handle_t *)&queue->async, (void *)free);
#else
h2o_socket_read_stop(queue->async.read);
h2o_socket_close(queue->async.read);
close(queue->async.write);
#endif
pthread_mutex_destroy(&queue->mutex);
}
void h2o_multithread_register_receiver(h2o_multithread_queue_t *queue, h2o_multithread_receiver_t *receiver,
h2o_multithread_receiver_cb cb)
{
receiver->queue = queue;
receiver->_link = (h2o_linklist_t){};
h2o_linklist_init_anchor(&receiver->_messages);
receiver->cb = cb;
h2o_linklist_insert(&queue->receivers.inactive, &receiver->_link);
}
void h2o_multithread_unregister_receiver(h2o_multithread_queue_t *queue, h2o_multithread_receiver_t *receiver)
{
assert(queue == receiver->queue);
assert(h2o_linklist_is_empty(&receiver->_messages));
h2o_linklist_unlink(&receiver->_link);
}
void h2o_multithread_send_message(h2o_multithread_receiver_t *receiver, h2o_multithread_message_t *message)
{
int do_send = 0;
assert(!h2o_linklist_is_linked(&message->link));
pthread_mutex_lock(&receiver->queue->mutex);
if (h2o_linklist_is_empty(&receiver->_messages)) {
h2o_linklist_unlink(&receiver->_link);
h2o_linklist_insert(&receiver->queue->receivers.active, &receiver->_link);
do_send = 1;
}
h2o_linklist_insert(&receiver->_messages, &message->link);
pthread_mutex_unlock(&receiver->queue->mutex);
if (do_send) {
#if H2O_USE_LIBUV
uv_async_send(&receiver->queue->async);
#else
while (write(receiver->queue->async.write, "", 1) == -1 && errno == EINTR)
;
#endif
}
}
void h2o_multithread_create_thread(pthread_t *tid, const pthread_attr_t *attr, void *(*func)(void *), void *arg)
{
if (pthread_create(tid, attr, func, arg) != 0) {
perror("pthread_create");
abort();
}
}
| 1 | 10,686 | Call to `cloexec_pipe` (or an equivalent function) is missing. I presume that this is the reason why you are seeing timeout errors. | h2o-h2o | c |
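A sketch of what the missing step might look like on Windows, where the closest analogue of FD_CLOEXEC is marking the socket handles non-inheritable; the creation of the socket pair itself is assumed to happen elsewhere and error handling is minimal:

#ifdef _WIN32
#include <winsock2.h>
#include <windows.h>

/* Mark both ends of the pair as non-inheritable (the cloexec equivalent) and
 * make the write side non-blocking, mirroring the POSIX branch. */
static int setup_async_pair_win32(SOCKET fds[2])
{
    u_long nonblock = 1;
    if (!SetHandleInformation((HANDLE)fds[0], HANDLE_FLAG_INHERIT, 0) ||
        !SetHandleInformation((HANDLE)fds[1], HANDLE_FLAG_INHERIT, 0))
        return -1;
    if (ioctlsocket(fds[1], FIONBIO, &nonblock) != 0)
        return -1;
    return 0;
}
#endif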
@@ -26,11 +26,7 @@ import org.springframework.security.oauth2.jwt.Jwt;
import java.time.Duration;
import java.time.Instant;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Map;
+import java.util.*;
import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Assertions.assertThatThrownBy; | 1 | /*
* Copyright 2002-2019 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.security.oauth2.client.oidc.authentication;
import org.junit.Before;
import org.junit.Test;
import org.springframework.security.oauth2.client.registration.ClientRegistration;
import org.springframework.security.oauth2.client.registration.TestClientRegistrations;
import org.springframework.security.oauth2.core.OAuth2Error;
import org.springframework.security.oauth2.core.oidc.IdTokenClaimNames;
import org.springframework.security.oauth2.jose.jws.JwsAlgorithms;
import org.springframework.security.oauth2.jwt.Jwt;
import java.time.Duration;
import java.time.Instant;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Assertions.assertThatThrownBy;
/**
* @author Rob Winch
* @author Joe Grandja
* @since 5.1
*/
public class OidcIdTokenValidatorTests {
private ClientRegistration.Builder registration = TestClientRegistrations.clientRegistration();
private Map<String, Object> headers = new HashMap<>();
private Map<String, Object> claims = new HashMap<>();
private Instant issuedAt = Instant.now();
private Instant expiresAt = this.issuedAt.plusSeconds(3600);
private Duration clockSkew = Duration.ofSeconds(60);
@Before
public void setup() {
this.headers.put("alg", JwsAlgorithms.RS256);
this.claims.put(IdTokenClaimNames.ISS, "https://issuer.example.com");
this.claims.put(IdTokenClaimNames.SUB, "rob");
this.claims.put(IdTokenClaimNames.AUD, Collections.singletonList("client-id"));
}
@Test
public void validateWhenValidThenNoErrors() {
assertThat(this.validateIdToken()).isEmpty();
}
@Test
public void setClockSkewWhenNullThenThrowIllegalArgumentException() {
OidcIdTokenValidator idTokenValidator = new OidcIdTokenValidator(this.registration.build());
assertThatThrownBy(() -> idTokenValidator.setClockSkew(null))
.isInstanceOf(IllegalArgumentException.class);
}
@Test
public void setClockSkewWhenNegativeSecondsThenThrowIllegalArgumentException() {
OidcIdTokenValidator idTokenValidator = new OidcIdTokenValidator(this.registration.build());
assertThatThrownBy(() -> idTokenValidator.setClockSkew(Duration.ofSeconds(-1)))
.isInstanceOf(IllegalArgumentException.class);
}
@Test
public void validateWhenIssuerNullThenHasErrors() {
this.claims.remove(IdTokenClaimNames.ISS);
assertThat(this.validateIdToken())
.hasSize(1)
.extracting(OAuth2Error::getDescription)
.allMatch(msg -> msg.contains(IdTokenClaimNames.ISS));
}
@Test
public void validateWhenSubNullThenHasErrors() {
this.claims.remove(IdTokenClaimNames.SUB);
assertThat(this.validateIdToken())
.hasSize(1)
.extracting(OAuth2Error::getDescription)
.allMatch(msg -> msg.contains(IdTokenClaimNames.SUB));
}
@Test
public void validateWhenAudNullThenHasErrors() {
this.claims.remove(IdTokenClaimNames.AUD);
assertThat(this.validateIdToken())
.hasSize(1)
.extracting(OAuth2Error::getDescription)
.allMatch(msg -> msg.contains(IdTokenClaimNames.AUD));
}
@Test
public void validateWhenIssuedAtNullThenHasErrors() {
this.issuedAt = null;
assertThat(this.validateIdToken())
.hasSize(1)
.extracting(OAuth2Error::getDescription)
.allMatch(msg -> msg.contains(IdTokenClaimNames.IAT));
}
@Test
public void validateWhenExpiresAtNullThenHasErrors() {
this.expiresAt = null;
assertThat(this.validateIdToken())
.hasSize(1)
.extracting(OAuth2Error::getDescription)
.allMatch(msg -> msg.contains(IdTokenClaimNames.EXP));
}
@Test
public void validateWhenAudMultipleAndAzpNullThenHasErrors() {
this.claims.put(IdTokenClaimNames.AUD, Arrays.asList("client-id", "other"));
assertThat(this.validateIdToken())
.hasSize(1)
.extracting(OAuth2Error::getDescription)
.allMatch(msg -> msg.contains(IdTokenClaimNames.AZP));
}
@Test
public void validateWhenAzpNotClientIdThenHasErrors() {
this.claims.put(IdTokenClaimNames.AZP, "other");
assertThat(this.validateIdToken())
.hasSize(1)
.extracting(OAuth2Error::getDescription)
.allMatch(msg -> msg.contains(IdTokenClaimNames.AZP));
}
@Test
public void validateWhenMultipleAudAzpClientIdThenNoErrors() {
this.claims.put(IdTokenClaimNames.AUD, Arrays.asList("client-id", "other"));
this.claims.put(IdTokenClaimNames.AZP, "client-id");
assertThat(this.validateIdToken()).isEmpty();
}
@Test
public void validateWhenMultipleAudAzpNotClientIdThenHasErrors() {
this.claims.put(IdTokenClaimNames.AUD, Arrays.asList("client-id-1", "client-id-2"));
this.claims.put(IdTokenClaimNames.AZP, "other-client");
assertThat(this.validateIdToken())
.hasSize(1)
.extracting(OAuth2Error::getDescription)
.allMatch(msg -> msg.contains(IdTokenClaimNames.AZP));
}
@Test
public void validateWhenAudNotClientIdThenHasErrors() {
this.claims.put(IdTokenClaimNames.AUD, Collections.singletonList("other-client"));
assertThat(this.validateIdToken())
.hasSize(1)
.extracting(OAuth2Error::getDescription)
.allMatch(msg -> msg.contains(IdTokenClaimNames.AUD));
}
@Test
public void validateWhenExpiredAnd60secClockSkewThenNoErrors() {
this.issuedAt = Instant.now().minus(Duration.ofSeconds(60));
this.expiresAt = this.issuedAt.plus(Duration.ofSeconds(30));
this.clockSkew = Duration.ofSeconds(60);
assertThat(this.validateIdToken()).isEmpty();
}
@Test
public void validateWhenExpiredAnd0secClockSkewThenHasErrors() {
this.issuedAt = Instant.now().minus(Duration.ofSeconds(60));
this.expiresAt = this.issuedAt.plus(Duration.ofSeconds(30));
this.clockSkew = Duration.ofSeconds(0);
assertThat(this.validateIdToken())
.hasSize(1)
.extracting(OAuth2Error::getDescription)
.allMatch(msg -> msg.contains(IdTokenClaimNames.EXP));
}
@Test
public void validateWhenIssuedAt5minAheadAnd5minClockSkewThenNoErrors() {
this.issuedAt = Instant.now().plus(Duration.ofMinutes(5));
this.expiresAt = this.issuedAt.plus(Duration.ofSeconds(60));
this.clockSkew = Duration.ofMinutes(5);
assertThat(this.validateIdToken()).isEmpty();
}
@Test
public void validateWhenIssuedAt1minAheadAnd0minClockSkewThenHasErrors() {
this.issuedAt = Instant.now().plus(Duration.ofMinutes(1));
this.expiresAt = this.issuedAt.plus(Duration.ofSeconds(60));
this.clockSkew = Duration.ofMinutes(0);
assertThat(this.validateIdToken())
.hasSize(1)
.extracting(OAuth2Error::getDescription)
.allMatch(msg -> msg.contains(IdTokenClaimNames.IAT));
}
@Test
public void validateWhenExpiresAtBeforeNowThenHasErrors() {
this.issuedAt = Instant.now().minus(Duration.ofSeconds(10));
this.expiresAt = this.issuedAt.plus(Duration.ofSeconds(5));
this.clockSkew = Duration.ofSeconds(0);
assertThat(this.validateIdToken())
.hasSize(1)
.extracting(OAuth2Error::getDescription)
.allMatch(msg -> msg.contains(IdTokenClaimNames.EXP));
}
@Test
public void validateWhenMissingClaimsThenHasErrors() {
this.claims.remove(IdTokenClaimNames.SUB);
this.claims.remove(IdTokenClaimNames.AUD);
this.issuedAt = null;
this.expiresAt = null;
assertThat(this.validateIdToken())
.hasSize(1)
.extracting(OAuth2Error::getDescription)
.allMatch(msg -> msg.contains(IdTokenClaimNames.SUB))
.allMatch(msg -> msg.contains(IdTokenClaimNames.AUD))
.allMatch(msg -> msg.contains(IdTokenClaimNames.IAT))
.allMatch(msg -> msg.contains(IdTokenClaimNames.EXP));
}
@Test
public void validateFormatError() {
this.claims.remove(IdTokenClaimNames.SUB);
this.claims.remove(IdTokenClaimNames.AUD);
assertThat(this.validateIdToken())
.hasSize(1)
.extracting(OAuth2Error::getDescription)
.allMatch(msg -> msg.equals("The ID Token contains invalid claims: {sub=null, aud=null}"));
}
private Collection<OAuth2Error> validateIdToken() {
Jwt idToken = new Jwt("token123", this.issuedAt, this.expiresAt, this.headers, this.claims);
OidcIdTokenValidator validator = new OidcIdTokenValidator(this.registration.build());
validator.setClockSkew(this.clockSkew);
return validator.validate(idToken).getErrors();
}
}
| 1 | 14,210 | There are no changes in this file. Please reset. | spring-projects-spring-security | java |
@@ -731,9 +731,11 @@ def nullDistribution(verbosity=0):
def normalProbability(x, distributionParams):
"""
- Given the normal distribution specified by the mean and standard deviation in
- distributionParams, return the probability of getting samples > x.
- This is the Q-function: the tail probability of the normal distribution.
+ Given the normal distribution specified by the mean and standard deviation
+ in distributionParams, return the probability of getting samples further
+ from the mean. For values above the mean, this is the probability of getting
+ samples > x and for values below the mean, the probability of getting
+ samples < x. This is the Q-function: the tail probability of the normal distribution.
:param distributionParams: dict with 'mean' and 'stdev' of the distribution
""" | 1 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014-2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
This module analyzes and estimates the distribution of averaged anomaly scores
from a given model. Given a new anomaly score `s`, estimates `P(score >= s)`.
The number `P(score >= s)` represents the likelihood of the current state of
predictability. For example, a likelihood of 0.01 or 1% means we see this much
predictability about one out of every 100 records. The number is not as unusual
as it seems. For records that arrive every minute, this means once every hour
and 40 minutes. A likelihood of 0.0001 or 0.01% means we see it once out of
10,000 records, or about once every 7 days.
USAGE
-----
There are two ways to use the code: using the AnomalyLikelihood helper class or
using the raw individual functions.
Helper Class
------------
The helper class AnomalyLikelihood is the easiest to use. To use it simply
create an instance and then feed it successive anomaly scores:
anomalyLikelihood = AnomalyLikelihood()
while still_have_data:
# Get anomaly score from model
    # Compute probability that an anomaly has occurred
anomalyProbability = anomalyLikelihood.anomalyProbability(
value, anomalyScore, timestamp)
Raw functions
-------------
There are two lower level functions, estimateAnomalyLikelihoods and
updateAnomalyLikelihoods. The details of these are described below.
"""
import collections
import math
import numpy
from nupic.utils import MovingAverage
class AnomalyLikelihood(object):
"""
Helper class for running anomaly likelihood computation.
"""
def __init__(self,
claLearningPeriod=None,
learningPeriod=288,
estimationSamples=100,
historicWindowSize=8640,
reestimationPeriod=100):
"""
NOTE: Anomaly likelihood scores are reported at a flat 0.5 for
learningPeriod + estimationSamples iterations.
    claLearningPeriod and learningPeriod specify the same variable,
although claLearningPeriod is a deprecated name for it.
    @param learningPeriod (claLearningPeriod: deprecated) - (int) the number of
iterations required for the algorithm to learn the basic patterns in the
dataset and for the anomaly score to 'settle down'. The default is based
on empirical observations but in reality this could be larger for more
      complex domains. The downside of setting this too large is that real anomalies
might get ignored and not flagged.
@param estimationSamples - (int) the number of reasonable anomaly scores
required for the initial estimate of the Gaussian. The default of 100
records is reasonable - we just need sufficient samples to get a decent
estimate for the Gaussian. It's unlikely you will need to tune this since
      the Gaussian is re-estimated every 100 iterations by default.
@param historicWindowSize - (int) size of sliding window of historical
data points to maintain for periodic reestimation of the Gaussian. Note:
the default of 8640 is based on a month's worth of history at 5-minute
intervals.
@param reestimationPeriod - (int) how often we re-estimate the Gaussian
distribution. The ideal is to re-estimate every iteration but this is a
performance hit. In general the system is not very sensitive to this
number as long as it is small relative to the total number of records
processed.
"""
if historicWindowSize < estimationSamples:
raise ValueError("estimationSamples exceeds historicWindowSize")
self._iteration = 0
self._historicalScores = collections.deque(maxlen=historicWindowSize)
self._distribution = None
if claLearningPeriod != None:
print "claLearningPeriod is deprecated, use learningPeriod instead."
self._learningPeriod = claLearningPeriod
else:
self._learningPeriod = learningPeriod
self._probationaryPeriod = self._learningPeriod + estimationSamples
self._reestimationPeriod = reestimationPeriod
def __eq__(self, o):
# pylint: disable=W0212
return (isinstance(o, AnomalyLikelihood) and
self._iteration == o._iteration and
self._historicalScores == o._historicalScores and
self._distribution == o._distribution and
self._probationaryPeriod == o._probationaryPeriod and
self._learningPeriod == o._learningPeriod and
self._reestimationPeriod == o._reestimationPeriod)
# pylint: enable=W0212
def __str__(self):
return ("AnomalyLikelihood: %s %s %s %s %s %s" % (
self._iteration,
self._historicalScores,
self._distribution,
self._probationaryPeriod,
self._learningPeriod,
self._reestimationPeriod) )
@staticmethod
def computeLogLikelihood(likelihood):
"""
Compute a log scale representation of the likelihood value. Since the
likelihood computations return low probabilities that often go into four 9's
or five 9's, a log value is more useful for visualization, thresholding,
etc.
"""
# The log formula is:
# Math.log(1.0000000001 - likelihood) / Math.log(1.0 - 0.9999999999)
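    # For example, a likelihood of 0.9999 maps to roughly 0.4 on this scale and
    # 0.999999 maps to roughly 0.6.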
return math.log(1.0000000001 - likelihood) / -23.02585084720009
@staticmethod
def _calcSkipRecords(numIngested, windowSize, learningPeriod):
"""Return the value of skipRecords for passing to estimateAnomalyLikelihoods
If `windowSize` is very large (bigger than the amount of data) then this
could just return `learningPeriod`. But when some values have fallen out of
the historical sliding window of anomaly records, then we have to take those
into account as well so we return the `learningPeriod` minus the number
shifted out.
@param numIngested - (int) number of data points that have been added to the
sliding window of historical data points.
@param windowSize - (int) size of sliding window of historical data points.
@param learningPeriod - (int) the number of iterations required for the
algorithm to learn the basic patterns in the dataset and for the anomaly
score to 'settle down'.
"""
numShiftedOut = max(0, numIngested - windowSize)
return min(numIngested, max(0, learningPeriod - numShiftedOut))
@classmethod
def read(cls, proto):
""" capnp deserialization method for the anomaly likelihood object
@param proto (Object) capnp proto object specified in
nupic.regions.AnomalyLikelihoodRegion.capnp
@return (Object) the deserialized AnomalyLikelihood object
"""
# pylint: disable=W0212
anomalyLikelihood = object.__new__(cls)
anomalyLikelihood._iteration = proto.iteration
anomalyLikelihood._historicalScores = collections.deque(
maxlen=proto.historicWindowSize)
for i, score in enumerate(proto.historicalScores):
anomalyLikelihood._historicalScores.append((i, score.value,
score.anomalyScore))
if proto.distribution.name: # is "" when there is no distribution.
anomalyLikelihood._distribution = {}
anomalyLikelihood._distribution["name"] = proto.distribution.name
anomalyLikelihood._distribution["mean"] = proto.distribution.mean
anomalyLikelihood._distribution["variance"] = proto.distribution.variance
anomalyLikelihood._distribution["stdev"] = proto.distribution.stdev
anomalyLikelihood._distribution["movingAverage"] = {}
anomalyLikelihood._distribution["movingAverage"]["windowSize"] =\
proto.distribution.movingAverage.windowSize
anomalyLikelihood._distribution["movingAverage"]["historicalValues"] = []
for value in proto.distribution.movingAverage.historicalValues:
anomalyLikelihood._distribution["movingAverage"]["historicalValues"]\
.append(value)
anomalyLikelihood._distribution["movingAverage"]["total"] =\
proto.distribution.movingAverage.total
anomalyLikelihood._distribution["historicalLikelihoods"] = []
for likelihood in proto.distribution.historicalLikelihoods:
anomalyLikelihood._distribution["historicalLikelihoods"].append(
likelihood)
else:
anomalyLikelihood._distribution = None
anomalyLikelihood._probationaryPeriod = proto.probationaryPeriod
anomalyLikelihood._learningPeriod = proto.learningPeriod
anomalyLikelihood._reestimationPeriod = proto.reestimationPeriod
# pylint: enable=W0212
return anomalyLikelihood
def write(self, proto):
""" capnp serialization method for the anomaly likelihood object
@param proto (Object) capnp proto object specified in
nupic.regions.AnomalyLikelihoodRegion.capnp
"""
proto.iteration = self._iteration
pHistScores = proto.init('historicalScores', len(self._historicalScores))
for i, score in enumerate(list(self._historicalScores)):
_, value, anomalyScore = score
record = pHistScores[i]
record.value = float(value)
record.anomalyScore = float(anomalyScore)
if self._distribution:
proto.distribution.name = self._distribution["distributionParams"]["name"]
proto.distribution.mean = self._distribution["distributionParams"]["mean"]
proto.distribution.variance = self._distribution["distributionParams"]\
["variance"]
proto.distribution.stdev = self._distribution["distributionParams"]\
["stdev"]
proto.distribution.movingAverage.windowSize = self._distribution\
["movingAverage"]["windowSize"]
historicalValues = self._distribution["movingAverage"]["historicalValues"]
pHistValues = proto.distribution.movingAverage.init(
"historicalValues", len(historicalValues))
for i, value in enumerate(historicalValues):
pHistValues[i] = float(value)
proto.distribution.movingAverage.historicalValues = self._distribution\
["movingAverage"]["historicalValues"]
proto.distribution.movingAverage.total = self._distribution\
["movingAverage"]["total"]
historicalLikelihoods = self._distribution["historicalLikelihoods"]
pHistLikelihoods = proto.distribution.init("historicalLikelihoods",
len(historicalLikelihoods))
for i, likelihood in enumerate(historicalLikelihoods):
pHistLikelihoods[i] = float(likelihood)
proto.probationaryPeriod = self._probationaryPeriod
proto.learningPeriod = self._learningPeriod
proto.reestimationPeriod = self._reestimationPeriod
proto.historicWindowSize = self._historicalScores.maxlen
def anomalyProbability(self, value, anomalyScore, timestamp=None):
"""
Compute the probability that the current value plus anomaly score represents
an anomaly given the historical distribution of anomaly scores. The closer
the number is to 1, the higher the chance it is an anomaly.
@param value - the current metric ("raw") input value, eg. "orange", or
'21.2' (deg. Celsius), ...
@param anomalyScore - the current anomaly score
    @param timestamp - (optional) timestamp of the occurrence,
default (None) results in using iteration step.
@return the anomalyLikelihood for this record.
"""
if timestamp is None:
timestamp = self._iteration
dataPoint = (timestamp, value, anomalyScore)
# We ignore the first probationaryPeriod data points
if self._iteration < self._probationaryPeriod:
likelihood = 0.5
else:
# On a rolling basis we re-estimate the distribution
if ( (self._distribution is None) or
(self._iteration % self._reestimationPeriod == 0) ):
numSkipRecords = self._calcSkipRecords(
numIngested=self._iteration,
windowSize=self._historicalScores.maxlen,
learningPeriod=self._learningPeriod)
_, _, self._distribution = estimateAnomalyLikelihoods(
self._historicalScores,
skipRecords=numSkipRecords)
likelihoods, _, self._distribution = updateAnomalyLikelihoods(
[dataPoint],
self._distribution)
likelihood = 1.0 - likelihoods[0]
# Before we exit update historical scores and iteration
self._historicalScores.append(dataPoint)
self._iteration += 1
return likelihood
#
# USAGE FOR LOW-LEVEL FUNCTIONS
# -----------------------------
#
# There are two primary interface routines:
#
# estimateAnomalyLikelihoods: batch routine, called initially and once in a
# while
# updateAnomalyLikelihoods: online routine, called for every new data point
#
# 1. Initially::
#
# likelihoods, avgRecordList, estimatorParams = \
# estimateAnomalyLikelihoods(metric_data)
#
# 2. Whenever you get new data::
#
# likelihoods, avgRecordList, estimatorParams = \
# updateAnomalyLikelihoods(data2, estimatorParams)
#
# 3. And again (make sure you use the new estimatorParams returned in the above
# call to updateAnomalyLikelihoods!)::
#
# likelihoods, avgRecordList, estimatorParams = \
# updateAnomalyLikelihoods(data3, estimatorParams)
#
# 4. Every once in a while update estimator with a lot of recent data::
#
# likelihoods, avgRecordList, estimatorParams = \
# estimateAnomalyLikelihoods(lots_of_metric_data)
#
#
# PARAMS
# ~~~~~~
#
# The parameters dict returned by the above functions has the following
# structure. Note: the client does not need to know the details of this.
#
# ::
#
# {
# "distribution": # describes the distribution
# {
# "name": STRING, # name of the distribution, such as 'normal'
# "mean": SCALAR, # mean of the distribution
# "variance": SCALAR, # variance of the distribution
#
# # There may also be some keys that are specific to the distribution
# },
#
# "historicalLikelihoods": [] # Contains the last windowSize likelihood
# # values returned
#
# "movingAverage": # stuff needed to compute a rolling average
# # of the anomaly scores
# {
# "windowSize": SCALAR, # the size of the averaging window
# "historicalValues": [], # list with the last windowSize anomaly
# # scores
# "total": SCALAR, # the total of the values in historicalValues
# },
#
# }
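#
# ILLUSTRATIVE SKETCH
# ~~~~~~~~~~~~~~~~~~~
#
# A minimal, hypothetical call pattern tying the above together. The names
# `historical_records` and `new_records` are placeholders for lists of
# [timestamp, value, rawAnomalyScore] entries::
#
#   likelihoods, avgRecords, params = \
#       estimateAnomalyLikelihoods(historical_records)
#   for i, record in enumerate(new_records):
#       likelihoods, avgRecords, params = \
#           updateAnomalyLikelihoods([record], params)
#       if (i + 1) % 100 == 0:
#           # periodically re-estimate from a window of recent records
#           _, _, params = estimateAnomalyLikelihoods(
#               (historical_records + new_records[:i + 1])[-8640:])
#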
def estimateAnomalyLikelihoods(anomalyScores,
averagingWindow=10,
skipRecords=0,
verbosity=0):
"""
Given a series of anomaly scores, compute the likelihood for each score. This
function should be called once on a bunch of historical anomaly scores for an
initial estimate of the distribution. It should be called again every so often
(say every 50 records) to update the estimate.
:param anomalyScores: a list of records. Each record is a list with the
following three elements: [timestamp, value, score]
Example::
[datetime.datetime(2013, 8, 10, 23, 0), 6.0, 1.0]
For best results, the list should be between 1000
and 10,000 records
:param averagingWindow: integer number of records to average over
:param skipRecords: integer specifying number of records to skip when
estimating distributions. If skip records are >=
len(anomalyScores), a very broad distribution is returned
that makes everything pretty likely.
:param verbosity: integer controlling extent of printouts for debugging
0 = none
1 = occasional information
2 = print every record
:returns: 3-tuple consisting of:
- likelihoods
numpy array of likelihoods, one for each aggregated point
- avgRecordList
list of averaged input records
- params
a small JSON dict that contains the state of the estimator
"""
if verbosity > 1:
print "In estimateAnomalyLikelihoods."
print "Number of anomaly scores:", len(anomalyScores)
print "Skip records=", skipRecords
print "First 20:", anomalyScores[0:min(20, len(anomalyScores))]
if len(anomalyScores) == 0:
raise ValueError("Must have at least one anomalyScore")
# Compute averaged anomaly scores
aggRecordList, historicalValues, total = _anomalyScoreMovingAverage(
anomalyScores,
windowSize = averagingWindow,
verbosity = verbosity)
s = [r[2] for r in aggRecordList]
dataValues = numpy.array(s)
# Estimate the distribution of anomaly scores based on aggregated records
if len(aggRecordList) <= skipRecords:
distributionParams = nullDistribution(verbosity = verbosity)
else:
distributionParams = estimateNormal(dataValues[skipRecords:])
# HACK ALERT! The CLA model currently does not handle constant metric values
# very well (time of day encoder changes sometimes lead to unstable SDR's
# even though the metric is constant). Until this is resolved, we explicitly
# detect and handle completely flat metric values by reporting them as not
# anomalous.
s = [r[1] for r in aggRecordList]
metricValues = numpy.array(s)
metricDistribution = estimateNormal(metricValues[skipRecords:],
performLowerBoundCheck=False)
if metricDistribution["variance"] < 1.5e-5:
distributionParams = nullDistribution(verbosity = verbosity)
# Estimate likelihoods based on this distribution
likelihoods = numpy.array(dataValues, dtype=float)
for i, s in enumerate(dataValues):
likelihoods[i] = normalProbability(s, distributionParams)
# Filter likelihood values
filteredLikelihoods = numpy.array(
_filterLikelihoods(likelihoods) )
params = {
"distribution": distributionParams,
"movingAverage": {
"historicalValues": historicalValues,
"total": total,
"windowSize": averagingWindow,
},
"historicalLikelihoods":
list(likelihoods[-min(averagingWindow, len(likelihoods)):]),
}
if verbosity > 1:
print "Discovered params="
print params
print "Number of likelihoods:", len(likelihoods)
print "First 20 likelihoods:", (
filteredLikelihoods[0:min(20, len(filteredLikelihoods))] )
print "leaving estimateAnomalyLikelihoods"
return (filteredLikelihoods, aggRecordList, params)
def updateAnomalyLikelihoods(anomalyScores,
params,
verbosity=0):
"""
Compute updated probabilities for anomalyScores using the given params.
:param anomalyScores: a list of records. Each record is a list with the
following three elements: [timestamp, value, score]
Example::
[datetime.datetime(2013, 8, 10, 23, 0), 6.0, 1.0]
:param params: the JSON dict returned by estimateAnomalyLikelihoods
:param verbosity: integer controlling extent of printouts for debugging
:type verbosity: int
:returns: 3-tuple consisting of:
- likelihoods
numpy array of likelihoods, one for each aggregated point
- avgRecordList
list of averaged input records
- params
an updated JSON object containing the state of this metric.
"""
if verbosity > 3:
print "In updateAnomalyLikelihoods."
print "Number of anomaly scores:", len(anomalyScores)
print "First 20:", anomalyScores[0:min(20, len(anomalyScores))]
print "Params:", params
if len(anomalyScores) == 0:
raise ValueError("Must have at least one anomalyScore")
if not isValidEstimatorParams(params):
raise ValueError("'params' is not a valid params structure")
# For backward compatibility.
if not params.has_key("historicalLikelihoods"):
params["historicalLikelihoods"] = [1.0]
# Compute moving averages of these new scores using the previous values
# as well as likelihood for these scores using the old estimator
historicalValues = params["movingAverage"]["historicalValues"]
total = params["movingAverage"]["total"]
windowSize = params["movingAverage"]["windowSize"]
aggRecordList = numpy.zeros(len(anomalyScores), dtype=float)
likelihoods = numpy.zeros(len(anomalyScores), dtype=float)
for i, v in enumerate(anomalyScores):
newAverage, historicalValues, total = (
MovingAverage.compute(historicalValues, total, v[2], windowSize)
)
aggRecordList[i] = newAverage
likelihoods[i] = normalProbability(newAverage, params["distribution"])
# Filter the likelihood values. First we prepend the historical likelihoods
# to the current set. Then we filter the values. We peel off the likelihoods
# to return and the last windowSize values to store for later.
likelihoods2 = params["historicalLikelihoods"] + list(likelihoods)
filteredLikelihoods = _filterLikelihoods(likelihoods2)
likelihoods[:] = filteredLikelihoods[-len(likelihoods):]
historicalLikelihoods = likelihoods2[-min(windowSize, len(likelihoods2)):]
# Update the estimator
newParams = {
"distribution": params["distribution"],
"movingAverage": {
"historicalValues": historicalValues,
"total": total,
"windowSize": windowSize,
},
"historicalLikelihoods": historicalLikelihoods,
}
assert len(newParams["historicalLikelihoods"]) <= windowSize
if verbosity > 3:
print "Number of likelihoods:", len(likelihoods)
print "First 20 likelihoods:", likelihoods[0:min(20, len(likelihoods))]
print "Leaving updateAnomalyLikelihoods."
return (likelihoods, aggRecordList, newParams)
def _filterLikelihoods(likelihoods,
redThreshold=0.99999, yellowThreshold=0.999):
"""
Filter the list of raw (pre-filtered) likelihoods so that we only preserve
sharp increases in likelihood. 'likelihoods' can be a numpy array of floats or
a list of floats.
:returns: A new list of floats likelihoods containing the filtered values.
"""
redThreshold = 1.0 - redThreshold
yellowThreshold = 1.0 - yellowThreshold
# The first value is untouched
filteredLikelihoods = [likelihoods[0]]
for i, v in enumerate(likelihoods[1:]):
if v <= redThreshold:
# Value is in the redzone
if likelihoods[i] > redThreshold:
# Previous value is not in redzone, so leave as-is
filteredLikelihoods.append(v)
else:
filteredLikelihoods.append(yellowThreshold)
else:
      # Value is not in the redzone, so leave as-is
filteredLikelihoods.append(v)
return filteredLikelihoods
def _anomalyScoreMovingAverage(anomalyScores,
windowSize=10,
verbosity=0,
):
"""
Given a list of anomaly scores return a list of averaged records.
anomalyScores is assumed to be a list of records of the form:
[datetime.datetime(2013, 8, 10, 23, 0), 6.0, 1.0]
  Each record in the returned list contains:
[datetime, value, averagedScore]
*Note:* we only average the anomaly score.
"""
historicalValues = []
total = 0.0
averagedRecordList = [] # Aggregated records
for record in anomalyScores:
# Skip (but log) records without correct number of entries
if not isinstance(record, (list, tuple)) or len(record) != 3:
if verbosity >= 1:
print "Malformed record:", record
continue
avg, historicalValues, total = (
MovingAverage.compute(historicalValues, total, record[2], windowSize)
)
averagedRecordList.append( [record[0], record[1], avg] )
if verbosity > 2:
print "Aggregating input record:", record
print "Result:", [record[0], record[1], avg]
return averagedRecordList, historicalValues, total
def estimateNormal(sampleData, performLowerBoundCheck=True):
"""
:param sampleData:
:type sampleData: Numpy array.
:param performLowerBoundCheck:
:type performLowerBoundCheck: bool
:returns: A dict containing the parameters of a normal distribution based on
the ``sampleData``.
"""
params = {
"name": "normal",
"mean": numpy.mean(sampleData),
"variance": numpy.var(sampleData),
}
if performLowerBoundCheck:
# Handle edge case of almost no deviations and super low anomaly scores. We
# find that such low anomaly means can happen, but then the slightest blip
# of anomaly score can cause the likelihood to jump up to red.
if params["mean"] < 0.03:
params["mean"] = 0.03
# Catch all for super low variance to handle numerical precision issues
if params["variance"] < 0.0003:
params["variance"] = 0.0003
# Compute standard deviation
if params["variance"] > 0:
params["stdev"] = math.sqrt(params["variance"])
else:
params["stdev"] = 0
return params
def nullDistribution(verbosity=0):
"""
:param verbosity: integer controlling extent of printouts for debugging
:type verbosity: int
:returns: A distribution that is very broad and makes every anomaly score
between 0 and 1 pretty likely.
"""
if verbosity>0:
print "Returning nullDistribution"
return {
"name": "normal",
"mean": 0.5,
"variance": 1e6,
"stdev": 1e3,
}
def normalProbability(x, distributionParams):
"""
Given the normal distribution specified by the mean and standard deviation in
distributionParams, return the probability of getting samples > x.
This is the Q-function: the tail probability of the normal distribution.
:param distributionParams: dict with 'mean' and 'stdev' of the distribution
"""
if "mean" not in distributionParams or "stdev" not in distributionParams:
raise RuntimeError("Insufficient parameters to specify the distribution.")
if x < distributionParams["mean"]:
# Gaussian is symmetrical around mean, so flip to get the tail probability
xp = 2 * distributionParams["mean"] - x
return 1.0 - normalProbability(xp, distributionParams)
# Calculate the Q function with the complementary error function, explained
# here: http://www.gaussianwaves.com/2012/07/q-function-and-error-functions
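  # Q(z) = 0.5 * erfc(z / sqrt(2)); the literal 1.4142 below approximates sqrt(2).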
z = (x - distributionParams["mean"]) / distributionParams["stdev"]
return 0.5 * math.erfc(z/1.4142)
def isValidEstimatorParams(p):
"""
:returns: ``True`` if ``p`` is a valid estimator params as might be returned
by ``estimateAnomalyLikelihoods()`` or ``updateAnomalyLikelihoods``,
``False`` otherwise. Just does some basic validation.
"""
if not isinstance(p, dict):
return False
if not p.has_key("distribution"):
return False
if not p.has_key("movingAverage"):
return False
dist = p["distribution"]
if not (dist.has_key("mean") and dist.has_key("name")
and dist.has_key("variance") and dist.has_key("stdev")):
return False
return True
| 1 | 21,700 | Should we rename this to tailProbability? | numenta-nupic | py |
@@ -431,6 +431,9 @@ drutil_insert_get_mem_addr_arm(void *drcontext, instrlist_t *bb, instr_t *where,
index = replace_stolen_reg(drcontext, bb, where, memref, dst, scratch,
scratch_used);
}
+ if (opnd_get_base_aligned(memref)) {
+ // TODO
+ }
if (index == REG_NULL && opnd_get_disp(memref) != 0) {
/* first try "add dst, base, #disp" */
instr = negated | 1 | /* **********************************************************
* Copyright (c) 2011-2021 Google, Inc. All rights reserved.
* Copyright (c) 2008-2010 VMware, Inc. All rights reserved.
* **********************************************************/
/* drutil: DynamoRIO Instrumentation Utilities
* Derived from Dr. Memory: the memory debugger
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation;
* version 2.1 of the License, and no later version.
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
/* DynamoRIO Instrumentation Utilities Extension */
#include "dr_api.h"
#include "drmgr.h"
#include "../ext_utils.h"
/* currently using asserts on internal logic sanity checks (never on
* input from user)
*/
#ifdef DEBUG
# define ASSERT(x, msg) DR_ASSERT_MSG(x, msg)
#else
# define ASSERT(x, msg) /* nothing */
#endif
/* There are cases where notifying the user is the right thing, even for a library.
* Xref i#1055 where w/o visible notification the user might not know what's
* going on.
*/
#ifdef WINDOWS
# define USAGE_ERROR(msg) \
do { \
dr_messagebox("FATAL USAGE ERROR: %s", msg); \
dr_abort(); \
} while (0);
#else
# define USAGE_ERROR(msg) \
do { \
dr_fprintf(STDERR, "FATAL USAGE ERROR: %s\n", msg); \
dr_abort(); \
} while (0);
#endif
#define PRE instrlist_meta_preinsert
/* for inserting an app instruction, which must have a translation ("xl8") field */
#define PREXL8 instrlist_preinsert
#ifdef X86
static uint drutil_xsave_area_size;
#endif
/***************************************************************************
* INIT
*/
static int drutil_init_count;
#ifdef X86
static inline void
native_unix_cpuid(uint *eax, uint *ebx, uint *ecx, uint *edx)
{
# ifdef UNIX
/* We need to do this xbx trick, because xbx might be used for fPIC,
* and gcc < 5 chokes on it. This can get removed and replaced by
* a "=b" constraint when moving to gcc-5.
*/
# ifdef X64
/* In 64-bit, we are getting a 64-bit pointer (xref i#3478). */
asm volatile("xchgq\t%%rbx, %q1\n\t"
"cpuid\n\t"
"xchgq\t%%rbx, %q1\n\t"
: "=a"(*eax), "=&r"(*ebx), "=c"(*ecx), "=d"(*edx)
: "0"(*eax), "2"(*ecx));
# else
asm volatile("xchgl\t%%ebx, %k1\n\t"
"cpuid\n\t"
"xchgl\t%%ebx, %k1\n\t"
: "=a"(*eax), "=&r"(*ebx), "=c"(*ecx), "=d"(*edx)
: "0"(*eax), "2"(*ecx));
# endif
# endif
}
static inline void
cpuid(uint op, uint subop, uint *eax, uint *ebx, uint *ecx, uint *edx)
{
# ifdef WINDOWS
int output[4];
__cpuidex(output, op, subop);
/* XXX i#3469: On a Windows laptop, I inspected this and it returned 1088
* bytes, which is a rather unexpected number. Investigate whether this is
* correct.
*/
*eax = output[0];
*ebx = output[1];
*ecx = output[2];
*edx = output[3];
# else
*eax = op;
*ecx = subop;
native_unix_cpuid(eax, ebx, ecx, edx);
# endif
}
#endif
DR_EXPORT
bool
drutil_init(void)
{
/* handle multiple sets of init/exit calls */
int count = dr_atomic_add32_return_sum(&drutil_init_count, 1);
if (count > 1)
return true;
#ifdef X86
/* XXX: we may want to re-factor and move functions like this into drx and/or
* using pre-existing versions in clients/drcpusim/tests/cpuid.c.
*/
uint eax, ecx, edx;
const int proc_ext_state_main_leaf = 0xd;
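    /* CPUID leaf 0xd, sub-leaf 0 reports in ebx the size in bytes of the xsave
     * area required by the state components currently enabled in XCR0.
     */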
cpuid(proc_ext_state_main_leaf, 0, &eax, &drutil_xsave_area_size, &ecx, &edx);
#endif
/* nothing yet: but putting in API up front in case need later */
return true;
}
DR_EXPORT
void
drutil_exit(void)
{
/* handle multiple sets of init/exit calls */
int count = dr_atomic_add32_return_sum(&drutil_init_count, -1);
if (count != 0)
return;
/* nothing yet: but putting in API up front in case need later */
}
/***************************************************************************
* MEMORY TRACING
*/
#ifdef X86
static bool
drutil_insert_get_mem_addr_x86(void *drcontext, instrlist_t *bb, instr_t *where,
opnd_t memref, reg_id_t dst, reg_id_t scratch,
OUT bool *scratch_used);
#elif defined(AARCHXX)
static bool
drutil_insert_get_mem_addr_arm(void *drcontext, instrlist_t *bb, instr_t *where,
opnd_t memref, reg_id_t dst, reg_id_t scratch,
OUT bool *scratch_used);
#endif /* X86/ARM */
/* Could be optimized to have scratch==dst for many common cases, but
* need way to get a 2nd reg for corner cases: simpler to ask caller
* to give us scratch reg distinct from dst
* XXX: however, this means that a client must spill the scratch reg
* every time, even though it's only used for far or xlat memref.
*
* XXX: provide a version that calls clean call? would have to hardcode
* what gets included: memory size? perhaps should try to create a
* vararg clean call arg feature to chain things together.
*/
DR_EXPORT
bool
drutil_insert_get_mem_addr_ex(void *drcontext, instrlist_t *bb, instr_t *where,
opnd_t memref, reg_id_t dst, reg_id_t scratch,
OUT bool *scratch_used)
{
if (scratch_used != NULL)
*scratch_used = false;
#if defined(X86)
return drutil_insert_get_mem_addr_x86(drcontext, bb, where, memref, dst, scratch,
scratch_used);
#elif defined(AARCHXX)
return drutil_insert_get_mem_addr_arm(drcontext, bb, where, memref, dst, scratch,
scratch_used);
#endif
}
DR_EXPORT
bool
drutil_insert_get_mem_addr(void *drcontext, instrlist_t *bb, instr_t *where,
opnd_t memref, reg_id_t dst, reg_id_t scratch)
{
#if defined(X86)
return drutil_insert_get_mem_addr_x86(drcontext, bb, where, memref, dst, scratch,
NULL);
#elif defined(AARCHXX)
return drutil_insert_get_mem_addr_arm(drcontext, bb, where, memref, dst, scratch,
NULL);
#endif
}
#ifdef X86
static bool
drutil_insert_get_mem_addr_x86(void *drcontext, instrlist_t *bb, instr_t *where,
opnd_t memref, reg_id_t dst, reg_id_t scratch,
OUT bool *scratch_used)
{
if (opnd_is_far_base_disp(memref) &&
/* We assume that far memory references via %ds and %es are flat,
* i.e. the segment base is 0, so we only handle %fs and %gs here.
* The assumption is consistent with dr_insert_get_seg_base,
* which does say for windows it only supports TLS segment,
* and inserts "mov 0 => reg" for %ds and %es instead.
*/
opnd_get_segment(memref) != DR_SEG_ES && opnd_get_segment(memref) != DR_SEG_DS &&
/* cs: is sometimes seen, as here on win10:
* RPCRT4!Invoke+0x28:
* 76d85ea0 2eff1548d5de76 call dword ptr cs:[RPCRT4!
* __guard_check_icall_fptr (76ded548)]
* We assume it's flat.
*/
opnd_get_segment(memref) != DR_SEG_CS) {
instr_t *near_in_scratch = NULL;
reg_id_t reg_segbase = dst;
/* If we need two steps, we get the near first as it may depend on dst. */
if (opnd_uses_reg(memref, dst) ||
(opnd_get_base(memref) != DR_REG_NULL &&
opnd_get_index(memref) != DR_REG_NULL)) {
/* We need a scratch reg. We document these conditions so it's user error
* if one wasn't provided.
*/
if (scratch == DR_REG_NULL)
return false;
if ((opnd_get_base(memref) == DR_REG_NULL ||
opnd_get_index(memref) == DR_REG_NULL) &&
!opnd_uses_reg(memref, scratch)) {
/* We can do it one step if we swap regs. */
reg_id_t temp = reg_segbase;
reg_segbase = scratch;
scratch = temp;
} else {
/* We have to take two steps. */
opnd_set_size(&memref, OPSZ_lea);
if (scratch_used != NULL)
*scratch_used = true;
near_in_scratch =
INSTR_CREATE_lea(drcontext, opnd_create_reg(scratch), memref);
PRE(bb, where, near_in_scratch);
}
}
/* Now get segment base into dst, then add to near address. */
if (!dr_insert_get_seg_base(drcontext, bb, where, opnd_get_segment(memref),
reg_segbase))
return false;
if (near_in_scratch != NULL) {
PRE(bb, where,
INSTR_CREATE_lea(
drcontext, opnd_create_reg(dst),
opnd_create_base_disp(reg_segbase, scratch, 1, 0, OPSZ_lea)));
} else {
reg_id_t base = opnd_get_base(memref);
reg_id_t index = opnd_get_index(memref);
int scale = opnd_get_scale(memref);
int disp = opnd_get_disp(memref);
if (opnd_get_base(memref) == DR_REG_NULL) {
base = reg_segbase;
} else if (opnd_get_index(memref) == DR_REG_NULL) {
index = reg_segbase;
scale = 1;
} else {
ASSERT(false, "memaddr internal error");
}
PRE(bb, where,
INSTR_CREATE_lea(
drcontext, opnd_create_reg(dst),
opnd_create_base_disp(base, index, scale, disp, OPSZ_lea)));
}
} else if (opnd_is_base_disp(memref)) {
/* special handling for xlat instr, [%ebx,%al]
* - save %eax
* - movzx %al => %eax
* - lea [%ebx, %eax] => dst
* - restore %eax
*/
bool is_xlat = false;
if (opnd_get_index(memref) == DR_REG_AL) {
is_xlat = true;
if (scratch == DR_REG_NULL)
return false;
if (scratch != DR_REG_XAX && dst != DR_REG_XAX) {
/* we do not have to save xax if it is saved by caller */
if (scratch_used != NULL)
*scratch_used = true;
PRE(bb, where,
INSTR_CREATE_mov_ld(drcontext, opnd_create_reg(scratch),
opnd_create_reg(DR_REG_XAX)));
}
PRE(bb, where,
INSTR_CREATE_movzx(drcontext, opnd_create_reg(DR_REG_XAX),
opnd_create_reg(DR_REG_AL)));
memref = opnd_create_base_disp(DR_REG_XBX, DR_REG_XAX, 1, 0, OPSZ_lea);
}
/* lea [ref] => reg */
opnd_set_size(&memref, OPSZ_lea);
PRE(bb, where, INSTR_CREATE_lea(drcontext, opnd_create_reg(dst), memref));
if (is_xlat && scratch != DR_REG_XAX && dst != DR_REG_XAX) {
PRE(bb, where,
INSTR_CREATE_mov_ld(drcontext, opnd_create_reg(DR_REG_XAX),
opnd_create_reg(scratch)));
}
} else if (IF_X64(opnd_is_rel_addr(memref) ||) opnd_is_abs_addr(memref)) {
/* mov addr => reg */
PRE(bb, where,
INSTR_CREATE_mov_imm(drcontext, opnd_create_reg(dst),
OPND_CREATE_INTPTR(opnd_get_addr(memref))));
} else {
/* unhandled memory reference */
return false;
}
return true;
}
#elif defined(AARCHXX)
# ifdef ARM
static bool
instr_has_opnd(instr_t *instr, opnd_t opnd)
{
int i;
if (instr == NULL)
return false;
for (i = 0; i < instr_num_srcs(instr); i++) {
if (opnd_same(opnd, instr_get_src(instr, i)))
return true;
}
for (i = 0; i < instr_num_dsts(instr); i++) {
if (opnd_same(opnd, instr_get_dst(instr, i)))
return true;
}
return false;
}
static instr_t *
instrlist_find_app_instr(instrlist_t *ilist, instr_t *where, opnd_t opnd)
{
instr_t *app;
/* looking for app instr at/after where */
for (app = instr_is_app(where) ? where : instr_get_next_app(where); app != NULL;
app = instr_get_next_app(app)) {
if (instr_has_opnd(app, opnd))
return app;
}
/* looking for app instr before where */
for (app = instr_get_prev_app(where); app != NULL; app = instr_get_prev_app(app)) {
if (instr_has_opnd(app, opnd))
return app;
}
return NULL;
}
# endif /* ARM */
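/* If memref uses the register stolen by DR for its own TLS, load the app's value
 * of that register into dst (or into scratch if memref uses dst) and return the
 * register chosen, so the caller can compute the address from the app value.
 */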
static reg_id_t
replace_stolen_reg(void *drcontext, instrlist_t *bb, instr_t *where, opnd_t memref,
reg_id_t dst, reg_id_t scratch, OUT bool *scratch_used)
{
reg_id_t reg;
reg = opnd_uses_reg(memref, dst) ? scratch : dst;
if (scratch_used != NULL && reg == scratch)
*scratch_used = true;
DR_ASSERT(!opnd_uses_reg(memref, reg));
dr_insert_get_stolen_reg_value(drcontext, bb, where, reg);
return reg;
}
static bool
drutil_insert_get_mem_addr_arm(void *drcontext, instrlist_t *bb, instr_t *where,
opnd_t memref, reg_id_t dst, reg_id_t scratch,
OUT bool *scratch_used)
{
if (!opnd_is_base_disp(memref) IF_AARCH64(&&!opnd_is_rel_addr(memref)))
return false;
# ifdef ARM
if (opnd_get_base(memref) == DR_REG_PC) {
app_pc target;
/* We need the app instr for getting the rel_addr_target.
* XXX: add drutil_insert_get_mem_addr_ex to let client provide app instr.
*/
instr_t *app = instrlist_find_app_instr(bb, where, memref);
if (app == NULL)
return false;
if (!instr_get_rel_addr_target(app, &target))
return false;
instrlist_insert_mov_immed_ptrsz(drcontext, (ptr_int_t)target,
opnd_create_reg(dst), bb, where, NULL, NULL);
}
# else /* AARCH64 */
if (opnd_is_rel_addr(memref)) {
instrlist_insert_mov_immed_ptrsz(drcontext, (ptr_int_t)opnd_get_addr(memref),
opnd_create_reg(dst), bb, where, NULL, NULL);
return true;
}
# endif /* ARM/AARCH64 */
else {
instr_t *instr;
reg_id_t base = opnd_get_base(memref);
reg_id_t index = opnd_get_index(memref);
bool negated = TEST(DR_OPND_NEGATED, opnd_get_flags(memref));
int disp = opnd_get_disp(memref);
reg_id_t stolen = dr_get_stolen_reg();
/* On ARM, disp is never negative; on AArch64, we do not use DR_OPND_NEGATED. */
ASSERT(IF_ARM_ELSE(disp >= 0, !negated), "DR_OPND_NEGATED internal error");
if (disp < 0) {
disp = -disp;
negated = !negated;
}
if (dst == stolen || scratch == stolen)
return false;
if (base == stolen) {
base = replace_stolen_reg(drcontext, bb, where, memref, dst, scratch,
scratch_used);
} else if (index == stolen) {
index = replace_stolen_reg(drcontext, bb, where, memref, dst, scratch,
scratch_used);
}
if (index == REG_NULL && opnd_get_disp(memref) != 0) {
/* first try "add dst, base, #disp" */
instr = negated
? INSTR_CREATE_sub(drcontext, opnd_create_reg(dst), opnd_create_reg(base),
OPND_CREATE_INT(disp))
: XINST_CREATE_add_2src(drcontext, opnd_create_reg(dst),
opnd_create_reg(base), OPND_CREATE_INT(disp));
# define MAX_ADD_IMM_DISP (1 << 12)
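        /* On AArch64 an add/sub immediate can only encode a 12-bit value, hence the
         * explicit bound; on ARM we instead ask whether the encoding is possible. */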
if (IF_ARM_ELSE(instr_is_encoding_possible(instr), disp < MAX_ADD_IMM_DISP)) {
PRE(bb, where, instr);
return true;
}
instr_destroy(drcontext, instr);
/* The memref may have a disp that cannot be directly encoded into an
* add_imm instr, so we use movw to put disp into the scratch instead
* and fake it as an index reg to insert an add instr later.
*/
/* if dst is used in memref, we use scratch instead */
index = (base == dst) ? scratch : dst;
if (scratch_used != NULL && index == scratch)
*scratch_used = true;
PRE(bb, where,
XINST_CREATE_load_int(drcontext, opnd_create_reg(index),
OPND_CREATE_INT(disp)));
/* "add" instr is inserted below with a fake index reg added here */
}
if (index != REG_NULL) {
# ifdef ARM
uint amount;
dr_shift_type_t shift = opnd_get_index_shift(memref, &amount);
instr = negated
? INSTR_CREATE_sub_shimm(drcontext, opnd_create_reg(dst),
opnd_create_reg(base), opnd_create_reg(index),
OPND_CREATE_INT(shift), OPND_CREATE_INT(amount))
: INSTR_CREATE_add_shimm(drcontext, opnd_create_reg(dst),
opnd_create_reg(base), opnd_create_reg(index),
OPND_CREATE_INT(shift), OPND_CREATE_INT(amount));
# else /* AARCH64 */
uint amount;
dr_extend_type_t extend = opnd_get_index_extend(memref, NULL, &amount);
instr = negated
? INSTR_CREATE_sub_extend(drcontext, opnd_create_reg(dst),
opnd_create_reg(base), opnd_create_reg(index),
OPND_CREATE_INT(extend),
OPND_CREATE_INT(amount))
: INSTR_CREATE_add_extend(drcontext, opnd_create_reg(dst),
opnd_create_reg(base), opnd_create_reg(index),
OPND_CREATE_INT(extend),
OPND_CREATE_INT(amount));
# endif /* ARM/AARCH64 */
PRE(bb, where, instr);
} else if (base != dst) {
PRE(bb, where,
XINST_CREATE_move(drcontext, opnd_create_reg(dst),
opnd_create_reg(base)));
}
}
return true;
}
#endif /* X86/AARCHXX */
DR_EXPORT
uint
drutil_opnd_mem_size_in_bytes(opnd_t memref, instr_t *inst)
{
#ifdef X86
if (inst != NULL && instr_get_opcode(inst) == OP_enter) {
uint extra_pushes = (uint)opnd_get_immed_int(instr_get_src(inst, 1));
uint sz = opnd_size_in_bytes(opnd_get_size(instr_get_dst(inst, 1)));
ASSERT(opnd_is_immed_int(instr_get_src(inst, 1)), "malformed OP_enter");
return sz * extra_pushes;
} else if (inst != NULL && instr_is_xsave(inst)) {
/* See the doxygen docs. */
switch (instr_get_opcode(inst)) {
case OP_xsave32:
case OP_xsave64:
case OP_xsaveopt32:
case OP_xsaveopt64:
case OP_xsavec32:
case OP_xsavec64: return drutil_xsave_area_size; break;
default: ASSERT(false, "unknown xsave opcode"); return 0;
}
} else
#endif /* X86 */
return opnd_size_in_bytes(opnd_get_size(memref));
}
#ifdef X86
static bool
opc_is_stringop_loop(uint opc)
{
return (opc == OP_rep_ins || opc == OP_rep_outs || opc == OP_rep_movs ||
opc == OP_rep_stos || opc == OP_rep_lods || opc == OP_rep_cmps ||
opc == OP_repne_cmps || opc == OP_rep_scas || opc == OP_repne_scas);
}
static instr_t *
create_nonloop_stringop(void *drcontext, instr_t *inst)
{
instr_t *res;
int nsrc = instr_num_srcs(inst);
int ndst = instr_num_dsts(inst);
uint opc = instr_get_opcode(inst);
int i;
ASSERT(opc_is_stringop_loop(opc), "invalid param");
switch (opc) {
    case OP_rep_ins: opc = OP_ins; break;
    case OP_rep_outs: opc = OP_outs; break;
    case OP_rep_movs: opc = OP_movs; break;
    case OP_rep_stos: opc = OP_stos; break;
    case OP_rep_lods: opc = OP_lods; break;
    case OP_rep_cmps: opc = OP_cmps; break;
    case OP_repne_cmps: opc = OP_cmps; break;
    case OP_rep_scas: opc = OP_scas; break;
    case OP_repne_scas: opc = OP_scas; break;
default: ASSERT(false, "not a stringop loop opcode"); return NULL;
}
res = instr_build(drcontext, opc, ndst - 1, nsrc - 1);
/* We assume xcx is last src and last dst */
ASSERT(opnd_is_reg(instr_get_src(inst, nsrc - 1)) &&
opnd_uses_reg(instr_get_src(inst, nsrc - 1), DR_REG_XCX),
"rep opnd order assumption violated");
ASSERT(opnd_is_reg(instr_get_dst(inst, ndst - 1)) &&
opnd_uses_reg(instr_get_dst(inst, ndst - 1), DR_REG_XCX),
"rep opnd order assumption violated");
for (i = 0; i < nsrc - 1; i++)
instr_set_src(res, i, instr_get_src(inst, i));
for (i = 0; i < ndst - 1; i++)
instr_set_dst(res, i, instr_get_dst(inst, i));
instr_set_translation(res, instr_get_app_pc(inst));
return res;
}
#endif /* X86 */
DR_EXPORT
bool
drutil_instr_is_stringop_loop(instr_t *inst)
{
#ifdef X86
return opc_is_stringop_loop(instr_get_opcode(inst));
#else
return false;
#endif
}
DR_EXPORT
bool
drutil_expand_rep_string_ex(void *drcontext, instrlist_t *bb, bool *expanded OUT,
instr_t **stringop OUT)
{
#ifdef X86
instr_t *inst, *next_inst, *first_app = NULL;
bool delete_rest = false;
uint opc;
#endif
if (drmgr_current_bb_phase(drcontext) != DRMGR_PHASE_APP2APP) {
USAGE_ERROR("drutil_expand_rep_string* must be called from "
"drmgr's app2app phase");
return false;
}
#ifdef X86
/* Make a rep string instr be its own bb: the loop is going to
* duplicate the tail anyway, and have to terminate at the added cbr.
*/
for (inst = instrlist_first(bb); inst != NULL; inst = next_inst) {
next_inst = instr_get_next(inst);
if (delete_rest) {
instrlist_remove(bb, inst);
instr_destroy(drcontext, inst);
} else if (instr_is_app(inst)) {
/* We have to handle meta instrs, as drwrap_replace_native() and
* some other app2app xforms use them.
*/
if (first_app == NULL)
first_app = inst;
opc = instr_get_opcode(inst);
if (opc_is_stringop_loop(opc)) {
delete_rest = true;
if (inst != first_app) {
instrlist_remove(bb, inst);
instr_destroy(drcontext, inst);
}
}
}
}
/* Convert to a regular loop if it's the sole instr */
inst = first_app;
opc = (inst == NULL) ? OP_INVALID : instr_get_opcode(inst);
if (opc_is_stringop_loop(opc)) {
/* A rep string instr does check for 0 up front. DR limits us
* to 1 cbr but drmgr will mark the extras as meta later. If ecx is uninit
* the loop* will catch it so we're ok not instrumenting this.
* I would just jecxz to loop, but w/ instru it can't reach so
* I have to add yet more internal jmps that will execute each
* iter. We use drmgr's feature of allowing extra non-meta instrs.
* Our "mov $1,ecx" will remain non-meta.
* Note that we do not want any of the others to have xl8 as its
* translation as that could trigger duplicate clean calls from
* other passes looking for post-call or other addresses so we use
* xl8+1 which will always be mid-instr. NULL is another possibility,
* but it results in meta-may-fault instrs that need a translation
* and naturally want to use the app instr's translation.
*
* So we have:
* rep movs
* =>
* jecxz zero
* jmp iter
* zero:
* mov $0x00000001 -> %ecx
* jmp pre_loop
* iter:
* movs %ds:(%esi) %esi %edi -> %es:(%edi) %esi %edi
* pre_loop:
* loop
*
* XXX: this non-linear code can complicate subsequent
* analysis routines. Perhaps we should consider splitting
* into multiple bbs?
*
* XXX i#1460: the jecxz is marked meta by drmgr (via i#676) and is
* thus not mangled by DR, resulting in just an 8-bit reach.
*/
app_pc xl8 = instr_get_app_pc(inst);
app_pc fake_xl8 = xl8 + 1;
opnd_t xcx = instr_get_dst(inst, instr_num_dsts(inst) - 1);
instr_t *loop, *pre_loop, *jecxz, *zero, *iter, *string;
ASSERT(opnd_uses_reg(xcx, DR_REG_XCX), "rep string opnd order mismatch");
ASSERT(inst == instrlist_last(bb), "repstr not alone in bb");
emulated_instr_t emulated_instr;
emulated_instr.size = sizeof(emulated_instr);
emulated_instr.pc = xl8;
emulated_instr.instr = inst;
/* We can't place an end label after our conditional branch as DR won't
* allow anything past the branch (we explored relaxing that and ran into
* many complexities that were not worth further work), so we instead
* use the flag to mark the whole block as emulated.
*/
emulated_instr.flags = DR_EMULATE_REST_OF_BLOCK |
/* This is a different type of emulation where we want
* observational clients to look at the original instruction for instruction
* fetch info but the emulation sequence for data load/store info. We use
* this flag in emulated_instr_t to indicate this.
*/
DR_EMULATE_INSTR_ONLY;
drmgr_insert_emulation_start(drcontext, bb, inst, &emulated_instr);
pre_loop = INSTR_CREATE_label(drcontext);
/* hack to handle loop decrementing xcx: simpler if could have 2 cbrs! */
if (opnd_get_size(xcx) == OPSZ_8) {
/* rely on setting upper 32 bits to zero */
zero = INSTR_CREATE_mov_imm(drcontext, opnd_create_reg(DR_REG_ECX),
OPND_CREATE_INT32(1));
} else {
zero = INSTR_CREATE_mov_imm(drcontext, xcx,
opnd_create_immed_int(1, opnd_get_size(xcx)));
}
iter = INSTR_CREATE_label(drcontext);
jecxz = INSTR_CREATE_jecxz(drcontext, opnd_create_instr(zero));
/* be sure to match the same counter reg width */
instr_set_src(jecxz, 1, xcx);
PREXL8(bb, inst, INSTR_XL8(jecxz, fake_xl8));
PREXL8(bb, inst,
INSTR_XL8(INSTR_CREATE_jmp_short(drcontext, opnd_create_instr(iter)),
fake_xl8));
PREXL8(bb, inst, INSTR_XL8(zero, fake_xl8));
/* target the instrumentation for the loop, not loop itself */
PREXL8(bb, inst,
INSTR_XL8(INSTR_CREATE_jmp(drcontext, opnd_create_instr(pre_loop)),
fake_xl8));
PRE(bb, inst, iter);
string = INSTR_XL8(create_nonloop_stringop(drcontext, inst), xl8);
if (stringop != NULL)
*stringop = string;
PREXL8(bb, inst, string);
PRE(bb, inst, pre_loop);
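        /* cmps/scas terminate on a flag condition as well as on xcx, so mirror
         * rep vs repne with loope vs loopne; the other string ops only count xcx. */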
if (opc == OP_rep_cmps || opc == OP_rep_scas) {
loop = INSTR_CREATE_loope(drcontext, opnd_create_pc(xl8));
} else if (opc == OP_repne_cmps || opc == OP_repne_scas) {
loop = INSTR_CREATE_loopne(drcontext, opnd_create_pc(xl8));
} else {
loop = INSTR_CREATE_loop(drcontext, opnd_create_pc(xl8));
}
/* be sure to match the same counter reg width */
instr_set_src(loop, 1, xcx);
instr_set_dst(loop, 0, xcx);
PREXL8(bb, inst, INSTR_XL8(loop, fake_xl8));
/* Now throw out the original instr. It is part of the emulation label
* and will be freed along with the instrlist so we just remove it from
* the list and do not free it ourselves.
*/
instrlist_remove(bb, inst);
if (expanded != NULL)
*expanded = true;
return true;
}
#endif
if (expanded != NULL)
*expanded = false;
if (stringop != NULL)
*stringop = NULL;
return true;
}
DR_EXPORT
bool
drutil_expand_rep_string(void *drcontext, instrlist_t *bb)
{
return drutil_expand_rep_string_ex(drcontext, bb, NULL, NULL);
}
| 1 | 25,840 | Add the issue number too i#4400 | DynamoRIO-dynamorio | c |
@@ -373,7 +373,7 @@ abstract class BaseFile<F>
if (list != null) {
List<E> copy = Lists.newArrayListWithExpectedSize(list.size());
copy.addAll(list);
- return Collections.unmodifiableList(copy);
+ return copy;
}
return null;
} | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg;
import java.io.Serializable;
import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import org.apache.avro.Schema;
import org.apache.avro.generic.IndexedRecord;
import org.apache.avro.specific.SpecificData;
import org.apache.iceberg.avro.AvroSchemaUtil;
import org.apache.iceberg.relocated.com.google.common.base.MoreObjects;
import org.apache.iceberg.relocated.com.google.common.collect.Lists;
import org.apache.iceberg.relocated.com.google.common.collect.Maps;
import org.apache.iceberg.types.Type;
import org.apache.iceberg.types.Types;
import org.apache.iceberg.util.ByteBuffers;
/**
* Base class for both {@link DataFile} and {@link DeleteFile}.
*/
abstract class BaseFile<F>
implements ContentFile<F>, IndexedRecord, StructLike, SpecificData.SchemaConstructable, Serializable {
static final Types.StructType EMPTY_STRUCT_TYPE = Types.StructType.of();
static final PartitionData EMPTY_PARTITION_DATA = new PartitionData(EMPTY_STRUCT_TYPE) {
@Override
public PartitionData copy() {
return this; // this does not change
}
};
private int[] fromProjectionPos;
private Types.StructType partitionType;
private FileContent content = FileContent.DATA;
private String filePath = null;
private FileFormat format = null;
private PartitionData partitionData = null;
private Long recordCount = null;
private long fileSizeInBytes = -1L;
// optional fields
private Map<Integer, Long> columnSizes = null;
private Map<Integer, Long> valueCounts = null;
private Map<Integer, Long> nullValueCounts = null;
private Map<Integer, ByteBuffer> lowerBounds = null;
private Map<Integer, ByteBuffer> upperBounds = null;
private List<Long> splitOffsets = null;
private byte[] keyMetadata = null;
// cached schema
private transient Schema avroSchema = null;
/**
* Used by Avro reflection to instantiate this class when reading manifest files.
*/
BaseFile(Schema avroSchema) {
this.avroSchema = avroSchema;
Types.StructType schema = AvroSchemaUtil.convert(avroSchema).asNestedType().asStructType();
// partition type may be null if the field was not projected
Type partType = schema.fieldType("partition");
if (partType != null) {
this.partitionType = partType.asNestedType().asStructType();
} else {
this.partitionType = EMPTY_STRUCT_TYPE;
}
List<Types.NestedField> fields = schema.fields();
List<Types.NestedField> allFields = DataFile.getType(partitionType).fields();
this.fromProjectionPos = new int[fields.size()];
for (int i = 0; i < fromProjectionPos.length; i += 1) {
boolean found = false;
for (int j = 0; j < allFields.size(); j += 1) {
if (fields.get(i).fieldId() == allFields.get(j).fieldId()) {
found = true;
fromProjectionPos[i] = j;
}
}
if (!found) {
throw new IllegalArgumentException("Cannot find projected field: " + fields.get(i));
}
}
this.partitionData = new PartitionData(partitionType);
}
BaseFile(FileContent content, String filePath, FileFormat format,
PartitionData partition, long fileSizeInBytes, long recordCount,
Map<Integer, Long> columnSizes, Map<Integer, Long> valueCounts, Map<Integer, Long> nullValueCounts,
Map<Integer, ByteBuffer> lowerBounds, Map<Integer, ByteBuffer> upperBounds, List<Long> splitOffsets,
ByteBuffer keyMetadata) {
this.content = content;
this.filePath = filePath;
this.format = format;
// this constructor is used by DataFiles.Builder, which passes null for unpartitioned data
if (partition == null) {
this.partitionData = EMPTY_PARTITION_DATA;
this.partitionType = EMPTY_PARTITION_DATA.getPartitionType();
} else {
this.partitionData = partition;
this.partitionType = partition.getPartitionType();
}
// this will throw NPE if metrics.recordCount is null
this.recordCount = recordCount;
this.fileSizeInBytes = fileSizeInBytes;
this.columnSizes = columnSizes;
this.valueCounts = valueCounts;
this.nullValueCounts = nullValueCounts;
this.lowerBounds = SerializableByteBufferMap.wrap(lowerBounds);
this.upperBounds = SerializableByteBufferMap.wrap(upperBounds);
this.splitOffsets = copy(splitOffsets);
this.keyMetadata = ByteBuffers.toByteArray(keyMetadata);
}
/**
* Copy constructor.
*
* @param toCopy a generic data file to copy.
* @param fullCopy whether to copy all fields or to drop column-level stats
*/
BaseFile(BaseFile<F> toCopy, boolean fullCopy) {
this.content = toCopy.content;
this.filePath = toCopy.filePath;
this.format = toCopy.format;
this.partitionData = toCopy.partitionData.copy();
this.partitionType = toCopy.partitionType;
this.recordCount = toCopy.recordCount;
this.fileSizeInBytes = toCopy.fileSizeInBytes;
if (fullCopy) {
// TODO: support lazy conversion to/from map
this.columnSizes = copy(toCopy.columnSizes);
this.valueCounts = copy(toCopy.valueCounts);
this.nullValueCounts = copy(toCopy.nullValueCounts);
this.lowerBounds = SerializableByteBufferMap.wrap(copy(toCopy.lowerBounds));
this.upperBounds = SerializableByteBufferMap.wrap(copy(toCopy.upperBounds));
} else {
this.columnSizes = null;
this.valueCounts = null;
this.nullValueCounts = null;
this.lowerBounds = null;
this.upperBounds = null;
}
this.fromProjectionPos = toCopy.fromProjectionPos;
this.keyMetadata = toCopy.keyMetadata == null ? null : Arrays.copyOf(toCopy.keyMetadata, toCopy.keyMetadata.length);
this.splitOffsets = copy(toCopy.splitOffsets);
}
/**
* Constructor for Java serialization.
*/
BaseFile() {
}
protected abstract Schema getAvroSchema(Types.StructType partitionStruct);
@Override
public Schema getSchema() {
if (avroSchema == null) {
this.avroSchema = getAvroSchema(partitionType);
}
return avroSchema;
}
@Override
@SuppressWarnings("unchecked")
public void put(int i, Object value) {
int pos = i;
// if the schema was projected, map the incoming ordinal to the expected one
if (fromProjectionPos != null) {
pos = fromProjectionPos[i];
}
switch (pos) {
case 0:
this.content = value != null ? FileContent.values()[(Integer) value] : FileContent.DATA;
return;
case 1:
// always coerce to String for Serializable
this.filePath = value.toString();
return;
case 2:
this.format = FileFormat.valueOf(value.toString());
return;
case 3:
this.partitionData = (PartitionData) value;
return;
case 4:
this.recordCount = (Long) value;
return;
case 5:
this.fileSizeInBytes = (Long) value;
return;
case 6:
this.columnSizes = (Map<Integer, Long>) value;
return;
case 7:
this.valueCounts = (Map<Integer, Long>) value;
return;
case 8:
this.nullValueCounts = (Map<Integer, Long>) value;
return;
case 9:
this.lowerBounds = SerializableByteBufferMap.wrap((Map<Integer, ByteBuffer>) value);
return;
case 10:
this.upperBounds = SerializableByteBufferMap.wrap((Map<Integer, ByteBuffer>) value);
return;
case 11:
this.keyMetadata = ByteBuffers.toByteArray((ByteBuffer) value);
return;
case 12:
this.splitOffsets = (List<Long>) value;
return;
default:
// ignore the object, it must be from a newer version of the format
}
}
@Override
public <T> void set(int pos, T value) {
put(pos, value);
}
@Override
public Object get(int i) {
int pos = i;
// if the schema was projected, map the incoming ordinal to the expected one
if (fromProjectionPos != null) {
pos = fromProjectionPos[i];
}
switch (pos) {
case 0:
return content.id();
case 1:
return filePath;
case 2:
return format != null ? format.toString() : null;
case 3:
return partitionData;
case 4:
return recordCount;
case 5:
return fileSizeInBytes;
case 6:
return columnSizes;
case 7:
return valueCounts;
case 8:
return nullValueCounts;
case 9:
return lowerBounds;
case 10:
return upperBounds;
case 11:
return keyMetadata != null ? ByteBuffer.wrap(keyMetadata) : null;
case 12:
return splitOffsets;
default:
throw new UnsupportedOperationException("Unknown field ordinal: " + pos);
}
}
@Override
public <T> T get(int pos, Class<T> javaClass) {
return javaClass.cast(get(pos));
}
@Override
public int size() {
return DataFile.getType(EMPTY_STRUCT_TYPE).fields().size();
}
@Override
public FileContent content() {
return content;
}
@Override
public CharSequence path() {
return filePath;
}
@Override
public FileFormat format() {
return format;
}
@Override
public StructLike partition() {
return partitionData;
}
@Override
public long recordCount() {
return recordCount;
}
@Override
public long fileSizeInBytes() {
return fileSizeInBytes;
}
@Override
public Map<Integer, Long> columnSizes() {
return columnSizes;
}
@Override
public Map<Integer, Long> valueCounts() {
return valueCounts;
}
@Override
public Map<Integer, Long> nullValueCounts() {
return nullValueCounts;
}
@Override
public Map<Integer, ByteBuffer> lowerBounds() {
return lowerBounds;
}
@Override
public Map<Integer, ByteBuffer> upperBounds() {
return upperBounds;
}
@Override
public ByteBuffer keyMetadata() {
return keyMetadata != null ? ByteBuffer.wrap(keyMetadata) : null;
}
@Override
public List<Long> splitOffsets() {
return splitOffsets;
}
private static <K, V> Map<K, V> copy(Map<K, V> map) {
if (map != null) {
Map<K, V> copy = Maps.newHashMapWithExpectedSize(map.size());
copy.putAll(map);
return Collections.unmodifiableMap(copy);
}
return null;
}
private static <E> List<E> copy(List<E> list) {
if (list != null) {
List<E> copy = Lists.newArrayListWithExpectedSize(list.size());
copy.addAll(list);
return Collections.unmodifiableList(copy);
}
return null;
}
@Override
public String toString() {
return MoreObjects.toStringHelper(this)
.add("content", content.toString().toLowerCase(Locale.ROOT))
.add("file_path", filePath)
.add("file_format", format)
.add("partition", partitionData)
.add("record_count", recordCount)
.add("file_size_in_bytes", fileSizeInBytes)
.add("column_sizes", columnSizes)
.add("value_counts", valueCounts)
.add("null_value_counts", nullValueCounts)
.add("lower_bounds", lowerBounds)
.add("upper_bounds", upperBounds)
.add("key_metadata", keyMetadata == null ? "null" : "(redacted)")
.add("split_offsets", splitOffsets == null ? "null" : splitOffsets)
.toString();
}
}
| 1 | 20,982 | Why make this modifiable? | apache-iceberg | java |
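
The patch in this entry drops the Collections.unmodifiableList wrapper, so callers of splitOffsets() receive a copy they can now mutate. A minimal, runnable Java sketch of the difference the reviewer is questioning (CopySemanticsDemo and its variable names are illustrative only, not part of the Iceberg code):

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

// Illustrative demo class, not part of BaseFile.
class CopySemanticsDemo {
    public static void main(String[] args) {
        List<Long> src = new ArrayList<>(List.of(4L, 8L));

        // Old behaviour: the returned defensive copy rejects mutation.
        List<Long> wrapped = Collections.unmodifiableList(new ArrayList<>(src));
        try {
            wrapped.add(16L);
        } catch (UnsupportedOperationException e) {
            System.out.println("unmodifiable copy rejects mutation");
        }

        // Behaviour after the patch: the returned copy is mutable.
        List<Long> plain = new ArrayList<>(src);
        plain.add(16L);
        System.out.println("mutable copy now holds " + plain);
    }
}

Whether that mutability matters depends on how callers treat the returned list, which appears to be what the review comment is probing.
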
@@ -8,13 +8,11 @@ package blockchain
const (
// Erc721Binary a simple erc721 token bin
- Erc721Binary="60806040523480156200001157600080fd5b5060408051808201825260068082527f4e6674696573000000000000000000000000000000000000000000000000000060208084019190915283518085019094529083527f4e465449455300000000000000000000000000000000000000000000000000009083015290620000af7f01ffc9a700000000000000000000000000000000000000000000000000000000640100000000620001b3810204565b620000e37f80ac58cd00000000000000000000000000000000000000000000000000000000640100000000620001b3810204565b620001177f4f558e7900000000000000000000000000000000000000000000000000000000640100000000620001b3810204565b81516200012c90600590602085019062000220565b5080516200014290600690602084019062000220565b50620001777f780e9d6300000000000000000000000000000000000000000000000000000000640100000000620001b3810204565b620001ab7f5b5e139f00000000000000000000000000000000000000000000000000000000640100000000620001b3810204565b5050620002c5565b7fffffffff000000000000000000000000000000000000000000000000000000008082161415620001e357600080fd5b7fffffffff00000000000000000000000000000000000000000000000000000000166000908152602081905260409020805460ff19166001179055565b828054600181600116156101000203166002900490600052602060002090601f016020900481019282601f106200026357805160ff191683800117855562000293565b8280016001018555821562000293579182015b828111156200029357825182559160200191906001019062000276565b50620002a1929150620002a5565b5090565b620002c291905b80821115620002a15760008155600101620002ac565b90565b611b2480620002d56000396000f3006080604052600436106101275763ffffffff7c010000000000000000000000000000000000000000000000000000000060003504166301ffc9a7811461012c57806306fdde0314610162578063081812fc146101ec578063095ea7b31461022057806318160ddd1461024657806319fa8f501461026d57806323b872dd1461029f5780632f745c59146102c957806342842e0e146102ed5780634f558e79146103175780634f6ccce71461032f5780636352211e1461034757806370a082311461035f5780638462151c146103805780639507d39a146103f157806395d89b411461045f578063a22cb46514610474578063b88d4fde1461049a578063c87b56dd14610509578063e985e9c514610521578063efc81a8c14610548578063fdb05e851461055d575b600080fd5b34801561013857600080fd5b5061014e600160e060020a031960043516610581565b604080519115158252519081900360200190f35b34801561016e57600080fd5b506101776105a0565b6040805160208082528351818301528351919283929083019185019080838360005b838110156101b1578181015183820152602001610199565b50505050905090810190601f1680156101de5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b3480156101f857600080fd5b50610204600435610637565b60408051600160a060020a039092168252519081900360200190f35b34801561022c57600080fd5b50610244600160a060020a0360043516602435610652565b005b34801561025257600080fd5b5061025b610708565b60408051918252519081900360200190f35b34801561027957600080fd5b5061028261070e565b60408051600160e060020a03199092168252519081900360200190f35b3480156102ab57600080fd5b50610244600160a060020a0360043581169060243516604435610732565b3480156102d557600080fd5b5061025b600160a060020a03600435166024356107d5565b3480156102f957600080fd5b50610244600160a060020a0360043581169060243516604435610822565b34801561032357600080fd5b5061014e600435610843565b34801561033b57600080fd5b5061025b600435610860565b34801561035357600080fd5b50610204600435610895565b34801561036b57600080fd5b5061025b600160a060020a03600435166108bf565b34801561038c57600080fd5b506103a1600160a060020a03600435166108f2565b60408051602080825283518183015283519192839290830191858101910280838360005b838110156103dd5781810151838201526020016103c5565b505050509050019250505060405180910390f35b3480156103fd57600080fd5b5061040960043561095e565b60408051600160a060020
a03909816885260ff9687166020890152948616878601529285166060870152908416608086015290921660a084015267ffffffffffffffff90911660c0830152519081900360e00190f35b34801561046b57600080fd5b50610177610a91565b34801561048057600080fd5b50610244600160a060020a03600435166024351515610af2565b3480156104a657600080fd5b50604080516020601f60643560048181013592830184900484028501840190955281845261024494600160a060020a038135811695602480359092169560443595369560849401918190840183828082843750949750610b769650505050505050565b34801561051557600080fd5b50610177600435610b9e565b34801561052d57600080fd5b5061014e600160a060020a0360043581169060243516610c49565b34801561055457600080fd5b5061025b610c77565b34801561056957600080fd5b50610244600160a060020a0360043516602435610ffb565b600160e060020a03191660009081526020819052604090205460ff1690565b60058054604080516020601f600260001961010060018816150201909516949094049384018190048102820181019092528281526060939092909183018282801561062c5780601f106106015761010080835404028352916020019161062c565b820191906000526020600020905b81548152906001019060200180831161060f57829003601f168201915b505050505090505b90565b600090815260026020526040902054600160a060020a031690565b600061065d82610895565b9050600160a060020a03838116908216141561067857600080fd5b33600160a060020a038216148061069457506106948133610c49565b151561069f57600080fd5b600082815260026020526040808220805473ffffffffffffffffffffffffffffffffffffffff1916600160a060020a0387811691821790925591518593918516917f8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b92591a4505050565b60095490565b7f01ffc9a70000000000000000000000000000000000000000000000000000000081565b61073c3382611043565b151561074757600080fd5b600160a060020a038316151561075c57600080fd5b600160a060020a038216151561077157600080fd5b61077b83826110a2565b6107858382611113565b61078f828261121a565b8082600160a060020a031684600160a060020a03167fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef60405160405180910390a4505050565b60006107e0836108bf565b82106107eb57600080fd5b600160a060020a038316600090815260076020526040902080548390811061080f57fe5b9060005260206000200154905092915050565b61083e8383836020604051908101604052806000815250610b76565b505050565b600090815260016020526040902054600160a060020a0316151590565b600061086a610708565b821061087557600080fd5b600980548390811061088357fe5b90600052602060002001549050919050565b600081815260016020526040812054600160a060020a03168015156108b957600080fd5b92915050565b6000600160a060020a03821615156108d657600080fd5b50600160a060020a031660009081526003602052604090205490565b600160a060020a03811660009081526007602090815260409182902080548351818402810184019094528084526060939283018282801561095257602002820191906000526020600020905b81548152602001906001019080831161093e575b50505050509050919050565b600081815260016020526040812054600c8054839283928392839283928392600160a060020a03909216918a90811061099357fe5b600091825260209091200154600c805460ff909216918b9081106109b357fe5b9060005260206000200160000160019054906101000a900460ff16600c8b8154811015156109dd57fe5b9060005260206000200160000160029054906101000a900460ff16600c8c815481101515610a0757fe5b9060005260206000200160000160039054906101000a900460ff16600c8d815481101515610a3157fe5b9060005260206000200160000160049054906101000a900460ff16600c8e815481101515610a5b57fe5b600091825260209091200154959e949d50929b50909950975095506501000000000090910467ffffffffffffffff169350915050565b60068054604080516020601f600260001961010060018816150201909516949094049384018190048102820181019092528281526060939092909183018282801561062c5780601f106106015761010080835404028352916020019161062c565b600160a060020a038216331415610b08576000
80fd5b336000818152600460209081526040808320600160a060020a03871680855290835292819020805460ff1916861515908117909155815190815290519293927f17307eab39ab6107e8899845ad3d59bd9653f200f220920489ca2b5937696c31929181900390910190a35050565b610b81848484610732565b610b8d84848484611263565b1515610b9857600080fd5b50505050565b6060610ba982610843565b1515610bb457600080fd5b6000828152600b602090815260409182902080548351601f6002600019610100600186161502019093169290920491820184900484028101840190945280845290918301828280156109525780601f10610c1c57610100808354040283529160200191610952565b820191906000526020600020905b815481529060010190602001808311610c2a5750939695505050505050565b600160a060020a03918216600090815260046020908152604080832093909416825291909152205460ff1690565b60008060008060008060006060610c8c611a07565b610c94610708565b6040805160208082019390935260001943014081830152815180820383018152606090910191829052805190928291908401908083835b60208310610cea5780518252601f199092019160209182019101610ccb565b5181516020939093036101000a600019018019909116921691909117905260405192018290039091209a5060059250505060f860020a60008a901a81020460ff16066001908101975060059089901a60f860020a0260f860020a900460ff16811515610d5257fe5b066001019550600560f860020a60028a901a81020460ff16066001019450600560f860020a60038a901a81020460ff16066001019350600560f860020a60048a901a81020460ff16066001019250610dad87878787876113d0565b6040805160c08101825260ff808b16825289811660208301908152898216938301938452888216606084019081528883166080850190815267ffffffffffffffff43811660a08701908152600c805460018101825560009190915287517fdf6966c971051c3d54ec59162606531493a51404a002842f56009d7e5cf4a8c78201805497519a5196519551935190941665010000000000026cffffffffffffffff0000000000199389166401000000000264ff0000000019968a1663010000000263ff00000019988b16620100000262ff0000199d8c166101000261ff001995909c1660ff19909b169a909a1793909316999099179a909a16969096179490941694909417919091169390931791909116939093179055909a509092509050610ed5338a61152d565b610edf898361157c565b33600160a060020a03167f1ae41272ced32fa050dc3df49761e279866a7a9378212de7a61104821698f18c8a89898989898860a001518a604051808981526020018860ff1660ff1681526020018760ff1660ff1681526020018660ff1660ff1681526020018560ff1660ff1681526020018460ff1660ff1681526020018367ffffffffffffffff1667ffffffffffffffff16815260200180602001828103825283818151815260200191508051906020019080838360005b83811015610faf578181015183820152602001610f97565b50505050905090810190601f168015610fdc5780820380516001836020036101000a031916815260200191505b50995050505050505050505060405180910390a2505050505050505090565b60408051808201909152600481527f74657374000000000000000000000000000000000000000000000000000000006020820152611039838361152d565b61083e828261157c565b60008061104f83610895565b905080600160a060020a031684600160a060020a0316148061108a575083600160a060020a031661107f84610637565b600160a060020a0316145b8061109a575061109a8185610c49565b949350505050565b81600160a060020a03166110b582610895565b600160a060020a0316146110c857600080fd5b600081815260026020526040902054600160a060020a03161561110f576000818152600260205260409020805473ffffffffffffffffffffffffffffffffffffffff191690555b5050565b600080600061112285856115af565b600084815260086020908152604080832054600160a060020a038916845260079092529091205490935061115d90600163ffffffff61164516565b600160a060020a03861660009081526007602052604090208054919350908390811061118557fe5b90600052602060002001549050806007600087600160a060020a0316600160a060020a03168152602001908152602001600020848154811015156111c557fe5b6000918252602080832090910192909255600160a060020a03871681526007909152604090208054906111fc90600019830
1611a3c565b50600093845260086020526040808520859055908452909220555050565b60006112268383611657565b50600160a060020a039091166000908152600760209081526040808320805460018101825590845282842081018590559383526008909152902055565b60008061127885600160a060020a03166116e7565b151561128757600191506113c7565b6040517f150b7a020000000000000000000000000000000000000000000000000000000081523360048201818152600160a060020a03898116602485015260448401889052608060648501908152875160848601528751918a169463150b7a0294938c938b938b93909160a490910190602085019080838360005b8381101561131a578181015183820152602001611302565b50505050905090810190601f1680156113475780820380516001836020036101000a031916815260200191505b5095505050505050602060405180830381600087803b15801561136957600080fd5b505af115801561137d573d6000803e3d6000fd5b505050506040513d602081101561139357600080fd5b5051600160e060020a031981167f150b7a020000000000000000000000000000000000000000000000000000000014925090505b50949350505050565b6040805180820190915260208082527f68747470733a2f2f6e66746965732e696f2f746f6b656e732f6e66746965732d9082015260609061141181886116ef565b90506114398160408051908101604052806001815260200160f860020a602d02815250611880565b905061144581876116ef565b905061146d8160408051908101604052806001815260200160f860020a602d02815250611880565b905061147981866116ef565b90506114a18160408051908101604052806001815260200160f860020a602d02815250611880565b90506114ad81856116ef565b90506114d58160408051908101604052806001815260200160f860020a602d02815250611880565b90506114e181846116ef565b9050611522816040805190810160405280600481526020017f2e706e6700000000000000000000000000000000000000000000000000000000815250611880565b979650505050505050565b611537828261199f565b600980546000838152600a60205260408120829055600182018355919091527f6e1540171b6c0c960b71a7020d9f60077f6af931a8bbf590da0223dacf75c7af015550565b61158582610843565b151561159057600080fd5b6000828152600b60209081526040909120825161083e92840190611a60565b81600160a060020a03166115c282610895565b600160a060020a0316146115d557600080fd5b600160a060020a0382166000908152600360205260409020546115ff90600163ffffffff61164516565b600160a060020a03909216600090815260036020908152604080832094909455918152600190915220805473ffffffffffffffffffffffffffffffffffffffff19169055565b60008282111561165157fe5b50900390565b600081815260016020526040902054600160a060020a03161561167957600080fd5b6000818152600160208181526040808420805473ffffffffffffffffffffffffffffffffffffffff1916600160a060020a03881690811790915584526003909152909120546116c7916119fa565b600160a060020a0390921660009081526003602052604090209190915550565b6000903b1190565b60408051606480825260a0820190925260609190829060009081908390819083908760208201610c8080388339019050509550600094505b60ff89161561177c578551600a60ff9a8b168181049b60018901989290910616955060f860020a603087010291889190811061175f57fe5b906020010190600160f860020a031916908160001a905350611727565b899250848351016040519080825280601f01601f1916602001820160405280156117b0578160200160208202803883390190505b509150600090505b82518110156118105782818151811015156117cf57fe5b90602001015160f860020a900460f860020a0282828151811015156117f057fe5b906020010190600160f860020a031916908160001a9053506001016117b8565b5060005b84811015611873578581600187030381518110151561182f57fe5b90602001015160f860020a900460f860020a02828451830181518110151561185357fe5b906020010190600160f860020a031916908160001a905350600101611814565b5098975050505050505050565b606080606080606060008088955087945084518651016040519080825280601f01601f1916602001820160405280156118c3578160200160208202803883390190505b50935083925060009150600090505b85518110156119305785818151811015156118e957fe
5b90602001015160f860020a900460f860020a02838380600101945081518110151561191057fe5b906020010190600160f860020a031916908160001a9053506001016118d2565b5060005b845181101561199257848181518110151561194b57fe5b90602001015160f860020a900460f860020a02838380600101945081518110151561197257fe5b906020010190600160f860020a031916908160001a905350600101611934565b5090979650505050505050565b600160a060020a03821615156119b457600080fd5b6119be828261121a565b6040518190600160a060020a038416906000907fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef908290a45050565b818101828110156108b957fe5b6040805160c081018252600080825260208201819052918101829052606081018290526080810182905260a081019190915290565b81548183558181111561083e5760008381526020902061083e918101908301611ade565b828054600181600116156101000203166002900490600052602060002090601f016020900481019282601f10611aa157805160ff1916838001178555611ace565b82800160010185558215611ace579182015b82811115611ace578251825591602001919060010190611ab3565b50611ada929150611ade565b5090565b61063491905b80821115611ada5760008155600101611ae45600a165627a7a72305820eb6ef4caf7e8dfb40d659802d0d7106ab4b338205d58c4b1d0596c47c740a77e0029"
+ Erc721Binary = "60806040523480156200001157600080fd5b5060408051808201825260068082527f4e6674696573000000000000000000000000000000000000000000000000000060208084019190915283518085019094529083527f4e465449455300000000000000000000000000000000000000000000000000009083015290620000af7f01ffc9a700000000000000000000000000000000000000000000000000000000640100000000620001b3810204565b620000e37f80ac58cd00000000000000000000000000000000000000000000000000000000640100000000620001b3810204565b620001177f4f558e7900000000000000000000000000000000000000000000000000000000640100000000620001b3810204565b81516200012c90600590602085019062000220565b5080516200014290600690602084019062000220565b50620001777f780e9d6300000000000000000000000000000000000000000000000000000000640100000000620001b3810204565b620001ab7f5b5e139f00000000000000000000000000000000000000000000000000000000640100000000620001b3810204565b5050620002c5565b7fffffffff000000000000000000000000000000000000000000000000000000008082161415620001e357600080fd5b7fffffffff00000000000000000000000000000000000000000000000000000000166000908152602081905260409020805460ff19166001179055565b828054600181600116156101000203166002900490600052602060002090601f016020900481019282601f106200026357805160ff191683800117855562000293565b8280016001018555821562000293579182015b828111156200029357825182559160200191906001019062000276565b50620002a1929150620002a5565b5090565b620002c291905b80821115620002a15760008155600101620002ac565b90565b611b2480620002d56000396000f3006080604052600436106101275763ffffffff7c010000000000000000000000000000000000000000000000000000000060003504166301ffc9a7811461012c57806306fdde0314610162578063081812fc146101ec578063095ea7b31461022057806318160ddd1461024657806319fa8f501461026d57806323b872dd1461029f5780632f745c59146102c957806342842e0e146102ed5780634f558e79146103175780634f6ccce71461032f5780636352211e1461034757806370a082311461035f5780638462151c146103805780639507d39a146103f157806395d89b411461045f578063a22cb46514610474578063b88d4fde1461049a578063c87b56dd14610509578063e985e9c514610521578063efc81a8c14610548578063fdb05e851461055d575b600080fd5b34801561013857600080fd5b5061014e600160e060020a031960043516610581565b604080519115158252519081900360200190f35b34801561016e57600080fd5b506101776105a0565b6040805160208082528351818301528351919283929083019185019080838360005b838110156101b1578181015183820152602001610199565b50505050905090810190601f1680156101de5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b3480156101f857600080fd5b50610204600435610637565b60408051600160a060020a039092168252519081900360200190f35b34801561022c57600080fd5b50610244600160a060020a0360043516602435610652565b005b34801561025257600080fd5b5061025b610708565b60408051918252519081900360200190f35b34801561027957600080fd5b5061028261070e565b60408051600160e060020a03199092168252519081900360200190f35b3480156102ab57600080fd5b50610244600160a060020a0360043581169060243516604435610732565b3480156102d557600080fd5b5061025b600160a060020a03600435166024356107d5565b3480156102f957600080fd5b50610244600160a060020a0360043581169060243516604435610822565b34801561032357600080fd5b5061014e600435610843565b34801561033b57600080fd5b5061025b600435610860565b34801561035357600080fd5b50610204600435610895565b34801561036b57600080fd5b5061025b600160a060020a03600435166108bf565b34801561038c57600080fd5b506103a1600160a060020a03600435166108f2565b60408051602080825283518183015283519192839290830191858101910280838360005b838110156103dd5781810151838201526020016103c5565b505050509050019250505060405180910390f35b3480156103fd57600080fd5b5061040960043561095e565b60408051600160a0600
20a03909816885260ff9687166020890152948616878601529285166060870152908416608086015290921660a084015267ffffffffffffffff90911660c0830152519081900360e00190f35b34801561046b57600080fd5b50610177610a91565b34801561048057600080fd5b50610244600160a060020a03600435166024351515610af2565b3480156104a657600080fd5b50604080516020601f60643560048181013592830184900484028501840190955281845261024494600160a060020a038135811695602480359092169560443595369560849401918190840183828082843750949750610b769650505050505050565b34801561051557600080fd5b50610177600435610b9e565b34801561052d57600080fd5b5061014e600160a060020a0360043581169060243516610c49565b34801561055457600080fd5b5061025b610c77565b34801561056957600080fd5b50610244600160a060020a0360043516602435610ffb565b600160e060020a03191660009081526020819052604090205460ff1690565b60058054604080516020601f600260001961010060018816150201909516949094049384018190048102820181019092528281526060939092909183018282801561062c5780601f106106015761010080835404028352916020019161062c565b820191906000526020600020905b81548152906001019060200180831161060f57829003601f168201915b505050505090505b90565b600090815260026020526040902054600160a060020a031690565b600061065d82610895565b9050600160a060020a03838116908216141561067857600080fd5b33600160a060020a038216148061069457506106948133610c49565b151561069f57600080fd5b600082815260026020526040808220805473ffffffffffffffffffffffffffffffffffffffff1916600160a060020a0387811691821790925591518593918516917f8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b92591a4505050565b60095490565b7f01ffc9a70000000000000000000000000000000000000000000000000000000081565b61073c3382611043565b151561074757600080fd5b600160a060020a038316151561075c57600080fd5b600160a060020a038216151561077157600080fd5b61077b83826110a2565b6107858382611113565b61078f828261121a565b8082600160a060020a031684600160a060020a03167fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef60405160405180910390a4505050565b60006107e0836108bf565b82106107eb57600080fd5b600160a060020a038316600090815260076020526040902080548390811061080f57fe5b9060005260206000200154905092915050565b61083e8383836020604051908101604052806000815250610b76565b505050565b600090815260016020526040902054600160a060020a0316151590565b600061086a610708565b821061087557600080fd5b600980548390811061088357fe5b90600052602060002001549050919050565b600081815260016020526040812054600160a060020a03168015156108b957600080fd5b92915050565b6000600160a060020a03821615156108d657600080fd5b50600160a060020a031660009081526003602052604090205490565b600160a060020a03811660009081526007602090815260409182902080548351818402810184019094528084526060939283018282801561095257602002820191906000526020600020905b81548152602001906001019080831161093e575b50505050509050919050565b600081815260016020526040812054600c8054839283928392839283928392600160a060020a03909216918a90811061099357fe5b600091825260209091200154600c805460ff909216918b9081106109b357fe5b9060005260206000200160000160019054906101000a900460ff16600c8b8154811015156109dd57fe5b9060005260206000200160000160029054906101000a900460ff16600c8c815481101515610a0757fe5b9060005260206000200160000160039054906101000a900460ff16600c8d815481101515610a3157fe5b9060005260206000200160000160049054906101000a900460ff16600c8e815481101515610a5b57fe5b600091825260209091200154959e949d50929b50909950975095506501000000000090910467ffffffffffffffff169350915050565b60068054604080516020601f600260001961010060018816150201909516949094049384018190048102820181019092528281526060939092909183018282801561062c5780601f106106015761010080835404028352916020019161062c565b600160a060020a038216331415610b085760
0080fd5b336000818152600460209081526040808320600160a060020a03871680855290835292819020805460ff1916861515908117909155815190815290519293927f17307eab39ab6107e8899845ad3d59bd9653f200f220920489ca2b5937696c31929181900390910190a35050565b610b81848484610732565b610b8d84848484611263565b1515610b9857600080fd5b50505050565b6060610ba982610843565b1515610bb457600080fd5b6000828152600b602090815260409182902080548351601f6002600019610100600186161502019093169290920491820184900484028101840190945280845290918301828280156109525780601f10610c1c57610100808354040283529160200191610952565b820191906000526020600020905b815481529060010190602001808311610c2a5750939695505050505050565b600160a060020a03918216600090815260046020908152604080832093909416825291909152205460ff1690565b60008060008060008060006060610c8c611a07565b610c94610708565b6040805160208082019390935260001943014081830152815180820383018152606090910191829052805190928291908401908083835b60208310610cea5780518252601f199092019160209182019101610ccb565b5181516020939093036101000a600019018019909116921691909117905260405192018290039091209a5060059250505060f860020a60008a901a81020460ff16066001908101975060059089901a60f860020a0260f860020a900460ff16811515610d5257fe5b066001019550600560f860020a60028a901a81020460ff16066001019450600560f860020a60038a901a81020460ff16066001019350600560f860020a60048a901a81020460ff16066001019250610dad87878787876113d0565b6040805160c08101825260ff808b16825289811660208301908152898216938301938452888216606084019081528883166080850190815267ffffffffffffffff43811660a08701908152600c805460018101825560009190915287517fdf6966c971051c3d54ec59162606531493a51404a002842f56009d7e5cf4a8c78201805497519a5196519551935190941665010000000000026cffffffffffffffff0000000000199389166401000000000264ff0000000019968a1663010000000263ff00000019988b16620100000262ff0000199d8c166101000261ff001995909c1660ff19909b169a909a1793909316999099179a909a16969096179490941694909417919091169390931791909116939093179055909a509092509050610ed5338a61152d565b610edf898361157c565b33600160a060020a03167f1ae41272ced32fa050dc3df49761e279866a7a9378212de7a61104821698f18c8a89898989898860a001518a604051808981526020018860ff1660ff1681526020018760ff1660ff1681526020018660ff1660ff1681526020018560ff1660ff1681526020018460ff1660ff1681526020018367ffffffffffffffff1667ffffffffffffffff16815260200180602001828103825283818151815260200191508051906020019080838360005b83811015610faf578181015183820152602001610f97565b50505050905090810190601f168015610fdc5780820380516001836020036101000a031916815260200191505b50995050505050505050505060405180910390a2505050505050505090565b60408051808201909152600481527f74657374000000000000000000000000000000000000000000000000000000006020820152611039838361152d565b61083e828261157c565b60008061104f83610895565b905080600160a060020a031684600160a060020a0316148061108a575083600160a060020a031661107f84610637565b600160a060020a0316145b8061109a575061109a8185610c49565b949350505050565b81600160a060020a03166110b582610895565b600160a060020a0316146110c857600080fd5b600081815260026020526040902054600160a060020a03161561110f576000818152600260205260409020805473ffffffffffffffffffffffffffffffffffffffff191690555b5050565b600080600061112285856115af565b600084815260086020908152604080832054600160a060020a038916845260079092529091205490935061115d90600163ffffffff61164516565b600160a060020a03861660009081526007602052604090208054919350908390811061118557fe5b90600052602060002001549050806007600087600160a060020a0316600160a060020a03168152602001908152602001600020848154811015156111c557fe5b6000918252602080832090910192909255600160a060020a03871681526007909152604090208054906111fc906000198
301611a3c565b50600093845260086020526040808520859055908452909220555050565b60006112268383611657565b50600160a060020a039091166000908152600760209081526040808320805460018101825590845282842081018590559383526008909152902055565b60008061127885600160a060020a03166116e7565b151561128757600191506113c7565b6040517f150b7a020000000000000000000000000000000000000000000000000000000081523360048201818152600160a060020a03898116602485015260448401889052608060648501908152875160848601528751918a169463150b7a0294938c938b938b93909160a490910190602085019080838360005b8381101561131a578181015183820152602001611302565b50505050905090810190601f1680156113475780820380516001836020036101000a031916815260200191505b5095505050505050602060405180830381600087803b15801561136957600080fd5b505af115801561137d573d6000803e3d6000fd5b505050506040513d602081101561139357600080fd5b5051600160e060020a031981167f150b7a020000000000000000000000000000000000000000000000000000000014925090505b50949350505050565b6040805180820190915260208082527f68747470733a2f2f6e66746965732e696f2f746f6b656e732f6e66746965732d9082015260609061141181886116ef565b90506114398160408051908101604052806001815260200160f860020a602d02815250611880565b905061144581876116ef565b905061146d8160408051908101604052806001815260200160f860020a602d02815250611880565b905061147981866116ef565b90506114a18160408051908101604052806001815260200160f860020a602d02815250611880565b90506114ad81856116ef565b90506114d58160408051908101604052806001815260200160f860020a602d02815250611880565b90506114e181846116ef565b9050611522816040805190810160405280600481526020017f2e706e6700000000000000000000000000000000000000000000000000000000815250611880565b979650505050505050565b611537828261199f565b600980546000838152600a60205260408120829055600182018355919091527f6e1540171b6c0c960b71a7020d9f60077f6af931a8bbf590da0223dacf75c7af015550565b61158582610843565b151561159057600080fd5b6000828152600b60209081526040909120825161083e92840190611a60565b81600160a060020a03166115c282610895565b600160a060020a0316146115d557600080fd5b600160a060020a0382166000908152600360205260409020546115ff90600163ffffffff61164516565b600160a060020a03909216600090815260036020908152604080832094909455918152600190915220805473ffffffffffffffffffffffffffffffffffffffff19169055565b60008282111561165157fe5b50900390565b600081815260016020526040902054600160a060020a03161561167957600080fd5b6000818152600160208181526040808420805473ffffffffffffffffffffffffffffffffffffffff1916600160a060020a03881690811790915584526003909152909120546116c7916119fa565b600160a060020a0390921660009081526003602052604090209190915550565b6000903b1190565b60408051606480825260a0820190925260609190829060009081908390819083908760208201610c8080388339019050509550600094505b60ff89161561177c578551600a60ff9a8b168181049b60018901989290910616955060f860020a603087010291889190811061175f57fe5b906020010190600160f860020a031916908160001a905350611727565b899250848351016040519080825280601f01601f1916602001820160405280156117b0578160200160208202803883390190505b509150600090505b82518110156118105782818151811015156117cf57fe5b90602001015160f860020a900460f860020a0282828151811015156117f057fe5b906020010190600160f860020a031916908160001a9053506001016117b8565b5060005b84811015611873578581600187030381518110151561182f57fe5b90602001015160f860020a900460f860020a02828451830181518110151561185357fe5b906020010190600160f860020a031916908160001a905350600101611814565b5098975050505050505050565b606080606080606060008088955087945084518651016040519080825280601f01601f1916602001820160405280156118c3578160200160208202803883390190505b50935083925060009150600090505b85518110156119305785818151811015156118e957
fe5b90602001015160f860020a900460f860020a02838380600101945081518110151561191057fe5b906020010190600160f860020a031916908160001a9053506001016118d2565b5060005b845181101561199257848181518110151561194b57fe5b90602001015160f860020a900460f860020a02838380600101945081518110151561197257fe5b906020010190600160f860020a031916908160001a905350600101611934565b5090979650505050505050565b600160a060020a03821615156119b457600080fd5b6119be828261121a565b6040518190600160a060020a038416906000907fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef908290a45050565b818101828110156108b957fe5b6040805160c081018252600080825260208201819052918101829052606081018290526080810182905260a081019190915290565b81548183558181111561083e5760008381526020902061083e918101908301611ade565b828054600181600116156101000203166002900490600052602060002090601f016020900481019282601f10611aa157805160ff1916838001178555611ace565b82800160010185558215611ace579182015b82811115611ace578251825591602001919060010190611ab3565b50611ada929150611ade565b5090565b61063491905b80821115611ada5760008155600101611ae45600a165627a7a72305820eb6ef4caf7e8dfb40d659802d0d7106ab4b338205d58c4b1d0596c47c740a77e0029"
// CreateTo createTo(address _owner,uint256 _tokenId)
- CreateTo="fdb05e85"
+ CreateTo = "fdb05e85"
// TransferFrom transferFrom(address _from, address _to, uint256 _tokenId)
- TransferFrom="23b872dd"
+ TransferFrom = "23b872dd"
// BalanceOf BalanceOf(opts *bind.CallOpts, _owner common.Address)
- BalanceOf="70a08231"
- )
-
-
+ BalanceOf = "70a08231"
+) | 1 | // Copyright (c) 2019 IoTeX
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package blockchain
const (
// Erc721Binary a simple erc721 token bin
Erc721Binary="60806040523480156200001157600080fd5b5060408051808201825260068082527f4e6674696573000000000000000000000000000000000000000000000000000060208084019190915283518085019094529083527f4e465449455300000000000000000000000000000000000000000000000000009083015290620000af7f01ffc9a700000000000000000000000000000000000000000000000000000000640100000000620001b3810204565b620000e37f80ac58cd00000000000000000000000000000000000000000000000000000000640100000000620001b3810204565b620001177f4f558e7900000000000000000000000000000000000000000000000000000000640100000000620001b3810204565b81516200012c90600590602085019062000220565b5080516200014290600690602084019062000220565b50620001777f780e9d6300000000000000000000000000000000000000000000000000000000640100000000620001b3810204565b620001ab7f5b5e139f00000000000000000000000000000000000000000000000000000000640100000000620001b3810204565b5050620002c5565b7fffffffff000000000000000000000000000000000000000000000000000000008082161415620001e357600080fd5b7fffffffff00000000000000000000000000000000000000000000000000000000166000908152602081905260409020805460ff19166001179055565b828054600181600116156101000203166002900490600052602060002090601f016020900481019282601f106200026357805160ff191683800117855562000293565b8280016001018555821562000293579182015b828111156200029357825182559160200191906001019062000276565b50620002a1929150620002a5565b5090565b620002c291905b80821115620002a15760008155600101620002ac565b90565b611b2480620002d56000396000f3006080604052600436106101275763ffffffff7c010000000000000000000000000000000000000000000000000000000060003504166301ffc9a7811461012c57806306fdde0314610162578063081812fc146101ec578063095ea7b31461022057806318160ddd1461024657806319fa8f501461026d57806323b872dd1461029f5780632f745c59146102c957806342842e0e146102ed5780634f558e79146103175780634f6ccce71461032f5780636352211e1461034757806370a082311461035f5780638462151c146103805780639507d39a146103f157806395d89b411461045f578063a22cb46514610474578063b88d4fde1461049a578063c87b56dd14610509578063e985e9c514610521578063efc81a8c14610548578063fdb05e851461055d575b600080fd5b34801561013857600080fd5b5061014e600160e060020a031960043516610581565b604080519115158252519081900360200190f35b34801561016e57600080fd5b506101776105a0565b6040805160208082528351818301528351919283929083019185019080838360005b838110156101b1578181015183820152602001610199565b50505050905090810190601f1680156101de5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b3480156101f857600080fd5b50610204600435610637565b60408051600160a060020a039092168252519081900360200190f35b34801561022c57600080fd5b50610244600160a060020a0360043516602435610652565b005b34801561025257600080fd5b5061025b610708565b60408051918252519081900360200190f35b34801561027957600080fd5b5061028261070e565b60408051600160e060020a03199092168252519081900360200190f35b3480156102ab57600080fd5b50610244600160a060020a0360043581169060243516604435610732565b3480156102d557600080fd5b5061025b600160a060020a03600435166024356107d5565b3480156102f957600080fd5b50610244600160a060020a0360043581169060243516604435610822565b34801561032357600080fd5b5061014e600435610843565b34801561033b57600080fd5b5061025b600435610860565b34801561035357600080fd5b50610204600435610895565b34801561036b57600080fd5b5061025b600160a060020a03600435166108bf565b34801561038c57600080fd5b506103a1600160a060020a03600435166108f2565b60408051602080825283518183015283519192839290830191858101910280838360005b838110156103dd5781810151838201526020016103c5565b505050509050019250505060405180910390f35b3480156103fd57600080fd5b5061040960043561095e565b60408051600160a060020a0
3909816885260ff9687166020890152948616878601529285166060870152908416608086015290921660a084015267ffffffffffffffff90911660c0830152519081900360e00190f35b34801561046b57600080fd5b50610177610a91565b34801561048057600080fd5b50610244600160a060020a03600435166024351515610af2565b3480156104a657600080fd5b50604080516020601f60643560048181013592830184900484028501840190955281845261024494600160a060020a038135811695602480359092169560443595369560849401918190840183828082843750949750610b769650505050505050565b34801561051557600080fd5b50610177600435610b9e565b34801561052d57600080fd5b5061014e600160a060020a0360043581169060243516610c49565b34801561055457600080fd5b5061025b610c77565b34801561056957600080fd5b50610244600160a060020a0360043516602435610ffb565b600160e060020a03191660009081526020819052604090205460ff1690565b60058054604080516020601f600260001961010060018816150201909516949094049384018190048102820181019092528281526060939092909183018282801561062c5780601f106106015761010080835404028352916020019161062c565b820191906000526020600020905b81548152906001019060200180831161060f57829003601f168201915b505050505090505b90565b600090815260026020526040902054600160a060020a031690565b600061065d82610895565b9050600160a060020a03838116908216141561067857600080fd5b33600160a060020a038216148061069457506106948133610c49565b151561069f57600080fd5b600082815260026020526040808220805473ffffffffffffffffffffffffffffffffffffffff1916600160a060020a0387811691821790925591518593918516917f8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b92591a4505050565b60095490565b7f01ffc9a70000000000000000000000000000000000000000000000000000000081565b61073c3382611043565b151561074757600080fd5b600160a060020a038316151561075c57600080fd5b600160a060020a038216151561077157600080fd5b61077b83826110a2565b6107858382611113565b61078f828261121a565b8082600160a060020a031684600160a060020a03167fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef60405160405180910390a4505050565b60006107e0836108bf565b82106107eb57600080fd5b600160a060020a038316600090815260076020526040902080548390811061080f57fe5b9060005260206000200154905092915050565b61083e8383836020604051908101604052806000815250610b76565b505050565b600090815260016020526040902054600160a060020a0316151590565b600061086a610708565b821061087557600080fd5b600980548390811061088357fe5b90600052602060002001549050919050565b600081815260016020526040812054600160a060020a03168015156108b957600080fd5b92915050565b6000600160a060020a03821615156108d657600080fd5b50600160a060020a031660009081526003602052604090205490565b600160a060020a03811660009081526007602090815260409182902080548351818402810184019094528084526060939283018282801561095257602002820191906000526020600020905b81548152602001906001019080831161093e575b50505050509050919050565b600081815260016020526040812054600c8054839283928392839283928392600160a060020a03909216918a90811061099357fe5b600091825260209091200154600c805460ff909216918b9081106109b357fe5b9060005260206000200160000160019054906101000a900460ff16600c8b8154811015156109dd57fe5b9060005260206000200160000160029054906101000a900460ff16600c8c815481101515610a0757fe5b9060005260206000200160000160039054906101000a900460ff16600c8d815481101515610a3157fe5b9060005260206000200160000160049054906101000a900460ff16600c8e815481101515610a5b57fe5b600091825260209091200154959e949d50929b50909950975095506501000000000090910467ffffffffffffffff169350915050565b60068054604080516020601f600260001961010060018816150201909516949094049384018190048102820181019092528281526060939092909183018282801561062c5780601f106106015761010080835404028352916020019161062c565b600160a060020a038216331415610b0857600080
fd5b336000818152600460209081526040808320600160a060020a03871680855290835292819020805460ff1916861515908117909155815190815290519293927f17307eab39ab6107e8899845ad3d59bd9653f200f220920489ca2b5937696c31929181900390910190a35050565b610b81848484610732565b610b8d84848484611263565b1515610b9857600080fd5b50505050565b6060610ba982610843565b1515610bb457600080fd5b6000828152600b602090815260409182902080548351601f6002600019610100600186161502019093169290920491820184900484028101840190945280845290918301828280156109525780601f10610c1c57610100808354040283529160200191610952565b820191906000526020600020905b815481529060010190602001808311610c2a5750939695505050505050565b600160a060020a03918216600090815260046020908152604080832093909416825291909152205460ff1690565b60008060008060008060006060610c8c611a07565b610c94610708565b6040805160208082019390935260001943014081830152815180820383018152606090910191829052805190928291908401908083835b60208310610cea5780518252601f199092019160209182019101610ccb565b5181516020939093036101000a600019018019909116921691909117905260405192018290039091209a5060059250505060f860020a60008a901a81020460ff16066001908101975060059089901a60f860020a0260f860020a900460ff16811515610d5257fe5b066001019550600560f860020a60028a901a81020460ff16066001019450600560f860020a60038a901a81020460ff16066001019350600560f860020a60048a901a81020460ff16066001019250610dad87878787876113d0565b6040805160c08101825260ff808b16825289811660208301908152898216938301938452888216606084019081528883166080850190815267ffffffffffffffff43811660a08701908152600c805460018101825560009190915287517fdf6966c971051c3d54ec59162606531493a51404a002842f56009d7e5cf4a8c78201805497519a5196519551935190941665010000000000026cffffffffffffffff0000000000199389166401000000000264ff0000000019968a1663010000000263ff00000019988b16620100000262ff0000199d8c166101000261ff001995909c1660ff19909b169a909a1793909316999099179a909a16969096179490941694909417919091169390931791909116939093179055909a509092509050610ed5338a61152d565b610edf898361157c565b33600160a060020a03167f1ae41272ced32fa050dc3df49761e279866a7a9378212de7a61104821698f18c8a89898989898860a001518a604051808981526020018860ff1660ff1681526020018760ff1660ff1681526020018660ff1660ff1681526020018560ff1660ff1681526020018460ff1660ff1681526020018367ffffffffffffffff1667ffffffffffffffff16815260200180602001828103825283818151815260200191508051906020019080838360005b83811015610faf578181015183820152602001610f97565b50505050905090810190601f168015610fdc5780820380516001836020036101000a031916815260200191505b50995050505050505050505060405180910390a2505050505050505090565b60408051808201909152600481527f74657374000000000000000000000000000000000000000000000000000000006020820152611039838361152d565b61083e828261157c565b60008061104f83610895565b905080600160a060020a031684600160a060020a0316148061108a575083600160a060020a031661107f84610637565b600160a060020a0316145b8061109a575061109a8185610c49565b949350505050565b81600160a060020a03166110b582610895565b600160a060020a0316146110c857600080fd5b600081815260026020526040902054600160a060020a03161561110f576000818152600260205260409020805473ffffffffffffffffffffffffffffffffffffffff191690555b5050565b600080600061112285856115af565b600084815260086020908152604080832054600160a060020a038916845260079092529091205490935061115d90600163ffffffff61164516565b600160a060020a03861660009081526007602052604090208054919350908390811061118557fe5b90600052602060002001549050806007600087600160a060020a0316600160a060020a03168152602001908152602001600020848154811015156111c557fe5b6000918252602080832090910192909255600160a060020a03871681526007909152604090208054906111fc9060001983016
11a3c565b50600093845260086020526040808520859055908452909220555050565b60006112268383611657565b50600160a060020a039091166000908152600760209081526040808320805460018101825590845282842081018590559383526008909152902055565b60008061127885600160a060020a03166116e7565b151561128757600191506113c7565b6040517f150b7a020000000000000000000000000000000000000000000000000000000081523360048201818152600160a060020a03898116602485015260448401889052608060648501908152875160848601528751918a169463150b7a0294938c938b938b93909160a490910190602085019080838360005b8381101561131a578181015183820152602001611302565b50505050905090810190601f1680156113475780820380516001836020036101000a031916815260200191505b5095505050505050602060405180830381600087803b15801561136957600080fd5b505af115801561137d573d6000803e3d6000fd5b505050506040513d602081101561139357600080fd5b5051600160e060020a031981167f150b7a020000000000000000000000000000000000000000000000000000000014925090505b50949350505050565b6040805180820190915260208082527f68747470733a2f2f6e66746965732e696f2f746f6b656e732f6e66746965732d9082015260609061141181886116ef565b90506114398160408051908101604052806001815260200160f860020a602d02815250611880565b905061144581876116ef565b905061146d8160408051908101604052806001815260200160f860020a602d02815250611880565b905061147981866116ef565b90506114a18160408051908101604052806001815260200160f860020a602d02815250611880565b90506114ad81856116ef565b90506114d58160408051908101604052806001815260200160f860020a602d02815250611880565b90506114e181846116ef565b9050611522816040805190810160405280600481526020017f2e706e6700000000000000000000000000000000000000000000000000000000815250611880565b979650505050505050565b611537828261199f565b600980546000838152600a60205260408120829055600182018355919091527f6e1540171b6c0c960b71a7020d9f60077f6af931a8bbf590da0223dacf75c7af015550565b61158582610843565b151561159057600080fd5b6000828152600b60209081526040909120825161083e92840190611a60565b81600160a060020a03166115c282610895565b600160a060020a0316146115d557600080fd5b600160a060020a0382166000908152600360205260409020546115ff90600163ffffffff61164516565b600160a060020a03909216600090815260036020908152604080832094909455918152600190915220805473ffffffffffffffffffffffffffffffffffffffff19169055565b60008282111561165157fe5b50900390565b600081815260016020526040902054600160a060020a03161561167957600080fd5b6000818152600160208181526040808420805473ffffffffffffffffffffffffffffffffffffffff1916600160a060020a03881690811790915584526003909152909120546116c7916119fa565b600160a060020a0390921660009081526003602052604090209190915550565b6000903b1190565b60408051606480825260a0820190925260609190829060009081908390819083908760208201610c8080388339019050509550600094505b60ff89161561177c578551600a60ff9a8b168181049b60018901989290910616955060f860020a603087010291889190811061175f57fe5b906020010190600160f860020a031916908160001a905350611727565b899250848351016040519080825280601f01601f1916602001820160405280156117b0578160200160208202803883390190505b509150600090505b82518110156118105782818151811015156117cf57fe5b90602001015160f860020a900460f860020a0282828151811015156117f057fe5b906020010190600160f860020a031916908160001a9053506001016117b8565b5060005b84811015611873578581600187030381518110151561182f57fe5b90602001015160f860020a900460f860020a02828451830181518110151561185357fe5b906020010190600160f860020a031916908160001a905350600101611814565b5098975050505050505050565b606080606080606060008088955087945084518651016040519080825280601f01601f1916602001820160405280156118c3578160200160208202803883390190505b50935083925060009150600090505b85518110156119305785818151811015156118e957fe5b
90602001015160f860020a900460f860020a02838380600101945081518110151561191057fe5b906020010190600160f860020a031916908160001a9053506001016118d2565b5060005b845181101561199257848181518110151561194b57fe5b90602001015160f860020a900460f860020a02838380600101945081518110151561197257fe5b906020010190600160f860020a031916908160001a905350600101611934565b5090979650505050505050565b600160a060020a03821615156119b457600080fd5b6119be828261121a565b6040518190600160a060020a038416906000907fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef908290a45050565b818101828110156108b957fe5b6040805160c081018252600080825260208201819052918101829052606081018290526080810182905260a081019190915290565b81548183558181111561083e5760008381526020902061083e918101908301611ade565b828054600181600116156101000203166002900490600052602060002090601f016020900481019282601f10611aa157805160ff1916838001178555611ace565b82800160010185558215611ace579182015b82811115611ace578251825591602001919060010190611ab3565b50611ada929150611ade565b5090565b61063491905b80821115611ada5760008155600101611ae45600a165627a7a72305820eb6ef4caf7e8dfb40d659802d0d7106ab4b338205d58c4b1d0596c47c740a77e0029"
// CreateTo createTo(address _owner,uint256 _tokenId)
CreateTo="fdb05e85"
// TransferFrom transferFrom(address _from, address _to, uint256 _tokenId)
TransferFrom="23b872dd"
	// BalanceOf balanceOf(address _owner)
BalanceOf="70a08231"
)
| 1 | 16,970 | Why this file is changed? | iotexproject-iotex-core | go |
@@ -9,6 +9,11 @@ def includeme(config):
description='Track changes on data.',
url='http://kinto.readthedocs.io/en/latest/api/1.x/history.html')
+ config.add_api_capability(
+ 'revert',
+ description='discard all changes after given timestamp.',
+ url='http://kinto.readthedocs.io/en/latest/api/1.x/revert.html')
+
# Activate end-points.
config.scan('kinto.plugins.history.views')
| 1 | from kinto.core.events import ResourceChanged
from .listener import on_resource_changed
def includeme(config):
config.add_api_capability(
'history',
description='Track changes on data.',
url='http://kinto.readthedocs.io/en/latest/api/1.x/history.html')
# Activate end-points.
config.scan('kinto.plugins.history.views')
# If StatsD is enabled, monitor execution time of listener.
listener = on_resource_changed
if config.registry.statsd:
key = 'plugins.history'
listener = config.registry.statsd.timer(key)(on_resource_changed)
    # Listen to every resource (except history)
config.add_subscriber(listener, ResourceChanged,
for_resources=('bucket', 'group',
'collection', 'record'))
| 1 | 9,994 | I'm not sure that we need this | Kinto-kinto | py |
@@ -22,7 +22,7 @@ namespace Microsoft.DotNet.Build.Tasks
string dependencyVersion;
if (package.Value is JObject)
{
- dependencyVersion = package.Value["version"].Value<string>();
+ dependencyVersion = package.Value["version"]?.Value<string>();
}
else if (package.Value is JValue)
{ | 1 | using Microsoft.Build.Framework;
using Newtonsoft.Json;
using Newtonsoft.Json.Linq;
using System;
namespace Microsoft.DotNet.Build.Tasks
{
public class UpdatePackageDependencyVersion : VisitProjectDependencies
{
[Required]
public string PackageId { get; set; }
[Required]
public string OldVersion { get; set; }
[Required]
public string NewVersion { get; set; }
public override bool VisitPackage(JProperty package, string projectJsonPath)
{
var dependencyIdentifier = package.Name;
string dependencyVersion;
if (package.Value is JObject)
{
dependencyVersion = package.Value["version"].Value<string>();
}
else if (package.Value is JValue)
{
dependencyVersion = package.Value.ToObject<string>();
}
else
{
throw new ArgumentException(string.Format(
"Unrecognized dependency element for {0} in {1}",
package.Name,
projectJsonPath));
}
if (dependencyIdentifier == PackageId && dependencyVersion == OldVersion)
{
Log.LogMessage(
"Changing {0} {1} to {2} in {3}",
dependencyIdentifier,
dependencyVersion,
NewVersion,
projectJsonPath);
if (package.Value is JObject)
{
package.Value["version"] = NewVersion;
}
else
{
package.Value = NewVersion;
}
return true;
}
return false;
}
}
}
| 1 | 8,889 | Is this going to cause issues in any other places were someone forgets the version? Should we consider also checking for type=project? | dotnet-buildtools | .cs |
@@ -86,3 +86,18 @@ func extractASMValue(out *secretsmanager.GetSecretValueOutput) (docker.AuthConfi
return dac, nil
}
+
+// GetSecretFromASM makes the api call to the AWS Secrets Manager service to
+// retrieve the secret value
+func GetSecretFromASM(secretID string, client secretsmanageriface.SecretsManagerAPI) (string, error) {
+ in := &secretsmanager.GetSecretValueInput{
+ SecretId: aws.String(secretID),
+ }
+
+ out, err := client.GetSecretValue(in)
+ if err != nil {
+ return "", errors.Wrapf(err, "secret %s", secretID)
+ }
+
+ return aws.StringValue(out.SecretString), nil
+} | 1 | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may
// not use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language governing
// permissions and limitations under the License.
package asm
import (
"encoding/json"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/secretsmanager"
"github.com/aws/aws-sdk-go/service/secretsmanager/secretsmanageriface"
docker "github.com/fsouza/go-dockerclient"
"github.com/pkg/errors"
)
// AuthDataValue is the schema for
// the SecretStringValue returned by ASM
type AuthDataValue struct {
Username *string
Password *string
}
// GetDockerAuthFromASM makes the api call to the AWS Secrets Manager service to
// retrieve the docker auth data
func GetDockerAuthFromASM(secretID string, client secretsmanageriface.SecretsManagerAPI) (docker.AuthConfiguration, error) {
in := &secretsmanager.GetSecretValueInput{
SecretId: aws.String(secretID),
}
out, err := client.GetSecretValue(in)
if err != nil {
return docker.AuthConfiguration{}, errors.Wrapf(err,
"asm fetching secret from the service for %s", secretID)
}
return extractASMValue(out)
}
func extractASMValue(out *secretsmanager.GetSecretValueOutput) (docker.AuthConfiguration, error) {
if out == nil {
return docker.AuthConfiguration{}, errors.New(
"asm fetching authorization data: empty response")
}
secretValue := aws.StringValue(out.SecretString)
if secretValue == "" {
return docker.AuthConfiguration{}, errors.New(
"asm fetching authorization data: empty secrets value")
}
authDataValue := AuthDataValue{}
err := json.Unmarshal([]byte(secretValue), &authDataValue)
if err != nil {
// could not unmarshal, incorrect secret value schema
return docker.AuthConfiguration{}, errors.New(
"asm fetching authorization data: unable to unmarshal secret value, invalid schema")
}
username := aws.StringValue(authDataValue.Username)
password := aws.StringValue(authDataValue.Password)
if username == "" {
return docker.AuthConfiguration{}, errors.New(
"asm fetching username: AuthorizationData is malformed, empty field")
}
if password == "" {
return docker.AuthConfiguration{}, errors.New(
"asm fetching password: AuthorizationData is malformed, empty field")
}
dac := docker.AuthConfiguration{
Username: username,
Password: password,
}
return dac, nil
}
| 1 | 21,533 | Does this have retries? Is there a possibility that customer would hit throttle errors here? | aws-amazon-ecs-agent | go |
@@ -3,15 +3,17 @@
#endif
#include <platform.h>
-#if defined(PLATFORM_IS_LINUX) || defined(PLATFORM_IS_FREEBSD)
+#if defined(PLATFORM_IS_LINUX) || defined(PLATFORM_IS_BSD)
#include <sched.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/mman.h>
#endif
-#if defined(PLATFORM_IS_FREEBSD)
+#if defined(PLATFORM_IS_BSD)
#include <pthread_np.h>
+#endif
+#if defined(PLATFORM_IS_FREEBSD)
#include <sys/cpuset.h>
typedef cpuset_t cpu_set_t;
#endif | 1 | #ifdef __linux__
#define _GNU_SOURCE
#endif
#include <platform.h>
#if defined(PLATFORM_IS_LINUX) || defined(PLATFORM_IS_FREEBSD)
#include <sched.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/mman.h>
#endif
#if defined(PLATFORM_IS_FREEBSD)
#include <pthread_np.h>
#include <sys/cpuset.h>
typedef cpuset_t cpu_set_t;
#endif
#if defined(PLATFORM_IS_LINUX)
#include <dlfcn.h>
#include <stdio.h>
struct bitmask
{
unsigned long size;
unsigned long *maskp;
};
static int (*_numa_available)();
static int (*_numa_num_task_cpus)();
static void (*_numa_set_localalloc)();
static int (*_numa_node_of_cpu)(int cpu);
static void* (*_numa_alloc_onnode)(size_t size, int node);
static void* (*_numa_alloc)(size_t size);
static void (*_numa_free)(void* mem, size_t size);
static struct bitmask** _numa_all_nodes_ptr;
static struct bitmask* (*_numa_allocate_cpumask)();
static void (*_numa_bitmask_free)(struct bitmask*);
static int (*_numa_node_to_cpus)(int, struct bitmask*);
static unsigned int (*_numa_bitmask_weight)(const struct bitmask *);
static int (*_numa_bitmask_isbitset)(const struct bitmask*, unsigned int);
static bool use_numa = false;
#define LOAD_SYMBOL(sym) \
{ \
typedef typeof(_##sym) f; \
_##sym = (f)dlsym(lib, #sym); \
err += (_##sym == NULL); \
}
bool ponyint_numa_init()
{
void* lib = dlopen("libnuma.so.1", RTLD_LAZY);
int err = 0;
if(lib == NULL)
return false;
LOAD_SYMBOL(numa_available);
LOAD_SYMBOL(numa_num_task_cpus);
LOAD_SYMBOL(numa_set_localalloc);
LOAD_SYMBOL(numa_node_of_cpu);
LOAD_SYMBOL(numa_alloc_onnode);
LOAD_SYMBOL(numa_alloc);
LOAD_SYMBOL(numa_free);
LOAD_SYMBOL(numa_all_nodes_ptr);
LOAD_SYMBOL(numa_allocate_cpumask);
LOAD_SYMBOL(numa_bitmask_free);
LOAD_SYMBOL(numa_node_to_cpus);
LOAD_SYMBOL(numa_bitmask_weight);
LOAD_SYMBOL(numa_bitmask_isbitset);
if(err != 0)
return false;
if(_numa_available() == -1)
return false;
use_numa = true;
_numa_set_localalloc();
return true;
}
uint32_t ponyint_numa_cores()
{
if(use_numa)
return _numa_num_task_cpus();
return 0;
}
static uint32_t numa_core_list(cpu_set_t* mask, uint32_t index, uint32_t* list)
{
uint32_t nodes = _numa_bitmask_weight(*_numa_all_nodes_ptr);
struct bitmask* cpu = _numa_allocate_cpumask();
uint32_t node_count = 0;
for(uint32_t i = 0; node_count < nodes; i++)
{
if(!_numa_bitmask_isbitset(*_numa_all_nodes_ptr, i))
continue;
node_count++;
_numa_node_to_cpus(i, cpu);
uint32_t cpus = _numa_bitmask_weight(cpu);
uint32_t cpu_count = 0;
for(uint32_t j = 0; cpu_count < cpus; j++)
{
if(!_numa_bitmask_isbitset(cpu, j))
continue;
cpu_count++;
if(CPU_ISSET(j, mask))
list[index++] = j;
}
}
_numa_bitmask_free(cpu);
return index;
}
uint32_t ponyint_numa_core_list(cpu_set_t* hw_cpus, cpu_set_t* ht_cpus,
uint32_t* list)
{
if(!use_numa)
return 0;
// Create an ordered list of cpus. Physical CPUs grouped by NUMA node come
// first, followed by hyperthreaded CPUs grouped by NUMA node.
uint32_t index = 0;
index = numa_core_list(hw_cpus, index, list);
index = numa_core_list(ht_cpus, index, list);
return index;
}
uint32_t ponyint_numa_node_of_cpu(uint32_t cpu)
{
if(use_numa)
return _numa_node_of_cpu(cpu);
return 0;
}
#endif
bool ponyint_thread_create(pony_thread_id_t* thread, thread_fn start,
uint32_t cpu, void* arg)
{
(void)cpu;
#if defined(PLATFORM_IS_WINDOWS)
uintptr_t p = _beginthreadex(NULL, 0, start, arg, 0, NULL);
if(!p)
return false;
*thread = (HANDLE)p;
#elif defined(PLATFORM_IS_LINUX)
pthread_attr_t attr;
pthread_attr_init(&attr);
if(cpu != (uint32_t)-1)
{
cpu_set_t set;
CPU_ZERO(&set);
CPU_SET(cpu, &set);
if(use_numa)
{
struct rlimit limit;
if(getrlimit(RLIMIT_STACK, &limit) == 0)
{
int node = _numa_node_of_cpu(cpu);
void* stack = _numa_alloc_onnode((size_t)limit.rlim_cur, node);
if (stack != NULL) {
pthread_attr_setstack(&attr, stack, (size_t)limit.rlim_cur);
}
}
}
}
if(pthread_create(thread, &attr, start, arg))
return false;
pthread_attr_destroy(&attr);
#else
if(pthread_create(thread, NULL, start, arg))
return false;
#endif
return true;
}
bool ponyint_thread_join(pony_thread_id_t thread)
{
#ifdef PLATFORM_IS_WINDOWS
WaitForSingleObject(thread, INFINITE);
CloseHandle(thread);
#else
if(pthread_join(thread, NULL))
return false;
#endif
return true;
}
void ponyint_thread_detach(pony_thread_id_t thread)
{
#ifndef PLATFORM_IS_WINDOWS
pthread_detach(thread);
#endif
}
pony_thread_id_t ponyint_thread_self()
{
#ifdef PLATFORM_IS_WINDOWS
return GetCurrentThread();
#else
return pthread_self();
#endif
}
| 1 | 11,148 | i noted that we are inconsistent with when we indent includes (see atomics.h which is rather different) and for example cpu.c which is also different. we seem to have 3 styles. | ponylang-ponyc | c |
@@ -80,6 +80,12 @@ const start = (passthroughArgs, buildConfig = config.defaultBuildConfig, options
if (user_data_dir) {
// clear the data directory before doing a network test
fs.removeSync(user_data_dir.replace('\\', ''))
+ if (fs.existsSync(networkLogFile)) {
+ fs.unlinkSync(networkLogFile)
+ }
+ if (fs.existsSync('network-audit-results.json')) {
+ fs.unlinkSync('network-audit-results.json')
+ }
}
}
| 1 | const path = require('path')
const fs = require('fs-extra')
const ip = require('ip')
const URL = require('url').URL
const config = require('../lib/config')
const util = require('../lib/util')
const whitelistedUrlPrefixes = require('./whitelistedUrlPrefixes')
const start = (passthroughArgs, buildConfig = config.defaultBuildConfig, options) => {
config.buildConfig = buildConfig
config.update(options)
let braveArgs = [
'--enable-logging',
'--v=' + options.v,
]
if (options.vmodule) {
braveArgs.push('--vmodule=' + options.vmodule);
}
if (options.no_sandbox) {
braveArgs.push('--no-sandbox')
}
if (options.disable_brave_extension) {
braveArgs.push('--disable-brave-extension')
}
if (options.disable_brave_rewards_extension) {
braveArgs.push('--disable-brave-rewards-extension')
}
if (options.disable_pdfjs_extension) {
braveArgs.push('--disable-pdfjs-extension')
}
if (options.disable_webtorrent_extension) {
braveArgs.push('--disable-webtorrent-extension')
}
if (options.ui_mode) {
braveArgs.push(`--ui-mode=${options.ui_mode}`)
}
if (!options.enable_brave_update) {
// This only has meaning with MacOS and official build.
braveArgs.push('--disable-brave-update')
}
if (options.single_process) {
braveArgs.push('--single-process')
}
if (options.show_component_extensions) {
braveArgs.push('--show-component-extension-options')
}
if (options.rewards) {
braveArgs.push(`--rewards=${options.rewards}`)
}
if (options.brave_ads_testing) {
braveArgs.push('--brave-ads-testing')
}
if (options.brave_ads_debug) {
braveArgs.push('--brave-ads-debug')
}
if (options.brave_ads_production) {
braveArgs.push('--brave-ads-production')
}
if (options.brave_ads_staging) {
braveArgs.push('--brave-ads-staging')
}
braveArgs = braveArgs.concat(passthroughArgs)
let user_data_dir
if (options.user_data_dir_name) {
if (process.platform === 'darwin') {
user_data_dir = path.join(process.env.HOME, 'Library', 'Application\\ Support', 'BraveSoftware', options.user_data_dir_name)
} else if (process.platform === 'win32') {
user_data_dir = path.join(process.env.LocalAppData, 'BraveSoftware', options.user_data_dir_name)
} else {
user_data_dir = path.join(process.env.HOME, '.config', 'BraveSoftware', options.user_data_dir_name)
}
braveArgs.push('--user-data-dir=' + user_data_dir);
}
const networkLogFile = path.resolve(path.join(__dirname, '..', 'network_log.json'))
if (options.network_log) {
braveArgs.push(`--log-net-log=${networkLogFile}`)
braveArgs.push(`--net-log-capture-mode=IncludeSocketBytes`)
if (user_data_dir) {
// clear the data directory before doing a network test
fs.removeSync(user_data_dir.replace('\\', ''))
}
}
let cmdOptions = {
stdio: 'inherit',
timeout: options.network_log ? 120000 : undefined,
continueOnFail: options.network_log ? true : false,
shell: true
}
if (options.network_log) {
console.log('Network audit started. Logging requests for the next 2min or until you quit Brave...')
}
let outputPath = options.output_path
if (!outputPath) {
if (process.platform === 'darwin') {
outputPath = path.join(config.outputDir, config.macAppName() + '.app', 'Contents', 'MacOS', config.macAppName())
} else if (process.platform === 'win32') {
outputPath = path.join(config.outputDir, 'brave.exe')
} else {
outputPath = path.join(config.outputDir, 'brave')
}
}
util.run(outputPath, braveArgs, cmdOptions)
if (options.network_log) {
let exitCode = 0
// Read the network log
const jsonOutput = fs.readJsonSync(networkLogFile)
const URL_REQUEST_TYPE = jsonOutput.constants.logSourceType.URL_REQUEST
const URL_REQUEST_FAKE_RESPONSE_HEADERS_CREATED = jsonOutput.constants.logEventTypes.URL_REQUEST_FAKE_RESPONSE_HEADERS_CREATED
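    // Keep only the URL request events from the net-log; any request to a non-whitelisted URL fails the audit.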
const urlRequests = jsonOutput.events.filter((event) => {
if (event.type === URL_REQUEST_FAKE_RESPONSE_HEADERS_CREATED) {
// showing these helps determine which URL requests which don't
// actually hit the network
return true
}
if (event.source.type === URL_REQUEST_TYPE) {
if (!event.params) {
return false
}
const url = event.params.url
if (!url) {
return false
}
if (url.startsWith('http') && url.includes('.')) {
const found = whitelistedUrlPrefixes.find((prefix) => {
return url.startsWith(prefix)
})
if (!found) {
// Check if the URL is a private IP
try {
const hostname = new URL(url).hostname
if (ip.isPrivate(hostname)) {
// Warn but don't fail the audit
console.log('NETWORK AUDIT WARN:', url)
return true
}
} catch (e) {}
// This is not a whitelisted URL! log it and exit with non-zero
console.log('NETWORK AUDIT FAIL:', url)
exitCode = 1
}
return true
}
}
return false
})
fs.writeJsonSync('network-audit-results.json', urlRequests)
if (exitCode > 0) {
console.log(`network-audit failed. import ${networkLogFile} in chrome://net-internals for more details.`)
} else {
console.log('network audit passed.')
}
process.exit(exitCode)
}
}
module.exports = start
| 1 | 5,630 | Deleting the files before starting the audit helps avoid stale results if the new file is not created. | brave-brave-browser | js |
@@ -70,14 +70,8 @@ func (c *Controller) spcEventHandler(operation string, spcGot *apis.StoragePoolC
// CreateStoragePool function will create the storage pool
// It is a create event so resync should be false and pendingPoolcount is passed 0
// pendingPoolcount is not used when resync is false.
- var newSpcLease Leases
- newSpcLease = &spcLease{spcGot, SpcLeaseKey, c.clientset}
- _, err := newSpcLease.GetLease()
- if err != nil {
- glog.Errorf("Could not acquire lease on spc object:%v", err)
- return addEvent, err
- }
- err = CreateStoragePool(spcGot, false, 0)
+
+ err := c.CreateStoragePool(spcGot, false, 0)
if err != nil {
glog.Error("Storagepool could not be created:", err) | 1 | /*
Copyright 2018 The OpenEBS Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package spc
import (
"fmt"
"github.com/golang/glog"
apis "github.com/openebs/maya/pkg/apis/openebs.io/v1alpha1"
"github.com/openebs/maya/pkg/client/k8s"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/client-go/tools/cache"
)
// syncHandler compares the actual state with the desired, and attempts to
// converge the two. It then updates the Status block of the spcPoolUpdated resource
// with the current status of the resource.
func (c *Controller) syncHandler(key, operation string, object interface{}) error {
// getSpcResource will take a key as argument which contains the namespace/name or simply name
// of the object and will fetch the object.
spcGot, err := c.getSpcResource(key)
if err != nil {
return err
}
// Check if the event is for delete and use the spc object that was pushed in the queue
// for utilising details from it e.g. delete cas template name for storagepool deletion.
if operation == deleteEvent {
// Need to typecast the interface object to storagepoolclaim object because
// interface type of nil is different from nil but all other type of nil has the same type as that of nil.
spcObject := object.(*apis.StoragePoolClaim)
if spcObject == nil {
return fmt.Errorf("storagepoolclaim object not found for storage pool deletion")
}
spcGot = spcObject
}
// Call the spcEventHandler which will take spc object , key(namespace/name of object) and type of operation we need to to for storage pool
// Type of operation for storage pool e.g. create, delete etc.
events, err := c.spcEventHandler(operation, spcGot)
if events == ignoreEvent {
glog.Warning("None of the SPC handler was executed")
return nil
}
if err != nil {
return err
}
// If this function returns a error then the object will be requeued.
// No need to error out even if it occurs,
return nil
}
// spcEventHandler handles SPC related events.
func (c *Controller) spcEventHandler(operation string, spcGot *apis.StoragePoolClaim) (string, error) {
switch operation {
case addEvent:
// CreateStoragePool function will create the storage pool
// It is a create event so resync should be false and pendingPoolcount is passed 0
// pendingPoolcount is not used when resync is false.
var newSpcLease Leases
newSpcLease = &spcLease{spcGot, SpcLeaseKey, c.clientset}
_, err := newSpcLease.GetLease()
if err != nil {
glog.Errorf("Could not acquire lease on spc object:%v", err)
return addEvent, err
}
err = CreateStoragePool(spcGot, false, 0)
if err != nil {
glog.Error("Storagepool could not be created:", err)
// To-Do
// If Some error occur patch the spc object with appropriate reason
}
return addEvent, err
break
case updateEvent:
// TO-DO : Handle Business Logic
// Hook Update Business Logic Here
return updateEvent, nil
break
case syncEvent:
err := syncSpc(spcGot)
if err != nil {
glog.Errorf("Storagepool %s could not be synced:%v", spcGot.Name, err)
}
return syncEvent, err
break
case deleteEvent:
err := DeleteStoragePool(spcGot)
if err != nil {
glog.Error("Storagepool could not be deleted:", err)
}
return deleteEvent, err
break
default:
		// operations with tags other than add, update and delete are ignored.
break
}
return ignoreEvent, nil
}
// enqueueSpc takes a SPC resource and converts it into a namespace/name
// string which is then put onto the work queue. This method should *not* be
// passed resources of any type other than SPC.
func (c *Controller) enqueueSpc(queueLoad *QueueLoad) {
var key string
var err error
if key, err = cache.MetaNamespaceKeyFunc(queueLoad.Object); err != nil {
runtime.HandleError(err)
return
}
queueLoad.Key = key
c.workqueue.AddRateLimited(queueLoad)
}
// getSpcResource returns object corresponding to the resource key
func (c *Controller) getSpcResource(key string) (*apis.StoragePoolClaim, error) {
// Convert the key(namespace/name) string into a distinct name
_, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
runtime.HandleError(fmt.Errorf("Invalid resource key: %s", key))
return nil, err
}
spcGot, err := c.clientset.OpenebsV1alpha1().StoragePoolClaims().Get(name, metav1.GetOptions{})
if err != nil {
// The SPC resource may no longer exist, in which case we stop
// processing.
if errors.IsNotFound(err) {
runtime.HandleError(fmt.Errorf("spcGot '%s' in work queue no longer exists:'%v'", key, err))
// No need to return error to caller as we still want to fire the delete handler
// using the spc key(name)
// If error is returned the caller function will return without calling the spcEventHandler
// function that invokes business logic for pool deletion
return nil, nil
}
return nil, err
}
return spcGot, nil
}
func syncSpc(spcGot *apis.StoragePoolClaim) error {
if len(spcGot.Spec.Disks.DiskList) > 0 {
// TODO : reconciliation for manual storagepool provisioning
glog.V(1).Infof("No reconciliation needed for manual provisioned pool of storagepoolclaim %s", spcGot.Name)
return nil
}
glog.V(1).Infof("Syncing storagepoolclaim %s", spcGot.Name)
// Get kubernetes clientset
// namespaces is not required, hence passed empty.
newK8sClient, err := k8s.NewK8sClient("")
if err != nil {
return err
}
// Get openebs clientset using a getter method (i.e. GetOECS() ) as
// the openebs clientset is not exported.
newOecsClient := newK8sClient.GetOECS()
// Get the current count of provisioned pool for the storagepool claim
spList, err := newOecsClient.OpenebsV1alpha1().StoragePools().List(metav1.ListOptions{LabelSelector: string(apis.StoragePoolClaimCPK) + "=" + spcGot.Name})
if err != nil {
return fmt.Errorf("unable to list storagepools: %v", err)
}
currentPoolCount := len(spList.Items)
// If current pool count is less than maxpool count, try to converge to maxpool
if currentPoolCount < int(spcGot.Spec.MaxPools) {
glog.Infof("Converging storagepoolclaim %s to desired state:current pool count is %d,desired pool count is %d", spcGot.Name, currentPoolCount, spcGot.Spec.MaxPools)
// pendingPoolCount holds the pending pool that should be provisioned to get the desired state.
pendingPoolCount := int(spcGot.Spec.MaxPools) - currentPoolCount
// Call the storage pool create logic to provision the pending pools.
err := CreateStoragePool(spcGot, true, pendingPoolCount)
if err != nil {
return err
}
}
return nil
}
| 1 | 9,639 | Remove this blank line | openebs-maya | go |
@@ -23,12 +23,19 @@ import com.google.api.codegen.discovery.Schema.Format;
import com.google.api.codegen.discovery.Schema.Type;
import com.google.api.codegen.transformer.ImportTypeTable;
import com.google.api.codegen.util.Name;
+import com.google.api.codegen.util.SymbolTable;
import com.google.api.codegen.util.TypeName;
import com.google.api.tools.framework.model.Oneof;
import com.google.api.tools.framework.model.TypeRef.Cardinality;
import com.google.common.base.Preconditions;
+import com.google.common.base.Strings;
import com.google.common.collect.ImmutableList;
+import java.util.AbstractMap;
+import java.util.Comparator;
+import java.util.HashMap;
import java.util.List;
+import java.util.Map;
+import java.util.TreeMap;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import javax.annotation.Nullable; | 1 | /* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.api.codegen.config;
import com.google.api.codegen.discogapic.StringTypeModel;
import com.google.api.codegen.discogapic.transformer.DiscoGapicParser;
import com.google.api.codegen.discovery.Document;
import com.google.api.codegen.discovery.Method;
import com.google.api.codegen.discovery.Schema;
import com.google.api.codegen.discovery.Schema.Format;
import com.google.api.codegen.discovery.Schema.Type;
import com.google.api.codegen.transformer.ImportTypeTable;
import com.google.api.codegen.util.Name;
import com.google.api.codegen.util.TypeName;
import com.google.api.tools.framework.model.Oneof;
import com.google.api.tools.framework.model.TypeRef.Cardinality;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import javax.annotation.Nullable;
/** A field declaration wrapper around a Discovery Schema. */
public class DiscoveryField implements FieldModel, TypeModel {
private final List<DiscoveryField> properties;
// Dereferenced schema to use for rendering type names and determining properties, type, and format.
private final Schema schema;
// Not dereferenced schema; used in rendering this FieldModel's parameter name.
private final Schema originalSchema;
private final DiscoApiModel apiModel;
/**
* Create a FieldModel object from a non-null Schema object, and internally dereference the input
* schema.
*/
private DiscoveryField(Schema schema, DiscoApiModel apiModel) {
Preconditions.checkNotNull(schema);
this.originalSchema = schema;
this.schema = schema.dereference();
this.apiModel = apiModel;
ImmutableList.Builder<DiscoveryField> propertiesBuilder = ImmutableList.builder();
for (Schema child : this.schema.properties().values()) {
propertiesBuilder.add(DiscoveryField.create(child, apiModel));
}
this.properties = propertiesBuilder.build();
}
/** Create a FieldModel object from a non-null Schema object. */
public static DiscoveryField create(Schema schema, DiscoApiModel rootApiModel) {
Preconditions.checkNotNull(schema);
Preconditions.checkNotNull(rootApiModel);
return new DiscoveryField(schema, rootApiModel);
}
/** @return the underlying Discovery Schema. */
public Schema getDiscoveryField() {
return schema;
}
@Override
public String getSimpleName() {
return DiscoGapicParser.stringToName(schema.getIdentifier()).toLowerCamel();
}
@Override
public String getFullName() {
return DiscoGapicParser.getSchemaNameAsParameter(originalSchema).toUpperCamel();
}
@Override
public String getNameAsParameter() {
return getNameAsParameterName().toLowerCamel();
}
@Override
public Name getNameAsParameterName() {
return DiscoGapicParser.getSchemaNameAsParameter(originalSchema);
}
@Override
public String getTypeFullName() {
return originalSchema.getIdentifier();
}
@Override
public boolean isMap() {
return originalSchema.additionalProperties() != null;
}
@Override
public TypeModel getMapKeyType() {
if (isMap()) {
// Assume that the schema's additionalProperties map keys are Strings.
return StringTypeModel.getInstance();
}
return null;
}
@Override
public TypeModel getMapValueType() {
if (isMap()) {
return DiscoveryField.create(originalSchema.additionalProperties(), apiModel);
}
return null;
}
@Override
public boolean isMessage() {
return !isPrimitiveType();
}
@Override
public boolean isRequired() {
return schema.required();
}
@Override
public boolean isRepeated() {
return schema.type() == Type.ARRAY;
}
@Override
public boolean mayBeInResourceName() {
// A ResourceName will only contain path parameters.
return schema.isPathParam();
}
@Override
public String getParentFullName() {
String parentName;
if (schema.parent() instanceof Method) {
parentName = DiscoGapicParser.getRequestName((Method) schema.parent()).toUpperCamel();
} else if (schema.parent() instanceof Schema) {
parentName = Name.anyCamel(((Schema) schema.parent()).getIdentifier()).toUpperCamel();
} else if (schema.parent() instanceof Document) {
parentName = ((Document) schema.parent()).name();
} else {
parentName = "";
}
return ResourceNameMessageConfig.getFullyQualifiedMessageName(
apiModel.getDefaultPackageName(), parentName);
}
@Override
public String getParentSimpleName() {
return schema.parent().id();
}
@Override
public TypeName getParentTypeName(ImportTypeTable typeTable) {
if (schema.parent() instanceof Schema) {
DiscoveryField parent = DiscoveryField.create((Schema) schema.parent(), apiModel);
return typeTable.getTypeTable().getTypeName(typeTable.getFullNameFor((FieldModel) parent));
}
return typeTable.getTypeTable().getTypeName(typeTable.getFullNameFor((FieldModel) this));
}
@Override
public Cardinality getCardinality() {
throw new IllegalArgumentException("Discovery model types have no defined Cardinality.");
}
@Override
public boolean isEnum() {
// TODO(andrealin): implement.
return false;
}
@Override
public boolean isPrimitive() {
return schema.items() == null && schema.type() != Type.OBJECT;
}
@Override
/* @Get the description of the element scoped to the visibility as currently set in the model. */
public String getScopedDocumentation() {
return schema.description();
}
@Override
public boolean isString() {
return schema.type().equals(Type.STRING);
}
@Override
public boolean isBytes() {
return schema.type().equals(Type.ANY)
|| (schema.type().equals(Type.STRING) && schema.format().equals(Format.BYTE));
}
@Override
public String getKind() {
return schema.type().toString();
}
@Nullable
@Override
public Oneof getOneof() {
return null;
}
@Override
public void validateValue(String value) {
switch (schema.type()) {
case BOOLEAN:
String lowerCaseValue = value.toLowerCase();
if (lowerCaseValue.equals("true") || lowerCaseValue.equals("false")) {
return;
}
break;
case NUMBER:
if (Pattern.matches("[+-]?([0-9]*[.])?[0-9]+", value)) {
return;
}
break;
case INTEGER:
if (Pattern.matches("[+-]?[0-9]+", value)) {
return;
}
break;
case STRING:
switch (schema.format()) {
case INT64:
case UINT64:
if (Pattern.matches("[+-]?[0-9]+", value)) {
return;
}
break;
default:
Matcher matcher = Pattern.compile("([^\\\"']*)").matcher(value);
if (matcher.matches()) {
return;
}
break;
}
default:
// Throw an exception if a value is unsupported for the given type.
throw new IllegalArgumentException(
"Tried to assign value for unsupported Schema type "
+ schema.type()
+ ", format "
+ schema.format()
+ "; value "
+ value);
}
throw new IllegalArgumentException(
"Could not assign value '"
+ value
+ "' to type "
+ schema.type()
+ ", format "
+ schema.format());
}
@Override
public List<DiscoveryField> getFields() {
return properties;
}
@Override
public DiscoveryField getField(String key) {
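    // Prefer a direct property match; otherwise search nested schemas for the key.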
for (DiscoveryField field : getFields()) {
if (field.getNameAsParameter().equals(key)) {
return field;
}
}
Schema parentTypeSchema = getDiscoveryField();
List<Schema> pathToKeySchema = parentTypeSchema.findChild(key);
if (pathToKeySchema.size() == 0) {
return null; // key not found.
}
return DiscoveryField.create(pathToKeySchema.get(pathToKeySchema.size() - 1), apiModel);
}
@Override
// Schemas are immutable, so this is just the identity function.
public TypeModel makeOptional() {
return this;
}
@Override
public String getPrimitiveTypeName() {
Preconditions.checkArgument(isPrimitiveType());
switch (schema.type()) {
case INTEGER:
switch (schema.format()) {
case UINT32:
return "uint32";
default:
return "int32";
}
case NUMBER:
switch (schema.format()) {
case FLOAT:
return "float";
case DOUBLE:
default:
return "double";
}
case BOOLEAN:
return "bool";
case STRING:
if (schema.format() == null) {
return "string";
}
switch (schema.format()) {
case BYTE:
return "bytes";
case INT64:
return "sint64";
case UINT64:
return "uint64";
default:
return "string";
}
default:
return null;
}
}
private boolean isPrimitiveType() {
return schema.type().equals(Type.BOOLEAN)
|| schema.type().equals(Type.INTEGER)
|| schema.type().equals(Type.NUMBER)
|| schema.type().equals(Type.STRING);
}
@Override
public boolean isBooleanType() {
return schema.type().equals(Type.BOOLEAN);
}
@Override
public boolean isStringType() {
return schema.type().equals(Type.STRING);
}
@Override
public boolean isFloatType() {
return schema.type().equals(Type.NUMBER) && schema.format().equals(Format.FLOAT);
}
@Override
public boolean isBytesType() {
return schema.type().equals(Type.STRING) && schema.format().equals(Format.BYTE);
}
@Override
public boolean isDoubleType() {
return schema.type().equals(Type.NUMBER) && schema.format().equals(Format.DOUBLE);
}
@Override
public String getTypeName() {
if (isPrimitiveType()) {
return getPrimitiveTypeName();
}
switch (schema.type()) {
case ARRAY:
return "list";
default:
return "message";
}
}
@Override
public DiscoveryField getType() {
return this;
}
@Override
public boolean isEmptyType() {
return schema.getIdentifier().equals("Empty")
&& schema.type().equals(Type.OBJECT)
&& (schema.properties() == null || schema.properties().size() == 0);
}
@Override
public OneofConfig getOneOfConfig(String fieldName) {
return null;
}
@Override
public int hashCode() {
return 5 + 31 * schema.hashCode();
}
@Override
public String toString() {
return String.format("Discovery FieldModel: {%s}", schema.toString());
}
@Override
public boolean equals(Object o) {
return o != null
&& o instanceof DiscoveryField
&& ((DiscoveryField) o).schema.equals(this.schema);
}
}
| 1 | 25,374 | This is... quite unconventional (use a map entry as a key). I would suggest making your own data class for the key. | googleapis-gapic-generator | java |
@@ -213,7 +213,11 @@ class OutputList(RecycleView):
res = []
for o in outputs:
value = self.app.format_amount_and_units(o.value)
- res.append({'address': o.get_ui_address_str(), 'value': value})
+ res.append({
+ 'address': o.get_ui_address_str(),
+ 'value': value,
+ 'background_color': (0.3, 0.3, 0.3, 1),
+ 'color': (1, 1, 1, 1)})
self.data = res
| 1 | from typing import TYPE_CHECKING, Sequence
from kivy.app import App
from kivy.clock import Clock
from kivy.factory import Factory
from kivy.properties import NumericProperty, StringProperty, BooleanProperty
from kivy.core.window import Window
from kivy.uix.recycleview import RecycleView
from kivy.uix.boxlayout import BoxLayout
from electrum.gui.kivy.i18n import _
if TYPE_CHECKING:
from ...main_window import ElectrumWindow
from electrum.transaction import TxOutput
class AnimatedPopup(Factory.Popup):
''' An Animated Popup that animates in and out.
'''
anim_duration = NumericProperty(.36)
'''Duration of animation to be used
'''
__events__ = ['on_activate', 'on_deactivate']
def on_activate(self):
'''Base function to be overridden on inherited classes.
Called when the popup is done animating.
'''
pass
def on_deactivate(self):
'''Base function to be overridden on inherited classes.
Called when the popup is done animating.
'''
pass
def open(self):
'''Do the initialization of incoming animation here.
Override to set your custom animation.
'''
def on_complete(*l):
self.dispatch('on_activate')
self.opacity = 0
super(AnimatedPopup, self).open()
anim = Factory.Animation(opacity=1, d=self.anim_duration)
anim.bind(on_complete=on_complete)
anim.start(self)
def dismiss(self):
'''Do the initialization of incoming animation here.
Override to set your custom animation.
'''
def on_complete(*l):
super(AnimatedPopup, self).dismiss()
self.dispatch('on_deactivate')
anim = Factory.Animation(opacity=0, d=.25)
anim.bind(on_complete=on_complete)
anim.start(self)
class EventsDialog(Factory.Popup):
''' Abstract Popup that provides the following events
.. events::
`on_release`
`on_press`
'''
__events__ = ('on_release', 'on_press')
def __init__(self, **kwargs):
super(EventsDialog, self).__init__(**kwargs)
def on_release(self, instance):
pass
def on_press(self, instance):
pass
def close(self):
self.dismiss()
class SelectionDialog(EventsDialog):
def add_widget(self, widget, index=0):
if self.content:
self.content.add_widget(widget, index)
return
super(SelectionDialog, self).add_widget(widget)
class InfoBubble(Factory.Bubble):
'''Bubble to be used to display short Help Information'''
message = StringProperty(_('Nothing set !'))
'''Message to be displayed; defaults to "nothing set"'''
icon = StringProperty('')
''' Icon to be displayed along with the message defaults to ''
:attr:`icon` is a `StringProperty` defaults to `''`
'''
fs = BooleanProperty(False)
''' Show Bubble in half screen mode
:attr:`fs` is a `BooleanProperty` defaults to `False`
'''
modal = BooleanProperty(False)
''' Allow bubble to be hidden on touch.
    :attr:`modal` is a `BooleanProperty` defaults to `False`.
'''
exit = BooleanProperty(False)
'''Indicates whether to exit app after bubble is closed.
:attr:`exit` is a `BooleanProperty` defaults to False.
'''
dim_background = BooleanProperty(False)
''' Indicates Whether to draw a background on the windows behind the bubble.
:attr:`dim` is a `BooleanProperty` defaults to `False`.
'''
def on_touch_down(self, touch):
if self.modal:
return True
self.hide()
if self.collide_point(*touch.pos):
return True
def show(self, pos, duration, width=None, modal=False, exit=False):
'''Animate the bubble into position'''
self.modal, self.exit = modal, exit
if width:
self.width = width
if self.modal:
from kivy.uix.modalview import ModalView
self._modal_view = m = ModalView(background_color=[.5, .5, .5, .2])
Window.add_widget(m)
m.add_widget(self)
else:
Window.add_widget(self)
# wait for the bubble to adjust its size according to text then animate
Clock.schedule_once(lambda dt: self._show(pos, duration))
def _show(self, pos, duration):
def on_stop(*l):
if duration:
Clock.schedule_once(self.hide, duration + .5)
self.opacity = 0
arrow_pos = self.arrow_pos
if arrow_pos[0] in ('l', 'r'):
pos = pos[0], pos[1] - (self.height/2)
else:
pos = pos[0] - (self.width/2), pos[1]
self.limit_to = Window
anim = Factory.Animation(opacity=1, pos=pos, d=.32)
anim.bind(on_complete=on_stop)
anim.cancel_all(self)
anim.start(self)
def hide(self, now=False):
''' Auto fade out the Bubble
'''
def on_stop(*l):
if self.modal:
m = self._modal_view
m.remove_widget(self)
Window.remove_widget(m)
Window.remove_widget(self)
if self.exit:
App.get_running_app().stop()
import sys
sys.exit()
else:
App.get_running_app().is_exit = False
if now:
return on_stop()
anim = Factory.Animation(opacity=0, d=.25)
anim.bind(on_complete=on_stop)
anim.cancel_all(self)
anim.start(self)
class OutputItem(BoxLayout):
pass
class OutputList(RecycleView):
def __init__(self, **kwargs):
super(OutputList, self).__init__(**kwargs)
self.app = App.get_running_app() # type: ElectrumWindow
def update(self, outputs: Sequence['TxOutput']):
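        # Build one row per output: the destination address plus the amount formatted with units.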
res = []
for o in outputs:
value = self.app.format_amount_and_units(o.value)
res.append({'address': o.get_ui_address_str(), 'value': value})
self.data = res
class TopLabel(Factory.Label):
pass
class RefLabel(TopLabel):
pass
| 1 | 13,393 | why are these colors needed to be specified here? when are they used? | spesmilo-electrum | py |
@@ -139,7 +139,12 @@ abstract class BaseMetadataTable implements Table {
@Override
public Rollback rollback() {
- throw new UnsupportedOperationException("Cannot roll back a metadata table");
+ throw new UnsupportedOperationException("Cannot rollback snapshots from a metadata table");
+ }
+
+ @Override
+ public ManageSnapshots manageSnapshots() {
+ throw new UnsupportedOperationException("Cannot manage snapshots in a metadata table");
}
@Override | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg;
import com.google.common.collect.ImmutableMap;
import java.util.List;
import java.util.Map;
import org.apache.iceberg.encryption.EncryptionManager;
import org.apache.iceberg.io.FileIO;
import org.apache.iceberg.io.LocationProvider;
abstract class BaseMetadataTable implements Table {
private PartitionSpec spec = PartitionSpec.unpartitioned();
abstract Table table();
abstract String metadataTableName();
@Override
public FileIO io() {
return table().io();
}
@Override
public EncryptionManager encryption() {
return table().encryption();
}
@Override
public LocationProvider locationProvider() {
return table().locationProvider();
}
@Override
public void refresh() {
table().refresh();
}
@Override
public PartitionSpec spec() {
return spec;
}
@Override
public Map<Integer, PartitionSpec> specs() {
return ImmutableMap.of(spec.specId(), spec);
}
@Override
public Map<String, String> properties() {
return ImmutableMap.of();
}
@Override
public Snapshot currentSnapshot() {
return table().currentSnapshot();
}
@Override
public Iterable<Snapshot> snapshots() {
return table().snapshots();
}
@Override
public Snapshot snapshot(long snapshotId) {
return table().snapshot(snapshotId);
}
@Override
public List<HistoryEntry> history() {
return table().history();
}
@Override
public UpdateSchema updateSchema() {
throw new UnsupportedOperationException("Cannot update the schema of a metadata table");
}
@Override
public UpdateProperties updateProperties() {
throw new UnsupportedOperationException("Cannot update the properties of a metadata table");
}
@Override
public UpdateLocation updateLocation() {
throw new UnsupportedOperationException("Cannot update the location of a metadata table");
}
@Override
public AppendFiles newAppend() {
throw new UnsupportedOperationException("Cannot append to a metadata table");
}
@Override
public RewriteFiles newRewrite() {
throw new UnsupportedOperationException("Cannot rewrite in a metadata table");
}
@Override
public RewriteManifests rewriteManifests() {
throw new UnsupportedOperationException("Cannot rewrite manifests in a metadata table");
}
@Override
public OverwriteFiles newOverwrite() {
throw new UnsupportedOperationException("Cannot overwrite in a metadata table");
}
@Override
public ReplacePartitions newReplacePartitions() {
throw new UnsupportedOperationException("Cannot replace partitions in a metadata table");
}
@Override
public DeleteFiles newDelete() {
throw new UnsupportedOperationException("Cannot delete from a metadata table");
}
@Override
public ExpireSnapshots expireSnapshots() {
throw new UnsupportedOperationException("Cannot expire snapshots from a metadata table");
}
@Override
public Rollback rollback() {
throw new UnsupportedOperationException("Cannot roll back a metadata table");
}
@Override
public Transaction newTransaction() {
throw new UnsupportedOperationException("Cannot create transactions for a metadata table");
}
@Override
public String toString() {
return table().toString() + "." + metadataTableName();
}
}
| 1 | 17,201 | Nit: no need to change this method. | apache-iceberg | java |
@@ -19,11 +19,15 @@ import inspect
# pylint: disable=line-too-long
from google.cloud.forseti.common.util import logger
from google.cloud.forseti.common.util import string_formats
+from google.cloud.forseti.services.inventory.storage import DataAccess
+
from google.cloud.forseti.notifier.notifiers.base_notification import BaseNotification
from google.cloud.forseti.notifier.notifiers import cscc_notifier
+from google.cloud.forseti.notifier.notifiers import email_violations
from google.cloud.forseti.notifier.notifiers.inventory_summary import InventorySummary
-from google.cloud.forseti.services.inventory.storage import DataAccess
from google.cloud.forseti.services.scanner import dao as scanner_dao
+
+
# pylint: enable=line-too-long
| 1 | # Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Notifier runner."""
import importlib
import inspect
# pylint: disable=line-too-long
from google.cloud.forseti.common.util import logger
from google.cloud.forseti.common.util import string_formats
from google.cloud.forseti.notifier.notifiers.base_notification import BaseNotification
from google.cloud.forseti.notifier.notifiers import cscc_notifier
from google.cloud.forseti.notifier.notifiers.inventory_summary import InventorySummary
from google.cloud.forseti.services.inventory.storage import DataAccess
from google.cloud.forseti.services.scanner import dao as scanner_dao
# pylint: enable=line-too-long
LOGGER = logger.get_logger(__name__)
# pylint: disable=inconsistent-return-statements
def find_notifiers(notifier_name):
"""Get the first class in the given sub module
Args:
notifier_name (str): Name of the notifier.
    Returns:
class: The class in the sub module
"""
try:
module = importlib.import_module(
'google.cloud.forseti.notifier.notifiers.{0}'.format(
notifier_name))
for filename in dir(module):
obj = getattr(module, filename)
if inspect.isclass(obj) \
and issubclass(obj, BaseNotification) \
and obj is not BaseNotification:
return obj
except ImportError:
LOGGER.exception('Can\'t import notifier %s', notifier_name)
# pylint: enable=inconsistent-return-statements
def convert_to_timestamp(violations):
"""Convert violation created_at_datetime to timestamp string.
Args:
violations (dict): List of violations as dict with
created_at_datetime.
Returns:
list: List of violations as dict with created_at_datetime
converted to timestamp string.
"""
for violation in violations:
violation['created_at_datetime'] = (
violation['created_at_datetime'].strftime(
string_formats.TIMESTAMP_TIMEZONE))
return violations
# pylint: disable=too-many-branches,too-many-statements
def run(inventory_index_id,
scanner_index_id,
progress_queue,
service_config=None):
"""Run the notifier.
Entry point when the notifier is run as a library.
Args:
inventory_index_id (int64): Inventory index id.
scanner_index_id (int64): Scanner index id.
progress_queue (Queue): The progress queue.
service_config (ServiceConfig): Forseti 2.0 service configs.
Returns:
int: Status code.
"""
# pylint: disable=too-many-locals
global_configs = service_config.get_global_config()
notifier_configs = service_config.get_notifier_config()
with service_config.scoped_session() as session:
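        # Resolve whichever index id was not supplied so violations are read for the right scan.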
if scanner_index_id:
inventory_index_id = (
DataAccess.get_inventory_index_id_by_scanner_index_id(
session,
scanner_index_id))
else:
if not inventory_index_id:
inventory_index_id = (
DataAccess.get_latest_inventory_index_id(session))
scanner_index_id = scanner_dao.get_latest_scanner_index_id(
session, inventory_index_id)
if not scanner_index_id:
LOGGER.error(
'No success or partial success scanner index found for '
'inventory index: "%s".', str(inventory_index_id))
else:
# get violations
violation_access = scanner_dao.ViolationAccess(session)
violations = violation_access.list(
scanner_index_id=scanner_index_id)
violations_as_dict = []
for violation in violations:
violations_as_dict.append(
scanner_dao.convert_sqlalchemy_object_to_dict(violation))
violations_as_dict = convert_to_timestamp(violations_as_dict)
violation_map = scanner_dao.map_by_resource(violations_as_dict)
for retrieved_v in violation_map:
log_message = (
'Retrieved {} violations for resource \'{}\''.format(
len(violation_map[retrieved_v]), retrieved_v))
LOGGER.info(log_message)
progress_queue.put(log_message)
# build notification notifiers
notifiers = []
for resource in notifier_configs['resources']:
if violation_map.get(resource['resource']) is None:
log_message = 'Resource \'{}\' has no violations'.format(
resource['resource'])
progress_queue.put(log_message)
LOGGER.info(log_message)
continue
if not resource['should_notify']:
LOGGER.debug('Not notifying for: %s', resource['resource'])
continue
for notifier in resource['notifiers']:
log_message = (
'Running \'{}\' notifier for resource \'{}\''.format(
notifier['name'], resource['resource']))
progress_queue.put(log_message)
LOGGER.info(log_message)
chosen_pipeline = find_notifiers(notifier['name'])
notifiers.append(chosen_pipeline(
resource['resource'], inventory_index_id,
violation_map[resource['resource']], global_configs,
notifier_configs, notifier['configuration']))
# Run the notifiers.
for notifier in notifiers:
notifier.run()
# Run the CSCC notifier.
violation_configs = notifier_configs.get('violation')
if violation_configs:
if violation_configs.get('cscc').get('enabled'):
source_id = violation_configs.get('cscc').get('source_id')
if source_id:
# beta mode
LOGGER.debug(
'Running CSCC notifier with beta API. source_id: '
'%s', source_id)
(cscc_notifier.CsccNotifier(inventory_index_id)
.run(violations_as_dict, source_id=source_id))
else:
# alpha mode
LOGGER.debug('Running CSCC notifier with alpha API.')
gcs_path = (
violation_configs.get('cscc').get('gcs_path'))
mode = violation_configs.get('cscc').get('mode')
organization_id = (
violation_configs.get('cscc').get(
'organization_id'))
(cscc_notifier.CsccNotifier(inventory_index_id)
.run(violations_as_dict, gcs_path, mode,
organization_id))
InventorySummary(service_config, inventory_index_id).run()
log_message = 'Notification completed!'
progress_queue.put(log_message)
progress_queue.put(None)
LOGGER.info(log_message)
return 0
# pylint: enable=too-many-branches,too-many-statements
| 1 | 33,097 | Why a blank line? | forseti-security-forseti-security | py |
@@ -103,7 +103,7 @@
<h2><%= _('Request expert feedback') %></h2>
<p><%= _('Click below to give data management staff at your organisation access to read and comment on your plan.') %></p>
<div class="well well-sm">
- <%= current_user.org.feedback_email_msg.html_safe %>
+ <%= current_user.org.feedback_email_msg %>
</div>
<p><%= _('You can continue to edit and download the plan in the interim.') %></p>
| 1 | <h2><%= _('Set plan visibility') %></h2>
<p class="form-control-static"><%= _('Public or organisational visibility is intended for finished plans. You must answer at least %{percentage}%% of the questions to enable these options. Note: test plans are set to private visibility by default.') % { :percentage => Rails.application.config.default_plan_percentage_answered } %></p>
<% allow_visibility = @plan.visibility_allowed? %>
<%= form_for(@plan, url: visibility_plan_path, method: :post, html: { id: 'set_visibility', remote: true }) do |f| %>
<fieldset<%= (allow_visibility ? '' : ' disabled') %>>
<div class="form-group col-xs-8">
<div class="radio">
<%= f.label :visibility_privately_visible, raw("#{f.radio_button :visibility, :privately_visible}\
#{_('Private: visible to me, specified collaborators and administrators at my organisation')}") %>
</div>
<div class="radio">
<%= f.label :visibility_organisationally_visible, raw("#{f.radio_button :visibility, :organisationally_visible} #{_('Organisation: anyone at my organisation can view')}") %>
</div>
<div class="radio">
<%= f.label :visibility_publicly_visible, raw("#{f.radio_button :visibility, :publicly_visible} #{_('Public: anyone can view')}") %>
</div>
</div>
<div class="col-xs-8">
<%= f.submit(_('Update'), style: 'display:none') %>
</div>
</fieldset>
<% end %>
<h2><%= _('Manage collaborators')%></h2>
<p><%= _('Invite specific people to read, edit, or administer your plan. Invitees will receive an email notification that they have access to this plan.') %></p>
<% administerable = @plan.administerable_by?(current_user) %>
<% if @plan.roles.any? then %>
<table class="table table-hover table-bordered" id="collaborator-table">
<thead>
<tr>
<th scope="col"><%= _('Email address')%></th>
<th scope="col"><%= _('Permissions')%></th>
<% if administerable %>
<th scope="col"><span aria-hidden="false" class="sr-only"><%= _('Actions') %></span></th>
<% end %>
</tr>
</thead>
<tbody>
<% @plan_roles.each do |role| %>
<tr>
<td><%= role.user.name %></td>
<td>
<% if role.creator? %>
<span><%= _('Owner') %></span>
<% else %>
<% if administerable && role.user != current_user %>
<%= form_for role, url: { controller: :roles, action: :update, id: role.id }, remote: true, html: { method: :put } do |f| %>
<div class="form-group col-xs-8">
<%= f.hidden_field :id %>
<%= f.select :access_level, {"#{_('Co-owner')}": 3, "#{_('Editor')}": 2, "#{_('Read only')}": 1}, {}, {id: "#{role.id}-can-edit", class: "toggle-existing-user-access" } %>
</div>
<% end %>
<% else %>
<span><%= display_role(role) %></span>
<% end %>
<% end %>
<% if administerable %>
<td>
<% unless role.creator? || role.user == current_user then %>
<%= link_to _('Remove'), role, method: :delete, data: { confirm: _('Are you sure?') }, :class => "a-orange" %>
<% end %>
</td>
<% end %>
</tr>
<% end %>
</tbody>
</table>
<% end %>
<h2><%= _('Invite collaborators') %></h2>
<% new_role = Role.new %>
<% new_role.plan = @plan %>
<%= form_for new_role, url: {controller: :roles, action: :create }, html: {method: :post} do |f| %>
<div class="form-group col-xs-8">
<%= f.hidden_field :plan_id %>
<%= f.fields_for :user do |user| %>
<%= user.label :email, _('Email'), class: 'control-label' %>
<%= user.email_field :email, for: :user, name: "user", class: "form-control", "aria-required": true %>
<% end %>
</div>
<fieldset class="col-xs-12">
<legend><%= _('Permissions') %></legend>
<div class="form-group">
<div class="radio">
<%= f.label :access_level, raw("#{f.radio_button :access_level, 3, "aria-required": true} #{_('Co-owner: can edit project details, change visibility, and add collaborators')}") %>
</div>
<div class="radio">
<%= f.label :access_level, raw("#{f.radio_button :access_level, 2} #{_('Editor: can comment and make changes')}") %>
</div>
<div class="radio">
<%= f.label :access_level, raw("#{f.radio_button :access_level, 1} #{_('Read only: can view and comment, but not make changes')}") %>
</div>
<%= f.button(_('Submit'), class: "btn btn-primary", type: "submit") %>
</div>
<div class="clearfix"></div>
<% end %>
</fieldset>
<div class="col-xs-12">
<% if plan.owner_and_coowners.include?(current_user) && current_user.org.present? && current_user.org.feedback_enabled? %>
<h2><%= _('Request expert feedback') %></h2>
<p><%= _('Click below to give data management staff at your organisation access to read and comment on your plan.') %></p>
<div class="well well-sm">
<%= current_user.org.feedback_email_msg.html_safe %>
</div>
<p><%= _('You can continue to edit and download the plan in the interim.') %></p>
<div class="form-group col-xs-8">
<%= link_to _('Request feedback'), request_feedback_plan_path, class: "btn btn-default#{' disabled' if @plan.feedback_requested?}" %>
<span><%= _("Feedback has been requested.") if @plan.feedback_requested? %></span>
</div>
<% end %> | 1 | 17,844 | Why remove the `.html_safe` here? Should we use `sanitize` or `raw` instead? This info comes off of the org edit page and is entered by users. | DMPRoadmap-roadmap | rb |
@@ -383,7 +383,7 @@ get_dr_stats(void)
* returns zero on success, non-zero on failure
*/
DYNAMORIO_EXPORT int
-dynamorio_app_init(void)
+dynamorio_app_init(bool attach_case)
{
int size;
| 1 | /* **********************************************************
* Copyright (c) 2010-2019 Google, Inc. All rights reserved.
* Copyright (c) 2000-2010 VMware, Inc. All rights reserved.
* **********************************************************/
/*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* * Neither the name of VMware, Inc. nor the names of its contributors may be
* used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL VMWARE, INC. OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*/
/* Copyright (c) 2003-2007 Determina Corp. */
/* Copyright (c) 2001-2003 Massachusetts Institute of Technology */
/* Copyright (c) 2000-2001 Hewlett-Packard Company */
/*
* dynamo.c -- initialization and cleanup routines for DynamoRIO
*/
#include "globals.h"
#include "configure_defines.h"
#include "link.h"
#include "fragment.h"
#include "fcache.h"
#include "emit.h"
#include "dispatch.h"
#include "utils.h"
#include "monitor.h"
#include "vmareas.h"
#ifdef SIDELINE
# include "sideline.h"
#endif
#ifdef PAPI
# include "perfctr.h"
#endif
#ifdef CLIENT_INTERFACE
# include "instrument.h"
#endif
#include "hotpatch.h"
#include "moduledb.h"
#include "module_shared.h"
#include "synch.h"
#include "native_exec.h"
#include "jit_opt.h"
#ifdef ANNOTATIONS
# include "annotations.h"
#endif
#ifdef WINDOWS
/* for close handle, duplicate handle, free memory and constants associated with them
*/
/* also for nt_terminate_process_for_app() */
# include "ntdll.h"
# include "nudge.h" /* to get generic_nudge_target() address for an assert */
#endif
#ifdef RCT_IND_BRANCH
# include "rct.h"
#endif
#include "perscache.h"
#ifdef VMX86_SERVER
# include "vmkuw.h"
#endif
#ifndef STANDALONE_UNIT_TEST
# ifdef __AVX512F__
# error "DynamoRIO core should run without AVX-512 instructions to remain \
portable and to avoid frequency scaling."
# endif
#endif
/* global thread-shared variables */
bool dynamo_initialized = false;
bool dynamo_heap_initialized = false;
bool dynamo_started = false;
bool automatic_startup = false;
bool control_all_threads = false;
#ifdef WINDOWS
bool dr_early_injected = false;
int dr_early_injected_location = INJECT_LOCATION_Invalid;
bool dr_earliest_injected = false;
static void *dr_earliest_inject_args;
/* should be set if we are controlling the primary thread, either by
* injecting initially (!dr_injected_secondary_thread), or by retaking
* over (dr_late_injected_primary_thread). Used only for debugging
* purposes, yet can't rely on !dr_injected_secondary_thread very
* early in the process
*/
bool dr_injected_primary_thread = false;
bool dr_injected_secondary_thread = false;
/* should be set once we retakeover the primary thread for -inject_primary */
bool dr_late_injected_primary_thread = false;
#endif /* WINDOWS */
/* flags to indicate when DR is being initialized / exited using the API */
bool dr_api_entry = false;
bool dr_api_exit = false;
#ifdef RETURN_AFTER_CALL
bool dr_preinjected = false;
#endif /* RETURN_AFTER_CALL */
#ifdef UNIX
static bool dynamo_exiting = false;
#endif
bool dynamo_exited = false;
bool dynamo_exited_all_other_threads = false;
bool dynamo_exited_and_cleaned = false;
#ifdef DEBUG
bool dynamo_exited_log_and_stats = false;
#endif
/* Only used in release build to decide whether synch is needed, justifying
* its placement in .nspdata. If we use it for more we should protect it.
*/
DECLARE_NEVERPROT_VAR(bool dynamo_all_threads_synched, false);
bool dynamo_resetting = false;
#if defined(CLIENT_INTERFACE) || defined(STANDALONE_UNIT_TEST)
bool standalone_library = false;
#endif
#ifdef UNIX
bool post_execve = false;
#endif
/* initial stack so we don't have to use app's */
byte *d_r_initstack;
event_t dr_app_started;
event_t dr_attach_finished;
#ifdef WINDOWS
/* PR203701: separate stack for error reporting when the dstack is exhausted */
# define EXCEPTION_STACK_SIZE (2 * PAGE_SIZE)
DECLARE_NEVERPROT_VAR(byte *exception_stack, NULL);
#endif
/*******************************************************/
/* separate segment of Non-Self-Protected data to avoid data section
* protection issues -- we need to write to these vars in bootstrapping
* spots where we cannot unprotect first
*/
START_DATA_SECTION(NEVER_PROTECTED_SECTION, "w");
/* spinlock used in assembly trampolines when we can't spare registers for more */
mutex_t initstack_mutex VAR_IN_SECTION(NEVER_PROTECTED_SECTION) =
INIT_SPINLOCK_FREE(initstack_mutex);
byte *initstack_app_xsp VAR_IN_SECTION(NEVER_PROTECTED_SECTION) = 0;
/* keeps track of how many threads are in cleanup_and_terminate */
volatile int exiting_thread_count VAR_IN_SECTION(NEVER_PROTECTED_SECTION) = 0;
/* Tracks newly created threads not yet on the all_threads list. */
volatile int uninit_thread_count VAR_IN_SECTION(NEVER_PROTECTED_SECTION) = 0;
/* This is unprotected to allow stats to be written while the data
* segment is still protected (right now the only ones are selfmod stats)
*/
static dr_statistics_t nonshared_stats VAR_IN_SECTION(NEVER_PROTECTED_SECTION) = {
{ 0 },
};
/* Each lock protects its corresponding datasec_start, datasec_end, and
* datasec_writable variables.
*/
static mutex_t
datasec_lock[DATASEC_NUM] VAR_IN_SECTION(NEVER_PROTECTED_SECTION) = { { 0 } };
/* back to normal section */
END_DATA_SECTION()
/*******************************************************/
/* Like a recursive lock: 0==readonly, 1+=writable.
* This would be a simple array, but we need each in its own protected
* section, as this could be exploited.
*/
const uint datasec_writable_neverprot = 1; /* always writable */
uint datasec_writable_rareprot = 1;
DECLARE_FREQPROT_VAR(uint datasec_writable_freqprot, 1);
DECLARE_CXTSWPROT_VAR(uint datasec_writable_cxtswprot, 1);
static app_pc datasec_start[DATASEC_NUM];
static app_pc datasec_end[DATASEC_NUM];
const uint DATASEC_SELFPROT[] = {
0,
SELFPROT_DATA_RARE,
SELFPROT_DATA_FREQ,
SELFPROT_DATA_CXTSW,
};
const char *const DATASEC_NAMES[] = {
NEVER_PROTECTED_SECTION,
RARELY_PROTECTED_SECTION,
FREQ_PROTECTED_SECTION,
CXTSW_PROTECTED_SECTION,
};
/* kept in unprotected heap to avoid issues w/ data segment being RO */
typedef struct _protect_info_t {
/* FIXME: this needs to be a recursive lock to handle signals
* and exceptions!
*/
mutex_t lock;
int num_threads_unprot; /* # threads in DR code */
int num_threads_suspended;
} protect_info_t;
static protect_info_t *protect_info;
static void
data_section_init(void);
static void
data_section_exit(void);
#ifdef DEBUG /*************************/
# include <time.h>
/* FIXME: not all dynamo_options references are #ifdef DEBUG
* are we trying to hardcode the options for a release build?
*/
# ifdef UNIX
/* linux include files for mmap stuff*/
# include <sys/ipc.h>
# include <sys/types.h>
# include <unistd.h>
# endif
static uint starttime;
file_t main_logfile = INVALID_FILE;
#endif /* DEBUG ****************************/
dr_statistics_t *d_r_stats = NULL;
DECLARE_FREQPROT_VAR(static int num_known_threads, 0);
#ifdef UNIX
/* i#237/PR 498284: vfork threads that execve need to be separately delay-freed */
DECLARE_FREQPROT_VAR(int num_execve_threads, 0);
#endif
DECLARE_FREQPROT_VAR(static uint threads_ever_count, 0);
/* FIXME : not static so os.c can hand walk it for dump core */
/* FIXME: use new generic_table_t and generic_hash_* routines */
thread_record_t **all_threads; /* ALL_THREADS_HASH_BITS-bit addressed hash table */
/* these locks are used often enough that we put them in .cspdata: */
/* not static so can be referenced in win32/os.c for SuspendThread handling,
* FIXME : is almost completely redundant in usage with thread_initexit_lock
* maybe replace this lock with thread_initexit_lock? */
DECLARE_CXTSWPROT_VAR(mutex_t all_threads_lock, INIT_LOCK_FREE(all_threads_lock));
/* used for synch to prevent thread creation/deletion in critical periods
* due to its use for flushing, this lock cannot be held while couldbelinking!
*/
DECLARE_CXTSWPROT_VAR(mutex_t thread_initexit_lock, INIT_LOCK_FREE(thread_initexit_lock));
/* recursive to handle signals/exceptions while in DR code */
DECLARE_CXTSWPROT_VAR(static recursive_lock_t thread_in_DR_exclusion,
INIT_RECURSIVE_LOCK(thread_in_DR_exclusion));
static thread_synch_state_t
exit_synch_state(void);
static void
synch_with_threads_at_exit(thread_synch_state_t synch_res, bool pre_exit);
/****************************************************************************/
#ifdef DEBUG
static const char *
main_logfile_name(void)
{
return get_app_name_for_path();
}
static const char *
thread_logfile_name(void)
{
return "log";
}
#endif /* DEBUG */
/****************************************************************************/
static void
statistics_pre_init(void)
{
/* until it's set up for real, point at static var
* really only logmask and loglevel are meaningful, so be careful!
* statistics_init and create_log_directory are the only routines that
* use stats before it's set up for real, currently
*/
/* The indirection here is left over from when we used to allow alternative
* locations for stats (namely shared memory for the old MIT gui). */
d_r_stats = &nonshared_stats;
d_r_stats->process_id = get_process_id();
strncpy(d_r_stats->process_name, get_application_name(), MAXIMUM_PATH);
d_r_stats->process_name[MAXIMUM_PATH - 1] = '\0';
ASSERT(strlen(d_r_stats->process_name) > 0);
d_r_stats->num_stats = 0;
}
static void
statistics_init(void)
{
/* should have called statistics_pre_init() first */
ASSERT(d_r_stats == &nonshared_stats);
ASSERT(d_r_stats->num_stats == 0);
#ifndef DEBUG
if (!DYNAMO_OPTION(global_rstats)) {
/* references to stat values should return 0 (static var) */
return;
}
#endif
d_r_stats->num_stats = 0
#ifdef DEBUG
# define STATS_DEF(desc, name) +1
#else
# define RSTATS_DEF(desc, name) +1
#endif
#include "statsx.h"
#undef STATS_DEF
#undef RSTATS_DEF
;
/* We inline the stat description to make it easy for external processes
* to view our stats: they don't have to chase pointers, and we could put
* this in shared memory easily. However, we do waste some memory, but
* not much in release build.
*/
#ifdef DEBUG
# define STATS_DEF(desc, statname) \
strncpy(d_r_stats->statname##_pair.name, desc, \
BUFFER_SIZE_ELEMENTS(d_r_stats->statname##_pair.name)); \
NULL_TERMINATE_BUFFER(d_r_stats->statname##_pair.name);
#else
# define RSTATS_DEF(desc, statname) \
strncpy(d_r_stats->statname##_pair.name, desc, \
BUFFER_SIZE_ELEMENTS(d_r_stats->statname##_pair.name)); \
NULL_TERMINATE_BUFFER(d_r_stats->statname##_pair.name);
#endif
#include "statsx.h"
#undef STATS_DEF
#undef RSTATS_DEF
}
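/* Illustration of the X-macro pattern in statistics_init() above; the entry
 * name "example_stat" is hypothetical and not a real statsx.h entry.  An
 * entry of the form
 *     STATS_DEF("example stat description", example_stat)
 * contributes "+1" to d_r_stats->num_stats in the first statsx.h expansion,
 * and in the second expansion its description string is copied into
 * d_r_stats->example_stat_pair.name (RSTATS_DEF plays the same role for the
 * release-build stats).
 */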
static void
statistics_exit(void)
{
if (doing_detach)
memset(d_r_stats, 0, sizeof(*d_r_stats)); /* for possible re-attach */
d_r_stats = NULL;
}
dr_statistics_t *
get_dr_stats(void)
{
return d_r_stats;
}
/* initialize per-process dynamo state; this must be called before any
* threads are created and before any other API calls are made;
* returns zero on success, non-zero on failure
*/
DYNAMORIO_EXPORT int
dynamorio_app_init(void)
{
int size;
if (!dynamo_initialized /* we do enter if nullcalls is on */) {
#ifdef UNIX
os_page_size_init((const char **)our_environ, is_our_environ_followed_by_auxv());
#endif
#ifdef WINDOWS
/* MUST do this before making any system calls */
syscalls_init();
#endif
/* avoid time() for libc independence */
DODEBUG(starttime = query_time_seconds(););
#ifdef UNIX
if (getenv(DYNAMORIO_VAR_EXECVE) != NULL) {
post_execve = true;
# ifdef VMX86_SERVER
/* PR 458917: our gdt slot was not cleared on exec so we need to
* clear it now to ensure we don't leak it and eventually run out of
* slots. We could alternatively call os_tls_exit() prior to
* execve, since syscalls use thread-private fcache_enter, but
* complex to recover from execve failure, so instead we pass which
* TLS index we had.
*/
os_tls_pre_init(atoi(getenv(DYNAMORIO_VAR_EXECVE)));
# endif
/* important to remove it, don't want to propagate to forked children, etc. */
/* i#909: unsetenv is unsafe as it messes up auxv access, so we disable */
disable_env(DYNAMORIO_VAR_EXECVE);
/* check that it's gone: we've had problems with unsetenv */
ASSERT(getenv(DYNAMORIO_VAR_EXECVE) == NULL);
} else
post_execve = false;
#endif
/* default non-zero dynamo settings (options structure is
* initialized to 0 automatically)
*/
#ifdef DEBUG
# ifndef INTERNAL
nonshared_stats.logmask = LOG_ALL_RELEASE;
# else
nonshared_stats.logmask = LOG_ALL;
# endif
statistics_pre_init();
#endif
d_r_config_init();
options_init();
#ifdef WINDOWS
syscalls_init_options_read(); /* must be called after options_init
* but before init_syscall_trampolines */
#endif
utils_init();
data_section_init();
#ifdef DEBUG
/* decision: nullcalls WILL create a dynamorio.log file and
* fill it with perfctr stats!
*/
if (d_r_stats->loglevel > 0) {
main_logfile = open_log_file(main_logfile_name(), NULL, 0);
LOG(GLOBAL, LOG_TOP, 1, "global log file fd=%d\n", main_logfile);
} else {
/* loglevel 0 means we don't create a log file!
* if the loglevel is later raised, too bad! it all goes to stderr!
* N.B.: when checking for no logdir, we check for empty string or
* first char '<'!
*/
strncpy(d_r_stats->logdir, "<none (loglevel was 0 on startup)>",
MAXIMUM_PATH - 1);
d_r_stats->logdir[MAXIMUM_PATH - 1] = '\0'; /* if max no null */
main_logfile = INVALID_FILE;
}
# ifdef PAPI
/* setup hardware performance counting */
hardware_perfctr_init();
# endif
DOLOG(1, LOG_TOP, { print_version_and_app_info(GLOBAL); });
/* now exit if nullcalls, now that perfctrs are set up */
if (INTERNAL_OPTION(nullcalls)) {
print_file(main_logfile,
"** nullcalls is set, NOT taking over execution **\n\n");
return SUCCESS;
}
LOG(GLOBAL, LOG_TOP, 1, PRODUCT_NAME "'s stack size: %d Kb\n",
DYNAMORIO_STACK_SIZE / 1024);
#endif /* DEBUG */
/* set up exported statistics struct */
#ifndef DEBUG
statistics_pre_init();
#endif
statistics_init();
#ifdef VMX86_SERVER
/* Must be before {vmm,d_r}_heap_init() */
vmk_init_lib();
#endif
/* initialize components (CAUTION: order is important here) */
vmm_heap_init(); /* must be called even if not using vmm heap */
#ifdef CLIENT_INTERFACE
/* PR 200207: load the client lib before callback_interception_init
* since the client library load would hit our own hooks (xref hotpatch
* cases about that) -- though -private_loader removes that issue.
*/
instrument_load_client_libs();
#endif
d_r_heap_init();
dynamo_heap_initialized = true;
/* The process start event should be done after d_r_os_init() but before
     * process_control_init() because the former initializes event logging
* and the latter can kill the process if a violation occurs.
*/
SYSLOG(SYSLOG_INFORMATION,
IF_CLIENT_INTERFACE_ELSE(INFO_PROCESS_START_CLIENT, INFO_PROCESS_START),
IF_CLIENT_INTERFACE_ELSE(2, 3), get_application_name(),
get_application_pid() _IF_NOT_CLIENT_INTERFACE(get_application_md5()));
#ifdef PROCESS_CONTROL
if (IS_PROCESS_CONTROL_ON()) /* Case 8594. */
process_control_init();
#endif
#ifdef WINDOWS
/* Now that DR is set up, perform any final clean-up, before
* we do our address space scans.
*/
if (dr_earliest_injected)
earliest_inject_cleanup(dr_earliest_inject_args);
#endif
dynamo_vm_areas_init();
d_r_decode_init();
proc_init();
modules_init(); /* before vm_areas_init() */
d_r_os_init();
config_heap_init(); /* after heap_init */
/* Setup for handling faults in loader_init() */
/* initial stack so we don't have to use app's
* N.B.: we never de-allocate d_r_initstack (see comments in app_exit)
*/
d_r_initstack = (byte *)stack_alloc(DYNAMORIO_STACK_SIZE, NULL);
LOG(GLOBAL, LOG_SYNCH, 2, "d_r_initstack is " PFX "-" PFX "\n",
d_r_initstack - DYNAMORIO_STACK_SIZE, d_r_initstack);
#ifdef WINDOWS
/* PR203701: separate stack for error reporting when the
* dstack is exhausted
*/
exception_stack = (byte *)stack_alloc(EXCEPTION_STACK_SIZE, NULL);
#endif
#ifdef WINDOWS
if (!INTERNAL_OPTION(noasynch)) {
/* We split the hooks up: first we put in just Ki* to catch
* exceptions in client init routines (PR 200207), but we don't want
* syscall hooks so client init can scan syscalls.
* Xref PR 216934 where this was originally down below 1st thread init,
* before we had GLOBAL_DCONTEXT.
*/
callback_interception_init_start();
}
#endif /* WINDOWS */
#ifdef WINDOWS
/* loader initialization, finalize the private lib load.
* i#338: this must be before d_r_arch_init() for Windows, but Linux
* wants it later (i#2751).
*/
loader_init();
#endif
d_r_arch_init();
synch_init();
#ifdef KSTATS
kstat_init();
#endif
d_r_monitor_init();
fcache_init();
d_r_link_init();
fragment_init();
moduledb_init(); /* before vm_areas_init, after heap_init */
perscache_init(); /* before vm_areas_init */
native_exec_init(); /* before vm_areas_init, after arch_init */
if (!DYNAMO_OPTION(thin_client)) {
#ifdef HOT_PATCHING_INTERFACE
/* must init hotp before vm_areas_init() calls find_executable_vm_areas() */
if (DYNAMO_OPTION(hot_patching))
hotp_init();
#endif
}
#ifdef INTERNAL
{
char initial_options[MAX_OPTIONS_STRING];
get_dynamo_options_string(&dynamo_options, initial_options,
sizeof(initial_options), true);
SYSLOG_INTERNAL_INFO("Initial options = %s", initial_options);
DOLOG(1, LOG_TOP, {
get_pcache_dynamo_options_string(&dynamo_options, initial_options,
sizeof(initial_options),
OP_PCACHE_LOCAL);
LOG(GLOBAL, LOG_TOP, 1, "Initial pcache-affecting options = %s\n",
initial_options);
});
}
#endif /* INTERNAL */
LOG(GLOBAL, LOG_TOP, 1, "\n");
/* initialize thread hashtable */
/* Note: for thin_client, this isn't needed if it is only going to
* look for spawned processes; however, if we plan to promote from
* thin_client to hotp_only mode (highly likely), this would be needed.
* For now, leave it in there unless thin_client footprint becomes an
* issue.
*/
size = HASHTABLE_SIZE(ALL_THREADS_HASH_BITS) * sizeof(thread_record_t *);
all_threads =
(thread_record_t **)global_heap_alloc(size HEAPACCT(ACCT_THREAD_MGT));
memset(all_threads, 0, size);
if (!INTERNAL_OPTION(nop_initial_bblock) IF_WINDOWS(
|| !check_sole_thread())) /* some other thread is already here! */
bb_lock_start = true;
#ifdef SIDELINE
/* initialize sideline thread after thread table is set up */
if (dynamo_options.sideline)
sideline_init();
#endif
/* thread-specific initialization for the first thread we inject in
* (in a race with injected threads, sometimes it is not the primary thread)
*/
/* i#117/PR 395156: it'd be nice to have mc here but would
* require changing start/stop API
*/
dynamo_thread_init(NULL, NULL, NULL _IF_CLIENT_INTERFACE(false));
#ifndef WINDOWS
/* i#2751: we need TLS to be set up to relocate and call init funcs. */
loader_init();
#endif
/* We move vm_areas_init() below dynamo_thread_init() so we can have
* two things: 1) a dcontext and 2) a SIGSEGV handler, for TRY/EXCEPT
* inside vm_areas_init() for PR 361594's probes and for d_r_safe_read().
* This means vm_areas_thread_init() runs before vm_areas_init().
*/
if (!DYNAMO_OPTION(thin_client)) {
vm_areas_init();
#ifdef RCT_IND_BRANCH
/* relies on is_in_dynamo_dll() which needs vm_areas_init */
rct_init();
#endif
} else {
/* This is needed to handle exceptions in thin_client mode, mostly
* internal ones, but can be app ones too. */
dynamo_vm_areas_lock();
find_dynamo_library_vm_areas();
dynamo_vm_areas_unlock();
}
#ifdef ANNOTATIONS
annotation_init();
#endif
jitopt_init();
dr_attach_finished = create_broadcast_event();
/* New client threads rely on dr_app_started being initialized, so do
* that before initializing clients.
*/
dr_app_started = create_broadcast_event();
#ifdef CLIENT_INTERFACE
/* client last, in case it depends on other inits: must be after
* dynamo_thread_init so the client can use a dcontext (PR 216936).
* Note that we *load* the client library before installing our hooks,
* but call the client's init routine afterward so that we correctly
* report crashes (PR 200207).
* Note: DllMain in client libraries can crash and we still won't
* report; better document that client libraries shouldn't have
* DllMain.
*/
instrument_init();
/* To give clients a chance to process pcaches as we load them, we
* delay the loading until we've initialized the clients.
*/
vm_area_delay_load_coarse_units();
#endif
#ifdef WINDOWS
if (!INTERNAL_OPTION(noasynch))
callback_interception_init_finish(); /* split for PR 200207: see above */
#endif
if (SELF_PROTECT_ON_CXT_SWITCH) {
protect_info = (protect_info_t *)global_unprotected_heap_alloc(
sizeof(protect_info_t) HEAPACCT(ACCT_OTHER));
ASSIGN_INIT_LOCK_FREE(protect_info->lock, protect_info);
protect_info->num_threads_unprot = 0; /* ENTERING_DR() below will inc to 1 */
protect_info->num_threads_suspended = 0;
if (INTERNAL_OPTION(single_privileged_thread)) {
/* FIXME: thread_initexit_lock must be a recursive lock! */
ASSERT_NOT_IMPLEMENTED(false);
/* grab the lock now -- the thread that is in dynamo must be holding
* the lock, and we are the initial thread in dynamo!
*/
d_r_mutex_lock(&thread_initexit_lock);
}
/* ENTERING_DR will increment, so decrement first
* FIXME: waste of protection change since will nop-unprotect!
*/
if (TEST(SELFPROT_DATA_CXTSW, DYNAMO_OPTION(protect_mask)))
datasec_writable_cxtswprot = 0;
/* FIXME case 8073: remove once freqprot not every cxt sw */
if (TEST(SELFPROT_DATA_FREQ, DYNAMO_OPTION(protect_mask)))
datasec_writable_freqprot = 0;
}
/* this thread is now entering DR */
ENTERING_DR();
#ifdef WINDOWS
if (DYNAMO_OPTION(early_inject)) {
/* AFTER callback_interception_init and self protect init and
* ENTERING_DR() */
early_inject_init();
}
#endif
}
dynamo_initialized = true;
/* Protect .data, assuming all vars there have been initialized. */
SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT);
/* internal-only options for testing run-once (case 3990) */
if (INTERNAL_OPTION(unsafe_crash_process)) {
SYSLOG_INTERNAL_ERROR("Crashing the process deliberately!");
*((int *)PTR_UINT_MINUS_1) = 0;
}
if (INTERNAL_OPTION(unsafe_hang_process)) {
event_t never_signaled = create_event();
SYSLOG_INTERNAL_ERROR("Hanging the process deliberately!");
wait_for_event(never_signaled, 0);
destroy_event(never_signaled);
}
return SUCCESS;
}
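/* Sketch (guarded out, never compiled): one way an embedding application can
 * reach the init/exit paths in this file is through the start/stop interface
 * declared in dr_app.h.  The helper name run_region_under_dr() is made up for
 * illustration, and the exact mapping onto dynamorio_app_init() is an
 * assumption here rather than a statement about this file.
 */
#if 0
# include "dr_app.h"
static void
run_region_under_dr(void)
{
    if (dr_app_setup() != 0) /* process-wide initialization; 0 means success */
        return;
    dr_app_start(); /* begin executing out of the code cache */
    /* ... region of interest runs under DR control ... */
    dr_app_stop();    /* return to native execution */
    dr_app_cleanup(); /* process-wide cleanup */
}
#endif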
#ifdef UNIX
void
dynamorio_fork_init(dcontext_t *dcontext)
{
/* on a fork we want to re-initialize some data structures, especially
* log files, which we want a separate directory for
*/
thread_record_t **threads;
int i, num_threads;
# ifdef DEBUG
char parent_logdir[MAXIMUM_PATH];
# endif
/* re-cache app name, etc. that are using parent pid before we
* create log dirs (xref i#189/PR 452168)
*/
os_fork_init(dcontext);
/* sanity check, plus need to set this for statistics_init:
* even if parent did an execve, env var should be reset by now
*/
post_execve = (getenv(DYNAMORIO_VAR_EXECVE) != NULL);
ASSERT(!post_execve);
# ifdef DEBUG
/* copy d_r_stats->logdir
* d_r_stats->logdir is static, so current copy is fine, don't need
* frozen copy
*/
strncpy(parent_logdir, d_r_stats->logdir, MAXIMUM_PATH - 1);
d_r_stats->logdir[MAXIMUM_PATH - 1] = '\0'; /* if max no null */
# endif
if (get_log_dir(PROCESS_DIR, NULL, NULL)) {
/* we want brand new log dir */
enable_new_log_dir();
create_log_dir(PROCESS_DIR);
}
# ifdef DEBUG
/* just like dynamorio_app_init, create main_logfile before stats */
if (d_r_stats->loglevel > 0) {
/* we want brand new log files. os_fork_init() closed inherited files. */
main_logfile = open_log_file(main_logfile_name(), NULL, 0);
print_file(main_logfile, "%s\n", dynamorio_version_string);
print_file(main_logfile, "New log file for child %d forked by parent %d\n",
d_r_get_thread_id(), get_parent_id());
print_file(main_logfile, "Parent's log dir: %s\n", parent_logdir);
}
d_r_stats->process_id = get_process_id();
if (d_r_stats->loglevel > 0) {
/* FIXME: share these few lines of code w/ dynamorio_app_init? */
LOG(GLOBAL, LOG_TOP, 1, "Running: %s\n", d_r_stats->process_name);
# ifndef _WIN32_WCE
LOG(GLOBAL, LOG_TOP, 1, "DYNAMORIO_OPTIONS: %s\n", option_string);
# endif
}
# endif /* DEBUG */
vmm_heap_fork_init(dcontext);
/* must re-hash parent entry in threads table, plus no longer have any
* other threads (fork -> we're alone in address space), so clear
* out entire thread table, then add child
*/
d_r_mutex_lock(&thread_initexit_lock);
get_list_of_threads_ex(&threads, &num_threads, true /*include execve*/);
for (i = 0; i < num_threads; i++) {
if (threads[i] == dcontext->thread_record)
remove_thread(threads[i]->id);
else
dynamo_other_thread_exit(threads[i]);
}
d_r_mutex_unlock(&thread_initexit_lock);
global_heap_free(threads,
num_threads * sizeof(thread_record_t *) HEAPACCT(ACCT_THREAD_MGT));
add_thread(get_process_id(), d_r_get_thread_id(), true /*under dynamo control*/,
dcontext);
GLOBAL_STAT(num_threads) = 1;
# ifdef DEBUG
if (d_r_stats->loglevel > 0) {
/* need a new thread-local logfile */
dcontext->logfile = open_log_file(thread_logfile_name(), NULL, 0);
print_file(dcontext->logfile, "%s\n", dynamorio_version_string);
print_file(dcontext->logfile, "New log file for child %d forked by parent %d\n",
d_r_get_thread_id(), get_parent_id());
LOG(THREAD, LOG_TOP | LOG_THREADS, 1, "THREAD %d (dcontext " PFX ")\n\n",
d_r_get_thread_id(), dcontext);
}
# endif
num_threads = 1;
/* FIXME: maybe should have a callback list for who wants to be notified
* on a fork -- probably everyone who makes a log file on init.
*/
fragment_fork_init(dcontext);
/* this must be called after dynamo_other_thread_exit() above */
signal_fork_init(dcontext);
# ifdef CLIENT_INTERFACE
if (CLIENTS_EXIST()) {
instrument_fork_init(dcontext);
}
# endif
}
#endif /* UNIX */
#if defined(CLIENT_INTERFACE) || defined(STANDALONE_UNIT_TEST)
/* To make DynamoRIO useful as a library for a standalone client
* application (as opposed to a client library that works with
* DynamoRIO in executing a target application). This makes DynamoRIO
* useful as an IA-32 disassembly library, etc.
*/
dcontext_t *
standalone_init(void)
{
dcontext_t *dcontext;
if (dynamo_initialized)
return GLOBAL_DCONTEXT;
standalone_library = true;
/* We have release-build stats now so this is not just DEBUG */
d_r_stats = &nonshared_stats;
/* No reason to limit heap size when there's no code cache. */
IF_X64(dynamo_options.reachable_heap = false;)
dynamo_options.vm_base_near_app = false;
# if defined(INTERNAL) && defined(DEADLOCK_AVOIDANCE)
/* avoid issues w/ GLOBAL_DCONTEXT instead of thread dcontext */
dynamo_options.deadlock_avoidance = false;
# endif
# ifdef UNIX
os_page_size_init((const char **)our_environ, is_our_environ_followed_by_auxv());
# endif
# ifdef WINDOWS
/* MUST do this before making any system calls */
if (!syscalls_init())
return NULL; /* typically b/c of unsupported OS version */
# endif
d_r_config_init();
options_init();
vmm_heap_init();
d_r_heap_init();
dynamo_heap_initialized = true;
dynamo_vm_areas_init();
d_r_decode_init();
proc_init();
d_r_os_init();
config_heap_init();
# ifdef STANDALONE_UNIT_TEST
os_tls_init();
dcontext = create_new_dynamo_context(true /*initial*/, NULL, NULL);
set_thread_private_dcontext(dcontext);
/* sanity check */
ASSERT(get_thread_private_dcontext() == dcontext);
heap_thread_init(dcontext);
# ifdef DEBUG
/* FIXME: share code w/ main init routine? */
nonshared_stats.logmask = LOG_ALL;
options_init();
if (d_r_stats->loglevel > 0) {
char initial_options[MAX_OPTIONS_STRING];
main_logfile = open_log_file(main_logfile_name(), NULL, 0);
print_file(main_logfile, "%s\n", dynamorio_version_string);
print_file(main_logfile, "Log file for standalone unit test\n");
get_dynamo_options_string(&dynamo_options, initial_options,
sizeof(initial_options), true);
SYSLOG_INTERNAL_INFO("Initial options = %s", initial_options);
print_file(main_logfile, "\n");
}
# endif /* DEBUG */
# else
/* rather than ask the user to call some thread-init routine in
* every thread, we just use global dcontext everywhere (i#548)
*/
dcontext = GLOBAL_DCONTEXT;
# endif
/* since we do not export any dr_standalone_exit(), we clean up any .1config
     * file right now. The only loss is that we can't synch options, but that
     * should be less important for standalone, so we disable synching.
*/
/* options are never made read-only for standalone */
dynamo_options.dynamic_options = false;
dynamo_initialized = true;
return dcontext;
}
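/* Sketch (guarded out, never compiled): using DR purely as a decoding library,
 * per the comment above standalone_init().  It assumes the public entry point
 * dr_standalone_init() and the decoder routine decode_next_pc() from dr_api.h;
 * the helper name and the byte buffer are made up for illustration.
 */
# if 0
#  include "dr_api.h"
static void
decode_buffer_sketch(void)
{
    void *dc = dr_standalone_init();
    byte buf[] = { 0x90, 0xc3 }; /* x86: nop; ret */
    byte *pc = buf;
    while (pc != NULL && pc < buf + sizeof(buf))
        pc = decode_next_pc(dc, pc); /* advance one instruction at a time */
    /* No standalone cleanup call here: the comment in standalone_init() notes
     * that this version does not export a standalone exit routine.
     */
}
# endif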
void
standalone_exit(void)
{
/* We support re-attach by setting doing_detach. */
doing_detach = true;
config_heap_exit();
os_fast_exit();
os_slow_exit();
dynamo_vm_areas_exit();
d_r_heap_exit();
vmm_heap_exit();
options_exit();
d_r_config_exit();
doing_detach = false;
standalone_library = false;
dynamo_initialized = false;
}
#endif
/* Perform exit tasks that require full thread data structs, which we have
* already cleaned up by the time we reach dynamo_shared_exit() for both
* debug and detach paths.
*/
void
dynamo_process_exit_with_thread_info(void)
{
perscache_fast_exit(); /* "fast" b/c called in release as well */
}
/* shared between app_exit and detach */
int
dynamo_shared_exit(thread_record_t *toexit /* must ==cur thread for Linux */
_IF_WINDOWS(bool detach_stacked_callbacks))
{
DEBUG_DECLARE(uint endtime);
/* set this now, could already be set */
dynamo_exited = true;
/* avoid time() for libc independence */
DODEBUG(endtime = query_time_seconds(););
LOG(GLOBAL, LOG_STATS, 1, "\n#### Statistics for entire process:\n");
LOG(GLOBAL, LOG_STATS, 1, "Total running time: %d seconds\n", endtime - starttime);
#ifdef PAPI
hardware_perfctr_exit();
#endif
#ifdef DEBUG
# if defined(INTERNAL) && defined(X86)
print_optimization_stats();
# endif /* INTERNAL && X86 */
DOLOG(1, LOG_STATS, { dump_global_stats(false); });
#endif /* DEBUG */
if (SELF_PROTECT_ON_CXT_SWITCH) {
DELETE_LOCK(protect_info->lock);
global_unprotected_heap_free(protect_info,
sizeof(protect_info_t) HEAPACCT(ACCT_OTHER));
}
/* call all component exit routines (CAUTION: order is important here) */
DELETE_RECURSIVE_LOCK(thread_in_DR_exclusion);
DOSTATS({
LOG(GLOBAL, LOG_TOP | LOG_THREADS, 1,
"fcache_stats_exit: before fragment cleanup\n");
DOLOG(1, LOG_CACHE, fcache_stats_exit(););
});
#ifdef RCT_IND_BRANCH
if (!DYNAMO_OPTION(thin_client))
rct_exit();
#endif
fragment_exit();
#ifdef ANNOTATIONS
annotation_exit();
#endif
jitopt_exit();
#ifdef CLIENT_INTERFACE
/* We tell the client as soon as possible in case it wants to use services from other
* components. Must be after fragment_exit() so that the client gets all the
* fragment_deleted() callbacks (xref PR 228156). FIXME - might be issues with the
* client trying to use api routines that depend on fragment state.
*/
instrument_exit();
# ifdef CLIENT_SIDELINE
/* We only need do a second synch-all if there are sideline client threads. */
if (d_r_get_num_threads() > 1)
synch_with_threads_at_exit(exit_synch_state(), false /*post-exit*/);
/* only current thread is alive */
dynamo_exited_all_other_threads = true;
# endif /* CLIENT_SIDELINE */
    /* Some locks can only be deleted when only one thread is left. */
instrument_exit_post_sideline();
#endif /* CLIENT_INTERFACE */
fragment_exit_post_sideline();
/* The dynamo_exited_and_cleaned should be set after the second synch-all.
     * If it is set earlier, after the first synch-all, some client thread may
     * have a memory leak due to dynamo_thread_exit_pre_client being skipped in
* dynamo_thread_exit_common called from exiting client threads.
*/
dynamo_exited_and_cleaned = true;
destroy_event(dr_app_started);
destroy_event(dr_attach_finished);
/* we want dcontext around for loader_exit() */
if (get_thread_private_dcontext() != NULL)
loader_thread_exit(get_thread_private_dcontext());
loader_exit();
if (toexit != NULL) {
/* free detaching thread's dcontext */
#ifdef WINDOWS
/* If we use dynamo_thread_exit() when toexit is the current thread,
* it results in asserts in the win32.tls test, so we stick with this.
*/
d_r_mutex_lock(&thread_initexit_lock);
dynamo_other_thread_exit(toexit, false);
d_r_mutex_unlock(&thread_initexit_lock);
#else
/* On Linux, restoring segment registers can only be done
* on the current thread, which must be toexit.
*/
ASSERT(toexit->id == d_r_get_thread_id());
dynamo_thread_exit();
#endif
}
if (IF_WINDOWS_ELSE(!detach_stacked_callbacks, true)) {
/* We don't fully free cur thread until after client exit event (PR 536058) */
if (thread_lookup(d_r_get_thread_id()) == NULL) {
LOG(GLOBAL, LOG_TOP | LOG_THREADS, 1,
"Current thread never under DynamoRIO control, not exiting it\n");
} else {
/* call thread_exit even if !under_dynamo_control, could have
* been at one time
*/
/* exit this thread now */
dynamo_thread_exit();
}
}
/* now that the final thread is exited, free the all_threads memory */
d_r_mutex_lock(&all_threads_lock);
global_heap_free(all_threads,
HASHTABLE_SIZE(ALL_THREADS_HASH_BITS) *
sizeof(thread_record_t *) HEAPACCT(ACCT_THREAD_MGT));
all_threads = NULL;
d_r_mutex_unlock(&all_threads_lock);
#ifdef WINDOWS
# ifdef CLIENT_INTERFACE
/* for -private_loader we do this here to catch more exit-time crashes */
if (!INTERNAL_OPTION(noasynch) && INTERNAL_OPTION(private_loader) && !doing_detach)
callback_interception_unintercept();
# endif
/* callback_interception_exit must be after fragment exit for CLIENT_INTERFACE so
* that fragment_exit->frees fragments->instrument_fragment_deleted->
* hide_tag_from_fragment->is_intercepted_app_pc won't crash. xref PR 228156 */
if (!INTERNAL_OPTION(noasynch)) {
callback_interception_exit();
}
#endif
d_r_link_exit();
fcache_exit();
d_r_monitor_exit();
synch_exit();
d_r_arch_exit(IF_WINDOWS(detach_stacked_callbacks));
#ifdef CALL_PROFILE
/* above os_exit to avoid eventlog_mutex trigger if we're the first to
* create a log file
*/
profile_callers_exit();
#endif
os_fast_exit();
os_slow_exit();
native_exec_exit(); /* before vm_areas_exit for using dynamo_areas */
vm_areas_exit();
perscache_slow_exit(); /* fast called in dynamo_process_exit_with_thread_info() */
modules_exit(); /* after aslr_exit() from os_slow_exit(),
* after vm_areas & perscache exits */
moduledb_exit(); /* before heap_exit */
#ifdef HOT_PATCHING_INTERFACE
if (DYNAMO_OPTION(hot_patching))
hotp_exit();
#endif
#ifdef WINDOWS
/* Free exception stack before calling heap_exit */
stack_free(exception_stack, EXCEPTION_STACK_SIZE);
exception_stack = NULL;
#endif
config_heap_exit();
d_r_heap_exit();
vmm_heap_exit();
diagnost_exit();
data_section_exit();
/* funny dependences: options exit just frees lock, not destroying
* any options that are needed for other exits, so do it prior to
* checking locks in debug build
*/
options_exit();
utils_exit();
d_r_config_exit();
#ifdef KSTATS
kstat_exit();
#endif
DELETE_LOCK(all_threads_lock);
DELETE_LOCK(thread_initexit_lock);
DOLOG(1, LOG_STATS, {
/* dump after cleaning up to make it easy to check if stats that
* are inc-ed and dec-ed actually come down to 0
*/
dump_global_stats(false);
});
if (INTERNAL_OPTION(rstats_to_stderr))
dump_global_rstats_to_stderr();
statistics_exit();
#ifdef DEBUG
# ifdef DEADLOCK_AVOIDANCE
ASSERT(locks_not_closed() == 0);
# endif
dynamo_exited_log_and_stats = true;
if (main_logfile != STDERR) {
/* do it this way just in case someone tries to log to the global file
* right now */
file_t file_temp = main_logfile;
main_logfile = INVALID_FILE;
close_log_file(file_temp);
}
#else
# ifdef DEADLOCK_AVOIDANCE
ASSERT(locks_not_closed() == 0);
# endif
#endif /* DEBUG */
dynamo_initialized = false;
dynamo_started = false;
return SUCCESS;
}
/* NOINLINE because dynamorio_app_exit is a stopping point. */
NOINLINE int
dynamorio_app_exit(void)
{
return dynamo_process_exit();
}
/* synchs with all threads using synch type synch_res.
* also sets dynamo_exited to true.
* does not resume the threads but does release the thread_initexit_lock.
*/
static void
synch_with_threads_at_exit(thread_synch_state_t synch_res, bool pre_exit)
{
int num_threads;
thread_record_t **threads;
DEBUG_DECLARE(bool ok;)
/* If we fail to suspend a thread (e.g., privilege
* problems) ignore it. XXX: retry instead?
*/
uint flags = THREAD_SYNCH_SUSPEND_FAILURE_IGNORE;
if (pre_exit) {
/* i#297: we only synch client threads after process exit event. */
flags |= THREAD_SYNCH_SKIP_CLIENT_THREAD;
}
LOG(GLOBAL, LOG_TOP | LOG_THREADS, 1,
"\nsynch_with_threads_at_exit: cleaning up %d un-terminated threads\n",
d_r_get_num_threads());
#if defined(CLIENT_INTERFACE) && defined(WINDOWS)
/* make sure client nudges are finished */
wait_for_outstanding_nudges();
#endif
/* xref case 8747, requesting suspended is preferable to terminated and it
* doesn't make a difference here which we use (since the process is about
* to die).
* On Linux, however, we do not have dependencies on OS thread
* properties like we do on Windows (TEB, etc.), and our suspended
* threads use their sigstacks and ostd data structs, making cleanup
* while still catching other leaks more difficult: thus it's
* simpler to terminate and then clean up. FIXME: by terminating
* we'll raise SIGCHLD that may not have been raised natively if the
* whole group went down in a single SYS_exit_group. Instead we
* could have the suspended thread move from the sigstack-reliant
* loop to a stack-free loop (xref i#95).
*/
IF_UNIX(dynamo_exiting = true;) /* include execve-exited vfork threads */
DEBUG_DECLARE(ok =)
synch_with_all_threads(synch_res, &threads, &num_threads,
/* Case 6821: other synch-all-thread uses that
* only care about threads carrying fcache
* state can ignore us
*/
THREAD_SYNCH_NO_LOCKS_NO_XFER, flags);
ASSERT(ok);
ASSERT(threads == NULL && num_threads == 0); /* We asked for CLEANED */
/* the synch_with_all_threads function grabbed the
* thread_initexit_lock for us! */
/* do this now after all threads we know about are killed and
* while we hold the thread_initexit_lock so any new threads that
* are waiting on it won't get in our way (see thread_init()) */
dynamo_exited = true;
end_synch_with_all_threads(threads, num_threads, false /*don't resume*/);
}
static thread_synch_state_t
exit_synch_state(void)
{
thread_synch_state_t synch_res = IF_WINDOWS_ELSE(THREAD_SYNCH_SUSPENDED_AND_CLEANED,
THREAD_SYNCH_TERMINATED_AND_CLEANED);
#if defined(DR_APP_EXPORTS) && defined(UNIX)
if (dr_api_exit) {
/* Don't terminate the app's threads in case the app plans to continue
* after dr_app_cleanup(). Note that today we don't fully support that
* anyway: the app should use dr_app_stop_and_cleanup() whose detach
* code won't come here.
*/
synch_res = THREAD_SYNCH_SUSPENDED_AND_CLEANED;
}
#endif
return synch_res;
}
#ifdef DEBUG
/* cleanup after the application has exited */
static int
dynamo_process_exit_cleanup(void)
{
/* CAUTION: this should only be invoked after all app threads have stopped */
if (!dynamo_exited && !INTERNAL_OPTION(nullcalls)) {
dcontext_t *dcontext;
APP_EXPORT_ASSERT(dynamo_initialized, "Improper DynamoRIO initialization");
dcontext = get_thread_private_dcontext();
/* we deliberately do NOT clean up d_r_initstack (which was
* allocated using a separate mmap and so is not part of some
* large unit that is de-allocated), as it is used in special
* circumstances to call us...FIXME: is this memory leak ok?
* is there a better solution besides assuming the app stack?
*/
# ifdef SIDELINE
if (dynamo_options.sideline) {
/* exit now to make thread cleanup simpler */
sideline_exit();
}
# endif
/* perform exit tasks that require full thread data structs */
dynamo_process_exit_with_thread_info();
if (INTERNAL_OPTION(single_privileged_thread)) {
d_r_mutex_unlock(&thread_initexit_lock);
}
/* if ExitProcess called before all threads terminated, they won't
* all have gone through dynamo_thread_exit, so clean them up now
* so we can get stats about them
*
* we don't check control_all_threads b/c we're just killing
* the threads we know about here
*/
synch_with_threads_at_exit(exit_synch_state(), true /*pre-exit*/);
# ifndef CLIENT_SIDELINE
/* no sideline thread, synchall done */
dynamo_exited_all_other_threads = true;
# endif
/* now that APC interception point is unpatched and
         * dynamorio_exited is set and we've killed all the threads we know
* about, assumption is that no other threads will be running in
* dynamorio code from here on out (esp. when we get into shared exit)
* that will do anything that could be dangerous (could possibly be
* a thread in the APC interception code prior to reaching thread_init
* but it will only global log and do thread_lookup which should be
* safe throughout) */
/* In order to pass the client a dcontext in the process exit event
* we do some thread cleanup early for the final thread so we can delay
* the rest (PR 536058). This is a little risky in that we
* clean up dcontext->fragment_field, which is used for lots of
* things like couldbelinking (and thus we have to disable some API
* routines in the thread exit event: i#1989).
*/
dynamo_thread_exit_pre_client(get_thread_private_dcontext(), d_r_get_thread_id());
# ifdef WINDOWS
/* FIXME : our call un-interception isn't atomic so (miniscule) chance
* of something going wrong if new thread is just hitting its init APC
*/
/* w/ the app's loader we must remove our LdrUnloadDll hook
* before we unload the client lib (and thus we miss client
* exit crashes): xref PR 200207.
*/
if (!INTERNAL_OPTION(noasynch)
IF_CLIENT_INTERFACE(&&!INTERNAL_OPTION(private_loader))) {
callback_interception_unintercept();
}
# else /* UNIX */
unhook_vsyscall();
# endif /* UNIX */
return dynamo_shared_exit(NULL /* not detaching */
_IF_WINDOWS(false /* not detaching */));
}
return SUCCESS;
}
#endif /* DEBUG */
int
dynamo_nullcalls_exit(void)
{
/* this routine is used when nullcalls is turned on
* simply to get perfctr numbers in a log file
*/
ASSERT(INTERNAL_OPTION(nullcalls));
#ifdef PAPI
hardware_perfctr_exit();
#endif
#ifdef DEBUG
if (main_logfile != STDERR) {
close_log_file(main_logfile);
main_logfile = INVALID_FILE;
}
#endif /* DEBUG */
dynamo_exited = true;
return SUCCESS;
}
/* called when we see that the process is about to exit */
int
dynamo_process_exit(void)
{
#ifndef DEBUG
bool each_thread;
#endif
SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT);
synchronize_dynamic_options();
SYSLOG(SYSLOG_INFORMATION, INFO_PROCESS_STOP, 2, get_application_name(),
get_application_pid());
#ifdef DEBUG
if (!dynamo_exited) {
if (INTERNAL_OPTION(nullcalls)) {
/* if nullcalls is on we still do perfctr stats, and this is
* the only place we can print them out and exit
*/
dynamo_nullcalls_exit();
} else {
/* we don't check automatic_startup -- even if the app_
* interface is used, we are about to be gone from the process
* address space, so we clean up now
*/
LOG(GLOBAL, LOG_TOP, 1,
"\ndynamo_process_exit from thread " TIDFMT " -- cleaning up dynamo\n",
d_r_get_thread_id());
dynamo_process_exit_cleanup();
}
}
return SUCCESS;
#else
if (dynamo_exited)
return SUCCESS;
/* don't need to do much!
* we didn't create any IPC objects or anything that might be persistent
* beyond our death, we're not holding any systemwide locks, etc.
*/
/* It is not clear whether the Event Log service handles unterminated connections */
/* Do we need profile data for each thread?
* Note that windows prof_pcs duplicates the thread walk in d_r_os_exit()
* FIXME: should combine that thread walk with this one
*/
each_thread = TRACEDUMP_ENABLED();
# ifdef UNIX
each_thread = each_thread || INTERNAL_OPTION(profile_pcs);
# endif
# ifdef KSTATS
each_thread = each_thread || DYNAMO_OPTION(kstats);
# endif
# ifdef CLIENT_INTERFACE
each_thread = each_thread ||
/* If we don't need a thread exit event, avoid the possibility of
* racy crashes (PR 470957) by not calling instrument_thread_exit()
*/
(!INTERNAL_OPTION(nullcalls) && dr_thread_exit_hook_exists() &&
!DYNAMO_OPTION(skip_thread_exit_at_exit));
# endif
if (DYNAMO_OPTION(synch_at_exit)
/* by default we synch if any exit event exists */
IF_CLIENT_INTERFACE(
|| (!DYNAMO_OPTION(multi_thread_exit) && dr_exit_hook_exists()) ||
(!DYNAMO_OPTION(skip_thread_exit_at_exit) && dr_thread_exit_hook_exists()))) {
/* needed primarily for CLIENT_INTERFACE but technically all configurations
* can have racy crashes at exit time (xref PR 470957)
*/
synch_with_threads_at_exit(exit_synch_state(), true /*pre-exit*/);
# ifndef CLIENT_SIDELINE
dynamo_exited_all_other_threads = true;
# endif
} else
dynamo_exited = true;
if (each_thread) {
thread_record_t **threads;
int num, i;
d_r_mutex_lock(&thread_initexit_lock);
get_list_of_threads(&threads, &num);
for (i = 0; i < num; i++) {
# ifdef CLIENT_SIDELINE
if (IS_CLIENT_THREAD(threads[i]->dcontext))
continue;
# endif
/* FIXME: separate trace dump from rest of fragment cleanup code */
if (TRACEDUMP_ENABLED() IF_CLIENT_INTERFACE(|| true)) {
/* We always want to call this for CI builds so we can get the
* dr_fragment_deleted() callbacks.
*/
fragment_thread_exit(threads[i]->dcontext);
}
# ifdef UNIX
if (INTERNAL_OPTION(profile_pcs))
pcprofile_thread_exit(threads[i]->dcontext);
# endif
# ifdef KSTATS
if (DYNAMO_OPTION(kstats))
kstat_thread_exit(threads[i]->dcontext);
# endif
# ifdef CLIENT_INTERFACE
/* Inform client of all thread exits */
if (!INTERNAL_OPTION(nullcalls) && !DYNAMO_OPTION(skip_thread_exit_at_exit)) {
instrument_thread_exit_event(threads[i]->dcontext);
/* i#1617: ensure we do all cleanup of priv libs */
if (threads[i]->id != d_r_get_thread_id()) /* i#1617: must delay this */
loader_thread_exit(threads[i]->dcontext);
}
# endif
}
global_heap_free(threads,
num * sizeof(thread_record_t *) HEAPACCT(ACCT_THREAD_MGT));
d_r_mutex_unlock(&thread_initexit_lock);
}
/* PR 522783: must be before we clear dcontext (if CLIENT_INTERFACE)! */
/* must also be prior to fragment_exit so we actually freeze pcaches (i#703) */
dynamo_process_exit_with_thread_info();
/* FIXME: separate trace dump from rest of fragment cleanup code. For client
* interface we need to call fragment_exit to get all the fragment deleted events. */
if (TRACEDUMP_ENABLED() IF_CLIENT_INTERFACE(|| dr_fragment_deleted_hook_exists()))
fragment_exit();
/* Inform client of process exit */
# ifdef CLIENT_INTERFACE
if (!INTERNAL_OPTION(nullcalls)) {
# ifdef WINDOWS
/* instrument_exit() unloads the client library, so make sure
* LdrUnloadDll isn't hooked if using the app loader.
*/
if (!INTERNAL_OPTION(noasynch)
IF_CLIENT_INTERFACE(&&!INTERNAL_OPTION(private_loader))) {
callback_interception_unintercept();
}
# endif
# ifdef UNIX
/* i#2976: unhook prior to client exit if modules are being watched */
if (dr_modload_hook_exists())
unhook_vsyscall();
# endif
/* Must be after fragment_exit() so that the client gets all the
* fragment_deleted() callbacks (xref PR 228156). FIXME - might be issues
* with the client trying to use api routines that depend on fragment state.
*/
instrument_exit();
# ifdef CLIENT_SIDELINE
/* We only need do a second synch-all if there are sideline client threads. */
if (d_r_get_num_threads() > 1)
synch_with_threads_at_exit(exit_synch_state(), false /*post-exit*/);
dynamo_exited_all_other_threads = true;
# endif
        /* Some locks can only be deleted when only one thread is left. */
instrument_exit_post_sideline();
/* i#1617: We need to call client library fini routines for global
* destructors, etc.
*/
if (!INTERNAL_OPTION(nullcalls) && !DYNAMO_OPTION(skip_thread_exit_at_exit))
loader_thread_exit(get_thread_private_dcontext());
loader_exit();
/* for -private_loader we do this here to catch more exit-time crashes */
# ifdef WINDOWS
if (!INTERNAL_OPTION(noasynch)
IF_CLIENT_INTERFACE(&&INTERNAL_OPTION(private_loader)))
callback_interception_unintercept();
# endif
}
# endif /* CLIENT_INTERFACE */
fragment_exit_post_sideline();
# ifdef CALL_PROFILE
profile_callers_exit();
# endif
# ifdef KSTATS
if (DYNAMO_OPTION(kstats))
kstat_exit();
# endif
/* so make sure eventlog connection is terminated (if present) */
os_fast_exit();
if (INTERNAL_OPTION(rstats_to_stderr))
dump_global_rstats_to_stderr();
return SUCCESS;
#endif /* !DEBUG */
}
void
dynamo_exit_post_detach(void)
{
/* i#2157: best-effort re-init in case of re-attach */
do_once_generation++; /* Increment the generation in case we re-attach */
dynamo_initialized = false;
dynamo_heap_initialized = false;
automatic_startup = false;
control_all_threads = false;
dr_api_entry = false;
dr_api_exit = false;
#ifdef UNIX
dynamo_exiting = false;
#endif
dynamo_exited = false;
dynamo_exited_all_other_threads = false;
dynamo_exited_and_cleaned = false;
#ifdef DEBUG
dynamo_exited_log_and_stats = false;
#endif
dynamo_resetting = false;
#ifdef UNIX
post_execve = false;
#endif
vm_areas_post_exit();
heap_post_exit();
}
dcontext_t *
create_new_dynamo_context(bool initial, byte *dstack_in, priv_mcontext_t *mc)
{
dcontext_t *dcontext;
size_t alloc = sizeof(dcontext_t) + proc_get_cache_line_size();
void *alloc_start =
(void *)((TEST(SELFPROT_GLOBAL, dynamo_options.protect_mask) &&
!TEST(SELFPROT_DCONTEXT, dynamo_options.protect_mask))
?
/* if protecting global but not dcontext, put whole thing in unprot
mem */
global_unprotected_heap_alloc(alloc HEAPACCT(ACCT_OTHER))
: global_heap_alloc(alloc HEAPACCT(ACCT_OTHER)));
dcontext = (dcontext_t *)proc_bump_to_end_of_cache_line((ptr_uint_t)alloc_start);
ASSERT(proc_is_cache_aligned(dcontext));
#ifdef X86
/* 264138: ensure xmm/ymm slots are aligned so we can use vmovdqa */
ASSERT(ALIGNED(get_mcontext(dcontext)->simd, ZMM_REG_SIZE));
/* also ensure we don't have extra padding beyond x86.asm defines */
ASSERT(sizeof(priv_mcontext_t) ==
IF_X64_ELSE(18, 10) * sizeof(reg_t) + PRE_XMM_PADDING +
MCXT_TOTAL_SIMD_SLOTS_SIZE + MCXT_TOTAL_OPMASK_SLOTS_SIZE);
#elif defined(ARM)
/* FIXME i#1551: add arm alignment check if any */
#endif /* X86/ARM */
/* Put here all one-time dcontext field initialization
     * Make sure to update create_callback_dcontext to share
* fields across callback dcontexts for the same thread.
*/
/* must set to 0 so can tell if initialized for callbacks! */
memset(dcontext, 0x0, sizeof(dcontext_t));
dcontext->allocated_start = alloc_start;
/* we share a single dstack across all callbacks */
if (initial) {
/* DrMi#1723: our dstack needs to be at a higher address than the app
* stack. If mc passed, use its xsp; else use cur xsp (initial thread
* is on the app stack here: xref i#1105), for lower bound for dstack.
*/
byte *app_xsp;
if (mc == NULL)
GET_STACK_PTR(app_xsp);
else
app_xsp = (byte *)mc->xsp;
if (dstack_in == NULL) {
dcontext->dstack = (byte *)stack_alloc(DYNAMORIO_STACK_SIZE, app_xsp);
} else
dcontext->dstack = dstack_in; /* xref i#149/PR 403015 */
#ifdef WINDOWS
DOCHECK(1, {
if (dcontext->dstack < app_xsp)
SYSLOG_INTERNAL_WARNING_ONCE("dstack is below app xsp");
});
#endif
} else {
/* dstack may be pre-allocated only at thread init, not at callback */
ASSERT(dstack_in == NULL);
}
if (TEST(SELFPROT_DCONTEXT, dynamo_options.protect_mask)) {
dcontext->upcontext.separate_upcontext = global_unprotected_heap_alloc(
sizeof(unprotected_context_t) HEAPACCT(ACCT_OTHER));
/* don't need to initialize upcontext */
LOG(GLOBAL, LOG_TOP, 2, "new dcontext=" PFX ", dcontext->upcontext=" PFX "\n",
dcontext, dcontext->upcontext.separate_upcontext);
dcontext->upcontext_ptr = dcontext->upcontext.separate_upcontext;
} else
dcontext->upcontext_ptr = &(dcontext->upcontext.upcontext);
#ifdef HOT_PATCHING_INTERFACE
/* Set the hot patch exception state to be empty/unused. */
DODEBUG(memset(&dcontext->hotp_excpt_state, -1, sizeof(dr_jmp_buf_t)););
#endif
ASSERT(dcontext->try_except.try_except_state == NULL);
DODEBUG({ dcontext->logfile = INVALID_FILE; });
dcontext->owning_thread = d_r_get_thread_id();
#ifdef UNIX
dcontext->owning_process = get_process_id();
#endif
/* thread_record is set in add_thread */
/* all of the thread-private fcache and hashtable fields are shared
* among all dcontext instances of a thread, so the caller must
* set those fields
*/
/* rest of dcontext initialization happens in initialize_dynamo_context(),
* which is executed for each dr_app_start() and each
* callback start
*/
return dcontext;
}
static void
delete_dynamo_context(dcontext_t *dcontext, bool free_stack)
{
if (free_stack) {
ASSERT(dcontext->dstack != NULL);
ASSERT(!is_currently_on_dstack(dcontext));
LOG(GLOBAL, LOG_THREADS, 1, "Freeing DR stack " PFX "\n", dcontext->dstack);
stack_free(dcontext->dstack, DYNAMORIO_STACK_SIZE);
} /* else will be cleaned up by caller */
ASSERT(dcontext->try_except.try_except_state == NULL);
if (TEST(SELFPROT_DCONTEXT, dynamo_options.protect_mask)) {
global_unprotected_heap_free(dcontext->upcontext.separate_upcontext,
sizeof(unprotected_context_t) HEAPACCT(ACCT_OTHER));
}
if (TEST(SELFPROT_GLOBAL, dynamo_options.protect_mask) &&
!TEST(SELFPROT_DCONTEXT, dynamo_options.protect_mask)) {
/* if protecting global but not dcontext, we put whole thing in unprot mem */
global_unprotected_heap_free(dcontext->allocated_start,
sizeof(dcontext_t) +
proc_get_cache_line_size() HEAPACCT(ACCT_OTHER));
} else {
global_heap_free(dcontext->allocated_start,
sizeof(dcontext_t) +
proc_get_cache_line_size() HEAPACCT(ACCT_OTHER));
}
}
/* This routine is called not only at thread initialization,
* but for every callback, etc. that gets a fresh execution
* environment!
*/
void
initialize_dynamo_context(dcontext_t *dcontext)
{
/* we can't just zero out the whole thing b/c we have persistent state
* (fields kept across callbacks, like dstack, module-private fields, next &
* prev, etc.)
*/
memset(dcontext->upcontext_ptr, 0, sizeof(unprotected_context_t));
dcontext->initialized = true;
dcontext->whereami = DR_WHERE_APP;
dcontext->next_tag = NULL;
dcontext->native_exec_postsyscall = NULL;
memset(dcontext->native_retstack, 0, sizeof(dcontext->native_retstack));
dcontext->native_retstack_cur = 0;
dcontext->isa_mode = DEFAULT_ISA_MODE;
#ifdef ARM
dcontext->encode_state[0] = 0;
dcontext->encode_state[1] = 0;
dcontext->decode_state[0] = 0;
dcontext->decode_state[1] = 0;
#endif
dcontext->sys_num = 0;
#ifdef WINDOWS
# ifdef CLIENT_INTERFACE
dcontext->app_errno = 0;
# ifdef DEBUG
dcontext->is_client_thread_exiting = false;
# endif
# endif
dcontext->sys_param_base = NULL;
/* always initialize aslr_context */
dcontext->aslr_context.sys_aslr_clobbered = 0;
dcontext->aslr_context.randomized_section_handle = INVALID_HANDLE_VALUE;
dcontext->aslr_context.original_image_section_handle = INVALID_HANDLE_VALUE;
dcontext->aslr_context.original_section_base = ASLR_INVALID_SECTION_BASE;
# ifdef DEBUG
dcontext->aslr_context.last_app_section_handle = INVALID_HANDLE_VALUE;
# endif
/* note that aslr_context.last_child_padded is preserved across callbacks */
dcontext->ignore_enterexit = false;
#else
dcontext->sys_param0 = 0;
dcontext->sys_param1 = 0;
dcontext->sys_param2 = 0;
#endif
#ifdef UNIX
dcontext->signals_pending = false;
#endif
/* all thread-private fields are initialized in dynamo_thread_init
* or in create_callback_dcontext because they must be initialized differently
* in those two cases
*/
set_last_exit(dcontext, (linkstub_t *)get_starting_linkstub());
#ifdef PROFILE_RDTSC
dcontext->start_time = (uint64)0;
dcontext->prev_fragment = NULL;
dcontext->cache_frag_count = (uint64)0;
{
int i;
for (i = 0; i < 10; i++) {
dcontext->cache_time[i] = (uint64)0;
dcontext->cache_count[i] = (uint64)0;
}
}
#endif
#ifdef DEBUG
dcontext->in_opnd_disassemble = false;
#endif
#ifdef WINDOWS
/* Other pieces of DR -- callback & APC handling, detach -- test
* asynch_target to determine where the next app pc to execute is
* stored. Init it to 0 to indicate that this context's most recent
* syscall was not executed from handle_system_call().
*/
dcontext->asynch_target = NULL;
/* next_saved and prev_unused are zeroed out when dcontext is
* created; we shouldn't zero them here, they may have valid data
*/
dcontext->valid = true;
#endif
#ifdef HOT_PATCHING_INTERFACE
dcontext->nudge_thread = false; /* Fix for case 5367. */
#endif
#ifdef CHECK_RETURNS_SSE2
/* initialize sse2 index with 0
* go ahead and use eax, it's dead (about to return)
*/
# ifdef UNIX
asm("movl $0, %eax");
asm("pinsrw $7,%eax,%xmm7");
# else
# error NYI
# endif
#endif
/* We don't need to initialize dcontext->coarse_exit as it is only
* read when last_exit indicates a coarse exit, which sets the fields.
*/
dcontext->go_native = false;
}
#ifdef WINDOWS
/* on windows we use a new dcontext for each callback context */
dcontext_t *
create_callback_dcontext(dcontext_t *old_dcontext)
{
dcontext_t *new_dcontext = create_new_dynamo_context(false, NULL, NULL);
new_dcontext->valid = false;
/* all of these fields are shared among all dcontexts of a thread: */
new_dcontext->owning_thread = old_dcontext->owning_thread;
# ifdef UNIX
new_dcontext->owning_process = old_dcontext->owning_process;
# endif
new_dcontext->thread_record = old_dcontext->thread_record;
/* now that we have clean stack usage we can share a single stack */
ASSERT(old_dcontext->dstack != NULL);
new_dcontext->dstack = old_dcontext->dstack;
new_dcontext->isa_mode = old_dcontext->isa_mode;
new_dcontext->link_field = old_dcontext->link_field;
new_dcontext->monitor_field = old_dcontext->monitor_field;
new_dcontext->fcache_field = old_dcontext->fcache_field;
new_dcontext->fragment_field = old_dcontext->fragment_field;
new_dcontext->heap_field = old_dcontext->heap_field;
new_dcontext->vm_areas_field = old_dcontext->vm_areas_field;
new_dcontext->os_field = old_dcontext->os_field;
new_dcontext->synch_field = old_dcontext->synch_field;
/* case 8958: copy win32_start_addr in case we produce a forensics file
* from within a callback.
*/
new_dcontext->win32_start_addr = old_dcontext->win32_start_addr;
# ifdef CLIENT_INTERFACE
/* FlsData is persistent across callbacks */
new_dcontext->app_fls_data = old_dcontext->app_fls_data;
new_dcontext->priv_fls_data = old_dcontext->priv_fls_data;
new_dcontext->app_nt_rpc = old_dcontext->app_nt_rpc;
new_dcontext->priv_nt_rpc = old_dcontext->priv_nt_rpc;
new_dcontext->app_nls_cache = old_dcontext->app_nls_cache;
new_dcontext->priv_nls_cache = old_dcontext->priv_nls_cache;
# endif
new_dcontext->app_stack_limit = old_dcontext->app_stack_limit;
new_dcontext->app_stack_base = old_dcontext->app_stack_base;
new_dcontext->teb_base = old_dcontext->teb_base;
# ifdef UNIX
new_dcontext->signal_field = old_dcontext->signal_field;
new_dcontext->pcprofile_field = old_dcontext->pcprofile_field;
# endif
new_dcontext->private_code = old_dcontext->private_code;
# ifdef CLIENT_INTERFACE
new_dcontext->client_data = old_dcontext->client_data;
# endif
# ifdef DEBUG
new_dcontext->logfile = old_dcontext->logfile;
new_dcontext->thread_stats = old_dcontext->thread_stats;
# endif
# ifdef DEADLOCK_AVOIDANCE
new_dcontext->thread_owned_locks = old_dcontext->thread_owned_locks;
# endif
# ifdef KSTATS
new_dcontext->thread_kstats = old_dcontext->thread_kstats;
# endif
/* at_syscall is real time based, not app context based, so shared
*
* FIXME: Yes need to share when swapping at NtCallbackReturn, but
* want to keep old so when return from cb will do post-syscall for
* syscall that triggered cb in the first place!
* Plus, new cb calls initialize_dynamo_context(), which clears this field
* anyway! This all works now b/c we don't have alertable syscalls
* that we do post-syscall processing on.
*/
new_dcontext->upcontext_ptr->at_syscall = old_dcontext->upcontext_ptr->at_syscall;
# ifdef HOT_PATCHING_INTERFACE /* Fix for case 5367. */
/* hotp_excpt_state should be unused at this point. If it is used, it can
* be only because a hot patch made a system call with a callback. This is
* a bug because hot patches can't do system calls, let alone one with
* callbacks.
*/
DOCHECK(1, {
dr_jmp_buf_t empty;
memset(&empty, -1, sizeof(dr_jmp_buf_t));
ASSERT(memcmp(&old_dcontext->hotp_excpt_state, &empty, sizeof(dr_jmp_buf_t)) ==
0);
});
new_dcontext->nudge_thread = old_dcontext->nudge_thread;
# endif
/* our exceptions should be handled within one DR context switch */
ASSERT(old_dcontext->try_except.try_except_state == NULL);
new_dcontext->local_state = old_dcontext->local_state;
# ifdef WINDOWS
new_dcontext->aslr_context.last_child_padded =
old_dcontext->aslr_context.last_child_padded;
# endif
LOG(new_dcontext->logfile, LOG_TOP, 2, "made new dcontext " PFX " (old=" PFX ")\n",
new_dcontext, old_dcontext);
return new_dcontext;
}
#endif
bool
is_thread_initialized(void)
{
#if defined(UNIX) && defined(HAVE_TLS)
/* We don't want to pay the d_r_get_thread_id() cost on every
* get_thread_private_dcontext() when we only really need the
* check for this call here, so we explicitly check.
*/
if (get_tls_thread_id() != get_sys_thread_id())
return false;
#endif
return (get_thread_private_dcontext() != NULL);
}
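/* Returns whether a thread record for tid exists in the all_threads table. */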
bool
is_thread_known(thread_id_t tid)
{
return (thread_lookup(tid) != NULL);
}
#ifdef UNIX
/* i#237/PR 498284: a thread about to execute SYS_execve should be considered
 * exited, but we can't easily clean it up for real immediately
*/
void
mark_thread_execve(thread_record_t *tr, bool execve)
{
ASSERT((execve && !tr->execve) || (!execve && tr->execve));
tr->execve = execve;
d_r_mutex_lock(&all_threads_lock);
if (execve) {
/* since we free on a second vfork we should never accumulate
* more than one
*/
ASSERT(num_execve_threads == 0);
num_execve_threads++;
} else {
ASSERT(num_execve_threads > 0);
num_execve_threads--;
}
d_r_mutex_unlock(&all_threads_lock);
}
#endif /* UNIX */
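/* Returns the number of threads known to DR; on UNIX, threads that have
 * exited via execve are not counted.
 */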
int
d_r_get_num_threads(void)
{
return num_known_threads IF_UNIX(-num_execve_threads);
}
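/* True when only one application thread remains; client threads, when
 * present, are excluded from the count.
 */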
bool
is_last_app_thread(void)
{
return (d_r_get_num_threads() == IF_CLIENT_INTERFACE(get_num_client_threads() +) 1);
}
/* This routine takes a snapshot of all the threads known to DR,
* NOT LIMITED to those currently under DR control!
* It returns an array of thread_record_t* and the length of the array
* The caller must free the array using global_heap_free
* The caller must hold the thread_initexit_lock to ensure that threads
* are not created or destroyed before the caller is done with the list
* The caller CANNOT be could_be_linking, else a deadlock with flushing
* can occur (unless the caller is the one flushing)
*/
static void
get_list_of_threads_common(thread_record_t ***list,
int *num _IF_UNIX(bool include_execve))
{
int i, cur = 0, max_num;
thread_record_t *tr;
thread_record_t **mylist;
/* Only a flushing thread can get the thread snapshot while being
* couldbelinking -- else a deadlock w/ flush!
* FIXME: this assert should be on any acquisition of thread_initexit_lock!
*/
ASSERT(is_self_flushing() || !is_self_couldbelinking());
ASSERT(all_threads != NULL);
ASSERT_OWN_MUTEX(true, &thread_initexit_lock);
d_r_mutex_lock(&all_threads_lock);
/* Do not include vfork threads that exited via execve, unless we're exiting */
max_num = IF_UNIX_ELSE((include_execve || dynamo_exiting) ? num_known_threads
: d_r_get_num_threads(),
d_r_get_num_threads());
mylist = (thread_record_t **)global_heap_alloc(
max_num * sizeof(thread_record_t *) HEAPACCT(ACCT_THREAD_MGT));
for (i = 0; i < HASHTABLE_SIZE(ALL_THREADS_HASH_BITS); i++) {
for (tr = all_threads[i]; tr != NULL; tr = tr->next) {
/* include those for which !tr->under_dynamo_control */
/* don't include those that exited for execve. there should be
* no race b/c vfork suspends the parent. xref i#237/PR 498284.
*/
if (IF_UNIX_ELSE(!tr->execve || include_execve || dynamo_exiting, true)) {
mylist[cur] = tr;
cur++;
}
}
}
ASSERT(cur > 0);
IF_WINDOWS(ASSERT(cur == max_num));
if (cur < max_num) {
mylist = (thread_record_t **)global_heap_realloc(
mylist, max_num, cur, sizeof(thread_record_t *) HEAPACCT(ACCT_THREAD_MGT));
}
*num = cur;
*list = mylist;
d_r_mutex_unlock(&all_threads_lock);
}
void
get_list_of_threads(thread_record_t ***list, int *num)
{
get_list_of_threads_common(list, num _IF_UNIX(false));
}
#ifdef UNIX
void
get_list_of_threads_ex(thread_record_t ***list, int *num, bool include_execve)
{
get_list_of_threads_common(list, num, include_execve);
}
#endif
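/* Illustrative sketch (not part of the original source, excluded from
 * compilation): the calling pattern implied by the contract documented above
 * get_list_of_threads_common() -- hold thread_initexit_lock (and do not be
 * couldbelinking), use the snapshot, then free it with global_heap_free().
 * The function name and the logging done with the snapshot are made up for
 * the example.
 */
#if 0
static void
example_log_thread_snapshot(void)
{
    thread_record_t **threads;
    int num, i;
    d_r_mutex_lock(&thread_initexit_lock);
    get_list_of_threads(&threads, &num);
    for (i = 0; i < num; i++) {
        LOG(GLOBAL, LOG_THREADS, 2, "snapshot: thread " TIDFMT "\n", threads[i]->id);
    }
    global_heap_free(threads,
                     num * sizeof(thread_record_t *) HEAPACCT(ACCT_THREAD_MGT));
    d_r_mutex_unlock(&thread_initexit_lock);
}
#endif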
/* assumes caller can ensure that thread is either suspended or self to
* avoid races
*/
thread_record_t *
thread_lookup(thread_id_t tid)
{
thread_record_t *tr;
uint hindex;
/* check that caller is self or has initexit_lock
* FIXME: no way to tell who has initexit_lock
*/
ASSERT(mutex_testlock(&thread_initexit_lock) || tid == d_r_get_thread_id());
hindex = HASH_FUNC_BITS(tid, ALL_THREADS_HASH_BITS);
d_r_mutex_lock(&all_threads_lock);
if (all_threads == NULL) {
tr = NULL;
} else {
tr = all_threads[hindex];
}
while (tr != NULL) {
if (tr->id == tid) {
d_r_mutex_unlock(&all_threads_lock);
return tr;
}
tr = tr->next;
}
d_r_mutex_unlock(&all_threads_lock);
return NULL;
}
/* assumes caller can ensure that thread is either suspended or self to
* avoid races
*/
uint
get_thread_num(thread_id_t tid)
{
thread_record_t *tr = thread_lookup(tid);
if (tr != NULL)
return tr->num;
else
return 0; /* yes can't distinguish from 1st thread, who cares */
}
void
add_thread(IF_WINDOWS_ELSE_NP(HANDLE hthread, process_id_t pid), thread_id_t tid,
bool under_dynamo_control, dcontext_t *dcontext)
{
thread_record_t *tr;
uint hindex;
ASSERT(all_threads != NULL);
/* add entry to thread hashtable */
tr = (thread_record_t *)global_heap_alloc(sizeof(thread_record_t)
HEAPACCT(ACCT_THREAD_MGT));
#ifdef WINDOWS
/* we duplicate the thread pseudo-handle, this should give us full rights
* Note that instead asking explicitly for THREAD_ALL_ACCESS or just for
* THREAD_TERMINATE|THREAD_SUSPEND_RESUME|THREAD_GET_CONTEXT|THREAD_SET_CONTEXT
* does not seem able to acquire more rights than simply duplicating the
* app handle gives.
*/
LOG(GLOBAL, LOG_THREADS, 1, "Thread %d app handle rights: " PFX "\n", tid,
nt_get_handle_access_rights(hthread));
duplicate_handle(NT_CURRENT_PROCESS, hthread, NT_CURRENT_PROCESS, &tr->handle, 0, 0,
DUPLICATE_SAME_ACCESS | DUPLICATE_SAME_ATTRIBUTES);
/* We prob. only need TERMINATE (for kill thread), SUSPEND/RESUME/GET_CONTEXT
* (for synchronizing), and SET_CONTEXT (+ synchronizing requirements, for
* detach). All access includes this and quite a bit more. */
# if 0
/* eventually should be a real assert, but until we have a story for the
* injected detach threads, have to ifdef out even the ASSERT_CURIOSITY
 * (even a syslog internal warning is prob. too noisy for QA) */
ASSERT_CURIOSITY(TESTALL(THREAD_ALL_ACCESS, nt_get_handle_access_rights(tr->handle)));
# endif
LOG(GLOBAL, LOG_THREADS, 1, "Thread %d our handle rights: " PFX "\n", tid,
nt_get_handle_access_rights(tr->handle));
tr->retakeover = false;
#else
tr->pid = pid;
tr->execve = false;
#endif
tr->id = tid;
ASSERT(tid != INVALID_THREAD_ID); /* ensure os never assigns invalid id to a thread */
tr->under_dynamo_control = under_dynamo_control;
tr->dcontext = dcontext;
if (dcontext != NULL) /* we allow NULL for dr_create_client_thread() */
dcontext->thread_record = tr;
d_r_mutex_lock(&all_threads_lock);
tr->num = threads_ever_count++;
hindex = HASH_FUNC_BITS(tr->id, ALL_THREADS_HASH_BITS);
tr->next = all_threads[hindex];
all_threads[hindex] = tr;
/* must be inside all_threads_lock to avoid race w/ get_list_of_threads */
RSTATS_ADD_PEAK(num_threads, 1);
RSTATS_INC(num_threads_created);
num_known_threads++;
d_r_mutex_unlock(&all_threads_lock);
}
/* return false if couldn't find the thread */
bool
remove_thread(IF_WINDOWS_(HANDLE hthread) thread_id_t tid)
{
thread_record_t *tr = NULL, *prevtr;
uint hindex = HASH_FUNC_BITS(tid, ALL_THREADS_HASH_BITS);
ASSERT(all_threads != NULL);
d_r_mutex_lock(&all_threads_lock);
for (tr = all_threads[hindex], prevtr = NULL; tr; prevtr = tr, tr = tr->next) {
if (tr->id == tid) {
if (prevtr)
prevtr->next = tr->next;
else
all_threads[hindex] = tr->next;
/* must be inside all_threads_lock to avoid race w/ get_list_of_threads */
RSTATS_DEC(num_threads);
#ifdef UNIX
if (tr->execve) {
ASSERT(num_execve_threads > 0);
num_execve_threads--;
}
#endif
num_known_threads--;
#ifdef WINDOWS
close_handle(tr->handle);
#endif
global_heap_free(tr, sizeof(thread_record_t) HEAPACCT(ACCT_THREAD_MGT));
break;
}
}
d_r_mutex_unlock(&all_threads_lock);
return (tr != NULL);
}
/* this bool is protected by reset_pending_lock */
DECLARE_FREQPROT_VAR(static bool reset_at_nth_thread_triggered, false);
/* thread-specific initialization
* if dstack_in is NULL, then a dstack is allocated; else dstack_in is used
* as the thread's dstack
* mc can be NULL for the initial thread
* returns -1 if current thread has already been initialized
*/
/* On UNIX, if dstack_in != NULL, the parent of this new thread must have
* increased uninit_thread_count.
*/
int
dynamo_thread_init(byte *dstack_in, priv_mcontext_t *mc,
void *os_data _IF_CLIENT_INTERFACE(bool client_thread))
{
dcontext_t *dcontext;
/* due to lock issues (see below) we need another var */
bool reset_at_nth_thread_pending = false;
bool under_dynamo_control = true;
APP_EXPORT_ASSERT(dynamo_initialized || dynamo_exited ||
d_r_get_num_threads() ==
0 IF_CLIENT_INTERFACE(|| client_thread),
PRODUCT_NAME " not initialized");
if (INTERNAL_OPTION(nullcalls)) {
ASSERT(uninit_thread_count == 0);
return SUCCESS;
}
/* note that ENTERING_DR is assumed to have already happened: in apc handler
* for win32, in new_thread_setup for linux, in main init for 1st thread
*/
#if defined(WINDOWS) && defined(DR_APP_EXPORTS)
/* We need to identify a thread we intercepted in its APC when we
* take over all threads on dr_app_start(). Stack and pc checks aren't
* simple b/c it can be in ntdll waiting on a lock.
*/
if (dr_api_entry)
os_take_over_mark_thread(d_r_get_thread_id());
#endif
/* Try to handle externally injected threads */
if (dynamo_initialized && !bb_lock_start)
pre_second_thread();
/* synch point so thread creation can be prevented for critical periods */
d_r_mutex_lock(&thread_initexit_lock);
/* XXX i#2611: during detach, there is a race where a thread can
* reach here on Windows despite init_apc_go_native (i#2600).
*/
ASSERT_BUG_NUM(2611, !doing_detach);
/* The assumption is that if dynamo_exited, then we are about to exit and
* clean up, initializing this thread then would be dangerous, better to
* wait here for the app to die.
*/
/* under current implementation of process exit, can happen only under
* debug build, or app_start app_exit interface */
while (dynamo_exited) {
/* logging should be safe, though might not actually result in log
* message */
DODEBUG_ONCE(LOG(GLOBAL, LOG_THREADS, 1,
"Thread %d reached initialization point while dynamo exiting, "
"waiting for app to exit\n",
d_r_get_thread_id()););
d_r_mutex_unlock(&thread_initexit_lock);
os_thread_yield();
/* just in case we want to support exited and then restarted at some
* point */
d_r_mutex_lock(&thread_initexit_lock);
}
if (is_thread_initialized()) {
d_r_mutex_unlock(&thread_initexit_lock);
#if defined(WINDOWS) && defined(DR_APP_EXPORTS)
if (dr_api_entry)
os_take_over_unmark_thread(d_r_get_thread_id());
#endif
return -1;
}
os_tls_init();
dcontext = create_new_dynamo_context(true /*initial*/, dstack_in, mc);
initialize_dynamo_context(dcontext);
set_thread_private_dcontext(dcontext);
/* sanity check */
ASSERT(get_thread_private_dcontext() == dcontext);
/* set local state pointer for access from other threads */
dcontext->local_state = get_local_state();
/* set initial mcontext, if known */
if (mc != NULL)
*get_mcontext(dcontext) = *mc;
/* For hotp_only, the thread should run native, not under dr. However,
* the core should still get control of the thread at hook points to track
* what the application is doing & at patched points to execute hot patches.
* It is the same for thin_client except that there are fewer hooks, only to
* follow children.
*/
if (RUNNING_WITHOUT_CODE_CACHE())
under_dynamo_control = false;
/* add entry to thread hashtable before creating logdir so have thread num.
* otherwise we'd like to do this only after we'd fully initialized the thread, but we
* hold the thread_initexit_lock, so nobody should be listing us -- thread_lookup
* on other than self, or a thread list, should only be done while the initexit_lock
* is held. CHECK: is this always correct? thread_lookup does have an assert
* to try and enforce but cannot tell who has the lock.
*/
add_thread(IF_WINDOWS_ELSE(NT_CURRENT_THREAD, get_process_id()), d_r_get_thread_id(),
under_dynamo_control, dcontext);
#ifdef UNIX /* i#2600: Not easy on Windows: we rely on init_apc_go_native there. */
if (dstack_in != NULL) { /* Else not a thread creation we observed */
ASSERT(uninit_thread_count > 0);
ATOMIC_DEC(int, uninit_thread_count);
}
#endif
#if defined(WINDOWS) && defined(DR_APP_EXPORTS)
/* Now that the thread is in the main thread table we don't need to remember it */
if (dr_api_entry)
os_take_over_unmark_thread(d_r_get_thread_id());
#endif
LOG(GLOBAL, LOG_TOP | LOG_THREADS, 1,
"\ndynamo_thread_init: %d thread(s) now, dcontext=" PFX ", #=%d, id=" TIDFMT
", pid=" PIDFMT "\n\n",
GLOBAL_STAT(num_threads), dcontext, get_thread_num(d_r_get_thread_id()),
d_r_get_thread_id(), get_process_id());
DOLOG(1, LOG_STATS, { dump_global_stats(false); });
#ifdef DEBUG
if (d_r_stats->loglevel > 0) {
dcontext->logfile = open_log_file(thread_logfile_name(), NULL, 0);
print_file(dcontext->logfile, "%s\n", dynamorio_version_string);
} else {
dcontext->logfile = INVALID_FILE;
}
DOLOG(1, LOG_TOP | LOG_THREADS, {
LOG(THREAD, LOG_TOP | LOG_THREADS, 1, PRODUCT_NAME " built with: %s\n",
DYNAMORIO_DEFINES);
LOG(THREAD, LOG_TOP | LOG_THREADS, 1, PRODUCT_NAME " built on: %s\n",
dynamorio_buildmark);
});
LOG(THREAD, LOG_TOP | LOG_THREADS, 1, "%sTHREAD %d (dcontext " PFX ")\n\n",
IF_CLIENT_INTERFACE_ELSE(client_thread ? "CLIENT " : "", ""), d_r_get_thread_id(),
dcontext);
LOG(THREAD, LOG_TOP | LOG_THREADS, 1,
"DR stack is " PFX "-" PFX " (passed in " PFX ")\n",
dcontext->dstack - DYNAMORIO_STACK_SIZE, dcontext->dstack, dstack_in);
#endif
#ifdef DEADLOCK_AVOIDANCE
locks_thread_init(dcontext);
#endif
heap_thread_init(dcontext);
DOSTATS({ stats_thread_init(dcontext); });
#ifdef KSTATS
kstat_thread_init(dcontext);
#endif
os_thread_init(dcontext, os_data);
arch_thread_init(dcontext);
synch_thread_init(dcontext);
if (!DYNAMO_OPTION(thin_client))
vm_areas_thread_init(dcontext);
monitor_thread_init(dcontext);
fcache_thread_init(dcontext);
link_thread_init(dcontext);
fragment_thread_init(dcontext);
/* OS thread init after synch_thread_init and other setup can handle signals, etc. */
os_thread_init_finalize(dcontext, os_data);
/* This lock has served its purposes: A) a barrier to thread creation for those
* iterating over threads, B) mutex for add_thread, and C) mutex for synch_field
* to be set up.
* So we release it to shrink the time spent w/ this big lock, in particular
* to avoid holding it while running private lib thread init code (i#875).
*/
d_r_mutex_unlock(&thread_initexit_lock);
#ifdef CLIENT_INTERFACE
/* Set up client data needed in loader_thread_init for IS_CLIENT_THREAD */
instrument_client_thread_init(dcontext, client_thread);
#endif
loader_thread_init(dcontext);
if (!DYNAMO_OPTION(thin_client)) {
#ifdef CLIENT_INTERFACE
/* put client last, may depend on other thread inits.
* Note that we are calling this prior to instrument_init()
* now (PR 216936), which is required to initialize
* the client dcontext field prior to instrument_init().
*/
instrument_thread_init(dcontext, client_thread, mc != NULL);
#endif
#ifdef SIDELINE
if (dynamo_options.sideline) {
/* wake up sideline thread -- ok to call if thread already awake */
sideline_start();
}
#endif
}
/* must check # threads while holding thread_initexit_lock, yet cannot
* call fcache_reset_all_caches_proactively while holding it due to
* rank order of reset_pending_lock which we must also hold -- so we
* set a local bool reset_at_nth_thread_pending
*/
if (DYNAMO_OPTION(reset_at_nth_thread) != 0 && !reset_at_nth_thread_triggered &&
(uint)d_r_get_num_threads() == DYNAMO_OPTION(reset_at_nth_thread)) {
d_r_mutex_lock(&reset_pending_lock);
if (!reset_at_nth_thread_triggered) {
reset_at_nth_thread_triggered = true;
reset_at_nth_thread_pending = true;
}
d_r_mutex_unlock(&reset_pending_lock);
}
DOLOG(1, LOG_STATS, { dump_thread_stats(dcontext, false); });
if (reset_at_nth_thread_pending) {
d_r_mutex_lock(&reset_pending_lock);
/* fcache_reset_all_caches_proactively() will unlock */
fcache_reset_all_caches_proactively(RESET_ALL);
}
return SUCCESS;
}
/* We don't free cur thread until after client exit event (PR 536058) except for
* fragment_thread_exit(). Since this is called outside of dynamo_thread_exit()
* on process exit we assume fine to skip enter_threadexit().
*/
void
dynamo_thread_exit_pre_client(dcontext_t *dcontext, thread_id_t id)
{
/* fcache stats needs to examine fragment state, so run it before
* fragment exit, but real fcache exit needs to be after fragment exit
*/
#ifdef DEBUG
fcache_thread_exit_stats(dcontext);
#endif
/* must abort now to avoid deleting possibly un-deletable fragments
* monitor_thread_exit remains later b/c of monitor_remove_fragment calls
*/
trace_abort_and_delete(dcontext);
fragment_thread_exit(dcontext);
#ifdef CLIENT_INTERFACE
IF_WINDOWS(loader_pre_client_thread_exit(dcontext));
instrument_thread_exit_event(dcontext);
#endif
}
/* thread-specific cleanup */
/* Note : if this routine is not called by thread id, then other_thread should
* be true and the calling thread should hold the thread_initexit_lock
*/
static int
dynamo_thread_exit_common(dcontext_t *dcontext, thread_id_t id,
IF_WINDOWS_(bool detach_stacked_callbacks) bool other_thread)
{
dcontext_t *dcontext_tmp;
#ifdef WINDOWS
dcontext_t *dcontext_next;
int num_dcontext;
#endif
bool on_dstack = !other_thread && is_currently_on_dstack(dcontext);
/* cache this now for use after freeing dcontext */
local_state_t *local_state = dcontext->local_state;
if (INTERNAL_OPTION(nullcalls) || dcontext == NULL)
return SUCCESS;
/* make sure don't get into deadlock w/ flusher */
enter_threadexit(dcontext);
/* synch point so thread exiting can be prevented for critical periods */
/* see comment at start of method for other thread exit */
if (!other_thread)
d_r_mutex_lock(&thread_initexit_lock);
ASSERT_OWN_MUTEX(true, &thread_initexit_lock);
#ifdef WINDOWS
/* need to clean up thread stack before clean up other thread data, but
* after we're made nolinking
*/
os_thread_stack_exit(dcontext);
/* free the thread's application stack if requested */
if (dcontext->free_app_stack) {
byte *base;
/* only used for nudge threads currently */
ASSERT(dcontext->nudge_target == generic_nudge_target);
if (get_stack_bounds(dcontext, &base, NULL)) {
NTSTATUS res;
ASSERT(base != NULL);
res = nt_free_virtual_memory(base);
ASSERT(NT_SUCCESS(res));
} else {
/* stack should be available here */
ASSERT_NOT_REACHED();
}
}
#endif
#ifdef SIDELINE
/* N.B.: do not clean up any data structures while sideline thread
* is still running! put it to sleep for duration of this routine!
*/
if (!DYNAMO_OPTION(thin_client)) {
if (dynamo_options.sideline) {
/* put sideline thread to sleep */
sideline_stop();
/* sideline_stop will not return until sideline thread is asleep */
}
}
#endif
LOG(GLOBAL, LOG_TOP | LOG_THREADS, 1,
"\ndynamo_thread_exit (thread #%d id=" TIDFMT "): %d thread(s) now\n\n",
get_thread_num(id), id, GLOBAL_STAT(num_threads) - 1);
DOLOG(1, LOG_STATS, { dump_global_stats(false); });
LOG(THREAD, LOG_STATS | LOG_THREADS, 1, "\n## Statistics for this thread:\n");
#ifdef PROFILE_RDTSC
if (dynamo_options.profile_times) {
int i;
ASSERT(dcontext);
LOG(THREAD, LOG_STATS | LOG_THREADS, 1, "\nTop ten cache times:\n");
for (i = 0; i < 10; i++) {
if (dcontext->cache_time[i] > (uint64)0) {
uint top_part, bottom_part;
divide_int64_print(dcontext->cache_time[i], kilo_hertz, false, 3,
&top_part, &bottom_part);
LOG(THREAD, LOG_STATS | LOG_THREADS, 1,
"\t#%2d = %6u.%.3u ms, %9d hits\n", i + 1, top_part, bottom_part,
(int)dcontext->cache_count[i]);
}
}
LOG(THREAD, LOG_STATS | LOG_THREADS, 1, "\n");
}
#endif
/* In order to pass the client a dcontext in the process exit event
* we do some thread cleanup early for the final thread so we can delay
* the rest (PR 536058)
*/
if (!dynamo_exited_and_cleaned)
dynamo_thread_exit_pre_client(dcontext, id);
#ifdef CLIENT_INTERFACE
/* PR 243759: don't free client_data until after all fragment deletion events */
if (!DYNAMO_OPTION(thin_client))
instrument_thread_exit(dcontext);
#endif
/* i#920: we can't take segment/timer/asynch actions for other threads.
* This must be called after dynamo_thread_exit_pre_client where
* we called event callbacks.
*/
if (!other_thread)
dynamo_thread_not_under_dynamo(dcontext);
/* We clean up priv libs prior to setting tls dc to NULL so we can use
* TRY_EXCEPT when calling the priv lib entry routine
*/
if (!dynamo_exited ||
(other_thread &&
(IF_WINDOWS_ELSE(!doing_detach, true) ||
dcontext->owning_thread != d_r_get_thread_id()))) /* else already did this */
loader_thread_exit(dcontext);
/* set tls dc to NULL prior to cleanup, to avoid problems handling
* alarm signals received during cleanup (we'll suppress if tls
* dc==NULL which seems the right thing to do: not worth our
* effort to pass to another thread if thread-group-shared alarm,
* and if thread-private then thread would have exited soon
* anyway). see PR 596127.
*/
/* make sure we invalidate the dcontext before releasing the memory */
/* when cleaning up other threads, we cannot set their dcs to null,
* but we only do this at dynamorio_app_exit so who cares
*/
/* This must be called after instrument_thread_exit, which uses
* get_thread_private_dcontext for app/dr state checks.
*/
if (id == d_r_get_thread_id())
set_thread_private_dcontext(NULL);
fcache_thread_exit(dcontext);
link_thread_exit(dcontext);
monitor_thread_exit(dcontext);
if (!DYNAMO_OPTION(thin_client))
vm_areas_thread_exit(dcontext);
synch_thread_exit(dcontext);
arch_thread_exit(dcontext _IF_WINDOWS(detach_stacked_callbacks));
os_thread_exit(dcontext, other_thread);
DOLOG(1, LOG_STATS, { dump_thread_stats(dcontext, false); });
#ifdef KSTATS
kstat_thread_exit(dcontext);
#endif
DOSTATS({ stats_thread_exit(dcontext); });
heap_thread_exit(dcontext);
#ifdef DEADLOCK_AVOIDANCE
locks_thread_exit(dcontext);
#endif
#ifdef DEBUG
if (dcontext->logfile != INVALID_FILE) {
os_flush(dcontext->logfile);
close_log_file(dcontext->logfile);
}
#endif
/* remove thread from threads hashtable */
remove_thread(IF_WINDOWS_(NT_CURRENT_THREAD) id);
dcontext_tmp = dcontext;
#ifdef WINDOWS
/* clean up all the dcs */
num_dcontext = 0;
# ifdef DCONTEXT_IN_EDI
/* go to one end of list */
while (dcontext_tmp->next_saved)
dcontext_tmp = dcontext_tmp->next_saved;
# else
/* already at one end of list */
# endif
/* delete through to other end */
while (dcontext_tmp) {
num_dcontext++;
dcontext_next = dcontext_tmp->prev_unused;
delete_dynamo_context(dcontext_tmp,
dcontext_tmp == dcontext /*do not free dup cb stacks*/
&& !on_dstack /*do not free own stack*/);
dcontext_tmp = dcontext_next;
}
LOG(GLOBAL, LOG_STATS | LOG_THREADS, 1, "\tdynamo contexts used: %d\n", num_dcontext);
#else /* UNIX */
delete_dynamo_context(dcontext_tmp, !on_dstack /*do not free own stack*/);
#endif /* UNIX */
os_tls_exit(local_state, other_thread);
#ifdef SIDELINE
/* see notes above -- we can now wake up sideline thread */
if (dynamo_options.sideline && d_r_get_num_threads() > 0) {
sideline_start();
}
#endif
if (!other_thread) {
d_r_mutex_unlock(&thread_initexit_lock);
/* FIXME: once thread_initexit_lock is released, we're not on
* thread list, and a terminate targeting us could kill us in the middle
* of this call -- but this can't come before the unlock b/c the lock's
* in the data segment! (see case 3121)
* (note we do not re-protect for process exit, see !dynamo_exited check
* in exiting_dynamorio)
*/
if (!on_dstack) {
EXITING_DR();
/* else, caller will clean up stack and then call EXITING_DR(),
* probably via dynamo_thread_stack_free_and_exit(), as the stack free
* must be done before the exit
*/
}
}
return SUCCESS;
}
/* NOINLINE because dynamo_thread_exit is a stopping point. */
NOINLINE int
dynamo_thread_exit(void)
{
dcontext_t *dcontext = get_thread_private_dcontext();
return dynamo_thread_exit_common(dcontext, d_r_get_thread_id(),
IF_WINDOWS_(false) false);
}
/* NOTE : you must hold thread_initexit_lock to call this function! */
int
dynamo_other_thread_exit(thread_record_t *tr _IF_WINDOWS(bool detach_stacked_callbacks))
{
/* FIXME: Usually a safe spot for cleaning other threads should be
* under num_exits_dir_syscall, but for now rewinding all the way
*/
KSTOP_REWIND_DC(tr->dcontext, thread_measured);
KSTART_DC(tr->dcontext, thread_measured);
return dynamo_thread_exit_common(tr->dcontext, tr->id,
IF_WINDOWS_(detach_stacked_callbacks) true);
}
/* Called from another stack to finish cleaning up a thread.
* The final steps are to free the stack and perform the exit hook.
*/
void
dynamo_thread_stack_free_and_exit(byte *stack)
{
if (stack != NULL) {
stack_free(stack, DYNAMORIO_STACK_SIZE);
/* ASSUMPTION: if stack is NULL here, the exit was done earlier
* (fixes case 6967)
*/
EXITING_DR();
}
}
#ifdef DR_APP_EXPORTS
/* API routine to initialize DR */
DR_APP_API int
dr_app_setup(void)
{
/* FIXME: we either have to disallow the client calling this with
* more than one thread running, or we have to suspend all the threads.
* We should share the suspend-and-takeover loop (and for dr_app_setup_and_start
* share the takeover portion) from dr_app_start().
*/
int res;
dcontext_t *dcontext;
dr_api_entry = true;
res = dynamorio_app_init();
/* For dr_api_entry, we do not install all our signal handlers during init (to avoid
* races: i#2335): we delay until dr_app_start(). Plus the vsyscall hook is
* not set up until we find out the syscall method. Thus we're already
* "os_process_not_under_dynamorio".
* We can't as easily avoid initializing the thread TLS and then dropping
* it, however, as parts of init assume we have TLS.
*/
dcontext = get_thread_private_dcontext();
dynamo_thread_not_under_dynamo(dcontext);
return res;
}
/* API routine to exit DR */
DR_APP_API int
dr_app_cleanup(void)
{
thread_record_t *tr;
SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT);
dr_api_exit = true;
SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT); /* to keep properly nested */
/* XXX: The dynamo_thread_[not_]under_dynamo() routines are not idempotent,
* and must be balanced! On Linux, they track the shared itimer refcount,
* so a mismatch will lead to a refleak or negative refcount.
* dynamorio_app_exit() will call dynamo_thread_not_under_dynamo(), so we
* must ensure that we are under DR before calling it. Therefore, we
* require that the caller call dr_app_stop() before calling
* dr_app_cleanup(). However, we cannot make a usage assertion to that
* effect without addressing the FIXME comments in
* dynamo_thread_not_under_dynamo() about updating tr->under_dynamo_control.
*/
tr = thread_lookup(d_r_get_thread_id());
if (tr != NULL && tr->dcontext != NULL) {
os_process_under_dynamorio_initiate(tr->dcontext);
os_process_under_dynamorio_complete(tr->dcontext);
dynamo_thread_under_dynamo(tr->dcontext);
}
return dynamorio_app_exit();
}
/* Called by dr_app_start in arch-specific assembly file */
void
dr_app_start_helper(priv_mcontext_t *mc)
{
apicheck(dynamo_initialized, PRODUCT_NAME " not initialized");
LOG(GLOBAL, LOG_TOP, 1, "dr_app_start in thread " TIDFMT "\n", d_r_get_thread_id());
LOG(THREAD_GET, LOG_TOP, 1, "dr_app_start\n");
if (!INTERNAL_OPTION(nullcalls)) {
/* Adjust the app stack to account for the return address + alignment.
* See dr_app_start in x86.asm.
*/
mc->xsp += DYNAMO_START_XSP_ADJUST;
dynamo_start(mc);
/* the interpreter takes over from here */
}
}
/* Dummy routine that returns control to the app if it is currently
* under dynamo control.
* NOINLINE because dr_app_stop is a stopping point.
*/
DR_APP_API NOINLINE void
dr_app_stop(void)
{
/* the application regains control in here */
}
/* NOINLINE because dr_app_stop_and_cleanup is a stopping point. */
DR_APP_API NOINLINE void
dr_app_stop_and_cleanup(void)
{
dr_app_stop_and_cleanup_with_stats(NULL);
}
/* NOINLINE because dr_app_stop_and_cleanup_with_stats is a stopping point. */
DR_APP_API NOINLINE void
dr_app_stop_and_cleanup_with_stats(dr_stats_t *drstats)
{
/* XXX i#95: today this is a full detach, while a separated dr_app_cleanup()
* is not. We should try and have dr_app_cleanup() take this detach path
* here (and then we can simplify exit_synch_state()) but it's more complicated
* and we need to resolve the unbounded dr_app_stop() time.
*/
if (dynamo_initialized && !dynamo_exited && !doing_detach) {
detach_on_permanent_stack(true /*internal*/, true /*do cleanup*/, drstats);
}
/* the application regains control in here */
}
DR_APP_API int
dr_app_setup_and_start(void)
{
int r = dr_app_setup();
if (r == SUCCESS)
dr_app_start();
return r;
}
#endif
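/* Illustrative sketch (not part of the original source, excluded from
 * compilation): how an embedding application might drive the dr_app_* API
 * above.  The "dr_api.h" header name and run_workload() are assumptions for
 * the example.
 */
#if 0
#    include "dr_api.h"
int
main(void)
{
    if (dr_app_setup_and_start() != SUCCESS) /* dr_app_setup() + dr_app_start() */
        return 1;
    run_workload();            /* this code executes under DR control */
    dr_app_stop_and_cleanup(); /* detach from the app and free DR state */
    return 0;
}
#endif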
/* For use by threads that start and stop whether dynamo controls them.
*/
void
dynamo_thread_under_dynamo(dcontext_t *dcontext)
{
LOG(THREAD, LOG_ASYNCH, 2, "thread %d under DR control\n", dcontext->owning_thread);
ASSERT(dcontext != NULL);
/* FIXME: mark under_dynamo_control?
* see comments in not routine below
*/
os_thread_under_dynamo(dcontext);
#ifdef SIDELINE
if (dynamo_options.sideline) {
/* wake up sideline thread -- ok to call if thread already awake */
sideline_start();
}
#endif
dcontext->currently_stopped = false;
dcontext->go_native = false;
}
/* For use by threads that start and stop whether dynamo controls them.
* This must be called by the owner of dcontext and not another
* non-executing thread.
*/
void
dynamo_thread_not_under_dynamo(dcontext_t *dcontext)
{
ASSERT_MESSAGE(CHKLVL_ASSERTS + 1 /*expensive*/, "can only act on executing thread",
dcontext == get_thread_private_dcontext());
if (dcontext == NULL)
return;
LOG(THREAD, LOG_ASYNCH, 2, "thread %d not under DR control\n",
dcontext->owning_thread);
dcontext->currently_stopped = true;
os_thread_not_under_dynamo(dcontext);
#ifdef SIDELINE
/* FIXME: if # active threads is 0, then put sideline thread to sleep! */
if (dynamo_options.sideline) {
/* put sideline thread to sleep */
sideline_stop();
}
#endif
#ifdef DEBUG
os_flush(dcontext->logfile);
#endif
}
#define MAX_TAKE_OVER_ATTEMPTS 8
/* Mark this thread as under DR, and take over other threads in the current process.
*/
void
dynamorio_take_over_threads(dcontext_t *dcontext)
{
/* We repeatedly check if there are other threads in the process, since
* while we're checking one may be spawning additional threads.
*/
bool found_threads;
uint attempts = 0;
os_process_under_dynamorio_initiate(dcontext);
/* We can start this thread now that we've set up process-wide actions such
* as handling signals.
*/
dynamo_thread_under_dynamo(dcontext);
signal_event(dr_app_started);
SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT);
dynamo_started = true;
SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT);
/* XXX i#1305: we should suspend all the other threads for DR init to
* satisfy the parts of the init process that assume there are no races.
*/
do {
found_threads = os_take_over_all_unknown_threads(dcontext);
attempts++;
if (found_threads && !bb_lock_start)
bb_lock_start = true;
} while (found_threads && attempts < MAX_TAKE_OVER_ATTEMPTS);
os_process_under_dynamorio_complete(dcontext);
/* End the barrier to new threads. */
signal_event(dr_attach_finished);
if (found_threads) {
REPORT_FATAL_ERROR_AND_EXIT(FAILED_TO_TAKE_OVER_THREADS, 2,
get_application_name(), get_application_pid());
}
char buf[16];
int num_threads = d_r_get_num_threads();
if (num_threads > 1) { /* avoid for early injection */
snprintf(buf, BUFFER_SIZE_ELEMENTS(buf), "%d", num_threads);
NULL_TERMINATE_BUFFER(buf);
SYSLOG(SYSLOG_INFORMATION, INFO_ATTACHED, 3, buf, get_application_name(),
get_application_pid());
}
}
/* Called by dynamorio_app_take_over in arch-specific assembly file */
void
dynamorio_app_take_over_helper(priv_mcontext_t *mc)
{
static bool have_taken_over = false; /* ASSUMPTION: not an actual write */
SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT);
APP_EXPORT_ASSERT(dynamo_initialized, PRODUCT_NAME " not initialized");
#ifdef RETURN_AFTER_CALL
/* FIXME : this is set after dynamo_initialized, so a slight race with
* an injected thread turning on .C protection before the main thread
* sets this. */
dr_preinjected = true; /* currently only relevant on Win32 */
#endif
LOG(GLOBAL, LOG_TOP, 1, "taking over via preinject in %s\n", __FUNCTION__);
if (!INTERNAL_OPTION(nullcalls) && !have_taken_over) {
have_taken_over = true;
LOG(GLOBAL, LOG_TOP, 1, "dynamorio_app_take_over\n");
/* set this flag to indicate that we should run until the program dies: */
automatic_startup = true;
if (DYNAMO_OPTION(inject_primary))
take_over_primary_thread();
/* who knows when this was called -- no guarantee we control all threads --
* unless we were auto-injected (preinject library calls this routine)
*/
control_all_threads = automatic_startup;
SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT);
/* Adjust the app stack to account for the return address + alignment.
* See dynamorio_app_take_over in x86.asm.
*/
mc->xsp += DYNAMO_START_XSP_ADJUST;
/* For hotp_only and thin_client, the app should run native, except
* for our hooks.
* This is where apps hooked using appinit key are let go native.
* Even though control is going to native app code, we want
* automatic_startup and control_all_threads set.
*/
if (!RUNNING_WITHOUT_CODE_CACHE())
dynamo_start(mc);
/* the interpreter takes over from here */
} else
SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT);
}
#ifdef WINDOWS
extern app_pc parent_early_inject_address; /* from os.c */
/* in arch-specific assembly file */
void
dynamorio_app_take_over(void);
DYNAMORIO_EXPORT void
dynamorio_app_init_and_early_takeover(uint inject_location, void *restore_code)
{
int res;
ASSERT(!dynamo_initialized && !dynamo_exited);
    /* This routine combines dynamorio_app_init() and dynamorio_app_take_over() into
* a single routine that also handles any early injection cleanup needed. */
ASSERT_NOT_IMPLEMENTED(inject_location != INJECT_LOCATION_KiUserApc);
/* currently only Ldr* hook points are known to work */
ASSERT_CURIOSITY(INJECT_LOCATION_IS_LDR(inject_location));
/* See notes in os.c DLLMain. When early injected we are unable to find
* the address of LdrpLoadDll so we use the parent's value which is passed
* to us at the start of restore_code. FIXME - if we start using multiple
* inject locations we'll probably have to ensure we always pass this.
*/
if (INJECT_LOCATION_IS_LDR(inject_location)) {
parent_early_inject_address = *(app_pc *)restore_code;
}
dr_early_injected = true;
dr_early_injected_location = inject_location;
res = dynamorio_app_init();
ASSERT(res == SUCCESS);
ASSERT(dynamo_initialized && !dynamo_exited);
LOG(GLOBAL, LOG_TOP, 1, "taking over via early injection in %s\n", __FUNCTION__);
/* FIXME - restore code needs to be freed, but we have to return through it
* first... could instead duplicate its tail here if we wrap this
* routine in asm or eqv. pass the continuation state in as args. */
ASSERT(inject_location != INJECT_LOCATION_KiUserApc);
dynamorio_app_take_over();
}
/* Called with DR library mapped in but without its imports processed.
*/
void
dynamorio_earliest_init_takeover_C(byte *arg_ptr)
{
int res;
bool earliest_inject;
/* Windows-specific code for the most part */
earliest_inject = earliest_inject_init(arg_ptr);
/* Initialize now that DR dll imports are hooked up */
if (earliest_inject) {
dr_earliest_injected = true;
dr_earliest_inject_args = arg_ptr;
} else
dr_early_injected = true;
res = dynamorio_app_init();
ASSERT(res == SUCCESS);
ASSERT(dynamo_initialized && !dynamo_exited);
LOG(GLOBAL, LOG_TOP, 1, "taking over via earliest injection in %s\n", __FUNCTION__);
/* earliest_inject_cleanup() is called within dynamorio_app_init() to avoid
* confusing the exec areas scan
*/
/* Take over at retaddr
*
* XXX i#626: app_takeover sets preinjected for rct (should prob. rename)
* which needs to be done whenever we takeover not at the bottom of the
* callstack. For earliest won't need to set this if we takeover
* in such a way as to handle the return back to our hook code without a
* violation -- though currently we will see 3 rets (return from
* dynamorio_app_take_over(), return from here, and return from
* dynamorio_earliest_init_takeover() to app hook code).
* Should we have dynamorio_earliest_init_takeover() set up an
* mcontext that we can go to directly instead of interpreting
* the returns in our own code? That would make tools that shadow
* callstacks simpler too.
*/
dynamorio_app_take_over();
}
#endif /* WINDOWS */
/***************************************************************************
* SELF-PROTECTION
*/
/* FIXME: even with -single_privileged_thread, we aren't fully protected,
* because there's a window between us resuming the other threads and
* returning to our caller where another thread could clobber our return
* address or something.
*/
static void
dynamorio_protect(void)
{
ASSERT(SELF_PROTECT_ON_CXT_SWITCH);
LOG(GLOBAL, LOG_DISPATCH, 4, "dynamorio_protect thread=" TIDFMT "\n",
d_r_get_thread_id());
/* we don't protect local heap here, that's done lazily */
d_r_mutex_lock(&protect_info->lock);
ASSERT(protect_info->num_threads_unprot > 0);
/* FIXME: nice to also catch double enters but would need to track more info */
if (protect_info->num_threads_unprot <= 0) {
/* Defensive code to prevent crashes from double exits (the theory
* for case 7631/8030). However, this precludes an extra exit+enter
* pair from working properly (though an extra enter+exit will continue
* to work), though such a pair would have crashed if another thread
* had entered in the interim anyway.
*/
protect_info->num_threads_unprot = 0;
d_r_mutex_unlock(&protect_info->lock);
return;
}
protect_info->num_threads_unprot--;
if (protect_info->num_threads_unprot > 0) {
/* other threads still in DR, cannot protect global memory */
LOG(GLOBAL, LOG_DISPATCH, 4, "dynamorio_protect: not last thread => nop\n");
d_r_mutex_unlock(&protect_info->lock);
return;
}
SELF_PROTECT_GLOBAL(READONLY);
if (INTERNAL_OPTION(single_privileged_thread)) {
/* FIXME: want to resume threads and allow thread creation only
* _after_ protect data segment, but lock is in data segment!
*/
if (protect_info->num_threads_suspended > 0) {
thread_record_t *tr;
int i, num = 0;
/* we do not need to grab the all_threads_lock because
* no threads can be added or removed so who cares if we
* access the data structure simultaneously with another
* reader of it
*/
for (i = 0; i < HASHTABLE_SIZE(ALL_THREADS_HASH_BITS); i++) {
for (tr = all_threads[i]; tr; tr = tr->next) {
if (tr->under_dynamo_control) {
os_thread_resume(all_threads[i]);
num++;
}
}
}
ASSERT(num == protect_info->num_threads_suspended);
protect_info->num_threads_suspended = 0;
}
/* thread init/exit can proceed now */
d_r_mutex_unlock(&thread_initexit_lock);
}
/* FIXME case 8073: temporary until we put in unprots in the
* right places. if we were to leave this here we'd want to combine
* .fspdata and .cspdata for more efficient prot changes.
*/
SELF_PROTECT_DATASEC(DATASEC_FREQ_PROT);
SELF_PROTECT_DATASEC(DATASEC_CXTSW_PROT);
d_r_mutex_unlock(&protect_info->lock);
}
static void
dynamorio_unprotect(void)
{
ASSERT(SELF_PROTECT_ON_CXT_SWITCH);
d_r_mutex_lock(
&protect_info->lock); /* lock in unprot heap, not data segment, so safe! */
protect_info->num_threads_unprot++;
if (protect_info->num_threads_unprot == 1) {
/* was protected, so we need to do the unprotection */
SELF_UNPROTECT_DATASEC(DATASEC_CXTSW_PROT);
/* FIXME case 8073: temporary until we put in unprots in the
* right places. if we were to leave this here we'd want to combine
* .fspdata and .cspdata for more efficient prot changes.
*/
SELF_UNPROTECT_DATASEC(DATASEC_FREQ_PROT);
if (INTERNAL_OPTION(single_privileged_thread)) {
/* FIXME: want to suspend all other threads _before_ unprotecting anything,
* but need to guarantee no new threads while we're suspending them,
* and can't do that without setting a lock => need data segment!
*/
d_r_mutex_lock(&thread_initexit_lock);
if (d_r_get_num_threads() > 1) {
thread_record_t *tr;
int i;
/* current multiple-thread solution: suspend all other threads! */
ASSERT(protect_info->num_threads_suspended == 0);
/* we do not need to grab the all_threads_lock because
* no threads can be added or removed so who cares if we
* access the data structure simultaneously with another
* reader of it
*/
for (i = 0; i < HASHTABLE_SIZE(ALL_THREADS_HASH_BITS); i++) {
for (tr = all_threads[i]; tr; tr = tr->next) {
if (tr->under_dynamo_control) {
DEBUG_DECLARE(bool ok =)
os_thread_suspend(all_threads[i]);
ASSERT(ok);
protect_info->num_threads_suspended++;
}
}
}
}
/* we don't unlock or resume threads until we re-enter cache */
}
SELF_PROTECT_GLOBAL(WRITABLE);
}
/* we don't re-protect local heap here, that's done at points where
* it was protected lazily
*/
d_r_mutex_unlock(&protect_info->lock);
LOG(GLOBAL, LOG_DISPATCH, 4, "dynamorio_unprotect thread=" TIDFMT "\n",
d_r_get_thread_id());
}
#ifdef DEBUG
const char *
get_data_section_name(app_pc pc)
{
uint i;
for (i = 0; i < DATASEC_NUM; i++) {
if (pc >= datasec_start[i] && pc < datasec_end[i])
return DATASEC_NAMES[i];
}
return NULL;
}
bool
check_should_be_protected(uint sec)
{
/* Blindly asserting that a data section is protected is racy as
* another thread could be in an unprot window. We use some
* heuristics to try and identify bugs where a section is left
* unprot, but it's not easy.
*/
if (/* case 8107: for INJECT_LOCATION_LdrpLoadImportModule we
* load a helper library and end up in d_r_dispatch() for
* syscall_while_native before DR is initialized.
*/
!dynamo_initialized ||
# ifdef WINDOWS
/* case 8113: detach currently unprots .data prior to its
* thread synch, so don't count anything after that
*/
doing_detach ||
# endif
!TEST(DATASEC_SELFPROT[sec], DYNAMO_OPTION(protect_mask)) ||
DATASEC_PROTECTED(sec))
return true;
STATS_INC(datasec_not_prot);
/* FIXME: even checking d_r_get_num_threads()==1 is still racy as a thread could
* exit, and it's not worth grabbing thread_initexit_lock here..
*/
if (threads_ever_count == 1
# ifdef DR_APP_EXPORTS
/* For start/stop, can be other threads running around so we bail on
* perfect protection
*/
&& !dr_api_entry
# endif
)
return false;
/* FIXME: no count of threads in DR or anything so can't conclude much
* Just return true and hope developer looks at datasec_not_prot stats.
* We do have an ASSERT_CURIOSITY on the stat in data_section_exit().
*/
return true;
}
# ifdef WINDOWS
/* Assumed to only be called about DR dll writable regions */
bool
data_sections_enclose_region(app_pc start, app_pc end)
{
/* Rather than solve the general enclose problem by sorting,
* we subtract each piece we find.
* It used to be that on 32-bit .data|.fspdata|.cspdata|.nspdata formed
* the only writable region, with .pdata between .data and .fspdata on 64.
* But building with VS2012, I'm seeing the sections in other orders (i#1075).
* And with x64 reachability we moved the interception buffer in .data,
* and marking it +rx results in sub-section calls to here.
*/
int i;
bool found_start = false, found_end = false;
ssize_t sz = end - start;
for (i = 0; i < DATASEC_NUM; i++) {
if (datasec_start[i] <= end && datasec_end[i] >= start) {
byte *overlap_start = MAX(datasec_start[i], start);
byte *overlap_end = MIN(datasec_end[i], end);
sz -= overlap_end - overlap_start;
}
}
return sz == 0;
}
# endif /* WINDOWS */
#endif /* DEBUG */
static void
get_data_section_bounds(uint sec)
{
/* FIXME: on linux we should include .got and .dynamic in one of our
* sections, requiring specifying the order of sections (case 3789)!
* Should use an ld script to ensure that .nspdata is last, or find a unique
* attribute to force separation (perhaps mark as rwx, then
* remove the x at init time?) ld 2.15 puts it at the end, but
* ld 2.13 puts .got and .dynamic after it! For now we simply
* don't protect subsequent guys.
* On win32 there are no other rw sections, fortunately.
*/
ASSERT(sec >= 0 && sec < DATASEC_NUM);
/* for DEBUG we use for data_sections_enclose_region() */
ASSERT(IF_WINDOWS(IF_DEBUG(true ||))
TEST(DATASEC_SELFPROT[sec], dynamo_options.protect_mask));
d_r_mutex_lock(&datasec_lock[sec]);
ASSERT(datasec_start[sec] == NULL);
get_named_section_bounds(get_dynamorio_dll_start(), DATASEC_NAMES[sec],
&datasec_start[sec], &datasec_end[sec]);
d_r_mutex_unlock(&datasec_lock[sec]);
ASSERT(ALIGNED(datasec_start[sec], PAGE_SIZE));
ASSERT(ALIGNED(datasec_end[sec], PAGE_SIZE));
ASSERT(datasec_start[sec] < datasec_end[sec]);
#ifdef WINDOWS
if (IF_DEBUG(true ||) TEST(DATASEC_SELFPROT[sec], dynamo_options.protect_mask))
merge_writecopy_pages(datasec_start[sec], datasec_end[sec]);
#endif
}
#ifdef UNIX
/* We get into problems if we keep a .section open across string literals, etc.
* (such as when wrapping a function to get its local-scope statics in that section),
* but the VAR_IN_SECTION does the real work for us, just so long as we have one
* .section decl somewhere.
*/
DECLARE_DATA_SECTION(RARELY_PROTECTED_SECTION, "w")
DECLARE_DATA_SECTION(FREQ_PROTECTED_SECTION, "w")
DECLARE_DATA_SECTION(NEVER_PROTECTED_SECTION, "w")
END_DATA_SECTION_DECLARATIONS()
#endif
static void
data_section_init(void)
{
uint i;
for (i = 0; i < DATASEC_NUM; i++) {
if (datasec_start[i] != NULL) {
/* We were called early due to an early syslog.
* We still retain our slightly later normal init position so we can
* log, etc. in normal runs.
*/
return;
}
ASSIGN_INIT_LOCK_FREE(datasec_lock[i], datasec_selfprot_lock);
/* for DEBUG we use for data_sections_enclose_region() */
if (IF_WINDOWS(IF_DEBUG(true ||))
TEST(DATASEC_SELFPROT[i], dynamo_options.protect_mask)) {
get_data_section_bounds(i);
}
}
DOCHECK(1, {
/* ensure no overlaps */
uint j;
for (i = 0; i < DATASEC_NUM; i++) {
for (j = i + 1; j < DATASEC_NUM; j++) {
ASSERT(datasec_start[i] >= datasec_end[j] ||
datasec_start[j] >= datasec_end[i]);
}
}
});
}
static void
data_section_exit(void)
{
uint i;
DOSTATS({
/* There can't have been that many races.
* A failure to re-protect should result in a ton of d_r_dispatch
* entrances w/ .data unprot, so should show up here.
* However, an app with threads that are initializing in DR and thus
* unprotected .data while other threads are running new code (such as
* on attach) can easily rack up hundreds of unprot cache entrances.
*/
ASSERT_CURIOSITY(GLOBAL_STAT(datasec_not_prot) < 5000);
});
for (i = 0; i < DATASEC_NUM; i++)
DELETE_LOCK(datasec_lock[i]);
}
#define DATASEC_WRITABLE_MOD(which, op) \
((which) == DATASEC_RARELY_PROT \
? (datasec_writable_rareprot op) \
: ((which) == DATASEC_CXTSW_PROT \
? (datasec_writable_cxtswprot op) \
: ((which) == DATASEC_FREQ_PROT \
? (datasec_writable_freqprot op) \
: (ASSERT_NOT_REACHED(), datasec_writable_neverprot))))
/* WARNING: any DO_ONCE will call this routine, so don't call anything here
* that has a DO_ONCE, to avoid deadlock!
*/
void
protect_data_section(uint sec, bool writable)
{
ASSERT(sec >= 0 && sec < DATASEC_NUM);
ASSERT(TEST(DATASEC_SELFPROT[sec], dynamo_options.protect_mask));
/* We can be called very early before data_section_init() so init here
* (data_section_init() has no dependences).
*/
if (datasec_start[sec] == NULL) {
/* should only happen early in init */
ASSERT(!dynamo_initialized);
data_section_init();
}
d_r_mutex_lock(&datasec_lock[sec]);
ASSERT(datasec_start[sec] != NULL);
/* if using libc, we cannot print while data segment is read-only!
* thus, if making it writable, do that first, otherwise do it last.
* w/ ntdll this is not a problem.
*/
/* Remember that multiple threads can be doing (unprotect,protect) pairs of
* calls simultaneously. The datasec_lock makes each individual call atomic,
* and if all calls are properly nested, our use of counters should result in
* the proper protection only after the final protect call and not in the
* middle of some other thread's writes to the data section.
*/
if (writable) {
/* On-context-switch protection has a separate mechanism for
* only protecting when the final thread leaves DR
*/
ASSERT_CURIOSITY(DATASEC_WRITABLE(sec) <= 2); /* shouldn't nest too deep! */
if (DATASEC_WRITABLE(sec) == 0) {
make_writable(datasec_start[sec], datasec_end[sec] - datasec_start[sec]);
STATS_INC(datasec_prot_changes);
} else
STATS_INC(datasec_prot_wasted_calls);
(void)DATASEC_WRITABLE_MOD(sec, ++);
}
LOG(TEST(DATASEC_SELFPROT[sec], SELFPROT_ON_CXT_SWITCH) ? THREAD_GET : GLOBAL,
LOG_VMAREAS, TEST(DATASEC_SELFPROT[sec], SELFPROT_ON_CXT_SWITCH) ? 3U : 2U,
"protect_data_section: thread " TIDFMT " %s (recur %d, stat %d) %s %s %d\n",
d_r_get_thread_id(), DATASEC_WRITABLE(sec) == 1 ? "changing" : "nop",
DATASEC_WRITABLE(sec), GLOBAL_STAT(datasec_not_prot), DATASEC_NAMES[sec],
writable ? "rw" : "r", DATASEC_WRITABLE(sec));
if (!writable) {
ASSERT(DATASEC_WRITABLE(sec) > 0);
(void)DATASEC_WRITABLE_MOD(sec, --);
if (DATASEC_WRITABLE(sec) == 0) {
make_unwritable(datasec_start[sec], datasec_end[sec] - datasec_start[sec]);
STATS_INC(datasec_prot_changes);
} else
STATS_INC(datasec_prot_wasted_calls);
}
d_r_mutex_unlock(&datasec_lock[sec]);
}
/* enter/exit DR hooks */
void
entering_dynamorio(void)
{
if (SELF_PROTECT_ON_CXT_SWITCH)
dynamorio_unprotect();
ASSERT(HOOK_ENABLED);
LOG(GLOBAL, LOG_DISPATCH, 3, "entering_dynamorio thread=" TIDFMT "\n",
d_r_get_thread_id());
STATS_INC(num_entering_DR);
if (INTERNAL_OPTION(single_thread_in_DR)) {
acquire_recursive_lock(&thread_in_DR_exclusion);
LOG(GLOBAL, LOG_DISPATCH, 3, "entering_dynamorio thread=" TIDFMT " count=%d\n",
d_r_get_thread_id(), thread_in_DR_exclusion.count);
}
}
void
exiting_dynamorio(void)
{
ASSERT(HOOK_ENABLED);
LOG(GLOBAL, LOG_DISPATCH, 3, "exiting_dynamorio thread=" TIDFMT "\n",
d_r_get_thread_id());
STATS_INC(num_exiting_DR);
if (INTERNAL_OPTION(single_thread_in_DR)) {
/* thread init/exit can proceed now */
LOG(GLOBAL, LOG_DISPATCH, 3, "exiting_dynamorio thread=" TIDFMT " count=%d\n",
d_r_get_thread_id(), thread_in_DR_exclusion.count - 1);
release_recursive_lock(&thread_in_DR_exclusion);
}
if (SELF_PROTECT_ON_CXT_SWITCH && !dynamo_exited)
dynamorio_protect();
}
/* Note this includes any stack guard pages */
bool
is_on_initstack(byte *esp)
{
return (esp <= d_r_initstack && esp > d_r_initstack - DYNAMORIO_STACK_SIZE);
}
/* Note this includes any stack guard pages */
bool
is_on_dstack(dcontext_t *dcontext, byte *esp)
{
return (esp <= dcontext->dstack && esp > dcontext->dstack - DYNAMORIO_STACK_SIZE);
}
bool
is_currently_on_dstack(dcontext_t *dcontext)
{
byte *cur_esp;
GET_STACK_PTR(cur_esp);
return is_on_dstack(dcontext, cur_esp);
}
void
pre_second_thread(void)
{
/* i#1111: nop-out bb_building_lock until 2nd thread created.
* While normally we'll call this in the primary thread while not holding
* the lock, it's possible on Windows for an externally injected thread
* (or for a thread sneakily created by some native_exec code w/o going
* through ntdll wrappers) to appear. We solve the problem of the main
* thread currently holding bb_building_lock and us turning its
* unlock into an error by the bb_lock_would_have bool in
* SHARED_BB_UNLOCK().
*/
if (!bb_lock_start) {
d_r_mutex_lock(&bb_building_lock);
SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT);
bb_lock_start = true;
SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT);
d_r_mutex_unlock(&bb_building_lock);
}
}
| 1 | 18,013 | I think we need some docs about how to set this parameter correctly. Are we OK with changing the public API like this? We probably at least want to make a change notice to the release doc. | DynamoRIO-dynamorio | c |
@@ -329,10 +329,13 @@ type ProtocolTag byte
const (
ProtocolHeartbeat ProtocolTag = iota
- ProtocolOverlayControlMsg
+ ProtocolConnectionEstablished
+ ProtocolFragmentationReceived
+ ProtocolPMTUVerified
ProtocolGossip
ProtocolGossipUnicast
ProtocolGossipBroadcast
+ ProtocolOverlayControlMsg
)
 type ProtocolMsg struct {
| 1 |
package router
import (
"bytes"
"encoding/gob"
"encoding/hex"
"fmt"
"io"
"time"
)
const (
Protocol = "weave"
ProtocolMinVersion = 1
ProtocolMaxVersion = 2
)
var (
ProtocolBytes = []byte(Protocol)
HeaderTimeout = 10 * time.Second
ProtocolV1Features = []string{
"ConnID",
"Name",
"NickName",
"PeerNameFlavour",
"UID",
}
ErrExpectedCrypto = fmt.Errorf("Password specified, but peer requested an unencrypted connection")
ErrExpectedNoCrypto = fmt.Errorf("No password specified, but peer requested an encrypted connection")
)
// We don't need the full net.TCPConn to do the protocol intro. This
// interface contains just the parts we do need, to support testing
type ProtocolIntroConn interface {
// io.Reader
Read(b []byte) (n int, err error)
// io.Writer
Write(b []byte) (n int, err error)
// net.Conn's deadline methods
SetDeadline(t time.Time) error
SetReadDeadline(t time.Time) error
SetWriteDeadline(t time.Time) error
}
type ProtocolIntroParams struct {
MinVersion byte
MaxVersion byte
Features map[string]string
Conn ProtocolIntroConn
Password []byte
Outbound bool
}
type ProtocolIntroResults struct {
Features map[string]string
Receiver TCPReceiver
Sender TCPSender
SessionKey *[32]byte
Version byte
}
func (params ProtocolIntroParams) DoIntro() (res ProtocolIntroResults, err error) {
if err = params.Conn.SetDeadline(time.Now().Add(HeaderTimeout)); err != nil {
return
}
if res.Version, err = params.exchangeProtocolHeader(); err != nil {
return
}
var pubKey, privKey *[32]byte
if params.Password != nil {
if pubKey, privKey, err = GenerateKeyPair(); err != nil {
return
}
}
if err = params.Conn.SetWriteDeadline(time.Time{}); err != nil {
return
}
if err = params.Conn.SetReadDeadline(time.Now().Add(TCPHeartbeat * 2)); err != nil {
return
}
switch res.Version {
case 1:
err = res.doIntroV1(params, pubKey, privKey)
case 2:
err = res.doIntroV2(params, pubKey, privKey)
default:
panic("unhandled protocol version")
}
return
}
func (params ProtocolIntroParams) exchangeProtocolHeader() (byte, error) {
// Write in a separate goroutine to avoid the possibility of
// deadlock. The result channel is of size 1 so that the
// goroutine does not linger even if we encounter an error on
// the read side.
sendHeader := append(ProtocolBytes, params.MinVersion, params.MaxVersion)
writeDone := make(chan error, 1)
go func() {
_, err := params.Conn.Write(sendHeader)
writeDone <- err
}()
header := make([]byte, len(ProtocolBytes)+2)
if n, err := io.ReadFull(params.Conn, header); err != nil && n == 0 {
return 0, fmt.Errorf("failed to receive remote protocol header: %s", err)
} else if err != nil {
return 0, fmt.Errorf("received incomplete remote protocol header (%d octets instead of %d): %v; error: %s",
n, len(header), header[:n], err)
}
if !bytes.Equal(ProtocolBytes, header[:len(ProtocolBytes)]) {
return 0, fmt.Errorf("remote protocol header not recognised: %v", header[:len(ProtocolBytes)])
}
theirMinVersion := header[len(ProtocolBytes)]
minVersion := theirMinVersion
if params.MinVersion > minVersion {
minVersion = params.MinVersion
}
theirMaxVersion := header[len(ProtocolBytes)+1]
maxVersion := theirMaxVersion
if maxVersion > params.MaxVersion {
maxVersion = params.MaxVersion
}
if minVersion > maxVersion {
return 0, fmt.Errorf("remote version range [%d,%d] is incompatible with ours [%d,%d]",
theirMinVersion, theirMaxVersion,
params.MinVersion, params.MaxVersion)
}
if err := <-writeDone; err != nil {
return 0, err
}
return maxVersion, nil
}
// The V1 protocol consists of the protocol identification/version
// header, followed by a stream of gobified values. The first value
// is the encoded features map (never encrypted). The subsequent
// values are the messages on the connection (encrypted for an
// encrypted connection). For an encrypted connection, the public key
// is passed in the "PublicKey" feature as a string of hex digits.
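//
// As a rough sketch of the stream described above (not authoritative, and
// ignoring encryption): the header "weave" <minVersion> <maxVersion> is
// followed by gob(features), then gob(msg), gob(msg), ... for the
// connection's messages.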
func (res *ProtocolIntroResults) doIntroV1(params ProtocolIntroParams, pubKey, privKey *[32]byte) error {
features := filterV1Features(params.Features)
if pubKey != nil {
features["PublicKey"] = hex.EncodeToString(pubKey[:])
}
enc := gob.NewEncoder(params.Conn)
dec := gob.NewDecoder(params.Conn)
// Encode in a separate goroutine to avoid the possibility of
// deadlock. The result channel is of size 1 so that the
// goroutine does not linger even if we encounter an error on
// the read side.
encodeDone := make(chan error, 1)
go func() {
encodeDone <- enc.Encode(features)
}()
if err := dec.Decode(&res.Features); err != nil {
return err
}
if err := <-encodeDone; err != nil {
return err
}
res.Sender = NewGobTCPSender(enc)
res.Receiver = NewGobTCPReceiver(dec)
if pubKey == nil {
if _, present := res.Features["PublicKey"]; present {
return ErrExpectedNoCrypto
}
} else {
remotePubKeyStr, ok := res.Features["PublicKey"]
if !ok {
return ErrExpectedCrypto
}
remotePubKey, err := hex.DecodeString(remotePubKeyStr)
if err != nil {
return err
}
res.setupCrypto(params, remotePubKey, privKey)
}
res.Features = filterV1Features(res.Features)
return nil
}
// In the V1 protocol, the intro fields are sent unencrypted. So we
// restrict them to an established subset of fields that are assumed
// to be safe.
func filterV1Features(intro map[string]string) map[string]string {
safe := make(map[string]string)
for _, k := range ProtocolV1Features {
if val, ok := intro[k]; ok {
safe[k] = val
}
}
return safe
}
// The V2 protocol consists of the protocol identification/version
// header, followed by:
//
// - A single "encryption flag" byte: 0 for no encryption, 1 for
// encryption.
//
// - When the connection is encrypted, 32 bytes follow containing the
// public key.
//
// - Then a stream of length-prefixed messages, which are encrypted
// for an encrypted connection.
//
// The first message contains the encoded features map (so in contrast
// to V1, it will be encrypted on an encrypted connection).
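//
// A rough sketch of the unencrypted case (not authoritative; the exact
// length-prefix encoding is whatever NewLengthPrefixTCPSender/Receiver
// implement):
//
//	"weave" <minVersion> <maxVersion>   protocol header
//	0x00                                encryption flag: no encryption
//	<len><gob(features)>                first length-prefixed message
//	<len><payload> ...                  subsequent messages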
func (res *ProtocolIntroResults) doIntroV2(params ProtocolIntroParams, pubKey, privKey *[32]byte) error {
// Public key exchange
var wbuf []byte
if pubKey == nil {
wbuf = []byte{0}
} else {
wbuf = make([]byte, 1+len(*pubKey))
wbuf[0] = 1
copy(wbuf[1:], (*pubKey)[:])
}
// Write in a separate goroutine to avoid the possibility of
// deadlock. The result channel is of size 1 so that the
// goroutine does not linger even if we encounter an error on
// the read side.
writeDone := make(chan error, 1)
go func() {
_, err := params.Conn.Write(wbuf)
writeDone <- err
}()
rbuf := make([]byte, 1)
if _, err := io.ReadFull(params.Conn, rbuf); err != nil {
return err
}
switch rbuf[0] {
case 0:
if pubKey != nil {
return ErrExpectedCrypto
}
res.Sender = NewLengthPrefixTCPSender(params.Conn)
res.Receiver = NewLengthPrefixTCPReceiver(params.Conn)
case 1:
if pubKey == nil {
return ErrExpectedNoCrypto
}
rbuf = make([]byte, len(pubKey))
if _, err := io.ReadFull(params.Conn, rbuf); err != nil {
return err
}
res.Sender = NewLengthPrefixTCPSender(params.Conn)
res.Receiver = NewLengthPrefixTCPReceiver(params.Conn)
res.setupCrypto(params, rbuf, privKey)
default:
return fmt.Errorf("Bad encryption flag %d", rbuf[0])
}
if err := <-writeDone; err != nil {
return err
}
// Features exchange
go func() {
buf := new(bytes.Buffer)
if err := gob.NewEncoder(buf).Encode(¶ms.Features); err != nil {
writeDone <- err
return
}
writeDone <- res.Sender.Send(buf.Bytes())
}()
rbuf, err := res.Receiver.Receive()
if err != nil {
return err
}
if err := gob.NewDecoder(bytes.NewReader(rbuf)).Decode(&res.Features); err != nil {
return err
}
if err := <-writeDone; err != nil {
return err
}
return nil
}
func (res *ProtocolIntroResults) setupCrypto(params ProtocolIntroParams, remotePubKey []byte, privKey *[32]byte) {
var remotePubKeyArr [32]byte
copy(remotePubKeyArr[:], remotePubKey)
res.SessionKey = FormSessionKey(&remotePubKeyArr, privKey, params.Password)
res.Sender = NewEncryptedTCPSender(res.Sender, res.SessionKey, params.Outbound)
res.Receiver = NewEncryptedTCPReceiver(res.Receiver, res.SessionKey, params.Outbound)
}
type ProtocolTag byte
const (
ProtocolHeartbeat ProtocolTag = iota
ProtocolOverlayControlMsg
ProtocolGossip
ProtocolGossipUnicast
ProtocolGossipBroadcast
)
type ProtocolMsg struct {
tag ProtocolTag
msg []byte
}
type ProtocolSender interface {
SendProtocolMsg(m ProtocolMsg)
}
| 1 | 11,030 | When we undo this for 1.3, do we not need to leave these three entries intact to avoid renumbering of the subsequent constants? If so perhaps the changes to this file should be pulled into a separate initial commit so we can just `git revert` the remainder... | weaveworks-weave | go |
@@ -60,5 +60,9 @@ func (a *API) Setup() {
a.removeUnusedShapes()
}
+ if !a.NoValidataShapeMethods {
+ a.addShapeValidations()
+ }
+
a.initialized = true
} | 1 | package api
import (
"encoding/json"
"os"
"path/filepath"
)
// Load takes a set of files for each filetype and returns an API pointer.
// The API will be initialized once all files have been loaded and parsed.
//
// Will panic on any failure opening the definition JSON files, or if there
// are unrecognized exported names.
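//
// A minimal usage sketch (the file names here are hypothetical):
//
//	a := Load("api-2.json", "docs-2.json", "paginators-1.json", "waiters-2.json")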
func Load(api, docs, paginators, waiters string) *API {
a := API{}
a.Attach(api)
a.Attach(docs)
a.Attach(paginators)
a.Attach(waiters)
a.Setup()
return &a
}
// Attach opens a file by name, and unmarshal its JSON data.
// Will proceed to setup the API if not already done so.
func (a *API) Attach(filename string) {
a.path = filepath.Dir(filename)
f, err := os.Open(filename)
defer f.Close()
if err != nil {
panic(err)
}
json.NewDecoder(f).Decode(a)
}
// AttachString will unmarshal a raw JSON string, and setup the
// API if not already done so.
func (a *API) AttachString(str string) {
json.Unmarshal([]byte(str), a)
if !a.initialized {
a.Setup()
}
}
// Setup initializes the API.
func (a *API) Setup() {
a.writeShapeNames()
a.resolveReferences()
a.fixStutterNames()
a.renameExportable()
if !a.NoRenameToplevelShapes {
a.renameToplevelShapes()
}
a.updateTopLevelShapeReferences()
a.createInputOutputShapes()
a.customizationPasses()
if !a.NoRemoveUnusedShapes {
a.removeUnusedShapes()
}
a.initialized = true
}
| 1 | 7,848 | This is never set anywhere in the `cli/gen-api` folder. Should it be? | aws-aws-sdk-go | go |
@@ -23,13 +23,13 @@ import (
"sync"
"time"
- v1 "k8s.io/api/core/v1"
+ corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
kubeinformers "k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
- corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
+ clientv1 "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/tools/leaderelection" | 1 | /*
Copyright 2019 The Jetstack cert-manager contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package app
import (
"context"
"fmt"
"os"
"sync"
"time"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
kubeinformers "k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/tools/leaderelection"
"k8s.io/client-go/tools/leaderelection/resourcelock"
"k8s.io/client-go/tools/record"
"k8s.io/utils/clock"
"github.com/jetstack/cert-manager/cmd/controller/app/options"
"github.com/jetstack/cert-manager/pkg/acme/accounts"
clientset "github.com/jetstack/cert-manager/pkg/client/clientset/versioned"
intscheme "github.com/jetstack/cert-manager/pkg/client/clientset/versioned/scheme"
informers "github.com/jetstack/cert-manager/pkg/client/informers/externalversions"
"github.com/jetstack/cert-manager/pkg/controller"
"github.com/jetstack/cert-manager/pkg/controller/clusterissuers"
dnsutil "github.com/jetstack/cert-manager/pkg/issuer/acme/dns/util"
logf "github.com/jetstack/cert-manager/pkg/logs"
"github.com/jetstack/cert-manager/pkg/metrics"
"github.com/jetstack/cert-manager/pkg/util"
)
const controllerAgentName = "cert-manager"
func Run(opts *options.ControllerOptions, stopCh <-chan struct{}) {
rootCtx := util.ContextWithStopCh(context.Background(), stopCh)
rootCtx = logf.NewContext(rootCtx, nil, "controller")
log := logf.FromContext(rootCtx)
ctx, kubeCfg, err := buildControllerContext(rootCtx, stopCh, opts)
if err != nil {
log.Error(err, "error building controller context", "options", opts)
os.Exit(1)
}
metricsServer, err := ctx.Metrics.Start(opts.MetricsListenAddress)
if err != nil {
log.Error(err, "failed to listen on prometheus address", "address", opts.MetricsListenAddress)
os.Exit(1)
}
var wg sync.WaitGroup
run := func(_ context.Context) {
for n, fn := range controller.Known() {
log := log.WithValues("controller", n)
// only run a controller if it's been enabled
if !util.Contains(opts.EnabledControllers, n) {
log.V(logf.InfoLevel).Info("not starting controller as it's disabled")
continue
}
// don't run clusterissuers controller if scoped to a single namespace
if ctx.Namespace != "" && n == clusterissuers.ControllerName {
log.V(logf.InfoLevel).Info("not starting controller as cert-manager has been scoped to a single namespace")
continue
}
wg.Add(1)
iface, err := fn(ctx)
if err != nil {
log.Error(err, "error starting controller")
os.Exit(1)
}
go func(n string, fn controller.Interface) {
defer wg.Done()
log.V(logf.InfoLevel).Info("starting controller")
workers := 5
err := fn.Run(workers, stopCh)
if err != nil {
log.Error(err, "error starting controller")
os.Exit(1)
}
}(n, iface)
}
log.V(logf.DebugLevel).Info("starting shared informer factories")
ctx.SharedInformerFactory.Start(stopCh)
ctx.KubeSharedInformerFactory.Start(stopCh)
wg.Wait()
log.V(logf.InfoLevel).Info("control loops exited")
ctx.Metrics.Shutdown(metricsServer)
os.Exit(0)
}
if !opts.LeaderElect {
run(context.TODO())
return
}
log.V(logf.InfoLevel).Info("starting leader election")
leaderElectionClient, err := kubernetes.NewForConfig(rest.AddUserAgent(kubeCfg, "leader-election"))
if err != nil {
log.Error(err, "error creating leader election client")
os.Exit(1)
}
startLeaderElection(rootCtx, opts, leaderElectionClient, ctx.Recorder, run)
}
func buildControllerContext(ctx context.Context, stopCh <-chan struct{}, opts *options.ControllerOptions) (*controller.Context, *rest.Config, error) {
log := logf.FromContext(ctx, "build-context")
// Load the users Kubernetes config
kubeCfg, err := clientcmd.BuildConfigFromFlags(opts.APIServerHost, opts.Kubeconfig)
if err != nil {
return nil, nil, fmt.Errorf("error creating rest config: %s", err.Error())
}
// Add User-Agent to client
kubeCfg = rest.AddUserAgent(kubeCfg, util.CertManagerUserAgent)
// Create a cert-manager api client
intcl, err := clientset.NewForConfig(kubeCfg)
if err != nil {
return nil, nil, fmt.Errorf("error creating internal group client: %s", err.Error())
}
// Create a Kubernetes api client
cl, err := kubernetes.NewForConfig(kubeCfg)
if err != nil {
return nil, nil, fmt.Errorf("error creating kubernetes client: %s", err.Error())
}
nameservers := opts.DNS01RecursiveNameservers
if len(nameservers) == 0 {
nameservers = dnsutil.RecursiveNameservers
}
log.V(logf.InfoLevel).WithValues("nameservers", nameservers).Info("configured acme dns01 nameservers")
HTTP01SolverResourceRequestCPU, err := resource.ParseQuantity(opts.ACMEHTTP01SolverResourceRequestCPU)
if err != nil {
return nil, nil, fmt.Errorf("error parsing ACMEHTTP01SolverResourceRequestCPU: %s", err.Error())
}
HTTP01SolverResourceRequestMemory, err := resource.ParseQuantity(opts.ACMEHTTP01SolverResourceRequestMemory)
if err != nil {
return nil, nil, fmt.Errorf("error parsing ACMEHTTP01SolverResourceRequestMemory: %s", err.Error())
}
HTTP01SolverResourceLimitsCPU, err := resource.ParseQuantity(opts.ACMEHTTP01SolverResourceLimitsCPU)
if err != nil {
return nil, nil, fmt.Errorf("error parsing ACMEHTTP01SolverResourceLimitsCPU: %s", err.Error())
}
HTTP01SolverResourceLimitsMemory, err := resource.ParseQuantity(opts.ACMEHTTP01SolverResourceLimitsMemory)
if err != nil {
return nil, nil, fmt.Errorf("error parsing ACMEHTTP01SolverResourceLimitsMemory: %s", err.Error())
}
// Create event broadcaster
// Add cert-manager types to the default Kubernetes Scheme so Events can be
// logged properly
intscheme.AddToScheme(scheme.Scheme)
log.V(logf.DebugLevel).Info("creating event broadcaster")
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(log.V(logf.DebugLevel).Info)
eventBroadcaster.StartRecordingToSink(&corev1.EventSinkImpl{Interface: cl.CoreV1().Events("")})
recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: controllerAgentName})
sharedInformerFactory := informers.NewSharedInformerFactoryWithOptions(intcl, time.Second*30, informers.WithNamespace(opts.Namespace))
kubeSharedInformerFactory := kubeinformers.NewSharedInformerFactoryWithOptions(cl, time.Second*30, kubeinformers.WithNamespace(opts.Namespace))
acmeAccountRegistry := accounts.NewDefaultRegistry()
return &controller.Context{
RootContext: ctx,
StopCh: stopCh,
RESTConfig: kubeCfg,
Client: cl,
CMClient: intcl,
Recorder: recorder,
KubeSharedInformerFactory: kubeSharedInformerFactory,
SharedInformerFactory: sharedInformerFactory,
Namespace: opts.Namespace,
Clock: clock.RealClock{},
Metrics: metrics.New(log),
ACMEOptions: controller.ACMEOptions{
HTTP01SolverImage: opts.ACMEHTTP01SolverImage,
HTTP01SolverResourceRequestCPU: HTTP01SolverResourceRequestCPU,
HTTP01SolverResourceRequestMemory: HTTP01SolverResourceRequestMemory,
HTTP01SolverResourceLimitsCPU: HTTP01SolverResourceLimitsCPU,
HTTP01SolverResourceLimitsMemory: HTTP01SolverResourceLimitsMemory,
DNS01CheckAuthoritative: !opts.DNS01RecursiveNameserversOnly,
DNS01Nameservers: nameservers,
AccountRegistry: acmeAccountRegistry,
},
IssuerOptions: controller.IssuerOptions{
ClusterIssuerAmbientCredentials: opts.ClusterIssuerAmbientCredentials,
IssuerAmbientCredentials: opts.IssuerAmbientCredentials,
ClusterResourceNamespace: opts.ClusterResourceNamespace,
RenewBeforeExpiryDuration: opts.RenewBeforeExpiryDuration,
},
IngressShimOptions: controller.IngressShimOptions{
DefaultIssuerName: opts.DefaultIssuerName,
DefaultIssuerKind: opts.DefaultIssuerKind,
DefaultIssuerGroup: opts.DefaultIssuerGroup,
DefaultAutoCertificateAnnotations: opts.DefaultAutoCertificateAnnotations,
},
CertificateOptions: controller.CertificateOptions{
EnableOwnerRef: opts.EnableCertificateOwnerRef,
},
SchedulerOptions: controller.SchedulerOptions{
MaxConcurrentChallenges: opts.MaxConcurrentChallenges,
},
}, kubeCfg, nil
}
func startLeaderElection(ctx context.Context, opts *options.ControllerOptions, leaderElectionClient kubernetes.Interface, recorder record.EventRecorder, run func(context.Context)) {
log := logf.FromContext(ctx, "leader-election")
// Identity used to distinguish between multiple controller manager instances
id, err := os.Hostname()
if err != nil {
log.Error(err, "error getting hostname")
os.Exit(1)
}
// Lock required for leader election
rl := resourcelock.ConfigMapLock{
ConfigMapMeta: metav1.ObjectMeta{
Namespace: opts.LeaderElectionNamespace,
Name: "cert-manager-controller",
},
Client: leaderElectionClient.CoreV1(),
LockConfig: resourcelock.ResourceLockConfig{
Identity: id + "-external-cert-manager-controller",
EventRecorder: recorder,
},
}
// Try and become the leader and start controller manager loops
leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{
Lock: &rl,
LeaseDuration: opts.LeaderElectionLeaseDuration,
RenewDeadline: opts.LeaderElectionRenewDeadline,
RetryPeriod: opts.LeaderElectionRetryPeriod,
Callbacks: leaderelection.LeaderCallbacks{
OnStartedLeading: run,
OnStoppedLeading: func() {
log.V(logf.ErrorLevel).Info("leader election lost")
os.Exit(1)
},
},
})
}
| 1 | 23,144 | All the changes in this commit are mechanical. Produced by the script in the previous commit. | jetstack-cert-manager | go |
@@ -76,7 +76,7 @@ module Travis
true
when 'server_error'
cmd 'echo -e "\033[31;1mCould not fetch .travis.yml from GitHub.\033[0m"', assert: false, echo: false
- cmd 'travis_terminate 2', assert: false, echo: false
+ raw 'travis_terminate 2', assert: false, echo: false
false
else
true | 1 | require 'core_ext/hash/deep_merge'
require 'core_ext/hash/deep_symbolize_keys'
require 'core_ext/object/false'
require 'erb'
module Travis
module Build
class Script
autoload :Addons, 'travis/build/script/addons'
autoload :Android, 'travis/build/script/android'
autoload :C, 'travis/build/script/c'
autoload :Cpp, 'travis/build/script/cpp'
autoload :Clojure, 'travis/build/script/clojure'
autoload :DirectoryCache, 'travis/build/script/directory_cache'
autoload :Erlang, 'travis/build/script/erlang'
autoload :Git, 'travis/build/script/git'
autoload :Go, 'travis/build/script/go'
autoload :Groovy, 'travis/build/script/groovy'
autoload :Generic, 'travis/build/script/generic'
autoload :Haskell, 'travis/build/script/haskell'
autoload :Helpers, 'travis/build/script/helpers'
autoload :Jdk, 'travis/build/script/jdk'
autoload :Jvm, 'travis/build/script/jvm'
autoload :NodeJs, 'travis/build/script/node_js'
autoload :ObjectiveC, 'travis/build/script/objective_c'
autoload :Perl, 'travis/build/script/perl'
autoload :Php, 'travis/build/script/php'
autoload :PureJava, 'travis/build/script/pure_java'
autoload :Python, 'travis/build/script/python'
autoload :Ruby, 'travis/build/script/ruby'
autoload :RVM, 'travis/build/script/rvm'
autoload :Scala, 'travis/build/script/scala'
autoload :Services, 'travis/build/script/services'
autoload :Stages, 'travis/build/script/stages'
TEMPLATES_PATH = File.expand_path('../script/templates', __FILE__)
STAGES = {
builtin: [:configure, :checkout, :pre_setup, :paranoid_mode, :export, :setup, :announce],
custom: [:before_install, :install, :before_script, :script, :after_result, :after_script]
}
class << self
def defaults
Git::DEFAULTS.merge(self::DEFAULTS)
end
end
include Addons, Git, Helpers, Services, Stages, DirectoryCache
attr_reader :stack, :data, :options
def initialize(data, options = {})
@data = Data.new({ config: self.class.defaults }.deep_merge(data.deep_symbolize_keys))
@options = options
@stack = [Shell::Script.new(log: true, echo: true)]
end
def compile
raw template 'header.sh'
run_stages if check_config
raw template 'footer.sh'
sh.to_s
end
def cache_slug
"cache"
end
private
def check_config
case data.config[:".result"]
when 'not_found'
cmd 'echo -e "\033[31;1mCould not find .travis.yml, using standard configuration.\033[0m"', assert: false, echo: false
true
when 'server_error'
cmd 'echo -e "\033[31;1mCould not fetch .travis.yml from GitHub.\033[0m"', assert: false, echo: false
cmd 'travis_terminate 2', assert: false, echo: false
false
else
true
end
end
def config
data.config
end
def configure
fix_resolv_conf
fix_etc_hosts
end
def export
set 'TRAVIS', 'true', echo: false
set 'CI', 'true', echo: false
set 'CONTINUOUS_INTEGRATION', 'true', echo: false
set 'HAS_JOSH_K_SEAL_OF_APPROVAL', 'true', echo: false
data.env_vars_groups.each do |group|
if group.announce?
cmd "echo -e \"\n\033[33;1mSetting environment variables from #{group.source}\033[0m\"; ", assert: false, echo: false
end
group.vars.each do |var|
set var.key, var.value, echo: var.to_s
end
end
if data.env_vars.any?
# adds a newline to the log
cmd 'echo', echo: false, assert: false, log: false
end
end
def finish
push_directory_cache
end
def pre_setup
start_services
setup_apt_cache if data.cache? :apt
fix_ps4
run_addons(:after_pre_setup)
end
def setup
setup_directory_cache
end
def announce
# overwrite
end
def template(filename)
ERB.new(File.read(File.expand_path(filename, TEMPLATES_PATH))).result(binding)
end
def paranoid_mode
if data.paranoid_mode?
cmd 'echo', echo: false, assert: false, log: false
cmd 'echo', echo: false, assert: false, log: false
cmd 'echo -e "\033[33mNOTE:\033[0m Sudo, services, addons, setuid and setgid have been disabled."', echo: false, assert: false, log: false
cmd 'echo', echo: false, assert: false, log: false
cmd 'sudo -n sh -c "sed -e \'s/^%.*//\' -i.bak /etc/sudoers && rm -f /etc/sudoers.d/travis && find / -perm -4000 -exec chmod a-s {} \; 2>/dev/null"', echo: false, assert: false, log: false
end
end
def setup_apt_cache
if data.hosts && data.hosts[:apt_cache]
cmd 'echo -e "\033[33;1mSetting up APT cache\033[0m"', assert: false, echo: false
cmd %Q{echo 'Acquire::http { Proxy "#{data.hosts[:apt_cache]}"; };' | sudo tee /etc/apt/apt.conf.d/01proxy &> /dev/null}, echo: false, assert: false, log: false
end
end
def fix_resolv_conf
return if data.skip_resolv_updates?
cmd %Q{grep '199.91.168' /etc/resolv.conf > /dev/null || echo 'nameserver 199.91.168.70\nnameserver 199.91.168.71' | sudo tee /etc/resolv.conf &> /dev/null}, assert: false, echo: false, log: false
end
def fix_etc_hosts
return if data.skip_etc_hosts_fix?
cmd %Q{sudo sed -e 's/^\\(127\\.0\\.0\\.1.*\\)$/\\1 '`hostname`'/' -i'.bak' /etc/hosts}, assert: false, echo: false, log: false
cmd %{sudo bash -c 'echo "87.98.253.108 getcomposer.org" >> /etc/hosts'}, assert: false, echo: false, log: false
end
def fix_ps4
set "PS4", "+ ", echo: false
end
end
end
end
| 1 | 11,720 | What's the difference between these 2 versions? | travis-ci-travis-build | rb |
@@ -224,7 +224,7 @@ class KMSScannerTest(unittest_utils.ForsetiTestCase):
self.scanner.run()
crypto_key = self.scanner._retrieve()
violations = self.scanner._find_violations(crypto_key)
- self.assertEquals(1, len(violations))
+ self.assertEquals(6, len(violations))
self.assertEquals(1, mock_output_results.call_count)
| 1 | # Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""KMS Scanner Tests."""
import unittest
import mock
from datetime import datetime
from tests import unittest_utils
from tests.services.util.db import create_test_engine
from tests.scanner.test_data import fake_kms_scanner_data
from google.cloud.forseti.scanner.scanners import kms_scanner
from google.cloud.forseti.services.dao import ModelManager
"""
Assumptions: In data/kms_scanner_test_rules.yaml, rotation_period is set to
100 days.
Test: Create two crypto keys, one with a creation time over 100 days ago, and
the other with a creation time less than 100 days ago.
The crypto key with creation time over 100 days ago should be flagged as a
violation but not the other one.
"""
KEY_RING_ID = '4063867491605246570'
CRYPTO_KEY_ID = '12873861500163377322'
CRYPTO_KEY_ID_1 = '12873861500163377324'
CRYPTO_KEY_ID_2 = '12873861500163377326'
CRYPTO_KEY_ID_3 = '12873861500163377328'
CRYPTO_KEY_ID_4 = '12873861500163377330'
CRYPTO_KEY_ID_5 = '12873861500163377332'
CRYPTO_KEY_ID_6 = '12873861500163377334'
CRYPTO_KEY_ID_6 = '12873861500163377336'
VIOLATION_TYPE = 'CRYPTO_KEY_VIOLATION'
TIME_NOW = datetime.utcnow()
class FakeServiceConfig(object):
def __init__(self):
engine = create_test_engine()
self.model_manager = ModelManager(engine)
class KMSScannerTest(unittest_utils.ForsetiTestCase):
@classmethod
def setUpClass(cls):
cls.service_config = FakeServiceConfig()
cls.model_name = cls.service_config.model_manager.create(
name='kms-scanner-test')
scoped_session, data_access = (
cls.service_config.model_manager.get(cls.model_name))
# Add organization to model.
with scoped_session as session:
organization = data_access.add_resource_by_name(
session, 'organization/12345', '', True)
project = data_access.add_resource(session, 'project/foo',
organization)
key_ring = data_access.add_resource(
session, 'kms_keyring/%s' % KEY_RING_ID, project)
key_ring.data = fake_kms_scanner_data.KEY_RING_DATA
crypto_key = data_access.add_resource(
session, 'kms_cryptokey/%s' % CRYPTO_KEY_ID, key_ring)
crypto_key.data = fake_kms_scanner_data.NON_ROTATED_CRYPTO_KEY_DATA
crypto_key_1 = data_access.add_resource(
session, 'kms_cryptokey/%s' % CRYPTO_KEY_ID_1, key_ring)
crypto_key_1.data = fake_kms_scanner_data.ROTATED_CRYPTO_KEY_DATA
crypto_key_2 = data_access.add_resource(
session, 'kms_cryptokey/%s' % CRYPTO_KEY_ID_2, key_ring)
crypto_key_2.data = (fake_kms_scanner_data.
NON_ROTATED_CRYPTO_KEY_DESTROYED_STATE_DATA)
crypto_key_3 = data_access.add_resource(
session, 'kms_cryptokey/%s' % CRYPTO_KEY_ID_3, key_ring)
crypto_key_3.data = (fake_kms_scanner_data.
PROTECTION_LEVEL_PURPOSE_ALGO_TEST_DATA)
crypto_key_4 = data_access.add_resource(
session, 'kms_cryptokey/%s' % CRYPTO_KEY_ID_4, key_ring)
crypto_key_4.data = (fake_kms_scanner_data.
KEY_STATE_TEST_DATA)
crypto_key_5 = data_access.add_resource(
session, 'kms_cryptokey/%s' % CRYPTO_KEY_ID_5, key_ring)
crypto_key_5.data = (fake_kms_scanner_data.
PROTECTION_LEVEL_PURPOSE_ALGO_TEST_DATA)
crypto_key_6 = data_access.add_resource(
session, 'kms_cryptokey/%s' % CRYPTO_KEY_ID_6, key_ring)
crypto_key_6.data = (fake_kms_scanner_data.
PROTECTION_LEVEL_PURPOSE_ALGO_TEST_DATA)
session.commit()
@mock.patch.object(
kms_scanner.KMSScanner,
'_output_results_to_db', autospec=True)
def test_run_scanner(self, mock_output_results):
self.scanner = kms_scanner.KMSScanner(
{}, {}, self.service_config, self.model_name,
'', unittest_utils.get_datafile_path(
__file__, 'kms_scanner_test_rules.yaml'))
self.scanner.run()
crypto_key = self.scanner._retrieve()
violations = self.scanner._find_violations(crypto_key)
for violation in violations:
state = violation.primary_version.get('state')
self.assertEquals(state, 'ENABLED')
self.assertEquals(violation.resource_type, 'kms_cryptokey')
self.assertEquals(violation.violation_type, VIOLATION_TYPE)
self.assertEquals(1, mock_output_results.call_count)
@mock.patch.object(
kms_scanner.KMSScanner,
'_output_results_to_db', autospec=True)
def test_run_scanner_algo_match(self, mock_output_results):
self.scanner = kms_scanner.KMSScanner(
{}, {}, self.service_config, self.model_name,
'', unittest_utils.get_datafile_path(
__file__,
'kms_scanner_test_algo.yaml'))
self.scanner.run()
crypto_key = self.scanner._retrieve()
violations = self.scanner._find_violations(crypto_key)
for violation in violations:
self.assertEquals(violation.algorithm,
'GOOGLE_SYMMETRIC_ENCRYPTION')
self.assertEquals(1, mock_output_results.call_count)
@mock.patch.object(
kms_scanner.KMSScanner,
'_output_results_to_db', autospec=True)
def test_run_scanner_state_match(self, mock_output_results):
self.scanner = kms_scanner.KMSScanner(
{}, {}, self.service_config, self.model_name,
'', unittest_utils.get_datafile_path(
__file__, 'kms_scanner_test_state_rule.yaml'))
self.scanner.run()
crypto_key = self.scanner._retrieve()
violations = self.scanner._find_violations(crypto_key)
for violation in violations:
self.assertEquals(violation.state, 'ENABLED')
self.assertEquals(1, mock_output_results.call_count)
@mock.patch.object(
kms_scanner.KMSScanner,
'_output_results_to_db', autospec=True)
def test_run_scanner_protection_level_match(self, mock_output_results):
self.scanner = kms_scanner.KMSScanner(
{}, {}, self.service_config, self.model_name,
'', unittest_utils.get_datafile_path(
__file__,
'kms_scanner_test_protection_level.yaml'))
self.scanner.run()
crypto_key = self.scanner._retrieve()
violations = self.scanner._find_violations(crypto_key)
for violation in violations:
self.assertEquals(violation.protection_level, 'SOFTWARE')
self.assertEquals(1, mock_output_results.call_count)
@mock.patch.object(
kms_scanner.KMSScanner,
'_output_results_to_db', autospec=True)
def test_run_scanner_purpose_match(self, mock_output_results):
self.scanner = kms_scanner.KMSScanner(
{}, {}, self.service_config, self.model_name,
'', unittest_utils.get_datafile_path(
__file__,
'kms_scanner_test_purpose.yaml'))
self.scanner.run()
crypto_key = self.scanner._retrieve()
violations = self.scanner._find_violations(crypto_key)
for violation in violations:
self.assertEquals(violation.purpose, 'ENCRYPT_DECRYPT')
self.assertEquals(1, mock_output_results.call_count)
@mock.patch.object(
kms_scanner.KMSScanner,
'_output_results_to_db', autospec=True)
def test_run_scanner_rotation_period_whitelist_match(self,
mock_output_results):
self.scanner = kms_scanner.KMSScanner(
{}, {}, self.service_config, self.model_name,
'', unittest_utils.get_datafile_path(
__file__,
'kms_scanner_whitelist_test.yaml'))
self.scanner.run()
crypto_key = self.scanner._retrieve()
violations = self.scanner._find_violations(crypto_key)
self.assertEquals(1, len(violations))
self.assertEquals(1, mock_output_results.call_count)
if __name__ == '__main__':
unittest.main()
 | 1 | 34,060 | @red2k18 Are we sure it's correct to only have 1 now? | forseti-security-forseti-security | py |
@@ -151,6 +151,7 @@ void AddFragToMol(RWMol *mol, RWMol *frag, Bond::BondType bondOrder,
newB->setOwningMol(mol);
newB->setBeginAtomIdx(atomIdx1);
newB->setEndAtomIdx(atomIdx2);
+ newB->setProp(RDKit::common_properties::_unspecifiedOrder, 1);
mol->addBond(newB);
delete newB;
} else { | 1 | //
// Copyright (C) 2001-2020 Greg Landrum and Rational Discovery LLC
//
// @@ All Rights Reserved @@
// This file is part of the RDKit.
// The contents are covered by the terms of the BSD license
// which is included in the file license.txt, found at the root
// of the RDKit source tree.
//
#include <GraphMol/RDKitBase.h>
#include <GraphMol/RDKitQueries.h>
#include <GraphMol/Canon.h>
#include "SmilesParse.h"
#include "SmilesParseOps.h"
#include <list>
#include <algorithm>
#include <boost/dynamic_bitset.hpp>
#include <boost/format.hpp>
#include <RDGeneral/RDLog.h>
namespace SmilesParseOps {
using namespace RDKit;
void ClearAtomChemicalProps(RDKit::Atom *atom) {
TEST_ASSERT(atom);
atom->setIsotope(0);
atom->setFormalCharge(0);
atom->setNumExplicitHs(0);
}
void CheckRingClosureBranchStatus(RDKit::Atom *atom, RDKit::RWMol *mp) {
// github #786 and #1652: if the ring closure comes after a branch,
// the stereochem is wrong.
// This function is called while closing a branch during construction of
// the molecule from SMILES and corrects for what happens when parsing odd
// (and arguably wrong) SMILES constructs like:
// 1) [C@@](F)1(C)CCO1
// 2) C1CN[C@](O)(N)1
// 3) [C@](Cl)(F)1CC[C@H](F)CC1
// In the first two cases the stereochemistry at the chiral atom
// needs to be reversed. In the third case the stereochemistry should be
// reversed when the Cl is added, but left alone when the F is added.
// We recognize these situations using the index of the chiral atom
// and the degree of that chiral atom at the time the ring closure
// digit is encountered during parsing.
// ----------
// github #1972 adds these examples:
// 1) [C@@]1(Cl)(F)I.Br1 (ok)
// 2) [C@@](Cl)1(F)I.Br1 (reverse)
// 3) [C@@](Cl)(F)1I.Br1 (ok)
// 4) [C@@](Cl)(F)(I)1.Br1 (reverse)
PRECONDITION(atom, "bad atom");
PRECONDITION(mp, "bad mol");
if (atom->getIdx() != mp->getNumAtoms(true) - 1 &&
(atom->getDegree() == 1 ||
(atom->getDegree() == 2 && atom->getIdx() != 0) ||
(atom->getDegree() == 3 && atom->getIdx() == 0)) &&
(atom->getChiralTag() == Atom::CHI_TETRAHEDRAL_CW ||
atom->getChiralTag() == Atom::CHI_TETRAHEDRAL_CCW)) {
// std::cerr << "crcbs: " << atom->getIdx() << std::endl;
atom->invertChirality();
}
}
void ReportParseError(const char *message, bool throwIt) {
PRECONDITION(message, "bad message");
if (!throwIt) {
BOOST_LOG(rdErrorLog) << "SMILES Parse Error: " << message << std::endl;
} else {
throw SmilesParseException(message);
}
}
void CleanupAfterParseError(RWMol *mol) {
PRECONDITION(mol, "no molecule");
// blow out any partial bonds:
for (auto markI : *mol->getBondBookmarks()) {
RWMol::BOND_PTR_LIST &bonds = markI.second;
for (auto &bond : bonds) {
delete bond;
}
}
}
namespace {
bool couldBeRingClosure(int val) { return val < 100000 && val >= 0; }
} // namespace
//
// set bondOrder to Bond::IONIC to skip the formation of a bond
// between the fragment and the molecule
//
void AddFragToMol(RWMol *mol, RWMol *frag, Bond::BondType bondOrder,
Bond::BondDir bondDir) {
PRECONDITION(mol, "no molecule");
PRECONDITION(frag, "no fragment");
PRECONDITION(mol->getActiveAtom(), "no active atom");
Atom *lastAt = mol->getActiveAtom();
int nOrigAtoms = mol->getNumAtoms();
int nOrigBonds = mol->getNumBonds();
//
// Add the fragment's atoms and bonds to the molecule:
//
mol->insertMol(*frag);
//
// update ring-closure order information on the added atoms:
//
for (const auto atom : frag->atoms()) {
INT_VECT tmpVect;
if (atom->getPropIfPresent(common_properties::_RingClosures, tmpVect)) {
for (auto &v : tmpVect) {
// if the ring closure is not already a bond, don't touch it:
if (v >= 0) {
v += nOrigBonds;
}
}
auto newAtom = mol->getAtomWithIdx(nOrigAtoms + atom->getIdx());
newAtom->setProp(common_properties::_RingClosures, tmpVect);
}
}
//
  // sets up the bond between the mol and the branch
//
if (bondOrder != Bond::IONIC) {
// FIX: this is not so much with the elegance...
auto firstAt = mol->getAtomWithIdx(nOrigAtoms);
int atomIdx1 = firstAt->getIdx();
int atomIdx2 = lastAt->getIdx();
if (frag->hasBondBookmark(ci_LEADING_BOND)) {
// std::cout << "found it" << std::endl;
const ROMol::BOND_PTR_LIST &leadingBonds =
frag->getAllBondsWithBookmark(ci_LEADING_BOND);
for (auto leadingBond : leadingBonds) {
// we've already got a bond, so just set its local info
// and then add it to the molecule intact (no sense doing
// any extra work).
leadingBond->setOwningMol(mol);
leadingBond->setEndAtomIdx(leadingBond->getBeginAtomIdx() + nOrigAtoms);
leadingBond->setBeginAtomIdx(atomIdx2);
mol->addBond(leadingBond, true);
}
mol->clearBondBookmark(ci_LEADING_BOND);
} else {
// SMARTS semantics: unspecified bonds can be single or aromatic
if (bondOrder == Bond::UNSPECIFIED) {
auto *newB = new QueryBond(Bond::SINGLE);
newB->expandQuery(makeBondOrderEqualsQuery(Bond::AROMATIC),
Queries::COMPOSITE_OR, true);
newB->setOwningMol(mol);
newB->setBeginAtomIdx(atomIdx1);
newB->setEndAtomIdx(atomIdx2);
mol->addBond(newB);
delete newB;
} else {
Bond::BondType bo = bondOrder;
if (bo == Bond::DATIVEL) {
std::swap(atomIdx1, atomIdx2);
bo = Bond::DATIVE;
} else if (bo == Bond::DATIVER) {
bo = Bond::DATIVE;
}
int idx = mol->addBond(atomIdx2, atomIdx1, bo) - 1;
mol->getBondWithIdx(idx)->setBondDir(bondDir);
}
}
}
//
// okay, the next thing we have to worry about is the possibility
// that there might be ring opening/closing in the fragment we just
// dealt with e.g. for things like C1C(C1) and C1C.C1
// We deal with this by copying in the bookmarks and partial bonds
// that exist in the fragment
//
for (auto atIt : *frag->getAtomBookmarks()) {
// don't bother even considering bookmarks outside
// the range used for cycles
if (couldBeRingClosure(atIt.first)) {
for (auto at2 : atIt.second) {
int newIdx = at2->getIdx() + nOrigAtoms;
mol->setAtomBookmark(mol->getAtomWithIdx(newIdx), atIt.first);
while (frag->hasBondBookmark(atIt.first)) {
Bond *b = frag->getBondWithBookmark(atIt.first);
int atomIdx1 = b->getBeginAtomIdx() + nOrigAtoms;
b->setOwningMol(mol);
b->setBeginAtomIdx(atomIdx1);
mol->setBondBookmark(b, atIt.first);
frag->clearBondBookmark(atIt.first, b);
}
}
}
}
frag->clearAllAtomBookmarks();
frag->clearAllBondBookmarks();
};
typedef std::pair<size_t, int> SIZET_PAIR;
typedef std::pair<int, int> INT_PAIR;
template <typename T>
bool operator<(const std::pair<T, T> &p1, const std::pair<T, T> &p2) {
return p1.first < p2.first;
}
void AdjustAtomChiralityFlags(RWMol *mol) {
PRECONDITION(mol, "no molecule");
for (auto atom : mol->atoms()) {
Atom::ChiralType chiralType = atom->getChiralTag();
if (chiralType == Atom::CHI_TETRAHEDRAL_CW ||
chiralType == Atom::CHI_TETRAHEDRAL_CCW) {
//
// The atom is marked as chiral, set the SMILES-order of the
// atom's bonds. This is easy for non-ring-closure bonds,
// because the SMILES order is determined solely by the atom
// indices. Things are trickier for ring-closure bonds, which we
// need to insert into the list in a particular order
//
INT_VECT ringClosures;
atom->getPropIfPresent(common_properties::_RingClosures, ringClosures);
#if 0
std::cerr << "CLOSURES: ";
std::copy(ringClosures.begin(), ringClosures.end(),
std::ostream_iterator<int>(std::cerr, " "));
std::cerr << std::endl;
#endif
std::list<SIZET_PAIR> neighbors;
// push this atom onto the list of neighbors (we'll use this
// to find our place later):
neighbors.emplace_back(atom->getIdx(), -1);
std::list<size_t> bondOrder;
for (auto nbrIdx :
boost::make_iterator_range(mol->getAtomNeighbors(atom))) {
Bond *nbrBond = mol->getBondBetweenAtoms(atom->getIdx(), nbrIdx);
if (std::find(ringClosures.begin(), ringClosures.end(),
static_cast<int>(nbrBond->getIdx())) ==
ringClosures.end()) {
neighbors.emplace_back(nbrIdx, nbrBond->getIdx());
}
}
// sort the list of non-ring-closure bonds:
neighbors.sort();
// find the location of this atom. it pretty much has to be
// first in the list, e.g for smiles like [C@](F)(Cl)(Br)I, or
// second (everything else).
auto selfPos = neighbors.begin();
if (selfPos->first != atom->getIdx()) {
++selfPos;
}
CHECK_INVARIANT(selfPos->first == atom->getIdx(), "weird atom ordering");
// copy over the bond ids:
INT_LIST bondOrdering;
for (auto neighborIt = neighbors.begin(); neighborIt != neighbors.end();
++neighborIt) {
if (neighborIt != selfPos) {
bondOrdering.push_back(rdcast<int>(neighborIt->second));
} else {
// we are not going to add the atom itself, but we will push on
// ring closure bonds at this point (if required):
bondOrdering.insert(bondOrdering.end(), ringClosures.begin(),
ringClosures.end());
}
}
// ok, we now have the SMILES ordering of the bonds, figure out the
// permutation order.
//
// This whole thing is necessary because the ring-closure bonds
// in the SMILES come before the bonds to the other neighbors, but
// they come after the neighbors in the molecule we build.
// A crude example:
// in F[C@](Cl)(Br)I the C-Cl bond is index 1 in both SMILES
// and as built
// in F[C@]1(Br)I.Cl1 the C-Cl bond is index 1 in the SMILES
// and index 3 as built.
//
int nSwaps = atom->getPerturbationOrder(bondOrdering);
// FIX: explain this one:
// At least part of what's going on here for degree 3 atoms:
// - The first part: if we're at the beginning of the SMILES and have
// an explicit H, we need to add a swap.
// This is to reflect that [C@](Cl)(F)C is equivalent to Cl[C@@](F)C
// but [C@H](Cl)(F)C is fine as-is (The H-C bond is the one you look
// down).
// - The second part is more complicated and deals with situations like
// F[C@]1CCO1. In this case we otherwise end up looking like we need
// to invert the chirality, which is bogus. The chirality here needs
// to remain @ just as it does in F[C@](Cl)CCO1
// - We have to be careful with the second part to not sweep things like
// C[S@]2(=O).Cl2 into the same bin (was github #760). We detect
// those cases by looking for unsaturated atoms
//
if (Canon::chiralAtomNeedsTagInversion(
*mol, atom, atom->hasProp(common_properties::_SmilesStart),
ringClosures.size())) {
++nSwaps;
}
// std::cerr << "nswaps " << atom->getIdx() << " " << nSwaps
// << std::endl;
// std::copy(bondOrdering.begin(), bondOrdering.end(),
// std::ostream_iterator<int>(std::cerr, ", "));
// std::cerr << std::endl;
if (nSwaps % 2) {
atom->invertChirality();
}
}
}
} // namespace SmilesParseOps
Bond::BondType GetUnspecifiedBondType(const RWMol *mol, const Atom *atom1,
const Atom *atom2) {
PRECONDITION(mol, "no molecule");
PRECONDITION(atom1, "no atom1");
PRECONDITION(atom2, "no atom2");
Bond::BondType res;
if (atom1->getIsAromatic() && atom2->getIsAromatic()) {
res = Bond::AROMATIC;
} else {
res = Bond::SINGLE;
}
return res;
}
void SetUnspecifiedBondTypes(RWMol *mol) {
PRECONDITION(mol, "no molecule");
for (auto bond : mol->bonds()) {
if (bond->hasProp(RDKit::common_properties::_unspecifiedOrder)) {
bond->setBondType(GetUnspecifiedBondType(mol, bond->getBeginAtom(),
bond->getEndAtom()));
if (bond->getBondType() == Bond::AROMATIC) {
bond->setIsAromatic(true);
} else {
bond->setIsAromatic(false);
}
}
}
}
namespace {
void swapBondDirIfNeeded(Bond *bond1, const Bond *bond2) {
PRECONDITION(bond1, "bad bond1");
PRECONDITION(bond2, "bad bond2");
if (bond1->getBondDir() == Bond::NONE && bond2->getBondDir() != Bond::NONE) {
bond1->setBondDir(bond2->getBondDir());
if (bond1->getBeginAtom() != bond2->getBeginAtom()) {
switch (bond1->getBondDir()) {
case Bond::ENDDOWNRIGHT:
bond1->setBondDir(Bond::ENDUPRIGHT);
break;
case Bond::ENDUPRIGHT:
bond1->setBondDir(Bond::ENDDOWNRIGHT);
break;
default:
break;
}
}
}
}
} // namespace
void CloseMolRings(RWMol *mol, bool toleratePartials) {
// Here's what we want to do here:
// loop through the molecule's atom bookmarks
// for each bookmark:
// connect pairs of atoms sharing that bookmark
// left to right (in the order in which they were
// inserted into the molecule).
// whilst doing this, we have to be cognizant of the fact that
// there may well be partial bonds in the molecule which need
// to be tied in as well. WOO HOO! IT'S A BIG MESS!
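  // As a small illustration: parsing "C1CC1" bookmarks atoms 0 and 2 under
  // ring-closure number 1 (each with a partial bond), and the loop below
  // pairs them up and adds the closing C-C bond.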
PRECONDITION(mol, "no molecule");
auto bookmarkIt = mol->getAtomBookmarks()->begin();
while (bookmarkIt != mol->getAtomBookmarks()->end()) {
auto &bookmark = *bookmarkIt;
// don't bother even considering bookmarks outside
// the range used for rings
if (couldBeRingClosure(bookmark.first)) {
RWMol::ATOM_PTR_LIST bookmarkedAtomsToRemove;
auto atomIt = bookmark.second.begin();
auto atomsEnd = bookmark.second.end();
while (atomIt != atomsEnd) {
Atom *atom1 = *atomIt;
++atomIt;
if (!toleratePartials && atomIt == atomsEnd) {
ReportParseError("unclosed ring");
} else if (atomIt != atomsEnd && *atomIt == atom1) {
// make sure we don't try to connect an atom to itself
// this was github #1925
auto fmt =
boost::format{
"duplicated ring closure %1% bonds atom %2% to itself"} %
bookmark.first % atom1->getIdx();
std::string msg = fmt.str();
ReportParseError(msg.c_str(), true);
} else if (mol->getBondBetweenAtoms(atom1->getIdx(),
(*atomIt)->getIdx()) != nullptr) {
auto fmt =
boost::format{
"ring closure %1% duplicates bond between atom %2% and atom "
"%3%"} %
bookmark.first % atom1->getIdx() % (*atomIt)->getIdx();
std::string msg = fmt.str();
ReportParseError(msg.c_str(), true);
} else if (atomIt != atomsEnd) {
// we actually found an atom, so connect it to the first
Atom *atom2 = *atomIt;
++atomIt;
int bondIdx = -1;
// We're guaranteed two partial bonds, one for each time
// the ring index was used. We give the first specification
// priority.
CHECK_INVARIANT(mol->hasBondBookmark(bookmark.first),
"Missing bond bookmark");
// now use the info from the partial bond:
// The partial bond itself will have a proper order and
// directionality (with a minor caveat documented below) and will
// have its beginning atom set already:
RWMol::BOND_PTR_LIST bonds =
mol->getAllBondsWithBookmark(bookmark.first);
auto bondIt = bonds.begin();
CHECK_INVARIANT(bonds.size() >= 2, "Missing bond");
// get pointers to the two bonds:
Bond *bond1 = *bondIt;
++bondIt;
Bond *bond2 = *bondIt;
// remove those bonds from the bookmarks:
mol->clearBondBookmark(bookmark.first, bond1);
mol->clearBondBookmark(bookmark.first, bond2);
// Make sure the bonds have the correct starting atoms:
CHECK_INVARIANT(bond1->getBeginAtomIdx() == atom1->getIdx(),
"bad begin atom");
CHECK_INVARIANT(bond2->getBeginAtomIdx() == atom2->getIdx(),
"bad begin atom");
Bond *matchedBond;
// figure out which (if either) bond has a specified type, we'll
// keep that one. We also need to update the end atom index to
// match FIX: daylight barfs when you give it multiple specs for the
// closure
// bond, we'll just take the first one and ignore others
// NOTE: we used to do this the other way (take the last
// specification),
// but that turned out to be troublesome in odd cases like
// C1CC11CC1.
// std::cerr << ">-------------" << std::endl;
// std::cerr << atom1->getIdx() << "-" << atom2->getIdx() << ": "
// << bond1->getBondType() << "("
// << bond1->hasProp(common_properties::_unspecifiedOrder)
// << "):" << bond1->getBondDir() << " "
// << bond2->getBondType() << "("
// << bond2->hasProp(common_properties::_unspecifiedOrder)
// << "):" << bond2->getBondDir() << std::endl;
if (!bond1->hasProp(common_properties::_unspecifiedOrder)) {
matchedBond = bond1;
if (matchedBond->getBondType() == Bond::DATIVEL) {
matchedBond->setBeginAtomIdx(atom2->getIdx());
matchedBond->setEndAtomIdx(atom1->getIdx());
matchedBond->setBondType(Bond::DATIVE);
} else if (matchedBond->getBondType() == Bond::DATIVER) {
matchedBond->setEndAtomIdx(atom2->getIdx());
matchedBond->setBondType(Bond::DATIVE);
} else {
matchedBond->setEndAtomIdx(atom2->getIdx());
}
swapBondDirIfNeeded(bond1, bond2);
delete bond2;
} else {
matchedBond = bond2;
if (matchedBond->getBondType() == Bond::DATIVEL) {
matchedBond->setBeginAtomIdx(atom1->getIdx());
matchedBond->setEndAtomIdx(atom2->getIdx());
matchedBond->setBondType(Bond::DATIVE);
} else if (matchedBond->getBondType() == Bond::DATIVER) {
matchedBond->setEndAtomIdx(atom1->getIdx());
matchedBond->setBondType(Bond::DATIVE);
} else {
matchedBond->setEndAtomIdx(atom1->getIdx());
}
swapBondDirIfNeeded(bond2, bond1);
delete bond1;
}
if (matchedBond->getBondType() == Bond::UNSPECIFIED &&
!matchedBond->hasQuery()) {
Bond::BondType bondT = GetUnspecifiedBondType(mol, atom1, atom2);
matchedBond->setBondType(bondT);
}
matchedBond->setOwningMol(mol);
if (matchedBond->getBondType() == Bond::AROMATIC) {
matchedBond->setIsAromatic(true);
}
// add the bond:
bondIdx = mol->addBond(matchedBond, true);
// we found a bond, so update the atom's _RingClosures
// property:
if (bondIdx > -1) {
CHECK_INVARIANT(
atom1->hasProp(common_properties::_RingClosures) &&
atom2->hasProp(common_properties::_RingClosures),
"somehow atom doesn't have _RingClosures property.");
INT_VECT closures;
atom1->getProp(common_properties::_RingClosures, closures);
auto closurePos = std::find(closures.begin(), closures.end(),
-(bookmark.first + 1));
CHECK_INVARIANT(closurePos != closures.end(),
"could not find bookmark in atom _RingClosures");
*closurePos = bondIdx - 1;
atom1->setProp(common_properties::_RingClosures, closures);
atom2->getProp(common_properties::_RingClosures, closures);
closurePos = std::find(closures.begin(), closures.end(),
-(bookmark.first + 1));
CHECK_INVARIANT(closurePos != closures.end(),
"could not find bookmark in atom _RingClosures");
*closurePos = bondIdx - 1;
atom2->setProp(common_properties::_RingClosures, closures);
}
bookmarkedAtomsToRemove.push_back(atom1);
bookmarkedAtomsToRemove.push_back(atom2);
}
}
int mark = bookmark.first;
++bookmarkIt;
for (const auto atom : bookmarkedAtomsToRemove) {
mol->clearAtomBookmark(mark, atom);
}
} else {
++bookmarkIt;
}
}
};
void CleanupAfterParsing(RWMol *mol) {
PRECONDITION(mol, "no molecule");
for (auto atom : mol->atoms()) {
atom->clearProp(common_properties::_RingClosures);
atom->clearProp(common_properties::_SmilesStart);
}
for (auto bond : mol->bonds()) {
bond->clearProp(common_properties::_unspecifiedOrder);
}
}
} // end of namespace SmilesParseOps
| 1 | 21,456 | @greglandrum interesting, is this related to #3307 by any chance? | rdkit-rdkit | cpp |
@@ -48,7 +48,15 @@ class ApplicationController < ActionController::Base
private
def current_user
- @current_user ||= User.find_or_create_by(email_address: session[:user]['email']) if session[:user] && session[:user]['email']
+ @current_user ||= find_current_user
+ end
+
+ def find_current_user
+ if ENV['FORCE_USER_ID'] && !Rails.env.production?
+ User.find ENV['FORCE_USER_ID']
+ else
+ User.find_or_create_by(email_address: session[:user]['email']) if session[:user] && session[:user]['email']
+ end
end
def sign_in(user) | 1 | class ApplicationController < ActionController::Base
include Pundit # For authorization checks
include ReturnToHelper
include MarkdownHelper
helper ValueHelper
add_template_helper ClientHelper
protect_from_forgery with: :exception
helper_method :current_user, :signed_in?, :return_to
before_action :disable_peek_by_default
protected
# We are overriding this method to account for ExceptionPolicies
def authorize(record, query=nil, user=nil)
user ||= @current_user
policy = ::PolicyFinder.policy_for(user, record)
# use the action as a default permission
query ||= ("can_" + params[:action].to_s + "!").to_sym
unless policy.public_send(query)
# the method might raise its own exception, or it might return a
# boolean. Both systems are accommodated
# will need to replace this when a new version of pundit arrives
      ex = NotAuthorizedError.new("not allowed to #{query} this #{record}")
      ex.query, ex.record, ex.policy = query, record, policy
raise ex
end
end
# Override Pundit to account for proposal gymnastics
def policy(record)
obj = ::PolicyFinder.authorizing_object(record)
super(obj)
end
def admin?
signed_in? && current_user.admin?
end
def peek_enabled?
Rails.env.development? || self.admin?
end
private
def current_user
@current_user ||= User.find_or_create_by(email_address: session[:user]['email']) if session[:user] && session[:user]['email']
end
def sign_in(user)
session[:user] ||= {}
session[:user]['email'] = user.email_address
@current_user = user
end
def sign_out
reset_session
@current_user = nil
end
def signed_in?
!!current_user
end
def authenticate_user!
unless signed_in?
flash[:error] = 'You need to sign in for access to this page.'
redirect_to root_url(return_to: self.make_return_to("Previous", request.fullpath))
end
end
def disable_peek_by_default
if cookies[:peek].nil?
cookies[:peek] = false
end
end
end
| 1 | 14,100 | Minor: how about moving the trailing `if` to an `elsif` above? | 18F-C2 | rb |
@@ -321,6 +321,8 @@ class DBUpgrader {
db.execSQL("UPDATE " + PodDBAdapter.TABLE_NAME_FEED_ITEMS + " SET content_encoded = NULL");
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEEDS
+ " ADD COLUMN " + PodDBAdapter.KEY_FEED_TAGS + " TEXT;");
+ db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEEDS
+ + " ADD COLUMN " + PodDBAdapter.KEY_MINIMAL_DURATION_FILTER + " INTEGER DEFAULT -1");
}
}
| 1 | package de.danoeh.antennapod.core.storage;
import android.content.ContentValues;
import android.database.Cursor;
import android.database.sqlite.SQLiteDatabase;
import android.media.MediaMetadataRetriever;
import android.util.Log;
import de.danoeh.antennapod.model.feed.FeedItem;
import static de.danoeh.antennapod.model.feed.FeedPreferences.SPEED_USE_GLOBAL;
class DBUpgrader {
/**
* Upgrades the given database to a new schema version
*/
static void upgrade(final SQLiteDatabase db, final int oldVersion, final int newVersion) {
if (oldVersion <= 1) {
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEEDS + " ADD COLUMN "
+ PodDBAdapter.KEY_TYPE + " TEXT");
}
if (oldVersion <= 2) {
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_SIMPLECHAPTERS
+ " ADD COLUMN " + PodDBAdapter.KEY_LINK + " TEXT");
}
if (oldVersion <= 3) {
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEED_ITEMS
+ " ADD COLUMN " + PodDBAdapter.KEY_ITEM_IDENTIFIER + " TEXT");
}
if (oldVersion <= 4) {
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEEDS + " ADD COLUMN "
+ PodDBAdapter.KEY_FEED_IDENTIFIER + " TEXT");
}
if (oldVersion <= 5) {
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_DOWNLOAD_LOG
+ " ADD COLUMN " + PodDBAdapter.KEY_REASON_DETAILED + " TEXT");
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_DOWNLOAD_LOG
+ " ADD COLUMN " + PodDBAdapter.KEY_DOWNLOADSTATUS_TITLE + " TEXT");
}
if (oldVersion <= 6) {
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_SIMPLECHAPTERS
+ " ADD COLUMN " + PodDBAdapter.KEY_CHAPTER_TYPE + " INTEGER");
}
if (oldVersion <= 7) {
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEED_MEDIA
+ " ADD COLUMN " + PodDBAdapter.KEY_PLAYBACK_COMPLETION_DATE
+ " INTEGER");
}
if (oldVersion <= 8) {
final int KEY_ID_POSITION = 0;
final int KEY_MEDIA_POSITION = 1;
// Add feeditem column to feedmedia table
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEED_MEDIA
+ " ADD COLUMN " + PodDBAdapter.KEY_FEEDITEM
+ " INTEGER");
Cursor feeditemCursor = db.query(PodDBAdapter.TABLE_NAME_FEED_ITEMS,
new String[]{PodDBAdapter.KEY_ID, PodDBAdapter.KEY_MEDIA}, "? > 0",
new String[]{PodDBAdapter.KEY_MEDIA}, null, null, null);
if (feeditemCursor.moveToFirst()) {
db.beginTransaction();
ContentValues contentValues = new ContentValues();
do {
long mediaId = feeditemCursor.getLong(KEY_MEDIA_POSITION);
contentValues.put(PodDBAdapter.KEY_FEEDITEM, feeditemCursor.getLong(KEY_ID_POSITION));
db.update(PodDBAdapter.TABLE_NAME_FEED_MEDIA, contentValues, PodDBAdapter.KEY_ID + "=?", new String[]{String.valueOf(mediaId)});
contentValues.clear();
} while (feeditemCursor.moveToNext());
db.setTransactionSuccessful();
db.endTransaction();
}
feeditemCursor.close();
}
if (oldVersion <= 9) {
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEEDS
+ " ADD COLUMN " + PodDBAdapter.KEY_AUTO_DOWNLOAD
+ " INTEGER DEFAULT 1");
}
if (oldVersion <= 10) {
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEEDS
+ " ADD COLUMN flattr_status"
+ " INTEGER");
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEED_ITEMS
+ " ADD COLUMN flattr_status"
+ " INTEGER");
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEED_MEDIA
+ " ADD COLUMN " + PodDBAdapter.KEY_PLAYED_DURATION
+ " INTEGER");
}
if (oldVersion <= 11) {
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEEDS
+ " ADD COLUMN " + PodDBAdapter.KEY_USERNAME
+ " TEXT");
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEEDS
+ " ADD COLUMN " + PodDBAdapter.KEY_PASSWORD
+ " TEXT");
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEED_ITEMS
+ " ADD COLUMN image"
+ " INTEGER");
}
if (oldVersion <= 12) {
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEEDS
+ " ADD COLUMN " + PodDBAdapter.KEY_IS_PAGED + " INTEGER DEFAULT 0");
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEEDS
+ " ADD COLUMN " + PodDBAdapter.KEY_NEXT_PAGE_LINK + " TEXT");
}
if (oldVersion <= 13) {
// remove duplicate rows in "Chapters" table that were created because of a bug.
db.execSQL(String.format("DELETE FROM %s WHERE %s NOT IN " +
"(SELECT MIN(%s) as %s FROM %s GROUP BY %s,%s,%s,%s,%s)",
PodDBAdapter.TABLE_NAME_SIMPLECHAPTERS,
PodDBAdapter.KEY_ID,
PodDBAdapter.KEY_ID,
PodDBAdapter.KEY_ID,
PodDBAdapter.TABLE_NAME_SIMPLECHAPTERS,
PodDBAdapter.KEY_TITLE,
PodDBAdapter.KEY_START,
PodDBAdapter.KEY_FEEDITEM,
PodDBAdapter.KEY_LINK,
PodDBAdapter.KEY_CHAPTER_TYPE));
}
if (oldVersion <= 14) {
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEED_ITEMS
+ " ADD COLUMN " + PodDBAdapter.KEY_AUTO_DOWNLOAD + " INTEGER");
db.execSQL("UPDATE " + PodDBAdapter.TABLE_NAME_FEED_ITEMS
+ " SET " + PodDBAdapter.KEY_AUTO_DOWNLOAD + " = "
+ "(SELECT " + PodDBAdapter.KEY_AUTO_DOWNLOAD
+ " FROM " + PodDBAdapter.TABLE_NAME_FEEDS
+ " WHERE " + PodDBAdapter.TABLE_NAME_FEEDS + "." + PodDBAdapter.KEY_ID
+ " = " + PodDBAdapter.TABLE_NAME_FEED_ITEMS + "." + PodDBAdapter.KEY_FEED + ")");
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEEDS
+ " ADD COLUMN " + PodDBAdapter.KEY_HIDE + " TEXT");
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEEDS
+ " ADD COLUMN " + PodDBAdapter.KEY_LAST_UPDATE_FAILED + " INTEGER DEFAULT 0");
// create indexes
db.execSQL(PodDBAdapter.CREATE_INDEX_FEEDITEMS_FEED);
db.execSQL(PodDBAdapter.CREATE_INDEX_FEEDMEDIA_FEEDITEM);
db.execSQL(PodDBAdapter.CREATE_INDEX_QUEUE_FEEDITEM);
db.execSQL(PodDBAdapter.CREATE_INDEX_SIMPLECHAPTERS_FEEDITEM);
}
if (oldVersion <= 15) {
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEED_MEDIA
+ " ADD COLUMN " + PodDBAdapter.KEY_HAS_EMBEDDED_PICTURE + " INTEGER DEFAULT -1");
db.execSQL("UPDATE " + PodDBAdapter.TABLE_NAME_FEED_MEDIA
+ " SET " + PodDBAdapter.KEY_HAS_EMBEDDED_PICTURE + "=0"
+ " WHERE " + PodDBAdapter.KEY_DOWNLOADED + "=0");
Cursor c = db.rawQuery("SELECT " + PodDBAdapter.KEY_FILE_URL
+ " FROM " + PodDBAdapter.TABLE_NAME_FEED_MEDIA
+ " WHERE " + PodDBAdapter.KEY_DOWNLOADED + "=1 "
+ " AND " + PodDBAdapter.KEY_HAS_EMBEDDED_PICTURE + "=-1", null);
if (c.moveToFirst()) {
MediaMetadataRetriever mmr = new MediaMetadataRetriever();
do {
String fileUrl = c.getString(0);
try {
mmr.setDataSource(fileUrl);
byte[] image = mmr.getEmbeddedPicture();
if (image != null) {
db.execSQL("UPDATE " + PodDBAdapter.TABLE_NAME_FEED_MEDIA
+ " SET " + PodDBAdapter.KEY_HAS_EMBEDDED_PICTURE + "=1"
+ " WHERE " + PodDBAdapter.KEY_FILE_URL + "='" + fileUrl + "'");
} else {
db.execSQL("UPDATE " + PodDBAdapter.TABLE_NAME_FEED_MEDIA
+ " SET " + PodDBAdapter.KEY_HAS_EMBEDDED_PICTURE + "=0"
+ " WHERE " + PodDBAdapter.KEY_FILE_URL + "='" + fileUrl + "'");
}
} catch (Exception e) {
e.printStackTrace();
}
} while (c.moveToNext());
}
c.close();
}
if (oldVersion <= 16) {
String selectNew = "SELECT " + PodDBAdapter.TABLE_NAME_FEED_ITEMS + "." + PodDBAdapter.KEY_ID
+ " FROM " + PodDBAdapter.TABLE_NAME_FEED_ITEMS
+ " INNER JOIN " + PodDBAdapter.TABLE_NAME_FEED_MEDIA + " ON "
+ PodDBAdapter.TABLE_NAME_FEED_ITEMS + "." + PodDBAdapter.KEY_ID + "="
+ PodDBAdapter.TABLE_NAME_FEED_MEDIA + "." + PodDBAdapter.KEY_FEEDITEM
+ " LEFT OUTER JOIN " + PodDBAdapter.TABLE_NAME_QUEUE + " ON "
+ PodDBAdapter.TABLE_NAME_FEED_ITEMS + "." + PodDBAdapter.KEY_ID + "="
+ PodDBAdapter.TABLE_NAME_QUEUE + "." + PodDBAdapter.KEY_FEEDITEM
+ " WHERE "
+ PodDBAdapter.TABLE_NAME_FEED_ITEMS + "." + PodDBAdapter.KEY_READ + " = 0 AND " // unplayed
+ PodDBAdapter.TABLE_NAME_FEED_MEDIA + "." + PodDBAdapter.KEY_DOWNLOADED + " = 0 AND " // undownloaded
+ PodDBAdapter.TABLE_NAME_FEED_MEDIA + "." + PodDBAdapter.KEY_POSITION + " = 0 AND " // not partially played
+ PodDBAdapter.TABLE_NAME_QUEUE + "." + PodDBAdapter.KEY_ID + " IS NULL"; // not in queue
String sql = "UPDATE " + PodDBAdapter.TABLE_NAME_FEED_ITEMS
+ " SET " + PodDBAdapter.KEY_READ + "=" + FeedItem.NEW
+ " WHERE " + PodDBAdapter.KEY_ID + " IN (" + selectNew + ")";
Log.d("Migration", "SQL: " + sql);
db.execSQL(sql);
}
if (oldVersion <= 17) {
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEEDS
+ " ADD COLUMN " + PodDBAdapter.KEY_AUTO_DELETE_ACTION + " INTEGER DEFAULT 0");
}
if (oldVersion < 1030005) {
db.execSQL("UPDATE FeedItems SET auto_download=0 WHERE " +
"(read=1 OR id IN (SELECT feeditem FROM FeedMedia WHERE position>0 OR downloaded=1)) " +
"AND id NOT IN (SELECT feeditem FROM Queue)");
}
if (oldVersion < 1040001) {
db.execSQL(PodDBAdapter.CREATE_TABLE_FAVORITES);
}
if (oldVersion < 1040002) {
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEED_MEDIA
+ " ADD COLUMN " + PodDBAdapter.KEY_LAST_PLAYED_TIME + " INTEGER DEFAULT 0");
}
if (oldVersion < 1040013) {
db.execSQL(PodDBAdapter.CREATE_INDEX_FEEDITEMS_PUBDATE);
db.execSQL(PodDBAdapter.CREATE_INDEX_FEEDITEMS_READ);
}
if (oldVersion < 1050003) {
// Migrates feed list filter data
db.beginTransaction();
// Change to intermediate values to avoid overwriting in the following find/replace
db.execSQL("UPDATE " + PodDBAdapter.TABLE_NAME_FEEDS + "\n" +
"SET " + PodDBAdapter.KEY_HIDE + " = replace(" + PodDBAdapter.KEY_HIDE + ", 'unplayed', 'noplay')");
db.execSQL("UPDATE " + PodDBAdapter.TABLE_NAME_FEEDS + "\n" +
"SET " + PodDBAdapter.KEY_HIDE + " = replace(" + PodDBAdapter.KEY_HIDE + ", 'not_queued', 'noqueue')");
db.execSQL("UPDATE " + PodDBAdapter.TABLE_NAME_FEEDS + "\n" +
"SET " + PodDBAdapter.KEY_HIDE + " = replace(" + PodDBAdapter.KEY_HIDE + ", 'not_downloaded', 'nodl')");
// Replace played, queued, and downloaded with their opposites
db.execSQL("UPDATE " + PodDBAdapter.TABLE_NAME_FEEDS + "\n" +
"SET " + PodDBAdapter.KEY_HIDE + " = replace(" + PodDBAdapter.KEY_HIDE + ", 'played', 'unplayed')");
db.execSQL("UPDATE " + PodDBAdapter.TABLE_NAME_FEEDS + "\n" +
"SET " + PodDBAdapter.KEY_HIDE + " = replace(" + PodDBAdapter.KEY_HIDE + ", 'queued', 'not_queued')");
db.execSQL("UPDATE " + PodDBAdapter.TABLE_NAME_FEEDS + "\n" +
"SET " + PodDBAdapter.KEY_HIDE + " = replace(" + PodDBAdapter.KEY_HIDE + ", 'downloaded', 'not_downloaded')");
// Now replace intermediates for unplayed, not queued, etc. with their opposites
db.execSQL("UPDATE " + PodDBAdapter.TABLE_NAME_FEEDS + "\n" +
"SET " + PodDBAdapter.KEY_HIDE + " = replace(" + PodDBAdapter.KEY_HIDE + ", 'noplay', 'played')");
db.execSQL("UPDATE " + PodDBAdapter.TABLE_NAME_FEEDS + "\n" +
"SET " + PodDBAdapter.KEY_HIDE + " = replace(" + PodDBAdapter.KEY_HIDE + ", 'noqueue', 'queued')");
db.execSQL("UPDATE " + PodDBAdapter.TABLE_NAME_FEEDS + "\n" +
"SET " + PodDBAdapter.KEY_HIDE + " = replace(" + PodDBAdapter.KEY_HIDE + ", 'nodl', 'downloaded')");
// Paused doesn't have an opposite, so unplayed is the next best option
db.execSQL("UPDATE " + PodDBAdapter.TABLE_NAME_FEEDS + "\n" +
"SET " + PodDBAdapter.KEY_HIDE + " = replace(" + PodDBAdapter.KEY_HIDE + ", 'paused', 'unplayed')");
db.setTransactionSuccessful();
db.endTransaction();
// and now get ready for autodownload filters
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEEDS
+ " ADD COLUMN " + PodDBAdapter.KEY_INCLUDE_FILTER + " TEXT DEFAULT ''");
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEEDS
+ " ADD COLUMN " + PodDBAdapter.KEY_EXCLUDE_FILTER + " TEXT DEFAULT ''");
// and now auto refresh
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEEDS
+ " ADD COLUMN " + PodDBAdapter.KEY_KEEP_UPDATED + " INTEGER DEFAULT 1");
}
if (oldVersion < 1050004) {
// prevent old timestamps from being misinterpreted as ETags
db.execSQL("UPDATE " + PodDBAdapter.TABLE_NAME_FEEDS
+ " SET " + PodDBAdapter.KEY_LASTUPDATE + "=NULL");
}
if (oldVersion < 1060200) {
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEEDS
+ " ADD COLUMN " + PodDBAdapter.KEY_CUSTOM_TITLE + " TEXT");
}
if (oldVersion < 1060596) {
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEEDS
+ " ADD COLUMN " + PodDBAdapter.KEY_IMAGE_URL + " TEXT");
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEED_ITEMS
+ " ADD COLUMN " + PodDBAdapter.KEY_IMAGE_URL + " TEXT");
db.execSQL("UPDATE " + PodDBAdapter.TABLE_NAME_FEED_ITEMS + " SET " + PodDBAdapter.KEY_IMAGE_URL + " = ("
+ " SELECT " + PodDBAdapter.KEY_DOWNLOAD_URL
+ " FROM " + PodDBAdapter.TABLE_NAME_FEED_IMAGES
+ " WHERE " + PodDBAdapter.TABLE_NAME_FEED_IMAGES + "." + PodDBAdapter.KEY_ID
+ " = " + PodDBAdapter.TABLE_NAME_FEED_ITEMS + ".image)");
db.execSQL("UPDATE " + PodDBAdapter.TABLE_NAME_FEEDS + " SET " + PodDBAdapter.KEY_IMAGE_URL + " = ("
+ " SELECT " + PodDBAdapter.KEY_DOWNLOAD_URL
+ " FROM " + PodDBAdapter.TABLE_NAME_FEED_IMAGES
+ " WHERE " + PodDBAdapter.TABLE_NAME_FEED_IMAGES + "." + PodDBAdapter.KEY_ID
+ " = " + PodDBAdapter.TABLE_NAME_FEEDS + ".image)");
db.execSQL("DROP TABLE " + PodDBAdapter.TABLE_NAME_FEED_IMAGES);
}
if (oldVersion < 1070400) {
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEEDS
+ " ADD COLUMN " + PodDBAdapter.KEY_FEED_PLAYBACK_SPEED + " REAL DEFAULT " + SPEED_USE_GLOBAL);
}
if (oldVersion < 1070401) {
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEEDS
+ " ADD COLUMN " + PodDBAdapter.KEY_SORT_ORDER + " TEXT");
}
if (oldVersion < 1090000) {
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEEDS
+ " ADD COLUMN " + PodDBAdapter.KEY_FEED_VOLUME_ADAPTION + " INTEGER DEFAULT 0");
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_SIMPLECHAPTERS
+ " ADD COLUMN " + PodDBAdapter.KEY_IMAGE_URL + " TEXT DEFAULT NULL");
}
if (oldVersion < 1090001) {
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEEDS
+ " ADD COLUMN " + PodDBAdapter.KEY_FEED_SKIP_INTRO + " INTEGER DEFAULT 0;");
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEEDS
+ " ADD COLUMN " + PodDBAdapter.KEY_FEED_SKIP_ENDING + " INTEGER DEFAULT 0;");
}
if (oldVersion < 2020000) {
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEEDS
+ " ADD COLUMN " + PodDBAdapter.KEY_EPISODE_NOTIFICATION + " INTEGER DEFAULT 0;");
}
if (oldVersion < 2030000) {
db.execSQL("UPDATE " + PodDBAdapter.TABLE_NAME_FEED_ITEMS
+ " SET " + PodDBAdapter.KEY_DESCRIPTION + " = content_encoded, content_encoded = NULL "
+ "WHERE length(" + PodDBAdapter.KEY_DESCRIPTION + ") < length(content_encoded)");
db.execSQL("UPDATE " + PodDBAdapter.TABLE_NAME_FEED_ITEMS + " SET content_encoded = NULL");
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEEDS
+ " ADD COLUMN " + PodDBAdapter.KEY_FEED_TAGS + " TEXT;");
}
}
}
| 1 | 20,843 | That's only executed when users switch from 2.2 to 2.3. Please create a new block with code `2050000` for the next release :) Please also adapt the version number in PodDbAdapter | AntennaPod-AntennaPod | java |
@@ -351,6 +351,16 @@ func (c *client) sendConnect(tlsRequired bool) {
// Process the info message if we are a route.
func (c *client) processRouteInfo(info *Info) {
+ // We may need to update route permissions and will need the account
+ // sublist. Since getting the account requires server lock, do the
+ // lookup now.
+
+ // FIXME(dlc) - Add account scoping.
+ gacc := c.srv.globalAccount()
+ gacc.mu.RLock()
+ sl := gacc.sl
+ gacc.mu.RUnlock()
+
c.mu.Lock()
// Connection can be closed at any time (by auth timeout, etc).
// Does not make sense to continue here if connection is gone. | 1 | // Copyright 2013-2018 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"bytes"
"crypto/tls"
"encoding/json"
"fmt"
"math/rand"
"net"
"net/url"
"runtime"
"strconv"
"strings"
"sync/atomic"
"time"
)
// RouteType designates the router type
type RouteType int
// Type of Route
const (
// This route we learned from speaking to other routes.
Implicit RouteType = iota
// This route was explicitly configured.
Explicit
)
const (
// RouteProtoZero is the original Route protocol from 2009.
// http://nats.io/documentation/internals/nats-protocol/
RouteProtoZero = iota
// RouteProtoInfo signals a route can receive more than the original INFO block.
// This can be used to update remote cluster permissions, etc...
RouteProtoInfo
// RouteProtoV2 is the new route/cluster protocol that provides account support.
RouteProtoV2
)
// Include the space for the proto
var (
aSubBytes = []byte{'A', '+', ' '}
aUnsubBytes = []byte{'A', '-', ' '}
rSubBytes = []byte{'R', 'S', '+', ' '}
rUnsubBytes = []byte{'R', 'S', '-', ' '}
)
// Used by tests
var testRouteProto = RouteProtoV2
type route struct {
remoteID string
didSolicit bool
retry bool
routeType RouteType
url *url.URL
authRequired bool
tlsRequired bool
connectURLs []string
replySubs map[*subscription]*time.Timer
gatewayURL string
}
type connectInfo struct {
Echo bool `json:"echo"`
Verbose bool `json:"verbose"`
Pedantic bool `json:"pedantic"`
User string `json:"user,omitempty"`
Pass string `json:"pass,omitempty"`
TLS bool `json:"tls_required"`
Name string `json:"name"`
Gateway string `json:"gateway,omitempty"`
}
// Route protocol constants
const (
ConProto = "CONNECT %s" + _CRLF_
InfoProto = "INFO %s" + _CRLF_
)
// This will add a timer to watch over remote reply subjects in case
// they fail to receive a response. The duration will be taken from the
// accounts map timeout to match.
// Lock should be held upon entering.
func (c *client) addReplySubTimeout(acc *Account, sub *subscription, d time.Duration) {
var prs *map[*subscription]*time.Timer
switch c.kind {
case ROUTER:
prs = &c.route.replySubs
case GATEWAY:
prs = &c.gw.replySubs
default:
// TODO(ik): return or panic to show that there is a bug?
}
if *prs == nil {
*prs = make(map[*subscription]*time.Timer)
}
rs := *prs
rs[sub] = time.AfterFunc(d, func() {
c.mu.Lock()
delete(rs, sub)
sub.max = 0
c.mu.Unlock()
c.unsubscribe(acc, sub, true)
})
}
// removeReplySub is called when we trip the max on remoteReply subs.
func (c *client) removeReplySub(sub *subscription) {
if sub == nil {
return
}
// Lookup the account based on sub.sid.
if i := bytes.Index(sub.sid, []byte(" ")); i > 0 {
// First part of SID for route/gateway is account name.
if acc, _ := c.srv.LookupAccount(string(sub.sid[:i])); acc != nil {
acc.sl.Remove(sub)
}
c.mu.Lock()
c.removeReplySubTimeout(sub)
delete(c.subs, string(sub.sid))
c.mu.Unlock()
}
}
// removeReplySubTimeout will remove a timer if it exists.
// Lock should be held upon entering.
func (c *client) removeReplySubTimeout(sub *subscription) {
// Remove any reply sub timer if it exists.
var rs map[*subscription]*time.Timer
switch c.kind {
case ROUTER:
rs = c.route.replySubs
case GATEWAY:
rs = c.gw.replySubs
default:
return
}
if t, ok := rs[sub]; ok {
t.Stop()
delete(rs, sub)
}
}
func (c *client) processAccountSub(arg []byte) error {
c.traceInOp("A+", arg)
accName := string(arg)
if c.kind == GATEWAY {
return c.processGatewayAccountSub(accName)
}
return nil
}
func (c *client) processAccountUnsub(arg []byte) {
c.traceInOp("A-", arg)
accName := string(arg)
if c.kind == GATEWAY {
c.processGatewayAccountUnsub(accName)
}
}
// Process an inbound RMSG specification from the remote route.
func (c *client) processRoutedMsgArgs(trace bool, arg []byte) error {
if trace {
c.traceInOp("RMSG", arg)
}
// Unroll splitArgs to avoid runtime/heap issues
a := [MAX_MSG_ARGS][]byte{}
args := a[:0]
start := -1
for i, b := range arg {
switch b {
case ' ', '\t', '\r', '\n':
if start >= 0 {
args = append(args, arg[start:i])
start = -1
}
default:
if start < 0 {
start = i
}
}
}
if start >= 0 {
args = append(args, arg[start:])
}
c.pa.arg = arg
switch len(args) {
case 0, 1, 2:
return fmt.Errorf("processRoutedMsgArgs Parse Error: '%s'", args)
case 3:
c.pa.reply = nil
c.pa.queues = nil
c.pa.szb = args[2]
c.pa.size = parseSize(args[2])
case 4:
c.pa.reply = args[2]
c.pa.queues = nil
c.pa.szb = args[3]
c.pa.size = parseSize(args[3])
default:
// args[2] is our reply indicator. Should be + or | normally.
if len(args[2]) != 1 {
return fmt.Errorf("processRoutedMsgArgs Bad or Missing Reply Indicator: '%s'", args[2])
}
switch args[2][0] {
case '+':
c.pa.reply = args[3]
case '|':
c.pa.reply = nil
default:
return fmt.Errorf("processRoutedMsgArgs Bad or Missing Reply Indicator: '%s'", args[2])
}
// Grab size.
c.pa.szb = args[len(args)-1]
c.pa.size = parseSize(c.pa.szb)
// Grab queue names.
if c.pa.reply != nil {
c.pa.queues = args[4 : len(args)-1]
} else {
c.pa.queues = args[3 : len(args)-1]
}
}
if c.pa.size < 0 {
return fmt.Errorf("processRoutedMsgArgs Bad or Missing Size: '%s'", args)
}
// Common ones processed after check for arg length
c.pa.account = args[0]
c.pa.subject = args[1]
c.pa.pacache = arg[:len(args[0])+len(args[1])+1]
return nil
}
// processInboundRouteMsg is called to process an inbound msg from a route.
func (c *client) processInboundRoutedMsg(msg []byte) {
// Update statistics
c.in.msgs++
// The msg includes the CR_LF, so pull back out for accounting.
c.in.bytes += len(msg) - LEN_CR_LF
if c.trace {
c.traceMsg(msg)
}
if c.opts.Verbose {
c.sendOK()
}
// Mostly under testing scenarios.
if c.srv == nil {
return
}
acc, r := c.getAccAndResultFromCache()
if acc == nil {
c.Debugf("Unknown account %q for routed message on subject: %q", c.pa.account, c.pa.subject)
return
}
// Check to see if we need to map/route to another account.
if acc.imports.services != nil {
c.checkForImportServices(acc, msg)
}
// Check for no interest, short circuit if so.
// This is the fanout scale.
if len(r.psubs)+len(r.qsubs) == 0 {
return
}
// Check to see if we have a routed message with a service reply.
if isServiceReply(c.pa.reply) && acc != nil {
// Need to add a sub here for local interest to send a response back
// to the originating server/requestor where it will be re-mapped.
sid := make([]byte, 0, len(acc.Name)+len(c.pa.reply)+1)
sid = append(sid, acc.Name...)
sid = append(sid, ' ')
sid = append(sid, c.pa.reply...)
// Copy off the reply since otherwise we are referencing a buffer that will be reused.
reply := make([]byte, len(c.pa.reply))
copy(reply, c.pa.reply)
sub := &subscription{client: c, subject: reply, sid: sid, max: 1}
if err := acc.sl.Insert(sub); err != nil {
c.Errorf("Could not insert subscription: %v", err)
} else {
ttl := acc.AutoExpireTTL()
c.mu.Lock()
c.subs[string(sid)] = sub
c.addReplySubTimeout(acc, sub, ttl)
c.mu.Unlock()
}
}
c.processMsgResults(acc, r, msg, c.pa.subject, c.pa.reply, nil)
}
// Helper function for routes and gateways to create qfilters needed for
// converted subs from imports, etc.
func (c *client) makeQFilter(qsubs [][]*subscription) {
qs := make([][]byte, 0, len(qsubs))
for _, qsub := range qsubs {
if len(qsub) > 0 {
qs = append(qs, qsub[0].queue)
}
}
c.pa.queues = qs
}
// Lock should be held entering here.
func (c *client) sendConnect(tlsRequired bool) {
var user, pass string
if userInfo := c.route.url.User; userInfo != nil {
user = userInfo.Username()
pass, _ = userInfo.Password()
}
cinfo := connectInfo{
Echo: true,
Verbose: false,
Pedantic: false,
User: user,
Pass: pass,
TLS: tlsRequired,
Name: c.srv.info.ID,
}
b, err := json.Marshal(cinfo)
if err != nil {
c.Errorf("Error marshaling CONNECT to route: %v\n", err)
c.closeConnection(ProtocolViolation)
return
}
c.sendProto([]byte(fmt.Sprintf(ConProto, b)), true)
}
// Process the info message if we are a route.
func (c *client) processRouteInfo(info *Info) {
c.mu.Lock()
// Connection can be closed at any time (by auth timeout, etc).
// Does not make sense to continue here if connection is gone.
if c.route == nil || c.nc == nil {
c.mu.Unlock()
return
}
s := c.srv
remoteID := c.route.remoteID
// Check if this is an INFO for gateways...
if info.Gateway != "" {
c.mu.Unlock()
// If this server has no gateway configured, report error and return.
if !s.gateway.enabled {
// FIXME: Should this be a Fatalf()?
s.Errorf("Received information about gateway %q from %s, but gateway is not configured",
info.Gateway, remoteID)
return
}
s.processGatewayInfoFromRoute(info, remoteID, c)
return
}
// We receive an INFO from a server that informs us about another server,
// so the info.ID in the INFO protocol does not match the ID of this route.
if remoteID != "" && remoteID != info.ID {
c.mu.Unlock()
// Process this implicit route. We will check that it is not an explicit
// route and/or that it has not been connected already.
s.processImplicitRoute(info)
return
}
// Need to set this for the detection of the route to self to work
// in closeConnection().
c.route.remoteID = info.ID
// Get the route's proto version
c.opts.Protocol = info.Proto
// Detect route to self.
if c.route.remoteID == s.info.ID {
c.mu.Unlock()
c.closeConnection(DuplicateRoute)
return
}
// Copy over important information.
c.route.authRequired = info.AuthRequired
c.route.tlsRequired = info.TLSRequired
c.route.gatewayURL = info.GatewayURL
// If this is an update due to config reload on the remote server,
// need to possibly send local subs to the remote server.
if c.flags.isSet(infoReceived) {
s.updateRemoteRoutePerms(c, info)
c.mu.Unlock()
return
}
// Copy over permissions as well.
c.opts.Import = info.Import
c.opts.Export = info.Export
// If we do not know this route's URL, construct one on the fly
// from the information provided.
if c.route.url == nil {
// Add in the URL from host and port
hp := net.JoinHostPort(info.Host, strconv.Itoa(info.Port))
url, err := url.Parse(fmt.Sprintf("nats-route://%s/", hp))
if err != nil {
c.Errorf("Error parsing URL from INFO: %v\n", err)
c.mu.Unlock()
c.closeConnection(ParseError)
return
}
c.route.url = url
}
// Mark that the INFO protocol has been received. Will allow
// to detect INFO updates.
c.flags.set(infoReceived)
// Check to see if we have this remote already registered.
// This can happen when both servers have routes to each other.
c.mu.Unlock()
if added, sendInfo := s.addRoute(c, info); added {
c.Debugf("Registering remote route %q", info.ID)
// Send our subs to the other side.
s.sendSubsToRoute(c)
// Send info about the known gateways to this route.
s.sendGatewayConfigsToRoute(c)
// sendInfo will be false if the route that we just accepted
// is the only route there is.
if sendInfo {
// The incoming INFO from the route will have IP set
// if it has Cluster.Advertise. In that case, use that,
// otherwise construct it from the remote TCP address.
if info.IP == "" {
// Need to get the remote IP address.
c.mu.Lock()
switch conn := c.nc.(type) {
case *net.TCPConn, *tls.Conn:
addr := conn.RemoteAddr().(*net.TCPAddr)
info.IP = fmt.Sprintf("nats-route://%s/", net.JoinHostPort(addr.IP.String(),
strconv.Itoa(info.Port)))
default:
info.IP = c.route.url.String()
}
c.mu.Unlock()
}
// Now let the known servers know about this new route
s.forwardNewRouteInfoToKnownServers(info)
}
// Unless disabled, possibly update the server's INFO protocol
// and send to clients that know how to handle async INFOs.
if !s.getOpts().Cluster.NoAdvertise {
s.addClientConnectURLsAndSendINFOToClients(info.ClientConnectURLs)
}
} else {
c.Debugf("Detected duplicate remote route %q", info.ID)
c.closeConnection(DuplicateRoute)
}
}
// Possibly sends local subscriptions interest to this route
// based on changes in the remote's Export permissions.
// Lock assumed held on entry
func (s *Server) updateRemoteRoutePerms(route *client, info *Info) {
// Interested only in Export permissions for the remote server.
// Create "fake" clients that we will use to check permissions
// using the old permissions...
oldPerms := &RoutePermissions{Export: route.opts.Export}
oldPermsTester := &client{}
oldPermsTester.setRoutePermissions(oldPerms)
// and the new ones.
newPerms := &RoutePermissions{Export: info.Export}
newPermsTester := &client{}
newPermsTester.setRoutePermissions(newPerms)
route.opts.Import = info.Import
route.opts.Export = info.Export
var (
_localSubs [4096]*subscription
localSubs = _localSubs[:0]
)
// FIXME(dlc) - Add account scoping.
gacc := s.globalAccount()
gacc.sl.localSubs(&localSubs)
route.sendRouteSubProtos(localSubs, false, func(sub *subscription) bool {
subj := string(sub.subject)
// If the remote can now export but could not before, and this server can import this
// subject, then send SUB protocol.
if newPermsTester.canExport(subj) && !oldPermsTester.canExport(subj) && route.canImport(subj) {
return true
}
return false
})
}
// sendAsyncInfoToClients sends an INFO protocol to all
// connected clients that accept async INFO updates.
// The server lock is held on entry.
func (s *Server) sendAsyncInfoToClients() {
// If there are no clients supporting async INFO protocols, we are done.
// Also don't send if we are shutting down...
if s.cproto == 0 || s.shutdown {
return
}
for _, c := range s.clients {
c.mu.Lock()
// Here, we are going to send only to the clients that are fully
// registered (server has received CONNECT and first PING). For
// clients that are not at this stage, this will happen in the
// processing of the first PING (see client.processPing)
if c.opts.Protocol >= ClientProtoInfo && c.flags.isSet(firstPongSent) {
// sendInfo takes care of checking if the connection is still
// valid or not, so don't duplicate tests here.
c.sendInfo(c.generateClientInfoJSON(s.copyInfo()))
}
c.mu.Unlock()
}
}
// This will process implicit route information received from another server.
// We will check to see if we have configured or are already connected,
// and if so we will ignore. Otherwise we will attempt to connect.
func (s *Server) processImplicitRoute(info *Info) {
remoteID := info.ID
s.mu.Lock()
defer s.mu.Unlock()
// Don't connect to ourself
if remoteID == s.info.ID {
return
}
// Check if this route already exists
if _, exists := s.remotes[remoteID]; exists {
return
}
// Check if we have this route as a configured route
if s.hasThisRouteConfigured(info) {
return
}
// Initiate the connection, using info.IP instead of info.URL here...
r, err := url.Parse(info.IP)
if err != nil {
s.Errorf("Error parsing URL from INFO: %v\n", err)
return
}
// Snapshot server options.
opts := s.getOpts()
if info.AuthRequired {
r.User = url.UserPassword(opts.Cluster.Username, opts.Cluster.Password)
}
s.startGoRoutine(func() { s.connectToRoute(r, false) })
}
// hasThisRouteConfigured returns true if info.Host:info.Port is present
// in the server's opts.Routes, false otherwise.
// Server lock is assumed to be held by caller.
func (s *Server) hasThisRouteConfigured(info *Info) bool {
urlToCheckExplicit := strings.ToLower(net.JoinHostPort(info.Host, strconv.Itoa(info.Port)))
for _, ri := range s.getOpts().Routes {
if strings.ToLower(ri.Host) == urlToCheckExplicit {
return true
}
}
return false
}
// forwardNewRouteInfoToKnownServers sends the INFO protocol of the new route
// to all routes known by this server. In turn, each server will contact this
// new route.
func (s *Server) forwardNewRouteInfoToKnownServers(info *Info) {
s.mu.Lock()
defer s.mu.Unlock()
b, _ := json.Marshal(info)
infoJSON := []byte(fmt.Sprintf(InfoProto, b))
for _, r := range s.routes {
r.mu.Lock()
if r.route.remoteID != info.ID {
r.sendInfo(infoJSON)
}
r.mu.Unlock()
}
}
// canImport is whether or not we will send a SUB for interest to the other side.
// This is for ROUTER connections only.
// Lock is held on entry.
func (c *client) canImport(subject string) bool {
// Use pubAllowed() since this checks Publish permissions which
// is what Import maps to.
return c.pubAllowed(subject)
}
// canExport is whether or not we will accept a SUB from the remote for a given subject.
// This is for ROUTER connections only.
// Lock is held on entry
func (c *client) canExport(subject string) bool {
// Use canSubscribe() since this checks Subscribe permissions which
// is what Export maps to.
return c.canSubscribe(subject)
}
// Initialize or reset cluster's permissions.
// This is for ROUTER connections only.
// Client lock is held on entry
func (c *client) setRoutePermissions(perms *RoutePermissions) {
// Reset if some were set
if perms == nil {
c.perms = nil
c.mperms = nil
return
}
// Convert route permissions to user permissions.
// The Import permission is mapped to Publish
// and Export permission is mapped to Subscribe.
// For meaning of Import/Export, see canImport and canExport.
p := &Permissions{
Publish: perms.Import,
Subscribe: perms.Export,
}
c.setPermissions(p)
}
// Type used to hold a list of subs on a per account basis.
type asubs struct {
acc *Account
subs []*subscription
}
// removeRemoteSubs will walk the subs and remove them from the appropriate account.
func (c *client) removeRemoteSubs() {
// We need to gather these on a per account basis.
// FIXME(dlc) - We should be smarter about this..
as := map[string]*asubs{}
c.mu.Lock()
srv := c.srv
subs := c.subs
c.subs = make(map[string]*subscription)
c.mu.Unlock()
for key, sub := range subs {
c.mu.Lock()
sub.max = 0
c.mu.Unlock()
// Grab the account
accountName := strings.Fields(key)[0]
ase := as[accountName]
if ase == nil {
acc, _ := srv.LookupAccount(accountName)
if acc == nil {
continue
}
as[accountName] = &asubs{acc: acc, subs: []*subscription{sub}}
} else {
ase.subs = append(ase.subs, sub)
}
if srv.gateway.enabled {
srv.gatewayUpdateSubInterest(accountName, sub, -1)
}
}
// Now remove the subs by batch for each account sublist.
for _, ase := range as {
c.Debugf("Removing %d subscriptions for account %q", len(ase.subs), ase.acc.Name)
ase.acc.sl.RemoveBatch(ase.subs)
}
}
func (c *client) parseUnsubProto(arg []byte) (string, []byte, []byte, error) {
c.traceInOp("RS-", arg)
// Indicate any activity, whether pub, sub, or unsub.
c.in.subs++
args := splitArg(arg)
var (
accountName string
subject []byte
queue []byte
)
switch len(args) {
case 2:
case 3:
queue = args[2]
default:
return "", nil, nil, fmt.Errorf("Parse Error: '%s'", arg)
}
subject = args[1]
accountName = string(args[0])
return accountName, subject, queue, nil
}
// Indicates no more interest in the given account/subject for the remote side.
func (c *client) processRemoteUnsub(arg []byte) (err error) {
srv := c.srv
if srv == nil {
return nil
}
accountName, subject, _, err := c.parseUnsubProto(arg)
if err != nil {
return fmt.Errorf("processRemoteUnsub %s", err.Error())
}
// Lookup the account
acc, _ := c.srv.LookupAccount(accountName)
if acc == nil {
c.Debugf("Unknown account %q for subject %q", accountName, subject)
// Mark this account as not interested since we received an RS- and we
// do not have any record of it.
return nil
}
c.mu.Lock()
if c.nc == nil {
c.mu.Unlock()
return nil
}
updateGWs := false
// We store local subs by account and subject and optionally queue name.
// RS- will have the arg exactly as the key.
key := string(arg)
sub, ok := c.subs[key]
if ok {
delete(c.subs, key)
acc.sl.Remove(sub)
c.removeReplySubTimeout(sub)
updateGWs = srv.gateway.enabled
}
c.mu.Unlock()
if updateGWs {
srv.gatewayUpdateSubInterest(accountName, sub, -1)
}
if c.opts.Verbose {
c.sendOK()
}
return nil
}
func (c *client) processRemoteSub(argo []byte) (err error) {
c.traceInOp("RS+", argo)
// Indicate activity.
c.in.subs++
srv := c.srv
if srv == nil {
return nil
}
// Copy so we do not reference a potentially large buffer
arg := make([]byte, len(argo))
copy(arg, argo)
args := splitArg(arg)
sub := &subscription{client: c}
switch len(args) {
case 2:
sub.queue = nil
case 4:
sub.queue = args[2]
sub.qw = int32(parseSize(args[3]))
default:
return fmt.Errorf("processRemoteSub Parse Error: '%s'", arg)
}
sub.subject = args[1]
// Lookup the account
// FIXME(dlc) - This may start having lots of contention?
accountName := string(args[0])
acc, _ := c.srv.LookupAccount(accountName)
if acc == nil {
if !srv.NewAccountsAllowed() {
c.Debugf("Unknown account %q for subject %q", accountName, sub.subject)
return nil
}
acc, _ = srv.LookupOrRegisterAccount(accountName)
}
c.mu.Lock()
if c.nc == nil {
c.mu.Unlock()
return nil
}
// Check permissions if applicable.
if !c.canExport(string(sub.subject)) {
c.mu.Unlock()
c.Debugf("Can not export %q, ignoring remote subscription request", sub.subject)
return nil
}
// Check if we have a maximum on the number of subscriptions.
if c.subsExceeded() {
c.mu.Unlock()
c.maxSubsExceeded()
return nil
}
// We store local subs by account and subject and optionally queue name.
// If we have a queue it will have a trailing weight which we do not want.
if sub.queue != nil {
sub.sid = arg[:len(arg)-len(args[3])-1]
} else {
sub.sid = arg
}
key := string(sub.sid)
osub := c.subs[key]
updateGWs := false
if osub == nil {
c.subs[string(key)] = sub
// Now place into the account sl.
if err = acc.sl.Insert(sub); err != nil {
delete(c.subs, key)
c.mu.Unlock()
c.Errorf("Could not insert subscription: %v", err)
c.sendErr("Invalid Subscription")
return nil
}
updateGWs = srv.gateway.enabled
} else if sub.queue != nil {
// For a queue we need to update the weight.
atomic.StoreInt32(&osub.qw, sub.qw)
acc.sl.UpdateRemoteQSub(osub)
}
c.mu.Unlock()
if c.opts.Verbose {
c.sendOK()
}
if updateGWs {
srv.gatewayUpdateSubInterest(acc.Name, sub, 1)
}
return nil
}
// sendSubsToRoute will send over our subject interest to
// the remote side. For each account we will send the
// complete interest for all subjects, both normal as a binary
// and queue group weights.
func (s *Server) sendSubsToRoute(route *client) {
// Send over our account subscriptions.
var _accs [4096]*Account
accs := _accs[:0]
// copy accounts into array first
s.mu.Lock()
for _, a := range s.accounts {
accs = append(accs, a)
}
s.mu.Unlock()
var raw [4096]*subscription
var closed bool
route.mu.Lock()
for _, a := range accs {
subs := raw[:0]
a.mu.RLock()
for key, rme := range a.rm {
// FIXME(dlc) - Just pass rme around.
// Construct a sub on the fly. We need to place
// a client (or im) to properly set the account.
var qn []byte
subEnd := len(key)
if qi := rme.qi; qi > 0 {
subEnd = int(qi) - 1
qn = []byte(key[qi:])
}
c := a.randomClient()
if c == nil {
continue
}
sub := &subscription{client: c, subject: []byte(key[:subEnd]), queue: qn, qw: rme.n}
subs = append(subs, sub)
}
a.mu.RUnlock()
closed = route.sendRouteSubProtos(subs, false, func(sub *subscription) bool {
return route.canImport(string(sub.subject))
})
if closed {
route.mu.Unlock()
return
}
}
route.mu.Unlock()
if !closed {
route.Debugf("Sent local subscriptions to route")
}
}
// Sends SUBs protocols for the given subscriptions. If a filter is specified, it is
// invoked for each subscription. If the filter returns false, the subscription is skipped.
// This function may release the route's lock due to flushing of outbound data. A boolean
// is returned to indicate if the connection has been closed during this call.
// Lock is held on entry.
func (c *client) sendRouteSubProtos(subs []*subscription, trace bool, filter func(sub *subscription) bool) bool {
return c.sendRouteSubOrUnSubProtos(subs, true, trace, filter)
}
// Sends UNSUBs protocols for the given subscriptions. If a filter is specified, it is
// invoked for each subscription. If the filter returns false, the subscription is skipped.
// This function may release the route's lock due to flushing of outbound data. A boolean
// is returned to indicate if the connection has been closed during this call.
// Lock is held on entry.
func (c *client) sendRouteUnSubProtos(subs []*subscription, trace bool, filter func(sub *subscription) bool) bool {
return c.sendRouteSubOrUnSubProtos(subs, false, trace, filter)
}
// Low-level function that sends RS+ or RS- protocols for the given subscriptions.
// Use sendRouteSubProtos or sendRouteUnSubProtos instead for clarity.
// Lock is held on entry.
func (c *client) sendRouteSubOrUnSubProtos(subs []*subscription, isSubProto, trace bool, filter func(sub *subscription) bool) bool {
const staticBufSize = maxBufSize * 2
var (
_buf [staticBufSize]byte // array on stack
buf = _buf[:0] // our buffer will initially point to the stack buffer
mbs = staticBufSize // max size of the buffer
mpMax = int(c.out.mp / 2) // 50% of max_pending
closed bool
)
// We need to make sure that we stay below the user defined max pending bytes.
if mbs > mpMax {
mbs = mpMax
}
for _, sub := range subs {
if filter != nil && !filter(sub) {
continue
}
// Determine the account. If sub has an ImportMap entry, use that, otherwise scoped to
// client. Default to global if all else fails.
var accName string
if sub.im != nil {
accName = sub.im.acc.Name
} else if sub.client != nil && sub.client.acc != nil {
accName = sub.client.acc.Name
} else {
c.Debugf("Falling back to default account for sending subs")
accName = globalAccountName
}
// Check if proto is going to fit.
curSize := len(buf)
// "RS+/- " + account + " " + subject + " " [+ queue + " " + weight] + CRLF
curSize += 4 + len(accName) + 1 + len(sub.subject) + 1 + 2
if len(sub.queue) > 0 {
curSize += len(sub.queue)
if isSubProto {
// Estimate weightlen in 1000s
curSize += 1 + 4
}
}
if curSize >= mbs {
if c.queueOutbound(buf) {
// Need to allocate new array
buf = make([]byte, 0, mbs)
} else {
// We can reuse previous buffer
buf = buf[:0]
}
// Update last activity because flushOutbound() will release
// the lock, which could cause pingTimer to think that this
// connection is stale otherwise.
c.last = time.Now()
c.flushOutbound()
if closed = c.flags.isSet(clearConnection); closed {
break
}
}
as := len(buf)
if isSubProto {
buf = append(buf, rSubBytes...)
} else {
buf = append(buf, rUnsubBytes...)
}
buf = append(buf, accName...)
buf = append(buf, ' ')
buf = append(buf, sub.subject...)
if len(sub.queue) > 0 {
buf = append(buf, ' ')
buf = append(buf, sub.queue...)
// Send our weight if we are a sub proto
if isSubProto {
buf = append(buf, ' ')
var b [12]byte
var i = len(b)
for l := sub.qw; l > 0; l /= 10 {
i--
b[i] = digits[l%10]
}
buf = append(buf, b[i:]...)
}
}
if trace {
c.traceOutOp("", buf[as:])
}
buf = append(buf, CR_LF...)
}
if !closed && len(buf) > 0 {
c.queueOutbound(buf)
c.flushOutbound()
closed = c.flags.isSet(clearConnection)
}
return closed
}
func (s *Server) createRoute(conn net.Conn, rURL *url.URL) *client {
// Snapshot server options.
opts := s.getOpts()
didSolicit := rURL != nil
r := &route{didSolicit: didSolicit}
for _, route := range opts.Routes {
if rURL != nil && (strings.ToLower(rURL.Host) == strings.ToLower(route.Host)) {
r.routeType = Explicit
}
}
c := &client{srv: s, nc: conn, opts: clientOpts{}, kind: ROUTER, msubs: -1, mpay: -1, route: r}
// Grab server variables
s.mu.Lock()
s.generateRouteInfoJSON()
infoJSON := s.routeInfoJSON
authRequired := s.routeInfo.AuthRequired
tlsRequired := s.routeInfo.TLSRequired
s.mu.Unlock()
// Grab lock
c.mu.Lock()
// Initialize
c.initClient()
// Initialize the per-account cache.
c.in.pacache = make(map[string]*perAccountCache, maxPerAccountCacheSize)
if didSolicit {
// Do this before the TLS code, otherwise, in case of failure
// and if route is explicit, it would try to reconnect to 'nil'...
r.url = rURL
// Set permissions associated with the route user (if applicable).
// No lock needed since we are already under client lock.
c.setRoutePermissions(opts.Cluster.Permissions)
}
// Check for TLS
if tlsRequired {
// Copy off the config to add in ServerName if we need to.
tlsConfig := opts.Cluster.TLSConfig.Clone()
// If we solicited, we will act like the client, otherwise the server.
if didSolicit {
c.Debugf("Starting TLS route client handshake")
// Specify the ServerName we are expecting.
host, _, _ := net.SplitHostPort(rURL.Host)
tlsConfig.ServerName = host
c.nc = tls.Client(c.nc, tlsConfig)
} else {
c.Debugf("Starting TLS route server handshake")
c.nc = tls.Server(c.nc, tlsConfig)
}
conn := c.nc.(*tls.Conn)
// Setup the timeout
ttl := secondsToDuration(opts.Cluster.TLSTimeout)
time.AfterFunc(ttl, func() { tlsTimeout(c, conn) })
conn.SetReadDeadline(time.Now().Add(ttl))
c.mu.Unlock()
if err := conn.Handshake(); err != nil {
c.Errorf("TLS route handshake error: %v", err)
c.sendErr("Secure Connection - TLS Required")
c.closeConnection(TLSHandshakeError)
return nil
}
// Reset the read deadline
conn.SetReadDeadline(time.Time{})
// Re-Grab lock
c.mu.Lock()
// Verify that the connection did not go away while we released the lock.
if c.nc == nil {
c.mu.Unlock()
return nil
}
}
// Do final client initialization
// Set the Ping timer
c.setPingTimer()
// For routes, the "client" is added to s.routes only when processing
// the INFO protocol, that is much later.
// In the meantime, if the server shuts down, there would be no reference
// to the client (connection) to be closed, leaving this readLoop
// uninterrupted, causing the Shutdown() to wait indefinitely.
// We need to store the client in a special map, under a special lock.
if !s.addToTempClients(c.cid, c) {
c.mu.Unlock()
c.setNoReconnect()
c.closeConnection(ServerShutdown)
return nil
}
// Check for Auth required state for incoming connections.
// Make sure to do this before spinning up readLoop.
if authRequired && !didSolicit {
ttl := secondsToDuration(opts.Cluster.AuthTimeout)
c.setAuthTimer(ttl)
}
// Spin up the read loop.
s.startGoRoutine(c.readLoop)
// Spin up the write loop.
s.startGoRoutine(c.writeLoop)
if tlsRequired {
c.Debugf("TLS handshake complete")
cs := c.nc.(*tls.Conn).ConnectionState()
c.Debugf("TLS version %s, cipher suite %s", tlsVersion(cs.Version), tlsCipher(cs.CipherSuite))
}
// Queue Connect proto if we solicited the connection.
if didSolicit {
c.Debugf("Route connect msg sent")
c.sendConnect(tlsRequired)
}
// Send our info to the other side.
// Our new version requires dynamic information for accounts and a nonce.
c.sendInfo(infoJSON)
c.mu.Unlock()
c.Noticef("Route connection created")
return c
}
const (
_CRLF_ = "\r\n"
_EMPTY_ = ""
)
func (s *Server) addRoute(c *client, info *Info) (bool, bool) {
id := c.route.remoteID
sendInfo := false
s.mu.Lock()
if !s.running {
s.mu.Unlock()
return false, false
}
remote, exists := s.remotes[id]
if !exists {
s.routes[c.cid] = c
s.remotes[id] = c
c.mu.Lock()
c.route.connectURLs = info.ClientConnectURLs
cid := c.cid
c.mu.Unlock()
// Now that we have registered the route, we can remove from the temp map.
s.removeFromTempClients(cid)
// we don't need to send if the only route is the one we just accepted.
sendInfo = len(s.routes) > 1
// If the INFO contains a Gateway URL, add it to the list for our cluster.
if info.GatewayURL != "" {
s.addGatewayURL(info.GatewayURL)
}
}
s.mu.Unlock()
if exists {
var r *route
c.mu.Lock()
// upgrade to solicited?
if c.route.didSolicit {
// Make a copy
rs := *c.route
r = &rs
}
c.mu.Unlock()
remote.mu.Lock()
// r will be not nil if c.route.didSolicit was true
if r != nil {
// If we upgrade to solicited, we still want to keep the remote's
// connectURLs. So transfer those.
r.connectURLs = remote.route.connectURLs
remote.route = r
}
// This is to mitigate the issue where both sides add the route
// on the opposite connection, and therefore end-up with both
// connections being dropped.
remote.route.retry = true
remote.mu.Unlock()
}
return !exists, sendInfo
}
// updateRouteSubscriptionMap will make sure to update the route map for the subscription. Will
// also forward to all routes if needed.
func (s *Server) updateRouteSubscriptionMap(acc *Account, sub *subscription, delta int32) {
if acc == nil || sub == nil {
return
}
acc.mu.RLock()
rm := acc.rm
acc.mu.RUnlock()
// This is non-nil when we know we are in cluster mode.
if rm == nil {
return
}
// We only store state on local subs for transmission across routes.
if sub.client == nil || (sub.client.kind != CLIENT && sub.client.kind != SYSTEM) {
return
}
// Create the fast key which will use the subject or 'subject<spc>queue' for queue subscribers.
var (
_rkey [1024]byte
key []byte
qi int
)
if sub.queue != nil {
// Just make the key subject spc group, e.g. 'foo bar'
key = _rkey[:0]
key = append(key, sub.subject...)
key = append(key, byte(' '))
qi = len(key)
key = append(key, sub.queue...)
} else {
key = sub.subject
}
// We always update for a queue subscriber since we need to send our relative weight.
var entry *rme
var ok bool
// Always update if a queue subscriber.
update := qi > 0
// Copy to hold outside acc lock.
var entryN int32
acc.mu.Lock()
if entry, ok = rm[string(key)]; ok {
entry.n += delta
if entry.n <= 0 {
delete(rm, string(key))
update = true // Update for deleting,
}
} else if delta > 0 {
entry = &rme{qi, delta}
rm[string(key)] = entry
update = true // Adding for normal sub means update.
}
if entry != nil {
entryN = entry.n
}
acc.mu.Unlock()
if !update || entry == nil {
return
}
// We need to send out this update.
// If we are sending a queue sub, copy and place in the queue weight.
if sub.queue != nil {
sub.client.mu.Lock()
nsub := *sub
sub.client.mu.Unlock()
nsub.qw = entryN
sub = &nsub
}
// Note that queue unsubs where entry.n > 0 are still
// subscribes with a smaller weight.
if entryN > 0 {
s.broadcastSubscribe(sub)
} else {
s.broadcastUnSubscribe(sub)
}
}
// broadcastSubscribe will forward a client subscription
// to all active routes as needed.
func (s *Server) broadcastSubscribe(sub *subscription) {
trace := atomic.LoadInt32(&s.logging.trace) == 1
s.mu.Lock()
subs := []*subscription{sub}
for _, route := range s.routes {
route.mu.Lock()
route.sendRouteSubProtos(subs, trace, func(sub *subscription) bool {
return route.canImport(string(sub.subject))
})
route.mu.Unlock()
}
s.mu.Unlock()
}
// broadcastUnSubscribe will forward a client unsubscribe
// action to all active routes.
func (s *Server) broadcastUnSubscribe(sub *subscription) {
trace := atomic.LoadInt32(&s.logging.trace) == 1
s.mu.Lock()
subs := []*subscription{sub}
for _, route := range s.routes {
route.mu.Lock()
route.sendRouteUnSubProtos(subs, trace, func(sub *subscription) bool {
return route.canImport(string(sub.subject))
})
route.mu.Unlock()
}
s.mu.Unlock()
}
func (s *Server) routeAcceptLoop(ch chan struct{}) {
defer func() {
if ch != nil {
close(ch)
}
}()
// Snapshot server options.
opts := s.getOpts()
port := opts.Cluster.Port
if port == -1 {
port = 0
}
hp := net.JoinHostPort(opts.Cluster.Host, strconv.Itoa(port))
l, e := net.Listen("tcp", hp)
if e != nil {
s.Fatalf("Error listening on router port: %d - %v", opts.Cluster.Port, e)
return
}
s.Noticef("Listening for route connections on %s",
net.JoinHostPort(opts.Cluster.Host, strconv.Itoa(l.Addr().(*net.TCPAddr).Port)))
s.mu.Lock()
// For tests, we want to be able to make this server behave
// as an older server so we use the variable which we can override.
proto := testRouteProto
// Check for TLSConfig
tlsReq := opts.Cluster.TLSConfig != nil
info := Info{
ID: s.info.ID,
Version: s.info.Version,
GoVersion: runtime.Version(),
AuthRequired: false,
TLSRequired: tlsReq,
TLSVerify: tlsReq,
MaxPayload: s.info.MaxPayload,
Proto: proto,
GatewayURL: s.getGatewayURL(),
}
// Set this only if advertise is not disabled
if !opts.Cluster.NoAdvertise {
info.ClientConnectURLs = s.clientConnectURLs
}
// If we have selected a random port...
if port == 0 {
// Write resolved port back to options.
opts.Cluster.Port = l.Addr().(*net.TCPAddr).Port
}
// Keep track of actual listen port. This will be needed in case of
// config reload.
s.clusterActualPort = opts.Cluster.Port
// Check for Auth items
if opts.Cluster.Username != "" {
info.AuthRequired = true
}
// Check for permissions.
if opts.Cluster.Permissions != nil {
info.Import = opts.Cluster.Permissions.Import
info.Export = opts.Cluster.Permissions.Export
}
s.routeInfo = info
// Possibly override Host/Port and set IP based on Cluster.Advertise
if err := s.setRouteInfoHostPortAndIP(); err != nil {
s.Fatalf("Error setting route INFO with Cluster.Advertise value of %s, err=%v", s.opts.Cluster.Advertise, err)
l.Close()
s.mu.Unlock()
return
}
// Setup state that can enable shutdown
s.routeListener = l
s.mu.Unlock()
// Let them know we are up
close(ch)
ch = nil
tmpDelay := ACCEPT_MIN_SLEEP
for s.isRunning() {
conn, err := l.Accept()
if err != nil {
tmpDelay = s.acceptError("Route", err, tmpDelay)
continue
}
tmpDelay = ACCEPT_MIN_SLEEP
s.startGoRoutine(func() {
s.createRoute(conn, nil)
s.grWG.Done()
})
}
s.Debugf("Router accept loop exiting..")
s.done <- true
}
// Similar to setInfoHostPortAndGenerateJSON, but for routeInfo.
func (s *Server) setRouteInfoHostPortAndIP() error {
if s.opts.Cluster.Advertise != "" {
advHost, advPort, err := parseHostPort(s.opts.Cluster.Advertise, s.opts.Cluster.Port)
if err != nil {
return err
}
s.routeInfo.Host = advHost
s.routeInfo.Port = advPort
s.routeInfo.IP = fmt.Sprintf("nats-route://%s/", net.JoinHostPort(advHost, strconv.Itoa(advPort)))
} else {
s.routeInfo.Host = s.opts.Cluster.Host
s.routeInfo.Port = s.opts.Cluster.Port
s.routeInfo.IP = ""
}
// (re)generate the routeInfoJSON byte array
s.generateRouteInfoJSON()
return nil
}
// StartRouting will start the accept loop on the cluster host:port
// and will actively try to connect to listed routes.
func (s *Server) StartRouting(clientListenReady chan struct{}) {
defer s.grWG.Done()
// Wait for the client listen port to be opened, and
// the possible ephemeral port to be selected.
<-clientListenReady
// Spin up the accept loop
ch := make(chan struct{})
go s.routeAcceptLoop(ch)
<-ch
// Solicit Routes if needed.
s.solicitRoutes(s.getOpts().Routes)
}
func (s *Server) reConnectToRoute(rURL *url.URL, rtype RouteType) {
tryForEver := rtype == Explicit
// If A connects to B, and B to A (regardless if explicit or
// implicit - due to auto-discovery), and if each server first
// registers the route on the opposite TCP connection, the
// two connections will end-up being closed.
// Add some random delay to reduce risk of repeated failures.
delay := time.Duration(rand.Intn(100)) * time.Millisecond
if tryForEver {
delay += DEFAULT_ROUTE_RECONNECT
}
select {
case <-time.After(delay):
case <-s.quitCh:
s.grWG.Done()
return
}
s.connectToRoute(rURL, tryForEver)
}
// Checks to make sure the route is still valid.
func (s *Server) routeStillValid(rURL *url.URL) bool {
for _, ri := range s.getOpts().Routes {
if urlsAreEqual(ri, rURL) {
return true
}
}
return false
}
func (s *Server) connectToRoute(rURL *url.URL, tryForEver bool) {
// Snapshot server options.
opts := s.getOpts()
defer s.grWG.Done()
attempts := 0
for s.isRunning() && rURL != nil {
if tryForEver && !s.routeStillValid(rURL) {
return
}
s.Debugf("Trying to connect to route on %s", rURL.Host)
conn, err := net.DialTimeout("tcp", rURL.Host, DEFAULT_ROUTE_DIAL)
if err != nil {
s.Errorf("Error trying to connect to route: %v", err)
if !tryForEver {
if opts.Cluster.ConnectRetries <= 0 {
return
}
attempts++
if attempts > opts.Cluster.ConnectRetries {
return
}
}
select {
case <-s.quitCh:
return
case <-time.After(DEFAULT_ROUTE_CONNECT):
continue
}
}
if tryForEver && !s.routeStillValid(rURL) {
conn.Close()
return
}
// We have a route connection here.
// Go ahead and create it and exit this func.
s.createRoute(conn, rURL)
return
}
}
func (c *client) isSolicitedRoute() bool {
c.mu.Lock()
defer c.mu.Unlock()
return c.kind == ROUTER && c.route != nil && c.route.didSolicit
}
func (s *Server) solicitRoutes(routes []*url.URL) {
for _, r := range routes {
route := r
s.startGoRoutine(func() { s.connectToRoute(route, true) })
}
}
func (c *client) processRouteConnect(srv *Server, arg []byte, lang string) error {
// Way to detect clients that incorrectly connect to the route listen
// port. Clients provide Lang in the CONNECT protocol while ROUTEs don't.
if lang != "" {
errTxt := ErrClientConnectedToRoutePort.Error()
c.Errorf(errTxt)
c.sendErr(errTxt)
c.closeConnection(WrongPort)
return ErrClientConnectedToRoutePort
}
// Unmarshal as a route connect protocol
proto := &connectInfo{}
if err := json.Unmarshal(arg, proto); err != nil {
return err
}
// Reject if this has Gateway which means that it would be from a gateway
// connection that incorrectly connects to the Route port.
if proto.Gateway != "" {
errTxt := fmt.Sprintf("Rejecting connection from gateway %q on the Route port", proto.Gateway)
c.Errorf(errTxt)
c.sendErr(errTxt)
c.closeConnection(WrongGateway)
return ErrWrongGateway
}
var perms *RoutePermissions
if srv != nil {
perms = srv.getOpts().Cluster.Permissions
}
// Grab connection name of remote route.
c.mu.Lock()
c.route.remoteID = c.opts.Name
c.setRoutePermissions(perms)
c.mu.Unlock()
return nil
}
func (s *Server) removeRoute(c *client) {
var rID string
c.mu.Lock()
cid := c.cid
r := c.route
if r != nil {
rID = r.remoteID
}
c.mu.Unlock()
s.mu.Lock()
delete(s.routes, cid)
if r != nil {
rc, ok := s.remotes[rID]
// Only delete it if it is us..
if ok && c == rc {
delete(s.remotes, rID)
}
s.removeGatewayURL(r.gatewayURL)
}
s.removeFromTempClients(cid)
s.mu.Unlock()
}
| 1 | 8,558 | Safe to reference c.srv without capturing it first under a client lock? | nats-io-nats-server | go |
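The review question above asks whether processRouteInfo can safely dereference c.srv before taking the client lock. Below is a minimal, self-contained Go sketch of one way to address it: snapshot the server pointer while holding the client lock, then perform the account lookup outside that lock. The Client, Server, and Account types here are simplified stand-ins rather than the real nats-server types, and whether c.srv can actually change after client creation is an assumption.

```go
package main

import (
	"fmt"
	"sync"
)

// Simplified stand-ins for the real nats-server types; they only model the
// locking pattern under discussion, not the actual server state.
type Account struct {
	mu sync.RWMutex
	sl string // stands in for the account sublist
}

type Server struct {
	gacc *Account
}

func (s *Server) globalAccount() *Account { return s.gacc }

type Client struct {
	mu  sync.Mutex
	srv *Server
}

// processRouteInfo-like entry point: capture c.srv under the client lock,
// then do the account lookup without holding that lock.
func (c *Client) processRouteInfo() {
	c.mu.Lock()
	srv := c.srv // snapshot the server reference under the client lock
	c.mu.Unlock()
	if srv == nil {
		return
	}
	gacc := srv.globalAccount()
	gacc.mu.RLock()
	sl := gacc.sl
	gacc.mu.RUnlock()
	fmt.Println("sublist snapshot:", sl)
}

func main() {
	c := &Client{srv: &Server{gacc: &Account{sl: "global sublist"}}}
	c.processRouteInfo()
}
```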
@@ -1,3 +1,19 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
package actuators
import ( | 1 | package actuators
import (
"github.com/aws/aws-sdk-go/service/ec2/ec2iface"
"github.com/aws/aws-sdk-go/service/elb/elbiface"
)
// AWSClients contains all the aws clients used by the scopes.
type AWSClients struct {
EC2 ec2iface.EC2API
ELB elbiface.ELBAPI
}
| 1 | 7,422 | @vincepri blame tells me this was you, any objections to the change? | kubernetes-sigs-cluster-api-provider-aws | go |
@@ -0,0 +1,19 @@
+package main
+
+import (
+ "net/http"
+ "time"
+
+ "github.com/yarpc/yarpc-go/crossdock/client"
+ "github.com/yarpc/yarpc-go/crossdock/server"
+)
+
+func main() {
+ // TODO need to be able to wait till all inbounds are finished listening
+ go server.StartServerUnderTest()
+ // TODO maybe sleep?
+ time.Sleep(2 * time.Second)
+
+ http.HandleFunc("/", client.TestCaseHandler)
+ http.ListenAndServe(":8080", nil)
+} | 1 | 1 | 9,158 | @abhinav here is where i need to be able to block/wait until the server is started | yarpc-yarpc-go | go |
|
@@ -64,6 +64,9 @@ public class MetricsServlet extends HttpServlet {
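The TODOs in this patch and the review note are about replacing the fixed two-second sleep with a real signal that the server's inbounds are listening. The sketch below shows one hedged way to do that with a readiness channel; startServerUnderTest and its channel parameter are hypothetical stand-ins, since the real server.StartServerUnderTest would have to be changed or wrapped to expose such a signal.

```go
package main

import (
	"log"
	"net/http"
	"time"
)

// startServerUnderTest stands in for server.StartServerUnderTest; it closes
// ready once its inbounds are listening. The real function does not take this
// parameter today, so this signature is an assumption.
func startServerUnderTest(ready chan<- struct{}) {
	// ... start inbounds here ...
	close(ready) // signal that everything is listening
	select {}    // keep serving for the lifetime of the process
}

func main() {
	ready := make(chan struct{})
	go startServerUnderTest(ready)

	// Block until the server reports readiness, with a timeout as a safety
	// net instead of an unconditional time.Sleep.
	select {
	case <-ready:
	case <-time.After(5 * time.Second):
		log.Fatal("server under test did not become ready in time")
	}

	// Placeholder handler; the crossdock client would register
	// client.TestCaseHandler here instead.
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {})
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```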
/**
* Returns the name of the parameter used to specify the jsonp callback, if any.
+ *
+ * @return the name of the parameter used to specify the jsonp callback, or <code>null</code>, if
+ * no such parameter exists.
*/
protected String getJsonpCallbackParameter() {
return null; | 1 | package com.codahale.metrics.servlets;
import java.io.IOException;
import java.io.OutputStream;
import java.util.Locale;
import java.util.concurrent.TimeUnit;
import javax.servlet.ServletConfig;
import javax.servlet.ServletContext;
import javax.servlet.ServletContextEvent;
import javax.servlet.ServletContextListener;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import com.codahale.metrics.MetricFilter;
import com.codahale.metrics.MetricRegistry;
import com.codahale.metrics.json.MetricsModule;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.ObjectWriter;
import com.fasterxml.jackson.databind.util.JSONPObject;
/**
* A servlet which returns the metrics in a given registry as an {@code application/json} response.
*/
public class MetricsServlet extends HttpServlet {
/**
* An abstract {@link ServletContextListener} which allows you to programmatically inject the
* {@link MetricRegistry}, rate and duration units, and allowed origin for
* {@link MetricsServlet}.
*/
public static abstract class ContextListener implements ServletContextListener {
/**
* @return the {@link MetricRegistry} to inject into the servlet context.
*/
protected abstract MetricRegistry getMetricRegistry();
/**
* @return the {@link TimeUnit} to which rates should be converted, or {@code null} if the
* default should be used.
*/
protected TimeUnit getRateUnit() {
// use the default
return null;
}
/**
* @return the {@link TimeUnit} to which durations should be converted, or {@code null} if
* the default should be used.
*/
protected TimeUnit getDurationUnit() {
// use the default
return null;
}
/**
* @return the {@code Access-Control-Allow-Origin} header value, if any.
*/
protected String getAllowedOrigin() {
// use the default
return null;
}
/**
* Returns the name of the parameter used to specify the jsonp callback, if any.
*/
protected String getJsonpCallbackParameter() {
return null;
}
/**
* Returns the {@link MetricFilter} that shall be used to filter metrics, or {@link MetricFilter#ALL} if
* the default should be used.
*/
protected MetricFilter getMetricFilter() {
// use the default
return MetricFilter.ALL;
}
@Override
public void contextInitialized(ServletContextEvent event) {
final ServletContext context = event.getServletContext();
context.setAttribute(METRICS_REGISTRY, getMetricRegistry());
context.setAttribute(METRIC_FILTER, getMetricFilter());
if (getDurationUnit() != null) {
context.setInitParameter(MetricsServlet.DURATION_UNIT, getDurationUnit().toString());
}
if (getRateUnit() != null) {
context.setInitParameter(MetricsServlet.RATE_UNIT, getRateUnit().toString());
}
if (getAllowedOrigin() != null) {
context.setInitParameter(MetricsServlet.ALLOWED_ORIGIN, getAllowedOrigin());
}
if (getJsonpCallbackParameter() != null) {
context.setAttribute(CALLBACK_PARAM, getJsonpCallbackParameter());
}
}
@Override
public void contextDestroyed(ServletContextEvent event) {
// no-op
}
}
public static final String RATE_UNIT = MetricsServlet.class.getCanonicalName() + ".rateUnit";
public static final String DURATION_UNIT = MetricsServlet.class.getCanonicalName() + ".durationUnit";
public static final String SHOW_SAMPLES = MetricsServlet.class.getCanonicalName() + ".showSamples";
public static final String METRICS_REGISTRY = MetricsServlet.class.getCanonicalName() + ".registry";
public static final String ALLOWED_ORIGIN = MetricsServlet.class.getCanonicalName() + ".allowedOrigin";
public static final String METRIC_FILTER = MetricsServlet.class.getCanonicalName() + ".metricFilter";
public static final String CALLBACK_PARAM = MetricsServlet.class.getCanonicalName() + ".jsonpCallback";
private static final long serialVersionUID = 1049773947734939602L;
private static final String CONTENT_TYPE = "application/json";
private String allowedOrigin;
private String jsonpParamName;
private transient MetricRegistry registry;
private transient ObjectMapper mapper;
public MetricsServlet() {
}
public MetricsServlet(MetricRegistry registry) {
this.registry = registry;
}
@Override
public void init(ServletConfig config) throws ServletException {
super.init(config);
final ServletContext context = config.getServletContext();
if (null == registry) {
final Object registryAttr = context.getAttribute(METRICS_REGISTRY);
if (registryAttr instanceof MetricRegistry) {
this.registry = (MetricRegistry) registryAttr;
} else {
throw new ServletException("Couldn't find a MetricRegistry instance.");
}
}
final TimeUnit rateUnit = parseTimeUnit(context.getInitParameter(RATE_UNIT),
TimeUnit.SECONDS);
final TimeUnit durationUnit = parseTimeUnit(context.getInitParameter(DURATION_UNIT),
TimeUnit.SECONDS);
final boolean showSamples = Boolean.parseBoolean(context.getInitParameter(SHOW_SAMPLES));
MetricFilter filter = (MetricFilter) context.getAttribute(METRIC_FILTER);
if (filter == null) {
filter = MetricFilter.ALL;
}
this.mapper = new ObjectMapper().registerModule(new MetricsModule(rateUnit,
durationUnit,
showSamples,
filter));
this.allowedOrigin = context.getInitParameter(ALLOWED_ORIGIN);
this.jsonpParamName = context.getInitParameter(CALLBACK_PARAM);
}
@Override
protected void doGet(HttpServletRequest req,
HttpServletResponse resp) throws ServletException, IOException {
resp.setContentType(CONTENT_TYPE);
if (allowedOrigin != null) {
resp.setHeader("Access-Control-Allow-Origin", allowedOrigin);
}
resp.setHeader("Cache-Control", "must-revalidate,no-cache,no-store");
resp.setStatus(HttpServletResponse.SC_OK);
final OutputStream output = resp.getOutputStream();
try {
if (jsonpParamName != null && req.getParameter(jsonpParamName) != null) {
getWriter(req).writeValue(output, new JSONPObject(req.getParameter(jsonpParamName), registry));
} else {
getWriter(req).writeValue(output, registry);
}
} finally {
output.close();
}
}
private ObjectWriter getWriter(HttpServletRequest request) {
final boolean prettyPrint = Boolean.parseBoolean(request.getParameter("pretty"));
if (prettyPrint) {
return mapper.writerWithDefaultPrettyPrinter();
}
return mapper.writer();
}
private TimeUnit parseTimeUnit(String value, TimeUnit defaultValue) {
try {
return TimeUnit.valueOf(String.valueOf(value).toUpperCase(Locale.US));
} catch (IllegalArgumentException e) {
return defaultValue;
}
}
}
| 1 | 6,605 | It seems to me to be an unrelated change. Could you please revert it? | dropwizard-metrics | java
@@ -0,0 +1,3 @@
+package iface
+
+type DaemonAPI interface{} | 1 | 1 | 13,485 | ~Why are we putting all these APIs in their own ~packages~ files? So many more ~directories~ files, to what end? Why not just have them all be a part of the same API file and all live alongside each other so you can easily see them?~ Edit: never mind, probably works best in separate files. | filecoin-project-venus | go
|
@@ -32,6 +32,10 @@ import static com.github.javaparser.StaticJavaParser.parseClassOrInterfaceType;
*/
public interface NodeWithExtends<N extends Node> {
+ /**
+ * @return All extended types that have been explicitly added.
+ * Note that this will not include {@code java.lang.Object} unless it is explicitly added (e.g. {@code class X extends Object {}})
+ */
NodeList<ClassOrInterfaceType> getExtendedTypes();
void tryAddImportToParentCompilationUnit(Class<?> clazz); | 1 | /*
* Copyright (C) 2007-2010 Júlio Vilmar Gesser.
* Copyright (C) 2011, 2013-2020 The JavaParser Team.
*
* This file is part of JavaParser.
*
* JavaParser can be used either under the terms of
* a) the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
* b) the terms of the Apache License
*
* You should have received a copy of both licenses in LICENCE.LGPL and
* LICENCE.APACHE. Please refer to those files for details.
*
* JavaParser is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*/
package com.github.javaparser.ast.nodeTypes;
import com.github.javaparser.ast.Node;
import com.github.javaparser.ast.NodeList;
import com.github.javaparser.ast.type.ClassOrInterfaceType;
import static com.github.javaparser.StaticJavaParser.parseClassOrInterfaceType;
/**
* A node that explicitly extends other types, using the {@code extends} keyword.
*/
public interface NodeWithExtends<N extends Node> {
NodeList<ClassOrInterfaceType> getExtendedTypes();
void tryAddImportToParentCompilationUnit(Class<?> clazz);
default ClassOrInterfaceType getExtendedTypes(int i) {
return getExtendedTypes().get(i);
}
N setExtendedTypes(NodeList<ClassOrInterfaceType> extendsList);
@SuppressWarnings("unchecked")
default N setExtendedType(int i, ClassOrInterfaceType extend) {
getExtendedTypes().set(i, extend);
return (N) this;
}
@SuppressWarnings("unchecked")
default N addExtendedType(ClassOrInterfaceType extend) {
getExtendedTypes().add(extend);
return (N) this;
}
/**
* @deprecated use addExtendedType
*/
@Deprecated
default N addExtends(Class<?> clazz) {
return addExtendedType(clazz);
}
/**
* @deprecated use addExtendedType
*/
@Deprecated
default N addExtends(String name) {
return addExtendedType(name);
}
/**
* Add an "extends" to this and automatically add the import
*
* @param clazz the class to extend from
* @return this
*/
default N addExtendedType(Class<?> clazz) {
tryAddImportToParentCompilationUnit(clazz);
return addExtendedType(clazz.getSimpleName());
}
/**
* Add an "extends" to this
*
* @param name the name of the type to extends from
* @return this
*/
@SuppressWarnings("unchecked")
default N addExtendedType(String name) {
getExtendedTypes().add(parseClassOrInterfaceType(name));
return (N) this;
}
}
| 1 | 14,103 | This likely needs to be double checked -- I recall being convinced at the time of writing this, but now I am less sure | javaparser-javaparser | java |
@@ -56,7 +56,7 @@ func main() {
if csv {
// produce a csv report
- fmt.Printf("Module Name,Module Path,Whitelisted,License Path,License Name,Confidence,Exact Match,Similar To,Similarity Confidence,State\n")
+ fmt.Printf("Module Name,Module Path,Whitelisted,License Path,License Name,Confidence,Similar To,Similarity Confidence,State\n")
for _, module := range modules {
fmt.Printf("%s,%s,%v", module.moduleName, module.path, cfg.whitelistedModules[module.moduleName])
for _, l := range module.licenses { | 1 | // Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"flag"
"fmt"
"io/ioutil"
"os"
"path"
)
func main() {
var report bool
var dump bool
var csv bool
var mirror bool
var config string
flag.BoolVar(&report, "report", false, "Generate a report of all license usage.")
flag.BoolVar(&dump, "dump", false, "Generate a dump of all licenses used.")
flag.BoolVar(&csv, "csv", false, "Generate a report of all license usage in CSV format.")
flag.BoolVar(&mirror, "mirror", false, "Creates a 'licenses' directory with the licenses of all dependencies.")
flag.StringVar(&config, "config", "", "Path to config file.")
flag.Parse()
cfg := newConfig()
if config != "" {
var err error
if cfg, err = readConfig(config); err != nil {
_, _ = fmt.Fprintf(os.Stderr, "ERROR: %v\n", err)
os.Exit(1)
}
}
modules, err := getLicenses()
if err != nil {
_, _ = fmt.Fprintf(os.Stderr, "ERROR: %v\n", err)
os.Exit(1)
}
// now do the real work
if csv {
// produce a csv report
fmt.Printf("Module Name,Module Path,Whitelisted,License Path,License Name,Confidence,Exact Match,Similar To,Similarity Confidence,State\n")
for _, module := range modules {
fmt.Printf("%s,%s,%v", module.moduleName, module.path, cfg.whitelistedModules[module.moduleName])
for _, l := range module.licenses {
state := "unrecognized"
if cfg.unrestrictedLicenses[l.analysis.licenseName] {
state = "unrestricted"
} else if cfg.reciprocalLicenses[l.analysis.licenseName] {
state = "reciprocal"
} else if cfg.restrictedLicenses[l.analysis.licenseName] {
state = "restricted"
}
fmt.Printf(",%s,%s,%s,%v,%s,%s,%s", l.path, l.analysis.licenseName, l.analysis.confidence, l.analysis.exactMatch, l.analysis.similarLicense,
l.analysis.similarityConfidence, state)
}
fmt.Printf("\n")
}
} else if mirror {
var basePath = "licenses"
for _, module := range modules {
p := path.Join(basePath, module.moduleName)
_ = os.MkdirAll(p, 0755)
if len(module.licenses) > 0 {
for _, license := range module.licenses {
targetPath := path.Join(p, license.path[len(module.path)+1:])
targetDir := path.Dir(targetPath)
_ = os.MkdirAll(targetDir, 0755)
err := ioutil.WriteFile(targetPath, []byte(license.text), 0644)
if err != nil {
_, _ = fmt.Fprintf(os.Stderr, "ERROR: unable to write license file to %s: %v\n", targetPath, err)
os.Exit(1)
}
}
} else {
targetPath := path.Join(p, "NONE")
err := ioutil.WriteFile(targetPath, []byte("NO LICENSE FOUND\n"), 0644)
if err != nil {
_, _ = fmt.Fprintf(os.Stderr, "ERROR: unable to write file to %s: %v\n", targetPath, err)
os.Exit(1)
}
}
}
} else {
var unlicensedModules []*moduleInfo
var unrecognizedLicenses []*licenseInfo
var unrestrictedLicenses []*licenseInfo
var reciprocalLicenses []*licenseInfo
var restrictedLicenses []*licenseInfo
// categorize the modules
for _, module := range modules {
if !report && !dump {
// if we're not producing a report, then exclude any module on the whitelist
if cfg.whitelistedModules[module.moduleName] {
continue
}
}
if len(module.licenses) == 0 {
// no license found
unlicensedModules = append(unlicensedModules, module)
} else {
for _, l := range module.licenses {
if cfg.unrestrictedLicenses[l.analysis.licenseName] {
unrestrictedLicenses = append(unrestrictedLicenses, l)
} else if cfg.reciprocalLicenses[l.analysis.licenseName] {
reciprocalLicenses = append(reciprocalLicenses, l)
} else if cfg.restrictedLicenses[l.analysis.licenseName] {
restrictedLicenses = append(restrictedLicenses, l)
} else {
unrecognizedLicenses = append(unrecognizedLicenses, l)
}
}
}
}
if report {
fmt.Printf("Modules with unrestricted licenses:\n")
if len(unrestrictedLicenses) == 0 {
fmt.Printf(" <none>\n")
} else {
for _, l := range unrestrictedLicenses {
fmt.Printf(" %s: %s, %s confidence\n", l.module.moduleName, l.analysis.licenseName, l.analysis.confidence)
}
}
fmt.Printf("\n")
fmt.Printf("Modules with reciprocal licenses:\n")
			if len(reciprocalLicenses) == 0 {
fmt.Printf(" <none>\n")
} else {
for _, l := range reciprocalLicenses {
fmt.Printf(" %s: %s, %s confidence\n", l.module.moduleName, l.analysis.licenseName, l.analysis.confidence)
}
}
fmt.Printf("\n")
fmt.Printf("Modules with restricted licenses:\n")
if len(restrictedLicenses) == 0 {
fmt.Printf(" <none>\n")
} else {
for _, l := range restrictedLicenses {
fmt.Printf(" %s: %s, %s confidence\n", l.module.moduleName, l.analysis.licenseName, l.analysis.confidence)
}
}
fmt.Printf("\n")
fmt.Printf("Modules with unrecognized licenses:\n")
if len(unrecognizedLicenses) == 0 {
fmt.Printf(" <none>\n")
} else {
for _, l := range unrecognizedLicenses {
if l.analysis.licenseName != "" {
fmt.Printf(" %s: similar to %s, %s confidence, path '%s'\n", l.module.moduleName, l.analysis.licenseName, l.analysis.confidence, l.path)
} else if l.analysis.similarLicense != "" {
fmt.Printf(" %s: similar to %s, %s confidence, path '%s'\n", l.module.moduleName, l.analysis.similarLicense, l.analysis.similarityConfidence, l.path)
} else {
fmt.Printf(" %s: path '%s'\n", l.module.moduleName, l.path)
}
}
}
fmt.Printf("\n")
fmt.Printf("Modules with no discernible license:\n")
if len(unlicensedModules) == 0 {
fmt.Printf(" <none>\n")
} else {
for _, m := range unlicensedModules {
fmt.Printf(" %s\n", m.moduleName)
}
}
} else if dump {
for _, l := range unrestrictedLicenses {
fmt.Printf("MODULE: %s\n%s\n", l.module.moduleName, l.text)
}
for _, l := range reciprocalLicenses {
fmt.Printf("MODULE: %s\n%s\n", l.module.moduleName, l.text)
}
for _, l := range restrictedLicenses {
fmt.Printf("MODULE: %s\n%s\n", l.module.moduleName, l.text)
}
for _, l := range unrecognizedLicenses {
fmt.Printf("MODULE: %s\n%s\n", l.module.moduleName, l.text)
}
for _, m := range unlicensedModules {
fmt.Printf("MODULE: %s\n%s\n", m.moduleName, "<none>")
}
} else {
failLint := false
if len(unrecognizedLicenses) > 0 {
failLint = true
fmt.Printf("ERROR: Some modules have unrecognized licenses:\n")
for _, l := range unrecognizedLicenses {
if l.analysis.licenseName != "" {
fmt.Printf(" %s: similar to %s, %s confidence, path '%s'\n", l.module.moduleName, l.analysis.licenseName, l.analysis.confidence, l.path)
} else if l.analysis.similarLicense != "" {
fmt.Printf(" %s: similar to %s, %s confidence, path '%s'\n", l.module.moduleName, l.analysis.similarLicense, l.analysis.similarityConfidence, l.path)
} else {
fmt.Printf(" %s: path '%s'\n", l.module.moduleName, l.path)
}
}
fmt.Printf("\n")
}
if len(unlicensedModules) > 0 {
failLint = true
fmt.Printf("ERROR: Some modules have no discernible license:\n")
for _, m := range unlicensedModules {
fmt.Printf(" %s\n", m.moduleName)
}
}
if failLint {
os.Exit(1)
}
}
}
}
| 1 | 8,533 | For a followup - it is better to use acceptlist/denylist. I realize this is a historical artifact of our codebase. | istio-tools | go |
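The patch above drops the "Exact Match" column from the CSV header, while the per-license Printf in the old file still writes l.analysis.exactMatch; unless another hunk not shown here updates that line as well, each row would carry one more field than the header names. A self-contained sketch, using encoding/csv with placeholder values rather than the tool's actual types, of emitting the new nine-column header together with a row of matching width:

package main

import (
	"encoding/csv"
	"os"
)

func main() {
	// The report's columns after the change (the "Exact Match" column removed).
	header := []string{
		"Module Name", "Module Path", "Whitelisted",
		"License Path", "License Name", "Confidence",
		"Similar To", "Similarity Confidence", "State",
	}
	w := csv.NewWriter(os.Stdout)
	_ = w.Write(header)
	// A placeholder row with exactly len(header) fields; in the real tool the
	// per-license output would likewise need to drop the exactMatch value to
	// stay aligned with the new header.
	row := []string{
		"example.com/some/module", "/path/to/module", "false",
		"LICENSE", "Apache-2.0", "1.0",
		"", "", "unrestricted",
	}
	_ = w.Write(row)
	w.Flush()
}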
@@ -77,6 +77,13 @@ import org.apache.commons.io.FileUtils;
import java.util.zip.GZIPInputStream;
import java.util.zip.ZipEntry;
import java.util.zip.ZipInputStream;
+import static edu.harvard.iq.dataverse.datasetutility.FileSizeChecker.bytesToHumanReadable;
+import static edu.harvard.iq.dataverse.datasetutility.FileSizeChecker.bytesToHumanReadable;
+import static edu.harvard.iq.dataverse.datasetutility.FileSizeChecker.bytesToHumanReadable;
+import static edu.harvard.iq.dataverse.datasetutility.FileSizeChecker.bytesToHumanReadable;
+import static edu.harvard.iq.dataverse.datasetutility.FileSizeChecker.bytesToHumanReadable;
+import static edu.harvard.iq.dataverse.datasetutility.FileSizeChecker.bytesToHumanReadable;
+import static edu.harvard.iq.dataverse.datasetutility.FileSizeChecker.bytesToHumanReadable;
/** | 1 | /*
Copyright (C) 2005-2012, by the President and Fellows of Harvard College.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Dataverse Network - A web application to share, preserve and analyze research data.
Developed at the Institute for Quantitative Social Science, Harvard University.
Version 3.0.
*/
package edu.harvard.iq.dataverse.util;
import edu.harvard.iq.dataverse.DataFile;
import edu.harvard.iq.dataverse.DataFile.ChecksumType;
import edu.harvard.iq.dataverse.DataFileServiceBean;
import edu.harvard.iq.dataverse.DatasetVersion;
import edu.harvard.iq.dataverse.FileMetadata;
import edu.harvard.iq.dataverse.TermsOfUseAndAccess;
import edu.harvard.iq.dataverse.dataaccess.ImageThumbConverter;
import static edu.harvard.iq.dataverse.dataaccess.S3AccessIO.S3_IDENTIFIER_PREFIX;
import edu.harvard.iq.dataverse.dataset.DatasetThumbnail;
import edu.harvard.iq.dataverse.datasetutility.FileExceedsMaxSizeException;
import static edu.harvard.iq.dataverse.datasetutility.FileSizeChecker.bytesToHumanReadable;
import edu.harvard.iq.dataverse.ingest.IngestReport;
import edu.harvard.iq.dataverse.ingest.IngestServiceShapefileHelper;
import edu.harvard.iq.dataverse.ingest.IngestableDataChecker;
import java.awt.image.BufferedImage;
import java.io.BufferedInputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.FileReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.channels.FileChannel;
import java.nio.channels.WritableByteChannel;
import java.nio.charset.Charset;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.StandardCopyOption;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.sql.Timestamp;
import java.text.MessageFormat;
import java.text.SimpleDateFormat;
import java.util.Map;
import java.util.MissingResourceException;
import java.util.ArrayList;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.UUID;
import java.util.logging.Level;
import java.util.logging.Logger;
import javax.activation.MimetypesFileTypeMap;
import javax.ejb.EJBException;
import javax.xml.stream.XMLStreamConstants;
import javax.xml.stream.XMLStreamException;
import javax.xml.stream.XMLStreamReader;
import org.apache.commons.io.FileUtils;
import java.util.zip.GZIPInputStream;
import java.util.zip.ZipEntry;
import java.util.zip.ZipInputStream;
/**
* a 4.0 implementation of the DVN FileUtil;
* it provides some of the functionality from the 3.6 implementation,
* but the old code is ported creatively on the method-by-method basis.
*
* @author Leonid Andreev
*/
public class FileUtil implements java.io.Serializable {
private static final Logger logger = Logger.getLogger(FileUtil.class.getCanonicalName());
private static final String[] TABULAR_DATA_FORMAT_SET = {"POR", "SAV", "DTA", "RDA"};
private static Map<String, String> STATISTICAL_FILE_EXTENSION = new HashMap<String, String>();
/*
* The following are Stata, SAS and SPSS syntax/control cards:
* These are recognized as text files (because they are!) so
* we check all the uploaded "text/plain" files for these extensions, and
* assign the following types when they are matched;
* Note that these types are only used in the metadata displayed on the
* dataset page. We don't support ingest on control cards.
* -- L.A. 4.0 Oct. 2014
*/
static {
STATISTICAL_FILE_EXTENSION.put("do", "application/x-stata-syntax");
STATISTICAL_FILE_EXTENSION.put("sas", "application/x-sas-syntax");
STATISTICAL_FILE_EXTENSION.put("sps", "application/x-spss-syntax");
STATISTICAL_FILE_EXTENSION.put("csv", "text/csv");
STATISTICAL_FILE_EXTENSION.put("tsv", "text/tsv");
}
private static MimetypesFileTypeMap MIME_TYPE_MAP = new MimetypesFileTypeMap();
public static final String MIME_TYPE_STATA = "application/x-stata";
public static final String MIME_TYPE_STATA13 = "application/x-stata-13";
public static final String MIME_TYPE_STATA14 = "application/x-stata-14";
public static final String MIME_TYPE_STATA15 = "application/x-stata-15";
public static final String MIME_TYPE_RDATA = "application/x-rlang-transport";
public static final String MIME_TYPE_CSV = "text/csv";
public static final String MIME_TYPE_CSV_ALT = "text/comma-separated-values";
public static final String MIME_TYPE_TSV = "text/tsv";
public static final String MIME_TYPE_TSV_ALT = "text/tab-separated-values";
public static final String MIME_TYPE_XLSX = "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet";
public static final String MIME_TYPE_SPSS_SAV = "application/x-spss-sav";
public static final String MIME_TYPE_SPSS_POR = "application/x-spss-por";
public static final String MIME_TYPE_FITS = "application/fits";
public static final String MIME_TYPE_ZIP = "application/zip";
public static final String MIME_TYPE_FITSIMAGE = "image/fits";
// SHAPE file type:
// this is the only supported file type in the GEO DATA class:
public static final String MIME_TYPE_GEO_SHAPE = "application/zipped-shapefile";
public static final String MIME_TYPE_UNDETERMINED_DEFAULT = "application/octet-stream";
public static final String MIME_TYPE_UNDETERMINED_BINARY = "application/binary";
public static final String SAVED_ORIGINAL_FILENAME_EXTENSION = "orig";
public static final String MIME_TYPE_INGESTED_FILE = "text/tab-separated-values";
/**
* This string can be prepended to a Base64-encoded representation of a PNG
* file in order to imbed an image directly into an HTML page using the
* "img" tag. See also https://en.wikipedia.org/wiki/Data_URI_scheme
*/
public static String DATA_URI_SCHEME = "data:image/png;base64,";
public FileUtil() {
}
public static void copyFile(File inputFile, File outputFile) throws IOException {
FileChannel in = null;
WritableByteChannel out = null;
try {
in = new FileInputStream(inputFile).getChannel();
out = new FileOutputStream(outputFile).getChannel();
long bytesPerIteration = 50000;
long start = 0;
while ( start < in.size() ) {
in.transferTo(start, bytesPerIteration, out);
start += bytesPerIteration;
}
} finally {
if (in != null) { in.close(); }
if (out != null) { out.close(); }
}
}
public static String getFileExtension(String fileName){
String ext = null;
if ( fileName.lastIndexOf(".") != -1){
ext = (fileName.substring( fileName.lastIndexOf(".") + 1 )).toLowerCase();
}
return ext;
}
public static String replaceExtension(String originalName) {
return replaceExtension(originalName, "tab");
}
public static String replaceExtension(String originalName, String newExtension) {
int extensionIndex = originalName.lastIndexOf(".");
if (extensionIndex != -1 ) {
return originalName.substring(0, extensionIndex) + "."+newExtension ;
} else {
return originalName +"."+newExtension ;
}
}
public static String getUserFriendlyFileType(DataFile dataFile) {
String fileType = dataFile.getContentType();
if (fileType != null) {
if (fileType.equalsIgnoreCase(ShapefileHandler.SHAPEFILE_FILE_TYPE)){
return ShapefileHandler.SHAPEFILE_FILE_TYPE_FRIENDLY_NAME;
}
if (fileType.contains(";")) {
fileType = fileType.substring(0, fileType.indexOf(";"));
}
try {
return BundleUtil.getStringFromPropertyFile(fileType,"MimeTypeDisplay" );
} catch (MissingResourceException e) {
return fileType;
}
}
return fileType;
}
public static String getFacetFileType(DataFile dataFile) {
String fileType = dataFile.getContentType();
if (!StringUtil.isEmpty(fileType)) {
if (fileType.contains(";")) {
fileType = fileType.substring(0, fileType.indexOf(";"));
}
try {
return BundleUtil.getStringFromPropertyFile(fileType,"MimeTypeFacets" );
} catch (MissingResourceException e) {
// if there's no defined "facet-friendly" form of this mime type
// we'll truncate the available type by "/", e.g., all the
// unknown image/* types will become "image"; many other, quite
// different types will all become "application" this way -
                // but it is probably still better than to tag them all as
                // "unknown".
// -- L.A. 4.0 alpha 1
//
// UPDATE, MH 4.9.2
// Since production is displaying both "tabulardata" and "Tabular Data"
// we are going to try to add capitalization here to this function
// in order to capitalize all the unknown types that are not called
// out in MimeTypeFacets.properties
String typeClass = fileType.split("/")[0];
return Character.toUpperCase(typeClass.charAt(0)) + typeClass.substring(1);
}
} else {
try {
return BundleUtil.getStringFromPropertyFile("application/octet-stream","MimeTypeFacets" );
} catch (MissingResourceException ex) {
logger.warning("Could not find \"" + fileType + "\" in bundle file: ");
logger.log(Level.CONFIG, ex.getMessage(), ex);
return null;
}
}
}
public static String getUserFriendlyOriginalType(DataFile dataFile) {
if (!dataFile.isTabularData()) {
return null;
}
String fileType = dataFile.getOriginalFileFormat();
if (fileType != null && !fileType.equals("")) {
if (fileType.contains(";")) {
fileType = fileType.substring(0, fileType.indexOf(";"));
}
try {
return BundleUtil.getStringFromPropertyFile(fileType,"MimeTypeDisplay" );
} catch (MissingResourceException e) {
return fileType;
}
}
return "UNKNOWN";
}
/**
* Returns a content type string for a FileObject
*
*/
private static String determineContentType(File fileObject) {
if (fileObject==null){
return null;
}
String contentType;
try {
contentType = determineFileType(fileObject, fileObject.getName());
} catch (Exception ex) {
logger.warning("FileUtil.determineFileType failed for file with name: " + fileObject.getName());
contentType = null;
}
if ((contentType==null)||(contentType.equals(""))){
contentType = MIME_TYPE_UNDETERMINED_DEFAULT;
}
return contentType;
}
public static String retestIngestableFileType(File file, String fileType) {
IngestableDataChecker tabChecker = new IngestableDataChecker(TABULAR_DATA_FORMAT_SET);
String newType = tabChecker.detectTabularDataFormat(file);
return newType != null ? newType : fileType;
}
public static String determineFileType(File f, String fileName) throws IOException{
String fileType = null;
String fileExtension = getFileExtension(fileName);
// step 1:
// Apply our custom methods to try and recognize data files that can be
// converted to tabular data, or can be parsed for extra metadata
// (such as FITS).
logger.fine("Attempting to identify potential tabular data files;");
IngestableDataChecker tabChk = new IngestableDataChecker(TABULAR_DATA_FORMAT_SET);
fileType = tabChk.detectTabularDataFormat(f);
logger.fine("determineFileType: tabular data checker found "+fileType);
// step 2: If not found, check if graphml or FITS
if (fileType==null) {
if (isGraphMLFile(f)) {
fileType = "text/xml-graphml";
} else // Check for FITS:
// our check is fairly weak (it appears to be hard to really
// really recognize a FITS file without reading the entire
            // stream...), so in version 3.* we used to insist on *both*
// the ".fits" extension and the header check;
// in 4.0, we'll accept either the extension, or the valid
// magic header:
if (isFITSFile(f) || (fileExtension != null
&& fileExtension.equalsIgnoreCase("fits"))) {
fileType = "application/fits";
}
}
// step 3: check the mime type of this file with Jhove
if (fileType == null){
JhoveFileType jw = new JhoveFileType();
String mimeType = jw.getFileMimeType(f);
if (mimeType != null) {
fileType = mimeType;
}
}
// step 4:
// Additional processing; if we haven't gotten much useful information
// back from Jhove, we'll try and make an educated guess based on
// the file extension:
if ( fileExtension != null) {
logger.fine("fileExtension="+fileExtension);
if (fileType == null || fileType.startsWith("text/plain") || "application/octet-stream".equals(fileType)) {
if (fileType != null && fileType.startsWith("text/plain") && STATISTICAL_FILE_EXTENSION.containsKey(fileExtension)) {
fileType = STATISTICAL_FILE_EXTENSION.get(fileExtension);
} else {
fileType = determineFileTypeByExtension(fileName);
}
logger.fine("mime type recognized by extension: "+fileType);
}
} else {
logger.fine("fileExtension is null");
}
// step 5:
// if this is a compressed file - zip or gzip - we'll check the
// file(s) inside the compressed stream and see if it's one of our
// recognized formats that we want to support compressed:
if ("application/x-gzip".equals(fileType)) {
logger.fine("we'll run additional checks on this gzipped file.");
// We want to be able to support gzipped FITS files, same way as
// if they were just regular FITS files:
FileInputStream gzippedIn = new FileInputStream(f);
            // (new FileInputStream() can throw a "file not found" exception;
// however, if we've made it this far, it really means that the
// file does exist and can be opened)
InputStream uncompressedIn = null;
try {
uncompressedIn = new GZIPInputStream(gzippedIn);
if (isFITSFile(uncompressedIn)) {
fileType = "application/fits-gzipped";
}
} catch (IOException ioex) {
if (uncompressedIn != null) {
try {uncompressedIn.close();} catch (IOException e) {}
}
}
}
if ("application/zip".equals(fileType)) {
// Is this a zipped Shapefile?
// Check for shapefile extensions as described here: http://en.wikipedia.org/wiki/Shapefile
//logger.info("Checking for shapefile");
ShapefileHandler shp_handler = new ShapefileHandler(new FileInputStream(f));
if (shp_handler.containsShapefile()){
// logger.info("------- shapefile FOUND ----------");
fileType = ShapefileHandler.SHAPEFILE_FILE_TYPE; //"application/zipped-shapefile";
}
}
logger.fine("returning fileType "+fileType);
return fileType;
}
public static String determineFileTypeByExtension(String fileName) {
logger.fine("Type by extension, for "+fileName+": "+MIME_TYPE_MAP.getContentType(fileName));
return MIME_TYPE_MAP.getContentType(fileName);
}
/*
* Custom method for identifying FITS files:
* TODO:
* the existing check for the "magic header" is very weak (see below);
* it should probably be replaced by attempting to parse and read at
* least the primary HDU, using the NOM fits parser.
* -- L.A. 4.0 alpha
*/
private static boolean isFITSFile(File file) {
BufferedInputStream ins = null;
try {
ins = new BufferedInputStream(new FileInputStream(file));
return isFITSFile(ins);
} catch (IOException ex) {
}
return false;
}
private static boolean isFITSFile(InputStream ins) {
boolean isFITS = false;
// number of header bytes read for identification:
int magicWordLength = 6;
String magicWord = "SIMPLE";
try {
byte[] b = new byte[magicWordLength];
logger.fine("attempting to read "+magicWordLength+" bytes from the FITS format candidate stream.");
if (ins.read(b, 0, magicWordLength) != magicWordLength) {
throw new IOException();
}
if (magicWord.equals(new String(b))) {
logger.fine("yes, this is FITS file!");
isFITS = true;
}
} catch (IOException ex) {
isFITS = false;
} finally {
if (ins != null) {
try {
ins.close();
} catch (Exception e) {
}
}
}
return isFITS;
}
private static boolean isGraphMLFile(File file) {
boolean isGraphML = false;
logger.fine("begin isGraphMLFile()");
try{
FileReader fileReader = new FileReader(file);
javax.xml.stream.XMLInputFactory xmlif = javax.xml.stream.XMLInputFactory.newInstance();
xmlif.setProperty("javax.xml.stream.isCoalescing", java.lang.Boolean.TRUE);
XMLStreamReader xmlr = xmlif.createXMLStreamReader(fileReader);
for (int event = xmlr.next(); event != XMLStreamConstants.END_DOCUMENT; event = xmlr.next()) {
if (event == XMLStreamConstants.START_ELEMENT) {
if (xmlr.getLocalName().equals("graphml")) {
String schema = xmlr.getAttributeValue("http://www.w3.org/2001/XMLSchema-instance", "schemaLocation");
logger.fine("schema = "+schema);
if (schema!=null && schema.contains("http://graphml.graphdrawing.org/xmlns/1.0/graphml.xsd")){
logger.fine("graphML is true");
isGraphML = true;
}
}
break;
}
}
} catch(XMLStreamException e) {
logger.fine("XML error - this is not a valid graphML file.");
isGraphML = false;
} catch(IOException e) {
throw new EJBException(e);
}
logger.fine("end isGraphML()");
return isGraphML;
}
// from MD5Checksum.java
public static String CalculateChecksum(String datafile, ChecksumType checksumType) {
FileInputStream fis = null;
try {
fis = new FileInputStream(datafile);
} catch (FileNotFoundException ex) {
throw new RuntimeException(ex);
}
return CalculateChecksum(fis, checksumType);
}
// from MD5Checksum.java
public static String CalculateChecksum(InputStream in, ChecksumType checksumType) {
MessageDigest md = null;
try {
// Use "SHA-1" (toString) rather than "SHA1", for example.
md = MessageDigest.getInstance(checksumType.toString());
} catch (NoSuchAlgorithmException e) {
throw new RuntimeException(e);
}
byte[] dataBytes = new byte[1024];
int nread;
try {
while ((nread = in.read(dataBytes)) != -1) {
md.update(dataBytes, 0, nread);
}
} catch (IOException ex) {
throw new RuntimeException(ex);
} finally {
try {
in.close();
} catch (Exception e) {
}
}
byte[] mdbytes = md.digest();
StringBuilder sb = new StringBuilder("");
for (int i = 0; i < mdbytes.length; i++) {
sb.append(Integer.toString((mdbytes[i] & 0xff) + 0x100, 16).substring(1));
}
return sb.toString();
}
public static String generateOriginalExtension(String fileType) {
if (fileType.equalsIgnoreCase("application/x-spss-sav")) {
return ".sav";
} else if (fileType.equalsIgnoreCase("application/x-spss-por")) {
return ".por";
} else if (fileType.equalsIgnoreCase("application/x-stata")) {
return ".dta";
} else if (fileType.equalsIgnoreCase( "application/x-rlang-transport")) {
return ".RData";
} else if (fileType.equalsIgnoreCase("text/csv")) {
return ".csv";
} else if (fileType.equalsIgnoreCase( "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet")) {
return ".xlsx";
}
return "";
}
public static List<DataFile> createDataFiles(DatasetVersion version, InputStream inputStream, String fileName, String suppliedContentType, SystemConfig systemConfig) throws IOException {
List<DataFile> datafiles = new ArrayList<>();
String warningMessage = null;
// save the file, in the temporary location for now:
Path tempFile = null;
Long fileSizeLimit = systemConfig.getMaxFileUploadSize();
if (getFilesTempDirectory() != null) {
tempFile = Files.createTempFile(Paths.get(getFilesTempDirectory()), "tmp", "upload");
// "temporary" location is the key here; this is why we are not using
// the DataStore framework for this - the assumption is that
// temp files will always be stored on the local filesystem.
// -- L.A. Jul. 2014
logger.fine("Will attempt to save the file as: " + tempFile.toString());
Files.copy(inputStream, tempFile, StandardCopyOption.REPLACE_EXISTING);
// A file size check, before we do anything else:
// (note that "no size limit set" = "unlimited")
// (also note, that if this is a zip file, we'll be checking
// the size limit for each of the individual unpacked files)
Long fileSize = tempFile.toFile().length();
if (fileSizeLimit != null && fileSize > fileSizeLimit) {
try {tempFile.toFile().delete();} catch (Exception ex) {}
throw new IOException (MessageFormat.format(BundleUtil.getStringFromBundle("file.addreplace.error.file_exceeds_limit"), bytesToHumanReadable(fileSize), bytesToHumanReadable(fileSizeLimit)));
}
} else {
throw new IOException ("Temp directory is not configured.");
}
logger.fine("mime type supplied: "+suppliedContentType);
// Let's try our own utilities (Jhove, etc.) to determine the file type
// of the uploaded file. (We may already have a mime type supplied for this
// file - maybe the type that the browser recognized on upload; or, if
// it's a harvest, maybe the remote server has already given us the type
// for this file... with our own type utility we may or may not do better
// than the type supplied:
// -- L.A.
String recognizedType = null;
String finalType = null;
try {
recognizedType = determineFileType(tempFile.toFile(), fileName);
logger.fine("File utility recognized the file as " + recognizedType);
if (recognizedType != null && !recognizedType.equals("")) {
// is it any better than the type that was supplied to us,
// if any?
// This is not as trivial a task as one might expect...
// We may need a list of "good" mime types, that should always
// be chosen over other choices available. Maybe it should
// even be a weighed list... as in, "application/foo" should
// be chosen over "application/foo-with-bells-and-whistles".
// For now the logic will be as follows:
//
// 1. If the contentType supplied (by the browser, most likely)
// is some form of "unknown", we always discard it in favor of
// whatever our own utilities have determined;
// 2. We should NEVER trust the browser when it comes to the
// following "ingestable" types: Stata, SPSS, R;
// 2a. We are willing to TRUST the browser when it comes to
// the CSV and XSLX ingestable types.
// 3. We should ALWAYS trust our utilities when it comes to
// ingestable types.
if (suppliedContentType == null
|| suppliedContentType.equals("")
|| suppliedContentType.equalsIgnoreCase(MIME_TYPE_UNDETERMINED_DEFAULT)
|| suppliedContentType.equalsIgnoreCase(MIME_TYPE_UNDETERMINED_BINARY)
|| (canIngestAsTabular(suppliedContentType)
&& !suppliedContentType.equalsIgnoreCase(MIME_TYPE_CSV)
&& !suppliedContentType.equalsIgnoreCase(MIME_TYPE_CSV_ALT)
&& !suppliedContentType.equalsIgnoreCase(MIME_TYPE_XLSX))
|| canIngestAsTabular(recognizedType)
|| recognizedType.equals("application/fits-gzipped")
|| recognizedType.equalsIgnoreCase(ShapefileHandler.SHAPEFILE_FILE_TYPE)
|| recognizedType.equals(MIME_TYPE_ZIP)) {
finalType = recognizedType;
}
}
} catch (Exception ex) {
logger.warning("Failed to run the file utility mime type check on file " + fileName);
}
if (finalType == null) {
finalType = (suppliedContentType == null || suppliedContentType.equals(""))
? MIME_TYPE_UNDETERMINED_DEFAULT
: suppliedContentType;
}
// A few special cases:
// if this is a gzipped FITS file, we'll uncompress it, and ingest it as
// a regular FITS file:
if (finalType.equals("application/fits-gzipped")) {
InputStream uncompressedIn = null;
String finalFileName = fileName;
// if the file name had the ".gz" extension, remove it,
// since we are going to uncompress it:
if (fileName != null && fileName.matches(".*\\.gz$")) {
finalFileName = fileName.replaceAll("\\.gz$", "");
}
DataFile datafile = null;
try {
uncompressedIn = new GZIPInputStream(new FileInputStream(tempFile.toFile()));
File unZippedTempFile = saveInputStreamInTempFile(uncompressedIn, fileSizeLimit);
datafile = createSingleDataFile(version, unZippedTempFile, finalFileName, MIME_TYPE_UNDETERMINED_DEFAULT, systemConfig.getFileFixityChecksumAlgorithm());
} catch (IOException | FileExceedsMaxSizeException ioex) {
datafile = null;
} finally {
if (uncompressedIn != null) {
try {uncompressedIn.close();} catch (IOException e) {}
}
}
// If we were able to produce an uncompressed file, we'll use it
// to create and return a final DataFile; if not, we're not going
// to do anything - and then a new DataFile will be created further
// down, from the original, uncompressed file.
if (datafile != null) {
// remove the compressed temp file:
try {
tempFile.toFile().delete();
} catch (SecurityException ex) {
// (this is very non-fatal)
logger.warning("Failed to delete temporary file "+tempFile.toString());
}
datafiles.add(datafile);
return datafiles;
}
// If it's a ZIP file, we are going to unpack it and create multiple
// DataFile objects from its contents:
} else if (finalType.equals("application/zip")) {
ZipInputStream unZippedIn = null;
ZipEntry zipEntry = null;
int fileNumberLimit = systemConfig.getZipUploadFilesLimit();
try {
Charset charset = null;
/*
TODO: (?)
We may want to investigate somehow letting the user specify
the charset for the filenames in the zip file...
                - otherwise, ZipInputStream bails out if it encounters a file
                name that's not valid in the current charset (i.e., UTF-8, in
our case). It would be a bit trickier than what we're doing for
SPSS tabular ingests - with the lang. encoding pulldown menu -
because this encoding needs to be specified *before* we upload and
attempt to unzip the file.
-- L.A. 4.0 beta12
logger.info("default charset is "+Charset.defaultCharset().name());
if (Charset.isSupported("US-ASCII")) {
logger.info("charset US-ASCII is supported.");
charset = Charset.forName("US-ASCII");
if (charset != null) {
logger.info("was able to obtain charset for US-ASCII");
}
}
*/
if (charset != null) {
unZippedIn = new ZipInputStream(new FileInputStream(tempFile.toFile()), charset);
} else {
unZippedIn = new ZipInputStream(new FileInputStream(tempFile.toFile()));
}
while (true) {
try {
zipEntry = unZippedIn.getNextEntry();
} catch (IllegalArgumentException iaex) {
// Note:
// ZipInputStream documentation doesn't even mention that
// getNextEntry() throws an IllegalArgumentException!
// but that's what happens if the file name of the next
// entry is not valid in the current CharSet.
// -- L.A.
warningMessage = "Failed to unpack Zip file. (Unknown Character Set used in a file name?) Saving the file as is.";
logger.warning(warningMessage);
throw new IOException();
}
if (zipEntry == null) {
break;
}
// Note that some zip entries may be directories - we
// simply skip them:
if (!zipEntry.isDirectory()) {
if (datafiles.size() > fileNumberLimit) {
logger.warning("Zip upload - too many files.");
warningMessage = "The number of files in the zip archive is over the limit (" + fileNumberLimit +
"); please upload a zip archive with fewer files, if you want them to be ingested " +
"as individual DataFiles.";
throw new IOException();
}
String fileEntryName = zipEntry.getName();
logger.fine("ZipEntry, file: "+fileEntryName);
if (fileEntryName != null && !fileEntryName.equals("")) {
String shortName = fileEntryName.replaceFirst("^.*[\\/]", "");
// Check if it's a "fake" file - a zip archive entry
// created for a MacOS X filesystem element: (these
// start with "._")
if (!shortName.startsWith("._") && !shortName.startsWith(".DS_Store") && !"".equals(shortName)) {
// OK, this seems like an OK file entry - we'll try
// to read it and create a DataFile with it:
File unZippedTempFile = saveInputStreamInTempFile(unZippedIn, fileSizeLimit);
DataFile datafile = createSingleDataFile(version, unZippedTempFile, shortName, MIME_TYPE_UNDETERMINED_DEFAULT, systemConfig.getFileFixityChecksumAlgorithm(), false);
if (!fileEntryName.equals(shortName)) {
// If the filename looks like a hierarchical folder name (i.e., contains slashes and backslashes),
// we'll extract the directory name, then a) strip the leading and trailing slashes;
// and b) replace all the back slashes with regular ones and b) replace any multiple
// slashes with a single slash:
String directoryName = fileEntryName.replaceFirst("[\\/][\\/]*[^\\/]*$", "").replaceFirst("^[\\/]*", "").replaceAll("[\\/][\\/]*", "/");
if (!"".equals(directoryName)) {
logger.fine("setting the directory label to " + directoryName);
datafile.getFileMetadata().setDirectoryLabel(directoryName);
}
}
if (datafile != null) {
// We have created this datafile with the mime type "unknown";
// Now that we have it saved in a temporary location,
// let's try and determine its real type:
String tempFileName = getFilesTempDirectory() + "/" + datafile.getStorageIdentifier();
try {
recognizedType = determineFileType(new File(tempFileName), shortName);
logger.fine("File utility recognized unzipped file as " + recognizedType);
if (recognizedType != null && !recognizedType.equals("")) {
datafile.setContentType(recognizedType);
}
} catch (Exception ex) {
logger.warning("Failed to run the file utility mime type check on file " + fileName);
}
datafiles.add(datafile);
}
}
}
}
unZippedIn.closeEntry();
}
} catch (IOException ioex) {
// just clear the datafiles list and let
// ingest default to creating a single DataFile out
// of the unzipped file.
logger.warning("Unzipping failed; rolling back to saving the file as is.");
if (warningMessage == null) {
warningMessage = "Failed to unzip the file. Saving the file as is.";
}
datafiles.clear();
} catch (FileExceedsMaxSizeException femsx) {
logger.warning("One of the unzipped files exceeds the size limit; resorting to saving the file as is. " + femsx.getMessage());
warningMessage = femsx.getMessage() + "; saving the zip file as is, unzipped.";
datafiles.clear();
} finally {
if (unZippedIn != null) {
try {unZippedIn.close();} catch (Exception zEx) {}
}
}
if (datafiles.size() > 0) {
// link the data files to the dataset/version:
// (except we no longer want to do this! -- 4.6)
/*Iterator<DataFile> itf = datafiles.iterator();
while (itf.hasNext()) {
DataFile datafile = itf.next();
datafile.setOwner(version.getDataset());
if (version.getFileMetadatas() == null) {
version.setFileMetadatas(new ArrayList());
}
version.getFileMetadatas().add(datafile.getFileMetadata());
datafile.getFileMetadata().setDatasetVersion(version);
version.getDataset().getFiles().add(datafile);
} */
// remove the uploaded zip file:
try {
Files.delete(tempFile);
} catch (IOException ioex) {
// do nothing - it's just a temp file.
logger.warning("Could not remove temp file "+tempFile.getFileName().toString());
}
// and return:
return datafiles;
}
} else if (finalType.equalsIgnoreCase(ShapefileHandler.SHAPEFILE_FILE_TYPE)) {
// Shape files may have to be split into multiple files,
// one zip archive per each complete set of shape files:
//File rezipFolder = new File(this.getFilesTempDirectory());
File rezipFolder = getShapefileUnzipTempDirectory();
IngestServiceShapefileHelper shpIngestHelper;
shpIngestHelper = new IngestServiceShapefileHelper(tempFile.toFile(), rezipFolder);
boolean didProcessWork = shpIngestHelper.processFile();
if (!(didProcessWork)){
logger.severe("Processing of zipped shapefile failed.");
return null;
}
try {
for (File finalFile : shpIngestHelper.getFinalRezippedFiles()) {
FileInputStream finalFileInputStream = new FileInputStream(finalFile);
finalType = determineContentType(finalFile);
if (finalType == null) {
logger.warning("Content type is null; but should default to 'MIME_TYPE_UNDETERMINED_DEFAULT'");
continue;
}
File unZippedShapeTempFile = saveInputStreamInTempFile(finalFileInputStream, fileSizeLimit);
DataFile new_datafile = createSingleDataFile(version, unZippedShapeTempFile, finalFile.getName(), finalType, systemConfig.getFileFixityChecksumAlgorithm());
if (new_datafile != null) {
datafiles.add(new_datafile);
} else {
logger.severe("Could not add part of rezipped shapefile. new_datafile was null: " + finalFile.getName());
}
finalFileInputStream.close();
}
} catch (FileExceedsMaxSizeException femsx) {
logger.severe("One of the unzipped shape files exceeded the size limit; giving up. " + femsx.getMessage());
datafiles.clear();
}
// Delete the temp directory used for unzipping
FileUtils.deleteDirectory(rezipFolder);
if (datafiles.size() > 0) {
// remove the uploaded zip file:
try {
Files.delete(tempFile);
} catch (IOException ioex) {
// do nothing - it's just a temp file.
logger.warning("Could not remove temp file " + tempFile.getFileName().toString());
} catch (SecurityException se) {
logger.warning("Unable to delete: " + tempFile.toString() + "due to Security Exception: "
+ se.getMessage());
}
return datafiles;
}else{
logger.severe("No files added from directory of rezipped shapefiles");
}
return null;
}
// Finally, if none of the special cases above were applicable (or
// if we were unable to unpack an uploaded file, etc.), we'll just
// create and return a single DataFile:
DataFile datafile = createSingleDataFile(version, tempFile.toFile(), fileName, finalType, systemConfig.getFileFixityChecksumAlgorithm());
if (datafile != null && tempFile.toFile() != null) {
if (warningMessage != null) {
createIngestFailureReport(datafile, warningMessage);
datafile.SetIngestProblem();
}
datafiles.add(datafile);
return datafiles;
}
return null;
} // end createDataFiles
private static File saveInputStreamInTempFile(InputStream inputStream, Long fileSizeLimit)
throws IOException, FileExceedsMaxSizeException {
Path tempFile = Files.createTempFile(Paths.get(getFilesTempDirectory()), "tmp", "upload");
if (inputStream != null && tempFile != null) {
Files.copy(inputStream, tempFile, StandardCopyOption.REPLACE_EXISTING);
// size check:
// (note that "no size limit set" = "unlimited")
Long fileSize = tempFile.toFile().length();
if (fileSizeLimit != null && fileSize > fileSizeLimit) {
try {tempFile.toFile().delete();} catch (Exception ex) {}
throw new FileExceedsMaxSizeException (MessageFormat.format(BundleUtil.getStringFromBundle("file.addreplace.error.file_exceeds_limit"), bytesToHumanReadable(fileSize), bytesToHumanReadable(fileSizeLimit)));
}
return tempFile.toFile();
}
throw new IOException("Failed to save uploaded file.");
}
/*
* This method creates a DataFile;
 * The bytes from the supplied InputStream have already been saved in the temporary location.
* This method should only be called by the upper-level methods that handle
* file upload and creation for individual use cases - a single file upload,
* an upload of a zip archive that needs to be unpacked and turned into
* individual files, etc., and once the file name and mime type have already
* been figured out.
*/
private static DataFile createSingleDataFile(DatasetVersion version, File tempFile, String fileName, String contentType, DataFile.ChecksumType checksumType) {
return createSingleDataFile(version, tempFile, fileName, contentType, checksumType, false);
}
private static DataFile createSingleDataFile(DatasetVersion version, File tempFile, String fileName, String contentType, DataFile.ChecksumType checksumType, boolean addToDataset) {
if (tempFile == null) {
return null;
}
DataFile datafile = new DataFile(contentType);
datafile.setModificationTime(new Timestamp(new Date().getTime()));
/**
* @todo Think more about when permissions on files are modified.
* Obviously, here at create time files have some sort of permissions,
* even if these permissions are *implied*, by ViewUnpublishedDataset at
* the dataset level, for example.
*/
datafile.setPermissionModificationTime(new Timestamp(new Date().getTime()));
FileMetadata fmd = new FileMetadata();
// TODO: add directoryLabel?
fmd.setLabel(fileName);
if (addToDataset) {
datafile.setOwner(version.getDataset());
}
fmd.setDataFile(datafile);
datafile.getFileMetadatas().add(fmd);
if (addToDataset) {
if (version.getFileMetadatas() == null) {
version.setFileMetadatas(new ArrayList<>());
}
version.getFileMetadatas().add(fmd);
fmd.setDatasetVersion(version);
version.getDataset().getFiles().add(datafile);
}
generateStorageIdentifier(datafile);
if (!tempFile.renameTo(new File(getFilesTempDirectory() + "/" + datafile.getStorageIdentifier()))) {
return null;
}
try {
// We persist "SHA1" rather than "SHA-1".
datafile.setChecksumType(checksumType);
datafile.setChecksumValue(CalculateChecksum(getFilesTempDirectory() + "/" + datafile.getStorageIdentifier(), datafile.getChecksumType()));
} catch (Exception cksumEx) {
logger.warning("Could not calculate " + checksumType + " signature for the new file " + fileName);
}
return datafile;
}
/**
For the restructuring of zipped shapefiles, create a timestamped directory.
This directory is deleted after successful restructuring.
Naming convention: getFilesTempDirectory() + "shp_" + "yyyy-MM-dd-hh-mm-ss-SSS"
*/
private static File getShapefileUnzipTempDirectory(){
String tempDirectory = getFilesTempDirectory();
if (tempDirectory == null){
logger.severe("Failed to retrieve tempDirectory, null was returned" );
return null;
}
String datestampedFileName = "shp_" + new SimpleDateFormat("yyyy-MM-dd-hh-mm-ss-SSS").format(new Date());
String datestampedFolderName = tempDirectory + "/" + datestampedFileName;
File datestampedFolder = new File(datestampedFolderName);
if (!datestampedFolder.isDirectory()) {
/* Note that "createDirectories()" must be used - not
* "createDirectory()", to make sure all the parent
* directories that may not yet exist are created as well.
*/
try {
Files.createDirectories(Paths.get(datestampedFolderName));
} catch (IOException ex) {
logger.severe("Failed to create temp. directory to unzip shapefile: " + datestampedFolderName );
return null;
}
}
return datestampedFolder;
}
public static boolean canIngestAsTabular(DataFile dataFile) {
String mimeType = dataFile.getContentType();
return canIngestAsTabular(mimeType);
}
public static boolean canIngestAsTabular(String mimeType) {
/*
* In the final 4.0 we'll be doing real-time checks, going through the
* available plugins and verifying the lists of mime types that they
* can handle. In 4.0 beta, the ingest plugins are still built into the
* main code base, so we can just go through a hard-coded list of mime
* types. -- L.A.
*/
if (mimeType == null) {
return false;
}
switch (mimeType) {
case MIME_TYPE_STATA:
case MIME_TYPE_STATA13:
case MIME_TYPE_STATA14:
case MIME_TYPE_STATA15:
case MIME_TYPE_RDATA:
case MIME_TYPE_CSV:
case MIME_TYPE_CSV_ALT:
case MIME_TYPE_TSV:
case MIME_TYPE_TSV_ALT:
case MIME_TYPE_XLSX:
case MIME_TYPE_SPSS_SAV:
case MIME_TYPE_SPSS_POR:
return true;
default:
return false;
}
}
public static String getFilesTempDirectory() {
String filesRootDirectory = System.getProperty("dataverse.files.directory");
if (filesRootDirectory == null || filesRootDirectory.equals("")) {
filesRootDirectory = "/tmp/files";
}
String filesTempDirectory = filesRootDirectory + "/temp";
if (!Files.exists(Paths.get(filesTempDirectory))) {
/* Note that "createDirectories()" must be used - not
* "createDirectory()", to make sure all the parent
* directories that may not yet exist are created as well.
*/
try {
Files.createDirectories(Paths.get(filesTempDirectory));
} catch (IOException ex) {
logger.severe("Failed to create filesTempDirectory: " + filesTempDirectory );
return null;
}
}
return filesTempDirectory;
}
public static void generateS3PackageStorageIdentifier(DataFile dataFile) {
String bucketName = System.getProperty("dataverse.files.s3-bucket-name");
String storageId = S3_IDENTIFIER_PREFIX + "://" + bucketName + ":" + dataFile.getFileMetadata().getLabel();
dataFile.setStorageIdentifier(storageId);
}
public static void generateStorageIdentifier(DataFile dataFile) {
dataFile.setStorageIdentifier(generateStorageIdentifier());
}
public static String generateStorageIdentifier() {
UUID uid = UUID.randomUUID();
logger.log(Level.FINE, "UUID value: {0}", uid.toString());
// last 6 bytes, of the random UUID, in hex:
String hexRandom = uid.toString().substring(24);
logger.log(Level.FINE, "UUID (last 6 bytes, 12 hex digits): {0}", hexRandom);
String hexTimestamp = Long.toHexString(new Date().getTime());
logger.log(Level.FINE, "(not UUID) timestamp in hex: {0}", hexTimestamp);
String storageIdentifier = hexTimestamp + "-" + hexRandom;
logger.log(Level.FINE, "timestamp/UUID hybrid: {0}", storageIdentifier);
return storageIdentifier;
}
public static void createIngestFailureReport(DataFile dataFile, String message) {
createIngestReport(dataFile, IngestReport.INGEST_STATUS_FAILURE, message);
}
private static void createIngestReport (DataFile dataFile, int status, String message) {
IngestReport errorReport = new IngestReport();
if (status == IngestReport.INGEST_STATUS_FAILURE) {
errorReport.setFailure();
errorReport.setReport(message);
errorReport.setDataFile(dataFile);
dataFile.setIngestReport(errorReport);
}
}
public enum FileCitationExtension {
ENDNOTE("-endnote.xml"),
RIS(".ris"),
BIBTEX(".bib");
private final String text;
private FileCitationExtension(final String text) {
this.text = text;
}
}
public static String getCiteDataFileFilename(String fileTitle, FileCitationExtension fileCitationExtension) {
if((fileTitle==null) || (fileCitationExtension == null)) {
return null;
}
if (fileTitle.endsWith("tab")) {
return fileTitle.replaceAll("\\.tab$", fileCitationExtension.text);
} else {
return fileTitle + fileCitationExtension.text;
}
}
/**
* @todo Consider returning not only the boolean but the human readable
* reason why the popup is required, which could be used in the GUI to
* elaborate on the text "This file cannot be downloaded publicly."
*/
public static boolean isDownloadPopupRequired(DatasetVersion datasetVersion) {
// Each of these conditions is sufficient reason to have to
// present the user with the popup:
if (datasetVersion == null) {
logger.fine("Download popup required because datasetVersion is null.");
return false;
}
//0. if version is draft then Popup "not required"
if (!datasetVersion.isReleased()) {
logger.fine("Download popup required because datasetVersion has not been released.");
return false;
}
// 1. License and Terms of Use:
if (datasetVersion.getTermsOfUseAndAccess() != null) {
if (!TermsOfUseAndAccess.License.CC0.equals(datasetVersion.getTermsOfUseAndAccess().getLicense())
&& !(datasetVersion.getTermsOfUseAndAccess().getTermsOfUse() == null
|| datasetVersion.getTermsOfUseAndAccess().getTermsOfUse().equals(""))) {
logger.fine("Download popup required because of license or terms of use.");
return true;
}
// 2. Terms of Access:
if (!(datasetVersion.getTermsOfUseAndAccess().getTermsOfAccess() == null) && !datasetVersion.getTermsOfUseAndAccess().getTermsOfAccess().equals("")) {
logger.fine("Download popup required because of terms of access.");
return true;
}
}
// 3. Guest Book:
if (datasetVersion.getDataset() != null && datasetVersion.getDataset().getGuestbook() != null && datasetVersion.getDataset().getGuestbook().isEnabled() && datasetVersion.getDataset().getGuestbook().getDataverse() != null) {
logger.fine("Download popup required because of guestbook.");
return true;
}
logger.fine("Download popup is not required.");
return false;
}
public static boolean isRequestAccessPopupRequired(DatasetVersion datasetVersion){
// Each of these conditions is sufficient reason to have to
// present the user with the popup:
if (datasetVersion == null) {
logger.fine("Download popup required because datasetVersion is null.");
return false;
}
//0. if version is draft then Popup "not required"
if (!datasetVersion.isReleased()) {
logger.fine("Download popup required because datasetVersion has not been released.");
return false;
}
// 1. License and Terms of Use:
if (datasetVersion.getTermsOfUseAndAccess() != null) {
if (!TermsOfUseAndAccess.License.CC0.equals(datasetVersion.getTermsOfUseAndAccess().getLicense())
&& !(datasetVersion.getTermsOfUseAndAccess().getTermsOfUse() == null
|| datasetVersion.getTermsOfUseAndAccess().getTermsOfUse().equals(""))) {
logger.fine("Download popup required because of license or terms of use.");
return true;
}
// 2. Terms of Access:
if (!(datasetVersion.getTermsOfUseAndAccess().getTermsOfAccess() == null) && !datasetVersion.getTermsOfUseAndAccess().getTermsOfAccess().equals("")) {
logger.fine("Download popup required because of terms of access.");
return true;
}
}
logger.fine("Download popup is not required.");
return false;
}
/**
* Provide download URL if no Terms of Use, no guestbook, and not
* restricted.
*/
public static boolean isPubliclyDownloadable(FileMetadata fileMetadata) {
if (fileMetadata == null) {
return false;
}
if (fileMetadata.isRestricted()) {
String msg = "Not publicly downloadable because the file is restricted.";
logger.fine(msg);
return false;
}
boolean popupReasons = isDownloadPopupRequired(fileMetadata.getDatasetVersion());
if (popupReasons == true) {
/**
* @todo The user clicking publish may have a bad "Dude, where did
* the file Download URL go" experience in the following scenario:
*
* - The user creates a dataset and uploads a file.
*
* - The user sets Terms of Use, which means a Download URL should
* not be displayed.
*
* - While the dataset is in draft, the Download URL is displayed
* due to the rule "Download popup required because datasetVersion
* has not been released."
*
* - Once the dataset is published the Download URL disappears due
* to the rule "Download popup required because of license or terms
* of use."
*
* In short, the Download URL disappears on publish in the scenario
* above, which is weird. We should probably attempt to see into the
* future to when the dataset is published to see if the file will
* be publicly downloadable or not.
*/
return false;
}
return true;
}
/**
* This is what the UI displays for "Download URL" on the file landing page
* (DOIs rather than file IDs).
*/
public static String getPublicDownloadUrl(String dataverseSiteUrl, String persistentId) {
String path = "/api/access/datafile/:persistentId?persistentId=" + persistentId;
return dataverseSiteUrl + path;
}
/**
* The FileDownloadServiceBean operates on file IDs, not DOIs.
*/
public static String getFileDownloadUrlPath(String downloadType, Long fileId, boolean gbRecordsWritten) {
String fileDownloadUrl = "/api/access/datafile/" + fileId;
if (downloadType != null && downloadType.equals("bundle")) {
fileDownloadUrl = "/api/access/datafile/bundle/" + fileId;
}
if (downloadType != null && downloadType.equals("original")) {
fileDownloadUrl = "/api/access/datafile/" + fileId + "?format=original";
}
if (downloadType != null && downloadType.equals("RData")) {
fileDownloadUrl = "/api/access/datafile/" + fileId + "?format=RData";
}
if (downloadType != null && downloadType.equals("var")) {
fileDownloadUrl = "/api/access/datafile/" + fileId + "/metadata";
}
if (downloadType != null && downloadType.equals("tab")) {
fileDownloadUrl = "/api/access/datafile/" + fileId + "?format=tab";
}
if (gbRecordsWritten) {
if (downloadType != null && (downloadType.equals("original") || downloadType.equals("RData") || downloadType.equals("tab"))) {
fileDownloadUrl += "&gbrecs=true";
} else {
fileDownloadUrl += "?gbrecs=true";
}
}
logger.fine("Returning file download url: " + fileDownloadUrl);
return fileDownloadUrl;
}
public static File inputStreamToFile(InputStream inputStream) throws IOException {
if (inputStream == null) {
logger.info("In inputStreamToFile but inputStream was null! Returning null rather than a File.");
return null;
}
File file = File.createTempFile(UUID.randomUUID().toString(), UUID.randomUUID().toString());
try(OutputStream outputStream = new FileOutputStream(file)){
int read = 0;
byte[] bytes = new byte[1024];
while ((read = inputStream.read(bytes)) != -1) {
outputStream.write(bytes, 0, read);
}
return file;
}
}
/*
* This method tells you if thumbnail generation is *supported*
* on this type of file. i.e., if true, it does not guarantee that a thumbnail
* can/will be generated; but it means that we can try.
*/
public static boolean isThumbnailSupported (DataFile file) {
if (file == null) {
return false;
}
if (file.isHarvested() || "".equals(file.getStorageIdentifier())) {
return false;
}
String contentType = file.getContentType();
// Some browsers (Chrome?) seem to identify FITS files as mime
// type "image/fits" on upload; this is both incorrect (the official
// mime type for FITS is "application/fits"), and problematic: then
// the file is identified as an image, and the page will attempt to
// generate a preview - which of course is going to fail...
if (MIME_TYPE_FITSIMAGE.equalsIgnoreCase(contentType)) {
return false;
}
// besides most image/* types, we can generate thumbnails for
// pdf and "world map" files:
return (contentType != null &&
(contentType.startsWith("image/") ||
contentType.equalsIgnoreCase("application/pdf") ||
(file.isTabularData() && file.hasGeospatialTag()) ||
contentType.equalsIgnoreCase(MIME_TYPE_GEO_SHAPE)));
}
/*
* The method below appears to be unnecessary;
* it duplicates the method generateImageThumbnailFromFileAsBase64() from ImageThumbConverter;
* plus it creates an unnecessary temp file copy of the source file.
public static String rescaleImage(File file) throws IOException {
if (file == null) {
logger.info("file was null!!");
return null;
}
File tmpFile = File.createTempFile("tempFileToRescale", ".tmp");
BufferedImage fullSizeImage = ImageIO.read(file);
if (fullSizeImage == null) {
logger.info("fullSizeImage was null!");
return null;
}
int width = fullSizeImage.getWidth();
int height = fullSizeImage.getHeight();
FileChannel src = new FileInputStream(file).getChannel();
FileChannel dest = new FileOutputStream(tmpFile).getChannel();
dest.transferFrom(src, 0, src.size());
String pathToResizedFile = ImageThumbConverter.rescaleImage(fullSizeImage, width, height, ImageThumbConverter.DEFAULT_CARDIMAGE_SIZE, tmpFile.getAbsolutePath());
File resizedFile = new File(pathToResizedFile);
return ImageThumbConverter.getImageAsBase64FromFile(resizedFile);
}
*/
public static DatasetThumbnail getThumbnail(DataFile file) {
String imageSourceBase64 = ImageThumbConverter.getImageThumbnailAsBase64(file, ImageThumbConverter.DEFAULT_THUMBNAIL_SIZE);
DatasetThumbnail defaultDatasetThumbnail = new DatasetThumbnail(imageSourceBase64, file);
return defaultDatasetThumbnail;
}
public static boolean isPackageFile(DataFile dataFile) {
return DataFileServiceBean.MIME_TYPE_PACKAGE_FILE.equalsIgnoreCase(dataFile.getContentType());
}
}
| 1 | 39,313 | Looks like glassfish went a bit nuts doing a refactor | IQSS-dataverse | java |
@@ -47,4 +47,12 @@ describe('dom.visuallyContains', function () {
assert.isTrue(axe.commons.dom.visuallyContains(target, target.parentNode));
});
+ it('should return true when element is inline', function () {
+ // result depends on the display property of the element
+ fixture.innerHTML = '<label>' +
+ 'My label <input type="text" id="target">' +
+ '</label>';
+ var target = fixture.querySelector('#target');
+ assert.isTrue(axe.commons.dom.visuallyContains(target, target.parentNode));
+ });
}); | 1 | describe('dom.visuallyContains', function () {
'use strict';
var fixture = document.getElementById('fixture');
afterEach(function () {
document.getElementById('fixture').innerHTML = '';
});
it('should return true when element is trivially contained', function () {
fixture.innerHTML = '<div style="height: 40px; width: 30px; background-color: red;">' +
'<div id="target" style="height: 20px; width: 15px; background-color: green;">' +
'</div></div>';
var target = fixture.querySelector('#target');
assert.isTrue(axe.commons.dom.visuallyContains(target, target.parentNode));
});
it('should return false when overflow is hidden', function () {
fixture.innerHTML = '<div style="height: 40px; width: 30px; background-color: red; overflow: hidden;">' +
'<div id="target" style="height: 20px; width: 45px; background-color: green;">' +
'</div></div>';
var target = fixture.querySelector('#target');
assert.isTrue(axe.commons.dom.visuallyContains(target, target.parentNode));
});
it('should return false when element is outside of margin', function () {
fixture.innerHTML = '<div style="height: 40px; width: 30px; margin-left: 30px; background-color: red;">' +
'<div id="target" style="height: 20px; width: 45px; margin-left: -20px; background-color: green;">' +
'</div></div>';
var target = fixture.querySelector('#target');
assert.isFalse(axe.commons.dom.visuallyContains(target, target.parentNode));
});
it('should return false when overflow is visible', function () {
fixture.innerHTML = '<div style="height: 40px; width: 30px; background-color: red; overflow: visible;">' +
'<div id="target" style="height: 20px; width: 45px; background-color: green;">' +
'</div></div>';
var target = fixture.querySelector('#target');
assert.isFalse(axe.commons.dom.visuallyContains(target, target.parentNode));
});
it('should return true when element is scrollable', function () {
fixture.innerHTML = '<div style="height: 40px; width: 30px; background-color: red; overflow: scroll;">' +
'<div id="target" style="height: 20px; width: 45px; background-color: green;">' +
'</div></div>';
var target = fixture.querySelector('#target');
assert.isTrue(axe.commons.dom.visuallyContains(target, target.parentNode));
});
});
| 1 | 11,014 | Couldn't you use position:absolute or float to move inline elements outside their parent? Through clipping, a child element can also be outside its parent. There are probably some other ways to do it too. So I'm not sure the assumption you're making here is right. | dequelabs-axe-core | js |
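A minimal sketch of the case this review comment raises, reusing the fixture/assert conventions of the spec above; the inline styles and the expected isFalse outcome are the reviewer's hypothesis, not an existing axe-core test:

it('should return false when an inline child is absolutely positioned outside its parent', function () {
  // Hypothetical follow-up case: the input is inline in the markup but is
  // pulled out of the label's painted box with position: absolute.
  fixture.innerHTML = '<label style="position: relative; display: block; width: 100px; height: 20px;">' +
    'My label <input type="text" id="target" style="position: absolute; left: 500px;">' +
    '</label>';
  var target = fixture.querySelector('#target');
  assert.isFalse(axe.commons.dom.visuallyContains(target, target.parentNode));
});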
@@ -78,7 +78,7 @@ public class ExpireSnapshotsProcedure extends BaseProcedure {
@Override
public InternalRow[] call(InternalRow args) {
Identifier tableIdent = toIdentifier(args.getString(0), PARAMETERS[0].name());
- Long olderThanMillis = args.isNullAt(1) ? null : DateTimeUtils.toMillis(args.getLong(1));
+ Long olderThanMillis = args.isNullAt(1) ? null : DateTimeUtils.microsToMillis(args.getLong(1));
Integer retainLastNum = args.isNullAt(2) ? null : args.getInt(2);
return modifyIcebergTable(tableIdent, table -> { | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg.spark.procedures;
import org.apache.iceberg.actions.Actions;
import org.apache.iceberg.actions.ExpireSnapshotsAction;
import org.apache.iceberg.actions.ExpireSnapshotsActionResult;
import org.apache.iceberg.spark.procedures.SparkProcedures.ProcedureBuilder;
import org.apache.spark.sql.catalyst.InternalRow;
import org.apache.spark.sql.catalyst.util.DateTimeUtils;
import org.apache.spark.sql.connector.catalog.Identifier;
import org.apache.spark.sql.connector.catalog.TableCatalog;
import org.apache.spark.sql.connector.iceberg.catalog.ProcedureParameter;
import org.apache.spark.sql.types.DataTypes;
import org.apache.spark.sql.types.Metadata;
import org.apache.spark.sql.types.StructField;
import org.apache.spark.sql.types.StructType;
/**
* A procedure that expires snapshots in a table.
*
* @see Actions#expireSnapshots()
*/
public class ExpireSnapshotsProcedure extends BaseProcedure {
private static final ProcedureParameter[] PARAMETERS = new ProcedureParameter[] {
ProcedureParameter.required("table", DataTypes.StringType),
ProcedureParameter.optional("older_than", DataTypes.TimestampType),
ProcedureParameter.optional("retain_last", DataTypes.IntegerType)
};
private static final StructType OUTPUT_TYPE = new StructType(new StructField[]{
new StructField("deleted_data_files_count", DataTypes.LongType, true, Metadata.empty()),
new StructField("deleted_manifest_files_count", DataTypes.LongType, true, Metadata.empty()),
new StructField("deleted_manifest_lists_count", DataTypes.LongType, true, Metadata.empty())
});
public static ProcedureBuilder builder() {
return new BaseProcedure.Builder<ExpireSnapshotsProcedure>() {
@Override
protected ExpireSnapshotsProcedure doBuild() {
return new ExpireSnapshotsProcedure(tableCatalog());
}
};
}
private ExpireSnapshotsProcedure(TableCatalog tableCatalog) {
super(tableCatalog);
}
@Override
public ProcedureParameter[] parameters() {
return PARAMETERS;
}
@Override
public StructType outputType() {
return OUTPUT_TYPE;
}
@Override
public InternalRow[] call(InternalRow args) {
Identifier tableIdent = toIdentifier(args.getString(0), PARAMETERS[0].name());
Long olderThanMillis = args.isNullAt(1) ? null : DateTimeUtils.toMillis(args.getLong(1));
Integer retainLastNum = args.isNullAt(2) ? null : args.getInt(2);
return modifyIcebergTable(tableIdent, table -> {
Actions actions = Actions.forTable(table);
ExpireSnapshotsAction action = actions.expireSnapshots();
if (olderThanMillis != null) {
action.expireOlderThan(olderThanMillis);
}
if (retainLastNum != null) {
action.retainLast(retainLastNum);
}
ExpireSnapshotsActionResult result = action.execute();
InternalRow outputRow = newInternalRow(
result.dataFilesDeleted(),
result.manifestFilesDeleted(),
result.manifestListsDeleted());
return new InternalRow[]{outputRow};
});
}
@Override
public String description() {
return "ExpireSnapshotProcedure";
}
}
| 1 | 36,038 | This could be copied into iceberg code to avoid the spark internal dep? We could use a version check to adjust the method used if needed. | apache-iceberg | java |
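A minimal sketch of the reviewer's suggestion, assuming the procedure's timestamp argument arrives as microseconds since the epoch (Spark's internal TimestampType representation); the helper name and placement are hypothetical, not existing Iceberg code:

// Inlining the conversion removes the compile-time dependency on Spark's
// internal DateTimeUtils class.
private static long microsToMillis(long micros) {
  // Math.floorDiv rounds toward negative infinity, so pre-1970 (negative)
  // timestamps are not truncated toward zero.
  return Math.floorDiv(micros, 1000L);
}

The call in ExpireSnapshotsProcedure.call() would then use this local helper instead of DateTimeUtils.microsToMillis, and the version check mentioned in the comment would no longer be needed for this conversion.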
@@ -254,7 +254,7 @@ static void subprocess_destroy_finish (flux_future_t *f, void *arg)
if (flux_future_get (f, NULL) < 0) {
flux_t *h = flux_subprocess_aux_get (p, "flux_t");
flux_log_error (h, "subprocess_kill: %ju: %s",
- (uintmax_t) flux_subprocess_pid,
+ (uintmax_t) flux_subprocess_pid (p),
flux_strerror (errno));
}
flux_subprocess_destroy (p); | 1 | /************************************************************\
* Copyright 2019 Lawrence Livermore National Security, LLC
* (c.f. AUTHORS, NOTICE.LLNS, COPYING)
*
* This file is part of the Flux resource manager framework.
* For details, see https://github.com/flux-framework.
*
* SPDX-License-Identifier: LGPL-3.0
\************************************************************/
#if HAVE_CONFIG_H
# include "config.h"
#endif
#include <sys/wait.h>
#define EXIT_CODE(x) __W_EXITCODE(x,0)
#include <flux/core.h>
#include <flux/idset.h>
#include <czmq.h>
#include "src/common/libutil/aux.h"
#include "bulk-exec.h"
struct exec_cmd {
struct idset *ranks;
flux_cmd_t *cmd;
int flags;
};
struct bulk_exec {
flux_t *h;
struct aux_item *aux;
int max_start_per_loop; /* Max subprocess started per event loop cb */
int total; /* Total processes expected to run */
int started; /* Number of processes that have reached start */
int complete; /* Number of processes that have completed */
int exit_status; /* Largest wait status of all complete procs */
unsigned int active:1;
flux_watcher_t *prep;
flux_watcher_t *check;
flux_watcher_t *idle;
struct idset *exit_batch; /* Support for batched exit notify */
flux_watcher_t *exit_batch_timer; /* Timer for batched exit notify */
flux_subprocess_ops_t ops;
zlist_t *commands;
zlist_t *processes;
struct bulk_exec_ops *handlers;
void *arg;
};
int bulk_exec_rc (struct bulk_exec *exec)
{
return (exec->exit_status);
}
int bulk_exec_current (struct bulk_exec *exec)
{
return zlist_size (exec->processes);
}
int bulk_exec_total (struct bulk_exec *exec)
{
return exec->total;
}
int bulk_exec_write (struct bulk_exec *exec, const char *stream,
const char *buf, size_t len)
{
flux_subprocess_t *p = zlist_first (exec->processes);
while (p) {
if (flux_subprocess_write (p, stream, buf, len) < len)
return -1;
p = zlist_next (exec->processes);
}
return 0;
}
int bulk_exec_close (struct bulk_exec *exec, const char *stream)
{
flux_subprocess_t *p = zlist_first (exec->processes);
while (p) {
if (flux_subprocess_close (p, stream) < 0)
return -1;
p = zlist_next (exec->processes);
}
return 0;
}
static int exec_exit_notify (struct bulk_exec *exec)
{
if (exec->handlers->on_exit)
(*exec->handlers->on_exit) (exec, exec->arg, exec->exit_batch);
if (exec->exit_batch_timer) {
flux_watcher_destroy (exec->exit_batch_timer);
exec->exit_batch_timer = NULL;
idset_range_clear (exec->exit_batch, 0, INT_MAX);
}
return 0;
}
static void exit_batch_cb (flux_reactor_t *r, flux_watcher_t *w,
int revents, void *arg)
{
struct bulk_exec *exec = arg;
exec_exit_notify (exec);
}
/* Append completed subprocess 'p' to the current batch for exit
* notification. If this is the first exited process in the batch,
* then start a timer which will fire and call the function to
* notify bulk_exec user of the batch of subprocess exits.
*
* This approach avoids unnecessarily calling into the user's callback
* multiple times when all tasks exit within 0.01s.
*/
static void exit_batch_append (struct bulk_exec *exec, flux_subprocess_t *p)
{
int rank = flux_subprocess_rank (p);
if (idset_set (exec->exit_batch, rank) < 0) {
flux_log_error (exec->h, "exit_batch_append:idset_set");
return;
}
if (!exec->exit_batch_timer) {
flux_reactor_t *r = flux_get_reactor (exec->h);
/* XXX: batch timer should eventually be configurable by caller */
exec->exit_batch_timer =
flux_timer_watcher_create (r, 0.01, 0.,
exit_batch_cb,
exec);
if (!exec->exit_batch_timer) {
flux_log_error (exec->h, "exit_batch_append:timer create");
return;
}
flux_watcher_start (exec->exit_batch_timer);
}
}
static void exec_add_completed (struct bulk_exec *exec, flux_subprocess_t *p)
{
/* Append this process to the current batch for notification */
exit_batch_append (exec, p);
if (++exec->complete == exec->total) {
exec_exit_notify (exec);
if (exec->handlers->on_complete)
(*exec->handlers->on_complete) (exec, exec->arg);
}
}
static void exec_complete_cb (flux_subprocess_t *p)
{
int status = flux_subprocess_status (p);
struct bulk_exec *exec = flux_subprocess_aux_get (p, "job-exec::exec");
if (status > exec->exit_status)
exec->exit_status = status;
exec_add_completed (exec, p);
}
static void exec_state_cb (flux_subprocess_t *p, flux_subprocess_state_t state)
{
struct bulk_exec *exec = flux_subprocess_aux_get (p, "job-exec::exec");
if (state == FLUX_SUBPROCESS_RUNNING) {
if (++exec->started == exec->total) {
if (exec->handlers->on_start)
(*exec->handlers->on_start) (exec, exec->arg);
}
}
else if (state == FLUX_SUBPROCESS_FAILED
|| state == FLUX_SUBPROCESS_EXEC_FAILED) {
int errnum = flux_subprocess_fail_errno (p);
int code = EXIT_CODE(1);
if (errnum == EPERM || errnum == EACCES)
code = EXIT_CODE(126);
else if (errnum == ENOENT)
code = EXIT_CODE(127);
else if (errnum == EHOSTUNREACH)
code = EXIT_CODE(68);
if (code > exec->exit_status)
exec->exit_status = code;
if (exec->handlers->on_error)
(*exec->handlers->on_error) (exec, p, exec->arg);
exec_add_completed (exec, p);
}
}
static void exec_output_cb (flux_subprocess_t *p, const char *stream)
{
struct bulk_exec *exec = flux_subprocess_aux_get (p, "job-exec::exec");
const char *s;
int len;
if (!(s = flux_subprocess_getline (p, stream, &len))) {
flux_log_error (exec->h, "flux_subprocess_getline");
return;
}
if (len) {
int rank = flux_subprocess_rank (p);
if (exec->handlers->on_output)
(*exec->handlers->on_output) (exec, p, stream, s, len, exec->arg);
else
flux_log (exec->h, LOG_INFO, "rank %d: %s: %s", rank, stream, s);
}
}
static void exec_cmd_destroy (void *arg)
{
struct exec_cmd *cmd = arg;
idset_destroy (cmd->ranks);
flux_cmd_destroy (cmd->cmd);
free (cmd);
}
static struct exec_cmd *exec_cmd_create (const struct idset *ranks,
flux_cmd_t *cmd,
int flags)
{
struct exec_cmd *c = calloc (1, sizeof (*c));
if (!c)
return NULL;
if (!(c->ranks = idset_copy (ranks))) {
fprintf (stderr, "exec_cmd_create: idset_copy failed");
goto err;
}
if (!(c->cmd = flux_cmd_copy (cmd))) {
fprintf (stderr, "exec_cmd_create: flux_cmd_copy failed");
goto err;
}
c->flags = flags;
return (c);
err:
exec_cmd_destroy (c);
return NULL;
}
static void subprocess_destroy_finish (flux_future_t *f, void *arg)
{
flux_subprocess_t *p = arg;
if (flux_future_get (f, NULL) < 0) {
flux_t *h = flux_subprocess_aux_get (p, "flux_t");
flux_log_error (h, "subprocess_kill: %ju: %s",
(uintmax_t) flux_subprocess_pid,
flux_strerror (errno));
}
flux_subprocess_destroy (p);
flux_future_destroy (f);
}
static int subprocess_destroy (flux_t *h, flux_subprocess_t *p)
{
flux_future_t *f = flux_subprocess_kill (p, SIGKILL);
if (!f || flux_future_then (f, -1., subprocess_destroy_finish, p) < 0)
return -1;
return 0;
}
static int exec_start_cmd (struct bulk_exec *exec,
struct exec_cmd *cmd,
int max)
{
int count = 0;
uint32_t rank;
rank = idset_first (cmd->ranks);
while (rank != IDSET_INVALID_ID && (max < 0 || count < max)) {
flux_subprocess_t *p = flux_rexec (exec->h,
rank,
cmd->flags,
cmd->cmd,
&exec->ops);
if (!p)
return -1;
if (flux_subprocess_aux_set (p, "job-exec::exec", exec, NULL) < 0
|| zlist_append (exec->processes, p) < 0) {
if (subprocess_destroy (exec->h, p) < 0)
flux_log_error (exec->h, "Unable to destroy pid %ju",
(uintmax_t) flux_subprocess_pid (p));
return -1;
}
zlist_freefn (exec->processes, p,
(zlist_free_fn *) flux_subprocess_unref,
true);
idset_clear (cmd->ranks, rank);
rank = idset_next (cmd->ranks, rank);
count++;
}
return count;
}
void bulk_exec_stop (struct bulk_exec *exec)
{
flux_watcher_stop (exec->prep);
flux_watcher_stop (exec->check);
}
static int exec_start_cmds (struct bulk_exec *exec, int max)
{
while (zlist_size (exec->commands) && (max != 0)) {
struct exec_cmd *cmd = zlist_first (exec->commands);
int rc = exec_start_cmd (exec, cmd, max);
if (rc < 0) {
flux_log_error (exec->h, "exec_start_cmd failed");
return -1;
}
if (idset_count (cmd->ranks) == 0)
zlist_remove (exec->commands, cmd);
if (max > 0)
max -= rc;
}
return 0;
}
static void prep_cb (flux_reactor_t *r, flux_watcher_t *w,
int revents, void *arg)
{
struct bulk_exec *exec = arg;
/* Don't block in reactor if there are commands to run */
if (zlist_size (exec->commands) > 0) {
flux_watcher_start (exec->idle);
flux_watcher_start (exec->check);
}
else
bulk_exec_stop (exec);
}
static void check_cb (flux_reactor_t *r, flux_watcher_t *w,
int revents, void *arg)
{
struct bulk_exec *exec = arg;
flux_watcher_stop (exec->idle);
flux_watcher_stop (exec->check);
if (exec_start_cmds (exec, exec->max_start_per_loop) < 0) {
bulk_exec_stop (exec);
if (exec->handlers->on_error)
(*exec->handlers->on_error) (exec, NULL, exec->arg);
}
}
void bulk_exec_destroy (struct bulk_exec *exec)
{
if (exec) {
zlist_destroy (&exec->processes);
zlist_destroy (&exec->commands);
idset_destroy (exec->exit_batch);
flux_watcher_destroy (exec->prep);
flux_watcher_destroy (exec->check);
flux_watcher_destroy (exec->idle);
aux_destroy (&exec->aux);
free (exec);
}
}
struct bulk_exec * bulk_exec_create (struct bulk_exec_ops *ops, void *arg)
{
flux_subprocess_ops_t sp_ops = {
.on_completion = exec_complete_cb,
.on_state_change = exec_state_cb,
.on_stdout = exec_output_cb,
.on_stderr = exec_output_cb,
};
struct bulk_exec *exec = calloc (1, sizeof (*exec));
if (!exec)
return NULL;
exec->ops = sp_ops;
exec->handlers = ops;
exec->arg = arg;
exec->processes = zlist_new ();
exec->commands = zlist_new ();
exec->exit_batch = idset_create (0, IDSET_FLAG_AUTOGROW);
exec->max_start_per_loop = 1;
return exec;
}
int bulk_exec_set_max_per_loop (struct bulk_exec *exec, int max)
{
if (max == 0) {
errno = EINVAL;
return -1;
}
exec->max_start_per_loop = max;
return 0;
}
int bulk_exec_push_cmd (struct bulk_exec *exec,
const struct idset *ranks,
flux_cmd_t *cmd,
int flags)
{
struct exec_cmd *c = exec_cmd_create (ranks, cmd, flags);
if (!c)
return -1;
if (zlist_append (exec->commands, c) < 0) {
exec_cmd_destroy (c);
return -1;
}
zlist_freefn (exec->commands, c, exec_cmd_destroy, true);
exec->total += idset_count (ranks);
if (exec->active) {
flux_watcher_start (exec->prep);
flux_watcher_start (exec->check);
}
return 0;
}
int bulk_exec_start (flux_t *h, struct bulk_exec *exec)
{
flux_reactor_t *r = flux_get_reactor (h);
exec->h = h;
exec->prep = flux_prepare_watcher_create (r, prep_cb, exec);
exec->check = flux_check_watcher_create (r, check_cb, exec);
exec->idle = flux_idle_watcher_create (r, NULL, NULL);
if (!exec->prep || !exec->check || !exec->idle)
return -1;
flux_watcher_start (exec->prep);
exec->active = 1;
return 0;
}
/* Cancel all pending commands.
*/
int bulk_exec_cancel (struct bulk_exec *exec)
{
struct exec_cmd *cmd = zlist_first (exec->commands);
if (!cmd)
return 0;
while (cmd) {
uint32_t rank = idset_first (cmd->ranks);
while (rank != IDSET_INVALID_ID) {
exec->complete++;
if (idset_set (exec->exit_batch, rank) < 0)
flux_log_error (exec->h, "bulk_exec_cance: idset_set");
rank = idset_next (cmd->ranks, rank);
}
cmd = zlist_next (exec->commands);
}
zlist_purge (exec->commands);
exec_exit_notify (exec);
if (exec->complete == exec->total) {
if (exec->handlers->on_complete)
(*exec->handlers->on_complete) (exec, exec->arg);
}
return 0;
}
flux_future_t *bulk_exec_kill (struct bulk_exec *exec, int signum)
{
flux_subprocess_t *p = zlist_first (exec->processes);
flux_future_t *cf = NULL;
if (!(cf = flux_future_wait_all_create ()))
return NULL;
flux_future_set_flux (cf, exec->h);
while (p) {
if (flux_subprocess_state (p) == FLUX_SUBPROCESS_RUNNING
|| flux_subprocess_state (p) == FLUX_SUBPROCESS_INIT) {
flux_future_t *f = NULL;
char s[64];
if (!(f = flux_subprocess_kill (p, signum))) {
int err = errno;
const char *errstr = flux_strerror (errno);
if ((f = flux_future_create (NULL, NULL)))
flux_future_fulfill_error (f, err, errstr);
else
flux_future_fulfill_error (cf, err, "Internal error");
}
(void) snprintf (s, sizeof (s)-1, "%u",
flux_subprocess_rank (p));
if (flux_future_push (cf, s, f) < 0) {
fprintf (stderr, "flux_future_push: %s\n", strerror (errno));
flux_future_destroy (f);
}
}
p = zlist_next (exec->processes);
}
/* If no child futures were pushed into the wait_all future `cf`,
* then no signals were sent and we should immediately return ENOENT.
*/
if (!flux_future_first_child (cf)) {
flux_future_destroy (cf);
errno = ENOENT;
return NULL;
}
return cf;
}
static void imp_kill_output (struct bulk_exec *kill,
flux_subprocess_t *p,
const char *stream,
const char *data,
int len,
void *arg)
{
int rank = flux_subprocess_rank (p);
flux_log (kill->h, LOG_INFO,
"rank%d: flux-imp kill: %s: %s",
rank,
stream,
data);
}
static void imp_kill_complete (struct bulk_exec *kill, void *arg)
{
flux_future_t *f = arg;
if (bulk_exec_rc (kill) < 0)
flux_future_fulfill_error (f, 0, NULL);
else
flux_future_fulfill (f, NULL, NULL);
}
static void imp_kill_error (struct bulk_exec *kill,
flux_subprocess_t *p,
void *arg)
{
flux_log_error (kill->h,
"imp kill: rank=%d: failed",
flux_subprocess_rank (p));
}
struct bulk_exec_ops imp_kill_ops = {
.on_output = imp_kill_output,
.on_error = imp_kill_error,
.on_complete = imp_kill_complete,
};
static int bulk_exec_push_one (struct bulk_exec *exec,
int rank,
flux_cmd_t *cmd,
int flags)
{
int rc = -1;
struct idset *ids = idset_create (0, IDSET_FLAG_AUTOGROW);
if (!ids || idset_set (ids, rank) < 0)
return -1;
rc = bulk_exec_push_cmd (exec, ids, cmd, flags);
idset_destroy (ids);
return rc;
}
/* Kill all currently executing processes in bulk-exec object `exec`
* using "flux-imp kill" helper for processes potentially running
* under a different userid.
*
* Spawns "flux-imp kill <signal> <pid>" on each rank.
*/
flux_future_t *bulk_exec_imp_kill (struct bulk_exec *exec,
const char *imp_path,
int signum)
{
struct bulk_exec *killcmd = NULL;
flux_subprocess_t *p = NULL;
flux_future_t *f = NULL;
int count = 0;
/* Empty future for return value
*/
if (!(f = flux_future_create (NULL, NULL))) {
flux_log_error (exec->h, "bulk_exec_imp_kill: future_create");
goto err;
}
flux_future_set_flux (f, exec->h);
if (!(killcmd = bulk_exec_create (&imp_kill_ops, f)))
return NULL;
/* Tie bulk exec object destruction to future */
flux_future_aux_set (f, NULL, killcmd, (flux_free_f) bulk_exec_destroy);
p = zlist_first (exec->processes);
while (p) {
if ((flux_subprocess_state (p) == FLUX_SUBPROCESS_RUNNING
|| flux_subprocess_state (p) == FLUX_SUBPROCESS_INIT)) {
pid_t pid = flux_subprocess_pid (p);
int rank = flux_subprocess_rank (p);
flux_cmd_t *cmd = flux_cmd_create (0, NULL, environ);
if (!cmd
|| flux_cmd_setcwd (cmd, "/tmp") < 0
|| flux_cmd_argv_append (cmd, imp_path) < 0
|| flux_cmd_argv_append (cmd, "kill") < 0
|| flux_cmd_argv_appendf (cmd, "%d", signum) < 0
|| flux_cmd_argv_appendf (cmd, "%ld", (long) pid)) {
flux_log_error (exec->h,
"bulk_exec_imp_kill: flux_cmd_argv_append");
goto err;
}
if (bulk_exec_push_one (killcmd, rank, cmd, 0) < 0) {
flux_log_error (exec->h, "bulk_exec_imp_kill: push_cmd");
goto err;
}
count++;
flux_cmd_destroy (cmd);
}
p = zlist_next (exec->processes);
}
if (count == 0) {
errno = ENOENT;
goto err;
}
bulk_exec_aux_set (killcmd, "future", f, NULL);
if (bulk_exec_start (exec->h, killcmd) < 0) {
flux_log_error (exec->h, "bulk_exec_start");
goto err;
}
return f;
err:
flux_future_destroy (f);
return NULL;
}
int bulk_exec_aux_set (struct bulk_exec *exec, const char *key,
void *val, flux_free_f free_fn)
{
return (aux_set (&exec->aux, key, val, free_fn));
}
void * bulk_exec_aux_get (struct bulk_exec *exec, const char *key)
{
return (aux_get (exec->aux, key));
}
/* vi: ts=4 sw=4 expandtab
*/
| 1 | 30,335 | This one had me stumped! | flux-framework-flux-core | c |
@@ -190,10 +190,7 @@ class SettingsAdmin extends Component {
googlesitekit-settings-module__meta-item
googlesitekit-settings-module__meta-item--nomargin
">
- <Optin
- id="opt-in"
- name="optin"
- />
+ <Optin />
</div>
</div>
</div> | 1 | /**
* SettingsOverview component.
*
* Site Kit by Google, Copyright 2019 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* External dependencies
*/
import Layout from 'GoogleComponents/layout/layout';
import Link from 'GoogleComponents/link';
import Dialog from 'GoogleComponents/dialog';
import Optin from 'GoogleComponents/optin';
import data, { TYPE_CORE } from 'GoogleComponents/data';
import {
clearAppLocalStorage,
getSiteKitAdminURL,
} from 'GoogleUtil';
const { Component, Fragment } = wp.element;
const { __ } = wp.i18n;
class SettingsAdmin extends Component {
constructor() {
super();
const { userData: { email = '', picture = '', name = '' } } = googlesitekit.admin;
this.state = {
data: {
email,
img: picture,
user: name,
},
dialogActive: false,
};
this.handleDialog = this.handleDialog.bind( this );
this.handleUnlinkConfirm = this.handleUnlinkConfirm.bind( this );
this.handleCloseModal = this.handleCloseModal.bind( this );
}
componentDidMount() {
window.addEventListener( 'keyup', this.handleCloseModal, false );
}
componentWillUnmount() {
window.removeEventListener( 'keyup', this.handleCloseModal );
}
handleDialog() {
this.setState( ( prevState ) => {
return {
dialogActive: ! prevState.dialogActive,
};
} );
}
async handleUnlinkConfirm() {
await data.set( TYPE_CORE, 'site', 'reset' );
clearAppLocalStorage();
this.handleDialog();
document.location = getSiteKitAdminURL( 'googlesitekit-splash' );
}
handleCloseModal( e ) {
if ( 27 === e.keyCode ) {
this.setState( {
dialogActive: false,
} );
}
}
render() {
const {
dialogActive,
} = this.state;
return (
<Fragment>
<div className="
mdc-layout-grid__cell
mdc-layout-grid__cell--span-12
">
<Layout>
<div className="
googlesitekit-settings-module
googlesitekit-settings-module--active
">
<div className="mdc-layout-grid">
<div className="mdc-layout-grid__inner">
<div className="
mdc-layout-grid__cell
mdc-layout-grid__cell--span-6-desktop
mdc-layout-grid__cell--span-4-tablet
mdc-layout-grid__cell--span-4-phone
">
<h3 className="
googlesitekit-heading-4
googlesitekit-settings-module__title
">
{ __( 'Plugin Status', 'google-site-kit' ) }
</h3>
</div>
<div className="
mdc-layout-grid__cell
mdc-layout-grid__cell--span-6-desktop
mdc-layout-grid__cell--span-4-tablet
mdc-layout-grid__cell--span-4-phone
mdc-layout-grid__cell--align-middle
mdc-layout-grid__cell--align-right-tablet
">
</div>
<div className="
mdc-layout-grid__cell
mdc-layout-grid__cell--span-12
">
<div className="googlesitekit-settings-module__meta-items">
<p className="googlesitekit-settings-module__status">
{ __( 'Site Kit is connected', 'google-site-kit' ) }
<span className="
googlesitekit-settings-module__status-icon
googlesitekit-settings-module__status-icon--connected
">
<span className="screen-reader-text">
{ __( 'Connected', 'google-site-kit' ) }
</span>
</span>
</p>
</div>
</div>
</div>
</div>
<footer className="googlesitekit-settings-module__footer">
<div className="mdc-layout-grid">
<div className="mdc-layout-grid__inner">
<div className="
mdc-layout-grid__cell
mdc-layout-grid__cell--span-12
mdc-layout-grid__cell--span-8-tablet
mdc-layout-grid__cell--span-4-phone
">
<Link
onClick={ this.handleDialog }
inherit
>
{ __( 'Reset Site Kit', 'google-site-kit' ) }
</Link>
</div>
</div>
</div>
</footer>
</div>
</Layout>
</div>
<div className="
mdc-layout-grid__cell
mdc-layout-grid__cell--span-12
">
<Layout
header
title={ __( 'Tracking', 'google-site-kit' ) }
className="googlesitekit-settings-meta"
fill
>
<div className="
googlesitekit-settings-module
googlesitekit-settings-module--active
">
<div className="mdc-layout-grid">
<div className="mdc-layout-grid__inner">
<div className="
mdc-layout-grid__cell
mdc-layout-grid__cell--span-12
">
<div className="googlesitekit-settings-module__meta-items">
<div className="
googlesitekit-settings-module__meta-item
googlesitekit-settings-module__meta-item--nomargin
">
<Optin
id="opt-in"
name="optin"
/>
</div>
</div>
</div>
</div>
</div>
</div>
</Layout>
</div>
<Dialog
dialogActive={ dialogActive }
handleConfirm={ this.handleUnlinkConfirm }
handleDialog={ this.handleDialog }
title={ __( 'Reset Site Kit', 'google-site-kit' ) }
subtitle={ __( 'Resetting this site will remove access to all services. After disconnecting, you will need to re-authorize your access to restore service.', 'google-site-kit' ) }
confirmButton={ __( 'Reset', 'google-site-kit' ) }
provides={ [] }
/>
</Fragment>
);
}
}
export default SettingsAdmin;
| 1 | 25,296 | Note that this name differs from the name used as default (and thus used now that you removed this). That should be fine, but wanted to flag it. | google-site-kit-wp | js |
@@ -388,12 +388,17 @@ abstract class AbstractCrudController extends AbstractController implements Crud
public function autocomplete(AdminContext $context): JsonResponse
{
- $queryBuilder = $this->createIndexQueryBuilder($context->getSearch(), $context->getEntity(), FieldCollection::new([]), FilterCollection::new());
+ $queryBuilder = $this->createAutocompleteQueryBuilder($context->getSearch(), $context->getEntity());
$paginator = $this->get(PaginatorFactory::class)->create($queryBuilder);
return JsonResponse::fromJsonString($paginator->getResultsAsJson());
}
+ public function createAutocompleteQueryBuilder(SearchDto $searchDto, EntityDto $entityDto): QueryBuilder
+ {
+ return $this->get(EntityRepository::class)->createQueryBuilder($searchDto, $entityDto, FieldCollection::new([]), FilterCollection::new());
+ }
+
public function createIndexQueryBuilder(SearchDto $searchDto, EntityDto $entityDto, FieldCollection $fields, FilterCollection $filters): QueryBuilder
{
return $this->get(EntityRepository::class)->createQueryBuilder($searchDto, $entityDto, $fields, $filters); | 1 | <?php
namespace EasyCorp\Bundle\EasyAdminBundle\Controller;
use Doctrine\DBAL\Exception\ForeignKeyConstraintViolationException;
use Doctrine\ORM\EntityManagerInterface;
use Doctrine\ORM\QueryBuilder;
use EasyCorp\Bundle\EasyAdminBundle\Collection\FieldCollection;
use EasyCorp\Bundle\EasyAdminBundle\Collection\FilterCollection;
use EasyCorp\Bundle\EasyAdminBundle\Config\Action;
use EasyCorp\Bundle\EasyAdminBundle\Config\Actions;
use EasyCorp\Bundle\EasyAdminBundle\Config\Assets;
use EasyCorp\Bundle\EasyAdminBundle\Config\Crud;
use EasyCorp\Bundle\EasyAdminBundle\Config\Filters;
use EasyCorp\Bundle\EasyAdminBundle\Config\KeyValueStore;
use EasyCorp\Bundle\EasyAdminBundle\Context\AdminContext;
use EasyCorp\Bundle\EasyAdminBundle\Contracts\Controller\CrudControllerInterface;
use EasyCorp\Bundle\EasyAdminBundle\Dto\EntityDto;
use EasyCorp\Bundle\EasyAdminBundle\Dto\SearchDto;
use EasyCorp\Bundle\EasyAdminBundle\Event\AfterCrudActionEvent;
use EasyCorp\Bundle\EasyAdminBundle\Event\AfterEntityDeletedEvent;
use EasyCorp\Bundle\EasyAdminBundle\Event\AfterEntityPersistedEvent;
use EasyCorp\Bundle\EasyAdminBundle\Event\AfterEntityUpdatedEvent;
use EasyCorp\Bundle\EasyAdminBundle\Event\BeforeCrudActionEvent;
use EasyCorp\Bundle\EasyAdminBundle\Event\BeforeEntityDeletedEvent;
use EasyCorp\Bundle\EasyAdminBundle\Event\BeforeEntityPersistedEvent;
use EasyCorp\Bundle\EasyAdminBundle\Event\BeforeEntityUpdatedEvent;
use EasyCorp\Bundle\EasyAdminBundle\Exception\EntityRemoveException;
use EasyCorp\Bundle\EasyAdminBundle\Exception\ForbiddenActionException;
use EasyCorp\Bundle\EasyAdminBundle\Exception\InsufficientEntityPermissionException;
use EasyCorp\Bundle\EasyAdminBundle\Factory\ActionFactory;
use EasyCorp\Bundle\EasyAdminBundle\Factory\EntityFactory;
use EasyCorp\Bundle\EasyAdminBundle\Factory\FilterFactory;
use EasyCorp\Bundle\EasyAdminBundle\Factory\FormFactory;
use EasyCorp\Bundle\EasyAdminBundle\Factory\PaginatorFactory;
use EasyCorp\Bundle\EasyAdminBundle\Form\Type\FiltersFormType;
use EasyCorp\Bundle\EasyAdminBundle\Orm\EntityRepository;
use EasyCorp\Bundle\EasyAdminBundle\Orm\EntityUpdater;
use EasyCorp\Bundle\EasyAdminBundle\Provider\AdminContextProvider;
use EasyCorp\Bundle\EasyAdminBundle\Provider\FieldProvider;
use EasyCorp\Bundle\EasyAdminBundle\Router\CrudUrlGenerator;
use EasyCorp\Bundle\EasyAdminBundle\Security\Permission;
use Symfony\Bundle\FrameworkBundle\Controller\AbstractController;
use Symfony\Component\EventDispatcher\EventDispatcherInterface;
use Symfony\Component\Form\FormBuilderInterface;
use Symfony\Component\Form\FormInterface;
use Symfony\Component\HttpFoundation\JsonResponse;
use Symfony\Component\HttpFoundation\Response;
/**
* @author Javier Eguiluz <[email protected]>
*/
abstract class AbstractCrudController extends AbstractController implements CrudControllerInterface
{
abstract public static function getEntityFqcn(): string;
public function configureCrud(Crud $crud): Crud
{
return $crud;
}
public function configureAssets(Assets $assets): Assets
{
return $assets;
}
public function configureActions(Actions $actions): Actions
{
return $actions;
}
public function configureFilters(Filters $filters): Filters
{
return $filters;
}
/**
* {@inheritdoc}
*/
public function configureFields(string $pageName): iterable
{
return $this->get(FieldProvider::class)->getDefaultFields($pageName);
}
public static function getSubscribedServices()
{
return array_merge(parent::getSubscribedServices(), [
'event_dispatcher' => '?'.EventDispatcherInterface::class,
ActionFactory::class => '?'.ActionFactory::class,
AdminContextProvider::class => '?'.AdminContextProvider::class,
CrudUrlGenerator::class => '?'.CrudUrlGenerator::class,
EntityFactory::class => '?'.EntityFactory::class,
EntityRepository::class => '?'.EntityRepository::class,
EntityUpdater::class => '?'.EntityUpdater::class,
FieldProvider::class => '?'.FieldProvider::class,
FilterFactory::class => '?'.FilterFactory::class,
FormFactory::class => '?'.FormFactory::class,
PaginatorFactory::class => '?'.PaginatorFactory::class,
]);
}
public function index(AdminContext $context)
{
$event = new BeforeCrudActionEvent($context);
$this->get('event_dispatcher')->dispatch($event);
if ($event->isPropagationStopped()) {
return $event->getResponse();
}
if (!$this->isGranted(Permission::EA_EXECUTE_ACTION)) {
throw new ForbiddenActionException($context);
}
$fields = FieldCollection::new($this->configureFields(Crud::PAGE_INDEX));
$filters = $this->get(FilterFactory::class)->create($context->getCrud()->getFiltersConfig(), $fields, $context->getEntity());
$queryBuilder = $this->createIndexQueryBuilder($context->getSearch(), $context->getEntity(), $fields, $filters);
$paginator = $this->get(PaginatorFactory::class)->create($queryBuilder);
$entities = $this->get(EntityFactory::class)->createCollection($context->getEntity(), $paginator->getResults());
$this->get(EntityFactory::class)->processFieldsForAll($entities, $fields);
$globalActions = $this->get(EntityFactory::class)->processActionsForAll($entities, $context->getCrud()->getActionsConfig());
$responseParameters = $this->configureResponseParameters(KeyValueStore::new([
'pageName' => Crud::PAGE_INDEX,
'templateName' => 'crud/index',
'entities' => $entities,
'paginator' => $paginator,
'global_actions' => $globalActions,
'filters' => $filters,
// 'batch_form' => $this->createBatchActionsForm(),
]));
$event = new AfterCrudActionEvent($context, $responseParameters);
$this->get('event_dispatcher')->dispatch($event);
if ($event->isPropagationStopped()) {
return $event->getResponse();
}
return $responseParameters;
}
public function detail(AdminContext $context)
{
$event = new BeforeCrudActionEvent($context);
$this->get('event_dispatcher')->dispatch($event);
if ($event->isPropagationStopped()) {
return $event->getResponse();
}
if (!$this->isGranted(Permission::EA_EXECUTE_ACTION)) {
throw new ForbiddenActionException($context);
}
if (!$context->getEntity()->isAccessible()) {
throw new InsufficientEntityPermissionException($context);
}
$this->get(EntityFactory::class)->processFields($context->getEntity(), FieldCollection::new($this->configureFields(Crud::PAGE_DETAIL)));
$this->get(EntityFactory::class)->processActions($context->getEntity(), $context->getCrud()->getActionsConfig());
$responseParameters = $this->configureResponseParameters(KeyValueStore::new([
'pageName' => Crud::PAGE_DETAIL,
'templateName' => 'crud/detail',
'entity' => $context->getEntity(),
]));
$event = new AfterCrudActionEvent($context, $responseParameters);
$this->get('event_dispatcher')->dispatch($event);
if ($event->isPropagationStopped()) {
return $event->getResponse();
}
return $responseParameters;
}
public function edit(AdminContext $context)
{
$event = new BeforeCrudActionEvent($context);
$this->get('event_dispatcher')->dispatch($event);
if ($event->isPropagationStopped()) {
return $event->getResponse();
}
if (!$this->isGranted(Permission::EA_EXECUTE_ACTION)) {
throw new ForbiddenActionException($context);
}
if (!$context->getEntity()->isAccessible()) {
throw new InsufficientEntityPermissionException($context);
}
$this->get(EntityFactory::class)->processFields($context->getEntity(), FieldCollection::new($this->configureFields(Crud::PAGE_EDIT)));
$this->get(EntityFactory::class)->processActions($context->getEntity(), $context->getCrud()->getActionsConfig());
$entityInstance = $context->getEntity()->getInstance();
if ($context->getRequest()->isXmlHttpRequest()) {
$fieldName = $context->getRequest()->query->get('fieldName');
$newValue = 'true' === mb_strtolower($context->getRequest()->query->get('newValue'));
$event = $this->ajaxEdit($context->getEntity(), $fieldName, $newValue);
if ($event->isPropagationStopped()) {
return $event->getResponse();
}
// cast to integer instead of string to avoid sending empty responses for 'false'
return new Response((int) $newValue);
}
$editForm = $this->createEditForm($context->getEntity(), $context->getCrud()->getEditFormOptions(), $context);
$editForm->handleRequest($context->getRequest());
if ($editForm->isSubmitted() && $editForm->isValid()) {
// TODO:
// $this->processUploadedFiles($editForm);
$event = new BeforeEntityUpdatedEvent($entityInstance);
$this->get('event_dispatcher')->dispatch($event);
$entityInstance = $event->getEntityInstance();
$this->updateEntity($this->get('doctrine')->getManagerForClass($context->getEntity()->getFqcn()), $entityInstance);
$this->get('event_dispatcher')->dispatch(new AfterEntityUpdatedEvent($entityInstance));
$submitButtonName = $context->getRequest()->request->get('ea')['newForm']['btn'];
if (Action::SAVE_AND_CONTINUE === $submitButtonName) {
$url = $this->get(CrudUrlGenerator::class)->build()
->setAction(Action::EDIT)
->setEntityId($context->getEntity()->getPrimaryKeyValue())
->generateUrl();
return $this->redirect($url);
}
if (Action::SAVE_AND_RETURN === $submitButtonName) {
$url = $context->getReferrer()
?? $this->get(CrudUrlGenerator::class)->build()->setAction(Action::INDEX)->generateUrl();
return $this->redirect($url);
}
return $this->redirectToRoute($context->getDashboardRouteName());
}
$responseParameters = $this->configureResponseParameters(KeyValueStore::new([
'pageName' => Crud::PAGE_EDIT,
'templateName' => 'crud/edit',
'edit_form' => $editForm,
'entity' => $context->getEntity(),
]));
$event = new AfterCrudActionEvent($context, $responseParameters);
$this->get('event_dispatcher')->dispatch($event);
if ($event->isPropagationStopped()) {
return $event->getResponse();
}
return $responseParameters;
}
public function new(AdminContext $context)
{
$event = new BeforeCrudActionEvent($context);
$this->get('event_dispatcher')->dispatch($event);
if ($event->isPropagationStopped()) {
return $event->getResponse();
}
if (!$this->isGranted(Permission::EA_EXECUTE_ACTION)) {
throw new ForbiddenActionException($context);
}
if (!$context->getEntity()->isAccessible()) {
throw new InsufficientEntityPermissionException($context);
}
$context->getEntity()->setInstance($this->createEntity($context->getEntity()->getFqcn()));
$this->get(EntityFactory::class)->processFields($context->getEntity(), FieldCollection::new($this->configureFields(Crud::PAGE_NEW)));
$this->get(EntityFactory::class)->processActions($context->getEntity(), $context->getCrud()->getActionsConfig());
$entityInstance = $context->getEntity()->getInstance();
$newForm = $this->createNewForm($context->getEntity(), $context->getCrud()->getNewFormOptions(), $context);
$newForm->handleRequest($context->getRequest());
if ($newForm->isSubmitted() && $newForm->isValid()) {
// TODO:
// $this->processUploadedFiles($newForm);
$event = new BeforeEntityPersistedEvent($entityInstance);
$this->get('event_dispatcher')->dispatch($event);
$entityInstance = $event->getEntityInstance();
$this->persistEntity($this->get('doctrine')->getManagerForClass($context->getEntity()->getFqcn()), $entityInstance);
$this->get('event_dispatcher')->dispatch(new AfterEntityPersistedEvent($entityInstance));
$context->getEntity()->setInstance($entityInstance);
$submitButtonName = $context->getRequest()->request->get('ea')['newForm']['btn'];
if (Action::SAVE_AND_CONTINUE === $submitButtonName) {
$url = $this->get(CrudUrlGenerator::class)->build()
->setAction(Action::EDIT)
->setEntityId($context->getEntity()->getPrimaryKeyValue())
->generateUrl();
return $this->redirect($url);
}
if (Action::SAVE_AND_RETURN === $submitButtonName) {
$url = $context->getReferrer()
?? $this->get(CrudUrlGenerator::class)->build()->setAction(Action::INDEX)->generateUrl();
return $this->redirect($url);
}
if (Action::SAVE_AND_ADD_ANOTHER === $submitButtonName) {
$url = $this->get(CrudUrlGenerator::class)->build()->setAction(Action::NEW)->generateUrl();
return $this->redirect($url);
}
return $this->redirectToRoute($context->getDashboardRouteName());
}
$responseParameters = $this->configureResponseParameters(KeyValueStore::new([
'pageName' => Crud::PAGE_NEW,
'templateName' => 'crud/new',
'entity' => $context->getEntity(),
'new_form' => $newForm,
]));
$event = new AfterCrudActionEvent($context, $responseParameters);
$this->get('event_dispatcher')->dispatch($event);
if ($event->isPropagationStopped()) {
return $event->getResponse();
}
return $responseParameters;
}
public function delete(AdminContext $context)
{
$event = new BeforeCrudActionEvent($context);
$this->get('event_dispatcher')->dispatch($event);
if ($event->isPropagationStopped()) {
return $event->getResponse();
}
if (!$this->isGranted(Permission::EA_EXECUTE_ACTION)) {
throw new ForbiddenActionException($context);
}
if (!$context->getEntity()->isAccessible()) {
throw new InsufficientEntityPermissionException($context);
}
$csrfToken = $context->getRequest()->request->get('token');
if (!$this->isCsrfTokenValid('ea-delete', $csrfToken)) {
return $this->redirectToRoute($context->getDashboardRouteName());
}
$entityInstance = $context->getEntity()->getInstance();
$event = new BeforeEntityDeletedEvent($entityInstance);
$this->get('event_dispatcher')->dispatch($event);
$entityInstance = $event->getEntityInstance();
try {
$this->deleteEntity($this->get('doctrine')->getManagerForClass($context->getEntity()->getFqcn()), $entityInstance);
} catch (ForeignKeyConstraintViolationException $e) {
throw new EntityRemoveException(['entity_name' => $context->getEntity()->getName(), 'message' => $e->getMessage()]);
}
$this->get('event_dispatcher')->dispatch(new AfterEntityDeletedEvent($entityInstance));
$responseParameters = $this->configureResponseParameters(KeyValueStore::new([
'entity' => $context->getEntity(),
]));
$event = new AfterCrudActionEvent($context, $responseParameters);
$this->get('event_dispatcher')->dispatch($event);
if ($event->isPropagationStopped()) {
return $event->getResponse();
}
if (null !== $referrer = $context->getReferrer()) {
return $this->redirect($referrer);
}
return $this->redirect($this->get(CrudUrlGenerator::class)->build()->setAction('index')->unset('entityId')->generateUrl());
}
public function autocomplete(AdminContext $context): JsonResponse
{
$queryBuilder = $this->createIndexQueryBuilder($context->getSearch(), $context->getEntity(), FieldCollection::new([]), FilterCollection::new());
$paginator = $this->get(PaginatorFactory::class)->create($queryBuilder);
return JsonResponse::fromJsonString($paginator->getResultsAsJson());
}
public function createIndexQueryBuilder(SearchDto $searchDto, EntityDto $entityDto, FieldCollection $fields, FilterCollection $filters): QueryBuilder
{
return $this->get(EntityRepository::class)->createQueryBuilder($searchDto, $entityDto, $fields, $filters);
}
public function renderFilters(AdminContext $context): KeyValueStore
{
$fields = FieldCollection::new($this->configureFields(Crud::PAGE_INDEX));
$this->get(EntityFactory::class)->processFields($context->getEntity(), $fields);
$filters = $this->get(FilterFactory::class)->create($context->getCrud()->getFiltersConfig(), $context->getEntity()->getFields(), $context->getEntity());
/** @var FiltersFormType $filtersForm */
$filtersForm = $this->get(FormFactory::class)->createFiltersForm($filters, $context->getRequest());
$formActionParts = parse_url($filtersForm->getConfig()->getAction());
$queryString = $formActionParts['query'] ?? [];
parse_str($queryString, $queryStringAsArray);
$responseParameters = KeyValueStore::new([
'templateName' => 'crud/filters',
'filters_form' => $filtersForm,
'form_action_query_string_as_array' => $queryStringAsArray,
]);
return $this->configureResponseParameters($responseParameters);
}
public function createEntity(string $entityFqcn)
{
return new $entityFqcn();
}
public function updateEntity(EntityManagerInterface $entityManager, $entityInstance): void
{
$entityManager->persist($entityInstance);
$entityManager->flush();
}
public function persistEntity(EntityManagerInterface $entityManager, $entityInstance): void
{
$entityManager->persist($entityInstance);
$entityManager->flush();
}
public function deleteEntity(EntityManagerInterface $entityManager, $entityInstance): void
{
$entityManager->remove($entityInstance);
$entityManager->flush();
}
public function createEditForm(EntityDto $entityDto, KeyValueStore $formOptions, AdminContext $context): FormInterface
{
return $this->createEditFormBuilder($entityDto, $formOptions, $context)->getForm();
}
public function createEditFormBuilder(EntityDto $entityDto, KeyValueStore $formOptions, AdminContext $context): FormBuilderInterface
{
return $this->get(FormFactory::class)->createEditFormBuilder($entityDto, $formOptions, $context);
}
public function createNewForm(EntityDto $entityDto, KeyValueStore $formOptions, AdminContext $context): FormInterface
{
return $this->createNewFormBuilder($entityDto, $formOptions, $context)->getForm();
}
public function createNewFormBuilder(EntityDto $entityDto, KeyValueStore $formOptions, AdminContext $context): FormBuilderInterface
{
return $this->get(FormFactory::class)->createNewFormBuilder($entityDto, $formOptions, $context);
}
/**
* Used to add/modify/remove parameters before passing them to the Twig template.
*/
public function configureResponseParameters(KeyValueStore $responseParameters): KeyValueStore
{
return $responseParameters;
}
private function getContext(): ?AdminContext
{
return $this->get(AdminContextProvider::class)->getContext();
}
private function ajaxEdit(EntityDto $entityDto, ?string $propertyName, bool $newValue): AfterCrudActionEvent
{
if (!$entityDto->hasProperty($propertyName)) {
throw new \RuntimeException(sprintf('The "%s" boolean field cannot be changed because it doesn\'t exist in the "%s" entity.', $propertyName, $entityDto->getName()));
}
$this->get(EntityUpdater::class)->updateProperty($entityDto, $propertyName, $newValue);
$event = new BeforeEntityUpdatedEvent($entityDto->getInstance());
$this->get('event_dispatcher')->dispatch($event);
$entityInstance = $event->getEntityInstance();
$this->updateEntity($this->get('doctrine')->getManagerForClass($entityDto->getFqcn()), $entityInstance);
$this->get('event_dispatcher')->dispatch(new AfterEntityUpdatedEvent($entityInstance));
$entityDto->setInstance($entityInstance);
$parameters = KeyValueStore::new([
'action' => Action::EDIT,
'entity' => $entityDto,
]);
$event = new AfterCrudActionEvent($this->getContext(), $parameters);
$this->get('event_dispatcher')->dispatch($event);
return $event;
}
}
| 1 | 12,811 | Why public and not protected? | EasyCorp-EasyAdminBundle | php |
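A minimal sketch of the alternative this question points at — the same hook declared protected so it can still be overridden by child controllers but is not part of the public surface; this is only an illustration, not the signature the bundle actually ships:

protected function createAutocompleteQueryBuilder(SearchDto $searchDto, EntityDto $entityDto): QueryBuilder
{
    // Identical body; only the visibility changes.
    return $this->get(EntityRepository::class)->createQueryBuilder($searchDto, $entityDto, FieldCollection::new([]), FilterCollection::new());
}

One trade-off: if the method also has to be declared on CrudControllerInterface, it must stay public, since PHP interfaces can only contain public methods.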
@@ -51,7 +51,7 @@
locals: {current_org: current_org.id, published: published, scopes: scopes[:all], hide_actions: true}) %>
</div>
<% end %>
- <div id="organisation-templates" role="tabpanel" class="tab-pane<%= !current_user.can_super_admin? || current_tab == 'organisation-templates' ? ' active' : '' %>">
+ <div id="organisation-templates" role="tabpanel" class="tab-pane<%= (!current_user.can_super_admin? && !current_tab.present?) || current_tab == 'organisation-templates' ? ' active' : '' %>">
<h2><%= current_user.can_super_admin? ? _('%{org_name} Templates') % { org_name: current_user.org.name } : _('Own Templates') %></h2>
<%= paginable_renderise(
partial: 'paginable/templates/orgs', | 1 | <% # locals: funder_templates, org_templates, current_user, current_org, orgs, current_tab %>
<div class="row">
<div class="col-md-12">
<h1><%= _('Templates') %></h1>
</div>
<% if current_user.can_super_admin? %>
<div class="col-md-12">
<p><%= _('If you would like to modify one of the templates below, you must first change your organisation affiliation.') %></p>
</div>
<div class="col-md-6">
<%= form_for current_user, url: org_swap_user_path(current_user), html: {method: :put, id: 'super-admin-switch-org'} do |f| %>
<%= render partial: "shared/my_org", locals: {f: f, default_org: current_org, orgs: orgs, allow_other_orgs: false} %>
<%= f.submit _('Change affiliation'), class: 'btn btn-default' %>
<% end %>
</div>
<% end %>
<div class="col-md-12">
<p>
<%= _('If you wish to add an organisational template for a Data Management Plan, use the \'create template\' button. You can create more than one template if desired e.g. one for researchers and one for PhD students. Your template will be presented to users within your organisation when no funder templates apply. If you want to add questions to funder templates use the \'customise template\' options below.') %>
</p>
</div>
</div>
<div class="row">
<div class="col-md-12">
<ul class="nav nav-tabs" role="tablist">
<% if current_user.can_super_admin? %>
<li role="all-templates"<%= current_tab == 'all-templates' ? 'class=active' : '' %>>
<a href="#all-templates" role="tab" aria-controls="all-templates" data-toggle="tab"><%= _('All Templates') %></a>
</li>
<% end %>
<li role="organisation-templates"<%= current_tab == 'organisation-templates' ? 'class=active' : '' %>>
<a href="#organisation-templates" role="tab" aria-controls="organisation-templates" data-toggle="tab"><%= current_user.can_super_admin? ? _('%{org_name} Templates') % { org_name: current_user.org.name } : _('Own Templates') %></a>
</li>
<!-- If the Org is not just a funder then show the customizations table -->
<% if !current_org.funder_only? %>
<li role="funder-templates"<%= current_tab == 'funder-templates' ? 'class=active' : '' %>>
<a href="#funder-templates" role="tab" aria-controls="funder-templates" data-toggle="tab"><%= _('Customizable Templates') %></a>
</li>
<% end %>
</ul>
<div class="tab-content">
<% if current_user.can_super_admin? %>
<div id="all-templates" role="tabpanel" class="tab-pane<%= (current_tab == 'all-templates' || current_tab == '' ? ' active' : '') %>">
<h2><%= _('All Templates') %></h2>
<%= paginable_renderise(
partial: 'paginable/templates/all',
controller: 'paginable/templates',
action: 'all',
scope: all_templates,
locals: {current_org: current_org.id, published: published, scopes: scopes[:all], hide_actions: true}) %>
</div>
<% end %>
<div id="organisation-templates" role="tabpanel" class="tab-pane<%= !current_user.can_super_admin? || current_tab == 'organisation-templates' ? ' active' : '' %>">
<h2><%= current_user.can_super_admin? ? _('%{org_name} Templates') % { org_name: current_user.org.name } : _('Own Templates') %></h2>
<%= paginable_renderise(
partial: 'paginable/templates/orgs',
controller: 'paginable/templates',
action: 'orgs',
scope: own_templates,
locals: {current_org: current_org.id, published: published, scopes: scopes[:orgs], hide_actions: false}) %>
</div>
<!-- If the Org is not just a funder then show the customizations table -->
<% if !current_org.funder_only? %>
<div id="funder-templates" role="tabpanel" class="tab-pane<%= current_tab == 'funder-templates' ? ' active' : '' %>">
<h2><%= _('Customizable Templates') %></h2>
<%= paginable_renderise(
partial: 'paginable/templates/funders',
controller: 'paginable/templates',
action: 'funders',
scope: customizable_templates,
locals: {current_org: current_org.id, customizations: customized_templates, published: published, scopes: scopes[:funders]}) %>
</div>
<% end %>
</div>
<a href="<%= new_org_admin_template_path %>" class="btn btn-primary" role="button">
<%= _('Create a template') %>
</a>
</div>
</div> | 1 | 17,442 | We definitely need to address this duplication after MVP | DMPRoadmap-roadmap | rb |
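The review note on this record flags the duplicated heading logic: the same `can_super_admin?` ternary is rendered once for the tab link and again for the tab panel's `<h2>`. A minimal sketch of one way to collapse it, assuming a hypothetical `templates_tab_heading` local assigned near the top of the view (the local name is not part of the original template; a view helper would work just as well):

```erb
<% templates_tab_heading = current_user.can_super_admin? ? _('%{org_name} Templates') % { org_name: current_user.org.name } : _('Own Templates') %>

<!-- tab link -->
<a href="#organisation-templates" role="tab" aria-controls="organisation-templates" data-toggle="tab"><%= templates_tab_heading %></a>

<!-- tab panel heading -->
<h2><%= templates_tab_heading %></h2>
```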
@@ -113,6 +113,12 @@ export default function UserInputKeywords( { slug, max } ) {
return (
<Cell lgStart={ 6 } lgSize={ 6 } mdSize={ 8 } smSize={ 4 }>
<div ref={ keywordsContainer } className="googlesitekit-user-input__text-options">
+ <label
+ htmlFor={ `${ slug }-keyword-0` }
+ className="screen-reader-text"
+ >
+ { __( 'Enter minimum one (1), maximum three (3) terms', 'google-site-kit' ) }
+ </label>
{ values.map( ( value, i ) => (
<div
key={ i } | 1 | /**
* User Input Keywords.
*
* Site Kit by Google, Copyright 2021 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* External dependencies
*/
import PropTypes from 'prop-types';
import classnames from 'classnames';
/**
* WordPress dependencies
*/
import { useCallback, useRef } from '@wordpress/element';
import { __ } from '@wordpress/i18n';
import { ENTER } from '@wordpress/keycodes';
/**
* Internal dependencies
*/
import Data from 'googlesitekit-data';
import { CORE_USER } from '../../googlesitekit/datastore/user/constants';
import { Cell, Input, TextField } from '../../material-components';
import Button from '../Button';
import CloseIcon from '../../../svg/close.svg';
import { COMMA } from '../../util/key-codes';
const { useSelect, useDispatch } = Data;
export default function UserInputKeywords( { slug, max } ) {
const keywordsContainer = useRef();
const values = useSelect( ( select ) => select( CORE_USER ).getUserInputSetting( slug ) || [] );
const { setUserInputSetting } = useDispatch( CORE_USER );
// Add an empty string if the values array is empty.
if ( values.length === 0 ) {
values.push( '' );
}
// Need to make sure that dependencies list always has the same number of elements.
const dependencies = values.concat( Array( max ) ).slice( 0, max );
const updateKeywords = useCallback( ( keywords ) => {
const EOT = String.fromCharCode( 4 );
let newKeywords = keywords
// Trim keywords to allow no empty spaces at the beginning and at max one space at the end.
.map( ( keyword ) => keyword.replace( /(\S)\s+$/, '$1 ' ).replace( /^\s+\S/, '' ) )
// EOT is added to the end to properly combine two sequential empty spaces at the end.
.concat( [ '', EOT ] )
.join( EOT )
.replace( new RegExp( `${ EOT }{3,}`, 'g' ), EOT ); // Combine two sequential empty spaces into one.
if ( newKeywords === EOT ) {
newKeywords = [ '' ];
} else {
newKeywords = newKeywords.split( EOT ).slice( 0, max );
}
setUserInputSetting( slug, newKeywords );
}, [ slug ] );
const onKeywordChange = useCallback( ( index, { target } ) => {
if ( target.value[ target.value.length - 1 ] === ',' ) {
return;
}
updateKeywords( [
...values.slice( 0, index ),
target.value,
...values.slice( index + 1 ),
] );
}, dependencies );
const onKeyDown = useCallback( ( index, { keyCode } ) => {
const nonEmptyValues = values.filter( ( value ) => value.length > 0 );
if ( ( keyCode === ENTER || keyCode === COMMA ) && nonEmptyValues.length < max ) {
updateKeywords( [
...values.slice( 0, index + 1 ),
'',
...values.slice( index + 1 ),
] );
setTimeout( () => {
const input = keywordsContainer.current.querySelector( `#${ slug }-keyword-${ index + 1 }` );
if ( input ) {
input.focus();
}
}, 50 );
}
}, [ keywordsContainer.current, ...dependencies ] );
const onKeywordDelete = useCallback( ( index ) => {
updateKeywords( [
...values.slice( 0, index ),
...values.slice( index + 1 ),
] );
}, dependencies );
return (
<Cell lgStart={ 6 } lgSize={ 6 } mdSize={ 8 } smSize={ 4 }>
<div ref={ keywordsContainer } className="googlesitekit-user-input__text-options">
{ values.map( ( value, i ) => (
<div
key={ i }
className={ classnames( {
'googlesitekit-user-input__text-option': values.length > i + 1 || value.length > 0,
} ) }
>
<TextField
label={ i + 1 === values.length ? __( 'Enter minimum one (1), maximum three (3) terms', 'google-site-kit' ) : '' }
noLabel
>
<Input
id={ `${ slug }-keyword-${ i }` }
value={ value }
size={ value.length > 0 ? value.length : undefined }
onChange={ onKeywordChange.bind( null, i ) }
onKeyDown={ onKeyDown.bind( null, i ) }
/>
</TextField>
{ ( value.length > 0 || i + 1 < values.length ) && (
<Button
text
icon={ <CloseIcon width="14" height="14" /> }
onClick={ onKeywordDelete.bind( null, i ) }
/>
) }
</div>
) ) }
</div>
<p className="googlesitekit-user-input__note">
{ __( 'Separate with commas or the Enter key', 'google-site-kit' ) }
</p>
</Cell>
);
}
UserInputKeywords.propTypes = {
slug: PropTypes.string.isRequired,
max: PropTypes.number,
};
UserInputKeywords.defaultProps = {
max: 1,
};
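The review comment on this record argues the screen-reader label belongs inside `values.map`, tied to each input's index-based ID, and should use the `VisuallyHidden` component instead of the `screen-reader-text` class. A rough sketch of that shape, assuming `VisuallyHidden` is imported from wherever it lives in this codebase; this is a fragment of the component's return value, not a drop-in patch:

```jsx
{ values.map( ( value, i ) => (
	<div key={ i }>
		<VisuallyHidden>
			{ /* The label targets this specific input; its ID changes with the index. */ }
			<label htmlFor={ `${ slug }-keyword-${ i }` }>
				{ __( 'Enter minimum one (1), maximum three (3) terms', 'google-site-kit' ) }
			</label>
		</VisuallyHidden>
		<TextField noLabel>
			<Input
				id={ `${ slug }-keyword-${ i }` }
				value={ value }
				onChange={ onKeywordChange.bind( null, i ) }
				onKeyDown={ onKeyDown.bind( null, i ) }
			/>
		</TextField>
	</div>
) ) }
```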
| 1 | 36,743 | This will always generate a label for the first keyword, which means this will be a label linked to no element after the first keyword is entered. This label should be generated inside the `values.map` next to the `<TextField><Input /></TextField>` it is for, because the ID changes based on the index. Also, we have a `VisuallyHidden` component I'd prefer we use over the CSS class. Building as many things out of components is more React-y. Admittedly it's sort of a preference, but it's why we have the `VisuallyHidden` component. So let's use that instead | google-site-kit-wp | js |
@@ -323,6 +323,14 @@ struct roots_desktop *desktop_create(struct roots_server *server,
desktop->server = server;
desktop->config = config;
+
+ desktop->xcursor_theme = roots_xcursor_theme_create("default");
+ if (desktop->xcursor_theme == NULL) {
+ wlr_list_free(desktop->views);
+ free(desktop);
+ return NULL;
+ }
+
desktop->layout = wlr_output_layout_create();
desktop->compositor = wlr_compositor_create(server->wl_display,
server->renderer); | 1 | #define _POSIX_C_SOURCE 199309L
#include <assert.h>
#include <time.h>
#include <stdlib.h>
#include <math.h>
#include <wlr/types/wlr_box.h>
#include <wlr/types/wlr_compositor.h>
#include <wlr/types/wlr_cursor.h>
#include <wlr/types/wlr_gamma_control.h>
#include <wlr/types/wlr_server_decoration.h>
#include <wlr/types/wlr_output_layout.h>
#include <wlr/types/wlr_wl_shell.h>
#include <wlr/types/wlr_xdg_shell_v6.h>
#include <wlr/util/log.h>
#include <server-decoration-protocol.h>
#include "rootston/server.h"
#include "rootston/seat.h"
// TODO replace me with a signal
void view_destroy(struct roots_view *view) {
struct roots_desktop *desktop = view->desktop;
struct roots_input *input = desktop->server->input;
struct roots_seat *seat;
wl_list_for_each(seat, &input->seats, link) {
if (seat->focus == view) {
seat->focus = NULL;
seat->cursor->mode = ROOTS_CURSOR_PASSTHROUGH;
}
}
for (size_t i = 0; i < desktop->views->length; ++i) {
struct roots_view *_view = desktop->views->items[i];
if (view == _view) {
wlr_list_del(desktop->views, i);
break;
}
}
free(view);
}
void view_get_box(const struct roots_view *view, struct wlr_box *box) {
box->x = view->x;
box->y = view->y;
if (view->get_size) {
view->get_size(view, box);
} else {
box->width = view->wlr_surface->current->width;
box->height = view->wlr_surface->current->height;
}
}
static void view_update_output(const struct roots_view *view,
const struct wlr_box *before) {
struct roots_desktop *desktop = view->desktop;
struct roots_output *output;
struct wlr_box box;
view_get_box(view, &box);
wl_list_for_each(output, &desktop->outputs, link) {
bool intersected = before->x != -1 && wlr_output_layout_intersects(
desktop->layout, output->wlr_output,
before->x, before->y, before->x + before->width,
before->y + before->height);
bool intersects = wlr_output_layout_intersects(
desktop->layout, output->wlr_output,
view->x, view->y, view->x + box.width, view->y + box.height);
if (intersected && !intersects) {
wlr_surface_send_leave(view->wlr_surface, output->wlr_output);
}
if (!intersected && intersects) {
wlr_surface_send_enter(view->wlr_surface, output->wlr_output);
}
}
}
void view_move(struct roots_view *view, double x, double y) {
struct wlr_box before;
view_get_box(view, &before);
if (view->move) {
view->move(view, x, y);
} else {
view->x = x;
view->y = y;
}
}
void view_activate(struct roots_view *view, bool activate) {
if (view->activate) {
view->activate(view, activate);
}
}
void view_resize(struct roots_view *view, uint32_t width, uint32_t height) {
struct wlr_box before;
view_get_box(view, &before);
if (view->resize) {
view->resize(view, width, height);
}
view_update_output(view, &before);
}
void view_move_resize(struct roots_view *view, double x, double y,
uint32_t width, uint32_t height) {
if (view->move_resize) {
view->move_resize(view, x, y, width, height);
return;
}
view_move(view, x, y);
view_resize(view, width, height);
}
void view_maximize(struct roots_view *view, bool maximized) {
if (view->maximized == maximized) {
return;
}
if (view->maximize) {
view->maximize(view, maximized);
}
if (!view->maximized && maximized) {
struct wlr_box view_box;
view_get_box(view, &view_box);
view->maximized = true;
view->saved.x = view->x;
view->saved.y = view->y;
view->saved.rotation = view->rotation;
view->saved.width = view_box.width;
view->saved.height = view_box.height;
double output_x, output_y;
wlr_output_layout_closest_point(view->desktop->layout, NULL,
view->x + (double)view_box.width/2,
view->y + (double)view_box.height/2,
&output_x, &output_y);
struct wlr_output *output = wlr_output_layout_output_at(
view->desktop->layout, output_x, output_y);
struct wlr_box *output_box =
wlr_output_layout_get_box(view->desktop->layout, output);
view_move_resize(view, output_box->x, output_box->y, output_box->width,
output_box->height);
view->rotation = 0;
}
if (view->maximized && !maximized) {
view->maximized = false;
view_move_resize(view, view->saved.x, view->saved.y, view->saved.width,
view->saved.height);
view->rotation = view->saved.rotation;
}
}
void view_close(struct roots_view *view) {
if (view->close) {
view->close(view);
}
}
bool view_center(struct roots_view *view) {
struct wlr_box box;
view_get_box(view, &box);
struct roots_desktop *desktop = view->desktop;
struct wlr_output *output =
wlr_output_layout_get_center_output(desktop->layout);
if (!output) {
// empty layout
return false;
}
const struct wlr_output_layout_output *l_output =
wlr_output_layout_get(desktop->layout, output);
int width, height;
wlr_output_effective_resolution(output, &width, &height);
double view_x = (double)(width - box.width) / 2 + l_output->x;
double view_y = (double)(height - box.height) / 2 + l_output->y;
view_move(view, view_x, view_y);
return true;
}
void view_setup(struct roots_view *view) {
struct roots_input *input = view->desktop->server->input;
// TODO what seat gets focus? the one with the last input event?
struct roots_seat *seat;
wl_list_for_each(seat, &input->seats, link) {
roots_seat_focus_view(seat, view);
}
view_center(view);
struct wlr_box before;
view_get_box(view, &before);
view_update_output(view, &before);
}
void view_teardown(struct roots_view *view) {
// TODO replace me with a signal
/*
struct wlr_list *views = view->desktop->views;
if (views->length < 2 || views->items[views->length-1] != view) {
return;
}
struct roots_view *prev_view = views->items[views->length-2];
struct roots_input *input = prev_view->desktop->server->input;
set_view_focus(input, prev_view->desktop, prev_view);
*/
}
struct roots_view *view_at(struct roots_desktop *desktop, double lx, double ly,
struct wlr_surface **surface, double *sx, double *sy) {
for (int i = desktop->views->length - 1; i >= 0; --i) {
struct roots_view *view = desktop->views->items[i];
if (view->type == ROOTS_WL_SHELL_VIEW &&
view->wl_shell_surface->state ==
WLR_WL_SHELL_SURFACE_STATE_POPUP) {
continue;
}
double view_sx = lx - view->x;
double view_sy = ly - view->y;
struct wlr_box box = {
.x = 0,
.y = 0,
.width = view->wlr_surface->current->buffer_width,
.height = view->wlr_surface->current->buffer_height,
};
if (view->rotation != 0.0) {
// Coordinates relative to the center of the view
double ox = view_sx - (double)box.width/2,
oy = view_sy - (double)box.height/2;
// Rotated coordinates
double rx = cos(view->rotation)*ox - sin(view->rotation)*oy,
ry = cos(view->rotation)*oy + sin(view->rotation)*ox;
view_sx = rx + (double)box.width/2;
view_sy = ry + (double)box.height/2;
}
if (view->type == ROOTS_XDG_SHELL_V6_VIEW) {
// TODO: test if this works with rotated views
double popup_sx, popup_sy;
struct wlr_xdg_surface_v6 *popup =
wlr_xdg_surface_v6_popup_at(view->xdg_surface_v6,
view_sx, view_sy, &popup_sx, &popup_sy);
if (popup) {
*sx = view_sx - popup_sx;
*sy = view_sy - popup_sy;
*surface = popup->surface;
return view;
}
}
if (view->type == ROOTS_WL_SHELL_VIEW) {
// TODO: test if this works with rotated views
double popup_sx, popup_sy;
struct wlr_wl_shell_surface *popup =
wlr_wl_shell_surface_popup_at(view->wl_shell_surface,
view_sx, view_sy, &popup_sx, &popup_sy);
if (popup) {
*sx = view_sx - popup_sx;
*sy = view_sy - popup_sy;
*surface = popup->surface;
return view;
}
}
double sub_x, sub_y;
struct wlr_subsurface *subsurface =
wlr_surface_subsurface_at(view->wlr_surface,
view_sx, view_sy, &sub_x, &sub_y);
if (subsurface) {
*sx = view_sx - sub_x;
*sy = view_sy - sub_y;
*surface = subsurface->surface;
return view;
}
if (wlr_box_contains_point(&box, view_sx, view_sy) &&
pixman_region32_contains_point(
&view->wlr_surface->current->input,
view_sx, view_sy, NULL)) {
*sx = view_sx;
*sy = view_sy;
*surface = view->wlr_surface;
return view;
}
}
return NULL;
}
struct roots_desktop *desktop_create(struct roots_server *server,
struct roots_config *config) {
wlr_log(L_DEBUG, "Initializing roots desktop");
struct roots_desktop *desktop = calloc(1, sizeof(struct roots_desktop));
if (desktop == NULL) {
return NULL;
}
desktop->views = wlr_list_create();
if (desktop->views == NULL) {
free(desktop);
return NULL;
}
wl_list_init(&desktop->outputs);
desktop->output_add.notify = output_add_notify;
wl_signal_add(&server->backend->events.output_add, &desktop->output_add);
desktop->output_remove.notify = output_remove_notify;
wl_signal_add(&server->backend->events.output_remove,
&desktop->output_remove);
desktop->server = server;
desktop->config = config;
desktop->layout = wlr_output_layout_create();
desktop->compositor = wlr_compositor_create(server->wl_display,
server->renderer);
desktop->xdg_shell_v6 = wlr_xdg_shell_v6_create(server->wl_display);
wl_signal_add(&desktop->xdg_shell_v6->events.new_surface,
&desktop->xdg_shell_v6_surface);
desktop->xdg_shell_v6_surface.notify = handle_xdg_shell_v6_surface;
desktop->wl_shell = wlr_wl_shell_create(server->wl_display);
wl_signal_add(&desktop->wl_shell->events.new_surface,
&desktop->wl_shell_surface);
desktop->wl_shell_surface.notify = handle_wl_shell_surface;
#ifdef HAS_XWAYLAND
if (config->xwayland) {
desktop->xwayland = wlr_xwayland_create(server->wl_display,
desktop->compositor);
wl_signal_add(&desktop->xwayland->events.new_surface,
&desktop->xwayland_surface);
desktop->xwayland_surface.notify = handle_xwayland_surface;
}
#endif
desktop->gamma_control_manager = wlr_gamma_control_manager_create(
server->wl_display);
desktop->screenshooter = wlr_screenshooter_create(server->wl_display,
server->renderer);
desktop->server_decoration_manager =
wlr_server_decoration_manager_create(server->wl_display);
wlr_server_decoration_manager_set_default_mode(
desktop->server_decoration_manager,
ORG_KDE_KWIN_SERVER_DECORATION_MANAGER_MODE_CLIENT);
return desktop;
}
void desktop_destroy(struct roots_desktop *desktop) {
// TODO
}
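The review question on this record is whether a missing xcursor theme should abort `desktop_create()` at all, e.g. on embedded or touch-only systems that never show a pointer. A sketch of the non-fatal alternative the reviewer hints at, assuming the rest of rootston tolerates `desktop->xcursor_theme == NULL`:

```c
	desktop->xcursor_theme = roots_xcursor_theme_create("default");
	if (desktop->xcursor_theme == NULL) {
		/* Keep going without a theme instead of failing desktop creation. */
		wlr_log(L_ERROR, "Could not load xcursor theme, continuing without one");
	}
```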
| 1 | 9,047 | Does it make sense for this to be fatal? Could there be an embedded application that does not have xcursor themes at all? For instance, compositors without a pointer (like a touch-screen kiosk) will never show a cursor. I think I'm ok with this assumption since it simplifies the code and rootston is assumed to have desktop features, but we should always make sure xcursor is not required to be used in the library. | swaywm-wlroots | c |
@@ -0,0 +1,15 @@
+using Nethermind.Core;
+
+namespace Nethermind.Blockchain.Filters.Topics
+{
+ public abstract class TopicsFilterBase
+ {
+ public abstract bool Accepts(LogEntry entry);
+
+ public abstract bool Accepts(ref LogEntryStructRef entry);
+
+ public abstract bool Matches(Bloom bloom);
+
+ public abstract bool Matches(ref BloomStructRef bloom);
+ }
+} | 1 | 1 | 24,624 | would rename to TopicsFilter and original one to SequenceTopicsFilter | NethermindEth-nethermind | .cs |
|
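The review note on the preceding record suggests renaming so that the new abstract base is `TopicsFilter` and the pre-existing concrete implementation becomes `SequenceTopicsFilter`. A naming sketch only, with member signatures unchanged and the concrete class body elided:

```csharp
using Nethermind.Core;

namespace Nethermind.Blockchain.Filters.Topics
{
    public abstract class TopicsFilter
    {
        public abstract bool Accepts(LogEntry entry);

        public abstract bool Accepts(ref LogEntryStructRef entry);

        public abstract bool Matches(Bloom bloom);

        public abstract bool Matches(ref BloomStructRef bloom);
    }

    // The existing sequence-based implementation would then derive from this:
    // public class SequenceTopicsFilter : TopicsFilter { ... }
}
```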
@@ -240,7 +240,7 @@ class MPLPlot(DimensionedPlot):
def update(self, key):
- if len(self) == 1 and key == 0 and not self.drawn:
+ if len(self) == 1 and ((key == 0) or (key == self.keys[0])) and not self.drawn:
return self.initialize_plot()
return self.__getitem__(key)
| 1 | from __future__ import division
from itertools import chain
from contextlib import contextmanager
import numpy as np
import matplotlib as mpl
from mpl_toolkits.mplot3d import Axes3D # noqa (For 3D plots)
from matplotlib import pyplot as plt
from matplotlib import gridspec, animation
import param
from ...core import (OrderedDict, HoloMap, AdjointLayout, NdLayout,
GridSpace, Element, CompositeOverlay, Empty,
Collator, GridMatrix, Layout)
from ...core.options import Store, SkipRendering
from ...core.util import int_to_roman, int_to_alpha, basestring
from ..plot import (DimensionedPlot, GenericLayoutPlot, GenericCompositePlot,
GenericElementPlot)
from ..util import attach_streams
from .util import compute_ratios, fix_aspect
@contextmanager
def _rc_context(rcparams):
"""
Context manager that temporarily overrides the pyplot rcParams.
"""
old_rcparams = mpl.rcParams.copy()
mpl.rcParams.update(rcparams)
try:
yield
finally:
mpl.rcParams.clear()
mpl.rcParams.update(old_rcparams)
def mpl_rc_context(f):
"""
    Decorator for MPLPlot methods that applies the matplotlib rc params
    in the plot's fig_rcparams while the method is called.
"""
def wrapper(self, *args, **kwargs):
with _rc_context(self.fig_rcparams):
return f(self, *args, **kwargs)
return wrapper
class MPLPlot(DimensionedPlot):
"""
An MPLPlot object draws a matplotlib figure object when called or
indexed but can also return a matplotlib animation object as
appropriate. MPLPlots take element objects such as Image, Contours
    or Points as inputs and plot them in the appropriate format using
matplotlib. As HoloMaps are supported, all plots support animation
via the anim() method.
"""
backend = 'matplotlib'
sideplots = {}
fig_alpha = param.Number(default=1.0, bounds=(0, 1), doc="""
Alpha of the overall figure background.""")
fig_bounds = param.NumericTuple(default=(0.15, 0.15, 0.85, 0.85),
doc="""
The bounds of the overall figure as a 4-tuple of the form
(left, bottom, right, top), defining the size of the border
around the subplots.""")
fig_inches = param.Parameter(default=4, doc="""
The overall matplotlib figure size in inches. May be set as
an integer in which case it will be used to autocompute a
size. Alternatively may be set with an explicit tuple or list,
in which case it will be applied directly after being scaled
by fig_size. If either the width or height is set to None,
it will be computed automatically.""")
fig_latex = param.Boolean(default=False, doc="""
Whether to use LaTeX text in the overall figure.""")
fig_rcparams = param.Dict(default={}, doc="""
matplotlib rc parameters to apply to the overall figure.""")
fig_size = param.Number(default=100., bounds=(1, None), doc="""
Size relative to the supplied overall fig_inches in percent.""")
initial_hooks = param.HookList(default=[], doc="""
Optional list of hooks called before plotting the data onto
the axis. The hook is passed the plot object and the displayed
object, other plotting handles can be accessed via plot.handles.""")
final_hooks = param.HookList(default=[], doc="""
Optional list of hooks called when finalizing an axis.
The hook is passed the plot object and the displayed
object, other plotting handles can be accessed via plot.handles.""")
finalize_hooks = param.HookList(default=[], doc="""
Optional list of hooks called when finalizing an axis.
The hook is passed the plot object and the displayed
object, other plotting handles can be accessed via plot.handles.""")
sublabel_format = param.String(default=None, allow_None=True, doc="""
Allows labeling the subaxes in each plot with various formatters
including {Alpha}, {alpha}, {numeric} and {roman}.""")
sublabel_position = param.NumericTuple(default=(-0.35, 0.85), doc="""
Position relative to the plot for placing the optional subfigure label.""")
sublabel_size = param.Number(default=18, doc="""
Size of optional subfigure label.""")
projection = param.Parameter(default=None, doc="""
The projection of the plot axis, default of None is equivalent to
2D plot, '3d' and 'polar' are also supported by matplotlib by default.
May also supply a custom projection that is either a matplotlib
projection type or implements the `_as_mpl_axes` method.""")
show_frame = param.Boolean(default=False, doc="""
Whether or not to show a complete frame around the plot.""")
_close_figures = True
def __init__(self, fig=None, axis=None, **params):
self._create_fig = True
super(MPLPlot, self).__init__(**params)
# List of handles to matplotlib objects for animation update
self.fig_scale = self.fig_size/100.
if isinstance(self.fig_inches, (tuple, list)):
self.fig_inches = [None if i is None else i*self.fig_scale
for i in self.fig_inches]
else:
self.fig_inches *= self.fig_scale
if self.fig_latex:
self.fig_rcparams['text.usetex'] = True
if self.renderer.interactive:
plt.ion()
self._close_figures = False
elif not self.renderer.notebook_context:
plt.ioff()
fig, axis = self._init_axis(fig, axis)
self.handles['fig'] = fig
self.handles['axis'] = axis
if self.final_hooks and self.finalize_hooks:
self.warning('Set either final_hooks or deprecated '
'finalize_hooks, not both.')
self.finalize_hooks = self.final_hooks
self.handles['bbox_extra_artists'] = []
@mpl_rc_context
def _init_axis(self, fig, axis):
"""
Return an axis which may need to be initialized from
a new figure.
"""
if not fig and self._create_fig:
fig = plt.figure()
l, b, r, t = self.fig_bounds
inches = self.fig_inches
fig.subplots_adjust(left=l, bottom=b, right=r, top=t)
fig.patch.set_alpha(self.fig_alpha)
if isinstance(inches, (tuple, list)):
inches = list(inches)
if inches[0] is None:
inches[0] = inches[1]
elif inches[1] is None:
inches[1] = inches[0]
fig.set_size_inches(list(inches))
else:
fig.set_size_inches([inches, inches])
axis = fig.add_subplot(111, projection=self.projection)
axis.set_aspect('auto')
return fig, axis
def _subplot_label(self, axis):
layout_num = self.layout_num if self.subplot else 1
if self.sublabel_format and not self.adjoined and layout_num > 0:
from matplotlib.offsetbox import AnchoredText
labels = {}
if '{Alpha}' in self.sublabel_format:
labels['Alpha'] = int_to_alpha(layout_num-1)
elif '{alpha}' in self.sublabel_format:
labels['alpha'] = int_to_alpha(layout_num-1, upper=False)
elif '{numeric}' in self.sublabel_format:
labels['numeric'] = self.layout_num
elif '{Roman}' in self.sublabel_format:
labels['Roman'] = int_to_roman(layout_num)
elif '{roman}' in self.sublabel_format:
labels['roman'] = int_to_roman(layout_num).lower()
at = AnchoredText(self.sublabel_format.format(**labels), loc=3,
bbox_to_anchor=self.sublabel_position, frameon=False,
prop=dict(size=self.sublabel_size, weight='bold'),
bbox_transform=axis.transAxes)
at.patch.set_visible(False)
axis.add_artist(at)
sublabel = at.txt.get_children()[0]
self.handles['sublabel'] = sublabel
self.handles['bbox_extra_artists'] += [sublabel]
def _finalize_axis(self, key):
"""
General method to finalize the axis and plot.
"""
if 'title' in self.handles:
self.handles['title'].set_visible(self.show_title)
self.drawn = True
if self.subplot:
return self.handles['axis']
else:
fig = self.handles['fig']
if not getattr(self, 'overlaid', False) and self._close_figures:
plt.close(fig)
return fig
@property
def state(self):
return self.handles['fig']
def anim(self, start=0, stop=None, fps=30):
"""
Method to return a matplotlib animation. The start and stop
frames may be specified as well as the fps.
"""
figure = self.state or self.initialize_plot()
anim = animation.FuncAnimation(figure, self.update_frame,
frames=self.keys,
interval = 1000.0/fps)
# Close the figure handle
if self._close_figures: plt.close(figure)
return anim
def update(self, key):
if len(self) == 1 and key == 0 and not self.drawn:
return self.initialize_plot()
return self.__getitem__(key)
class CompositePlot(GenericCompositePlot, MPLPlot):
"""
    CompositePlot provides a baseclass for plots that coordinate multiple
subplots to form a Layout.
"""
@mpl_rc_context
def update_frame(self, key, ranges=None):
ranges = self.compute_ranges(self.layout, key, ranges)
for subplot in self.subplots.values():
subplot.update_frame(key, ranges=ranges)
title = self._format_title(key) if self.show_title else ''
if 'title' in self.handles:
self.handles['title'].set_text(title)
else:
title = self.handles['axis'].set_title(title, **self._fontsize('title'))
self.handles['title'] = title
class GridPlot(CompositePlot):
"""
Plot a group of elements in a grid layout based on a GridSpace element
object.
"""
aspect = param.Parameter(default='equal', doc="""
Aspect ratios on GridPlot should be automatically determined.""")
padding = param.Number(default=0.1, doc="""
The amount of padding as a fraction of the total Grid size""")
shared_xaxis = param.Boolean(default=False, doc="""
If enabled the x-axes of the GridSpace will be drawn from the
objects inside the Grid rather than the GridSpace dimensions.""")
    shared_yaxis = param.Boolean(default=False, doc="""
        If enabled the y-axes of the GridSpace will be drawn from the
        objects inside the Grid rather than the GridSpace dimensions.""")
show_legend = param.Boolean(default=False, doc="""
        Legends add too much clutter in a grid and are disabled by default.""")
xaxis = param.ObjectSelector(default='bottom',
objects=['bottom', 'top', None], doc="""
Whether and where to display the xaxis, supported options are
'bottom', 'top' and None.""")
yaxis = param.ObjectSelector(default='left',
objects=['left', 'right', None], doc="""
Whether and where to display the yaxis, supported options are
'left', 'right' and None.""")
xrotation = param.Integer(default=0, bounds=(0, 360), doc="""
Rotation angle of the xticks.""")
yrotation = param.Integer(default=0, bounds=(0, 360), doc="""
Rotation angle of the yticks.""")
def __init__(self, layout, axis=None, create_axes=True, ranges=None,
layout_num=1, keys=None, **params):
if not isinstance(layout, GridSpace):
raise Exception("GridPlot only accepts GridSpace.")
super(GridPlot, self).__init__(layout, layout_num=layout_num,
ranges=ranges, keys=keys, **params)
# Compute ranges layoutwise
grid_kwargs = {}
if axis is not None:
bbox = axis.get_position()
l, b, w, h = bbox.x0, bbox.y0, bbox.width, bbox.height
grid_kwargs = {'left': l, 'right': l+w, 'bottom': b, 'top': b+h}
self.position = (l, b, w, h)
self.cols, self.rows = layout.shape
self.fig_inches = self._get_size()
self._layoutspec = gridspec.GridSpec(self.rows, self.cols, **grid_kwargs)
with mpl.rc_context(rc=self.fig_rcparams):
self.subplots, self.subaxes, self.layout = self._create_subplots(layout, axis,
ranges, create_axes)
if self.top_level:
self.comm = self.init_comm()
self.traverse(lambda x: setattr(x, 'comm', self.comm))
self.traverse(lambda x: attach_streams(self, x.hmap, 2),
[GenericElementPlot])
def _get_size(self):
max_dim = max(self.layout.shape)
# Reduce plot size as GridSpace gets larger
shape_factor = 1. / max_dim
# Expand small grids to a sensible viewing size
expand_factor = 1 + (max_dim - 1) * 0.1
scale_factor = expand_factor * shape_factor
cols, rows = self.layout.shape
if isinstance(self.fig_inches, (tuple, list)):
fig_inches = list(self.fig_inches)
if fig_inches[0] is None:
fig_inches[0] = fig_inches[1] * (cols/rows)
if fig_inches[1] is None:
fig_inches[1] = fig_inches[0] * (rows/cols)
return fig_inches
else:
fig_inches = (self.fig_inches,)*2
return (scale_factor * cols * fig_inches[0],
scale_factor * rows * fig_inches[1])
def _create_subplots(self, layout, axis, ranges, create_axes):
norm_opts = self._traverse_options(layout, 'norm', ['axiswise'], [Element])
axiswise = all(norm_opts['axiswise'])
if not ranges:
self.handles['fig'].set_size_inches(self.fig_inches)
subplots, subaxes = OrderedDict(), OrderedDict()
frame_ranges = self.compute_ranges(layout, None, ranges)
frame_ranges = OrderedDict([(key, self.compute_ranges(layout, key, frame_ranges))
for key in self.keys])
collapsed_layout = layout.clone(shared_data=False, id=layout.id)
r, c = (0, 0)
for coord in layout.keys(full_grid=True):
if not isinstance(coord, tuple): coord = (coord,)
view = layout.data.get(coord, None)
# Create subplot
if type(view) in (Layout, NdLayout):
raise SkipRendering("Cannot plot nested Layouts.")
if view is not None:
vtype = view.type if isinstance(view, HoloMap) else view.__class__
opts = self.lookup_options(view, 'plot').options
else:
vtype = None
# Create axes
kwargs = {}
if create_axes:
projection = self._get_projection(view) if vtype else None
subax = plt.subplot(self._layoutspec[r, c], projection=projection)
if not axiswise and self.shared_xaxis and self.xaxis is not None:
self.xaxis = 'top'
if not axiswise and self.shared_yaxis and self.yaxis is not None:
self.yaxis = 'right'
# Disable subplot axes depending on shared axis options
# and the position in the grid
if (self.shared_xaxis or self.shared_yaxis) and not axiswise:
if c == 0 and r != 0:
subax.xaxis.set_ticks_position('none')
kwargs['xaxis'] = 'bottom-bare'
if c != 0 and r == 0 and not layout.ndims == 1:
subax.yaxis.set_ticks_position('none')
kwargs['yaxis'] = 'left-bare'
if r != 0 and c != 0:
kwargs['xaxis'] = 'bottom-bare'
kwargs['yaxis'] = 'left-bare'
if not self.shared_xaxis:
kwargs['xaxis'] = 'bottom-bare'
if not self.shared_yaxis:
kwargs['yaxis'] = 'left-bare'
else:
kwargs['xaxis'] = 'bottom-bare'
kwargs['yaxis'] = 'left-bare'
subaxes[(r, c)] = subax
else:
subax = None
if vtype and issubclass(vtype, CompositeOverlay) and (c == self.cols - 1 and
r == self.rows//2):
kwargs['show_legend'] = self.show_legend
kwargs['legend_position'] = 'right'
if (not isinstance(self.layout, GridMatrix) and not
((c == self.cols//2 and r == 0) or
(c == 0 and r == self.rows//2))):
kwargs['labelled'] = []
# Create subplot
if view is not None:
params = dict(fig=self.handles['fig'], axis=subax,
dimensions=self.dimensions, show_title=False,
subplot=not create_axes, ranges=frame_ranges,
uniform=self.uniform, keys=self.keys,
show_legend=False, renderer=self.renderer)
plotting_class = Store.registry['matplotlib'][vtype]
subplot = plotting_class(view, **dict(opts, **dict(params, **kwargs)))
collapsed_layout[coord] = subplot.layout if isinstance(subplot, CompositePlot) else subplot.hmap
subplots[(r, c)] = subplot
elif subax is not None:
subax.set_visible(False)
if r != self.rows-1:
r += 1
else:
r = 0
c += 1
if create_axes:
self.handles['axis'] = self._layout_axis(layout, axis)
self._adjust_subplots(self.handles['axis'], subaxes)
return subplots, subaxes, collapsed_layout
@mpl_rc_context
def initialize_plot(self, ranges=None):
# Get the extent of the layout elements (not the whole layout)
key = self.keys[-1]
axis = self.handles['axis']
subplot_kwargs = dict()
ranges = self.compute_ranges(self.layout, key, ranges)
for subplot in self.subplots.values():
subplot.initialize_plot(ranges=ranges, **subplot_kwargs)
if self.show_title:
title = axis.set_title(self._format_title(key),
**self._fontsize('title'))
self.handles['title'] = title
self._readjust_axes(axis)
self.drawn = True
if self.subplot: return self.handles['axis']
if self._close_figures: plt.close(self.handles['fig'])
return self.handles['fig']
def _readjust_axes(self, axis):
if self.subplot:
axis.set_position(self.position)
if self.aspect == 'equal':
axis.set_aspect(float(self.rows)/self.cols)
self.handles['fig'].canvas.draw()
self._adjust_subplots(self.handles['axis'], self.subaxes)
def _layout_axis(self, layout, axis):
fig = self.handles['fig']
axkwargs = {'gid': str(self.position)} if axis else {}
layout_axis = fig.add_subplot(1,1,1, **axkwargs)
if axis:
axis.set_visible(False)
layout_axis.set_position(self.position)
layout_axis.patch.set_visible(False)
for ax, ax_obj in zip(['x', 'y'], [layout_axis.xaxis, layout_axis.yaxis]):
tick_fontsize = self._fontsize('%sticks' % ax,'labelsize', common=False)
if tick_fontsize: ax_obj.set_tick_params(**tick_fontsize)
# Set labels
layout_axis.set_xlabel(layout.kdims[0].pprint_label,
**self._fontsize('xlabel'))
if layout.ndims == 2:
layout_axis.set_ylabel(layout.kdims[1].pprint_label,
**self._fontsize('ylabel'))
# Compute and set x- and y-ticks
dims = layout.kdims
keys = layout.keys()
if layout.ndims == 1:
dim1_keys = keys
dim2_keys = [0]
layout_axis.get_yaxis().set_visible(False)
else:
dim1_keys, dim2_keys = zip(*keys)
layout_axis.set_ylabel(dims[1].pprint_label)
layout_axis.set_aspect(float(self.rows)/self.cols)
# Process ticks
plot_width = (1.0 - self.padding) / self.cols
border_width = self.padding / (self.cols-1) if self.cols > 1 else 0
xticks = [(plot_width/2)+(r*(plot_width+border_width)) for r in range(self.cols)]
plot_height = (1.0 - self.padding) / self.rows
border_height = self.padding / (self.rows-1) if layout.ndims > 1 else 0
yticks = [(plot_height/2)+(r*(plot_height+border_height)) for r in range(self.rows)]
layout_axis.set_xticks(xticks)
layout_axis.set_xticklabels([dims[0].pprint_value(l)
for l in sorted(set(dim1_keys))])
for tick in layout_axis.get_xticklabels():
tick.set_rotation(self.xrotation)
ydim = dims[1] if layout.ndims > 1 else None
layout_axis.set_yticks(yticks)
layout_axis.set_yticklabels([ydim.pprint_value(l) if ydim else ''
for l in sorted(set(dim2_keys))])
for tick in layout_axis.get_yticklabels():
tick.set_rotation(self.yrotation)
if not self.show_frame:
layout_axis.spines['right' if self.yaxis == 'left' else 'left'].set_visible(False)
layout_axis.spines['bottom' if self.xaxis == 'top' else 'top'].set_visible(False)
axis = layout_axis
if self.xaxis is not None:
axis.xaxis.set_ticks_position(self.xaxis)
axis.xaxis.set_label_position(self.xaxis)
else:
axis.xaxis.set_visible(False)
if self.yaxis is not None:
axis.yaxis.set_ticks_position(self.yaxis)
axis.yaxis.set_label_position(self.yaxis)
else:
axis.yaxis.set_visible(False)
for pos in ['left', 'right', 'top', 'bottom']:
axis.spines[pos].set_visible(False)
return layout_axis
def _adjust_subplots(self, axis, subaxes):
bbox = axis.get_position()
l, b, w, h = bbox.x0, bbox.y0, bbox.width, bbox.height
if self.padding:
width_padding = w/(1./self.padding)
height_padding = h/(1./self.padding)
else:
width_padding, height_padding = 0, 0
if self.cols == 1:
b_w = 0
else:
b_w = width_padding / (self.cols - 1)
if self.rows == 1:
b_h = 0
else:
b_h = height_padding / (self.rows - 1)
ax_w = (w - (width_padding if self.cols > 1 else 0)) / self.cols
ax_h = (h - (height_padding if self.rows > 1 else 0)) / self.rows
r, c = (0, 0)
for ax in subaxes.values():
xpos = l + (c*ax_w) + (c * b_w)
ypos = b + (r*ax_h) + (r * b_h)
if r != self.rows-1:
r += 1
else:
r = 0
c += 1
if not ax is None:
ax.set_position([xpos, ypos, ax_w, ax_h])
class AdjointLayoutPlot(MPLPlot):
"""
LayoutPlot allows placing up to three Views in a number of
predefined and fixed layouts, which are defined by the layout_dict
class attribute. This allows placing subviews next to a main plot
in either a 'top' or 'right' position.
Initially, a LayoutPlot computes an appropriate layout based for
the number of Views in the AdjointLayout object it has been given, but
when embedded in a NdLayout, it can recompute the layout to
match the number of rows and columns as part of a larger grid.
"""
layout_dict = {'Single': ['main'],
'Dual': ['main', 'right'],
'Triple': ['top', None, 'main', 'right'],
'Embedded Dual': [None, 'main']}
def __init__(self, layout, layout_type, subaxes, subplots, **params):
# The AdjointLayout ViewableElement object
self.layout = layout
# Type may be set to 'Embedded Dual' by a call it grid_situate
self.layout_type = layout_type
self.view_positions = self.layout_dict[self.layout_type]
# The supplied (axes, view) objects as indexed by position
self.subaxes = {pos: ax for ax, pos in zip(subaxes, self.view_positions)}
super(AdjointLayoutPlot, self).__init__(subplots=subplots, **params)
@mpl_rc_context
def initialize_plot(self, ranges=None):
"""
Plot all the views contained in the AdjointLayout Object using axes
appropriate to the layout configuration. All the axes are
supplied by LayoutPlot - the purpose of the call is to
invoke subplots with correct options and styles and hide any
empty axes as necessary.
"""
for pos in self.view_positions:
# Pos will be one of 'main', 'top' or 'right' or None
view = self.layout.get(pos, None)
subplot = self.subplots.get(pos, None)
ax = self.subaxes.get(pos, None)
# If no view object or empty position, disable the axis
if None in [view, pos, subplot]:
ax.set_axis_off()
continue
subplot.initialize_plot(ranges=ranges)
self.adjust_positions()
self.drawn = True
def adjust_positions(self, redraw=True):
"""
Make adjustments to the positions of subplots (if available)
relative to the main plot axes as required.
This method is called by LayoutPlot after an initial pass
used to position all the Layouts together. This method allows
LayoutPlots to make final adjustments to the axis positions.
"""
checks = [self.view_positions, self.subaxes, self.subplots]
right = all('right' in check for check in checks)
top = all('top' in check for check in checks)
if not 'main' in self.subplots or not (top or right):
return
if redraw:
self.handles['fig'].canvas.draw()
main_ax = self.subplots['main'].handles['axis']
bbox = main_ax.get_position()
if right:
ax = self.subaxes['right']
subplot = self.subplots['right']
if isinstance(subplot, AdjoinedPlot):
subplot_size = subplot.subplot_size
border_size = subplot.border_size
else:
subplot_size = 0.25
border_size = 0.25
ax.set_position([bbox.x1 + bbox.width * border_size,
bbox.y0,
bbox.width * subplot_size, bbox.height])
if isinstance(subplot, GridPlot):
ax.set_aspect('equal')
if top:
ax = self.subaxes['top']
subplot = self.subplots['top']
if isinstance(subplot, AdjoinedPlot):
subplot_size = subplot.subplot_size
border_size = subplot.border_size
else:
subplot_size = 0.25
border_size = 0.25
ax.set_position([bbox.x0,
bbox.y1 + bbox.height * border_size,
bbox.width, bbox.height * subplot_size])
if isinstance(subplot, GridPlot):
ax.set_aspect('equal')
@mpl_rc_context
def update_frame(self, key, ranges=None):
for pos in self.view_positions:
subplot = self.subplots.get(pos)
if subplot is not None:
subplot.update_frame(key, ranges)
def __len__(self):
return max([1 if self.keys is None else len(self.keys), 1])
class LayoutPlot(GenericLayoutPlot, CompositePlot):
"""
A LayoutPlot accepts either a Layout or a NdLayout and
displays the elements in a cartesian grid in scanline order.
"""
absolute_scaling = param.ObjectSelector(default=False, doc="""
If aspect_weight is enabled absolute_scaling determines whether
axes are scaled relative to the widest plot or whether the
aspect scales the axes in absolute terms.""")
aspect_weight = param.Number(default=0, doc="""
Weighting of the individual aspects when computing the Layout
grid aspects and overall figure size.""")
fig_bounds = param.NumericTuple(default=(0.05, 0.05, 0.95, 0.95), doc="""
The bounds of the figure as a 4-tuple of the form
(left, bottom, right, top), defining the size of the border
around the subplots.""")
tight = param.Boolean(default=False, doc="""
Tightly fit the axes in the layout within the fig_bounds
and tight_padding.""")
tight_padding = param.Parameter(default=3, doc="""
Integer or tuple specifying the padding in inches in a tight layout.""")
hspace = param.Number(default=0.5, doc="""
Specifies the space between horizontally adjacent elements in the grid.
Default value is set conservatively to avoid overlap of subplots.""")
vspace = param.Number(default=0.3, doc="""
Specifies the space between vertically adjacent elements in the grid.
Default value is set conservatively to avoid overlap of subplots.""")
fontsize = param.Parameter(default={'title':16}, allow_None=True)
# Whether to enable fix for non-square figures
v17_layout_format = True
def __init__(self, layout, keys=None, **params):
super(LayoutPlot, self).__init__(layout=layout, keys=keys, **params)
with mpl.rc_context(rc=self.fig_rcparams):
self.subplots, self.subaxes, self.layout = self._compute_gridspec(layout)
if self.top_level:
self.comm = self.init_comm()
self.traverse(lambda x: setattr(x, 'comm', self.comm))
self.traverse(lambda x: attach_streams(self, x.hmap, 2),
[GenericElementPlot])
def _compute_gridspec(self, layout):
"""
Computes the tallest and widest cell for each row and column
by examining the Layouts in the GridSpace. The GridSpec is then
instantiated and the LayoutPlots are configured with the
appropriate embedded layout_types. The first element of the
returned tuple is a dictionary of all the LayoutPlots indexed
by row and column. The second dictionary in the tuple supplies
        the grid indices needed to instantiate the axes for each
LayoutPlot.
"""
layout_items = layout.grid_items()
layout_dimensions = layout.kdims if isinstance(layout, NdLayout) else None
layouts = {}
col_widthratios, row_heightratios = {}, {}
for (r, c) in self.coords:
# Get view at layout position and wrap in AdjointLayout
_, view = layout_items.get((c, r) if self.transpose else (r, c), (None, None))
if isinstance(view, NdLayout):
raise SkipRendering("Cannot render NdLayout nested inside a Layout")
layout_view = view if isinstance(view, AdjointLayout) else AdjointLayout([view])
layouts[(r, c)] = layout_view
# Compute shape of AdjointLayout element
layout_lens = {1:'Single', 2:'Dual', 3:'Triple'}
layout_type = layout_lens[len(layout_view)]
# Get aspects
main = layout_view.main
main = main.last if isinstance(main, HoloMap) else main
main_options = self.lookup_options(main, 'plot').options if main else {}
if main and not isinstance(main_options.get('aspect', 1), basestring):
main_aspect = np.nan if isinstance(main, Empty) else main_options.get('aspect', 1)
main_aspect = self.aspect_weight*main_aspect + 1-self.aspect_weight
else:
main_aspect = np.nan
if layout_type in ['Dual', 'Triple']:
el = layout_view.get('right', None)
eltype = type(el)
if el and eltype in MPLPlot.sideplots:
plot_type = MPLPlot.sideplots[type(el)]
ratio = 0.6*(plot_type.subplot_size+plot_type.border_size)
width_ratios = [4, 4*ratio]
else:
width_ratios = [4, 1]
else:
width_ratios = [4]
inv_aspect = 1./main_aspect if main_aspect else np.NaN
if layout_type in ['Embedded Dual', 'Triple']:
el = layout_view.get('top', None)
eltype = type(el)
if el and eltype in MPLPlot.sideplots:
plot_type = MPLPlot.sideplots[type(el)]
ratio = 0.6*(plot_type.subplot_size+plot_type.border_size)
height_ratios = [4*ratio, 4]
else:
height_ratios = [1, 4]
else:
height_ratios = [4]
if not isinstance(main_aspect, (basestring, type(None))):
width_ratios = [wratio * main_aspect for wratio in width_ratios]
height_ratios = [hratio * inv_aspect for hratio in height_ratios]
layout_shape = (len(width_ratios), len(height_ratios))
# For each row and column record the width and height ratios
# of the LayoutPlot with the most horizontal or vertical splits
# and largest aspect
prev_heights = row_heightratios.get(r, (0, []))
if layout_shape[1] > prev_heights[0]:
row_heightratios[r] = [layout_shape[1], prev_heights[1]]
row_heightratios[r][1].append(height_ratios)
prev_widths = col_widthratios.get(c, (0, []))
if layout_shape[0] > prev_widths[0]:
col_widthratios[c] = (layout_shape[0], prev_widths[1])
col_widthratios[c][1].append(width_ratios)
col_splits = [v[0] for __, v in sorted(col_widthratios.items())]
row_splits = [v[0] for ___, v in sorted(row_heightratios.items())]
widths = np.array([r for col in col_widthratios.values()
for ratios in col[1] for r in ratios])/4
wr_unnormalized = compute_ratios(col_widthratios, False)
hr_list = compute_ratios(row_heightratios)
wr_list = compute_ratios(col_widthratios)
# Compute the number of rows and cols
cols, rows = len(wr_list), len(hr_list)
wr_list = [r if np.isfinite(r) else 1 for r in wr_list]
hr_list = [r if np.isfinite(r) else 1 for r in hr_list]
width = sum([r if np.isfinite(r) else 1 for r in wr_list])
yscale = width/sum([(1/v)*4 if np.isfinite(v) else 4 for v in wr_unnormalized])
if self.absolute_scaling:
width = width*np.nanmax(widths)
xinches, yinches = None, None
if not isinstance(self.fig_inches, (tuple, list)):
xinches = self.fig_inches * width
yinches = xinches/yscale
elif self.fig_inches[0] is None:
xinches = self.fig_inches[1] * yscale
yinches = self.fig_inches[1]
elif self.fig_inches[1] is None:
xinches = self.fig_inches[0]
yinches = self.fig_inches[0] / yscale
if xinches and yinches:
self.handles['fig'].set_size_inches([xinches, yinches])
self.gs = gridspec.GridSpec(rows, cols,
width_ratios=wr_list,
height_ratios=hr_list,
wspace=self.hspace,
hspace=self.vspace)
# Situate all the Layouts in the grid and compute the gridspec
# indices for all the axes required by each LayoutPlot.
gidx = 0
layout_count = 0
tight = self.tight
collapsed_layout = layout.clone(shared_data=False, id=layout.id)
frame_ranges = self.compute_ranges(layout, None, None)
frame_ranges = OrderedDict([(key, self.compute_ranges(layout, key, frame_ranges))
for key in self.keys])
layout_subplots, layout_axes = {}, {}
for r, c in self.coords:
# Compute the layout type from shape
wsplits = col_splits[c]
hsplits = row_splits[r]
if (wsplits, hsplits) == (1,1):
layout_type = 'Single'
elif (wsplits, hsplits) == (2,1):
layout_type = 'Dual'
elif (wsplits, hsplits) == (1,2):
layout_type = 'Embedded Dual'
elif (wsplits, hsplits) == (2,2):
layout_type = 'Triple'
# Get the AdjoinLayout at the specified coordinate
view = layouts[(r, c)]
positions = AdjointLayoutPlot.layout_dict[layout_type]
# Create temporary subplots to get projections types
# to create the correct subaxes for all plots in the layout
_, _, projs = self._create_subplots(layouts[(r, c)], positions,
None, frame_ranges, create=False)
gidx, gsinds = self.grid_situate(gidx, layout_type, cols)
layout_key, _ = layout_items.get((r, c), (None, None))
if isinstance(layout, NdLayout) and layout_key:
layout_dimensions = OrderedDict(zip(layout_dimensions, layout_key))
# Generate the axes and create the subplots with the appropriate
# axis objects, handling any Empty objects.
obj = layouts[(r, c)]
empty = isinstance(obj.main, Empty)
if view.main is None:
continue
elif empty:
obj = AdjointLayout([])
elif not view.traverse(lambda x: x, [Element]):
self.warning('%s is empty, skipping subplot.' % obj.main)
continue
elif self.transpose:
layout_count = (c*self.rows+(r+1))
else:
layout_count += 1
subaxes = [plt.subplot(self.gs[ind], projection=proj)
for ind, proj in zip(gsinds, projs)]
subplot_data = self._create_subplots(obj, positions,
layout_dimensions, frame_ranges,
dict(zip(positions, subaxes)),
num=0 if empty else layout_count)
subplots, adjoint_layout, _ = subplot_data
layout_axes[(r, c)] = subaxes
# Generate the AdjointLayoutsPlot which will coordinate
# plotting of AdjointLayouts in the larger grid
plotopts = self.lookup_options(view, 'plot').options
layout_plot = AdjointLayoutPlot(adjoint_layout, layout_type, subaxes, subplots,
fig=self.handles['fig'], **plotopts)
layout_subplots[(r, c)] = layout_plot
tight = not any(type(p) is GridPlot for p in layout_plot.subplots.values()) and tight
if layout_key:
collapsed_layout[layout_key] = adjoint_layout
# Apply tight layout if enabled and incompatible
# GridPlot isn't present.
if tight:
if isinstance(self.tight_padding, (tuple, list)):
wpad, hpad = self.tight_padding
padding = dict(w_pad=wpad, h_pad=hpad)
else:
padding = dict(w_pad=self.tight_padding, h_pad=self.tight_padding)
self.gs.tight_layout(self.handles['fig'], rect=self.fig_bounds, **padding)
return layout_subplots, layout_axes, collapsed_layout
def grid_situate(self, current_idx, layout_type, subgrid_width):
"""
Situate the current AdjointLayoutPlot in a LayoutPlot. The
LayoutPlot specifies a layout_type into which the AdjointLayoutPlot
must be embedded. This enclosing layout is guaranteed to have
enough cells to display all the views.
Based on this enforced layout format, a starting index
supplied by LayoutPlot (indexing into a large gridspec
arrangement) is updated to the appropriate embedded value. It
will also return a list of gridspec indices associated with
        all the required layout axes.
"""
# Set the layout configuration as situated in a NdLayout
if layout_type == 'Single':
start, inds = current_idx+1, [current_idx]
elif layout_type == 'Dual':
start, inds = current_idx+2, [current_idx, current_idx+1]
bottom_idx = current_idx + subgrid_width
if layout_type == 'Embedded Dual':
bottom = ((current_idx+1) % subgrid_width) == 0
grid_idx = (bottom_idx if bottom else current_idx)+1
start, inds = grid_idx, [current_idx, bottom_idx]
elif layout_type == 'Triple':
bottom = ((current_idx+2) % subgrid_width) == 0
grid_idx = (bottom_idx if bottom else current_idx) + 2
start, inds = grid_idx, [current_idx, current_idx+1,
bottom_idx, bottom_idx+1]
return start, inds
def _create_subplots(self, layout, positions, layout_dimensions, ranges, axes={}, num=1, create=True):
"""
Plot all the views contained in the AdjointLayout Object using axes
appropriate to the layout configuration. All the axes are
supplied by LayoutPlot - the purpose of the call is to
invoke subplots with correct options and styles and hide any
empty axes as necessary.
"""
subplots = {}
projections = []
adjoint_clone = layout.clone(shared_data=False, id=layout.id)
subplot_opts = dict(show_title=False, adjoined=layout)
for pos in positions:
# Pos will be one of 'main', 'top' or 'right' or None
view = layout.get(pos, None)
ax = axes.get(pos, None)
if view is None or not view.traverse(lambda x: x, [Element]):
projections.append(None)
continue
# Determine projection type for plot
projections.append(self._get_projection(view))
if not create:
continue
# Customize plotopts depending on position.
plotopts = self.lookup_options(view, 'plot').options
# Options common for any subplot
override_opts = {}
sublabel_opts = {}
if pos == 'main':
own_params = self.get_param_values(onlychanged=True)
sublabel_opts = {k: v for k, v in own_params
if 'sublabel_' in k}
elif pos == 'right':
right_opts = dict(invert_axes=True,
xaxis=None)
override_opts = dict(subplot_opts, **right_opts)
elif pos == 'top':
top_opts = dict(yaxis=None)
override_opts = dict(subplot_opts, **top_opts)
# Override the plotopts as required
plotopts = dict(sublabel_opts, **plotopts)
plotopts.update(override_opts, fig=self.handles['fig'])
vtype = view.type if isinstance(view, HoloMap) else view.__class__
if isinstance(view, GridSpace):
plotopts['create_axes'] = ax is not None
plot_type = Store.registry['matplotlib'][vtype]
if pos != 'main' and vtype in MPLPlot.sideplots:
plot_type = MPLPlot.sideplots[vtype]
num = num if len(self.coords) > 1 else 0
subplots[pos] = plot_type(view, axis=ax, keys=self.keys,
dimensions=self.dimensions,
layout_dimensions=layout_dimensions,
ranges=ranges, subplot=True,
uniform=self.uniform, layout_num=num,
renderer=self.renderer, **plotopts)
if isinstance(view, (Element, HoloMap, Collator, CompositeOverlay)):
adjoint_clone[pos] = subplots[pos].hmap
else:
adjoint_clone[pos] = subplots[pos].layout
return subplots, adjoint_clone, projections
@mpl_rc_context
def initialize_plot(self):
key = self.keys[-1]
ranges = self.compute_ranges(self.layout, key, None)
for subplot in self.subplots.values():
subplot.initialize_plot(ranges=ranges)
# Create title handle
title_obj = None
title = self._format_title(key)
if self.show_title and len(self.coords) > 1 and title:
title_obj = self.handles['fig'].suptitle(title, **self._fontsize('title'))
self.handles['title'] = title_obj
self.handles['bbox_extra_artists'] += [title_obj]
fig = self.handles['fig']
if (not self.traverse(specs=[GridPlot]) and not isinstance(self.fig_inches, tuple)
and self.v17_layout_format):
traverse_fn = lambda x: x.handles.get('bbox_extra_artists', None)
extra_artists = list(chain(*[artists for artists in self.traverse(traverse_fn)
if artists is not None]))
fix_aspect(fig, self.rows, self.cols,
title_obj, extra_artists,
vspace=self.vspace*self.fig_scale,
hspace=self.hspace*self.fig_scale)
colorbars = self.traverse(specs=[lambda x: hasattr(x, 'colorbar')])
for cbar_plot in colorbars:
if cbar_plot.colorbar:
cbar_plot._draw_colorbar(redraw=False)
adjoined = self.traverse(specs=[AdjointLayoutPlot])
for adjoined in adjoined:
if len(adjoined.subplots) > 1:
adjoined.adjust_positions(redraw=False)
return self._finalize_axis(None)
class AdjoinedPlot(DimensionedPlot):
aspect = param.Parameter(default='auto', doc="""
Aspect ratios on SideHistogramPlot should be determined by the
AdjointLayoutPlot.""")
bgcolor = param.Parameter(default=(1, 1, 1, 0), doc="""
Make plot background invisible.""")
border_size = param.Number(default=0.25, doc="""
The size of the border expressed as a fraction of the main plot.""")
show_title = param.Boolean(default=False, doc="""
Titles should be disabled on all SidePlots to avoid clutter.""")
subplot_size = param.Number(default=0.25, doc="""
The size of subplots expressed as a fraction of the main plot.""")
show_xlabel = param.Boolean(default=False, doc="""
Whether to show the x-label of the plot. Disabled by default
because plots are often too cramped to fit the title correctly.""")
| 1 | 20,308 | Is the idea that ``self.keys[0]`` here normally matches ``init_key``? | holoviz-holoviews | py |
@@ -262,6 +262,11 @@ class UIAHandler(COMObject):
eventHandler.queueEvent(NVDAEventName,obj)
def _isUIAWindowHelper(self,hwnd):
+ if hwnd == winUser.getDesktopWindow():
+ # #6301: In Windows 10 build >= 14901,
+ # calling UiaHasServerSideProvider with the desktop window sometimes freezes when bringing up NVDA windows.
+ # It will always return False anyway.
+ return False
# UIA in NVDA's process freezes in Windows 7 and below
processID=winUser.getWindowThreadProcessID(hwnd)[0]
if windll.kernel32.GetCurrentProcessId()==processID:
 | 1 |
from ctypes import *
from ctypes.wintypes import *
import comtypes.client
from comtypes.automation import VT_EMPTY
from comtypes import *
import weakref
import threading
import time
import api
import appModuleHandler
import queueHandler
import controlTypes
import NVDAHelper
import winKernel
import winUser
import eventHandler
from logHandler import log
from comtypes.gen.UIAutomationClient import *
#Some new win8 UIA constants that could be missing
UIA_StyleIdAttributeId=40034
UIA_AnnotationAnnotationTypeIdPropertyId=30113
UIA_AnnotationTypesAttributeId=40031
AnnotationType_SpellingError=60001
UIA_AnnotationObjectsAttributeId=40032
StyleId_Heading1=70001
StyleId_Heading9=70009
ItemIndex_Property_GUID=GUID("{92A053DA-2969-4021-BF27-514CFC2E4A69}")
ItemCount_Property_GUID=GUID("{ABBF5C45-5CCC-47b7-BB4E-87CB87BBD162}")
UIA_LevelPropertyId=30154
UIA_PositionInSetPropertyId=30152
UIA_SizeOfSetPropertyId=30153
badUIAWindowClassNames=[
"SysTreeView32",
"WuDuiListView",
"ComboBox",
"msctls_progress32",
"Edit",
"CommonPlacesWrapperWndClass",
"SysMonthCal32",
"SUPERGRID", #Outlook 2010 message list
"RichEdit",
"RichEdit20",
"RICHEDIT50W",
"SysListView32",
"_WwG",
'_WwN',
"EXCEL7",
"Button",
]
NVDAUnitsToUIAUnits={
"character":TextUnit_Character,
"word":TextUnit_Word,
"line":TextUnit_Line,
"paragraph":TextUnit_Paragraph,
"readingChunk":TextUnit_Line,
}
UIAControlTypesToNVDARoles={
UIA_ButtonControlTypeId:controlTypes.ROLE_BUTTON,
UIA_CalendarControlTypeId:controlTypes.ROLE_CALENDAR,
UIA_CheckBoxControlTypeId:controlTypes.ROLE_CHECKBOX,
UIA_ComboBoxControlTypeId:controlTypes.ROLE_COMBOBOX,
UIA_EditControlTypeId:controlTypes.ROLE_EDITABLETEXT,
UIA_HyperlinkControlTypeId:controlTypes.ROLE_LINK,
UIA_ImageControlTypeId:controlTypes.ROLE_GRAPHIC,
UIA_ListItemControlTypeId:controlTypes.ROLE_LISTITEM,
UIA_ListControlTypeId:controlTypes.ROLE_LIST,
UIA_MenuControlTypeId:controlTypes.ROLE_POPUPMENU,
UIA_MenuBarControlTypeId:controlTypes.ROLE_MENUBAR,
UIA_MenuItemControlTypeId:controlTypes.ROLE_MENUITEM,
UIA_ProgressBarControlTypeId:controlTypes.ROLE_PROGRESSBAR,
UIA_RadioButtonControlTypeId:controlTypes.ROLE_RADIOBUTTON,
UIA_ScrollBarControlTypeId:controlTypes.ROLE_SCROLLBAR,
UIA_SliderControlTypeId:controlTypes.ROLE_SLIDER,
UIA_SpinnerControlTypeId:controlTypes.ROLE_SPINBUTTON,
UIA_StatusBarControlTypeId:controlTypes.ROLE_STATUSBAR,
UIA_TabControlTypeId:controlTypes.ROLE_TABCONTROL,
UIA_TabItemControlTypeId:controlTypes.ROLE_TAB,
UIA_TextControlTypeId:controlTypes.ROLE_STATICTEXT,
UIA_ToolBarControlTypeId:controlTypes.ROLE_TOOLBAR,
UIA_ToolTipControlTypeId:controlTypes.ROLE_TOOLTIP,
UIA_TreeControlTypeId:controlTypes.ROLE_TREEVIEW,
UIA_TreeItemControlTypeId:controlTypes.ROLE_TREEVIEWITEM,
UIA_CustomControlTypeId:controlTypes.ROLE_UNKNOWN,
UIA_GroupControlTypeId:controlTypes.ROLE_GROUPING,
UIA_ThumbControlTypeId:controlTypes.ROLE_THUMB,
UIA_DataGridControlTypeId:controlTypes.ROLE_DATAGRID,
UIA_DataItemControlTypeId:controlTypes.ROLE_DATAITEM,
UIA_DocumentControlTypeId:controlTypes.ROLE_DOCUMENT,
UIA_SplitButtonControlTypeId:controlTypes.ROLE_SPLITBUTTON,
UIA_WindowControlTypeId:controlTypes.ROLE_WINDOW,
UIA_PaneControlTypeId:controlTypes.ROLE_PANE,
UIA_HeaderControlTypeId:controlTypes.ROLE_HEADER,
UIA_HeaderItemControlTypeId:controlTypes.ROLE_HEADERITEM,
UIA_TableControlTypeId:controlTypes.ROLE_TABLE,
UIA_TitleBarControlTypeId:controlTypes.ROLE_TITLEBAR,
UIA_SeparatorControlTypeId:controlTypes.ROLE_SEPARATOR,
}
UIAPropertyIdsToNVDAEventNames={
UIA_NamePropertyId:"nameChange",
UIA_HelpTextPropertyId:"descriptionChange",
UIA_ExpandCollapseExpandCollapseStatePropertyId:"stateChange",
UIA_ToggleToggleStatePropertyId:"stateChange",
UIA_IsEnabledPropertyId:"stateChange",
UIA_ValueValuePropertyId:"valueChange",
UIA_RangeValueValuePropertyId:"valueChange",
}
UIAEventIdsToNVDAEventNames={
#UIA_Text_TextChangedEventId:"textChanged",
UIA_SelectionItem_ElementSelectedEventId:"UIA_elementSelected",
UIA_MenuOpenedEventId:"gainFocus",
UIA_SelectionItem_ElementAddedToSelectionEventId:"stateChange",
UIA_SelectionItem_ElementRemovedFromSelectionEventId:"stateChange",
#UIA_MenuModeEndEventId:"menuModeEnd",
#UIA_Text_TextSelectionChangedEventId:"caret",
UIA_ToolTipOpenedEventId:"UIA_toolTipOpened",
#UIA_AsyncContentLoadedEventId:"documentLoadComplete",
#UIA_ToolTipClosedEventId:"hide",
UIA_Window_WindowOpenedEventId:"UIA_window_windowOpen",
}
class UIAHandler(COMObject):
_com_interfaces_=[IUIAutomationEventHandler,IUIAutomationFocusChangedEventHandler,IUIAutomationPropertyChangedEventHandler]
def __init__(self):
super(UIAHandler,self).__init__()
self.MTAThreadInitEvent=threading.Event()
self.MTAThreadStopEvent=threading.Event()
self.MTAThreadInitException=None
self.MTAThread=threading.Thread(target=self.MTAThreadFunc)
self.MTAThread.daemon=True
self.MTAThread.start()
self.MTAThreadInitEvent.wait(2)
if self.MTAThreadInitException:
raise self.MTAThreadInitException
def terminate(self):
MTAThreadHandle=HANDLE(windll.kernel32.OpenThread(winKernel.SYNCHRONIZE,False,self.MTAThread.ident))
self.MTAThreadStopEvent.set()
#Wait for the MTA thread to die (while still message pumping)
if windll.user32.MsgWaitForMultipleObjects(1,byref(MTAThreadHandle),False,200,0)!=0:
log.debugWarning("Timeout or error while waiting for UIAHandler MTA thread")
windll.kernel32.CloseHandle(MTAThreadHandle)
del self.MTAThread
def MTAThreadFunc(self):
try:
oledll.ole32.CoInitializeEx(None,comtypes.COINIT_MULTITHREADED)
isUIA8=False
try:
self.clientObject=CoCreateInstance(CUIAutomation8._reg_clsid_,interface=IUIAutomation,clsctx=CLSCTX_INPROC_SERVER)
isUIA8=True
except (COMError,WindowsError,NameError):
self.clientObject=CoCreateInstance(CUIAutomation._reg_clsid_,interface=IUIAutomation,clsctx=CLSCTX_INPROC_SERVER)
if isUIA8:
try:
self.clientObject=self.clientObject.QueryInterface(IUIAutomation3)
except COMError:
self.clientObject=self.clientObject.QueryInterface(IUIAutomation2)
log.info("UIAutomation: %s"%self.clientObject.__class__.__mro__[1].__name__)
self.windowTreeWalker=self.clientObject.createTreeWalker(self.clientObject.CreateNotCondition(self.clientObject.CreatePropertyCondition(UIA_NativeWindowHandlePropertyId,0)))
self.windowCacheRequest=self.clientObject.CreateCacheRequest()
self.windowCacheRequest.AddProperty(UIA_NativeWindowHandlePropertyId)
self.UIAWindowHandleCache={}
self.baseTreeWalker=self.clientObject.RawViewWalker
self.baseCacheRequest=self.windowCacheRequest.Clone()
import UIAHandler
self.ItemIndex_PropertyId=NVDAHelper.localLib.registerUIAProperty(byref(ItemIndex_Property_GUID),u"ItemIndex",1)
self.ItemCount_PropertyId=NVDAHelper.localLib.registerUIAProperty(byref(ItemCount_Property_GUID),u"ItemCount",1)
for propertyId in (UIA_FrameworkIdPropertyId,UIA_AutomationIdPropertyId,UIA_ClassNamePropertyId,UIA_ControlTypePropertyId,UIA_ProviderDescriptionPropertyId,UIA_ProcessIdPropertyId,UIA_IsTextPatternAvailablePropertyId):
self.baseCacheRequest.addProperty(propertyId)
self.baseCacheRequest.addPattern(UIA_TextPatternId)
self.rootElement=self.clientObject.getRootElementBuildCache(self.baseCacheRequest)
self.reservedNotSupportedValue=self.clientObject.ReservedNotSupportedValue
self.ReservedMixedAttributeValue=self.clientObject.ReservedMixedAttributeValue
self.clientObject.AddFocusChangedEventHandler(self.baseCacheRequest,self)
self.clientObject.AddPropertyChangedEventHandler(self.rootElement,TreeScope_Subtree,self.baseCacheRequest,self,UIAPropertyIdsToNVDAEventNames.keys())
for x in UIAEventIdsToNVDAEventNames.iterkeys():
self.clientObject.addAutomationEventHandler(x,self.rootElement,TreeScope_Subtree,self.baseCacheRequest,self)
except Exception as e:
self.MTAThreadInitException=e
finally:
self.MTAThreadInitEvent.set()
self.MTAThreadStopEvent.wait()
self.clientObject.RemoveAllEventHandlers()
def IUIAutomationEventHandler_HandleAutomationEvent(self,sender,eventID):
if not self.MTAThreadInitEvent.isSet():
# UIAHandler hasn't finished initialising yet, so just ignore this event.
return
if eventID==UIA_MenuOpenedEventId and eventHandler.isPendingEvents("gainFocus"):
# We don't need the menuOpened event if focus has been fired,
# as focus should be more correct.
return
NVDAEventName=UIAEventIdsToNVDAEventNames.get(eventID,None)
if not NVDAEventName:
return
if not self.isNativeUIAElement(sender):
return
window=self.getNearestWindowHandle(sender)
if window and not eventHandler.shouldAcceptEvent(NVDAEventName,windowHandle=window):
return
import NVDAObjects.UIA
obj=NVDAObjects.UIA.UIA(UIAElement=sender)
if not obj or (NVDAEventName=="gainFocus" and not obj.shouldAllowUIAFocusEvent):
return
focus=api.getFocusObject()
if obj==focus:
obj=focus
eventHandler.queueEvent(NVDAEventName,obj)
def IUIAutomationFocusChangedEventHandler_HandleFocusChangedEvent(self,sender):
if not self.MTAThreadInitEvent.isSet():
# UIAHandler hasn't finished initialising yet, so just ignore this event.
return
if not self.isNativeUIAElement(sender):
return
import NVDAObjects.UIA
if isinstance(eventHandler.lastQueuedFocusObject,NVDAObjects.UIA.UIA):
lastFocus=eventHandler.lastQueuedFocusObject.UIAElement
# Ignore duplicate focus events.
# It seems that it is possible for compareElements to return True, even though the objects are different.
# Therefore, don't ignore the event if the last focus object has lost its hasKeyboardFocus state.
if self.clientObject.compareElements(sender,lastFocus) and lastFocus.currentHasKeyboardFocus:
return
window=self.getNearestWindowHandle(sender)
if window and not eventHandler.shouldAcceptEvent("gainFocus",windowHandle=window):
return
obj=NVDAObjects.UIA.UIA(UIAElement=sender)
if not obj or not obj.shouldAllowUIAFocusEvent:
return
eventHandler.queueEvent("gainFocus",obj)
def IUIAutomationPropertyChangedEventHandler_HandlePropertyChangedEvent(self,sender,propertyId,newValue):
# #3867: For now manually force this VARIANT type to empty to get around a nasty double free in comtypes/ctypes.
# We also don't use the value in this callback.
newValue.vt=VT_EMPTY
if not self.MTAThreadInitEvent.isSet():
# UIAHandler hasn't finished initialising yet, so just ignore this event.
return
NVDAEventName=UIAPropertyIdsToNVDAEventNames.get(propertyId,None)
if not NVDAEventName:
return
if not self.isNativeUIAElement(sender):
return
window=self.getNearestWindowHandle(sender)
if window and not eventHandler.shouldAcceptEvent(NVDAEventName,windowHandle=window):
return
import NVDAObjects.UIA
obj=NVDAObjects.UIA.UIA(UIAElement=sender)
if not obj:
return
focus=api.getFocusObject()
if obj==focus:
obj=focus
eventHandler.queueEvent(NVDAEventName,obj)
def _isUIAWindowHelper(self,hwnd):
# UIA in NVDA's process freezes in Windows 7 and below
processID=winUser.getWindowThreadProcessID(hwnd)[0]
if windll.kernel32.GetCurrentProcessId()==processID:
return False
import NVDAObjects.window
windowClass=NVDAObjects.window.Window.normalizeWindowClassName(winUser.getClassName(hwnd))
# There are certain window classes that just had bad UIA implementations
if windowClass in badUIAWindowClassNames:
return False
if windowClass=="NetUIHWND":
parentHwnd=winUser.getAncestor(hwnd,winUser.GA_ROOT)
# #2816: Outlook 2010 auto complete does not fire enough UIA events, IAccessible is better.
# #4056: Combo boxes in Office 2010 Options dialogs don't expose a name via UIA, but do via MSAA.
if winUser.getClassName(parentHwnd) in {"Net UI Tool Window","NUIDialog"}:
return False
# allow the appModule for the window to also choose if this window is bad
appModule=appModuleHandler.getAppModuleFromProcessID(processID)
if appModule and appModule.isBadUIAWindow(hwnd):
return False
# Ask the window if it supports UIA natively
return windll.UIAutomationCore.UiaHasServerSideProvider(hwnd)
def isUIAWindow(self,hwnd):
now=time.time()
v=self.UIAWindowHandleCache.get(hwnd,None)
if not v or (now-v[1])>0.5:
v=self._isUIAWindowHelper(hwnd),now
self.UIAWindowHandleCache[hwnd]=v
return v[0]
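# Editor's note (not part of the original source): _isUIAWindowHelper results
# are cached per window handle together with a timestamp, and a cached answer
# is reused for up to 0.5 seconds, so bursts of events on the same hwnd only
# pay the UiaHasServerSideProvider cost once.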
def getNearestWindowHandle(self,UIAElement):
if hasattr(UIAElement,"_nearestWindowHandle"):
# Called previously. Use cached result.
return UIAElement._nearestWindowHandle
try:
window=UIAElement.cachedNativeWindowHandle
except COMError:
window=None
if not window:
# This element reports no window handle, so use the nearest ancestor window handle.
try:
new=self.windowTreeWalker.NormalizeElementBuildCache(UIAElement,self.windowCacheRequest)
except COMError:
return None
try:
window=new.cachedNativeWindowHandle
except COMError:
window=None
# Cache for future use to improve performance.
UIAElement._nearestWindowHandle=window
return window
def isNativeUIAElement(self,UIAElement):
#Due to issues dealing with UIA elements coming from the same process, we do not class these UIA elements as usable.
#It seems to be safe enough to retrieve the cached processID, but using tree walkers or fetching other properties causes a freeze.
try:
processID=UIAElement.cachedProcessId
except COMError:
return False
if processID==windll.kernel32.GetCurrentProcessId():
return False
# Whether this is a native element depends on whether its window natively supports UIA.
windowHandle=self.getNearestWindowHandle(UIAElement)
if windowHandle:
if self.isUIAWindow(windowHandle):
return True
if winUser.getClassName(windowHandle)=="DirectUIHWND" and "IEFRAME.dll" in UIAElement.cachedProviderDescription and UIAElement.currentClassName in ("DownloadBox", "accessiblebutton", "DUIToolbarButton", "PushButton"):
# This is the IE 9 downloads list.
# #3354: UiaHasServerSideProvider returns false for the IE 9 downloads list window,
# so we'd normally use MSAA for this control.
# However, its MSAA implementation is broken (fires invalid events) if UIA is initialised,
# whereas its UIA implementation works correctly.
# Therefore, we must use UIA here.
return True
return False
| 1 | 18,555 | Can this be clarified to "Exit early when hwnd is the windows desktop handle, UiaHasServerSideProvider would return false anyway." ? | nvaccess-nvda | py |
@@ -72,11 +72,11 @@ type Task struct {
Containers []*Container
// Volumes are the volumes for the task
Volumes []TaskVolume `json:"volumes"`
- // VCPULimit is a task-level limit for compute resources. A value of 1 means that
+ // Cpu is a task-level limit for compute resources. A value of 1 means that
// the task may access 100% of 1 vCPU on the instance
- VCPULimit float64 `json:"CPULimit,omitempty"`
- // MemoryLimit is a task-level limit for memory resources in bytes
- MemoryLimit int64 `json:"Memory,omitempty"`
+ Cpu float64 `json:"CPULimit,omitempty"`
+ // Memory is a task-level limit for memory resources in bytes
+ Memory int64 `json:"Memory,omitempty"`
// DesiredStatusUnsafe represents the state where the task should go. Generally,
// the desired status is informed by the ECS backend as a result of either
// API calls made to ECS or decisions made by the ECS service scheduler.
 | 1 |
// Copyright 2014-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may
// not use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language governing
// permissions and limitations under the License.
package api
import (
"encoding/json"
"fmt"
"path/filepath"
"strconv"
"strings"
"sync"
"time"
"github.com/aws/amazon-ecs-agent/agent/acs/model/ecsacs"
"github.com/aws/amazon-ecs-agent/agent/config"
"github.com/aws/amazon-ecs-agent/agent/credentials"
"github.com/aws/amazon-ecs-agent/agent/ecscni"
"github.com/aws/amazon-ecs-agent/agent/engine/emptyvolume"
"github.com/aws/amazon-ecs-agent/agent/utils/ttime"
"github.com/aws/aws-sdk-go/aws/arn"
"github.com/aws/aws-sdk-go/private/protocol/json/jsonutil"
"github.com/cihub/seelog"
"github.com/fsouza/go-dockerclient"
"github.com/pkg/errors"
)
const (
// PauseContainerName is the internal name for the pause container
PauseContainerName = "~internal~ecs~pause"
emptyHostVolumeName = "~internal~ecs-emptyvolume-source"
// awsSDKCredentialsRelativeURIPathEnvironmentVariableName defines the name of the environment
// variable containers' config, which will be used by the AWS SDK to fetch
// credentials.
awsSDKCredentialsRelativeURIPathEnvironmentVariableName = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI"
arnResourceSections = 2
arnResourceDelimiter = "/"
// networkModeNone specifies the string used to define the `none` docker networking mode
networkModeNone = "none"
// networkModeContainerPrefix specifies the prefix string used for setting the
// container's network mode to be mapped to that of another existing container
networkModeContainerPrefix = "container:"
)
// TaskOverrides are the overrides applied to a task
type TaskOverrides struct{}
// Task is the internal representation of a task in the ECS agent
type Task struct {
// Arn is the unique identifer for the task
Arn string
// Overrides are the overrides applied to a task
Overrides TaskOverrides `json:"-"`
// Family is the name of the task definition family
Family string
// Version is the version of the task definition
Version string
// Containers are the containers for the task
Containers []*Container
// Volumes are the volumes for the task
Volumes []TaskVolume `json:"volumes"`
// VCPULimit is a task-level limit for compute resources. A value of 1 means that
// the task may access 100% of 1 vCPU on the instance
VCPULimit float64 `json:"CPULimit,omitempty"`
// MemoryLimit is a task-level limit for memory resources in bytes
MemoryLimit int64 `json:"Memory,omitempty"`
// DesiredStatusUnsafe represents the state where the task should go. Generally,
// the desired status is informed by the ECS backend as a result of either
// API calls made to ECS or decisions made by the ECS service scheduler.
// The DesiredStatusUnsafe is almost always either TaskRunning or TaskStopped.
// NOTE: Do not access DesiredStatusUnsafe directly. Instead, use `UpdateStatus`,
// `UpdateDesiredStatus`, `GetDesiredStatus`, and `SetDesiredStatus`.
// TODO DesiredStatusUnsafe should probably be private with appropriately written
// setter/getter. When this is done, we need to ensure that the UnmarshalJSON
// is handled properly so that the state storage continues to work.
DesiredStatusUnsafe TaskStatus `json:"DesiredStatus"`
desiredStatusLock sync.RWMutex
// KnownStatusUnsafe represents the state where the task is. This is generally
// the minimum of equivalent status types for the containers in the task;
// if one container is at ContainerRunning and another is at ContainerPulled,
// the task KnownStatusUnsafe would be TaskPulled.
// NOTE: Do not access KnownStatusUnsafe directly. Instead, use `UpdateStatus`,
// and `GetKnownStatus`.
// TODO KnownStatusUnsafe should probably be private with appropriately written
// setter/getter. When this is done, we need to ensure that the UnmarshalJSON
// is handled properly so that the state storage continues to work.
KnownStatusUnsafe TaskStatus `json:"KnownStatus"`
knownStatusLock sync.RWMutex
// KnownStatusTimeUnsafe captures the time when the KnownStatusUnsafe was last updated.
// NOTE: Do not access KnownStatusTime directly, instead use `GetKnownStatusTime`.
KnownStatusTimeUnsafe time.Time `json:"KnownTime"`
knownStatusTimeLock sync.RWMutex
// SentStatusUnsafe represents the last KnownStatusUnsafe that was sent to the ECS SubmitTaskStateChange API.
// TODO(samuelkarp) SentStatusUnsafe needs a lock and setters/getters.
// TODO SentStatusUnsafe should probably be private with appropriately written
// setter/getter. When this is done, we need to ensure that the UnmarshalJSON
// is handled properly so that the state storage continues to work.
SentStatusUnsafe TaskStatus `json:"SentStatus"`
sentStatusLock sync.RWMutex
StartSequenceNumber int64
StopSequenceNumber int64
// credentialsID is used to set the CredentialsId field for the
// IAMRoleCredentials object associated with the task. This id can be
// used to look up the credentials for task in the credentials manager
credentialsID string
credentialsIDLock sync.RWMutex
// ENI is the elastic network interface specified by this task
ENI *ENI
eniLock sync.RWMutex
// MemoryCPULimitsEnabled to determine if task supports CPU, memory limits
MemoryCPULimitsEnabled bool `json:"MemoryCPULimitsEnabled,omitempty"`
memoryCPULimitsEnabledLock sync.RWMutex
}
// PostUnmarshalTask is run after a task has been unmarshalled, but before it has been
// run. It is possible it will be subsequently called after that and should be
// able to handle such an occurrence appropriately (e.g. behave idempotently).
func (task *Task) PostUnmarshalTask(cfg *config.Config, credentialsManager credentials.Manager) {
// TODO, add rudimentary plugin support and call any plugins that want to
// hook into this
task.adjustForPlatform(cfg)
task.initializeEmptyVolumes()
task.initializeCredentialsEndpoint(credentialsManager)
task.addNetworkResourceProvisioningDependency(cfg)
}
func (task *Task) initializeEmptyVolumes() {
requiredEmptyVolumes := []string{}
for _, container := range task.Containers {
for _, mountPoint := range container.MountPoints {
vol, ok := task.HostVolumeByName(mountPoint.SourceVolume)
if !ok {
continue
}
if _, ok := vol.(*EmptyHostVolume); ok {
if container.SteadyStateDependencies == nil {
container.SteadyStateDependencies = make([]string, 0)
}
container.SteadyStateDependencies = append(container.SteadyStateDependencies, emptyHostVolumeName)
requiredEmptyVolumes = append(requiredEmptyVolumes, mountPoint.SourceVolume)
}
}
}
if len(requiredEmptyVolumes) == 0 {
// No need to create the auxiliary 'empty-volumes' container
return
}
// If we have required empty volumes, add an 'internal' container that handles all
// of them
_, ok := task.ContainerByName(emptyHostVolumeName)
if !ok {
mountPoints := make([]MountPoint, len(requiredEmptyVolumes))
for i, volume := range requiredEmptyVolumes {
// BUG(samuelkarp) On Windows, volumes with names that differ only by case will collide
containerPath := getCanonicalPath(emptyvolume.ContainerPathPrefix + volume)
mountPoints[i] = MountPoint{SourceVolume: volume, ContainerPath: containerPath}
}
sourceContainer := &Container{
Name: emptyHostVolumeName,
Image: emptyvolume.Image + ":" + emptyvolume.Tag,
Command: []string{emptyvolume.Command}, // Command required, but this only gets created so N/A
MountPoints: mountPoints,
Essential: false,
Type: ContainerEmptyHostVolume,
DesiredStatusUnsafe: ContainerRunning,
}
task.Containers = append(task.Containers, sourceContainer)
}
}
// initializeCredentialsEndpoint sets the credentials endpoint for all containers in a task if needed.
func (task *Task) initializeCredentialsEndpoint(credentialsManager credentials.Manager) {
id := task.GetCredentialsID()
if id == "" {
// No credentials set for the task. Do not inject the endpoint environment variable.
return
}
taskCredentials, ok := credentialsManager.GetTaskCredentials(id)
if !ok {
// Task has credentials id set, but credentials manager is unaware of
// the id. This should never happen as the payload handler sets
// credentialsId for the task after adding credentials to the
// credentials manager
seelog.Errorf("Unable to get credentials for task: %s", task.Arn)
return
}
credentialsEndpointRelativeURI := taskCredentials.IAMRoleCredentials.GenerateCredentialsEndpointRelativeURI()
for _, container := range task.Containers {
// container.Environment map would not be initialized if there are
// no environment variables to be set or overridden in the container
// config. Check if that's the case and initialize if needed
if container.Environment == nil {
container.Environment = make(map[string]string)
}
container.Environment[awsSDKCredentialsRelativeURIPathEnvironmentVariableName] = credentialsEndpointRelativeURI
}
}
// BuildCNIConfig constructs the cni configuration from eni
func (task *Task) BuildCNIConfig() (*ecscni.Config, error) {
if !task.isNetworkModeVPC() {
return nil, errors.New("task config: task has no ENIs associated with it, unable to generate cni config")
}
cfg := &ecscni.Config{}
eni := task.GetTaskENI()
cfg.ENIID = eni.ID
cfg.ID = eni.MacAddress
cfg.ENIMACAddress = eni.MacAddress
for _, ipv4 := range eni.IPV4Addresses {
if ipv4.Primary {
cfg.ENIIPV4Address = ipv4.Address
break
}
}
// If there is ipv6 assigned to eni then set it
if len(eni.IPV6Addresses) > 0 {
cfg.ENIIPV6Address = eni.IPV6Addresses[0].Address
}
return cfg, nil
}
// isNetworkModeVPC checks if the task is configured to use task-networking feature
func (task *Task) isNetworkModeVPC() bool {
if task.GetTaskENI() == nil {
return false
}
return true
}
func (task *Task) addNetworkResourceProvisioningDependency(cfg *config.Config) {
if !task.isNetworkModeVPC() {
return
}
for _, container := range task.Containers {
if container.IsInternal() {
continue
}
if container.SteadyStateDependencies == nil {
container.SteadyStateDependencies = make([]string, 0)
}
container.SteadyStateDependencies = append(container.SteadyStateDependencies, PauseContainerName)
}
pauseContainer := NewContainerWithSteadyState(ContainerResourcesProvisioned)
pauseContainer.Name = PauseContainerName
pauseContainer.Image = fmt.Sprintf("%s:%s", cfg.PauseContainerImageName, cfg.PauseContainerTag)
pauseContainer.Essential = true
pauseContainer.Type = ContainerCNIPause
task.Containers = append(task.Containers, pauseContainer)
}
// ContainerByName returns the *Container for the given name
func (task *Task) ContainerByName(name string) (*Container, bool) {
for _, container := range task.Containers {
if container.Name == name {
return container, true
}
}
return nil, false
}
// HostVolumeByName returns the task Volume for the given a volume name in that
// task. The second return value indicates the presence of that volume
func (task *Task) HostVolumeByName(name string) (HostVolume, bool) {
for _, v := range task.Volumes {
if v.Name == name {
return v.Volume, true
}
}
return nil, false
}
// UpdateMountPoints updates the mount points of volumes that were created
// without specifying a host path. This is used as part of the empty host
// volume feature.
func (task *Task) UpdateMountPoints(cont *Container, vols map[string]string) {
for _, mountPoint := range cont.MountPoints {
containerPath := getCanonicalPath(mountPoint.ContainerPath)
hostPath, ok := vols[containerPath]
if !ok {
// /path/ -> /path or \path\ -> \path
hostPath, ok = vols[strings.TrimRight(containerPath, string(filepath.Separator))]
}
if ok {
if hostVolume, exists := task.HostVolumeByName(mountPoint.SourceVolume); exists {
if empty, ok := hostVolume.(*EmptyHostVolume); ok {
empty.HostPath = hostPath
}
}
}
}
}
// updateTaskKnownStatus updates the given task's status based on its containers' statuses.
// It always updates the task status to the minimum of all container statuses
// It returns a TaskStatus indicating what change occurred or TaskStatusNone if
// there was no change
// Invariant: task known status is the minimum of container known status
func (task *Task) updateTaskKnownStatus() (newStatus TaskStatus) {
seelog.Debugf("Updating task's known status, task: %s", task.String())
// Set to a large 'impossible' status that can't be the min
containerEarliestKnownStatus := ContainerZombie
var earliestKnownStatusContainer *Container
essentialContainerStopped := false
for _, container := range task.Containers {
containerKnownStatus := container.GetKnownStatus()
if containerKnownStatus == ContainerStopped && container.Essential {
essentialContainerStopped = true
}
if containerKnownStatus < containerEarliestKnownStatus {
containerEarliestKnownStatus = containerKnownStatus
earliestKnownStatusContainer = container
}
}
if earliestKnownStatusContainer == nil {
seelog.Criticalf(
"Impossible state found while updating tasks's known status, earliest state recorded as %s for task [%v]",
containerEarliestKnownStatus.String(), task)
return TaskStatusNone
}
seelog.Debugf("Container with earliest known container is [%s] for task: %s",
earliestKnownStatusContainer.String(), task.String())
// If the essential container is stopped while other containers may be running
// don't update the task status until the other containers are stopped.
if earliestKnownStatusContainer.IsKnownSteadyState() && essentialContainerStopped {
seelog.Debugf(
"Essential container is stopped while other containers are running, not updating task status for task: %s",
task.String())
return TaskStatusNone
}
// We can't rely on earliest container known status alone for determining if the
// task state needs to be updated as containers can have different steady states
// defined. Instead we should get the task status for all containers' known
// statuses and compute the min of this
earliestKnownTaskStatus := task.getEarliestKnownTaskStatusForContainers()
if task.GetKnownStatus() < earliestKnownTaskStatus {
seelog.Debugf("Updating task's known status to: %s, task: %s",
earliestKnownTaskStatus.String(), task.String())
task.SetKnownStatus(earliestKnownTaskStatus)
return task.GetKnownStatus()
}
return TaskStatusNone
}
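// Editor's note (not part of the original source): for example, a task whose
// containers are known to be at ContainerRunning and ContainerPulled resolves
// to an earliest known task status of TaskPulled, so the task's known status
// only advances once every container has progressed past that point.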
// getEarliestKnownTaskStatusForContainers gets the lowest (earliest) task status
// based on the known statuses of all containers in the task
func (task *Task) getEarliestKnownTaskStatusForContainers() TaskStatus {
if len(task.Containers) == 0 {
seelog.Criticalf("No containers in the task: %s", task.String())
return TaskStatusNone
}
// Set earliest container status to an impossible to reach 'high' task status
earliest := TaskZombie
for _, container := range task.Containers {
containerKnownStatus := container.GetKnownStatus()
containerTaskStatus := containerKnownStatus.TaskStatus(container.GetSteadyStateStatus())
if containerTaskStatus < earliest {
earliest = containerTaskStatus
}
}
return earliest
}
// Overridden returns a copy of the task with all container's overridden and
// itself overridden as well
func (task *Task) Overridden() *Task {
result := *task
// Task has no overrides currently, just do the containers
// Shallow copy, take care of the deeper bits too
result.Containers = make([]*Container, len(result.Containers))
for i, cont := range task.Containers {
result.Containers[i] = cont.Overridden()
}
return &result
}
// DockerConfig converts the given container in this task to the format of
// GoDockerClient's 'Config' struct
func (task *Task) DockerConfig(container *Container) (*docker.Config, *DockerClientConfigError) {
return task.Overridden().dockerConfig(container.Overridden())
}
func (task *Task) dockerConfig(container *Container) (*docker.Config, *DockerClientConfigError) {
dockerVolumes, err := task.dockerConfigVolumes(container)
if err != nil {
return nil, &DockerClientConfigError{err.Error()}
}
dockerEnv := make([]string, 0, len(container.Environment))
for envKey, envVal := range container.Environment {
dockerEnv = append(dockerEnv, envKey+"="+envVal)
}
// Convert MB to B
dockerMem := int64(container.Memory * 1024 * 1024)
if dockerMem != 0 && dockerMem < DockerContainerMinimumMemoryInBytes {
dockerMem = DockerContainerMinimumMemoryInBytes
}
var entryPoint []string
if container.EntryPoint != nil {
entryPoint = *container.EntryPoint
}
config := &docker.Config{
Image: container.Image,
Cmd: container.Command,
Entrypoint: entryPoint,
ExposedPorts: task.dockerExposedPorts(container),
Volumes: dockerVolumes,
Env: dockerEnv,
Memory: dockerMem,
CPUShares: task.dockerCPUShares(container.CPU),
}
if container.DockerConfig.Config != nil {
err := json.Unmarshal([]byte(*container.DockerConfig.Config), &config)
if err != nil {
return nil, &DockerClientConfigError{"Unable to decode given docker config: " + err.Error()}
}
}
if config.Labels == nil {
config.Labels = make(map[string]string)
}
return config, nil
}
// dockerCPUShares converts containerCPU shares if needed as per the logic stated below:
// Docker silently converts 0 to 1024 CPU shares, which is probably not what we
// want. Instead, we convert 0 to 2 to be closer to expected behavior. The
// reason for 2 over 1 is that 1 is an invalid value (Linux's choice, not Docker's).
func (task *Task) dockerCPUShares(containerCPU uint) int64 {
if containerCPU <= 1 {
seelog.Debugf(
"Converting CPU shares to allowed minimum of 2 for task arn: [%s] and cpu shares: %d",
task.Arn, containerCPU)
return 2
}
return int64(containerCPU)
}
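// Editor's note (not part of the original source): with the conversion above,
// task definition CPU values map to Docker CPU shares as 0 -> 2, 1 -> 2, and
// any value >= 2 passes through unchanged (for example, 512 -> 512).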
func (task *Task) dockerExposedPorts(container *Container) map[docker.Port]struct{} {
dockerExposedPorts := make(map[docker.Port]struct{})
for _, portBinding := range container.Ports {
dockerPort := docker.Port(strconv.Itoa(int(portBinding.ContainerPort)) + "/" + portBinding.Protocol.String())
dockerExposedPorts[dockerPort] = struct{}{}
}
return dockerExposedPorts
}
func (task *Task) dockerConfigVolumes(container *Container) (map[string]struct{}, error) {
volumeMap := make(map[string]struct{})
for _, m := range container.MountPoints {
vol, exists := task.HostVolumeByName(m.SourceVolume)
if !exists {
return nil, &badVolumeError{"Container " + container.Name + " in task " + task.Arn + " references invalid volume " + m.SourceVolume}
}
// you can handle most volume mount types in the HostConfig at run-time;
// empty mounts are created by docker at create-time (Config) so set
// them here.
if container.Type == ContainerEmptyHostVolume {
// if container.Name == emptyHostVolumeName && container.Type {
_, ok := vol.(*EmptyHostVolume)
if !ok {
return nil, &badVolumeError{"Empty volume container in task " + task.Arn + " was the wrong type"}
}
volumeMap[m.ContainerPath] = struct{}{}
}
}
return volumeMap, nil
}
func (task *Task) DockerHostConfig(
container *Container,
dockerContainerMap map[string]*DockerContainer) (*docker.HostConfig, *HostConfigError) {
return task.Overridden().dockerHostConfig(container.Overridden(), dockerContainerMap)
}
func (task *Task) dockerHostConfig(container *Container, dockerContainerMap map[string]*DockerContainer) (*docker.HostConfig, *HostConfigError) {
dockerLinkArr, err := task.dockerLinks(container, dockerContainerMap)
if err != nil {
return nil, &HostConfigError{err.Error()}
}
dockerPortMap := task.dockerPortMap(container)
volumesFrom, err := task.dockerVolumesFrom(container, dockerContainerMap)
if err != nil {
return nil, &HostConfigError{err.Error()}
}
binds, err := task.dockerHostBinds(container)
if err != nil {
return nil, &HostConfigError{err.Error()}
}
// Populate hostConfig
hostConfig := &docker.HostConfig{
Links: dockerLinkArr,
Binds: binds,
PortBindings: dockerPortMap,
VolumesFrom: volumesFrom,
}
if container.DockerConfig.HostConfig != nil {
err := json.Unmarshal([]byte(*container.DockerConfig.HostConfig), hostConfig)
if err != nil {
return nil, &HostConfigError{"Unable to decode given host config: " + err.Error()}
}
}
err = task.platformHostConfigOverride(hostConfig)
if err != nil {
return nil, &HostConfigError{err.Error()}
}
// Determine if network mode should be overridden and override it if needed
ok, networkMode := task.shouldOverrideNetworkMode(container, dockerContainerMap)
if !ok {
return hostConfig, nil
}
hostConfig.NetworkMode = networkMode
return hostConfig, nil
}
// shouldOverrideNetworkMode returns true if the network mode of the container needs
// to be overridden. It also returns the override string in this case. It returns
// false otherwise
func (task *Task) shouldOverrideNetworkMode(container *Container, dockerContainerMap map[string]*DockerContainer) (bool, string) {
// TODO. We can do an early return here by determining which kind of task it is
// Example: Does this task have ENIs in its payload, what is its networking mode etc
if container.IsInternal() {
// If it's an internal container, set the network mode to none.
// Currently, internal containers are either for creating empty host
// volumes or for creating the 'pause' container. Both of these
// only need the network mode to be set to "none"
return true, networkModeNone
}
// For other types of containers, determine if the container map contains
// a pause container. Since a pause container is only added to the task
// when using non docker daemon supported network modes, its existence
// indicates the need to configure the network mode outside of supported
// network drivers
if task.GetTaskENI() == nil {
return false, ""
}
pauseContName := ""
for _, cont := range task.Containers {
if cont.Type == ContainerCNIPause {
pauseContName = cont.Name
break
}
}
if pauseContName == "" {
seelog.Criticalf("Pause container required, but not found in the task: %s", task.String())
return false, ""
}
pauseContainer, ok := dockerContainerMap[pauseContName]
if !ok || pauseContainer == nil {
// This should never be the case and implies a code-bug.
seelog.Criticalf("Pause container required, but not found in container map for container: [%s] in task: %s",
container.String(), task.String())
return false, ""
}
return true, networkModeContainerPrefix + pauseContainer.DockerID
}
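// Editor's note (not part of the original source): for a task with an ENI, the
// override string returned above takes the form "container:<pause container's
// Docker ID>", so every application container joins the network namespace that
// was set up for the pause container; internal containers instead get "none".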
func (task *Task) dockerLinks(container *Container, dockerContainerMap map[string]*DockerContainer) ([]string, error) {
dockerLinkArr := make([]string, len(container.Links))
for i, link := range container.Links {
linkParts := strings.Split(link, ":")
if len(linkParts) > 2 {
return []string{}, errors.New("Invalid link format")
}
linkName := linkParts[0]
var linkAlias string
if len(linkParts) == 2 {
linkAlias = linkParts[1]
} else {
seelog.Warnf("Link name [%s] found with no linkalias for container: [%s] in task: [%s]",
linkName, container.String(), task.String())
linkAlias = linkName
}
targetContainer, ok := dockerContainerMap[linkName]
if !ok {
return []string{}, errors.New("Link target not available: " + linkName)
}
dockerLinkArr[i] = targetContainer.DockerName + ":" + linkAlias
}
return dockerLinkArr, nil
}
func (task *Task) dockerPortMap(container *Container) map[docker.Port][]docker.PortBinding {
dockerPortMap := make(map[docker.Port][]docker.PortBinding)
for _, portBinding := range container.Ports {
dockerPort := docker.Port(strconv.Itoa(int(portBinding.ContainerPort)) + "/" + portBinding.Protocol.String())
currentMappings, existing := dockerPortMap[dockerPort]
if existing {
dockerPortMap[dockerPort] = append(currentMappings, docker.PortBinding{HostIP: portBindingHostIP, HostPort: strconv.Itoa(int(portBinding.HostPort))})
} else {
dockerPortMap[dockerPort] = []docker.PortBinding{{HostIP: portBindingHostIP, HostPort: strconv.Itoa(int(portBinding.HostPort))}}
}
}
return dockerPortMap
}
func (task *Task) dockerVolumesFrom(container *Container, dockerContainerMap map[string]*DockerContainer) ([]string, error) {
volumesFrom := make([]string, len(container.VolumesFrom))
for i, volume := range container.VolumesFrom {
targetContainer, ok := dockerContainerMap[volume.SourceContainer]
if !ok {
return []string{}, errors.New("Volume target not available: " + volume.SourceContainer)
}
if volume.ReadOnly {
volumesFrom[i] = targetContainer.DockerName + ":ro"
} else {
volumesFrom[i] = targetContainer.DockerName
}
}
return volumesFrom, nil
}
func (task *Task) dockerHostBinds(container *Container) ([]string, error) {
if container.Name == emptyHostVolumeName {
// emptyHostVolumes are handled as a special case in config, not
// hostConfig
return []string{}, nil
}
binds := make([]string, len(container.MountPoints))
for i, mountPoint := range container.MountPoints {
hv, ok := task.HostVolumeByName(mountPoint.SourceVolume)
if !ok {
return []string{}, errors.New("Invalid volume referenced: " + mountPoint.SourceVolume)
}
if hv.SourcePath() == "" || mountPoint.ContainerPath == "" {
seelog.Errorf(
"Unable to resolve volume mounts for container [%s]; invalid path: [%s]; [%s] -> [%s] in task: [%s]",
container.Name, mountPoint.SourceVolume, hv.SourcePath(), mountPoint.ContainerPath, task.String())
return []string{}, errors.New("Unable to resolve volume mounts; invalid path: " + container.Name + " " + mountPoint.SourceVolume + "; " + hv.SourcePath() + " -> " + mountPoint.ContainerPath)
}
bind := hv.SourcePath() + ":" + mountPoint.ContainerPath
if mountPoint.ReadOnly {
bind += ":ro"
}
binds[i] = bind
}
return binds, nil
}
// TaskFromACS translates ecsacs.Task to api.Task by first marshaling the received
// ecsacs.Task to json and unmarshaling it as api.Task
func TaskFromACS(acsTask *ecsacs.Task, envelope *ecsacs.PayloadMessage) (*Task, error) {
data, err := jsonutil.BuildJSON(acsTask)
if err != nil {
return nil, err
}
task := &Task{}
err = json.Unmarshal(data, task)
if err != nil {
return nil, err
}
if task.GetDesiredStatus() == TaskRunning && envelope.SeqNum != nil {
task.StartSequenceNumber = *envelope.SeqNum
} else if task.GetDesiredStatus() == TaskStopped && envelope.SeqNum != nil {
task.StopSequenceNumber = *envelope.SeqNum
}
// TODO: Inspect CgroupSpec upon model changes
return task, nil
}
// UpdateStatus updates a task's known and desired statuses to be compatible
// with all of its containers
// It will return a bool indicating if there was a change
func (task *Task) UpdateStatus() bool {
change := task.updateTaskKnownStatus()
// DesiredStatus can change based on a new known status
task.UpdateDesiredStatus()
return change != TaskStatusNone
}
// UpdateDesiredStatus updates the desired status of the task and its containers
func (task *Task) UpdateDesiredStatus() {
task.updateTaskDesiredStatus()
task.updateContainerDesiredStatus()
}
// updateTaskDesiredStatus determines what status the task should properly be at based on the containers' statuses
// Invariant: task desired status must be stopped if any essential container is stopped
func (task *Task) updateTaskDesiredStatus() {
seelog.Debugf("Updating task: [%s]", task.String())
// A task's desired status is stopped if any essential container is stopped
// Otherwise, the task's desired status is unchanged (typically running, but no need to change)
for _, cont := range task.Containers {
if cont.Essential && (cont.KnownTerminal() || cont.DesiredTerminal()) {
seelog.Debugf("Updating task desired status to stopped because of container: [%s]; task: [%s]",
cont.Name, task.String())
task.SetDesiredStatus(TaskStopped)
}
}
}
// updateContainerDesiredStatus sets all container's desired status's to the
// task's desired status
// Invariant: container desired status is <= task desired status converted to container status
// Note: task desired status and container desired status is typically only RUNNING or STOPPED
func (task *Task) updateContainerDesiredStatus() {
for _, c := range task.Containers {
taskDesiredStatus := task.GetDesiredStatus()
taskDesiredStatusToContainerStatus := taskDesiredStatus.ContainerStatus(c.GetSteadyStateStatus())
if c.GetDesiredStatus() < taskDesiredStatusToContainerStatus {
c.SetDesiredStatus(taskDesiredStatusToContainerStatus)
}
}
}
// SetKnownStatus sets the known status of the task
func (task *Task) SetKnownStatus(status TaskStatus) {
task.setKnownStatus(status)
task.updateKnownStatusTime()
}
func (task *Task) setKnownStatus(status TaskStatus) {
task.knownStatusLock.Lock()
defer task.knownStatusLock.Unlock()
task.KnownStatusUnsafe = status
}
func (task *Task) updateKnownStatusTime() {
task.knownStatusTimeLock.Lock()
defer task.knownStatusTimeLock.Unlock()
task.KnownStatusTimeUnsafe = ttime.Now()
}
// GetKnownStatus gets the KnownStatus of the task
func (task *Task) GetKnownStatus() TaskStatus {
task.knownStatusLock.RLock()
defer task.knownStatusLock.RUnlock()
return task.KnownStatusUnsafe
}
// GetKnownStatusTime gets the KnownStatusTime of the task
func (task *Task) GetKnownStatusTime() time.Time {
task.knownStatusTimeLock.RLock()
defer task.knownStatusTimeLock.RUnlock()
return task.KnownStatusTimeUnsafe
}
// SetCredentialsID sets the credentials ID for the task
func (task *Task) SetCredentialsID(id string) {
task.credentialsIDLock.Lock()
defer task.credentialsIDLock.Unlock()
task.credentialsID = id
}
// GetCredentialsID gets the credentials ID for the task
func (task *Task) GetCredentialsID() string {
task.credentialsIDLock.RLock()
defer task.credentialsIDLock.RUnlock()
return task.credentialsID
}
// GetDesiredStatus gets the desired status of the task
func (task *Task) GetDesiredStatus() TaskStatus {
task.desiredStatusLock.RLock()
defer task.desiredStatusLock.RUnlock()
return task.DesiredStatusUnsafe
}
// SetDesiredStatus sets the desired status of the task
func (task *Task) SetDesiredStatus(status TaskStatus) {
task.desiredStatusLock.Lock()
defer task.desiredStatusLock.Unlock()
task.DesiredStatusUnsafe = status
}
// GetSentStatus safely returns the SentStatus of the task
func (task *Task) GetSentStatus() TaskStatus {
task.sentStatusLock.RLock()
defer task.sentStatusLock.RUnlock()
return task.SentStatusUnsafe
}
// SetSentStatus safely sets the SentStatus of the task
func (task *Task) SetSentStatus(status TaskStatus) {
task.sentStatusLock.Lock()
defer task.sentStatusLock.Unlock()
task.SentStatusUnsafe = status
}
// SetTaskENI sets the eni information of the task
func (task *Task) SetTaskENI(eni *ENI) {
task.eniLock.Lock()
defer task.eniLock.Unlock()
task.ENI = eni
}
// GetTaskENI returns the ENI of the task; for now a task can only have one ENI
func (task *Task) GetTaskENI() *ENI {
task.eniLock.RLock()
defer task.eniLock.RUnlock()
return task.ENI
}
// String returns a human readable string representation of this object
func (t *Task) String() string {
res := fmt.Sprintf("%s:%s %s, TaskStatus: (%s->%s)",
t.Family, t.Version, t.Arn,
t.GetKnownStatus().String(), t.GetDesiredStatus().String())
res += " Containers: ["
for _, c := range t.Containers {
res += fmt.Sprintf("%s (%s->%s),", c.Name, c.GetKnownStatus().String(), c.GetDesiredStatus().String())
}
return res + "]"
}
// GetID is used to retrieve the taskID from taskARN
// Reference: http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-ecs
func (task *Task) GetID() (string, error) {
// Parse taskARN
parsedARN, err := arn.Parse(task.Arn)
if err != nil {
return "", errors.Wrapf(err, "task get-id: malformed taskARN: %s", task.Arn)
}
// Get task resource section
resource := parsedARN.Resource
if !strings.Contains(resource, arnResourceDelimiter) {
return "", errors.New(fmt.Sprintf("task get-id: malformed task resource: %s", resource))
}
resourceSplit := strings.SplitN(resource, arnResourceDelimiter, arnResourceSections)
if len(resourceSplit) != arnResourceSections {
return "", errors.New(fmt.Sprintf("task get-id: invalid task resource split: %s, expected=%d, actual=%d", resource, arnResourceSections, len(resourceSplit)))
}
return resourceSplit[1], nil
}
| 1 | 17,631 | I'm okay with leaving VCPULimit and MemoryLimit on our internal model | aws-amazon-ecs-agent | go |
@@ -73,6 +73,10 @@ const (
defaultWorkers = 4
// Default rule priority for K8s NetworkPolicy rules.
defaultRulePriority = -1
+ // TierIndex is used to index ClusterNetworkPolicies by Tier names.
+ TierIndex = "tier"
+ // maxSupportedTiers is the maximum number of supported Tiers.
+ maxSupportedTiers = 10
)
var (
 | 1 |
// Copyright 2019 Antrea Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package networkpolicy provides NetworkPolicyController implementation to manage
// and synchronize the Pods and Namespaces affected by Network Policies and enforce
// their rules.
package networkpolicy
import (
"fmt"
"net"
"reflect"
"sort"
"strconv"
"strings"
"sync"
"time"
uuid "github.com/satori/go.uuid"
v1 "k8s.io/api/core/v1"
networkingv1 "k8s.io/api/networking/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
coreinformers "k8s.io/client-go/informers/core/v1"
networkinginformers "k8s.io/client-go/informers/networking/v1"
clientset "k8s.io/client-go/kubernetes"
corelisters "k8s.io/client-go/listers/core/v1"
networkinglisters "k8s.io/client-go/listers/networking/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
"k8s.io/klog"
"github.com/vmware-tanzu/antrea/pkg/apis/controlplane"
"github.com/vmware-tanzu/antrea/pkg/apis/core/v1alpha1"
secv1alpha1 "github.com/vmware-tanzu/antrea/pkg/apis/security/v1alpha1"
"github.com/vmware-tanzu/antrea/pkg/apiserver/storage"
"github.com/vmware-tanzu/antrea/pkg/client/clientset/versioned"
corev1a1informers "github.com/vmware-tanzu/antrea/pkg/client/informers/externalversions/core/v1alpha1"
secinformers "github.com/vmware-tanzu/antrea/pkg/client/informers/externalversions/security/v1alpha1"
corev1a1listers "github.com/vmware-tanzu/antrea/pkg/client/listers/core/v1alpha1"
seclisters "github.com/vmware-tanzu/antrea/pkg/client/listers/security/v1alpha1"
"github.com/vmware-tanzu/antrea/pkg/controller/metrics"
"github.com/vmware-tanzu/antrea/pkg/controller/networkpolicy/store"
antreatypes "github.com/vmware-tanzu/antrea/pkg/controller/types"
"github.com/vmware-tanzu/antrea/pkg/features"
)
const (
// NetworkPolicyController is the only writer of the antrea network policy
// storages and will keep re-enqueuing failed items until they succeed.
// Set resyncPeriod to 0 to disable resyncing.
resyncPeriod time.Duration = 0
// How long to wait before retrying the processing of a NetworkPolicy change.
minRetryDelay = 5 * time.Second
maxRetryDelay = 300 * time.Second
// Default number of workers processing a NetworkPolicy change.
defaultWorkers = 4
// Default rule priority for K8s NetworkPolicy rules.
defaultRulePriority = -1
)
var (
keyFunc = cache.DeletionHandlingMetaNamespaceKeyFunc
// uuidNamespace is a uuid.UUID type generated from a string to be
// used to generate uuid.UUID for internal Antrea objects like
// AppliedToGroup, AddressGroup etc.
// 5a5e7dd9-e3fb-49bb-b263-9bab25c95841 was generated using
// uuid.NewV4() function.
uuidNamespace = uuid.FromStringOrNil("5a5e7dd9-e3fb-49bb-b263-9bab25c95841")
// matchAllPeer is a NetworkPolicyPeer matching all source/destination IP addresses.
matchAllPeer = controlplane.NetworkPolicyPeer{
IPBlocks: []controlplane.IPBlock{{CIDR: controlplane.IPNet{IP: controlplane.IPAddress(net.IPv4zero), PrefixLength: 0}}},
}
// matchAllPodsPeer is a networkingv1.NetworkPolicyPeer matching all Pods from all Namespaces.
matchAllPodsPeer = networkingv1.NetworkPolicyPeer{
NamespaceSelector: &metav1.LabelSelector{},
}
// denyAllIngressRule is a NetworkPolicyRule which denies all ingress traffic.
denyAllIngressRule = controlplane.NetworkPolicyRule{Direction: controlplane.DirectionIn}
// denyAllEgressRule is a NetworkPolicyRule which denies all egress traffic.
denyAllEgressRule = controlplane.NetworkPolicyRule{Direction: controlplane.DirectionOut}
// defaultAction is a RuleAction which sets the default Action for the NetworkPolicy rule.
defaultAction = secv1alpha1.RuleActionAllow
)
// NetworkPolicyController is responsible for synchronizing the Namespaces and Pods
// affected by a Network Policy.
type NetworkPolicyController struct {
// kubeClient is a standard Kubernetes clientset.
kubeClient clientset.Interface
// crdClient is the clientset for CRD API group.
crdClient versioned.Interface
podInformer coreinformers.PodInformer
// podLister is able to list/get Pods and is populated by the shared informer passed to
// NewNetworkPolicyController.
podLister corelisters.PodLister
// podListerSynced is a function which returns true if the Pod shared informer has been synced at least once.
podListerSynced cache.InformerSynced
namespaceInformer coreinformers.NamespaceInformer
// namespaceLister is able to list/get Namespaces and is populated by the shared informer passed to
// NewNetworkPolicyController.
namespaceLister corelisters.NamespaceLister
// namespaceListerSynced is a function which returns true if the Namespace shared informer has been synced at least once.
namespaceListerSynced cache.InformerSynced
externalEntityInformer corev1a1informers.ExternalEntityInformer
// externalEntityLister is able to list/get ExternalEntities and is populated by the shared informer passed to
// NewNetworkPolicyController.
externalEntityLister corev1a1listers.ExternalEntityLister
// externalEntitySynced is a function which returns true if the ExternalEntity shared informer has been synced at least once.
externalEntitySynced cache.InformerSynced
networkPolicyInformer networkinginformers.NetworkPolicyInformer
// networkPolicyLister is able to list/get Network Policies and is populated by the shared informer passed to
// NewNetworkPolicyController.
networkPolicyLister networkinglisters.NetworkPolicyLister
// networkPolicyListerSynced is a function which returns true if the Network Policy shared informer has been synced at least once.
networkPolicyListerSynced cache.InformerSynced
cnpInformer secinformers.ClusterNetworkPolicyInformer
// cnpLister is able to list/get ClusterNetworkPolicies and is populated by the shared informer passed to
// NewNetworkPolicyController.
cnpLister seclisters.ClusterNetworkPolicyLister
// cnpListerSynced is a function which returns true if the ClusterNetworkPolicies shared informer has been synced at least once.
cnpListerSynced cache.InformerSynced
anpInformer secinformers.NetworkPolicyInformer
// anpLister is able to list/get AntreaNetworkPolicies and is populated by the shared informer passed to
// NewNetworkPolicyController.
anpLister seclisters.NetworkPolicyLister
// anpListerSynced is a function which returns true if the AntreaNetworkPolicies shared informer has been synced at least once.
anpListerSynced cache.InformerSynced
// addressGroupStore is the storage where the populated Address Groups are stored.
addressGroupStore storage.Interface
// appliedToGroupStore is the storage where the populated AppliedTo Groups are stored.
appliedToGroupStore storage.Interface
// internalNetworkPolicyStore is the storage where the populated internal Network Policy are stored.
internalNetworkPolicyStore storage.Interface
// appliedToGroupQueue maintains the networkpolicy.AppliedToGroup objects that
// need to be synced.
appliedToGroupQueue workqueue.RateLimitingInterface
// addressGroupQueue maintains the networkpolicy.AddressGroup objects that
// need to be synced.
addressGroupQueue workqueue.RateLimitingInterface
// internalNetworkPolicyQueue maintains the networkpolicy.NetworkPolicy objects that
// need to be synced.
internalNetworkPolicyQueue workqueue.RateLimitingInterface
// internalNetworkPolicyMutex protects the internalNetworkPolicyStore from
// concurrent access during updates to the internal NetworkPolicy object.
internalNetworkPolicyMutex sync.RWMutex
// heartbeatCh is an internal channel for testing. It's used to know whether all tasks have been
// processed, and to count executions of each function.
heartbeatCh chan heartbeat
}
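// heartbeat is sent on the heartbeatCh channel (when it is set, i.e. in tests)
// to record that a given handler or worker function has been executed.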
type heartbeat struct {
name string
timestamp time.Time
}
// NewNetworkPolicyController returns a new *NetworkPolicyController.
func NewNetworkPolicyController(kubeClient clientset.Interface,
crdClient versioned.Interface,
podInformer coreinformers.PodInformer,
namespaceInformer coreinformers.NamespaceInformer,
externalEntityInformer corev1a1informers.ExternalEntityInformer,
networkPolicyInformer networkinginformers.NetworkPolicyInformer,
cnpInformer secinformers.ClusterNetworkPolicyInformer,
anpInformer secinformers.NetworkPolicyInformer,
addressGroupStore storage.Interface,
appliedToGroupStore storage.Interface,
internalNetworkPolicyStore storage.Interface) *NetworkPolicyController {
n := &NetworkPolicyController{
kubeClient: kubeClient,
crdClient: crdClient,
podInformer: podInformer,
podLister: podInformer.Lister(),
podListerSynced: podInformer.Informer().HasSynced,
namespaceInformer: namespaceInformer,
namespaceLister: namespaceInformer.Lister(),
namespaceListerSynced: namespaceInformer.Informer().HasSynced,
externalEntityInformer: externalEntityInformer,
externalEntityLister: externalEntityInformer.Lister(),
externalEntitySynced: externalEntityInformer.Informer().HasSynced,
networkPolicyInformer: networkPolicyInformer,
networkPolicyLister: networkPolicyInformer.Lister(),
networkPolicyListerSynced: networkPolicyInformer.Informer().HasSynced,
addressGroupStore: addressGroupStore,
appliedToGroupStore: appliedToGroupStore,
internalNetworkPolicyStore: internalNetworkPolicyStore,
appliedToGroupQueue: workqueue.NewNamedRateLimitingQueue(workqueue.NewItemExponentialFailureRateLimiter(minRetryDelay, maxRetryDelay), "appliedToGroup"),
addressGroupQueue: workqueue.NewNamedRateLimitingQueue(workqueue.NewItemExponentialFailureRateLimiter(minRetryDelay, maxRetryDelay), "addressGroup"),
internalNetworkPolicyQueue: workqueue.NewNamedRateLimitingQueue(workqueue.NewItemExponentialFailureRateLimiter(minRetryDelay, maxRetryDelay), "internalNetworkPolicy"),
}
// Add handlers for Pod events.
podInformer.Informer().AddEventHandlerWithResyncPeriod(
cache.ResourceEventHandlerFuncs{
AddFunc: n.addPod,
UpdateFunc: n.updatePod,
DeleteFunc: n.deletePod,
},
resyncPeriod,
)
// Add handlers for Namespace events.
namespaceInformer.Informer().AddEventHandlerWithResyncPeriod(
cache.ResourceEventHandlerFuncs{
AddFunc: n.addNamespace,
UpdateFunc: n.updateNamespace,
DeleteFunc: n.deleteNamespace,
},
resyncPeriod,
)
// Add handlers for NetworkPolicy events.
networkPolicyInformer.Informer().AddEventHandlerWithResyncPeriod(
cache.ResourceEventHandlerFuncs{
AddFunc: n.addNetworkPolicy,
UpdateFunc: n.updateNetworkPolicy,
DeleteFunc: n.deleteNetworkPolicy,
},
resyncPeriod,
)
// Register Informer and add handlers for AntreaPolicy events only if the feature is enabled.
if features.DefaultFeatureGate.Enabled(features.AntreaPolicy) {
n.cnpInformer = cnpInformer
n.cnpLister = cnpInformer.Lister()
n.cnpListerSynced = cnpInformer.Informer().HasSynced
n.anpInformer = anpInformer
n.anpLister = anpInformer.Lister()
n.anpListerSynced = anpInformer.Informer().HasSynced
cnpInformer.Informer().AddEventHandlerWithResyncPeriod(
cache.ResourceEventHandlerFuncs{
AddFunc: n.addCNP,
UpdateFunc: n.updateCNP,
DeleteFunc: n.deleteCNP,
},
resyncPeriod,
)
anpInformer.Informer().AddEventHandlerWithResyncPeriod(
cache.ResourceEventHandlerFuncs{
AddFunc: n.addANP,
UpdateFunc: n.updateANP,
DeleteFunc: n.deleteANP,
},
resyncPeriod,
)
externalEntityInformer.Informer().AddEventHandlerWithResyncPeriod(
cache.ResourceEventHandlerFuncs{
AddFunc: n.addExternalEntity,
UpdateFunc: n.updateExternalEntity,
DeleteFunc: n.deleteExternalEntity,
},
resyncPeriod,
)
}
return n
}
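// heartbeat reports that the named function has been executed by sending a
// heartbeat on heartbeatCh, if the channel is set (used by tests only).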
func (n *NetworkPolicyController) heartbeat(name string) {
if n.heartbeatCh != nil {
n.heartbeatCh <- heartbeat{
name: name,
timestamp: time.Now(),
}
}
}
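// GetNetworkPolicyNum returns the number of internal NetworkPolicies currently in the store.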
func (n *NetworkPolicyController) GetNetworkPolicyNum() int {
return len(n.internalNetworkPolicyStore.List())
}
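// GetAddressGroupNum returns the number of AddressGroups currently in the store.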
func (n *NetworkPolicyController) GetAddressGroupNum() int {
return len(n.addressGroupStore.List())
}
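// GetAppliedToGroupNum returns the number of AppliedToGroups currently in the store.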
func (n *NetworkPolicyController) GetAppliedToGroupNum() int {
return len(n.appliedToGroupStore.List())
}
// GetConnectedAgentNum gets the number of Agents which are connected to this Controller.
// Since each Agent watches all three stores (internalNetworkPolicyStore, appliedToGroupStore, addressGroupStore),
// the number of watchers of any one of these three stores is equal to the number of connected Agents.
// Here, we use the number of watchers of internalNetworkPolicyStore to represent the number of connected Agents.
func (n *NetworkPolicyController) GetConnectedAgentNum() int {
return n.internalNetworkPolicyStore.GetWatchersNum()
}
// toGroupSelector converts the podSelector, namespaceSelector and externalEntitySelector
// along with the NetworkPolicy's Namespace to an antreatypes.GroupSelector object.
func toGroupSelector(namespace string, podSelector, nsSelector, extEntitySelector *metav1.LabelSelector) *antreatypes.GroupSelector {
groupSelector := antreatypes.GroupSelector{}
if podSelector != nil {
pSelector, _ := metav1.LabelSelectorAsSelector(podSelector)
groupSelector.PodSelector = pSelector
}
if extEntitySelector != nil {
eSelector, _ := metav1.LabelSelectorAsSelector(extEntitySelector)
groupSelector.ExternalEntitySelector = eSelector
}
if nsSelector == nil {
// No namespaceSelector indicates that the pods must be selected within
// the NetworkPolicy's Namespace.
groupSelector.Namespace = namespace
} else {
nSelector, _ := metav1.LabelSelectorAsSelector(nsSelector)
groupSelector.NamespaceSelector = nSelector
}
name := generateNormalizedName(groupSelector.Namespace, groupSelector.PodSelector, groupSelector.NamespaceSelector, groupSelector.ExternalEntitySelector)
groupSelector.NormalizedName = name
return &groupSelector
}
// getNormalizedUID generates a deterministic UUID based on a given string and
// the constant uuidNamespace. For example, it can be used to generate keys from
// normalized selectors that are unique within the Namespace.
func getNormalizedUID(name string) string {
return uuid.NewV5(uuidNamespace, name).String()
}
// generateNormalizedName generates a string, based on the selectors, in
// the following format: "namespace=NamespaceName And podSelector=normalizedPodSelector".
// Note: Namespace and nsSelector may or may not be set depending on the
// selector. However, they cannot be set simultaneously.
func generateNormalizedName(namespace string, podSelector, nsSelector, eeSelector labels.Selector) string {
normalizedName := []string{}
if nsSelector != nil {
normalizedName = append(normalizedName, fmt.Sprintf("namespaceSelector=%s", nsSelector.String()))
} else if namespace != "" {
normalizedName = append(normalizedName, fmt.Sprintf("namespace=%s", namespace))
}
if podSelector != nil {
normalizedName = append(normalizedName, fmt.Sprintf("podSelector=%s", podSelector.String()))
}
if eeSelector != nil {
normalizedName = append(normalizedName, fmt.Sprintf("eeSelector=%s", eeSelector.String()))
}
sort.Strings(normalizedName)
return strings.Join(normalizedName, " And ")
}
// createAppliedToGroup creates an AppliedToGroup object in store if it is not created already.
func (n *NetworkPolicyController) createAppliedToGroup(npNsName string, pSel, nSel, eSel *metav1.LabelSelector) string {
groupSelector := toGroupSelector(npNsName, pSel, nSel, eSel)
appliedToGroupUID := getNormalizedUID(groupSelector.NormalizedName)
// Get or create an AppliedToGroup for the generated UID.
// Ignoring returned error (here and elsewhere in this file) as with the
// current store implementation, no error is ever returned.
_, found, _ := n.appliedToGroupStore.Get(appliedToGroupUID)
if found {
return appliedToGroupUID
}
// Construct a new AppliedToGroup.
newAppliedToGroup := &antreatypes.AppliedToGroup{
Name: appliedToGroupUID,
UID: types.UID(appliedToGroupUID),
Selector: *groupSelector,
}
klog.V(2).Infof("Creating new AppliedToGroup %s with selector (%s)", newAppliedToGroup.Name, newAppliedToGroup.Selector.NormalizedName)
n.appliedToGroupStore.Create(newAppliedToGroup)
n.enqueueAppliedToGroup(appliedToGroupUID)
return appliedToGroupUID
}
// labelsMatchGroupSelector matches an ExternalEntity or Pod's labels to the
// GroupSelector object and returns true if and only if the labels
// match any of the selector criteria present in the GroupSelector.
func (n *NetworkPolicyController) labelsMatchGroupSelector(obj metav1.Object, ns *v1.Namespace, sel *antreatypes.GroupSelector) bool {
objSelector := sel.PodSelector
if _, ok := obj.(*v1alpha1.ExternalEntity); ok {
objSelector = sel.ExternalEntitySelector
}
if sel.Namespace != "" {
if sel.Namespace != obj.GetNamespace() {
// Pods or ExternalEntities must be matched within the same Namespace.
return false
}
if objSelector != nil && objSelector.Matches(labels.Set(obj.GetLabels())) {
// podSelector or externalEntitySelector matches the ExternalEntity or Pod's labels.
return true
}
// selector does not match the ExternalEntity or Pod's labels.
return false
} else if sel.NamespaceSelector != nil && objSelector != nil {
// Pod or ExternalEntity event may arrive before Namespace event. In this case, we must
// ensure that the Namespace is not nil.
if ns == nil || !sel.NamespaceSelector.Matches(labels.Set(ns.Labels)) {
// Pod's Namespace does not match the namespaceSelector.
return false
}
if !objSelector.Matches(labels.Set(obj.GetLabels())) {
// ExternalEntity or Pod's Namespace matches namespaceSelector but
// labels do not match the podSelector or externalEntitySelector.
return false
}
// ExternalEntity or Pod's Namespace matches namespaceSelector and labels matches
// podSelector or externalEntitySelector.
return true
} else if sel.NamespaceSelector != nil && sel.ExternalEntitySelector == nil && sel.PodSelector == nil {
// Selector only has a NamespaceSelector.
// Pod or ExternalEntity event may arrive before Namespace event. In this case, we must
// ensure that the Namespace is not nil.
if ns == nil || !sel.NamespaceSelector.Matches(labels.Set(ns.Labels)) {
// Namespace labels do not match namespaceSelector.
return false
}
// Namespace labels match namespaceSelector.
return true
} else if objSelector != nil {
// Selector only has a PodSelector/ExternalEntitySelector and no sel.Namespace.
// Pods/ExternalEntities must be matched from all Namespaces.
if !objSelector.Matches(labels.Set(obj.GetLabels())) {
// pod/ee labels do not match PodSelector/ExternalEntitySelector.
return false
}
return true
}
return false
}
// filterAddressGroupsForNamespace computes a list of AddressGroup keys which
// match the Namespace's labels.
func (n *NetworkPolicyController) filterAddressGroupsForNamespace(namespace *v1.Namespace) sets.String {
matchingKeys := sets.String{}
// Only cluster scoped groups or AddressGroups created by CNP can possibly select this Namespace.
addressGroups, _ := n.addressGroupStore.GetByIndex(cache.NamespaceIndex, "")
for _, group := range addressGroups {
addrGroup := group.(*antreatypes.AddressGroup)
// AddressGroup created by CNP might not have NamespaceSelector.
if addrGroup.Selector.NamespaceSelector != nil && addrGroup.Selector.NamespaceSelector.Matches(labels.Set(namespace.Labels)) {
matchingKeys.Insert(addrGroup.Name)
klog.V(2).Infof("Namespace %s matched AddressGroup %s", namespace.Name, addrGroup.Name)
}
}
return matchingKeys
}
// filterAddressGroupsForPodOrExternalEntity computes a list of AddressGroup keys which
// match the ExternalEntity or Pod's labels.
func (n *NetworkPolicyController) filterAddressGroupsForPodOrExternalEntity(obj metav1.Object) sets.String {
matchingKeySet := sets.String{}
// AddressGroups that are in this namespace or that are cluster scoped can possibly select this Pod/ExternalEntity.
localAddressGroups, _ := n.addressGroupStore.GetByIndex(cache.NamespaceIndex, obj.GetNamespace())
clusterScopedAddressGroups, _ := n.addressGroupStore.GetByIndex(cache.NamespaceIndex, "")
ns, _ := n.namespaceLister.Get(obj.GetNamespace())
for _, group := range append(localAddressGroups, clusterScopedAddressGroups...) {
addrGroup := group.(*antreatypes.AddressGroup)
if n.labelsMatchGroupSelector(obj, ns, &addrGroup.Selector) {
matchingKeySet.Insert(addrGroup.Name)
klog.V(2).Infof("%s/%s matched AddressGroup %s", obj.GetNamespace(), obj.GetName(), addrGroup.Name)
}
}
return matchingKeySet
}
// filterAppliedToGroupsForPodOrExternalEntity computes a list of AppliedToGroup keys which
// match the ExternalEntity or Pod's labels.
func (n *NetworkPolicyController) filterAppliedToGroupsForPodOrExternalEntity(obj metav1.Object) sets.String {
matchingKeySet := sets.String{}
// Get appliedToGroups from the namespace level
appliedToGroups, _ := n.appliedToGroupStore.GetByIndex(cache.NamespaceIndex, obj.GetNamespace())
// Get appliedToGroups from the cluster level
clusterATGroups, _ := n.appliedToGroupStore.GetByIndex(cache.NamespaceIndex, "")
appliedToGroups = append(appliedToGroups, clusterATGroups...)
ns, _ := n.namespaceLister.Get(obj.GetNamespace())
for _, group := range appliedToGroups {
appGroup := group.(*antreatypes.AppliedToGroup)
if n.labelsMatchGroupSelector(obj, ns, &appGroup.Selector) {
matchingKeySet.Insert(appGroup.Name)
klog.V(2).Infof("%s/%s matched AppliedToGroup %s", obj.GetNamespace(), obj.GetName(), appGroup.Name)
}
}
return matchingKeySet
}
// createAddressGroup creates an AddressGroup object corresponding to a
// NetworkPolicyPeer object in NetworkPolicyRule. This function simply
// creates the object without actually populating the PodAddresses, as the
// affected Pods are calculated during the sync process.
func (n *NetworkPolicyController) createAddressGroup(peer networkingv1.NetworkPolicyPeer, np *networkingv1.NetworkPolicy) string {
groupSelector := toGroupSelector(np.ObjectMeta.Namespace, peer.PodSelector, peer.NamespaceSelector, nil)
normalizedUID := getNormalizedUID(groupSelector.NormalizedName)
// Get or create an AddressGroup for the generated UID.
_, found, _ := n.addressGroupStore.Get(normalizedUID)
if found {
return normalizedUID
}
// Create an AddressGroup object per Peer object.
addressGroup := &antreatypes.AddressGroup{
UID: types.UID(normalizedUID),
Name: normalizedUID,
Selector: *groupSelector,
}
klog.V(2).Infof("Creating new AddressGroup %s with selector (%s)", addressGroup.Name, addressGroup.Selector.NormalizedName)
n.addressGroupStore.Create(addressGroup)
return normalizedUID
}
// toAntreaProtocol converts a v1.Protocol object to an Antrea Protocol object.
func toAntreaProtocol(npProtocol *v1.Protocol) *controlplane.Protocol {
// If Protocol is unset, it must default to TCP protocol.
internalProtocol := controlplane.ProtocolTCP
if npProtocol != nil {
internalProtocol = controlplane.Protocol(*npProtocol)
}
return &internalProtocol
}
// toAntreaServices converts a slice of networkingv1.NetworkPolicyPort objects
// to a slice of Antrea Service objects. A bool is returned along with the
// Service objects to indicate whether any named port exists.
func toAntreaServices(npPorts []networkingv1.NetworkPolicyPort) ([]controlplane.Service, bool) {
var antreaServices []controlplane.Service
var namedPortExists bool
for _, npPort := range npPorts {
if npPort.Port != nil && npPort.Port.Type == intstr.String {
namedPortExists = true
}
antreaService := controlplane.Service{
Protocol: toAntreaProtocol(npPort.Protocol),
Port: npPort.Port,
}
antreaServices = append(antreaServices, antreaService)
}
return antreaServices, namedPortExists
}
// toAntreaIPBlock converts a networkingv1.IPBlock to an Antrea IPBlock.
func toAntreaIPBlock(ipBlock *networkingv1.IPBlock) (*controlplane.IPBlock, error) {
// Convert the allowed IPBlock to networkpolicy.IPNet.
ipNet, err := cidrStrToIPNet(ipBlock.CIDR)
if err != nil {
return nil, err
}
exceptNets := []controlplane.IPNet{}
for _, exc := range ipBlock.Except {
// Convert the except IPBlock to networkpolicy.IPNet.
exceptNet, err := cidrStrToIPNet(exc)
if err != nil {
return nil, err
}
exceptNets = append(exceptNets, *exceptNet)
}
antreaIPBlock := &controlplane.IPBlock{
CIDR: *ipNet,
Except: exceptNets,
}
return antreaIPBlock, nil
}
// processNetworkPolicy creates an internal NetworkPolicy instance corresponding
// to the networkingv1.NetworkPolicy object. This method does not commit the
// internal NetworkPolicy to the store; instead it returns the instance to the
// caller, which either stores it as a new object (in case of an ADD event) or
// uses it to update the existing stored instance (in case of an UPDATE event).
func (n *NetworkPolicyController) processNetworkPolicy(np *networkingv1.NetworkPolicy) *antreatypes.NetworkPolicy {
appliedToGroupKey := n.createAppliedToGroup(np.Namespace, &np.Spec.PodSelector, nil, nil)
appliedToGroupNames := []string{appliedToGroupKey}
rules := make([]controlplane.NetworkPolicyRule, 0, len(np.Spec.Ingress)+len(np.Spec.Egress))
var ingressRuleExists, egressRuleExists bool
// Compute NetworkPolicyRule for Ingress Rule.
for _, ingressRule := range np.Spec.Ingress {
ingressRuleExists = true
services, namedPortExists := toAntreaServices(ingressRule.Ports)
rules = append(rules, controlplane.NetworkPolicyRule{
Direction: controlplane.DirectionIn,
From: *n.toAntreaPeer(ingressRule.From, np, controlplane.DirectionIn, namedPortExists),
Services: services,
Priority: defaultRulePriority,
Action: &defaultAction,
})
}
// Compute NetworkPolicyRule for Egress Rule.
for _, egressRule := range np.Spec.Egress {
egressRuleExists = true
services, namedPortExists := toAntreaServices(egressRule.Ports)
rules = append(rules, controlplane.NetworkPolicyRule{
Direction: controlplane.DirectionOut,
To: *n.toAntreaPeer(egressRule.To, np, controlplane.DirectionOut, namedPortExists),
Services: services,
Priority: defaultRulePriority,
Action: &defaultAction,
})
}
// Traffic in a direction must be isolated if Spec.PolicyTypes specify it explicitly.
var ingressIsolated, egressIsolated bool
for _, policyType := range np.Spec.PolicyTypes {
if policyType == networkingv1.PolicyTypeIngress {
ingressIsolated = true
} else if policyType == networkingv1.PolicyTypeEgress {
egressIsolated = true
}
}
// If ingress isolation is specified explicitly and there's no ingress rule, append a deny-all ingress rule.
// See https://kubernetes.io/docs/concepts/services-networking/network-policies/#default-deny-all-ingress-traffic
if ingressIsolated && !ingressRuleExists {
rules = append(rules, denyAllIngressRule)
}
// If egress isolation is specified explicitly and there's no egress rule, append a deny-all egress rule.
// See https://kubernetes.io/docs/concepts/services-networking/network-policies/#default-deny-all-egress-traffic
if egressIsolated && !egressRuleExists {
rules = append(rules, denyAllEgressRule)
}
internalNetworkPolicy := &antreatypes.NetworkPolicy{
Name: np.Name,
Namespace: np.Namespace,
UID: np.UID,
SourceRef: &controlplane.NetworkPolicyReference{
Type: controlplane.K8sNetworkPolicy,
Namespace: np.Namespace,
Name: np.Name,
UID: np.UID,
},
AppliedToGroups: appliedToGroupNames,
Rules: rules,
}
return internalNetworkPolicy
}
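// toAntreaPeer converts a slice of networkingv1.NetworkPolicyPeer objects into a single
// controlplane.NetworkPolicyPeer, creating AddressGroups for selector-based peers and
// converting IPBlock-based peers to Antrea IPBlocks.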
func (n *NetworkPolicyController) toAntreaPeer(peers []networkingv1.NetworkPolicyPeer, np *networkingv1.NetworkPolicy, dir controlplane.Direction, namedPortExists bool) *controlplane.NetworkPolicyPeer {
var addressGroups []string
// Empty NetworkPolicyPeer is supposed to match all addresses.
// See https://kubernetes.io/docs/concepts/services-networking/network-policies/#default-allow-all-ingress-traffic.
// It's treated as an IPBlock "0.0.0.0/0".
if len(peers) == 0 {
// For an egress Peer that specifies any named ports, it creates or
// reuses the AddressGroup matching all Pods in all Namespaces and
// appends the AddressGroup UID to the returned Peer such that it can be
// used to resolve the named ports.
// For other cases it uses the IPBlock "0.0.0.0/0" to avoid the overhead
// of handling member updates of the AddressGroup.
if dir == controlplane.DirectionIn || !namedPortExists {
return &matchAllPeer
}
allPodsGroupUID := n.createAddressGroup(matchAllPodsPeer, np)
podsPeer := matchAllPeer
podsPeer.AddressGroups = append(addressGroups, allPodsGroupUID)
return &podsPeer
}
var ipBlocks []controlplane.IPBlock
for _, peer := range peers {
// A networkingv1.NetworkPolicyPeer will either have an IPBlock or a
// podSelector and/or namespaceSelector set.
if peer.IPBlock != nil {
ipBlock, err := toAntreaIPBlock(peer.IPBlock)
if err != nil {
klog.Errorf("Failure processing NetworkPolicy %s/%s IPBlock %v: %v", np.Namespace, np.Name, peer.IPBlock, err)
continue
}
ipBlocks = append(ipBlocks, *ipBlock)
} else {
normalizedUID := n.createAddressGroup(peer, np)
addressGroups = append(addressGroups, normalizedUID)
}
}
return &controlplane.NetworkPolicyPeer{AddressGroups: addressGroups, IPBlocks: ipBlocks}
}
// addNetworkPolicy receives NetworkPolicy ADD events and creates resources
// which can be consumed by agents to configure corresponding rules on the Nodes.
func (n *NetworkPolicyController) addNetworkPolicy(obj interface{}) {
defer n.heartbeat("addNetworkPolicy")
np := obj.(*networkingv1.NetworkPolicy)
klog.V(2).Infof("Processing NetworkPolicy %s/%s ADD event", np.Namespace, np.Name)
// Create an internal NetworkPolicy object corresponding to this NetworkPolicy
// and enqueue task to internal NetworkPolicy Workqueue.
internalNP := n.processNetworkPolicy(np)
klog.Infof("Creating new internal NetworkPolicy %s/%s", internalNP.Namespace, internalNP.Name)
n.internalNetworkPolicyStore.Create(internalNP)
key, _ := keyFunc(np)
n.enqueueInternalNetworkPolicy(key)
}
// updateNetworkPolicy receives NetworkPolicy UPDATE events and updates resources
// which can be consumed by agents to configure corresponding rules on the Nodes.
func (n *NetworkPolicyController) updateNetworkPolicy(old, cur interface{}) {
defer n.heartbeat("updateNetworkPolicy")
np := cur.(*networkingv1.NetworkPolicy)
klog.V(2).Infof("Processing NetworkPolicy %s/%s UPDATE event", np.Namespace, np.Name)
// Regenerate the internal NetworkPolicy corresponding to this NetworkPolicy and
// enqueue a task on the internal NetworkPolicy workqueue.
curInternalNP := n.processNetworkPolicy(np)
klog.V(2).Infof("Updating existing internal NetworkPolicy %s/%s", curInternalNP.Namespace, curInternalNP.Name)
// Retrieve old networkingv1.NetworkPolicy object.
oldNP := old.(*networkingv1.NetworkPolicy)
// Old and current NetworkPolicy share the same key.
key, _ := keyFunc(oldNP)
// Lock access to internal NetworkPolicy store such that concurrent access
// to an internal NetworkPolicy is not allowed. This will avoid the
// case in which an Update to an internal NetworkPolicy object may
// cause the SpanMeta member to be overridden with stale SpanMeta members
// from an older internal NetworkPolicy.
n.internalNetworkPolicyMutex.Lock()
oldInternalNPObj, _, _ := n.internalNetworkPolicyStore.Get(key)
oldInternalNP := oldInternalNPObj.(*antreatypes.NetworkPolicy)
// AppliedToGroups currently only supports a single member.
oldAppliedToGroupUID := oldInternalNP.AppliedToGroups[0]
// Must preserve old internal NetworkPolicy Span.
curInternalNP.SpanMeta = oldInternalNP.SpanMeta
n.internalNetworkPolicyStore.Update(curInternalNP)
// Unlock the internal NetworkPolicy store.
n.internalNetworkPolicyMutex.Unlock()
// Enqueue addressGroup keys to update their Node span.
for _, rule := range curInternalNP.Rules {
for _, addrGroupName := range rule.From.AddressGroups {
n.enqueueAddressGroup(addrGroupName)
}
for _, addrGroupName := range rule.To.AddressGroups {
n.enqueueAddressGroup(addrGroupName)
}
}
n.enqueueInternalNetworkPolicy(key)
// AppliedToGroups currently only supports a single member.
curAppliedToGroupUID := curInternalNP.AppliedToGroups[0]
// Delete the old AppliedToGroup object if it is not referenced by any
// internal NetworkPolicy.
if oldAppliedToGroupUID != curAppliedToGroupUID {
n.deleteDereferencedAppliedToGroup(oldAppliedToGroupUID)
}
n.deleteDereferencedAddressGroups(oldInternalNP)
}
// deleteNetworkPolicy receives NetworkPolicy DELETED events and deletes resources
// which can be consumed by agents to delete corresponding rules on the Nodes.
func (n *NetworkPolicyController) deleteNetworkPolicy(old interface{}) {
np, ok := old.(*networkingv1.NetworkPolicy)
if !ok {
tombstone, ok := old.(cache.DeletedFinalStateUnknown)
if !ok {
klog.Errorf("Error decoding object when deleting NetworkPolicy, invalid type: %v", old)
return
}
np, ok = tombstone.Obj.(*networkingv1.NetworkPolicy)
if !ok {
klog.Errorf("Error decoding object tombstone when deleting NetworkPolicy, invalid type: %v", tombstone.Obj)
return
}
}
defer n.heartbeat("deleteNetworkPolicy")
klog.V(2).Infof("Processing NetworkPolicy %s/%s DELETE event", np.Namespace, np.Name)
key, _ := keyFunc(np)
oldInternalNPObj, _, _ := n.internalNetworkPolicyStore.Get(key)
oldInternalNP := oldInternalNPObj.(*antreatypes.NetworkPolicy)
// AppliedToGroups currently only supports a single member.
oldAppliedToGroupUID := oldInternalNP.AppliedToGroups[0]
klog.Infof("Deleting internal NetworkPolicy %s/%s", np.Namespace, np.Name)
// Delete corresponding internal NetworkPolicy from store.
err := n.internalNetworkPolicyStore.Delete(key)
if err != nil {
klog.Errorf("Error deleting internal NetworkPolicy during NetworkPolicy %s/%s delete: %v", np.Namespace, np.Name, err)
return
}
n.deleteDereferencedAppliedToGroup(oldAppliedToGroupUID)
n.deleteDereferencedAddressGroups(oldInternalNP)
}
// addPod retrieves all AddressGroups and AppliedToGroups which match the Pod's
// labels and enqueues the groups key for further processing.
func (n *NetworkPolicyController) addPod(obj interface{}) {
defer n.heartbeat("addPod")
pod := obj.(*v1.Pod)
klog.V(2).Infof("Processing Pod %s/%s ADD event, labels: %v", pod.Namespace, pod.Name, pod.Labels)
// Find all AppliedToGroup keys which match the Pod's labels.
appliedToGroupKeySet := n.filterAppliedToGroupsForPodOrExternalEntity(pod)
// Find all AddressGroup keys which match the Pod's labels.
addressGroupKeySet := n.filterAddressGroupsForPodOrExternalEntity(pod)
// Enqueue groups to their respective queues for group processing.
for group := range appliedToGroupKeySet {
n.enqueueAppliedToGroup(group)
}
for group := range addressGroupKeySet {
n.enqueueAddressGroup(group)
}
}
// updatePod retrieves all AddressGroups and AppliedToGroups which match the
// updated and old Pod's labels and enqueues the group keys for further
// processing.
func (n *NetworkPolicyController) updatePod(oldObj, curObj interface{}) {
defer n.heartbeat("updatePod")
oldPod := oldObj.(*v1.Pod)
curPod := curObj.(*v1.Pod)
klog.V(2).Infof("Processing Pod %s/%s UPDATE event, labels: %v", curPod.Namespace, curPod.Name, curPod.Labels)
// No need to trigger processing of groups if there is no change in the
// Pod's labels, the Pod's Node, or the Pod's IP.
labelsEqual := labels.Equals(labels.Set(oldPod.Labels), labels.Set(curPod.Labels))
if labelsEqual && oldPod.Spec.NodeName == curPod.Spec.NodeName && oldPod.Status.PodIP == curPod.Status.PodIP {
klog.V(4).Infof("No change in Pod %s/%s. Skipping NetworkPolicy evaluation.", curPod.Namespace, curPod.Name)
return
}
// Find groups matching the old Pod's labels.
oldAddressGroupKeySet := n.filterAddressGroupsForPodOrExternalEntity(oldPod)
oldAppliedToGroupKeySet := n.filterAppliedToGroupsForPodOrExternalEntity(oldPod)
// Find groups matching the new Pod's labels.
curAppliedToGroupKeySet := n.filterAppliedToGroupsForPodOrExternalEntity(curPod)
curAddressGroupKeySet := n.filterAddressGroupsForPodOrExternalEntity(curPod)
// Create set to hold the group keys to enqueue.
var appliedToGroupKeys sets.String
var addressGroupKeys sets.String
// AppliedToGroup keys must be enqueued only if the Pod's Node or IP has changed or
// if a change in the Pod's labels causes it to match new Groups.
if oldPod.Status.PodIP != curPod.Status.PodIP || oldPod.Spec.NodeName != curPod.Spec.NodeName {
appliedToGroupKeys = oldAppliedToGroupKeySet.Union(curAppliedToGroupKeySet)
} else if !labelsEqual {
// No need to enqueue common AppliedToGroups as they already have latest Pod
// information.
appliedToGroupKeys = oldAppliedToGroupKeySet.Difference(curAppliedToGroupKeySet).Union(curAppliedToGroupKeySet.Difference(oldAppliedToGroupKeySet))
}
// AddressGroup keys must be enqueued only if the Pod's IP has changed or
// if a change in the Pod's labels causes it to match new Groups.
if oldPod.Status.PodIP != curPod.Status.PodIP {
addressGroupKeys = oldAddressGroupKeySet.Union(curAddressGroupKeySet)
} else if !labelsEqual {
// No need to enqueue common AddressGroups as they already have latest Pod
// information.
addressGroupKeys = oldAddressGroupKeySet.Difference(curAddressGroupKeySet).Union(curAddressGroupKeySet.Difference(oldAddressGroupKeySet))
}
for group := range appliedToGroupKeys {
n.enqueueAppliedToGroup(group)
}
for group := range addressGroupKeys {
n.enqueueAddressGroup(group)
}
}
// deletePod retrieves all AddressGroups and AppliedToGroups which match the Pod's
// labels and enqueues the groups key for further processing.
func (n *NetworkPolicyController) deletePod(old interface{}) {
pod, ok := old.(*v1.Pod)
if !ok {
tombstone, ok := old.(cache.DeletedFinalStateUnknown)
if !ok {
klog.Errorf("Error decoding object when deleting Pod, invalid type: %v", old)
return
}
pod, ok = tombstone.Obj.(*v1.Pod)
if !ok {
klog.Errorf("Error decoding object tombstone when deleting Pod, invalid type: %v", tombstone.Obj)
return
}
}
defer n.heartbeat("deletePod")
klog.V(2).Infof("Processing Pod %s/%s DELETE event, labels: %v", pod.Namespace, pod.Name, pod.Labels)
// Find all AppliedToGroup keys which match the Pod's labels.
appliedToGroupKeys := n.filterAppliedToGroupsForPodOrExternalEntity(pod)
// Find all AddressGroup keys which match the Pod's labels.
addressGroupKeys := n.filterAddressGroupsForPodOrExternalEntity(pod)
// Enqueue groups to their respective queues for group processing.
for group := range appliedToGroupKeys {
n.enqueueAppliedToGroup(group)
}
for group := range addressGroupKeys {
n.enqueueAddressGroup(group)
}
}
// addExternalEntity retrieves all AddressGroups and AppliedToGroups which match the ExternalEntity's
// labels and enqueues the groups key for further processing.
func (n *NetworkPolicyController) addExternalEntity(obj interface{}) {
defer n.heartbeat("addExternalEntity")
ee := obj.(*v1alpha1.ExternalEntity)
klog.V(2).Infof("Processing ExternalEntity %s/%s ADD event, labels: %v", ee.Namespace, ee.Name, ee.Labels)
// Find all AppliedToGroup keys which match the ExternalEntity's labels.
appliedToGroupKeySet := n.filterAppliedToGroupsForPodOrExternalEntity(ee)
// Find all AddressGroup keys which match the ExternalEntity's labels.
addressGroupKeySet := n.filterAddressGroupsForPodOrExternalEntity(ee)
// Enqueue groups to their respective queues for group processing.
for group := range appliedToGroupKeySet {
n.enqueueAppliedToGroup(group)
}
for group := range addressGroupKeySet {
n.enqueueAddressGroup(group)
}
}
// updateExternalEntity retrieves all AddressGroups and AppliedToGroups which match the
// updated and old ExternalEntity's labels and enqueues the group keys for further
// processing.
func (n *NetworkPolicyController) updateExternalEntity(oldObj, curObj interface{}) {
defer n.heartbeat("updateExternalEntity")
oldEE := oldObj.(*v1alpha1.ExternalEntity)
curEE := curObj.(*v1alpha1.ExternalEntity)
klog.V(2).Infof("Processing ExternalEntity %s/%s UPDATE event, labels: %v", curEE.Namespace, curEE.Name, curEE.Labels)
// No need to trigger processing of groups if there is no change in the
// ExternalEntity's labels or the ExternalEntity's Endpoints.
labelsEqual := labels.Equals(labels.Set(oldEE.Labels), labels.Set(curEE.Labels))
specEqual := reflect.DeepEqual(oldEE.Spec, curEE.Spec)
// TODO: Right now two ExternalEntities are only considered equal if the list of Endpoints and
// all NamedPorts in each Endpoint are in the exact same order. Consider implementing a custom
// compare method for the ExternalEntity spec to address this and improve performance.
if labelsEqual && specEqual {
klog.V(4).Infof("No change in ExternalEntity %s/%s. Skipping NetworkPolicy evaluation.", curEE.Namespace, curEE.Name)
return
}
// Find groups matching the old ExternalEntity's labels.
oldAppliedToGroupKeySet := n.filterAppliedToGroupsForPodOrExternalEntity(oldEE)
oldAddressGroupKeySet := n.filterAddressGroupsForPodOrExternalEntity(oldEE)
// Find groups matching the new ExternalEntity's labels.
curAppliedToGroupKeySet := n.filterAppliedToGroupsForPodOrExternalEntity(curEE)
curAddressGroupKeySet := n.filterAddressGroupsForPodOrExternalEntity(curEE)
// Create set to hold the group keys to enqueue.
var appliedToGroupKeys sets.String
var addressGroupKeys sets.String
// AppliedToGroup keys must be enqueued only if the ExternalEntity's spec has changed or
// if a change in the ExternalEntity's labels causes it to match new Groups.
if !specEqual {
appliedToGroupKeys = oldAppliedToGroupKeySet.Union(curAppliedToGroupKeySet)
} else if !labelsEqual {
// No need to enqueue common AppliedToGroups as they already have the latest
// ExternalEntity information.
appliedToGroupKeys = oldAppliedToGroupKeySet.Difference(curAppliedToGroupKeySet).Union(curAppliedToGroupKeySet.Difference(oldAppliedToGroupKeySet))
}
// AddressGroup keys must be enqueued only if the ExternalEntity's spec has changed or
// if a change in the ExternalEntity's labels causes it to match new Groups.
if !specEqual {
addressGroupKeys = oldAddressGroupKeySet.Union(curAddressGroupKeySet)
} else if !labelsEqual {
// No need to enqueue common AddressGroups as they already have the latest
// ExternalEntity information.
addressGroupKeys = oldAddressGroupKeySet.Difference(curAddressGroupKeySet).Union(curAddressGroupKeySet.Difference(oldAddressGroupKeySet))
}
for group := range appliedToGroupKeys {
n.enqueueAppliedToGroup(group)
}
for group := range addressGroupKeys {
n.enqueueAddressGroup(group)
}
}
// deleteExternalEntity retrieves all AddressGroups and AppliedToGroups which match the ExternalEntity's
// labels and enqueues the groups key for further processing.
func (n *NetworkPolicyController) deleteExternalEntity(old interface{}) {
ee, ok := old.(*v1alpha1.ExternalEntity)
if !ok {
tombstone, ok := old.(cache.DeletedFinalStateUnknown)
if !ok {
klog.Errorf("Error decoding object when deleting ExternalEntity, invalid type: %v", old)
return
}
ee, ok = tombstone.Obj.(*v1alpha1.ExternalEntity)
if !ok {
klog.Errorf("Error decoding object tombstone when deleting ExternalEntity, invalid type: %v", tombstone.Obj)
return
}
}
defer n.heartbeat("deleteExternalEntity")
klog.V(2).Infof("Processing ExternalEntity %s/%s DELETE event, labels: %v", ee.Namespace, ee.Name, ee.Labels)
// Find all AppliedToGroup keys which match the ExternalEntity's labels.
appliedToGroupKeys := n.filterAppliedToGroupsForPodOrExternalEntity(ee)
// Find all AddressGroup keys which match the ExternalEntity's labels.
addressGroupKeys := n.filterAddressGroupsForPodOrExternalEntity(ee)
// Enqueue groups to their respective queues for group processing.
for group := range appliedToGroupKeys {
n.enqueueAppliedToGroup(group)
}
for group := range addressGroupKeys {
n.enqueueAddressGroup(group)
}
}
// addNamespace retrieves all AddressGroups which match the Namespace
// labels and enqueues the group keys for further processing.
func (n *NetworkPolicyController) addNamespace(obj interface{}) {
defer n.heartbeat("addNamespace")
namespace := obj.(*v1.Namespace)
klog.V(2).Infof("Processing Namespace %s ADD event, labels: %v", namespace.Name, namespace.Labels)
addressGroupKeys := n.filterAddressGroupsForNamespace(namespace)
for group := range addressGroupKeys {
n.enqueueAddressGroup(group)
}
}
// updateNamespace retrieves all AddressGroups which match the current and old
// Namespace labels and enqueues the group keys for further processing.
func (n *NetworkPolicyController) updateNamespace(oldObj, curObj interface{}) {
defer n.heartbeat("updateNamespace")
oldNamespace := oldObj.(*v1.Namespace)
curNamespace := curObj.(*v1.Namespace)
klog.V(2).Infof("Processing Namespace %s UPDATE event, labels: %v", curNamespace.Name, curNamespace.Labels)
// No need to trigger processing of groups if there is no change in the
// Namespace labels.
if labels.Equals(labels.Set(oldNamespace.Labels), labels.Set(curNamespace.Labels)) {
klog.V(4).Infof("No change in Namespace %s labels", curNamespace.Name)
return
}
// Find groups matching the new Namespace's labels.
curAddressGroupKeySet := n.filterAddressGroupsForNamespace(curNamespace)
// Find groups matching the old Namespace's labels.
oldAddressGroupKeySet := n.filterAddressGroupsForNamespace(oldNamespace)
// No need to enqueue common AddressGroups as they already have latest
// Namespace information.
addressGroupKeys := oldAddressGroupKeySet.Difference(curAddressGroupKeySet).Union(curAddressGroupKeySet.Difference(oldAddressGroupKeySet))
for group := range addressGroupKeys {
n.enqueueAddressGroup(group)
}
}
// deleteNamespace retrieves all AddressGroups which match the Namespace's
// labels and enqueues the group keys for further processing.
func (n *NetworkPolicyController) deleteNamespace(old interface{}) {
namespace, ok := old.(*v1.Namespace)
if !ok {
tombstone, ok := old.(cache.DeletedFinalStateUnknown)
if !ok {
klog.Errorf("Error decoding object when deleting Namespace, invalid type: %v", old)
return
}
namespace, ok = tombstone.Obj.(*v1.Namespace)
if !ok {
klog.Errorf("Error decoding object tombstone when deleting Namespace, invalid type: %v", tombstone.Obj)
return
}
}
defer n.heartbeat("deleteNamespace")
klog.V(2).Infof("Processing Namespace %s DELETE event, labels: %v", namespace.Name, namespace.Labels)
// Find groups matching deleted Namespace's labels and enqueue them
// for further processing.
addressGroupKeys := n.filterAddressGroupsForNamespace(namespace)
for group := range addressGroupKeys {
n.enqueueAddressGroup(group)
}
}
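// enqueueAppliedToGroup adds the AppliedToGroup key to the appliedToGroup workqueue
// and updates the corresponding queue length metric.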
func (n *NetworkPolicyController) enqueueAppliedToGroup(key string) {
klog.V(4).Infof("Adding new key %s to AppliedToGroup queue", key)
n.appliedToGroupQueue.Add(key)
metrics.LengthAppliedToGroupQueue.Set(float64(n.appliedToGroupQueue.Len()))
}
// deleteDereferencedAddressGroups deletes the AddressGroup keys which are no
// longer referenced by any internal NetworkPolicy.
func (n *NetworkPolicyController) deleteDereferencedAddressGroups(internalNP *antreatypes.NetworkPolicy) {
addressGroupKeys := []string{}
for _, rule := range internalNP.Rules {
// Populate AddressGroupKeys for ingress rules.
addressGroupKeys = append(addressGroupKeys, rule.From.AddressGroups...)
// Populate AddressGroupKeys for egress rules.
addressGroupKeys = append(addressGroupKeys, rule.To.AddressGroups...)
}
// Delete any AddressGroup key which is no longer referenced by any internal
// NetworkPolicy.
for _, key := range addressGroupKeys {
// Get all internal NetworkPolicy objects that refer to this AddressGroup.
nps, err := n.internalNetworkPolicyStore.GetByIndex(store.AddressGroupIndex, key)
if err != nil {
klog.Errorf("Unable to filter internal NetworkPolicies for AddressGroup %s: %v", key, err)
continue
}
if len(nps) == 0 {
klog.V(2).Infof("Deleting unreferenced AddressGroup %s", key)
// No internal NetworkPolicy refers to this Group. Safe to delete.
err = n.addressGroupStore.Delete(key)
if err != nil {
klog.Errorf("Unable to delete AddressGroup %s from store: %v", key, err)
}
}
}
}
// deleteDereferencedAppliedToGroup deletes the AppliedToGroup key if it is no
// longer referenced by any internal NetworkPolicy.
func (n *NetworkPolicyController) deleteDereferencedAppliedToGroup(key string) {
// Get all internal NetworkPolicy objects that refer to the old AppliedToGroup.
nps, err := n.internalNetworkPolicyStore.GetByIndex(store.AppliedToGroupIndex, key)
if err != nil {
klog.Errorf("Unable to filter internal NetworkPolicies for AppliedToGroup %s: %v", key, err)
return
}
if len(nps) == 0 {
// No internal NetworkPolicy refers to this Group. Safe to delete.
klog.V(2).Infof("Deleting unreferenced AppliedToGroup %s", key)
err := n.appliedToGroupStore.Delete(key)
if err != nil {
klog.Errorf("Unable to delete AppliedToGroup %s from store: %v", key, err)
}
}
}
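// enqueueAddressGroup adds the AddressGroup key to the addressGroup workqueue
// and updates the corresponding queue length metric.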
func (n *NetworkPolicyController) enqueueAddressGroup(key string) {
klog.V(4).Infof("Adding new key %s to AddressGroup queue", key)
n.addressGroupQueue.Add(key)
metrics.LengthAddressGroupQueue.Set(float64(n.addressGroupQueue.Len()))
}
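// enqueueInternalNetworkPolicy adds the internal NetworkPolicy key to the
// internalNetworkPolicy workqueue and updates the corresponding queue length metric.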
func (n *NetworkPolicyController) enqueueInternalNetworkPolicy(key string) {
klog.V(4).Infof("Adding new key %s to internal NetworkPolicy queue", key)
n.internalNetworkPolicyQueue.Add(key)
metrics.LengthInternalNetworkPolicyQueue.Set(float64(n.internalNetworkPolicyQueue.Len()))
}
// Run begins watching and syncing of a NetworkPolicyController.
func (n *NetworkPolicyController) Run(stopCh <-chan struct{}) {
defer n.appliedToGroupQueue.ShutDown()
defer n.addressGroupQueue.ShutDown()
defer n.internalNetworkPolicyQueue.ShutDown()
klog.Info("Starting NetworkPolicy controller")
defer klog.Info("Shutting down NetworkPolicy controller")
klog.Info("Waiting for caches to sync for NetworkPolicy controller")
if !cache.WaitForCacheSync(stopCh, n.podListerSynced, n.namespaceListerSynced, n.networkPolicyListerSynced) {
klog.Error("Unable to sync caches for NetworkPolicy controller")
return
}
// Only wait for cnpListerSynced and anpListerSynced when AntreaPolicy feature gate is enabled.
if features.DefaultFeatureGate.Enabled(features.AntreaPolicy) {
if !cache.WaitForCacheSync(stopCh, n.cnpListerSynced) {
klog.Error("Unable to sync CNP caches for NetworkPolicy controller")
return
}
if !cache.WaitForCacheSync(stopCh, n.anpListerSynced) {
klog.Error("Unable to sync ANP caches for NetworkPolicy controller")
return
}
}
klog.Info("Caches are synced for NetworkPolicy controller")
for i := 0; i < defaultWorkers; i++ {
go wait.Until(n.appliedToGroupWorker, time.Second, stopCh)
go wait.Until(n.addressGroupWorker, time.Second, stopCh)
go wait.Until(n.internalNetworkPolicyWorker, time.Second, stopCh)
}
<-stopCh
}
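// appliedToGroupWorker is a long-running routine which keeps processing items from the
// appliedToGroup workqueue until the queue is shut down, updating metrics as it goes.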
func (n *NetworkPolicyController) appliedToGroupWorker() {
for n.processNextAppliedToGroupWorkItem() {
metrics.OpsAppliedToGroupProcessed.Inc()
metrics.LengthAppliedToGroupQueue.Set(float64(n.appliedToGroupQueue.Len()))
}
}
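// addressGroupWorker is a long-running routine which keeps processing items from the
// addressGroup workqueue until the queue is shut down, updating metrics as it goes.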
func (n *NetworkPolicyController) addressGroupWorker() {
for n.processNextAddressGroupWorkItem() {
metrics.OpsAddressGroupProcessed.Inc()
metrics.LengthAddressGroupQueue.Set(float64(n.addressGroupQueue.Len()))
}
}
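// internalNetworkPolicyWorker is a long-running routine which keeps processing items from
// the internalNetworkPolicy workqueue until the queue is shut down, updating metrics as it goes.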
func (n *NetworkPolicyController) internalNetworkPolicyWorker() {
for n.processNextInternalNetworkPolicyWorkItem() {
metrics.OpsInternalNetworkPolicyProcessed.Inc()
metrics.LengthInternalNetworkPolicyQueue.Set(float64(n.internalNetworkPolicyQueue.Len()))
}
}
// Processes an item in the "internalNetworkPolicy" work queue, by calling
// syncInternalNetworkPolicy after casting the item to a string
// (NetworkPolicy key). If syncInternalNetworkPolicy returns an error, this
// function handles it by requeueing the item so that it can be processed again
// later. If syncInternalNetworkPolicy is successful, the NetworkPolicy is
// removed from the queue until we get notified of a new change. This function
// returns false if and only if the work queue was shut down (no more items will
// be processed).
func (n *NetworkPolicyController) processNextInternalNetworkPolicyWorkItem() bool {
defer n.heartbeat("processNextInternalNetworkPolicyWorkItem")
key, quit := n.internalNetworkPolicyQueue.Get()
if quit {
return false
}
// We call Done here so the workqueue knows we have finished processing this item. We also
// must remember to call Forget if we do not want this work item being re-queued. For
// example, we do not call Forget if a transient error occurs, instead the item is put back
// on the workqueue and attempted again after a back-off period.
defer n.internalNetworkPolicyQueue.Done(key)
err := n.syncInternalNetworkPolicy(key.(string))
if err != nil {
// Put the item back on the workqueue to handle any transient errors.
n.internalNetworkPolicyQueue.AddRateLimited(key)
klog.Errorf("Failed to sync internal NetworkPolicy %s: %v", key, err)
return true
}
// If no error occurs we Forget this item so it does not get queued again until
// another change happens.
n.internalNetworkPolicyQueue.Forget(key)
return true
}
// Processes an item in the "addressGroup" work queue, by calling
// syncAddressGroup after casting the item to a string (addressGroup key).
// If syncAddressGroup returns an error, this function handles it by requeueing
// the item so that it can be processed again later. If syncAddressGroup is
// successful, the AddressGroup is removed from the queue until we get notified
// of a new change. This function returns false if and only if the work queue
// was shut down (no more items will be processed).
func (n *NetworkPolicyController) processNextAddressGroupWorkItem() bool {
defer n.heartbeat("processNextAddressGroupWorkItem")
key, quit := n.addressGroupQueue.Get()
if quit {
return false
}
defer n.addressGroupQueue.Done(key)
err := n.syncAddressGroup(key.(string))
if err != nil {
// Put the item back on the workqueue to handle any transient errors.
n.addressGroupQueue.AddRateLimited(key)
klog.Errorf("Failed to sync AddressGroup %s: %v", key, err)
return true
}
// If no error occurs we Forget this item so it does not get queued again until
// another change happens.
n.addressGroupQueue.Forget(key)
return true
}
// Processes an item in the "appliedToGroup" work queue, by calling
// syncAppliedToGroup after casting the item to a string (appliedToGroup key).
// If syncAppliedToGroup returns an error, this function handles it by
// requeueing the item so that it can be processed again later. If
// syncAppliedToGroup is successful, the AppliedToGroup is removed from the
// queue until we get notified of a new change. This function returns false if
// and only if the work queue was shut down (no more items will be processed).
func (n *NetworkPolicyController) processNextAppliedToGroupWorkItem() bool {
defer n.heartbeat("processNextAppliedToGroupWorkItem")
key, quit := n.appliedToGroupQueue.Get()
if quit {
return false
}
defer n.appliedToGroupQueue.Done(key)
err := n.syncAppliedToGroup(key.(string))
if err != nil {
// Put the item back on the workqueue to handle any transient errors.
n.appliedToGroupQueue.AddRateLimited(key)
klog.Errorf("Failed to sync AppliedToGroup %s: %v", key, err)
return true
}
// If no error occurs we Forget this item so it does not get queued again until
// another change happens.
n.appliedToGroupQueue.Forget(key)
return true
}
// syncAddressGroup retrieves all the internal NetworkPolicies which have a
// reference to this AddressGroup and updates its Pod IP addresses set to
// reflect the current state of affected Pods based on the GroupSelector.
func (n *NetworkPolicyController) syncAddressGroup(key string) error {
startTime := time.Now()
defer func() {
d := time.Since(startTime)
metrics.DurationAddressGroupSyncing.Observe(float64(d.Milliseconds()))
klog.V(2).Infof("Finished syncing AddressGroup %s. (%v)", key, d)
}()
// Get all internal NetworkPolicy objects that refer to this AddressGroup.
nps, err := n.internalNetworkPolicyStore.GetByIndex(store.AddressGroupIndex, key)
if err != nil {
return fmt.Errorf("unable to filter internal NetworkPolicies for AddressGroup %s: %v", key, err)
}
addressGroupObj, found, _ := n.addressGroupStore.Get(key)
if !found {
// AddressGroup was already deleted. No need to process further.
klog.V(2).Infof("AddressGroup %s not found.", key)
return nil
}
addressGroup := addressGroupObj.(*antreatypes.AddressGroup)
// NodeNames set must be considered immutable once generated and updated
// in the store. If any change is needed, the set must be regenerated with
// the new NodeNames and the store must be updated.
addrGroupNodeNames := sets.String{}
for _, internalNPObj := range nps {
internalNP := internalNPObj.(*antreatypes.NetworkPolicy)
addrGroupNodeNames = addrGroupNodeNames.Union(internalNP.SpanMeta.NodeNames)
}
// Find all Pods and ExternalEntities matching its selectors and update store.
groupSelector := addressGroup.Selector
pods, externalEntities := n.processSelector(groupSelector)
podSet := controlplane.GroupMemberPodSet{}
memberSet := controlplane.GroupMemberSet{}
for _, pod := range pods {
if pod.Status.PodIP == "" {
// No need to insert Pod IPAddress when it is unset.
continue
}
podSet.Insert(podToMemberPod(pod, true, false))
}
for _, entity := range externalEntities {
memberSet.Insert(externalEntityToGroupMember(entity))
}
updatedAddressGroup := &antreatypes.AddressGroup{
Name: addressGroup.Name,
UID: addressGroup.UID,
Selector: addressGroup.Selector,
Pods: podSet,
GroupMembers: memberSet,
SpanMeta: antreatypes.SpanMeta{NodeNames: addrGroupNodeNames},
}
klog.V(2).Infof("Updating existing AddressGroup %s with %d pods, %d external entities and %d Nodes", key, len(podSet), len(memberSet), addrGroupNodeNames.Len())
n.addressGroupStore.Update(updatedAddressGroup)
return nil
}
// podToMemberPod is a util function to convert a Pod to a GroupMemberPod type.
// A controlplane.NamedPort item will be set in the GroupMemberPod only if the
// Pod contains a Port with the name field set. Depending on the input, the
// Pod IP and/or PodReference will also be set.
func podToMemberPod(pod *v1.Pod, includeIP, includePodRef bool) *controlplane.GroupMemberPod {
memberPod := &controlplane.GroupMemberPod{}
for _, container := range pod.Spec.Containers {
for _, port := range container.Ports {
// Only include container ports with name set.
if port.Name != "" {
memberPod.Ports = append(memberPod.Ports, controlplane.NamedPort{
Port: port.ContainerPort,
Name: port.Name,
Protocol: controlplane.Protocol(port.Protocol),
})
}
}
}
if includeIP {
memberPod.IP = ipStrToIPAddress(pod.Status.PodIP)
}
if includePodRef {
podRef := controlplane.PodReference{
Name: pod.Name,
Namespace: pod.Namespace,
}
memberPod.Pod = &podRef
}
return memberPod
}
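// externalEntityToGroupMember converts an ExternalEntity to a controlplane.GroupMember,
// copying its Endpoints (including their NamedPorts) and setting the ExternalEntity reference.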
func externalEntityToGroupMember(ee *v1alpha1.ExternalEntity) *controlplane.GroupMember {
memberEntity := &controlplane.GroupMember{}
for _, endpoint := range ee.Spec.Endpoints {
var namedPorts []controlplane.NamedPort
for _, port := range endpoint.Ports {
namedPorts = append(namedPorts, controlplane.NamedPort{
Port: port.Port,
Name: port.Name,
Protocol: controlplane.Protocol(port.Protocol),
})
}
ep := controlplane.Endpoint{
IP: ipStrToIPAddress(endpoint.IP),
Ports: namedPorts,
}
memberEntity.Endpoints = append(memberEntity.Endpoints, ep)
}
entityRef := controlplane.ExternalEntityReference{
Name: ee.Name,
Namespace: ee.Namespace,
}
memberEntity.ExternalEntity = &entityRef
return memberEntity
}
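// processSelector returns the Pods and ExternalEntities selected by the given GroupSelector,
// taking into account its Namespace, NamespaceSelector, PodSelector and ExternalEntitySelector.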
func (n *NetworkPolicyController) processSelector(groupSelector antreatypes.GroupSelector) ([]*v1.Pod, []*v1alpha1.ExternalEntity) {
var pods []*v1.Pod
var externalEntities []*v1alpha1.ExternalEntity
if groupSelector.Namespace != "" {
// Namespace presence indicates Pods and ExternalEntities must be selected from the same Namespace.
if groupSelector.PodSelector != nil {
pods, _ = n.podLister.Pods(groupSelector.Namespace).List(groupSelector.PodSelector)
} else if groupSelector.ExternalEntitySelector != nil {
externalEntities, _ = n.externalEntityLister.ExternalEntities(groupSelector.Namespace).List(groupSelector.ExternalEntitySelector)
}
} else if groupSelector.NamespaceSelector != nil && (groupSelector.PodSelector != nil || groupSelector.ExternalEntitySelector != nil) {
// Pods and ExternalEntities must be selected from Namespaces matching nsSelector.
namespaces, _ := n.namespaceLister.List(groupSelector.NamespaceSelector)
for _, ns := range namespaces {
if groupSelector.PodSelector != nil {
nsPods, _ := n.podLister.Pods(ns.Name).List(groupSelector.PodSelector)
pods = append(pods, nsPods...)
} else if groupSelector.ExternalEntitySelector != nil {
nsExtEntities, _ := n.externalEntityLister.ExternalEntities(ns.Name).List(groupSelector.ExternalEntitySelector)
externalEntities = append(externalEntities, nsExtEntities...)
}
}
} else if groupSelector.NamespaceSelector != nil {
// All the Pods from Namespaces matching the nsSelector must be selected.
namespaces, _ := n.namespaceLister.List(groupSelector.NamespaceSelector)
for _, ns := range namespaces {
nsPods, _ := n.podLister.Pods(ns.Name).List(labels.Everything())
pods = append(pods, nsPods...)
}
} else if groupSelector.PodSelector != nil {
// Lack of Namespace and NamespaceSelector indicates Pods must be selected
// from all Namespaces.
pods, _ = n.podLister.Pods("").List(groupSelector.PodSelector)
} else if groupSelector.ExternalEntitySelector != nil {
externalEntities, _ = n.externalEntityLister.ExternalEntities("").List(groupSelector.ExternalEntitySelector)
}
return pods, externalEntities
}
// syncAppliedToGroup enqueues all the internal NetworkPolicy keys that
// refer to this AppliedToGroup and updates the AppliedToGroup's Pod
// references by Node to reflect the latest set of affected Pods based
// on its GroupSelector.
func (n *NetworkPolicyController) syncAppliedToGroup(key string) error {
startTime := time.Now()
defer func() {
d := time.Since(startTime)
metrics.DurationAppliedToGroupSyncing.Observe(float64(d.Milliseconds()))
klog.V(2).Infof("Finished syncing AppliedToGroup %s. (%v)", key, d)
}()
var pods []*v1.Pod
appGroupNodeNames := sets.String{}
appliedToGroupObj, found, _ := n.appliedToGroupStore.Get(key)
if !found {
klog.V(2).Infof("AppliedToGroup %s not found.", key)
return nil
}
podSetByNode := make(map[string]controlplane.GroupMemberPodSet)
memberSetByNode := make(map[string]controlplane.GroupMemberSet)
scheduledPodNum, scheduledExtEntityNum := 0, 0
appliedToGroup := appliedToGroupObj.(*antreatypes.AppliedToGroup)
groupSelector := appliedToGroup.Selector
pods, externalEntities := n.processSelector(groupSelector)
for _, pod := range pods {
if pod.Spec.NodeName == "" {
// No need to process Pod when it's not scheduled.
continue
}
scheduledPodNum++
podSet := podSetByNode[pod.Spec.NodeName]
if podSet == nil {
podSet = controlplane.GroupMemberPodSet{}
}
podSet.Insert(podToMemberPod(pod, false, true))
// Update the Pod references by Node.
podSetByNode[pod.Spec.NodeName] = podSet
// Update the NodeNames in order to set the SpanMeta for AppliedToGroup.
appGroupNodeNames.Insert(pod.Spec.NodeName)
}
for _, extEntity := range externalEntities {
if extEntity.Spec.ExternalNode == "" {
continue
}
scheduledExtEntityNum++
entitySet := memberSetByNode[extEntity.Spec.ExternalNode]
if entitySet == nil {
entitySet = controlplane.GroupMemberSet{}
}
entitySet.Insert(externalEntityToGroupMember(extEntity))
memberSetByNode[extEntity.Spec.ExternalNode] = entitySet
appGroupNodeNames.Insert(extEntity.Spec.ExternalNode)
}
updatedAppliedToGroup := &antreatypes.AppliedToGroup{
UID: appliedToGroup.UID,
Name: appliedToGroup.Name,
Selector: appliedToGroup.Selector,
PodsByNode: podSetByNode,
GroupMemberByNode: memberSetByNode,
SpanMeta: antreatypes.SpanMeta{NodeNames: appGroupNodeNames},
}
klog.V(2).Infof("Updating existing AppliedToGroup %s with %d Pods and %d External Entities on %d Nodes",
key, scheduledPodNum, scheduledExtEntityNum, appGroupNodeNames.Len())
n.appliedToGroupStore.Update(updatedAppliedToGroup)
// Get all internal NetworkPolicy objects that refer to this AppliedToGroup.
// Note that this must be executed after storing the result, to ensure that
// both of the NetworkPolicies that referred it before storing it and the
// ones after storing it can get the right span.
nps, err := n.internalNetworkPolicyStore.GetByIndex(store.AppliedToGroupIndex, key)
if err != nil {
return fmt.Errorf("unable to filter internal NetworkPolicies for AppliedToGroup %s: %v", key, err)
}
// Enqueue syncInternalNetworkPolicy for each affected internal NetworkPolicy so
// that corresponding Node spans are updated.
for _, npObj := range nps {
// Error can be ignored as npObj is of type antreatypes.NetworkPolicy.
npKey, _ := store.NetworkPolicyKeyFunc(npObj)
n.enqueueInternalNetworkPolicy(npKey)
}
return nil
}
// syncInternalNetworkPolicy retrieves all the AppliedToGroups associated with
// itself in order to calculate the Node span for this policy.
func (n *NetworkPolicyController) syncInternalNetworkPolicy(key string) error {
startTime := time.Now()
defer func() {
d := time.Since(startTime)
metrics.DurationInternalNetworkPolicySyncing.Observe(float64(d.Milliseconds()))
klog.V(2).Infof("Finished syncing internal NetworkPolicy %s. (%v)", key, d)
}()
klog.V(2).Infof("Syncing internal NetworkPolicy %s", key)
nodeNames := sets.String{}
// Lock the internal NetworkPolicy store as we may have a case wherein the
// same internal NetworkPolicy is being updated in the NetworkPolicy UPDATE
// handler.
n.internalNetworkPolicyMutex.Lock()
internalNPObj, found, _ := n.internalNetworkPolicyStore.Get(key)
if !found {
// Make sure to unlock the store before returning.
n.internalNetworkPolicyMutex.Unlock()
return fmt.Errorf("internal NetworkPolicy %s not found", key)
}
internalNP := internalNPObj.(*antreatypes.NetworkPolicy)
// Maintain a copy of the old SpanMeta NodeNames so we can later enqueue Groups
// only if the span has been updated.
oldNodeNames := internalNP.SpanMeta.NodeNames
// Calculate the set of Node names based on the span of the
// AppliedToGroups referenced by this NetworkPolicy.
for _, appliedToGroupName := range internalNP.AppliedToGroups {
appGroupObj, found, _ := n.appliedToGroupStore.Get(appliedToGroupName)
if !found {
continue
}
appGroup := appGroupObj.(*antreatypes.AppliedToGroup)
nodeNames = nodeNames.Union(appGroup.SpanMeta.NodeNames)
}
updatedNetworkPolicy := &antreatypes.NetworkPolicy{
UID: internalNP.UID,
Name: internalNP.Name,
Namespace: internalNP.Namespace,
SourceRef: internalNP.SourceRef,
Rules: internalNP.Rules,
AppliedToGroups: internalNP.AppliedToGroups,
Priority: internalNP.Priority,
TierPriority: internalNP.TierPriority,
SpanMeta: antreatypes.SpanMeta{NodeNames: nodeNames},
}
klog.V(4).Infof("Updating internal NetworkPolicy %s with %d Nodes", key, nodeNames.Len())
n.internalNetworkPolicyStore.Update(updatedNetworkPolicy)
// Internal NetworkPolicy update is complete. Safe to unlock the
// critical section.
n.internalNetworkPolicyMutex.Unlock()
if nodeNames.Equal(oldNodeNames) {
// Node span for internal NetworkPolicy was not modified. No need to enqueue
// AddressGroups.
klog.V(4).Infof("Internal NetworkPolicy %s Node span remains unchanged. No need to enqueue AddressGroups.", key)
return nil
}
// Enqueue addressGroup keys to update their Node span.
for _, rule := range internalNP.Rules {
for _, addrGroupName := range rule.From.AddressGroups {
n.enqueueAddressGroup(addrGroupName)
}
for _, addrGroupName := range rule.To.AddressGroups {
n.enqueueAddressGroup(addrGroupName)
}
}
return nil
}
// ipStrToIPAddress converts an IP string to a controlplane.IPAddress.
// nil will be returned if the IP string is not valid.
func ipStrToIPAddress(ip string) controlplane.IPAddress {
return controlplane.IPAddress(net.ParseIP(ip))
}
// cidrStrToIPNet converts a CIDR (eg. 10.0.0.0/16) to a *controlplane.IPNet.
func cidrStrToIPNet(cidr string) (*controlplane.IPNet, error) {
// Split the cidr to retrieve the IP and prefix.
s := strings.Split(cidr, "/")
if len(s) != 2 {
return nil, fmt.Errorf("invalid format for IPBlock CIDR: %s", cidr)
}
// Convert prefix length to int32
prefixLen64, err := strconv.ParseInt(s[1], 10, 32)
if err != nil {
return nil, fmt.Errorf("invalid prefix length: %s", s[1])
}
ipNet := &controlplane.IPNet{
IP: ipStrToIPAddress(s[0]),
PrefixLength: int32(prefixLen64),
}
return ipNet, nil
}
| 1 | 20,675 | 5 or 10? | antrea-io-antrea | go |
@@ -27,6 +27,10 @@ internal class MyExporter : ActivityExporter
public override Task<ExportResult> ExportAsync(
IEnumerable<Activity> batch, CancellationToken cancellationToken)
{
+ // Exporter code which can generate further
+ // telemetry should do so inside SuppressInstrumentation
+ // scope. This suppresses telemetry from
+ // exporter's own code to avoid live-loop situation.
using var scope = Sdk.SuppressInstrumentation.Begin();
foreach (var activity in batch) | 1 | // <copyright file="MyExporter.cs" company="OpenTelemetry Authors">
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// </copyright>
using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.Threading;
using System.Threading.Tasks;
using OpenTelemetry;
using OpenTelemetry.Trace;
internal class MyExporter : ActivityExporter
{
public override Task<ExportResult> ExportAsync(
IEnumerable<Activity> batch, CancellationToken cancellationToken)
{
using var scope = Sdk.SuppressInstrumentation.Begin();
foreach (var activity in batch)
{
Console.WriteLine($"{activity.DisplayName}");
}
return Task.FromResult(ExportResult.Success);
}
public override Task ShutdownAsync(CancellationToken cancellationToken)
{
Console.WriteLine($"MyExporter.ShutdownAsync");
return Task.CompletedTask;
}
protected override void Dispose(bool disposing)
{
Console.WriteLine($"MyExporter.Dispose");
}
}
| 1 | 16,151 | Should we explain more here? | open-telemetry-opentelemetry-dotnet | .cs |
@@ -30,9 +30,10 @@ import (
// ec2Svc are the functions from the ec2 service, not the client, this actuator needs.
// This should never need to import the ec2 sdk.
type ec2Svc interface {
- CreateInstance(*clusterv1.Machine) (*ec2svc.Instance, error)
+ CreateInstance(*clusterv1.Machine, *v1alpha1.AWSMachineProviderConfig, *v1alpha1.AWSClusterProviderStatus) (*ec2svc.Instance, error)
InstanceIfExists(*string) (*ec2svc.Instance, error)
TerminateInstance(*string) error
+ CreateOrGetMachine(*clusterv1.Machine, *v1alpha1.AWSMachineProviderStatus, *v1alpha1.AWSMachineProviderConfig, *v1alpha1.AWSClusterProviderStatus) (*ec2svc.Instance, error)
}
// codec are the functions off the generated codec that this actuator uses. | 1 | // Copyright © 2018 The Kubernetes Authors.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package machine
// should not need to import the ec2 sdk here
import (
"fmt"
"sigs.k8s.io/cluster-api-provider-aws/cloud/aws/providerconfig/v1alpha1"
ec2svc "sigs.k8s.io/cluster-api-provider-aws/cloud/aws/services/ec2"
"github.com/golang/glog"
"github.com/pkg/errors"
"k8s.io/apimachinery/pkg/runtime"
clusterv1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1"
client "sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1"
)
// ec2Svc are the functions from the ec2 service, not the client, this actuator needs.
// This should never need to import the ec2 sdk.
type ec2Svc interface {
CreateInstance(*clusterv1.Machine) (*ec2svc.Instance, error)
InstanceIfExists(*string) (*ec2svc.Instance, error)
TerminateInstance(*string) error
}
// codec are the functions off the generated codec that this actuator uses.
type codec interface {
DecodeFromProviderConfig(clusterv1.ProviderConfig, runtime.Object) error
DecodeProviderStatus(*runtime.RawExtension, runtime.Object) error
EncodeProviderStatus(runtime.Object) (*runtime.RawExtension, error)
}
// Actuator is responsible for performing machine reconciliation
type Actuator struct {
codec codec
// Services
ec2 ec2Svc
machinesGetter client.MachinesGetter
}
// ActuatorParams holds parameter information for Actuator
type ActuatorParams struct {
// Codec is needed to work with the provider configs and statuses.
Codec codec
// Services
// ClusterService is the interface to cluster-api.
MachinesGetter client.MachinesGetter
// EC2Service is the interface to ec2.
EC2Service ec2Svc
}
// NewActuator returns an actuator.
func NewActuator(params ActuatorParams) (*Actuator, error) {
return &Actuator{
codec: params.Codec,
ec2: params.EC2Service,
machinesGetter: params.MachinesGetter,
}, nil
}
// Create creates a machine and is invoked by the machine controller.
func (a *Actuator) Create(cluster *clusterv1.Cluster, machine *clusterv1.Machine) error {
// will need this machine config in a bit
_, err := a.machineProviderConfig(machine.Spec.ProviderConfig)
if err != nil {
glog.Errorf("Failed to decode the machine provider config: %v", err)
return err
}
// Get the machine status
status, err := a.machineProviderStatus(machine)
if err != nil {
return err
}
// does the instance exist with a valid status? we're good
// otherwise create it and move on.
_, err = a.ec2.InstanceIfExists(status.InstanceID)
if err != nil {
return err
}
i, err := a.ec2.CreateInstance(machine)
if err != nil {
return err
}
status.InstanceID = &i.ID
status.InstanceState = &i.State
return a.updateStatus(machine, status)
}
// Delete deletes a machine and is invoked by the Machine Controller
func (a *Actuator) Delete(cluster *clusterv1.Cluster, machine *clusterv1.Machine) error {
glog.Infof("Deleting machine %v for cluster %v.", machine.Name, cluster.Name)
status, err := a.machineProviderStatus(machine)
if err != nil {
return errors.Wrap(err, "failed to get machine provider status")
}
instance, err := a.ec2.InstanceIfExists(status.InstanceID)
if err != nil {
return errors.Wrap(err, "failed to get instance")
}
// The machine hasn't been created yet
if instance == nil {
return nil
}
// Check the instance state. If it's already shutting down or terminated,
// do nothing. Otherwise attempt to delete it.
// This decision is based on the ec2-instance-lifecycle graph at
// https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-lifecycle.html
switch instance.State {
case ec2svc.InstanceStateShuttingDown, ec2svc.InstanceStateTerminated:
return nil
default:
err = a.ec2.TerminateInstance(status.InstanceID)
if err != nil {
return errors.Wrap(err, "failed to terminate instance")
}
}
return nil
}
// Update updates a machine and is invoked by the Machine Controller
func (a *Actuator) Update(cluster *clusterv1.Cluster, machine *clusterv1.Machine) error {
glog.Infof("Updating machine %v for cluster %v.", machine.Name, cluster.Name)
// Handling of machine config changes is not yet implemented.
// We should check which pieces of configuration have been updated, throw
// errors if an attempt is made to modify any immutable state, otherwise
// go ahead and modify what we can.
// Get the new status from the provided machine object.
status, err := a.machineProviderStatus(machine)
if err != nil {
return errors.Wrap(err, "failed to get machine status")
}
err = a.updateStatus(machine, status)
if err != nil {
return errors.Wrap(err, "failed to update machine status")
}
return nil
}
// Exists test for the existence of a machine and is invoked by the Machine Controller
func (a *Actuator) Exists(cluster *clusterv1.Cluster, machine *clusterv1.Machine) (bool, error) {
glog.Infof("Checking if machine %v for cluster %v exists.", machine.Name, cluster.Name)
status, err := a.machineProviderStatus(machine)
if err != nil {
return false, err
}
instance, err := a.ec2.InstanceIfExists(status.InstanceID)
if err != nil {
return false, err
}
if instance == nil {
return false, nil
}
// TODO update status here
switch instance.State {
case ec2svc.InstanceStateRunning, ec2svc.InstanceStatePending:
return true, nil
default:
return false, nil
}
}
func (a *Actuator) machineProviderConfig(providerConfig clusterv1.ProviderConfig) (*v1alpha1.AWSMachineProviderConfig, error) {
machineProviderCfg := &v1alpha1.AWSMachineProviderConfig{}
err := a.codec.DecodeFromProviderConfig(providerConfig, machineProviderCfg)
return machineProviderCfg, err
}
func (a *Actuator) machineProviderStatus(machine *clusterv1.Machine) (*v1alpha1.AWSMachineProviderStatus, error) {
status := &v1alpha1.AWSMachineProviderStatus{}
err := a.codec.DecodeProviderStatus(machine.Status.ProviderStatus, status)
return status, err
}
func (a *Actuator) updateStatus(machine *clusterv1.Machine, status *v1alpha1.AWSMachineProviderStatus) error {
machinesClient := a.machinesGetter.Machines(machine.Namespace)
encodedProviderStatus, err := a.codec.EncodeProviderStatus(status)
if err != nil {
return fmt.Errorf("failed to encode machine status: %v", err)
}
if encodedProviderStatus != nil {
machine.Status.ProviderStatus = encodedProviderStatus
if _, err := machinesClient.UpdateStatus(machine); err != nil {
return fmt.Errorf("failed to update machine status: %v", err)
}
}
return nil
}
| 1 | 6,306 | I've been struggling with this on my cloud-init integration work as well. In addition to info that is currently stored in the cluster providerstatus, we also need some of the info that is available within the base cluster object as well. I think it would make sense to unify the machine and cluster info needed into a consolidated struct rather than continuing to add additional disparate structs. | kubernetes-sigs-cluster-api-provider-aws | go |
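A minimal sketch of the consolidated input the reviewer is suggesting, not part of the actual cluster-api-provider-aws code: the name ReconcileContext and its field layout are hypothetical, while the field types are the ones the actuator above already imports. Passing one value like this keeps the ec2Svc interface stable as more machine- and cluster-scoped data is needed.

// Hypothetical sketch only; ReconcileContext is not an existing type in the repo.
package machine

import (
	"sigs.k8s.io/cluster-api-provider-aws/cloud/aws/providerconfig/v1alpha1"

	clusterv1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1"
)

// ReconcileContext bundles the machine- and cluster-scoped inputs that
// CreateInstance/CreateOrGetMachine currently receive as separate arguments.
type ReconcileContext struct {
	Cluster       *clusterv1.Cluster
	Machine       *clusterv1.Machine
	MachineConfig *v1alpha1.AWSMachineProviderConfig
	MachineStatus *v1alpha1.AWSMachineProviderStatus
	ClusterStatus *v1alpha1.AWSClusterProviderStatus
}

// A service method could then take a single argument, e.g.:
// CreateOrGetMachine(rctx *ReconcileContext) (*ec2svc.Instance, error)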
@@ -17,7 +17,9 @@ import java.util.Map;
* UIs without the package prefixes.
*
* @author Brian Remedios
+ * @deprecated Is internal API
*/
+@Deprecated
public final class ClassUtil {
public static final Class<?>[] EMPTY_CLASS_ARRAY = new Class[0]; | 1 | /**
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.util;
import java.lang.reflect.Method;
import java.math.BigDecimal;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
* Various class-related utility methods intended for mapping common java.lang
* types to their short forms, allowing end users to enter these names in
* UIs without the package prefixes.
*
* @author Brian Remedios
*/
public final class ClassUtil {
public static final Class<?>[] EMPTY_CLASS_ARRAY = new Class[0];
@SuppressWarnings("PMD.AvoidUsingShortType")
private static final TypeMap PRIMITIVE_TYPE_NAMES = new TypeMap(new Class[] { int.class, byte.class, long.class,
short.class, float.class, double.class, char.class, boolean.class, });
private static final TypeMap TYPES_BY_NAME = new TypeMap(
new Class[] { Integer.class, Byte.class, Long.class, Short.class, Float.class, Double.class,
Character.class, Boolean.class, BigDecimal.class, String.class, Object.class, Class.class, });
private static final Map<Class<?>, String> SHORT_NAMES_BY_TYPE = computeClassShortNames();
private ClassUtil() {
}
/**
* Returns the type(class) for the name specified or null if not found.
*
* @param name
* String
* @return Class
*/
public static Class<?> getPrimitiveTypeFor(String name) {
return PRIMITIVE_TYPE_NAMES.typeFor(name);
}
/**
* Return a map of all the short names of classes we maintain mappings for.
* The names are keyed by the classes themselves.
*
* @return Map<Class, String>
*/
private static Map<Class<?>, String> computeClassShortNames() {
Map<Class<?>, String> map = new HashMap<>();
map.putAll(PRIMITIVE_TYPE_NAMES.asInverseWithShortName());
map.putAll(TYPES_BY_NAME.asInverseWithShortName());
return map;
}
public static Map<Class<?>, String> getClassShortNames() {
return SHORT_NAMES_BY_TYPE;
}
/**
* Attempt to determine the actual class given the short name.
*
* @param shortName
* String
* @return Class
*/
public static Class<?> getTypeFor(String shortName) {
Class<?> type = TYPES_BY_NAME.typeFor(shortName);
if (type != null) {
return type;
}
type = PRIMITIVE_TYPE_NAMES.typeFor(shortName);
if (type != null) {
return type;
}
return CollectionUtil.getCollectionTypeFor(shortName);
}
/**
* Return the name of the type in its short form if it's known to us,
* otherwise return its name fully packaged.
*
* @param type
* @return String
*/
public static String asShortestName(Class<?> type) {
String name = SHORT_NAMES_BY_TYPE.get(type);
return name == null ? type.getName() : name;
}
/**
* Returns the abbreviated name of the type, without the package name
*
* @param fullTypeName
* @return String
*/
public static String withoutPackageName(String fullTypeName) {
int dotPos = fullTypeName.lastIndexOf('.');
return dotPos > 0 ? fullTypeName.substring(dotPos + 1) : fullTypeName;
}
/**
* Attempts to return the specified method from the class provided but will
* walk up its superclasses until it finds a match. Returns null if it
* doesn't.
*
* @param clasz
* Class
* @param methodName
* String
* @param paramTypes
* Class[]
* @return Method
*/
public static Method methodFor(Class<?> clasz, String methodName, Class<?>[] paramTypes) {
Method method = null;
Class<?> current = clasz;
while (current != Object.class) {
try {
method = current.getDeclaredMethod(methodName, paramTypes);
} catch (NoSuchMethodException ex) {
current = current.getSuperclass();
}
if (method != null) {
return method;
}
}
return null;
}
/**
* Return the methods as a map keyed by their common declaration types.
*
* @param methods
* @return methods grouped by declaring type name
*/
public static Map<String, List<Method>> asMethodGroupsByTypeName(Method[] methods) {
Map<String, List<Method>> methodGroups = new HashMap<>(methods.length);
for (int i = 0; i < methods.length; i++) {
String clsName = asShortestName(methods[i].getDeclaringClass());
if (!methodGroups.containsKey(clsName)) {
methodGroups.put(clsName, new ArrayList<Method>());
}
methodGroups.get(clsName).add(methods[i]);
}
return methodGroups;
}
/**
* Return the methods as a map keyed by their common declaration types.
*
* @param methods
*
* @return methods grouped by declaring type name
*/
public static Map<String, List<Method>> asMethodGroupsByTypeName(List<Method> methods) {
Map<String, List<Method>> methodGroups = new HashMap<>(methods.size());
for (Method m : methods) {
String clsName = asShortestName(m.getDeclaringClass());
if (!methodGroups.containsKey(clsName)) {
methodGroups.put(clsName, new ArrayList<Method>());
}
methodGroups.get(clsName).add(m);
}
return methodGroups;
}
}
| 1 | 16,679 | Why not `@InternalApi`? | pmd-pmd | java |
@@ -22,6 +22,11 @@ import datetime
import decimal
from inspect import getfullargspec, isclass
+try:
+ from typing import _GenericAlias # type: ignore
+except ImportError:
+ from typing import GenericMeta as _GenericAlias # type: ignore
+
import numpy as np
import pandas as pd
from pandas.api.types import is_datetime64_dtype, is_datetime64tz_dtype | 1 | #
# Copyright (C) 2019 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Utilities to deal with types. This is mostly focused on python3.
"""
import typing
import datetime
import decimal
from inspect import getfullargspec, isclass
import numpy as np
import pandas as pd
from pandas.api.types import is_datetime64_dtype, is_datetime64tz_dtype
import pyarrow as pa
import pyspark.sql.types as types
try:
from pyspark.sql.types import to_arrow_type, from_arrow_type
except ImportError:
from pyspark.sql.pandas.types import to_arrow_type, from_arrow_type
from databricks import koalas as ks # For running doctests and reference resolution in PyCharm.
from databricks.koalas.typedef.string_typehints import resolve_string_type_hint
T = typing.TypeVar("T")
Scalar = typing.Union[
int, float, bool, str, bytes, decimal.Decimal, datetime.date, datetime.datetime, None
]
# A column of data, with the data type.
class SeriesType(typing.Generic[T]):
def __init__(self, tpe):
self.tpe = tpe # type: types.DataType
def __repr__(self):
return "SeriesType[{}]".format(self.tpe)
class DataFrameType(object):
def __init__(self, tpe, names=None):
if names is None:
# Default names `c0, c1, ... cn`.
self.tpe = types.StructType(
[types.StructField("c%s" % i, tpe[i]) for i in range(len(tpe))]
) # type: types.StructType
else:
self.tpe = types.StructType(
[types.StructField(n, t) for n, t in zip(names, tpe)]
) # type: types.StructType
def __repr__(self):
return "DataFrameType[{}]".format(self.tpe)
# The type is a scalar type that is furthermore understood by Spark.
class ScalarType(object):
def __init__(self, tpe):
self.tpe = tpe # type: types.DataType
def __repr__(self):
return "ScalarType[{}]".format(self.tpe)
# The type is left unspecified or we do not know about this type.
class UnknownType(object):
def __init__(self, tpe):
self.tpe = tpe
def __repr__(self):
return "UnknownType[{}]".format(self.tpe)
class NameTypeHolder(object):
name = None
tpe = None
def as_spark_type(tpe) -> types.DataType:
"""
Given a Python type, returns the equivalent spark type.
Accepts:
- the built-in types in Python
- the built-in types in numpy
- list of pairs of (field_name, type)
- dictionaries of field_name -> type
- Python3's typing system
"""
# TODO: Add "boolean" and "string" types.
# ArrayType
if tpe in (np.ndarray,):
# TODO: support other child types
return types.ArrayType(types.StringType())
# BinaryType
elif tpe in (bytes, np.character, np.bytes_, np.string_):
return types.BinaryType()
# BooleanType
elif tpe in (bool, np.bool, "bool", "?"):
return types.BooleanType()
# DateType
elif tpe in (datetime.date,):
return types.DateType()
# NumericType
elif tpe in (np.int8, np.byte, "int8", "byte", "b"):
return types.ByteType()
elif tpe in (decimal.Decimal,):
# TODO: consider the precision & scale for the decimal type.
return types.DecimalType(38, 18)
elif tpe in (float, np.float, np.float64, "float", "float64", "double"):
return types.DoubleType()
elif tpe in (np.float32, "float32", "f"):
return types.FloatType()
elif tpe in (np.int32, "int32", "i"):
return types.IntegerType()
elif tpe in (int, np.int, np.int64, "int", "int64", "long", "bigint"):
return types.LongType()
elif tpe in (np.int16, "int16", "short"):
return types.ShortType()
# StringType
elif tpe in (str, np.unicode_, "str", "U"):
return types.StringType()
# TimestampType
elif tpe in (datetime.datetime, np.datetime64, "datetime64[ns]", "M"):
return types.TimestampType()
else:
raise TypeError("Type %s was not understood." % tpe)
def spark_type_to_pandas_dtype(spark_type):
""" Return the given Spark DataType to pandas dtype. """
if isinstance(spark_type, (types.DateType, types.StructType, types.UserDefinedType)):
return np.dtype("object")
elif isinstance(spark_type, types.TimestampType):
return np.dtype("datetime64[ns]")
else:
return np.dtype(to_arrow_type(spark_type).to_pandas_dtype())
def infer_pd_series_spark_type(s: pd.Series) -> types.DataType:
"""Infer Spark DataType from pandas Series dtype.
:param s: :class:`pandas.Series` to be inferred
:return: the inferred Spark data type
"""
dt = s.dtype
if dt == np.dtype("object"):
if len(s) == 0 or s.isnull().all():
raise ValueError("can not infer schema from empty or null dataset")
elif hasattr(s[0], "__UDT__"):
return s[0].__UDT__
else:
return from_arrow_type(pa.Array.from_pandas(s).type)
elif is_datetime64_dtype(dt) or is_datetime64tz_dtype(dt):
return types.TimestampType()
else:
return from_arrow_type(pa.from_numpy_dtype(dt))
def infer_return_type(f) -> typing.Union[SeriesType, DataFrameType, ScalarType, UnknownType]:
"""
>>> def func() -> int:
... pass
>>> infer_return_type(func).tpe
LongType
>>> def func() -> ks.Series[int]:
... pass
>>> infer_return_type(func).tpe
LongType
>>> def func() -> ks.DataFrame[np.float, str]:
... pass
>>> infer_return_type(func).tpe
StructType(List(StructField(c0,DoubleType,true),StructField(c1,StringType,true)))
>>> def func() -> ks.DataFrame[np.float]:
... pass
>>> infer_return_type(func).tpe
StructType(List(StructField(c0,DoubleType,true)))
>>> def func() -> 'int':
... pass
>>> infer_return_type(func).tpe
LongType
>>> def func() -> 'ks.Series[int]':
... pass
>>> infer_return_type(func).tpe
LongType
>>> def func() -> 'ks.DataFrame[np.float, str]':
... pass
>>> infer_return_type(func).tpe
StructType(List(StructField(c0,DoubleType,true),StructField(c1,StringType,true)))
>>> def func() -> 'ks.DataFrame[np.float]':
... pass
>>> infer_return_type(func).tpe
StructType(List(StructField(c0,DoubleType,true)))
>>> def func() -> ks.DataFrame['a': np.float, 'b': int]:
... pass
>>> infer_return_type(func).tpe
StructType(List(StructField(a,DoubleType,true),StructField(b,LongType,true)))
>>> def func() -> "ks.DataFrame['a': np.float, 'b': int]":
... pass
>>> infer_return_type(func).tpe
StructType(List(StructField(a,DoubleType,true),StructField(b,LongType,true)))
>>> pdf = pd.DataFrame({"a": [1, 2, 3], "b": [3, 4, 5]})
>>> def func() -> ks.DataFrame[pdf.dtypes]:
... pass
>>> infer_return_type(func).tpe
StructType(List(StructField(c0,LongType,true),StructField(c1,LongType,true)))
>>> pdf = pd.DataFrame({"a": [1, 2, 3], "b": [3, 4, 5]})
>>> def func() -> ks.DataFrame[zip(pdf.columns, pdf.dtypes)]:
... pass
>>> infer_return_type(func).tpe
StructType(List(StructField(a,LongType,true),StructField(b,LongType,true)))
"""
# We should re-import to make sure the class 'SeriesType' is not treated as a class
# within this module locally. See Series.__class_getitem__ which imports this class
# canonically.
from databricks.koalas.typedef import SeriesType, NameTypeHolder
spec = getfullargspec(f)
tpe = spec.annotations.get("return", None)
if isinstance(tpe, str):
# This type hint can happen when given hints are string to avoid forward reference.
tpe = resolve_string_type_hint(tpe)
if hasattr(tpe, "__origin__") and (
issubclass(tpe.__origin__, SeriesType) or tpe.__origin__ == ks.Series
):
# TODO: remove "tpe.__origin__ == ks.Series" when we drop Python 3.5 and 3.6.
inner = as_spark_type(tpe.__args__[0])
return SeriesType(inner)
if hasattr(tpe, "__origin__") and tpe.__origin__ == ks.DataFrame:
# When the Python version is lower than 3.7, unwrap it to a Tuple type
# hint.
tpe = tpe.__args__[0]
# Note that DataFrame type hints will create a Tuple.
# Python 3.6 has `__name__`. Python 3.7 and 3.8 have `_name`.
# Check if the name is Tuple.
name = getattr(tpe, "_name", getattr(tpe, "__name__", None))
if name == "Tuple":
tuple_type = tpe
if hasattr(tuple_type, "__tuple_params__"):
# Python 3.5.0 to 3.5.2 has '__tuple_params__' instead.
# See https://github.com/python/cpython/blob/v3.5.2/Lib/typing.py
parameters = getattr(tuple_type, "__tuple_params__")
else:
parameters = getattr(tuple_type, "__args__")
if len(parameters) > 0 and all(
isclass(p) and issubclass(p, NameTypeHolder) for p in parameters
):
names = [p.name for p in parameters if issubclass(p, NameTypeHolder)]
types = [p.tpe for p in parameters if issubclass(p, NameTypeHolder)]
return DataFrameType([as_spark_type(t) for t in types], names)
return DataFrameType([as_spark_type(t) for t in parameters])
inner = as_spark_type(tpe)
if inner is None:
return UnknownType(tpe)
else:
return ScalarType(inner)
| 1 | 16,942 | FYI: `GenericMeta` is renamed to `_GenericAlias` in Python3.7. | databricks-koalas | py |
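A small illustration (not part of the koalas patch) of why the compatibility import above works: on Python 3.7+ a parameterized hint such as typing.List[int] is an instance of typing._GenericAlias, while on Python 3.5/3.6 its type is typing.GenericMeta. The helper name is_generic_hint below is hypothetical.

import typing

try:
    # Python >= 3.7
    from typing import _GenericAlias as _generic_hint_type  # type: ignore
except ImportError:
    # Python 3.5 / 3.6
    from typing import GenericMeta as _generic_hint_type  # type: ignore


def is_generic_hint(tpe) -> bool:
    """Return True for parameterized typing hints such as typing.List[int]."""
    return isinstance(tpe, _generic_hint_type)


print(is_generic_hint(typing.List[int]))  # True on both old and new versions
print(is_generic_hint(int))               # False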
@@ -63,7 +63,7 @@
<% unless phase.sections.all?(&:modifiable?) %>
<%= _("You may place them before or after the main template sections.") %>
<% end %>
- <% else %>
+ <% elsif template.latest? %>
<%= link_to _('Re-order sections'),
org_admin_template_phase_versions_path(phase.template, phase),
method: "post", | 1 | <% title "#{template.title}" %>
<% modifiable = template.latest? && !template.customization_of.present? && template.id.present? && (template.org_id == current_user.org.id) %>
<div class="row">
<div class="col-md-12">
<h1><%= template.title %></h1>
<% referrer = template.customization_of.present? ? customisable_org_admin_templates_path : organisational_org_admin_templates_path %>
<%= link_to _('View all templates'), referrer, class: 'btn btn-default pull-right' %>
</div>
</div>
<div class="row">
<div class="col-md-12">
<!-- render navigation tabs for the template-->
<%= render partial: "/org_admin/templates/navigation",
locals: local_assigns.merge({ modifiable: modifiable }) %>
<div class="tab-content">
<div role="tabpanel" class="tab-pane active">
<div class="panel panel-default">
<div class="panel-body">
<% # locals: { phase, template, edit, current_section } %>
<div class="pull-left">
<h2><%= _('Phase details')%></h2>
</div>
<div class="pull-right">
<%= link_to(_('Preview'),
preview_org_admin_template_phase_path(template, phase),
class: 'btn btn-default phase_preview_link', role: 'button') %>
</div>
<div class="clearfix"></div>
<div class="row">
<div class="col-md-12">
<%= render partial: partial_path,
locals: local_assigns.merge({ modifiable: modifiable }) %>
</div>
</div>
<div class="row">
<div class="col-md-12">
<h2><%= _('Sections') %></h2>
<div class="row">
<div class="col-sm-6">
<% if phase.sections.many? %>
<div id="sections-accordion-controls">
<div class="accordion-controls"
data-parent="sections_accordion">
<a href="#" data-toggle-direction="show">
<%= _('expand all') %>
</a>
<span>|</span>
<a href="#" data-toggle-direction="hide">
<%= _('collapse all') %>
</a>
</div>
</div>
<% end %>
</div>
<div class="col-sm-6">
<div class='text-right text-muted'>
<% if template.latest? && (modifiable || template.customization_of.present?) %>
<i class="fa fa-info-circle small"></i>
<%= _("Drag arrows to rearrange sections.") %>
<% unless phase.sections.all?(&:modifiable?) %>
<%= _("You may place them before or after the main template sections.") %>
<% end %>
<% else %>
<%= link_to _('Re-order sections'),
org_admin_template_phase_versions_path(phase.template, phase),
method: "post",
class: "btn btn-primary btn-sm" %>
<% end %>
</div>
<div class="clear">
</div>
</div>
</div>
<%= render partial: 'org_admin/sections/index',
locals: local_assigns.merge(modifiable: modifiable) %>
</div>
</div>
</div>
</div>
</div>
</div>
| 1 | 19,014 | So this hides the link to re-order sections on Historic Templates? Good catch, Just checked on DMPonline and hitting that throws a 404 | DMPRoadmap-roadmap | rb |
@@ -69,7 +69,7 @@ module RSpec
# same process.
def self.clear_examples
world.reset
- configuration.reporter.reset
+ configuration.reset_reporter
configuration.start_time = ::RSpec::Core::Time.now
configuration.reset_filters
end | 1 | # rubocop:disable Style/GlobalVars
$_rspec_core_load_started_at = Time.now
# rubocop:enable Style/GlobalVars
require "rspec/support"
RSpec::Support.require_rspec_support "caller_filter"
RSpec::Support.define_optimized_require_for_rspec(:core) { |f| require_relative f }
%w[
version
warnings
set
flat_map
filter_manager
dsl
notifications
reporter
hooks
memoized_helpers
metadata
metadata_filter
pending
formatters
ordering
world
configuration
option_parser
configuration_options
runner
invocations
example
shared_example_group
example_group
].each { |name| RSpec::Support.require_rspec_core name }
# Namespace for all core RSpec code.
module RSpec
autoload :SharedContext, 'rspec/core/shared_context'
extend RSpec::Core::Warnings
class << self
# Setters for shared global objects
# @api private
attr_writer :configuration, :world
end
# Used to ensure examples get reloaded and user configuration gets reset to
# defaults between multiple runs in the same process.
#
# Users must invoke this if they want to have the configuration reset when
# they use the runner multiple times within the same process. Users must handle
# re-configuring RSpec themselves before the next run.
def self.reset
RSpec::ExampleGroups.remove_all_constants
@world = nil
@configuration = nil
end
# Used to ensure examples get reloaded between multiple runs in the same
# process and ensures user configuration is persisted.
#
# Users must invoke this if they want to clear all examples but preserve
# current configuration when they use the runner multiple times within the
# same process.
def self.clear_examples
world.reset
configuration.reporter.reset
configuration.start_time = ::RSpec::Core::Time.now
configuration.reset_filters
end
# Returns the global [Configuration](RSpec/Core/Configuration) object. While
# you _can_ use this method to access the configuration, the more common
# convention is to use [RSpec.configure](RSpec#configure-class_method).
#
# @example
# RSpec.configuration.drb_port = 1234
# @see RSpec.configure
# @see Core::Configuration
def self.configuration
@configuration ||= RSpec::Core::Configuration.new
end
# Yields the global configuration to a block.
# @yield [Configuration] global configuration
#
# @example
# RSpec.configure do |config|
# config.add_formatter 'documentation'
# end
# @see Core::Configuration
def self.configure
yield configuration if block_given?
end
# The example being executed.
#
# The primary audience for this method is library authors who need access
# to the example currently being executed and also want to support all
# versions of RSpec 2 and 3.
#
# @example
#
# RSpec.configure do |c|
# # context.example is deprecated, but RSpec.current_example is not
# # available until RSpec 3.0.
# fetch_current_example = RSpec.respond_to?(:current_example) ?
# proc { RSpec.current_example } : proc { |context| context.example }
#
# c.before(:example) do
# example = fetch_current_example.call(self)
#
# # ...
# end
# end
#
def self.current_example
RSpec::Support.thread_local_data[:current_example]
end
# Set the current example being executed.
# @api private
def self.current_example=(example)
RSpec::Support.thread_local_data[:current_example] = example
end
# @private
# Internal container for global non-configuration data.
def self.world
@world ||= RSpec::Core::World.new
end
# Namespace for the rspec-core code.
module Core
autoload :ExampleStatusPersister, "rspec/core/example_status_persister"
autoload :Profiler, "rspec/core/profiler"
# @private
# This avoids issues with reporting time caused by examples that
# change the value/meaning of Time.now without properly restoring
# it.
class Time
class << self
define_method(:now, &::Time.method(:now))
end
end
# @private path to executable file.
def self.path_to_executable
@path_to_executable ||= File.expand_path('../../../exe/rspec', __FILE__)
end
end
# @private
MODULES_TO_AUTOLOAD = {
:Matchers => "rspec/expectations",
:Expectations => "rspec/expectations",
:Mocks => "rspec/mocks"
}
# @private
def self.const_missing(name)
# Load rspec-expectations when RSpec::Matchers is referenced. This allows
# people to define custom matchers (using `RSpec::Matchers.define`) before
# rspec-core has loaded rspec-expectations (since it delays the loading of
# it to allow users to configure a different assertion/expectation
# framework). `autoload` can't be used since it works with ruby's built-in
# require (e.g. for files that are available relative to a load path dir),
# but not with rubygems' extended require.
#
# As of rspec 2.14.1, we no longer require `rspec/mocks` and
# `rspec/expectations` when `rspec` is required, so we want
# to make them available as an autoload.
require MODULES_TO_AUTOLOAD.fetch(name) { return super }
::RSpec.const_get(name)
end
Core::DSL.expose_globally!
Core::SharedExampleGroup::TopLevelDSL.expose_globally!
end
| 1 | 16,585 | Is `Reporter#reset` no longer used? If so, can we remove it? | rspec-rspec-core | rb |
@@ -188,7 +188,7 @@ public final class EthHash {
out.writeLongScalar(header.getTimestamp());
out.writeBytes(header.getExtraData());
if (header.getBaseFee().isPresent()) {
- out.writeLongScalar(header.getBaseFee().get());
+ out.writeUInt256Scalar(header.getBaseFee().get());
}
out.endList();
return Bytes32.wrap( | 1 | /*
* Copyright ConsenSys AG.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.ethereum.mainnet;
import org.hyperledger.besu.datatypes.Hash;
import org.hyperledger.besu.ethereum.core.SealableBlockHeader;
import org.hyperledger.besu.ethereum.rlp.BytesValueRLPOutput;
import java.math.BigInteger;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.security.DigestException;
import java.security.MessageDigest;
import java.util.Arrays;
import java.util.function.BiConsumer;
import com.google.common.primitives.Ints;
import com.google.common.primitives.Longs;
import org.apache.tuweni.bytes.Bytes;
import org.apache.tuweni.bytes.Bytes32;
import org.bouncycastle.jcajce.provider.digest.Keccak;
/** Implementation of EthHash. */
public final class EthHash {
public static final int HASH_BYTES = 64;
public static final BigInteger TARGET_UPPER_BOUND = BigInteger.valueOf(2).pow(256);
public static final int EPOCH_LENGTH = 30000;
private static final int DATASET_INIT_BYTES = 1 << 30;
private static final int DATASET_GROWTH_BYTES = 1 << 23;
private static final int CACHE_INIT_BYTES = 1 << 24;
private static final int CACHE_GROWTH_BYTES = 1 << 17;
private static final int MIX_BYTES = 128;
private static final int HASH_WORDS = 16;
private static final int CACHE_ROUNDS = 3;
private static final int WORD_BYTES = 4;
private static final int DATASET_PARENTS = 256;
private static final int ACCESSES = 64;
private static final ThreadLocal<MessageDigest> KECCAK_512 =
ThreadLocal.withInitial(Keccak.Digest512::new);
/**
* Hashimoto Light Implementation.
*
* @param size Dataset size for the given header hash
* @param cache EthHash Cache
* @param header Truncated BlockHeader hash
* @param nonce Nonce to use for hashing
* @return A byte array holding MixHash in its first 32 bytes and the EthHash result in
* bytes 32 to 63
*/
public static PoWSolution hashimotoLight(
final long size, final int[] cache, final Bytes header, final long nonce) {
return hashimoto(header, size, nonce, (target, ind) -> calcDatasetItem(target, cache, ind));
}
public static PoWSolution hashimoto(
final Bytes header,
final long size,
final long nonce,
final BiConsumer<byte[], Integer> datasetLookup) {
final int n = (int) Long.divideUnsigned(size, MIX_BYTES);
final MessageDigest keccak512 = KECCAK_512.get();
keccak512.update(header.toArrayUnsafe());
keccak512.update(Longs.toByteArray(Long.reverseBytes(nonce)));
final byte[] seed = keccak512.digest();
final ByteBuffer mixBuffer = ByteBuffer.allocate(MIX_BYTES).order(ByteOrder.LITTLE_ENDIAN);
for (int i = 0; i < MIX_BYTES / HASH_BYTES; ++i) {
mixBuffer.put(seed);
}
mixBuffer.position(0);
final int[] mix = new int[MIX_BYTES / 4];
for (int i = 0; i < MIX_BYTES / 4; ++i) {
mix[i] = mixBuffer.getInt();
}
final byte[] lookupResult = new byte[HASH_BYTES];
final byte[] temp = new byte[MIX_BYTES];
for (int i = 0; i < ACCESSES; ++i) {
final int p =
Integer.remainderUnsigned(
fnv(i ^ readLittleEndianInt(seed, 0), mix[i % (MIX_BYTES / WORD_BYTES)]), n);
for (int j = 0; j < MIX_BYTES / HASH_BYTES; ++j) {
datasetLookup.accept(lookupResult, 2 * p + j);
System.arraycopy(lookupResult, 0, temp, j * HASH_BYTES, HASH_BYTES);
}
fnvHash(mix, temp);
}
final int[] cmix = new int[mix.length / 4];
for (int i = 0; i < mix.length; i += 4) {
cmix[i / 4] = fnv(fnv(fnv(mix[i], mix[i + 1]), mix[i + 2]), mix[i + 3]);
}
final byte[] result = new byte[32 + 32];
intToByte(result, cmix);
final MessageDigest keccak256 = DirectAcyclicGraphSeed.KECCAK_256.get();
keccak256.update(seed);
keccak256.update(result, 0, 32);
try {
keccak256.digest(result, 32, 32);
} catch (final DigestException ex) {
throw new IllegalStateException(ex);
}
return new PoWSolution(
nonce,
Hash.wrap(Bytes32.wrap(Arrays.copyOf(result, 32))),
Bytes32.wrap(result, 32),
header);
}
/**
* Calculates a dataset item and writes it to a given buffer.
*
* @param buffer Buffer to store dataset item in
* @param cache EthHash Cache
* @param index Index of the dataset item to calculate
*/
public static void calcDatasetItem(final byte[] buffer, final int[] cache, final int index) {
final int rows = cache.length / HASH_WORDS;
final int[] mixInts = new int[HASH_BYTES / 4];
final int offset = index % rows * HASH_WORDS;
mixInts[0] = cache[offset] ^ index;
System.arraycopy(cache, offset + 1, mixInts, 1, HASH_WORDS - 1);
intToByte(buffer, mixInts);
final MessageDigest keccak512 = KECCAK_512.get();
keccak512.update(buffer);
try {
keccak512.digest(buffer, 0, HASH_BYTES);
ByteBuffer.wrap(buffer).order(ByteOrder.LITTLE_ENDIAN).asIntBuffer().get(mixInts);
for (int i = 0; i < DATASET_PARENTS; ++i) {
fnvHash(
mixInts,
cache,
Integer.remainderUnsigned(fnv(index ^ i, mixInts[i % 16]), rows) * HASH_WORDS);
}
intToByte(buffer, mixInts);
keccak512.update(buffer);
keccak512.digest(buffer, 0, HASH_BYTES);
} catch (final DigestException ex) {
throw new IllegalStateException(ex);
}
}
/**
* Hashes a BlockHeader without its nonce and MixHash.
*
* @param header Block Header
* @return Truncated BlockHeader hash
*/
public static Bytes32 hashHeader(final SealableBlockHeader header) {
final BytesValueRLPOutput out = new BytesValueRLPOutput();
out.startList();
out.writeBytes(header.getParentHash());
out.writeBytes(header.getOmmersHash());
out.writeBytes(header.getCoinbase());
out.writeBytes(header.getStateRoot());
out.writeBytes(header.getTransactionsRoot());
out.writeBytes(header.getReceiptsRoot());
out.writeBytes(header.getLogsBloom());
out.writeUInt256Scalar(header.getDifficulty());
out.writeLongScalar(header.getNumber());
out.writeLongScalar(header.getGasLimit());
out.writeLongScalar(header.getGasUsed());
out.writeLongScalar(header.getTimestamp());
out.writeBytes(header.getExtraData());
if (header.getBaseFee().isPresent()) {
out.writeLongScalar(header.getBaseFee().get());
}
out.endList();
return Bytes32.wrap(
DirectAcyclicGraphSeed.KECCAK_256.get().digest(out.encoded().toArrayUnsafe()));
}
/**
* Generates the EthHash cache for given parameters.
*
* @param cacheSize Size of the cache to generate
* @param block Block Number to generate cache for
* @param epochCalculator EpochCalculator used to determine current epoch length
* @return EthHash Cache
*/
public static int[] mkCache(
final int cacheSize, final long block, final EpochCalculator epochCalculator) {
final MessageDigest keccak512 = KECCAK_512.get();
keccak512.update(DirectAcyclicGraphSeed.dagSeed(block, epochCalculator));
final int rows = cacheSize / HASH_BYTES;
final byte[] cache = new byte[rows * HASH_BYTES];
try {
keccak512.digest(cache, 0, HASH_BYTES);
} catch (final DigestException ex) {
throw new IllegalStateException(ex);
}
for (int i = 1; i < rows; ++i) {
keccak512.update(cache, (i - 1) * HASH_BYTES, HASH_BYTES);
try {
keccak512.digest(cache, i * HASH_BYTES, HASH_BYTES);
} catch (final DigestException ex) {
throw new IllegalStateException(ex);
}
}
final byte[] temp = new byte[HASH_BYTES];
for (int i = 0; i < CACHE_ROUNDS; ++i) {
for (int j = 0; j < rows; ++j) {
final int offset = j * HASH_BYTES;
for (int k = 0; k < HASH_BYTES; ++k) {
temp[k] =
(byte)
(cache[(j - 1 + rows) % rows * HASH_BYTES + k]
^ cache[
Integer.remainderUnsigned(readLittleEndianInt(cache, offset), rows)
* HASH_BYTES
+ k]);
}
keccak512.update(temp);
try {
keccak512.digest(temp, 0, HASH_BYTES);
} catch (final DigestException ex) {
throw new IllegalStateException(ex);
}
System.arraycopy(temp, 0, cache, offset, HASH_BYTES);
}
}
final int[] result = new int[cache.length / 4];
ByteBuffer.wrap(cache).order(ByteOrder.LITTLE_ENDIAN).asIntBuffer().get(result);
return result;
}
/**
* Calculates EthHash Cache size at a given epoch.
*
* @param epoch EthHash Epoch
* @return Cache size
*/
public static long cacheSize(final long epoch) {
long size = epoch * CACHE_GROWTH_BYTES + CACHE_INIT_BYTES - HASH_BYTES;
while (!isPrime(Long.divideUnsigned(size, HASH_BYTES))) {
size -= 2 * HASH_BYTES;
}
return size;
}
/**
* Calculates EthHash DataSet size at a given epoch.
*
* @param epoch EthHash Epoch
* @return DataSet size
*/
public static long datasetSize(final long epoch) {
long size = epoch * DATASET_GROWTH_BYTES + DATASET_INIT_BYTES - MIX_BYTES;
while (!isPrime(Long.divideUnsigned(size, MIX_BYTES))) {
size -= 2 * MIX_BYTES;
}
return size;
}
private static boolean isPrime(final long num) {
if (num > 2 && (num & 1) == 0) {
return false;
}
for (int i = 3; i * i <= num; i += 2) {
if (num % i == 0) {
return false;
}
}
return true;
}
private static int readLittleEndianInt(final byte[] buffer, final int offset) {
return Ints.fromBytes(
buffer[offset + 3], buffer[offset + 2], buffer[offset + 1], buffer[offset]);
}
private static void intToByte(final byte[] target, final int[] ints) {
final ByteBuffer buffer = ByteBuffer.wrap(target).order(ByteOrder.LITTLE_ENDIAN);
for (final int i : ints) {
buffer.putInt(i);
}
}
private static void fnvHash(final int[] mix, final byte[] cache) {
for (int i = 0; i < mix.length; i++) {
mix[i] = fnv(mix[i], readLittleEndianInt(cache, i * Integer.BYTES));
}
}
private static void fnvHash(final int[] mix, final int[] cache, final int offset) {
for (int i = 0; i < mix.length; i++) {
mix[i] = fnv(mix[i], cache[offset + i]);
}
}
private static int fnv(final int a, final int b) {
return a * 0x01000193 ^ b;
}
}
| 1 | 26,687 | same concern here about difference in the write scalar implementation. I am not sure if this could present a consensus problem or not | hyperledger-besu | java |
@@ -0,0 +1,7 @@
+using UIKit;
+using MvvmCross.Core.ViewModels;
+
+namespace MvvmCross.tvOS.Views
+{
+ public interface IMvxSplitViewController : IMvxTvosView { }
+} | 1 | 1 | 13,512 | Can we remove this file? | MvvmCross-MvvmCross | .cs |
|
@@ -799,7 +799,7 @@ func (c *Operator) sync(ctx context.Context, key string) error {
if ok && sErr.ErrStatus.Code == 422 && sErr.ErrStatus.Reason == metav1.StatusReasonInvalid {
c.metrics.StsDeleteCreateCounter().Inc()
- level.Info(c.logger).Log("msg", "resolving illegal update of Alertmanager StatefulSet", "details", sErr.ErrStatus.Details)
+ level.Info(c.logger).Log("msg", "recreating AlertManager StatefulSet because the update operation wasn't possible", "reason", sErr.ErrStatus.Details.Causes[0].Message)
propagationPolicy := metav1.DeletePropagationForeground
if err := ssetClient.Delete(ctx, sset.GetName(), metav1.DeleteOptions{PropagationPolicy: &propagationPolicy}); err != nil {
return errors.Wrap(err, "failed to delete StatefulSet to avoid forbidden action") | 1 | // Copyright 2016 The prometheus-operator Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package alertmanager
import (
"context"
"fmt"
"net"
"net/url"
"reflect"
"regexp"
"strings"
"time"
monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
monitoringv1alpha1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1alpha1"
"github.com/prometheus-operator/prometheus-operator/pkg/assets"
monitoringclient "github.com/prometheus-operator/prometheus-operator/pkg/client/versioned"
"github.com/prometheus-operator/prometheus-operator/pkg/informers"
"github.com/prometheus-operator/prometheus-operator/pkg/k8sutil"
"github.com/prometheus-operator/prometheus-operator/pkg/listwatch"
"github.com/prometheus-operator/prometheus-operator/pkg/operator"
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/log/level"
"github.com/mitchellh/hashstructure"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
)
const (
resyncPeriod = 5 * time.Minute
)
var (
managedByOperatorLabel = "managed-by"
managedByOperatorLabelValue = "prometheus-operator"
managedByOperatorLabels = map[string]string{
managedByOperatorLabel: managedByOperatorLabelValue,
}
)
// Operator manages life cycle of Alertmanager deployments and
// monitoring configurations.
type Operator struct {
kclient kubernetes.Interface
mclient monitoringclient.Interface
logger log.Logger
nsAlrtInf cache.SharedIndexInformer
nsAlrtCfgInf cache.SharedIndexInformer
alrtInfs *informers.ForResource
alrtCfgInfs *informers.ForResource
secrInfs *informers.ForResource
ssetInfs *informers.ForResource
queue workqueue.RateLimitingInterface
metrics *operator.Metrics
config Config
}
type Config struct {
Host string
LocalHost string
ClusterDomain string
ReloaderConfig operator.ReloaderConfig
AlertmanagerDefaultBaseImage string
Namespaces operator.Namespaces
Labels operator.Labels
AlertManagerSelector string
SecretListWatchSelector string
}
// New creates a new controller.
func New(ctx context.Context, c operator.Config, logger log.Logger, r prometheus.Registerer) (*Operator, error) {
cfg, err := k8sutil.NewClusterConfig(c.Host, c.TLSInsecure, &c.TLSConfig)
if err != nil {
return nil, errors.Wrap(err, "instantiating cluster config failed")
}
client, err := kubernetes.NewForConfig(cfg)
if err != nil {
return nil, errors.Wrap(err, "instantiating kubernetes client failed")
}
mclient, err := monitoringclient.NewForConfig(cfg)
if err != nil {
return nil, errors.Wrap(err, "instantiating monitoring client failed")
}
o := &Operator{
kclient: client,
mclient: mclient,
logger: logger,
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "alertmanager"),
metrics: operator.NewMetrics("alertmanager", r),
config: Config{
Host: c.Host,
LocalHost: c.LocalHost,
ClusterDomain: c.ClusterDomain,
ReloaderConfig: c.ReloaderConfig,
AlertmanagerDefaultBaseImage: c.AlertmanagerDefaultBaseImage,
Namespaces: c.Namespaces,
Labels: c.Labels,
AlertManagerSelector: c.AlertManagerSelector,
SecretListWatchSelector: c.SecretListWatchSelector,
},
}
if err := o.bootstrap(ctx); err != nil {
return nil, err
}
return o, nil
}
func (c *Operator) bootstrap(ctx context.Context) error {
var err error
if _, err := labels.Parse(c.config.AlertManagerSelector); err != nil {
return errors.Wrap(err, "can not parse alertmanager selector value")
}
c.alrtInfs, err = informers.NewInformersForResource(
informers.NewMonitoringInformerFactories(
c.config.Namespaces.AlertmanagerAllowList,
c.config.Namespaces.DenyList,
c.mclient,
resyncPeriod,
func(options *metav1.ListOptions) {
options.LabelSelector = c.config.AlertManagerSelector
},
),
monitoringv1.SchemeGroupVersion.WithResource(monitoringv1.AlertmanagerName),
)
if err != nil {
return errors.Wrap(err, "error creating alertmanager informers")
}
var alertmanagerStores []cache.Store
for _, informer := range c.alrtInfs.GetInformers() {
alertmanagerStores = append(alertmanagerStores, informer.Informer().GetStore())
}
c.metrics.MustRegister(newAlertmanagerCollectorForStores(alertmanagerStores...))
c.alrtCfgInfs, err = informers.NewInformersForResource(
informers.NewMonitoringInformerFactories(
c.config.Namespaces.AllowList,
c.config.Namespaces.DenyList,
c.mclient,
resyncPeriod,
nil,
),
monitoringv1alpha1.SchemeGroupVersion.WithResource(monitoringv1alpha1.AlertmanagerConfigName),
)
if err != nil {
return errors.Wrap(err, "error creating alertmanagerconfig informers")
}
secretListWatchSelector, err := fields.ParseSelector(c.config.SecretListWatchSelector)
if err != nil {
return errors.Wrap(err, "can not parse secrets selector value")
}
c.secrInfs, err = informers.NewInformersForResource(
informers.NewKubeInformerFactories(
c.config.Namespaces.AllowList,
c.config.Namespaces.DenyList,
c.kclient,
resyncPeriod,
func(options *metav1.ListOptions) {
options.FieldSelector = secretListWatchSelector.String()
},
),
v1.SchemeGroupVersion.WithResource("secrets"),
)
if err != nil {
return errors.Wrap(err, "error creating secret informers")
}
c.ssetInfs, err = informers.NewInformersForResource(
informers.NewKubeInformerFactories(
c.config.Namespaces.AlertmanagerAllowList,
c.config.Namespaces.DenyList,
c.kclient,
resyncPeriod,
nil,
),
appsv1.SchemeGroupVersion.WithResource("statefulsets"),
)
if err != nil {
return errors.Wrap(err, "error creating statefulset informers")
}
newNamespaceInformer := func(o *Operator, allowList map[string]struct{}) cache.SharedIndexInformer {
// nsResyncPeriod is used to control how often the namespace informer
// should resync. If the unprivileged ListerWatcher is used, then the
// informer must resync more often because it cannot watch for
// namespace changes.
nsResyncPeriod := 15 * time.Second
// If the only namespace is v1.NamespaceAll, then the client must be
// privileged and a regular cache.ListWatch will be used. In this case
// watching works and we do not need to resync so frequently.
if listwatch.IsAllNamespaces(allowList) {
nsResyncPeriod = resyncPeriod
}
nsInf := cache.NewSharedIndexInformer(
o.metrics.NewInstrumentedListerWatcher(
listwatch.NewUnprivilegedNamespaceListWatchFromClient(ctx, o.logger, o.kclient.CoreV1().RESTClient(), allowList, o.config.Namespaces.DenyList, fields.Everything()),
),
&v1.Namespace{}, nsResyncPeriod, cache.Indexers{},
)
return nsInf
}
c.nsAlrtCfgInf = newNamespaceInformer(c, c.config.Namespaces.AllowList)
if listwatch.IdenticalNamespaces(c.config.Namespaces.AllowList, c.config.Namespaces.AlertmanagerAllowList) {
c.nsAlrtInf = c.nsAlrtCfgInf
} else {
c.nsAlrtInf = newNamespaceInformer(c, c.config.Namespaces.AlertmanagerAllowList)
}
return nil
}
// waitForCacheSync waits for the informers' caches to be synced.
func (c *Operator) waitForCacheSync(ctx context.Context) error {
ok := true
for _, infs := range []struct {
name string
informersForResource *informers.ForResource
}{
{"Alertmanager", c.alrtInfs},
{"AlertmanagerConfig", c.alrtCfgInfs},
{"Secret", c.secrInfs},
{"StatefulSet", c.ssetInfs},
} {
for _, inf := range infs.informersForResource.GetInformers() {
if !operator.WaitForNamedCacheSync(ctx, "alertmanager", log.With(c.logger, "informer", infs.name), inf.Informer()) {
ok = false
}
}
}
for _, inf := range []struct {
name string
informer cache.SharedIndexInformer
}{
{"AlertmanagerNamespace", c.nsAlrtInf},
{"AlertmanagerConfigNamespace", c.nsAlrtCfgInf},
} {
if !operator.WaitForNamedCacheSync(ctx, "alertmanager", log.With(c.logger, "informer", inf.name), inf.informer) {
ok = false
}
}
if !ok {
return errors.New("failed to sync caches")
}
level.Info(c.logger).Log("msg", "successfully synced all caches")
return nil
}
// addHandlers adds the eventhandlers to the informers.
func (c *Operator) addHandlers() {
c.alrtInfs.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: c.handleAlertmanagerAdd,
DeleteFunc: c.handleAlertmanagerDelete,
UpdateFunc: c.handleAlertmanagerUpdate,
})
c.alrtCfgInfs.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: c.handleAlertmanagerConfigAdd,
DeleteFunc: c.handleAlertmanagerConfigDelete,
UpdateFunc: c.handleAlertmanagerConfigUpdate,
})
c.secrInfs.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: c.handleSecretAdd,
DeleteFunc: c.handleSecretDelete,
UpdateFunc: c.handleSecretUpdate,
})
c.ssetInfs.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: c.handleStatefulSetAdd,
DeleteFunc: c.handleStatefulSetDelete,
UpdateFunc: c.handleStatefulSetUpdate,
})
// The controller needs to watch the namespaces in which the
// alertmanagerconfigs live because a label change on a namespace may
// trigger a configuration change.
// It doesn't need to watch on addition/deletion though because it's
// already covered by the event handlers on alertmanagerconfigs.
c.nsAlrtCfgInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
UpdateFunc: c.handleNamespaceUpdate,
})
}
func (c *Operator) handleAlertmanagerConfigAdd(obj interface{}) {
o, ok := c.getObject(obj)
if ok {
level.Debug(c.logger).Log("msg", "AlertmanagerConfig added")
c.metrics.TriggerByCounter(monitoringv1alpha1.AlertmanagerConfigKind, "add").Inc()
c.enqueueForNamespace(o.GetNamespace())
}
}
func (c *Operator) handleAlertmanagerConfigUpdate(old, cur interface{}) {
if old.(*monitoringv1alpha1.AlertmanagerConfig).ResourceVersion == cur.(*monitoringv1alpha1.AlertmanagerConfig).ResourceVersion {
return
}
o, ok := c.getObject(cur)
if ok {
level.Debug(c.logger).Log("msg", "AlertmanagerConfig updated")
c.metrics.TriggerByCounter(monitoringv1alpha1.AlertmanagerConfigKind, "update").Inc()
c.enqueueForNamespace(o.GetNamespace())
}
}
func (c *Operator) handleAlertmanagerConfigDelete(obj interface{}) {
o, ok := c.getObject(obj)
if ok {
level.Debug(c.logger).Log("msg", "AlertmanagerConfig delete")
c.metrics.TriggerByCounter(monitoringv1alpha1.AlertmanagerConfigKind, "delete").Inc()
c.enqueueForNamespace(o.GetNamespace())
}
}
// TODO: Do we need to enqueue secrets just for the namespace or in general?
func (c *Operator) handleSecretDelete(obj interface{}) {
o, ok := c.getObject(obj)
if ok {
level.Debug(c.logger).Log("msg", "Secret deleted")
c.metrics.TriggerByCounter("Secret", "delete").Inc()
c.enqueueForNamespace(o.GetNamespace())
}
}
func (c *Operator) handleSecretUpdate(old, cur interface{}) {
if old.(*v1.Secret).ResourceVersion == cur.(*v1.Secret).ResourceVersion {
return
}
o, ok := c.getObject(cur)
if ok {
level.Debug(c.logger).Log("msg", "Secret updated")
c.metrics.TriggerByCounter("Secret", "update").Inc()
c.enqueueForNamespace(o.GetNamespace())
}
}
func (c *Operator) handleSecretAdd(obj interface{}) {
o, ok := c.getObject(obj)
if ok {
level.Debug(c.logger).Log("msg", "Secret added")
c.metrics.TriggerByCounter("Secret", "add").Inc()
c.enqueueForNamespace(o.GetNamespace())
}
}
// enqueueForNamespace enqueues all Alertmanager object keys that belong to the
// given namespace or select objects in the given namespace.
func (c *Operator) enqueueForNamespace(nsName string) {
nsObject, exists, err := c.nsAlrtCfgInf.GetStore().GetByKey(nsName)
if err != nil {
level.Error(c.logger).Log(
"msg", "get namespace to enqueue Alertmanager instances failed",
"err", err,
)
return
}
if !exists {
level.Error(c.logger).Log(
"msg", fmt.Sprintf("get namespace to enqueue Alertmanager instances failed: namespace %q does not exist", nsName),
)
return
}
ns := nsObject.(*v1.Namespace)
err = c.alrtInfs.ListAll(labels.Everything(), func(obj interface{}) {
// Check for Alertmanager instances in the namespace.
am := obj.(*monitoringv1.Alertmanager)
if am.Namespace == nsName {
c.enqueue(am)
return
}
// Check for Alertmanager instances selecting AlertmanagerConfigs in
// the namespace.
acNSSelector, err := metav1.LabelSelectorAsSelector(am.Spec.AlertmanagerConfigNamespaceSelector)
if err != nil {
level.Error(c.logger).Log(
"msg", fmt.Sprintf("failed to convert AlertmanagerConfigNamespaceSelector of %q to selector", am.Name),
"err", err,
)
return
}
if acNSSelector.Matches(labels.Set(ns.Labels)) {
c.enqueue(am)
return
}
})
if err != nil {
level.Error(c.logger).Log(
"msg", "listing all Alertmanager instances from cache failed",
"err", err,
)
}
}
// Run the controller.
func (c *Operator) Run(ctx context.Context) error {
defer c.queue.ShutDown()
errChan := make(chan error)
go func() {
v, err := c.kclient.Discovery().ServerVersion()
if err != nil {
errChan <- errors.Wrap(err, "communicating with server failed")
return
}
level.Info(c.logger).Log("msg", "connection established", "cluster-version", v)
errChan <- nil
}()
select {
case err := <-errChan:
if err != nil {
return err
}
level.Info(c.logger).Log("msg", "CRD API endpoints ready")
case <-ctx.Done():
return nil
}
go c.worker(ctx)
go c.alrtInfs.Start(ctx.Done())
go c.alrtCfgInfs.Start(ctx.Done())
go c.secrInfs.Start(ctx.Done())
go c.ssetInfs.Start(ctx.Done())
go c.nsAlrtCfgInf.Run(ctx.Done())
if c.nsAlrtInf != c.nsAlrtCfgInf {
go c.nsAlrtInf.Run(ctx.Done())
}
if err := c.waitForCacheSync(ctx); err != nil {
return err
}
c.addHandlers()
c.metrics.Ready().Set(1)
<-ctx.Done()
return nil
}
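// keyFunc derives the <namespace>/<name> queue key for obj, handling deletion
// tombstones, and returns false if the key cannot be computed.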
func (c *Operator) keyFunc(obj interface{}) (string, bool) {
k, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)
if err != nil {
level.Error(c.logger).Log("msg", "creating key failed", "err", err)
return k, false
}
return k, true
}
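// getObject returns the metav1.Object accessor for obj, unwrapping
// cache.DeletedFinalStateUnknown tombstones delivered by the informer.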
func (c *Operator) getObject(obj interface{}) (metav1.Object, bool) {
ts, ok := obj.(cache.DeletedFinalStateUnknown)
if ok {
obj = ts.Obj
}
o, err := meta.Accessor(obj)
if err != nil {
level.Error(c.logger).Log("msg", "get object failed", "err", err)
return nil, false
}
return o, true
}
// enqueue adds a key to the queue. If obj is a key already it gets added
// directly. Otherwise, the key is extracted via keyFunc.
func (c *Operator) enqueue(obj interface{}) {
if obj == nil {
return
}
key, ok := obj.(string)
if !ok {
key, ok = c.keyFunc(obj)
if !ok {
return
}
}
c.queue.Add(key)
}
// worker runs a worker thread that just dequeues items, processes them
// and marks them done. It enforces that the syncHandler is never invoked
// concurrently with the same key.
func (c *Operator) worker(ctx context.Context) {
for c.processNextWorkItem(ctx) {
}
}
func (c *Operator) processNextWorkItem(ctx context.Context) bool {
key, quit := c.queue.Get()
if quit {
return false
}
defer c.queue.Done(key)
c.metrics.ReconcileCounter().Inc()
err := c.sync(ctx, key.(string))
c.metrics.SetSyncStatus(key.(string), err == nil)
if err == nil {
c.queue.Forget(key)
return true
}
c.metrics.ReconcileErrorsCounter().Inc()
utilruntime.HandleError(errors.Wrap(err, fmt.Sprintf("Sync %q failed", key)))
c.queue.AddRateLimited(key)
return true
}
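// alertmanagerForStatefulSet returns the Alertmanager that the given
// StatefulSet belongs to, or nil if the StatefulSet name does not follow the
// Alertmanager naming scheme or the object cannot be found.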
func (c *Operator) alertmanagerForStatefulSet(sset interface{}) *monitoringv1.Alertmanager {
key, ok := c.keyFunc(sset)
if !ok {
return nil
}
match, aKey := statefulSetKeyToAlertmanagerKey(key)
if !match {
level.Debug(c.logger).Log("msg", "StatefulSet key did not match an Alertmanager key format", "key", key)
return nil
}
a, err := c.alrtInfs.Get(aKey)
if apierrors.IsNotFound(err) {
return nil
}
if err != nil {
level.Error(c.logger).Log("msg", "Alertmanager lookup failed", "err", err)
return nil
}
return a.(*monitoringv1.Alertmanager)
}
func statefulSetNameFromAlertmanagerName(name string) string {
return "alertmanager-" + name
}
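// statefulSetKeyToAlertmanagerKey reports whether the key has the
// "<namespace>/alertmanager-<name>" format and returns the corresponding
// "<namespace>/<name>" Alertmanager key.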
func statefulSetKeyToAlertmanagerKey(key string) (bool, string) {
r := regexp.MustCompile("^(.+)/alertmanager-(.+)$")
matches := r.FindAllStringSubmatch(key, 2)
if len(matches) != 1 {
return false, ""
}
if len(matches[0]) != 3 {
return false, ""
}
return true, matches[0][1] + "/" + matches[0][2]
}
func alertmanagerKeyToStatefulSetKey(key string) string {
keyParts := strings.Split(key, "/")
return keyParts[0] + "/alertmanager-" + keyParts[1]
}
func (c *Operator) handleAlertmanagerAdd(obj interface{}) {
key, ok := c.keyFunc(obj)
if !ok {
return
}
level.Debug(c.logger).Log("msg", "Alertmanager added", "key", key)
c.metrics.TriggerByCounter(monitoringv1.AlertmanagersKind, "add").Inc()
checkAlertmanagerSpecDeprecation(key, obj.(*monitoringv1.Alertmanager), c.logger)
c.enqueue(key)
}
func (c *Operator) handleAlertmanagerDelete(obj interface{}) {
key, ok := c.keyFunc(obj)
if !ok {
return
}
level.Debug(c.logger).Log("msg", "Alertmanager deleted", "key", key)
c.metrics.TriggerByCounter(monitoringv1.AlertmanagersKind, "delete").Inc()
c.enqueue(key)
}
func (c *Operator) handleAlertmanagerUpdate(old, cur interface{}) {
if old.(*monitoringv1.Alertmanager).ResourceVersion == cur.(*monitoringv1.Alertmanager).ResourceVersion {
return
}
key, ok := c.keyFunc(cur)
if !ok {
return
}
level.Debug(c.logger).Log("msg", "Alertmanager updated", "key", key)
c.metrics.TriggerByCounter(monitoringv1.AlertmanagersKind, "update").Inc()
checkAlertmanagerSpecDeprecation(key, cur.(*monitoringv1.Alertmanager), c.logger)
c.enqueue(key)
}
func (c *Operator) handleStatefulSetDelete(obj interface{}) {
if a := c.alertmanagerForStatefulSet(obj); a != nil {
level.Debug(c.logger).Log("msg", "StatefulSet delete")
c.metrics.TriggerByCounter("StatefulSet", "delete").Inc()
c.enqueue(a)
}
}
func (c *Operator) handleStatefulSetAdd(obj interface{}) {
if a := c.alertmanagerForStatefulSet(obj); a != nil {
level.Debug(c.logger).Log("msg", "StatefulSet added")
c.metrics.TriggerByCounter("StatefulSet", "add").Inc()
c.enqueue(a)
}
}
func (c *Operator) handleStatefulSetUpdate(oldo, curo interface{}) {
old := oldo.(*appsv1.StatefulSet)
cur := curo.(*appsv1.StatefulSet)
level.Debug(c.logger).Log("msg", "update handler", "old", old.ResourceVersion, "cur", cur.ResourceVersion)
// Periodic resync may resend the deployment without changes in-between.
// Also breaks loops created by updating the resource ourselves.
if old.ResourceVersion == cur.ResourceVersion {
return
}
// Wake up Alertmanager resource the deployment belongs to.
if a := c.alertmanagerForStatefulSet(cur); a != nil {
level.Debug(c.logger).Log("msg", "StatefulSet updated")
c.metrics.TriggerByCounter("StatefulSet", "update").Inc()
c.enqueue(a)
}
}
func (c *Operator) handleNamespaceUpdate(oldo, curo interface{}) {
old := oldo.(*v1.Namespace)
cur := curo.(*v1.Namespace)
level.Debug(c.logger).Log("msg", "update handler", "namespace", cur.GetName(), "old", old.ResourceVersion, "cur", cur.ResourceVersion)
// Periodic resync may resend the Namespace without changes
// in-between.
if old.ResourceVersion == cur.ResourceVersion {
return
}
level.Debug(c.logger).Log("msg", "Namespace updated", "namespace", cur.GetName())
c.metrics.TriggerByCounter("Namespace", "update").Inc()
// Check for Alertmanager instances selecting AlertmanagerConfigs in the namespace.
err := c.alrtInfs.ListAll(labels.Everything(), func(obj interface{}) {
a := obj.(*monitoringv1.Alertmanager)
sync, err := k8sutil.LabelSelectionHasChanged(old.Labels, cur.Labels, a.Spec.AlertmanagerConfigNamespaceSelector)
if err != nil {
level.Error(c.logger).Log(
"err", err,
"name", a.Name,
"namespace", a.Namespace,
)
return
}
if sync {
c.enqueue(a)
}
})
if err != nil {
level.Error(c.logger).Log(
"msg", "listing all Alertmanager instances from cache failed",
"err", err,
)
}
}
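// sync reconciles the Alertmanager identified by the queue key: it provisions
// the generated configuration and TLS asset secrets, ensures the governing
// service, and creates or updates the StatefulSet.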
func (c *Operator) sync(ctx context.Context, key string) error {
aobj, err := c.alrtInfs.Get(key)
if apierrors.IsNotFound(err) {
c.metrics.ForgetObject(key)
// Dependent resources are cleaned up by K8s via OwnerReferences
return nil
}
if err != nil {
return err
}
am := aobj.(*monitoringv1.Alertmanager)
am = am.DeepCopy()
am.APIVersion = monitoringv1.SchemeGroupVersion.String()
am.Kind = monitoringv1.AlertmanagersKind
if am.Spec.Paused {
return nil
}
level.Info(c.logger).Log("msg", "sync alertmanager", "key", key)
assetStore := assets.NewStore(c.kclient.CoreV1(), c.kclient.CoreV1())
if err := c.provisionAlertmanagerConfiguration(ctx, am, assetStore); err != nil {
return errors.Wrap(err, "provision alertmanager configuration")
}
if err := c.createOrUpdateTLSAssetSecret(ctx, am, assetStore); err != nil {
return errors.Wrap(err, "creating tls asset secret failed")
}
// Create governing service if it doesn't exist.
svcClient := c.kclient.CoreV1().Services(am.Namespace)
if err = k8sutil.CreateOrUpdateService(ctx, svcClient, makeStatefulSetService(am, c.config)); err != nil {
return errors.Wrap(err, "synchronizing governing service failed")
}
newSSetInputHash, err := createSSetInputHash(*am, c.config)
if err != nil {
return err
}
sset, err := makeStatefulSet(am, c.config, newSSetInputHash)
if err != nil {
return errors.Wrap(err, "failed to make statefulset")
}
operator.SanitizeSTS(sset)
ssetClient := c.kclient.AppsV1().StatefulSets(am.Namespace)
obj, err := c.ssetInfs.Get(alertmanagerKeyToStatefulSetKey(key))
if err != nil {
if !apierrors.IsNotFound(err) {
return errors.Wrap(err, "failed to retrieve statefulset")
}
if _, err := ssetClient.Create(ctx, sset, metav1.CreateOptions{}); err != nil {
return errors.Wrap(err, "failed to create statefulset")
}
return nil
}
oldSSetInputHash := obj.(*appsv1.StatefulSet).ObjectMeta.Annotations[sSetInputHashName]
if newSSetInputHash == oldSSetInputHash {
level.Debug(c.logger).Log("msg", "new statefulset generation inputs match current, skipping any actions")
return nil
}
err = k8sutil.UpdateStatefulSet(ctx, ssetClient, sset)
sErr, ok := err.(*apierrors.StatusError)
if ok && sErr.ErrStatus.Code == 422 && sErr.ErrStatus.Reason == metav1.StatusReasonInvalid {
c.metrics.StsDeleteCreateCounter().Inc()
level.Info(c.logger).Log("msg", "resolving illegal update of Alertmanager StatefulSet", "details", sErr.ErrStatus.Details)
propagationPolicy := metav1.DeletePropagationForeground
if err := ssetClient.Delete(ctx, sset.GetName(), metav1.DeleteOptions{PropagationPolicy: &propagationPolicy}); err != nil {
return errors.Wrap(err, "failed to delete StatefulSet to avoid forbidden action")
}
return nil
}
if err != nil {
return errors.Wrap(err, "updating StatefulSet failed")
}
return nil
}
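// createSSetInputHash hashes the Alertmanager object together with the
// operator configuration so that StatefulSet updates can be skipped when the
// generation inputs are unchanged.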
func createSSetInputHash(a monitoringv1.Alertmanager, c Config) (string, error) {
hash, err := hashstructure.Hash(struct {
A monitoringv1.Alertmanager
C Config
}{a, c},
nil,
)
if err != nil {
return "", errors.Wrap(
err,
"failed to calculate combined hash of Alertmanager CRD and config",
)
}
return fmt.Sprintf("%d", hash), nil
}
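// provisionAlertmanagerConfiguration assembles the final Alertmanager
// configuration from the base config secret and the selected
// AlertmanagerConfig objects and stores it in the generated config secret.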
func (c *Operator) provisionAlertmanagerConfiguration(ctx context.Context, am *monitoringv1.Alertmanager, store *assets.Store) error {
secretName := defaultConfigSecretName(am.Name)
if am.Spec.ConfigSecret != "" {
secretName = am.Spec.ConfigSecret
}
// Tentatively retrieve the secret containing the user-provided Alertmanager
// configuration.
secret, err := c.kclient.CoreV1().Secrets(am.Namespace).Get(ctx, secretName, metav1.GetOptions{})
if err != nil && !apierrors.IsNotFound(err) {
return errors.Wrap(err, "get base configuration secret")
}
var secretData map[string][]byte
if secret != nil {
secretData = secret.Data
}
rawBaseConfig := []byte(`route:
receiver: 'null'
receivers:
- name: 'null'`)
if len(secretData[alertmanagerConfigFile]) > 0 {
rawBaseConfig = secretData[alertmanagerConfigFile]
} else {
if secret == nil {
level.Info(c.logger).Log("msg", "base config secret not found",
"secret", secretName, "alertmanager", am.Name, "namespace", am.Namespace)
} else {
level.Info(c.logger).Log("msg", "key not found in base config secret",
"secret", secretName, "key", alertmanagerConfigFile, "alertmanager", am.Name, "namespace", am.Namespace)
}
}
baseConfig, err := loadCfg(string(rawBaseConfig))
if err != nil {
return errors.Wrap(err, "base config from Secret could not be parsed")
}
// If no AlertmanagerConfig selectors are configured, the user wants to
// manage configuration themselves.
if am.Spec.AlertmanagerConfigSelector == nil {
level.Debug(c.logger).Log("msg", "no AlertmanagerConfig selector specified, copying base config as-is",
"base config secret", secretName, "mounted config secret", generatedConfigSecretName(am.Name),
"alertmanager", am.Name, "namespace", am.Namespace,
)
err = c.createOrUpdateGeneratedConfigSecret(ctx, am, rawBaseConfig, secretData)
if err != nil {
return errors.Wrap(err, "create or update generated config secret failed")
}
return nil
}
amConfigs, err := c.selectAlertmanagerConfigs(ctx, am, store)
if err != nil {
return errors.Wrap(err, "selecting AlertmanagerConfigs failed")
}
generator := newConfigGenerator(c.logger, store)
generatedConfig, err := generator.generateConfig(ctx, *baseConfig, amConfigs)
if err != nil {
return errors.Wrap(err, "generating Alertmanager config yaml failed")
}
err = c.createOrUpdateGeneratedConfigSecret(ctx, am, generatedConfig, secretData)
if err != nil {
return errors.Wrap(err, "create or update generated config secret failed")
}
return nil
}
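// createOrUpdateGeneratedConfigSecret writes the generated Alertmanager
// configuration plus any additional data keys into a secret owned by the
// Alertmanager object.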
func (c *Operator) createOrUpdateGeneratedConfigSecret(ctx context.Context, am *monitoringv1.Alertmanager, conf []byte, additionalData map[string][]byte) error {
boolTrue := true
sClient := c.kclient.CoreV1().Secrets(am.Namespace)
generatedConfigSecret := &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: generatedConfigSecretName(am.Name),
Labels: c.config.Labels.Merge(managedByOperatorLabels),
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: am.APIVersion,
BlockOwnerDeletion: &boolTrue,
Controller: &boolTrue,
Kind: am.Kind,
Name: am.Name,
UID: am.UID,
},
},
},
Data: map[string][]byte{},
}
for k, v := range additionalData {
generatedConfigSecret.Data[k] = v
}
generatedConfigSecret.Data[alertmanagerConfigFile] = conf
err := k8sutil.CreateOrUpdateSecret(ctx, sClient, generatedConfigSecret)
if err != nil {
return errors.Wrap(err, "failed to update generated config secret")
}
return nil
}
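// selectAlertmanagerConfigs returns the AlertmanagerConfig objects matched by
// the Alertmanager's namespace and label selectors, keyed by
// "<namespace>/<name>"; invalid objects are skipped and reported as rejected.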
func (c *Operator) selectAlertmanagerConfigs(ctx context.Context, am *monitoringv1.Alertmanager, store *assets.Store) (map[string]*monitoringv1alpha1.AlertmanagerConfig, error) {
namespaces := []string{}
// If 'AlertmanagerConfigNamespaceSelector' is nil, only check own namespace.
if am.Spec.AlertmanagerConfigNamespaceSelector == nil {
namespaces = append(namespaces, am.Namespace)
level.Debug(c.logger).Log("msg", "selecting AlertmanagerConfigs from alertmanager's namespace", "namespace", am.Namespace, "alertmanager", am.Name)
} else {
amConfigNSSelector, err := metav1.LabelSelectorAsSelector(am.Spec.AlertmanagerConfigNamespaceSelector)
if err != nil {
return nil, err
}
err = cache.ListAll(c.nsAlrtCfgInf.GetStore(), amConfigNSSelector, func(obj interface{}) {
namespaces = append(namespaces, obj.(*v1.Namespace).Name)
})
if err != nil {
return nil, errors.Wrap(err, "failed to list namespaces")
}
level.Debug(c.logger).Log("msg", "filtering namespaces to select AlertmanagerConfigs from", "namespaces", strings.Join(namespaces, ","), "namespace", am.Namespace, "alertmanager", am.Name)
}
// Selectors (<namespace>/<name>) might overlap. Deduplicate them using the keyFunc.
amConfigs := make(map[string]*monitoringv1alpha1.AlertmanagerConfig)
amConfigSelector, err := metav1.LabelSelectorAsSelector(am.Spec.AlertmanagerConfigSelector)
if err != nil {
return nil, err
}
for _, ns := range namespaces {
err := c.alrtCfgInfs.ListAllByNamespace(ns, amConfigSelector, func(obj interface{}) {
k, ok := c.keyFunc(obj)
if ok {
amConfigs[k] = obj.(*monitoringv1alpha1.AlertmanagerConfig)
}
})
if err != nil {
return nil, errors.Wrapf(err, "failed to list alertmanager configs in namespace %s", ns)
}
}
var rejected int
res := make(map[string]*monitoringv1alpha1.AlertmanagerConfig, len(amConfigs))
for namespaceAndName, amc := range amConfigs {
if err := checkAlertmanagerConfig(ctx, amc, store); err != nil {
rejected++
level.Warn(c.logger).Log(
"msg", "skipping alertmanagerconfig",
"error", err.Error(),
"alertmanagerconfig", namespaceAndName,
"namespace", am.Namespace,
"alertmanager", am.Name,
)
continue
}
res[namespaceAndName] = amc
}
amcKeys := []string{}
for k := range res {
amcKeys = append(amcKeys, k)
}
level.Debug(c.logger).Log("msg", "selected AlertmanagerConfigs", "alertmanagerconfigs", strings.Join(amcKeys, ","), "namespace", am.Namespace, "prometheus", am.Name)
if amKey, ok := c.keyFunc(am); ok {
c.metrics.SetSelectedResources(amKey, monitoringv1alpha1.AlertmanagerConfigKind, len(res))
c.metrics.SetRejectedResources(amKey, monitoringv1alpha1.AlertmanagerConfigKind, rejected)
}
return res, nil
}
// checkAlertmanagerConfig verifies that an AlertmanagerConfig object is valid
// and has no missing references to other objects.
func checkAlertmanagerConfig(ctx context.Context, amc *monitoringv1alpha1.AlertmanagerConfig, store *assets.Store) error {
receiverNames, err := checkReceivers(ctx, amc, store)
if err != nil {
return err
}
return checkAlertmanagerRoutes(amc.Spec.Route, receiverNames, true)
}
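// checkReceivers verifies that receiver names are unique, validates each
// integration's secret references via the asset store and returns the set of
// receiver names.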
func checkReceivers(ctx context.Context, amc *monitoringv1alpha1.AlertmanagerConfig, store *assets.Store) (map[string]struct{}, error) {
var err error
receiverNames := make(map[string]struct{})
for i, receiver := range amc.Spec.Receivers {
if _, found := receiverNames[receiver.Name]; found {
return nil, errors.Errorf("%q receiver is not unique", receiver.Name)
}
receiverNames[receiver.Name] = struct{}{}
amcKey := fmt.Sprintf("alertmanagerConfig/%s/%s/%d", amc.GetNamespace(), amc.GetName(), i)
err = checkPagerDutyConfigs(ctx, receiver.PagerDutyConfigs, amc.GetNamespace(), amcKey, store)
if err != nil {
return nil, err
}
err = checkOpsGenieConfigs(ctx, receiver.OpsGenieConfigs, amc.GetNamespace(), amcKey, store)
if err != nil {
return nil, err
}
err = checkSlackConfigs(ctx, receiver.SlackConfigs, amc.GetNamespace(), amcKey, store)
if err != nil {
return nil, err
}
err = checkWebhookConfigs(ctx, receiver.WebhookConfigs, amc.GetNamespace(), amcKey, store)
if err != nil {
return nil, err
}
err = checkWechatConfigs(ctx, receiver.WeChatConfigs, amc.GetNamespace(), amcKey, store)
if err != nil {
return nil, err
}
err = checkEmailConfigs(ctx, receiver.EmailConfigs, amc.GetNamespace(), amcKey, store)
if err != nil {
return nil, err
}
err = checkVictorOpsConfigs(ctx, receiver.VictorOpsConfigs, amc.GetNamespace(), amcKey, store)
if err != nil {
return nil, err
}
err = checkPushoverConfigs(ctx, receiver.PushoverConfigs, amc.GetNamespace(), amcKey, store)
if err != nil {
return nil, err
}
}
return receiverNames, nil
}
func checkPagerDutyConfigs(ctx context.Context, configs []monitoringv1alpha1.PagerDutyConfig, namespace string, key string, store *assets.Store) error {
for i, config := range configs {
pagerDutyConfigKey := fmt.Sprintf("%s/pagerduty/%d", key, i)
if config.RoutingKey != nil {
if _, err := store.GetSecretKey(ctx, namespace, *config.RoutingKey); err != nil {
return err
}
}
if config.ServiceKey != nil {
if _, err := store.GetSecretKey(ctx, namespace, *config.ServiceKey); err != nil {
return err
}
}
if err := configureHTTPConfigInStore(ctx, config.HTTPConfig, namespace, pagerDutyConfigKey, store); err != nil {
return err
}
}
return nil
}
func checkOpsGenieConfigs(ctx context.Context, configs []monitoringv1alpha1.OpsGenieConfig, namespace string, key string, store *assets.Store) error {
for i, config := range configs {
opsgenieConfigKey := fmt.Sprintf("%s/opsgenie/%d", key, i)
if config.APIKey != nil {
if _, err := store.GetSecretKey(ctx, namespace, *config.APIKey); err != nil {
return err
}
}
if err := config.Validate(); err != nil {
return err
}
if err := configureHTTPConfigInStore(ctx, config.HTTPConfig, namespace, opsgenieConfigKey, store); err != nil {
return err
}
}
return nil
}
func checkSlackConfigs(ctx context.Context, configs []monitoringv1alpha1.SlackConfig, namespace string, key string, store *assets.Store) error {
for i, config := range configs {
slackConfigKey := fmt.Sprintf("%s/slack/%d", key, i)
if config.APIURL != nil {
if _, err := store.GetSecretKey(ctx, namespace, *config.APIURL); err != nil {
return err
}
}
if err := config.Validate(); err != nil {
return err
}
if err := configureHTTPConfigInStore(ctx, config.HTTPConfig, namespace, slackConfigKey, store); err != nil {
return err
}
}
return nil
}
func checkWebhookConfigs(ctx context.Context, configs []monitoringv1alpha1.WebhookConfig, namespace string, key string, store *assets.Store) error {
for i, config := range configs {
webhookConfigKey := fmt.Sprintf("%s/webhook/%d", key, i)
if config.URL == nil && config.URLSecret == nil {
return errors.New("one of url or urlSecret should be specified")
}
if config.URLSecret != nil {
if _, err := store.GetSecretKey(ctx, namespace, *config.URLSecret); err != nil {
return err
}
}
if err := configureHTTPConfigInStore(ctx, config.HTTPConfig, namespace, webhookConfigKey, store); err != nil {
return err
}
}
return nil
}
func checkWechatConfigs(ctx context.Context, configs []monitoringv1alpha1.WeChatConfig, namespace string, key string, store *assets.Store) error {
for i, config := range configs {
wechatConfigKey := fmt.Sprintf("%s/wechat/%d", key, i)
if len(config.APIURL) > 0 {
_, err := url.Parse(config.APIURL)
if err != nil {
return errors.New("API URL not valid")
}
}
if config.APISecret != nil {
if _, err := store.GetSecretKey(ctx, namespace, *config.APISecret); err != nil {
return err
}
}
if err := configureHTTPConfigInStore(ctx, config.HTTPConfig, namespace, wechatConfigKey, store); err != nil {
return err
}
}
return nil
}
func checkEmailConfigs(ctx context.Context, configs []monitoringv1alpha1.EmailConfig, namespace string, key string, store *assets.Store) error {
for _, config := range configs {
if config.To == "" {
return errors.New("missing to address in email config")
}
if config.Smarthost != "" {
_, _, err := net.SplitHostPort(config.Smarthost)
if err != nil {
return errors.New("invalid email field SMARTHOST")
}
}
if config.AuthPassword != nil {
if _, err := store.GetSecretKey(ctx, namespace, *config.AuthPassword); err != nil {
return err
}
}
if config.AuthSecret != nil {
if _, err := store.GetSecretKey(ctx, namespace, *config.AuthSecret); err != nil {
return err
}
}
if config.Headers != nil {
// Header names are case-insensitive, check for collisions.
normalizedHeaders := map[string]struct{}{}
for _, v := range config.Headers {
normalized := strings.Title(v.Key)
if _, ok := normalizedHeaders[normalized]; ok {
return fmt.Errorf("duplicate header %q in email config", normalized)
}
normalizedHeaders[normalized] = struct{}{}
}
}
if err := store.AddSafeTLSConfig(ctx, namespace, config.TLSConfig); err != nil {
return err
}
}
return nil
}
func checkVictorOpsConfigs(ctx context.Context, configs []monitoringv1alpha1.VictorOpsConfig, namespace string, key string, store *assets.Store) error {
for i, config := range configs {
if config.APIKey != nil {
if _, err := store.GetSecretKey(ctx, namespace, *config.APIKey); err != nil {
return err
}
}
// from https://github.com/prometheus/alertmanager/blob/a7f9fdadbecbb7e692d2cd8d3334e3d6de1602e1/config/notifiers.go#L497
reservedFields := map[string]struct{}{
"routing_key": {},
"message_type": {},
"state_message": {},
"entity_display_name": {},
"monitoring_tool": {},
"entity_id": {},
"entity_state": {},
}
if len(config.CustomFields) > 0 {
for _, v := range config.CustomFields {
if _, ok := reservedFields[v.Key]; ok {
return fmt.Errorf("usage of reserved word %q is not allowed in custom fields", v.Key)
}
}
}
if config.RoutingKey == "" {
return errors.New("missing Routing key in VictorOps config")
}
victoropsConfigKey := fmt.Sprintf("%s/victorops/%d", key, i)
if err := configureHTTPConfigInStore(ctx, config.HTTPConfig, namespace, victoropsConfigKey, store); err != nil {
return err
}
}
return nil
}
func checkPushoverConfigs(ctx context.Context, configs []monitoringv1alpha1.PushoverConfig, namespace string, key string, store *assets.Store) error {
checkSecret := func(secret *v1.SecretKeySelector, name string) error {
if secret == nil {
return errors.Errorf("mandatory field %s is empty", name)
}
s, err := store.GetSecretKey(ctx, namespace, *secret)
if err != nil {
return err
}
if s == "" {
return errors.New("mandatory field userKey is empty")
}
return nil
}
for i, config := range configs {
if err := checkSecret(config.UserKey, "userKey"); err != nil {
return err
}
if err := checkSecret(config.Token, "token"); err != nil {
return err
}
if config.Retry != "" {
_, err := time.ParseDuration(config.Retry)
if err != nil {
return errors.New("invalid retry duration")
}
}
if config.Expire != "" {
_, err := time.ParseDuration(config.Expire)
if err != nil {
return errors.New("invalid expire duration")
}
}
pushoverConfigKey := fmt.Sprintf("%s/pushover/%d", key, i)
if err := configureHTTPConfigInStore(ctx, config.HTTPConfig, namespace, pushoverConfigKey, store); err != nil {
return err
}
}
return nil
}
// checkAlertmanagerRoutes verifies that the given route and all its children are semantically valid.
func checkAlertmanagerRoutes(r *monitoringv1alpha1.Route, receivers map[string]struct{}, topLevelRoute bool) error {
if r == nil {
return nil
}
if _, found := receivers[r.Receiver]; !found && (r.Receiver != "" || topLevelRoute) {
return errors.Errorf("receiver %q not found", r.Receiver)
}
children, err := r.ChildRoutes()
if err != nil {
return err
}
for i := range children {
if err := checkAlertmanagerRoutes(&children[i], receivers, false); err != nil {
return errors.Wrapf(err, "route[%d]", i)
}
}
return nil
}
// configureHTTPConfigInStore configures the asset store for HTTPConfigs.
func configureHTTPConfigInStore(ctx context.Context, httpConfig *monitoringv1alpha1.HTTPConfig, namespace string, key string, store *assets.Store) error {
if httpConfig == nil {
return nil
}
var err error
if httpConfig.BearerTokenSecret != nil {
if err = store.AddBearerToken(ctx, namespace, *httpConfig.BearerTokenSecret, key); err != nil {
return err
}
}
if err = store.AddBasicAuth(ctx, namespace, httpConfig.BasicAuth, key); err != nil {
return err
}
return store.AddSafeTLSConfig(ctx, namespace, httpConfig.TLSConfig)
}
func (c *Operator) createOrUpdateTLSAssetSecret(ctx context.Context, am *monitoringv1.Alertmanager, store *assets.Store) error {
boolTrue := true
sClient := c.kclient.CoreV1().Secrets(am.Namespace)
tlsAssetsSecret := &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: tlsAssetsSecretName(am.Name),
Labels: c.config.Labels.Merge(managedByOperatorLabels),
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: am.APIVersion,
BlockOwnerDeletion: &boolTrue,
Controller: &boolTrue,
Kind: am.Kind,
Name: am.Name,
UID: am.UID,
},
},
},
Data: make(map[string][]byte, len(store.TLSAssets)),
}
for key, asset := range store.TLSAssets {
tlsAssetsSecret.Data[key.String()] = []byte(asset)
}
err := k8sutil.CreateOrUpdateSecret(ctx, sClient, tlsAssetsSecret)
if err != nil {
return errors.Wrap(err, "failed to create TLS assets secret for Alertmanager")
}
return nil
}
// checkAlertmanagerSpecDeprecation checks for deprecated fields in the Alertmanager spec and logs a warning if applicable.
func checkAlertmanagerSpecDeprecation(key string, a *monitoringv1.Alertmanager, logger log.Logger) {
deprecationWarningf := "alertmanager key=%v, field %v is deprecated, '%v' field should be used instead"
if a.Spec.BaseImage != "" {
level.Warn(logger).Log("msg", fmt.Sprintf(deprecationWarningf, key, "spec.baseImage", "spec.image"))
}
if a.Spec.Tag != "" {
level.Warn(logger).Log("msg", fmt.Sprintf(deprecationWarningf, key, "spec.tag", "spec.image"))
}
if a.Spec.SHA != "" {
level.Warn(logger).Log("msg", fmt.Sprintf(deprecationWarningf, key, "spec.sha", "spec.image"))
}
}
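// ListOptions returns list options selecting the pods that belong to the
// Alertmanager with the given name.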
func ListOptions(name string) metav1.ListOptions {
return metav1.ListOptions{
LabelSelector: fields.SelectorFromSet(fields.Set(map[string]string{
"app.kubernetes.io/name": "alertmanager",
"alertmanager": name,
})).String(),
}
}
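// Status computes the status of the Alertmanager from its pods and governing
// StatefulSet and returns the pods that still run an outdated template.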
func Status(ctx context.Context, kclient kubernetes.Interface, a *monitoringv1.Alertmanager) (*monitoringv1.AlertmanagerStatus, []v1.Pod, error) {
res := &monitoringv1.AlertmanagerStatus{Paused: a.Spec.Paused}
pods, err := kclient.CoreV1().Pods(a.Namespace).List(ctx, ListOptions(a.Name))
if err != nil {
return nil, nil, errors.Wrap(err, "retrieving pods of failed")
}
sset, err := kclient.AppsV1().StatefulSets(a.Namespace).Get(ctx, statefulSetNameFromAlertmanagerName(a.Name), metav1.GetOptions{})
if err != nil {
return nil, nil, errors.Wrap(err, "retrieving stateful set failed")
}
res.Replicas = int32(len(pods.Items))
var oldPods []v1.Pod
for _, pod := range pods.Items {
ready, err := k8sutil.PodRunningAndReady(pod)
if err != nil {
return nil, nil, errors.Wrap(err, "cannot determine pod ready state")
}
if ready {
res.AvailableReplicas++
// TODO(fabxc): detect other fields of the pod template
// that are mutable.
if needsUpdate(&pod, sset.Spec.Template) {
oldPods = append(oldPods, pod)
} else {
res.UpdatedReplicas++
}
continue
}
res.UnavailableReplicas++
}
return res, oldPods, nil
}
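// needsUpdate reports whether the pod's first container differs from the
// StatefulSet template in image or arguments.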
func needsUpdate(pod *v1.Pod, tmpl v1.PodTemplateSpec) bool {
c1 := pod.Spec.Containers[0]
c2 := tmpl.Spec.Containers[0]
if c1.Image != c2.Image {
return true
}
if !reflect.DeepEqual(c1.Args, c2.Args) {
return true
}
return false
}
func tlsAssetsSecretName(name string) string {
return fmt.Sprintf("%s-tls-assets", prefixedName(name))
}
| 1 | 16,190 | Why listing only first error reason (`ErrStatus.Details.Causes[0].Message`)? | prometheus-operator-prometheus-operator | go |
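The review above asks why only the first failure cause is reported. Purely as a hedged sketch (the statusCauses helper is hypothetical and not part of the patch or the file above, and it assumes the apierrors and strings packages already used by the operator code), every cause attached to the StatusError could be joined before logging:
// statusCauses joins the messages of all failure causes carried by the
// StatusError, falling back to the top-level status message when no detailed
// causes are present.
func statusCauses(sErr *apierrors.StatusError) string {
if sErr.ErrStatus.Details == nil || len(sErr.ErrStatus.Details.Causes) == 0 {
return sErr.ErrStatus.Message
}
msgs := make([]string, 0, len(sErr.ErrStatus.Details.Causes))
for _, cause := range sErr.ErrStatus.Details.Causes {
msgs = append(msgs, cause.Message)
}
return strings.Join(msgs, ", ")
}
The logging call in the sync function could then pass statusCauses(sErr) instead of a single cause or the raw Details struct.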
@@ -77,6 +77,17 @@ public interface RewriteDataFiles extends SnapshotUpdate<RewriteDataFiles, Rewri
*/
String TARGET_FILE_SIZE_BYTES = "target-file-size-bytes";
+ /**
+ * If the compaction should use the sequence number of the snapshot at compaction start time for new data files,
+ * instead of using the sequence number of the newly produced snapshot.
+ * <p>
+ * This avoids commit conflicts with updates that add newer equality deletes at a higher sequence number.
+ * <p>
+ * Defaults to true.
+ */
+ String USE_STARTING_SEQUENCE_NUMBER = "use-starting-sequence-number";
+ boolean USE_STARTING_SEQUENCE_NUMBER_DEFAULT = true;
+
/**
* Choose BINPACK as a strategy for this rewrite operation
* @return this for method chaining | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg.actions;
import java.util.List;
import org.apache.iceberg.SortOrder;
import org.apache.iceberg.StructLike;
import org.apache.iceberg.expressions.Expression;
/**
* An action for rewriting data files according to a rewrite strategy.
* Generally used for optimizing the sizing and layout of data files within a table.
*/
public interface RewriteDataFiles extends SnapshotUpdate<RewriteDataFiles, RewriteDataFiles.Result> {
/**
* Enable committing groups of files (see max-file-group-size-bytes) prior to the entire rewrite completing.
* This will produce additional commits but allow for progress even if some groups fail to commit. This setting
* will not change the correctness of the rewrite operation as file groups can be compacted independently.
* <p>
* The default is false, which produces a single commit when the entire job has completed.
*/
String PARTIAL_PROGRESS_ENABLED = "partial-progress.enabled";
boolean PARTIAL_PROGRESS_ENABLED_DEFAULT = false;
/**
* The maximum number of Iceberg commits that this rewrite is allowed to produce if partial progress is enabled. This
* setting has no effect if partial progress is disabled.
*/
String PARTIAL_PROGRESS_MAX_COMMITS = "partial-progress.max-commits";
int PARTIAL_PROGRESS_MAX_COMMITS_DEFAULT = 10;
/**
* The entire rewrite operation is broken down into pieces based on partitioning and within partitions based
* on size into groups. These sub-units of the rewrite are referred to as file groups. The largest amount of data that
* should be compacted in a single group is controlled by {@link #MAX_FILE_GROUP_SIZE_BYTES}. This helps with
* breaking down the rewriting of very large partitions which may not be rewritable otherwise due to the resource
* constraints of the cluster. For example, a sort-based rewrite may not scale to terabyte-sized partitions; those
* partitions need to be worked on in small subsections to avoid exhaustion of resources.
* <p>
* When grouping files, the underlying rewrite strategy will use this value to limit the files which
* will be included in a single file group. A group will be processed by a single framework "action". For example,
* in Spark this means that each group would be rewritten in its own Spark action. A group will never contain files
* for multiple output partitions.
*/
String MAX_FILE_GROUP_SIZE_BYTES = "max-file-group-size-bytes";
long MAX_FILE_GROUP_SIZE_BYTES_DEFAULT = 1024L * 1024L * 1024L * 100L; // 100 Gigabytes
/**
* The max number of file groups to be simultaneously rewritten by the rewrite strategy. The structure and
* contents of the group is determined by the rewrite strategy. Each file group will be rewritten
* independently and asynchronously.
**/
String MAX_CONCURRENT_FILE_GROUP_REWRITES = "max-concurrent-file-group-rewrites";
int MAX_CONCURRENT_FILE_GROUP_REWRITES_DEFAULT = 1;
/**
* The output file size that this rewrite strategy will attempt to generate when rewriting files. By default this
* will use the "write.target-file-size-bytes value" in the table properties of the table being updated.
*/
String TARGET_FILE_SIZE_BYTES = "target-file-size-bytes";
/**
* Choose BINPACK as a strategy for this rewrite operation
* @return this for method chaining
*/
default RewriteDataFiles binPack() {
return this;
}
/**
* Choose SORT as a strategy for this rewrite operation using the table's sortOrder
* @return this for method chaining
*/
default RewriteDataFiles sort() {
throw new UnsupportedOperationException("SORT Rewrite Strategy not implemented for this framework");
}
/**
* Choose SORT as a strategy for this rewrite operation and manually specify the sortOrder to use
* @param sortOrder user defined sortOrder
* @return this for method chaining
*/
default RewriteDataFiles sort(SortOrder sortOrder) {
throw new UnsupportedOperationException("SORT Rewrite Strategy not implemented for this framework");
}
/**
* A user provided filter for determining which files will be considered by the rewrite strategy. This will be used
* in addition to whatever rules the rewrite strategy generates. For example this would be used for providing a
* restriction to only run rewrite on a specific partition.
*
* @param expression An iceberg expression used to determine which files will be considered for rewriting
* @return this for chaining
*/
RewriteDataFiles filter(Expression expression);
/**
* A map of file group information to the results of rewriting that file group. If the results are null then
* that particular file group failed. We should only have failed groups if partial progress is enabled; otherwise we
* will report a total failure for the job.
*/
interface Result {
List<FileGroupRewriteResult> rewriteResults();
default int addedDataFilesCount() {
return rewriteResults().stream().mapToInt(FileGroupRewriteResult::addedDataFilesCount).sum();
}
default int rewrittenDataFilesCount() {
return rewriteResults().stream().mapToInt(FileGroupRewriteResult::rewrittenDataFilesCount).sum();
}
}
/**
* For a particular file group, the number of files which are newly created and the number of files
* which were formerly part of the table but have been rewritten.
*/
interface FileGroupRewriteResult {
FileGroupInfo info();
int addedDataFilesCount();
int rewrittenDataFilesCount();
}
/**
* A description of a file group, when it was processed, and within which partition. For use
* tracking rewrite operations and for returning results.
*/
interface FileGroupInfo {
/**
* returns which file group this is out of the total set of file groups for this rewrite
*/
int globalIndex();
/**
* returns which file group this is out of the set of file groups for this partition
*/
int partitionIndex();
/**
* returns which partition this file group contains files from
*/
StructLike partition();
}
}
| 1 | 45,799 | Now that this is true, do we have to ignore it with V1 Tables? | apache-iceberg | java |
@@ -87,6 +87,12 @@ Blockly.Css.inject = function(hasCss, pathToMedia) {
// Strip off any trailing slash (either Unix or Windows).
Blockly.Css.mediaPath_ = pathToMedia.replace(/[\\\/]$/, '');
text = text.replace(/<<<PATH>>>/g, Blockly.Css.mediaPath_);
+ // Dynamically replace colours in the CSS text, in case they have
+ // been set at run-time injection.
+ for (var colourProperty in Blockly.Colours) {
+ if (!Blockly.Colours.hasOwnProperty(colourProperty)) continue;
+ text = text.replace('$colour_' + colourProperty, Blockly.Colours[colourProperty]);
+ }
// Inject CSS tag.
var cssNode = document.createElement('style');
document.head.appendChild(cssNode); | 1 | /**
* @license
* Visual Blocks Editor
*
* Copyright 2013 Google Inc.
* https://developers.google.com/blockly/
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @fileoverview Inject Blockly's CSS synchronously.
* @author [email protected] (Neil Fraser)
*/
'use strict';
goog.provide('Blockly.Css');
goog.require('Blockly.Colours');
goog.require('goog.userAgent');
/**
* List of cursors.
* @enum {string}
*/
Blockly.Css.Cursor = {
OPEN: 'handopen',
CLOSED: 'handclosed',
DELETE: 'handdelete'
};
/**
* Current cursor (cached value).
* @type {string}
* @private
*/
Blockly.Css.currentCursor_ = '';
/**
* Large stylesheet added by Blockly.Css.inject.
* @type {Element}
* @private
*/
Blockly.Css.styleSheet_ = null;
/**
* Path to media directory, with any trailing slash removed.
* @type {string}
* @private
*/
Blockly.Css.mediaPath_ = '';
/**
* Inject the CSS into the DOM. This is preferable over using a regular CSS
* file since:
* a) It loads synchronously and doesn't force a redraw later.
* b) It speeds up loading by not blocking on a separate HTTP transfer.
* c) The CSS content may be made dynamic depending on init options.
* @param {boolean} hasCss If false, don't inject CSS
* (providing CSS becomes the document's responsibility).
* @param {string} pathToMedia Path from page to the Blockly media directory.
*/
Blockly.Css.inject = function(hasCss, pathToMedia) {
// Only inject the CSS once.
if (Blockly.Css.styleSheet_) {
return;
}
// Placeholder for cursor rule. Must be first rule (index 0).
var text = '.blocklyDraggable {}\n';
if (hasCss) {
text += Blockly.Css.CONTENT.join('\n');
if (Blockly.FieldDate) {
text += Blockly.FieldDate.CSS.join('\n');
}
}
// Strip off any trailing slash (either Unix or Windows).
Blockly.Css.mediaPath_ = pathToMedia.replace(/[\\\/]$/, '');
text = text.replace(/<<<PATH>>>/g, Blockly.Css.mediaPath_);
// Inject CSS tag.
var cssNode = document.createElement('style');
document.head.appendChild(cssNode);
var cssTextNode = document.createTextNode(text);
cssNode.appendChild(cssTextNode);
Blockly.Css.styleSheet_ = cssNode.sheet;
Blockly.Css.setCursor(Blockly.Css.Cursor.OPEN);
};
/**
* Set the cursor to be displayed when over something draggable.
* @param {Blockly.Css.Cursor} cursor Enum.
*/
Blockly.Css.setCursor = function(cursor) {
if (goog.userAgent.MOBILE || goog.userAgent.ANDROID || goog.userAgent.IPAD) {
// Don't try to switch the mouse cursor on a mobile device.
// This is an optimization - since we almost never have cursors on mobile anyway.
return;
}
if (Blockly.Css.currentCursor_ == cursor) {
return;
}
Blockly.Css.currentCursor_ = cursor;
var url = 'url(' + Blockly.Css.mediaPath_ + '/' + cursor + '.cur), auto';
// There are potentially hundreds of draggable objects. Changing their style
// properties individually is too slow, so change the CSS rule instead.
var rule = '.blocklyDraggable {\n cursor: ' + url + ';\n}\n';
Blockly.Css.styleSheet_.deleteRule(0);
Blockly.Css.styleSheet_.insertRule(rule, 0);
// There is probably only one toolbox, so just change its style property.
var toolboxen = document.getElementsByClassName('blocklyToolboxDiv');
for (var i = 0, toolbox; toolbox = toolboxen[i]; i++) {
if (cursor == Blockly.Css.Cursor.DELETE) {
toolbox.style.cursor = url;
} else {
toolbox.style.cursor = '';
}
}
// Set cursor on the whole document, so that rapid movements
// don't result in cursor changing to an arrow momentarily.
var html = document.body.parentNode;
if (cursor == Blockly.Css.Cursor.OPEN) {
html.style.cursor = '';
} else {
html.style.cursor = url;
}
};
/**
* Array making up the CSS content for Blockly.
*/
Blockly.Css.CONTENT = [
'.blocklySvg {',
'background-color: ' + Blockly.Colours.workspace + ';',
'outline: none;',
'overflow: hidden;', /* IE overflows by default. */
'}',
/* Necessary to position the drag surface */
'.blocklyRelativeWrapper {',
'position: relative;',
'width: 100%;',
'height: 100%;',
'}',
'.blocklyWidgetDiv {',
'display: none;',
'position: absolute;',
'z-index: 999;',
'}',
'.blocklyWidgetDiv.fieldTextInput {',
'overflow: hidden;',
'border: 1px solid;',
'box-sizing: border-box;',
'transform-origin: 0 0;',
'-ms-transform-origin: 0 0;',
'-moz-transform-origin: 0 0;',
'-webkit-transform-origin: 0 0;',
'}',
'.blocklyNonSelectable {',
'user-select: none;',
'-moz-user-select: none;',
'-webkit-user-select: none;',
'-ms-user-select: none;',
'}',
'.blocklyTooltipDiv {',
'background-color: #ffffc7;',
'border: 1px solid #ddc;',
'box-shadow: 4px 4px 20px 1px rgba(0,0,0,.15);',
'color: #000;',
'display: none;',
'font-family: sans-serif;',
'font-size: 9pt;',
'opacity: 0.9;',
'padding: 2px;',
'position: absolute;',
'z-index: 1000;',
'}',
'.blocklyDragSurface {',
'display: none;',
'position: absolute;',
'top: 0;',
'left: 0;',
'right: 0;',
'bottom: 0;',
'overflow: visible !important;',
'z-index: 5000;', /* Always display on top */
'-webkit-backface-visibility: hidden;',
'backface-visibility: hidden;',
'-webkit-perspective: 1000;',
'perspective: 1000;',
'}',
'.blocklyResizeSE {',
'cursor: se-resize;',
'fill: #aaa;',
'}',
'.blocklyResizeSW {',
'cursor: sw-resize;',
'fill: #aaa;',
'}',
'.blocklyResizeLine {',
'stroke: #888;',
'stroke-width: 1;',
'}',
'.blocklyHighlightedConnectionPath {',
'fill: none;',
'stroke: #fc3;',
'stroke-width: 4px;',
'}',
'.blocklyPath {',
'stroke-width: 1px;',
'}',
'.blocklySelected>.blocklyPath {',
// 'stroke: #fc3;',
// 'stroke-width: 3px;',
'}',
'.blocklyDragging>.blocklyPath {',
'}',
'.blocklyDisabled>.blocklyPath {',
'fill-opacity: .5;',
'stroke-opacity: .5;',
'}',
'.blocklyText {',
'cursor: default;',
'fill: #fff;',
'font-family: sans-serif;',
'font-size: 11pt;',
'}',
'.blocklyNonEditableText>text {',
'pointer-events: none;',
'}',
'.blocklyNonEditableText>rect,',
'.blocklyEditableText>rect {',
'fill: #fff;',
'fill-opacity: .6;',
'}',
'.blocklyNonEditableText>text,',
'.blocklyEditableText>text {',
'fill: ' + Blockly.Colours.text + ';',
'}',
'.blocklyEditableText:hover>rect {',
'stroke: #fff;',
'stroke-width: 2;',
'}',
'.blocklyBubbleText {',
'fill:' + Blockly.Colours.text +';',
'}',
/*
Don't allow users to select text. It gets annoying when trying to
drag a block and selected text moves instead.
*/
'.blocklySvg text {',
'user-select: none;',
'-moz-user-select: none;',
'-webkit-user-select: none;',
'cursor: inherit;',
'}',
'.blocklyHidden {',
'display: none;',
'}',
'.blocklyFieldDropdown:not(.blocklyHidden) {',
'display: block;',
'}',
'.blocklyIconGroup {',
'cursor: default;',
'}',
'.blocklyIconGroup:not(:hover),',
'.blocklyIconGroupReadonly {',
'opacity: .6;',
'}',
'.blocklyIconShape {',
'fill: #00f;',
'stroke: #fff;',
'stroke-width: 1px;',
'}',
'.blocklyIconSymbol {',
'fill: #fff;',
'}',
'.blocklyMinimalBody {',
'margin: 0;',
'padding: 0;',
'}',
'.blocklyCommentTextarea {',
'background-color: #ffc;',
'border: 0;',
'margin: 0;',
'padding: 2px;',
'resize: none;',
'}',
'.blocklyHtmlInput {',
'border: none;',
'font-family: sans-serif;',
'height: 100%;',
'margin: 0;',
'outline: none;',
'padding: 2px 0;',
'width: 100%;',
'text-align: center;',
'}',
'.blocklyMainBackground {',
'stroke-width: 1;',
'stroke: #c6c6c6;', /* Equates to #ddd due to border being off-pixel. */
'}',
'.blocklyMutatorBackground {',
'fill: #fff;',
'stroke: #ddd;',
'stroke-width: 1;',
'}',
'.blocklyFlyoutBackground {',
'fill: #ddd;',
'fill-opacity: .8;',
'}',
'.blocklyScrollbarBackground {',
'opacity: 0;',
'}',
'.blocklyScrollbarKnob {',
'fill: #ccc;',
'}',
'.blocklyScrollbarBackground:hover+.blocklyScrollbarKnob,',
'.blocklyScrollbarKnob:hover {',
'fill: #bbb;',
'}',
'.blocklyZoom>image {',
'opacity: .4;',
'}',
'.blocklyZoom>image:hover {',
'opacity: .6;',
'}',
'.blocklyZoom>image:active {',
'opacity: .8;',
'}',
/* Darken flyout scrollbars due to being on a grey background. */
/* By contrast, workspace scrollbars are on a white background. */
'.blocklyFlyout .blocklyScrollbarKnob {',
'fill: #bbb;',
'}',
'.blocklyFlyout .blocklyScrollbarBackground:hover+.blocklyScrollbarKnob,',
'.blocklyFlyout .blocklyScrollbarKnob:hover {',
'fill: #aaa;',
'}',
'.blocklyInvalidInput {',
'background: #faa;',
'}',
'.blocklyAngleCircle {',
'stroke: #444;',
'stroke-width: 1;',
'fill: #ddd;',
'fill-opacity: .8;',
'}',
'.blocklyAngleMarks {',
'stroke: #444;',
'stroke-width: 1;',
'}',
'.blocklyAngleGauge {',
'fill: #f88;',
'fill-opacity: .8;',
'}',
'.blocklyAngleLine {',
'stroke: #f00;',
'stroke-width: 2;',
'stroke-linecap: round;',
'}',
'.blocklyContextMenu {',
'border-radius: 4px;',
'}',
'.blocklyDropdownMenu {',
'padding: 0 !important;',
'}',
/* Override the default Closure URL. */
'.blocklyWidgetDiv .goog-option-selected .goog-menuitem-checkbox,',
'.blocklyWidgetDiv .goog-option-selected .goog-menuitem-icon {',
'background: url(<<<PATH>>>/sprites.png) no-repeat -48px -16px !important;',
'}',
/* Category tree in Toolbox. */
'.blocklyToolboxDiv {',
'background-color: #ddd;',
'overflow-x: visible;',
'overflow-y: auto;',
'position: absolute;',
'}',
'.blocklyTreeRoot {',
'padding: 4px 0;',
'}',
'.blocklyTreeRoot:focus {',
'outline: none;',
'}',
'.blocklyTreeRow {',
'height: 22px;',
'line-height: 22px;',
'margin-bottom: 3px;',
'padding-right: 8px;',
'white-space: nowrap;',
'}',
'.blocklyHorizontalTree {',
'float: left;',
'margin: 1px 5px 8px 0px;',
'}',
'.blocklyHorizontalTreeRtl {',
'float: right;',
'margin: 1px 0px 8px 5px;',
'}',
'.blocklyToolboxDiv[dir="RTL"] .blocklyTreeRow {',
'margin-left: 8px;',
'}',
'.blocklyTreeRow:not(.blocklyTreeSelected):hover {',
'background-color: #e4e4e4;',
'}',
'.blocklyTreeSeparator {',
'border-bottom: solid #e5e5e5 1px;',
'height: 0px;',
'margin: 5px 0;',
'}',
'.blocklyTreeSeparatorHorizontal {',
'border-right: solid #e5e5e5 1px;',
'width: 0px;',
'padding: 5px 0;',
'margin: 0 5px;',
'}',
'.blocklyTreeIcon {',
'background-image: url(<<<PATH>>>/sprites.png);',
'height: 16px;',
'vertical-align: middle;',
'width: 16px;',
'}',
'.blocklyTreeIconClosedLtr {',
'background-position: -32px -1px;',
'}',
'.blocklyTreeIconClosedRtl {',
'background-position: 0px -1px;',
'}',
'.blocklyTreeIconOpen {',
'background-position: -16px -1px;',
'}',
'.blocklyTreeSelected>.blocklyTreeIconClosedLtr {',
'background-position: -32px -17px;',
'}',
'.blocklyTreeSelected>.blocklyTreeIconClosedRtl {',
'background-position: 0px -17px;',
'}',
'.blocklyTreeSelected>.blocklyTreeIconOpen {',
'background-position: -16px -17px;',
'}',
'.blocklyTreeIconNone,',
'.blocklyTreeSelected>.blocklyTreeIconNone {',
'background-position: -48px -1px;',
'}',
'.blocklyTreeLabel {',
'cursor: default;',
'font-family: sans-serif;',
'font-size: 16px;',
'padding: 0 3px;',
'vertical-align: middle;',
'}',
'.blocklyTreeSelected .blocklyTreeLabel {',
'color: #fff;',
'}',
/* Copied from: goog/css/colorpicker-simplegrid.css */
/*
* Copyright 2007 The Closure Library Authors. All Rights Reserved.
*
* Use of this source code is governed by the Apache License, Version 2.0.
* See the COPYING file for details.
*/
/* Author: [email protected] (Daniel Pupius) */
/*
Styles to make the colorpicker look like the old gmail color picker
NOTE: without CSS scoping this will override styles defined in palette.css
*/
'.blocklyWidgetDiv .goog-palette {',
'outline: none;',
'cursor: default;',
'}',
'.blocklyWidgetDiv .goog-palette-table {',
'border: 1px solid #666;',
'border-collapse: collapse;',
'}',
'.blocklyWidgetDiv .goog-palette-cell {',
'height: 13px;',
'width: 15px;',
'margin: 0;',
'border: 0;',
'text-align: center;',
'vertical-align: middle;',
'border-right: 1px solid #666;',
'font-size: 1px;',
'}',
'.blocklyWidgetDiv .goog-palette-colorswatch {',
'position: relative;',
'height: 13px;',
'width: 15px;',
'border: 1px solid #666;',
'}',
'.blocklyWidgetDiv .goog-palette-cell-hover .goog-palette-colorswatch {',
'border: 1px solid #FFF;',
'}',
'.blocklyWidgetDiv .goog-palette-cell-selected .goog-palette-colorswatch {',
'border: 1px solid #000;',
'color: #fff;',
'}',
/* Copied from: goog/css/menu.css */
/*
* Copyright 2009 The Closure Library Authors. All Rights Reserved.
*
* Use of this source code is governed by the Apache License, Version 2.0.
* See the COPYING file for details.
*/
/**
* Standard styling for menus created by goog.ui.MenuRenderer.
*
* @author [email protected] (Attila Bodis)
*/
'.blocklyWidgetDiv .goog-menu {',
'background: #fff;',
'border-color: #ccc #666 #666 #ccc;',
'border-style: solid;',
'border-width: 1px;',
'cursor: default;',
'font: normal 13px Arial, sans-serif;',
'margin: 0;',
'outline: none;',
'padding: 4px 0;',
'position: absolute;',
'overflow-y: auto;',
'overflow-x: hidden;',
'max-height: 100%;',
'z-index: 20000;', /* Arbitrary, but some apps depend on it... */
'}',
/* Copied from: goog/css/menuitem.css */
/*
* Copyright 2009 The Closure Library Authors. All Rights Reserved.
*
* Use of this source code is governed by the Apache License, Version 2.0.
* See the COPYING file for details.
*/
/**
* Standard styling for menus created by goog.ui.MenuItemRenderer.
*
* @author [email protected] (Attila Bodis)
*/
/**
* State: resting.
*
* NOTE(mleibman,chrishenry):
* The RTL support in Closure is provided via two mechanisms -- "rtl" CSS
* classes and BiDi flipping done by the CSS compiler. Closure supports RTL
* with or without the use of the CSS compiler. In order for them not
* to conflict with each other, the "rtl" CSS classes need to have the #noflip
* annotation. The non-rtl counterparts should ideally have them as well, but,
* since .goog-menuitem existed without .goog-menuitem-rtl for so long before
* being added, there is a risk of people having templates where they are not
* rendering the .goog-menuitem-rtl class when in RTL and instead rely solely
* on the BiDi flipping by the CSS compiler. That's why we're not adding the
* #noflip to .goog-menuitem.
*/
'.blocklyWidgetDiv .goog-menuitem {',
'color: #000;',
'font: normal 13px Arial, sans-serif;',
'list-style: none;',
'margin: 0;',
/* 28px on the left for icon or checkbox; 7em on the right for shortcut. */
'padding: 4px 7em 4px 28px;',
'white-space: nowrap;',
'}',
/* BiDi override for the resting state. */
/* #noflip */
'.blocklyWidgetDiv .goog-menuitem.goog-menuitem-rtl {',
/* Flip left/right padding for BiDi. */
'padding-left: 7em;',
'padding-right: 28px;',
'}',
/* If a menu doesn't have checkable items or items with icons, remove padding. */
'.blocklyWidgetDiv .goog-menu-nocheckbox .goog-menuitem,',
'.blocklyWidgetDiv .goog-menu-noicon .goog-menuitem {',
'padding-left: 12px;',
'}',
/*
* If a menu doesn't have items with shortcuts, leave just enough room for
* submenu arrows, if they are rendered.
*/
'.blocklyWidgetDiv .goog-menu-noaccel .goog-menuitem {',
'padding-right: 20px;',
'}',
'.blocklyWidgetDiv .goog-menuitem-content {',
'color: #000;',
'font: normal 13px Arial, sans-serif;',
'}',
/* State: disabled. */
'.blocklyWidgetDiv .goog-menuitem-disabled .goog-menuitem-accel,',
'.blocklyWidgetDiv .goog-menuitem-disabled .goog-menuitem-content {',
'color: #ccc !important;',
'}',
'.blocklyWidgetDiv .goog-menuitem-disabled .goog-menuitem-icon {',
'opacity: 0.3;',
'-moz-opacity: 0.3;',
'filter: alpha(opacity=30);',
'}',
/* State: hover. */
'.blocklyWidgetDiv .goog-menuitem-highlight,',
'.blocklyWidgetDiv .goog-menuitem-hover {',
'background-color: #d6e9f8;',
/* Use an explicit top and bottom border so that the selection is visible',
* in high contrast mode. */
'border-color: #d6e9f8;',
'border-style: dotted;',
'border-width: 1px 0;',
'padding-bottom: 3px;',
'padding-top: 3px;',
'}',
/* State: selected/checked. */
'.blocklyWidgetDiv .goog-menuitem-checkbox,',
'.blocklyWidgetDiv .goog-menuitem-icon {',
'background-repeat: no-repeat;',
'height: 16px;',
'left: 6px;',
'position: absolute;',
'right: auto;',
'vertical-align: middle;',
'width: 16px;',
'}',
/* BiDi override for the selected/checked state. */
/* #noflip */
'.blocklyWidgetDiv .goog-menuitem-rtl .goog-menuitem-checkbox,',
'.blocklyWidgetDiv .goog-menuitem-rtl .goog-menuitem-icon {',
/* Flip left/right positioning. */
'left: auto;',
'right: 6px;',
'}',
'.blocklyWidgetDiv .goog-option-selected .goog-menuitem-checkbox,',
'.blocklyWidgetDiv .goog-option-selected .goog-menuitem-icon {',
/* Client apps may override the URL at which they serve the sprite. */
'background: url(//ssl.gstatic.com/editor/editortoolbar.png) no-repeat -512px 0;',
'}',
/* Keyboard shortcut ("accelerator") style. */
'.blocklyWidgetDiv .goog-menuitem-accel {',
'color: #999;',
/* Keyboard shortcuts are untranslated; always left-to-right. */
/* #noflip */
'direction: ltr;',
'left: auto;',
'padding: 0 6px;',
'position: absolute;',
'right: 0;',
'text-align: right;',
'}',
/* BiDi override for shortcut style. */
/* #noflip */
'.blocklyWidgetDiv .goog-menuitem-rtl .goog-menuitem-accel {',
/* Flip left/right positioning and text alignment. */
'left: 0;',
'right: auto;',
'text-align: left;',
'}',
/* Mnemonic styles. */
'.blocklyWidgetDiv .goog-menuitem-mnemonic-hint {',
'text-decoration: underline;',
'}',
'.blocklyWidgetDiv .goog-menuitem-mnemonic-separator {',
'color: #999;',
'font-size: 12px;',
'padding-left: 4px;',
'}',
/* Copied from: goog/css/menuseparator.css */
/*
* Copyright 2009 The Closure Library Authors. All Rights Reserved.
*
* Use of this source code is governed by the Apache License, Version 2.0.
* See the COPYING file for details.
*/
/**
* Standard styling for menus created by goog.ui.MenuSeparatorRenderer.
*
* @author [email protected] (Attila Bodis)
*/
'.blocklyWidgetDiv .goog-menuseparator {',
'border-top: 1px solid #ccc;',
'margin: 4px 0;',
'padding: 0;',
'}',
''
];
| 1 | 7,648 | prefer if (condition) { stuff; } to if !(condition) { continue; } stuff | LLK-scratch-blocks | js |
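To illustrate the reviewer's point in the comment above: rather than guarding the loop body with a negated condition and continue, test the positive condition and nest the body inside it. The sketch below is hypothetical JavaScript written only for illustration; the items, isVisible, and render names are invented and do not come from the patch under review.

// Hypothetical data and helpers, invented for this illustration.
var items = [{visible: true}, {visible: false}];
function isVisible(item) { return item.visible; }
function render(item) { console.log('render', item); }
// Reviewer-preferred form: test the positive condition and keep the body inside it.
for (var i = 0; i < items.length; i++) {
  if (isVisible(items[i])) {
    render(items[i]);
  }
}
// Discouraged form: negated guard followed by continue.
for (var j = 0; j < items.length; j++) {
  if (!isVisible(items[j])) {
    continue;
  }
  render(items[j]);
}

Both loops do the same work; the first keeps the condition positive, which is what the comment asks for.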
@@ -1,11 +1,8 @@
-<h1><%= h(@title) %></h1>
-
-<% content_for :head do %>
-<%= auto_discovery_link_tag :atom, :action => 'georss', :display_name => @display_name, :tag => @tag %>
-<% end %>
-
-<p>
- <%= rss_link_to :action => 'georss', :display_name => @display_name, :tag => @tag %>
+<% content_for :heading do %>
+ <h1><%= h(@title) %></h1>
+ <span><%= raw(@description) %></span>
+ |
+ <%= rss_link_to :action => 'georss', :display_name => @display_name, :tag => @tag %>
| <%= link_to t('trace.trace_header.upload_trace'), :action => 'create' %>
<% if @tag %>
| <%= link_to t('trace.trace_header.see_all_traces'), :controller => 'trace', :action => 'list', :display_name => nil, :tag => nil, :page => nil %> | 1 | <h1><%= h(@title) %></h1>
<% content_for :head do %>
<%= auto_discovery_link_tag :atom, :action => 'georss', :display_name => @display_name, :tag => @tag %>
<% end %>
<p>
<%= rss_link_to :action => 'georss', :display_name => @display_name, :tag => @tag %>
| <%= link_to t('trace.trace_header.upload_trace'), :action => 'create' %>
<% if @tag %>
| <%= link_to t('trace.trace_header.see_all_traces'), :controller => 'trace', :action => 'list', :display_name => nil, :tag => nil, :page => nil %>
| <%= link_to t('trace.trace_header.see_your_traces'), :action => 'mine', :tag => nil, :page => nil %>
<% else %>
<% if @display_name %>
| <%= link_to t('trace.trace_header.see_all_traces'), :controller => 'trace', :action => 'list', :display_name => nil, :tag => nil, :page => nil %>
<% end %>
<%= unless_user(@target_user, :span) do %>
| <%= link_to t('trace.trace_header.see_your_traces'), :action => 'mine', :tag => nil, :page => nil %>
<% end %>
<% end %>
</p>
<% if @traces.size > 0 %>
<%= render :partial => 'trace_paging_nav' %>
<table id="trace_list" cellpadding="3">
<tr>
<th></th>
<th></th>
</tr>
<%= render :partial => 'trace', :collection => @traces unless @traces.nil? %>
</table>
<%= render :partial => 'trace_paging_nav' %>
<% else %>
<p><%= t 'trace.list.empty_html', :upload_link => trace_create_path %></p>
<% end %>
<%= render :partial => 'trace_optionals' %>
| 1 | 7,999 | Should this section not be converted to a secondary action list? | openstreetmap-openstreetmap-website | rb |
@@ -225,8 +225,17 @@ class CommandDispatcher:
count: The tab index to close, or None
"""
tab = self._cntwidget(count)
+ result = True
if tab is None:
return
+
+ if tab.pin is True:
+ result = message.ask("Are you sure you want to close a pinned tab?",
+ mode=usertypes.PromptMode.yesno, default=False)
+
+ if result is False or result is None:
+ return
+
tabbar = self._tabbed_browser.tabBar()
selection_override = self._get_selection_override(left, right,
opposite) | 1 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2016 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Command dispatcher for TabbedBrowser."""
import os
import os.path
import shlex
import functools
from PyQt5.QtWidgets import QApplication, QTabBar
from PyQt5.QtCore import Qt, QUrl, QEvent, QUrlQuery
from PyQt5.QtGui import QKeyEvent
from PyQt5.QtPrintSupport import QPrintDialog, QPrintPreviewDialog
try:
from PyQt5.QtWebKitWidgets import QWebPage
except ImportError:
QWebPage = None
try:
from PyQt5.QtWebEngineWidgets import QWebEnginePage
except ImportError:
QWebEnginePage = None
import pygments
import pygments.lexers
import pygments.formatters
from qutebrowser.commands import userscripts, cmdexc, cmdutils, runners
from qutebrowser.config import config, configexc
from qutebrowser.browser import (urlmarks, browsertab, inspector, navigate,
webelem)
from qutebrowser.browser.webkit import downloads
try:
from qutebrowser.browser.webkit import mhtml
except ImportError:
# Failing imports on QtWebEngine, only used in QtWebKit commands.
# FIXME:qtwebengine don't import this anymore at all
pass
from qutebrowser.keyinput import modeman
from qutebrowser.utils import (message, usertypes, log, qtutils, urlutils,
objreg, utils, typing)
from qutebrowser.utils.usertypes import KeyMode
from qutebrowser.misc import editor, guiprocess
from qutebrowser.completion.models import instances, sortfilter
class CommandDispatcher:
"""Command dispatcher for TabbedBrowser.
Contains all commands which are related to the current tab.
We can't simply add these commands to BrowserTab directly and use
currentWidget() for TabbedBrowser.cmd because at the time
cmdutils.register() decorators are run, currentWidget() will return None.
Attributes:
_editor: The ExternalEditor object.
_win_id: The window ID the CommandDispatcher is associated with.
_tabbed_browser: The TabbedBrowser used.
"""
def __init__(self, win_id, tabbed_browser):
self._win_id = win_id
self._tabbed_browser = tabbed_browser
def __repr__(self):
return utils.get_repr(self)
def _new_tabbed_browser(self):
"""Get a tabbed-browser from a new window."""
from qutebrowser.mainwindow import mainwindow
new_window = mainwindow.MainWindow()
new_window.show()
return new_window.tabbed_browser
def _count(self):
"""Convenience method to get the widget count."""
return self._tabbed_browser.count()
def _set_current_index(self, idx):
"""Convenience method to set the current widget index."""
cmdutils.check_overflow(idx, 'int')
self._tabbed_browser.setCurrentIndex(idx)
def _current_index(self):
"""Convenience method to get the current widget index."""
return self._tabbed_browser.currentIndex()
def _current_url(self):
"""Convenience method to get the current url."""
try:
return self._tabbed_browser.current_url()
except qtutils.QtValueError as e:
msg = "Current URL is invalid"
if e.reason:
msg += " ({})".format(e.reason)
msg += "!"
raise cmdexc.CommandError(msg)
def _current_title(self):
"""Convenience method to get the current title."""
return self._current_widget().title()
def _current_widget(self):
"""Get the currently active widget from a command."""
widget = self._tabbed_browser.currentWidget()
if widget is None:
raise cmdexc.CommandError("No WebView available yet!")
return widget
def _open(self, url, tab=False, background=False, window=False,
explicit=True):
"""Helper function to open a page.
Args:
url: The URL to open as QUrl.
tab: Whether to open in a new tab.
background: Whether to open in the background.
window: Whether to open in a new window
"""
urlutils.raise_cmdexc_if_invalid(url)
tabbed_browser = self._tabbed_browser
cmdutils.check_exclusive((tab, background, window), 'tbw')
if window:
tabbed_browser = self._new_tabbed_browser()
tabbed_browser.tabopen(url)
elif tab:
tabbed_browser.tabopen(url, background=False, explicit=explicit)
elif background:
tabbed_browser.tabopen(url, background=True, explicit=explicit)
else:
widget = self._current_widget()
widget.openurl(url)
def _cntwidget(self, count=None):
"""Return a widget based on a count/idx.
Args:
count: The tab index, or None.
Return:
The current widget if count is None.
The widget with the given tab ID if count is given.
None if no widget was found.
"""
if count is None:
return self._tabbed_browser.currentWidget()
elif 1 <= count <= self._count():
cmdutils.check_overflow(count + 1, 'int')
return self._tabbed_browser.widget(count - 1)
else:
return None
def _tab_focus_last(self):
"""Select the tab which was last focused."""
try:
tab = objreg.get('last-focused-tab', scope='window',
window=self._win_id)
except KeyError:
raise cmdexc.CommandError("No last focused tab!")
idx = self._tabbed_browser.indexOf(tab)
if idx == -1:
raise cmdexc.CommandError("Last focused tab vanished!")
self._set_current_index(idx)
def _get_selection_override(self, left, right, opposite):
"""Helper function for tab_close to get the tab to select.
Args:
left: Force selecting the tab to the left of the current tab.
right: Force selecting the tab to the right of the current tab.
opposite: Force selecting the tab in the opposite direction of
what's configured in 'tabs->select-on-remove'.
Return:
QTabBar.SelectLeftTab, QTabBar.SelectRightTab, or None if no change
should be made.
"""
cmdutils.check_exclusive((left, right, opposite), 'lro')
if left:
return QTabBar.SelectLeftTab
elif right:
return QTabBar.SelectRightTab
elif opposite:
conf_selection = config.get('tabs', 'select-on-remove')
if conf_selection == QTabBar.SelectLeftTab:
return QTabBar.SelectRightTab
elif conf_selection == QTabBar.SelectRightTab:
return QTabBar.SelectLeftTab
elif conf_selection == QTabBar.SelectPreviousTab:
raise cmdexc.CommandError(
"-o is not supported with 'tabs->select-on-remove' set to "
"'previous'!")
else: # pragma: no cover
raise ValueError("Invalid select-on-remove value "
"{!r}!".format(conf_selection))
return None
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
def tab_close(self, left=False, right=False, opposite=False, count=None):
"""Close the current/[count]th tab.
Args:
left: Force selecting the tab to the left of the current tab.
right: Force selecting the tab to the right of the current tab.
opposite: Force selecting the tab in the opposite direction of
what's configured in 'tabs->select-on-remove'.
count: The tab index to close, or None
"""
tab = self._cntwidget(count)
if tab is None:
return
tabbar = self._tabbed_browser.tabBar()
selection_override = self._get_selection_override(left, right,
opposite)
if selection_override is None:
self._tabbed_browser.close_tab(tab)
else:
old_selection_behavior = tabbar.selectionBehaviorOnRemove()
tabbar.setSelectionBehaviorOnRemove(selection_override)
self._tabbed_browser.close_tab(tab)
tabbar.setSelectionBehaviorOnRemove(old_selection_behavior)
@cmdutils.register(instance='command-dispatcher', name='open',
maxsplit=0, scope='window')
@cmdutils.argument('url', completion=usertypes.Completion.url)
@cmdutils.argument('count', count=True)
def openurl(self, url=None, implicit=False,
bg=False, tab=False, window=False, count=None):
"""Open a URL in the current/[count]th tab.
If the URL contains newlines, each line gets opened in its own tab.
Args:
url: The URL to open.
bg: Open in a new background tab.
tab: Open in a new tab.
window: Open in a new window.
implicit: If opening a new tab, treat the tab as implicit (like
clicking on a link).
count: The tab index to open the URL in, or None.
"""
if url is None:
if tab or bg or window:
urls = [config.get('general', 'default-page')]
else:
raise cmdexc.CommandError("No URL given, but -t/-b/-w is not "
"set!")
else:
urls = self._parse_url_input(url)
for i, cur_url in enumerate(urls):
if not window and i > 0:
tab = False
bg = True
if tab or bg or window:
self._open(cur_url, tab, bg, window, not implicit)
else:
curtab = self._cntwidget(count)
if curtab is None:
if count is None:
# We want to open a URL in the current tab, but none
# exists yet.
self._tabbed_browser.tabopen(cur_url)
else:
# Explicit count with a tab that doesn't exist.
return
else:
curtab.openurl(cur_url)
def _parse_url(self, url, *, force_search=False):
"""Parse a URL or quickmark or search query.
Args:
url: The URL to parse.
force_search: Whether to force a search even if the content can be
interpreted as a URL or a path.
Return:
A URL that can be opened.
"""
try:
return objreg.get('quickmark-manager').get(url)
except urlmarks.Error:
try:
return urlutils.fuzzy_url(url, force_search=force_search)
except urlutils.InvalidUrlError as e:
# We don't use cmdexc.CommandError here as this can be
# called async from edit_url
message.error(str(e))
return None
def _parse_url_input(self, url):
"""Parse a URL or newline-separated list of URLs.
Args:
url: The URL or list to parse.
Return:
A list of URLs that can be opened.
"""
force_search = False
urllist = [u for u in url.split('\n') if u.strip()]
if (len(urllist) > 1 and not urlutils.is_url(urllist[0]) and
urlutils.get_path_if_valid(urllist[0], check_exists=True)
is None):
urllist = [url]
force_search = True
for cur_url in urllist:
parsed = self._parse_url(cur_url, force_search=force_search)
if parsed is not None:
yield parsed
@cmdutils.register(instance='command-dispatcher', name='reload',
scope='window')
@cmdutils.argument('count', count=True)
def reloadpage(self, force=False, count=None):
"""Reload the current/[count]th tab.
Args:
count: The tab index to reload, or None.
force: Bypass the page cache.
"""
tab = self._cntwidget(count)
if tab is not None:
tab.reload(force=force)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
def stop(self, count=None):
"""Stop loading in the current/[count]th tab.
Args:
count: The tab index to stop, or None.
"""
tab = self._cntwidget(count)
if tab is not None:
tab.stop()
@cmdutils.register(instance='command-dispatcher', name='print',
scope='window')
@cmdutils.argument('count', count=True)
@cmdutils.argument('pdf', flag='f', metavar='file')
def printpage(self, preview=False, count=None, *, pdf=None):
"""Print the current/[count]th tab.
Args:
preview: Show preview instead of printing.
count: The tab index to print, or None.
pdf: The file path to write the PDF to.
"""
tab = self._cntwidget(count)
if tab is None:
return
try:
if pdf:
tab.printing.check_pdf_support()
else:
tab.printing.check_printer_support()
except browsertab.WebTabError as e:
raise cmdexc.CommandError(e)
if preview:
diag = QPrintPreviewDialog()
diag.setAttribute(Qt.WA_DeleteOnClose)
diag.setWindowFlags(diag.windowFlags() |
Qt.WindowMaximizeButtonHint |
Qt.WindowMinimizeButtonHint)
diag.paintRequested.connect(tab.printing.to_printer)
diag.exec_()
elif pdf:
pdf = os.path.expanduser(pdf)
directory = os.path.dirname(pdf)
if directory and not os.path.exists(directory):
os.mkdir(directory)
tab.printing.to_pdf(pdf)
log.misc.debug("Print to file: {}".format(pdf))
else:
diag = QPrintDialog()
diag.setAttribute(Qt.WA_DeleteOnClose)
diag.open(lambda: tab.printing.to_printer(diag.printer()))
@cmdutils.register(instance='command-dispatcher', scope='window')
def tab_clone(self, bg=False, window=False):
"""Duplicate the current tab.
Args:
bg: Open in a background tab.
window: Open in a new window.
Return:
The new QWebView.
"""
cmdutils.check_exclusive((bg, window), 'bw')
curtab = self._current_widget()
cur_title = self._tabbed_browser.page_title(self._current_index())
# The new tab could be in a new tabbed_browser (e.g. because of
# tabs-are-windows being set)
if window:
new_tabbed_browser = self._new_tabbed_browser()
else:
new_tabbed_browser = self._tabbed_browser
newtab = new_tabbed_browser.tabopen(background=bg, explicit=True)
new_tabbed_browser = objreg.get('tabbed-browser', scope='window',
window=newtab.win_id)
idx = new_tabbed_browser.indexOf(newtab)
new_tabbed_browser.set_page_title(idx, cur_title)
if config.get('tabs', 'show-favicons'):
new_tabbed_browser.setTabIcon(idx, curtab.icon())
if config.get('tabs', 'tabs-are-windows'):
new_tabbed_browser.window().setWindowIcon(curtab.icon())
newtab.data.keep_icon = True
newtab.history.deserialize(curtab.history.serialize())
newtab.zoom.set_factor(curtab.zoom.factor())
return newtab
@cmdutils.register(instance='command-dispatcher', scope='window')
def tab_detach(self):
"""Detach the current tab to its own window."""
if self._count() < 2:
raise cmdexc.CommandError("Cannot detach one tab.")
url = self._current_url()
self._open(url, window=True)
cur_widget = self._current_widget()
self._tabbed_browser.close_tab(cur_widget, add_undo=False)
def _back_forward(self, tab, bg, window, count, forward):
"""Helper function for :back/:forward."""
history = self._current_widget().history
# Catch common cases before e.g. cloning tab
if not forward and not history.can_go_back():
raise cmdexc.CommandError("At beginning of history.")
elif forward and not history.can_go_forward():
raise cmdexc.CommandError("At end of history.")
if tab or bg or window:
widget = self.tab_clone(bg, window)
else:
widget = self._current_widget()
for _ in range(count):
if forward:
if not widget.history.can_go_forward():
raise cmdexc.CommandError("At end of history.")
widget.history.forward()
else:
if not widget.history.can_go_back():
raise cmdexc.CommandError("At beginning of history.")
widget.history.back()
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
def back(self, tab=False, bg=False, window=False, count=1):
"""Go back in the history of the current tab.
Args:
tab: Go back in a new tab.
bg: Go back in a background tab.
window: Go back in a new window.
count: How many pages to go back.
"""
self._back_forward(tab, bg, window, count, forward=False)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
def forward(self, tab=False, bg=False, window=False, count=1):
"""Go forward in the history of the current tab.
Args:
tab: Go forward in a new tab.
bg: Go forward in a background tab.
window: Go forward in a new window.
count: How many pages to go forward.
"""
self._back_forward(tab, bg, window, count, forward=True)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('where', choices=['prev', 'next', 'up', 'increment',
'decrement'])
@cmdutils.argument('count', count=True)
def navigate(self, where: str, tab=False, bg=False, window=False, count=1):
"""Open typical prev/next links or navigate using the URL path.
This tries to automatically click on typical _Previous Page_ or
_Next Page_ links using some heuristics.
Alternatively it can navigate by changing the current URL.
Args:
where: What to open.
- `prev`: Open a _previous_ link.
- `next`: Open a _next_ link.
- `up`: Go up a level in the current URL.
- `increment`: Increment the last number in the URL.
- `decrement`: Decrement the last number in the URL.
tab: Open in a new tab.
bg: Open in a background tab.
window: Open in a new window.
count: For `increment` and `decrement`, the number to change the
URL by. For `up`, the number of levels to go up in the URL.
"""
# save the pre-jump position in the special ' mark
self.set_mark("'")
cmdutils.check_exclusive((tab, bg, window), 'tbw')
widget = self._current_widget()
url = self._current_url().adjusted(QUrl.RemoveFragment)
handlers = {
'prev': functools.partial(navigate.prevnext, prev=True),
'next': functools.partial(navigate.prevnext, prev=False),
'up': navigate.path_up,
'decrement': functools.partial(navigate.incdec,
inc_or_dec='decrement'),
'increment': functools.partial(navigate.incdec,
inc_or_dec='increment'),
}
try:
if where in ['prev', 'next']:
handler = handlers[where]
handler(browsertab=widget, win_id=self._win_id, baseurl=url,
tab=tab, background=bg, window=window)
elif where in ['up', 'increment', 'decrement']:
new_url = handlers[where](url, count)
self._open(new_url, tab, bg, window)
else: # pragma: no cover
raise ValueError("Got called with invalid value {} for "
"`where'.".format(where))
except navigate.Error as e:
raise cmdexc.CommandError(e)
@cmdutils.register(instance='command-dispatcher', hide=True,
scope='window')
@cmdutils.argument('count', count=True)
def scroll_px(self, dx: int, dy: int, count=1):
"""Scroll the current tab by 'count * dx/dy' pixels.
Args:
dx: How much to scroll in x-direction.
dy: How much to scroll in y-direction.
count: multiplier
"""
dx *= count
dy *= count
cmdutils.check_overflow(dx, 'int')
cmdutils.check_overflow(dy, 'int')
self._current_widget().scroller.delta(dx, dy)
@cmdutils.register(instance='command-dispatcher', hide=True,
scope='window')
@cmdutils.argument('count', count=True)
def scroll(self, direction: typing.Union[str, int], count=1):
"""Scroll the current tab in the given direction.
Args:
direction: In which direction to scroll
(up/down/left/right/top/bottom).
count: multiplier
"""
tab = self._current_widget()
funcs = {
'up': tab.scroller.up,
'down': tab.scroller.down,
'left': tab.scroller.left,
'right': tab.scroller.right,
'top': tab.scroller.top,
'bottom': tab.scroller.bottom,
'page-up': tab.scroller.page_up,
'page-down': tab.scroller.page_down,
}
try:
func = funcs[direction]
except KeyError:
expected_values = ', '.join(sorted(funcs))
raise cmdexc.CommandError("Invalid value {!r} for direction - "
"expected one of: {}".format(
direction, expected_values))
if direction in ['top', 'bottom']:
func()
else:
func(count=count)
@cmdutils.register(instance='command-dispatcher', hide=True,
scope='window')
@cmdutils.argument('count', count=True)
@cmdutils.argument('horizontal', flag='x')
def scroll_perc(self, perc: float=None, horizontal=False, count=None):
"""Scroll to a specific percentage of the page.
The percentage can be given either as argument or as count.
If no percentage is given, the page is scrolled to the end.
Args:
perc: Percentage to scroll.
horizontal: Scroll horizontally instead of vertically.
count: Percentage to scroll.
"""
# save the pre-jump position in the special ' mark
self.set_mark("'")
if perc is None and count is None:
perc = 100
elif count is not None:
perc = count
if horizontal:
x = perc
y = None
else:
x = None
y = perc
self._current_widget().scroller.to_perc(x, y)
@cmdutils.register(instance='command-dispatcher', hide=True,
scope='window')
@cmdutils.argument('count', count=True)
@cmdutils.argument('top_navigate', metavar='ACTION',
choices=('prev', 'decrement'))
@cmdutils.argument('bottom_navigate', metavar='ACTION',
choices=('next', 'increment'))
def scroll_page(self, x: float, y: float, *,
top_navigate: str=None, bottom_navigate: str=None,
count=1):
"""Scroll the frame page-wise.
Args:
x: How many pages to scroll to the right.
y: How many pages to scroll down.
bottom_navigate: :navigate action (next, increment) to run when
scrolling down at the bottom of the page.
top_navigate: :navigate action (prev, decrement) to run when
scrolling up at the top of the page.
count: multiplier
"""
tab = self._current_widget()
if not tab.url().isValid():
# See https://github.com/The-Compiler/qutebrowser/issues/701
return
if bottom_navigate is not None and tab.scroller.at_bottom():
self.navigate(bottom_navigate)
return
elif top_navigate is not None and tab.scroller.at_top():
self.navigate(top_navigate)
return
try:
tab.scroller.delta_page(count * x, count * y)
except OverflowError:
raise cmdexc.CommandError(
"Numeric argument is too large for internal int "
"representation.")
def _yank_url(self, what):
"""Helper method for yank() to get the URL to copy."""
assert what in ['url', 'pretty-url'], what
flags = QUrl.RemovePassword
if what == 'pretty-url':
flags |= QUrl.DecodeReserved
else:
flags |= QUrl.FullyEncoded
url = QUrl(self._current_url())
url_query = QUrlQuery()
url_query_str = url.query()
if '&' not in url_query_str and ';' in url_query_str:
url_query.setQueryDelimiters('=', ';')
url_query.setQuery(url_query_str)
for key in dict(url_query.queryItems()):
if key in config.get('general', 'yank-ignored-url-parameters'):
url_query.removeQueryItem(key)
url.setQuery(url_query)
return url.toString(flags)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('what', choices=['selection', 'url', 'pretty-url',
'title', 'domain'])
def yank(self, what='url', sel=False, keep=False):
"""Yank something to the clipboard or primary selection.
Args:
what: What to yank.
- `url`: The current URL.
- `pretty-url`: The URL in pretty decoded form.
- `title`: The current page's title.
- `domain`: The current scheme, domain, and port number.
- `selection`: The selection under the cursor.
sel: Use the primary selection instead of the clipboard.
keep: Stay in visual mode after yanking the selection.
"""
if what == 'title':
s = self._tabbed_browser.page_title(self._current_index())
elif what == 'domain':
port = self._current_url().port()
s = '{}://{}{}'.format(self._current_url().scheme(),
self._current_url().host(),
':' + str(port) if port > -1 else '')
elif what in ['url', 'pretty-url']:
s = self._yank_url(what)
what = 'URL' # For printing
elif what == 'selection':
caret = self._current_widget().caret
s = caret.selection()
if not caret.has_selection() or not s:
message.info("Nothing to yank")
return
else: # pragma: no cover
raise ValueError("Invalid value {!r} for `what'.".format(what))
if sel and utils.supports_selection():
target = "primary selection"
else:
sel = False
target = "clipboard"
utils.set_clipboard(s, selection=sel)
if what != 'selection':
message.info("Yanked {} to {}: {}".format(what, target, s))
else:
message.info("{} {} yanked to {}".format(
len(s), "char" if len(s) == 1 else "chars", target))
if not keep:
modeman.maybe_leave(self._win_id, KeyMode.caret,
"yank selected")
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
def zoom_in(self, count=1):
"""Increase the zoom level for the current tab.
Args:
count: How many steps to zoom in.
"""
tab = self._current_widget()
try:
perc = tab.zoom.offset(count)
except ValueError as e:
raise cmdexc.CommandError(e)
message.info("Zoom level: {}%".format(perc))
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
def zoom_out(self, count=1):
"""Decrease the zoom level for the current tab.
Args:
count: How many steps to zoom out.
"""
tab = self._current_widget()
try:
perc = tab.zoom.offset(-count)
except ValueError as e:
raise cmdexc.CommandError(e)
message.info("Zoom level: {}%".format(perc))
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
def zoom(self, zoom: int=None, count=None):
"""Set the zoom level for the current tab.
The zoom can be given as argument or as [count]. If neither is
given, the zoom is set to the default zoom. If both are given,
use [count].
Args:
zoom: The zoom percentage to set.
count: The zoom percentage to set.
"""
level = count if count is not None else zoom
if level is None:
level = config.get('ui', 'default-zoom')
tab = self._current_widget()
try:
tab.zoom.set_factor(float(level) / 100)
except ValueError:
raise cmdexc.CommandError("Can't zoom {}%!".format(level))
message.info("Zoom level: {}%".format(level))
@cmdutils.register(instance='command-dispatcher', scope='window')
def tab_only(self, left=False, right=False):
"""Close all tabs except for the current one.
Args:
left: Keep tabs to the left of the current.
right: Keep tabs to the right of the current.
"""
cmdutils.check_exclusive((left, right), 'lr')
cur_idx = self._tabbed_browser.currentIndex()
assert cur_idx != -1
for i, tab in enumerate(self._tabbed_browser.widgets()):
if (i == cur_idx or (left and i < cur_idx) or
(right and i > cur_idx)):
continue
else:
self._tabbed_browser.close_tab(tab)
@cmdutils.register(instance='command-dispatcher', scope='window')
def undo(self):
"""Re-open a closed tab (optionally skipping [count] closed tabs)."""
try:
self._tabbed_browser.undo()
except IndexError:
raise cmdexc.CommandError("Nothing to undo!")
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
def tab_prev(self, count=1):
"""Switch to the previous tab, or switch [count] tabs back.
Args:
count: How many tabs to switch back.
"""
if self._count() == 0:
# Running :tab-prev after last tab was closed
# See https://github.com/The-Compiler/qutebrowser/issues/1448
return
newidx = self._current_index() - count
if newidx >= 0:
self._set_current_index(newidx)
elif config.get('tabs', 'wrap'):
self._set_current_index(newidx % self._count())
else:
raise cmdexc.CommandError("First tab")
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
def tab_next(self, count=1):
"""Switch to the next tab, or switch [count] tabs forward.
Args:
count: How many tabs to switch forward.
"""
if self._count() == 0:
# Running :tab-next after last tab was closed
# See https://github.com/The-Compiler/qutebrowser/issues/1448
return
newidx = self._current_index() + count
if newidx < self._count():
self._set_current_index(newidx)
elif config.get('tabs', 'wrap'):
self._set_current_index(newidx % self._count())
else:
raise cmdexc.CommandError("Last tab")
@cmdutils.register(instance='command-dispatcher', scope='window',
deprecated="Use :open {clipboard}")
def paste(self, sel=False, tab=False, bg=False, window=False):
"""Open a page from the clipboard.
If the pasted text contains newlines, each line gets opened in its own
tab.
Args:
sel: Use the primary selection instead of the clipboard.
tab: Open in a new tab.
bg: Open in a background tab.
window: Open in new window.
"""
force_search = False
if not utils.supports_selection():
sel = False
try:
text = utils.get_clipboard(selection=sel)
except utils.ClipboardError as e:
raise cmdexc.CommandError(e)
text_urls = [u for u in text.split('\n') if u.strip()]
if (len(text_urls) > 1 and not urlutils.is_url(text_urls[0]) and
urlutils.get_path_if_valid(
text_urls[0], check_exists=True) is None):
force_search = True
text_urls = [text]
for i, text_url in enumerate(text_urls):
if not window and i > 0:
tab = False
bg = True
try:
url = urlutils.fuzzy_url(text_url, force_search=force_search)
except urlutils.InvalidUrlError as e:
raise cmdexc.CommandError(e)
self._open(url, tab, bg, window)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('index', completion=usertypes.Completion.tab)
def buffer(self, index):
"""Select tab by index or url/title best match.
Focuses window if necessary.
Args:
index: The [win_id/]index of the tab to focus. Or a substring
in which case the closest match will be focused.
"""
index_parts = index.split('/', 1)
try:
for part in index_parts:
int(part)
except ValueError:
model = instances.get(usertypes.Completion.tab)
sf = sortfilter.CompletionFilterModel(source=model)
sf.set_pattern(index)
if sf.count() > 0:
index = sf.data(sf.first_item())
index_parts = index.split('/', 1)
else:
raise cmdexc.CommandError(
"No matching tab for: {}".format(index))
if len(index_parts) == 2:
win_id = int(index_parts[0])
idx = int(index_parts[1])
elif len(index_parts) == 1:
idx = int(index_parts[0])
active_win = objreg.get('app').activeWindow()
if active_win is None:
# Not sure how you enter a command without an active window...
raise cmdexc.CommandError(
"No window specified and couldn't find active window!")
win_id = active_win.win_id
if win_id not in objreg.window_registry:
raise cmdexc.CommandError(
"There's no window with id {}!".format(win_id))
tabbed_browser = objreg.get('tabbed-browser', scope='window',
window=win_id)
if not 0 < idx <= tabbed_browser.count():
raise cmdexc.CommandError(
"There's no tab with index {}!".format(idx))
window = objreg.window_registry[win_id]
window.activateWindow()
window.raise_()
tabbed_browser.setCurrentIndex(idx-1)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('index', choices=['last'])
@cmdutils.argument('count', count=True, zero_count=True)
def tab_focus(self, index: typing.Union[str, int]=None, count=None):
"""Select the tab given as argument/[count].
        If neither count nor index is given, it behaves like tab-next.
If both are given, use count.
Args:
index: The tab index to focus, starting with 1. The special value
`last` focuses the last focused tab (regardless of count).
Negative indices count from the end, such that -1 is the
last tab.
count: The tab index to focus, starting with 1.
The special value 0 focuses the rightmost tab.
"""
if index == 'last':
self._tab_focus_last()
return
index = count if count is not None else index
if index is None:
self.tab_next()
return
elif index == 0:
index = self._count()
elif index < 0:
index = self._count() + index + 1
if 1 <= index <= self._count():
self._set_current_index(index - 1)
else:
raise cmdexc.CommandError("There's no tab with index {}!".format(
index))
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('index', choices=['+', '-'])
@cmdutils.argument('count', count=True)
def tab_move(self, index: typing.Union[str, int]=None, count=None):
"""Move the current tab according to the argument and [count].
If neither is given, move it to the first position.
Args:
index: `+` or `-` to move relative to the current tab by
count, or a default of 1 space.
A tab index to move to that index.
count: If moving relatively: Offset.
If moving absolutely: New position (default: 0). This
overrides the index argument, if given.
"""
if index in ['+', '-']:
# relative moving
new_idx = self._current_index()
delta = 1 if count is None else count
if index == '-':
new_idx -= delta
elif index == '+': # pragma: no branch
new_idx += delta
if config.get('tabs', 'wrap'):
new_idx %= self._count()
else:
# absolute moving
if count is not None:
new_idx = count - 1
elif index is not None:
new_idx = index - 1 if index >= 0 else index + self._count()
else:
new_idx = 0
if not 0 <= new_idx < self._count():
raise cmdexc.CommandError("Can't move tab to position {}!".format(
new_idx + 1))
tab = self._current_widget()
cur_idx = self._current_index()
icon = self._tabbed_browser.tabIcon(cur_idx)
label = self._tabbed_browser.page_title(cur_idx)
cmdutils.check_overflow(cur_idx, 'int')
cmdutils.check_overflow(new_idx, 'int')
self._tabbed_browser.setUpdatesEnabled(False)
try:
color = self._tabbed_browser.tabBar().tab_data(
cur_idx, 'indicator-color')
self._tabbed_browser.removeTab(cur_idx)
self._tabbed_browser.insertTab(new_idx, tab, icon, label)
self._set_current_index(new_idx)
self._tabbed_browser.set_tab_indicator_color(new_idx, color)
finally:
self._tabbed_browser.setUpdatesEnabled(True)
@cmdutils.register(instance='command-dispatcher', scope='window',
maxsplit=0, no_replace_variables=True)
def spawn(self, cmdline, userscript=False, verbose=False, detach=False):
"""Spawn a command in a shell.
Args:
userscript: Run the command as a userscript. You can use an
absolute path, or store the userscript in one of those
locations:
- `~/.local/share/qutebrowser/userscripts`
(or `$XDG_DATA_DIR`)
- `/usr/share/qutebrowser/userscripts`
verbose: Show notifications when the command started/exited.
detach: Whether the command should be detached from qutebrowser.
cmdline: The commandline to execute.
"""
try:
cmd, *args = shlex.split(cmdline)
except ValueError as e:
raise cmdexc.CommandError("Error while splitting command: "
"{}".format(e))
args = runners.replace_variables(self._win_id, args)
log.procs.debug("Executing {} with args {}, userscript={}".format(
cmd, args, userscript))
if userscript:
# ~ expansion is handled by the userscript module.
self._run_userscript(cmd, *args, verbose=verbose)
else:
cmd = os.path.expanduser(cmd)
proc = guiprocess.GUIProcess(what='command', verbose=verbose,
parent=self._tabbed_browser)
if detach:
proc.start_detached(cmd, args)
else:
proc.start(cmd, args)
@cmdutils.register(instance='command-dispatcher', scope='window')
def home(self):
"""Open main startpage in current tab."""
self.openurl(config.get('general', 'startpage')[0])
def _run_userscript(self, cmd, *args, verbose=False):
"""Run a userscript given as argument.
Args:
cmd: The userscript to run.
args: Arguments to pass to the userscript.
verbose: Show notifications when the command started/exited.
"""
env = {
'QUTE_MODE': 'command',
}
idx = self._current_index()
if idx != -1:
env['QUTE_TITLE'] = self._tabbed_browser.page_title(idx)
tab = self._tabbed_browser.currentWidget()
if tab is not None and tab.caret.has_selection():
env['QUTE_SELECTED_TEXT'] = tab.caret.selection()
try:
env['QUTE_SELECTED_HTML'] = tab.caret.selection(html=True)
except browsertab.UnsupportedOperationError:
pass
# FIXME:qtwebengine: If tab is None, run_async will fail!
try:
url = self._tabbed_browser.current_url()
except qtutils.QtValueError:
pass
else:
env['QUTE_URL'] = url.toString(QUrl.FullyEncoded)
try:
userscripts.run_async(tab, cmd, *args, win_id=self._win_id,
env=env, verbose=verbose)
except userscripts.Error as e:
raise cmdexc.CommandError(e)
@cmdutils.register(instance='command-dispatcher', scope='window')
def quickmark_save(self):
"""Save the current page as a quickmark."""
quickmark_manager = objreg.get('quickmark-manager')
quickmark_manager.prompt_save(self._current_url())
@cmdutils.register(instance='command-dispatcher', scope='window',
maxsplit=0)
@cmdutils.argument('name',
completion=usertypes.Completion.quickmark_by_name)
def quickmark_load(self, name, tab=False, bg=False, window=False):
"""Load a quickmark.
Args:
name: The name of the quickmark to load.
tab: Load the quickmark in a new tab.
bg: Load the quickmark in a new background tab.
window: Load the quickmark in a new window.
"""
try:
url = objreg.get('quickmark-manager').get(name)
except urlmarks.Error as e:
raise cmdexc.CommandError(str(e))
self._open(url, tab, bg, window)
@cmdutils.register(instance='command-dispatcher', scope='window',
maxsplit=0)
@cmdutils.argument('name',
completion=usertypes.Completion.quickmark_by_name)
def quickmark_del(self, name=None):
"""Delete a quickmark.
Args:
name: The name of the quickmark to delete. If not given, delete the
quickmark for the current page (choosing one arbitrarily
if there are more than one).
"""
quickmark_manager = objreg.get('quickmark-manager')
if name is None:
url = self._current_url()
try:
name = quickmark_manager.get_by_qurl(url)
except urlmarks.DoesNotExistError as e:
raise cmdexc.CommandError(str(e))
try:
quickmark_manager.delete(name)
except KeyError:
raise cmdexc.CommandError("Quickmark '{}' not found!".format(name))
@cmdutils.register(instance='command-dispatcher', scope='window')
def bookmark_add(self, url=None, title=None, toggle=False):
"""Save the current page as a bookmark, or a specific url.
If no url and title are provided, then save the current page as a
bookmark.
If a url and title have been provided, then save the given url as
a bookmark with the provided title.
You can view all saved bookmarks on the
link:qute://bookmarks[bookmarks page].
Args:
url: url to save as a bookmark. If None, use url of current page.
title: title of the new bookmark.
toggle: remove the bookmark instead of raising an error if it
already exists.
"""
if url and not title:
raise cmdexc.CommandError('Title must be provided if url has '
'been provided')
bookmark_manager = objreg.get('bookmark-manager')
if url is None:
url = self._current_url()
else:
try:
url = urlutils.fuzzy_url(url)
except urlutils.InvalidUrlError as e:
raise cmdexc.CommandError(e)
if not title:
title = self._current_title()
try:
was_added = bookmark_manager.add(url, title, toggle=toggle)
except urlmarks.Error as e:
raise cmdexc.CommandError(str(e))
else:
msg = "Bookmarked {}!" if was_added else "Removed bookmark {}!"
message.info(msg.format(url.toDisplayString()))
@cmdutils.register(instance='command-dispatcher', scope='window',
maxsplit=0)
@cmdutils.argument('url', completion=usertypes.Completion.bookmark_by_url)
def bookmark_load(self, url, tab=False, bg=False, window=False,
delete=False):
"""Load a bookmark.
Args:
url: The url of the bookmark to load.
tab: Load the bookmark in a new tab.
bg: Load the bookmark in a new background tab.
window: Load the bookmark in a new window.
delete: Whether to delete the bookmark afterwards.
"""
try:
qurl = urlutils.fuzzy_url(url)
except urlutils.InvalidUrlError as e:
raise cmdexc.CommandError(e)
self._open(qurl, tab, bg, window)
if delete:
self.bookmark_del(url)
@cmdutils.register(instance='command-dispatcher', scope='window',
maxsplit=0)
@cmdutils.argument('url', completion=usertypes.Completion.bookmark_by_url)
def bookmark_del(self, url=None):
"""Delete a bookmark.
Args:
url: The url of the bookmark to delete. If not given, use the
current page's url.
"""
if url is None:
url = self._current_url().toString(QUrl.RemovePassword |
QUrl.FullyEncoded)
try:
objreg.get('bookmark-manager').delete(url)
except KeyError:
raise cmdexc.CommandError("Bookmark '{}' not found!".format(url))
@cmdutils.register(instance='command-dispatcher', hide=True,
scope='window')
def follow_selected(self, *, tab=False):
"""Follow the selected text.
Args:
tab: Load the selected link in a new tab.
"""
try:
self._current_widget().caret.follow_selected(tab=tab)
except browsertab.WebTabError as e:
raise cmdexc.CommandError(str(e))
@cmdutils.register(instance='command-dispatcher', name='inspector',
scope='window')
def toggle_inspector(self):
"""Toggle the web inspector.
        Note: Due to a bug in Qt, the inspector will show incorrect request
headers in the network tab.
"""
tab = self._current_widget()
# FIXME:qtwebengine have a proper API for this
page = tab._widget.page() # pylint: disable=protected-access
try:
if tab.data.inspector is None:
tab.data.inspector = inspector.create()
tab.data.inspector.inspect(page)
else:
tab.data.inspector.toggle(page)
except inspector.WebInspectorError as e:
raise cmdexc.CommandError(e)
@cmdutils.register(instance='command-dispatcher', scope='window',
backend=usertypes.Backend.QtWebKit)
@cmdutils.argument('dest_old', hide=True)
def download(self, url=None, dest_old=None, *, mhtml_=False, dest=None):
"""Download a given URL, or current page if no URL given.
The form `:download [url] [dest]` is deprecated, use `:download --dest
[dest] [url]` instead.
Args:
url: The URL to download. If not given, download the current page.
dest_old: (deprecated) Same as dest.
dest: The file path to write the download to, or None to ask.
mhtml_: Download the current page and all assets as mhtml file.
"""
if dest_old is not None:
message.warning(":download [url] [dest] is deprecated - use "
":download --dest [dest] [url]")
if dest is not None:
raise cmdexc.CommandError("Can't give two destinations for the"
" download.")
dest = dest_old
download_manager = objreg.get('download-manager', scope='window',
window=self._win_id)
if url:
if mhtml_:
raise cmdexc.CommandError("Can only download the current page"
" as mhtml.")
url = urlutils.qurl_from_user_input(url)
urlutils.raise_cmdexc_if_invalid(url)
if dest is None:
target = None
else:
target = usertypes.FileDownloadTarget(dest)
download_manager.get(url, target=target)
elif mhtml_:
self._download_mhtml(dest)
else:
tab = self._current_widget()
# FIXME:qtwebengine have a proper API for this
# pylint: disable=protected-access
qnam = tab._widget.page().networkAccessManager()
# pylint: enable=protected-access
if dest is None:
target = None
else:
target = usertypes.FileDownloadTarget(dest)
download_manager.get(self._current_url(), qnam=qnam, target=target)
def _download_mhtml(self, dest=None):
"""Download the current page as an MHTML file, including all assets.
Args:
dest: The file path to write the download to.
"""
tab = self._current_widget()
if dest is None:
suggested_fn = self._current_title() + ".mht"
suggested_fn = utils.sanitize_filename(suggested_fn)
filename, q = downloads.ask_for_filename(suggested_fn, parent=tab,
url=tab.url())
if filename is not None:
mhtml.start_download_checked(filename, tab=tab)
else:
q.answered.connect(functools.partial(
mhtml.start_download_checked, tab=tab))
q.ask()
else:
mhtml.start_download_checked(dest, tab=tab)
@cmdutils.register(instance='command-dispatcher', scope='window')
def view_source(self):
"""Show the source of the current page."""
# pylint: disable=no-member
# WORKAROUND for https://bitbucket.org/logilab/pylint/issue/491/
tab = self._current_widget()
if tab.data.viewing_source:
raise cmdexc.CommandError("Already viewing source!")
def show_source_cb(source):
"""Show source as soon as it's ready."""
lexer = pygments.lexers.HtmlLexer()
formatter = pygments.formatters.HtmlFormatter(full=True,
linenos='table')
highlighted = pygments.highlight(source, lexer, formatter)
try:
current_url = self._current_url()
except cmdexc.CommandError as e:
message.error(str(e))
return
new_tab = self._tabbed_browser.tabopen(explicit=True)
new_tab.set_html(highlighted, current_url)
new_tab.data.viewing_source = True
tab.dump_async(show_source_cb)
@cmdutils.register(instance='command-dispatcher', scope='window',
debug=True)
def debug_dump_page(self, dest, plain=False):
"""Dump the current page's content to a file.
Args:
dest: Where to write the file to.
plain: Write plain text instead of HTML.
"""
tab = self._current_widget()
dest = os.path.expanduser(dest)
def callback(data):
try:
with open(dest, 'w', encoding='utf-8') as f:
f.write(data)
except OSError as e:
message.error('Could not write page: {}'.format(e))
else:
message.info("Dumped page to {}.".format(dest))
tab.dump_async(callback, plain=plain)
@cmdutils.register(instance='command-dispatcher', name='help',
scope='window')
@cmdutils.argument('topic', completion=usertypes.Completion.helptopic)
def show_help(self, tab=False, bg=False, window=False, topic=None):
r"""Show help about a command or setting.
Args:
tab: Open in a new tab.
bg: Open in a background tab.
window: Open in a new window.
topic: The topic to show help for.
- :__command__ for commands.
- __section__\->__option__ for settings.
"""
if topic is None:
path = 'index.html'
elif topic.startswith(':'):
command = topic[1:]
if command not in cmdutils.cmd_dict:
raise cmdexc.CommandError("Invalid command {}!".format(
command))
path = 'commands.html#{}'.format(command)
elif '->' in topic:
parts = topic.split('->')
if len(parts) != 2:
raise cmdexc.CommandError("Invalid help topic {}!".format(
topic))
try:
config.get(*parts)
except configexc.NoSectionError:
raise cmdexc.CommandError("Invalid section {}!".format(
parts[0]))
except configexc.NoOptionError:
raise cmdexc.CommandError("Invalid option {}!".format(
parts[1]))
path = 'settings.html#{}'.format(topic.replace('->', '-'))
else:
raise cmdexc.CommandError("Invalid help topic {}!".format(topic))
url = QUrl('qute://help/{}'.format(path))
self._open(url, tab, bg, window)
@cmdutils.register(instance='command-dispatcher', scope='window')
def messages(self, level='error', plain=False, tab=False, bg=False,
window=False):
"""Show a log of past messages.
Args:
level: Include messages with `level` or higher severity.
Valid values: vdebug, debug, info, warning, error, critical.
plain: Whether to show plaintext (as opposed to html).
tab: Open in a new tab.
bg: Open in a background tab.
window: Open in a new window.
"""
if level.upper() not in log.LOG_LEVELS:
raise cmdexc.CommandError("Invalid log level {}!".format(level))
if plain:
url = QUrl('qute://plainlog?level={}'.format(level))
else:
url = QUrl('qute://log?level={}'.format(level))
self._open(url, tab, bg, window)
def _open_editor_cb(self, elem):
"""Open editor after the focus elem was found in open_editor."""
if elem is None:
message.error("No element focused!")
return
if not elem.is_editable(strict=True):
message.error("Focused element is not editable!")
return
text = elem.text(use_js=True)
ed = editor.ExternalEditor(self._tabbed_browser)
ed.editing_finished.connect(functools.partial(
self.on_editing_finished, elem))
ed.edit(text)
@cmdutils.register(instance='command-dispatcher', hide=True,
scope='window')
def open_editor(self):
"""Open an external editor with the currently selected form field.
The editor which should be launched can be configured via the
`general -> editor` config option.
"""
tab = self._current_widget()
tab.elements.find_focused(self._open_editor_cb)
def on_editing_finished(self, elem, text):
"""Write the editor text into the form field and clean up tempfile.
Callback for GUIProcess when the editor was closed.
Args:
elem: The WebElementWrapper which was modified.
text: The new text to insert.
"""
try:
elem.set_text(text, use_js=True)
except webelem.Error as e:
raise cmdexc.CommandError(str(e))
@cmdutils.register(instance='command-dispatcher',
deprecated="Use :insert-text {primary}",
modes=[KeyMode.insert], hide=True, scope='window',
backend=usertypes.Backend.QtWebKit)
def paste_primary(self):
"""Paste the primary selection at cursor position."""
try:
self.insert_text(utils.get_clipboard(selection=True))
except utils.SelectionUnsupportedError:
self.insert_text(utils.get_clipboard())
@cmdutils.register(instance='command-dispatcher', maxsplit=0,
scope='window')
def insert_text(self, text):
"""Insert text at cursor position.
Args:
text: The text to insert.
"""
tab = self._current_widget()
def _insert_text_cb(elem):
if elem is None:
message.error("No element focused!")
return
try:
elem.insert_text(text)
except webelem.Error as e:
message.error(str(e))
return
tab.elements.find_focused(_insert_text_cb)
@cmdutils.register(instance='command-dispatcher', scope='window',
hide=True)
@cmdutils.argument('filter_', choices=['id'])
def click_element(self, filter_: str, value, *,
target: usertypes.ClickTarget=
usertypes.ClickTarget.normal):
"""Click the element matching the given filter.
        The given filter needs to result in exactly one element; otherwise, an
error is shown.
Args:
filter_: How to filter the elements.
id: Get an element based on its ID.
value: The value to filter for.
target: How to open the clicked element (normal/tab/tab-bg/window).
"""
tab = self._current_widget()
def single_cb(elem):
"""Click a single element."""
if elem is None:
message.error("No element found!")
return
try:
elem.click(target)
except webelem.Error as e:
message.error(str(e))
return
# def multiple_cb(elems):
# """Click multiple elements (with only one expected)."""
# if not elems:
# message.error("No element found!")
# return
# elif len(elems) != 1:
# message.error("{} elements found!".format(len(elems)))
# return
# elems[0].click(target)
handlers = {
'id': (tab.elements.find_id, single_cb),
}
handler, callback = handlers[filter_]
handler(value, callback)
def _search_cb(self, found, *, tab, old_scroll_pos, options, text, prev):
"""Callback called from search/search_next/search_prev.
Args:
found: Whether the text was found.
tab: The AbstractTab in which the search was made.
old_scroll_pos: The scroll position (QPoint) before the search.
options: The options (dict) the search was made with.
text: The text searched for.
prev: Whether we're searching backwards (i.e. :search-prev)
"""
# :search/:search-next without reverse -> down
# :search/:search-next with reverse -> up
# :search-prev without reverse -> up
# :search-prev with reverse -> down
going_up = options['reverse'] ^ prev
if found:
# Check if the scroll position got smaller and show info.
if not going_up and tab.scroller.pos_px().y() < old_scroll_pos.y():
message.info("Search hit BOTTOM, continuing at TOP")
elif going_up and tab.scroller.pos_px().y() > old_scroll_pos.y():
message.info("Search hit TOP, continuing at BOTTOM")
else:
message.warning("Text '{}' not found on page!".format(text))
@cmdutils.register(instance='command-dispatcher', scope='window',
maxsplit=0)
def search(self, text="", reverse=False):
"""Search for a text on the current page. With no text, clear results.
Args:
text: The text to search for.
reverse: Reverse search direction.
"""
self.set_mark("'")
tab = self._current_widget()
tab.search.clear()
options = {
'ignore_case': config.get('general', 'ignore-case'),
'reverse': reverse,
}
self._tabbed_browser.search_text = text
self._tabbed_browser.search_options = dict(options)
if text:
cb = functools.partial(self._search_cb, tab=tab,
old_scroll_pos=tab.scroller.pos_px(),
options=options, text=text, prev=False)
else:
cb = None
options['result_cb'] = cb
tab.search.search(text, **options)
@cmdutils.register(instance='command-dispatcher', hide=True,
scope='window')
@cmdutils.argument('count', count=True)
def search_next(self, count=1):
"""Continue the search to the ([count]th) next term.
Args:
count: How many elements to ignore.
"""
tab = self._current_widget()
window_text = self._tabbed_browser.search_text
window_options = self._tabbed_browser.search_options
if window_text is None:
raise cmdexc.CommandError("No search done yet.")
self.set_mark("'")
if window_text is not None and window_text != tab.search.text:
tab.search.clear()
tab.search.search(window_text, **window_options)
count -= 1
if count == 0:
return
cb = functools.partial(self._search_cb, tab=tab,
old_scroll_pos=tab.scroller.pos_px(),
options=window_options, text=window_text,
prev=False)
for _ in range(count - 1):
tab.search.next_result()
tab.search.next_result(result_cb=cb)
@cmdutils.register(instance='command-dispatcher', hide=True,
scope='window')
@cmdutils.argument('count', count=True)
def search_prev(self, count=1):
"""Continue the search to the ([count]th) previous term.
Args:
count: How many elements to ignore.
"""
tab = self._current_widget()
window_text = self._tabbed_browser.search_text
window_options = self._tabbed_browser.search_options
if window_text is None:
raise cmdexc.CommandError("No search done yet.")
self.set_mark("'")
if window_text is not None and window_text != tab.search.text:
tab.search.clear()
tab.search.search(window_text, **window_options)
count -= 1
if count == 0:
return
cb = functools.partial(self._search_cb, tab=tab,
old_scroll_pos=tab.scroller.pos_px(),
options=window_options, text=window_text,
prev=True)
for _ in range(count - 1):
tab.search.prev_result()
tab.search.prev_result(result_cb=cb)
@cmdutils.register(instance='command-dispatcher', hide=True,
modes=[KeyMode.caret], scope='window')
@cmdutils.argument('count', count=True)
def move_to_next_line(self, count=1):
"""Move the cursor or selection to the next line.
Args:
count: How many lines to move.
"""
self._current_widget().caret.move_to_next_line(count)
@cmdutils.register(instance='command-dispatcher', hide=True,
modes=[KeyMode.caret], scope='window')
@cmdutils.argument('count', count=True)
def move_to_prev_line(self, count=1):
"""Move the cursor or selection to the prev line.
Args:
count: How many lines to move.
"""
self._current_widget().caret.move_to_prev_line(count)
@cmdutils.register(instance='command-dispatcher', hide=True,
modes=[KeyMode.caret], scope='window')
@cmdutils.argument('count', count=True)
def move_to_next_char(self, count=1):
"""Move the cursor or selection to the next char.
Args:
count: How many lines to move.
"""
self._current_widget().caret.move_to_next_char(count)
@cmdutils.register(instance='command-dispatcher', hide=True,
modes=[KeyMode.caret], scope='window')
@cmdutils.argument('count', count=True)
def move_to_prev_char(self, count=1):
"""Move the cursor or selection to the previous char.
Args:
count: How many chars to move.
"""
self._current_widget().caret.move_to_prev_char(count)
@cmdutils.register(instance='command-dispatcher', hide=True,
modes=[KeyMode.caret], scope='window')
@cmdutils.argument('count', count=True)
def move_to_end_of_word(self, count=1):
"""Move the cursor or selection to the end of the word.
Args:
count: How many words to move.
"""
self._current_widget().caret.move_to_end_of_word(count)
@cmdutils.register(instance='command-dispatcher', hide=True,
modes=[KeyMode.caret], scope='window')
@cmdutils.argument('count', count=True)
def move_to_next_word(self, count=1):
"""Move the cursor or selection to the next word.
Args:
count: How many words to move.
"""
self._current_widget().caret.move_to_next_word(count)
@cmdutils.register(instance='command-dispatcher', hide=True,
modes=[KeyMode.caret], scope='window')
@cmdutils.argument('count', count=True)
def move_to_prev_word(self, count=1):
"""Move the cursor or selection to the previous word.
Args:
count: How many words to move.
"""
self._current_widget().caret.move_to_prev_word(count)
@cmdutils.register(instance='command-dispatcher', hide=True,
modes=[KeyMode.caret], scope='window')
def move_to_start_of_line(self):
"""Move the cursor or selection to the start of the line."""
self._current_widget().caret.move_to_start_of_line()
@cmdutils.register(instance='command-dispatcher', hide=True,
modes=[KeyMode.caret], scope='window')
def move_to_end_of_line(self):
"""Move the cursor or selection to the end of line."""
self._current_widget().caret.move_to_end_of_line()
@cmdutils.register(instance='command-dispatcher', hide=True,
modes=[KeyMode.caret], scope='window')
@cmdutils.argument('count', count=True)
def move_to_start_of_next_block(self, count=1):
"""Move the cursor or selection to the start of next block.
Args:
count: How many blocks to move.
"""
self._current_widget().caret.move_to_start_of_next_block(count)
@cmdutils.register(instance='command-dispatcher', hide=True,
modes=[KeyMode.caret], scope='window')
@cmdutils.argument('count', count=True)
def move_to_start_of_prev_block(self, count=1):
"""Move the cursor or selection to the start of previous block.
Args:
count: How many blocks to move.
"""
self._current_widget().caret.move_to_start_of_prev_block(count)
@cmdutils.register(instance='command-dispatcher', hide=True,
modes=[KeyMode.caret], scope='window')
@cmdutils.argument('count', count=True)
def move_to_end_of_next_block(self, count=1):
"""Move the cursor or selection to the end of next block.
Args:
count: How many blocks to move.
"""
self._current_widget().caret.move_to_end_of_next_block(count)
@cmdutils.register(instance='command-dispatcher', hide=True,
modes=[KeyMode.caret], scope='window')
@cmdutils.argument('count', count=True)
def move_to_end_of_prev_block(self, count=1):
"""Move the cursor or selection to the end of previous block.
Args:
count: How many blocks to move.
"""
self._current_widget().caret.move_to_end_of_prev_block(count)
@cmdutils.register(instance='command-dispatcher', hide=True,
modes=[KeyMode.caret], scope='window')
def move_to_start_of_document(self):
"""Move the cursor or selection to the start of the document."""
self._current_widget().caret.move_to_start_of_document()
@cmdutils.register(instance='command-dispatcher', hide=True,
modes=[KeyMode.caret], scope='window')
def move_to_end_of_document(self):
"""Move the cursor or selection to the end of the document."""
self._current_widget().caret.move_to_end_of_document()
@cmdutils.register(instance='command-dispatcher', hide=True,
modes=[KeyMode.caret], scope='window')
def toggle_selection(self):
"""Toggle caret selection mode."""
self._current_widget().caret.toggle_selection()
@cmdutils.register(instance='command-dispatcher', hide=True,
modes=[KeyMode.caret], scope='window')
def drop_selection(self):
"""Drop selection and keep selection mode enabled."""
self._current_widget().caret.drop_selection()
@cmdutils.register(instance='command-dispatcher', scope='window',
debug=True)
@cmdutils.argument('count', count=True)
def debug_webaction(self, action, count=1):
"""Execute a webaction.
See http://doc.qt.io/qt-5/qwebpage.html#WebAction-enum for the
available actions.
Args:
action: The action to execute, e.g. MoveToNextChar.
count: How many times to repeat the action.
"""
tab = self._current_widget()
if tab.backend == usertypes.Backend.QtWebKit:
assert QWebPage is not None
member = getattr(QWebPage, action, None)
base = QWebPage.WebAction
elif tab.backend == usertypes.Backend.QtWebEngine:
assert QWebEnginePage is not None
member = getattr(QWebEnginePage, action, None)
base = QWebEnginePage.WebAction
if not isinstance(member, base):
raise cmdexc.CommandError("{} is not a valid web action!".format(
action))
for _ in range(count):
# This whole command is backend-specific anyways, so it makes no
# sense to introduce some API for this.
# pylint: disable=protected-access
tab._widget.triggerPageAction(member)
@cmdutils.register(instance='command-dispatcher', scope='window',
maxsplit=0, no_cmd_split=True)
def jseval(self, js_code, quiet=False, *,
world: typing.Union[usertypes.JsWorld, int]=None):
"""Evaluate a JavaScript string.
Args:
js_code: The string to evaluate.
quiet: Don't show resulting JS object.
world: Ignored on QtWebKit. On QtWebEngine, a world ID or name to
run the snippet in.
"""
if world is None:
world = usertypes.JsWorld.jseval
if quiet:
jseval_cb = None
else:
def jseval_cb(out):
if out is None:
# Getting the actual error (if any) seems to be difficult.
# The error does end up in
# BrowserPage.javaScriptConsoleMessage(), but
# distinguishing between :jseval errors and errors from the
# webpage is not trivial...
message.info('No output or error')
else:
# The output can be a string, number, dict, array, etc. But
# *don't* output too much data, as this will make
# qutebrowser hang
out = str(out)
if len(out) > 5000:
out = out[:5000] + ' [...trimmed...]'
message.info(out)
widget = self._current_widget()
widget.run_js_async(js_code, callback=jseval_cb, world=world)
@cmdutils.register(instance='command-dispatcher', scope='window')
def fake_key(self, keystring, global_=False):
"""Send a fake keypress or key string to the website or qutebrowser.
:fake-key xy - sends the keychain 'xy'
:fake-key <Ctrl-x> - sends Ctrl-x
:fake-key <Escape> - sends the escape key
Args:
keystring: The keystring to send.
global_: If given, the keys are sent to the qutebrowser UI.
"""
try:
keyinfos = utils.parse_keystring(keystring)
except utils.KeyParseError as e:
raise cmdexc.CommandError(str(e))
for keyinfo in keyinfos:
press_event = QKeyEvent(QEvent.KeyPress, keyinfo.key,
keyinfo.modifiers, keyinfo.text)
release_event = QKeyEvent(QEvent.KeyRelease, keyinfo.key,
keyinfo.modifiers, keyinfo.text)
if global_:
window = QApplication.focusWindow()
if window is None:
raise cmdexc.CommandError("No focused window!")
QApplication.postEvent(window, press_event)
QApplication.postEvent(window, release_event)
else:
try:
tab = objreg.get('tab', scope='tab', tab='current')
except objreg.RegistryUnavailableError:
raise cmdexc.CommandError("No focused webview!")
tab = self._current_widget()
tab.send_event(press_event)
tab.send_event(release_event)
@cmdutils.register(instance='command-dispatcher', scope='window',
debug=True)
def debug_clear_ssl_errors(self):
"""Clear remembered SSL error answers."""
self._current_widget().clear_ssl_errors()
@cmdutils.register(instance='command-dispatcher', scope='window')
def edit_url(self, url=None, bg=False, tab=False, window=False):
"""Navigate to a url formed in an external editor.
The editor which should be launched can be configured via the
`general -> editor` config option.
Args:
url: URL to edit; defaults to the current page url.
bg: Open in a new background tab.
tab: Open in a new tab.
window: Open in a new window.
"""
cmdutils.check_exclusive((tab, bg, window), 'tbw')
old_url = self._current_url().toString()
ed = editor.ExternalEditor(self._tabbed_browser)
# Passthrough for openurl args (e.g. -t, -b, -w)
ed.editing_finished.connect(functools.partial(
self._open_if_changed, old_url=old_url, bg=bg, tab=tab,
window=window))
ed.edit(url or old_url)
@cmdutils.register(instance='command-dispatcher', scope='window',
hide=True)
def set_mark(self, key):
"""Set a mark at the current scroll position in the current tab.
Args:
key: mark identifier; capital indicates a global mark
"""
self._tabbed_browser.set_mark(key)
@cmdutils.register(instance='command-dispatcher', scope='window',
hide=True)
def jump_mark(self, key):
"""Jump to the mark named by `key`.
Args:
key: mark identifier; capital indicates a global mark
"""
self._tabbed_browser.jump_mark(key)
def _open_if_changed(self, url=None, old_url=None, bg=False, tab=False,
window=False):
"""Open a URL unless it's already open in the tab.
Args:
old_url: The original URL to compare against.
url: The URL to open.
bg: Open in a new background tab.
tab: Open in a new tab.
window: Open in a new window.
"""
if bg or tab or window or url != old_url:
self.openurl(url=url, bg=bg, tab=tab, window=window)
| 1 | 16,978 | This asks a blocking question, which means a local Qt event loop will be running. While this is unavoidable sometimes, the async functions should be used whenever possible (`message.confirm_async` in this case). This means: - Split everything after this question into a separate private method (you can probably just call it `_tab_close`) - If the tab is not pinned, just call `_tab_close` directly - If it is pinned, call `message.confirm_async` with `yes_action=self._tab_close` | qutebrowser-qutebrowser | py |
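A minimal sketch of the refactor the review describes, assuming it sits inside the same `CommandDispatcher` class: the code that used to follow the blocking question moves into a private `_tab_close` helper, and the pinned case goes through `message.confirm_async` with a `yes_action` callback instead of spinning a nested event loop. The `tab.data.pinned` attribute, the prompt title, and the helper body are illustrative assumptions, not the actual patch.

```python
# Sketch only, not the actual patch.
def _tab_close(self, tab, prev=False, next_=False, opposite=False):
    """Everything that previously ran after the blocking question."""
    ...  # close the tab, select the neighbouring tab, etc.

def tab_close(self, prev=False, next_=False, opposite=False):
    """Close the current tab, asking for confirmation first if it is pinned."""
    tab = self._current_widget()
    close = functools.partial(self._tab_close, tab, prev, next_, opposite)
    if not tab.data.pinned:          # `data.pinned` is an assumed attribute name
        close()
    else:
        message.confirm_async(
            title='Close pinned tab?',   # prompt wording is illustrative
            yes_action=close)            # per the review: a yes_action callback
```

Keeping the helper free of any prompting lets the pinned and unpinned paths reuse exactly the same close logic.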
@@ -28,7 +28,7 @@ define(["jQuery", "loading", "libraryMenu", "fnchecked"], function($, loading, l
IsHidden: !1
})).then(function(folders) {
loadDeleteFolders(page, user, folders.Items)
- }), user.Policy.IsDisabled ? $(".disabledUserBanner", page).show() : $(".disabledUserBanner", page).hide(), "Guest" == user.ConnectLinkType ? ($("#fldConnectInfo", page).hide(), $("#txtUserName", page).prop("disabled", "disabled")) : ($("#txtUserName", page).prop("disabled", "").removeAttr("disabled"), $("#fldConnectInfo", page).show()), $(".lnkEditUserPreferences", page).attr("href", "mypreferencesmenu.html?userId=" + user.Id), libraryMenu.setTitle(user.Name), page.querySelector(".username").innerHTML = user.Name, $("#txtUserName", page).val(user.Name), $("#chkIsAdmin", page).checked(user.Policy.IsAdministrator), $("#chkDisabled", page).checked(user.Policy.IsDisabled), $("#chkIsHidden", page).checked(user.Policy.IsHidden), $("#chkRemoteControlSharedDevices", page).checked(user.Policy.EnableSharedDeviceControl), $("#chkEnableRemoteControlOtherUsers", page).checked(user.Policy.EnableRemoteControlOfOtherUsers), $("#chkEnableDownloading", page).checked(user.Policy.EnableContentDownloading), $("#chkManageLiveTv", page).checked(user.Policy.EnableLiveTvManagement), $("#chkEnableLiveTvAccess", page).checked(user.Policy.EnableLiveTvAccess), $("#chkEnableMediaPlayback", page).checked(user.Policy.EnableMediaPlayback), $("#chkEnableAudioPlaybackTranscoding", page).checked(user.Policy.EnableAudioPlaybackTranscoding), $("#chkEnableVideoPlaybackTranscoding", page).checked(user.Policy.EnableVideoPlaybackTranscoding), $("#chkEnableVideoPlaybackRemuxing", page).checked(user.Policy.EnablePlaybackRemuxing), $("#chkRemoteAccess", page).checked(null == user.Policy.EnableRemoteAccess || user.Policy.EnableRemoteAccess), $("#chkEnableSyncTranscoding", page).checked(user.Policy.EnableSyncTranscoding), $("#chkEnableConversion", page).checked(user.Policy.EnableMediaConversion || !1), $("#chkEnableSharing", page).checked(user.Policy.EnablePublicSharing), $("#txtRemoteClientBitrateLimit", page).val(user.Policy.RemoteClientBitrateLimit / 1e6 || ""), loading.hide()
+ }), user.Policy.IsDisabled ? $(".disabledUserBanner", page).show() : $(".disabledUserBanner", page).hide(), "Guest" == user.ConnectLinkType ? ($("#fldConnectInfo", page).hide(), $("#txtUserName", page).prop("disabled", "disabled")) : ($("#txtUserName", page).prop("disabled", "").removeAttr("disabled"), $("#fldConnectInfo", page).show()), $(".lnkEditUserPreferences", page).attr("href", "mypreferencesmenu.html?userId=" + user.Id), libraryMenu.setTitle(user.Name), page.querySelector(".username").innerHTML = user.Name, $("#txtUserName", page).val(user.Name), $("#chkIsAdmin", page).checked(user.Policy.IsAdministrator), $("#chkDisabled", page).checked(user.Policy.IsDisabled), $("#chkIsHidden", page).checked(user.Policy.IsHidden), $("#chkRemoteControlSharedDevices", page).checked(user.Policy.EnableSharedDeviceControl), $("#chkEnableRemoteControlOtherUsers", page).checked(user.Policy.EnableRemoteControlOfOtherUsers), $("#chkEnableDownloading", page).checked(user.Policy.EnableContentDownloading), $("#chkManageLiveTv", page).checked(user.Policy.EnableLiveTvManagement), $("#chkEnableLiveTvAccess", page).checked(user.Policy.EnableLiveTvAccess), $("#chkEnableMediaPlayback", page).checked(user.Policy.EnableMediaPlayback), $("#chkEnableAudioPlaybackTranscoding", page).checked(user.Policy.EnableAudioPlaybackTranscoding), $("#chkEnableVideoPlaybackTranscoding", page).checked(user.Policy.EnableVideoPlaybackTranscoding), $("#chkEnableVideoPlaybackRemuxing", page).checked(user.Policy.EnablePlaybackRemuxing), $("#chkRemoteAccess", page).checked(null == user.Policy.EnableRemoteAccess || user.Policy.EnableRemoteAccess), $("#chkEnableSyncTranscoding", page).checked(user.Policy.EnableSyncTranscoding), $("#chkEnableConversion", page).checked(user.Policy.EnableMediaConversion || !1), $("#chkEnableSharing", page).checked(user.Policy.EnablePublicSharing), $("#txtRemoteClientBitrateLimit", page).val(user.Policy.RemoteClientBitrateLimit / 1e6 || ""), $("#txtLoginAttemptsBeforeLockout", page).val(user.Policy.LoginAttemptsBeforeLockout || "0"), loading.hide()
}
function onSaveComplete(page, user) { | 1 | define(["jQuery", "loading", "libraryMenu", "fnchecked"], function($, loading, libraryMenu) {
"use strict";
function loadDeleteFolders(page, user, mediaFolders) {
ApiClient.getJSON(ApiClient.getUrl("Channels", {
SupportsMediaDeletion: !0
})).then(function(channelsResult) {
var i, length, folder, isChecked, checkedAttribute, html = "";
for (i = 0, length = mediaFolders.length; i < length; i++) folder = mediaFolders[i], isChecked = user.Policy.EnableContentDeletion || -1 != user.Policy.EnableContentDeletionFromFolders.indexOf(folder.Id), checkedAttribute = isChecked ? ' checked="checked"' : "", html += '<label><input type="checkbox" is="emby-checkbox" class="chkFolder" data-id="' + folder.Id + '" ' + checkedAttribute + "><span>" + folder.Name + "</span></label>";
for (i = 0, length = channelsResult.Items.length; i < length; i++) folder = channelsResult.Items[i], isChecked = user.Policy.EnableContentDeletion || -1 != user.Policy.EnableContentDeletionFromFolders.indexOf(folder.Id), checkedAttribute = isChecked ? ' checked="checked"' : "", html += '<label><input type="checkbox" is="emby-checkbox" class="chkFolder" data-id="' + folder.Id + '" ' + checkedAttribute + "><span>" + folder.Name + "</span></label>";
$(".deleteAccess", page).html(html).trigger("create"), $("#chkEnableDeleteAllFolders", page).checked(user.Policy.EnableContentDeletion).trigger("change")
})
}
function loadAuthProviders(page, user, providers) {
providers.length > 1 && !user.Policy.IsAdministrator ? page.querySelector(".fldSelectLoginProvider").classList.remove("hide") : page.querySelector(".fldSelectLoginProvider").classList.add("hide");
var currentProviderId = user.Policy.AuthenticationProviderId;
page.querySelector(".selectLoginProvider").innerHTML = providers.map(function(provider) {
var selected = provider.Id === currentProviderId || providers.length < 2 ? " selected" : "";
return '<option value="' + provider.Id + '"' + selected + ">" + provider.Name + "</option>"
})
}
function loadUser(page, user) {
currentUser = user, ApiClient.getJSON(ApiClient.getUrl("Auth/Providers")).then(function(providers) {
loadAuthProviders(page, user, providers)
}), ApiClient.getJSON(ApiClient.getUrl("Library/MediaFolders", {
IsHidden: !1
})).then(function(folders) {
loadDeleteFolders(page, user, folders.Items)
}), user.Policy.IsDisabled ? $(".disabledUserBanner", page).show() : $(".disabledUserBanner", page).hide(), "Guest" == user.ConnectLinkType ? ($("#fldConnectInfo", page).hide(), $("#txtUserName", page).prop("disabled", "disabled")) : ($("#txtUserName", page).prop("disabled", "").removeAttr("disabled"), $("#fldConnectInfo", page).show()), $(".lnkEditUserPreferences", page).attr("href", "mypreferencesmenu.html?userId=" + user.Id), libraryMenu.setTitle(user.Name), page.querySelector(".username").innerHTML = user.Name, $("#txtUserName", page).val(user.Name), $("#chkIsAdmin", page).checked(user.Policy.IsAdministrator), $("#chkDisabled", page).checked(user.Policy.IsDisabled), $("#chkIsHidden", page).checked(user.Policy.IsHidden), $("#chkRemoteControlSharedDevices", page).checked(user.Policy.EnableSharedDeviceControl), $("#chkEnableRemoteControlOtherUsers", page).checked(user.Policy.EnableRemoteControlOfOtherUsers), $("#chkEnableDownloading", page).checked(user.Policy.EnableContentDownloading), $("#chkManageLiveTv", page).checked(user.Policy.EnableLiveTvManagement), $("#chkEnableLiveTvAccess", page).checked(user.Policy.EnableLiveTvAccess), $("#chkEnableMediaPlayback", page).checked(user.Policy.EnableMediaPlayback), $("#chkEnableAudioPlaybackTranscoding", page).checked(user.Policy.EnableAudioPlaybackTranscoding), $("#chkEnableVideoPlaybackTranscoding", page).checked(user.Policy.EnableVideoPlaybackTranscoding), $("#chkEnableVideoPlaybackRemuxing", page).checked(user.Policy.EnablePlaybackRemuxing), $("#chkRemoteAccess", page).checked(null == user.Policy.EnableRemoteAccess || user.Policy.EnableRemoteAccess), $("#chkEnableSyncTranscoding", page).checked(user.Policy.EnableSyncTranscoding), $("#chkEnableConversion", page).checked(user.Policy.EnableMediaConversion || !1), $("#chkEnableSharing", page).checked(user.Policy.EnablePublicSharing), $("#txtRemoteClientBitrateLimit", page).val(user.Policy.RemoteClientBitrateLimit / 1e6 || ""), loading.hide()
}
function onSaveComplete(page, user) {
Dashboard.navigate("userprofiles.html");
loading.hide();
require(["toast"], function(toast) {
toast(Globalize.translate("SettingsSaved"));
});
}
function saveUser(user, page) {
user.Name = $("#txtUserName", page).val(), user.Policy.IsAdministrator = $("#chkIsAdmin", page).checked(), user.Policy.IsHidden = $("#chkIsHidden", page).checked(), user.Policy.IsDisabled = $("#chkDisabled", page).checked(), user.Policy.EnableRemoteControlOfOtherUsers = $("#chkEnableRemoteControlOtherUsers", page).checked(), user.Policy.EnableLiveTvManagement = $("#chkManageLiveTv", page).checked(), user.Policy.EnableLiveTvAccess = $("#chkEnableLiveTvAccess", page).checked(), user.Policy.EnableSharedDeviceControl = $("#chkRemoteControlSharedDevices", page).checked(), user.Policy.EnableMediaPlayback = $("#chkEnableMediaPlayback", page).checked(), user.Policy.EnableAudioPlaybackTranscoding = $("#chkEnableAudioPlaybackTranscoding", page).checked(), user.Policy.EnableVideoPlaybackTranscoding = $("#chkEnableVideoPlaybackTranscoding", page).checked(), user.Policy.EnablePlaybackRemuxing = $("#chkEnableVideoPlaybackRemuxing", page).checked(), user.Policy.EnableContentDownloading = $("#chkEnableDownloading", page).checked(), user.Policy.EnableSyncTranscoding = $("#chkEnableSyncTranscoding", page).checked(), user.Policy.EnableMediaConversion = $("#chkEnableConversion", page).checked(), user.Policy.EnablePublicSharing = $("#chkEnableSharing", page).checked(), user.Policy.EnableRemoteAccess = $("#chkRemoteAccess", page).checked(), user.Policy.RemoteClientBitrateLimit = parseInt(1e6 * parseFloat($("#txtRemoteClientBitrateLimit", page).val() || "0")), user.Policy.AuthenticationProviderId = page.querySelector(".selectLoginProvider").value, user.Policy.EnableContentDeletion = $("#chkEnableDeleteAllFolders", page).checked(), user.Policy.EnableContentDeletionFromFolders = user.Policy.EnableContentDeletion ? [] : $(".chkFolder", page).get().filter(function(c) {
return c.checked
}).map(function(c) {
return c.getAttribute("data-id")
}), ApiClient.updateUser(user).then(function() {
ApiClient.updateUserPolicy(user.Id, user.Policy).then(function() {
onSaveComplete(page, user)
})
})
}
function onSubmit() {
var page = $(this).parents(".page")[0];
return loading.show(), getUser().then(function(result) {
saveUser(result, page)
}), !1
}
function getUser() {
var userId = getParameterByName("userId");
return ApiClient.getUser(userId)
}
function loadData(page) {
loading.show(), getUser().then(function(user) {
loadUser(page, user)
})
}
var currentUser;
$(document).on("pageinit", "#editUserPage", function() {
$(".editUserProfileForm").off("submit", onSubmit).on("submit", onSubmit), this.querySelector(".sharingHelp").innerHTML = Globalize.translate("OptionAllowLinkSharingHelp", 30);
var page = this;
$("#chkEnableDeleteAllFolders", this).on("change", function() {
this.checked ? $(".deleteAccess", page).hide() : $(".deleteAccess", page).show()
}), ApiClient.getServerConfiguration().then(function(config) {
config.EnableRemoteAccess ? page.querySelector(".fldRemoteAccess").classList.remove("hide") : page.querySelector(".fldRemoteAccess").classList.add("hide")
})
}).on("pagebeforeshow", "#editUserPage", function() {
loadData(this)
})
});
| 1 | 10,983 | Can we deuglify this? | jellyfin-jellyfin-web | js |
@@ -177,7 +177,7 @@ public interface Option<T> extends Value<T>, Serializable {
*/
default <R> Option<R> collect(PartialFunction<? super T, ? extends R> partialFunction) {
Objects.requireNonNull(partialFunction, "partialFunction is null");
- return filter(partialFunction::isDefinedAt).map(partialFunction::apply);
+ return flatMap(partialFunction.lift()::apply);
}
/** | 1 | /* __ __ __ __ __ ___
* \ \ / / \ \ / / __/
* \ \/ / /\ \ \/ / /
* \____/__/ \__\____/__/.ɪᴏ
* ᶜᵒᵖʸʳᶦᵍʰᵗ ᵇʸ ᵛᵃᵛʳ ⁻ ˡᶦᶜᵉⁿˢᵉᵈ ᵘⁿᵈᵉʳ ᵗʰᵉ ᵃᵖᵃᶜʰᵉ ˡᶦᶜᵉⁿˢᵉ ᵛᵉʳˢᶦᵒⁿ ᵗʷᵒ ᵈᵒᵗ ᶻᵉʳᵒ
*/
package io.vavr.control;
import io.vavr.PartialFunction;
import io.vavr.Value;
import io.vavr.collection.Iterator;
import io.vavr.collection.Seq;
import io.vavr.collection.Vector;
import java.io.Serializable;
import java.util.NoSuchElementException;
import java.util.Objects;
import java.util.Optional;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.function.Predicate;
import java.util.function.Supplier;
/**
* Replacement for {@link java.util.Optional}.
* <p>
* Option is a <a href="http://stackoverflow.com/questions/13454347/monads-with-java-8">monadic</a> container type which
* represents an optional value. Instances of Option are either an instance of {@link Some} or the
* singleton {@link None}.
* <p>
* Most of the API is taken from {@link java.util.Optional}. A similar type can be found in <a
* href="http://hackage.haskell.org/package/base-4.6.0.1/docs/Data-Maybe.html">Haskell</a> and <a
* href="http://www.scala-lang.org/api/current/#scala.Option">Scala</a>.
*
* @param <T> The type of the optional value.
* @author Daniel Dietrich
*/
public interface Option<T> extends Value<T>, Serializable {
long serialVersionUID = 1L;
/**
* Creates a new {@code Option} of a given value.
*
* @param value A value
* @param <T> type of the value
* @return {@code Some(value)} if value is not {@code null}, {@code None} otherwise
*/
static <T> Option<T> of(T value) {
return (value == null) ? none() : some(value);
}
/**
* Reduces many {@code Option}s into a single {@code Option} by transforming an
* {@code Iterable<Option<? extends T>>} into a {@code Option<Seq<T>>}. If any of
* the Options are {@link Option.None}, then this returns {@link Option.None}.
*
* @param values An {@code Iterable} of {@code Option}s
* @param <T> type of the Options
* @return An {@code Option} of a {@link Seq} of results
* @throws NullPointerException if {@code values} is null
*/
static <T> Option<Seq<T>> sequence(Iterable<? extends Option<? extends T>> values) {
Objects.requireNonNull(values, "values is null");
Vector<T> vector = Vector.empty();
for (Option<? extends T> value : values) {
if (value.isEmpty()) {
return Option.none();
}
vector = vector.append(value.get());
}
return Option.some(vector);
}
/**
* Creates a new {@code Some} of a given value.
* <p>
* The only difference to {@link Option#of(Object)} is, when called with argument {@code null}.
* <pre>
* <code>
* Option.of(null); // = None
* Option.some(null); // = Some(null)
* </code>
* </pre>
*
* @param value A value
* @param <T> type of the value
* @return {@code Some(value)}
*/
static <T> Option<T> some(T value) {
return new Some<>(value);
}
/**
* Returns the single instance of {@code None}
*
* @param <T> component type
* @return the single instance of {@code None}
*/
static <T> Option<T> none() {
@SuppressWarnings("unchecked")
final None<T> none = (None<T>) None.INSTANCE;
return none;
}
/**
* Narrows a widened {@code Option<? extends T>} to {@code Option<T>}
* by performing a type-safe cast. This is eligible because immutable/read-only
* collections are covariant.
*
* @param option A {@code Option}.
* @param <T> Component type of the {@code Option}.
* @return the given {@code option} instance as narrowed type {@code Option<T>}.
*/
@SuppressWarnings("unchecked")
static <T> Option<T> narrow(Option<? extends T> option) {
return (Option<T>) option;
}
/**
     * Creates {@code Some} of the supplier's value if the condition is true, or {@code None} otherwise
*
* @param <T> type of the optional value
* @param condition A boolean value
* @param supplier An optional value supplier, may supply {@code null}
     * @return {@code Some} of the supplier's value if the condition is true, or {@code None} otherwise
* @throws NullPointerException if the given {@code supplier} is null
*/
static <T> Option<T> when(boolean condition, Supplier<? extends T> supplier) {
Objects.requireNonNull(supplier, "supplier is null");
return condition ? some(supplier.get()) : none();
}
/**
     * Creates {@code Some} of the given value if the condition is true, or {@code None} otherwise
*
* @param <T> type of the optional value
* @param condition A boolean value
* @param value An optional value, may be {@code null}
     * @return {@code Some} of the given value if the condition is true, or {@code None} otherwise
*/
static <T> Option<T> when(boolean condition, T value) {
return condition ? some(value) : none();
}
/**
* Wraps a Java Optional to a new Option
*
* @param optional a given optional to wrap in {@code Option}
* @param <T> type of the value
     * @return {@code Some(optional.get())} if the given Java {@code Optional} is present, {@code None} otherwise
*/
@SuppressWarnings("OptionalUsedAsFieldOrParameterType")
static <T> Option<T> ofOptional(Optional<? extends T> optional) {
Objects.requireNonNull(optional, "optional is null");
return optional.<Option<T>>map(Option::of).orElseGet(Option::none);
}
/**
* Collects value that is in the domain of the given {@code partialFunction} by mapping the value to type {@code R}.
*
* <pre>{@code
* partialFunction.isDefinedAt(value)
* }</pre>
*
* If the element makes it through that filter, the mapped instance is wrapped in {@code Option}
*
* <pre>{@code
* R newValue = partialFunction.apply(value)
* }</pre>
*
*
* @param partialFunction A function that is not necessarily defined on value of this option.
* @param <R> The new value type
* @return A new {@code Option} instance containing value of type {@code R}
* @throws NullPointerException if {@code partialFunction} is null
*/
default <R> Option<R> collect(PartialFunction<? super T, ? extends R> partialFunction) {
Objects.requireNonNull(partialFunction, "partialFunction is null");
return filter(partialFunction::isDefinedAt).map(partialFunction::apply);
}
/**
* Returns true, if this is {@code None}, otherwise false, if this is {@code Some}.
*
* @return true, if this {@code Option} is empty, false otherwise
*/
@Override
boolean isEmpty();
/**
* Runs a Java Runnable passed as parameter if this {@code Option} is empty.
*
* @param action a given Runnable to be run
* @return this {@code Option}
*/
default Option<T> onEmpty(Runnable action) {
Objects.requireNonNull(action, "action is null");
if (isEmpty()) {
action.run();
}
return this;
}
/**
* An {@code Option}'s value is computed synchronously.
*
* @return false
*/
@Override
default boolean isAsync() {
return false;
}
/**
* Returns true, if this is {@code Some}, otherwise false, if this is {@code None}.
* <p>
* Please note that it is possible to create {@code new Some(null)}, which is defined.
*
* @return true, if this {@code Option} has a defined value, false otherwise
*/
default boolean isDefined() {
return !isEmpty();
}
/**
* An {@code Option}'s value is computed eagerly.
*
* @return false
*/
@Override
default boolean isLazy() {
return false;
}
/**
* An {@code Option} is single-valued.
*
* @return {@code true}
*/
@Override
default boolean isSingleValued() {
return true;
}
/**
* Gets the value if this is a {@code Some} or throws if this is a {@code None}.
*
* @return the value
* @throws NoSuchElementException if this is a {@code None}.
*/
@Override
T get();
/**
* Returns the value if this is a {@code Some} or the {@code other} value if this is a {@code None}.
* <p>
* Please note, that the other value is eagerly evaluated.
*
* @param other An alternative value
* @return This value, if this Option is defined or the {@code other} value, if this Option is empty.
*/
@Override
default T getOrElse(T other) {
return isEmpty() ? other : get();
}
/**
* Returns this {@code Option} if it is nonempty, otherwise return the alternative.
*
* @param other An alternative {@code Option}
* @return this {@code Option} if it is nonempty, otherwise return the alternative.
*/
@SuppressWarnings("unchecked")
default Option<T> orElse(Option<? extends T> other) {
Objects.requireNonNull(other, "other is null");
return isEmpty() ? (Option<T>) other : this;
}
/**
* Returns this {@code Option} if it is nonempty, otherwise return the result of evaluating supplier.
*
* @param supplier An alternative {@code Option} supplier
* @return this {@code Option} if it is nonempty, otherwise return the result of evaluating supplier.
*/
@SuppressWarnings("unchecked")
default Option<T> orElse(Supplier<? extends Option<? extends T>> supplier) {
Objects.requireNonNull(supplier, "supplier is null");
return isEmpty() ? (Option<T>) supplier.get() : this;
}
/**
* Returns the value if this is a {@code Some}, otherwise the {@code other} value is returned,
* if this is a {@code None}.
* <p>
* Please note, that the other value is lazily evaluated.
*
* @param supplier An alternative value supplier
* @return This value, if this Option is defined or the {@code other} value, if this Option is empty.
*/
@Override
default T getOrElse(Supplier<? extends T> supplier) {
Objects.requireNonNull(supplier, "supplier is null");
return isEmpty() ? supplier.get() : get();
}
/**
* Returns the value if this is a {@code Some}, otherwise throws an exception.
*
* @param exceptionSupplier An exception supplier
* @param <X> A throwable
* @return This value, if this Option is defined, otherwise throws X
* @throws X a throwable
*/
@Override
default <X extends Throwable> T getOrElseThrow(Supplier<X> exceptionSupplier) throws X {
Objects.requireNonNull(exceptionSupplier, "exceptionSupplier is null");
if (isEmpty()) {
throw exceptionSupplier.get();
} else {
return get();
}
}
/**
* Returns {@code Some(value)} if this is a {@code Some} and the value satisfies the given predicate.
* Otherwise {@code None} is returned.
*
* @param predicate A predicate which is used to test an optional value
* @return {@code Some(value)} or {@code None} as specified
*/
default Option<T> filter(Predicate<? super T> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
return isEmpty() || predicate.test(get()) ? this : none();
}
/**
* Maps the value to a new {@code Option} if this is a {@code Some}, otherwise returns {@code None}.
*
* @param mapper A mapper
* @param <U> Component type of the resulting Option
* @return a new {@code Option}
*/
@SuppressWarnings("unchecked")
default <U> Option<U> flatMap(Function<? super T, ? extends Option<? extends U>> mapper) {
Objects.requireNonNull(mapper, "mapper is null");
return isEmpty() ? none() : (Option<U>) mapper.apply(get());
}
/**
     * Maps the value and wraps it in a new {@code Some} if this is a {@code Some}, otherwise returns {@code None}.
*
* @param mapper A value mapper
* @param <U> The new value type
* @return a new {@code Some} containing the mapped value if this Option is defined, otherwise {@code None}, if this is empty.
*/
@Override
default <U> Option<U> map(Function<? super T, ? extends U> mapper) {
Objects.requireNonNull(mapper, "mapper is null");
return isEmpty() ? none() : some(mapper.apply(get()));
}
/**
* Folds either the {@code None} or the {@code Some} side of the Option value.
*
     * @param ifNone supplies the value to return if this is a {@code None}
* @param f maps the value if this is a Some
* @param <U> type of the folded value
* @return A value of type U
*/
default <U> U fold(Supplier<? extends U> ifNone, Function<? super T, ? extends U> f) {
return this.<U>map(f).getOrElse(ifNone);
}
/**
* Applies an action to this value, if this option is defined, otherwise does nothing.
*
* @param action An action which can be applied to an optional value
* @return this {@code Option}
*/
@Override
default Option<T> peek(Consumer<? super T> action) {
Objects.requireNonNull(action, "action is null");
if (isDefined()) {
action.accept(get());
}
return this;
}
/**
* Transforms this {@code Option}.
*
* @param f A transformation
* @param <U> Type of transformation result
* @return An instance of type {@code U}
* @throws NullPointerException if {@code f} is null
*/
default <U> U transform(Function<? super Option<T>, ? extends U> f) {
Objects.requireNonNull(f, "f is null");
return f.apply(this);
}
@Override
default Iterator<T> iterator() {
return isEmpty() ? Iterator.empty() : Iterator.of(get());
}
@Override
boolean equals(Object o);
@Override
int hashCode();
@Override
String toString();
/**
* Some represents a defined {@link Option}. It contains a value which may be null. However, to
* create an Option containing null, {@code new Some(null)} has to be called. In all other cases
* {@link Option#of(Object)} is sufficient.
*
* @param <T> The type of the optional value.
* @author Daniel Dietrich
*/
final class Some<T> implements Option<T>, Serializable {
private static final long serialVersionUID = 1L;
private final T value;
/**
* Creates a new Some containing the given value.
*
* @param value A value, may be null
*/
private Some(T value) {
this.value = value;
}
@Override
public T get() {
return value;
}
@Override
public boolean isEmpty() {
return false;
}
@Override
public boolean equals(Object obj) {
return (obj == this) || (obj instanceof Some && Objects.equals(value, ((Some<?>) obj).value));
}
@Override
public int hashCode() {
return Objects.hashCode(value);
}
@Override
public String stringPrefix() {
return "Some";
}
@Override
public String toString() {
return stringPrefix() + "(" + value + ")";
}
}
/**
* None is a singleton representation of the undefined {@link Option}.
*
* @param <T> The type of the optional value.
* @author Daniel Dietrich
*/
final class None<T> implements Option<T>, Serializable {
private static final long serialVersionUID = 1L;
/**
* The singleton instance of None.
*/
private static final None<?> INSTANCE = new None<>();
/**
* Hidden constructor.
*/
private None() {
}
@Override
public T get() {
throw new NoSuchElementException("No value present");
}
@Override
public boolean isEmpty() {
return true;
}
@Override
public boolean equals(Object o) {
return o == this;
}
@Override
public int hashCode() {
return 1;
}
@Override
public String stringPrefix() {
return "None";
}
@Override
public String toString() {
return stringPrefix();
}
// -- Serializable implementation
/**
* Instance control for object serialization.
*
* @return The singleton instance of None.
* @see Serializable
*/
private Object readResolve() {
return INSTANCE;
}
}
}
| 1 | 12,404 | @skestle Just realized that the `::apply` is not necessary. Does it compile if you leave it out? Could you please check that? I'm on vacation and have no IDE at hand... Thx! | vavr-io-vavr | java |
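For reference, the simplification the review asks about would read as follows. Whether it compiles hinges on `PartialFunction.lift()` returning a `Function` that `flatMap` accepts directly, which is exactly the check the reviewer requests, so treat this as an unverified sketch rather than final code.

```java
// Unverified sketch of the reviewer's suggestion: pass the lifted function to
// flatMap directly instead of going through the `::apply` method reference.
default <R> Option<R> collect(PartialFunction<? super T, ? extends R> partialFunction) {
    Objects.requireNonNull(partialFunction, "partialFunction is null");
    return flatMap(partialFunction.lift());  // was: flatMap(partialFunction.lift()::apply)
}
```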
@@ -366,7 +366,8 @@ public class TestMergeAppend extends TableTestBase {
1, base.currentSnapshot().manifests().size());
ManifestFile initialManifest = base.currentSnapshot().manifests().get(0);
- // build the new spec using the table's schema, which uses fresh IDs
+ // build the new spec using the table's sche
+ // ma, which uses fresh IDs
PartitionSpec newSpec = PartitionSpec.builderFor(base.schema())
.bucket("data", 16)
.bucket("id", 4) | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
import java.io.File;
import java.io.IOException;
import java.util.Set;
import org.apache.iceberg.ManifestEntry.Status;
import org.apache.iceberg.exceptions.CommitFailedException;
import org.junit.Assert;
import org.junit.Test;
import static com.google.common.collect.Iterators.concat;
public class TestMergeAppend extends TableTestBase {
@Test
public void testEmptyTableAppend() {
Assert.assertEquals("Table should start empty", 0, listManifestFiles().size());
TableMetadata base = readMetadata();
Assert.assertNull("Should not have a current snapshot", base.currentSnapshot());
Snapshot pending = table.newAppend()
.appendFile(FILE_A)
.appendFile(FILE_B)
.apply();
Assert.assertEquals("Should create 1 manifest for initial write",
1, pending.manifests().size());
long pendingId = pending.snapshotId();
validateManifest(pending.manifests().get(0), ids(pendingId, pendingId), files(FILE_A, FILE_B));
}
@Test
public void testEmptyTableAppendManifest() throws IOException {
Assert.assertEquals("Table should start empty", 0, listManifestFiles().size());
TableMetadata base = readMetadata();
Assert.assertNull("Should not have a current snapshot", base.currentSnapshot());
ManifestFile manifest = writeManifest(FILE_A, FILE_B);
Snapshot pending = table.newAppend()
.appendManifest(manifest)
.apply();
validateSnapshot(base.currentSnapshot(), pending, FILE_A, FILE_B);
// validate that the metadata summary is correct when using appendManifest
Assert.assertEquals("Summary metadata should include 2 added files",
"2", pending.summary().get("added-data-files"));
}
@Test
public void testEmptyTableAppendFilesAndManifest() throws IOException {
Assert.assertEquals("Table should start empty", 0, listManifestFiles().size());
TableMetadata base = readMetadata();
Assert.assertNull("Should not have a current snapshot", base.currentSnapshot());
ManifestFile manifest = writeManifest(FILE_A, FILE_B);
Snapshot pending = table.newAppend()
.appendFile(FILE_C)
.appendFile(FILE_D)
.appendManifest(manifest)
.apply();
long pendingId = pending.snapshotId();
validateManifest(pending.manifests().get(0),
ids(pendingId, pendingId),
files(FILE_C, FILE_D));
validateManifest(pending.manifests().get(1),
ids(pendingId, pendingId),
files(FILE_A, FILE_B));
}
@Test
public void testMergeWithAppendFilesAndManifest() throws IOException {
// merge all manifests for this test
table.updateProperties().set("commit.manifest.min-count-to-merge", "1").commit();
Assert.assertEquals("Table should start empty", 0, listManifestFiles().size());
TableMetadata base = readMetadata();
Assert.assertNull("Should not have a current snapshot", base.currentSnapshot());
ManifestFile manifest = writeManifest(FILE_A, FILE_B);
Snapshot pending = table.newAppend()
.appendFile(FILE_C)
.appendFile(FILE_D)
.appendManifest(manifest)
.apply();
long pendingId = pending.snapshotId();
Assert.assertEquals("Should create 1 merged manifest", 1, pending.manifests().size());
validateManifest(pending.manifests().get(0),
ids(pendingId, pendingId, pendingId, pendingId),
files(FILE_C, FILE_D, FILE_A, FILE_B));
}
@Test
public void testMergeWithExistingManifest() {
// merge all manifests for this test
table.updateProperties().set("commit.manifest.min-count-to-merge", "1").commit();
Assert.assertEquals("Table should start empty", 0, listManifestFiles().size());
table.newAppend()
.appendFile(FILE_A)
.appendFile(FILE_B)
.commit();
TableMetadata base = readMetadata();
long baseId = base.currentSnapshot().snapshotId();
Assert.assertEquals("Should create 1 manifest for initial write",
1, base.currentSnapshot().manifests().size());
ManifestFile initialManifest = base.currentSnapshot().manifests().get(0);
Snapshot pending = table.newAppend()
.appendFile(FILE_C)
.appendFile(FILE_D)
.apply();
Assert.assertEquals("Should contain 1 merged manifest for second write",
1, pending.manifests().size());
ManifestFile newManifest = pending.manifests().get(0);
Assert.assertNotEquals("Should not contain manifest from initial write",
initialManifest, newManifest);
long pendingId = pending.snapshotId();
validateManifest(newManifest,
ids(pendingId, pendingId, baseId, baseId),
concat(files(FILE_C, FILE_D), files(initialManifest)));
}
@Test
public void testManifestMergeMinCount() throws IOException {
Assert.assertEquals("Table should start empty", 0, listManifestFiles().size());
table.updateProperties().set(TableProperties.MANIFEST_MIN_MERGE_COUNT, "2")
// each manifest file is 4554 bytes, so 10000 bytes limit will give us 2 bins with 3 manifest/data files.
.set(TableProperties.MANIFEST_TARGET_SIZE_BYTES, "10000")
.commit();
TableMetadata base = readMetadata();
Assert.assertNull("Should not have a current snapshot", base.currentSnapshot());
ManifestFile manifest = writeManifest(FILE_A);
ManifestFile manifest2 = writeManifestWithName("FILE_C", FILE_C);
ManifestFile manifest3 = writeManifestWithName("FILE_D", FILE_D);
table.newAppend()
.appendManifest(manifest)
.appendManifest(manifest2)
.appendManifest(manifest3)
.commit();
Assert.assertEquals("Should contain 2 merged manifest for first write",
2, readMetadata().currentSnapshot().manifests().size());
table.newAppend()
.appendManifest(manifest)
.appendManifest(manifest2)
.appendManifest(manifest3)
.commit();
Assert.assertEquals("Should contain 3 merged manifest for second write",
3, readMetadata().currentSnapshot().manifests().size());
// validate that the metadata summary is correct when using appendManifest
Assert.assertEquals("Summary metadata should include 3 added files",
"3", readMetadata().currentSnapshot().summary().get("added-data-files"));
}
@Test
public void testManifestDoNotMergeMinCount() throws IOException {
Assert.assertEquals("Table should start empty", 0, listManifestFiles().size());
table.updateProperties().set("commit.manifest.min-count-to-merge", "4").commit();
TableMetadata base = readMetadata();
Assert.assertNull("Should not have a current snapshot", base.currentSnapshot());
ManifestFile manifest = writeManifest(FILE_A, FILE_B);
ManifestFile manifest2 = writeManifestWithName("FILE_C", FILE_C);
ManifestFile manifest3 = writeManifestWithName("FILE_D", FILE_D);
Snapshot pending = table.newAppend()
.appendManifest(manifest)
.appendManifest(manifest2)
.appendManifest(manifest3)
.apply();
Assert.assertEquals("Should contain 3 merged manifest after 1st write write",
3, pending.manifests().size());
// validate that the metadata summary is correct when using appendManifest
Assert.assertEquals("Summary metadata should include 4 added files",
"4", pending.summary().get("added-data-files"));
}
@Test
public void testMergeWithExistingManifestAfterDelete() {
// merge all manifests for this test
table.updateProperties().set("commit.manifest.min-count-to-merge", "1").commit();
Assert.assertEquals("Table should start empty", 0, listManifestFiles().size());
table.newAppend()
.appendFile(FILE_A)
.appendFile(FILE_B)
.commit();
TableMetadata base = readMetadata();
long baseId = base.currentSnapshot().snapshotId();
Assert.assertEquals("Should create 1 manifest for initial write",
1, base.currentSnapshot().manifests().size());
ManifestFile initialManifest = base.currentSnapshot().manifests().get(0);
table.newDelete()
.deleteFile(FILE_A)
.commit();
TableMetadata delete = readMetadata();
long deleteId = delete.currentSnapshot().snapshotId();
Assert.assertEquals("Should create 1 filtered manifest for delete",
1, delete.currentSnapshot().manifests().size());
ManifestFile deleteManifest = delete.currentSnapshot().manifests().get(0);
validateManifestEntries(deleteManifest,
ids(deleteId, baseId),
files(FILE_A, FILE_B),
statuses(Status.DELETED, Status.EXISTING));
Snapshot pending = table.newAppend()
.appendFile(FILE_C)
.appendFile(FILE_D)
.apply();
Assert.assertEquals("Should contain 1 merged manifest for second write",
1, pending.manifests().size());
ManifestFile newManifest = pending.manifests().get(0);
Assert.assertNotEquals("Should not contain manifest from initial write",
initialManifest, newManifest);
long pendingId = pending.snapshotId();
// the deleted entry from the previous manifest should be removed
validateManifestEntries(newManifest,
ids(pendingId, pendingId, baseId),
files(FILE_C, FILE_D, FILE_B),
statuses(Status.ADDED, Status.ADDED, Status.EXISTING));
}
@Test
public void testMinMergeCount() {
// only merge when there are at least 4 manifests
table.updateProperties().set("commit.manifest.min-count-to-merge", "4").commit();
Assert.assertEquals("Table should start empty", 0, listManifestFiles().size());
table.newFastAppend()
.appendFile(FILE_A)
.commit();
long idFileA = readMetadata().currentSnapshot().snapshotId();
table.newFastAppend()
.appendFile(FILE_B)
.commit();
long idFileB = readMetadata().currentSnapshot().snapshotId();
Assert.assertEquals("Should have 2 manifests from setup writes",
2, readMetadata().currentSnapshot().manifests().size());
table.newAppend()
.appendFile(FILE_C)
.commit();
long idFileC = readMetadata().currentSnapshot().snapshotId();
TableMetadata base = readMetadata();
Assert.assertEquals("Should have 3 unmerged manifests",
3, base.currentSnapshot().manifests().size());
Set<ManifestFile> unmerged = Sets.newHashSet(base.currentSnapshot().manifests());
Snapshot pending = table.newAppend()
.appendFile(FILE_D)
.apply();
Assert.assertEquals("Should contain 1 merged manifest after the 4th write",
1, pending.manifests().size());
ManifestFile newManifest = pending.manifests().get(0);
Assert.assertFalse("Should not contain previous manifests", unmerged.contains(newManifest));
long pendingId = pending.snapshotId();
validateManifest(newManifest,
ids(pendingId, idFileC, idFileB, idFileA),
files(FILE_D, FILE_C, FILE_B, FILE_A));
}
@Test
public void testMergeSizeTargetWithExistingManifest() {
// use a small limit on manifest size to prevent merging
table.updateProperties()
.set(TableProperties.MANIFEST_TARGET_SIZE_BYTES, "10")
.commit();
Assert.assertEquals("Table should start empty", 0, listManifestFiles().size());
table.newAppend()
.appendFile(FILE_A)
.appendFile(FILE_B)
.commit();
TableMetadata base = readMetadata();
long baseId = base.currentSnapshot().snapshotId();
Assert.assertEquals("Should create 1 manifest for initial write",
1, base.currentSnapshot().manifests().size());
ManifestFile initialManifest = base.currentSnapshot().manifests().get(0);
Snapshot pending = table.newAppend()
.appendFile(FILE_C)
.appendFile(FILE_D)
.apply();
Assert.assertEquals("Should contain 2 unmerged manifests after second write",
2, pending.manifests().size());
ManifestFile newManifest = pending.manifests().get(0);
Assert.assertNotEquals("Should not contain manifest from initial write",
initialManifest, newManifest);
long pendingId = pending.snapshotId();
validateManifest(newManifest, ids(pendingId, pendingId), files(FILE_C, FILE_D));
validateManifest(pending.manifests().get(1), ids(baseId, baseId), files(initialManifest));
}
@Test
public void testChangedPartitionSpec() {
table.newAppend()
.appendFile(FILE_A)
.appendFile(FILE_B)
.commit();
TableMetadata base = readMetadata();
Assert.assertEquals("Should create 1 manifest for initial write",
1, base.currentSnapshot().manifests().size());
ManifestFile initialManifest = base.currentSnapshot().manifests().get(0);
// build the new spec using the table's schema, which uses fresh IDs
PartitionSpec newSpec = PartitionSpec.builderFor(base.schema())
.bucket("data", 16)
.bucket("id", 4)
.build();
// commit the new partition spec to the table manually
table.ops().commit(base, base.updatePartitionSpec(newSpec));
DataFile newFileC = DataFiles.builder(newSpec)
.copy(FILE_C)
.withPartitionPath("data_bucket=2/id_bucket=3")
.build();
Snapshot pending = table.newAppend()
.appendFile(newFileC)
.apply();
Assert.assertEquals("Should use 2 manifest files",
2, pending.manifests().size());
// new manifest comes first
validateManifest(pending.manifests().get(0), ids(pending.snapshotId()), files(newFileC));
Assert.assertEquals("Second manifest should be the initial manifest with the old spec",
initialManifest, pending.manifests().get(1));
}
@Test
public void testChangedPartitionSpecMergeExisting() {
table.newAppend()
.appendFile(FILE_A)
.commit();
long id1 = readMetadata().currentSnapshot().snapshotId();
// create a second compatible manifest
table.newFastAppend()
.appendFile(FILE_B)
.commit();
long id2 = readMetadata().currentSnapshot().snapshotId();
TableMetadata base = readMetadata();
Assert.assertEquals("Should contain 2 manifests",
2, base.currentSnapshot().manifests().size());
ManifestFile manifest = base.currentSnapshot().manifests().get(0);
// build the new spec using the table's schema, which uses fresh IDs
PartitionSpec newSpec = PartitionSpec.builderFor(base.schema())
.bucket("data", 16)
.bucket("id", 4)
.build();
// commit the new partition spec to the table manually
table.ops().commit(base, base.updatePartitionSpec(newSpec));
DataFile newFileC = DataFiles.builder(newSpec)
.copy(FILE_C)
.withPartitionPath("data_bucket=2/id_bucket=3")
.build();
Snapshot pending = table.newAppend()
.appendFile(newFileC)
.apply();
Assert.assertEquals("Should use 2 manifest files",
2, pending.manifests().size());
Assert.assertFalse("First manifest should not be in the new snapshot",
pending.manifests().contains(manifest));
validateManifest(pending.manifests().get(0), ids(pending.snapshotId()), files(newFileC));
validateManifest(pending.manifests().get(1), ids(id2, id1), files(FILE_B, FILE_A));
}
@Test
public void testFailure() {
// merge all manifests for this test
table.updateProperties().set("commit.manifest.min-count-to-merge", "1").commit();
table.newAppend()
.appendFile(FILE_A)
.commit();
TableMetadata base = readMetadata();
long baseId = base.currentSnapshot().snapshotId();
ManifestFile initialManifest = base.currentSnapshot().manifests().get(0);
table.ops().failCommits(5);
AppendFiles append = table.newAppend().appendFile(FILE_B);
Snapshot pending = append.apply();
Assert.assertEquals("Should merge to 1 manifest", 1, pending.manifests().size());
ManifestFile newManifest = pending.manifests().get(0);
Assert.assertTrue("Should create new manifest", new File(newManifest.path()).exists());
validateManifest(newManifest,
ids(pending.snapshotId(), baseId),
concat(files(FILE_B), files(initialManifest)));
AssertHelpers.assertThrows("Should retry 4 times and throw last failure",
CommitFailedException.class, "Injected failure", append::commit);
Assert.assertFalse("Should clean up new manifest", new File(newManifest.path()).exists());
}
@Test
public void testAppendManifestCleanup() throws IOException {
// inject 5 failures
TestTables.TestTableOperations ops = table.ops();
ops.failCommits(5);
ManifestFile manifest = writeManifest(FILE_A, FILE_B);
AppendFiles append = table.newAppend().appendManifest(manifest);
Snapshot pending = append.apply();
ManifestFile newManifest = pending.manifests().get(0);
Assert.assertTrue("Should create new manifest", new File(newManifest.path()).exists());
AssertHelpers.assertThrows("Should retry 4 times and throw last failure",
CommitFailedException.class, "Injected failure", append::commit);
Assert.assertFalse("Should clean up new manifest", new File(newManifest.path()).exists());
}
@Test
public void testRecovery() {
// merge all manifests for this test
table.updateProperties().set("commit.manifest.min-count-to-merge", "1").commit();
table.newAppend()
.appendFile(FILE_A)
.commit();
TableMetadata base = readMetadata();
long baseId = base.currentSnapshot().snapshotId();
ManifestFile initialManifest = base.currentSnapshot().manifests().get(0);
table.ops().failCommits(3);
AppendFiles append = table.newAppend().appendFile(FILE_B);
Snapshot pending = append.apply();
Assert.assertEquals("Should merge to 1 manifest", 1, pending.manifests().size());
ManifestFile newManifest = pending.manifests().get(0);
Assert.assertTrue("Should create new manifest", new File(newManifest.path()).exists());
validateManifest(newManifest,
ids(pending.snapshotId(), baseId),
concat(files(FILE_B), files(initialManifest)));
append.commit();
TableMetadata metadata = readMetadata();
Assert.assertTrue("Should reuse the new manifest", new File(newManifest.path()).exists());
Assert.assertEquals("Should commit the same new manifest during retry",
Lists.newArrayList(newManifest), metadata.currentSnapshot().manifests());
}
}
| 1 | 17,133 | Looks like this is an unnecessary change. | apache-iceberg | java |
@@ -290,7 +290,7 @@ class CrawlerTest(CrawlerBase):
'disk': {'resource': 3},
'firewall': {'resource': 3},
'folder': {'iam_policy': 2, 'resource': 2},
- 'forwardingrule': {'resource': 1},
+ 'forwardingrule': {'resource': 2},
'instance': {'resource': 3},
'instancegroup': {'resource': 2},
'instancegroupmanager': {'resource': 2}, | 1 | # Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit Tests: Inventory crawler for Forseti Server."""
import copy
import os
import unittest
import mock
from sqlalchemy.orm import sessionmaker
from tests.services.inventory import gcp_api_mocks
from tests.services.util.db import create_test_engine_with_file
from tests.services.util.mock import MockServerConfig
from tests import unittest_utils
from google.cloud.forseti.common.util import file_loader
from google.cloud.forseti.common.util import logger
from google.cloud.forseti.services.base.config import InventoryConfig
from google.cloud.forseti.services.inventory.storage import initialize
from google.cloud.forseti.services.inventory.base.progress import Progresser
from google.cloud.forseti.services.inventory.base.storage import Memory as MemoryStorage
from google.cloud.forseti.services.inventory.crawler import run_crawler
LOGGER = logger.get_logger(__name__)
TEST_RESOURCE_DIR_PATH = os.path.join(
os.path.dirname(__file__), 'test_data')
GCP_API_RESOURCES = {
'appengine_app': {'resource': 2},
'appengine_instance': {'resource': 3},
'appengine_service': {'resource': 1},
'appengine_version': {'resource': 1},
'backendservice': {'resource': 1},
'billing_account': {'resource': 2, 'iam_policy': 2},
'bucket': {'gcs_policy': 2, 'iam_policy': 2, 'resource': 2},
'cloudsqlinstance': {'resource': 1},
'compute_project': {'resource': 2},
'crm_org_policy': {'resource': 5},
'dataset': {'dataset_policy': 1, 'iam_policy': 1, 'resource': 1},
'disk': {'resource': 4},
'firewall': {'resource': 7},
'folder': {'iam_policy': 3, 'resource': 3},
'forwardingrule': {'resource': 1},
'gsuite_group': {'resource': 4},
'gsuite_groups_settings': {'resource': 4},
'gsuite_group_member': {'resource': 1},
'gsuite_user': {'resource': 4},
'gsuite_user_member': {'resource': 3},
'image': {'resource': 2},
'instance': {'resource': 4},
'instancegroup': {'resource': 2},
'instancegroupmanager': {'resource': 2},
'instancetemplate': {'resource': 2},
'kubernetes_cluster': {'resource': 1, 'service_config': 1},
'lien': {'resource': 1},
'network': {'resource': 2},
'organization': {'iam_policy': 1, 'resource': 1},
'project': {'billing_info': 4, 'enabled_apis': 4, 'iam_policy': 4,
'resource': 4},
'role': {'resource': 20},
'serviceaccount': {'iam_policy': 2, 'resource': 2},
'serviceaccount_key': {'resource': 1},
'sink': {'resource': 7},
'snapshot': {'resource': 3},
'subnetwork': {'resource': 24},
}
class FakeServerConfig(MockServerConfig):
"""Fake server config."""
def __init__(self, engine):
"""Initialize."""
self.engine = engine
def get_engine(self):
"""Get engine."""
return self.engine
class NullProgresser(Progresser):
"""No-op progresser to suppress output."""
def __init__(self):
super(NullProgresser, self).__init__()
self.errors = 0
self.objects = 0
self.warnings = 0
def on_new_object(self, resource):
self.objects += 1
def on_warning(self, warning):
LOGGER.error("Progressor Warning: %s", warning)
self.warnings += 1
def on_error(self, error):
LOGGER.exception("Progressor Error: %s", error)
self.errors += 1
def get_summary(self):
pass
class CrawlerBase(unittest_utils.ForsetiTestCase):
"""Base class for Crawler tests."""
def setUp(self):
"""Setup method."""
self.maxDiff = None
unittest_utils.ForsetiTestCase.setUp(self)
def tearDown(self):
"""Tear down method."""
unittest_utils.ForsetiTestCase.tearDown(self)
def _get_resource_counts_from_storage(self, storage):
result_counts = {}
for item in storage.mem.values():
item_type = item.type()
item_counts = result_counts.setdefault(
item_type, {'resource': 0})
item_counts['resource'] += 1
if item.get_iam_policy():
item_counts.setdefault('iam_policy', 0)
item_counts['iam_policy'] += 1
if item.get_gcs_policy():
item_counts.setdefault('gcs_policy', 0)
item_counts['gcs_policy'] += 1
if item.get_dataset_policy():
item_counts.setdefault('dataset_policy', 0)
item_counts['dataset_policy'] += 1
if item.get_billing_info():
item_counts.setdefault('billing_info', 0)
item_counts['billing_info'] += 1
if item.get_enabled_apis():
item_counts.setdefault('enabled_apis', 0)
item_counts['enabled_apis'] += 1
if item.get_kubernetes_service_config():
item_counts.setdefault('service_config', 0)
item_counts['service_config'] += 1
return result_counts
def _run_crawler(self, config, has_org_access=True, session=None):
"""Runs the crawler with a specific InventoryConfig.
Args:
config (InventoryConfig): The configuration to test.
has_org_access (bool): True if crawler has access to the org
resource.
session (object): An existing sql session, required for testing
Cloud Asset API integration.
Returns:
dict: the resource counts returned by the crawler.
"""
with MemoryStorage(session=session) as storage:
progresser = NullProgresser()
with gcp_api_mocks.mock_gcp(has_org_access=has_org_access):
run_crawler(storage,
progresser,
config,
parallel=True)
self.assertEqual(0,
progresser.errors,
'No errors should have occurred')
return self._get_resource_counts_from_storage(storage)
class CrawlerTest(CrawlerBase):
"""Test inventory storage."""
def test_crawling_to_memory_storage(self):
"""Crawl mock environment, test that there are items in storage."""
config = InventoryConfig(
gcp_api_mocks.ORGANIZATION_ID,
'',
{},
'',
{})
config.set_service_config(FakeServerConfig('mock_engine'))
result_counts = self._run_crawler(config)
expected_counts = GCP_API_RESOURCES
self.assertEqual(expected_counts, result_counts)
def test_crawling_from_folder(self):
"""Crawl from folder, verify expected resources crawled."""
config = InventoryConfig(
'folders/1032',
'',
{},
'',
{})
config.set_service_config(FakeServerConfig('mock_engine'))
result_counts = self._run_crawler(config)
expected_counts = {
'appengine_app': {'resource': 1},
'appengine_instance': {'resource': 3},
'appengine_service': {'resource': 1},
'appengine_version': {'resource': 1},
'bucket': {'gcs_policy': 1, 'iam_policy': 1, 'resource': 1},
'folder': {'iam_policy': 2, 'resource': 2},
'project': {'billing_info': 1, 'enabled_apis': 1, 'iam_policy': 1,
'resource': 1},
'role': {'resource': 1},
'sink': {'resource': 1},
}
self.assertEqual(expected_counts, result_counts)
def test_crawling_from_project(self):
"""Crawl from project, verify expected resources crawled."""
config = InventoryConfig(
'projects/1041',
'',
{},
'',
{})
config.set_service_config(FakeServerConfig('mock_engine'))
result_counts = self._run_crawler(config)
expected_counts = {
'backendservice': {'resource': 1},
'compute_project': {'resource': 1},
'crm_org_policy': {'resource': 1},
'disk': {'resource': 3},
'firewall': {'resource': 3},
'forwardingrule': {'resource': 1},
'instance': {'resource': 3},
'instancegroup': {'resource': 2},
'instancegroupmanager': {'resource': 2},
'instancetemplate': {'resource': 2},
'kubernetes_cluster': {'resource': 1, 'service_config': 1},
'lien': {'resource': 1},
'network': {'resource': 1},
'project': {'billing_info': 1, 'enabled_apis': 1, 'iam_policy': 1,
'resource': 1},
'serviceaccount': {'iam_policy': 1, 'resource': 1},
'serviceaccount_key': {'resource': 1},
'sink': {'resource': 2},
'snapshot': {'resource': 2},
'subnetwork': {'resource': 12},
}
self.assertEqual(expected_counts, result_counts)
def test_crawling_from_composite_root(self):
"""Crawl from composite_root with folder and project."""
config = InventoryConfig(
None,
'',
{},
'',
{},
['folders/1032', 'projects/1041'])
config.set_service_config(FakeServerConfig('mock_engine'))
result_counts = self._run_crawler(config)
expected_counts = {
'appengine_app': {'resource': 1},
'appengine_instance': {'resource': 3},
'appengine_service': {'resource': 1},
'appengine_version': {'resource': 1},
'backendservice': {'resource': 1},
'bucket': {'gcs_policy': 1, 'iam_policy': 1, 'resource': 1},
'composite_root': {'resource': 1},
'compute_project': {'resource': 1},
'crm_org_policy': {'resource': 1},
'disk': {'resource': 3},
'firewall': {'resource': 3},
'folder': {'iam_policy': 2, 'resource': 2},
'forwardingrule': {'resource': 1},
'instance': {'resource': 3},
'instancegroup': {'resource': 2},
'instancegroupmanager': {'resource': 2},
'instancetemplate': {'resource': 2},
'kubernetes_cluster': {'resource': 1, 'service_config': 1},
'lien': {'resource': 1},
'network': {'resource': 1},
'project': {'billing_info': 2, 'enabled_apis': 2, 'iam_policy': 2,
'resource': 2},
'role': {'resource': 1},
'serviceaccount': {'iam_policy': 1, 'resource': 1},
'serviceaccount_key': {'resource': 1},
'sink': {'resource': 3},
'snapshot': {'resource': 2},
'subnetwork': {'resource': 12},
}
self.assertEqual(expected_counts, result_counts)
def test_crawling_no_org_access(self):
"""Crawl with no access to organization, only child projects."""
config = InventoryConfig(
gcp_api_mocks.ORGANIZATION_ID,
'',
{},
'',
{})
config.set_service_config(FakeServerConfig('mock_engine'))
result_counts = self._run_crawler(config, has_org_access=False)
# The crawl should be the same as test_crawling_to_memory_storage, but
# without organization iam_policy, org_policy (needs Org access) or
# gsuite_* resources (needs directoryCustomerId from Organization).
expected_counts = copy.deepcopy(GCP_API_RESOURCES)
expected_counts['organization'].pop('iam_policy')
expected_counts['crm_org_policy']['resource'] -= 2
expected_counts.pop('gsuite_group')
expected_counts.pop('gsuite_groups_settings')
expected_counts.pop('gsuite_group_member')
expected_counts.pop('gsuite_user')
expected_counts.pop('gsuite_user_member')
self.assertEqual(expected_counts, result_counts)
def test_crawling_with_apis_disabled(self):
"""Crawl with the appengine and cloudsql APIs disabled."""
config = InventoryConfig(
gcp_api_mocks.ORGANIZATION_ID,
'',
{
'appengine': {'disable_polling': True},
'sqladmin': {'disable_polling': True},
},
'',
{})
config.set_service_config(FakeServerConfig('mock_engine'))
result_counts = self._run_crawler(config, has_org_access=True)
# The crawl should be the same as test_crawling_to_memory_storage, but
# without appengine and cloudsql resources.
expected_counts = copy.deepcopy(GCP_API_RESOURCES)
expected_counts.pop('appengine_app')
expected_counts.pop('appengine_instance')
expected_counts.pop('appengine_service')
expected_counts.pop('appengine_version')
expected_counts.pop('cloudsqlinstance')
self.assertEqual(expected_counts, result_counts)
class CloudAssetCrawlerTest(CrawlerBase):
"""Test CloudAsset integration with crawler."""
def setUp(self):
"""Setup method."""
CrawlerBase.setUp(self)
self.engine, self.dbfile = create_test_engine_with_file()
session_maker = sessionmaker()
self.session = session_maker(bind=self.engine)
initialize(self.engine)
self.inventory_config = InventoryConfig(gcp_api_mocks.ORGANIZATION_ID,
'',
{},
0,
{'enabled': True,
'gcs_path': 'gs://test-bucket'}
)
self.inventory_config.set_service_config(FakeServerConfig(self.engine))
# Ensure test data doesn't get deleted
self.mock_unlink = mock.patch.object(
os, 'unlink', autospec=True).start()
self.mock_copy_file_from_gcs = mock.patch.object(
file_loader,
'copy_file_from_gcs',
autospec=True).start()
self.maxDiff = None
# Mock copy_file_from_gcs to return correct test data file
def _copy_file_from_gcs(file_path, *args, **kwargs):
"""Fake copy_file_from_gcs."""
del args, kwargs
if 'resource' in file_path:
return os.path.join(TEST_RESOURCE_DIR_PATH,
'mock_cai_resources.dump')
elif 'iam_policy' in file_path:
return os.path.join(TEST_RESOURCE_DIR_PATH,
'mock_cai_iam_policies.dump')
self.mock_copy_file_from_gcs.side_effect = _copy_file_from_gcs
def tearDown(self):
"""tearDown."""
CrawlerBase.tearDown(self)
mock.patch.stopall()
# Stop mocks before unlinking the database file.
os.unlink(self.dbfile)
def test_cai_crawl_to_memory(self):
"""Crawl mock environment, test that there are items in storage."""
result_counts = self._run_crawler(self.inventory_config,
session=self.session)
expected_counts = copy.deepcopy(GCP_API_RESOURCES)
expected_counts.update({
'cloudsqlinstance': {'resource': 2},
'compute_autoscaler': {'resource': 1},
'compute_backendbucket': {'resource': 1},
'compute_healthcheck': {'resource': 1},
'compute_httphealthcheck': {'resource': 1},
'compute_httpshealthcheck': {'resource': 1},
'compute_license': {'resource': 1},
'compute_router': {'resource': 1},
'compute_sslcertificate': {'resource': 1},
'compute_targethttpproxy': {'resource': 1},
'compute_targethttpsproxy': {'resource': 1},
'compute_targetinstance': {'resource': 1},
'compute_targetpool': {'resource': 1},
'compute_targetsslproxy': {'resource': 1},
'compute_targettcpproxy': {'resource': 1},
'compute_targetvpngateway': {'resource': 1},
'compute_urlmap': {'resource': 1},
'compute_vpntunnel': {'resource': 1},
'dataproc_cluster': {'resource': 2, 'iam_policy': 1},
'dataset': {'dataset_policy': 2, 'iam_policy': 2, 'resource': 3},
'dns_managedzone': {'resource': 1},
'dns_policy': {'resource': 1},
'kms_cryptokey': {'iam_policy': 1, 'resource': 1},
'kms_cryptokeyversion': {'resource': 1},
'kms_keyring': {'iam_policy': 1, 'resource': 1},
'pubsub_subscription': {'iam_policy': 1, 'resource': 1},
'pubsub_topic': {'iam_policy': 1, 'resource': 1},
'spanner_database': {'resource': 1},
'spanner_instance': {'resource': 1},
})
self.assertEqual(expected_counts, result_counts)
def test_crawl_cai_api_polling_disabled(self):
"""Validate using only CAI and no API polling works."""
self.inventory_config.api_quota_configs = {
'admin': {'disable_polling': True},
'appengine': {'disable_polling': True},
'bigquery': {'disable_polling': True},
'cloudbilling': {'disable_polling': True},
'compute': {'disable_polling': True},
'container': {'disable_polling': True},
'crm': {'disable_polling': True},
'iam': {'disable_polling': True},
'logging': {'disable_polling': True},
'servicemanagement': {'disable_polling': True},
'sqladmin': {'disable_polling': True},
'storage': {'disable_polling': True},
}
result_counts = self._run_crawler(self.inventory_config,
session=self.session)
# Any resource not included in Cloud Asset export will not be in the
# inventory.
expected_counts = {
'appengine_app': {'resource': 2},
'appengine_service': {'resource': 1},
'appengine_version': {'resource': 1},
'backendservice': {'resource': 1},
'billing_account': {'iam_policy': 2, 'resource': 2},
'bucket': {'gcs_policy': 2, 'iam_policy': 2, 'resource': 2},
'cloudsqlinstance': {'resource': 2},
'compute_autoscaler': {'resource': 1},
'compute_backendbucket': {'resource': 1},
'compute_healthcheck': {'resource': 1},
'compute_httphealthcheck': {'resource': 1},
'compute_httpshealthcheck': {'resource': 1},
'compute_license': {'resource': 1},
'compute_project': {'resource': 2},
'compute_router': {'resource': 1},
'compute_sslcertificate': {'resource': 1},
'compute_targethttpproxy': {'resource': 1},
'compute_targethttpsproxy': {'resource': 1},
'compute_targetinstance': {'resource': 1},
'compute_targetpool': {'resource': 1},
'compute_targetsslproxy': {'resource': 1},
'compute_targettcpproxy': {'resource': 1},
'compute_targetvpngateway': {'resource': 1},
'compute_urlmap': {'resource': 1},
'compute_vpntunnel': {'resource': 1},
'dataproc_cluster': {'resource': 2, 'iam_policy': 1},
'dataset': {'dataset_policy': 2, 'iam_policy': 2, 'resource': 3},
'disk': {'resource': 4},
'dns_managedzone': {'resource': 1},
'dns_policy': {'resource': 1},
'firewall': {'resource': 7},
'folder': {'iam_policy': 3, 'resource': 3},
'forwardingrule': {'resource': 1},
'image': {'resource': 2},
'instance': {'resource': 4},
'instancegroup': {'resource': 2},
'instancegroupmanager': {'resource': 2},
'instancetemplate': {'resource': 2},
'kms_cryptokey': {'iam_policy': 1, 'resource': 1},
'kms_cryptokeyversion': {'resource': 1},
'kms_keyring': {'iam_policy': 1, 'resource': 1},
# 'kubernetes_cluster': {'resource': 1},
'network': {'resource': 2},
'organization': {'iam_policy': 1, 'resource': 1},
'project': {'iam_policy': 4, 'resource': 4},
'pubsub_subscription': {'iam_policy': 1, 'resource': 1},
'pubsub_topic': {'iam_policy': 1, 'resource': 1},
'role': {'resource': 2},
'serviceaccount': {'iam_policy': 2, 'resource': 2},
'snapshot': {'resource': 3},
'spanner_database': {'resource': 1},
'spanner_instance': {'resource': 1},
'subnetwork': {'resource': 24}}
self.assertEqual(expected_counts, result_counts)
def test_crawl_cai_data_with_asset_types(self):
"""Validate including asset_types in the CAI inventory config works."""
asset_types = ['cloudresourcemanager.googleapis.com/Folder',
'cloudresourcemanager.googleapis.com/Organization',
'cloudresourcemanager.googleapis.com/Project']
inventory_config = InventoryConfig(gcp_api_mocks.ORGANIZATION_ID,
'',
{},
0,
{'enabled': True,
'gcs_path': 'gs://test-bucket',
'asset_types': asset_types}
)
inventory_config.set_service_config(FakeServerConfig(self.engine))
# Create subsets of the mock resource dumps that only contain the
# filtered asset types
filtered_assets = []
with open(os.path.join(TEST_RESOURCE_DIR_PATH,
'mock_cai_resources.dump'), 'r') as f:
for line in f:
if any('"%s"' % asset_type in line
for asset_type in asset_types):
filtered_assets.append(line)
filtered_assets = ''.join(filtered_assets)
filtered_iam = []
with open(os.path.join(TEST_RESOURCE_DIR_PATH,
'mock_cai_iam_policies.dump'), 'r') as f:
for line in f:
if any('"%s"' % asset_type in line
for asset_type in asset_types):
filtered_iam.append(line)
filtered_iam = ''.join(filtered_iam)
with unittest_utils.create_temp_file(filtered_assets) as resources:
with unittest_utils.create_temp_file(filtered_iam) as iam_policies:
def _copy_file_from_gcs(file_path, *args, **kwargs):
"""Fake copy_file_from_gcs."""
del args, kwargs
if 'resource' in file_path:
return resources
elif 'iam_policy' in file_path:
return iam_policies
self.mock_copy_file_from_gcs.side_effect = _copy_file_from_gcs
with MemoryStorage(session=self.session) as storage:
progresser = NullProgresser()
with gcp_api_mocks.mock_gcp() as gcp_mocks:
run_crawler(storage,
progresser,
inventory_config)
# Validate export_assets called with asset_types
expected_calls = [
mock.call(gcp_api_mocks.ORGANIZATION_ID,
mock.ANY,
content_type='RESOURCE',
asset_types=asset_types,
blocking=mock.ANY,
timeout=mock.ANY),
mock.call(gcp_api_mocks.ORGANIZATION_ID,
mock.ANY,
content_type='IAM_POLICY',
asset_types=asset_types,
blocking=mock.ANY,
timeout=mock.ANY)]
(gcp_mocks.mock_cloudasset.export_assets
.assert_has_calls(expected_calls, any_order=True))
self.assertEqual(0,
progresser.errors,
'No errors should have occurred')
result_counts = self._get_resource_counts_from_storage(
storage)
expected_counts = {
'crm_org_policy': {'resource': 5},
'folder': {'iam_policy': 3, 'resource': 3},
'gsuite_group': {'resource': 4},
'gsuite_group_member': {'resource': 1},
'gsuite_groups_settings': {'resource': 4},
'gsuite_user': {'resource': 4},
'gsuite_user_member': {'resource': 3},
'kubernetes_cluster': {'resource': 1, 'service_config': 1},
'lien': {'resource': 1},
'organization': {'iam_policy': 1, 'resource': 1},
'project': {'billing_info': 4, 'enabled_apis': 4, 'iam_policy': 4,
'resource': 4},
'role': {'resource': 18},
'sink': {'resource': 6},
}
self.assertEqual(expected_counts, result_counts)
if __name__ == '__main__':
unittest.main()
| 1 | 34,036 | This one won't change as the resource is not included in the project getting tested with the composite root. | forseti-security-forseti-security | py |
@@ -336,6 +336,7 @@ static CALI_BPF_INLINE int calico_tc(struct __sk_buff *skb)
.reason = CALI_REASON_UNKNOWN,
};
struct calico_nat_dest *nat_dest = NULL;
+ __u8 nat_lvl1_drop = 0;
/* we assume we do FIB and from this point on, we only set it to false
* if we decide not to do it. | 1 | // Project Calico BPF dataplane programs.
// Copyright (c) 2020 Tigera, Inc. All rights reserved.
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, write to the Free Software Foundation, Inc.,
// 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#include <asm/types.h>
#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/icmp.h>
#include <linux/in.h>
#include <linux/udp.h>
#include <linux/if_ether.h>
#include <iproute2/bpf_elf.h>
#include <stdbool.h>
#include <stdint.h>
#include <stddef.h>
#include "bpf.h"
#include "log.h"
#include "skb.h"
#include "policy.h"
#include "conntrack.h"
#include "nat.h"
#include "routes.h"
#include "jump.h"
#include "reasons.h"
#include "icmp.h"
#ifndef CALI_FIB_LOOKUP_ENABLED
#define CALI_FIB_LOOKUP_ENABLED true
#endif
#ifndef CALI_DROP_WORKLOAD_TO_HOST
#define CALI_DROP_WORKLOAD_TO_HOST false
#endif
#ifdef CALI_DEBUG_ALLOW_ALL
/* If we want to just compile the code without defining any policies and to
* avoid compiling out code paths that are not reachable if traffic is denied,
* we can compile it with allow all
*/
static CALI_BPF_INLINE enum calico_policy_result execute_policy_norm(struct __sk_buff *skb,
__u8 ip_proto, __u32 saddr, __u32 daddr, __u16 sport, __u16 dport)
{
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunused-label"
RULE_START(0);
RULE_END(0, allow);
return CALI_POL_NO_MATCH;
deny:
return CALI_POL_DENY;
allow:
return CALI_POL_ALLOW;
#pragma clang diagnostic pop
}
#else
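/* Normal build: with no generated rules in place this stub falls through to
 * deny (RULE_END(0, deny)), unlike the allow-all debug variant above.
 */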
static CALI_BPF_INLINE enum calico_policy_result execute_policy_norm(struct __sk_buff *skb,
__u8 ip_proto, __u32 saddr, __u32 daddr, __u16 sport, __u16 dport)
{
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunused-label"
RULE_START(0);
RULE_END(0, deny);
return CALI_POL_NO_MATCH;
deny:
return CALI_POL_DENY;
allow:
return CALI_POL_ALLOW;
#pragma clang diagnostic pop
}
#endif /* CALI_DEBUG_ALLOW_ALL */
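/* Tail-call target (ELF section "1/0"): runs the normal policy rules against
 * the state saved by calico_tc() and then tail-calls slot 1 of cali_jump,
 * the post-policy program; the packet is dropped if either step fails.
 */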
__attribute__((section("1/0")))
int calico_tc_norm_pol_tail(struct __sk_buff *skb)
{
CALI_DEBUG("Entering normal policy tail call\n");
__u32 key = 0;
struct cali_tc_state *state = cali_v4_state_lookup_elem(&key);
if (!state) {
CALI_DEBUG("State map lookup failed: DROP\n");
goto deny;
}
state->pol_rc = execute_policy_norm(skb, state->ip_proto, state->ip_src,
state->ip_dst, state->sport, state->dport);
bpf_tail_call(skb, &cali_jump, 1);
CALI_DEBUG("Tail call to post-policy program failed: DROP\n");
deny:
return TC_ACT_SHOT;
}
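/* Forwarding verdict handed back from the accepted-packet path: the TC
 * result code, the skb mark to apply, the drop reason and, when FIB lookup
 * is compiled in, whether and with what flags to attempt it.
 */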
struct fwd {
int res;
uint32_t mark;
enum calico_reason reason;
#if FIB_ENABLED
uint32_t fib_flags;
bool fib;
#endif
};
#if FIB_ENABLED
#define fwd_fib(fwd) ((fwd)->fib)
#define fwd_fib_set(fwd, v) ((fwd)->fib = v)
#define fwd_fib_set_flags(fwd, flags) ((fwd)->fib_flags = flags)
#else
#define fwd_fib(fwd) false
#define fwd_fib_set(fwd, v)
#define fwd_fib_set_flags(fwd, flags)
#endif
static CALI_BPF_INLINE struct fwd calico_tc_skb_accepted(struct __sk_buff *skb,
struct iphdr *ip_header,
struct cali_tc_state *state,
struct calico_nat_dest *nat_dest);
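/* Incrementally fix up the L4 checksum after NAT: a pseudo-header aware
 * update when the IP address changed and a plain 16-bit update when the
 * port changed. Returns non-zero if either bpf_l4_csum_replace() call fails.
 */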
static CALI_BPF_INLINE int skb_nat_l4_csum_ipv4(struct __sk_buff *skb, size_t off,
__be32 ip_from, __be32 ip_to,
__u16 port_from, __u16 port_to,
uint64_t flags)
{
int ret = 0;
if (ip_from != ip_to) {
CALI_DEBUG("L4 checksum update (csum is at %d) IP from %x to %x\n", off,
be32_to_host(ip_from), be32_to_host(ip_to));
ret = bpf_l4_csum_replace(skb, off, ip_from, ip_to, flags | BPF_F_PSEUDO_HDR | 4);
CALI_DEBUG("bpf_l4_csum_replace(IP): %d\n", ret);
}
if (port_from != port_to) {
CALI_DEBUG("L4 checksum update (csum is at %d) port from %d to %d\n",
off, be16_to_host(port_from), be16_to_host(port_to));
int rc = bpf_l4_csum_replace(skb, off, port_from, port_to, flags | 2);
CALI_DEBUG("bpf_l4_csum_replace(port): %d\n", rc);
ret |= rc;
}
return ret;
}
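/* Translate the fwd verdict into a final TC action: handle redirects back
 * out of the same interface, optionally short-circuit routing via
 * bpf_fib_lookup()/bpf_redirect(), and set the skb mark for packets headed
 * to the host namespace.
 */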
static CALI_BPF_INLINE int forward_or_drop(struct __sk_buff *skb,
struct cali_tc_state *state,
struct fwd *fwd)
{
int rc = fwd->res;
enum calico_reason reason = fwd->reason;
if (rc == TC_ACT_SHOT) {
goto deny;
}
if (rc == CALI_RES_REDIR_IFINDEX) {
int redir_flags = 0;
if (CALI_F_FROM_HOST) {
redir_flags = BPF_F_INGRESS;
}
/* Revalidate the access to the packet */
if ((void *)(long)skb->data + sizeof(struct ethhdr) > (void *)(long)skb->data_end) {
reason = CALI_REASON_SHORT;
goto deny;
}
/* Swap the MACs as we are turning it back */
struct ethhdr *eth_hdr = (void *)(long)skb->data;
unsigned char mac[ETH_ALEN];
__builtin_memcpy(mac, ð_hdr->h_dest, ETH_ALEN);
__builtin_memcpy(ð_hdr->h_dest, ð_hdr->h_source, ETH_ALEN);
__builtin_memcpy(ð_hdr->h_source, mac, ETH_ALEN);
rc = bpf_redirect(skb->ifindex, redir_flags);
if (rc == TC_ACT_REDIRECT) {
CALI_DEBUG("Redirect to the same interface (%d) succeeded\n", skb->ifindex);
goto skip_fib;
}
CALI_DEBUG("Redirect to the same interface (%d) failed\n", skb->ifindex);
goto deny;
}
#if FIB_ENABLED
// Try a short-circuit FIB lookup.
if (fwd_fib(fwd)) {
/* XXX we might include the tot_len in the fwd, set it once when
* we get the ip_header the first time and only adjust the value
		 * when we modify the packet - to avoid getting the header here
* again - it is simpler though.
*/
if (skb_too_short(skb)) {
reason = CALI_REASON_SHORT;
CALI_DEBUG("Too short\n");
goto deny;
}
struct iphdr *ip_header = skb_iphdr(skb);
struct bpf_fib_lookup fib_params = {
.family = 2, /* AF_INET */
.tot_len = be16_to_host(ip_header->tot_len),
.ifindex = skb->ingress_ifindex,
.l4_protocol = state->ip_proto,
.sport = host_to_be16(state->sport),
.dport = host_to_be16(state->dport),
};
/* set the ipv4 here, otherwise the ipv4/6 unions do not get
* zeroed properly
*/
fib_params.ipv4_src = state->ip_src;
fib_params.ipv4_dst = state->ip_dst;
CALI_DEBUG("FIB family=%d\n", fib_params.family);
CALI_DEBUG("FIB tot_len=%d\n", fib_params.tot_len);
CALI_DEBUG("FIB ifindex=%d\n", fib_params.ifindex);
CALI_DEBUG("FIB l4_protocol=%d\n", fib_params.l4_protocol);
CALI_DEBUG("FIB sport=%d\n", be16_to_host(fib_params.sport));
CALI_DEBUG("FIB dport=%d\n", be16_to_host(fib_params.dport));
CALI_DEBUG("FIB ipv4_src=%x\n", be32_to_host(fib_params.ipv4_src));
CALI_DEBUG("FIB ipv4_dst=%x\n", be32_to_host(fib_params.ipv4_dst));
CALI_DEBUG("Traffic is towards the host namespace, doing Linux FIB lookup\n");
rc = bpf_fib_lookup(skb, &fib_params, sizeof(fib_params), fwd->fib_flags);
if (rc == 0) {
CALI_DEBUG("FIB lookup succeeded\n");
/* Since we are going to short circuit the IP stack on
* forward, check if TTL is still alive. If not, let the
* IP stack handle it. It was approved by policy, so it
* is safe.
*/
			if (ip_ttl_exceeded(ip_header)) {
rc = TC_ACT_UNSPEC;
goto cancel_fib;
}
// Update the MACs. NAT may have invalidated pointer into the packet so need to
// revalidate.
if ((void *)(long)skb->data + sizeof(struct ethhdr) > (void *)(long)skb->data_end) {
reason = CALI_REASON_SHORT;
goto deny;
}
struct ethhdr *eth_hdr = (void *)(long)skb->data;
__builtin_memcpy(ð_hdr->h_source, fib_params.smac, sizeof(eth_hdr->h_source));
__builtin_memcpy(ð_hdr->h_dest, fib_params.dmac, sizeof(eth_hdr->h_dest));
// Redirect the packet.
CALI_DEBUG("Got Linux FIB hit, redirecting to iface %d.\n", fib_params.ifindex);
rc = bpf_redirect(fib_params.ifindex, 0);
/* now we know we will bypass IP stack and ip->ttl > 1, decrement it! */
if (rc == TC_ACT_REDIRECT) {
ip_dec_ttl(ip_header);
}
} else if (rc < 0) {
CALI_DEBUG("FIB lookup failed (bad input): %d.\n", rc);
rc = TC_ACT_UNSPEC;
} else {
CALI_DEBUG("FIB lookup failed (FIB problem): %d.\n", rc);
rc = TC_ACT_UNSPEC;
}
}
cancel_fib:
#endif /* FIB_ENABLED */
skip_fib:
if (CALI_F_TO_HOST) {
/* If we received the packet from the tunnel and we forward it to a
		 * workload we need to skip the RPF check since there might be a better
		 * path for the packet if the host has multiple ifaces, and it might get
		 * dropped otherwise.
*
* XXX We should check ourselves that we got our tunnel packets only from
* XXX those devices where we expect them before we even decap.
*/
if (CALI_F_FROM_HEP && state->tun_ip != 0) {
fwd->mark = CALI_SKB_MARK_SKIP_RPF;
}
/* Packet is towards host namespace, mark it so that downstream
* programs know that they're not the first to see the packet.
*/
CALI_DEBUG("Traffic is towards host namespace, marking with %x.\n", fwd->mark);
/* FIXME: this ignores the mask that we should be using.
* However, if we mask off the bits, then clang spots that it
* can do a 16-bit store instead of a 32-bit load/modify/store,
* which trips up the validator.
*/
skb->mark = fwd->mark | CALI_SKB_MARK_SEEN; /* make sure that each pkt has SEEN mark */
}
if (CALI_LOG_LEVEL >= CALI_LOG_LEVEL_INFO) {
uint64_t prog_end_time = bpf_ktime_get_ns();
CALI_INFO("Final result=ALLOW (%d). Program execution time: %lluns\n",
rc, prog_end_time-state->prog_start_time);
}
return rc;
deny:
if (CALI_LOG_LEVEL >= CALI_LOG_LEVEL_INFO) {
uint64_t prog_end_time = bpf_ktime_get_ns();
CALI_INFO("Final result=DENY (%x). Program execution time: %lluns\n",
reason, prog_end_time-state->prog_start_time);
}
return TC_ACT_SHOT;
}
static CALI_BPF_INLINE int calico_tc(struct __sk_buff *skb)
{
struct cali_tc_state state = {};
struct fwd fwd = {
.res = TC_ACT_UNSPEC,
.reason = CALI_REASON_UNKNOWN,
};
struct calico_nat_dest *nat_dest = NULL;
/* we assume we do FIB and from this point on, we only set it to false
* if we decide not to do it.
*/
fwd_fib_set(&fwd, true);
if (CALI_LOG_LEVEL >= CALI_LOG_LEVEL_INFO) {
state.prog_start_time = bpf_ktime_get_ns();
}
state.tun_ip = 0;
#ifdef CALI_SET_SKB_MARK
	/* workaround for test since bpftool run cannot set it in context, won't
* be necessary if fixed in kernel
*/
skb->mark = CALI_SET_SKB_MARK;
#endif
if (!CALI_F_TO_HOST && skb->mark == CALI_SKB_MARK_BYPASS) {
CALI_DEBUG("Packet pre-approved by another hook, allow.\n");
fwd.reason = CALI_REASON_BYPASS;
goto allow;
}
struct iphdr *ip_header;
if (CALI_F_TO_HEP || CALI_F_TO_WEP) {
switch (skb->mark) {
case CALI_SKB_MARK_BYPASS_FWD:
CALI_DEBUG("Packet approved for forward.\n");
fwd.reason = CALI_REASON_BYPASS;
goto allow;
case CALI_SKB_MARK_BYPASS_FWD_SRC_FIXUP:
CALI_DEBUG("Packet approved for forward - src ip fixup\n");
fwd.reason = CALI_REASON_BYPASS;
/* we need to fix up the right src host IP */
if (skb_too_short(skb)) {
fwd.reason = CALI_REASON_SHORT;
CALI_DEBUG("Too short\n");
goto deny;
}
ip_header = skb_iphdr(skb);
__be32 ip_src = ip_header->saddr;
if (ip_src == HOST_IP) {
CALI_DEBUG("src ip fixup not needed %x\n", be32_to_host(ip_src));
goto allow;
}
/* XXX do a proper CT lookup to find this */
ip_header->saddr = HOST_IP;
int l3_csum_off = skb_iphdr_offset(skb) + offsetof(struct iphdr, check);
int res = bpf_l3_csum_replace(skb, l3_csum_off, ip_src, HOST_IP, 4);
if (res) {
fwd.reason = CALI_REASON_CSUM_FAIL;
goto deny;
}
goto allow;
}
}
// Parse the packet.
// TODO Do we need to handle any odd-ball frames here (e.g. with a 0 VLAN header)?
switch (host_to_be16(skb->protocol)) {
case ETH_P_IP:
break;
case ETH_P_ARP:
CALI_DEBUG("ARP: allowing packet\n");
fwd_fib_set(&fwd, false);
goto allow;
case ETH_P_IPV6:
if (CALI_F_WEP) {
CALI_DEBUG("IPv6 from workload: drop\n");
return TC_ACT_SHOT;
} else {
// FIXME: support IPv6.
CALI_DEBUG("IPv6 on host interface: allow\n");
return TC_ACT_UNSPEC;
}
default:
if (CALI_F_WEP) {
CALI_DEBUG("Unknown ethertype (%x), drop\n", be16_to_host(skb->protocol));
goto deny;
} else {
CALI_DEBUG("Unknown ethertype on host interface (%x), allow\n",
be16_to_host(skb->protocol));
return TC_ACT_UNSPEC;
}
}
if (skb_too_short(skb)) {
fwd.reason = CALI_REASON_SHORT;
CALI_DEBUG("Too short\n");
goto deny;
}
ip_header = skb_iphdr(skb);
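	/* Decapsulate VXLAN only when the outer packet is addressed to this host
	 * and the UDP checksum, size and VNI all check out; remember the tunnel
	 * source so replies can be encapped back to it.
	 */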
if (dnat_should_decap() && is_vxlan_tunnel(ip_header)) {
struct udphdr *udp_header = (void*)(ip_header+1);
/* decap on host ep only if directly for the node */
CALI_DEBUG("VXLAN tunnel packet to %x (host IP=%x)\n", ip_header->daddr, HOST_IP);
if (rt_addr_is_local_host(ip_header->daddr) &&
vxlan_udp_csum_ok(udp_header) &&
vxlan_size_ok(skb, udp_header) &&
vxlan_vni_is_valid(skb, udp_header) &&
vxlan_vni(skb, udp_header) == CALI_VXLAN_VNI) {
state.tun_ip = ip_header->saddr;
CALI_DEBUG("vxlan decap\n");
if (vxlan_v4_decap(skb)) {
fwd.reason = CALI_REASON_DECAP_FAIL;
goto deny;
}
if (skb_too_short(skb)) {
fwd.reason = CALI_REASON_SHORT;
CALI_DEBUG("Too short after VXLAN decap\n");
goto deny;
}
ip_header = skb_iphdr(skb);
CALI_DEBUG("vxlan decap origin %x\n", be32_to_host(state.tun_ip));
}
}
// Drop malformed IP packets
if (ip_header->ihl < 5) {
fwd.reason = CALI_REASON_IP_MALFORMED;
CALI_DEBUG("Drop malformed IP packets\n");
goto deny;
} else if (ip_header->ihl > 5) {
/* Drop packets with IP options from/to WEP.
* Also drop packets with IP options if the dest IP is not host IP
*/
if (CALI_F_WEP || (CALI_F_FROM_HEP && !rt_addr_is_local_host(ip_header->daddr))) {
fwd.reason = CALI_REASON_IP_OPTIONS;
CALI_DEBUG("Drop packets with IP options\n");
goto deny;
}
CALI_DEBUG("Allow packets with IP options and dst IP = hostIP\n");
goto allow;
}
// Setting all of these up-front to keep the verifier happy.
struct tcphdr *tcp_header = (void*)(ip_header+1);
struct udphdr *udp_header = (void*)(ip_header+1);
struct icmphdr *icmp_header = (void*)(ip_header+1);
tc_state_fill_from_iphdr(&state, ip_header);
switch (state.ip_proto) {
case IPPROTO_TCP:
// Re-check buffer space for TCP (has larger headers than UDP).
if (!skb_has_data_after(skb, ip_header, sizeof(struct tcphdr))) {
CALI_DEBUG("Too short for TCP: DROP\n");
goto deny;
}
state.sport = be16_to_host(tcp_header->source);
state.dport = be16_to_host(tcp_header->dest);
CALI_DEBUG("TCP; ports: s=%d d=%d\n", state.sport, state.dport);
break;
case IPPROTO_UDP:
state.sport = be16_to_host(udp_header->source);
state.dport = be16_to_host(udp_header->dest);
CALI_DEBUG("UDP; ports: s=%d d=%d\n", state.sport, state.dport);
break;
case IPPROTO_ICMP:
icmp_header = (void*)(ip_header+1);
CALI_DEBUG("ICMP; type=%d code=%d\n",
icmp_header->type, icmp_header->code);
break;
case 4:
// IPIP
if (CALI_F_HEP) {
// TODO IPIP whitelist.
CALI_DEBUG("IPIP: allow\n");
fwd_fib_set(&fwd, false);
goto allow;
}
default:
CALI_DEBUG("Unknown protocol (%d), unable to extract ports\n", (int)state.ip_proto);
}
state.pol_rc = CALI_POL_NO_MATCH;
switch (state.ip_proto) {
case IPPROTO_TCP:
case IPPROTO_UDP:
case IPPROTO_ICMP:
break;
default:
if (CALI_F_HEP) {
// FIXME: allow unknown protocols through on host endpoints.
goto allow;
}
// FIXME non-port based conntrack.
goto deny;
}
struct ct_ctx ct_lookup_ctx = {
.skb = skb,
.proto = state.ip_proto,
.src = state.ip_src,
.sport = state.sport,
.dst = state.ip_dst,
.dport = state.dport,
.tun_ip = state.tun_ip,
};
if (state.ip_proto == IPPROTO_TCP) {
if (!skb_has_data_after(skb, ip_header, sizeof(struct tcphdr))) {
CALI_DEBUG("Too short for TCP: DROP\n");
goto deny;
}
tcp_header = (void*)(ip_header+1);
ct_lookup_ctx.tcp = tcp_header;
}
/* Do conntrack lookup before anything else */
state.ct_result = calico_ct_v4_lookup(&ct_lookup_ctx);
/* check if someone is trying to spoof a tunnel packet */
if (CALI_F_FROM_HEP && ct_result_tun_src_changed(state.ct_result.rc)) {
CALI_DEBUG("dropping tunnel pkt with changed source node\n");
goto deny;
}
if (state.ct_result.flags & CALI_CT_FLAG_NAT_OUT) {
state.flags |= CALI_ST_NAT_OUTGOING;
}
/* We are possibly past (D)NAT, but that is ok, we need to let the IP
	 * stack do the RPF check on the source, dest is not important.
*/
if (CALI_F_TO_HOST && ct_result_rpf_failed(state.ct_result.rc)) {
fwd_fib_set(&fwd, false);
}
/* skip policy if we get conntrack hit */
if (ct_result_rc(state.ct_result.rc) != CALI_CT_NEW) {
goto skip_policy;
}
/* Unlike from WEP where we can do RPF by comparing to calico routing
	 * info, we must rely on Linux to do it for us when receiving packets
	 * from outside of the host. We enforce RPF failed on every new flow.
	 * This will make it skip fib in calico_tc_skb_accepted()
*/
if (CALI_F_FROM_HEP) {
ct_result_set_flag(state.ct_result.rc, CALI_CT_RPF_FAILED);
}
/* No conntrack entry, check if we should do NAT */
nat_dest = calico_v4_nat_lookup2(state.ip_src, state.ip_dst,
state.ip_proto, state.dport,
state.tun_ip != 0);
if (nat_dest != NULL) {
state.post_nat_ip_dst = nat_dest->addr;
state.post_nat_dport = nat_dest->port;
} else {
state.post_nat_ip_dst = state.ip_dst;
state.post_nat_dport = state.dport;
}
if (CALI_F_TO_WEP &&
skb->mark != CALI_SKB_MARK_SEEN &&
cali_rt_flags_local_host(cali_rt_lookup_flags(state.ip_src))) {
/* Host to workload traffic always allowed. We discount traffic that was
* seen by another program since it must have come in via another interface.
*/
CALI_DEBUG("Packet is from the host: ACCEPT\n");
state.pol_rc = CALI_POL_ALLOW;
goto skip_policy;
}
if (CALI_F_FROM_WEP) {
/* Do RPF check since it's our responsibility to police that. */
CALI_DEBUG("Workload RPF check src=%x skb iface=%d.\n",
be32_to_host(state.ip_src), skb->ifindex);
struct cali_rt *r = cali_rt_lookup(state.ip_src);
if (!r) {
CALI_INFO("Workload RPF fail: missing route.\n");
goto deny;
}
if (!cali_rt_flags_local_workload(r->flags)) {
CALI_INFO("Workload RPF fail: not a local workload.\n");
goto deny;
}
if (r->if_index != skb->ifindex) {
CALI_INFO("Workload RPF fail skb iface (%d) != route iface (%d)\n",
skb->ifindex, r->if_index);
goto deny;
}
// Check whether the workload needs outgoing NAT to this address.
if (r->flags & CALI_RT_NAT_OUT) {
if (!(cali_rt_lookup_flags(state.post_nat_ip_dst) & CALI_RT_IN_POOL)) {
CALI_DEBUG("Source is in NAT-outgoing pool "
"but dest is not, need to SNAT.\n");
state.flags |= CALI_ST_NAT_OUTGOING;
}
}
}
/* icmp_type and icmp_code share storage with the ports; now we've used
* the ports set to 0 to do the conntrack lookup, we can set the ICMP fields
* for policy.
*/
if (state.ip_proto == IPPROTO_ICMP) {
state.icmp_type = icmp_header->type;
state.icmp_code = icmp_header->code;
}
// Set up an entry in the state map and then jump to the normal policy program.
int key = 0;
struct cali_tc_state *map_state = cali_v4_state_lookup_elem(&key);
if (!map_state) {
// Shouldn't be possible; the map is pre-allocated.
CALI_INFO("State map lookup failed: DROP\n");
goto deny;
}
state.pol_rc = CALI_POL_NO_MATCH;
if (nat_dest) {
state.nat_dest.addr = nat_dest->addr;
state.nat_dest.port = nat_dest->port;
} else {
state.nat_dest.addr = 0;
state.nat_dest.port = 0;
}
*map_state = state;
if (CALI_F_HEP) {
/* We don't support host-endpoint policy yet, skip straight to
* the epilogue program.
* FIXME we really want to just call calico_tc_skb_accepted()
* here but that runs out of stack space.
*/
map_state->pol_rc = CALI_POL_ALLOW;
bpf_tail_call(skb, &cali_jump, 1);
CALI_DEBUG("Tail call to epilogue program failed: ALLOW\n");
return TC_ACT_UNSPEC;
}
CALI_DEBUG("About to jump to policy program; lack of further "
"logs means policy dropped the packet...\n");
bpf_tail_call(skb, &cali_jump, 0);
CALI_DEBUG("Tail call to policy program failed: DROP\n");
return TC_ACT_SHOT;
skip_policy:
fwd = calico_tc_skb_accepted(skb, ip_header, &state, nat_dest);
allow:
finalize:
return forward_or_drop(skb, &state, &fwd);
deny:
fwd.res = TC_ACT_SHOT;
goto finalize;
}
__attribute__((section("1/1")))
int calico_tc_skb_accepted_entrypoint(struct __sk_buff *skb)
{
CALI_DEBUG("Entering calico_tc_skb_accepted_entrypoint\n");
struct iphdr *ip_header = NULL;
if (skb_too_short(skb)) {
CALI_DEBUG("Too short\n");
goto deny;
}
ip_header = skb_iphdr(skb);
__u32 key = 0;
struct cali_tc_state *state = bpf_map_lookup_elem(&cali_v4_state, &key);
if (!state) {
CALI_DEBUG("State map lookup failed: DROP\n");
goto deny;
}
struct calico_nat_dest *nat_dest = NULL;
struct calico_nat_dest nat_dest_2 = {
.addr=state->nat_dest.addr,
.port=state->nat_dest.port,
};
if (state->nat_dest.addr != 0) {
nat_dest = &nat_dest_2;
}
struct fwd fwd = calico_tc_skb_accepted(skb, ip_header, state, nat_dest);
return forward_or_drop(skb, state, &fwd);
deny:
return TC_ACT_SHOT;
}
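/* Post-policy/conntrack epilogue for accepted packets: creates conntrack
 * entries for new flows, applies DNAT/SNAT and the matching checksum fixups,
 * generates ICMP "TTL exceeded"/"too big" responses where needed and VXLAN
 * encaps traffic NATed to a remote backend, returning the fwd verdict.
 */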
static CALI_BPF_INLINE struct fwd calico_tc_skb_accepted(struct __sk_buff *skb,
struct iphdr *ip_header,
struct cali_tc_state *state,
struct calico_nat_dest *nat_dest)
{
CALI_DEBUG("Entering calico_tc_skb_accepted\n");
enum calico_reason reason = CALI_REASON_UNKNOWN;
int rc = TC_ACT_UNSPEC;
bool fib = false;
struct ct_ctx ct_nat_ctx = {};
int ct_rc = ct_result_rc(state->ct_result.rc);
bool ct_related = ct_result_is_related(state->ct_result.rc);
uint32_t seen_mark;
size_t l4_csum_off = 0, l3_csum_off;
uint32_t fib_flags = 0;
CALI_DEBUG("src=%x dst=%x\n", be32_to_host(state->ip_src), be32_to_host(state->ip_dst));
CALI_DEBUG("post_nat=%x:%d\n", be32_to_host(state->post_nat_ip_dst), state->post_nat_dport);
CALI_DEBUG("tun_ip=%x\n", state->tun_ip);
CALI_DEBUG("pol_rc=%d\n", state->pol_rc);
CALI_DEBUG("sport=%d\n", state->sport);
CALI_DEBUG("flags=%x\n", state->flags);
CALI_DEBUG("ct_rc=%d\n", ct_rc);
CALI_DEBUG("ct_related=%d\n", ct_related);
	// Set the dport to 0 to make sure conntrack entries for ICMP are correct, as we
	// use dport to hold the ICMP type and code.
if (state->ip_proto == IPPROTO_ICMP) {
state->dport = 0;
}
if (CALI_F_FROM_WEP && (state->flags & CALI_ST_NAT_OUTGOING)) {
seen_mark = CALI_SKB_MARK_NAT_OUT;
} else {
/* XXX we do it here again because doing it in one place only
* XXX in calico_tc() irritates the verifier :'(
*/
if (!CALI_F_TO_HOST || !ct_result_rpf_failed(state->ct_result.rc)) {
fib = true;
}
seen_mark = CALI_SKB_MARK_SEEN;
}
/* We check the ttl here to avoid needing complicated handling of
	 * related traffic back from the host if we let the host handle it.
*/
CALI_DEBUG("ip->ttl %d\n", ip_header->ttl);
if (ip_ttl_exceeded(ip_header)) {
switch (ct_rc){
case CALI_CT_NEW:
if (nat_dest) {
goto icmp_ttl_exceeded;
}
break;
case CALI_CT_ESTABLISHED_DNAT:
case CALI_CT_ESTABLISHED_SNAT:
goto icmp_ttl_exceeded;
}
}
l3_csum_off = skb_iphdr_offset(skb) + offsetof(struct iphdr, check);
if (ct_related) {
if (ip_header->protocol == IPPROTO_ICMP) {
struct icmphdr *icmp;
bool outer_ip_snat;
/* if we do SNAT ... */
outer_ip_snat = ct_rc == CALI_CT_ESTABLISHED_SNAT;
/* ... there is a return path to the tunnel ... */
outer_ip_snat = outer_ip_snat && state->ct_result.tun_ip;
/* ... and should do encap and it is not DSR or it is leaving host
* and either DSR from WEP or originated at host ... */
outer_ip_snat = outer_ip_snat &&
((dnat_return_should_encap() && !CALI_F_DSR) ||
(CALI_F_TO_HEP &&
((CALI_F_DSR && skb_seen(skb)) || !skb_seen(skb))));
/* ... then fix the outer header IP first */
if (outer_ip_snat) {
ip_header->saddr = state->ct_result.nat_ip;
int res = bpf_l3_csum_replace(skb, l3_csum_off,
state->ip_src, state->ct_result.nat_ip, 4);
if (res) {
reason = CALI_REASON_CSUM_FAIL;
goto deny;
}
CALI_DEBUG("ICMP related: outer IP SNAT to %x\n",
be32_to_host(state->ct_result.nat_ip));
}
if (!icmp_skb_get_hdr(skb, &icmp)) {
CALI_DEBUG("Ooops, we already passed one such a check!!!\n");
goto deny;
}
l3_csum_off += sizeof(*ip_header) + sizeof(*icmp);
ip_header = (struct iphdr *)(icmp + 1); /* skip to inner ip */
/* flip the direction, we need to reverse the original packet */
switch (ct_rc) {
case CALI_CT_ESTABLISHED_SNAT:
/* handle the DSR case, see CALI_CT_ESTABLISHED_SNAT where nat is done */
if (dnat_return_should_encap() && state->ct_result.tun_ip) {
if (CALI_F_DSR) {
/* SNAT will be done after routing, when leaving HEP */
CALI_DEBUG("DSR enabled, skipping SNAT + encap\n");
goto allow;
}
}
ct_rc = CALI_CT_ESTABLISHED_DNAT;
break;
case CALI_CT_ESTABLISHED_DNAT:
if (CALI_F_FROM_HEP && state->tun_ip && ct_result_np_node(state->ct_result)) {
/* Packet is returning from a NAT tunnel, just forward it. */
seen_mark = CALI_SKB_MARK_BYPASS_FWD;
CALI_DEBUG("ICMP related returned from NAT tunnel\n");
goto allow;
}
ct_rc = CALI_CT_ESTABLISHED_SNAT;
break;
}
}
}
struct tcphdr *tcp_header = (void*)(ip_header+1);
struct udphdr *udp_header = (void*)(ip_header+1);
__u8 ihl = ip_header->ihl * 4;
int res = 0;
bool encap_needed = false;
if (state->ip_proto == IPPROTO_ICMP && ct_related) {
/* do not fix up embedded L4 checksum for related ICMP */
} else {
switch (ip_header->protocol) {
case IPPROTO_TCP:
l4_csum_off = skb_l4hdr_offset(skb, ihl) + offsetof(struct tcphdr, check);
break;
case IPPROTO_UDP:
l4_csum_off = skb_l4hdr_offset(skb, ihl) + offsetof(struct udphdr, check);
break;
}
}
switch (ct_rc){
case CALI_CT_NEW:
switch (state->pol_rc) {
case CALI_POL_NO_MATCH:
CALI_DEBUG("Implicitly denied by normal policy: DROP\n");
goto deny;
case CALI_POL_DENY:
CALI_DEBUG("Denied by normal policy: DROP\n");
goto deny;
case CALI_POL_ALLOW:
CALI_DEBUG("Allowed by normal policy: ACCEPT\n");
}
if (CALI_F_FROM_WEP &&
CALI_DROP_WORKLOAD_TO_HOST &&
cali_rt_flags_local_host(
cali_rt_lookup_flags(state->post_nat_ip_dst))) {
CALI_DEBUG("Workload to host traffic blocked by "
"DefaultEndpointToHostAction: DROP\n");
goto deny;
}
ct_nat_ctx.skb = skb;
ct_nat_ctx.proto = state->ip_proto;
ct_nat_ctx.src = state->ip_src;
ct_nat_ctx.sport = state->sport;
ct_nat_ctx.dst = state->post_nat_ip_dst;
ct_nat_ctx.dport = state->post_nat_dport;
ct_nat_ctx.tun_ip = state->tun_ip;
if (state->flags & CALI_ST_NAT_OUTGOING) {
ct_nat_ctx.flags |= CALI_CT_FLAG_NAT_OUT;
}
if (state->ip_proto == IPPROTO_TCP) {
if (!skb_has_data_after(skb, ip_header, sizeof(struct tcphdr))) {
CALI_DEBUG("Too short for TCP: DROP\n");
goto deny;
}
tcp_header = (void*)(ip_header+1);
ct_nat_ctx.tcp = tcp_header;
}
// If we get here, we've passed policy.
if (nat_dest == NULL) {
if (conntrack_create(&ct_nat_ctx, CT_CREATE_NORMAL)) {
CALI_DEBUG("Creating normal conntrack failed\n");
goto deny;
}
goto allow;
}
ct_nat_ctx.orig_dst = state->ip_dst;
ct_nat_ctx.orig_dport = state->dport;
/* fall through as DNAT is now established */
case CALI_CT_ESTABLISHED_DNAT:
/* align with CALI_CT_NEW */
if (ct_rc == CALI_CT_ESTABLISHED_DNAT) {
if (CALI_F_FROM_HEP && state->tun_ip && ct_result_np_node(state->ct_result)) {
/* Packet is returning from a NAT tunnel,
* already SNATed, just forward it.
*/
seen_mark = CALI_SKB_MARK_BYPASS_FWD;
CALI_DEBUG("returned from NAT tunnel\n");
goto allow;
}
state->post_nat_ip_dst = state->ct_result.nat_ip;
state->post_nat_dport = state->ct_result.nat_port;
}
CALI_DEBUG("CT: DNAT to %x:%d\n",
be32_to_host(state->post_nat_ip_dst), state->post_nat_dport);
encap_needed = dnat_should_encap();
/* We have not created the conntrack yet since we did not know
* if we need encap or not. Must do before MTU check and before
* we jump to do the encap.
*/
if (ct_rc == CALI_CT_NEW) {
struct cali_rt * rt;
int nat_type = CT_CREATE_NAT;
if (encap_needed) {
/* When we need to encap, we need to find out if the backend is
* local or not. If local, we actually do not need the encap.
*/
rt = cali_rt_lookup(state->post_nat_ip_dst);
if (!rt) {
reason = CALI_REASON_RT_UNKNOWN;
goto deny;
}
CALI_DEBUG("rt found for 0x%x local %d\n",
be32_to_host(state->post_nat_ip_dst), !!cali_rt_is_local(rt));
encap_needed = !cali_rt_is_local(rt);
if (encap_needed) {
if (CALI_F_FROM_HEP && state->tun_ip == 0) {
if (CALI_F_DSR) {
ct_nat_ctx.flags |= CALI_CT_FLAG_DSR_FWD;
}
ct_nat_ctx.flags |= CALI_CT_FLAG_NP_FWD;
}
nat_type = CT_CREATE_NAT_FWD;
ct_nat_ctx.tun_ip = rt->next_hop;
state->ip_dst = rt->next_hop;
}
}
if (conntrack_create(&ct_nat_ctx, nat_type)) {
CALI_DEBUG("Creating NAT conntrack failed\n");
goto deny;
}
} else {
if (encap_needed && ct_result_np_node(state->ct_result)) {
CALI_DEBUG("CT says encap to node %x\n", be32_to_host(state->ct_result.tun_ip));
state->ip_dst = state->ct_result.tun_ip;
} else {
encap_needed = false;
}
}
if (encap_needed) {
if (!(state->ip_proto == IPPROTO_TCP && skb_is_gso(skb)) &&
ip_is_dnf(ip_header) && vxlan_v4_encap_too_big(skb)) {
CALI_DEBUG("Request packet with DNF set is too big\n");
goto icmp_too_big;
}
state->ip_src = HOST_IP;
seen_mark = CALI_SKB_MARK_BYPASS_FWD_SRC_FIXUP;
/* We cannot enforce RPF check on encapped traffic, do FIB if you can */
fib = true;
goto nat_encap;
}
ip_header->daddr = state->post_nat_ip_dst;
switch (ip_header->protocol) {
case IPPROTO_TCP:
tcp_header->dest = host_to_be16(state->post_nat_dport);
break;
case IPPROTO_UDP:
udp_header->dest = host_to_be16(state->post_nat_dport);
break;
}
CALI_VERB("L3 csum at %d L4 csum at %d\n", l3_csum_off, l4_csum_off);
if (l4_csum_off) {
res = skb_nat_l4_csum_ipv4(skb, l4_csum_off, state->ip_dst,
state->post_nat_ip_dst, host_to_be16(state->dport),
host_to_be16(state->post_nat_dport),
ip_header->protocol == IPPROTO_UDP ? BPF_F_MARK_MANGLED_0 : 0);
}
res |= bpf_l3_csum_replace(skb, l3_csum_off, state->ip_dst, state->post_nat_ip_dst, 4);
if (res) {
reason = CALI_REASON_CSUM_FAIL;
goto deny;
}
/* Handle returning ICMP related to tunnel
*
* N.B. we assume that we can fit in the MTU. Since it is ICMP
* and even though Linux sends up to min ipv4 MTU, it is
		 * unlikely that we are anywhere close to the MTU limit. If we
* are, we need to fail anyway.
*/
if (ct_related && state->ip_proto == IPPROTO_ICMP
&& state->ct_result.tun_ip
&& !CALI_F_DSR) {
if (dnat_return_should_encap()) {
CALI_DEBUG("Returning related ICMP from workload to tunnel\n");
state->ip_dst = state->ct_result.tun_ip;
seen_mark = CALI_SKB_MARK_BYPASS_FWD_SRC_FIXUP;
goto nat_encap;
} else if (CALI_F_TO_HEP) {
/* Special case for ICMP error being returned by the host with the
* backing workload into the tunnel back to the original host. It is
* ICMP related and there is a return tunnel path. We need to change
* both the source and destination at once.
*
				 * XXX the packet was routed to the original client as if it was
				 * XXX DSR and we might not be on the right iface!!! Should we
				 * XXX try to reinject it to fix the routing?
*/
CALI_DEBUG("Returning related ICMP from host to tunnel\n");
state->ip_src = HOST_IP;
state->ip_dst = state->ct_result.tun_ip;
goto nat_encap;
}
}
state->dport = state->post_nat_dport;
state->ip_dst = state->post_nat_ip_dst;
goto allow;
case CALI_CT_ESTABLISHED_SNAT:
CALI_DEBUG("CT: SNAT from %x:%d\n",
be32_to_host(state->ct_result.nat_ip), state->ct_result.nat_port);
if (dnat_return_should_encap() && state->ct_result.tun_ip) {
if (CALI_F_DSR) {
/* SNAT will be done after routing, when leaving HEP */
CALI_DEBUG("DSR enabled, skipping SNAT + encap\n");
goto allow;
}
if (!(state->ip_proto == IPPROTO_TCP && skb_is_gso(skb)) &&
ip_is_dnf(ip_header) && vxlan_v4_encap_too_big(skb)) {
CALI_DEBUG("Return ICMP mtu is too big\n");
goto icmp_too_big;
}
}
// Actually do the NAT.
ip_header->saddr = state->ct_result.nat_ip;
switch (ip_header->protocol) {
case IPPROTO_TCP:
tcp_header->source = host_to_be16(state->ct_result.nat_port);
break;
case IPPROTO_UDP:
udp_header->source = host_to_be16(state->ct_result.nat_port);
break;
}
CALI_VERB("L3 csum at %d L4 csum at %d\n", l3_csum_off, l4_csum_off);
if (l4_csum_off) {
res = skb_nat_l4_csum_ipv4(skb, l4_csum_off, state->ip_src,
state->ct_result.nat_ip, host_to_be16(state->sport),
host_to_be16(state->ct_result.nat_port),
ip_header->protocol == IPPROTO_UDP ? BPF_F_MARK_MANGLED_0 : 0);
}
CALI_VERB("L3 checksum update (csum is at %d) port from %x to %x\n",
l3_csum_off, state->ip_src, state->ct_result.nat_ip);
int csum_rc = bpf_l3_csum_replace(skb, l3_csum_off,
state->ip_src, state->ct_result.nat_ip, 4);
CALI_VERB("bpf_l3_csum_replace(IP): %d\n", csum_rc);
res |= csum_rc;
if (res) {
reason = CALI_REASON_CSUM_FAIL;
goto deny;
}
if (dnat_return_should_encap() && state->ct_result.tun_ip) {
state->ip_dst = state->ct_result.tun_ip;
seen_mark = CALI_SKB_MARK_BYPASS_FWD_SRC_FIXUP;
goto nat_encap;
}
state->sport = state->ct_result.nat_port;
state->ip_src = state->ct_result.nat_ip;
goto allow;
case CALI_CT_ESTABLISHED_BYPASS:
seen_mark = CALI_SKB_MARK_BYPASS;
// fall through
case CALI_CT_ESTABLISHED:
goto allow;
default:
if (CALI_F_FROM_HEP) {
/* Since we're using the host endpoint program for TC-redirect
* acceleration for workloads (but we haven't fully implemented
* host endpoint support yet), we can get an incorrect conntrack
* invalid for host traffic.
*
* FIXME: Properly handle host endpoint conntrack failures
*/
CALI_DEBUG("Traffic is towards host namespace but not conntracked, "
"falling through to iptables\n");
fib = false;
goto allow;
}
goto deny;
}
CALI_INFO("We should never fall through here\n");
goto deny;
icmp_ttl_exceeded:
if (skb_too_short(skb)) {
reason = CALI_REASON_SHORT;
CALI_DEBUG("Too short\n");
goto deny;
}
ip_header = skb_iphdr(skb);
/* we silently drop the packet if things go wrong */
/* XXX we should check if it is broadcast or multicast and not respond */
/* do not respond to IP fragments except the first */
if (ip_frag_no(ip_header)) {
goto deny;
}
if (icmp_v4_ttl_exceeded(skb)) {
goto deny;
}
	/* we need to allow the response for the IP stack to route it back.
* XXX we might want to send it back the same iface
*/
goto icmp_allow;
icmp_too_big:
if (icmp_v4_too_big(skb)) {
reason = CALI_REASON_ICMP_DF;
goto deny;
}
/* XXX we might use skb->ifindex to redirect it straight back
* to where it came from if it is guaranteed to be the path
*/
fib_flags |= BPF_FIB_LOOKUP_OUTPUT;
if (CALI_F_FROM_WEP) {
/* we know it came from workload, just send it back the same way */
rc = CALI_RES_REDIR_IFINDEX;
}
goto icmp_allow;
icmp_allow:
/* recheck the size of the packet after it was turned into icmp and set
	 * state so that it can be processed further.
*/
if (skb_shorter(skb, ETH_IPV4_UDP_SIZE)) {
reason = CALI_REASON_SHORT;
goto deny;
}
ip_header = skb_iphdr(skb);
tc_state_fill_from_iphdr(state, ip_header);
state->sport = state->dport = 0;
/* packet was created because of approved traffic, treat it as related */
seen_mark = CALI_SKB_MARK_BYPASS_FWD;
goto allow;
nat_encap:
if (vxlan_v4_encap(skb, state->ip_src, state->ip_dst)) {
reason = CALI_REASON_ENCAP_FAIL;
goto deny;
}
state->sport = state->dport = CALI_VXLAN_PORT;
state->ip_proto = IPPROTO_UDP;
allow:
{
struct fwd fwd = {
.res = rc,
.mark = seen_mark,
};
fwd_fib_set(&fwd, fib);
fwd_fib_set_flags(&fwd, fib_flags);
return fwd;
}
deny:
{
struct fwd fwd = {
.res = TC_ACT_SHOT,
.reason = reason,
};
return fwd;
}
}
#ifndef CALI_ENTRYPOINT_NAME
#define CALI_ENTRYPOINT_NAME calico_entrypoint
#endif
// Entrypoint with definable name. It's useful to redefine the name for each entrypoint
// because the name is exposed by bpftool et al.
__attribute__((section(XSTR(CALI_ENTRYPOINT_NAME))))
int tc_calico_entry(struct __sk_buff *skb)
{
return calico_tc(skb);
}
char ____license[] __attribute__((section("license"), used)) = "GPL";
| 1 | 17,892 | We have `stdbool` imported, might as well use that for clarity. | projectcalico-felix | go |
@@ -30,8 +30,8 @@ namespace Datadog.Trace.ClrProfiler.IntegrationTests
Assert.True(spans.Count > 0, "expected at least one span");
foreach (var span in spans)
{
- Assert.Equal(SqlServerIntegration.OperationName, span.Name);
- Assert.Equal($"Samples.SqlServer-{SqlServerIntegration.ServiceName}", span.Service);
+ Assert.Equal("sql-server.query", span.Name);
+ Assert.Equal($"Samples.SqlServer-sql-server", span.Service);
Assert.Equal(SpanTypes.Sql, span.Type);
}
} | 1 | using Datadog.Trace.ClrProfiler.Integrations;
using Datadog.Trace.TestHelpers;
using Xunit;
using Xunit.Abstractions;
// EFCore targets netstandard2.0, so it requires net461 or higher or netcoreapp2.0 or higher
#if !NET452
namespace Datadog.Trace.ClrProfiler.IntegrationTests
{
public class SqlServerTests : TestHelper
{
private const int AgentPort = 9002;
public SqlServerTests(ITestOutputHelper output)
: base("SqlServer", output)
{
}
[Fact]
[Trait("Category", "EndToEnd")]
public void SubmitsTraces()
{
using (var agent = new MockTracerAgent(9002))
using (ProcessResult processResult = RunSampleAndWaitForExit(9002))
{
Assert.True(processResult.ExitCode >= 0, $"Process exited with code {processResult.ExitCode}");
var spans = agent.WaitForSpans(1);
Assert.True(spans.Count > 0, "expected at least one span");
foreach (var span in spans)
{
Assert.Equal(SqlServerIntegration.OperationName, span.Name);
Assert.Equal($"Samples.SqlServer-{SqlServerIntegration.ServiceName}", span.Service);
Assert.Equal(SpanTypes.Sql, span.Type);
}
}
}
}
}
#endif
| 1 | 14,590 | Why do we not also have integration tests for postgres? | DataDog-dd-trace-dotnet | .cs |
@@ -205,7 +205,7 @@ describe('Db', function() {
/**
* An example showing how to force a reindex of a collection.
*/
- it('shouldCorrectlyForceReindexOnCollection', {
+ it.skip('shouldCorrectlyForceReindexOnCollection', {
metadata: {
requires: { topology: ['single'] }
}, | 1 | 'use strict';
var test = require('./shared').assert;
var setupDatabase = require('./shared').setupDatabase;
const expect = require('chai').expect;
const { Db, DBRef } = require('../..');
describe('Db', function() {
before(function() {
return setupDatabase(this.configuration);
});
it('shouldCorrectlyHandleIllegalDbNames', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
test: function(done) {
// Assert rename
try {
new Db(5);
} catch (err) {
test.ok(err instanceof Error);
test.equal('database name must be a string', err.message);
}
try {
new Db('');
} catch (err) {
test.ok(err instanceof Error);
test.equal('database name cannot be the empty string', err.message);
}
try {
new Db('te$t', function() {});
} catch (err) {
test.equal("database names cannot contain the character '$'", err.message);
}
try {
new Db('.test', function() {});
} catch (err) {
test.equal("database names cannot contain the character '.'", err.message);
}
try {
new Db('\\test', function() {});
} catch (err) {
test.equal("database names cannot contain the character '\\'", err.message);
}
try {
new Db('\\test', function() {});
} catch (err) {
test.equal("database names cannot contain the character '\\'", err.message);
}
try {
new Db('test test', function() {});
} catch (err) {
test.equal("database names cannot contain the character ' '", err.message);
}
done();
}
});
it('should not call callback twice on collection() with callback', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), {
poolSize: 1,
auto_reconnect: true
});
client.connect(function(err, client) {
test.equal(null, err);
var db = client.db(configuration.db);
var count = 0;
var coll = db.collection('coll_name', function(e) {
test.equal(null, e);
count = count + 1;
});
try {
coll.findOne({}, null, function() {
//e - errors b/c findOne needs a query selector
test.equal(1, count);
client.close(done);
});
} catch (e) {
process.nextTick(function() {
test.equal(1, count);
client.close(done);
});
}
});
}
});
it('should callback with an error only when a MongoError', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
test: function(done) {
let configuration = this.configuration;
let client = configuration.newClient(configuration.writeConcernMax(), {
poolSize: 1,
auto_reconnect: true
});
client.connect(function(err, client) {
let callbackCalled = 0;
test.equal(null, err);
let db = client.db(configuration.db);
try {
db.collection('collectionCallbackTest', function(e) {
callbackCalled++;
test.equal(null, e);
throw new Error('Erroring on purpose with a non MongoError');
});
} catch (e) {
test.equal(callbackCalled, 1);
client.close(done);
}
});
}
});
it('shouldCorrectlyHandleFailedConnection', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
test: function(done) {
var configuration = this.configuration;
var fs_client = configuration.newClient('mongodb://127.0.0.1:25117/test', {
auto_reconnect: false,
serverSelectionTimeoutMS: 10
});
fs_client.connect(function(err) {
test.ok(err != null);
done();
});
}
});
it('shouldCorrectlyResaveDBRef', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
test.equal(null, err);
db.dropCollection('test_resave_dbref', function() {
test.equal(null, err);
db.createCollection('test_resave_dbref', function(err, collection) {
test.equal(null, err);
collection.insert({ name: 'parent' }, { safe: true }, function(err, r) {
test.equal(null, err);
test.ok(r.ops.length === 1 && r.ops[0]._id != null);
var parent = r.ops[0];
var child = { name: 'child', parent: new DBRef('test_resave_dbref', parent._id) };
collection.insert(child, { safe: true }, function(err) {
test.equal(null, err);
collection.findOne({ name: 'child' }, function(err, child) {
//Child deserialized
test.ok(child != null);
collection.save(child, { save: true }, function(err) {
test.equal(null, err);
collection.findOne(
{ parent: new DBRef('test_resave_dbref', parent._id) },
function(err, child) {
test.ok(child != null); //!!!! Main test point!
client.close(done);
}
);
});
});
});
});
});
});
});
}
});
/**
* An example showing how to force a reindex of a collection.
*/
it('shouldCorrectlyForceReindexOnCollection', {
metadata: {
requires: { topology: ['single'] }
},
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
// DOC_LINE var client = new MongoClient(new Server('localhost', 27017));
// DOC_START
// Establish connection to db
client.connect(function(err, client) {
var db = client.db('integration_tests');
// Create a collection we want to drop later
db.createCollection('create_and_drop_all_indexes', function(err, collection) {
test.equal(null, err);
// Insert a bunch of documents for the index
collection.insert(
[
{ a: 1, b: 1 },
{ a: 2, b: 2 },
{ a: 3, b: 3 },
{ a: 4, b: 4, c: 4 }
],
configuration.writeConcernMax(),
function(err) {
test.equal(null, err);
// Create an index on the a field
collection.ensureIndex(
{ a: 1, b: 1 },
{ unique: true, background: true, w: 1 },
function(err) {
test.equal(null, err);
// Force a reindex of the collection
collection.reIndex(function(err, result) {
test.equal(null, err);
test.equal(true, result);
// Verify that the indexes are still present after the reindex
collection.indexInformation(function(err, indexInformation) {
test.deepEqual([['_id', 1]], indexInformation._id_);
test.deepEqual(
[
['a', 1],
['b', 1]
],
indexInformation.a_1_b_1
);
client.close(done);
});
});
}
);
}
);
});
});
// DOC_END
}
});
it('shouldCorrectlyGetErrorDroppingNonExistingDb', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var _db = client.db('nonexistingdb');
_db.dropDatabase(function(err, result) {
test.equal(null, err);
test.equal(true, result);
client.close(done);
});
});
}
});
it.skip('shouldCorrectlyThrowWhenTryingToReOpenConnection', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(err => {
expect(err).to.not.exist;
try {
client.connect(function() {});
test.ok(false);
} catch (err) {
client.close(done);
}
});
}
});
it('shouldCorrectlyReconnectWhenError', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(`mongodb://127.0.0.1:27088/test`, {
auto_reconnect: false,
poolSize: 4,
serverSelectionTimeoutMS: 10
});
// Establish connection to db
client.connect(function(err) {
test.ok(err != null);
client.connect(function(err) {
test.ok(err != null);
client.close(done);
});
});
}
});
it('should not cut collection name when it is the same as the database', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
test.equal(null, err);
var db1 = client.db('node972');
db1.collection('node972.test').insertOne({ a: 1 }, function(err) {
test.equal(null, err);
db1.collections(function(err, collections) {
test.equal(null, err);
collections = collections.map(function(c) {
return c.collectionName;
});
test.notEqual(-1, collections.indexOf('node972.test'));
client.close(done);
});
});
});
}
});
it('shouldCorrectlyUseCursorWithListCollectionsCommand', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
test.equal(null, err);
// Get a db that does not have any collections
var db1 = client.db('shouldCorrectlyUseCursorWithListCollectionsCommand');
// Create a collection
db1.collection('test').insertOne({ a: 1 }, function(err) {
test.equal(null, err);
// Create a collection
db1.collection('test1').insertOne({ a: 1 }, function() {
test.equal(null, err);
// Get listCollections filtered on the name
var cursor = db1.listCollections({ name: 'test1' });
cursor.toArray(function(err, names) {
test.equal(null, err);
test.equal(1, names.length);
client.close(done);
});
});
});
});
}
});
it('shouldCorrectlyUseCursorWithListCollectionsCommandAndBatchSize', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
test.equal(null, err);
// Get a db that does not have any collections
var db1 = client.db('shouldCorrectlyUseCursorWithListCollectionsCommandAndBatchSize');
// Create a collection
db1.collection('test').insertOne({ a: 1 }, function(err) {
test.equal(null, err);
// Create a collection
db1.collection('test1').insertOne({ a: 1 }, function() {
test.equal(null, err);
// Get listCollections filtered on the name
var cursor = db1.listCollections({ name: 'test' }, { batchSize: 1 });
cursor.toArray(function(err, names) {
test.equal(null, err);
test.equal(1, names.length);
client.close(done);
});
});
});
});
}
});
it('should correctly list collection names with . in the middle', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
test.equal(null, err);
// Get a db that does not have any collections
var db1 = client.db('shouldCorrectlyListCollectionsWithDotsOnThem');
// Create a collection
db1.collection('test.collection1').insertOne({ a: 1 }, function(err) {
test.equal(null, err);
// Create a collection
db1.collection('test.collection2').insertOne({ a: 1 }, function() {
test.equal(null, err);
// Get listCollections filtered on the name
var cursor = db1.listCollections({ name: /test.collection/ });
cursor.toArray(function(err, names) {
test.equal(null, err);
test.equal(2, names.length);
// Get listCollections filtered on the name
var cursor = db1.listCollections({ name: 'test.collection1' }, {});
cursor.toArray(function(err, names) {
test.equal(null, err);
test.equal(1, names.length);
client.close(done);
});
});
});
});
});
}
});
it('should correctly list collection names with batchSize 1 for 2.8 or higher', {
metadata: {
requires: {
topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'],
mongodb: '>= 2.8.0'
}
},
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
test.equal(null, err);
// Get a db that does not have any collections
var db1 = client.db('shouldCorrectlyListCollectionsWithDotsOnThemFor28');
// Create a collection
db1.collection('test.collection1').insertOne({ a: 1 }, function(err) {
test.equal(null, err);
// Create a collection
db1.collection('test.collection2').insertOne({ a: 1 }, function() {
test.equal(null, err);
// Get listCollections filtered on the name
var cursor = db1.listCollections({ name: /test.collection/ }, { batchSize: 1 });
cursor.toArray(function(err, names) {
test.equal(null, err);
test.equal(2, names.length);
client.close(done);
});
});
});
});
}
});
});
| 1 | 17,702 | I think its safe to remove this if we're removing `reIndex` outright. The description shows us that the test is "An example showing how to force a reindex of a collection" | mongodb-node-mongodb-native | js |
@@ -43,6 +43,7 @@ const (
EEXIST Errno = 0x11
EINTR Errno = 0x4
ENOTDIR Errno = 0x14
+ EINVAL Errno = 22
EMFILE Errno = 0x18
EAGAIN Errno = 0x23
ETIMEDOUT Errno = 0x3c | 1 | // +build darwin
package syscall
// This file defines errno and constants to match the darwin libsystem ABI.
// Values have been copied from src/syscall/zerrors_darwin_amd64.go.
// This function returns the error location in the darwin ABI.
// Discovered by compiling the following code using Clang:
//
// #include <errno.h>
// int getErrno() {
// return errno;
// }
//
//export __error
func libc___error() *int32
// getErrno returns the current C errno. It may not have been caused by the last
// call, so it should only be relied upon when the last call indicates an error
// (for example, by returning -1).
func getErrno() Errno {
errptr := libc___error()
return Errno(uintptr(*errptr))
}
func (e Errno) Is(target error) bool {
switch target.Error() {
case "permission denied":
return e == EACCES || e == EPERM
case "file already exists":
return e == EEXIST
case "file does not exist":
return e == ENOENT
}
return false
}
const (
EPERM Errno = 0x1
ENOENT Errno = 0x2
EACCES Errno = 0xd
EEXIST Errno = 0x11
EINTR Errno = 0x4
ENOTDIR Errno = 0x14
EMFILE Errno = 0x18
EAGAIN Errno = 0x23
ETIMEDOUT Errno = 0x3c
ENOSYS Errno = 0x4e
EWOULDBLOCK Errno = EAGAIN
)
type Signal int
const (
SIGCHLD Signal = 0x14
SIGINT Signal = 0x2
SIGKILL Signal = 0x9
SIGTRAP Signal = 0x5
SIGQUIT Signal = 0x3
SIGTERM Signal = 0xf
)
const (
O_RDONLY = 0x0
O_WRONLY = 0x1
O_RDWR = 0x2
O_APPEND = 0x8
O_SYNC = 0x80
O_CREAT = 0x200
O_TRUNC = 0x400
O_EXCL = 0x800
)
| 1 | 12,950 | Can you make this hexadecimal to match the other constants? | tinygo-org-tinygo | go |
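Reviewer note for this record: the newly added errno value is the only decimal literal in a block of hexadecimal constants. A minimal sketch of the requested spelling, hypothetical and with the Errno type stubbed in so the fragment stands alone rather than being the actual TinyGo source:

package syscall

// Errno stands in for the package's error-number type so this sketch compiles on its own.
type Errno uintptr

// EINVAL written as 0x16 (22 decimal) to match the neighbouring constants.
const (
	ENOTDIR Errno = 0x14
	EINVAL  Errno = 0x16
	EMFILE  Errno = 0x18
)

Keeping every value hexadecimal also makes the table easier to compare against the zerrors_darwin_amd64.go listing the file says it was copied from.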
@@ -209,12 +209,5 @@ module Bolt
Bolt::Logger.warn("missing_project_name", message)
end
end
-
- def check_deprecated_file
- if (@path + 'project.yaml').file?
- msg = "Project configuration file 'project.yaml' is deprecated; use 'bolt-project.yaml' instead."
- Bolt::Logger.warn("project_yaml", msg)
- end
- end
end
end | 1 | # frozen_string_literal: true
require 'pathname'
require 'bolt/config'
require 'bolt/validator'
require 'bolt/pal'
require 'bolt/module'
module Bolt
class Project
BOLTDIR_NAME = 'Boltdir'
CONFIG_NAME = 'bolt-project.yaml'
attr_reader :path, :data, :inventory_file, :hiera_config,
:puppetfile, :rerunfile, :type, :resource_types, :project_file,
:downloads, :plans_path, :modulepath, :managed_moduledir,
:backup_dir, :plugin_cache_file, :plan_cache_file, :task_cache_file
def self.default_project
create_project(File.expand_path(File.join('~', '.puppetlabs', 'bolt')), 'user')
# If homedir isn't defined use the system config path
rescue ArgumentError
create_project(Bolt::Config.system_path, 'system')
end
# Search recursively up the directory hierarchy for the Project. Look for a
# directory called Boltdir or a file called bolt-project.yaml (for a control
# repo type Project). Otherwise, repeat the check on each directory up the
# hierarchy, falling back to the default if we reach the root.
def self.find_boltdir(dir)
dir = Pathname.new(dir)
if (dir + BOLTDIR_NAME).directory?
create_project(dir + BOLTDIR_NAME, 'embedded')
elsif (dir + CONFIG_NAME).file?
create_project(dir, 'local')
elsif dir.root?
default_project
else
Bolt::Logger.debug(
"Did not detect Boltdir or bolt-project.yaml at '#{dir}'. This directory won't be loaded as a project."
)
find_boltdir(dir.parent)
end
end
def self.create_project(path, type = 'option')
fullpath = Pathname.new(path).expand_path
if type == 'user'
begin
# This is already expanded if the type is user
FileUtils.mkdir_p(path)
rescue StandardError
Bolt::Logger.warn(
"non_writeable_project",
"Could not create default project at #{path}. Continuing without a writeable project. "\
"Log and rerun files will not be written."
)
end
end
if type == 'option' && !File.directory?(path)
raise Bolt::Error.new("Could not find project at #{path}", "bolt/project-error")
end
if !Bolt::Util.windows? && type != 'environment' && fullpath.world_writable?
raise Bolt::Error.new(
"Project directory '#{fullpath}' is world-writable which poses a security risk. Set "\
"BOLT_PROJECT='#{fullpath}' to force the use of this project directory.",
"bolt/world-writable-error"
)
end
project_file = File.join(fullpath, CONFIG_NAME)
data = Bolt::Util.read_optional_yaml_hash(File.expand_path(project_file), 'project')
default = type =~ /user|system/ ? 'default ' : ''
if File.exist?(File.expand_path(project_file))
Bolt::Logger.info("Loaded #{default}project from '#{fullpath}'")
end
Bolt::Validator.new.tap do |validator|
validator.validate(data, schema, project_file)
validator.warnings.each { |warning| Bolt::Logger.warn(warning[:id], warning[:msg]) }
validator.deprecations.each { |dep| Bolt::Logger.deprecate(dep[:id], dep[:msg]) }
end
new(data, path, type)
end
# Builds the schema for bolt-project.yaml used by the validator.
#
def self.schema
{
type: Hash,
properties: Bolt::Config::PROJECT_OPTIONS.map { |opt| [opt, _ref: opt] }.to_h,
definitions: Bolt::Config::OPTIONS
}
end
def initialize(data, path, type = 'option')
@type = type
@path = Pathname.new(path).expand_path
@project_file = @path + CONFIG_NAME
@inventory_file = @path + 'inventory.yaml'
@hiera_config = @path + 'hiera.yaml'
@puppetfile = @path + 'Puppetfile'
@rerunfile = @path + '.rerun.json'
@resource_types = @path + '.resource_types'
@downloads = @path + 'downloads'
@plans_path = @path + 'plans'
@managed_moduledir = @path + '.modules'
@backup_dir = @path + '.bolt-bak'
@plugin_cache_file = @path + '.plugin_cache.json'
@plan_cache_file = @path + '.plan_cache.json'
@task_cache_file = @path + '.task_cache.json'
@modulepath = [(@path + 'modules').to_s]
if (tc = Bolt::Config::INVENTORY_OPTIONS.keys & data.keys).any?
Bolt::Logger.warn(
"project_transport_config",
"Transport configuration isn't supported in bolt-project.yaml. Ignoring keys #{tc}."
)
end
@data = data.slice(*Bolt::Config::PROJECT_OPTIONS)
validate if project_file?
end
def to_s
@path.to_s
end
# This API is used to prepend the project as a module to Puppet's internal
# module_references list. CHANGE AT YOUR OWN RISK
def to_h
{ path: @path.to_s,
name: name,
load_as_module?: load_as_module? }
end
def eql?(other)
path == other.path
end
alias == eql?
def project_file?
@project_file.file?
end
def load_as_module?
!name.nil?
end
def name
@data['name']
end
def tasks
@data['tasks']
end
def plans
@data['plans']
end
def plugin_cache
@data['plugin-cache']
end
def module_install
@data['module-install']
end
def disable_warnings
@data['disable-warnings'] || []
end
def modules
mod_data = @data['modules'] || []
@modules ||= mod_data.map do |mod|
if mod.is_a?(String)
{ 'name' => mod }
else
mod
end
end
end
def validate
if name
if name !~ Bolt::Module::MODULE_NAME_REGEX
raise Bolt::ValidationError, <<~ERROR_STRING
Invalid project name '#{name}' in bolt-project.yaml; project name must begin with a lowercase letter
and can include lowercase letters, numbers, and underscores.
ERROR_STRING
elsif Dir.children(Bolt::Config::Modulepath::BOLTLIB_PATH).include?(name)
raise Bolt::ValidationError, "The project '#{name}' will not be loaded. The project name conflicts "\
"with a built-in Bolt module of the same name."
end
elsif name.nil? &&
(File.directory?(plans_path) ||
File.directory?(@path + 'tasks') ||
File.directory?(@path + 'files'))
message = "No project name is specified in bolt-project.yaml. Project-level content will not be available."
Bolt::Logger.warn("missing_project_name", message)
end
end
def check_deprecated_file
if (@path + 'project.yaml').file?
msg = "Project configuration file 'project.yaml' is deprecated; use 'bolt-project.yaml' instead."
Bolt::Logger.warn("project_yaml", msg)
end
end
end
end
| 1 | 18,580 | Can this get moved to `Bolt::Project#validate`? | puppetlabs-bolt | rb |
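Reviewer question for this record: could the project.yaml deprecation warning that this patch deletes live inside Bolt::Project#validate instead of a separate check_deprecated_file method? A rough sketch of that placement, hypothetical rather than taken from the Bolt sources, assuming @path and Bolt::Logger behave as in the file above:

# frozen_string_literal: true

module Bolt
  class Project
    def validate
      # ...existing name validation elided...

      # Hypothetical: fold the old check_deprecated_file logic in here so the
      # warning fires whenever a project with a config file is validated.
      if (@path + 'project.yaml').file?
        msg = "Project configuration file 'project.yaml' is deprecated; " \
              "use 'bolt-project.yaml' instead."
        Bolt::Logger.warn("project_yaml", msg)
      end
    end
  end
end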
@@ -426,7 +426,15 @@ public interface List<T> extends Seq<T>, Stack<T> {
@Override
default Tuple2<List<T>, List<T>> partition(Predicate<? super T> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
- return Tuple.of(filter(predicate), filter(predicate.negate()));
+ List<T> left = nil(), right = nil();
+ for (T t : this) {
+ if (predicate.test(t)) {
+ left = left.prepend(t);
+ } else {
+ right = right.prepend(t);
+ }
+ }
+ return Tuple.of(left.reverse(), right.reverse());
}
@Override | 1 | /* / \____ _ ______ _____ / \____ ____ _____
* / \__ \/ \ / \__ \ / __// \__ \ / \/ __ \ Javaslang
* _/ // _\ \ \/ / _\ \\_ \/ // _\ \ /\ \__/ / Copyright 2014-2015 Daniel Dietrich
* /___/ \_____/\____/\_____/____/\___\_____/_/ \_/____/ Licensed under the Apache License, Version 2.0
*/
package javaslang.collection;
import javaslang.Tuple;
import javaslang.Tuple2;
import javaslang.control.None;
import javaslang.control.Option;
import javaslang.control.Some;
import java.io.*;
import java.util.*;
import java.util.function.*;
import java.util.stream.Collector;
/**
* A {@code List} is an eager sequence of elements. Its immutability makes it suitable for concurrent programming.
* <p>
* A {@code List} is composed of a {@code head} element and a {@code tail} {@code List}.
* <p>
* There are two implementations of the {@code List} interface:
* <ul>
* <li>{@link Nil}, which represents the empty {@code List}.</li>
* <li>{@link Cons}, which represents a {@code List} containing one or more elements.</li>
* </ul>
* Methods to obtain a {@code List}:
* <pre>
* <code>
* // factory methods
* List.nil() // = List.of() = Nil.instance()
* List.of(x) // = new Cons<>(x, Nil.instance())
* List.of(Object...) // e.g. List.of(1, 2, 3)
* List.ofAll(Iterable) // e.g. List.ofAll(Stream.of(1, 2, 3)) = 1, 2, 3
*
* // int sequences
* List.range(0, 3) // = 0, 1, 2
* List.rangeClosed(0, 3) // = 0, 1, 2, 3
* </code>
* </pre>
*
* Note: A {@code List} is primarily a {@code Seq} and extends {@code Stack} for technical reasons (so {@code Stack} does not need to wrap {@code List}).
* <p>
* See Okasaki, Chris: <em>Purely Functional Data Structures</em> (p. 7 ff.). Cambridge, 2003.
*
* @param <T> Component type of the List
* @since 1.1.0
*/
public interface List<T> extends Seq<T>, Stack<T> {
/**
* Returns a {@link java.util.stream.Collector} which may be used in conjunction with
* {@link java.util.stream.Stream#collect(java.util.stream.Collector)} to obtain a {@link javaslang.collection.List}
* .
*
* @param <T> Component type of the List.
* @return A javaslang.collection.List Collector.
*/
static <T> Collector<T, ArrayList<T>, List<T>> collector() {
final Supplier<ArrayList<T>> supplier = ArrayList::new;
final BiConsumer<ArrayList<T>, T> accumulator = ArrayList::add;
final BinaryOperator<ArrayList<T>> combiner = (left, right) -> {
left.addAll(right);
return left;
};
final Function<ArrayList<T>, List<T>> finisher = List::ofAll;
return Collector.of(supplier, accumulator, combiner, finisher);
}
/**
* Returns the single instance of Nil. Convenience method for {@code Nil.instance()} .
* <p>
* Note: this method intentionally returns type {@code List} and not {@code Nil}. This comes in handy when folding.
* If you explicitly need type {@code Nil} use {@linkplain Nil#instance()}.
*
* @param <T> Component type of Nil, determined by type inference in the particular context.
* @return The empty list.
*/
static <T> List<T> nil() {
return Nil.instance();
}
/**
* Returns a singleton {@code List}, i.e. a {@code List} of one element.
*
* @param element An element.
* @param <T> The component type
* @return A new List instance containing the given element
*/
static <T> List<T> of(T element) {
return new Cons<>(element, Nil.instance());
}
/**
* <p>
* Creates a List of the given elements.
* </p>
*
* <pre>
* <code> List.of(1, 2, 3, 4)
* = Nil.instance().prepend(4).prepend(3).prepend(2).prepend(1)
* = new Cons(1, new Cons(2, new Cons(3, new Cons(4, Nil.instance()))))</code>
* </pre>
*
* @param <T> Component type of the List.
* @param elements Zero or more elements.
* @return A list containing the given elements in the same order.
* @throws NullPointerException if {@code elements} is null
*/
@SafeVarargs
static <T> List<T> of(T... elements) {
Objects.requireNonNull(elements, "elements is null");
List<T> result = Nil.<T>instance();
for (int i = elements.length - 1; i >= 0; i--) {
result = result.prepend(elements[i]);
}
return result;
}
/**
* Creates a List of the given elements.
*
* @param <T> Component type of the List.
* @param elements An Iterable of elements.
* @return A list containing the given elements in the same order.
* @throws NullPointerException if {@code elements} is null
*/
static <T> List<T> ofAll(Iterable<? extends T> elements) {
Objects.requireNonNull(elements, "elements is null");
if (elements instanceof List) {
@SuppressWarnings("unchecked")
final List<T> list = (List<T>) elements;
return list;
} else {
List<T> result = Nil.instance();
for (T element : elements) {
result = result.prepend(element);
}
return result.reverse();
}
}
/**
* Creates a List of int numbers starting from {@code from}, extending to {@code toExclusive - 1}.
*
* @param from the first number
* @param toExclusive the last number + 1
* @return a range of int values as specified or {@code Nil} if {@code from >= toExclusive}
*/
static List<Integer> range(int from, int toExclusive) {
if (from >= toExclusive) {
return Nil.instance();
} else {
return List.rangeClosed(from, toExclusive - 1);
}
}
/**
* Creates a List of int numbers starting from {@code from}, extending to {@code toInclusive}.
*
* @param from the first number
* @param toInclusive the last number
* @return a range of int values as specified or {@code Nil} if {@code from > toInclusive}
*/
static List<Integer> rangeClosed(int from, int toInclusive) {
if (from > toInclusive) {
return Nil.instance();
} else if (toInclusive == Integer.MIN_VALUE) {
return List.of(Integer.MIN_VALUE);
} else {
List<Integer> result = Nil.instance();
for (int i = toInclusive; i >= from; i--) {
result = result.prepend(i);
}
return result;
}
}
@Override
default List<T> append(T element) {
return foldRight(List.of(element), (x, xs) -> xs.prepend(x));
}
@Override
default List<T> appendAll(Iterable<? extends T> elements) {
Objects.requireNonNull(elements, "elements is null");
return foldRight(List.ofAll(elements), (x, xs) -> xs.prepend(x));
}
@Override
default List<T> clear() {
return Nil.instance();
}
@Override
default List<List<T>> combinations() {
return List.rangeClosed(0, length()).map(this::combinations).flatten(Function.identity());
}
@Override
default List<List<T>> combinations(int k) {
class Recursion {
List<List<T>> combinations(List<T> elements, int k) {
return (k == 0)
? List.of(List.nil())
: elements.zipWithIndex().flatMap(t -> combinations(elements.drop(t._2 + 1), (k - 1))
.map((List<T> c) -> c.prepend(t._1)));
}
}
return new Recursion().combinations(this, Math.max(k, 0));
}
@Override
default List<T> distinct() {
return distinct(Function.identity());
}
@Override
default <U> List<T> distinct(Function<? super T, ? extends U> keyExtractor) {
Objects.requireNonNull(keyExtractor, "keyExtractor is null");
final java.util.Set<U> seen = new java.util.HashSet<>();
return filter(t -> seen.add(keyExtractor.apply(t)));
}
@Override
default List<T> drop(int n) {
List<T> list = this;
for (int i = n; i > 0 && !list.isEmpty(); i--) {
list = list.tail();
}
return list;
}
@Override
default List<T> dropRight(int n) {
return reverse().drop(n).reverse();
}
@Override
default List<T> dropWhile(Predicate<? super T> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
List<T> list = this;
while (!list.isEmpty() && predicate.test(list.head())) {
list = list.tail();
}
return list;
}
@Override
default List<T> filter(Predicate<? super T> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
return isEmpty() ? this : foldLeft(List.<T>nil(), (xs, x) -> predicate.test(x) ? xs.prepend(x) : xs).reverse();
}
@Override
default List<T> findAll(Predicate<? super T> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
return filter(predicate);
}
@Override
default <U> List<U> flatMap(Function<? super T, ? extends Iterable<U>> mapper) {
Objects.requireNonNull(mapper, "mapper is null");
if (isEmpty()) {
return nil();
} else {
List<U> list = nil();
for (T t : this) {
for (U u : mapper.apply(t)) {
list = list.prepend(u);
}
}
return list.reverse();
}
}
/**
* Flattens a {@code List} using a function {@code f}. A common use case is to use the identity
* {@code list.flatten(Function::identity)} to flatten a {@code List} of {@code List}s.
* <p>
* Examples:
* <pre>
* <code>
* Match<List<U>> f = Match
* .when((List<U> l) -> l)
* .when((U u) -> List.of(u));
* List.of(1).flatten(f); // = List(1)
* List.of(List.of(1)).flatten(f); // = List(1)
* List.of(Nil.instance()).flatten(f); // = Nil
* Nil.instance().flatten(f); // = Nil
* </code>
* </pre>
*
* @param <U> component type of the result {@code List}
* @param f a function which maps elements of this {@code List} to {@code List}s
* @return a new {@code List}
* @throws NullPointerException if {@code f} is null
*/
@Override
default <U> List<U> flatten(Function<? super T, ? extends Iterable<U>> f) {
Objects.requireNonNull(f, "f is null");
return isEmpty() ? Nil.instance() : foldRight(nil(), (t, xs) -> xs.prependAll(f.apply(t)));
}
@Override
default void forEach(Consumer<? super T> action) {
Objects.requireNonNull(action, "action is null");
Stack.super.forEach(action);
}
@Override
default boolean forAll(Predicate<? super T> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
return Stack.super.forAll(predicate);
}
@Override
default T get(int index) {
if (isEmpty()) {
throw new IndexOutOfBoundsException("get(" + index + ") on Nil");
}
if (index < 0) {
throw new IndexOutOfBoundsException("get(" + index + ")");
}
List<T> list = this;
for (int i = index - 1; i >= 0; i--) {
list = list.tail();
if (list.isEmpty()) {
throw new IndexOutOfBoundsException(String.format("get(%s) on List of length %s", index, index - i));
}
}
return list.head();
}
@Override
default List<List<T>> grouped(int size) {
return sliding(size, size);
}
@Override
default int indexOf(T element) {
int index = 0;
for (List<T> list = this; !list.isEmpty(); list = list.tail(), index++) {
if (Objects.equals(list.head(), element)) {
return index;
}
}
return -1;
}
@Override
List<T> init();
@Override
Option<List<T>> initOption();
@Override
default List<T> insert(int index, T element) {
if (index < 0) {
throw new IndexOutOfBoundsException("insert(" + index + ", e)");
}
List<T> preceding = Nil.instance();
List<T> tail = this;
for (int i = index; i > 0; i--, tail = tail.tail()) {
if (tail.isEmpty()) {
throw new IndexOutOfBoundsException("insert(" + index + ", e) on List of length " + length());
}
preceding = preceding.prepend(tail.head());
}
List<T> result = tail.prepend(element);
for (T next : preceding) {
result = result.prepend(next);
}
return result;
}
@Override
default List<T> insertAll(int index, Iterable<? extends T> elements) {
Objects.requireNonNull(elements, "elements is null");
if (index < 0) {
throw new IndexOutOfBoundsException("insertAll(" + index + ", elements)");
}
List<T> preceding = Nil.instance();
List<T> tail = this;
for (int i = index; i > 0; i--, tail = tail.tail()) {
if (tail.isEmpty()) {
throw new IndexOutOfBoundsException("insertAll(" + index + ", elements) on List of length " + length());
}
preceding = preceding.prepend(tail.head());
}
List<T> result = tail.prependAll(elements);
for (T next : preceding) {
result = result.prepend(next);
}
return result;
}
@Override
default List<T> intersperse(T element) {
return isEmpty() ? Nil.instance() : foldRight(nil(), (x, xs) -> xs.isEmpty() ? xs.prepend(x) : xs.prepend(element).prepend(x));
}
@Override
default int lastIndexOf(T element) {
int result = -1, index = 0;
for (List<T> list = this; !list.isEmpty(); list = list.tail(), index++) {
if (Objects.equals(list.head(), element)) {
result = index;
}
}
return result;
}
@Override
default <U> List<U> map(Function<? super T, ? extends U> mapper) {
Objects.requireNonNull(mapper, "mapper is null");
List<U> list = nil();
for (T t : this) {
list = list.prepend(mapper.apply(t));
}
return list.reverse();
}
@Override
default Tuple2<List<T>, List<T>> partition(Predicate<? super T> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
return Tuple.of(filter(predicate), filter(predicate.negate()));
}
@Override
default T peek() {
return head();
}
/**
* Performs an action on the head element of this {@code List}.
*
* @param action A {@code Consumer}
* @return this {@code List}
*/
@Override
default List<T> peek(Consumer<? super T> action) {
Objects.requireNonNull(action, "action is null");
if (!isEmpty()) {
action.accept(head());
}
return this;
}
@Override
default List<List<T>> permutations() {
if (isEmpty()) {
return Nil.instance();
} else {
final List<T> tail = tail();
if (tail.isEmpty()) {
return List.of(this);
} else {
final List<List<T>> zero = Nil.instance();
// TODO: IntelliJ IDEA 14.1.1 needs a redundant cast here, jdk 1.8.0_40 compiles fine
return distinct().foldLeft(zero, (xs, x) -> xs.appendAll(remove(x).permutations().map((Function<List<T>, List<T>>) l -> l.prepend(x))));
}
}
}
@Override
default List<T> pop() {
return tail();
}
@Override
Option<List<T>> popOption();
@Override
default Tuple2<T, List<T>> pop2() {
return Tuple.of(head(), tail());
}
@Override
Option<Tuple2<T, List<T>>> pop2Option();
@Override
default List<T> prepend(T element) {
return new Cons<>(element, this);
}
@Override
default List<T> prependAll(Iterable<? extends T> elements) {
Objects.requireNonNull(elements, "elements is null");
return isEmpty() ? List.ofAll(elements) : List.ofAll(elements).reverse().foldLeft(this, List::prepend);
}
@Override
default List<T> push(T element) {
return new Cons<>(element, this);
}
@SuppressWarnings("unchecked")
@Override
default List<T> push(T... elements) {
Objects.requireNonNull(elements, "elements is null");
List<T> result = Nil.<T>instance();
for (T element : elements) {
result = result.prepend(element);
}
return result;
}
@Override
default List<T> pushAll(Iterable<T> elements) {
Objects.requireNonNull(elements, "elements is null");
List<T> result = Nil.<T>instance();
for (T element : elements) {
result = result.prepend(element);
}
return result;
}
@Override
default List<T> remove(T element) {
List<T> preceding = Nil.instance();
List<T> tail = this;
boolean found = false;
while (!found && !tail.isEmpty()) {
final T head = tail.head();
if (head.equals(element)) {
found = true;
} else {
preceding = preceding.prepend(head);
}
tail = tail.tail();
}
List<T> result = tail;
for (T next : preceding) {
result = result.prepend(next);
}
return result;
}
@Override
default List<T> removeAll(T removed) {
List<T> result = Nil.instance();
for (T element : this) {
if (!element.equals(removed)) {
result = result.prepend(element);
}
}
return result.reverse();
}
@Override
default List<T> removeAll(Iterable<? extends T> elements) {
Objects.requireNonNull(elements, "elements is null");
List<T> removed = List.ofAll(elements).distinct();
List<T> result = Nil.instance();
for (T element : this) {
if (!removed.contains(element)) {
result = result.prepend(element);
}
}
return result.reverse();
}
@Override
default List<T> replace(T currentElement, T newElement) {
List<T> preceding = Nil.instance();
List<T> tail = this;
while (!tail.isEmpty() && !Objects.equals(tail.head(), currentElement)) {
preceding = preceding.prepend(tail.head());
tail = tail.tail();
}
if (tail.isEmpty()) {
return this;
}
// skip the current head element because it is replaced
List<T> result = tail.tail().prepend(newElement);
for (T next : preceding) {
result = result.prepend(next);
}
return result;
}
@Override
default List<T> replaceAll(T currentElement, T newElement) {
List<T> result = Nil.instance();
for (List<T> list = this; !list.isEmpty(); list = list.tail()) {
final T head = list.head();
final T elem = Objects.equals(head, currentElement) ? newElement : head;
result = result.prepend(elem);
}
return result.reverse();
}
@Override
default List<T> replaceAll(UnaryOperator<T> operator) {
Objects.requireNonNull(operator, "operator is null");
List<T> result = Nil.instance();
for (T element : this) {
result = result.prepend(operator.apply(element));
}
return result.reverse();
}
@Override
default List<T> retainAll(Iterable<? extends T> elements) {
Objects.requireNonNull(elements, "elements is null");
final List<T> kept = List.ofAll(elements).distinct();
List<T> result = Nil.instance();
for (T element : this) {
if (kept.contains(element)) {
result = result.prepend(element);
}
}
return result.reverse();
}
@Override
default List<T> reverse() {
return isEmpty() ? this : foldLeft(nil(), List::prepend);
}
@Override
default List<T> set(int index, T element) {
if (isEmpty()) {
throw new IndexOutOfBoundsException("set(" + index + ", e) on Nil");
}
if (index < 0) {
throw new IndexOutOfBoundsException("set(" + index + ", e)");
}
List<T> preceding = Nil.instance();
List<T> tail = this;
for (int i = index; i > 0; i--, tail = tail.tail()) {
if (tail.isEmpty()) {
throw new IndexOutOfBoundsException("set(" + index + ", e) on List of length " + length());
}
preceding = preceding.prepend(tail.head());
}
if (tail.isEmpty()) {
throw new IndexOutOfBoundsException("set(" + index + ", e) on List of length " + length());
}
// skip the current head element because it is replaced
List<T> result = tail.tail().prepend(element);
for (T next : preceding) {
result = result.prepend(next);
}
return result;
}
@Override
default List<List<T>> sliding(int size) {
return sliding(size, 1);
}
@Override
default List<List<T>> sliding(int size, int step) {
if (size <= 0 || step <= 0) {
throw new IllegalArgumentException(String.format("size: %s or step: %s not positive", size, step));
}
List<List<T>> result = Nil.instance();
List<T> list = this;
while (!list.isEmpty()) {
final Tuple2<List<T>, List<T>> split = list.splitAt(size);
result = result.prepend(split._1);
list = split._2.isEmpty() ? Nil.instance() : list.drop(step);
}
return result.reverse();
}
@Override
default List<T> sort() {
return isEmpty() ? this : toJavaStream().sorted().collect(List.collector());
}
@Override
default List<T> sort(Comparator<? super T> comparator) {
Objects.requireNonNull(comparator, "comparator is null");
return isEmpty() ? this : toJavaStream().sorted(comparator).collect(List.collector());
}
@Override
default Tuple2<List<T>, List<T>> span(Predicate<? super T> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
return Tuple.of(takeWhile(predicate), dropWhile(predicate));
}
@Override
default Tuple2<List<T>, List<T>> splitAt(int n) {
return Tuple.of(take(n), drop(n));
}
@Override
default Spliterator<T> spliterator() {
// the focus of the Stream API is on random-access collections of *known size*
return Spliterators.spliterator(iterator(), length(), Spliterator.ORDERED | Spliterator.IMMUTABLE);
}
@Override
default List<T> subsequence(int beginIndex) {
if (beginIndex < 0) {
throw new IndexOutOfBoundsException("subsequence(" + beginIndex + ")");
}
List<T> result = this;
for (int i = 0; i < beginIndex; i++, result = result.tail()) {
if (result.isEmpty()) {
throw new IndexOutOfBoundsException(
String.format("subsequence(%s) on List of length %s", beginIndex, i));
}
}
return result;
}
@Override
default List<T> subsequence(int beginIndex, int endIndex) {
if (beginIndex < 0 || beginIndex > endIndex) {
throw new IndexOutOfBoundsException(
String.format("subsequence(%s, %s) on List of length %s", beginIndex, endIndex, length()));
}
List<T> result = Nil.instance();
List<T> list = this;
for (int i = 0; i < endIndex; i++, list = list.tail()) {
if (list.isEmpty()) {
throw new IndexOutOfBoundsException(
String.format("subsequence(%s, %s) on List of length %s", beginIndex, endIndex, i));
}
if (i >= beginIndex) {
result = result.prepend(list.head());
}
}
return result.reverse();
}
@Override
List<T> tail();
@Override
Option<List<T>> tailOption();
@Override
default List<T> take(int n) {
List<T> result = Nil.instance();
List<T> list = this;
for (int i = 0; i < n && !list.isEmpty(); i++, list = list.tail()) {
result = result.prepend(list.head());
}
return result.reverse();
}
@Override
default List<T> takeRight(int n) {
return reverse().take(n).reverse();
}
@Override
default List<T> takeWhile(Predicate<? super T> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
List<T> result = Nil.instance();
for (List<T> list = this; !list.isEmpty() && predicate.test(list.head()); list = list.tail()) {
result = result.prepend(list.head());
}
return result.reverse();
}
@Override
default <T1, T2> Tuple2<List<T1>, List<T2>> unzip(
Function<? super T, Tuple2<? extends T1, ? extends T2>> unzipper) {
Objects.requireNonNull(unzipper, "unzipper is null");
List<T1> xs = Nil.instance();
List<T2> ys = Nil.instance();
for (T element : this) {
final Tuple2<? extends T1, ? extends T2> t = unzipper.apply(element);
xs = xs.prepend(t._1);
ys = ys.prepend(t._2);
}
return Tuple.of(xs.reverse(), ys.reverse());
}
@Override
default <U> List<Tuple2<T, U>> zip(Iterable<U> that) {
Objects.requireNonNull(that, "that is null");
List<Tuple2<T, U>> result = Nil.instance();
List<T> list1 = this;
Iterator<U> list2 = that.iterator();
while (!list1.isEmpty() && list2.hasNext()) {
result = result.prepend(Tuple.of(list1.head(), list2.next()));
list1 = list1.tail();
}
return result.reverse();
}
@Override
default <U> List<Tuple2<T, U>> zipAll(Iterable<U> that, T thisElem, U thatElem) {
Objects.requireNonNull(that, "that is null");
List<Tuple2<T, U>> result = Nil.instance();
Iterator<T> list1 = this.iterator();
Iterator<U> list2 = that.iterator();
while (list1.hasNext() || list2.hasNext()) {
final T elem1 = list1.hasNext() ? list1.next() : thisElem;
final U elem2 = list2.hasNext() ? list2.next() : thatElem;
result = result.prepend(Tuple.of(elem1, elem2));
}
return result.reverse();
}
@Override
default List<Tuple2<T, Integer>> zipWithIndex() {
List<Tuple2<T, Integer>> result = Nil.instance();
int index = 0;
for (List<T> list = this; !list.isEmpty(); list = list.tail()) {
result = result.prepend(Tuple.of(list.head(), index++));
}
return result.reverse();
}
/**
* Non-empty {@code List}, consisting of a {@code head} and a {@code tail}.
*
* @param <T> Component type of the List.
* @since 1.1.0
*/
// DEV NOTE: class declared final because of serialization proxy pattern (see Effective Java, 2nd ed., p. 315)
final class Cons<T> extends AbstractList<T> implements Serializable {
private static final long serialVersionUID = 1L;
private final T head;
private final List<T> tail;
/**
* Creates a List consisting of a head value and a trailing List.
*
* @param head The head
* @param tail The tail
*/
Cons(T head, List<T> tail) {
this.head = head;
this.tail = tail;
}
@Override
public T head() {
return head;
}
@Override
public Some<T> headOption() {
return new Some<>(head);
}
@Override
public List<T> init() {
return dropRight(1);
}
@Override
public Some<List<T>> initOption() {
return new Some<>(init());
}
@Override
public Some<T> peekOption() {
return new Some<>(head());
}
@Override
public Some<List<T>> popOption() {
return new Some<>(tail());
}
@Override
public Some<Tuple2<T, List<T>>> pop2Option() {
return new Some<>(Tuple.of(head(), tail()));
}
@Override
public List<T> tail() {
return tail;
}
@Override
public Some<List<T>> tailOption() {
return new Some<>(tail);
}
@Override
public boolean isEmpty() {
return false;
}
/**
* <p>
* {@code writeReplace} method for the serialization proxy pattern.
* </p>
* <p>
* The presence of this method causes the serialization system to emit a SerializationProxy instance instead of
* an instance of the enclosing class.
* </p>
*
* @return A SerialiationProxy for this enclosing class.
*/
private Object writeReplace() {
return new SerializationProxy<>(this);
}
/**
* <p>
* {@code readObject} method for the serialization proxy pattern.
* </p>
* Guarantees that the serialization system will never generate a serialized instance of the enclosing class.
*
* @param stream An object serialization stream.
* @throws java.io.InvalidObjectException This method will throw with the message "Proxy required".
*/
private void readObject(ObjectInputStream stream) throws InvalidObjectException {
throw new InvalidObjectException("Proxy required");
}
/**
* A serialization proxy which, in this context, is used to deserialize immutable, linked Lists with final
* instance fields.
*
* @param <T> The component type of the underlying list.
*/
// DEV NOTE: The serialization proxy pattern is not compatible with non-final, i.e. extendable,
// classes. Also, it may not be compatible with circular object graphs.
private static final class SerializationProxy<T> implements Serializable {
private static final long serialVersionUID = 1L;
// the instance to be serialized/deserialized
private transient Cons<T> list;
/**
* Constructor for the case of serialization, called by {@link Cons#writeReplace()}.
* <p/>
* The constructor of a SerializationProxy takes an argument that concisely represents the logical state of
* an instance of the enclosing class.
*
* @param list a Cons
*/
SerializationProxy(Cons<T> list) {
this.list = list;
}
/**
* Write an object to a serialization stream.
*
* @param s An object serialization stream.
* @throws java.io.IOException If an error occurs writing to the stream.
*/
private void writeObject(ObjectOutputStream s) throws IOException {
s.defaultWriteObject();
s.writeInt(list.length());
for (List<T> l = list; !l.isEmpty(); l = l.tail()) {
s.writeObject(l.head());
}
}
/**
* Read an object from a deserialization stream.
*
* @param s An object deserialization stream.
* @throws ClassNotFoundException If the object's class read from the stream cannot be found.
* @throws InvalidObjectException If the stream contains no list elements.
* @throws IOException If an error occurs reading from the stream.
*/
private void readObject(ObjectInputStream s) throws ClassNotFoundException, IOException {
s.defaultReadObject();
final int size = s.readInt();
if (size <= 0) {
throw new InvalidObjectException("No elements");
}
List<T> temp = Nil.instance();
for (int i = 0; i < size; i++) {
@SuppressWarnings("unchecked")
final T element = (T) s.readObject();
temp = temp.prepend(element);
}
list = (Cons<T>) temp.reverse();
}
/**
* <p>
* {@code readResolve} method for the serialization proxy pattern.
* </p>
* Returns a logically equivalent instance of the enclosing class. The presence of this method causes the
* serialization system to translate the serialization proxy back into an instance of the enclosing class
* upon deserialization.
*
* @return A deserialized instance of the enclosing class.
*/
private Object readResolve() {
return list;
}
}
}
/**
* Representation of the singleton empty {@code List}.
*
* @param <T> Component type of the List.
* @since 1.1.0
*/
final class Nil<T> extends AbstractList<T> implements Serializable {
private static final long serialVersionUID = 1L;
private static final Nil<?> INSTANCE = new Nil<>();
// hidden
private Nil() {
}
/**
* Returns the singleton instance of the linked list.
*
* @param <T> Component type of the List
* @return the singleton instance of the linked list.
*/
@SuppressWarnings("unchecked")
public static <T> Nil<T> instance() {
return (Nil<T>) INSTANCE;
}
@Override
public T head() {
throw new NoSuchElementException("head of empty list");
}
@Override
public None<T> headOption() {
return None.instance();
}
@Override
public List<T> init() {
throw new UnsupportedOperationException("init of empty list");
}
@Override
public None<List<T>> initOption() {
return None.instance();
}
@Override
public None<T> peekOption() {
return None.instance();
}
@Override
public None<List<T>> popOption() {
return None.instance();
}
@Override
public None<Tuple2<T, List<T>>> pop2Option() {
return None.instance();
}
@Override
public List<T> tail() {
throw new UnsupportedOperationException("tail of empty list");
}
@Override
public None<List<T>> tailOption() {
return None.instance();
}
@Override
public boolean isEmpty() {
return true;
}
/**
* Instance control for object serialization.
*
* @return The singleton instance of Nil.
* @see java.io.Serializable
*/
private Object readResolve() {
return INSTANCE;
}
}
/**
* <p>
* This class is needed because the interface {@link List} cannot use default methods to override Object's non-final
* methods equals, hashCode and toString.
* </p>
* See <a href="http://mail.openjdk.java.net/pipermail/lambda-dev/2013-March/008435.html">Allow default methods to
* override Object's methods</a>.
*
* @param <T> Component type of the List.
* @since 1.1.0
*/
abstract class AbstractList<T> implements List<T> {
@Override
public boolean equals(Object o) {
if (o == this) {
return true;
} else if (o instanceof List) {
List<?> list1 = this;
List<?> list2 = (List<?>) o;
while (!list1.isEmpty() && !list2.isEmpty()) {
final boolean isEqual = Objects.equals(list1.head(), list2.head());
if (!isEqual) {
return false;
}
list1 = list1.tail();
list2 = list2.tail();
}
return list1.isEmpty() && list2.isEmpty();
} else {
return false;
}
}
@Override
public int hashCode() {
int hashCode = 1;
for (T element : this) {
hashCode = 31 * hashCode + Objects.hashCode(element);
}
return hashCode;
}
@Override
public String toString() {
return map(String::valueOf).join(", ", "List(", ")");
}
}
}
| 1 | 5,989 | I'm pretty sure it doesn't harm, but don't we have a second pass here with the reverse() operation? | vavr-io-vavr | java |
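Reviewer observation for this record: the rewritten partition builds both sides with prepend and then calls reverse() on each, so every element is still touched once more after the predicate pass. The trade-off is one predicate evaluation per element plus two cheap structural walks, instead of the two full filter passes in the old version. A small sketch of the same shape, hypothetical and written against the javaslang API shown above:

import javaslang.Tuple;
import javaslang.Tuple2;
import javaslang.collection.List;

class PartitionCostSketch {
    // Splits a list into evens and odds in a single pass over the input.
    static Tuple2<List<Integer>, List<Integer>> evensAndOdds(List<Integer> xs) {
        List<Integer> left = List.nil(), right = List.nil();
        for (Integer n : xs) {            // one predicate test per element
            if (n % 2 == 0) {
                left = left.prepend(n);
            } else {
                right = right.prepend(n);
            }
        }
        // Each reverse() walks its partition once to restore input order,
        // but the predicate is never evaluated a second time.
        return Tuple.of(left.reverse(), right.reverse());
    }
}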
@@ -24,6 +24,7 @@ public interface CapabilityType {
String PLATFORM = "platform";
String SUPPORTS_JAVASCRIPT = "javascriptEnabled";
String TAKES_SCREENSHOT = "takesScreenshot";
+ String TAKES_HEAP_SNAPSHOT = "takesHeapSnapshot";
String VERSION = "version";
String SUPPORTS_ALERTS = "handlesAlerts";
String SUPPORTS_SQL_DATABASE = "databaseEnabled"; | 1 | /*
Copyright 2007-2010 Selenium committers
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package org.openqa.selenium.remote;
/**
* Commonly seen remote webdriver capabilities.
*/
public interface CapabilityType {
String BROWSER_NAME = "browserName";
String PLATFORM = "platform";
String SUPPORTS_JAVASCRIPT = "javascriptEnabled";
String TAKES_SCREENSHOT = "takesScreenshot";
String VERSION = "version";
String SUPPORTS_ALERTS = "handlesAlerts";
String SUPPORTS_SQL_DATABASE = "databaseEnabled";
String SUPPORTS_LOCATION_CONTEXT = "locationContextEnabled";
String SUPPORTS_APPLICATION_CACHE = "applicationCacheEnabled";
String SUPPORTS_BROWSER_CONNECTION = "browserConnectionEnabled";
String SUPPORTS_FINDING_BY_CSS = "cssSelectorsEnabled";
String PROXY = "proxy";
String SUPPORTS_WEB_STORAGE = "webStorageEnabled";
String ROTATABLE = "rotatable";
// Enable this capability to accept all SSL certs by default.
String ACCEPT_SSL_CERTS = "acceptSslCerts";
String HAS_NATIVE_EVENTS = "nativeEvents";
String UNEXPECTED_ALERT_BEHAVIOUR = "unexpectedAlertBehaviour";
String ELEMENT_SCROLL_BEHAVIOR = "elementScrollBehavior";
String LOGGING_PREFS = "loggingPrefs";
String ENABLE_PROFILING_CAPABILITY = "webdriver.logging.profiler.enabled";
/**
* Moved InternetExplorer specific CapabilityTypes into InternetExplorerDriver.java for consistency
*/
@Deprecated
String ENABLE_PERSISTENT_HOVERING = "enablePersistentHover";
interface ForSeleniumServer {
String AVOIDING_PROXY = "avoidProxy";
String ONLY_PROXYING_SELENIUM_TRAFFIC = "onlyProxySeleniumTraffic";
String PROXYING_EVERYTHING = "proxyEverything";
String PROXY_PAC = "proxy_pac";
String ENSURING_CLEAN_SESSION = "ensureCleanSession";
}
}
| 1 | 10,646 | This is not a standard capability and should be hidden behind a vendor prefix: -chromium-takesHeapSnapshot | SeleniumHQ-selenium | py |
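Reviewer note for this record: takesHeapSnapshot is not part of the standard capability set, so the string should carry a vendor prefix rather than look like one of the standard names. A one-line sketch of that change, hypothetical and not the committed Selenium code:

public interface CapabilityType {
    // Hypothetical: vendor-prefixed key, marking heap snapshots as a Chromium-specific feature.
    String TAKES_HEAP_SNAPSHOT = "-chromium-takesHeapSnapshot";
}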
@@ -144,9 +144,9 @@ trainer::execution_context_key_pair_t trainer::check_and_build_execution_context
if(dynamic_cast<observer_ptr<sgd_training_algorithm>>(&alg) != nullptr) {
/// @todo BVE FIXME Figure out how to get a good mini-batch size
/// in here
- context = make_unique<sgd_execution_context>(this, m_comm, mode, model->get_max_mini_batch_size());
+ context = make_unique<sgd_execution_context>(this, &alg, m_comm, mode, model->get_max_mini_batch_size());
}else {
- context = make_unique<execution_context>(this, m_comm, mode);
+ context = make_unique<execution_context>(this, &alg, m_comm, mode);
}
m_model_execution_context.emplace(key,std::move(context));
} | 1 | ////////////////////////////////////////////////////////////////////////////////
// Copyright (c) 2014-2016, Lawrence Livermore National Security, LLC.
// Produced at the Lawrence Livermore National Laboratory.
// Written by the LBANN Research Team (B. Van Essen, et al.) listed in
// the CONTRIBUTORS file. <[email protected]>
//
// LLNL-CODE-697807.
// All rights reserved.
//
// This file is part of LBANN: Livermore Big Artificial Neural Network
// Toolkit. For details, see http://software.llnl.gov/LBANN or
// https://github.com/LLNL/LBANN.
//
// Licensed under the Apache License, Version 2.0 (the "Licensee"); you
// may not use this file except in compliance with the License. You may
// obtain a copy of the License at:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the license.
////////////////////////////////////////////////////////////////////////////////
#include "lbann/trainers/trainer.hpp"
#include "lbann/callbacks/callback.hpp"
//#include "lbann/callbacks/callback_save_model.hpp"
#include "lbann/io/persist.hpp"
#include "lbann/layers/io/input/generic_input_layer.hpp"
#include "lbann/layers/transform/dummy.hpp"
#include "lbann/layers/transform/split.hpp"
#include "lbann/layers/transform/evaluation.hpp"
#include "lbann/objective_functions/layer_term.hpp"
#include "lbann/metrics/layer_metric.hpp"
#include "lbann/utils/random.hpp"
#include "lbann/utils/omp_diagnostics.hpp"
#include "lbann/utils/description.hpp"
#include "lbann/execution_contexts/sgd_execution_context.hpp"
#include "lbann/training_algorithms/sgd_training_algorithm.hpp"
#include <string>
#include <unistd.h>
#include <iomanip>
#include <queue>
#include <unordered_set>
#include <lbann.pb.h>
#include "mpi.h"
namespace lbann {
////////////////////////////////////////////////////////////
// Constructors and destructor
////////////////////////////////////////////////////////////
trainer::trainer(lbann_comm *comm)
: m_comm(comm),
m_io_thread_pool(),
m_background_io_allowed(true) {
// Default trainer name
m_name = "trainer" + std::to_string(m_comm->get_trainer_rank());
}
trainer::trainer(const trainer& other) :
m_comm(other.m_comm),
m_background_io_allowed(other.m_background_io_allowed) {
// Deep copies
// m_io_thread_pool = (other.m_io_thread_pool ?
// other.m_io_thread_pool->copy() : nullptr);
m_callbacks.reserve(other.m_callbacks.size());
for (auto const& cb : other.m_callbacks) {
m_callbacks.emplace_back(cb->copy());
}
}
trainer& trainer::operator=(const trainer& other) {
// Shallow copies
m_comm = other.m_comm;
m_background_io_allowed = other.m_background_io_allowed;
// Deep copies
// m_io_thread_pool = (other.m_io_thread_pool ?
// other.m_io_thread_pool->copy() : nullptr);
m_callbacks.reserve(other.m_callbacks.size());
for (auto const& cb : other.m_callbacks) {
m_callbacks.emplace_back(cb->copy());
}
return *this;
}
trainer::~trainer() {
}
////////////////////////////////////////////////////////////
// Trainer specification
////////////////////////////////////////////////////////////
void trainer::set_name(std::string const& name) {
if (name.empty()) {
LBANN_ERROR("attempted to rename trainer \"", get_name(), "\" with empty string");
}
m_name = name;
}
description trainer::get_description() const {
// Construct description object
description desc(get_name());
desc.add("Background I/O", m_background_io_allowed);
// Result
return desc;
}
////////////////////////////////////////////////////////////
// Setup
////////////////////////////////////////////////////////////
void trainer::setup(std::unique_ptr<thread_pool> io_thread_pool) {
// Setup I/O threads - set up before setting up the layers (input
// layer depends on having a properly initialized thread pool)
m_io_thread_pool = std::move(io_thread_pool);
// Set up callbacks
for (auto& cb : m_callbacks) {
cb->setup(this);
}
}
/// Check if there is already an execution context for the model in this mode, if not create one
trainer::execution_context_key_pair_t trainer::check_and_build_execution_context(training_algorithm& alg,
observer_ptr<model> model,
execution_mode mode) {
auto key = std::make_pair(model,mode);
if(m_model_execution_context.count(key) == 0) {
/// Create a execution context for each model and execution mode
std::unique_ptr<execution_context> context;
if(dynamic_cast<observer_ptr<sgd_training_algorithm>>(&alg) != nullptr) {
/// @todo BVE FIXME Figure out how to get a good mini-batch size
/// in here
context = make_unique<sgd_execution_context>(this, m_comm, mode, model->get_max_mini_batch_size());
}else {
context = make_unique<execution_context>(this, m_comm, mode);
}
m_model_execution_context.emplace(key,std::move(context));
}
return key;
}
/// Check if there is already an execution context for the model in this mode, if not create one
trainer::execution_context_key_pair_t trainer::check_and_build_execution_context(const execution_context& c,
model& model,
execution_mode mode) {
auto key = std::make_pair(&model, mode);
if(m_model_execution_context.count(key) == 0) {
std::unique_ptr<execution_context> context;
if(dynamic_cast<observer_ptr<const sgd_execution_context>>(&c) != nullptr) {
context = make_unique<sgd_execution_context>(this, m_comm, mode, model.get_max_mini_batch_size());
}else {
context = make_unique<execution_context>(this, m_comm, mode);
}
m_model_execution_context.emplace(key,std::move(context));
}
return key;
}
execution_context& trainer::get_execution_context(observer_ptr<model> model,
execution_mode mode) {
auto key = std::make_pair(model,mode);
return get_execution_context(key);
}
execution_context& trainer::get_execution_context(execution_context_key_pair_t key) {
if(m_model_execution_context.count(key) == 0) {
LBANN_ERROR("No execution context for this model / mode pair");
}
return static_cast<sgd_execution_context&>(*(m_model_execution_context[key].get()));
}
void trainer::delete_execution_context(execution_context_key_pair_t key) {
if(m_model_execution_context.count(key) == 0) {
LBANN_WARNING("Attempting to delete an invalid execution context for model="
+ (key.first)->get_name() + " / " + to_string(key.second));
}
m_model_execution_context.erase(key);
}
/// @todo BVE FIXME seems like there is a bug here about mapping
/// execution contexts to the right model
void trainer::for_each_execution_context(std::function<void(observer_ptr<execution_context>)>fn) {
for(auto&& c : m_model_execution_context) {
// auto&& model = c.first.first;
// auto&& mode = c.first.second;
auto&& context = c.second;
fn(context.get());
}
}
////////////////////////////////////////////////////////////
// Evaluation and training
////////////////////////////////////////////////////////////
void trainer::apply(training_algorithm& alg,
observer_ptr<model> model,
execution_mode mode,
termination_criteria const& term_criteria) {
auto key = check_and_build_execution_context(alg, model, mode);
/// Apply the training algorithm to train the model
alg.apply(*(m_model_execution_context[key].get()), *model, mode, term_criteria);
}
void trainer::train(observer_ptr<model> model, El::Int num_epochs, El::Int num_batches) {
auto sgd = make_unique<sgd_training_algorithm>();
auto key = check_and_build_execution_context(*sgd.get(), model, execution_mode::training);
/// Apply the training algorithm to train the model
sgd.get()->train(static_cast<sgd_execution_context&>(*(m_model_execution_context[key].get())), *model, num_epochs, num_batches);
}
void trainer::evaluate(observer_ptr<model> model, execution_mode mode, El::Int num_batches) {
auto sgd = make_unique<sgd_training_algorithm>();
auto key = check_and_build_execution_context(*sgd.get(), model, mode);
/// Apply the training algorithm to evaluate the model
sgd.get()->evaluate(static_cast<sgd_execution_context&>(*(m_model_execution_context[key].get())), *model, mode, num_batches);
}
// =============================================
// Checkpointing
// =============================================
bool trainer::save_to_checkpoint_shared(persist& p) {
auto save_checkpoint = [&p](observer_ptr<execution_context> ctx)
->void { ctx->save_to_checkpoint_shared(p); };
for_each_execution_context(save_checkpoint);
return true;
}
bool trainer::load_from_checkpoint_shared(persist& p) {
return true;
}
bool trainer::load_from_checkpoint_shared(persist& p, model& m, execution_context& c) {
execution_mode current_mode = c.get_execution_mode();
for(execution_mode mode : execution_mode_iterator()) {
/// Restart should optionally load any other valid contexts
if(mode == execution_mode::invalid) { continue; }
trainer::execution_context_key_pair_t key;
try {
if(current_mode == mode) {
/// Restart has to be able to load the currently running execution context
c.load_from_checkpoint_shared(p);
}else {
key = check_and_build_execution_context(c, m, mode);
auto& evaluation_context = static_cast<sgd_execution_context&>(get_execution_context(key));
evaluation_context.load_from_checkpoint_shared(p);
}
}catch (NonexistentArchiveFile const&) {
// Ignore the exception if the file is not for the current execution mode
if(current_mode == mode) {
LBANN_ERROR("Failed to restart model, invalid execution mode: " + to_string(current_mode));
}else {
delete_execution_context(key);
}
}
}
return true;
}
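// load_from_checkpoint_shared() above tries to restore a context for every
// execution mode: the currently running mode must load successfully, while any
// other mode is built speculatively and discarded again if its archive file is
// missing. The distributed variant below follows the same shape. A compact
// sketch of that restore-or-rollback pattern (exception and container types
// here are hypothetical, not this file's API):
/*
#include <map>
#include <stdexcept>
#include <string>

struct missing_archive : std::runtime_error { using std::runtime_error::runtime_error; };

// Pretend restore: throws when no archive exists for the given mode.
static void restore(int mode, const std::map<int, std::string>& archives) {
  if (archives.count(mode) == 0) throw missing_archive("no archive for mode");
}

static void restore_all(int current_mode, const std::map<int, std::string>& archives,
                        std::map<int, bool>& contexts) {
  for (int mode = 0; mode < 4; ++mode) {       // stand-in for the execution_mode iterator
    contexts[mode] = true;                     // speculatively build a context for this mode
    try {
      restore(mode, archives);
    } catch (const missing_archive&) {
      if (mode == current_mode) throw;         // the running mode must be restorable
      contexts.erase(mode);                    // other modes: roll the speculative context back
    }
  }
}
*/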
bool trainer::save_to_checkpoint_distributed(persist& p){
auto save_checkpoint = [&p](observer_ptr<execution_context> ctx)
->void { ctx->save_to_checkpoint_distributed(p); };
for_each_execution_context(save_checkpoint);
return true;
}
bool trainer::load_from_checkpoint_distributed(persist& p){
return true;
}
bool trainer::load_from_checkpoint_distributed(persist& p, model& m, execution_context& c){
execution_mode current_mode = c.get_execution_mode();
for(execution_mode mode : execution_mode_iterator()) {
/// Restart should optionally load any other valid contexts
if(mode == execution_mode::invalid) { continue; }
trainer::execution_context_key_pair_t key;
try {
if(current_mode == mode) {
/// Restart has to be able to load the currently running execution context
c.load_from_checkpoint_distributed(p);
}else {
key = check_and_build_execution_context(c, m, mode);
auto& evaluation_context = static_cast<sgd_execution_context&>(get_execution_context(key));
evaluation_context.load_from_checkpoint_distributed(p);
}
}catch (NonexistentArchiveFile const&) {
// Ignore the exception if the file is not for the current execution mode
if(current_mode == mode) {
LBANN_ERROR("Failed to restart model, invalid execution mode: " + to_string(current_mode));
}else {
delete_execution_context(key);
}
}
}
return true;
}
} // namespace lbann
| 1 | 15,284 | This shouldn't work with pointers -- prefer references unless you can meaningfully pass `nullptr`. The address-of operator here is clunky at best. | LLNL-lbann | cpp |
@@ -140,7 +140,8 @@ func TestBuild(t *testing.T) {
for _, t := range tests {
switch t {
case "atomic.go":
- // Not supported due to unaligned atomic accesses.
+ // Requires GCC 11.2.0 or above for interface comparison.
+ // https://github.com/gcc-mirror/gcc/commit/f30dd607669212de135dec1f1d8a93b8954c327c
case "reflect.go":
// Reflect tests do not work due to type code issues. | 1 | package main
// This file tests the compiler by running Go files in testdata/*.go and
// comparing their output with the expected output in testdata/*.txt.
import (
"bufio"
"bytes"
"errors"
"flag"
"fmt"
"io"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"runtime"
"strings"
"sync"
"testing"
"time"
"github.com/tinygo-org/tinygo/builder"
"github.com/tinygo-org/tinygo/compileopts"
"github.com/tinygo-org/tinygo/goenv"
)
const TESTDATA = "testdata"
var testTarget = flag.String("target", "", "override test target")
var sema = make(chan struct{}, runtime.NumCPU())
func TestBuild(t *testing.T) {
t.Parallel()
tests := []string{
"alias.go",
"atomic.go",
"binop.go",
"calls.go",
"cgo/",
"channel.go",
"float.go",
"gc.go",
"goroutines.go",
"init.go",
"init_multi.go",
"interface.go",
"json.go",
"map.go",
"math.go",
"print.go",
"reflect.go",
"slice.go",
"sort.go",
"stdlib.go",
"string.go",
"structs.go",
"testing.go",
"zeroalloc.go",
}
_, minor, err := goenv.GetGorootVersion(goenv.Get("GOROOT"))
if err != nil {
t.Fatal("could not read version from GOROOT:", err)
}
if minor >= 17 {
tests = append(tests, "go1.17.go")
}
if *testTarget != "" {
// This makes it possible to run one specific test (instead of all),
// which is especially useful to quickly check whether some changes
// affect a particular target architecture.
runPlatTests(optionsFromTarget(*testTarget, sema), tests, t)
return
}
t.Run("Host", func(t *testing.T) {
t.Parallel()
runPlatTests(optionsFromTarget("", sema), tests, t)
})
// Test a few build options.
t.Run("build-options", func(t *testing.T) {
t.Parallel()
// Test with few optimizations enabled (no inlining, etc).
t.Run("opt=1", func(t *testing.T) {
t.Parallel()
opts := optionsFromTarget("", sema)
opts.Opt = "1"
runTestWithConfig("stdlib.go", t, opts, nil, nil)
})
// Test with only the bare minimum of optimizations enabled.
// TODO: fix this for stdlib.go, which currently fails.
t.Run("opt=0", func(t *testing.T) {
t.Parallel()
opts := optionsFromTarget("", sema)
opts.Opt = "0"
runTestWithConfig("print.go", t, opts, nil, nil)
})
t.Run("ldflags", func(t *testing.T) {
t.Parallel()
opts := optionsFromTarget("", sema)
opts.GlobalValues = map[string]map[string]string{
"main": {
"someGlobal": "foobar",
},
}
runTestWithConfig("ldflags.go", t, opts, nil, nil)
})
})
if testing.Short() {
// Don't test other targets when the -short flag is used. Only test the
// host system.
return
}
t.Run("EmulatedCortexM3", func(t *testing.T) {
t.Parallel()
runPlatTests(optionsFromTarget("cortex-m-qemu", sema), tests, t)
})
t.Run("EmulatedRISCV", func(t *testing.T) {
t.Parallel()
runPlatTests(optionsFromTarget("riscv-qemu", sema), tests, t)
})
t.Run("AVR", func(t *testing.T) {
// LLVM backend crash:
// LIBCLANG FATAL ERROR: Cannot select: t3: i16 = JumpTable<0>
// This bug is non-deterministic, and only happens when run concurrently with non-AVR tests.
// For this reason, we do not t.Parallel() here.
var avrTests []string
for _, t := range tests {
switch t {
case "atomic.go":
// Not supported due to unaligned atomic accesses.
case "reflect.go":
// Reflect tests do not work due to type code issues.
case "gc.go":
// Does not pass due to high mark false positive rate.
case "json.go", "stdlib.go", "testing.go":
// Breaks interp.
case "map.go":
// Reflect size calculation crashes.
case "binop.go":
// Interface comparison results are inverted.
case "channel.go":
// Freezes after recv from closed channel.
case "float.go", "math.go", "print.go":
// Stuck in runtime.printfloat64.
case "interface.go":
// Several comparison tests fail.
case "cgo/":
// CGo does not work on AVR.
default:
avrTests = append(avrTests, t)
}
}
runPlatTests(optionsFromTarget("simavr", sema), avrTests, t)
})
if runtime.GOOS == "linux" {
t.Run("X86Linux", func(t *testing.T) {
t.Parallel()
runPlatTests(optionsFromOSARCH("linux/386", sema), tests, t)
})
t.Run("ARMLinux", func(t *testing.T) {
t.Parallel()
runPlatTests(optionsFromOSARCH("linux/arm/6", sema), tests, t)
})
t.Run("ARM64Linux", func(t *testing.T) {
t.Parallel()
runPlatTests(optionsFromOSARCH("linux/arm64", sema), tests, t)
})
t.Run("WebAssembly", func(t *testing.T) {
t.Parallel()
runPlatTests(optionsFromTarget("wasm", sema), tests, t)
})
t.Run("WASI", func(t *testing.T) {
t.Parallel()
runPlatTests(optionsFromTarget("wasi", sema), tests, t)
})
}
}
func runPlatTests(options compileopts.Options, tests []string, t *testing.T) {
emuCheck(t, options)
spec, err := compileopts.LoadTarget(&options)
if err != nil {
t.Fatal("failed to load target spec:", err)
}
for _, name := range tests {
name := name // redefine to avoid race condition
t.Run(name, func(t *testing.T) {
t.Parallel()
runTest(name, options, t, nil, nil)
})
}
if len(spec.Emulator) == 0 || spec.Emulator[0] != "simavr" {
t.Run("env.go", func(t *testing.T) {
t.Parallel()
runTest("env.go", options, t, []string{"first", "second"}, []string{"ENV1=VALUE1", "ENV2=VALUE2"})
})
}
if options.Target == "wasi" || options.Target == "wasm" {
t.Run("alias.go-scheduler-none", func(t *testing.T) {
t.Parallel()
options := compileopts.Options(options)
options.Scheduler = "none"
runTest("alias.go", options, t, nil, nil)
})
}
if options.Target == "" || options.Target == "wasi" {
t.Run("filesystem.go", func(t *testing.T) {
t.Parallel()
runTest("filesystem.go", options, t, nil, nil)
})
}
if options.Target == "" || options.Target == "wasi" || options.Target == "wasm" {
t.Run("rand.go", func(t *testing.T) {
t.Parallel()
runTest("rand.go", options, t, nil, nil)
})
}
}
func emuCheck(t *testing.T, options compileopts.Options) {
// Check if the emulator is installed.
spec, err := compileopts.LoadTarget(&options)
if err != nil {
t.Fatal("failed to load target spec:", err)
}
if len(spec.Emulator) != 0 {
_, err := exec.LookPath(spec.Emulator[0])
if err != nil {
if errors.Is(err, exec.ErrNotFound) {
t.Skipf("emulator not installed: %q", spec.Emulator[0])
}
t.Errorf("searching for emulator: %v", err)
return
}
}
}
func optionsFromTarget(target string, sema chan struct{}) compileopts.Options {
return compileopts.Options{
// GOOS/GOARCH are only used if target == ""
GOOS: goenv.Get("GOOS"),
GOARCH: goenv.Get("GOARCH"),
GOARM: goenv.Get("GOARM"),
Target: target,
Semaphore: sema,
Debug: true,
VerifyIR: true,
Opt: "z",
}
}
// optionsFromOSARCH returns a set of options based on the "osarch" string. This
// string is in the form of "os/arch/subarch", with the subarch only sometimes
// being necessary. Examples are "darwin/amd64" or "linux/arm/7".
func optionsFromOSARCH(osarch string, sema chan struct{}) compileopts.Options {
parts := strings.Split(osarch, "/")
options := compileopts.Options{
GOOS: parts[0],
GOARCH: parts[1],
Semaphore: sema,
Debug: true,
VerifyIR: true,
Opt: "z",
}
if options.GOARCH == "arm" {
options.GOARM = parts[2]
}
return options
}
func runTest(name string, options compileopts.Options, t *testing.T, cmdArgs, environmentVars []string) {
runTestWithConfig(name, t, options, cmdArgs, environmentVars)
}
func runTestWithConfig(name string, t *testing.T, options compileopts.Options, cmdArgs, environmentVars []string) {
// Get the expected output for this test.
// Note: not using filepath.Join as it strips the path separator at the end
// of the path.
path := TESTDATA + "/" + name
// Get the expected output for this test.
txtpath := path[:len(path)-3] + ".txt"
if path[len(path)-1] == '/' {
txtpath = path + "out.txt"
}
expected, err := ioutil.ReadFile(txtpath)
if err != nil {
t.Fatal("could not read expected output file:", err)
}
// Create a temporary directory for test output files.
tmpdir := t.TempDir()
// Determine whether we're on a system that supports environment variables
// and command line parameters (operating systems, WASI) or not (baremetal,
// WebAssembly in the browser). If we're on a system without an environment,
// we need to pass command line arguments and environment variables through
// global variables (built into the binary directly) instead of the
// conventional way.
spec, err := compileopts.LoadTarget(&options)
if err != nil {
t.Fatal("failed to load target spec:", err)
}
needsEnvInVars := spec.GOOS == "js"
for _, tag := range spec.BuildTags {
if tag == "baremetal" {
needsEnvInVars = true
}
}
if needsEnvInVars {
runtimeGlobals := make(map[string]string)
if len(cmdArgs) != 0 {
runtimeGlobals["osArgs"] = strings.Join(cmdArgs, "\x00")
}
if len(environmentVars) != 0 {
runtimeGlobals["osEnv"] = strings.Join(environmentVars, "\x00")
}
if len(runtimeGlobals) != 0 {
// This sets the global variables like they would be set with
			// `-ldflags="-X=runtime.osArgs=first\x00second"`.
// The runtime package has two variables (osArgs and osEnv) that are
// both strings, from which the parameters and environment variables
// are read.
options.GlobalValues = map[string]map[string]string{
"runtime": runtimeGlobals,
}
}
}
// Build the test binary.
binary := filepath.Join(tmpdir, "test")
if spec.GOOS == "windows" {
binary += ".exe"
}
err = Build("./"+path, binary, &options)
if err != nil {
printCompilerError(t.Log, err)
t.Fail()
return
}
// Create the test command, taking care of emulators etc.
var cmd *exec.Cmd
if len(spec.Emulator) == 0 {
cmd = exec.Command(binary)
} else {
args := append(spec.Emulator[1:], binary)
cmd = exec.Command(spec.Emulator[0], args...)
}
if len(spec.Emulator) != 0 && spec.Emulator[0] == "wasmtime" {
// Allow reading from the current directory.
cmd.Args = append(cmd.Args, "--dir=.")
for _, v := range environmentVars {
cmd.Args = append(cmd.Args, "--env", v)
}
cmd.Args = append(cmd.Args, cmdArgs...)
} else {
if !needsEnvInVars {
cmd.Args = append(cmd.Args, cmdArgs...) // works on qemu-aarch64 etc
cmd.Env = append(cmd.Env, environmentVars...)
}
}
// Run the test.
runComplete := make(chan struct{})
ranTooLong := false
stdout := &bytes.Buffer{}
if len(spec.Emulator) != 0 && spec.Emulator[0] == "simavr" {
cmd.Stdout = os.Stderr
cmd.Stderr = stdout
} else {
cmd.Stdout = stdout
cmd.Stderr = os.Stderr
}
err = cmd.Start()
if err != nil {
t.Fatal("failed to start:", err)
}
go func() {
// Terminate the process if it runs too long.
maxDuration := 10 * time.Second
if runtime.GOOS == "windows" {
// For some reason, tests on Windows can take around
// 30s to complete. TODO: investigate why and fix this.
maxDuration = 40 * time.Second
}
timer := time.NewTimer(maxDuration)
select {
case <-runComplete:
timer.Stop()
case <-timer.C:
ranTooLong = true
if runtime.GOOS == "windows" {
cmd.Process.Signal(os.Kill) // Windows doesn't support SIGINT.
} else {
cmd.Process.Signal(os.Interrupt)
}
}
}()
err = cmd.Wait()
close(runComplete)
if ranTooLong {
stdout.WriteString("--- test ran too long, terminating...\n")
}
// putchar() prints CRLF, convert it to LF.
actual := bytes.Replace(stdout.Bytes(), []byte{'\r', '\n'}, []byte{'\n'}, -1)
expected = bytes.Replace(expected, []byte{'\r', '\n'}, []byte{'\n'}, -1) // for Windows
if len(spec.Emulator) != 0 && spec.Emulator[0] == "simavr" {
// Strip simavr log formatting.
actual = bytes.Replace(actual, []byte{0x1b, '[', '3', '2', 'm'}, nil, -1)
actual = bytes.Replace(actual, []byte{0x1b, '[', '0', 'm'}, nil, -1)
actual = bytes.Replace(actual, []byte{'.', '.', '\n'}, []byte{'\n'}, -1)
}
// Check whether the command ran successfully.
fail := false
if err != nil {
t.Log("failed to run:", err)
fail = true
} else if !bytes.Equal(expected, actual) {
t.Logf("output did not match (expected %d bytes, got %d bytes):", len(expected), len(actual))
fail = true
}
if fail {
r := bufio.NewReader(bytes.NewReader(actual))
for {
line, err := r.ReadString('\n')
if err != nil {
break
}
t.Log("stdout:", line[:len(line)-1])
}
t.Fail()
}
}
func TestTest(t *testing.T) {
t.Parallel()
type targ struct {
name string
opts compileopts.Options
}
targs := []targ{
// Host
{"Host", optionsFromTarget("", sema)},
}
if !testing.Short() {
if runtime.GOOS == "linux" {
targs = append(targs,
// Linux
targ{"X86Linux", optionsFromOSARCH("linux/386", sema)},
targ{"ARMLinux", optionsFromOSARCH("linux/arm/6", sema)},
targ{"ARM64Linux", optionsFromOSARCH("linux/arm64", sema)},
)
}
targs = append(targs,
// QEMU microcontrollers
targ{"EmulatedCortexM3", optionsFromTarget("cortex-m-qemu", sema)},
targ{"EmulatedRISCV", optionsFromTarget("riscv-qemu", sema)},
// Node/Wasmtime
targ{"WASM", optionsFromTarget("wasm", sema)},
targ{"WASI", optionsFromTarget("wasi", sema)},
)
}
for _, targ := range targs {
targ := targ
t.Run(targ.name, func(t *testing.T) {
t.Parallel()
emuCheck(t, targ.opts)
t.Run("Pass", func(t *testing.T) {
t.Parallel()
// Test a package which builds and passes normally.
var wg sync.WaitGroup
defer wg.Wait()
out := ioLogger(t, &wg)
defer out.Close()
opts := targ.opts
passed, err := Test("github.com/tinygo-org/tinygo/tests/testing/pass", out, out, &opts, false, false, false, "", "", "", "")
if err != nil {
t.Errorf("test error: %v", err)
}
if !passed {
t.Error("test failed")
}
})
t.Run("Fail", func(t *testing.T) {
t.Parallel()
// Test a package which builds fine but fails.
var wg sync.WaitGroup
defer wg.Wait()
out := ioLogger(t, &wg)
defer out.Close()
opts := targ.opts
passed, err := Test("github.com/tinygo-org/tinygo/tests/testing/fail", out, out, &opts, false, false, false, "", "", "", "")
if err != nil {
t.Errorf("test error: %v", err)
}
if passed {
t.Error("test passed")
}
})
if targ.name != "Host" {
// Emulated tests are somewhat slow, and these do not need to be run across every platform.
return
}
t.Run("Nothing", func(t *testing.T) {
t.Parallel()
// Test a package with no test files.
var wg sync.WaitGroup
defer wg.Wait()
out := ioLogger(t, &wg)
defer out.Close()
var output bytes.Buffer
opts := targ.opts
passed, err := Test("github.com/tinygo-org/tinygo/tests/testing/nothing", io.MultiWriter(&output, out), out, &opts, false, false, false, "", "", "", "")
if err != nil {
t.Errorf("test error: %v", err)
}
if !passed {
t.Error("test failed")
}
if !strings.Contains(output.String(), "[no test files]") {
t.Error("missing [no test files] in output")
}
})
t.Run("BuildErr", func(t *testing.T) {
t.Parallel()
// Test a package which fails to build.
var wg sync.WaitGroup
defer wg.Wait()
out := ioLogger(t, &wg)
defer out.Close()
opts := targ.opts
passed, err := Test("github.com/tinygo-org/tinygo/tests/testing/builderr", out, out, &opts, false, false, false, "", "", "", "")
if err == nil {
t.Error("test did not error")
}
if passed {
t.Error("test passed")
}
})
})
}
}
func ioLogger(t *testing.T, wg *sync.WaitGroup) io.WriteCloser {
r, w := io.Pipe()
wg.Add(1)
go func() {
defer wg.Done()
defer r.Close()
scanner := bufio.NewScanner(r)
for scanner.Scan() {
t.Log(scanner.Text())
}
}()
return w
}
// This TestMain is necessary because TinyGo may also be invoked to run certain
// LLVM tools in a separate process. Not capturing these invocations would lead
// to recursive tests.
func TestMain(m *testing.M) {
if len(os.Args) >= 2 {
switch os.Args[1] {
case "clang", "ld.lld", "wasm-ld":
// Invoke a specific tool.
err := builder.RunTool(os.Args[1], os.Args[2:]...)
if err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
os.Exit(0)
}
}
// Run normal tests.
os.Exit(m.Run())
}
| 1 | 14,183 | Sidenote: some day we'll drop the avr-gcc and avr-libc dependencies so that this case becomes supported. | tinygo-org-tinygo | go |
@@ -619,6 +619,6 @@ public class TestFieldCacheVsDocValues extends LuceneTestCase {
protected boolean codecAcceptsHugeBinaryValues(String field) {
String name = TestUtil.getDocValuesFormat(field);
- return !(name.equals("Memory")); // Direct has a different type of limit
+ return true;
}
} | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.uninverting;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.BinaryDocValuesField;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.NumericDocValuesField;
import org.apache.lucene.document.SortedDocValuesField;
import org.apache.lucene.document.SortedSetDocValuesField;
import org.apache.lucene.document.StringField;
import org.apache.lucene.index.BinaryDocValues;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.SortedDocValues;
import org.apache.lucene.index.SortedSetDocValues;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermsEnum.SeekStatus;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.Constants;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.TestUtil;
import org.apache.solr.index.SlowCompositeReaderWrapper;
import static org.apache.lucene.index.SortedSetDocValues.NO_MORE_ORDS;
import static org.apache.lucene.search.DocIdSetIterator.NO_MORE_DOCS;
public class TestFieldCacheVsDocValues extends LuceneTestCase {
@Override
public void setUp() throws Exception {
super.setUp();
assumeFalse("test unsupported on J9 temporarily, see https://issues.apache.org/jira/browse/LUCENE-6522",
Constants.JAVA_VENDOR.startsWith("IBM"));
}
public void testByteMissingVsFieldCache() throws Exception {
int numIterations = atLeast(1);
for (int i = 0; i < numIterations; i++) {
doTestMissingVsFieldCache(Byte.MIN_VALUE, Byte.MAX_VALUE);
}
}
public void testShortMissingVsFieldCache() throws Exception {
int numIterations = atLeast(1);
for (int i = 0; i < numIterations; i++) {
doTestMissingVsFieldCache(Short.MIN_VALUE, Short.MAX_VALUE);
}
}
public void testIntMissingVsFieldCache() throws Exception {
int numIterations = atLeast(1);
for (int i = 0; i < numIterations; i++) {
doTestMissingVsFieldCache(Integer.MIN_VALUE, Integer.MAX_VALUE);
}
}
public void testLongMissingVsFieldCache() throws Exception {
int numIterations = atLeast(1);
for (int i = 0; i < numIterations; i++) {
doTestMissingVsFieldCache(Long.MIN_VALUE, Long.MAX_VALUE);
}
}
public void testSortedFixedLengthVsFieldCache() throws Exception {
int numIterations = atLeast(1);
for (int i = 0; i < numIterations; i++) {
int fixedLength = TestUtil.nextInt(random(), 1, 10);
doTestSortedVsFieldCache(fixedLength, fixedLength);
}
}
public void testSortedVariableLengthVsFieldCache() throws Exception {
int numIterations = atLeast(1);
for (int i = 0; i < numIterations; i++) {
doTestSortedVsFieldCache(1, 10);
}
}
public void testSortedSetFixedLengthVsUninvertedField() throws Exception {
int numIterations = atLeast(1);
for (int i = 0; i < numIterations; i++) {
int fixedLength = TestUtil.nextInt(random(), 1, 10);
doTestSortedSetVsUninvertedField(fixedLength, fixedLength);
}
}
public void testSortedSetVariableLengthVsUninvertedField() throws Exception {
int numIterations = atLeast(1);
for (int i = 0; i < numIterations; i++) {
doTestSortedSetVsUninvertedField(1, 10);
}
}
// LUCENE-4853
public void testHugeBinaryValues() throws Exception {
Analyzer analyzer = new MockAnalyzer(random());
    // FSDirectory because SimpleText will consume gobs of
// space when storing big binary values:
try (Directory d = newFSDirectory(createTempDir("hugeBinaryValues"))) {
boolean doFixed = random().nextBoolean();
int numDocs;
int fixedLength = 0;
if (doFixed) {
// Sometimes make all values fixed length since some
// codecs have different code paths for this:
numDocs = TestUtil.nextInt(random(), 10, 20);
fixedLength = TestUtil.nextInt(random(), 65537, 256 * 1024);
} else {
numDocs = TestUtil.nextInt(random(), 100, 200);
}
try (IndexWriter w = new IndexWriter(d, newIndexWriterConfig(analyzer))) {
List<byte[]> docBytes = new ArrayList<>();
long totalBytes = 0;
for (int docID = 0; docID < numDocs; docID++) {
// we don't use RandomIndexWriter because it might add
// more docvalues than we expect !!!!
// Must be > 64KB in size to ensure more than 2 pages in
// PagedBytes would be needed:
int numBytes;
if (doFixed) {
numBytes = fixedLength;
} else if (docID == 0 || random().nextInt(5) == 3) {
numBytes = TestUtil.nextInt(random(), 65537, 3 * 1024 * 1024);
} else {
numBytes = TestUtil.nextInt(random(), 1, 1024 * 1024);
}
totalBytes += numBytes;
if (totalBytes > 5 * 1024 * 1024) {
break;
}
byte[] bytes = new byte[numBytes];
random().nextBytes(bytes);
docBytes.add(bytes);
Document doc = new Document();
BytesRef b = new BytesRef(bytes);
b.length = bytes.length;
doc.add(new BinaryDocValuesField("field", b));
doc.add(new StringField("id", "" + docID, Field.Store.YES));
try {
w.addDocument(doc);
} catch (IllegalArgumentException iae) {
if (iae.getMessage().indexOf("is too large") == -1) {
throw iae;
} else {
// OK: some codecs can't handle binary DV > 32K
assertFalse(codecAcceptsHugeBinaryValues("field"));
w.rollback();
d.close();
return;
}
}
}
DirectoryReader r;
try {
r = DirectoryReader.open(w);
} catch (IllegalArgumentException iae) {
if (iae.getMessage().indexOf("is too large") == -1) {
throw iae;
} else {
assertFalse(codecAcceptsHugeBinaryValues("field"));
// OK: some codecs can't handle binary DV > 32K
w.rollback();
d.close();
return;
}
}
try (LeafReader ar = SlowCompositeReaderWrapper.wrap(r)) {
TestUtil.checkReader(ar);
BinaryDocValues s = FieldCache.DEFAULT.getTerms(ar, "field");
for (int docID = 0; docID < docBytes.size(); docID++) {
Document doc = ar.document(docID);
assertEquals(docID, s.nextDoc());
BytesRef bytes = s.binaryValue();
byte[] expected = docBytes.get(Integer.parseInt(doc.get("id")));
assertEquals(expected.length, bytes.length);
assertEquals(new BytesRef(expected), bytes);
}
assertTrue(codecAcceptsHugeBinaryValues("field"));
}
}
}
}
private static final int LARGE_BINARY_FIELD_LENGTH = (1 << 15) - 2;
// TODO: get this out of here and into the deprecated codecs (4.0, 4.2)
public void testHugeBinaryValueLimit() throws Exception {
// We only test DVFormats that have a limit
assumeFalse("test requires codec with limits on max binary field length", codecAcceptsHugeBinaryValues("field"));
Analyzer analyzer = new MockAnalyzer(random());
    // FSDirectory because SimpleText will consume gobs of
// space when storing big binary values:
Directory d = newFSDirectory(createTempDir("hugeBinaryValues"));
boolean doFixed = random().nextBoolean();
int numDocs;
int fixedLength = 0;
if (doFixed) {
// Sometimes make all values fixed length since some
// codecs have different code paths for this:
numDocs = TestUtil.nextInt(random(), 10, 20);
fixedLength = LARGE_BINARY_FIELD_LENGTH;
} else {
numDocs = TestUtil.nextInt(random(), 100, 200);
}
IndexWriter w = new IndexWriter(d, newIndexWriterConfig(analyzer));
List<byte[]> docBytes = new ArrayList<>();
long totalBytes = 0;
for(int docID=0;docID<numDocs;docID++) {
// we don't use RandomIndexWriter because it might add
// more docvalues than we expect !!!!
// Must be > 64KB in size to ensure more than 2 pages in
// PagedBytes would be needed:
int numBytes;
if (doFixed) {
numBytes = fixedLength;
} else if (docID == 0 || random().nextInt(5) == 3) {
numBytes = LARGE_BINARY_FIELD_LENGTH;
} else {
numBytes = TestUtil.nextInt(random(), 1, LARGE_BINARY_FIELD_LENGTH);
}
totalBytes += numBytes;
if (totalBytes > 5 * 1024*1024) {
break;
}
byte[] bytes = new byte[numBytes];
random().nextBytes(bytes);
docBytes.add(bytes);
Document doc = new Document();
BytesRef b = new BytesRef(bytes);
b.length = bytes.length;
doc.add(new BinaryDocValuesField("field", b));
doc.add(new StringField("id", ""+docID, Field.Store.YES));
w.addDocument(doc);
}
DirectoryReader r = DirectoryReader.open(w);
w.close();
LeafReader ar = SlowCompositeReaderWrapper.wrap(r);
TestUtil.checkReader(ar);
BinaryDocValues s = FieldCache.DEFAULT.getTerms(ar, "field");
for(int docID=0;docID<docBytes.size();docID++) {
assertEquals(docID, s.nextDoc());
Document doc = ar.document(docID);
BytesRef bytes = s.binaryValue();
byte[] expected = docBytes.get(Integer.parseInt(doc.get("id")));
assertEquals(expected.length, bytes.length);
assertEquals(new BytesRef(expected), bytes);
}
ar.close();
d.close();
}
private void doTestSortedVsFieldCache(int minLength, int maxLength) throws Exception {
Directory dir = newDirectory();
IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
RandomIndexWriter writer = new RandomIndexWriter(random(), dir, conf);
Document doc = new Document();
Field idField = new StringField("id", "", Field.Store.NO);
Field indexedField = new StringField("indexed", "", Field.Store.NO);
Field dvField = new SortedDocValuesField("dv", new BytesRef());
doc.add(idField);
doc.add(indexedField);
doc.add(dvField);
// index some docs
int numDocs = atLeast(300);
for (int i = 0; i < numDocs; i++) {
idField.setStringValue(Integer.toString(i));
final int length;
if (minLength == maxLength) {
length = minLength; // fixed length
} else {
length = TestUtil.nextInt(random(), minLength, maxLength);
}
String value = TestUtil.randomSimpleString(random(), length);
indexedField.setStringValue(value);
dvField.setBytesValue(new BytesRef(value));
writer.addDocument(doc);
if (random().nextInt(31) == 0) {
writer.commit();
}
}
// delete some docs
int numDeletions = random().nextInt(numDocs/10);
for (int i = 0; i < numDeletions; i++) {
int id = random().nextInt(numDocs);
writer.deleteDocuments(new Term("id", Integer.toString(id)));
}
writer.close();
// compare
DirectoryReader ir = DirectoryReader.open(dir);
for (LeafReaderContext context : ir.leaves()) {
LeafReader r = context.reader();
SortedDocValues expected = FieldCache.DEFAULT.getTermsIndex(r, "indexed");
SortedDocValues actual = r.getSortedDocValues("dv");
assertEquals(r.maxDoc(), expected, actual);
}
ir.close();
dir.close();
}
private void doTestSortedSetVsUninvertedField(int minLength, int maxLength) throws Exception {
Directory dir = newDirectory();
IndexWriterConfig conf = new IndexWriterConfig(new MockAnalyzer(random()));
RandomIndexWriter writer = new RandomIndexWriter(random(), dir, conf);
// index some docs
int numDocs = atLeast(300);
for (int i = 0; i < numDocs; i++) {
Document doc = new Document();
Field idField = new StringField("id", Integer.toString(i), Field.Store.NO);
doc.add(idField);
final int length = TestUtil.nextInt(random(), minLength, maxLength);
int numValues = random().nextInt(17);
// create a random list of strings
List<String> values = new ArrayList<>();
for (int v = 0; v < numValues; v++) {
values.add(TestUtil.randomSimpleString(random(), minLength, length));
}
// add in any order to the indexed field
ArrayList<String> unordered = new ArrayList<>(values);
Collections.shuffle(unordered, random());
for (String v : values) {
doc.add(newStringField("indexed", v, Field.Store.NO));
}
// add in any order to the dv field
ArrayList<String> unordered2 = new ArrayList<>(values);
Collections.shuffle(unordered2, random());
for (String v : unordered2) {
doc.add(new SortedSetDocValuesField("dv", new BytesRef(v)));
}
writer.addDocument(doc);
if (random().nextInt(31) == 0) {
writer.commit();
}
}
// delete some docs
int numDeletions = random().nextInt(numDocs/10);
for (int i = 0; i < numDeletions; i++) {
int id = random().nextInt(numDocs);
writer.deleteDocuments(new Term("id", Integer.toString(id)));
}
// compare per-segment
DirectoryReader ir = writer.getReader();
for (LeafReaderContext context : ir.leaves()) {
LeafReader r = context.reader();
SortedSetDocValues expected = FieldCache.DEFAULT.getDocTermOrds(r, "indexed", null);
SortedSetDocValues actual = r.getSortedSetDocValues("dv");
assertEquals(r.maxDoc(), expected, actual);
}
ir.close();
writer.forceMerge(1);
// now compare again after the merge
ir = writer.getReader();
LeafReader ar = getOnlyLeafReader(ir);
SortedSetDocValues expected = FieldCache.DEFAULT.getDocTermOrds(ar, "indexed", null);
SortedSetDocValues actual = ar.getSortedSetDocValues("dv");
assertEquals(ir.maxDoc(), expected, actual);
ir.close();
writer.close();
dir.close();
}
private void doTestMissingVsFieldCache(LongProducer longs) throws Exception {
Directory dir = newDirectory();
IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
RandomIndexWriter writer = new RandomIndexWriter(random(), dir, conf);
Field idField = new StringField("id", "", Field.Store.NO);
Field indexedField = newStringField("indexed", "", Field.Store.NO);
Field dvField = new NumericDocValuesField("dv", 0);
// index some docs
int numDocs = atLeast(300);
// numDocs should be always > 256 so that in case of a codec that optimizes
// for numbers of values <= 256, all storage layouts are tested
assert numDocs > 256;
for (int i = 0; i < numDocs; i++) {
idField.setStringValue(Integer.toString(i));
long value = longs.next();
indexedField.setStringValue(Long.toString(value));
dvField.setLongValue(value);
Document doc = new Document();
doc.add(idField);
// 1/4 of the time we neglect to add the fields
if (random().nextInt(4) > 0) {
doc.add(indexedField);
doc.add(dvField);
}
writer.addDocument(doc);
if (random().nextInt(31) == 0) {
writer.commit();
}
}
// delete some docs
int numDeletions = random().nextInt(numDocs/10);
for (int i = 0; i < numDeletions; i++) {
int id = random().nextInt(numDocs);
writer.deleteDocuments(new Term("id", Integer.toString(id)));
}
// merge some segments and ensure that at least one of them has more than
// 256 values
writer.forceMerge(numDocs / 256);
writer.close();
// compare
DirectoryReader ir = DirectoryReader.open(dir);
for (LeafReaderContext context : ir.leaves()) {
LeafReader r = context.reader();
Bits expected = FieldCache.DEFAULT.getDocsWithField(r, "indexed", null);
Bits actual = FieldCache.DEFAULT.getDocsWithField(r, "dv", null);
assertEquals(expected, actual);
}
ir.close();
dir.close();
}
private void doTestMissingVsFieldCache(final long minValue, final long maxValue) throws Exception {
doTestMissingVsFieldCache(new LongProducer() {
@Override
long next() {
return TestUtil.nextLong(random(), minValue, maxValue);
}
});
}
static abstract class LongProducer {
abstract long next();
}
private void assertEquals(Bits expected, Bits actual) throws Exception {
assertEquals(expected.length(), actual.length());
for (int i = 0; i < expected.length(); i++) {
assertEquals(expected.get(i), actual.get(i));
}
}
private void assertEquals(int maxDoc, SortedDocValues expected, SortedDocValues actual) throws Exception {
// can be null for the segment if no docs actually had any SortedDocValues
// in this case FC.getDocTermsOrds returns EMPTY
if (actual == null) {
assertEquals(expected.getValueCount(), 0);
return;
}
assertEquals(expected.getValueCount(), actual.getValueCount());
// compare ord lists
while (true) {
int docID = expected.nextDoc();
if (docID == NO_MORE_DOCS) {
assertEquals(NO_MORE_DOCS, actual.nextDoc());
break;
}
assertEquals(docID, actual.nextDoc());
assertEquals(expected.ordValue(), actual.ordValue());
assertEquals(expected.binaryValue(), actual.binaryValue());
}
// compare ord dictionary
for (long i = 0; i < expected.getValueCount(); i++) {
final BytesRef expectedBytes = BytesRef.deepCopyOf(expected.lookupOrd((int) i));
final BytesRef actualBytes = actual.lookupOrd((int) i);
assertEquals(expectedBytes, actualBytes);
}
// compare termsenum
assertEquals(expected.getValueCount(), expected.termsEnum(), actual.termsEnum());
}
private void assertEquals(int maxDoc, SortedSetDocValues expected, SortedSetDocValues actual) throws Exception {
// can be null for the segment if no docs actually had any SortedDocValues
// in this case FC.getDocTermsOrds returns EMPTY
if (actual == null) {
assertEquals(expected.getValueCount(), 0);
return;
}
assertEquals(expected.getValueCount(), actual.getValueCount());
while (true) {
int docID = expected.nextDoc();
assertEquals(docID, actual.nextDoc());
if (docID == NO_MORE_DOCS) {
break;
}
long expectedOrd;
while ((expectedOrd = expected.nextOrd()) != NO_MORE_ORDS) {
assertEquals(expectedOrd, actual.nextOrd());
}
assertEquals(NO_MORE_ORDS, actual.nextOrd());
}
// compare ord dictionary
for (long i = 0; i < expected.getValueCount(); i++) {
final BytesRef expectedBytes = BytesRef.deepCopyOf(expected.lookupOrd(i));
final BytesRef actualBytes = actual.lookupOrd(i);
assertEquals(expectedBytes, actualBytes);
}
// compare termsenum
assertEquals(expected.getValueCount(), expected.termsEnum(), actual.termsEnum());
}
private void assertEquals(long numOrds, TermsEnum expected, TermsEnum actual) throws Exception {
BytesRef ref;
// sequential next() through all terms
while ((ref = expected.next()) != null) {
assertEquals(ref, actual.next());
assertEquals(expected.ord(), actual.ord());
assertEquals(expected.term(), actual.term());
}
assertNull(actual.next());
// sequential seekExact(ord) through all terms
for (long i = 0; i < numOrds; i++) {
expected.seekExact(i);
actual.seekExact(i);
assertEquals(expected.ord(), actual.ord());
assertEquals(expected.term(), actual.term());
}
// sequential seekExact(BytesRef) through all terms
for (long i = 0; i < numOrds; i++) {
expected.seekExact(i);
assertTrue(actual.seekExact(expected.term()));
assertEquals(expected.ord(), actual.ord());
assertEquals(expected.term(), actual.term());
}
// sequential seekCeil(BytesRef) through all terms
for (long i = 0; i < numOrds; i++) {
expected.seekExact(i);
assertEquals(SeekStatus.FOUND, actual.seekCeil(expected.term()));
assertEquals(expected.ord(), actual.ord());
assertEquals(expected.term(), actual.term());
}
// random seekExact(ord)
for (long i = 0; i < numOrds; i++) {
long randomOrd = TestUtil.nextLong(random(), 0, numOrds - 1);
expected.seekExact(randomOrd);
actual.seekExact(randomOrd);
assertEquals(expected.ord(), actual.ord());
assertEquals(expected.term(), actual.term());
}
// random seekExact(BytesRef)
for (long i = 0; i < numOrds; i++) {
long randomOrd = TestUtil.nextLong(random(), 0, numOrds - 1);
expected.seekExact(randomOrd);
actual.seekExact(expected.term());
assertEquals(expected.ord(), actual.ord());
assertEquals(expected.term(), actual.term());
}
// random seekCeil(BytesRef)
for (long i = 0; i < numOrds; i++) {
BytesRef target = new BytesRef(TestUtil.randomUnicodeString(random()));
SeekStatus expectedStatus = expected.seekCeil(target);
assertEquals(expectedStatus, actual.seekCeil(target));
if (expectedStatus != SeekStatus.END) {
assertEquals(expected.ord(), actual.ord());
assertEquals(expected.term(), actual.term());
}
}
}
protected boolean codecAcceptsHugeBinaryValues(String field) {
String name = TestUtil.getDocValuesFormat(field);
return !(name.equals("Memory")); // Direct has a different type of limit
}
}
| 1 | 26,988 | do we still need this method? | apache-lucene-solr | java |
@@ -618,10 +618,14 @@ RETDesc *GenericUpdate::createOldAndNewCorrelationNames(BindWA *bindWA, NABoolea
rd = new (bindWA->wHeap()) RETDesc(bindWA);
}
+ /* if ((getOperatorType() != REL_UNARY_INSERT) ||
+ getUpdateCKorUniqueIndexKey() ||
+ ((getOperatorType() == REL_UNARY_INSERT) &&((Insert *)this)->isMerge()) ||
+ ((getOperatorType() == REL_UNARY_INSERT) && ((Insert *)this)->isUpsert() && (CmpCommon::getDefault(TRAF_UPSERT_TO_EFF_TREE) == DF_ON ))) */
if ((getOperatorType() != REL_UNARY_INSERT) ||
getUpdateCKorUniqueIndexKey() ||
((getOperatorType() == REL_UNARY_INSERT) &&((Insert *)this)->isMerge()) ||
- ((getOperatorType() == REL_UNARY_INSERT) && ((Insert *)this)->isUpsert() && (CmpCommon::getDefault(TRAF_UPSERT_TO_EFF_TREE) == DF_ON )))
+ ((getOperatorType() == REL_UNARY_INSERT) &&((Insert *)this)->xformedEffUpsert()))
{
// DELETE or UPDATE --
// Now merge the old/target/before valueid's (the Scan child RETDesc) | 1 | /**********************************************************************
// @@@ START COPYRIGHT @@@
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
// @@@ END COPYRIGHT @@@
**********************************************************************/
/* -*-C++-*-
******************************************************************************
*
* File: Inlining.cpp
* Description: Methods for the inlining of triggers.
*
* Created: 6/23/98
* Language: C++
* Status: $State: Exp $
*
*/
#define INITIALIZE_OLD_AND_NEW_NAMES // used in Triggers.h
#define SQLPARSERGLOBALS_FLAGS // must precede all #include's
#define SQLPARSERGLOBALS_CONTEXT_AND_DIAGS
#include "Sqlcomp.h"
#include "AllItemExpr.h"
#include "AllRelExpr.h"
#include "BindWA.h"
#include "GroupAttr.h"
#include "parser.h"
#include "StmtNode.h"
#include "Inlining.h"
#include "ex_error.h"
#include "Triggers.h"
#include "TriggerDB.h"
#include "TriggerEnable.h"
#include "StmtDDLCreateTrigger.h"
#include "MVInfo.h"
#include "Refresh.h"
#include "ChangesTable.h"
#include "MvRefreshBuilder.h"
#include "MjvBuilder.h"
#include "ItmFlowControlFunction.h"
#include <CmpMain.h>
#include "RelSequence.h"
#ifdef NA_DEBUG_GUI
#include "ComSqlcmpdbg.h"
#endif
#include "SqlParserGlobals.h" // must be last #include
#define DISABLE_TRIGGERS 0
#define DISABLE_RI 0
extern THREAD_P NABoolean GU_DEBUG;
static const char NEWTable [] = "NEW"; // QSTUFF: corr for embedded d/u
static const char OLDTable [] = "OLD"; // QSTUFF: corr for embedded d/u
/*******************************************************************************
**** Independent Utility Functions
******************************************************************************
*****************************************************************************/
//////////////////////////////////////////////////////////////////////////////
// Create a CorrName to the temp table from the subject table name.
//////////////////////////////////////////////////////////////////////////////
/*static CorrName *calcTempTableName(const CorrName &theTable, CollHeap *heap)
{
const QualifiedName &tableName = theTable.getQualifiedNameObj();
CorrName *result = new(heap)
CorrName(subjectNameToTrigTemp(tableName.getObjectName()),
heap,
tableName.getSchemaName(),
tableName.getCatalogName());
// Specify the trigger temporary table namespace.
result->setSpecialType(ExtendedQualName::TRIGTEMP_TABLE);
return result;
}
//////////////////////////////////////////////////////////////////////////////
// Does this column name start with NEW_COLUMN_PREFIX?
//////////////////////////////////////////////////////////////////////////////
static NABoolean isNewCol(const NAString& colName)
{
if (colName.length() < sizeof(NEW_COLUMN_PREFIX))
return FALSE;
return (colName(0,sizeof(NEW_COLUMN_PREFIX)-1) == NEW_COLUMN_PREFIX);
}
//////////////////////////////////////////////////////////////////////////////
// Does this column name start with OLD_COLUMN_PREFIX?
//////////////////////////////////////////////////////////////////////////////
static NABoolean isOldCol(const NAString& colName)
{
if (colName.length() < sizeof(OLD_COLUMN_PREFIX))
return FALSE;
return (colName(0,sizeof(OLD_COLUMN_PREFIX)-1) == OLD_COLUMN_PREFIX);
}
//////////////////////////////////////////////////////////////////////////////
// Remove the temp table column name prefix.
//////////////////////////////////////////////////////////////////////////////
static void FixTempColName(NAString *colName)
{
CMPASSERT (sizeof(OLD_COLUMN_PREFIX) == sizeof(NEW_COLUMN_PREFIX));
colName->remove(0,sizeof(OLD_COLUMN_PREFIX)-1); // remove the prefix
}
//////////////////////////////////////////////////////////////////////////////
// Does this column name from the temp table contain the @ sign?
// If so - it must be either a NEW@ or an OLD@ column.
// If not - it must be part of either the primary or clustering keys.
//////////////////////////////////////////////////////////////////////////////
static NABoolean isSingleCopyColumn(const NAString& colName)
{
return !colName.contains(NON_SQL_TEXT_CHAR);
}*/
//////////////////////////////////////////////////////////////////////////////
// Utility function used by the fixTentativeRETDesc() method below.
// Creates a Cast ItemExpr around the parameter, so it stays the same but
// gets a new ValueId.
//////////////////////////////////////////////////////////////////////////////
static ValueId wrapWithCastExpr(BindWA *bindWA, ValueId col, CollHeap *heap)
{
ItemExpr *expr = col.getItemExpr();
ItemExpr *cast = new(heap) Cast(expr, col.getType().newCopy(heap));
cast->bindNode(bindWA);
return cast->getValueId();
}
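// wrapWithCastExpr() keeps a column's value and type but hands back a new
// ValueId, so two occurrences of the "same" column can be told apart later.
// A generic sketch of that wrap-for-a-fresh-identity idea (hypothetical types;
// the real ItemExpr/Cast machinery is more involved):
/*
struct expr_node {
  int value_id;     // identity used by the rest of the compiler
  int type_code;    // stays the same across the wrap
};

static int next_value_id = 1000;

// The wrapper copies the payload but allocates a new identifier.
static expr_node wrap_with_cast(const expr_node& col) {
  expr_node cast = col;             // same type, same underlying value
  cast.value_id = ++next_value_id;  // but a distinct ValueId
  return cast;
}
*/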
/*****************************************************************************
******************************************************************************
**** Inlining functions of classes other than GenericUpdate
******************************************************************************
*****************************************************************************/
//////////////////////////////////////////////////////////////////////////////
// Is this CorrName using a name of a trigger transition table (from the
// REFERENCING clause)?
// Find if we are in the scope of a trigger action, and if the names match.
// onlyNew has a default value of FALSE, which is overridden only for DDL
// semantic checks of before triggers.
//////////////////////////////////////////////////////////////////////////////
NABoolean CorrName::isATriggerTransitionName(BindWA *bindWA, NABoolean onlyNew) const
{
BindScope *scope = bindWA->findNextScopeWithTriggerInfo();
if ((scope==NULL) || (scope->context()->triggerObj() == NULL))
return FALSE;
const NAString& objName = getQualifiedNameObj().getObjectName();
if (onlyNew)
return scope->context()->triggerObj()->isNewTransitionName(objName);
else
return scope->context()->triggerObj()->isTransitionName(objName);
}
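// The check above walks out to the nearest binding scope that carries trigger
// information and compares the name against that trigger's transition names
// (only NEW, or OLD and NEW). A compact sketch of that scope-walk-then-match
// shape, with made-up scope/trigger types:
/*
#include <string>
#include <vector>

struct trigger_info { std::string old_name, new_name; };
struct scope        { const trigger_info* trigger = nullptr; };

static bool is_transition_name(const std::vector<scope>& scopes,
                               const std::string& name, bool only_new) {
  // Walk from the innermost scope outwards until one carries trigger info.
  for (auto it = scopes.rbegin(); it != scopes.rend(); ++it) {
    if (it->trigger == nullptr) continue;
    if (name == it->trigger->new_name) return true;
    return !only_new && name == it->trigger->old_name;
  }
  return false;  // not inside a trigger action at all
}
*/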
//////////////////////////////////////////////////////////////////////////////
// This is a Scan on a temp table inside the action of a statement trigger.
// Add the uniquifier WHERE expression for it.
// Put a RelRoot node with a select list on top of it, to select only the
// needed columns. Above the RelRoot, put a RenameTable node to change the
// scanned temp-table to the correlation name used in the trigger action.
//////////////////////////////////////////////////////////////////////////////
RelExpr *Scan::buildTriggerTransitionTableView(BindWA *bindWA)
{
BindScope *scope = bindWA->findNextScopeWithTriggerInfo();
CMPASSERT((scope!=NULL) && (scope->context()->triggerObj() != NULL));
StmtDDLCreateTrigger *createTriggerNode =
scope->context()->triggerObj();
if (!createTriggerNode->isStatement())
{
// 11019 Only statement triggers can select from the transition table.
*CmpCommon::diags() << DgSqlCode(-11019);
bindWA->setErrStatus();
return this;
}
// The transition table name must not have catalog/schema.
QualifiedName& objName = userTableName_.getQualifiedNameObj();
if (objName.getSchemaName() != "")
{
// *** 4057 Correlation name MYNEW conflicts with qualified identifier of table CAT.SCHM.MYNEW.
*CmpCommon::diags() << DgSqlCode(-4057)
<< DgString0(objName.getObjectName())
<< DgTableName(objName.getQualifiedNameAsAnsiString());
bindWA->setErrStatus();
return this;
}
if (bindWA->inDDL())
{
// This bind is for DDL semantic checks during a CREATE TRIGGER statement.
// When the first trigger is created, the temp table does not yet exist,
// so we replace the scan on the temp table by a scan on the subject
// table. This is actually simpler since the column names stay the same.
// Set the name of the table to be scanned to be the fully qualified
// subject table.
if (userTableName_.getCorrNameAsString() == "")
userTableName_.setCorrName(objName.getObjectName());
objName = createTriggerNode->getTableNameObject();
// Don't fix the name in the LocList.
userTableName_.getQualifiedNameObj().setNamePosition(0);
// Use the RI flag to skip incrementing the NATable reference counter.
NABoolean inRIFlag = bindWA->getCurrentScope()->context()->inRIConstraint();
bindWA->getCurrentScope()->context()->inRIConstraint() = TRUE;
RelExpr *boundNode = bindNode(bindWA);
if (bindWA->errStatus())
return this;
// Remove the SYSKEY column from the current RETDesc. It is illegal
// for a trigger action to select the SYSKEY from the transition table.
ColRefName syskeyName("SYSKEY");
bindWA->getCurrentScope()->getRETDesc()->delColumn(bindWA, syskeyName, SYSTEM_COLUMN);
// Restore the previous BindWA state.
bindWA->getCurrentScope()->context()->inRIConstraint() = inRIFlag;
return boundNode;
}
else
{
// OK - this is DML time - build the select list for the root and rename.
QualifiedName& thisTable = userTableName_.getQualifiedNameObj();
ChangesTable::RowsType scanType;
if (createTriggerNode->isOldTransitionName(thisTable.getObjectName()))
{
scanType = ChangesTable::DELETED_ROWS;
}
else
{
CMPASSERT(createTriggerNode->isNewTransitionName(thisTable.getObjectName()));
scanType = ChangesTable::INSERTED_ROWS;
}
CorrName subjectTable = createTriggerNode->getTableNameObject();
subjectTable.setCorrName(userTableName_.getExposedNameAsString());
TriggersTempTable tempTableObj(subjectTable, this, scanType, bindWA);
RelExpr *transformedScan = tempTableObj.transformScan();
return transformedScan->bindNode(bindWA); // Bind the result
}
}
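// buildTriggerTransitionTableView() above behaves differently at CREATE TRIGGER
// time (DDL semantic checks, where the temp table may not exist yet and the
// subject table is scanned instead) and at DML time (where the OLD/NEW
// transition name selects which rows of the temp table are read). A sketch of
// that two-way dispatch and the OLD->deleted / NEW->inserted mapping, using
// hypothetical names:
/*
#include <string>

enum class rows_kind { deleted, inserted };

// OLD transition name reads the pre-image rows, NEW reads the post-image rows.
// Callers guarantee the name is one of the two transition names.
static rows_kind rows_for_transition(const std::string& name,
                                     const std::string& new_name) {
  return (name == new_name) ? rows_kind::inserted : rows_kind::deleted;
}

static const char* plan_scan(bool in_ddl, rows_kind kind) {
  if (in_ddl) return "scan subject table";    // temp table may not exist yet
  return kind == rows_kind::deleted ? "scan temp table (deleted rows)"
                                    : "scan temp table (inserted rows)";
}
*/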
//////////////////////////////////////////////////////////////////////////////
// This method does the insertion of a MVImmediate trigger in order to
// refresh the specified ON STATEMENT MJV after an insert operation.
//
// The refresh action is by default implemented as a statement after trigger.
// This implies that the trigger is always driven by a scan from the
// temp-table. The overhead of inserting into the temp-table, scanning it, and
// deleting from it afterwards is relatively high when the number of rows is
// quite small. So, when the insert is driven by a Tuple or TupleList node
// (with up to a certain number of tuples in it), we implement the refresh
// action as a row after trigger, and thus avoid using the temp-table at whole.
// When before triggers exist we don't consider the optimization, since the
// values in the Tuple (TupleList) are not the actual values inserted into the
// table (they are modified by the before trigger).
//
//////////////////////////////////////////////////////////////////////////////
void Insert::insertMvToTriggerList(BeforeAndAfterTriggers *list,
BindWA *bindWA,
CollHeap *heap,
const QualifiedName &mvName,
MVInfoForDML *mvInfo,
const QualifiedName &subjectTable,
UpdateColumns *updatedCols)
{
CMPASSERT(getOperatorType() == REL_UNARY_INSERT);
CorrName mvCorrName(mvName, heap);
// The namespace is set in order to allow special update directly on the MV
mvCorrName.setSpecialType(ExtendedQualName::MV_TABLE);
  // Instantiate the appropriate builder
MjvImmInsertBuilder *triggerBuilder = new(heap)
MjvImmInsertBuilder(mvCorrName, mvInfo, this, bindWA);
// By default, the refresh is implemented as an after statement trigger
ComGranularity granularity = COM_STATEMENT;
// If MV_AS_ROW_TRIGGER default value is ON or if we deal with insert of only
// 1 row (i.e. the child is REL_TUPLE), generate the optimized refresh tree,
// in which case the refresh is implemented as a row after trigger.
if (child(0)->getOperatorType() == REL_TUPLE ||
CmpCommon::getDefault(MV_AS_ROW_TRIGGER) == DF_ON)
{
// build the optimized version of the tree as requested
granularity = COM_ROW;
triggerBuilder->optimizeForFewRows();
}
  // Instantiate the MVImmediate trigger
MVImmediate *mvTrigger = new(heap) MVImmediate(bindWA,
triggerBuilder,
mvName,
subjectTable,
COM_INSERT,
granularity,
updatedCols);
// Register the trigger in the general list of triggers
if (granularity == COM_STATEMENT)
{
list->addNewAfterStatementTrigger(mvTrigger);
}
else
{
list->addNewAfterRowTrigger(mvTrigger);
}
}
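// The MJV refresh after INSERT defaults to a statement trigger driven from the
// temp table, but falls back to a row trigger when the source is a single tuple
// (or when the MV_AS_ROW_TRIGGER default forces it), avoiding the temp-table
// insert/scan/delete round trip. A sketch of that granularity decision
// (hypothetical enum/flag names):
/*
enum class granularity { row, statement };

// Mirrors the decision above: small, known-size sources skip the temp table.
static granularity pick_refresh_granularity(bool source_is_single_tuple,
                                            bool row_trigger_default_on) {
  if (source_is_single_tuple || row_trigger_default_on)
    return granularity::row;        // cheap path: no temp-table round trip
  return granularity::statement;    // general path: refresh driven from the temp table
}
*/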
//////////////////////////////////////////////////////////////////////////////
// This method does the insertion of a MVImmediate trigger in order to
// refresh the specified ON STATEMENT MJV after an update operation.
//
// The update operation may be one of two types:
//
// 1. NONE of the updated columns participate in the clustering index of
// the table and/or join predicate of the MJV. This is called direct update.
// 2. Some of the above columns are updated. This is called indirect update.
//
// For direct update, a single row trigger that directly updates the
// appropriate columns is sufficient to refresh the MJV.
//
// For indirect update, we should have two MVImmediate triggers defined:
// 1) a row after trigger that deletes each row in the MJV that corresponds to
// the updated ones.
// 2) a statement after trigger that inserts all the rows that result from
// applying the join expression of the MJV between the delta on the subject
// table (the updated rows stored in the temp-table) and the other tables
// participating in the MJV (the ones that were not changed).
//
//////////////////////////////////////////////////////////////////////////////
void Update::insertMvToTriggerList(BeforeAndAfterTriggers *list,
BindWA *bindWA,
CollHeap *heap,
const QualifiedName &mvName,
MVInfoForDML *mvInfo,
const QualifiedName &subjectTable,
UpdateColumns *updatedCols)
{
CMPASSERT(getOperatorType() == REL_UNARY_UPDATE);
CorrName mvCorrName(mvName, heap);
// The namespace is set in order to allow special update directly on the MV
mvCorrName.setSpecialType(ExtendedQualName::MV_TABLE);
switch(checkUpdateType(mvInfo, subjectTable, updatedCols))
{
case DIRECT:
{
/////////////////////////////////////////////////////////
// adding a row trigger to directly update rows in the MV
/////////////////////////////////////////////////////////
      // Instantiate the appropriate builder
MvRefreshBuilder *triggerBuilder = new(heap)
MjvImmDirectUpdateBuilder(mvCorrName, mvInfo, this, bindWA);
      // Instantiate the MVImmediate trigger
MVImmediate *mvTrigger = new(heap) MVImmediate(bindWA,
triggerBuilder,
mvName,
subjectTable,
COM_UPDATE,
COM_ROW,
updatedCols);
// Register the trigger in the general list of triggers
list->addNewAfterRowTrigger(mvTrigger);
break;
}
case INDIRECT:
{
///////////////////////////////////////////////////////////////////
// PART I: adding a row trigger to delete updated rows from the MJV
///////////////////////////////////////////////////////////////////
      // Instantiate the appropriate builder
MvRefreshBuilder *rowTriggerBuilder = new(heap)
MjvImmDeleteBuilder(mvCorrName, mvInfo, this, bindWA);
      // Instantiate the MVImmediate trigger
MVImmediate *mvRowTrigger = new(heap) MVImmediate(bindWA,
rowTriggerBuilder,
mvName,
subjectTable,
COM_DELETE,
COM_ROW,
updatedCols);
// Register the trigger in the general list of triggers
list->addNewAfterRowTrigger(mvRowTrigger);
//////////////////////////////////////////////////////////////////////////
// PART II: adding a statement trigger to insert the new rows into the MJV
//////////////////////////////////////////////////////////////////////////
      // Instantiate the appropriate builder
MvRefreshBuilder *stmtTriggerBuilder = new(heap)
MjvImmInsertBuilder(mvCorrName, mvInfo, this, bindWA);
      // Instantiate the MVImmediate trigger
MVImmediate *mvStmtTrigger = new(heap) MVImmediate(bindWA,
stmtTriggerBuilder,
mvName,
subjectTable,
COM_INSERT,
COM_STATEMENT,
updatedCols);
// Register the trigger in the general list of triggers
list->addNewAfterStatementTrigger(mvStmtTrigger);
break;
}
default:
break; // update is IRELEVANT - nothing to do
}
}
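// For UPDATE, the refresh plan depends on the classification made by
// checkUpdateType(): a direct update needs a single row trigger that patches
// the MJV in place, while an indirect update needs a pair of triggers (delete
// the affected MJV rows per updated row, then re-insert the recomputed rows
// once per statement). A sketch of that dispatch, with hypothetical
// trigger-list callbacks standing in for the builders used above:
/*
#include <functional>
#include <vector>

enum class update_kind { irrelevant, direct, indirect };

struct trigger_lists {
  std::vector<std::function<void()>> after_row;
  std::vector<std::function<void()>> after_statement;
};

static void register_mjv_refresh(update_kind kind, trigger_lists& lists) {
  switch (kind) {
    case update_kind::direct:
      lists.after_row.push_back([] {});        // update matching MJV rows in place
      break;
    case update_kind::indirect:
      lists.after_row.push_back([] {});        // delete the MJV rows of each updated row
      lists.after_statement.push_back([] {});  // re-insert rows joined from the delta
      break;
    case update_kind::irrelevant:
      break;                                   // MJV does not use the updated columns
  }
}
*/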
//////////////////////////////////////////////////////////////////////////////
// This method checks which type of update this is:
// IRELEVANT, DIRECT or INDIRECT.
//
// An update is considered as IRELEVANT until proven otherwise. The list of
// columns of the subject table that are in use by the MJV is scanned. For
// each column we check if it is updated in the current update operation. If
// so, the update is not IRELEVANT anymore, and is considered DIRECT until
// proven otherwise. Once we find an indirect-update column that is updated
// in the current operation, the update is considered INDIRECT and the loop
// is stopped at once. MJV columns of type complex cause INDIRECT update also.
//
//////////////////////////////////////////////////////////////////////////////
Update::MvUpdateType Update::checkUpdateType(MVInfoForDML *mvInfo,
const QualifiedName &subjectTable,
UpdateColumns *updatedCols) const
{
MvUpdateType updateType = IRELEVANT;
mvInfo->initUsedObjectsHash(); // initialization for the searching
const MVUsedObjectInfo *mvUseInfo = mvInfo->findUsedInfoForTable(subjectTable);
const LIST (Lng32) &colsUsedByMv = mvUseInfo->getUsedColumnList();
for (CollIndex i = 0; i < colsUsedByMv.entries(); i++ )
{
// Check whether the column was updated in the current update operation.
// If so, this update is not IRELEVANT anymore. Otherwise, skip this column.
if (updatedCols->contains(colsUsedByMv[i]))
{
updateType = DIRECT;
}
else
{
continue;
}
// Check whether the column is an indirect-update column in the MJV, which
// means that updating it may not only affect the corresponding row in the
// MJV, but also affect other rows in it.
if (mvUseInfo->isIndirectUpdateCol(colsUsedByMv[i]))
{
updateType = INDIRECT;
break; // no further search is needed
}
}
return updateType;
}
//////////////////////////////////////////////////////////////////////////////
// This method inserts an MVImmediate trigger in order to
// refresh the specified ON STATEMENT MJV after a delete operation.
//
// The refresh action is implemented as a row after trigger.
//
//////////////////////////////////////////////////////////////////////////////
void Delete::insertMvToTriggerList(BeforeAndAfterTriggers *list,
BindWA *bindWA,
CollHeap *heap,
const QualifiedName &mvName,
MVInfoForDML *mvInfo,
const QualifiedName &subjectTable,
UpdateColumns *updatedCols)
{
CMPASSERT(getOperatorType() == REL_UNARY_DELETE);
CorrName mvCorrName(mvName, heap);
// The namespace is set in order to allow special update directly on the MV
mvCorrName.setSpecialType(ExtendedQualName::MV_TABLE);
// Instantiate the appropriate builder
MvRefreshBuilder *triggerBuilder = new(heap)
MjvImmDeleteBuilder(mvCorrName, mvInfo, this, bindWA);
// Instantiate the MVImmediate trigger
MVImmediate *mvTrigger = new(heap) MVImmediate(bindWA,
triggerBuilder,
mvName,
subjectTable,
COM_DELETE,
COM_ROW,
updatedCols);
// Register the trigger in the general list of triggers
list->addNewAfterRowTrigger(mvTrigger);
}
/*****************************************************************************
******************************************************************************
**** Inlining methods of the GenericUpdate class
******************************************************************************
*****************************************************************************/
// This method was originally called
// setRETDescForTSJTree().
//
// For GenericUpdate Referential Integrity, Index Maintenance and Triggers
// (for IM, compare the createIM*() functions).
//
// Note that, if a RETDesc needs to be created, correlation names of
// "OLD@" and/or "NEW@" are given to the tables and columns.
// These names are safe to use because they contain a special non-Ansi
// character (the "@"), which is only accepted by Parser if a special
// "internal-only" flag is set as described for RI below.
//
// (Without the "@", the names "OLD" and "NEW" would NOT be safe to use:
// although they are in ReservedWords.h's PotentialAnsiReservedWords list,
// RI and IM could have a name collision ambiguity or misbinding due to a
// user table or index named "OLD" or "NEW" as a *delimited identifier*.
// Note also that since RI does need to use the Parser, we can't use
// CorrName::isFabricated()...)
//
// This function always sets gu's RETDesc to *non-empty*, signifying that
// this GenericUpdate produces outputs. This RETDesc is also context
// for our being called multiple times.
//
// Concatenate **internal** correlation names (used by Binder)
// and **external** Ansi delimited-identifier names (used by Parser only).
//
// RI will use the latter set of names when building parseable predicate text;
// i.e. it should pass them to RefConstraint::getPredicateText().
//
RETDesc *GenericUpdate::createOldAndNewCorrelationNames(BindWA *bindWA, NABoolean createRETDescOnly)
{
CMPASSERT(getOperatorType() == REL_UNARY_INSERT ||
getOperatorType() == REL_UNARY_UPDATE ||
getOperatorType() == REL_UNARY_DELETE);
CMPASSERT(getRETDesc());
CMPASSERT(getRETDesc()->isEmpty());
RETDesc *rd;
if (getOperatorType() != REL_UNARY_DELETE)
{
// INSERT or UPDATE --
// GenericUpdate::bindNode has previously set its TableDesc to the
// desc whose column valueid's represent the new/source/after values.
// Put these into a new RETDesc with columns all named "NEW@.<colname>"
//
CorrName corrName(getTableDesc()->getCorrNameObj().getQualifiedNameObj(),
bindWA->wHeap(),
NEWCorr);
rd = new (bindWA->wHeap()) RETDesc(bindWA, getTableDesc(), &corrName);
// IM: Add all columns in rd as local reference to current scope.
// This is needed if the current scope describes a view which selects
// a proper subset of all the basetable columns -- here we say that
// yes, this view can produce *all* the b.t.cols as outputs...
//
ValueIdList vidList;
rd->getValueIdList(vidList, USER_AND_SYSTEM_COLUMNS);
for (CollIndex i = 0; i < vidList.entries(); i++)
bindWA->getCurrentScope()->addLocalRef(vidList[i]);
// ##IM:## Having done the above for local refs, oughtn't we also now
// ##recompute inputs?
// ## bindWA->getCurrentScope()->mergeOuterRefs(bindWA->getCurrentScope()->getOuterRefs());
// ## gu->getGroupAttr()->addCharacteristicInputs(bindWA->getCurrentScope()->getOuterRefs());
}
else
{
// DELETE -- init an empty RETDesc, and next merge in "OLD@.<col>"'s
rd = new (bindWA->wHeap()) RETDesc(bindWA);
}
if ((getOperatorType() != REL_UNARY_INSERT) ||
getUpdateCKorUniqueIndexKey() ||
((getOperatorType() == REL_UNARY_INSERT) &&((Insert *)this)->isMerge()) ||
((getOperatorType() == REL_UNARY_INSERT) && ((Insert *)this)->isUpsert() && (CmpCommon::getDefault(TRAF_UPSERT_TO_EFF_TREE) == DF_ON )))
{
      // DELETE, UPDATE, or one of the INSERT variants above (merge, update of
      // the CK/unique index key, or upsert to the effective tree) --
// Now merge the old/target/before valueid's (the Scan child RETDesc)
// into this RETDesc such that these cols are all named "OLD@.<col>"
//
Scan *scan ;
if (getOperatorType() != REL_UNARY_INSERT)
scan = getScanNode();
else
scan = getLeftmostScanNode();
if ((getOperatorType() == REL_UNARY_INSERT) && ((Insert *)this)->isUpsert() && (CmpCommon::getDefault(TRAF_UPSERT_TO_EFF_TREE) == DF_ON ))
{
RelSequence *olapChild = getOlapChild();
CorrName corrName(getTableDesc()->getCorrNameObj().getQualifiedNameObj(),
bindWA->wHeap(),
OLDCorr);
// ColumnDescList *colList = (olapChild->getRETDesc())->getColumnList();
for (short i = 0; i< olapChild->getRETDesc()->getDegree();i++)
{
          // we remembered if the original column was from the right side of
          // this olap node, so add those to the RETDesc since those are the
          // ones we want to delete from the dependent indexes.
if ((olapChild->getRETDesc()->getValueId(i)).getItemExpr()->origOpType() == ITM_INSTANTIATE_NULL)
{
rd->addColumn(bindWA,
ColRefName(olapChild->getRETDesc()->getColRefNameObj(i).getColName(), corrName),
olapChild->getRETDesc()->getValueId(i),
USER_COLUMN,
olapChild->getRETDesc()->getHeading(i));
}
}
rd->addColumns(bindWA, *olapChild->getRETDesc()->getSystemColumnList(), SYSTEM_COLUMN,&corrName);
}
else
{
CMPASSERT(scan);
CorrName corrName(scan->getTableDesc()->getCorrNameObj().getQualifiedNameObj(),
bindWA->wHeap(),
OLDCorr);
rd->addColumns(bindWA, *scan->getRETDesc(), &corrName);
}
}
Set_SqlParser_Flags(ALLOW_FUNNY_IDENTIFIER); // allow "@" processing
Set_SqlParser_Flags(DELAYED_RESET); // allow multiple parser calls.
CMPASSERT(!rd->isEmpty());
// we only need the RETDesc to use it later for the transformation. Don't change the
// RETDesc of the current operator or the current bind scope.
if (!createRETDescOnly)
{
delete getRETDesc(); // safe because empty
setRETDesc(rd);
bindWA->getCurrentScope()->setRETDesc(rd);
}
return rd;
} // GenericUpdate::createOldAndNewCorrelationNames()
//////////////////////////////////////////////////////////////////////////////
// Add a virtual column to the RETDesc of this GenericUpdate node.
// Currently used for the Unique Execute ID column (before triggers),
// and for MV logging: the current Epoch, row type and row count.
//////////////////////////////////////////////////////////////////////////////
ValueId GenericUpdate::addVirtualColumn(BindWA *bindWA,
ItemExpr *colExpr,
const char *colName,
CollHeap *heap)
{
RETDesc *retDesc = getRETDesc();
colExpr->bindNode(bindWA);
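  // Wrapping the bound expression with a Cast (as is done elsewhere for
  // duplicated OLD@/NEW@ columns) gives the virtual column its own ValueId,
  // distinct from the underlying expression.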
ValueId exprValueId = wrapWithCastExpr(bindWA, colExpr->getValueId(), heap);
ColRefName virtualColName(colName);
retDesc->addColumn(bindWA, virtualColName, exprValueId);
return exprValueId;
}
//////////////////////////////////////////////////////////////////////////////
// Inline the temp insert sub-tree directly into the backbone.
// The resulting tree looks like this:
// TSJ
// / \
// topNode RelRoot
// |
// LeafInsert
//////////////////////////////////////////////////////////////////////////////
RelExpr *GenericUpdate::inlineTempInsert(RelExpr *topNode,
BindWA *bindWA,
TriggersTempTable& tempTableObj,
NABoolean isDrivenByBeforeTriggers,
NABoolean isTopMostTSJ,
CollHeap *heap)
{
// The new top node should be a left TSJ for after triggers,
// and a normal TSJ for before triggers.
OperatorTypeEnum JoinType =
( (isDrivenByBeforeTriggers || isTopMostTSJ)
? REL_TSJ
: REL_LEFT_TSJ);
if (isDrivenByBeforeTriggers)
tempTableObj.setBeforeTriggersExist();
RelExpr *insertNode = tempTableObj.buildInsert(!isDrivenByBeforeTriggers);
if (bindWA->errStatus())
return NULL;
RelRoot *rootNode = new (heap) RelRoot(insertNode);
rootNode->setRootFlag(FALSE);
rootNode->setEmptySelectList();
topNode = new(heap) Join(topNode, rootNode, JoinType);
topNode->getInliningInfo().setFlags(II_DrivingTempInsert |
II_SingleExecutionForTriggersTSJ |
II_AccessSetNeeded);
// Indicate to the Normalizer that this TSJ cannot be optimized away,
// and that if the write operation is implemented via a cursor then
// it cannot be a "flow" cursor operation - i.e. it cannot utilize
// a TSJFlow node.
// Genesis case #10-990116-7164
((Join *)topNode)->setTSJForWrite(TRUE);
if (bindWA->getHostArraysArea() && bindWA->getHostArraysArea()->getTolerateNonFatalError())
{
((Join *)topNode)->setTolerateNonFatalError(RelExpr::NOT_ATOMIC_);
((Join *)topNode)->setTSJForSetNFError(TRUE);
}
return topNode;
}
//////////////////////////////////////////////////////////////////////////////
// Do we need Index Maintenance on this GenericUpdate node?
// We do if there are indexes other than the clustering index. If this is
// an Update, at least one such index must contain at least one of the columns
// actually being updated.
//////////////////////////////////////////////////////////////////////////////
NABoolean GenericUpdate::isIMNeeded(UpdateColumns *updatedColumns)
{
NABoolean imNeeded = FALSE;
if (!getTableDesc()->hasSecondaryIndexes())
return FALSE;
const LIST(IndexDesc *) indexList = getTableDesc()->getIndexes();
for (CollIndex i=0; (i<indexList.entries()) && !imNeeded; i++)
{
IndexDesc *index = indexList[i];
// The base table itself is an index (the clustering index);
// obviously IM need not deal with it.
if (index->isClusteringIndex())
continue;
// An index always needs maintenance on an Insert or Delete...
if((getOperatorType() != REL_UNARY_UPDATE) ||
(isMerge()))
imNeeded = TRUE;
else
{
// This is Update - check if columns match.
CMPASSERT(updatedColumns!=NULL);
const ValueIdList &indexColumns = index->getIndexColumns();
for (CollIndex j=0; j < indexColumns.entries() && !imNeeded; j++)
{
Lng32 indexCol = indexColumns[j].getNAColumn()->getPosition();
if (updatedColumns->contains(indexCol))
{
imNeeded = TRUE;
break;
}
      } // for j
    } // else
  } // for i
return imNeeded;
}
//////////////////////////////////////////////////////////////////////////////
// Inline Index Maintenance.
// We get here only if isIMNeeded() returned TRUE.
// The result looks like this:
// TSJ
// / \
// topNode IM
//////////////////////////////////////////////////////////////////////////////
RelExpr *GenericUpdate::inlineIM(RelExpr *topNode,
BindWA *bindWA,
NABoolean isLastTSJ,
UpdateColumns *updatedColumns,
CollHeap *heap,
NABoolean useInternalSyskey,
NABoolean rowTriggersPresents)
{
  // Create the tree that handles Index Maintenance.
RelExpr *imTree = createIMTree(bindWA, updatedColumns, useInternalSyskey);
  // If no IM tree was created then we have a bug!
CMPASSERT(imTree != NULL);
if (bindWA->isTrafLoadPrep())
return imTree;
// Drive IM and RI trees using a TSJ on top of the GenericUpdate node.
topNode = new(bindWA->wHeap())
Join(topNode,
imTree,
(isLastTSJ ? REL_TSJ : REL_LEFT_TSJ));
topNode->getInliningInfo().setFlags(II_DrivingIM | II_AccessSetNeeded);
if (rowTriggersPresents)
topNode->getInliningInfo().setFlags(II_SingleExecutionForTriggersTSJ);
// Indicate to the Normalizer that this TSJ cannot be optimized away,
// and that if the write operation is implemented via a cursor then
// it cannot be a "flow" cursor operation - i.e. it cannot utilize
// a TSJFlow node.
// Genesis case #10-990116-7164
((Join *)topNode)->setTSJForWrite(TRUE);
return topNode;
}
//////////////////////////////////////////////////////////////////////////////
// Add to a trigger's WHEN clause the check that the trigger is enabled.
// Result condition ItemExpr tree looks like this:
//
// AND
// / \
// GetBitValueAt WHEN clause
// / \
// GetTriggerStatus TriggerIndex
//
//////////////////////////////////////////////////////////////////////////////
static ItemExpr *addCheckForTriggerEnabled(BindWA *bindWA,
ItemExpr *whenClause,
Trigger *triggerObj,
CollHeap *heap)
{
// Register the trigger timestamp in the list managed by bindWA. The
// returned value is the index into the trigger array for this RelExpr
CollIndex triggerIndex = bindWA->addTrigger(triggerObj->getTimeStamp());
CollIndex MaxTriggersPerStatement = MAX_TRIGGERS_PER_STATEMENT;
// debugging for coverage
#ifndef NDEBUG
char* env = getenv("TESTING_MAX_TRIGGERS_PER_STATEMENT");
if (env)
{
MaxTriggersPerStatement = atol(env);
}
#endif
if (triggerIndex >= MaxTriggersPerStatement)
{
    // There are more triggers in this statement than allowed
    // (MAX_TRIGGERS_PER_STATEMENT)
*CmpCommon::diags() << DgSqlCode(-11001);
bindWA->setErrStatus();
return NULL;
}
ItemExpr *enableCheck = new(heap)
GetBitValueAt(new(heap) GetTriggersStatus(),
#pragma nowarn(1506) // warning elimination
new(heap) ConstValue(triggerIndex) );
#pragma warn(1506) // warning elimination
// Check if whenClause is empty or TRUE
if (whenClause == NULL || whenClause->getOperatorType() == ITM_RETURN_TRUE)
return enableCheck;
else {
/*
* for proper typing and transformation use correct AND subtree as follows:
* (AND (AND (NOT_NULL GBVA) (NOT_EQUALS GBVA 0)) whenClause)
* Here enableCheck is GBVA (GetBitValueAt)
*/
ItemExpr *notNullNode = new(heap) UnLogic (ITM_IS_NOT_NULL, enableCheck);
ItemExpr *constZeroNode = new(heap) ConstValue(0);
ItemExpr *notZeroNode = new(heap) BiRelat(ITM_NOT_EQUAL, enableCheck, constZeroNode);
enableCheck = new(heap) BiLogic(ITM_AND, notNullNode, notZeroNode);
return new(heap) BiLogic(ITM_AND, enableCheck, whenClause);
}
}
// auxiliary function that returns an expression that wraps a RaiseError
// built-in function.
static ItemExpr *addTrigActionExcept (Trigger *trigObj, CollHeap *heap)
{
ItemExpr *trigActionExceptExprPred =
new (heap) RaiseError(ComDiags_TrigActionExceptionSQLCODE,
trigObj->getTriggerName(),
trigObj->getSubjectTableName());
return trigActionExceptExprPred;
}
//////////////////////////////////////////////////////////////////////////////
// This function is only called for row triggers. Look for all update
// nodes in the action of the trigger and flag them as being in the action
// of a row trigger
//////////////////////////////////////////////////////////////////////////////
static void flagUpdateAsRowTrigger(RelExpr *topNode)
{
if (topNode)
{
if (topNode->getOperatorType() == REL_UNARY_UPDATE)
{
topNode->getInliningInfo().setFlags(II_InActionOfRowTrigger);
}
flagUpdateAsRowTrigger((RelExpr *)topNode->child(0));
flagUpdateAsRowTrigger((RelExpr *)topNode->child(1));
}
}
//////////////////////////////////////////////////////////////////////////////
// Statement triggers are blocked by an ordered union node, but then
// driven to execute in parallel, by connecting them using a left linear tree
// of union nodes. Row triggers are similarly attached to each other, but are
// connected to the trigger backbone as part of the "Pipelined actions".
// The trigger transformation code expects this trigger group to be
// in a left linear tree structure.
// The result looks like this for three triggers:
//
// Statement triggers: Row triggers:
// OU
// / \
// topNode U U
// / \ / \
// U ST3 U RT3
// / \ / \
// ST1 ST2 RT1 RT2
//////////////////////////////////////////////////////////////////////////////
static RelExpr *inlineTriggerGroup(RelExpr *topNode,
const TriggerList *triggers,
NABoolean isRow,
CollHeap *heap,
BindWA *bindWA)
{
RelExpr *topUnion = NULL;
if ((triggers == NULL) || (triggers->entries() == 0))
return topNode;
// Now do the rest, and connect them with Union nodes.
for (CollIndex i=0; i<triggers->entries(); i++)
{
// Get the Trigger object.
Trigger *current = (*triggers)[i];
// Get the trigger action tree.
RelExpr *triggerTree = current->getParsedTrigger(bindWA);
if (bindWA->errStatus())
return NULL;
triggerTree->getInliningInfo().setFlags(II_TriggerRoot);
triggerTree->getInliningInfo().setTriggerObject(current);
// The check whether the trigger is enabled is applicable only for regular
// triggers. ON STATEMENT MVs (MVImmediate triggers) are always enabled.
if ( !(current->isMVImmediate()) )
{
Union *triggerRoot = (Union *)triggerTree->getChild(0); // Get past the RelRoot
if (isRow)
triggerRoot = (Union *)triggerRoot->getChild(0); // Get past the RenameReference
if (triggerRoot != NULL && triggerRoot->getOperatorType() == REL_UNION)
{
// mark the update nodes as being in the action of a row trigger
              // so IM can be set appropriately in this case to avoid data
// corruption - refer to method createIMNodes for more info on this issue
flagUpdateAsRowTrigger((RelExpr *)triggerRoot->getChild(0));
ItemExpr *whenClause = triggerRoot->removeCondExprTree();
triggerRoot->addCondExprTree(
addCheckForTriggerEnabled(bindWA, whenClause, current, heap));
triggerRoot->addTrigExceptExprTree(
addTrigActionExcept(current, heap));
              // if we are in a not-atomic statement, set a flag in the unary_union node.
              // This flag is passed to the executor through the union tdb. During execution,
              // if condExpr() evaluates to true and this flag is set, then error -30029 is raised.
if (bindWA->getHostArraysArea() &&
bindWA->getHostArraysArea()->getTolerateNonFatalError())
{
triggerRoot->setInNotAtomicStatement();
}
}
}
if (i == 0) // topUnion is still NULL?
topUnion = triggerTree;
else
{
topUnion = new(heap) Union(topUnion, triggerTree, NULL, NULL, REL_UNION,
CmpCommon::statementHeap(), TRUE);
if (bindWA->getHostArraysArea() &&
bindWA->getHostArraysArea()->getTolerateNonFatalError())
{
((Union *)topUnion)->setInNotAtomicStatement();
}
}
topUnion->getInliningInfo().setFlags(II_AccessSetNeeded);
}
if (isRow)
return topUnion;
else
{
Union *newOU = new(heap) Union(topNode, topUnion, NULL, NULL, REL_UNION,
CmpCommon::statementHeap(), TRUE);
newOU->setOrderedUnion();
newOU->setNoOutputs();
newOU->getInliningInfo().setFlags(II_DrivingStatementTrigger |
II_AccessSetNeeded);
if (bindWA->getHostArraysArea() &&
bindWA->getHostArraysArea()->getTolerateNonFatalError())
{
newOU->setInNotAtomicStatement();
}
return newOU;
}
}
//////////////////////////////////////////////////////////////////////////////
// Create a dummy sub-tree as a place-holder for missing pipelined actions
// (RI or row triggers), in order to maintain a regular tree.
// Such dummy statements are later removed by the trigger transformation code.
//////////////////////////////////////////////////////////////////////////////
static RelExpr *createDummyStatement(CollHeap *heap)
{
Tuple *tupleNode = new(heap) Tuple(new(heap) ConstValue(0));
tupleNode->getInliningInfo().setFlags(II_DummyStatement);
RelRoot *result = new RelRoot(tupleNode);
result->setRootFlag(FALSE);
result->setEmptySelectList();
return result;
}
//////////////////////////////////////////////////////////////////////////////
// Inline the pipelined actions: RI and row triggers.
// The root node above the TSJ has an empty select-list, so that the Union
// node above it will accept it as compatible with its other child. It also
// will not open a new BindScope when bound, so that when binding reaches
// the already bound 'this' node it will be in the same scope. This
// avoids problems when calculating inputs.
//
// The resulting tree looks like this:
// RelRoot
// |
// TSJ
// / \
// topNode U
// / \
// RI Row
// Triggers
//////////////////////////////////////////////////////////////////////////////
RelExpr *GenericUpdate::inlinePipelinedActions(RelExpr *topNode,
BindWA *bindWA,
TriggerList *rowTriggers,
RefConstraintList *riList,
CollHeap *heap)
{
RelExpr *resultTree=topNode;
if ((rowTriggers != NULL) || (riList != NULL))
{
RelExpr *rowTriggersTree=NULL;
RelExpr *riTree=NULL;
// Create the tree that handles all row triggers
if (rowTriggers != NULL)
{
rowTriggersTree = inlineTriggerGroup(NULL,
rowTriggers,
TRUE,
heap,
bindWA);
}
else
{
rowTriggersTree = createDummyStatement(heap);
}
    // Create the tree that handles Referential Integrity.
if (riList != NULL)
riTree = inlineRI(bindWA, riList, heap);
else
riTree = createDummyStatement(heap);
resultTree = new(heap) Union(riTree, rowTriggersTree, NULL, NULL,
REL_UNION, CmpCommon::statementHeap(), TRUE);
resultTree->getInliningInfo().setFlags(II_AccessSetNeeded);
if (riList!=NULL)
resultTree->getInliningInfo().setFlags(II_DrivingRI);
if (rowTriggers != NULL)
resultTree->getInliningInfo().setFlags(II_DrivingRowTrigger);
// Put a RelRoot on top of the Union to avoid trashing the current scope.
resultTree = new(heap) RelRoot(resultTree);
((RelRoot *)resultTree)->setRootFlag(FALSE);
((RelRoot *)resultTree)->setEmptySelectList();
// Drive IM and RI trees using a TSJ on top of the GenericUpdate node.
NABoolean needsOutputs = isMtsStatement() ||
(getUpdateCKorUniqueIndexKey() && (getOperatorType() == REL_UNARY_DELETE));
OperatorTypeEnum joinType = needsOutputs ? REL_ANTI_SEMITSJ : REL_TSJ;
resultTree = new(bindWA->wHeap()) Join(topNode, resultTree, joinType);
resultTree->getInliningInfo().setFlags(II_AccessSetNeeded);
resultTree->getInliningInfo().setFlags(II_DrivingPipelinedActions);
    // disable parallel execution for TSJs that control row trigger
    // execution. Parallel execution for the triggers TSJ introduces the
// potential for non-deterministic execution
if (rowTriggers)
resultTree->getInliningInfo().setFlags(II_SingleExecutionForTriggersTSJ);
// Indicate to the Normalizer that this TSJ cannot be optimized away,
// and that if the write operation is implemented via a cursor then
// it cannot be a "flow" cursor operation - i.e. it cannot utilize
// a TSJFlow node.
// Genesis case #10-990116-7164
((Join *)resultTree)->setTSJForWrite(TRUE);
}
RelRoot *rootNode = new(heap) RelRoot(resultTree);
rootNode->setRootFlag(FALSE);
rootNode->setEmptySelectList();
rootNode->setDontOpenNewScope();
return rootNode;
} // GenericUpdate::inlinePipelinedActions
//////////////////////////////////////////////////////////////////////////////
// Create the sub-tree that deletes the affected set from the temporary table.
// The corresponding SQL text is
// DELETE FROM <temp table> WHERE <Uniquifier> = <Uniquifier value>;
//
// The result looks like this:
// OU
// / \
// topNode Delete
// |
// Scan (temp table)
//////////////////////////////////////////////////////////////////////////////
RelExpr *GenericUpdate::inlineTempDelete(BindWA *bindWA,
RelExpr *topNode,
TriggersTempTable& tempTableObj,
CollHeap *heap)
{
RelExpr *deleteSubTree = tempTableObj.buildDelete();
Union *newOU = new(heap) Union(topNode, deleteSubTree, NULL, NULL,
REL_UNION,CmpCommon::statementHeap(),
TRUE);
newOU->setOrderedUnion();
newOU->getInliningInfo().setFlags(II_DrivingTempDelete |
II_AccessSetNeeded);
return newOU;
}
/*****************************************************************************
******************************************************************************
**** Methods for handling of before triggers.
******************************************************************************
*****************************************************************************/
//////////////////////////////////////////////////////////////////////////////
// Find all the NEW@ columns in the RETDesc of the GU that is about to be
// replaced by a "tentative execution" node. These columns point to columns
// in the IUD TableDesc that will soon no longer be valid. The names NEW@.*
// should instead reference the appropriate expressions from the new record
// expression.
//////////////////////////////////////////////////////////////////////////////
void GenericUpdate::fixTentativeRETDesc(BindWA *bindWA, CollHeap *heap)
{
RETDesc *retDesc = getRETDesc();
// The name of the subject table should no longer be in the XTNM.
bindWA->getCurrentScope()->getXTNM()->remove(&getTableName());
// Create a "virtual column" called @EXECID for the value of the Unique
// Execute ID. All the before trigger SIGNAL expressions will be piggy-
// backed on this "column" before it is inserted into the temp table.
ItemExpr *execId = new(heap) UniqueExecuteId();
addVirtualColumn(bindWA, execId, InliningInfo::getExecIdVirtualColName(), heap);
if (getOperatorType() == REL_UNARY_DELETE)
return; // No NEW@ values for Delete.
CorrName newCorr(NEWCorr);
ColumnDescList newCols(heap);
ValueIdArray& newRecordExpr = newRecExprArray();
ValueId *foundAssignToCol;
// Get all the NEW@ columns from the GU's RETDesc.
newCols.insert(*(retDesc->getQualColumnList(newCorr)));
// The getQualColumnList() call does not return SYSKEY, so if it's there
// find it and add it to the list.
NAString syskeyName("SYSKEY", heap);
ColRefName newSyskeyName(syskeyName, newCorr, heap);
ColumnNameMap *newSyskeyMap = retDesc->findColumn(newSyskeyName);
if (newSyskeyMap != NULL)
{
ColumnDesc *newSyskey = newSyskeyMap->getColumnDesc();
// Make sure SYSKEY is not already in the list.
CMPASSERT(!newCols.contains(newSyskey));
newCols.insert(newSyskey);
}
for (CollIndex i=0; i<newCols.entries(); i++) // For each NEW@ column,
{
ValueId tempValueId;
foundAssignToCol = NULL;
const ColRefName& colName = newCols[i]->getColRefNameObj();
// Lookup the column name in the target of an Assign node in the
// new record expression of the GU.
for (CollIndex j=0; j<newRecordExpr.entries(); j++)
{
ItemExpr *currentExpr = newRecordExpr[j].getItemExpr();
CMPASSERT(currentExpr->getOperatorType() == ITM_ASSIGN);
Assign *currentAssign = (Assign *)currentExpr;
ItemExpr *target = currentAssign->getTarget().getItemExpr();
if (target->getOperatorType() == ITM_BASECOLUMN)
{
// Do the column names match?
const NAString& trgtCol = ((BaseColumn *)target)->getColName();
if (!trgtCol.compareTo(colName.getColName()))
{
// Save the ValueId of the Assign source expression.
tempValueId = currentAssign->getSource();
foundAssignToCol = &tempValueId;
// No need to check the rest of the new record expression.
break;
}
}
} // for each Assign in the newRecExpr
// Delete the current NEW@.* column from the RETDesc.
if (colName.getColName() == "SYSKEY")
{
retDesc->delColumn(bindWA, colName, SYSTEM_COLUMN);
}
else
{
retDesc->delColumn(bindWA, colName, USER_COLUMN);
}
// it is no longer a local reference in the current scope.
ValueId colValId = newCols[i]->getValueId();
bindWA->getCurrentScope()->removeLocalRef(colValId);
// Did we find a matching Assign expression?
if (foundAssignToCol != NULL)
{
ValueId newExpr = *foundAssignToCol;
// Yes - Make NEW@.* reference it.
if (getGroupAttr()->isCharacteristicInput(newExpr))
newExpr = wrapWithCastExpr(bindWA, newExpr, heap);
retDesc->addColumn(bindWA, colName, newExpr);
}
else
{
// No - this must be a column that is not SET into, in an Update node.
CMPASSERT (getOperatorType() == REL_UNARY_UPDATE);
// OK - just reference the appropriate OLD@.* value.
CorrName oldCorr(OLDCorr);
ColRefName oldColName(colName.getColName(), oldCorr);
ValueId oldCol = retDesc->findColumn(oldColName)->getValueId();
// Wrap it with a Cast expression to give it a different ValueId
// than the original OLD@.* column, thus marking it as a
// duplicate entry in the XCNM.
retDesc->addColumn(bindWA, colName, wrapWithCastExpr(bindWA, oldCol, heap));
}
} // for each NEW@ column
// The next block handles the value of the SYSKEY column in the
// temporary table for Insert operations. The problem is that the row
// is inserted into the temp table before it is inserted into the
// subject table, and so - before the actual SYSKEY value was
// generated for it. Since this column is part of the primary key of
// the temp table, and the rest of the primary key columns may be
// identical, this SYSKEY value must be unique for each row.
// Solution - give it the value of JulianTimestamp.
// The Timestamp ItemExpr is a new BuiltInFunction that is evaluated
// for each row.
if (getOperatorType() == REL_UNARY_INSERT)
{
// Does this RETDesc have a [email protected] column?
CorrName newCorr(NEWCorr);
ColRefName syskey("SYSKEY", newCorr);
if (retDesc->findColumn(syskey) != NULL)
{
// code added to make sure that the generated julian
// timestamp is unique. This change is added to support
// bulk inserts (including rowset inserts) when before
// row triggers are used.
ItemExpr* counterExpr, *incrementExpr;
counterExpr = new (heap) ItmPersistentExpressionVar(0);
incrementExpr = new (heap) ItmBlockFunction
(counterExpr, new (heap) Assign (counterExpr,
new (heap) BiArith(ITM_PLUS, counterExpr,
new (heap) ConstValue(1))));
// Synthesize the types and value IDs for the new items
incrementExpr->synthTypeAndValueId(TRUE);
// Construct a new JulianTimestamp expression
ItemExpr *fakeSyskey = new (heap)
BiArith(ITM_PLUS, incrementExpr, new (heap)
JulianTimestamp(new (heap) InternalTimestamp));
fakeSyskey->bindNode(bindWA);
// Make [email protected] point to the timestamp expression.
retDesc->delColumn(bindWA, syskey, SYSTEM_COLUMN);
retDesc->addColumn(bindWA, syskey, fakeSyskey->getValueId());
}
} // if REL_UNARY_INSERT
} // End of GenericUpdate::fixTentativeRETDesc()
//////////////////////////////////////////////////////////////////////////////
// For Update nodes, add the predicate: OLD@.<ci> = NEW@.<ci>
//////////////////////////////////////////////////////////////////////////////
void GenericUpdate::addPredicateOnClusteringKey(BindWA *bindWA,
RelExpr *tentativeNode,
CollHeap *heap)
{
TableDesc *newTableDesc = getTableDesc();
const IndexDesc *newCI = newTableDesc->getClusteringIndex();
const ValueIdList& newCICols = newCI->getIndexKey();
CorrName newCorrName(NEWCorr);
CorrName oldCorrName(OLDCorr);
ItemExpr *predicate = NULL;
for (CollIndex i=0; i<newCICols.entries(); i++)
{
ItemExpr *currentCol = newCICols[i].getItemExpr();
CMPASSERT(currentCol->getOperatorType() == ITM_INDEXCOLUMN);
IndexColumn *currentBaseCol = (IndexColumn *) currentCol;
const NAString& colName = currentBaseCol->getNAColumn()->getColName();
ColRefName *newColName = new(heap) ColRefName(colName, newCorrName, heap);
ColRefName *oldColName = new(heap) ColRefName(colName, oldCorrName, heap);
ItemExpr *currentPredicate = new(heap)
BiRelat(ITM_EQUAL,
new(heap) ColReference(newColName),
new(heap) ColReference(oldColName));
if (predicate == NULL)
predicate = currentPredicate;
else
predicate = new(heap) BiLogic(ITM_AND, predicate, currentPredicate);
}
CMPASSERT(predicate != NULL);
predicate->bindNode(bindWA);
if (bindWA->errStatus())
return;
tentativeNode->selectionPred().insert(predicate->getValueId());
}
//////////////////////////////////////////////////////////////////////////////
// This method replaces the GenericUpdate node that fired the triggers (this)
// with a "tentative execution" node.
//////////////////////////////////////////////////////////////////////////////
RelExpr *GenericUpdate::createTentativeGU(BindWA *bindWA, CollHeap *heap)
{
// 'this' is the GU that started the whole mess. Let's kill it!
// Create a dummy Rename node.
RelExpr *tentativeNode = new(heap)
RenameTable(child(0), "TentativeGU");
// Fix the NEW@ cols to point to the right expressions.
fixTentativeRETDesc(bindWA, heap);
// Give it my RETDesc with all the NEW and OLD stuff
tentativeNode->setRETDesc(getRETDesc());
tentativeNode->setGroupAttr(getGroupAttr());
ColumnNameMap *execIdCol =
getRETDesc()->findColumn(InliningInfo::getExecIdVirtualColName());
CMPASSERT(execIdCol != NULL);
ValueIdList outputs;
getRETDesc()->getValueIdList(outputs, USER_AND_SYSTEM_COLUMNS);
outputs.remove(execIdCol->getValueId());
tentativeNode->getGroupAttr()->addCharacteristicOutputs(outputs);
  // For Update nodes, add the predicate: OLD@.<ci> = NEW@.<ci>
if (getOperatorType() == REL_UNARY_UPDATE)
{
addPredicateOnClusteringKey(bindWA, tentativeNode, heap);
if (bindWA->errStatus())
return NULL;
}
tentativeNode->markAsBound(); // No more binding is needed here.
return tentativeNode;
}
//////////////////////////////////////////////////////////////////////////////
// Find the position of all columns SET to by this before trigger, and add
// them to colsToSet. This way, when we call TriggerDB for after triggers we
// will get triggers that fire on these columns too.
//////////////////////////////////////////////////////////////////////////////
void BeforeTrigger::addUpdatedColumns(UpdateColumns *colsToSet,
const NATable *naTable)
{
if (setList_ == NULL) // Does this trigger have a SET clause?
return;
// For each Assign expression, add the col position to ColsToSet.
for (CollIndex i=0; i<setList_->entries(); i++)
{
Lng32 targetColPosition = getTargetColumn(i, NULL, naTable);
CMPASSERT(targetColPosition != -1);
colsToSet->addColumn(targetColPosition);
}
}
//////////////////////////////////////////////////////////////////////////////
// This method is called only when before triggers exist, and it builds the
// left side of the inlining tree - the tentative execution tree.
// 1. Create the tentative GU node, that replaces this node.
// 2. Build a totem of before triggers on top of the tentative GU node.
// Columns updated by before triggers are added to the list of columns
// updated by the triggering statement, passed in colsToSet.
// 3. Inline the temp insert above the before triggers.
// 4. Add a RelRoot. This root has an empty select-list, so that the Union
//    node above it will accept it as compatible with its other child. It
//    also will not open a new BindScope when bound, so that when binding
//    reaches the already bound tentative node it will be in the same
//    scope. This avoids problems when calculating inputs.
//
// The result looks like this, with 3 before triggers BT1, BT2 and BT3:
// (the triggers are already sorted by timestamp, BT1 is the oldest)
// RelRoot
// |
// TSJ
// / \
// BT3 Temp Insert
// |
// BT2
// |
// BT1
// |
// TentativeGU (replacing this)
// |
// this->child(0)
//////////////////////////////////////////////////////////////////////////////
RelExpr *GenericUpdate::createTentativeSubTree(BindWA *bindWA,
TriggerList *beforeTriggers,
UpdateColumns *updatedColumns,
TriggersTempTable& tempTableObj,
CollHeap *heap)
{
const NATable *naTable = getTableDesc()->getNATable();
// Step 1.
RelExpr *topNode = createTentativeGU(bindWA, heap);
if (bindWA->errStatus())
return NULL;
// Step 2.
Trigger *current;
BeforeTrigger *triggerNode=0;
for (CollIndex i=0; i<beforeTriggers->entries(); i++)
{
current = (*beforeTriggers)[i];
triggerNode = (BeforeTrigger *)current->getParsedTrigger(bindWA);
if (bindWA->errStatus())
return this;
CMPASSERT(triggerNode->getOperatorType() == REL_BEFORE_TRIGGER);
// BeforeTrigger nodes are created childless,
    // and the child is always added before binding it.
triggerNode->child(0) = topNode;
topNode = triggerNode;
if (updatedColumns != NULL) // Is this an UPDATE op?
{
// Add columns changed by the trigger to the columns changed
// by the original UPDATE operation.
triggerNode->addUpdatedColumns(updatedColumns, naTable);
}
ItemExpr *whenClause = triggerNode->getWhenClause();
whenClause = addCheckForTriggerEnabled(bindWA, whenClause, current, heap);
triggerNode->setWhenClause(whenClause);
}
// Step 3.
topNode = inlineTempInsert(topNode, bindWA, tempTableObj, TRUE, TRUE, heap);
if (bindWA->errStatus())
return NULL;
  // Save a pointer to the TSJ for transformNode().
CMPASSERT(triggerNode);
triggerNode->setParentTSJ(topNode);
// Step 4
RelRoot *rootNode = new(heap) RelRoot(topNode);
rootNode->setRootFlag(FALSE);
rootNode->setEmptySelectList();
rootNode->setDontOpenNewScope();
return rootNode;
}
//////////////////////////////////////////////////////////////////////////////
// The effective GU applies the rows in the affected set to the subject
// table itself. Since this operation is different in Insert, Update and
// Delete, it is implemented as a virtual method that is overridden by the
// child classes only.
//////////////////////////////////////////////////////////////////////////////
// we are not supposed to get here
// LCOV_EXCL_START
RelExpr *GenericUpdate::createEffectiveGU(BindWA *bindWA,
CollHeap *heap,
TriggersTempTable& tempTableObj,
GenericUpdate **effectiveGUNode,
UpdateColumns *colsToSet)
{
CMPASSERT(FALSE); // Not supposed to get here !!!
return NULL;
}
// LCOV_EXCL_STOP
//////////////////////////////////////////////////////////////////////////////
// Create an Insert node that inserts the NEW@ values into the subject table.
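// Conceptually (illustrative only), the generated sub-tree corresponds to:
//   INSERT INTO <subject table>
//     SELECT <base table columns> FROM <temp table> (inserted rows);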
//////////////////////////////////////////////////////////////////////////////
RelExpr *Insert::createEffectiveGU(BindWA *bindWA,
CollHeap *heap,
TriggersTempTable& tempTableObj,
GenericUpdate **effectiveGUNode,
UpdateColumns *colsToSet)
{
// Create the Scan on the temporary table.
Scan *tempScanNode =
tempTableObj.buildScan(ChangesTable::INSERTED_ROWS);
CorrName& tempTable = tempScanNode->getTableName();
ItemExpr *selectList = tempTableObj.buildBaseColsSelectList(tempTable);
// Build a RelRoot on top of the Scan node.
RelRoot *rootNode = new(heap) RelRoot(tempScanNode, REL_ROOT, selectList);
rootNode->setRootFlag(FALSE);
// Create an Insert node above the RelRoot.
  // The newRecExpr will be created during binding of the Insert node.
GenericUpdate *gu = new(heap)
Insert(getTableName(), NULL, REL_UNARY_INSERT, rootNode);
// If this GU is an action of a trigger - don't count rows affected.
gu->rowsAffected() = rowsAffected();
gu->getInliningInfo().setFlags(II_EffectiveGU);
*effectiveGUNode = gu;
return gu;
}
//////////////////////////////////////////////////////////////////////////////
// Create an Update node that updates rows in the subject table according to
// primary key, and sets them to the NEW@ values.
// Normally, the Update node itself does not have any outputs, because the
// OLD and NEW values are taken from the temp table. However, when MV
// logging is needed, the Update node must project the CurrentEpoch column.
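// Conceptually (illustrative only), the generated sub-tree corresponds to:
//   UPDATE <subject table> SET <col> = NEW@.<col>, ...
//     WHERE <subject table>.<clustering key cols> = NEW@.<clustering key cols>
// driven by a join of the OLD@ and NEW@ images from the temp table.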
//////////////////////////////////////////////////////////////////////////////
RelExpr *Update::createEffectiveGU(BindWA *bindWA,
CollHeap *heap,
TriggersTempTable& tempTableObj,
GenericUpdate **effectiveGUNode,
UpdateColumns *colsToSet)
{
ItemExpr *assignList=NULL;
Assign *assignExpr=NULL;
CorrName newCorrName(NEWCorr);
NABoolean mvLoggingRequired = isMvLoggingRequired();
// Get the columns of the subject table.
const NAColumnArray &subjectColumns =
getTableDesc()->getNATable()->getNAColumnArray();
for (CollIndex i=0; i<subjectColumns.entries(); i++)
{
// If this column was not SET into, no need to change it.
#pragma nowarn(1506) // warning elimination
if (!colsToSet->contains(i))
#pragma warn(1506) // warning elimination
continue;
#pragma nowarn(1506) // warning elimination
NAColumn *currentColumn = subjectColumns.getColumn(i);
#pragma warn(1506) // warning elimination
const NAString &colName = currentColumn->getColName();
// Cannot update a clustering/primary key column!
// This error is caught during binding in DDL.
// see BeforeTrigger::bindSetClause() in BindRelExpr.cpp
CMPASSERT(!currentColumn->isClusteringKey() && !currentColumn->isPrimaryKey());
NAString tempColName(colName);
assignExpr = new(heap)
Assign(new(heap) ColReference(
new(heap) ColRefName(colName)),
new(heap) ColReference(new(heap) ColRefName(tempColName, newCorrName)),
FALSE);
if (assignList==NULL)
assignList = assignExpr;
else
assignList = new(heap) ItemList(assignExpr, assignList);
}
// The selection predicate on the Scan is on the NEW@ clustering index cols.
  // The left side of the equation uses the "@SYSKEY" column while the right
// side uses the SYSKEY column.
NAString newName(NEWCorr);
ItemExpr *selectionPredicate = new(heap)
BiRelat(ITM_EQUAL,
tempTableObj.buildClusteringIndexVector(&newName,TRUE),
tempTableObj.buildClusteringIndexVector() );
Scan *baseScanNode = new(heap) Scan(getTableName());
baseScanNode->addSelPredTree(selectionPredicate);
GenericUpdate *effectiveGu = new(heap)
Update(getTableName(), NULL, REL_UNARY_UPDATE, baseScanNode, assignList);
// If this GU is an action of a trigger - don't count rows affected.
effectiveGu->rowsAffected() = rowsAffected();
effectiveGu->getInliningInfo().setFlags(II_EffectiveGU);
RelRoot *effectiveRoot = new(heap) RelRoot(effectiveGu);
if (!mvLoggingRequired)
effectiveRoot->setEmptySelectList();
RelExpr *joinTemps = tempTableObj.buildOldAndNewJoin();
OperatorTypeEnum joinType = mvLoggingRequired ? REL_TSJ : REL_LEFT_TSJ;
Join *tsjNode = new(heap) Join(joinTemps, effectiveRoot, joinType);
tsjNode->setTSJForWrite(TRUE);
*effectiveGUNode = effectiveGu;
return tsjNode;
}
//////////////////////////////////////////////////////////////////////////////
// Create a Delete node that deletes rows from the subject table according
// to the primary key from the row read by the temp Scan node.
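// Conceptually (illustrative only), the generated sub-tree corresponds to:
//   DELETE FROM <subject table>
//     WHERE <primary key> = <key of the row read from the temp table>;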
//////////////////////////////////////////////////////////////////////////////
RelExpr *Delete::createEffectiveGU(BindWA *bindWA,
CollHeap *heap,
TriggersTempTable& tempTableObj,
GenericUpdate **effectiveGUNode,
UpdateColumns *colsToSet)
{
// Create the Scan on the temporary table.
Scan *tempScanNode = tempTableObj.buildScan(ChangesTable::DELETED_ROWS);
Delete *gu = new(heap)
Delete(getTableName(), NULL, REL_UNARY_DELETE, tempScanNode);
// If this GU is an action of a trigger - don't count rows affected.
gu->rowsAffected() = rowsAffected();
gu->getInliningInfo().setFlags(II_EffectiveGU);
*effectiveGUNode = gu;
return gu;
}
//////////////////////////////////////////////////////////////////////////////
// Bind-time handling of the effective GU node: set up the OLD@/NEW@
// correlation names and output values needed by the pipelined actions (or by
// the trigger bind info), and mark the subject table for enable/disable.
//////////////////////////////////////////////////////////////////////////////
RelExpr *GenericUpdate::bindEffectiveGU(BindWA *bindWA)
{
if (getInliningInfo().hasPipelinedActions())
{
setNoFlow(TRUE);
if (getOperatorType() != REL_UNARY_UPDATE)
{
createOldAndNewCorrelationNames(bindWA);
}
if (isMvLoggingRequired())
{
prepareForMvLogging(bindWA, bindWA->wHeap());
}
ValueIdList outputs;
getRETDesc()->getValueIdList(outputs, USER_AND_SYSTEM_COLUMNS);
setPotentialOutputValues(outputs);
}
// case of before triggers and after statement triggers where the after triggers are in conflict
// and the effective GU is either an insert or a delete.
else if (getOperatorType() != REL_UNARY_UPDATE)
{
RETDesc *rd = createOldAndNewCorrelationNames(bindWA, TRUE /* only create RETDesc */);
getInliningInfo().buildTriggerBindInfo(bindWA, rd, bindWA->wHeap());
delete rd;
}
// indicate that this is a subject table for enable/disable
getOptStoi()->getStoi()->setSubjectTable(TRUE);
return this;
}
/*****************************************************************************
******************************************************************************
**** Methods for handling Index Maintenance.
******************************************************************************
*****************************************************************************/
//////////////////////////////////////////////////////////////////////////////
// See comments in common/ComTransInfo.h
//////////////////////////////////////////////////////////////////////////////
static void setScanLockForIM(const RelExpr *re)
{
if (re->getOperator().match(REL_SCAN)) {
Scan *rs = (Scan *)re;
rs->accessOptions().setScanLockForIM(TRUE);
}
for (Int32 i = 0; i < re->getArity(); ++i ) {
if (re->child(i))
setScanLockForIM(re->child(i));
}
}
// All table info in these createIM*() methods must come from the TableDesc.
// In particular, use of getTableName() is wrong: that is the name of the
// topmost view if the target table is a view. The TableDesc always represents
// the underlying *base* table.
//
RelExpr *GenericUpdate::createIMTree(BindWA *bindWA,
UpdateColumns *updatedColumns,
NABoolean useInternalSyskey)
{
RelExpr *imTree = NULL;
NAString origCorr(getTableDesc()->getCorrNameObj().getCorrNameAsString(),
bindWA->wHeap());
const LIST(IndexDesc *) indexList = getTableDesc()->getIndexes();
for (CollIndex i=0; i < indexList.entries(); i++) {
IndexDesc *index = indexList[i];
// The base table itself is an index (the clustering index);
// obviously IM need not deal with it.
//
if (!index->isClusteringIndex()) {
// An index always needs maintenance on an Insert or Delete...
//
NABoolean imNeeded =
((getOperatorType() != REL_UNARY_UPDATE) ||
(isMerge()));
// ...but for an Update, it needs maint if it contains any of the columns
// being updated. The test for intersection must use column position
// (which is always the position in the *base table*, so comparisons
// *are* meaningful!). Someday should be abstracted into a shared ##
// method also used by ItemConstr.h ColSignature stuff... ##
//
if (!imNeeded) {
const ValueIdList &indexColumns = index->getIndexColumns();
for (CollIndex j=0; j < indexColumns.entries() && !imNeeded; j++) {
Lng32 indexCol = indexColumns[j].getNAColumn()->getPosition();
if (updatedColumns != NULL) // -- Triggers
imNeeded |= updatedColumns->contains(indexCol);
else {
for (CollIndex k=0; k < newRecExprArray().entries(); k++) {
Lng32 tableCol = newRecExprArray()[k].getItemExpr()->child(0).
getNAColumn()->getPosition();
if (indexCol <= tableCol) {
if (indexCol == tableCol) imNeeded = TRUE;
break; // newRecExprArray is ordered by position, so break if <=
}
} // for k
} // else
} // for j
#ifndef NDEBUG
if (imNeeded && GU_DEBUG)
cerr << "imNeeded: " << index->getNAFileSet()->getExtFileSetName()
<< endl;
#endif
} // Update, need to test whether IM is needed for this index
if (imNeeded)
if (!imTree)
{
imTree = createIMNodes(bindWA, useInternalSyskey, index);
if (getOperatorType() == REL_UNARY_UPDATE ||
getOperatorType() == REL_UNARY_DELETE)
setScanLockForIM(child(0));
if (bindWA->isTrafLoadPrep())
imTree->setChild(0,this);
}
else
{
if (!bindWA->isTrafLoadPrep())
{
imTree = new (bindWA->wHeap())
Union(imTree, createIMNodes(bindWA, useInternalSyskey, index),
NULL, NULL, REL_UNION, CmpCommon::statementHeap(), TRUE, TRUE);
imTree->setBlockStmt(isinBlockStmt());
imTree->getInliningInfo().setFlags(II_isIMUnion);
} // not bulk load
else {
RelExpr * oldIMTree = imTree;
imTree = createIMNodes(bindWA, useInternalSyskey, index);
imTree->setChild(0,oldIMTree);
} // is bulk load
}
} // !clusteringIndex
} // loop over all indexes
// ##IM: This extra RelRoot is probably unnecessary (wasteful),
// ## due to createIMNode*() always returning a RelRoot-topped tree --
// ## but I didn't have time to remove it and re-test.
if (imTree && imTree->getOperatorType() != REL_ROOT)
imTree = new (bindWA->wHeap()) RelRoot(imTree);
getTableDesc()->getCorrNameObj().setCorrName(origCorr);
return imTree;
} // GenericUpdate::createIMTree()
// Here, we make virgin ColReferences, which when bound will be found
// in an outer scope engendered by our (and/or GenericUpdate's) interposing
// a RelRoot between us and our parent GenericUpdate --
// and the LeafXxx will add these outer refs to its characteristic inputs.
static RelExpr *createIMNode(BindWA *bindWA,
CorrName &tableCorrName,
const CorrName &indexCorrName,
IndexDesc *index,
NABoolean isIMInsert,
NABoolean useInternalSyskey,
NABoolean isForUpdate,
NABoolean isForMerge,
NABoolean isEffUpsert)
{
// See createOldAndNewCorrelationNames() for info on OLDCorr/NEWCorr
  // A merge statement performs an update or an insert depending on whether a
  // matching row is detected. In either case, an update operation is performed
  // on the index table. If a unique index is involved, an update operation
  // would be able to delete a corresponding row in the index table using the
  // index "key" column before inserting a new row. If it is an insert
  // operation on the base table, then an update on the index table should not
  // delete a row that corresponds to a different row in the base table (index
  // key belonging to a different row). Hence in this case, it is better to
  // always match not only the index key but also the remaining columns in the
  // index table that correspond to the base table. Hence we introduce
  // robustDelete below. This flag could also be called
  // isIMOnAUniqueIndexForMerge.
NABoolean robustDelete = (isForMerge && index->isUniqueIndex()) || (isEffUpsert && index->isUniqueIndex());
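  // The index insert reads the NEW@ (after) image and uses ALL index columns;
  // the index delete reads the OLD@ (before) image and normally needs only
  // the index key, except in the robustDelete case above, where all index
  // columns are matched.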
tableCorrName.setCorrName(isIMInsert ? NEWCorr : OLDCorr);
ItemExprList *colRefList = new(bindWA->wHeap()) ItemExprList(bindWA->wHeap());
const ValueIdList &indexColVids = ((isIMInsert || robustDelete )?
index->getIndexColumns() :
index->getIndexKey());
ItemExpr *preCond = NULL; // pre-condition for delete, insert if any
for (CollIndex i=0; i < indexColVids.entries(); i++) {
const NAString &colName = indexColVids[i].getNAColumn()->getColName();
NAString realColName = colName;
if (useInternalSyskey && colName == "SYSKEY")
{
realColName = "@SYSKEY";
}
ColReference *colRef =
new (bindWA->wHeap()) ColReference
(new (bindWA->wHeap()) ColRefName (realColName, tableCorrName, bindWA->wHeap()));
colRefList->insert(colRef);
}
  // There are 4 cases here. The following table shows when a precondition
  // expression is added:
// Index Type/IM operation-> Delete | Insert
// Non-unique Index Yes No
// Unique Index Yes Yes
if ((!isIMInsert && isForUpdate)||robustDelete )
{
// For delete nodes that are part of an update, generate a
// comparison expression between old and new index column values
// and suppress the delete if no columns change. This avoids the
// situation where we delete and then re-insert the same index
// row within one millisecond and get the same HBase timestamp
// value assigned. In that case, the delete will win out over
// the insert, even though the insert happens later in time. The
// HBase-trx folks are also working on a change to avoid that.
// similar checks are added for unique index insert (into the index
// table only, using the robustDelete flag above).
// Since we do checkandput for unique indexes, putting
// an existing row (key + value) will raise a uniqueness violation,
    // while from the user's point of view no change in the unique index
// table is expected.
CorrName predValues(tableCorrName);
if (isIMInsert)
predValues.setCorrName(OLDCorr);
else
predValues.setCorrName(NEWCorr);
for (CollIndex cc=0; cc<colRefList->entries(); cc++)
{
ColReference *predColRef = static_cast<ColReference *>
((*colRefList)[cc]);
BiRelat *comp1Col = NULL;
// create a predicate OLD@.<col> = NEW@.<col>
comp1Col = new (bindWA->wHeap())
BiRelat(ITM_EQUAL,
predColRef,
new (bindWA->wHeap()) ColReference(
new (bindWA->wHeap()) ColRefName(
predColRef->getColRefNameObj().getColName(),
predValues,
bindWA->wHeap())),
TRUE); // special NULLs, treat NULL == NULL as true
if (preCond == NULL)
preCond = comp1Col;
else
preCond = new (bindWA->wHeap()) BiLogic(ITM_AND, preCond, comp1Col);
}
// the actual condition is that the values are NOT the same
preCond = new (bindWA->wHeap()) UnLogic(ITM_NOT, preCond);
}
// NULL tableDesc here, like all Insert/Update/Delete ctors in SqlParser,
// because the LeafXxx::bindNode will call GenericUpdate::bindNode
// which will do the appropriate createTableDesc.
GenericUpdate *imNode;
if (isIMInsert)
{
if (!bindWA->isTrafLoadPrep())
{
imNode = new (bindWA->wHeap())
LeafInsert(indexCorrName, NULL, colRefList, REL_LEAF_INSERT,
preCond, bindWA->wHeap());
HostArraysWA * arrayWA = bindWA->getHostArraysArea() ;
if (arrayWA && arrayWA->hasHostArraysInTuple()) {
if (arrayWA->getTolerateNonFatalError() == TRUE)
imNode->setTolerateNonFatalError(RelExpr::NOT_ATOMIC_);
}
// For index maintenance on non-unique HBase indexes we can use a put.
// In fact we must use a put, otherwise we'll get a uniqueness constraint
// violation for those rows that didn't change and therefore didn't get
        // deleted, due to the precondition (see above).
if (!index->isUniqueIndex())
imNode->setNoCheck(TRUE);
} // regular insert
else {
imNode = new (bindWA->wHeap()) Insert(indexCorrName,
NULL,
REL_UNARY_INSERT,
NULL);
((Insert *)imNode)->setBaseColRefs(colRefList);
((Insert *)imNode)->setInsertType(Insert::UPSERT_LOAD);
((Insert *)imNode)->setIsTrafLoadPrep(true);
((Insert *)imNode)->setNoIMneeded(TRUE);
} // traf load prep
}
else
{
imNode = new (bindWA->wHeap()) LeafDelete(indexCorrName,
NULL,
colRefList,
(robustDelete )?TRUE:FALSE,
REL_LEAF_DELETE,
preCond,
bindWA->wHeap());
}
// The base table's rowsAffected() will get set in ImplRule.cpp,
// but we don't want any of these indexes' rowsAffected to be computed
// (if I insert one row into a table, I want to see "1 row(s) inserted",
// not 1 + number of indexes being maintained!).
imNode->rowsAffected() = GenericUpdate::DO_NOT_COMPUTE_ROWSAFFECTED;
// Do not collect STOI info for security checks.
imNode->getInliningInfo().setFlags(II_AvoidSecurityChecks);
// Set the flag that this GU is part of IM
imNode->getInliningInfo().setFlags(II_isIMGU);
if (bindWA->isTrafLoadPrep())
return imNode;
// Add a root here to prevent error 4056 when binding the LeafDelete+Insert
// pair for an Update.
return new (bindWA->wHeap()) RelRoot(imNode);
} // static createIMNode()
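//////////////////////////////////////////////////////////////////////////////
// Build the index maintenance nodes for one index:
// - a LeafInsert of the NEW@ values for INSERT, UPDATE and effective upsert;
// - a LeafDelete of the OLD@ key values for DELETE, UPDATE and effective
//   upsert;
// - for UPDATE and effective upsert, a blocked Union of the delete and
//   insert sides, topped with a RelRoot.
//////////////////////////////////////////////////////////////////////////////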
RelExpr *GenericUpdate::createIMNodes(BindWA *bindWA,
NABoolean useInternalSyskey,
IndexDesc *index)
{
// We call getExtFileSetObj (returns QualifiedName),
// NOT getExtFileSetName (returns NAString),
// hence the CorrName ctor sets up entire QualifiedName,
// NOT just an (erroneously delimited) objectName part of one.
//
CorrName indexCorrName(index->getNAFileSet()->getExtFileSetObj());
indexCorrName.setSpecialType(ExtendedQualName::INDEX_TABLE);
CorrName &tableCorrName = getTableDesc()->getCorrNameObj();
if(tableCorrName.isVolatile())
indexCorrName.setIsVolatile(TRUE);
RelExpr *indexInsert = NULL, *indexDelete = NULL, *indexOp = NULL;
NABoolean isForUpdate = (getOperatorType() == REL_UNARY_UPDATE ||
isMergeUpdate());
NABoolean isEffUpsert = ((CmpCommon::getDefault(TRAF_UPSERT_TO_EFF_TREE) == DF_ON ) && (getOperatorType() == REL_UNARY_INSERT && ((Insert*)this)->isUpsert()));
if (indexCorrName.getUgivenName().isNull())
{
indexCorrName.setUgivenName(tableCorrName.getUgivenName());
}
// Create a list of base columns ColReferences for
// ALL the index columns as AFTER/NEW columns.
//
if (getOperatorType() == REL_UNARY_INSERT ||
getOperatorType() == REL_UNARY_UPDATE || isEffUpsert)
indexInsert = indexOp = createIMNode(bindWA,
tableCorrName,
indexCorrName,
index,
TRUE,
useInternalSyskey,
isForUpdate,
isMerge(),
isEffUpsert);
// Create a list of base columns ColReferences for
// ONLY the index KEY columns as BEFORE/OLD columns.
//
if (getOperatorType() == REL_UNARY_DELETE ||
getOperatorType() == REL_UNARY_UPDATE ||
isEffUpsert)
indexDelete = indexOp = createIMNode(bindWA,
tableCorrName, indexCorrName,
index,
FALSE,
useInternalSyskey,
isForUpdate,
isMerge(),
isEffUpsert);
if ((getOperatorType() == REL_UNARY_UPDATE) || isEffUpsert){
indexOp = new (bindWA->wHeap()) Union(indexDelete, indexInsert,
NULL, NULL, REL_UNION,
CmpCommon::statementHeap(),TRUE,TRUE);
// is this in a compound statement?
indexOp->setBlockStmt(isinBlockStmt());
// is this GU driven by a row trigger
    // This is a temporary fix to prevent data corruption when
// the update operation is in the action of a row
// trigger. The data corruption is the result of the
// insert side of the IM tree lagging behind the delete
// in the case where multiple request to update the same rows
// are flowing to this update node.
// This is also the case when updates are being driven
// by rowsets.
// The fix is to unconditionally block the ordered union
// to handle all cases of IM updates.
// Note that this may cause performance issues. Improving
// the performance is an RFE at the moment.
//if (this->getInliningInfo().isInActionOfRowTrigger() ||
// bindWA->getHostArraysArea())
//{
((Union *)indexOp)->setBlockedUnion();
//}
//else
//{
// ((Union *)indexOp)->setOrderedUnion();
//}
// Add a root just to be consistent, so all returns from this method
// are topped with a RelRoot.
// Set this Union is part of IM
indexOp->getInliningInfo().setFlags(II_isIMUnion);
indexOp = new (bindWA->wHeap()) RelRoot(indexOp);
}
return indexOp;
} // GenericUpdate::createIMNodes()
/*****************************************************************************
******************************************************************************
**** Methods for handling Undo (for an insert) .
******************************************************************************
*****************************************************************************/
// All table info in these createUndo*() methods must come from the TableDesc.
// In particular, use of getTableName() is wrong: that is the name of the
// topmost view if the target table is a view. The TableDesc always represents
// the underlying *base* table.
//
RelExpr *GenericUpdate::createUndoTree(BindWA *bindWA,
UpdateColumns *updatedColumns,
NABoolean useInternalSyskey,
NABoolean imOrRiPresent,
NABoolean ormvPresent,
TriggersTempTable *tempTableObj)
{
RelExpr *undoTree = NULL;
NAString origCorr(getTableDesc()->getCorrNameObj().getCorrNameAsString(),
bindWA->wHeap());
const LIST(IndexDesc *) indexList = getTableDesc()->getIndexes();
NABoolean undoNeeded = getOperatorType() == REL_UNARY_INSERT;
if (!undoNeeded)
return NULL;
if (imOrRiPresent)
{
for (CollIndex i=0; i < indexList.entries(); i++)
{
IndexDesc *index = indexList[i];
if (undoNeeded)
if (!undoTree)
undoTree = createUndoNodes(bindWA, useInternalSyskey, index);
else
{
undoTree = new (bindWA->wHeap())
Union(undoTree, createUndoNodes(bindWA, useInternalSyskey, index),
NULL, NULL, REL_UNION, CmpCommon::statementHeap(), TRUE, TRUE);
undoTree->setBlockStmt(isinBlockStmt());
}
} // loop over all indexes
}
if (ormvPresent)
{
if (!undoTree)
undoTree = createUndoIUDLog(bindWA);
else
undoTree = new (bindWA->wHeap())
Union(undoTree, createUndoIUDLog(bindWA),
NULL, NULL, REL_UNION, CmpCommon::statementHeap(), TRUE, TRUE);
}
if (tempTableObj)
{
if (!undoTree)
undoTree = createUndoTempTable(tempTableObj,bindWA);
else
undoTree = new (bindWA->wHeap())
Union(undoTree, createUndoTempTable(tempTableObj,bindWA),
NULL, NULL, REL_UNION, CmpCommon::statementHeap(), TRUE, TRUE);
}
// ##IM: This extra RelRoot is probably unnecessary (wasteful),
// ## due to createIMNode*() always returning a RelRoot-topped tree --
// ## but I didn't have time to remove it and re-test.
if (undoTree && undoTree->getOperatorType() != REL_ROOT)
undoTree = new (bindWA->wHeap()) RelRoot(undoTree);
getTableDesc()->getCorrNameObj().setCorrName(origCorr);
return undoTree;
} // GenericUpdate::createUndoTree()
RelExpr * GenericUpdate::createUndoTempTable(TriggersTempTable *tempTableObj,BindWA *bindWA)
{
// TriggersTempTable *tempTableObj = new(bindWA->wHeap()) TriggersTempTable(this, bindWA);
const NAColumnArray &tempColumns = tempTableObj->getNaTable()->getNAColumnArray();
ItemExprList *tempColRefList = new(bindWA->wHeap()) ItemExprList(bindWA->wHeap());
/* CorrName tempCorrName = *(tempTableObj->calcTargetTableName(tempTableObj->getSubjectTableName().getQualifiedNameObj()));*/
CorrName tempCorrName = *(tempTableObj->getTableName());
CorrName origTempCorrName = tempCorrName;
tempCorrName.setCorrName( NEWCorr);
for (CollIndex i=0; i<tempColumns.entries(); i++)
{
#pragma nowarn(1506) // warning elimination
NAString tempColName(tempColumns.getColumn(i)->getColName());
#pragma warn(1506) // warning elimination
ColReference *tempColRef = new(bindWA->wHeap())
ColReference(new(bindWA->wHeap()) ColRefName(tempColName, tempCorrName));
tempColRefList->insert(tempColRef);
}
RelExpr *delTemp = new (bindWA->wHeap()) LeafDelete(origTempCorrName, NULL,
tempColRefList,FALSE);
((LeafDelete *)delTemp)->setTrigTemp(tempTableObj);
((GenericUpdate *)delTemp)->rowsAffected() = GenericUpdate::DO_NOT_COMPUTE_ROWSAFFECTED;
return new (bindWA->wHeap()) RelRoot(delTemp);
}
// Here, we make virgin ColReferences, which when bound will be found
// in an outer scope engendered by our (and/or GenericUpdate's) interposing
// a RelRoot between us and our parent GenericUpdate --
// and the LeafXxx will add these outer refs to its characteristic inputs.
static RelExpr *createUndoNode(BindWA *bindWA,
CorrName &tableCorrName,
const CorrName &indexCorrName,
IndexDesc *index,
NABoolean useInternalSyskey)
{
// See createOldAndNewCorrelationNames() for info on OLDCorr/NEWCorr
CorrName btCorrName = tableCorrName;
tableCorrName.setCorrName( NEWCorr);
ItemExprList *colRefList = new(bindWA->wHeap()) ItemExprList(bindWA->wHeap());
const ValueIdList &indexColVids =
(index->isUniqueIndex()? index->getIndexColumns(): index->getIndexKey());
for (CollIndex i=0; i < indexColVids.entries(); i++) {
const NAString &colName = indexColVids[i].getNAColumn()->getColName();
NAString realColName = colName;
if (useInternalSyskey && colName == "SYSKEY")
{
realColName = "@SYSKEY";
}
ColReference *colRef =
new (bindWA->wHeap()) ColReference
(new (bindWA->wHeap()) ColRefName (realColName, tableCorrName, bindWA->wHeap()));
colRefList->insert(colRef);
}
// NULL tableDesc here, like all Insert/Update/Delete ctors in SqlParser,
// because the LeafXxx::bindNode will call GenericUpdate::bindNode
// which will do the appropriate createTableDesc.
GenericUpdate *delIndex;
if (index->isClusteringIndex())
delIndex = new (bindWA->wHeap()) LeafDelete(btCorrName, NULL,
colRefList);
else
delIndex = new (bindWA->wHeap()) LeafDelete(indexCorrName, NULL
,colRefList
,index->isUniqueIndex());
// The base table's rowsAffected() will get set in ImplRule.cpp,
// but we don't want any of these indexes' rowsAffected to be computed
// (if I insert one row into a table, I want to see "1 row(s) inserted",
// not 1 + number of indexes being maintained!).
delIndex->rowsAffected() = GenericUpdate::DO_NOT_COMPUTE_ROWSAFFECTED;
// Do not collect STOI info for security checks.
delIndex->getInliningInfo().setFlags(II_AvoidSecurityChecks);
// Add a root here to prevent error 4056 when binding the LeafDelete+Insert
// pair for an Update.
return new (bindWA->wHeap()) RelRoot(delIndex);
} // static createUndoNode()
RelExpr *GenericUpdate::createUndoNodes(BindWA *bindWA,
NABoolean useInternalSyskey,
IndexDesc *index)
{
// We call getExtFileSetObj (returns QualifiedName),
// NOT getExtFileSetName (returns NAString),
// hence the CorrName ctor sets up entire QualifiedName,
// NOT just an (erroneously delimited) objectName part of one.
//
CorrName indexCorrName(index->getNAFileSet()->getExtFileSetObj());
indexCorrName.setSpecialType(ExtendedQualName::INDEX_TABLE);
CorrName &tableCorrName = getTableDesc()->getCorrNameObj();
RelExpr *undoInsert = NULL;
if (indexCorrName.getUgivenName().isNull())
{
indexCorrName.setUgivenName(tableCorrName.getUgivenName());
}
if(tableCorrName.isVolatile())
indexCorrName.setIsVolatile(TRUE);
// Create a list of base columns ColReferences for
// ALL the index columns as AFTER/NEW columns.
//
undoInsert = createUndoNode(bindWA,
tableCorrName,
indexCorrName,
index,
useInternalSyskey);
return undoInsert;
} // GenericUpdate::createUndoNodes()
/*****************************************************************************
******************************************************************************
**** Methods for handling undo from IUD log
******************************************************************************
*****************************************************************************/
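// Build the subtree that undoes this statement's inserts into the MV IUD
// log, using MvIudLog::buildInsert().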
RelExpr *GenericUpdate::createUndoIUDLog(BindWA *bindWA)
{
MvIudLog logTableObj(this, bindWA);
RelExpr *undoIUDNode = logTableObj.buildInsert(TRUE,
ChangesTable::ALL_ROWS,
TRUE,
TRUE);
return undoIUDNode;
}
/*****************************************************************************
******************************************************************************
**** Methods for handling RI constraints
******************************************************************************
*****************************************************************************/
//////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////
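// Collect the RI constraints that may need to be enforced for this
// operation: referencing (foreign key) constraints for INSERT and UPDATE,
// and unique constraints referenced by other tables for DELETE and UPDATE.
// Unenforced constraints are removed; NULL is returned if nothing is left.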
RefConstraintList *GenericUpdate::getRIs(BindWA *bindWA,
const NATable *naTable)
{
RefConstraintList *allRIConstraints = new(bindWA->wHeap())
RefConstraintList(bindWA->wHeap());
if ((getOperatorType() == REL_UNARY_INSERT) ||
(getOperatorType()== REL_UNARY_UPDATE))
naTable->getRefConstraints().getRefConstraints(bindWA,
newRecExpr(),
*allRIConstraints);
if ((getOperatorType() == REL_UNARY_DELETE) ||
(getOperatorType()== REL_UNARY_UPDATE))
naTable->getUniqueConstraints().getRefConstraints(bindWA,
newRecExpr(),
*allRIConstraints);
CollIndex numConstraints = allRIConstraints->entries();
for(CollIndex index = 0; index < numConstraints; index++)
{
if(NOT (allRIConstraints->at(index)->getIsEnforced()))
{
allRIConstraints->removeAt(index);
numConstraints--;
index--;
}
}
if(allRIConstraints->isEmpty())
{
delete allRIConstraints;
allRIConstraints = NULL;
}
return allRIConstraints;
}
//////////////////////////////////////////////////////////////////////////////
// Given the columns being updated, do we need to enforce this RI constraint?
// Check whether any columns appear both in the RI key columns and
// in the set of columns being updated.
//////////////////////////////////////////////////////////////////////////////
NABoolean RefConstraint::isRINeededForUpdatedColumns(UpdateColumns *UpdatedColumns)
{
// UpdatedColumns is NULL for INSERT and DELETE. Columns should be matched
// for UPDATE operations only.
if (NOT getIsEnforced())
return FALSE;
if (UpdatedColumns == NULL)
return TRUE;
NABoolean isAReferencingConstraint = isaForeignKeyinTableBeingUpdated()
&& getIsEnforced();
const KeyColumns *riColumns;
if (isAReferencingConstraint)
riColumns = &keyColumns();
else
riColumns = uniqueConstraintReferencedByMe_.keyColumns_;
for (CollIndex i=0; i<riColumns->entries(); i++)
{
if (UpdatedColumns->contains(riColumns->at(i)->getPosition()))
return TRUE;
}
return FALSE;
}
//////////////////////////////////////////////////////////////////////////////
// Return a list of only the RI constraints needed when updating the
// specified columns.
//////////////////////////////////////////////////////////////////////////////
RefConstraintList *RefConstraintList::getNeededRIs(UpdateColumns *updatedColumns,
CollHeap *heap)
{
// Insert into neededRIs only the constraints that match the columns
// being updated.
RefConstraintList *neededRIs = new(heap) RefConstraintList(heap);
for (CollIndex i=0; i<entries(); i++)
if (at(i)->isRINeededForUpdatedColumns(updatedColumns))
neededRIs->insert(at(i));
return neededRIs;
}
//////////////////////////////////////////////////////////////////////////////
// The tree below shows the result, with a refConstraint, after it is bound.
// TSJ
// / \
// Insert RelRoot
// | |
// Tuple GroupByAgg
// |
// Scan
//
// This function creates the rightSubtree of the TSJ node.
// Step 1. create the scan node with the predicate.
// Step 2. Create the GroupBy Node with the aggregate expression.
// Step 3. create the RelRoot node.
//////////////////////////////////////////////////////////////////////////////
RelExpr* GenericUpdate::createRISubtree(BindWA *bindWA,
const NATable *naTable,
const RefConstraint& refConstraint,
CollHeap *heap)
{
RelExpr *newScan = NULL;
NAString constraintName(bindWA->wHeap());
NAString tableName(bindWA->wHeap());
Parser parser(bindWA->currentCmpContext());
NABoolean isReferencingConstraint =
(refConstraint.isaForeignKeyinTableBeingUpdated() &&
refConstraint.getIsEnforced());
// Step 1: Create a scan node with predicate given by the refConstraint.
const QualifiedName parentQualName = refConstraint.getOtherTableName();
NAString scanPredicateTxt;
CorrName corrName(naTable->getTableName(), heap,
(isReferencingConstraint ? NEWAnsi : OLDAnsi));
NAString corrNameString = corrName.getCorrNameAsString();
refConstraint.getPredicateText(scanPredicateTxt, &corrNameString);
constraintName = refConstraint.getConstraintName().getQualifiedNameAsAnsiString();
tableName = naTable->getTableName().getQualifiedNameAsAnsiString();
// Create the Scan node.
newScan = new (heap) Scan (CorrName(parentQualName));
ItemExpr *newScanPredicate = parser.getItemExprTree
((char *)scanPredicateTxt.data());
newScan->addSelPredTree(newScanPredicate);
((Scan *)newScan)->accessOptions().accessType() = REPEATABLE_;
// Do not collect STOI info for security checks.
newScan->getInliningInfo().setFlags(II_AvoidSecurityChecks);
// Step 2: Create GroupBy Node.
// Create a selection predicate and attach it to the
// GroupBy node, in the form of the following CASE expression:
// case
// |
// IfThenElse
// ________|__________
// | | |
// | | |
// | | |
// OR False RaiseError
// / \
// / \
// OneTrue (FK1 IS NULL
// | OR
// = FK2 IS NULL..)
// / \
// 1 1
ItemExpr *aggSelPredicate = parser.getItemExprTree ("1 = 1");
ItemExpr *newAggExpr = NULL;
// For inserts we have to see if a value exists in the referenced table
// and for deletes we have to check that the deleted value does not exist
// in the referencing table. Update is a delete and an insert, so we
// have to check both.
// You can tell whether to use "EXISTS" or "NOT EXISTS" by looking at
// RefConstraint::isaForeignKeyinTableBeingUpdated
if (isReferencingConstraint)
{
// According to ANSI SQL99 (4.17.2), RI constraint is satisfied if one of the
// following conditions is true, depending on the <match option> specified in the
// <referential constraint definition>:
// If no < match type> was specified then, for each row R1 of the referencing
// table, either at least one of the values of the referencing columns in
// R1 shall be a null value, or the value of each referencing column in R1 shall
// be equal to the value of the corresponding referenced column in
// some row of the referenced table.
// The MX 2.0 does not support <match type>, so it would be equivalent to
// not specifying the <match type>.
// This MatchOptionPredicate of the form (fk1 IS NULL or fk2 IS NULL)
// is added to let the FKs with NULL values pass the RI constraint.
// Also note that this MatchOptionPredicate is evaluated in the GroupBy
// instead of Scan node. This is because when this predicate is added
// to the row value constructor in the Scan node, the Optimizer fails to
// recognise it as a key predicate, hence affecting performance.
NAString matchOptionPredicateTxt;
refConstraint.getMatchOptionPredicateText(matchOptionPredicateTxt,
&corrNameString);
ItemExpr *matchOptionPred = parser.getItemExprTree
((char *)matchOptionPredicateTxt.data());
newAggExpr = new (heap) BiLogic(ITM_OR,
new (heap)
Aggregate(ITM_ONE_TRUE, aggSelPredicate),
matchOptionPred
);
}
else
{
newAggExpr = new (heap)
UnLogic(ITM_NOT, new (heap) Aggregate(ITM_ONE_TRUE, aggSelPredicate));
}
ItemExpr *grbySelectionPred = new (heap)
Case(NULL,
new (heap)
IfThenElse(newAggExpr,
new (heap) BoolVal(ITM_RETURN_FALSE),
new (heap)
RaiseError((Lng32)EXE_RI_CONSTRAINT_VIOLATION, constraintName,
tableName)));
// Create a GroupBy on the newScan, and attach the new case as "having" predicate.
RelExpr * newGrby = new(heap)
GroupByAgg(newScan, REL_GROUPBY);
newGrby->addSelPredTree(grbySelectionPred);
// Create the Root Node.
RelExpr *newRoot = new (heap)
RelRoot(newGrby,
REPEATABLE_,
SHARE_);
((RelRoot *)newRoot)->setEmptySelectList();
return newRoot;
} // createRISubtree()
//////////////////////////////////////////////////////////////////////////////
// Given the ConstraintList, this function creates a RI subtree and returns
// the root of the subtree to the caller.
//////////////////////////////////////////////////////////////////////////////
RelExpr* GenericUpdate::inlineRI (BindWA *bindWA,
const RefConstraintList *refConstraints,
CollHeap *heap)
{
Int32 entries = 0;
RelExpr *riSubtree = NULL;
const NATable *naTable = getTableDesc()->getNATable();
CMPASSERT (!refConstraints->isEmpty())
#pragma nowarn(1506) // warning elimination
if ((entries = refConstraints->entries()))
#pragma warn(1506) // warning elimination
{
riSubtree = createRISubtree(bindWA, naTable, *(refConstraints->at(0)), heap);
for (Int32 i=1; i < entries; i++)
{
riSubtree = new(heap)
Union(createRISubtree(bindWA, naTable, *(refConstraints->at(i)), heap),
riSubtree, NULL, NULL, REL_UNION, CmpCommon::statementHeap(), TRUE);
} //end of for
}
CMPASSERT(riSubtree);
// Create the Root Node, only if the "riSubtree" is not a RelRoot.
// "riSubtree" will be a relroot when there is only one RI constraint.
// Avoiding duplicate roots.
if (riSubtree->getOperatorType() != REL_ROOT) {
riSubtree = new (heap)
RelRoot(riSubtree,
REPEATABLE_,
SHARE_);
((RelRoot *)riSubtree)->setEmptySelectList();
}
riSubtree->getInliningInfo().setFlags(II_ActionOfRI);
return riSubtree;
} // inlineRI.
/*****************************************************************************
******************************************************************************
**** Methods for handling MV Logging
******************************************************************************
*****************************************************************************/
//////////////////////////////////////////////////////////////////////////////
// Logging should NOT be done in the following cases:
// 1. DELETE FROM MV
// 2. Update/Delete on INSERTLOG table.
// 3. NOLOG option specified
// 4. Pipelined refresh.
// 5. Recompute.
// In the first 3 cases, the table should be marked as inconsistent, and
// getIsInsertLog() returns INCONSISTENT_NOLOG.
// In the cases of pipelined refresh and recompute, logging is
// disabled, but the table is NOT marked as inconsistent (CONSISTENT_NOLOG).
// P.S. Marking the table as inconsistent is not implemented yet.
//////////////////////////////////////////////////////////////////////////////
NABoolean GenericUpdate::isMvLoggingRequired()
{
const ComMvAttributeBitmap& bitmap =
getTableDesc()->getNATable()->getMvAttributeBitmap();
if (!bitmap.getLoggingRequired())
{
// Just in case the user used NOLOG when logging is not required. We don't
// want to mark the table as inconsistent.
isNoLogOperation_ = NORMAL_LOGGING;
return FALSE;
}
#if 0
// A set loggingRequired flag does not mean the MVs on this table
// are initialized. If all the ON REQUEST MVs using this table
// are not initialized, no logging is required.
// This is commented out until we update the redefinition timestamp
// of the base table when we initialize the MV.
const UsingMvInfoList& mvsUsingMe = getTableDesc()->getNATable()->getMvsUsingMe();
NABoolean foundInitializedMVs = FALSE;
CollIndex maxEntries = mvsUsingMe.entries();
for (CollIndex i=0; i<maxEntries; i++)
{
const UsingMvInfo* mv = mvsUsingMe[i];
// Ignore MVs that are not ON REQUEST.
if ( mv->getRefreshType() == COM_ON_REQUEST &&
mv->isInitialized() )
{
// The MV is initialized.
// No need to continue the loop - logging is required.
foundInitializedMVs = TRUE;
break;
}
}
if (foundInitializedMVs == FALSE)
{
// No initialized ON REQUEST MVs were found, so no reason to log.
// Do not cache this query, to avoid breaking such statements
// after the MV has been initialized.
setNonCacheable();
return FALSE;
}
#endif
// Check for Update/Delete on an INSERTLOG table
if (getOperatorType() != REL_UNARY_INSERT &&
bitmap.getIsInsertLog() )
{
setNoLogOperation(FALSE); // This is an inconsistent operation.
}
// Marking the table as inconsistent is not implemented yet,
// therefore the INCONSISTENT_NOLOG mode is not considered logging.
// When implemented, it will mean writing the CurrentEpoch to the UMD
// table, and will be handled just like logging.
return (isNoLogOperation() == NORMAL_LOGGING);
//return (isNoLogOperation() == NORMAL_LOGGING ||
// isNoLogOperation() == INCONSISTENT_NOLOG);
}
//////////////////////////////////////////////////////////////////////////////
// Add to the RETDesc the virtual columns needed for MV logging.
//////////////////////////////////////////////////////////////////////////////
void GenericUpdate::prepareForMvLogging(BindWA *bindWA,
CollHeap *heap)
{
// Create a "virtual column" called @CURRENT_EPOCH for the CurrentEpoch
// function. This function must be evaluated on the GU, and pipelined to
// the Log Insert node.
ItemExpr *currEpoch = new(heap) GenericUpdateOutputFunction(ITM_CURRENTEPOCH);
ValueId epochId =
addVirtualColumn(bindWA, currEpoch, InliningInfo::getEpochVirtualColName(), heap);
ValueId rowTypeId, rowCountId;
if (getOperatorType() == REL_UNARY_INSERT &&
getTableDesc()->getNATable()->getMvAttributeBitmap().getAutomaticRangeLoggingRequired())
{
// LCOV_EXCL_START
// dead code, range logging is not supported
ItemExpr *rowType = new(heap) GenericUpdateOutputFunction(ITM_VSBBROWTYPE);
ItemExpr *rowCount = new(heap) GenericUpdateOutputFunction(ITM_VSBBROWCOUNT);
rowTypeId =
addVirtualColumn(bindWA, rowType, InliningInfo::getRowTypeVirtualColName(), heap);
rowCountId =
addVirtualColumn(bindWA, rowCount, InliningInfo::getRowCountVirtualColName(), heap);
// LCOV_EXCL_STOP
}
ItemExpr *tsOutExpr = new (heap)
GenericUpdateOutputFunction(ITM_JULIANTIMESTAMP,
1,
new (heap) InternalTimestamp);
ValueId tsId = addVirtualColumn(bindWA,
tsOutExpr,
InliningInfo::getMvLogTsColName(),
heap);
ValueIdSet potentialOutputs;
getPotentialOutputValues(potentialOutputs);
potentialOutputs += epochId;
potentialOutputs += tsOutExpr->getValueId();
setPotentialOutputValues(potentialOutputs);
getInliningInfo().setFlags(II_isMVLoggingInlined);
// for push down
if ( ((getOperatorType() == REL_UNARY_UPDATE) && CmpCommon::getDefault(MV_LOG_PUSH_DOWN_DP2_UPDATE) == DF_ON) ||
((getOperatorType() == REL_UNARY_DELETE) && CmpCommon::getDefault(MV_LOG_PUSH_DOWN_DP2_DELETE) == DF_ON) ||
((getOperatorType() == REL_UNARY_INSERT) && CmpCommon::getDefault(MV_LOG_PUSH_DOWN_DP2_INSERT) == DF_ON) )
getInliningInfo().setFlags(II_isUsedForMvLogging);
}
//////////////////////////////////////////////////////////////////////////////
// Insert the OLD and NEW values into the MV IUD Log
//////////////////////////////////////////////////////////////////////////////
RelExpr *GenericUpdate::createMvLogInsert(BindWA *bindWA,
CollHeap *heap,
UpdateColumns *updatedColumns,
NABoolean projectMidRangeRows)
{
MvIudLog logTableObj(this, bindWA);
logTableObj.setUpdatedColumns(updatedColumns);
RelExpr *insertNode = logTableObj.buildInsert(TRUE,
ChangesTable::ALL_ROWS,
FALSE,
TRUE);
if (bindWA->errStatus())
return NULL;
prepareForMvLogging(bindWA, heap);
if (projectMidRangeRows)
{
// Set the flag on this node.
// This flag should be set even when range logging is off.
getInliningInfo().setFlags(II_ProjectMidRangeRows);
}
RelExpr *topNode = insertNode;
if (logTableObj.needsRangeLogging() && projectMidRangeRows)
{
// LCOV_EXCL_START
// dead code, range logging is not supported
RelRoot *rootNode = new (heap) RelRoot(insertNode);
rootNode->setEmptySelectList();
ItemExpr *noIgnoreCondition = new(heap)
BiRelat(ITM_NOT_EQUAL,
new(heap) ConstValue(ComMvRowType_MidRange),
new(heap) ColReference(new(heap)
ColRefName(InliningInfo::getRowTypeVirtualColName())) );
// for the "else" of the 'when clause'
ItemExpr *noOpArg = new (heap) ConstValue(0);
RelExpr *noOp = new (heap) Tuple(noOpArg);
RelRoot *noOpRoot = new (heap) RelRoot(noOp);
noOpRoot->setEmptySelectList();
Union *ifNode = new(heap) Union
(rootNode, noOpRoot, NULL, noIgnoreCondition, REL_UNION,
CmpCommon::statementHeap(), TRUE);
ifNode->setCondUnary();
topNode = ifNode;
// LCOV_EXCL_STOP
}
RelRoot *rootNode = new (heap) RelRoot(topNode);
rootNode->setEmptySelectList();
rootNode->getInliningInfo().setFlags(II_ActionOfRI);
return rootNode;
}
// This helper function is called only from GenericUpdate::handleInlining and GenericUpdate::getTriggeredMVs
// It checks if we are compiling a NOT ATOMIC statement and raises the appropriate
// error/warning. A warning is raised for ODBC and the statement will be compiled as an
// ATOMIC statement. This method returns TRUE if an error is raised and FALSE otherwise.
NABoolean GenericUpdate::checkForNotAtomicStatement(BindWA *bindWA, Lng32 sqlcode, NAString objname, NAString tabname)
{
if (bindWA->getHostArraysArea() &&
bindWA->getHostArraysArea()->getTolerateNonFatalError()) {
if (CmpCommon::getDefault(ODBC_PROCESS) != DF_ON) {
*CmpCommon::diags() << DgSqlCode(-sqlcode)
<< DgString0(objname)
<< DgString1(tabname);
bindWA->setErrStatus();
return TRUE;
}
else {
bindWA->getHostArraysArea()->setTolerateNonFatalError(FALSE);
setTolerateNonFatalError(RelExpr::UNSPECIFIED_);
*CmpCommon::diags() << DgSqlCode(sqlcode)
<< DgString0(objname)
<< DgString1(tabname);
}
}
return FALSE ;
}
/*****************************************************************************
******************************************************************************
**** Methods for handling ON STATEMENT MVs
******************************************************************************
*****************************************************************************/
//////////////////////////////////////////////////////////////////////////////
// This method inserts for each ON STATEMENT MV, that is affected by the
// action at the IUD node, a MVImmediate trigger(s) to the list of triggers to
// be fired on the subject table. These MVImmediate triggers are responsible for
// refreshing the MVs.
//
// The algorithm is as follows:
//
// 1. get list of MVs defined on the subject table
// 2. for each MV in the list do
// 2.1 ensure refresh type is "ON STATEMENT" - if not, skip MV
// 2.2 verify that the MV is initialized - if not, skip MV
// 2.3 add MV to the list of triggers (call insertMvToTriggerList)
//
// For now, only ON STATEMENT MJVs are supported!!!
//
//////////////////////////////////////////////////////////////////////////////
BeforeAndAfterTriggers *
GenericUpdate::getTriggeredMvs(BindWA *bindWA,
BeforeAndAfterTriggers *list,
UpdateColumns *updatedColumns)
{
CollHeap *heap = bindWA->wHeap();
const NATable *subjectNA = tabId_->getNATable();
CMPASSERT(subjectNA != NULL);
const UsingMvInfoList &mvList = subjectNA->getMvsUsingMe();
if (mvList.isEmpty())
{
// No MVs are to be refreshed - return the given list as is
return list;
}
else { //check for any non-atomic statements
const PartitioningFunction *partFunc =
subjectNA->getClusteringIndex()->getPartitioningFunction();
for (CollIndex i = 0; i < mvList.entries(); i++)
{
if ((CmpCommon::getDefault(NAR_DEPOBJ_ENABLE2) == DF_OFF) ||
((partFunc->isARangePartitioningFunction())))
{
if (checkForNotAtomicStatement(bindWA,30033,
(mvList[i]->getMvName()).getQualifiedNameAsAnsiString(),
(subjectNA->getTableName()).getQualifiedNameAsAnsiString()))
{
return list;
}
}
if (isNoRollback() ||
(CmpCommon::transMode()->getRollbackMode() == TransMode::NO_ROLLBACK_)) {
*CmpCommon::diags() << DgSqlCode(-3232)
<< DgString0((subjectNA->getTableName()).getQualifiedNameAsAnsiString())
<< DgString1("Materialized View :")
<< DgString2((mvList[i]->getMvName()).getQualifiedNameAsAnsiString());
bindWA->setErrStatus();
return list ;
}
}
}
// If the given list of triggers is still empty, allocate a new one for the
// new MVImmediate trigger(s) that might be added
BeforeAndAfterTriggers *newList = list;
if (newList == NULL)
{
newList = new(heap)
BeforeAndAfterTriggers(NULL, NULL, NULL, subjectNA->getRedefTime());
}
// Search for MVs that should be refreshed
for (CollIndex i = 0; i < mvList.entries(); i++)
{
// If MV is not ON STATEMENT - skip it
if (mvList[i]->getRefreshType() != COM_ON_STATEMENT)
{
continue;
}
// If MV is not initialized - skip it
if (!mvList[i]->isInitialized())
{
continue;
}
// Retrieve the NATable of the MV. Return on error.
CorrName mvCorr = CorrName(mvList[i]->getMvName(), heap);
NATable *naTableMv = bindWA->getNATable(mvCorr);
if (bindWA->errStatus())
{
return list;
}
// Retrieve the MVInfo for the MV. Return on error.
MVInfoForDML *mvInfo = naTableMv->getMVInfo(bindWA);
if (mvInfo == NULL)
{
return list;
}
// For now, only ON STATEMENT MJVs are supported!
if (mvInfo->getMVType() != COM_MJV)
{
continue;
}
if (bindWA->getTopRoot() != NULL)
bindWA->getTopRoot()->setContainsOnStatementMV(TRUE);
// The MV has passed all the general pre-conditions. Do some specific
// checks and, if it passes, add it to the triggers list.
insertMvToTriggerList(newList,
bindWA,
heap,
mvList[i]->getMvName(),
mvInfo,
getTableName().getQualifiedNameObj(),
updatedColumns);
}
if (newList->entries() == 0)
{
//"newList" will be freed by statementHeap
// add code annotation to prevent Coverity RESOURCE_LEAK checking error
// coverity[leaked_storage]
return NULL; // the list doesn't contain any triggers
}
// If there are only immediate MVs but no triggers, and this is an
// embedded IUD statement, abort with an error.
if (list == NULL &&
(getGroupAttr()->isEmbeddedUpdateOrDelete() || getGroupAttr()->isEmbeddedInsert()))
{
*CmpCommon::diags() << DgSqlCode(-12118);
bindWA->setErrStatus();
return NULL;
}
return newList; // return the updated list (including added MVs, if any)
}
//////////////////////////////////////////////////////////////////////////////
// This method does the actual insertion of the MVImmediate trigger to the
// list of triggers to be fired. Implemented only in derived classes.
//
//////////////////////////////////////////////////////////////////////////////
// we are not supposed to get here
// LCOV_EXCL_START
void GenericUpdate::insertMvToTriggerList(BeforeAndAfterTriggers *list,
BindWA *bindWA,
CollHeap *heap,
const QualifiedName &mvName,
MVInfoForDML *mvInfo,
const QualifiedName &subjectTable,
UpdateColumns *updateCols)
{
CMPASSERT(false); // not implemented in GenericUpdate
}
// LCOV_EXCL_STOP
//////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////
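// Inline only RI, IM and MV logging (no triggers are involved):
// optionally add a FirstN node above the IUD, drive the MV log insert
// through a TSJ, build the IM tree and the RI subtree and union them,
// add an undo tree for NOT ATOMIC rowset statements, and finally join
// the result to the IUD node with a TSJ for write.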
RelExpr *GenericUpdate::inlineOnlyRIandIMandMVLogging(BindWA *bindWA,
RelExpr *topNode,
NABoolean needIM,
RefConstraintList *riConstraints,
NABoolean isMVLoggingRequired,
UpdateColumns *columns,
CollHeap *heap)
{
RelExpr *imTree = NULL;
RelExpr *undoTree = NULL;
RelExpr *riTree = NULL;
RelExpr *mvTree = NULL;
RelExpr *result = NULL;
if (needIM || (riConstraints!=NULL) || isMVLoggingRequired)
{
if (topNode->getFirstNRows() >= 0)
{
// create a firstN node to delete N rows.
FirstN * firstn = new(bindWA->wHeap())
FirstN(topNode, topNode->getFirstNRows());
firstn->bindNode(bindWA);
if (bindWA->errStatus())
return NULL;
topNode->setFirstNRows(-1);
topNode = firstn;
}
}
// Create the tree that handles Index Maintenance.
if (needIM)
{
// here we don't use the internal syskey column name ("@SYSKEY") since the
// whole backbone is driven by the IUD node on the subject table
imTree = createIMTree(bindWA, columns, FALSE);
}
// Create the tree that handles RI
if (riConstraints!=NULL)
riTree = inlineRI(bindWA, riConstraints, heap);
// Create the tree for MV Logging.
if (isMVLoggingRequired)
{
// When RI/IM/Triggers are not inlined, we can skip the projection of
// the rows that are not Single/BeginRange/EndRange.
NABoolean projectMidRangeRows = TRUE;
if (imTree==NULL && riTree==NULL)
projectMidRangeRows = FALSE;
mvTree = createMvLogInsert(bindWA, heap, columns, projectMidRangeRows);
if (mvTree != NULL)
{
// Use REL_SEMITSJ if it should not project any outputs
// to the IM or RI trees.
OperatorTypeEnum
joinType = (imTree || riTree) ? REL_LEFT_TSJ : REL_TSJ;
NABoolean needsOutputs = isMtsStatement() ||
(getUpdateCKorUniqueIndexKey() && (getOperatorType() == REL_UNARY_DELETE));
if (needsOutputs && joinType == REL_TSJ)
joinType = REL_ANTI_SEMITSJ;
// joinType = (imTree || riTree) ? REL_ANTI_SEMITSJ : REL_SEMITSJ;
Join *logTSJ = new(heap) Join (topNode, mvTree, joinType);
logTSJ->getInliningInfo().setFlags(II_DrivingMvLogInsert);
logTSJ->setTSJForWrite(TRUE);
if ( ((getOperatorType() == REL_UNARY_UPDATE) && CmpCommon::getDefault(MV_LOG_PUSH_DOWN_DP2_UPDATE) != DF_ON) ||
((getOperatorType() == REL_UNARY_DELETE) && CmpCommon::getDefault(MV_LOG_PUSH_DOWN_DP2_DELETE) != DF_ON) ||
((getOperatorType() == REL_UNARY_INSERT) && CmpCommon::getDefault(MV_LOG_PUSH_DOWN_DP2_INSERT) != DF_ON) )
logTSJ->setAllowPushDown (FALSE);
if (bindWA->getHostArraysArea() && bindWA->getHostArraysArea()->getTolerateNonFatalError())
logTSJ->setTolerateNonFatalError(RelExpr::NOT_ATOMIC_);
topNode = logTSJ;
}
}
result = imTree;
if (riTree != NULL)
{
if (result == NULL)
{
result = riTree;
}
else
{
result = new (heap) Union(imTree, riTree, NULL, NULL, REL_UNION,
CmpCommon::statementHeap(), TRUE);
}
}
// For NAR, generate an undo tree as well
if (bindWA->getHostArraysArea() && bindWA->getHostArraysArea()->getTolerateNonFatalError() && (imTree || riTree))
undoTree = createUndoTree(bindWA,columns,FALSE,(imTree||riTree),isMVLoggingRequired,NULL);
if (undoTree && result)
{
OperatorTypeEnum joinType= REL_TSJ;
Join * joinResultUndo = new (heap) Join(result,undoTree,joinType);
joinResultUndo->setTSJForWrite(TRUE);
joinResultUndo->setTolerateNonFatalError(RelExpr::NOT_ATOMIC_);
joinResultUndo->setTSJForUndo(TRUE);
result = joinResultUndo;
}
if (bindWA->isTrafLoadPrep())
return result ;
if (result!=NULL)
{
// This RelRoot opens a new BindScope, so that Union::bindChildren()
// will not overwrite the RETDesc of the current scope with the NEW
// and OLD values.
RelRoot *rootNode = new(heap) RelRoot(result);
rootNode->setRootFlag(FALSE);
rootNode->setEmptySelectList();
OperatorTypeEnum joinOp;
if ((topNode->child(0).getGroupAttr()->getEmbeddedIUD()) ||
isMtsStatement() || // This is an embedded IUD statement
// (i.e. an IUD statement that has an outer select)
(getUpdateCKorUniqueIndexKey() && (getOperatorType() == REL_UNARY_DELETE)))
{
// Originally index maintenance was using a TSJ joining the
// tuples to be deleted from the base table with the tuples to
// be deleted in the individual indices. When returning tuples to
// the user this multiplies the number of returned tuples by
// the number of indices. Therefore we now use an ANTI_SEMITSJ,
// which does not require LeafDeletes to return tuples, i.e. it
// only returns the left tuple if nothing is returned from the right
// child - this is what we want.
joinOp = REL_ANTI_SEMITSJ;
}
else
{
joinOp = REL_TSJ;
}
Join *newTSJ = new(heap) Join (topNode, rootNode, joinOp);
topNode = newTSJ;
}
((Join *)topNode)->setTSJForWrite(TRUE);
if (isMerge())
{
((Join *)topNode)->setTSJForMerge(TRUE);
if (isMergeUpdate())
{
if (((MergeUpdate*)this)->insertValues())
((Join *)topNode)->setTSJForMergeWithInsert(TRUE);
}
else
{
if (((MergeDelete*)this)->insertValues())
((Join *)topNode)->setTSJForMergeWithInsert(TRUE);
}
}
if (bindWA->getHostArraysArea() && bindWA->getHostArraysArea()->getTolerateNonFatalError())
{
((Join *)topNode)->setTolerateNonFatalError(RelExpr::NOT_ATOMIC_);
((Join *)topNode)->setTSJForSetNFError(TRUE);
}
return topNode;
}
/*****************************************************************************
******************************************************************************
**** The "main" methods of inlining.
**** Build the trigger backbone and handle all the special cases.
******************************************************************************
*****************************************************************************/
//////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////
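// Build the inlining backbone when no before triggers exist:
// inline MV logging, index maintenance, the insert into the triggers
// temp table, the pipelined actions (RI and row triggers), the statement
// triggers and the temp table delete, all under a blocked Union with no
// outputs, and then bind the resulting tree.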
RelExpr *GenericUpdate::inlineAfterOnlyBackbone(BindWA *bindWA,
TriggersTempTable& tempTableObj,
TriggerList *rowTriggers,
TriggerList *stmtTriggers,
RefConstraintList *riConstraints,
NABoolean needIM,
NABoolean isMVLoggingRequired,
UpdateColumns *updatedColumns,
CollHeap *heap)
{
RelExpr *topNode = this;
if (topNode->getFirstNRows() > 0)
{
// create a firstN node to delete N rows.
FirstN * firstn = new(bindWA->wHeap())
FirstN(topNode, topNode->getFirstNRows());
firstn->bindNode(bindWA);
if (bindWA->errStatus())
return NULL;
topNode->setFirstNRows(-1);
topNode = firstn;
}
NABoolean noPipelinedActions =
(rowTriggers == NULL) && (riConstraints == NULL);
NABoolean rowTriggersPresent = (rowTriggers != NULL);
// First inline MV logging, so it will be pushed to DP2 with the IUD.
if (isMVLoggingRequired)
{
RelExpr *mvTree = createMvLogInsert(bindWA, heap, updatedColumns, TRUE);
if (mvTree != NULL)
{
Join *logTSJ = new(heap) Join(topNode, mvTree, REL_LEFT_TSJ);
logTSJ->setTSJForWrite(TRUE);
logTSJ->getInliningInfo().setFlags(II_DrivingMvLogInsert);
if (bindWA->getHostArraysArea() && bindWA->getHostArraysArea()->getTolerateNonFatalError())
logTSJ->setTolerateNonFatalError(RelExpr::NOT_ATOMIC_);
topNode = logTSJ;
}
}
// Next inline Index Maintenance
if (needIM)
{
// here we don't use the internal syskey column name ("@SYSKEY") since the
// whole backbone is driven by the IUD node on the subject table
topNode = inlineIM(topNode, bindWA, FALSE, updatedColumns, heap, FALSE, rowTriggersPresent);
}
// Next inline the temp Insert
topNode = inlineTempInsert(topNode,
bindWA,
tempTableObj,
FALSE,
noPipelinedActions,
heap);
if (bindWA->errStatus())
return NULL;
Insert *pTempInsert = (Insert *)(RelExpr *)(topNode->child(1)->child(0));
CMPASSERT(pTempInsert->getOperatorType() == REL_LEAF_INSERT ||
pTempInsert->getOperatorType() == REL_UNARY_INSERT );
// Inline RI and row triggers.
topNode = inlinePipelinedActions(topNode, bindWA,
rowTriggers,
riConstraints,
heap);
// Inline statement triggers
topNode = inlineTriggerGroup(topNode, stmtTriggers, FALSE, heap, bindWA);
// Inline the Temp delete.
topNode = inlineTempDelete(bindWA, topNode, tempTableObj, heap);
((Union *)topNode)->setBlockedUnion();
((Union *)topNode)->setNoOutputs();
if (isMtsStatement())
((Union *)topNode)->setIsTemporary();
// If we are in an NAR, then set the InNotAtomicStatement flag
if (bindWA->getHostArraysArea() &&
bindWA->getHostArraysArea()->getTolerateNonFatalError())
{
((Union *)topNode)->setInNotAtomicStatement();
}
if (bindWA->errStatus())
{
return this;
}
// Now bind the resulting tree
topNode = topNode->bindNode(bindWA);
if (bindWA->errStatus())
{
return this;
}
// store information for triggers transformation phase
getInliningInfo().buildTriggerBindInfo(bindWA, getRETDesc(), heap);
return topNode;
}
RelExpr *GenericUpdate::inlineAfterOnlyBackboneForUndo(BindWA *bindWA,
TriggersTempTable& tempTableObj,
TriggerList *rowTriggers,
TriggerList *stmtTriggers,
RefConstraintList *riConstraints,
NABoolean needIM,
NABoolean isMVLoggingRequired,
UpdateColumns *updatedColumns,
CollHeap *heap)
{
RelExpr *imTree = NULL;
RelExpr *undoTree = NULL;
RelExpr *riTree = NULL;
RelExpr *mvTree = NULL;
RelExpr *result = NULL;
RelExpr *topNode = this;
if (topNode->getFirstNRows() > 0)
{
// create a firstN node to delete N rows.
FirstN * firstn = new(bindWA->wHeap())
FirstN(topNode, topNode->getFirstNRows());
firstn->bindNode(bindWA);
if (bindWA->errStatus())
return NULL;
topNode->setFirstNRows(-1);
topNode = firstn;
}
NABoolean noPipelinedActions =
(rowTriggers == NULL) && (riConstraints == NULL);
NABoolean rowTriggersPresent = (rowTriggers != NULL);
// create the temp insert tree here
RelExpr *tempInsertNode = tempTableObj.buildInsert(TRUE);
RelRoot *tempInsRoot = new (heap) RelRoot(tempInsertNode);
tempInsRoot->setRootFlag(FALSE);
tempInsRoot->setEmptySelectList();
if (isMVLoggingRequired)
mvTree = createMvLogInsert(bindWA, heap, updatedColumns, TRUE);
if (mvTree != NULL)
{
RelRoot *mvRoot = new (heap) RelRoot(mvTree);
if (mvRoot != NULL)
{
Join *logTSJ = new(heap) Join(topNode, mvRoot, REL_LEFT_TSJ);
logTSJ->setTSJForWrite(TRUE);
logTSJ->setTolerateNonFatalError(RelExpr::NOT_ATOMIC_);
logTSJ->getInliningInfo().setFlags(II_DrivingMvLogInsert);
topNode = logTSJ;
}
}
if (tempInsRoot != NULL)
{
Join *logTSJ = new(heap) Join(topNode, tempInsRoot, REL_LEFT_TSJ);
logTSJ->setTSJForWrite(TRUE);
logTSJ->setTolerateNonFatalError(RelExpr::NOT_ATOMIC_);
topNode = logTSJ;
}
// Create the tree that handles Index Maintenance.
if (needIM)
{
// here we don't use the internal syskey column name ("@SYSKEY") since the
// whole backbone is driven by the IUD node on the subject table
imTree = createIMTree(bindWA, updatedColumns, FALSE);
}
// Create the tree that handles RI
if (riConstraints!=NULL)
riTree = inlineRI(bindWA, riConstraints, heap);
result = imTree;
if (riTree != NULL)
{
if (result == NULL)
{
result = riTree;
}
else
{
result = new (heap) Union(imTree, riTree, NULL, NULL, REL_UNION,
CmpCommon::statementHeap(), TRUE);
}
}
undoTree = createUndoTree(bindWA,updatedColumns,FALSE,(imTree||riTree),isMVLoggingRequired, &tempTableObj);
if (undoTree && result)
{
OperatorTypeEnum joinType= REL_TSJ;
Join * joinResultUndo = new (heap) Join(result,undoTree,joinType);
joinResultUndo->setTSJForWrite(TRUE);
joinResultUndo->setTolerateNonFatalError(RelExpr::NOT_ATOMIC_);
joinResultUndo->setTSJForUndo(TRUE);
result = joinResultUndo;
}
if (result!=NULL)
{
// This RelRoot opens a new BindScope, so that Union::bindChildren()
// will not overwrite the RETDesc of the current scope with the NEW
// and OLD values.
RelRoot *rootNode = new(heap) RelRoot(result);
rootNode->setRootFlag(FALSE);
rootNode->setEmptySelectList();
OperatorTypeEnum joinOp;
if ((topNode->child(0).getGroupAttr()->getEmbeddedIUD()) ||
isMtsStatement()) // This is an embedded IUD statement
// (i.e. an IUD statement that has an outer select)
{
// Originally index maintenance was using a TSJ joining the
// tuples to be deleted from the base table with the tuples to
// be deleted in the individual indices. When returning tuples to
// the user this multiplies the number of returned tuples by
// the number of indices. Therefore we now use an ANTI_SEMITSJ,
// which does not require LeafDeletes to return tuples, i.e. it
// only returns the left tuple if nothing is returned from the right
// child - this is what we want.
joinOp = REL_ANTI_SEMITSJ;
}
else
{
joinOp = REL_TSJ;
}
Join *newTSJ = new(heap) Join (topNode, rootNode, joinOp);
newTSJ->setTSJForWrite(TRUE);
newTSJ->setTolerateNonFatalError(RelExpr::NOT_ATOMIC_);
newTSJ->setTSJForSetNFError(TRUE);
RelRoot *rootNode2 = new(heap) RelRoot(newTSJ);
rootNode2->setRootFlag(FALSE);
rootNode2->setEmptySelectList();
topNode = rootNode2;
}
// Inline statement triggers
topNode = inlineTriggerGroup(topNode, stmtTriggers, FALSE, heap, bindWA);
// Inline the Temp delete.
topNode = inlineTempDelete(bindWA, topNode, tempTableObj, heap);
((Union *)topNode)->setBlockedUnion();
((Union *)topNode)->setNoOutputs();
if (isMtsStatement())
((Union *)topNode)->setIsTemporary();
// If we are in an NAR, then set the InNotAtomicStatement flag
if (bindWA->getHostArraysArea() &&
bindWA->getHostArraysArea()->getTolerateNonFatalError())
{
((Union *)topNode)->setInNotAtomicStatement();
}
if (bindWA->errStatus())
{
return this;
}
// Now bind the resulting tree
topNode = topNode->bindNode(bindWA);
if (bindWA->errStatus())
{
return this;
}
// store information for triggers transformation phase
getInliningInfo().buildTriggerBindInfo(bindWA, getRETDesc(), heap);
return topNode;
}
//////////////////////////////////////////////////////////////////////////////
// Remove from the inputs of 'node' any ValueIds that do not reference
// any of the values in realInputs.
//////////////////////////////////////////////////////////////////////////////
static void minimizeInputsForNode(RelExpr *node, ValueIdSet &realInputs)
{
ValueIdSet inputsOfNode(node->getGroupAttr()->getCharacteristicInputs());
realInputs.weedOutUnreferenced(inputsOfNode);
node->getGroupAttr()->setCharacteristicInputs(inputsOfNode);
}
//////////////////////////////////////////////////////////////////////////////
// For each and every node in 'subtree' call minimizeInputsForNode()
// to remove the redundant inputs.
//////////////////////////////////////////////////////////////////////////////
static void minimizeInputsForSubtree(RelExpr *subtree, ValueIdSet &realInputs)
{
minimizeInputsForNode(subtree, realInputs);
for (Int32 i=0; i<subtree->getArity(); i++)
minimizeInputsForSubtree(subtree->child(i), realInputs);
}
//////////////////////////////////////////////////////////////////////////////
// The binding process adds redundant inputs to the temp insert subtree.
// This usually works out in the transformation and normalization, but
// when the triggering action is an Update with sub-queries - it does not.
// The real inputs for the temp insert subtree are the OLD values, plus
// the UniqueExecuteId. Remove from the characteristic inputs of every node
// of the temp insert subtree, all the values that do not reference the
// real inputs as defined above.
//////////////////////////////////////////////////////////////////////////////
void GenericUpdate::removeRedundantInputsFromTempInsertTree(BindWA *bindWA,
RelExpr *tentativeSubtree)
{
// The OLD values are the outputs of the Scan node below the triggering IUD
// node (this node).
Scan *scanNode = getScanNode();
CMPASSERT(scanNode != NULL);
ValueIdSet realInputs(scanNode->getGroupAttr()->getCharacteristicOutputs());
// All instantiations of the UniqueExecuteId function have the same ValueId.
// Once the transformation code changes are merged in, we can take the ExecId
// ValueId from the BindInfo.
ItemExpr *execId = new(bindWA->wHeap()) UniqueExecuteId();
execId->bindNode(bindWA);
realInputs += execId->getValueId();
// Find the top node of the temp insert subtree (through the RelRoot and TSJ).
RelExpr *tempInsertNode = tentativeSubtree->child(0)->child(1);
CMPASSERT(tempInsertNode->getOperatorType() == REL_ROOT);
CMPASSERT(tempInsertNode->child(0)->getOperatorType() == REL_UNARY_INSERT);
// Now remove the redundant inputs.
minimizeInputsForSubtree(tempInsertNode, realInputs);
}
//////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////
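// Build the inlining backbone when before triggers exist: the tentative
// subtree drives the before triggers, and the after part is driven by an
// effective GU created on top of the triggers temp table. MV logging, IM,
// RI/row triggers, statement triggers and the temp table delete are
// inlined on the after side; the two parts are joined by a blocked Union.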
RelExpr *GenericUpdate::inlineBeforeAndAfterBackbone(BindWA *bindWA,
RelExpr *tentativeSubtree,
TriggersTempTable& tempTableObj,
TriggerList *rowTriggers,
TriggerList *stmtTriggers,
RefConstraintList *riConstraints,
NABoolean needIM,
NABoolean isMVLoggingRequired,
UpdateColumns *updatedColumns,
CollHeap *heap)
{
// Create the effective Insert, Update or Delete node.
GenericUpdate *effectiveGuNode = NULL;
RelExpr *effectiveGuRootNode =
createEffectiveGU(bindWA, heap, tempTableObj, &effectiveGuNode, updatedColumns);
CMPASSERT(effectiveGuNode != NULL);
RelExpr *topNode = effectiveGuRootNode;
// used by the generator to indicate that this plan has triggers so
// if the trigger is dropped the plan is recompiled
topNode->getInliningInfo().setFlags(II_hasTriggers);
// We only use the FiringTriggers flag if this GU will have to generate
// the NEW and OLD values for Row triggers, RI or IM.
if (getInliningInfo().hasPipelinedActions())
{
effectiveGuNode->getInliningInfo().setFlags(II_hasPipelinedActions);
}
// First inline MV logging, so it will be pushed to DP2 with the IUD.
if (isMVLoggingRequired)
{
RelExpr *mvTree = createMvLogInsert(bindWA, heap, updatedColumns, TRUE);
if (mvTree != NULL)
{
OperatorTypeEnum joinType = REL_LEFT_TSJ;
if (!rowTriggers && !riConstraints && !needIM)
joinType = REL_TSJ;
Join *logTSJ = new(heap) Join(topNode, mvTree, joinType);
logTSJ->setTSJForWrite(TRUE);
// disable parallel execution for TSJs that control row trigger
// execution. Parallel execution for the triggers TSJ introduces the
// potential for non-deterministic execution.
if (rowTriggers)
logTSJ->getInliningInfo().setFlags(II_SingleExecutionForTriggersTSJ);
topNode = logTSJ;
}
}
if (needIM)
{
// Next inline Index Maintenance
NABoolean imIsLastTSJ = ((rowTriggers==NULL) && (riConstraints==NULL));
// here we do use the internal syskey column name ("@SYSKEY") since the
// whole backbone is driven by the temp-table insert node
NABoolean rowTriggersPresent = (rowTriggers != NULL);
if (getOperatorType() == REL_UNARY_INSERT)
{
topNode = inlineIM(topNode, bindWA, imIsLastTSJ, updatedColumns,
heap, FALSE, rowTriggersPresent);
}
else // REL_UNARY_UPDATE or REL_UNARY_DELETE
{
topNode = inlineIM(topNode, bindWA, imIsLastTSJ, updatedColumns,
heap, TRUE, rowTriggersPresent);
}
}
// Next inline RI and row triggers (temp Insert is on the tentative side).
topNode = inlinePipelinedActions(topNode,
bindWA,
rowTriggers,
riConstraints,
heap);
// Inline statement triggers
topNode = inlineTriggerGroup(topNode, stmtTriggers, FALSE, heap, bindWA);
// Inline the Temp delete.
topNode = inlineTempDelete(bindWA, topNode, tempTableObj, heap);
topNode->getInliningInfo().setFlags(II_BeforeTriggersExist);
// Open a new scope for the after-trigger part.
topNode = new(heap) RelRoot(topNode);
((RelRoot *)topNode)->setRootFlag(FALSE);
// Join the two parts of the backbone using a blocked Union node.
Union *topUnion = new(heap) Union(tentativeSubtree, topNode, NULL, NULL,
REL_UNION, CmpCommon::statementHeap(),
TRUE);
topUnion->setBlockedUnion();
topUnion->setNoOutputs();
topUnion->getInliningInfo().setFlags(II_DrivingBeforeTriggers);
// If we are in an NAR, then set the InNotAtomicStatement flag
if (bindWA->getHostArraysArea() &&
bindWA->getHostArraysArea()->getTolerateNonFatalError())
{
topUnion->setInNotAtomicStatement();
}
topNode = topUnion;
// Now bind the resulting tree
topNode = topNode->bindNode(bindWA);
if (bindWA->errStatus())
return this;
// store information for triggers transformation phase
InliningInfo &info = effectiveGuNode->getInliningInfo();
if (!(info.hasPipelinedActions()) && getOperatorType() != REL_UNARY_UPDATE)
{
// case of before triggers and after statement triggers where the after triggers are in conflict
// and the effective GU is either an insert or a delete.
info.getTriggerBindInfo()->setBackboneIudNum(bindWA->getUniqueIudNum());
}
else
{
info.buildTriggerBindInfo(bindWA, effectiveGuRootNode->getRETDesc(), heap);
}
// The binding process adds redundant inputs to the temp insert subtree.
// This usually works out in the transformation and normalization, but
// when the triggering action is an Update with sub-queries - it does not.
if (getOperatorType() == REL_UNARY_UPDATE)
removeRedundantInputsFromTempInsertTree(bindWA, tentativeSubtree);
return topNode;
}
//////////////////////////////////////////////////////////////////////////////
// Is this backbone cascaded from a row trigger?
//////////////////////////////////////////////////////////////////////////////
NABoolean GenericUpdate::shouldForbidMaterializeNodeHere(BindWA *bindWA)
{
// If this backbone is cascaded from a row after trigger, Do not allow the
// optimizer to use any Materialize nodes below this point.
BindScope *triggerScope = NULL;
// Skip this if the flag was already set by a trigger above us.
if (!getInliningInfo().isMaterializeNodeForbidden())
{
// For each scope above us, that has a trigger object
while ((triggerScope = bindWA->findNextScopeWithTriggerInfo(triggerScope))
!= NULL)
{
StmtDDLCreateTrigger* triggerObj = triggerScope->context()->triggerObj();
// If it's not a row after trigger - continue searching.
if (triggerObj->isStatement() || !triggerObj->isAfter())
continue;
return TRUE;
}
}
return FALSE;
}
//////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////
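// Final fixups after inlining: for embedded updates/deletes, MTS
// statements and key-changing deletes, expose the NEW/OLD columns in the
// RETDesc; record the potential output values; and restore the caller's
// RETDesc on the current scope.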
void GenericUpdate::InliningFinale(BindWA *bindWA, RelExpr *topNode,
RETDesc *origRETDesc)
{
// QSTUFF
// this expression is executed once all inlining and binding
// for index maintenance, RI and triggers has been done. We replace the
// current scope with a RETDesc containing references to old and new column
// values as they can be referenced in the return clause of an embedded delete
// or an embedded update
if (getGroupAttr()->isEmbeddedUpdateOrDelete() ||
isMtsStatement() ||
(getUpdateCKorUniqueIndexKey() && (getOperatorType() == REL_UNARY_DELETE)))
{
// let's only return the implicit old and new table columns
if (getRETDesc()) delete getRETDesc(); // free the old RETDesc before replacing it
CorrName corrNEWTable
(getTableDesc()->getCorrNameObj().getQualifiedNameObj(),
bindWA->wHeap(),NEWTable);
if (getOperatorType() == REL_UNARY_UPDATE ||
getOperatorType() == REL_UNARY_INSERT){
// expose NEW table columns
setRETDesc(new (bindWA->wHeap())
RETDesc(bindWA,getTableDesc(),&corrNEWTable));
}
else
setRETDesc(new (bindWA->wHeap()) RETDesc(bindWA));
if (getOperatorType() == REL_UNARY_UPDATE ||
getOperatorType() == REL_UNARY_DELETE)
{
CorrName corrOLDTable
(getScanNode(TRUE)->getTableDesc()->getCorrNameObj().getQualifiedNameObj(),
bindWA->wHeap(),OLDTable
);
// expose OLD table columns
getRETDesc()->
addColumns(bindWA, *getScanNode(TRUE)->getRETDesc(), &corrOLDTable);
}
// allow the TSJRule to be used to transform Updates/Deletes
setNoFlow(TRUE);
// record the GenericUpdateRoot property in the group
// attributes of the root of the generic update tree.
// this is used by pushdown of covered expressions to prevent
// expressions from being pushed beyond the root of a generic
// update tree
topNode->getGroupAttr()->setGenericUpdateRoot(TRUE);
// set current scope to contain NEW and OLD tables only
origRETDesc = getRETDesc();
}
if (bindWA->inDDL())
return;
//QSTUFF
ValueIdList outputs;
getRETDesc()->getValueIdList(outputs, USER_AND_SYSTEM_COLUMNS);
addPotentialOutputValues(outputs);
// If we ever extend the SQL syntax to such non-Ansi constructs as
// INSERT INTO TI (DELETE FROM TD ... );
// SELECT * FROM (UPDATE TU SET ... ) X;
// then we'll want to revisit these next two lines,
// (resetting our RETDesc to an empty one) because otherwise
// this nonempty one'll become our parent RelRoot's compExpr(),
// which will cause
// RelRoot::preCodeGen - compExpr().replaceVEGExpressions -
// VEGReference::replaceVEGReference
// to assert with "no available values hence valuesToBeBound.isEmpty".
//
// QSTUFF
// please see above.. we made the extensions and fixed what is
// referred to above
// select * from (delete from x)y, z where y.x = z.x;
// select * from (update x set x = x + 1) y, z where y.x = z.x;
if (!getGroupAttr()->isEmbeddedUpdateOrDelete() &&
!isMtsStatement() &&
!(getUpdateCKorUniqueIndexKey() && (getOperatorType() == REL_UNARY_DELETE)))
{
delete getRETDesc();
setRETDesc(new (bindWA->wHeap()) RETDesc(bindWA));
}
// QSTUFF
topNode->setRETDesc(getRETDesc());
bindWA->getCurrentScope()->setRETDesc(origRETDesc);
}
// Helper function used to check the existence of at least one update
// trigger whose explicit columns match at least one of the columns
// supplied as a second parameter
NABoolean atLeastOneMatch(const BeforeAndAfterTriggers *allTriggers,
const UpdateColumns *columns)
{
TriggerList *trigs = NULL;
assert(allTriggers != NULL);
// check the before row triggers for a match
if (allTriggers->getBeforeTriggers() != NULL)
{
trigs =
allTriggers->getBeforeTriggers()->getColumnMatchingTriggers (columns);
}
// check the after row triggers for a match
if ((trigs == NULL) && (allTriggers->getAfterRowTriggers() != NULL))
{
trigs =
allTriggers->getAfterRowTriggers()->getColumnMatchingTriggers (columns);
}
// check the after statement triggers for a match
if ((trigs == NULL) && (allTriggers->getAfterStatementTriggers() != NULL))
{
trigs =
allTriggers->getAfterStatementTriggers()->getColumnMatchingTriggers (columns);
}
// at least one match was found
if (trigs != NULL)
{
return TRUE;
}
// no match
return FALSE;
}
NABoolean GenericUpdate::checkNonSupportedTriggersUse(BindWA *bindWA,
QualifiedName &subjectTable,
ComOperation op,
BeforeAndAfterTriggers *allTriggers)
{
CollHeap *heap = bindWA->wHeap();
// disable embedded update and delete as trigger events
if ((allTriggers != NULL) && (getGroupAttr()->isEmbeddedUpdateOrDelete()))
{
// for update operations we have to make sure that there is at least
// one trigger defined on the column updated by the operation
if (op == COM_UPDATE)
{
// Get the list of updated columns.
UpdateColumns *columns = new(heap) UpdateColumns(stoi_->getStoi());
// find at least one trigger that is fired by the columns updated
// by this operation
if (atLeastOneMatch(allTriggers, columns))
{
*CmpCommon::diags() << DgSqlCode(-11027);
bindWA->setErrStatus();
// "columns" will be freed by statementHeap.
// add code annotation to prevent Coverity checking error
// coverity[leaked_storage]
return TRUE;
}
}
else // delete - all delete triggers are considered
{
*CmpCommon::diags() << DgSqlCode(-11027);
bindWA->setErrStatus();
return TRUE;
}
}
// disable embedded insert as trigger events
if ((allTriggers != NULL) && (getGroupAttr()->isEmbeddedInsert()))
{
*CmpCommon::diags() << DgSqlCode(-11027);
bindWA->setErrStatus();
return TRUE;
}
// the set clause of SET ON ROLLBACK statements may not change
// columns on which update triggers are defined
if (newRecBeforeExpr() != NULL)
{
BeforeAndAfterTriggers *allTriggers2 = allTriggers;
// for SET ON ROLLBACK delete statements, we are not interested
// in the triggers fired by the delete operation but rather by the
// triggers defined on the columns updated by the SET ON ROLLBACK
// clause. These triggers are update triggers not delete triggers.
if (op == COM_DELETE)
{
ComOperation op2 = COM_UPDATE;
allTriggers2 =
bindWA->getSchemaDB()->getTriggerDB()->getTriggers(subjectTable,
op2, bindWA);
if (bindWA->errStatus())
return TRUE;
}
if (allTriggers2 != NULL)
{
ValueId exprId;
UpdateColumns *columns =
new(heap) UpdateColumns((SqlTableOpenInfo *)NULL);
for (exprId = newRecBeforeExpr().init();
newRecBeforeExpr().next(exprId);
newRecBeforeExpr().advance(exprId))
{
ItemExpr *thisIE = exprId.getItemExpr();
columns->addColumn((thisIE->child(0).getNAColumn())->getPosition());
}
// if at least one of the columns updated in the SET ON ROLLBACK clause
// is a subject column of an update trigger, raise an error
if (atLeastOneMatch(allTriggers2, columns))
{
*CmpCommon::diags() << DgSqlCode(-11026);
bindWA->setErrStatus();
// "columns" will be freed by statementHeap.
// add code annotation to prevent Coverity checking error
// coverity[leaked_storage]
return TRUE;
}
}
}
return FALSE;
}
//////////////////////////////////////////////////////////////////////////////
// Handle the inlining of Triggers, RI, IM, MV logging and ON STATEMENT MVs.
// This method is called from the end of the bindNode() methods of Insert,
// Update and Delete.
// The trigger backbone is different if before triggers exist.
// Please read the Triggers internal documentation before trying to understand
// this code.
//////////////////////////////////////////////////////////////////////////////
RelExpr * GenericUpdate::handleInlining(BindWA *bindWA, RelExpr *boundExpr)
{
RETDesc *origScopeRETDesc = bindWA->getCurrentScope()->getRETDesc();
CorrName &subjectTableCorr = getTableDesc()->getCorrNameObj();
if (bindWA->inDDL())
{
// some QSTUFF code in InliningFinale() should be executed when we
// are in a create view statement.
InliningFinale(bindWA, boundExpr, origScopeRETDesc);
return boundExpr;
}
// MultiCommit is currently only valid for DELETE statements
if (NOT getOperator().match(REL_ANY_DELETE)
&&
CmpCommon::transMode()->getMultiCommit() == TransMode::MC_ON_)
{
*CmpCommon::diags() << DgSqlCode(-4351);
bindWA->setErrStatus();
return boundExpr;
}
// We don't do views.
// ignore location specified operations.
if ( (getTableDesc()->getNATable()->getViewText() != NULL) ||
(subjectTableCorr.isLocationNameSpecified() ))
return boundExpr;
if (subjectTableCorr.getSpecialType() == ExtendedQualName::SG_TABLE)
{
InliningFinale(bindWA, boundExpr, origScopeRETDesc);
return boundExpr; // Nothing for us to do here.
}
// A "DELETE [FIRST n] FROM <IUD-log-table>"
// is the only case we allow a special table through here.
NABoolean firstN_OnIudLogTable = FALSE;
if ((subjectTableCorr.getSpecialType() == ExtendedQualName::IUD_LOG_TABLE) &&
(getFirstNRows() > 0) )
firstN_OnIudLogTable = TRUE;
// Don't waste time on special tables like index etc.
// The IUD log is allowed here because we allow delete with multi commit on it.
if ((subjectTableCorr.getSpecialType() != ExtendedQualName::NORMAL_TABLE) &&
(subjectTableCorr.getSpecialType() != ExtendedQualName::MV_TABLE) &&
!firstN_OnIudLogTable &&
!getInliningInfo().isNeedGuOutputs() )
return boundExpr;
// no inlining for the effective GU of a before trigger
if (getInliningInfo().isEffectiveGU())
return bindEffectiveGU(bindWA);
if (getInliningInfo().isNeedGuOutputs())
{
// The OLD/NEW outputs of this GU node are needed for some purpose
// other than triggers/RI/IM etc.
createOldAndNewCorrelationNames(bindWA);
ValueIdList outputs;
getRETDesc()->getValueIdList(outputs, USER_AND_SYSTEM_COLUMNS);
setPotentialOutputValues(outputs);
setNoFlow(TRUE);
return boundExpr;
}
// If future code changes cause this assertion to fail, we need to make
// sure our code still works.
CMPASSERT(boundExpr == this);
ComOperation op;
switch (getOperatorType())
{
case REL_UNARY_INSERT: op = COM_INSERT; break;
case REL_UNARY_UPDATE: op = COM_UPDATE; break;
case REL_UNARY_DELETE: op = COM_DELETE; break;
default : return boundExpr; // We only handle these three operators.
}
CollHeap *heap = bindWA->wHeap();
QualifiedName& subjectTable = subjectTableCorr.getQualifiedNameObj();
// get all triggers in the triggerDB, and RIs from the SchemaDB.
#if DISABLE_TRIGGERS
BeforeAndAfterTriggers *allTriggers = NULL;
#else
BeforeAndAfterTriggers *allTriggers = 0;
if ((isIgnoreTriggers() == FALSE) && !firstN_OnIudLogTable )
{
if (getUpdateCKorUniqueIndexKey())
{
// if this is the delete node of updateCKorUniqueIndexKey then skip
// inlining triggers altogether (allTriggers remains NULL).
if (op == COM_INSERT)
{
op = COM_UPDATE;
allTriggers = bindWA->getSchemaDB()->getTriggerDB()->getTriggers(subjectTable, op, bindWA);
}
}
else
{
allTriggers = bindWA->getSchemaDB()->getTriggerDB()->getTriggers(subjectTable, op, bindWA);
if ((allTriggers == NULL) && (isMerge()))
{
// Triggers are not supported with Merge statement.
// if update part of merge didn't cause any triggers to be inlined,
// check for insert triggers.
// These triggers will be inlined here but an error will be
// returned during preCodeGen.
allTriggers = bindWA->getSchemaDB()->getTriggerDB()->getTriggers(
subjectTable, COM_INSERT, bindWA);
}
}
}
#endif
if (bindWA->errStatus())
return NULL;
if (allTriggers) {
getInliningInfo().setFlags(II_hasTriggers);
bindWA->setInTrigger();
}
// certain trigger uses are currently not supported and errors
// are raised if these uses are attempted
if (checkNonSupportedTriggersUse(bindWA, subjectTable, op, allTriggers))
{
return this;
}
#if DISABLE_RI
RefConstraintList *riConstraints = NULL;
#else
RefConstraintList *riConstraints =
getRIs(bindWA, getTableDesc()->getNATable());
#endif
if (riConstraints)
getInliningInfo().setFlags(II_hasRI);
#if DISABLE_MV_LOGGING
NABoolean isMVLoggingRequired = FALSE;
#else
NABoolean isMVLoggingRequired = isMvLoggingRequired();
#endif
RelExpr *topNode=boundExpr;
// Get the list of updated columns.
UpdateColumns *columns = NULL;
if (op == COM_UPDATE)
columns = new(heap) UpdateColumns(stoi_->getStoi());
// Get only the before triggers that match these columns.
TriggerList *beforeTriggers = NULL;
if ((allTriggers != NULL) && (allTriggers->getBeforeTriggers() != NULL))
beforeTriggers = allTriggers->getBeforeTriggers()->getColumnMatchingTriggers(columns);
NABoolean tsjRETDescCreated = FALSE;
if (beforeTriggers != NULL) {
if (checkForNotAtomicStatement(bindWA, 30027,
((*beforeTriggers)[0])->getTriggerName(),
subjectTable.getQualifiedNameAsAnsiString())) {
return this;
}
tsjRETDescCreated = TRUE;
createOldAndNewCorrelationNames(bindWA);
}
// Save the previous IudNum, it will be restored at the end of the
// inlining of the current backbone.
Lng32 prevIudNum = bindWA->getUniqueIudNum();
// Set the IudNum for the current generic update backbone.
// This value is used later as part of the uniquifier
// as the temporary table column UNIQUEIUD_COLUMN
bindWA->setUniqueIudNum();
// When triggers are defined on the subject table, it is assured that the
// temp-table exists for this table. Since during initialization of the
// TriggersTempTable object there's a seek for the temp-table's NATable, we
// must be sure this table actually exists before creating that object.
TriggersTempTable *tempTableObj = NULL;
if (allTriggers != NULL)
{
tempTableObj = new(heap) TriggersTempTable(this, bindWA);
}
// Build the before triggers side of the inlining backbone.
// This method also adds to 'columns' any column that is updated by before triggers.
RelExpr *beforeSubtree = NULL;
if (beforeTriggers != NULL)
beforeSubtree = createTentativeSubTree(bindWA,
beforeTriggers,
columns,
*tempTableObj,
heap);
if (bindWA->errStatus())
return NULL;
#if DISABLE_TRIGGERS
// Nothing to do.
#else
if ((allTriggers != NULL) && (allTriggers->getAfterStatementTriggers() != NULL))
{
TriggerList *pureStmtTriggers = allTriggers->getAfterStatementTriggers()->getColumnMatchingTriggers(columns);
// There are statement triggers which are not for statement MVs.
// The ones used for statement MVs will be populated by the
// getTriggeredMvs call below.
if (pureStmtTriggers)
if (checkForNotAtomicStatement(bindWA, 30034,
((*pureStmtTriggers)[0])->getTriggerName(),
subjectTable.getQualifiedNameAsAnsiString()))
{
return this;
}
}
// Now that we know the final set of columns updated in the query, we
// can determine whether the update of the MVs is direct or indirect.
allTriggers = getTriggeredMvs(bindWA, allTriggers, columns);
if (bindWA->errStatus())
return this;
#endif
if ((allTriggers == NULL) &&
(riConstraints == NULL) &&
!isMVLoggingRequired &&
!getTableDesc()->hasSecondaryIndexes())
{
InliningFinale(bindWA, boundExpr, origScopeRETDesc);
bindWA->resetUniqueIudNum(prevIudNum); // restore the saved IudNum
return boundExpr; // Nothing for us to do here.
}
// Get the row and statement triggers that match the updated column list.
TriggerList *rowTriggers = NULL;
if ((allTriggers != NULL) && (allTriggers->getAfterRowTriggers() != NULL))
rowTriggers = allTriggers->getAfterRowTriggers()->getColumnMatchingTriggers(columns);
TriggerList *stmtTriggers = NULL;
if ((allTriggers != NULL) && (allTriggers->getAfterStatementTriggers() != NULL))
stmtTriggers = allTriggers->getAfterStatementTriggers()->getColumnMatchingTriggers(columns);
if (rowTriggers != NULL) {
if (checkForNotAtomicStatement(bindWA, 30034,
((*rowTriggers)[0])->getTriggerName(),
subjectTable.getQualifiedNameAsAnsiString()))
{
return this;
}
else if (isNoRollback() ||
(CmpCommon::transMode()->getRollbackMode() == TransMode::NO_ROLLBACK_))
{
*CmpCommon::diags() << DgSqlCode(-3232)
<< DgString0(subjectTable.getQualifiedNameAsAnsiString())
<< DgString1("After Trigger :")
<< DgString2(((*rowTriggers)[0])->getTriggerName());
bindWA->setErrStatus();
return this ;
}
}
else if (stmtTriggers != NULL) {
if ((CmpCommon::getDefault(NAR_DEPOBJ_ENABLE2) == DF_OFF) )
{
if (checkForNotAtomicStatement(bindWA, 30034,
((*stmtTriggers)[0])->getTriggerName(),
subjectTable.getQualifiedNameAsAnsiString()))
{
return this;
}
}
else if (isNoRollback() ||
(CmpCommon::transMode()->getRollbackMode() == TransMode::NO_ROLLBACK_))
{
*CmpCommon::diags() << DgSqlCode(-3232)
<< DgString0(subjectTable.getQualifiedNameAsAnsiString())
<< DgString1("After Trigger :")
<< DgString2(((*stmtTriggers)[0])->getTriggerName());
bindWA->setErrStatus();
return this ;
}
}
if ((stmtTriggers != NULL) || (beforeTriggers != NULL))
{
NAString trigName = stmtTriggers ? ((*stmtTriggers)[0])->getTriggerName() : ((*beforeTriggers)[0])->getTriggerName() ;
if (getFirstNRows() > 0)
{
// first N delete not supported with before triggers and after statement triggers.
*CmpCommon::diags() << DgSqlCode(-11045)
<< DgString0(trigName)
<< DgString1(subjectTable.getQualifiedNameAsAnsiString());
bindWA->setErrStatus();
return this;
}
}
// Filter only the RI constraints that match the updated columns.
if (riConstraints != NULL)
{
riConstraints = riConstraints->getNeededRIs(columns, heap);
if (riConstraints->isEmpty())
riConstraints = NULL;
else // There are RI constraints that need to be enforced.
{
// Disallow embedded delete on a referenced table.
// Disallow embedded updates on columns which are part of an RI constraint.
if (getGroupAttr()->isEmbeddedDelete())
{
*CmpCommon::diags() << DgSqlCode(-4183);
bindWA->setErrStatus();
return this;
}
if (getGroupAttr()->isEmbeddedUpdate())
{
*CmpCommon::diags() << DgSqlCode(-4184);
bindWA->setErrStatus();
return this;
}
if ((CmpCommon::getDefault(NAR_DEPOBJ_ENABLE2) == DF_OFF) )
{
if (checkForNotAtomicStatement(bindWA, 30028,
(riConstraints->at(0)->getConstraintName()).getQualifiedNameAsAnsiString(),
subjectTable.getQualifiedNameAsAnsiString())) {
return this ;
}
}
if (isNoRollback() ||
(CmpCommon::transMode()->getRollbackMode() == TransMode::NO_ROLLBACK_)) {
*CmpCommon::diags() << DgSqlCode(-3232)
<< DgString0(subjectTable.getQualifiedNameAsAnsiString())
<< DgString1("Referential Integrity Constraint :")
<< DgString2((riConstraints->at(0)->getConstraintName()).getQualifiedNameAsAnsiString());
bindWA->setErrStatus();
return this ;
}
}
}
NABoolean needIM = isIMNeeded(columns);
if (needIM) {
if ((isNoRollback() ||
(CmpCommon::transMode()->getRollbackMode() == TransMode::NO_ROLLBACK_)) ||
(bindWA->getHostArraysArea() &&
bindWA->getHostArraysArea()->getTolerateNonFatalError())) {
NAString indexName;
const LIST(IndexDesc *) indexList = getTableDesc()->getIndexes();
for (CollIndex i=0; i<indexList.entries(); i++)
{
IndexDesc *index = indexList[i];
if (!(index->isClusteringIndex())) {
indexName = index->getExtIndexName();
break;
}
}
if ((CmpCommon::getDefault(NAR_DEPOBJ_ENABLE) == DF_OFF) )
{
if (checkForNotAtomicStatement(bindWA,
30026,
indexName,
subjectTable.getQualifiedNameAsAnsiString()))
{
return this;
}
}
if (isNoRollback() ||
(CmpCommon::transMode()->getRollbackMode() == TransMode::NO_ROLLBACK_)) {
*CmpCommon::diags() << DgSqlCode(-3232)
<< DgString0(subjectTable.getQualifiedNameAsAnsiString())
<< DgString1("Index :")
<< DgString2(indexName);
bindWA->setErrStatus();
return this ;
}
}
getInliningInfo().setFlags(II_hasIM);
}
// Do we have any triggers to work on?
if ((beforeTriggers == NULL) &&
(rowTriggers == NULL) &&
(stmtTriggers == NULL) )
{
bindWA->resetUniqueIudNum(prevIudNum);
if (!needIM && !isMVLoggingRequired && (riConstraints == NULL))
{
// We get here only if this is an Update operation, and there are
// triggers/IM/RI defined on Update on this table, but all of them
// are on columns other than the ones updated.
// e.g. A trigger AFTER UPDATE OF (a) ON T1,
// and a triggering statement like: UPDATE T1 SET B=5;
InliningFinale(bindWA, boundExpr, origScopeRETDesc);
CMPASSERT (op == COM_UPDATE);
return boundExpr;
}
// OK, so we have no triggers - just RI, IM or both.
createOldAndNewCorrelationNames(bindWA);
setNoFlow(TRUE);
getInliningInfo().setFlags(II_hasPipelinedActions);
topNode =
inlineOnlyRIandIMandMVLogging(bindWA,
this,
needIM,
riConstraints,
isMVLoggingRequired,
columns,
heap);
if (needIM)
{
topNode->getInliningInfo().setFlags(II_DrivingIM);
}
topNode = topNode->bindNode(bindWA);
// Create the tree that handles Index Maintenance.
if (needIM)
{
// don't allow index maintenance on non-audited tables unless
// this is enabled by a default
// We allow index maintenance in an Internal refresh statement
if (!getTableDesc()->getClusteringIndex()->getNAFileSet()->isAudited() &&
!bindWA->isBindingMvRefresh())
{
NAString dummyTokString(bindWA->wHeap());
const NATable *naTable = bindWA->getNATable(getTableName());
DefaultToken imAllowed =
bindWA->getSchemaDB()->getDefaults().token(IUD_NONAUDITED_INDEX_MAINT,
dummyTokString);
switch (imAllowed)
{
case DF_ON:
// go ahead and do it, the user asked for it
break;
case DF_WARN:
// emit a warning and continue
*CmpCommon::diags() << DgSqlCode(4203) <<
DgTableName(naTable->getTableName().getQualifiedNameAsAnsiString());
break;
default:
// stop with an error, index maintenance is not allowed on
// nonaudited tables
*CmpCommon::diags() << DgSqlCode(-4203) <<
DgTableName(naTable->getTableName().getQualifiedNameAsAnsiString());
bindWA->setErrStatus();
return this;
}
}
}
if (bindWA->errStatus())
{
return boundExpr;
}
getInliningInfo().setFlags(II_hasInlinedActions);
InliningFinale(bindWA, topNode, origScopeRETDesc);
return topNode;
}
// Now that we know for sure there are triggers to be fired, tempTableObj
// must be initialized. In case there are no regular triggers (not MVImmediate)
// defined on the table, the tempTableObj is not initialized yet (see the
// initialization of tempTableObj above).
if (tempTableObj == NULL)
{
tempTableObj = new(heap) TriggersTempTable(this, bindWA);
}
setNoFlow(TRUE);
if (!tsjRETDescCreated)
createOldAndNewCorrelationNames(bindWA);
if ( (rowTriggers != NULL) ||
(riConstraints != NULL) ||
needIM ||
isMVLoggingRequired)
getInliningInfo().setFlags(II_hasPipelinedActions);
// Forbid the use of the Materialize node by the optimizer, for the entire
// backbone, if we are now cascaded from a row after trigger.
NABoolean forbidMaterializeNodeHere = shouldForbidMaterializeNodeHere(bindWA);
Int32 prevStateOfFlags = 0;
if (forbidMaterializeNodeHere)
{
// Set this InliningInfo flag in every node being bound (see RelExpr::bindSelf())
// but save the previous state first.
prevStateOfFlags = bindWA->getInliningInfoFlagsToSetRecursivly();
bindWA->setInliningInfoFlagsToSetRecursivly(II_MaterializeNodeForbidden);
}
// Create and bind the rest of the trigger backbone.
if (beforeTriggers == NULL)
{
if (bindWA->getHostArraysArea() && bindWA->getHostArraysArea()->getTolerateNonFatalError()&& (needIM || riConstraints))
{
topNode = inlineAfterOnlyBackboneForUndo(bindWA,
*tempTableObj,
rowTriggers,
stmtTriggers,
riConstraints,
needIM,
isMVLoggingRequired,
columns,
heap);
}
else
{
topNode = inlineAfterOnlyBackbone(bindWA,
*tempTableObj,
rowTriggers,
stmtTriggers,
riConstraints,
needIM,
isMVLoggingRequired,
columns,
heap);
}
}
else
topNode = inlineBeforeAndAfterBackbone(bindWA,
beforeSubtree,
*tempTableObj,
rowTriggers,
stmtTriggers,
riConstraints,
needIM,
isMVLoggingRequired,
columns,
heap);
if (forbidMaterializeNodeHere)
{
// Restore the previous state. This effectively resets the flag
// we set before binding, so we don't affect subtrees that will be bound
// after us.
bindWA->setInliningInfoFlagsToSetRecursivly(prevStateOfFlags);
}
bindWA->resetUniqueIudNum(prevIudNum);
// indicate that this is a subject table for enable/disable
getOptStoi()->getStoi()->setSubjectTable(TRUE);
getInliningInfo().setFlags(II_hasInlinedActions);
InliningFinale(bindWA, topNode, origScopeRETDesc);
return topNode;
}
//////////////////////////////////////////////////////////////////////////////
// This method checks to see if an update should be transformed into
// insert/delete nodes. This is true if the update is on a primary key and
// special conditions don't exist.
//
// Return value:
// TRUE - if the update is on a clustering or unique index key and no special
// conditions exist. Causes the update to be transformed to a delete
// followed by an insert with intervening order by.
// FALSE - if the update is on a clustering or unique index key and special
// conditions exist, OR
// the update is not on a clustering or unique index key.
//////////////////////////////////////////////////////////////////////////////
NABoolean Update::updatesClusteringKeyOrUniqueIndexKey(BindWA *bindWA)
{
// This CQD must be ON or AGGRESSIVE in order to enable the transformation
// for special conditions. If not, return false
if (CmpCommon::getDefault(UPDATE_CLUSTERING_OR_UNIQUE_INDEX_KEY) == DF_OFF)
return FALSE;
ULng32 numberofKeys = getTableDesc()->
getClusteringIndex()->getIndexKey().entries();
NABoolean hasSysKey = getTableDesc()->getClusteringIndex()->
getNAFileSet()->hasSyskey();
// This restriction (i.e., unable to transform an update of a unique index key
// if the base table has only SYSKEY as its clustering key) may be removable;
// the intention is to revisit this once a similar problem has been
// solved for the Halloween feature.
if ((numberofKeys == 1) AND hasSysKey)
return FALSE; // This should never be reached.
// 1. Determine whether columns being updated are clustering key columns
// of base table or of a unique index. Set flags used later.
const LIST(IndexDesc *) & ixlist = getTableDesc()->getIndexes();
NABoolean isUniqueIndexCol = FALSE;
NABoolean isClusteringKeyCol = FALSE;
NAString ckColName; // Save column name if is clustering key.
Scan * scanNode = getScanNode();
const ValueIdList colUpdated = scanNode->getTableDesc()->getColUpdated();
for (CollIndex indexNo = 0; indexNo < ixlist.entries(); indexNo++)
{
if (isUniqueIndexCol && isClusteringKeyCol)
break ;
IndexDesc *idesc = ixlist[indexNo];
if (idesc->isUniqueIndex() || idesc->isClusteringIndex())
{
const ValueIdList indexKey = idesc->getIndexKey();
for (CollIndex i = 0; i < colUpdated.entries(); i++)
{
ItemExpr *updateCol = colUpdated[i].getItemExpr();
CMPASSERT(updateCol->getOperatorType() == ITM_BASECOLUMN);
for (CollIndex j = 0; j < indexKey.entries(); j++)
{
ItemExpr *keyCol = indexKey[j].getItemExpr();
ItemExpr *baseCol = ((IndexColumn*)keyCol)->getDefinition().getItemExpr();
CMPASSERT(baseCol->getOperatorType() == ITM_BASECOLUMN);
if (((BaseColumn*)updateCol)->getColNumber() ==
((BaseColumn*)baseCol)->getColNumber())
{
if (idesc->isUniqueIndex())
isUniqueIndexCol = TRUE;
else {
isClusteringKeyCol = TRUE;
ckColName = ((BaseColumn*)updateCol)->getColName();
}
}
}
} // for (CollIndex ...)
}
} // for (CollIndex ...)
if ((CmpCommon::getDefault(UPDATE_CLUSTERING_OR_UNIQUE_INDEX_KEY) == DF_AGGRESSIVE) &&
(NOT isClusteringKeyCol))
return FALSE;
// 2. If columns being updated are unique index or clustering key columns,
// check for 5 unsupported special cases and issue error in each case.
if (isUniqueIndexCol || isClusteringKeyCol)
{
// 2a. Check for first three unsupported special cases:
// i. User declared cursor and performing update current of.
// ii. Update statement with set on rollback.
// iii. Update with a select surrounding it.
if ( updateCurrentOf() ||
newRecBeforeExpr().entries() > 0 ||
getGroupAttr()->isEmbeddedUpdate()
)
{
if (isUniqueIndexCol && NOT isClusteringKeyCol)
return FALSE; // column being updated is unique key but one of the
// three special cases apply. Revert to delete followed by insert
// without intervening sort. NOT AN ERROR.
// Set appropriate error for first three special cases.
if ( updateCurrentOf() )
*CmpCommon::diags() << DgSqlCode(-4118) ;
if (newRecBeforeExpr().entries() > 0)
*CmpCommon::diags() << DgSqlCode(-4199) ;
if (getGroupAttr()->isEmbeddedUpdate())
*CmpCommon::diags() << DgSqlCode(-4198) ;
bindWA->setErrStatus();
return FALSE ; // ERROR condition.
}
// 2b. Check for unsupported special case 4:
// iv. There is an MV on this table that is defined on
// the clustering key(s).
const NATable *naTable = bindWA->getNATable(getTableName());
if (naTable)
{
// Check for MVs on table.
const UsingMvInfoList &mvList = naTable->getMvsUsingMe();
if (!mvList.isEmpty())
{
// MV(s) exist. Check to see if any MV is ON STATEMENT and is
// significant. (Update on a primary key with an MV on that
// key is not supported - return FALSE.)
for (CollIndex i = 0; i < mvList.entries(); i++)
if (mvList[i]->isInitialized() &&
mvList[i]->getRefreshType() == COM_ON_STATEMENT)
{
CorrName mvCorr = CorrName(mvList[i]->getMvName(), bindWA->wHeap());
NATable *naTableMv = bindWA->getNATable(mvCorr);
if (bindWA->errStatus()) return FALSE;
MVInfoForDML *mvInfo = naTableMv->getMVInfo(bindWA);
// getMVInfo() reads from metadata, but saves the info found
// and will be called later anyway during binding.
UpdateColumns *columns = NULL;
columns = new(bindWA->wHeap()) UpdateColumns(getOptStoi()->getStoi());
if (checkUpdateType(mvInfo, naTable->getTableName(), columns) !=
IRELEVANT)
{
if (isClusteringKeyCol) // update CK not supported in this case.
{
*CmpCommon::diags() << DgSqlCode(-4033) << DgColumnName(ckColName);
bindWA->setErrStatus();
}
return FALSE;
}
}
}
}
if (isIgnoreTriggers() == FALSE)
{
ComOperation op;
op = COM_UPDATE;
BeforeAndAfterTriggers *allTriggers = 0;
QualifiedName& subjectTable = getTableDesc()->getCorrNameObj().getQualifiedNameObj();
allTriggers = bindWA->getSchemaDB()->getTriggerDB()->getTriggers(subjectTable, op, bindWA);
UpdateColumns *columns = NULL;
columns = new(bindWA->wHeap()) UpdateColumns(getOptStoi()->getStoi());
TriggerList *beforeTriggers = NULL;
if ((allTriggers != NULL) && (allTriggers->getBeforeTriggers() != NULL))
beforeTriggers = allTriggers->getBeforeTriggers()->getColumnMatchingTriggers(columns);
if (beforeTriggers)
{
if (isClusteringKeyCol) // update CK not supported if table has beforeTriggers.
{
*CmpCommon::diags() << DgSqlCode(-4033) << DgColumnName(ckColName);
bindWA->setErrStatus();
}
return FALSE;
}
}
return TRUE; // Column being updated is CK or unique index key and no special cases apply.
} // if (isUniqueIndexCol || isClusteringKeyCol)
return FALSE ; // Column being updated is not CK or unique index key
}
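// Transforms an update of a clustering key or unique index key into a
// Delete node under an Insert node, with a RelRoot in between that projects
// the new column values, so the key change executes as a delete followed by
// an insert. The insert inherits the update's TableDesc, and the list of
// updated columns is conveyed through the leftmost scan node's stoi.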
RelExpr *Update::transformUpdatePrimaryKey(BindWA *bindWA)
{
Delete * delNode = new (bindWA->wHeap())
Delete(CorrName(getTableDesc()->getCorrNameObj(), bindWA->wHeap()),
NULL,
REL_UNARY_DELETE,
child(0),
NULL);
delNode->setNoLogOp(CONSISTENT_NOLOG);
delNode->setUpdateCKorUniqueIndexKey(TRUE);
delNode->rowsAffected() = GenericUpdate::DO_NOT_COMPUTE_ROWSAFFECTED;
ValueIdList selectList, sourceColsList, lhsOfSetClause;
getTableDesc()->getUserColumnList(selectList);
getScanIndexDesc()->getPrimaryTableDesc()->getUserColumnList(sourceColsList);
ValueId vid ;
CollIndex pos;
// newRecExprArray is a list of assigns. For each assign
// child(0) is the LHS of the set clause and child(1) is
// the RHS of the SET clause
for (CollIndex i=0; i < newRecExprArray().entries(); i++)
{
lhsOfSetClause.insertAt(i,newRecExprArray().at(i).getItemExpr()->child(0).getValueId());
}
for (CollIndex i=0; i < selectList.entries(); i++)
{
if ((pos = lhsOfSetClause.index(selectList[i])) == NULL_COLL_INDEX)
selectList[i] = sourceColsList[i];
else
selectList[i] = newRecExprArray().at(pos).getItemExpr()->child(1).getValueId();
}
for (CollIndex i=0; i < oldToNewMap().getTopValues().entries(); i++) {
BaseColumn *col = (BaseColumn *) oldToNewMap().getBottomValues()[i].getItemExpr();
NABoolean addToOldToNewMap = TRUE;
// Copy the oldToNewMap.
if (col->getNAColumn()->isComputedColumnAlways()) {
// Computed columns can be copied from delete to insert if they don't
// change. Don't include the column in this map, though, if one of
// the underlying columns gets updated, because the value of the
// computed column has to be recomputed. That computation will be
// done in the new insert node.
ValueIdSet underlyingCols;
col->getUnderlyingColumnsForCC(underlyingCols);
if (NOT underlyingCols.intersect(lhsOfSetClause).isEmpty())
addToOldToNewMap = FALSE;
}
// Copy the oldToNewMap.
if (addToOldToNewMap)
delNode->oldToNewMap().addMapEntry(oldToNewMap().getTopValues()[i],
oldToNewMap().getBottomValues()[i]);
}
RelRoot * rootNode = new (bindWA->wHeap())
RelRoot(delNode,
REL_ROOT,
selectList.rebuildExprTree(ITM_ITEM_LIST));
RelExpr * boundExpr;
Insert * insNode = new (bindWA->wHeap())
Insert(CorrName(getTableDesc()->getCorrNameObj(),bindWA->wHeap()),
getTableDesc(), // insert gets the same tabledesc as the update
REL_UNARY_INSERT,
rootNode,
NULL);
insNode->setNoLogOp(isNoLogOperation());
insNode->setSubqInUpdateAssign(subqInUpdateAssign());
if (this->rowsAffected() == GenericUpdate::DO_NOT_COMPUTE_ROWSAFFECTED)
insNode->rowsAffected() = GenericUpdate::DO_NOT_COMPUTE_ROWSAFFECTED;
else
insNode->rowsAffected() = GenericUpdate::COMPUTE_ROWSAFFECTED;
insNode->setUpdateCKorUniqueIndexKey(TRUE);
InliningInfo inlineInfo = getInliningInfo();
insNode->setInliningInfo(&inlineInfo);
if (CmpCommon::getDefault(UPDATE_CLUSTERING_OR_UNIQUE_INDEX_KEY) == DF_ON) {
insNode->setAvoidHalloween(TRUE);
insNode->setHalloweenCannotUseDP2Locks(TRUE);
}
// Used to convey the updated columns to the insert node's stoi.
// During inlining the update node is not present anymore, so we read the
// insert's stoi to figure out which columns are updated.
SqlTableOpenInfo * scanStoi = getLeftmostScanNode()->getOptStoi()->getStoi();
short updateColsCount = getOptStoi()->getStoi()->getColumnListCount();
scanStoi->setColumnListCount(updateColsCount);
scanStoi->setColumnList(new (bindWA->wHeap()) short[updateColsCount]);
for (short i=0; i<updateColsCount; i++)
scanStoi->setUpdateColumn(i,getOptStoi()->getStoi()->getUpdateColumn(i));
boundExpr = insNode->bindNode(bindWA);
return boundExpr;
}
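// Applies the same delete-followed-by-insert transformation as
// transformUpdatePrimaryKey above; this variant is used on the HBase code
// path and its body is currently identical.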
RelExpr *Update::transformHbaseUpdate(BindWA *bindWA)
{
Delete * delNode = new (bindWA->wHeap())
Delete(CorrName(getTableDesc()->getCorrNameObj(), bindWA->wHeap()),
NULL,
REL_UNARY_DELETE,
child(0),
NULL);
delNode->setNoLogOp(CONSISTENT_NOLOG);
delNode->setUpdateCKorUniqueIndexKey(TRUE);
delNode->rowsAffected() = GenericUpdate::DO_NOT_COMPUTE_ROWSAFFECTED;
ValueIdList selectList, sourceColsList, lhsOfSetClause;
getTableDesc()->getUserColumnList(selectList);
getScanIndexDesc()->getPrimaryTableDesc()->getUserColumnList(sourceColsList);
ValueId vid ;
CollIndex pos;
// newRecExprArray is a list of assigns. For each assign
// child(0) is the LHS of the set clause and child(1) is
// the RHS of the SET clause
for (CollIndex i=0; i < newRecExprArray().entries(); i++)
{
lhsOfSetClause.insertAt(i,newRecExprArray().at(i).getItemExpr()->child(0).getValueId());
}
for (CollIndex i=0; i < selectList.entries(); i++)
{
if ((pos = lhsOfSetClause.index(selectList[i])) == NULL_COLL_INDEX)
selectList[i] = sourceColsList[i];
else
selectList[i] = newRecExprArray().at(pos).getItemExpr()->child(1).getValueId();
}
for (CollIndex i=0; i < oldToNewMap().getTopValues().entries(); i++) {
BaseColumn *col = (BaseColumn *) oldToNewMap().getBottomValues()[i].getItemExpr();
NABoolean addToOldToNewMap = TRUE;
// Copy the oldToNewMap.
if (col->getNAColumn()->isComputedColumnAlways()) {
// Computed columns can be copied from delete to insert if they don't
// change. Don't include the column in this map, though, if one of
// the underlying columns gets updated, because the value of the
// computed column has to be recomputed. That computation will be
// done in the new insert node.
ValueIdSet underlyingCols;
col->getUnderlyingColumnsForCC(underlyingCols);
if (NOT underlyingCols.intersect(lhsOfSetClause).isEmpty())
addToOldToNewMap = FALSE;
}
// Copy the oldToNewMap.
if (addToOldToNewMap)
delNode->oldToNewMap().addMapEntry(oldToNewMap().getTopValues()[i],
oldToNewMap().getBottomValues()[i]);
}
RelRoot * rootNode = new (bindWA->wHeap())
RelRoot(delNode,
REL_ROOT,
selectList.rebuildExprTree(ITM_ITEM_LIST));
RelExpr * boundExpr;
Insert * insNode = new (bindWA->wHeap())
Insert(CorrName(getTableDesc()->getCorrNameObj(),bindWA->wHeap()),
getTableDesc(), // insert gets the same tabledesc as the update
REL_UNARY_INSERT,
rootNode,
NULL);
insNode->setNoLogOp(isNoLogOperation());
insNode->setSubqInUpdateAssign(subqInUpdateAssign());
if (this->rowsAffected() == GenericUpdate::DO_NOT_COMPUTE_ROWSAFFECTED)
insNode->rowsAffected() = GenericUpdate::DO_NOT_COMPUTE_ROWSAFFECTED;
else
insNode->rowsAffected() = GenericUpdate::COMPUTE_ROWSAFFECTED;
insNode->setUpdateCKorUniqueIndexKey(TRUE);
InliningInfo inlineInfo = getInliningInfo();
insNode->setInliningInfo(&inlineInfo);
if (CmpCommon::getDefault(UPDATE_CLUSTERING_OR_UNIQUE_INDEX_KEY) == DF_ON) {
insNode->setAvoidHalloween(TRUE);
insNode->setHalloweenCannotUseDP2Locks(TRUE);
}
// Used to convey the updated columns to the insert node's stoi.
// During inlining the update node is not present anymore, so we read the
// insert's stoi to figure out which columns are updated.
SqlTableOpenInfo * scanStoi = getLeftmostScanNode()->getOptStoi()->getStoi();
short updateColsCount = getOptStoi()->getStoi()->getColumnListCount();
scanStoi->setColumnListCount(updateColsCount);
scanStoi->setColumnList(new (bindWA->wHeap()) short[updateColsCount]);
for (short i=0; i<updateColsCount; i++)
scanStoi->setUpdateColumn(i,getOptStoi()->getStoi()->getUpdateColumn(i));
boundExpr = insNode->bindNode(bindWA);
return boundExpr;
}
| 1 | 15,903 | It's better to not leave this old code here... it just clutters things up and makes reading the code more confusing. We can always recover the old code from the repository if needed. | apache-trafodion | cpp |
@@ -21,6 +21,8 @@ import com.google.common.base.Charsets;
import com.google.common.io.ByteStreams;
import com.google.common.net.MediaType;
+import com.sun.org.glassfish.gmbal.ManagedObject;
+
import org.apache.http.Header;
import org.apache.http.HttpEntity;
import org.apache.http.HttpHost; | 1 | /*
Copyright 2011 Selenium committers
Copyright 2011 Software Freedom Conservancy
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package org.openqa.grid.internal;
import com.google.common.base.Charsets;
import com.google.common.io.ByteStreams;
import com.google.common.net.MediaType;
import org.apache.http.Header;
import org.apache.http.HttpEntity;
import org.apache.http.HttpHost;
import org.apache.http.HttpRequest;
import org.apache.http.HttpResponse;
import org.apache.http.client.ClientProtocolException;
import org.apache.http.client.HttpClient;
import org.apache.http.entity.InputStreamEntity;
import org.apache.http.message.BasicHttpEntityEnclosingRequest;
import org.apache.http.message.BasicHttpRequest;
import org.apache.http.util.EntityUtils;
import org.openqa.grid.common.exception.ClientGoneException;
import org.openqa.grid.common.exception.GridException;
import org.openqa.grid.internal.listeners.CommandListener;
import org.openqa.grid.web.Hub;
import org.openqa.grid.web.servlet.handler.LegacySeleniumRequest;
import org.openqa.grid.web.servlet.handler.RequestType;
import org.openqa.grid.web.servlet.handler.SeleniumBasedRequest;
import org.openqa.grid.web.servlet.handler.SeleniumBasedResponse;
import org.openqa.grid.web.servlet.handler.WebDriverRequest;
import org.openqa.selenium.io.IOUtils;
import java.io.BufferedReader;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.io.UnsupportedEncodingException;
import java.net.MalformedURLException;
import java.net.URL;
import java.text.DateFormat;
import java.util.Calendar;
import java.util.Collections;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.Map;
import java.util.UUID;
import java.util.logging.Logger;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
/**
* Represents a running test for the hub/registry. A test session is created when a TestSlot becomes
* available for a test. <p/> The session is destroyed when the test ends (ended by the client or
* timed out).
*/
@SuppressWarnings("JavaDoc")
public class TestSession {
private static final Logger log = Logger.getLogger(TestSession.class.getName());
static final int MAX_IDLE_TIME_BEFORE_CONSIDERED_ORPHANED = 5000;
private final String internalKey;
private final TestSlot slot;
private volatile ExternalSessionKey externalKey = null;
private volatile long sessionCreatedAt;
private volatile long lastActivity;
private final Map<String, Object> requestedCapabilities;
private Map<String, Object> objects = Collections.synchronizedMap(new HashMap<String, Object>());
private volatile boolean ignoreTimeout = false;
private final TimeSource timeSource;
private volatile boolean forwardingRequest;
private final int MAX_NETWORK_LATENCY = 1000;
public String getInternalKey() {
return internalKey;
}
/**
* Creates a test session on the specified testSlot.
*/
public TestSession(TestSlot slot, Map<String, Object> requestedCapabilities,
TimeSource timeSource) {
internalKey = UUID.randomUUID().toString();
this.slot = slot;
this.requestedCapabilities = requestedCapabilities;
this.timeSource = timeSource;
lastActivity = this.timeSource.currentTimeInMillis();
}
/**
* the capabilities the client requested. It will match the TestSlot capabilities, but is not
* equal.
*/
public Map<String, Object> getRequestedCapabilities() {
return requestedCapabilities;
}
/**
* Get the session key from the remote. It's up to the remote to guarantee the key is unique. If 2
* remotes return the same session key, the tests will overwrite each other.
*
* @return the key that was provided by the remote when the POST /session command was sent.
*/
public ExternalSessionKey getExternalKey() {
return externalKey;
}
/**
* associate this session to the session provided by the remote.
*/
public void setExternalKey(ExternalSessionKey externalKey) {
this.externalKey = externalKey;
sessionCreatedAt = lastActivity;
}
/**
* gives the time in milliseconds since the last access to this test session, or 0 if ignore
* timeout has been set to true.
*
* @return time in millis
* @see TestSession#setIgnoreTimeout(boolean)
*/
public long getInactivityTime() {
if (ignoreTimeout) {
return 0;
} else {
return timeSource.currentTimeInMillis() - lastActivity;
}
}
public boolean isOrphaned() {
final long elapsedSinceCreation = timeSource.currentTimeInMillis() - sessionCreatedAt;
// The session needs to have been open for at least the time interval and we need to have not
// seen any new commands during that time frame.
return slot.getProtocol().isSelenium()
&& elapsedSinceCreation > MAX_IDLE_TIME_BEFORE_CONSIDERED_ORPHANED
&& sessionCreatedAt == lastActivity;
}
/**
* @return the TestSlot this session is executed against.
*/
public TestSlot getSlot() {
return slot;
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((internalKey == null) ? 0 : internalKey.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
TestSession other = (TestSession) obj;
return internalKey.equals(other.internalKey);
}
@Override
public String toString() {
return externalKey != null ? "ext. key " + externalKey : internalKey
+ " (int. key, remote not contacted yet.)";
}
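// Returns an HTTP client for talking to the node. When a browser timeout is
// configured, it is padded with the node's cleanup cycle and the expected
// network latency and then doubled, presumably so the hub does not give up
// on a request before the node itself times out.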
private HttpClient getClient() {
Registry reg = slot.getProxy().getRegistry();
int browserTimeout = reg.getConfiguration().getBrowserTimeout();
if (browserTimeout > 0){
final int selenium_server_cleanup_cycle = browserTimeout / 10;
browserTimeout += (selenium_server_cleanup_cycle + MAX_NETWORK_LATENCY);
browserTimeout *= 2; // Let's not let this happen too often
}
return slot.getProxy().getHttpClientFactory().getGridHttpClient(browserTimeout, browserTimeout);
}
/**
* forwards the request to the node.
*/
public String forward(SeleniumBasedRequest request, HttpServletResponse response,
boolean newSessionRequest)
throws IOException {
String res = null;
String currentThreadName = Thread.currentThread().getName();
setThreadDisplayName();
forwardingRequest = true;
try {
if (slot.getProxy() instanceof CommandListener) {
((CommandListener) slot.getProxy()).beforeCommand(this, request, response);
}
lastActivity = timeSource.currentTimeInMillis();
HttpRequest proxyRequest = prepareProxyRequest(request/*, config*/);
HttpResponse proxyResponse = sendRequestToNode(proxyRequest);
lastActivity = timeSource.currentTimeInMillis();
HttpEntity responseBody = proxyResponse.getEntity();
try {
final int statusCode = proxyResponse.getStatusLine().getStatusCode();
response.setStatus(statusCode);
processResponseHeaders(request, response, slot.getRemoteURL(), proxyResponse);
byte[] consumedNewWebDriverSessionBody = null;
if (statusCode != HttpServletResponse.SC_INTERNAL_SERVER_ERROR &&
statusCode != HttpServletResponse.SC_NOT_FOUND) {
consumedNewWebDriverSessionBody = updateHubIfNewWebDriverSession(request, proxyResponse);
}
if (newSessionRequest && statusCode == HttpServletResponse.SC_INTERNAL_SERVER_ERROR) {
removeIncompleteNewSessionRequest();
}
if (statusCode == HttpServletResponse.SC_NOT_FOUND) {
removeSessionBrowserTimeout();
}
byte[] contentBeingForwarded = null;
if (responseBody != null) {
try {
InputStream in;
if (consumedNewWebDriverSessionBody == null) {
in = responseBody.getContent();
if (request.getRequestType() == RequestType.START_SESSION
&& request instanceof LegacySeleniumRequest) {
res = getResponseUtf8Content(in);
updateHubNewSeleniumSession(res);
in = new ByteArrayInputStream(res.getBytes("UTF-8"));
}
} else {
in = new ByteArrayInputStream(consumedNewWebDriverSessionBody);
}
final byte[] bytes = drainInputStream(in);
writeRawBody(response, bytes);
contentBeingForwarded = bytes;
} finally {
EntityUtils.consume(responseBody);
}
}
if (slot.getProxy() instanceof CommandListener) {
SeleniumBasedResponse wrappedResponse = new SeleniumBasedResponse(response);
wrappedResponse.setForwardedContent(contentBeingForwarded);
((CommandListener) slot.getProxy()).afterCommand(this, request, wrappedResponse);
}
response.flushBuffer();
} finally {
EntityUtils.consume(responseBody);
}
response.flushBuffer();
return res;
} finally {
forwardingRequest = false;
Thread.currentThread().setName(currentThreadName);
}
}
private void setThreadDisplayName() {
DateFormat dfmt = DateFormat.getTimeInstance();
String name = "Forwarding " + this + " to " + slot.getRemoteURL() + " at " +
dfmt.format(Calendar.getInstance().getTime());
Thread.currentThread().setName(name);
}
private void removeIncompleteNewSessionRequest() {
RemoteProxy proxy = slot.getProxy();
proxy.getRegistry().terminate(this, SessionTerminationReason.CREATIONFAILED);
}
private void removeSessionBrowserTimeout() {
RemoteProxy proxy = slot.getProxy();
proxy.getRegistry().terminate(this, SessionTerminationReason.BROWSER_TIMEOUT);
}
private void updateHubNewSeleniumSession(String content) {
ExternalSessionKey key = ExternalSessionKey.fromResponseBody(content);
setExternalKey(key);
}
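// For a WebDriver new-session response, extracts the external session key
// either from the "Location" header or, failing that, from the JSON response
// body, and registers it on this session. Returns the consumed body bytes
// (when the body had to be read) so they can still be forwarded to the client.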
private byte[] updateHubIfNewWebDriverSession(
SeleniumBasedRequest request, HttpResponse proxyResponse) throws IOException {
byte[] consumedData = null;
if (request.getRequestType() == RequestType.START_SESSION
&& request instanceof WebDriverRequest) {
Header h = proxyResponse.getFirstHeader("Location");
if (h == null) {
if (isSuccessJsonResponse(proxyResponse) && proxyResponse.getEntity() != null) {
InputStream stream = proxyResponse.getEntity().getContent();
consumedData = ByteStreams.toByteArray(stream);
stream.close();
String contentString = new String(consumedData, Charsets.UTF_8);
ExternalSessionKey key = ExternalSessionKey.fromJsonResponseBody(contentString);
if (key == null) {
throw new GridException(
"webdriver new session JSON response body did not contain a session ID");
}
setExternalKey(key);
return consumedData;
} else {
throw new GridException(
"new session request for webdriver should contain a location header "
+ "or an 'application/json;charset=UTF-8' response body with the session ID.");
}
}
ExternalSessionKey key = ExternalSessionKey.fromWebDriverRequest(h.getValue());
setExternalKey(key);
}
return consumedData;
}
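// A response is considered a successful JSON response when its status is
// 200 OK and at least one Content-Type header parses as
// 'application/json;charset=UTF-8'.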
private static boolean isSuccessJsonResponse(HttpResponse response) {
if (response.getStatusLine().getStatusCode() == HttpServletResponse.SC_OK) {
for (Header header : response.getHeaders("Content-Type")) {
MediaType type;
try {
type = MediaType.parse(header.getValue());
} catch (IllegalArgumentException ignored) {
continue;
}
if (MediaType.JSON_UTF_8.is(type)) {
return true;
}
}
}
return false;
}
private HttpResponse sendRequestToNode(HttpRequest proxyRequest) throws ClientProtocolException,
IOException {
HttpClient client = getClient();
URL remoteURL = slot.getRemoteURL();
HttpHost host = new HttpHost(remoteURL.getHost(), remoteURL.getPort());
return client.execute(host, proxyRequest);
}
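// Rebuilds the incoming servlet request as an HttpRequest aimed at the node:
// rewrites the URI against the slot's remote URL, attaches the request body
// when one is present (Content-Length > 0 or chunked transfer encoding), and
// copies all headers except Content-Length, which is already set.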
private HttpRequest prepareProxyRequest(HttpServletRequest request
/*, ForwardConfiguration config*/)
throws IOException {
URL remoteURL = slot.getRemoteURL();
String pathSpec = request.getServletPath() + request.getContextPath();
String path = request.getRequestURI();
if (!path.startsWith(pathSpec)) {
throw new IllegalStateException("Expected path " + path + " to start with pathSpec "
+ pathSpec);
}
String end = path.substring(pathSpec.length());
String ok = remoteURL + end;
if (request.getQueryString() != null) {
ok += "?" + request.getQueryString();
}
String uri = new URL(remoteURL, ok).toExternalForm();
InputStream body = null;
if (request.getContentLength() > 0 || request.getHeader("Transfer-Encoding") != null) {
body = request.getInputStream();
}
HttpRequest proxyRequest;
if (body != null) {
BasicHttpEntityEnclosingRequest r =
new BasicHttpEntityEnclosingRequest(request.getMethod(), uri);
r.setEntity(new InputStreamEntity(body, request.getContentLength()));
proxyRequest = r;
} else {
proxyRequest = new BasicHttpRequest(request.getMethod(), uri);
}
for (Enumeration<?> e = request.getHeaderNames(); e.hasMoreElements(); ) {
String headerName = (String) e.nextElement();
if ("Content-Length".equalsIgnoreCase(headerName)) {
continue; // already set
}
proxyRequest.setHeader(headerName, request.getHeader(headerName));
}
return proxyRequest;
}
private void writeRawBody(HttpServletResponse response, byte[] rawBody) throws IOException {
OutputStream out = response.getOutputStream();
try {
// We need to set the Content-Length header before we write to the output stream. Usually the
// Content-Length header is already set because we take it from the proxied request. But, it
// won't be set when we consume chunked content, since that doesn't use Content-Length. As we're
// not going to send a chunked response, we need to set the Content-Length in order for the
// response to be valid.
if (!response.containsHeader("Content-Length")) {
response.setIntHeader("Content-Length", rawBody.length);
}
out.write(rawBody);
} catch (IOException e) {
throw new ClientGoneException(e);
} finally {
IOUtils.closeQuietly(out);
}
}
private byte[] drainInputStream(InputStream in) throws IOException {
try {
return ByteStreams.toByteArray(in);
} finally {
in.close();
}
}
private String getResponseUtf8Content(InputStream in) {
String res;
StringBuilder sb = new StringBuilder();
String line;
try {
BufferedReader reader = new BufferedReader(new InputStreamReader(in, "UTF-8"));
while ((line = reader.readLine()) != null) {
// TODO freynaud bug ?
sb.append(line);/* .append("\n") */
}
in.close();
} catch (UnsupportedEncodingException e) {
throw new RuntimeException(e);
} catch (IOException e) {
throw new RuntimeException(e);
}
res = sb.toString();
return res;
}
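// Copies the node's response headers onto the servlet response, dropping a
// chunked Transfer-Encoding header (the body is re-sent with a Content-Length)
// and rewriting the Location header so that redirects point back through the
// hub rather than directly at the node.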
private void processResponseHeaders(HttpServletRequest request, HttpServletResponse response,
URL remoteURL, HttpResponse proxyResponse)
throws MalformedURLException {
String pathSpec = request.getServletPath() + request.getContextPath();
for (Header header : proxyResponse.getAllHeaders()) {
String name = header.getName();
String value = header.getValue();
// HttpEntity#getContent() chews up the chunk-size octet (i.e., the InputStream does not
// actually map 1:1 to the underlying response body). This breaks any client expecting the
// chunk size. We could try to recreate it, but since the chunks are already read in and
// decoded, you'd end up with a single chunk, which isn't all that useful. So, we return the
// response as a traditional response with a Content-Length header, obviating the need for
// the Transfer-Encoding header.
if (name.equalsIgnoreCase("Transfer-Encoding") && value.equalsIgnoreCase("chunked")) {
continue;
}
// the location needs to point to the hub that will proxy
// everything.
if (name.equalsIgnoreCase("Location")) {
URL returnedLocation = new URL(value);
String driverPath = remoteURL.getPath();
String wrongPath = returnedLocation.getPath();
String correctPath = wrongPath.replace(driverPath, "");
Hub hub = slot.getProxy().getRegistry().getHub();
String location = "http://" + hub.getHost() + ":" + hub.getPort() + pathSpec + correctPath;
response.setHeader(name, location);
} else {
response.setHeader(name, value);
}
}
}
/**
* Allows you to retrieve an object previously stored on the test session.
*
* @return the object you stored
*/
public Object get(String key) {
return objects.get(key);
}
/**
* Allows you to store an object on the test session.
*
* @param key a non-null string
*/
public void put(String key, Object value) {
objects.put(key, value);
}
/**
* Sends a DELETE/testComplete (webdriver/selenium) session command to the remote, following web
* driver protocol.
*
* @return true if the remote replied successfully to the request.
*/
public boolean sendDeleteSessionRequest() {
URL remoteURL = slot.getRemoteURL();
HttpRequest request;
switch (slot.getProtocol()) {
case Selenium:
request =
new BasicHttpRequest("POST", remoteURL.toExternalForm()
+ "/?cmd=testComplete&sessionId=" + getExternalKey()
.getKey());
break;
case WebDriver:
String uri = remoteURL.toString() + "/session/" + externalKey;
request = new BasicHttpRequest("DELETE", uri);
break;
default:
throw new GridException("Error, protocol not implemented.");
}
HttpHost host = new HttpHost(remoteURL.getHost(), remoteURL.getPort());
HttpEntity responseBody = null;
boolean ok;
try {
HttpClient client = getClient();
HttpResponse response = client.execute(host, request);
responseBody = response.getEntity();
int code = response.getStatusLine().getStatusCode();
ok = (code >= 200) && (code <= 299);
} catch (Throwable e) {
ok = false;
// corrupted or something else already sent the DELETE.
log.severe("Error releasing. Server corrupted ?");
} finally {
try {
EntityUtils.consume(responseBody);
} catch (IOException e) {
log.warning("Error consuming the response body when sending DELETE to the node: " + e.getMessage());
}
}
return ok;
}
/**
* allows bypassing the timeout for this session. ignore = true => the session will not time out.
* setIgnoreTimeout(false) also updates the lastActivity to now.
*/
public void setIgnoreTimeout(boolean ignore) {
if (!ignore) {
lastActivity = timeSource.currentTimeInMillis();
}
this.ignoreTimeout = ignore;
}
public boolean isForwardingRequest() {
return forwardingRequest;
}
}
| 1 | 11,531 | Again, you don't want to depend on glassfish. | SeleniumHQ-selenium | js |
@@ -1,6 +1,6 @@
FactoryGirl.define do
factory :post do
- association :account
+ association :account, factory: :account
association :topic
body { Faker::Lorem.sentence }
sequence :created_at do |n| | 1 | FactoryGirl.define do
factory :post do
association :account
association :topic
body { Faker::Lorem.sentence }
sequence :created_at do |n|
Time.now + n
end
end
end
| 1 | 7,189 | This shouldn't be required. What was going on here? | blackducksoftware-ohloh-ui | rb |
@@ -80,6 +80,7 @@ func (cp ConnectionPolicies) TLSConfig(ctx caddy.Context) *tls.Config {
}
return &tls.Config{
+ MinVersion: tls.VersionTLS12,
GetConfigForClient: func(hello *tls.ClientHelloInfo) (*tls.Config, error) {
// filter policies by SNI first, if possible, to speed things up
// when there may be lots of policies | 1 | // Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package caddytls
import (
"crypto/tls"
"crypto/x509"
"encoding/base64"
"fmt"
"strings"
"github.com/caddyserver/caddy/v2"
"github.com/mholt/acmez"
)
// ConnectionPolicies govern the establishment of TLS connections. It is
// an ordered group of connection policies; the first matching policy will
// be used to configure TLS connections at handshake-time.
type ConnectionPolicies []*ConnectionPolicy
// Provision sets up each connection policy. It should be called
// during the Validate() phase, after the TLS app (if any) is
// already set up.
func (cp ConnectionPolicies) Provision(ctx caddy.Context) error {
for i, pol := range cp {
// matchers
mods, err := ctx.LoadModule(pol, "MatchersRaw")
if err != nil {
return fmt.Errorf("loading handshake matchers: %v", err)
}
for _, modIface := range mods.(map[string]interface{}) {
cp[i].matchers = append(cp[i].matchers, modIface.(ConnectionMatcher))
}
// enable HTTP/2 by default
if len(pol.ALPN) == 0 {
pol.ALPN = append(pol.ALPN, defaultALPN...)
}
// pre-build standard TLS config so we don't have to at handshake-time
err = pol.buildStandardTLSConfig(ctx)
if err != nil {
return fmt.Errorf("connection policy %d: building standard TLS config: %s", i, err)
}
}
return nil
}
// TLSConfig returns a standard-lib-compatible TLS configuration which
// selects the first matching policy based on the ClientHello.
func (cp ConnectionPolicies) TLSConfig(ctx caddy.Context) *tls.Config {
// using ServerName to match policies is extremely common, especially in configs
// with lots and lots of different policies; we can fast-track those by indexing
// them by SNI, so we don't have to iterate potentially thousands of policies
// (TODO: this map does not account for wildcards, see if this is a problem in practice?)
indexedBySNI := make(map[string]ConnectionPolicies)
if len(cp) > 30 {
for _, p := range cp {
for _, m := range p.matchers {
if sni, ok := m.(MatchServerName); ok {
for _, sniName := range sni {
indexedBySNI[sniName] = append(indexedBySNI[sniName], p)
}
}
}
}
}
return &tls.Config{
GetConfigForClient: func(hello *tls.ClientHelloInfo) (*tls.Config, error) {
// filter policies by SNI first, if possible, to speed things up
// when there may be lots of policies
possiblePolicies := cp
if indexedPolicies, ok := indexedBySNI[hello.ServerName]; ok {
possiblePolicies = indexedPolicies
}
policyLoop:
for _, pol := range possiblePolicies {
for _, matcher := range pol.matchers {
if !matcher.Match(hello) {
continue policyLoop
}
}
return pol.stdTLSConfig, nil
}
return nil, fmt.Errorf("no server TLS configuration available for ClientHello: %+v", hello)
},
}
}
// ConnectionPolicy specifies the logic for handling a TLS handshake.
// An empty policy is valid; safe and sensible defaults will be used.
type ConnectionPolicy struct {
// How to match this policy with a TLS ClientHello. If
// this policy is the first to match, it will be used.
MatchersRaw caddy.ModuleMap `json:"match,omitempty" caddy:"namespace=tls.handshake_match"`
// How to choose a certificate if more than one matched
// the given ServerName (SNI) value.
CertSelection *CustomCertSelectionPolicy `json:"certificate_selection,omitempty"`
// The list of cipher suites to support. Caddy's
// defaults are modern and secure.
CipherSuites []string `json:"cipher_suites,omitempty"`
// The list of elliptic curves to support. Caddy's
// defaults are modern and secure.
Curves []string `json:"curves,omitempty"`
// Protocols to use for Application-Layer Protocol
// Negotiation (ALPN) during the handshake.
ALPN []string `json:"alpn,omitempty"`
// Minimum TLS protocol version to allow. Default: `tls1.2`
ProtocolMin string `json:"protocol_min,omitempty"`
// Maximum TLS protocol version to allow. Default: `tls1.3`
ProtocolMax string `json:"protocol_max,omitempty"`
// Enables and configures TLS client authentication.
ClientAuthentication *ClientAuthentication `json:"client_authentication,omitempty"`
// DefaultSNI becomes the ServerName in a ClientHello if there
// is no policy configured for the empty SNI value.
DefaultSNI string `json:"default_sni,omitempty"`
matchers []ConnectionMatcher
stdTLSConfig *tls.Config
}
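// buildStandardTLSConfig pre-builds the crypto/tls.Config for this connection
// policy: ALPN protocols (including the ACME TLS-ALPN protocol), cipher
// suites, curve preferences, protocol version bounds, session ticket support,
// and client authentication. Certificate selection itself is deferred to the
// certmagic config obtained per ServerName via GetCertificate at handshake time.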
func (p *ConnectionPolicy) buildStandardTLSConfig(ctx caddy.Context) error {
tlsAppIface, err := ctx.App("tls")
if err != nil {
return fmt.Errorf("getting tls app: %v", err)
}
tlsApp := tlsAppIface.(*TLS)
// fill in some "easy" default values, but for other values
// (such as slices), we should ensure that they start empty
// so the user-provided config can fill them in; then we will
// fill in a default config at the end if they are still unset
cfg := &tls.Config{
NextProtos: p.ALPN,
PreferServerCipherSuites: true,
GetCertificate: func(hello *tls.ClientHelloInfo) (*tls.Certificate, error) {
// TODO: I don't love how this works: we pre-build certmagic configs
// so that handshakes are faster. Unfortunately, certmagic configs are
// comprised of settings from both a TLS connection policy and a TLS
// automation policy. The only two fields (as of March 2020; v2 beta 17)
// of a certmagic config that come from the TLS connection policy are
// CertSelection and DefaultServerName, so an automation policy is what
// builds the base certmagic config. Since the pre-built config is
// shared, I don't think we can change any of its fields per-handshake,
// hence the awkward shallow copy (dereference) here and the subsequent
// changing of some of its fields. I'm worried this dereference allocates
// more at handshake-time, but I don't know how to practically pre-build
// a certmagic config for each combination of conn policy + automation policy...
cfg := *tlsApp.getConfigForName(hello.ServerName)
if p.CertSelection != nil {
// you would think we could just set this whether or not
// p.CertSelection is nil, but that leads to panics if
// it is, because cfg.CertSelection is an interface,
// so it will have a non-nil value even if the actual
// value underlying it is nil (sigh)
cfg.CertSelection = p.CertSelection
}
cfg.DefaultServerName = p.DefaultSNI
return cfg.GetCertificate(hello)
},
MinVersion: tls.VersionTLS12,
MaxVersion: tls.VersionTLS13,
}
// session tickets support
if tlsApp.SessionTickets != nil {
cfg.SessionTicketsDisabled = tlsApp.SessionTickets.Disabled
// session ticket key rotation
tlsApp.SessionTickets.register(cfg)
ctx.OnCancel(func() {
// do cleanup when the context is canceled because,
// though unlikely, it is possible that a context
// needing a TLS server config could exist for less
// than the lifetime of the whole app
tlsApp.SessionTickets.unregister(cfg)
})
}
// TODO: Clean up session ticket active locks in storage if app (or process) is being closed!
// add all the cipher suites in order, without duplicates
cipherSuitesAdded := make(map[uint16]struct{})
for _, csName := range p.CipherSuites {
csID := CipherSuiteID(csName)
if csID == 0 {
return fmt.Errorf("unsupported cipher suite: %s", csName)
}
if _, ok := cipherSuitesAdded[csID]; !ok {
cipherSuitesAdded[csID] = struct{}{}
cfg.CipherSuites = append(cfg.CipherSuites, csID)
}
}
// add all the curve preferences in order, without duplicates
curvesAdded := make(map[tls.CurveID]struct{})
for _, curveName := range p.Curves {
curveID := SupportedCurves[curveName]
if _, ok := curvesAdded[curveID]; !ok {
curvesAdded[curveID] = struct{}{}
cfg.CurvePreferences = append(cfg.CurvePreferences, curveID)
}
}
// ensure ALPN includes the ACME TLS-ALPN protocol
var alpnFound bool
for _, a := range p.ALPN {
if a == acmez.ACMETLS1Protocol {
alpnFound = true
break
}
}
if !alpnFound {
cfg.NextProtos = append(cfg.NextProtos, acmez.ACMETLS1Protocol)
}
// min and max protocol versions
if (p.ProtocolMin != "" && p.ProtocolMax != "") && p.ProtocolMin > p.ProtocolMax {
return fmt.Errorf("protocol min (%x) cannot be greater than protocol max (%x)", p.ProtocolMin, p.ProtocolMax)
}
if p.ProtocolMin != "" {
cfg.MinVersion = SupportedProtocols[p.ProtocolMin]
}
if p.ProtocolMax != "" {
cfg.MaxVersion = SupportedProtocols[p.ProtocolMax]
}
// client authentication
if p.ClientAuthentication != nil {
err := p.ClientAuthentication.ConfigureTLSConfig(cfg)
if err != nil {
return fmt.Errorf("configuring TLS client authentication: %v", err)
}
}
setDefaultTLSParams(cfg)
p.stdTLSConfig = cfg
return nil
}
// SettingsEmpty returns true if p's settings (fields
// except the matchers) are all empty/unset.
func (p ConnectionPolicy) SettingsEmpty() bool {
return p.CertSelection == nil &&
p.CipherSuites == nil &&
p.Curves == nil &&
p.ALPN == nil &&
p.ProtocolMin == "" &&
p.ProtocolMax == "" &&
p.ClientAuthentication == nil &&
p.DefaultSNI == ""
}
// ClientAuthentication configures TLS client auth.
type ClientAuthentication struct {
// A list of base64 DER-encoded CA certificates
// against which to validate client certificates.
// Client certs which are not signed by any of
// these CAs will be rejected.
TrustedCACerts []string `json:"trusted_ca_certs,omitempty"`
// A list of base64 DER-encoded client leaf certs
// to accept. If this list is not empty, client certs
// which are not in this list will be rejected.
TrustedLeafCerts []string `json:"trusted_leaf_certs,omitempty"`
// The mode for authenticating the client. Allowed values are:
//
// Mode | Description
// -----|---------------
// `request` | Ask clients for a certificate, but allow even if there isn't one; do not verify it
// `require` | Require clients to present a certificate, but do not verify it
// `verify_if_given` | Ask clients for a certificate; allow even if there isn't one, but verify it if there is
// `require_and_verify` | Require clients to present a valid certificate that is verified
//
// The default mode is `require_and_verify` if any
// TrustedCACerts or TrustedLeafCerts are provided;
// otherwise, the default mode is `require`.
Mode string `json:"mode,omitempty"`
// state established with the last call to ConfigureTLSConfig
trustedLeafCerts []*x509.Certificate
existingVerifyPeerCert func([][]byte, [][]*x509.Certificate) error
}
// Active returns true if clientauth has an actionable configuration.
func (clientauth ClientAuthentication) Active() bool {
return len(clientauth.TrustedCACerts) > 0 || len(clientauth.TrustedLeafCerts) > 0 || len(clientauth.Mode) > 0
}
// ConfigureTLSConfig sets up cfg to enforce clientauth's configuration.
func (clientauth *ClientAuthentication) ConfigureTLSConfig(cfg *tls.Config) error {
// if there's no actionable client auth, simply disable it
if !clientauth.Active() {
cfg.ClientAuth = tls.NoClientCert
return nil
}
// enforce desired mode of client authentication
if len(clientauth.Mode) > 0 {
switch clientauth.Mode {
case "request":
cfg.ClientAuth = tls.RequestClientCert
case "require":
cfg.ClientAuth = tls.RequireAnyClientCert
case "verify_if_given":
cfg.ClientAuth = tls.VerifyClientCertIfGiven
case "require_and_verify":
cfg.ClientAuth = tls.RequireAndVerifyClientCert
default:
return fmt.Errorf("client auth mode not recognized: %s", clientauth.Mode)
}
} else {
// otherwise, set a safe default mode
if len(clientauth.TrustedCACerts) > 0 || len(clientauth.TrustedLeafCerts) > 0 {
cfg.ClientAuth = tls.RequireAndVerifyClientCert
} else {
cfg.ClientAuth = tls.RequireAnyClientCert
}
}
// enforce CA verification by adding CA certs to the ClientCAs pool
if len(clientauth.TrustedCACerts) > 0 {
caPool := x509.NewCertPool()
for _, clientCAString := range clientauth.TrustedCACerts {
clientCA, err := decodeBase64DERCert(clientCAString)
if err != nil {
return fmt.Errorf("parsing certificate: %v", err)
}
caPool.AddCert(clientCA)
}
cfg.ClientCAs = caPool
}
// enforce leaf verification by writing our own verify function
if len(clientauth.TrustedLeafCerts) > 0 {
clientauth.trustedLeafCerts = []*x509.Certificate{}
for _, clientCertString := range clientauth.TrustedLeafCerts {
clientCert, err := decodeBase64DERCert(clientCertString)
if err != nil {
return fmt.Errorf("parsing certificate: %v", err)
}
clientauth.trustedLeafCerts = append(clientauth.trustedLeafCerts, clientCert)
}
// if a custom verification function already exists, wrap it
clientauth.existingVerifyPeerCert = cfg.VerifyPeerCertificate
cfg.VerifyPeerCertificate = clientauth.verifyPeerCertificate
}
return nil
}
// verifyPeerCertificate is for use as a tls.Config.VerifyPeerCertificate
// callback to do custom client certificate verification. It is intended
// for installation only by clientauth.ConfigureTLSConfig().
func (clientauth ClientAuthentication) verifyPeerCertificate(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error {
// first use any pre-existing custom verification function
if clientauth.existingVerifyPeerCert != nil {
err := clientauth.existingVerifyPeerCert(rawCerts, verifiedChains)
if err != nil {
return err
}
}
if len(rawCerts) == 0 {
return fmt.Errorf("no client certificate provided")
}
remoteLeafCert, err := x509.ParseCertificate(rawCerts[0])
if err != nil {
return fmt.Errorf("can't parse the given certificate: %s", err.Error())
}
for _, trustedLeafCert := range clientauth.trustedLeafCerts {
if remoteLeafCert.Equal(trustedLeafCert) {
return nil
}
}
return fmt.Errorf("client leaf certificate failed validation")
}
// decodeBase64DERCert base64-decodes, then DER-decodes, certStr.
func decodeBase64DERCert(certStr string) (*x509.Certificate, error) {
derBytes, err := base64.StdEncoding.DecodeString(certStr)
if err != nil {
return nil, err
}
return x509.ParseCertificate(derBytes)
}
// setDefaultTLSParams sets the default TLS cipher suites, protocol versions,
// and server preferences of cfg if they are not already set; it does not
// overwrite values, only fills in missing values.
func setDefaultTLSParams(cfg *tls.Config) {
if len(cfg.CipherSuites) == 0 {
cfg.CipherSuites = getOptimalDefaultCipherSuites()
}
// Not a cipher suite, but still important for mitigating protocol downgrade attacks
// (prepend since having it at end breaks http2 due to non-h2-approved suites before it)
cfg.CipherSuites = append([]uint16{tls.TLS_FALLBACK_SCSV}, cfg.CipherSuites...)
if len(cfg.CurvePreferences) == 0 {
cfg.CurvePreferences = defaultCurves
}
if cfg.MinVersion == 0 {
cfg.MinVersion = tls.VersionTLS12
}
if cfg.MaxVersion == 0 {
cfg.MaxVersion = tls.VersionTLS13
}
cfg.PreferServerCipherSuites = true
}
// PublicKeyAlgorithm is a JSON-unmarshalable wrapper type.
type PublicKeyAlgorithm x509.PublicKeyAlgorithm
// UnmarshalJSON satisfies json.Unmarshaler.
func (a *PublicKeyAlgorithm) UnmarshalJSON(b []byte) error {
algoStr := strings.ToLower(strings.Trim(string(b), `"`))
algo, ok := publicKeyAlgorithms[algoStr]
if !ok {
return fmt.Errorf("unrecognized public key algorithm: %s (expected one of %v)",
algoStr, publicKeyAlgorithms)
}
*a = PublicKeyAlgorithm(algo)
return nil
}
// ConnectionMatcher is a type which matches TLS handshakes.
type ConnectionMatcher interface {
Match(*tls.ClientHelloInfo) bool
}
var defaultALPN = []string{"h2", "http/1.1"}
| 1 | 15,761 | Why add this here? | caddyserver-caddy | go |
@@ -141,15 +141,15 @@ namespace AutoRest.CSharp.Model
{
if (ReturnType.Body != null && ReturnType.Headers != null)
{
- return $"Microsoft.Rest.HttpOperationResponse<{ReturnType.Body.AsNullableType(HttpMethod != HttpMethod.Head)},{ReturnType.Headers.AsNullableType(HttpMethod != HttpMethod.Head)}>";
+ return $"Microsoft.Rest.HttpOperationResponse<{ReturnType.Body.AsNullableType(HttpMethod != HttpMethod.Head && IsXNullableReturnType)},{ReturnType.Headers.AsNullableType(HttpMethod != HttpMethod.Head)}>";
}
if (ReturnType.Body != null)
{
- return $"Microsoft.Rest.HttpOperationResponse<{ReturnType.Body.AsNullableType(HttpMethod != HttpMethod.Head)}>";
+ return $"Microsoft.Rest.HttpOperationResponse<{ReturnType.Body.AsNullableType(HttpMethod != HttpMethod.Head && IsXNullableReturnType)}>";
}
if (ReturnType.Headers != null)
{
- return $"Microsoft.Rest.HttpOperationHeaderResponse<{ReturnType.Headers.AsNullableType(HttpMethod != HttpMethod.Head)}>";
+ return $"Microsoft.Rest.HttpOperationHeaderResponse<{ReturnType.Headers.AsNullableType(HttpMethod != HttpMethod.Head && IsXNullableReturnType)}>";
}
return "Microsoft.Rest.HttpOperationResponse"; | 1 | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
using System;
using System.Collections.Generic;
using System.Globalization;
using System.Linq;
using System.Net;
using System.Text.RegularExpressions;
using AutoRest.Core.Model;
using AutoRest.Core.Utilities;
using AutoRest.Extensions;
using Newtonsoft.Json;
namespace AutoRest.CSharp.Model
{
public class MethodCs : Method
{
public MethodCs()
{
}
public bool IsCustomBaseUri
=> CodeModel.Extensions.ContainsKey(SwaggerExtensions.ParameterizedHostExtension);
public SyncMethodsGenerationMode SyncMethods { get; set; }
/// <summary>
/// Get the predicate to determine of the http operation status code indicates failure
/// </summary>
public string FailureStatusCodePredicate
{
get
{
if (Responses.Any())
{
List<string> predicates = new List<string>();
foreach (var responseStatus in Responses.Keys)
{
predicates.Add(string.Format(CultureInfo.InvariantCulture,
"(int)_statusCode != {0}", GetStatusCodeReference(responseStatus)));
}
return string.Join(" && ", predicates);
}
return "!_httpResponse.IsSuccessStatusCode";
}
}
/// <summary>
/// Generate the method parameter declaration for async methods and extensions
/// </summary>
public virtual string GetAsyncMethodParameterDeclaration()
{
return this.GetAsyncMethodParameterDeclaration(false);
}
/// <summary>
/// Generate the method parameter declaration for sync methods and extensions
/// </summary>
/// <param name="addCustomHeaderParameters">If true add the customHeader to the parameters</param>
/// <returns>Generated string of parameters</returns>
public virtual string GetSyncMethodParameterDeclaration(bool addCustomHeaderParameters)
{
List<string> declarations = new List<string>();
foreach (var parameter in LocalParameters)
{
string format = (parameter.IsRequired ? "{0} {1}" : "{0} {1} = {2}");
string defaultValue = $"default({parameter.ModelTypeName})";
if (!string.IsNullOrEmpty(parameter.DefaultValue) && parameter.ModelType is PrimaryType)
{
defaultValue = parameter.DefaultValue;
}
declarations.Add(string.Format(CultureInfo.InvariantCulture,
format, parameter.ModelTypeName, parameter.Name, defaultValue));
}
if (addCustomHeaderParameters)
{
declarations.Add("System.Collections.Generic.Dictionary<string, System.Collections.Generic.List<string>> customHeaders = null");
}
return string.Join(", ", declarations);
}
/// <summary>
/// Generate the method parameter declaration for async methods and extensions
/// </summary>
/// <param name="addCustomHeaderParameters">If true add the customHeader to the parameters</param>
/// <returns>Generated string of parameters</returns>
public virtual string GetAsyncMethodParameterDeclaration(bool addCustomHeaderParameters)
{
var declarations = this.GetSyncMethodParameterDeclaration(addCustomHeaderParameters);
if (!string.IsNullOrEmpty(declarations))
{
declarations += ", ";
}
declarations += "System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)";
return declarations;
}
/// <summary>
/// Arguments for invoking the method from a synchronous extension method
/// </summary>
public string SyncMethodInvocationArgs => string.Join(", ", LocalParameters.Select(each => each.Name));
/// <summary>
/// Get the invocation args for an invocation with an async method
/// </summary>
public string GetAsyncMethodInvocationArgs(string customHeaderReference, string cancellationTokenReference = "cancellationToken") => string.Join(", ", LocalParameters.Select(each => (string)each.Name).Concat(new[] { customHeaderReference, cancellationTokenReference }));
/// <summary>
/// Get the parameters that are actually method parameters in the order they appear in the method signature
/// exclude global parameters
/// </summary>
[JsonIgnore]
public IEnumerable<ParameterCs> LocalParameters
{
get
{
return
Parameters.Where(parameter =>
parameter != null &&
!parameter.IsClientProperty &&
!string.IsNullOrWhiteSpace(parameter.Name) &&
!parameter.IsConstant)
.OrderBy(item => !item.IsRequired).Cast<ParameterCs>();
}
}
/// <summary>
/// Get the return type name for the underlying interface method
/// </summary>
public virtual string OperationResponseReturnTypeString
{
get
{
if (ReturnType.Body != null && ReturnType.Headers != null)
{
return $"Microsoft.Rest.HttpOperationResponse<{ReturnType.Body.AsNullableType(HttpMethod != HttpMethod.Head)},{ReturnType.Headers.AsNullableType(HttpMethod != HttpMethod.Head)}>";
}
if (ReturnType.Body != null)
{
return $"Microsoft.Rest.HttpOperationResponse<{ReturnType.Body.AsNullableType(HttpMethod != HttpMethod.Head)}>";
}
if (ReturnType.Headers != null)
{
return $"Microsoft.Rest.HttpOperationHeaderResponse<{ReturnType.Headers.AsNullableType(HttpMethod != HttpMethod.Head)}>";
}
return "Microsoft.Rest.HttpOperationResponse";
}
}
/// <summary>
/// Get the return type for the async extension method
/// </summary>
public virtual string TaskExtensionReturnTypeString
{
get
{
if (ReturnType.Body != null)
{
return string.Format(CultureInfo.InvariantCulture,
"System.Threading.Tasks.Task<{0}>", ReturnType.Body.AsNullableType(HttpMethod != HttpMethod.Head));
}
else if (ReturnType.Headers != null)
{
return string.Format(CultureInfo.InvariantCulture,
"System.Threading.Tasks.Task<{0}>", ReturnType.Headers.AsNullableType(HttpMethod != HttpMethod.Head));
}
else
{
return "System.Threading.Tasks.Task";
}
}
}
/// <summary>
/// Get the type for operation exception
/// </summary>
public virtual string OperationExceptionTypeString
{
get
{
if (this.DefaultResponse.Body is CompositeType)
{
CompositeType type = this.DefaultResponse.Body as CompositeType;
if (type.Extensions.ContainsKey(SwaggerExtensions.NameOverrideExtension))
{
var ext = type.Extensions[SwaggerExtensions.NameOverrideExtension] as Newtonsoft.Json.Linq.JContainer;
if (ext != null && ext["name"] != null)
{
return ext["name"].ToString();
}
}
return type.Name + "Exception";
}
else
{
return "Microsoft.Rest.HttpOperationException";
}
}
}
/// <summary>
/// Get the expression for exception initialization with message.
/// </summary>
public virtual string InitializeExceptionWithMessage
{
get
{
return string.Empty;
}
}
/// <summary>
/// Get the expression for exception initialization with message.
/// </summary>
public virtual string InitializeException
{
get
{
return string.Empty;
}
}
/// <summary>
/// Gets the expression for response body initialization.
/// </summary>
public virtual string InitializeResponseBody
{
get
{
return string.Empty;
}
}
/// <summary>
/// Gets the expression for default header setting.
/// </summary>
public virtual string SetDefaultHeaders
{
get
{
return string.Empty;
}
}
/// <summary>
/// Get the type name for the method's return type
/// </summary>
public virtual string ReturnTypeString
{
get
{
if (ReturnType.Body != null)
{
return ReturnType.Body.AsNullableType(HttpMethod != HttpMethod.Head);
}
if (ReturnType.Headers != null)
{
return ReturnType.Headers.AsNullableType(HttpMethod != HttpMethod.Head);
}
else
{
return "void";
}
}
}
/// <summary>
/// Get the method's request body (or null if there is no request body)
/// </summary>
[JsonIgnore]
public ParameterCs RequestBody => Body as ParameterCs;
/// <summary>
/// Generate a reference to the ServiceClient
/// </summary>
[JsonIgnore]
public string ClientReference => Group.IsNullOrEmpty() ? "this" : "this.Client";
/// <summary>
/// Returns serialization settings reference.
/// </summary>
/// <param name="serializationType"></param>
/// <returns></returns>
public string GetSerializationSettingsReference(IModelType serializationType)
{
if (serializationType.IsOrContainsPrimaryType(KnownPrimaryType.Date))
{
return "new Microsoft.Rest.Serialization.DateJsonConverter()";
}
else if (serializationType.IsOrContainsPrimaryType(KnownPrimaryType.DateTimeRfc1123))
{
return "new Microsoft.Rest.Serialization.DateTimeRfc1123JsonConverter()";
}
else if (serializationType.IsOrContainsPrimaryType(KnownPrimaryType.Base64Url))
{
return "new Microsoft.Rest.Serialization.Base64UrlJsonConverter()";
}
else if (serializationType.IsOrContainsPrimaryType(KnownPrimaryType.UnixTime))
{
return "new Microsoft.Rest.Serialization.UnixTimeJsonConverter()";
}
return ClientReference + ".SerializationSettings";
}
/// <summary>
/// Returns deserialization settings reference.
/// </summary>
/// <param name="deserializationType"></param>
/// <returns></returns>
public string GetDeserializationSettingsReference(IModelType deserializationType)
{
if (deserializationType.IsOrContainsPrimaryType(KnownPrimaryType.Date))
{
return "new Microsoft.Rest.Serialization.DateJsonConverter()";
}
else if (deserializationType.IsOrContainsPrimaryType(KnownPrimaryType.Base64Url))
{
return "new Microsoft.Rest.Serialization.Base64UrlJsonConverter()";
}
else if (deserializationType.IsOrContainsPrimaryType(KnownPrimaryType.UnixTime))
{
return "new Microsoft.Rest.Serialization.UnixTimeJsonConverter()";
}
return ClientReference + ".DeserializationSettings";
}
public string GetExtensionParameters(string methodParameters)
{
string operationsParameter = "this I" + MethodGroup.TypeName + " operations";
return string.IsNullOrWhiteSpace(methodParameters)
? operationsParameter
: operationsParameter + ", " + methodParameters;
}
public static string GetStatusCodeReference(HttpStatusCode code)
{
return ((int)code).ToString(CultureInfo.InvariantCulture);
}
/// <summary>
/// Generate code to build the URL from a url expression and method parameters
/// </summary>
/// <param name="variableName">The variable to store the url in.</param>
/// <returns></returns>
public virtual string BuildUrl(string variableName)
{
var builder = new IndentedStringBuilder();
foreach (var pathParameter in this.LogicalParameters.Where(p => p.Location == ParameterLocation.Path))
{
string replaceString = "{0} = {0}.Replace(\"{{{1}}}\", System.Uri.EscapeDataString({2}));";
if (pathParameter.SkipUrlEncoding())
{
replaceString = "{0} = {0}.Replace(\"{{{1}}}\", {2});";
}
var urlPathName = pathParameter.SerializedName;
if (pathParameter.ModelType is SequenceType)
{
builder.AppendLine(replaceString,
variableName,
urlPathName,
pathParameter.GetFormattedReferenceValue(ClientReference));
}
else
{
builder.AppendLine(replaceString,
variableName,
urlPathName,
pathParameter.ModelType.ToString(ClientReference, pathParameter.Name));
}
}
if (this.LogicalParameters.Any(p => p.Location == ParameterLocation.Query))
{
builder.AppendLine("System.Collections.Generic.List<string> _queryParameters = new System.Collections.Generic.List<string>();");
foreach (var queryParameter in this.LogicalParameters.Where(p => p.Location == ParameterLocation.Query))
{
var replaceString = "_queryParameters.Add(string.Format(\"{0}={{0}}\", System.Uri.EscapeDataString({1})));";
if ((queryParameter as ParameterCs).IsNullable())
{
builder.AppendLine("if ({0} != null)", queryParameter.Name)
.AppendLine("{").Indent();
}
if (queryParameter.SkipUrlEncoding())
{
replaceString = "_queryParameters.Add(string.Format(\"{0}={{0}}\", {1}));";
}
if (queryParameter.CollectionFormat == CollectionFormat.Multi)
{
builder.AppendLine("if ({0}.Count == 0)", queryParameter.Name)
.AppendLine("{").Indent()
.AppendLine(replaceString, queryParameter.SerializedName, "string.Empty").Outdent()
.AppendLine("}")
.AppendLine("else")
.AppendLine("{").Indent()
.AppendLine("foreach (var _item in {0})", queryParameter.Name)
.AppendLine("{").Indent()
.AppendLine(replaceString, queryParameter.SerializedName, "_item ?? string.Empty").Outdent()
.AppendLine("}").Outdent()
.AppendLine("}").Outdent();
}
else
{
builder.AppendLine(replaceString,
queryParameter.SerializedName, queryParameter.GetFormattedReferenceValue(ClientReference));
}
if ((queryParameter as ParameterCs).IsNullable())
{
builder.Outdent()
.AppendLine("}");
}
}
builder.AppendLine("if (_queryParameters.Count > 0)")
.AppendLine("{").Indent();
if (this.Extensions.ContainsKey("nextLinkMethod") && (bool)this.Extensions["nextLinkMethod"])
{
builder.AppendLine("{0} += ({0}.Contains(\"?\") ? \"&\" : \"?\") + string.Join(\"&\", _queryParameters);", variableName);
}
else
{
builder.AppendLine("{0} += \"?\" + string.Join(\"&\", _queryParameters);", variableName);
}
builder.Outdent().AppendLine("}");
}
return builder.ToString();
}
/// <summary>
/// Generates input mapping code block.
/// </summary>
/// <returns></returns>
public virtual string BuildInputMappings()
{
var builder = new IndentedStringBuilder();
foreach (var transformation in InputParameterTransformation)
{
var compositeOutputParameter = transformation.OutputParameter.ModelType as CompositeType;
if (transformation.OutputParameter.IsRequired && compositeOutputParameter != null)
{
builder.AppendLine("{0} {1} = new {0}();",
transformation.OutputParameter.ModelTypeName,
transformation.OutputParameter.Name);
}
else
{
builder.AppendLine("{0} {1} = default({0});",
transformation.OutputParameter.ModelTypeName,
transformation.OutputParameter.Name);
}
var nullCheck = BuildNullCheckExpression(transformation);
if (!string.IsNullOrEmpty(nullCheck))
{
builder.AppendLine("if ({0})", nullCheck)
.AppendLine("{").Indent();
}
if (transformation.ParameterMappings.Any(m => !string.IsNullOrEmpty(m.OutputParameterProperty)) &&
compositeOutputParameter != null && !transformation.OutputParameter.IsRequired)
{
builder.AppendLine("{0} = new {1}();",
transformation.OutputParameter.Name,
transformation.OutputParameter.ModelType.Name);
}
foreach (var mapping in transformation.ParameterMappings)
{
builder.AppendLine("{0};", mapping.CreateCode(transformation.OutputParameter));
}
if (!string.IsNullOrEmpty(nullCheck))
{
builder.Outdent()
.AppendLine("}");
}
}
return builder.ToString();
}
private static string BuildNullCheckExpression(ParameterTransformation transformation)
{
if (transformation == null)
{
throw new ArgumentNullException("transformation");
}
return string.Join(" || ",
transformation.ParameterMappings
.Where(m => m.InputParameter.IsNullable())
.Select(m => m.InputParameter.Name + " != null"));
}
}
}
| 1 | 23,839 | nullability does not apply to headers type (that is always an object) | Azure-autorest | java |
@@ -166,7 +166,7 @@ static fpga_result map_mmio_region(fpga_handle handle, uint32_t mmio_num)
wsid = wsid_gen();
if (!wsid_add(&_handle->mmio_root,
wsid,
- (uint64_t) NULL,
+ (uint64_t) addr,
(uint64_t) NULL,
size,
(uint64_t) addr, | 1 | // Copyright(c) 2017, Intel Corporation
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Intel Corporation nor the names of its contributors
// may be used to endorse or promote products derived from this software
// without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif // HAVE_CONFIG_H
#include "opae/access.h"
#include "opae/utils.h"
#include "common_int.h"
#include "intel-fpga.h"
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <stdbool.h>
#include <stdint.h>
/* Port UAFU */
#define AFU_PERMISSION (FPGA_REGION_READ | FPGA_REGION_WRITE | FPGA_REGION_MMAP)
#define AFU_SIZE 0x40000
#define AFU_OFFSET 0
static fpga_result port_get_region_info(fpga_handle handle,
uint32_t mmio_num,
uint32_t *flags,
uint64_t *size,
uint64_t *offset)
{
int err;
struct _fpga_handle *_handle = (struct _fpga_handle *) handle;
fpga_result result = FPGA_OK;
ASSERT_NOT_NULL(flags);
ASSERT_NOT_NULL(size);
ASSERT_NOT_NULL(offset);
result = handle_check_and_lock(_handle);
if (result)
return result;
/* Set ioctl fpga_port_region_info struct parameters */
struct fpga_port_region_info rinfo = {.argsz = sizeof(rinfo),
.padding = 0,
.index = (__u32) mmio_num};
/* Dispatch ioctl command */
if (ioctl(_handle->fddev, FPGA_PORT_GET_REGION_INFO, &rinfo) != 0) {
FPGA_MSG("FPGA_PORT_GET_REGION_INFO ioctl failed: %s",
strerror(errno));
result = FPGA_INVALID_PARAM;
goto out_unlock;
}
*flags = (uint32_t) rinfo.flags;
*size = (uint64_t) rinfo.size;
*offset = (uint64_t) rinfo.offset;
out_unlock:
err = pthread_mutex_unlock(&_handle->lock);
if (err) {
FPGA_ERR("pthread_mutex_unlock() failed: %s", strerror(err));
}
return result;
}
static fpga_result port_mmap_region(fpga_handle handle,
void **vaddr,
uint64_t size,
uint32_t flags,
uint64_t offset,
uint32_t mmio_num)
{
void *addr;
int err;
struct _fpga_handle *_handle = (struct _fpga_handle *) handle;
fpga_result result = FPGA_OK;
UNUSED_PARAM(mmio_num);
/* Assure returning pointer contains allocated memory */
ASSERT_NOT_NULL(vaddr);
result = handle_check_and_lock(_handle);
if (result)
return result;
/* Map MMIO memory */
addr = (void *) mmap(NULL, size, flags, MAP_SHARED, _handle->fddev, offset);
if (addr == MAP_FAILED) {
FPGA_MSG("Unable to map MMIO region. Error value is : %s",
strerror(errno));
result = FPGA_INVALID_PARAM;
goto out_unlock;
}
/* Save return address */
*vaddr = addr;
out_unlock:
err = pthread_mutex_unlock(&_handle->lock);
if (err) {
FPGA_ERR("pthread_mutex_unlock() failed: %s", strerror(err));
}
return result;
}
static fpga_result map_mmio_region(fpga_handle handle, uint32_t mmio_num)
{
struct _fpga_handle *_handle = (struct _fpga_handle *)handle;
void *addr = NULL;
uint64_t wsid = 0;
uint32_t flags = 0;
uint64_t size = 0;
uint64_t offset = 0;
fpga_result result = FPGA_OK;
/* Obtain MMIO region information */
result = port_get_region_info(handle,
mmio_num,
&flags,
&size,
&offset);
if (flags != AFU_PERMISSION) {
FPGA_MSG("Invalid MMIO permission flags");
result = FPGA_NO_ACCESS;
return result;
}
/* Map UAFU MMIO */
result = port_mmap_region(handle,
(void **) &addr,
size,
PROT_READ | PROT_WRITE,
offset,
mmio_num);
if (result != FPGA_OK)
return result;
/* Add to MMIO list */
wsid = wsid_gen();
if (!wsid_add(&_handle->mmio_root,
wsid,
(uint64_t) NULL,
(uint64_t) NULL,
size,
(uint64_t) addr,
mmio_num,
0)) {
if (munmap(addr, size)) {
FPGA_MSG("munmap failed. Error value is : %s",
strerror(errno));
return FPGA_INVALID_PARAM;
} else {
FPGA_MSG("Failed to add MMIO id: %d", mmio_num);
return FPGA_NO_MEMORY;
}
}
return FPGA_OK;
}
/* Lazy mapping of MMIO region (only map if not already mapped) */
static fpga_result find_or_map_wm(fpga_handle handle, uint32_t mmio_num,
struct wsid_map **wm_out)
{
struct _fpga_handle *_handle = (struct _fpga_handle *)handle;
struct wsid_map *wm = NULL;
fpga_result result = FPGA_OK;
wm = wsid_find_by_index(_handle->mmio_root, mmio_num);
if (!wm) {
result = map_mmio_region(handle, mmio_num);
if (result != FPGA_OK) {
FPGA_ERR("failed to map mmio region %d", mmio_num);
return result;
}
wm = wsid_find_by_index(_handle->mmio_root, mmio_num);
}
*wm_out = wm;
return FPGA_OK;
}
fpga_result __FPGA_API__ fpgaWriteMMIO32(fpga_handle handle,
uint32_t mmio_num,
uint64_t offset,
uint32_t value)
{
int err;
struct _fpga_handle *_handle = (struct _fpga_handle *) handle;
struct wsid_map *wm = NULL;
fpga_result result = FPGA_OK;
if (offset % sizeof(uint32_t) != 0) {
FPGA_MSG("Misaligned MMIO access");
return FPGA_INVALID_PARAM;
}
result = handle_check_and_lock(_handle);
if (result)
return result;
result = find_or_map_wm(handle, mmio_num, &wm);
if (result)
goto out_unlock;
if (offset > wm->len) {
FPGA_MSG("offset out of bounds");
result = FPGA_INVALID_PARAM;
goto out_unlock;
}
*((volatile uint32_t *) ((uint8_t *)wm->offset + offset)) = value;
out_unlock:
err = pthread_mutex_unlock(&_handle->lock);
if (err) {
FPGA_ERR("pthread_mutex_unlock() failed: %s", strerror(err));
}
return result;
}
fpga_result __FPGA_API__ fpgaReadMMIO32(fpga_handle handle,
uint32_t mmio_num,
uint64_t offset,
uint32_t *value)
{
int err;
struct _fpga_handle *_handle = (struct _fpga_handle *) handle;
struct wsid_map *wm = NULL;
fpga_result result = FPGA_OK;
if (offset % sizeof(uint32_t) != 0) {
FPGA_MSG("Misaligned MMIO access");
return FPGA_INVALID_PARAM;
}
result = handle_check_and_lock(_handle);
if (result)
return result;
result = find_or_map_wm(handle, mmio_num, &wm);
if (result)
goto out_unlock;
if (offset > wm->len) {
FPGA_MSG("offset out of bounds");
result = FPGA_INVALID_PARAM;
goto out_unlock;
}
*value = *((volatile uint32_t *) ((uint8_t *)wm->offset + offset));
out_unlock:
err = pthread_mutex_unlock(&_handle->lock);
if (err) {
FPGA_ERR("pthread_mutex_unlock() failed: %s", strerror(err));
}
return result;
}
fpga_result __FPGA_API__ fpgaWriteMMIO64(fpga_handle handle,
uint32_t mmio_num,
uint64_t offset,
uint64_t value)
{
int err;
struct _fpga_handle *_handle = (struct _fpga_handle *) handle;
struct wsid_map *wm = NULL;
fpga_result result = FPGA_OK;
if (offset % sizeof(uint64_t) != 0) {
FPGA_MSG("Misaligned MMIO access");
return FPGA_INVALID_PARAM;
}
result = handle_check_and_lock(_handle);
if (result)
return result;
result = find_or_map_wm(handle, mmio_num, &wm);
if (result)
goto out_unlock;
if (offset > wm->len) {
FPGA_MSG("offset out of bounds");
result = FPGA_INVALID_PARAM;
goto out_unlock;
}
*((volatile uint64_t *) ((uint8_t *)wm->offset + offset)) = value;
out_unlock:
err = pthread_mutex_unlock(&_handle->lock);
if (err) {
FPGA_ERR("pthread_mutex_unlock() failed: %s", strerror(err));
}
return result;
}
fpga_result __FPGA_API__ fpgaReadMMIO64(fpga_handle handle,
uint32_t mmio_num,
uint64_t offset,
uint64_t *value)
{
int err;
struct _fpga_handle *_handle = (struct _fpga_handle *) handle;
struct wsid_map *wm = NULL;
fpga_result result = FPGA_OK;
if (offset % sizeof(uint64_t) != 0) {
FPGA_MSG("Misaligned MMIO access");
return FPGA_INVALID_PARAM;
}
result = handle_check_and_lock(_handle);
if (result)
return result;
result = find_or_map_wm(handle, mmio_num, &wm);
if (result)
goto out_unlock;
if (offset > wm->len) {
FPGA_MSG("offset out of bounds");
result = FPGA_INVALID_PARAM;
goto out_unlock;
}
*value = *((volatile uint64_t *) ((uint8_t *)wm->offset + offset));
out_unlock:
err = pthread_mutex_unlock(&_handle->lock);
if (err) {
FPGA_ERR("pthread_mutex_unlock() failed: %s", strerror(err));
}
return result;
}
fpga_result __FPGA_API__ fpgaMapMMIO(fpga_handle handle,
uint32_t mmio_num,
uint64_t **mmio_ptr)
{
struct _fpga_handle *_handle = (struct _fpga_handle *)handle;
struct wsid_map *wm = NULL;
fpga_result result = FPGA_OK;
int err;
result = handle_check_and_lock(_handle);
if (result)
return result;
result = find_or_map_wm(handle, mmio_num, &wm);
if (result)
goto out_unlock;
/* Store return value only if return pointer has allocated memory */
if (mmio_ptr)
*mmio_ptr = (uint64_t *)wm->addr;
out_unlock:
err = pthread_mutex_unlock(&_handle->lock);
if (err) {
FPGA_ERR("pthread_mutex_unlock() failed: %s", strerror(err));
}
return result;
}
fpga_result __FPGA_API__ fpgaUnmapMMIO(fpga_handle handle,
uint32_t mmio_num)
{
struct _fpga_handle *_handle = (struct _fpga_handle *)handle;
void *mmio_ptr;
fpga_result result = FPGA_OK;
int err;
result = handle_check_and_lock(_handle);
if (result)
return result;
/* Fetch the MMIO physical address and length */
struct wsid_map *wm = wsid_find_by_index(_handle->mmio_root, mmio_num);
if (!wm) {
FPGA_MSG("MMIO region %d not found", mmio_num);
result = FPGA_INVALID_PARAM;
goto out_unlock;
}
/* Unmap UAFU MMIO */
mmio_ptr = (void *) wm->offset;
if (munmap((void *) mmio_ptr, wm->len)) {
FPGA_MSG("munmap failed: %s",
strerror(errno));
result = FPGA_INVALID_PARAM;
goto out_unlock;
}
/* Remove MMIO */
wsid_del(&_handle->mmio_root, wm->wsid);
out_unlock:
err = pthread_mutex_unlock(&_handle->lock);
if (err) {
FPGA_ERR("pthread_mutex_unlock() failed: %s", strerror(err));
}
return result;
}
| 1 | 14,630 | Do we also need to add the iova, or is that done somewhere else? | OPAE-opae-sdk | c |
@@ -19,9 +19,7 @@ package selector
import "github.com/mysteriumnetwork/node/identity"
-// Handler allows selecting identity to be used
+// Handler interface
type Handler interface {
- UseExisting(address, passphrase string) (identity.Identity, error)
- UseLast(passphrase string) (identity.Identity, error)
- UseNew(passphrase string) (identity.Identity, error)
+ UseOrCreate(address, passphrase string) (identity.Identity, error)
} | 1 | /*
* Copyright (C) 2017 The "MysteriumNetwork/node" Authors.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package selector
import "github.com/mysteriumnetwork/node/identity"
// Handler allows selecting identity to be used
type Handler interface {
UseExisting(address, passphrase string) (identity.Identity, error)
UseLast(passphrase string) (identity.Identity, error)
UseNew(passphrase string) (identity.Identity, error)
}
| 1 | 14,566 | why change this? | mysteriumnetwork-node | go |
@@ -173,7 +173,7 @@ void h2o_get_timestamp(h2o_context_t *ctx, h2o_mem_pool_t *pool, h2o_timestamp_t
if (ctx->_timestamp_cache.value != NULL)
h2o_mem_release_shared(ctx->_timestamp_cache.value);
ctx->_timestamp_cache.value = h2o_mem_alloc_shared(NULL, sizeof(h2o_timestamp_string_t), NULL);
- gmtime_r(&ctx->_timestamp_cache.tv_at.tv_sec, &gmt);
+ gmtime_r((time_t *)&ctx->_timestamp_cache.tv_at.tv_sec, &gmt);
h2o_time2str_rfc1123(ctx->_timestamp_cache.value->rfc1123, &gmt);
h2o_time2str_log(ctx->_timestamp_cache.value->log, ctx->_timestamp_cache.tv_at.tv_sec);
} | 1 | /*
* Copyright (c) 2014 DeNA Co., Ltd.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <stddef.h>
#include <stdlib.h>
#include <sys/time.h>
#include "h2o.h"
void h2o_context_init_pathconf_context(h2o_context_t *ctx, h2o_pathconf_t *pathconf)
{
/* add pathconf to the inited list (or return if already inited) */
size_t i;
for (i = 0; i != ctx->_pathconfs_inited.size; ++i)
if (ctx->_pathconfs_inited.entries[i] == pathconf)
return;
h2o_vector_reserve(NULL, (void *)&ctx->_pathconfs_inited, sizeof(ctx->_pathconfs_inited.entries[0]),
ctx->_pathconfs_inited.size + 1);
ctx->_pathconfs_inited.entries[ctx->_pathconfs_inited.size++] = pathconf;
#define DOIT(type, list) \
do { \
size_t i; \
for (i = 0; i != pathconf->list.size; ++i) { \
type *o = pathconf->list.entries[i]; \
if (o->on_context_init != NULL) \
o->on_context_init(o, ctx); \
} \
} while (0)
DOIT(h2o_handler_t, handlers);
DOIT(h2o_filter_t, filters);
DOIT(h2o_logger_t, loggers);
#undef DOIT
}
void h2o_context_dispose_pathconf_context(h2o_context_t *ctx, h2o_pathconf_t *pathconf)
{
/* nullify pathconf in the inited list (or return if already disposed) */
size_t i;
for (i = 0; i != ctx->_pathconfs_inited.size; ++i)
if (ctx->_pathconfs_inited.entries[i] == pathconf)
break;
if (i == ctx->_pathconfs_inited.size)
return;
ctx->_pathconfs_inited.entries[i] = NULL;
#define DOIT(type, list) \
do { \
size_t i; \
for (i = 0; i != pathconf->list.size; ++i) { \
type *o = pathconf->list.entries[i]; \
if (o->on_context_dispose != NULL) \
o->on_context_dispose(o, ctx); \
} \
} while (0)
DOIT(h2o_handler_t, handlers);
DOIT(h2o_filter_t, filters);
DOIT(h2o_logger_t, loggers);
#undef DOIT
}
void h2o_context_init(h2o_context_t *ctx, h2o_loop_t *loop, h2o_globalconf_t *config)
{
size_t i, j;
assert(config->hosts[0] != NULL);
memset(ctx, 0, sizeof(*ctx));
ctx->loop = loop;
ctx->globalconf = config;
h2o_timeout_init(ctx->loop, &ctx->zero_timeout, 0);
h2o_timeout_init(ctx->loop, &ctx->one_sec_timeout, 1000);
ctx->queue = h2o_multithread_create_queue(loop);
h2o_multithread_register_receiver(ctx->queue, &ctx->receivers.hostinfo_getaddr, h2o_hostinfo_getaddr_receiver);
h2o_timeout_init(ctx->loop, &ctx->http1.req_timeout, config->http1.req_timeout);
h2o_timeout_init(ctx->loop, &ctx->http2.idle_timeout, config->http2.idle_timeout);
h2o_linklist_init_anchor(&ctx->http2._conns);
ctx->proxy.client_ctx.loop = loop;
h2o_timeout_init(ctx->loop, &ctx->proxy.io_timeout, config->proxy.io_timeout);
ctx->proxy.client_ctx.getaddr_receiver = &ctx->receivers.hostinfo_getaddr;
ctx->proxy.client_ctx.io_timeout = &ctx->proxy.io_timeout;
ctx->_module_configs = h2o_mem_alloc(sizeof(*ctx->_module_configs) * config->_num_config_slots);
memset(ctx->_module_configs, 0, sizeof(*ctx->_module_configs) * config->_num_config_slots);
static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
pthread_mutex_lock(&mutex);
for (i = 0; config->hosts[i] != NULL; ++i) {
h2o_hostconf_t *hostconf = config->hosts[i];
for (j = 0; j != hostconf->paths.size; ++j) {
h2o_pathconf_t *pathconf = hostconf->paths.entries + j;
h2o_context_init_pathconf_context(ctx, pathconf);
}
h2o_context_init_pathconf_context(ctx, &hostconf->fallback_path);
}
pthread_mutex_unlock(&mutex);
}
void h2o_context_dispose(h2o_context_t *ctx)
{
h2o_globalconf_t *config = ctx->globalconf;
size_t i, j;
for (i = 0; config->hosts[i] != NULL; ++i) {
h2o_hostconf_t *hostconf = config->hosts[i];
for (j = 0; j != hostconf->paths.size; ++j) {
h2o_pathconf_t *pathconf = hostconf->paths.entries + j;
h2o_context_dispose_pathconf_context(ctx, pathconf);
}
h2o_context_dispose_pathconf_context(ctx, &hostconf->fallback_path);
}
free(ctx->_pathconfs_inited.entries);
free(ctx->_module_configs);
h2o_timeout_dispose(ctx->loop, &ctx->zero_timeout);
h2o_timeout_dispose(ctx->loop, &ctx->one_sec_timeout);
h2o_timeout_dispose(ctx->loop, &ctx->http1.req_timeout);
h2o_timeout_dispose(ctx->loop, &ctx->http2.idle_timeout);
h2o_timeout_dispose(ctx->loop, &ctx->proxy.io_timeout);
/* what should we do here? assert(!h2o_linklist_is_empty(&ctx->http2._conns); */
/* TODO assert that the all the getaddrinfo threads are idle */
h2o_multithread_unregister_receiver(ctx->queue, &ctx->receivers.hostinfo_getaddr);
h2o_multithread_destroy_queue(ctx->queue);
#if H2O_USE_LIBUV
/* make sure the handles released by h2o_timeout_dispose get freed */
uv_run(ctx->loop, UV_RUN_NOWAIT);
#endif
}
void h2o_context_request_shutdown(h2o_context_t *ctx)
{
ctx->shutdown_requested = 1;
if (ctx->globalconf->http1.callbacks.request_shutdown != NULL)
ctx->globalconf->http1.callbacks.request_shutdown(ctx);
if (ctx->globalconf->http2.callbacks.request_shutdown != NULL)
ctx->globalconf->http2.callbacks.request_shutdown(ctx);
}
void h2o_get_timestamp(h2o_context_t *ctx, h2o_mem_pool_t *pool, h2o_timestamp_t *ts)
{
uint64_t now = h2o_now(ctx->loop);
struct tm gmt;
if (ctx->_timestamp_cache.uv_now_at != now) {
time_t prev_sec = ctx->_timestamp_cache.tv_at.tv_sec;
ctx->_timestamp_cache.uv_now_at = now;
gettimeofday(&ctx->_timestamp_cache.tv_at, NULL);
if (ctx->_timestamp_cache.tv_at.tv_sec != prev_sec) {
/* update the string cache */
if (ctx->_timestamp_cache.value != NULL)
h2o_mem_release_shared(ctx->_timestamp_cache.value);
ctx->_timestamp_cache.value = h2o_mem_alloc_shared(NULL, sizeof(h2o_timestamp_string_t), NULL);
gmtime_r(&ctx->_timestamp_cache.tv_at.tv_sec, &gmt);
h2o_time2str_rfc1123(ctx->_timestamp_cache.value->rfc1123, &gmt);
h2o_time2str_log(ctx->_timestamp_cache.value->log, ctx->_timestamp_cache.tv_at.tv_sec);
}
}
ts->at = ctx->_timestamp_cache.tv_at;
h2o_mem_link_shared(pool, ctx->_timestamp_cache.value);
ts->str = ctx->_timestamp_cache.value;
}
| 1 | 10,692 | If there is no guarantee that tv_sec is of type `time_t`, IMO you should copy the value instead of enforcing a pointer typecast. | h2o-h2o | c |
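The review comment above argues against the `(time_t *)` pointer cast introduced by the patch. Below is a minimal standalone sketch of the alternative it suggests — copying the seconds value into a `time_t` local before calling `gmtime_r`. This is an illustrative example only, not the h2o code: the variable names and the strftime format string are assumptions made for the sketch.

#include <stdio.h>
#include <sys/time.h>
#include <time.h>

int main(void)
{
    struct timeval tv;
    struct tm gmt;
    char buf[64];

    gettimeofday(&tv, NULL);

    /* Copy tv_sec into a time_t local instead of casting its address:
     * this stays well-defined even on platforms where tv_sec is not
     * declared as time_t (it may be a different integer type). */
    time_t sec = (time_t)tv.tv_sec;
    gmtime_r(&sec, &gmt);

    strftime(buf, sizeof(buf), "%a, %d %b %Y %H:%M:%S GMT", &gmt);
    printf("%s\n", buf);
    return 0;
}

Applied to the h2o context, the same idea would mean introducing a local `time_t` copy of `ctx->_timestamp_cache.tv_at.tv_sec` and passing that local's address to `gmtime_r`, rather than reinterpreting the struct member's address.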