patch (string, 17-31.2k chars) | y (int64, 1) | oldf (string, 0-2.21M chars) | idx (int64, 1) | id (int64, 4.29k-68.4k) | msg (string, 8-843 chars) | proj (212 classes) | lang (9 classes)
---|---|---|---|---|---|---|---
@@ -29,6 +29,7 @@ class ResourceType(object):
FOLDER = 'folder'
PROJECT = 'project'
GROUP = 'group'
+ BACKEND_SERVICE = 'backend_service'
FORWARDING_RULE = 'forwarding_rule'
BUCKETS_ACL = 'buckets_acl'
CLOUDSQL_ACL = 'cloudsql_instances' | 1 | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GCP Resource.
For now, this only represents Organization resources. In the future, we may
need to separate the classes depending on implementation.
"""
import abc
from google.cloud.security.common.gcp_type import errors
class ResourceType(object):
"""Resource types."""
ORGANIZATION = 'organization'
FOLDER = 'folder'
PROJECT = 'project'
GROUP = 'group'
FORWARDING_RULE = 'forwarding_rule'
BUCKETS_ACL = 'buckets_acl'
CLOUDSQL_ACL = 'cloudsql_instances'
resource_types = frozenset([
ORGANIZATION,
FOLDER,
PROJECT,
GROUP,
FORWARDING_RULE,
])
@classmethod
def verify(cls, resource_type):
"""Verify if the resource type is recognized.
Args:
resource_type: The string resource type.
Returns:
The resource type if it is recognized in the resource_types.
Raises:
InvalidResourceTypeError if resource type is not recognized.
"""
if resource_type not in cls.resource_types:
raise errors.InvalidResourceTypeError(
'Invalid resource type: {}'.format(resource_type))
return resource_type
# pylint: disable=too-few-public-methods
class LifecycleState(object):
"""Resource lifecycle state."""
ACTIVE = 'ACTIVE'
DELETED = 'DELETED'
UNSPECIFIED = 'LIFECYCLE_STATE_UNSPECIFIED'
class Resource(object):
"""Represents a GCP resource."""
__metaclass__ = abc.ABCMeta
def __init__(
self,
resource_id,
resource_type,
name=None,
display_name=None,
parent=None,
lifecycle_state=LifecycleState.UNSPECIFIED):
"""Initialize.
Args:
resource_id: The resource's unique id (string) in GCP.
resource_type: The resource type.
name: The resource unique name,
e.g. "<resource type>/{id}".
display_name: The resource display name.
parent: The parent Resource object.
lifecycle_state: The lifecycle state of the Resource.
"""
self._resource_id = str(resource_id)
self._resource_type = resource_type
if name:
self._name = name
else:
self._name = self.RESOURCE_NAME_FMT % resource_id
self._display_name = display_name
# TODO: maybe need assertion for parent type, e.g. assert that
# organization has no parent, whereas projects and folders can
# have either another folder or organization as a parent.
self._parent = parent
self._lifecycle_state = lifecycle_state
def __eq__(self, other):
"""Test equality of Resource."""
if not isinstance(other, type(self)):
return NotImplemented
return (self.id == other.id and
self.type == other.type)
def __ne__(self, other):
"""Test inequality of Resource."""
return not self == other
def __hash__(self):
"""Create a hash on the resource type and id."""
return hash((self.type, self.id))
def __repr__(self):
"""String representation of the Resource."""
return '{}<id={},parent={}>'.format(
self.type, self.id, self.parent)
@property
def id(self):
"""Resource id."""
return self._resource_id
@property
def type(self):
"""Resource type."""
return self._resource_type
@property
def name(self):
"""GCP name."""
return self._name
@property
def display_name(self):
"""Display name."""
return self._display_name
@property
def parent(self):
"""Resource parent."""
return self._parent
@property
def lifecycle_state(self):
"""Lifecycle state."""
return self._lifecycle_state
@abc.abstractmethod
def exists(self):
"""Verify that the resource exists in GCP."""
raise NotImplementedError('Implement exists() in subclass')
| 1 | 26,110 | nit: Would you mind alpha-sorting this? | forseti-security-forseti-security | py |
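The nit above only concerns ordering. A minimal sketch of how the constant block from this patch might look once alpha-sorted; the placement of `BACKEND_SERVICE` and the choice to leave the hierarchy constants (ORGANIZATION/FOLDER/PROJECT/GROUP) untouched are assumptions about what the reviewer intended, not the merged code:

```python
class ResourceType(object):
    """Resource types."""

    ORGANIZATION = 'organization'
    FOLDER = 'folder'
    PROJECT = 'project'
    GROUP = 'group'

    # Remaining resource constants alpha-sorted per the review nit
    # (hypothetical ordering; the committed change may differ).
    BACKEND_SERVICE = 'backend_service'
    BUCKETS_ACL = 'buckets_acl'
    CLOUDSQL_ACL = 'cloudsql_instances'
    FORWARDING_RULE = 'forwarding_rule'
```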
@@ -55,6 +55,8 @@ public class ManifestFiles {
* @return a {@link ManifestReader}
*/
public static ManifestReader read(ManifestFile manifest, FileIO io, Map<Integer, PartitionSpec> specsById) {
+ Preconditions.checkArgument(manifest.content() == ManifestContent.DATA,
+ "Cannot read a delete manifest with a ManifestReader: %s", manifest);
InputFile file = io.newInputFile(manifest.path());
InheritableMetadata inheritableMetadata = InheritableMetadataFactory.fromManifest(manifest);
return new ManifestReader(file, specsById, inheritableMetadata); | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg;
import java.io.IOException;
import java.util.Map;
import org.apache.iceberg.exceptions.RuntimeIOException;
import org.apache.iceberg.io.FileIO;
import org.apache.iceberg.io.InputFile;
import org.apache.iceberg.io.OutputFile;
import org.apache.iceberg.relocated.com.google.common.base.Preconditions;
public class ManifestFiles {
private ManifestFiles() {
}
/**
* Returns a new {@link ManifestReader} for a {@link ManifestFile}.
* <p>
* <em>Note:</em> Callers should use {@link ManifestFiles#read(ManifestFile, FileIO, Map)} to ensure
* the schema used by filters is the latest table schema. This should be used only when reading
* a manifest without filters.
*
* @param manifest a ManifestFile
* @param io a FileIO
* @return a manifest reader
*/
public static ManifestReader read(ManifestFile manifest, FileIO io) {
return read(manifest, io, null);
}
/**
* Returns a new {@link ManifestReader} for a {@link ManifestFile}.
*
* @param manifest a {@link ManifestFile}
* @param io a {@link FileIO}
* @param specsById a Map from spec ID to partition spec
* @return a {@link ManifestReader}
*/
public static ManifestReader read(ManifestFile manifest, FileIO io, Map<Integer, PartitionSpec> specsById) {
InputFile file = io.newInputFile(manifest.path());
InheritableMetadata inheritableMetadata = InheritableMetadataFactory.fromManifest(manifest);
return new ManifestReader(file, specsById, inheritableMetadata);
}
/**
* Create a new {@link ManifestWriter}.
* <p>
* Manifests created by this writer have all entry snapshot IDs set to null.
* All entries will inherit the snapshot ID that will be assigned to the manifest on commit.
*
* @param spec {@link PartitionSpec} used to produce {@link DataFile} partition tuples
* @param outputFile the destination file location
* @return a manifest writer
*/
public static ManifestWriter write(PartitionSpec spec, OutputFile outputFile) {
return write(1, spec, outputFile, null);
}
/**
* Create a new {@link ManifestWriter} for the given format version.
*
* @param formatVersion a target format version
* @param spec a {@link PartitionSpec}
* @param outputFile an {@link OutputFile} where the manifest will be written
* @param snapshotId a snapshot ID for the manifest entries, or null for an inherited ID
* @return a manifest writer
*/
public static ManifestWriter write(int formatVersion, PartitionSpec spec, OutputFile outputFile, Long snapshotId) {
switch (formatVersion) {
case 1:
return new ManifestWriter.V1Writer(spec, outputFile, snapshotId);
case 2:
return new ManifestWriter.V2Writer(spec, outputFile, snapshotId);
}
throw new UnsupportedOperationException("Cannot write manifest for table version: " + formatVersion);
}
static ManifestFile copyAppendManifest(int formatVersion,
InputFile toCopy, Map<Integer, PartitionSpec> specsById,
OutputFile outputFile, long snapshotId,
SnapshotSummary.Builder summaryBuilder) {
// use metadata that will add the current snapshot's ID for the rewrite
InheritableMetadata inheritableMetadata = InheritableMetadataFactory.forCopy(snapshotId);
try (ManifestReader reader = new ManifestReader(toCopy, specsById, inheritableMetadata)) {
return copyManifestInternal(
formatVersion, reader, outputFile, snapshotId, summaryBuilder, ManifestEntry.Status.ADDED);
} catch (IOException e) {
throw new RuntimeIOException(e, "Failed to close manifest: %s", toCopy.location());
}
}
static ManifestFile copyRewriteManifest(int formatVersion,
InputFile toCopy, Map<Integer, PartitionSpec> specsById,
OutputFile outputFile, long snapshotId,
SnapshotSummary.Builder summaryBuilder) {
// for a rewritten manifest all snapshot ids should be set. use empty metadata to throw an exception if it is not
InheritableMetadata inheritableMetadata = InheritableMetadataFactory.empty();
try (ManifestReader reader = new ManifestReader(toCopy, specsById, inheritableMetadata)) {
return copyManifestInternal(
formatVersion, reader, outputFile, snapshotId, summaryBuilder, ManifestEntry.Status.EXISTING);
} catch (IOException e) {
throw new RuntimeIOException(e, "Failed to close manifest: %s", toCopy.location());
}
}
private static ManifestFile copyManifestInternal(int formatVersion, ManifestReader reader, OutputFile outputFile,
long snapshotId, SnapshotSummary.Builder summaryBuilder,
ManifestEntry.Status allowedEntryStatus) {
ManifestWriter writer = write(formatVersion, reader.spec(), outputFile, snapshotId);
boolean threw = true;
try {
for (ManifestEntry entry : reader.entries()) {
Preconditions.checkArgument(
allowedEntryStatus == entry.status(),
"Invalid manifest entry status: %s (allowed status: %s)",
entry.status(), allowedEntryStatus);
switch (entry.status()) {
case ADDED:
summaryBuilder.addedFile(reader.spec(), entry.file());
writer.add(entry);
break;
case EXISTING:
writer.existing(entry);
break;
case DELETED:
summaryBuilder.deletedFile(reader.spec(), entry.file());
writer.delete(entry);
break;
}
}
threw = false;
} finally {
try {
writer.close();
} catch (IOException e) {
if (!threw) {
throw new RuntimeIOException(e, "Failed to close manifest: %s", outputFile);
}
}
}
return writer.toManifestFile();
}
}
 | 1 | 20,158 | For my understanding, the DATA manifest and the DELETE manifest could share the same read/write path, so I think we could use a common reader and writer. Is there any other reason we need to keep them as separate paths? | apache-iceberg | java |
@@ -4,7 +4,7 @@
plan_title = @plan.title
user_name = @plan.owner.name
helpdesk_email = Rails.configuration.branding[:organisation][:helpdesk_email]
- contact_us_url = Rails.configuration.branding[:organisation][:contact_us_url]
+ contact_us = Rails.configuration.branding[:organisation][:contact_us_url] || contact_us_url
email_subject = _('Query or feedback related to %{tool_name}') %{ :tool_name => tool_name }
%>
<% FastGettext.with_locale FastGettext.default_locale do %> | 1 | <%
tool_name = Rails.configuration.branding[:application][:name]
commenter_name = @commenter.name
plan_title = @plan.title
user_name = @plan.owner.name
helpdesk_email = Rails.configuration.branding[:organisation][:helpdesk_email]
contact_us_url = Rails.configuration.branding[:organisation][:contact_us_url]
email_subject = _('Query or feedback related to %{tool_name}') %{ :tool_name => tool_name }
%>
<% FastGettext.with_locale FastGettext.default_locale do %>
<p>
<%= _('Hello %{user_name}') %{ :user_name => user_name } %>
</p>
<p>
<%= _('%{commenter_name} has commented on the plan %{plan_title}. To view the comments, '\
'please visit the My Dashboard page in %{tool_name} and open your plan.') %{ :plan_title => plan_title,
:commenter_name => commenter_name, :tool_name => tool_name } %>
</p>
<p>
<%= _('All the best') %>
<br />
<%= _('The %{tool_name} team') %{:tool_name => tool_name} %>
</p>
<p>
<%= _('You may change your notification preferences on your profile page.') %> <%= _('Please do not reply to this email.') %> <%= raw(_('If you have any questions or need help, please contact us at %{helpdesk_email} or visit %{contact_us_url}') %{ :helpdesk_email => mail_to(helpdesk_email, helpdesk_email, subject: email_subject), :contact_us_url => link_to(contact_us_url, contact_us_url) }) %>
</p>
<% end %> | 1 | 17,083 | that change should be sufficient for avoiding the override of contact_us_url helper. The one defined at the .yml it is within a hash so no problem should arise. | DMPRoadmap-roadmap | rb |
@@ -56,7 +56,7 @@ return [
'choose_file' => 'Datei auswählen',
'close' => 'Schließen',
'create' => 'Erstellen',
- 'create_and_add_another' => 'Erstellen und weitere hinzufügen',
+ 'create_and_add_another' => 'Erstellen und weiteres Element hinzufügen',
'create_and_continue' => 'Erstellen und weiter bearbeiten',
'save' => 'Speichern',
'save_and_continue' => 'Speichern und weiter bearbeiten', | 1 | <?php
return [
'page_title' => [
'dashboard' => 'Dashboard',
'detail' => '%entity_label_singular% <small>(#%entity_short_id%)</small>',
'edit' => '%entity_label_singular% <small>(#%entity_short_id%)</small> bearbeiten',
'index' => '%entity_label_plural%',
'new' => '%entity_label_singular% erstellen',
'exception' => 'Fehler',
],
'datagrid' => [
'hidden_results' => 'Einige Ergebnisse können aufgrund fehlender Berechtigungen nicht angezeigt werden.',
'no_results' => 'Keine Ergebnisse gefunden.',
],
'paginator' => [
'first' => 'Erste',
'previous' => 'Zurück',
'next' => 'Nächste',
'last' => 'Letzte',
'counter' => '<strong>%start%</strong> - <strong>%end%</strong> von <strong>%results%</strong>',
'results' => '{0} Keine Ergebnisse|{1} <strong>1</strong> Ergebnis|]1,Inf] <strong>%count%</strong> Ergebnisse',
],
'label' => [
'true' => 'Ja',
'false' => 'Nein',
'empty' => 'Leer',
'null' => 'Null',
'nullable_field' => 'Feld leer lassen',
'object' => 'PHP-Objekt',
'inaccessible' => 'Nicht zugreifbar',
'inaccessible.explanation' => 'Es gibt keine Getter-Methode für diese Eigenschaft oder die Eigenschaft ist nicht public',
'form.empty_value' => 'kein Wert',
],
'field' => [
'code_editor.view_code' => 'Code anzeigen',
'text_editor.view_content' => 'Inhalt anzeigen',
],
'action' => [
'entity_actions' => 'Aktionen',
'new' => '%entity_label_singular% erstellen',
'search' => 'Suchen',
'detail' => 'Anzeigen',
'edit' => 'Ändern',
'delete' => 'Löschen',
'cancel' => 'Abbrechen',
'index' => 'Zurück zur Übersicht',
'deselect' => 'Auswahl aufheben',
'add_new_item' => 'Neues Element hinzufügen',
'remove_item' => 'Element entfernen',
'choose_file' => 'Datei auswählen',
'close' => 'Schließen',
'create' => 'Erstellen',
'create_and_add_another' => 'Erstellen und weitere hinzufügen',
'create_and_continue' => 'Erstellen und weiter bearbeiten',
'save' => 'Speichern',
'save_and_continue' => 'Speichern und weiter bearbeiten',
],
'batch_action_modal' => [
'title' => 'Möchten Sie die ausgewählten Elemente wirklich verändern?',
'content' => 'Diese Aktion kann nicht rückgängig gemacht werden.',
'action' => 'Fortfahren',
],
'delete_modal' => [
'title' => 'Soll das Element wirklich gelöscht werden?',
'content' => 'Diese Aktion kann nicht rückgängig gemacht werden.',
],
'filter' => [
'title' => 'Filtern',
'button.clear' => 'Zurücksetzen',
'button.apply' => 'Anwenden',
'label.is_equal_to' => 'ist gleich',
'label.is_not_equal_to' => 'ist nicht gleich',
'label.is_greater_than' => 'ist größer als',
'label.is_greater_than_or_equal_to' => 'ist größer oder gleich',
'label.is_less_than' => 'ist kleiner als',
'label.is_less_than_or_equal_to' => 'ist kleiner oder gleich',
'label.is_between' => 'ist zwischen',
'label.contains' => 'enthält',
'label.not_contains' => 'enthält nicht',
'label.starts_with' => 'beginnt mit',
'label.ends_with' => 'endet mit',
'label.exactly' => 'ist genau',
'label.not_exactly' => 'ist nicht genau',
'label.is_same' => 'ist gleich',
'label.is_not_same' => 'ist nicht gleich',
'label.is_after' => 'ist nach',
'label.is_after_or_same' => 'ist nach oder gleich',
'label.is_before' => 'ist vor',
'label.is_before_or_same' => 'ist vor oder gleich',
],
'form' => [
'are_you_sure' => 'Vorgenommene Änderungen wurden noch nicht gespeichert.',
'tab.error_badge_title' => 'Eine ungültige Eingabe|%count% ungültige Eingaben',
'slug.confirm_text' => 'Wenn Sie den Slug ändern, kann dies Links auf anderen Seiten beschädigen.',
],
'user' => [
'logged_in_as' => 'Angemeldet als',
'unnamed' => 'Unbenannter Benutzer',
'anonymous' => 'Anonymer Benutzer',
'sign_out' => 'Abmelden',
'exit_impersonation' => 'Benutzerimitation verlassen',
],
'login_page' => [
'username' => 'Benutzername',
'password' => 'Passwort',
'sign_in' => 'Login',
],
'exception' => [
'entity_not_found' => 'Dieses Element ist nicht mehr verfügbar.',
'entity_remove' => 'Dieses Element kann nicht gelöscht werden, weil andere Elemente davon abhängen.',
'forbidden_action' => 'Die gewünschte Aktion kann mit diesem Element nicht ausgeführt werden.',
'insufficient_entity_permission' => 'Sie haben keine Berechtigung, auf dieses Element zuzugreifen.',
],
];
 | 1 | 12,720 | We actually had a discussion about exactly this in #3470; some languages seem to go for one, some for the other. | EasyCorp-EasyAdminBundle | php |
@@ -50,6 +50,8 @@ public abstract class DynamicLangXApiView implements ViewModel {
public abstract List<LongRunningOperationDetailView> longRunningDescriptors();
+ public abstract List<GrpcStreamingDetailView> grpcStreamingDescriptors();
+
public abstract List<String> methodKeys();
public abstract String clientConfigPath(); | 1 | /* Copyright 2016 Google Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.api.codegen.viewmodel;
import com.google.api.codegen.SnippetSetRunner;
import com.google.auto.value.AutoValue;
import java.util.List;
@AutoValue
public abstract class DynamicLangXApiView implements ViewModel {
public abstract String templateFileName();
public abstract FileHeaderView fileHeader();
public abstract String protoFilename();
public abstract ServiceDocView doc();
public abstract String name();
public abstract String serviceAddress();
public abstract Integer servicePort();
public abstract String serviceTitle();
public abstract Iterable<String> authScopes();
public abstract List<PathTemplateView> pathTemplates();
public abstract List<FormatResourceFunctionView> formatResourceFunctions();
public abstract List<ParseResourceFunctionView> parseResourceFunctions();
public abstract List<PathTemplateGetterFunctionView> pathTemplateGetterFunctions();
public abstract List<PageStreamingDescriptorView> pageStreamingDescriptors();
public abstract List<LongRunningOperationDetailView> longRunningDescriptors();
public abstract List<String> methodKeys();
public abstract String clientConfigPath();
public abstract String interfaceKey();
public abstract String grpcClientTypeName();
public abstract List<GrpcStubView> stubs();
public abstract String outputPath();
public abstract List<ApiMethodView> apiMethods();
public abstract boolean hasPageStreamingMethods();
public abstract boolean hasLongRunningOperations();
public abstract boolean hasDefaultServiceAddress();
public abstract boolean hasDefaultServiceScopes();
public boolean missingDefaultServiceAddress() {
return !hasDefaultServiceAddress();
}
public boolean missingDefaultServiceScopes() {
return !hasDefaultServiceScopes();
}
public boolean hasMissingDefaultOptions() {
return missingDefaultServiceAddress() || missingDefaultServiceScopes();
}
@Override
public String resourceRoot() {
return SnippetSetRunner.SNIPPET_RESOURCE_ROOT;
}
public static Builder newBuilder() {
return new AutoValue_DynamicLangXApiView.Builder();
}
@AutoValue.Builder
public abstract static class Builder {
public abstract Builder templateFileName(String val);
public abstract Builder fileHeader(FileHeaderView val);
public abstract Builder protoFilename(String simpleName);
public abstract Builder doc(ServiceDocView doc);
public abstract Builder name(String val);
public abstract Builder serviceAddress(String val);
public abstract Builder servicePort(Integer val);
public abstract Builder serviceTitle(String val);
public abstract Builder authScopes(Iterable<String> val);
public abstract Builder pathTemplates(List<PathTemplateView> val);
public abstract Builder formatResourceFunctions(List<FormatResourceFunctionView> val);
public abstract Builder parseResourceFunctions(List<ParseResourceFunctionView> val);
public abstract Builder pathTemplateGetterFunctions(List<PathTemplateGetterFunctionView> val);
public abstract Builder pageStreamingDescriptors(List<PageStreamingDescriptorView> val);
public abstract Builder longRunningDescriptors(List<LongRunningOperationDetailView> val);
public abstract Builder methodKeys(List<String> val);
public abstract Builder clientConfigPath(String val);
public abstract Builder interfaceKey(String val);
public abstract Builder grpcClientTypeName(String val);
public abstract Builder stubs(List<GrpcStubView> val);
public abstract Builder outputPath(String val);
public abstract Builder apiMethods(List<ApiMethodView> val);
public abstract Builder hasPageStreamingMethods(boolean val);
public abstract Builder hasLongRunningOperations(boolean val);
public abstract Builder hasDefaultServiceAddress(boolean val);
public abstract Builder hasDefaultServiceScopes(boolean val);
public abstract DynamicLangXApiView build();
}
}
| 1 | 20,728 | Is PHP the first dynamic MVVM language with grpc streaming support? | googleapis-gapic-generator | java |
@@ -41,6 +41,7 @@ SmilesMolSupplier::SmilesMolSupplier(const std::string &fileName,
if (!tmpStream || (!(*tmpStream)) || (tmpStream->bad())) {
std::ostringstream errout;
errout << "Bad input file " << fileName;
+ if (tmpStream) { delete tmpStream; }
throw BadFileException(errout.str());
}
dp_inStream = static_cast<std::istream *>(tmpStream); | 1 | // $Id$
//
// Copyright (C) 2002-2011 Greg Landrum and Rational Discovery LLC
//
// @@ All Rights Reserved @@
// This file is part of the RDKit.
// The contents are covered by the terms of the BSD license
// which is included in the file license.txt, found at the root
// of the RDKit source tree.
//
#include <RDGeneral/BadFileException.h>
#include <RDGeneral/FileParseException.h>
#include <RDGeneral/StreamOps.h>
#include <RDGeneral/RDLog.h>
#include "MolSupplier.h"
#include "FileParsers.h"
#include <GraphMol/SmilesParse/SmilesParse.h>
#include <boost/tokenizer.hpp>
typedef boost::tokenizer<boost::char_separator<char> > tokenizer;
#include <fstream>
#include <iostream>
#include <sstream>
#include <string>
#include <cstdlib>
namespace RDKit {
SmilesMolSupplier::SmilesMolSupplier() { init(); }
SmilesMolSupplier::SmilesMolSupplier(const std::string &fileName,
const std::string &delimiter,
int smilesColumn, int nameColumn,
bool titleLine, bool sanitize) {
init();
// FIX: this binary mode of opening file is here because of a bug in VC++ 6.0
// the function "tellg" does not work correctly if we do not open it this way
// Need to check if this has been fixed in VC++ 7.0
auto *tmpStream = new std::ifstream(fileName.c_str(), std::ios_base::binary);
if (!tmpStream || (!(*tmpStream)) || (tmpStream->bad())) {
std::ostringstream errout;
errout << "Bad input file " << fileName;
throw BadFileException(errout.str());
}
dp_inStream = static_cast<std::istream *>(tmpStream);
CHECK_INVARIANT(dp_inStream, "bad instream");
CHECK_INVARIANT(!(dp_inStream->eof()), "early EOF");
d_delim = delimiter;
df_sanitize = sanitize;
df_title = titleLine;
d_smi = smilesColumn;
d_name = nameColumn;
df_end = false;
// if(d_title) processTitleLine();
this->checkForEnd();
POSTCONDITION(dp_inStream, "bad instream");
}
SmilesMolSupplier::SmilesMolSupplier(std::istream *inStream, bool takeOwnership,
const std::string &delimiter,
int smilesColumn, int nameColumn,
bool titleLine, bool sanitize) {
CHECK_INVARIANT(inStream, "bad instream");
CHECK_INVARIANT(!(inStream->eof()), "early EOF");
init();
dp_inStream = inStream;
df_owner = takeOwnership;
d_delim = delimiter;
df_sanitize = sanitize;
df_title = titleLine;
d_smi = smilesColumn;
d_name = nameColumn;
df_end = false;
this->checkForEnd();
POSTCONDITION(dp_inStream, "bad instream");
}
SmilesMolSupplier::~SmilesMolSupplier() {
if (df_owner && dp_inStream) {
delete dp_inStream;
}
}
void SmilesMolSupplier::init() {
dp_inStream = nullptr;
df_owner = true;
df_end = false;
d_len = -1;
d_next = -1;
d_line = -1;
d_molpos.clear();
d_lineNums.clear();
}
void SmilesMolSupplier::setData(const std::string &text,
const std::string &delimiter, int smilesColumn,
int nameColumn, bool titleLine, bool sanitize) {
if (dp_inStream && df_owner) delete dp_inStream;
init();
dp_inStream = new std::stringstream(text);
d_delim = delimiter;
df_sanitize = sanitize;
df_title = titleLine;
d_smi = smilesColumn;
d_name = nameColumn;
df_end = false;
this->checkForEnd();
POSTCONDITION(dp_inStream, "bad instream");
}
// ensures that there is a line available to be read
// from the file:
void SmilesMolSupplier::checkForEnd() {
PRECONDITION(dp_inStream, "no stream");
int pos = this->skipComments();
if (pos != -1) {
d_line = -1;
dp_inStream->seekg(0);
df_end = false;
}
}
void SmilesMolSupplier::reset() {
PRECONDITION(dp_inStream, "no stream");
dp_inStream->clear();
df_end = 0;
if (d_molpos.size() > 0) {
dp_inStream->seekg(d_molpos.front());
d_next = 0;
d_line = 0;
} else {
dp_inStream->seekg(0);
d_next = -1;
d_line = -1;
}
}
ROMol *SmilesMolSupplier::processLine(std::string inLine) {
ROMol *res = nullptr;
try {
// -----------
// tokenize the input line:
// -----------
boost::char_separator<char> sep(d_delim.c_str(), "",
boost::keep_empty_tokens);
tokenizer tokens(inLine, sep);
STR_VECT recs;
for (tokenizer::iterator tokIter = tokens.begin(); tokIter != tokens.end();
++tokIter) {
std::string rec = strip(*tokIter);
recs.push_back(rec);
}
if (recs.size() <= static_cast<unsigned int>(d_smi)) {
std::ostringstream errout;
errout << "ERROR: line #" << d_line << "does not contain enough tokens\n";
throw FileParseException(errout.str());
}
// -----------
// get the smiles and create a molecule
// -----------
SmilesParserParams params;
params.sanitize = df_sanitize;
params.allowCXSMILES = false;
params.parseName = false;
res = SmilesToMol(recs[d_smi], params);
if (!res) {
std::stringstream errout;
errout << "Cannot create molecule from : '" << recs[d_smi] << "'";
throw SmilesParseException(errout.str());
}
// -----------
// get the name (if there's a name column)
// -----------
if (d_name == -1) {
// if no name defaults it to the line number we read it from string
std::ostringstream tstr;
tstr << d_line;
std::string mname = tstr.str();
res->setProp(common_properties::_Name, mname);
} else {
if (d_name >= static_cast<int>(recs.size())) {
BOOST_LOG(rdWarningLog) << "WARNING: no name column found on line "
<< d_line << std::endl;
} else {
res->setProp(common_properties::_Name, recs[d_name]);
}
}
// -----------
// read in the properties
// -----------
unsigned int iprop = 0;
for (unsigned int col = 0; col < recs.size(); col++) {
if (static_cast<int>(col) == d_smi || static_cast<int>(col) == d_name)
continue;
std::string pname, pval;
if (d_props.size() > col) {
pname = d_props[col];
} else {
pname = "Column_";
std::stringstream ss;
ss << col;
pname += ss.str();
}
pval = recs[col];
res->setProp(pname, pval);
iprop++;
}
} catch (const SmilesParseException &pe) {
// Couldn't parse the passed in smiles
// Simply print out a message
BOOST_LOG(rdErrorLog) << "ERROR: Smiles parse error on line " << d_line
<< "\n";
BOOST_LOG(rdErrorLog) << "ERROR: " << pe.message() << "\n";
res = nullptr;
} catch (const MolSanitizeException &se) {
// We couldn't sanitize the molecule
// write out an error message
BOOST_LOG(rdErrorLog) << "ERROR: Could not sanitize molecule on line "
<< d_line << std::endl;
BOOST_LOG(rdErrorLog) << "ERROR: " << se.message() << "\n";
res = nullptr;
} catch (...) {
// write out an error message
BOOST_LOG(rdErrorLog) << "ERROR: Could not process molecule on line "
<< d_line << std::endl;
res = nullptr;
}
return res;
}
// --------------------------------------------------
//
// Returns the next available line in the input stream.
//
// Side-effects:
// - If EOF is hit without reading anything, the df_end
// flag will be set.
// - If a real line is read, our d_line counter is
// incremented
//
// --------------------------------------------------
std::string SmilesMolSupplier::nextLine() {
PRECONDITION(dp_inStream, "bad stream");
if (df_end) return "";
std::string tempStr = getLine(dp_inStream);
if (tempStr == "") {
// got an empty string, check to see if we hit EOF:
if (dp_inStream->eof()) {
// yes, set our flag:
df_end = true;
}
} else if (dp_inStream->eof()) {
// we got some data before hitting EOF. So clear the
// flag on inStream
dp_inStream->clear();
}
d_line++;
return tempStr;
}
// --------------------------------------------------
//
// Returns the position of the beginning of the next
// non-comment line in the input stream. -1 is returned if
// no line could be read;
//
// Side-effects:
// - If EOF is hit without finding a valid line, the df_end
// flag will be set.
// - Our d_line counter is incremented for each line read
//
long int SmilesMolSupplier::skipComments() {
PRECONDITION(dp_inStream, "bad stream");
if (this->atEnd()) return -1;
std::streampos prev = dp_inStream->tellg();
std::string tempStr = this->nextLine();
if (!df_end) {
// if we didn't immediately hit EOF, loop until we get a valid line:
while ((tempStr[0] == '#') || (strip(tempStr).size() == 0)) {
prev = dp_inStream->tellg();
tempStr = this->nextLine();
if (this->atEnd()) break;
}
}
// if we hit EOF without getting a proper line, return -1:
if (tempStr.empty() || (tempStr[0] == '#') || (strip(tempStr).size() == 0)) {
return -1;
}
return static_cast<long int>(prev);
}
// --------------------------------------------------
//
// Reads and processes the title line
//
void SmilesMolSupplier::processTitleLine() {
PRECONDITION(dp_inStream, "bad stream");
int pos = this->skipComments();
if (pos >= 0) {
dp_inStream->seekg(pos);
std::string tempStr = getLine(dp_inStream);
boost::char_separator<char> sep(d_delim.c_str(), "",
boost::keep_empty_tokens);
tokenizer tokens(tempStr, sep);
for (tokenizer::iterator tokIter = tokens.begin(); tokIter != tokens.end();
++tokIter) {
std::string pname = strip(*tokIter);
d_props.push_back(pname);
}
}
}
std::string SmilesMolSupplier::getItemText(unsigned int idx) {
PRECONDITION(dp_inStream, "no stream");
unsigned int holder = d_next;
bool endHolder = df_end;
// this throws the relevant exception if we go too far:
moveTo(idx);
std::string res = getLine(dp_inStream);
d_next = holder;
df_end = endHolder;
return res;
}
// --------------------------------------------------
//
// Moves to the position of a particular entry in the
// stream.
//
// If insufficient entries are present, a FileParseException
// will be thrown
//
void SmilesMolSupplier::moveTo(unsigned int idx) {
PRECONDITION(dp_inStream, "bad instream");
// get the easy situations (boundary conditions) out of the
// way first:
if (d_len > -1 && idx >= static_cast<unsigned int>(d_len)) {
df_end = true;
std::ostringstream errout;
errout << "ERROR: Index error (idx = " << idx << "): "
<< "ran out of lines\n";
throw FileParseException(errout.str());
}
// dp_inStream->seekg() is called for all idx values
// and earlier calls to next() may have put the stream into a bad state
dp_inStream->clear();
// -----------
// Case 1: we have already read the particular entry:
//
// Set the stream position and return
// -----------
if (!d_molpos.empty() && d_molpos.size() > idx) {
dp_inStream->clear(); // clear the EOF tag if it has been set
df_end = false;
dp_inStream->seekg(d_molpos[idx]);
d_next = idx;
d_line = d_lineNums[idx];
return;
}
// -----------
// Case 2: we haven't read the entry, so move forward until
// we've gone far enough.
// -----------
if (d_molpos.empty()) {
// if we are just starting out, process the title line
dp_inStream->seekg(0);
if (df_title) this->processTitleLine();
} else {
// move to the last position we've seen:
dp_inStream->seekg(d_molpos.back());
// read that line:
std::string tmp = getLine(dp_inStream);
}
// the stream pointer is now at the last thing we read in
while (d_molpos.size() <= idx) {
int nextP = this->skipComments();
if (nextP < 0) {
std::ostringstream errout;
errout << "ERROR: Index error (idx = " << idx << "): "
<< "ran out of lines\n";
throw FileParseException(errout.str());
} else {
d_molpos.push_back(nextP);
d_lineNums.push_back(d_line);
if (d_molpos.size() == idx + 1 && df_end) {
// boundary condition: we could read the point we were looking for
// but not the next one.
// indicate that we've reached EOF:
dp_inStream->clear();
dp_inStream->seekg(0, std::ios_base::end);
d_len = d_molpos.size();
break;
}
}
}
POSTCONDITION(d_molpos.size() > idx, "not enough lines");
dp_inStream->seekg(d_molpos[idx]);
d_next = idx;
return;
}
// ----------------------------------------------------------------------
//
// Grabs and returns the next molecule from the input stream.
// After processing the line, the file is advanced to the next
// position in the file (skipping blank and comment lines).
//
// Throws a FileParseException if EOF has already been hit.
//
ROMol *SmilesMolSupplier::next() {
PRECONDITION(dp_inStream, "no stream");
ROMol *res = nullptr;
if (d_next < 0) {
d_next = 0;
}
// This throws an exception if it fails:
moveTo(d_next);
CHECK_INVARIANT(static_cast<int>(d_molpos.size()) > d_next,
"bad index length");
// ---------
// if we get here we can just build the molecule:
// ---------
// set the stream to the relevant position:
dp_inStream->clear(); // clear the EOF tag if it has been set
dp_inStream->seekg(d_molpos[d_next]);
d_line = d_lineNums[d_next];
// grab the line:
std::string inLine = getLine(dp_inStream);
// and process it:
res = this->processLine(inLine);
// if we don't already know the length of the supplier,
// check if we can read another line:
if (d_len < 0 && this->skipComments() < 0) {
d_len = d_molpos.size();
}
// make sure the line number is correct:
if (d_next < static_cast<int>(d_lineNums.size())) {
d_line = d_lineNums[d_next];
}
++d_next;
// if we just hit the last one, simulate EOF:
if (d_len > 0 && d_next == d_len) {
df_end = true;
}
return res;
}
// ----------------------------------------------------------------------
//
// Grabs and returns a particular molecule from the input stream.
//
// Raises a FileParseException on failure.
//
ROMol *SmilesMolSupplier::operator[](unsigned int idx) {
PRECONDITION(dp_inStream, "no stream");
// ---------
// move to the appropriate location in the file:
// ---------
moveTo(idx);
// ---------
// and then pull the molecule:
// ---------
ROMol *res = next();
return res;
}
// ----------------------------------------------------------------------
//
// Returns the number of entries in the input stream
//
unsigned int SmilesMolSupplier::length() {
PRECONDITION(dp_inStream, "no stream");
// return the number of molecule lines in the file
if (d_len > 0) {
return d_len;
} else {
std::streampos oPos = dp_inStream->tellg();
if (d_molpos.size()) {
// we've already read some molecules, go to the last
// one and read it in to initialize our location:
dp_inStream->seekg(d_molpos.back());
// skip that line and then continue:
this->skipComments();
} else {
// process the title line if need be:
if (df_title) this->processTitleLine();
}
int pos = this->skipComments();
while (pos >= 0) {
d_molpos.push_back(pos);
d_lineNums.push_back(d_line);
pos = this->skipComments();
}
// now remember to set the stream to its original position:
dp_inStream->seekg(oPos);
d_len = d_molpos.size();
return d_len;
}
}
bool SmilesMolSupplier::atEnd() { return df_end; }
}
| 1 | 18,590 | the `if` isn't necessary here; it's safe to call `delete` on `nullptr` | rdkit-rdkit | cpp |
@@ -156,6 +156,12 @@ ReturnCode_t DataWriter::assert_liveliness()
return impl_->assert_liveliness();
}
+bool DataWriter::remove_all_changes(
+ size_t* removed)
+{
+ return impl_->remove_all_changes(removed);
+}
+
} // namespace dds
} // namespace fastdds
} // namespace eprosima | 1 | // Copyright 2019 Proyectos y Sistemas de Mantenimiento SL (eProsima).
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
* DataWriter.cpp
*
*/
#include <fastdds/dds/publisher/DataWriter.hpp>
#include <fastdds/publisher/DataWriterImpl.hpp>
#include <fastdds/dds/publisher/Publisher.hpp>
namespace eprosima {
using namespace fastrtps;
namespace fastdds {
namespace dds {
DataWriter::DataWriter(
DataWriterImpl* impl,
const StatusMask& mask)
: DomainEntity(mask)
, impl_(impl)
{
}
DataWriter::DataWriter(
Publisher* pub,
Topic* topic,
const DataWriterQos& qos,
DataWriterListener* listener,
const StatusMask& mask)
: DomainEntity(mask)
, impl_(pub->create_datawriter(topic, qos, listener, mask)->impl_)
{
}
DataWriter::~DataWriter()
{
}
bool DataWriter::write(
void* data)
{
return impl_->write(data);
}
bool DataWriter::write(
void* data,
fastrtps::rtps::WriteParams& params)
{
return impl_->write(data, params);
}
ReturnCode_t DataWriter::write(
void* data,
const fastrtps::rtps::InstanceHandle_t& handle)
{
return impl_->write(data, handle);
}
ReturnCode_t DataWriter::dispose(
void* data,
const fastrtps::rtps::InstanceHandle_t& handle)
{
return impl_->dispose(data, handle);
}
bool DataWriter::dispose(
void* data)
{
return impl_->dispose(data);
}
const fastrtps::rtps::GUID_t& DataWriter::guid()
{
return impl_->guid();
}
fastrtps::rtps::InstanceHandle_t DataWriter::get_instance_handle() const
{
return impl_->get_instance_handle();
}
ReturnCode_t DataWriter::set_qos(
const DataWriterQos& qos)
{
return impl_->set_qos(qos);
}
const DataWriterQos& DataWriter::get_qos() const
{
return impl_->get_qos();
}
ReturnCode_t DataWriter::get_qos(
DataWriterQos& qos) const
{
qos = impl_->get_qos();
return ReturnCode_t::RETCODE_OK;
}
ReturnCode_t DataWriter::set_listener(
DataWriterListener* listener)
{
return impl_->set_listener(listener);
}
const DataWriterListener* DataWriter::get_listener() const
{
return impl_->get_listener();
}
Topic* DataWriter::get_topic() const
{
return impl_->get_topic();
}
const Publisher* DataWriter::get_publisher() const
{
return impl_->get_publisher();
}
ReturnCode_t DataWriter::wait_for_acknowledgments(
const Duration_t& max_wait)
{
return impl_->wait_for_acknowledgments(max_wait);
}
ReturnCode_t DataWriter::get_offered_deadline_missed_status(
OfferedDeadlineMissedStatus& status)
{
return impl_->get_offered_deadline_missed_status(status);
}
ReturnCode_t DataWriter::get_liveliness_lost_status(
LivelinessLostStatus& status)
{
return impl_->get_liveliness_lost_status(status);
}
ReturnCode_t DataWriter::assert_liveliness()
{
return impl_->assert_liveliness();
}
} // namespace dds
} // namespace fastdds
} // namespace eprosima
 | 1 | 18,439 | Change this if the method's name eventually changes to `clear_history`, as proposed elsewhere. | eProsima-Fast-DDS | cpp |
@@ -34,9 +34,8 @@ def showHelp(helpId: str):
noHelpMessage = _("No help available here.")
queueHandler.queueFunction(queueHandler.eventQueue, ui.message, noHelpMessage)
return
-
- import gui
- helpFile = gui.getDocFilePath("userGuide.html")
+ import documentationUtils
+ helpFile = documentationUtils.getDocFilePath("userGuide.html")
if helpFile is None:
# Translators: Message shown when trying to display context sensitive help,
# indicating that the user guide could not be found. | 1 | # -*- coding: UTF-8 -*-
# A part of NonVisual Desktop Access (NVDA)
# Copyright (C) 2017-2020 NV Access Limited, Thomas Stivers
# This file is covered by the GNU General Public License.
# See the file COPYING for more details.
import os
import tempfile
import typing
import wx
from logHandler import log
def writeRedirect(helpId: str, helpFilePath: str, contextHelpPath: str):
redirect = rf"""
<html><head>
<meta http-equiv="refresh" content="0;url=file:///{helpFilePath}#{helpId}" />
</head></html>
"""
with open(contextHelpPath, 'w') as f:
f.write(redirect)
def showHelp(helpId: str):
"""Display the corresponding section of the user guide when either the Help
button in an NVDA dialog is pressed or the F1 key is pressed on a
recognized control.
"""
import ui
import queueHandler
if not helpId:
# Translators: Message indicating no context sensitive help is available for the control or dialog.
noHelpMessage = _("No help available here.")
queueHandler.queueFunction(queueHandler.eventQueue, ui.message, noHelpMessage)
return
import gui
helpFile = gui.getDocFilePath("userGuide.html")
if helpFile is None:
# Translators: Message shown when trying to display context sensitive help,
# indicating that the user guide could not be found.
noHelpMessage = _("No user guide found.")
log.debugWarning("No user guide found: possible cause - running from source without building user docs")
queueHandler.queueFunction(queueHandler.eventQueue, ui.message, noHelpMessage)
return
log.debug(f"Opening help: helpId = {helpId}, userGuidePath: {helpFile}")
nvdaTempDir = os.path.join(tempfile.gettempdir(), "nvda")
if not os.path.exists(nvdaTempDir):
os.mkdir(nvdaTempDir)
contextHelpRedirect = os.path.join(nvdaTempDir, "contextHelp.html")
try:
# a redirect is necessary because not all browsers support opening a fragment URL from the command line.
writeRedirect(helpId, helpFile, contextHelpRedirect)
except Exception:
log.error("Unable to write context help redirect file.", exc_info=True)
return
try:
os.startfile(f"file://{contextHelpRedirect}")
except Exception:
log.error("Unable to launch context help.", exc_info=True)
def bindHelpEvent(helpId: str, window: wx.Window):
window.Unbind(wx.EVT_HELP)
window.Bind(
wx.EVT_HELP,
lambda evt: _onEvtHelp(helpId, evt),
)
log.debug(f"Did context help binding for {window.__class__.__qualname__}")
def _onEvtHelp(helpId: str, evt: wx.HelpEvent):
# Don't call evt.skip. Events bubble upwards through parent controls.
# Context help for more specific controls should override the less specific parent controls.
showHelp(helpId)
class ContextHelpMixin:
#: The name of the appropriate anchor in NVDA help that provides help for the wx.Window this mixin is
# used with.
helpId = ""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
helpId = getattr(self, "helpId", None)
if helpId is None or not isinstance(helpId, str):
log.warning(f"No helpId (or incorrect type) for: {self.__class__.__qualname__} helpId: {helpId!r}")
helpId = ""
window = typing.cast(wx.Window, self)
bindHelpEvent(helpId, window)
def bindHelpEvent(self, helpId: str, window: wx.Window):
"""A helper method, to bind helpId strings to sub-controls of this Window.
Useful for adding context help to wx controls directly.
"""
bindHelpEvent(helpId, window)
| 1 | 31,810 | can this one also be moved to the top of the file? | nvaccess-nvda | py |
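The review question above is about moving `import documentationUtils` out of `showHelp()`. A hedged sketch of what the module-level arrangement might look like; whether NVDA's startup and import order actually allows this is not verified here:

```python
# contextHelp.py (sketch): import once at module level instead of inside showHelp().
import documentationUtils


def showHelp(helpId: str):
    # ... helpId argument checks unchanged ...
    helpFile = documentationUtils.getDocFilePath("userGuide.html")
    # ... rest of the function unchanged ...
```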
@@ -26,6 +26,8 @@ type AppliedToGroup struct {
metav1.ObjectMeta
// Pods is a list of Pods selected by this group.
Pods []GroupMemberPod
+ // GroupMembers is a list of resources selected by this group.
+ GroupMembers []GroupMember
}
// PodReference represents a Pod Reference. | 1 | // Copyright 2019 Antrea Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package networking
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// AppliedToGroup is the message format of antrea/pkg/controller/types.AppliedToGroup in an API response.
type AppliedToGroup struct {
metav1.TypeMeta
metav1.ObjectMeta
// Pods is a list of Pods selected by this group.
Pods []GroupMemberPod
}
// PodReference represents a Pod Reference.
type PodReference struct {
// The name of this pod.
Name string
// The namespace of this pod.
Namespace string
}
// NamedPort represents a Port with a name on Pod.
type NamedPort struct {
// Port represents the Port number.
Port int32
// Name represents the associated name with this Port number.
Name string
// Protocol for port. Must be UDP, TCP, or SCTP.
Protocol Protocol
}
// GroupMemberPod represents a Pod related member to be populated in Groups.
type GroupMemberPod struct {
// Pod maintains the reference to the Pod.
Pod *PodReference
// IP maintains the IPAddress of the Pod.
IP IPAddress
// Ports maintain the list of named port associated with this Pod member.
Ports []NamedPort
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// AppliedToGroupPatch describes the incremental update of an AppliedToGroup.
type AppliedToGroupPatch struct {
metav1.TypeMeta
metav1.ObjectMeta
AddedPods []GroupMemberPod
RemovedPods []GroupMemberPod
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// AppliedToGroupList is a list of AppliedToGroup objects.
type AppliedToGroupList struct {
metav1.TypeMeta
metav1.ListMeta
Items []AppliedToGroup
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// AddressGroup is the message format of antrea/pkg/controller/types.AddressGroup in an API response.
type AddressGroup struct {
metav1.TypeMeta
metav1.ObjectMeta
// Pods is a list of Pods selected by this group.
Pods []GroupMemberPod
}
// IPAddress describes a single IP address. Either an IPv4 or IPv6 address must be set.
type IPAddress []byte
// IPNet describes an IP network.
type IPNet struct {
IP IPAddress
PrefixLength int32
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// AddressGroupPatch describes the incremental update of an AddressGroup.
type AddressGroupPatch struct {
metav1.TypeMeta
metav1.ObjectMeta
AddedPods []GroupMemberPod
RemovedPods []GroupMemberPod
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// AddressGroupList is a list of AddressGroup objects.
type AddressGroupList struct {
metav1.TypeMeta
metav1.ListMeta
Items []AddressGroup
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// NetworkPolicy is the message format of antrea/pkg/controller/types.NetworkPolicy in an API response.
type NetworkPolicy struct {
metav1.TypeMeta
metav1.ObjectMeta
// Rules is a list of rules to be applied to the selected Pods.
Rules []NetworkPolicyRule
// AppliedToGroups is a list of names of AppliedToGroups to which this policy applies.
AppliedToGroups []string
}
// Direction defines traffic direction of NetworkPolicyRule.
type Direction string
const (
DirectionIn Direction = "In"
DirectionOut Direction = "Out"
)
// NetworkPolicyRule describes a particular set of traffic that is allowed.
type NetworkPolicyRule struct {
// The direction of this rule.
// If it's set to In, From must be set and To must not be set.
// If it's set to Out, To must be set and From must not be set.
Direction Direction
// From represents sources which should be able to access the pods selected by the policy.
From NetworkPolicyPeer
// To represents destinations which should be able to be accessed by the pods selected by the policy.
To NetworkPolicyPeer
// Services is a list of services which should be matched.
Services []Service
}
// Protocol defines network protocols supported for things like container ports.
type Protocol string
const (
// ProtocolTCP is the TCP protocol.
ProtocolTCP Protocol = "TCP"
// ProtocolUDP is the UDP protocol.
ProtocolUDP Protocol = "UDP"
// ProtocolSCTP is the SCTP protocol.
ProtocolSCTP Protocol = "SCTP"
)
// Service describes a port to allow traffic on.
type Service struct {
// The protocol (TCP, UDP, or SCTP) which traffic must match. If not specified, this
// field defaults to TCP.
// +optional
Protocol *Protocol
// The port name or number on the given protocol. If not specified, this matches all port numbers.
// +optional
Port *intstr.IntOrString
}
// NetworkPolicyPeer describes a peer of NetworkPolicyRules.
// It could be a list of names of AddressGroups and/or a list of IPBlock.
type NetworkPolicyPeer struct {
// A list of names of AddressGroups.
AddressGroups []string
// A list of IPBlock.
IPBlocks []IPBlock
}
// IPBlock describes a particular CIDR (Ex. "192.168.1.1/24"). The except entry describes CIDRs that should
// not be included within this rule.
type IPBlock struct {
// CIDR is an IPNet represents the IP Block.
CIDR IPNet
// Except is a slice of IPNets that should not be included within an IP Block.
// Except values will be rejected if they are outside the CIDR range.
// +optional
Except []IPNet
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// NetworkPolicyList is a list of NetworkPolicy objects.
type NetworkPolicyList struct {
metav1.TypeMeta
metav1.ListMeta
Items []NetworkPolicy
}
| 1 | 18,565 | So if I understand this correctly the externalEntities should be computed and stored in GroupMember, while current code usage of GroupMemberPod will be migrated to GroupMember later? | antrea-io-antrea | go |
@@ -1,13 +1,9 @@
-using System;
-using System.Collections.Generic;
-
-namespace Datadog.Tracer
+namespace Datadog.Tracer
{
public static class Tags
{
- public const string Service = "Datadog.Service";
- public const string Resource = "Datadog.Resource";
- public const string Error = "Datadog.Error";
- public const string Type = "Datadog.Type";
+ public const string ServiceName = "service.name";
+ public const string ResourceName = "resource.name";
+ public const string SpanType = "span.type";
}
} | 1 | using System;
using System.Collections.Generic;
namespace Datadog.Tracer
{
public static class Tags
{
public const string Service = "Datadog.Service";
public const string Resource = "Datadog.Resource";
public const string Error = "Datadog.Error";
public const string Type = "Datadog.Type";
}
}
 | 1 | 14,095 | Don't we have tests to change after this breaking change? Probably we're not using hard-coded strings in our tests when we create a `Span`? | DataDog-dd-trace-dotnet | .cs |
@@ -19,7 +19,7 @@ REPORT_FREQUENCY = 5000
SUBSCRIBER_NAME = "bq"
KEYSPACE_NAME_INCOMING = "ilisten"
KEYSPACE_NAME_UNIQUE = "ulisten"
-APP_CREDENTIALS_FILE = "bigquery-credentials.json"
+APP_CREDENTIALS_FILE = os.environ['GOOGLE_APPLICATION_CREDENTIALS']
# TODO:
# Big query hardcoded data set ids | 1 | #!/usr/bin/env python
import sys
import os
from datetime import datetime
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), ".."))
from redis import Redis
from redis_pubsub import RedisPubSubSubscriber, RedisPubSubPublisher, NoSubscriberNameSetException, WriteFailException
import ujson
import logging
from listen import Listen
from time import time, sleep
import config
from googleapiclient import discovery
from googleapiclient.errors import HttpError
from oauth2client.client import GoogleCredentials
REPORT_FREQUENCY = 5000
SUBSCRIBER_NAME = "bq"
KEYSPACE_NAME_INCOMING = "ilisten"
KEYSPACE_NAME_UNIQUE = "ulisten"
APP_CREDENTIALS_FILE = "bigquery-credentials.json"
# TODO:
# Big query hardcoded data set ids
# Redis persistence
class BigQueryWriterSubscriber(RedisPubSubSubscriber):
def __init__(self, redis):
RedisPubSubSubscriber.__init__(self, redis, KEYSPACE_NAME_UNIQUE)
self.log = logging.getLogger(__name__)
self.log.setLevel(logging.INFO)
logging.basicConfig()
self.total_inserts = 0
self.inserts = 0
self.time = 0
def write(self, listens):
# We've collected listens to write, now write them
bq_data = []
for listen in listens:
meta = listen['track_metadata']
row = {
'user_name' : listen['user_name'],
'listened_at' : listen['listened_at'],
'artist_msid' : meta['additional_info']['artist_msid'],
'artist_name' : meta['artist_name'],
'artist_mbids' : ",".join(meta['additional_info'].get('artist_mbids', [])),
'album_msid' : meta['additional_info'].get('album_msid', ''),
'album_name' : meta['additional_info'].get('release_name', ''),
'album_mbid' : meta['additional_info'].get('release_mbid', ''),
'track_name' : meta['track_name'],
'recording_msid' : listen['recording_msid'],
'recording_mbid' : meta['additional_info'].get('recording_mbid', ''),
'tags' : ",".join(meta['additional_info'].get('tags', [])),
}
bq_data.append({
'json': row,
'insertId': "%s-%s" % (listen['user_name'], listen['listened_at'])
})
body = { 'rows' : bq_data }
try:
t0 = time()
ret = self.bigquery.tabledata().insertAll(
projectId="listenbrainz",
datasetId="listenbrainz_test",
tableId="listen",
body=body).execute(num_retries=5)
self.time += time() - t0
except HttpError as e:
self.log.error("Submit to BigQuery failed: " + str(e))
self.log.error(ujson.dumps(body, indent=3))
# Clear the start time, since we've cleaned out the batch
batch_start_time = 0
return True
def start(self):
# if we're not supposed to run, just sleep
if not config.WRITE_TO_BIGQUERY:
sleep(1000)
return
if not os.path.exists(APP_CREDENTIALS_FILE):
self.log.error("BiqQueryWriter not started, big-query-credentials.json is missing.")
sleep(1000)
return
self.log.info("BigQueryWriterSubscriber started")
credentials = GoogleCredentials.get_application_default()
self.bigquery = discovery.build('bigquery', 'v2', credentials=credentials)
self.register(SUBSCRIBER_NAME)
while True:
try:
count = self.subscriber()
except NoSubscriberNameSetException as e:
self.log.error("BigQueryWriterSubscriber has no subscriber name set.")
return
except WriteFailException as e:
self.log.error("BigQueryWriterSubscriber failed to write: %s" % str(e))
return
if not count:
continue
# collect and occasionally print some stats
self.inserts += count
if self.inserts >= REPORT_FREQUENCY:
self.total_inserts += self.inserts
if self.time > 0:
self.log.error("Inserted %d rows in %.1fs (%.2f listens/sec). Total %d rows." % \
(count, self.time, count / self.time, self.total_inserts))
self.inserts = 0
self.time = 0
if __name__ == "__main__":
r = Redis(config.REDIS_HOST)
bq = BigQueryWriterSubscriber(r)
while True:
# If the start fails, try again in a few
bq.start()
sleep(3)
| 1 | 14,125 | will this fail if the env variable doesn't exist? Is this the preferred behaviour? | metabrainz-listenbrainz-server | py |
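On the reviewer's question: `os.environ['GOOGLE_APPLICATION_CREDENTIALS']` raises `KeyError` at import time when the variable is unset, so the module would crash before the existing `os.path.exists()` guard ever runs. A hedged sketch of a more forgiving lookup; the fallback filename reuses the old hard-coded value, and whether that is the desired behaviour is up to the authors:

```python
import os

# .get() returns a default instead of raising KeyError, so a missing variable is
# caught by the later os.path.exists() check and logged, rather than killing the
# process at import time.
APP_CREDENTIALS_FILE = os.environ.get(
    "GOOGLE_APPLICATION_CREDENTIALS", "bigquery-credentials.json")
```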
@@ -325,8 +325,9 @@ class SpatialPooler(object):
# Initialize a tiny random tie breaker. This is used to determine winning
# columns where the overlaps are identical.
- self._tieBreaker = 0.01*numpy.array([self._random.getReal64() for i in
- xrange(self._numColumns)])
+ self._tieBreaker = numpy.array([0.01 * self._random.getReal64() for i in
+ xrange(self._numColumns)],
+ dtype=realDType)
# 'self._connectedSynapses' is a similar matrix to 'self._permanences'
# (rows represent cortical columns, columns represent input bits) whose | 1 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013-2014, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import itertools
import numpy
from nupic.bindings.math import (SM32 as SparseMatrix,
SM_01_32_32 as SparseBinaryMatrix,
GetNTAReal,
Random as NupicRandom)
realDType = GetNTAReal()
uintType = "uint32"
VERSION = 2
class InvalidSPParamValueError(ValueError):
"""The user passed an invalid value for a SpatialPooler parameter
"""
pass
class _SparseMatrixCorticalColumnAdapter(object):
""" Many functions in SpatialPooler operate on a columnIndex but use an
underlying storage implementation based on a Sparse Matrix in which cortical
columns are represented as rows. This can be confusing to someone trying to
follow the algorithm, confusing terminology between matrix math and cortical
columns. This class is provided to abstract away some of the details of the
underlying implementation, providing a cleaner API that isn't specific to
sparse matrices.
"""
def __getitem__(self, columnIndex):
""" Wraps getRow() such that instances may be indexed by columnIndex.
"""
return super(_SparseMatrixCorticalColumnAdapter, self).getRow(columnIndex)
def replace(self, columnIndex, bitmap):
""" Wraps replaceSparseRow()
"""
return super(_SparseMatrixCorticalColumnAdapter, self).replaceSparseRow(
columnIndex, bitmap
)
def update(self, columnIndex, vector):
""" Wraps setRowFromDense()
"""
return super(_SparseMatrixCorticalColumnAdapter, self).setRowFromDense(
columnIndex, vector
)
class CorticalColumns(_SparseMatrixCorticalColumnAdapter, SparseMatrix):
""" SparseMatrix variant of _SparseMatrixCorticalColumnAdapter. Use in cases
where column connections are represented as float values, such as permanence
values
"""
pass
class BinaryCorticalColumns(_SparseMatrixCorticalColumnAdapter,
SparseBinaryMatrix):
""" SparseBinaryMatrix variant of _SparseMatrixCorticalColumnAdapter. Use in
cases where column connections are represented as bitmaps.
"""
pass
class SpatialPooler(object):
"""
This class implements the spatial pooler. It is in charge of handling the
relationships between the columns of a region and the inputs bits. The
primary public interface to this function is the "compute" method, which
takes in an input vector and returns a list of activeColumns columns.
Example Usage:
>
  > sp = SpatialPooler(...)
  > activeArray = numpy.zeros(sp.getNumColumns())
  > for line in file:
  >   inputVector = numpy.array(line)
  >   sp.compute(inputVector, learn=True, activeArray=activeArray)
> ...
"""
def __init__(self,
inputDimensions=(32, 32),
columnDimensions=(64, 64),
potentialRadius=16,
potentialPct=0.5,
globalInhibition=False,
localAreaDensity=-1.0,
numActiveColumnsPerInhArea=10.0,
stimulusThreshold=0,
synPermInactiveDec=0.008,
synPermActiveInc=0.05,
synPermConnected=0.10,
minPctOverlapDutyCycle=0.001,
minPctActiveDutyCycle=0.001,
dutyCyclePeriod=1000,
maxBoost=10.0,
seed=-1,
spVerbosity=0,
wrapAround=True
):
"""
Parameters:
----------------------------
@param inputDimensions:
A sequence representing the dimensions of the input vector. Format is
(height, width, depth, ...), where each value represents the size of the
dimension. For a topology of one dimension with 100 inputs use 100, or
(100,). For a two dimensional topology of 10x5 use (10,5).
@param columnDimensions:
A sequence representing the dimensions of the columns in the region.
Format is (height, width, depth, ...), where each value represents the
size of the dimension. For a topology of one dimension with 2000 columns
use 2000, or (2000,). For a three dimensional topology of 32x64x16 use
(32, 64, 16).
@param potentialRadius:
This parameter determines the extent of the input that each column can
potentially be connected to. This can be thought of as the input bits
that are visible to each column, or a 'receptiveField' of the field of
vision. A large enough value will result in 'global coverage', meaning
that each column can potentially be connected to every input bit. This
parameter defines a square (or hyper
square) area: a column will have a max square potential pool with sides of
length 2 * potentialRadius + 1.
@param potentialPct:
The percent of the inputs, within a column's potential radius, that a
column can be connected to. If set to 1, the column will be connected
to every input within its potential radius. This parameter is used to
give each column a unique potential pool when a large potentialRadius
causes overlap between the columns. At initialization time we choose
((2*potentialRadius + 1)^(# inputDimensions) * potentialPct) input bits
to comprise the column's potential pool.
@param globalInhibition:
If true, then during inhibition phase the winning columns are selected
as the most active columns from the region as a whole. Otherwise, the
winning columns are selected with respect to their local neighborhoods.
Using global inhibition boosts performance x60.
@param localAreaDensity:
The desired density of active columns within a local inhibition area
(the size of which is set by the internally calculated inhibitionRadius,
which is in turn determined from the average size of the connected
potential pools of all columns). The inhibition logic will insure that
at most N columns remain ON within a local inhibition area, where
N = localAreaDensity * (total number of columns in inhibition area).
@param numActiveColumnsPerInhArea:
An alternate way to control the density of the active columns. If
numActiveColumnsPerInhArea is specified then localAreaDensity must be
less than 0, and vice versa. When using numActiveColumnsPerInhArea, the
inhibition logic will insure that at most 'numActiveColumnsPerInhArea'
columns remain ON within a local inhibition area (the size of which is
set by the internally calculated inhibitionRadius, which is in turn
determined from the average size of the connected receptive fields of all
columns). When using this method, as columns learn and grow their
effective receptive fields, the inhibitionRadius will grow, and hence the
net density of the active columns will *decrease*. This is in contrast to
the localAreaDensity method, which keeps the density of active columns
the same regardless of the size of their receptive fields.
@param stimulusThreshold:
This is a number specifying the minimum number of synapses that must be
    on in order for a column to turn ON. The purpose of this is to prevent
noise input from activating columns. Specified as a percent of a fully
grown synapse.
@param synPermInactiveDec:
The amount by which an inactive synapse is decremented in each round.
Specified as a percent of a fully grown synapse.
@param synPermActiveInc:
The amount by which an active synapse is incremented in each round.
Specified as a percent of a fully grown synapse.
@param synPermConnected:
The default connected threshold. Any synapse whose permanence value is
above the connected threshold is a "connected synapse", meaning it can
contribute to the cell's firing.
@param minPctOverlapDutyCycle:
A number between 0 and 1.0, used to set a floor on how often a column
should have at least stimulusThreshold active inputs. Periodically, each
column looks at the overlap duty cycle of all other columns within its
inhibition radius and sets its own internal minimal acceptable duty cycle
to: minPctDutyCycleBeforeInh * max(other columns' duty cycles). On each
iteration, any column whose overlap duty cycle falls below this computed
value will get all of its permanence values boosted up by
synPermActiveInc. Raising all permanences in response to a sub-par duty
cycle before inhibition allows a cell to search for new inputs when
either its previously learned inputs are no longer ever active, or when
the vast majority of them have been "hijacked" by other columns.
@param minPctActiveDutyCycle:
A number between 0 and 1.0, used to set a floor on how often a column
should be activate. Periodically, each column looks at the activity duty
cycle of all other columns within its inhibition radius and sets its own
internal minimal acceptable duty cycle to: minPctDutyCycleAfterInh *
max(other columns' duty cycles). On each iteration, any column whose duty
cycle after inhibition falls below this computed value will get its
internal boost factor increased.
@param dutyCyclePeriod:
The period used to calculate duty cycles. Higher values make it take
longer to respond to changes in boost or synPerConnectedCell. Shorter
values make it more unstable and likely to oscillate.
@param maxBoost:
The maximum overlap boost factor. Each column's overlap gets multiplied
by a boost factor before it gets considered for inhibition. The actual
boost factor for a column is number between 1.0 and maxBoost. A boost
factor of 1.0 is used if the duty cycle is >= minOverlapDutyCycle,
maxBoost is used if the duty cycle is 0, and any duty cycle in between is
linearly extrapolated from these 2 endpoints.
@param seed:
Seed for our own pseudo-random number generator.
@param spVerbosity:
spVerbosity level: 0, 1, 2, or 3
@param wrapAround:
Determines if inputs at the beginning and end of an input dimension should
be considered neighbors when mapping columns to inputs.
"""
if (numActiveColumnsPerInhArea == 0 and
(localAreaDensity == 0 or localAreaDensity > 0.5)):
raise InvalidSPParamValueError("Inhibition parameters are invalid")
columnDimensions = numpy.array(columnDimensions, ndmin=1)
numColumns = columnDimensions.prod()
if not isinstance(numColumns, (int, long)) or numColumns <= 0:
raise InvalidSPParamValueError("Invalid number of columns ({})"
.format(repr(numColumns)))
inputDimensions = numpy.array(inputDimensions, ndmin=1)
numInputs = inputDimensions.prod()
if not isinstance(numInputs, (int, long)) or numInputs <= 0:
raise InvalidSPParamValueError("Invalid number of inputs ({}"
.format(repr(numInputs)))
if inputDimensions.size != columnDimensions.size:
raise InvalidSPParamValueError(
"Input dimensions must match column dimensions")
self._seed(seed)
self._numInputs = int(numInputs)
self._numColumns = int(numColumns)
self._columnDimensions = columnDimensions
self._inputDimensions = inputDimensions
self._potentialRadius = int(min(potentialRadius, numInputs))
self._potentialPct = potentialPct
self._globalInhibition = globalInhibition
self._numActiveColumnsPerInhArea = int(numActiveColumnsPerInhArea)
self._localAreaDensity = localAreaDensity
self._stimulusThreshold = stimulusThreshold
self._synPermInactiveDec = synPermInactiveDec
self._synPermActiveInc = synPermActiveInc
self._synPermBelowStimulusInc = synPermConnected / 10.0
self._synPermConnected = synPermConnected
self._minPctOverlapDutyCycles = minPctOverlapDutyCycle
self._minPctActiveDutyCycles = minPctActiveDutyCycle
self._dutyCyclePeriod = dutyCyclePeriod
self._maxBoost = maxBoost
self._spVerbosity = spVerbosity
self._wrapAround = wrapAround
self._synPermMin = 0.0
self._synPermMax = 1.0
self._synPermTrimThreshold = synPermActiveInc / 2.0
if self._synPermTrimThreshold >= self._synPermConnected:
raise InvalidSPParamValueError(
"synPermTrimThreshold ({}) must be less than synPermConnected ({})"
.format(repr(self._synPermTrimThreshold),
repr(self._synPermConnected)))
self._updatePeriod = 50
initConnectedPct = 0.5
self._version = VERSION
self._iterationNum = 0
self._iterationLearnNum = 0
    # Store the set of all inputs within each column's potential pool as a
    # single adjacency matrix such that matrix rows map to cortical columns,
    # and matrix columns map to input bits. If potentialPools[i][j] == 1,
# then input bit 'j' is in column 'i's potential pool. A column can only be
# connected to inputs in its potential pool. Here, BinaryCorticalColumns
# is used to provide cortical column-centric semantics for what is
# otherwise a sparse binary matrix implementation. Sparse binary matrix is
# used as an optimization since a column will only be connected to a small
# fraction of input bits.
self._potentialPools = BinaryCorticalColumns(numInputs)
self._potentialPools.resize(numColumns, numInputs)
# Initialize the permanences for each column. Similar to the
# 'self._potentialPools', the permanences are stored in a matrix whose rows
# represent the cortical columns, and whose columns represent the input
# bits. If self._permanences[i][j] = 0.2, then the synapse connecting
# cortical column 'i' to input bit 'j' has a permanence of 0.2. Here,
# CorticalColumns is used to provide cortical column-centric semantics for
# what is otherwise a sparse matrix implementation. Sparse matrix is used
    # as an optimization to improve computation time of algorithms that
# require iterating over the data structure. This permanence matrix is
# only allowed to have non-zero elements where the potential pool is
# non-zero.
self._permanences = CorticalColumns(numColumns, numInputs)
# Initialize a tiny random tie breaker. This is used to determine winning
# columns where the overlaps are identical.
self._tieBreaker = 0.01*numpy.array([self._random.getReal64() for i in
xrange(self._numColumns)])
# 'self._connectedSynapses' is a similar matrix to 'self._permanences'
# (rows represent cortical columns, columns represent input bits) whose
# entries represent whether the cortical column is connected to the input
# bit, i.e. its permanence value is greater than 'synPermConnected'. While
# this information is readily available from the 'self._permanence' matrix,
# it is stored separately for efficiency purposes.
self._connectedSynapses = BinaryCorticalColumns(numInputs)
self._connectedSynapses.resize(numColumns, numInputs)
# Stores the number of connected synapses for each column. This is simply
# a sum of each row of 'self._connectedSynapses'. again, while this
# information is readily available from 'self._connectedSynapses', it is
# stored separately for efficiency purposes.
self._connectedCounts = numpy.zeros(numColumns, dtype=realDType)
# Initialize the set of permanence values for each column. Ensure that
# each column is connected to enough input bits to allow it to be
# activated.
for columnIndex in xrange(numColumns):
potential = self._mapPotential(columnIndex, wrapAround=self._wrapAround)
self._potentialPools.replace(columnIndex, potential.nonzero()[0])
perm = self._initPermanence(potential, initConnectedPct)
self._updatePermanencesForColumn(perm, columnIndex, raisePerm=True)
self._overlapDutyCycles = numpy.zeros(numColumns, dtype=realDType)
self._activeDutyCycles = numpy.zeros(numColumns, dtype=realDType)
self._minOverlapDutyCycles = numpy.zeros(numColumns,
dtype=realDType)
self._minActiveDutyCycles = numpy.zeros(numColumns,
dtype=realDType)
self._boostFactors = numpy.ones(numColumns, dtype=realDType)
# The inhibition radius determines the size of a column's local
# neighborhood. A cortical column must overcome the overlap score of
# columns in its neighborhood in order to become active. This radius is
# updated every learning round. It grows and shrinks with the average
# number of connected synapses per column.
self._inhibitionRadius = 0
self._updateInhibitionRadius()
if self._spVerbosity > 0:
self.printParameters()
def getColumnDimensions(self):
"""Returns the dimensions of the columns in the region"""
return self._columnDimensions
def getInputDimensions(self):
"""Returns the dimensions of the input vector"""
return self._inputDimensions
def getNumColumns(self):
"""Returns the total number of columns"""
return self._numColumns
def getNumInputs(self):
"""Returns the total number of inputs"""
return self._numInputs
def getPotentialRadius(self):
"""Returns the potential radius"""
return self._potentialRadius
def setPotentialRadius(self, potentialRadius):
"""Sets the potential radius"""
self._potentialRadius = potentialRadius
def getPotentialPct(self):
"""Returns the potential percent"""
return self._potentialPct
def setPotentialPct(self, potentialPct):
"""Sets the potential percent"""
self._potentialPct = potentialPct
def getGlobalInhibition(self):
"""Returns whether global inhibition is enabled"""
return self._globalInhibition
def setGlobalInhibition(self, globalInhibition):
"""Sets global inhibition"""
self._globalInhibition = globalInhibition
def getNumActiveColumnsPerInhArea(self):
"""Returns the number of active columns per inhibition area. Returns a
value less than 0 if parameter is unused"""
return self._numActiveColumnsPerInhArea
def setNumActiveColumnsPerInhArea(self, numActiveColumnsPerInhArea):
"""Sets the number of active columns per inhibition area. Invalidates the
'localAreaDensity' parameter"""
assert(numActiveColumnsPerInhArea > 0)
self._numActiveColumnsPerInhArea = numActiveColumnsPerInhArea
self._localAreaDensity = 0
def getLocalAreaDensity(self):
"""Returns the local area density. Returns a value less than 0 if parameter
is unused"""
return self._localAreaDensity
def setLocalAreaDensity(self, localAreaDensity):
"""Sets the local area density. Invalidates the 'numActiveColumnsPerInhArea'
parameter"""
assert(localAreaDensity > 0 and localAreaDensity <= 1)
self._localAreaDensity = localAreaDensity
self._numActiveColumnsPerInhArea = 0
def getStimulusThreshold(self):
"""Returns the stimulus threshold"""
return self._stimulusThreshold
def setStimulusThreshold(self, stimulusThreshold):
"""Sets the stimulus threshold"""
self._stimulusThreshold = stimulusThreshold
def getInhibitionRadius(self):
"""Returns the inhibition radius"""
return self._inhibitionRadius
def setInhibitionRadius(self, inhibitionRadius):
"""Sets the inhibition radius"""
self._inhibitionRadius = inhibitionRadius
def getDutyCyclePeriod(self):
"""Returns the duty cycle period"""
return self._dutyCyclePeriod
def setDutyCyclePeriod(self, dutyCyclePeriod):
"""Sets the duty cycle period"""
self._dutyCyclePeriod = dutyCyclePeriod
def getMaxBoost(self):
"""Returns the maximum boost value"""
return self._maxBoost
def setMaxBoost(self, maxBoost):
"""Sets the maximum boost value"""
self._maxBoost = maxBoost
def getIterationNum(self):
"""Returns the iteration number"""
return self._iterationNum
def setIterationNum(self, iterationNum):
"""Sets the iteration number"""
self._iterationNum = iterationNum
def getIterationLearnNum(self):
"""Returns the learning iteration number"""
return self._iterationLearnNum
def setIterationLearnNum(self, iterationLearnNum):
"""Sets the learning iteration number"""
self._iterationLearnNum = iterationLearnNum
def getSpVerbosity(self):
"""Returns the verbosity level"""
return self._spVerbosity
def setSpVerbosity(self, spVerbosity):
"""Sets the verbosity level"""
self._spVerbosity = spVerbosity
def getUpdatePeriod(self):
"""Returns the update period"""
return self._updatePeriod
def setUpdatePeriod(self, updatePeriod):
"""Sets the update period"""
self._updatePeriod = updatePeriod
def getSynPermTrimThreshold(self):
"""Returns the permanence trim threshold"""
return self._synPermTrimThreshold
def setSynPermTrimThreshold(self, synPermTrimThreshold):
"""Sets the permanence trim threshold"""
self._synPermTrimThreshold = synPermTrimThreshold
def getSynPermActiveInc(self):
"""Returns the permanence increment amount for active synapses
inputs"""
return self._synPermActiveInc
def setSynPermActiveInc(self, synPermActiveInc):
"""Sets the permanence increment amount for active synapses"""
self._synPermActiveInc = synPermActiveInc
def getSynPermInactiveDec(self):
"""Returns the permanence decrement amount for inactive synapses"""
return self._synPermInactiveDec
def setSynPermInactiveDec(self, synPermInactiveDec):
"""Sets the permanence decrement amount for inactive synapses"""
self._synPermInactiveDec = synPermInactiveDec
def getSynPermBelowStimulusInc(self):
"""Returns the permanence increment amount for columns that have not been
recently active """
return self._synPermBelowStimulusInc
def setSynPermBelowStimulusInc(self, synPermBelowStimulusInc):
"""Sets the permanence increment amount for columns that have not been
recently active """
self._synPermBelowStimulusInc = synPermBelowStimulusInc
def getSynPermConnected(self):
"""Returns the permanence amount that qualifies a synapse as
being connected"""
return self._synPermConnected
def setSynPermConnected(self, synPermConnected):
"""Sets the permanence amount that qualifies a synapse as being
connected"""
self._synPermConnected = synPermConnected
def getMinPctOverlapDutyCycles(self):
"""Returns the minimum tolerated overlaps, given as percent of
neighbors overlap score"""
return self._minPctOverlapDutyCycles
def setMinPctOverlapDutyCycles(self, minPctOverlapDutyCycles):
"""Sets the minimum tolerated activity duty cycle, given as percent of
neighbors' activity duty cycle"""
self._minPctOverlapDutyCycles = minPctOverlapDutyCycles
def getMinPctActiveDutyCycles(self):
"""Returns the minimum tolerated activity duty cycle, given as percent of
neighbors' activity duty cycle"""
return self._minPctActiveDutyCycles
def setMinPctActiveDutyCycles(self, minPctActiveDutyCycles):
"""Sets the minimum tolerated activity duty, given as percent of
neighbors' activity duty cycle"""
self._minPctActiveDutyCycles = minPctActiveDutyCycles
def getBoostFactors(self, boostFactors):
"""Returns the boost factors for all columns. 'boostFactors' size must
match the number of columns"""
boostFactors[:] = self._boostFactors[:]
def setBoostFactors(self, boostFactors):
"""Sets the boost factors for all columns. 'boostFactors' size must match
the number of columns"""
self._boostFactors[:] = boostFactors[:]
def getOverlapDutyCycles(self, overlapDutyCycles):
"""Returns the overlap duty cycles for all columns. 'overlapDutyCycles'
size must match the number of columns"""
overlapDutyCycles[:] = self._overlapDutyCycles[:]
def setOverlapDutyCycles(self, overlapDutyCycles):
"""Sets the overlap duty cycles for all columns. 'overlapDutyCycles'
size must match the number of columns"""
self._overlapDutyCycles[:] = overlapDutyCycles
def getActiveDutyCycles(self, activeDutyCycles):
"""Returns the activity duty cycles for all columns. 'activeDutyCycles'
size must match the number of columns"""
activeDutyCycles[:] = self._activeDutyCycles[:]
def setActiveDutyCycles(self, activeDutyCycles):
"""Sets the activity duty cycles for all columns. 'activeDutyCycles'
size must match the number of columns"""
self._activeDutyCycles[:] = activeDutyCycles
def getMinOverlapDutyCycles(self, minOverlapDutyCycles):
"""Returns the minimum overlap duty cycles for all columns.
'_minOverlapDutyCycles' size must match the number of columns"""
minOverlapDutyCycles[:] = self._minOverlapDutyCycles[:]
def setMinOverlapDutyCycles(self, minOverlapDutyCycles):
"""Sets the minimum overlap duty cycles for all columns.
'_minOverlapDutyCycles' size must match the number of columns"""
self._minOverlapDutyCycles[:] = minOverlapDutyCycles[:]
def getMinActiveDutyCycles(self, minActiveDutyCycles):
"""Returns the minimum activity duty cycles for all columns.
'_minActiveDutyCycles' size must match the number of columns"""
minActiveDutyCycles[:] = self._minActiveDutyCycles[:]
def setMinActiveDutyCycles(self, minActiveDutyCycles):
"""Sets the minimum activity duty cycles for all columns.
'_minActiveDutyCycles' size must match the number of columns"""
self._minActiveDutyCycles = minActiveDutyCycles
def getPotential(self, columnIndex, potential):
"""Returns the potential mapping for a given column. 'potential' size
must match the number of inputs"""
assert(columnIndex < self._numColumns)
potential[:] = self._potentialPools[columnIndex]
def setPotential(self, columnIndex, potential):
"""Sets the potential mapping for a given column. 'potential' size
    must match the number of inputs, and the number of non-zero bits must be
    at least _stimulusThreshold """
    assert(columnIndex < self._numColumns)
potentialSparse = numpy.where(potential > 0)[0]
if len(potentialSparse) < self._stimulusThreshold:
raise Exception("This is likely due to a " +
"value of stimulusThreshold that is too large relative " +
"to the input size.")
self._potentialPools.replace(columnIndex, potentialSparse)
def getPermanence(self, columnIndex, permanence):
"""Returns the permanence values for a given column. 'permanence' size
must match the number of inputs"""
assert(columnIndex < self._numColumns)
permanence[:] = self._permanences[columnIndex]
def setPermanence(self, columnIndex, permanence):
"""Sets the permanence values for a given column. 'permanence' size
must match the number of inputs"""
assert(columnIndex < self._numColumns)
self._updatePermanencesForColumn(permanence, columnIndex, raisePerm=False)
def getConnectedSynapses(self, columnIndex, connectedSynapses):
"""Returns the connected synapses for a given column.
'connectedSynapses' size must match the number of inputs"""
assert(columnIndex < self._numColumns)
connectedSynapses[:] = self._connectedSynapses[columnIndex]
def getConnectedCounts(self, connectedCounts):
"""Returns the number of connected synapses for all columns.
'connectedCounts' size must match the number of columns"""
connectedCounts[:] = self._connectedCounts[:]
def compute(self, inputVector, learn, activeArray):
"""
This is the primary public method of the SpatialPooler class. This
function takes a input vector and outputs the indices of the active columns.
If 'learn' is set to True, this method also updates the permanences of the
columns.
@param inputVector: A numpy array of 0's and 1's that comprises the input
to the spatial pooler. The array will be treated as a one dimensional
array, therefore the dimensions of the array do not have to match the
exact dimensions specified in the class constructor. In fact, even a
list would suffice. The number of input bits in the vector must,
however, match the number of bits specified by the call to the
constructor. Therefore there must be a '0' or '1' in the array for
every input bit.
@param learn: A boolean value indicating whether learning should be
performed. Learning entails updating the permanence values of the
synapses, and hence modifying the 'state' of the model. Setting
learning to 'off' freezes the SP and has many uses. For example, you
might want to feed in various inputs and examine the resulting SDR's.
@param activeArray: An array whose size is equal to the number of columns.
Before the function returns this array will be populated with 1's at
the indices of the active columns, and 0's everywhere else.
"""
if not isinstance(inputVector, numpy.ndarray):
raise TypeError("Input vector must be a numpy array, not %s" %
str(type(inputVector)))
if inputVector.size != self._numInputs:
raise ValueError(
"Input vector dimensions don't match. Expecting %s but got %s" % (
              self._numInputs, inputVector.size))
self._updateBookeepingVars(learn)
inputVector = numpy.array(inputVector, dtype=realDType)
    inputVector = inputVector.reshape(-1)
overlaps = self._calculateOverlap(inputVector)
# Apply boosting when learning is on
if learn:
boostedOverlaps = self._boostFactors * overlaps
else:
boostedOverlaps = overlaps
# Apply inhibition to determine the winning columns
activeColumns = self._inhibitColumns(boostedOverlaps)
if learn:
self._adaptSynapses(inputVector, activeColumns)
self._updateDutyCycles(overlaps, activeColumns)
self._bumpUpWeakColumns()
self._updateBoostFactors()
if self._isUpdateRound():
self._updateInhibitionRadius()
self._updateMinDutyCycles()
activeArray.fill(0)
activeArray[activeColumns] = 1
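  # Illustrative usage sketch (editorial; the dimensions here are hypothetical,
  # not part of this module). A caller typically reuses one output buffer:
  #   sp = SpatialPooler(inputDimensions=(1024,), columnDimensions=(2048,))
  #   activeArray = numpy.zeros(2048)
  #   sp.compute(inputVector, learn=True, activeArray=activeArray)
  #   activeColumns = numpy.nonzero(activeArray)[0]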
def stripUnlearnedColumns(self, activeArray):
"""Removes the set of columns who have never been active from the set of
active columns selected in the inhibition round. Such columns cannot
represent learned pattern and are therefore meaningless if only inference
is required. This should not be done when using a random, unlearned SP
since you would end up with no active columns.
@param activeArray: An array whose size is equal to the number of columns.
Any columns marked as active with an activeDutyCycle of 0 have
never been activated before and therefore are not active due to
learning. Any of these (unlearned) columns will be disabled (set to 0).
"""
neverLearned = numpy.where(self._activeDutyCycles == 0)[0]
activeArray[neverLearned] = 0
def _updateMinDutyCycles(self):
"""
Updates the minimum duty cycles defining normal activity for a column. A
column with activity duty cycle below this minimum threshold is boosted.
"""
if self._globalInhibition or self._inhibitionRadius > self._numInputs:
self._updateMinDutyCyclesGlobal()
else:
self._updateMinDutyCyclesLocal()
def _updateMinDutyCyclesGlobal(self):
"""
Updates the minimum duty cycles in a global fashion. Sets the minimum duty
cycles for the overlap and activation of all columns to be a percent of the
maximum in the region, specified by minPctOverlapDutyCycle and
    minPctActiveDutyCycle respectively. Functionally it is equivalent to
_updateMinDutyCyclesLocal, but this function exploits the globality of the
computation to perform it in a straightforward, and more efficient manner.
"""
self._minOverlapDutyCycles.fill(
self._minPctOverlapDutyCycles * self._overlapDutyCycles.max()
)
self._minActiveDutyCycles.fill(
self._minPctActiveDutyCycles * self._activeDutyCycles.max()
)
def _updateMinDutyCyclesLocal(self):
"""
Updates the minimum duty cycles. The minimum duty cycles are determined
locally. Each column's minimum duty cycles are set to be a percent of the
maximum duty cycles in the column's neighborhood. Unlike
_updateMinDutyCyclesGlobal, here the values can be quite different for
different columns.
"""
for i in xrange(self._numColumns):
maskNeighbors = numpy.append(i,
self._getNeighborsND(i, self._columnDimensions,
self._inhibitionRadius))
self._minOverlapDutyCycles[i] = (
self._overlapDutyCycles[maskNeighbors].max() *
self._minPctOverlapDutyCycles
)
self._minActiveDutyCycles[i] = (
self._activeDutyCycles[maskNeighbors].max() *
self._minPctActiveDutyCycles
)
def _updateDutyCycles(self, overlaps, activeColumns):
"""
Updates the duty cycles for each column. The OVERLAP duty cycle is a moving
    average of the number of inputs which overlapped with each column. The
ACTIVITY duty cycles is a moving average of the frequency of activation for
each column.
Parameters:
----------------------------
@param overlaps:
An array containing the overlap score for each column.
The overlap score for a column is defined as the number
of synapses in a "connected state" (connected synapses)
that are connected to input bits which are turned on.
@param activeColumns:
An array containing the indices of the active columns,
the sparse set of columns which survived inhibition
"""
overlapArray = numpy.zeros(self._numColumns, dtype=realDType)
activeArray = numpy.zeros(self._numColumns, dtype=realDType)
overlapArray[overlaps > 0] = 1
activeArray[activeColumns] = 1
period = self._dutyCyclePeriod
if (period > self._iterationNum):
period = self._iterationNum
self._overlapDutyCycles = self._updateDutyCyclesHelper(
self._overlapDutyCycles,
overlapArray,
period
)
self._activeDutyCycles = self._updateDutyCyclesHelper(
self._activeDutyCycles,
activeArray,
period
)
def _updateInhibitionRadius(self):
"""
Update the inhibition radius. The inhibition radius is a measure of the
    square (or hypersquare) of columns that each column is "connected to"
    on average. Since columns are not connected to each other directly, we
determine this quantity by first figuring out how many *inputs* a column is
connected to, and then multiplying it by the total number of columns that
    exist for each input. For multiple dimensions the aforementioned
calculations are averaged over all dimensions of inputs and columns. This
value is meaningless if global inhibition is enabled.
"""
if self._globalInhibition:
self._inhibitionRadius = self._columnDimensions.max()
return
avgConnectedSpan = numpy.average(
[self._avgConnectedSpanForColumnND(i)
for i in xrange(self._numColumns)]
)
columnsPerInput = self._avgColumnsPerInput()
diameter = avgConnectedSpan * columnsPerInput
radius = (diameter - 1) / 2.0
radius = max(1.0, radius)
self._inhibitionRadius = int(round(radius))
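  # Worked example (illustrative): if columns span ~10 connected inputs on
  # average and there are ~2 columns per input, diameter = 10 * 2 = 20 and the
  # radius becomes int(round((20 - 1) / 2.0)) = 10.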
def _avgColumnsPerInput(self):
"""
The average number of columns per input, taking into account the topology
of the inputs and columns. This value is used to calculate the inhibition
radius. This function supports an arbitrary number of dimensions. If the
number of column dimensions does not match the number of input dimensions,
we treat the missing, or phantom dimensions as 'ones'.
"""
#TODO: extend to support different number of dimensions for inputs and
# columns
numDim = max(self._columnDimensions.size, self._inputDimensions.size)
colDim = numpy.ones(numDim)
colDim[:self._columnDimensions.size] = self._columnDimensions
inputDim = numpy.ones(numDim)
inputDim[:self._inputDimensions.size] = self._inputDimensions
columnsPerInput = colDim.astype(realDType) / inputDim
return numpy.average(columnsPerInput)
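  # Worked example (illustrative): with columnDimensions=(64, 64) and
  # inputDimensions=(32, 32) there are 64/32 = 2 columns per input in each
  # dimension, so this method returns an average of 2.0.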
def _avgConnectedSpanForColumn1D(self, columnIndex):
"""
The range of connected synapses for column. This is used to
calculate the inhibition radius. This variation of the function only
supports a 1 dimensional column topology.
Parameters:
----------------------------
@param columnIndex: The index identifying a column in the permanence,
potential and connectivity matrices
"""
assert(self._inputDimensions.size == 1)
connected = self._connectedSynapses[columnIndex].nonzero()[0]
if connected.size == 0:
return 0
else:
return max(connected) - min(connected) + 1
def _avgConnectedSpanForColumn2D(self, columnIndex):
"""
The range of connectedSynapses per column, averaged for each dimension.
This value is used to calculate the inhibition radius. This variation of
the function only supports a 2 dimensional column topology.
Parameters:
----------------------------
@param columnIndex: The index identifying a column in the permanence,
potential and connectivity matrices
"""
assert(self._inputDimensions.size == 2)
connected = self._connectedSynapses[columnIndex]
(rows, cols) = connected.reshape(self._inputDimensions).nonzero()
if rows.size == 0 and cols.size == 0:
return 0
rowSpan = rows.max() - rows.min() + 1
colSpan = cols.max() - cols.min() + 1
return numpy.average([rowSpan, colSpan])
def _avgConnectedSpanForColumnND(self, columnIndex):
"""
The range of connectedSynapses per column, averaged for each dimension.
This value is used to calculate the inhibition radius. This variation of
the function supports arbitrary column dimensions.
Parameters:
----------------------------
@param index: The index identifying a column in the permanence, potential
and connectivity matrices.
"""
dimensions = self._inputDimensions
connected = self._connectedSynapses[columnIndex].nonzero()[0]
if connected.size == 0:
return 0
maxCoord = numpy.empty(self._inputDimensions.size)
minCoord = numpy.empty(self._inputDimensions.size)
maxCoord.fill(-1)
minCoord.fill(max(self._inputDimensions))
for i in connected:
maxCoord = numpy.maximum(maxCoord, numpy.unravel_index(i, dimensions))
minCoord = numpy.minimum(minCoord, numpy.unravel_index(i, dimensions))
return numpy.average(maxCoord - minCoord + 1)
def _adaptSynapses(self, inputVector, activeColumns):
"""
The primary method in charge of learning. Adapts the permanence values of
the synapses based on the input vector, and the chosen columns after
inhibition round. Permanence values are increased for synapses connected to
input bits that are turned on, and decreased for synapses connected to
inputs bits that are turned off.
Parameters:
----------------------------
@param inputVector:
A numpy array of 0's and 1's that comprises the input to
the spatial pooler. There exists an entry in the array
for every input bit.
@param activeColumns:
An array containing the indices of the columns that
survived inhibition.
"""
inputIndices = numpy.where(inputVector > 0)[0]
permChanges = numpy.zeros(self._numInputs)
permChanges.fill(-1 * self._synPermInactiveDec)
permChanges[inputIndices] = self._synPermActiveInc
for columnIndex in activeColumns:
perm = self._permanences[columnIndex]
maskPotential = numpy.where(self._potentialPools[columnIndex] > 0)[0]
perm[maskPotential] += permChanges[maskPotential]
self._updatePermanencesForColumn(perm, columnIndex, raisePerm=True)
def _bumpUpWeakColumns(self):
"""
This method increases the permanence values of synapses of columns whose
activity level has been too low. Such columns are identified by having an
overlap duty cycle that drops too much below those of their peers. The
permanence values for such columns are increased.
"""
weakColumns = numpy.where(self._overlapDutyCycles
< self._minOverlapDutyCycles)[0]
for columnIndex in weakColumns:
perm = self._permanences[columnIndex].astype(realDType)
maskPotential = numpy.where(self._potentialPools[columnIndex] > 0)[0]
perm[maskPotential] += self._synPermBelowStimulusInc
self._updatePermanencesForColumn(perm, columnIndex, raisePerm=False)
def _raisePermanenceToThreshold(self, perm, mask):
"""
This method ensures that each column has enough connections to input bits
to allow it to become active. Since a column must have at least
'self._stimulusThreshold' overlaps in order to be considered during the
inhibition phase, columns without such minimal number of connections, even
if all the input bits they are connected to turn on, have no chance of
obtaining the minimum threshold. For such columns, the permanence values
are increased until the minimum number of connections are formed.
Parameters:
----------------------------
@param perm: An array of permanence values for a column. The array is
"dense", i.e. it contains an entry for each input bit, even
if the permanence value is 0.
@param mask: the indices of the columns whose permanences need to be
raised.
"""
if len(mask) < self._stimulusThreshold:
raise Exception("This is likely due to a " +
"value of stimulusThreshold that is too large relative " +
"to the input size. [len(mask) < self._stimulusThreshold]")
numpy.clip(perm, self._synPermMin, self._synPermMax, out=perm)
while True:
numConnected = numpy.nonzero(perm > self._synPermConnected)[0].size
if numConnected >= self._stimulusThreshold:
return
perm[mask] += self._synPermBelowStimulusInc
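  # Worked example (illustrative): with synPermConnected=0.10 (so
  # synPermBelowStimulusInc=0.01) and stimulusThreshold=5, every synapse in
  # 'mask' is bumped by 0.01 per pass of the loop above until at least 5
  # permanence values exceed 0.10.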
def _updatePermanencesForColumn(self, perm, columnIndex, raisePerm=True):
"""
This method updates the permanence matrix with a column's new permanence
values. The column is identified by its index, which reflects the row in
the matrix, and the permanence is given in 'dense' form, i.e. a full
array containing all the zeros as well as the non-zero values. It is in
charge of implementing 'clipping' - ensuring that the permanence values are
always between 0 and 1 - and 'trimming' - enforcing sparsity by zeroing out
all permanence values below '_synPermTrimThreshold'. It also maintains
the consistency between 'self._permanences' (the matrix storing the
permanence values), 'self._connectedSynapses', (the matrix storing the bits
each column is connected to), and 'self._connectedCounts' (an array storing
the number of input bits each column is connected to). Every method wishing
to modify the permanence matrix should do so through this method.
Parameters:
----------------------------
@param perm: An array of permanence values for a column. The array is
"dense", i.e. it contains an entry for each input bit, even
if the permanence value is 0.
@param index: The index identifying a column in the permanence, potential
and connectivity matrices
@param raisePerm: A boolean value indicating whether the permanence values
should be raised until a minimum number are synapses are in
a connected state. Should be set to 'false' when a direct
assignment is required.
"""
maskPotential = numpy.where(self._potentialPools[columnIndex] > 0)[0]
if raisePerm:
self._raisePermanenceToThreshold(perm, maskPotential)
perm[perm < self._synPermTrimThreshold] = 0
numpy.clip(perm, self._synPermMin, self._synPermMax, out=perm)
newConnected = numpy.where(perm >= self._synPermConnected)[0]
self._permanences.update(columnIndex, perm)
self._connectedSynapses.replace(columnIndex, newConnected)
self._connectedCounts[columnIndex] = newConnected.size
def _initPermConnected(self):
"""
    Returns a randomly generated permanence value for a synapse that is
initialized in a connected state. The basic idea here is to initialize
permanence values very close to synPermConnected so that a small number of
learning steps could make it disconnected or connected.
Note: experimentation was done a long time ago on the best way to initialize
permanence values, but the history for this particular scheme has been lost.
"""
p = self._synPermConnected + (
self._synPermMax - self._synPermConnected)*self._random.getReal64()
# Ensure we don't have too much unnecessary precision. A full 64 bits of
# precision causes numerical stability issues across platforms and across
# implementations
p = int(p*100000) / 100000.0
return p
def _initPermNonConnected(self):
"""
    Returns a randomly generated permanence value for a synapse that is to be
initialized in a non-connected state.
"""
p = self._synPermConnected * self._random.getReal64()
# Ensure we don't have too much unnecessary precision. A full 64 bits of
# precision causes numerical stability issues across platforms and across
# implementations
p = int(p*100000) / 100000.0
return p
def _initPermanence(self, potential, connectedPct):
"""
Initializes the permanences of a column. The method
returns a 1-D array the size of the input, where each entry in the
array represents the initial permanence value between the input bit
at the particular index in the array, and the column represented by
the 'index' parameter.
Parameters:
----------------------------
@param potential: A numpy array specifying the potential pool of the column.
Permanence values will only be generated for input bits
corresponding to indices for which the mask value is 1.
    @param connectedPct: A value between 0 and 1 governing the chance, for each
permanence, that the initial permanence value will
be a value that is considered connected.
"""
# Determine which inputs bits will start out as connected
# to the inputs. Initially a subset of the input bits in a
# column's potential pool will be connected. This number is
# given by the parameter "connectedPct"
perm = numpy.zeros(self._numInputs)
for i in xrange(self._numInputs):
if (potential[i] < 1):
continue
if (self._random.getReal64() <= connectedPct):
perm[i] = self._initPermConnected()
else:
perm[i] = self._initPermNonConnected()
# Clip off low values. Since we use a sparse representation
# to store the permanence values this helps reduce memory
# requirements.
perm[perm < self._synPermTrimThreshold] = 0
return perm
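  # Illustrative outcome: with connectedPct=0.5 and synPermConnected=0.10,
  # about half of a column's potential inputs start with a permanence drawn
  # from [0.10, 1.0) and the rest from [0, 0.10), with values below
  # _synPermTrimThreshold zeroed out to keep the matrix sparse.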
def _mapColumn(self, index):
"""
Maps a column to its respective input index, keeping to the topology of
the region. It takes the index of the column as an argument and determines
what is the index of the flattened input vector that is to be the center of
the column's potential pool. It distributes the columns over the inputs
uniformly. The return value is an integer representing the index of the
input bit. Examples of the expected output of this method:
* If the topology is one dimensional, and the column index is 0, this
method will return the input index 0. If the column index is 1, and there
are 3 columns over 7 inputs, this method will return the input index 3.
* If the topology is two dimensional, with column dimensions [3, 5] and
input dimensions [7, 11], and the column index is 3, the method
returns input index 8.
Parameters:
----------------------------
@param index: The index identifying a column in the permanence, potential
and connectivity matrices.
"""
columnCoords = numpy.unravel_index(index, self._columnDimensions)
columnCoords = numpy.array(columnCoords, dtype=realDType)
ratios = columnCoords / self._columnDimensions
inputCoords = self._inputDimensions * ratios
inputCoords += 0.5 * self._inputDimensions / self._columnDimensions
inputCoords = inputCoords.astype(int)
inputIndex = numpy.ravel_multi_index(inputCoords, self._inputDimensions)
return inputIndex
def _mapPotential(self, index, wrapAround=False):
"""
Maps a column to its input bits. This method encapsulates the topology of
the region. It takes the index of the column as an argument and determines
what are the indices of the input vector that are located within the
column's potential pool. The return value is a list containing the indices
of the input bits. The current implementation of the base class only
supports a 1 dimensional topology of columns with a 1 dimensional topology
of inputs. To extend this class to support 2-D topology you will need to
override this method. Examples of the expected output of this method:
* If the potentialRadius is greater than or equal to the largest input
dimension then each column connects to all of the inputs.
* If the topology is one dimensional, the input space is divided up evenly
among the columns and each column is centered over its share of the
inputs. If the potentialRadius is 5, then each column connects to the
input it is centered above as well as the 5 inputs to the left of that
input and the five inputs to the right of that input, wrapping around if
wrapAround=True.
* If the topology is two dimensional, the input space is again divided up
evenly among the columns and each column is centered above its share of
the inputs. If the potentialRadius is 5, the column connects to a square
that has 11 inputs on a side and is centered on the input that the column
is centered above.
Parameters:
----------------------------
@param index: The index identifying a column in the permanence, potential
and connectivity matrices.
@param wrapAround: A boolean value indicating that boundaries should be
                     ignored.
"""
index = self._mapColumn(index)
indices = self._getNeighborsND(index,
self._inputDimensions,
self._potentialRadius,
wrapAround=wrapAround)
indices.append(index)
indices = numpy.array(indices, dtype=uintType)
# TODO: See https://github.com/numenta/nupic.core/issues/128
indices.sort()
# Select a subset of the receptive field to serve as the
# the potential pool
numPotential = int(round(indices.size * self._potentialPct))
selectedIndices = numpy.empty(numPotential, dtype=uintType)
self._random.sample(indices, selectedIndices)
potential = numpy.zeros(self._numInputs, dtype=uintType)
potential[selectedIndices] = 1
return potential
@staticmethod
def _updateDutyCyclesHelper(dutyCycles, newInput, period):
"""
Updates a duty cycle estimate with a new value. This is a helper
function that is used to update several duty cycle variables in
    the Column class, such as: overlapDutyCycle, activeDutyCycle,
    minPctDutyCycleBeforeInh, minPctDutyCycleAfterInh, etc. It returns
the updated duty cycle. Duty cycles are updated according to the following
formula:
(period - 1)*dutyCycle + newValue
dutyCycle := ----------------------------------
period
Parameters:
----------------------------
@param dutyCycles: An array containing one or more duty cycle values that need
to be updated
@param newInput: A new numerical value used to update the duty cycle
@param period: The period of the duty cycle
"""
assert(period >= 1)
return (dutyCycles * (period -1.0) + newInput) / period
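  # Worked example (illustrative): with period=1000, a previous duty cycle of
  # 0.1 and newInput=1, the update yields (999 * 0.1 + 1) / 1000 = 0.1009,
  # so the moving average drifts only slightly toward the new value.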
def _updateBoostFactors(self):
r"""
Update the boost factors for all columns. The boost factors are used to
increase the overlap of inactive columns to improve their chances of
    becoming active, and hence encourage participation of more columns in the
    learning process. The boost is a line defined as y = mx + b, i.e.
    boost = (1 - maxBoost) / minActiveDutyCycle * activeDutyCycle + maxBoost.
    Intuitively this means
that columns that have been active enough have a boost factor of 1, meaning
their overlap is not boosted. Columns whose active duty cycle drops too much
below that of their neighbors are boosted depending on how infrequently they
have been active. The more infrequent, the more they are boosted. The exact
boost factor is linearly interpolated between the points (dutyCycle:0,
    boost:maxBoost) and (dutyCycle:minActiveDutyCycle, boost:1.0).
boostFactor
^
maxBoost _ |
|\
| \
1 _ | \ _ _ _ _ _ _ _
|
+--------------------> activeDutyCycle
|
minActiveDutyCycle
"""
mask = numpy.where(self._minActiveDutyCycles > 0)[0]
self._boostFactors[mask] = ((1 - self._maxBoost) /
self._minActiveDutyCycles[mask] * self._activeDutyCycles[mask]
).astype(realDType) + self._maxBoost
self._boostFactors[self._activeDutyCycles >
self._minActiveDutyCycles] = 1.0
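  # Worked example (illustrative): with maxBoost=10 and a minimum active duty
  # cycle of 0.02, a column whose activeDutyCycle is 0.01 gets a boost of
  # (1 - 10) / 0.02 * 0.01 + 10 = 5.5; at a duty cycle of 0.02 the same line
  # gives exactly 1.0.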
def _updateBookeepingVars(self, learn):
"""
Updates counter instance variables each round.
Parameters:
----------------------------
@param learn: a boolean value indicating whether learning should be
performed. Learning entails updating the permanence
values of the synapses, and hence modifying the 'state'
of the model. setting learning to 'off' might be useful
for indicating separate training vs. testing sets.
"""
self._iterationNum += 1
if learn:
self._iterationLearnNum += 1
def _calculateOverlap(self, inputVector):
"""
This function determines each column's overlap with the current input
vector. The overlap of a column is the number of synapses for that column
that are connected (permanence value is greater than '_synPermConnected')
to input bits which are turned on. Overlap values that are lower than
the 'stimulusThreshold' are ignored. The implementation takes advantage of
the SparseBinaryMatrix class to perform this calculation efficiently.
Parameters:
----------------------------
@param inputVector: a numpy array of 0's and 1's that comprises the input to
the spatial pooler.
"""
overlaps = numpy.zeros(self._numColumns).astype(realDType)
self._connectedSynapses.rightVecSumAtNZ_fast(inputVector, overlaps)
overlaps[overlaps < self._stimulusThreshold] = 0
return overlaps
def _calculateOverlapPct(self, overlaps):
return overlaps.astype(realDType) / self._connectedCounts
def _inhibitColumns(self, overlaps):
"""
Performs inhibition. This method calculates the necessary values needed to
actually perform inhibition and then delegates the task of picking the
active columns to helper functions.
Parameters:
----------------------------
@param overlaps: an array containing the overlap score for each column.
The overlap score for a column is defined as the number
of synapses in a "connected state" (connected synapses)
that are connected to input bits which are turned on.
"""
# determine how many columns should be selected in the inhibition phase.
# This can be specified by either setting the 'numActiveColumnsPerInhArea'
# parameter or the 'localAreaDensity' parameter when initializing the class
overlaps = overlaps.copy()
if (self._localAreaDensity > 0):
density = self._localAreaDensity
else:
inhibitionArea = ((2*self._inhibitionRadius + 1)
** self._columnDimensions.size)
inhibitionArea = min(self._numColumns, inhibitionArea)
density = float(self._numActiveColumnsPerInhArea) / inhibitionArea
density = min(density, 0.5)
# Add our fixed little bit of random noise to the scores to help break ties.
overlaps += self._tieBreaker
if self._globalInhibition or \
self._inhibitionRadius > max(self._columnDimensions):
return self._inhibitColumnsGlobal(overlaps, density)
else:
return self._inhibitColumnsLocal(overlaps, density)
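  # Worked example (illustrative): with numActiveColumnsPerInhArea=10, a 1-D
  # column topology and inhibitionRadius=5, inhibitionArea = 2*5 + 1 = 11, so
  # density = 10/11 ~= 0.91, which the min() above then caps at 0.5.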
def _inhibitColumnsGlobal(self, overlaps, density):
"""
Perform global inhibition. Performing global inhibition entails picking the
top 'numActive' columns with the highest overlap score in the entire
region. At most half of the columns in a local neighborhood are allowed to
be active.
@param overlaps: an array containing the overlap score for each column.
The overlap score for a column is defined as the number
of synapses in a "connected state" (connected synapses)
that are connected to input bits which are turned on.
@param density: The fraction of columns to survive inhibition.
@return list with indices of the winning columns
"""
#calculate num active per inhibition area
numActive = int(density * self._numColumns)
# argpartition returns an array where all values to the left of index
# numActive are less than or equal to all values to its right. Negating
# overlaps produces a descending-order partition.
winnerIndices = numpy.argpartition(-overlaps, numActive)[:numActive]
# Compatibility with nupic.core requires that the winners are sorted;
# however, sorting only the winners is far less expensive than sorting all
# columns.
winnerValues = overlaps[winnerIndices]
sortedWinnerIndices = winnerIndices[numpy.argsort(-winnerValues)]
return sortedWinnerIndices
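  # Worked example (illustrative): for overlaps=[3, 9, 1, 7] and numActive=2,
  # argpartition(-overlaps, 2)[:2] picks columns {1, 3} in some order, and the
  # final argsort orders them by descending overlap as [1, 3].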
def _inhibitColumnsLocal(self, overlaps, density):
"""
Performs local inhibition. Local inhibition is performed on a column by
column basis. Each column observes the overlaps of its neighbors and is
selected if its overlap score is within the top 'numActive' in its local
neighborhood. At most half of the columns in a local neighborhood are
allowed to be active.
@param overlaps: an array containing the overlap score for each column.
The overlap score for a column is defined as the number
of synapses in a "connected state" (connected synapses)
that are connected to input bits which are turned on.
@param density: The fraction of columns to survive inhibition. This
value is only an intended target. Since the surviving
columns are picked in a local fashion, the exact fraction
of surviving columns is likely to vary.
@return list with indices of the winning columns
"""
winners = []
addToWinners = max(overlaps)/1000.0
overlaps = numpy.array(overlaps, dtype=realDType)
for i in xrange(self._numColumns):
maskNeighbors = self._getNeighborsND(i, self._columnDimensions, self._inhibitionRadius)
overlapSlice = overlaps[maskNeighbors]
numActive = int(0.5 + density * (len(maskNeighbors) + 1))
numBigger = numpy.count_nonzero(overlapSlice > overlaps[i])
if numBigger < numActive:
winners.append(i)
overlaps[i] += addToWinners
return winners
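  # Worked example (illustrative): with density=0.3 and 9 neighbors,
  # numActive = int(0.5 + 0.3 * 10) = 3, so a column wins only if fewer than 3
  # of its neighbors have a strictly larger overlap score.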
@staticmethod
def _getNeighbors1D(columnIndex, dimensions, radius, wrapAround=False):
"""
Returns a list of indices corresponding to the neighbors of a given column.
In this variation of the method, which only supports a one dimensional
column topology, a column's neighbors are those neighbors who are 'radius'
indices away. This information is needed to perform inhibition. This method
is a subset of _getNeighborsND and is only included for illustration
purposes, and potentially enhanced performance for spatial pooler
implementations that only require a one-dimensional topology.
Parameters:
----------------------------
@param columnIndex: The index identifying a column in the permanence, potential
and connectivity matrices.
@param dimensions: An array containing a dimensions for the column space. A 2x3
grid will be represented by [2,3].
@param radius: Indicates how far away from a given column are other
columns to be considered its neighbors. In the previous 2x3
example, each column with coordinates:
[2+/-radius, 3+/-radius] is considered a neighbor.
@param wrapAround: A boolean value indicating whether to consider columns at
the border of a dimensions to be adjacent to columns at the
other end of the dimension. For example, if the columns are
laid out in one dimension, columns 1 and 10 will be
considered adjacent if wrapAround is set to true:
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
"""
assert(dimensions.size == 1)
ncols = dimensions[0]
if wrapAround:
neighbors = numpy.array(
range(columnIndex-radius,columnIndex+radius+1)) % ncols
else:
neighbors = numpy.array(
range(columnIndex-radius,columnIndex+radius+1))
neighbors = neighbors[
numpy.logical_and(neighbors >= 0, neighbors < ncols)]
neighbors = list(set(neighbors) - set([columnIndex]))
assert(neighbors)
return neighbors
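  # Worked example (illustrative): columnIndex=0, radius=2 and 10 columns with
  # wrapAround=True yields neighbors {8, 9, 1, 2}; without wrapAround only
  # {1, 2} survive the bounds check.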
@staticmethod
def _getNeighbors2D(columnIndex, dimensions, radius, wrapAround=False):
"""
Returns a list of indices corresponding to the neighbors of a given column.
Since the permanence values are stored in such a way that information about
topology is lost, this method allows for reconstructing the topology of the
inputs, which are flattened to one array. Given a column's index, its
neighbors are defined as those columns that are 'radius' indices away from
it in each dimension. The method returns a list of the flat indices of
these columns. This method is a subset of _getNeighborsND and is only
included for illustration purposes, and potentially enhanced performance
for spatial pooler implementations that only require a two-dimensional
topology.
Parameters:
----------------------------
@param columnIndex: The index identifying a column in the permanence, potential
and connectivity matrices.
@param dimensions: An array containing a dimensions for the column space. A 2x3
grid will be represented by [2,3].
@param radius: Indicates how far away from a given column are other
columns to be considered its neighbors. In the previous 2x3
example, each column with coordinates:
[2+/-radius, 3+/-radius] is considered a neighbor.
@param wrapAround: A boolean value indicating whether to consider columns at
the border of a dimensions to be adjacent to columns at the
other end of the dimension. For example, if the columns are
laid out in one dimension, columns 1 and 10 will be
considered adjacent if wrapAround is set to true:
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
"""
assert(dimensions.size == 2)
nrows = dimensions[0]
ncols = dimensions[1]
toRow = lambda index: index / ncols
toCol = lambda index: index % ncols
toIndex = lambda row, col: row * ncols + col
row = toRow(columnIndex)
col = toCol(columnIndex)
if wrapAround:
colRange = numpy.array(range(col-radius, col+radius+1)) % ncols
rowRange = numpy.array(range(row-radius, row+radius+1)) % nrows
else:
colRange = numpy.array(range(col-radius, col+radius+1))
colRange = colRange[
numpy.logical_and(colRange >= 0, colRange < ncols)]
rowRange = numpy.array(range(row-radius, row+radius+1))
rowRange = rowRange[
numpy.logical_and(rowRange >= 0, rowRange < nrows)]
neighbors = [toIndex(r, c) for (r, c) in
itertools.product(rowRange, colRange)]
neighbors = list(set(neighbors) - set([columnIndex]))
assert(neighbors)
return neighbors
@staticmethod
def _getNeighborsND(columnIndex, dimensions, radius, wrapAround=False):
"""
    Similar to _getNeighbors1D and _getNeighbors2D, this function returns a
    list of indices corresponding to the neighbors of a given column. Since the
    permanence values are stored in such a way that information about topology
    is lost, this method allows for reconstructing the topology of the inputs,
which are flattened to one array. Given a column's index, its neighbors are
defined as those columns that are 'radius' indices away from it in each
dimension. The method returns a list of the flat indices of these columns.
Parameters:
----------------------------
@param columnIndex: The index identifying a column in the permanence, potential
and connectivity matrices.
    @param dimensions: An array containing the dimensions of the column space. A 2x3
grid will be represented by [2,3].
@param radius: Indicates how far away from a given column are other
columns to be considered its neighbors. In the previous 2x3
example, each column with coordinates:
[2+/-radius, 3+/-radius] is considered a neighbor.
@param wrapAround: A boolean value indicating whether to consider columns at
the border of a dimensions to be adjacent to columns at the
other end of the dimension. For example, if the columns are
laid out in one dimension, columns 1 and 10 will be
considered adjacent if wrapAround is set to true:
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
"""
assert(dimensions.size > 0)
columnCoords = numpy.unravel_index(columnIndex, dimensions)
rangeND = []
for i in xrange(dimensions.size):
if wrapAround:
curRange = numpy.array(range(columnCoords[i]-radius,
columnCoords[i]+radius+1)) % dimensions[i]
else:
curRange = numpy.array(range(columnCoords[i]-radius,
columnCoords[i]+radius+1))
curRange = curRange[
numpy.logical_and(curRange >= 0, curRange < dimensions[i])]
rangeND.append(numpy.unique(curRange))
neighbors = numpy.ravel_multi_index(
numpy.array(list(itertools.product(*rangeND))).T,
dimensions).tolist()
neighbors.remove(columnIndex)
return neighbors
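  # Illustrative sketch (editor's addition, not in the original source): in a
  # 3x3 column space the centre column (flat index 4) has every other column
  # as a neighbor within radius 1:
  #
  #   _getNeighborsND(4, numpy.array([3, 3]), 1)
  #   # -> [0, 1, 2, 3, 5, 6, 7, 8]
  #
  # Unlike the 1D and 2D variants, the order here follows directly from
  # numpy.ravel_multi_index applied to the cartesian product of the
  # per-dimension ranges.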
def _isUpdateRound(self):
"""
    Returns True if enough rounds have passed to warrant an update of the
    duty cycles.
"""
return (self._iterationNum % self._updatePeriod) == 0
def _seed(self, seed=-1):
"""
Initialize the random seed
"""
if seed != -1:
self._random = NupicRandom(seed)
else:
self._random = NupicRandom()
def __setstate__(self, state):
"""
Initialize class properties from stored values.
"""
# original version was a float so check for anything less than 2
if state['_version'] < 2:
# the wrapAround property was added in version 2,
# in version 1 the wrapAround parameter was True for SP initialization
state['_wrapAround'] = True
# update version property to current SP version
state['_version'] = VERSION
self.__dict__.update(state)
def write(self, proto):
self._random.write(proto.random)
proto.numInputs = self._numInputs
proto.numColumns = self._numColumns
cdimsProto = proto.init("columnDimensions", len(self._columnDimensions))
for i, dim in enumerate(self._columnDimensions):
cdimsProto[i] = int(dim)
idimsProto = proto.init("inputDimensions", len(self._inputDimensions))
for i, dim in enumerate(self._inputDimensions):
idimsProto[i] = int(dim)
proto.potentialRadius = self._potentialRadius
proto.potentialPct = self._potentialPct
proto.inhibitionRadius = self._inhibitionRadius
proto.globalInhibition = self._globalInhibition
proto.numActiveColumnsPerInhArea = self._numActiveColumnsPerInhArea
proto.localAreaDensity = self._localAreaDensity
proto.stimulusThreshold = self._stimulusThreshold
proto.synPermInactiveDec = self._synPermInactiveDec
proto.synPermActiveInc = self._synPermActiveInc
proto.synPermBelowStimulusInc = self._synPermBelowStimulusInc
proto.synPermConnected = self._synPermConnected
proto.minPctOverlapDutyCycles = self._minPctOverlapDutyCycles
proto.minPctActiveDutyCycles = self._minPctActiveDutyCycles
proto.dutyCyclePeriod = self._dutyCyclePeriod
proto.maxBoost = self._maxBoost
proto.wrapAround = self._wrapAround
proto.spVerbosity = self._spVerbosity
proto.synPermMin = self._synPermMin
proto.synPermMax = self._synPermMax
proto.synPermTrimThreshold = self._synPermTrimThreshold
proto.updatePeriod = self._updatePeriod
proto.version = self._version
proto.iterationNum = self._iterationNum
proto.iterationLearnNum = self._iterationLearnNum
self._potentialPools.write(proto.potentialPools)
self._permanences.write(proto.permanences)
tieBreakersProto = proto.init("tieBreaker", len(self._tieBreaker))
for i, v in enumerate(self._tieBreaker):
tieBreakersProto[i] = float(v)
overlapDutyCyclesProto = proto.init("overlapDutyCycles",
len(self._overlapDutyCycles))
for i, v in enumerate(self._overlapDutyCycles):
overlapDutyCyclesProto[i] = float(v)
activeDutyCyclesProto = proto.init("activeDutyCycles",
len(self._activeDutyCycles))
for i, v in enumerate(self._activeDutyCycles):
activeDutyCyclesProto[i] = float(v)
minOverlapDutyCyclesProto = proto.init("minOverlapDutyCycles",
len(self._minOverlapDutyCycles))
for i, v in enumerate(self._minOverlapDutyCycles):
minOverlapDutyCyclesProto[i] = float(v)
minActiveDutyCyclesProto = proto.init("minActiveDutyCycles",
len(self._minActiveDutyCycles))
for i, v in enumerate(self._minActiveDutyCycles):
minActiveDutyCyclesProto[i] = float(v)
boostFactorsProto = proto.init("boostFactors", len(self._boostFactors))
for i, v in enumerate(self._boostFactors):
boostFactorsProto[i] = float(v)
def read(self, proto):
numInputs = int(proto.numInputs)
numColumns = int(proto.numColumns)
self._random.read(proto.random)
self._numInputs = numInputs
self._numColumns = numColumns
self._columnDimensions = numpy.array(proto.columnDimensions)
self._inputDimensions = numpy.array(proto.inputDimensions)
self._potentialRadius = proto.potentialRadius
self._potentialPct = proto.potentialPct
self._inhibitionRadius = proto.inhibitionRadius
self._globalInhibition = proto.globalInhibition
self._numActiveColumnsPerInhArea = proto.numActiveColumnsPerInhArea
self._localAreaDensity = proto.localAreaDensity
self._stimulusThreshold = proto.stimulusThreshold
self._synPermInactiveDec = proto.synPermInactiveDec
self._synPermActiveInc = proto.synPermActiveInc
self._synPermBelowStimulusInc = proto.synPermBelowStimulusInc
self._synPermConnected = proto.synPermConnected
self._minPctOverlapDutyCycles = proto.minPctOverlapDutyCycles
self._minPctActiveDutyCycles = proto.minPctActiveDutyCycles
self._dutyCyclePeriod = proto.dutyCyclePeriod
self._maxBoost = proto.maxBoost
self._wrapAround = proto.wrapAround
self._spVerbosity = proto.spVerbosity
self._synPermMin = proto.synPermMin
self._synPermMax = proto.synPermMax
self._synPermTrimThreshold = proto.synPermTrimThreshold
self._updatePeriod = proto.updatePeriod
self._version = VERSION
self._iterationNum = proto.iterationNum
self._iterationLearnNum = proto.iterationLearnNum
self._potentialPools.read(proto.potentialPools)
self._permanences.read(proto.permanences)
# Initialize ephemerals and make sure they get updated
self._connectedCounts = numpy.zeros(numColumns, dtype=realDType)
self._connectedSynapses = BinaryCorticalColumns(numInputs)
self._connectedSynapses.resize(numColumns, numInputs)
for columnIndex in xrange(proto.numColumns):
self._updatePermanencesForColumn(
self._permanences[columnIndex], columnIndex, False
)
self._tieBreaker = numpy.array(proto.tieBreaker)
self._overlapDutyCycles = numpy.array(proto.overlapDutyCycles,
dtype=realDType)
self._activeDutyCycles = numpy.array(proto.activeDutyCycles,
dtype=realDType)
self._minOverlapDutyCycles = numpy.array(proto.minOverlapDutyCycles,
dtype=realDType)
self._minActiveDutyCycles = numpy.array(proto.minActiveDutyCycles,
dtype=realDType)
self._boostFactors = numpy.array(proto.boostFactors, dtype=realDType)
def printParameters(self):
"""
Useful for debugging.
"""
print "------------PY SpatialPooler Parameters ------------------"
print "numInputs = ", self.getNumInputs()
print "numColumns = ", self.getNumColumns()
print "columnDimensions = ", self._columnDimensions
print "numActiveColumnsPerInhArea = ", self.getNumActiveColumnsPerInhArea()
print "potentialPct = ", self.getPotentialPct()
print "globalInhibition = ", self.getGlobalInhibition()
print "localAreaDensity = ", self.getLocalAreaDensity()
print "stimulusThreshold = ", self.getStimulusThreshold()
print "synPermActiveInc = ", self.getSynPermActiveInc()
print "synPermInactiveDec = ", self.getSynPermInactiveDec()
print "synPermConnected = ", self.getSynPermConnected()
print "minPctOverlapDutyCycle = ", self.getMinPctOverlapDutyCycles()
print "minPctActiveDutyCycle = ", self.getMinPctActiveDutyCycles()
print "dutyCyclePeriod = ", self.getDutyCyclePeriod()
print "maxBoost = ", self.getMaxBoost()
print "spVerbosity = ", self.getSpVerbosity()
print "version = ", self._version
| 1 | 20,045 | bad line wrap | numenta-nupic | py |
@@ -1245,6 +1245,14 @@ void rai::bootstrap_server::receive_header_action (boost::system::error_code con
});
break;
}
+ case rai::message_type::bulk_pull_blocks:
+ {
+ auto this_l (shared_from_this ());
+ boost::asio::async_read (*socket, boost::asio::buffer (receive_buffer.data () + 8, sizeof (rai::uint256_union) + sizeof (rai::uint256_union) + sizeof (bulk_pull_blocks_mode) + sizeof (uint32_t)), [this_l](boost::system::error_code const & ec, size_t size_a) {
+ this_l->receive_bulk_pull_blocks_action (ec, size_a);
+ });
+ break;
+ }
case rai::message_type::frontier_req:
{
auto this_l (shared_from_this ()); | 1 | #include <rai/node/bootstrap.hpp>
#include <rai/node/common.hpp>
#include <rai/node/node.hpp>
#include <boost/log/trivial.hpp>
rai::block_synchronization::block_synchronization (boost::log::sources::logger_mt & log_a) :
log (log_a)
{
}
rai::block_synchronization::~block_synchronization ()
{
}
namespace
{
class add_dependency_visitor : public rai::block_visitor
{
public:
add_dependency_visitor (MDB_txn * transaction_a, rai::block_synchronization & sync_a) :
transaction (transaction_a),
sync (sync_a),
complete (true)
{
}
void send_block (rai::send_block const & block_a) override
{
add_dependency (block_a.hashables.previous);
}
void receive_block (rai::receive_block const & block_a) override
{
add_dependency (block_a.hashables.previous);
if (complete)
{
add_dependency (block_a.hashables.source);
}
}
void open_block (rai::open_block const & block_a) override
{
add_dependency (block_a.hashables.source);
}
void change_block (rai::change_block const & block_a) override
{
add_dependency (block_a.hashables.previous);
}
void add_dependency (rai::block_hash const & hash_a)
{
if (!sync.synchronized (transaction, hash_a) && sync.retrieve (transaction, hash_a) != nullptr)
{
complete = false;
sync.blocks.push_back (hash_a);
}
else
{
// Block is already synchronized, normal
}
}
MDB_txn * transaction;
rai::block_synchronization & sync;
bool complete;
};
}
bool rai::block_synchronization::add_dependency (MDB_txn * transaction_a, rai::block const & block_a)
{
add_dependency_visitor visitor (transaction_a, *this);
block_a.visit (visitor);
return visitor.complete;
}
void rai::block_synchronization::fill_dependencies (MDB_txn * transaction_a)
{
auto done (false);
while (!done)
{
auto hash (blocks.back ());
auto block (retrieve (transaction_a, hash));
if (block != nullptr)
{
done = add_dependency (transaction_a, *block);
}
else
{
done = true;
}
}
}
rai::sync_result rai::block_synchronization::synchronize_one (MDB_txn * transaction_a)
{
// Blocks that depend on multiple paths e.g. receive_blocks, need to have their dependencies recalculated each time
fill_dependencies (transaction_a);
rai::sync_result result (rai::sync_result::success);
auto hash (blocks.back ());
blocks.pop_back ();
auto block (retrieve (transaction_a, hash));
if (block != nullptr)
{
result = target (transaction_a, *block);
}
else
{
// A block that can be the dependency of more than one other block, e.g. send blocks, can be added to the dependency list more than once. Subsequent retrievals won't find the block but this isn't an error
}
return result;
}
rai::sync_result rai::block_synchronization::synchronize (MDB_txn * transaction_a, rai::block_hash const & hash_a)
{
auto result (rai::sync_result::success);
blocks.clear ();
blocks.push_back (hash_a);
auto cutoff (std::chrono::system_clock::now () + rai::transaction_timeout);
while (std::chrono::system_clock::now () < cutoff && result != rai::sync_result::fork && !blocks.empty ())
{
result = synchronize_one (transaction_a);
}
return result;
}
rai::push_synchronization::push_synchronization (rai::node & node_a, std::function<rai::sync_result (MDB_txn *, rai::block const &)> const & target_a) :
block_synchronization (node_a.log),
target_m (target_a),
node (node_a)
{
}
bool rai::push_synchronization::synchronized (MDB_txn * transaction_a, rai::block_hash const & hash_a)
{
auto result (!node.store.unsynced_exists (transaction_a, hash_a));
if (!result)
{
node.store.unsynced_del (transaction_a, hash_a);
}
return result;
}
std::unique_ptr<rai::block> rai::push_synchronization::retrieve (MDB_txn * transaction_a, rai::block_hash const & hash_a)
{
return node.store.block_get (transaction_a, hash_a);
}
rai::sync_result rai::push_synchronization::target (MDB_txn * transaction_a, rai::block const & block_a)
{
return target_m (transaction_a, block_a);
}
rai::bootstrap_client::bootstrap_client (std::shared_ptr<rai::node> node_a, std::shared_ptr<rai::bootstrap_attempt> attempt_a, rai::tcp_endpoint const & endpoint_a) :
node (node_a),
attempt (attempt_a),
socket (node_a->service),
endpoint (endpoint_a),
timeout (node_a->service)
{
++attempt->connections;
}
rai::bootstrap_client::~bootstrap_client ()
{
--attempt->connections;
}
void rai::bootstrap_client::start_timeout ()
{
timeout.expires_from_now (boost::posix_time::seconds (15));
std::weak_ptr<rai::bootstrap_client> this_w (shared ());
timeout.async_wait ([this_w](boost::system::error_code const & ec) {
if (ec != boost::asio::error::operation_aborted)
{
auto this_l (this_w.lock ());
if (this_l != nullptr)
{
this_l->socket.close ();
if (this_l->node->config.logging.bulk_pull_logging ())
{
BOOST_LOG (this_l->node->log) << boost::str (boost::format ("Disconnecting from %1% due to timeout") % this_l->endpoint);
}
}
}
});
}
void rai::bootstrap_client::stop_timeout ()
{
size_t killed (timeout.cancel ());
(void)killed;
}
void rai::bootstrap_client::run ()
{
auto this_l (shared_from_this ());
start_timeout ();
socket.async_connect (endpoint, [this_l](boost::system::error_code const & ec) {
this_l->stop_timeout ();
if (!ec)
{
BOOST_LOG (this_l->node->log) << boost::str (boost::format ("Connection established to %1%") % this_l->endpoint);
this_l->attempt->pool_connection (this_l->shared_from_this ());
}
else
{
if (this_l->node->config.logging.network_logging ())
{
switch (ec.value ())
{
default:
BOOST_LOG (this_l->node->log) << boost::str (boost::format ("Error initiating bootstrap connection to %2%: %1%") % ec.message () % this_l->endpoint);
break;
case boost::system::errc::connection_refused:
case boost::system::errc::operation_canceled:
case boost::system::errc::timed_out:
break;
}
}
}
});
}
void rai::frontier_req_client::run ()
{
std::unique_ptr<rai::frontier_req> request (new rai::frontier_req);
request->start.clear ();
request->age = std::numeric_limits<decltype (request->age)>::max ();
request->count = std::numeric_limits<decltype (request->age)>::max ();
auto send_buffer (std::make_shared<std::vector<uint8_t>> ());
{
rai::vectorstream stream (*send_buffer);
request->serialize (stream);
}
auto this_l (shared_from_this ());
connection->start_timeout ();
boost::asio::async_write (connection->socket, boost::asio::buffer (send_buffer->data (), send_buffer->size ()), [this_l, send_buffer](boost::system::error_code const & ec, size_t size_a) {
this_l->connection->stop_timeout ();
if (!ec)
{
this_l->receive_frontier ();
}
else
{
if (this_l->connection->node->config.logging.network_logging ())
{
BOOST_LOG (this_l->connection->node->log) << boost::str (boost::format ("Error while sending bootstrap request %1%") % ec.message ());
}
}
});
}
std::shared_ptr<rai::bootstrap_client> rai::bootstrap_client::shared ()
{
return shared_from_this ();
}
rai::frontier_req_client::frontier_req_client (std::shared_ptr<rai::bootstrap_client> connection_a) :
connection (connection_a),
current (0),
count (0),
landing ("059F68AAB29DE0D3A27443625C7EA9CDDB6517A8B76FE37727EF6A4D76832AD5"),
faucet ("8E319CE6F3025E5B2DF66DA7AB1467FE48F1679C13DD43BFDB29FA2E9FC40D3B"),
next_report (std::chrono::system_clock::now () + std::chrono::seconds (15))
{
rai::transaction transaction (connection->node->store.environment, nullptr, false);
next (transaction);
}
rai::frontier_req_client::~frontier_req_client ()
{
}
void rai::frontier_req_client::receive_frontier ()
{
auto this_l (shared_from_this ());
connection->start_timeout ();
boost::asio::async_read (connection->socket, boost::asio::buffer (connection->receive_buffer.data (), sizeof (rai::uint256_union) + sizeof (rai::uint256_union)), [this_l](boost::system::error_code const & ec, size_t size_a) {
this_l->connection->stop_timeout ();
this_l->received_frontier (ec, size_a);
});
}
void rai::frontier_req_client::request_account (rai::account const & account_a, rai::block_hash const & latest_a)
{
// Account they know about and we don't.
rai::account account_1 ("6B31E80CABDD2FEE6F54A7BDBF91B666010418F4438EF0B48168F93CD79DBC85"); // xrb_1tsjx18cqqbhxsqobbxxqyauesi31iehaiwgy4ta4t9s9mdsuh671npo1st9
rai::account account_2 ("FD6EE9E0E107A6A8584DB94A3F154799DD5C2A7D6ABED0889DA3B837B0E61663"); // xrb_3zdgx9ig43x8o3e6ugcc9wcnh8gxdio9ttoyt46buaxr8yrge7m5331qdwhk
if (account_a != landing && account_a != faucet && account_a != account_1 && account_a != account_2)
{
insert_pull (rai::pull_info (account_a, latest_a, rai::block_hash (0)));
}
else
{
std::lock_guard<std::mutex> lock (connection->attempt->mutex);
connection->attempt->pulls.push_front (rai::pull_info (account_a, latest_a, rai::block_hash (0)));
}
}
void rai::frontier_req_client::unsynced (MDB_txn * transaction_a, rai::block_hash const & ours_a, rai::block_hash const & theirs_a)
{
auto current (ours_a);
while (!current.is_zero () && current != theirs_a)
{
connection->node->store.unsynced_put (transaction_a, current);
auto block (connection->node->store.block_get (transaction_a, current));
current = block->previous ();
}
}
void rai::frontier_req_client::received_frontier (boost::system::error_code const & ec, size_t size_a)
{
if (!ec)
{
assert (size_a == sizeof (rai::uint256_union) + sizeof (rai::uint256_union));
rai::account account;
rai::bufferstream account_stream (connection->receive_buffer.data (), sizeof (rai::uint256_union));
auto error1 (rai::read (account_stream, account));
assert (!error1);
rai::block_hash latest;
rai::bufferstream latest_stream (connection->receive_buffer.data () + sizeof (rai::uint256_union), sizeof (rai::uint256_union));
auto error2 (rai::read (latest_stream, latest));
assert (!error2);
++count;
auto now (std::chrono::system_clock::now ());
if (next_report < now)
{
next_report = now + std::chrono::seconds (15);
BOOST_LOG (connection->node->log) << boost::str (boost::format ("Received %1% frontiers from %2%") % std::to_string (count) % connection->socket.remote_endpoint ());
}
if (!account.is_zero ())
{
while (!current.is_zero () && current < account)
{
// We know about an account they don't.
rai::transaction transaction (connection->node->store.environment, nullptr, true);
if (connection->node->wallets.exists (transaction, current))
{
unsynced (transaction, info.head, 0);
}
next (transaction);
}
if (!current.is_zero ())
{
if (account == current)
{
rai::transaction transaction (connection->node->store.environment, nullptr, true);
if (latest == info.head)
{
// In sync
}
else
{
if (connection->node->store.block_exists (transaction, latest))
{
// We know about a block they don't.
if (connection->node->wallets.exists (transaction, current))
{
unsynced (transaction, info.head, latest);
}
}
else
{
// They know about a block we don't.
if (account != rai::genesis_account && account != landing && account != faucet)
{
insert_pull (rai::pull_info (account, latest, info.head));
}
else
{
connection->attempt->pulls.push_front (rai::pull_info (account, latest, info.head));
}
}
}
next (transaction);
}
else
{
assert (account < current);
request_account (account, latest);
}
}
else
{
request_account (account, latest);
}
receive_frontier ();
}
else
{
{
rai::transaction transaction (connection->node->store.environment, nullptr, true);
while (!current.is_zero ())
{
// We know about an account they don't.
if (connection->node->wallets.exists (transaction, current))
{
unsynced (transaction, info.head, 0);
}
next (transaction);
}
}
{
try
{
promise.set_value (false);
}
catch (std::future_error &)
{
}
connection->attempt->pool_connection (connection);
}
}
}
else
{
if (connection->node->config.logging.network_logging ())
{
BOOST_LOG (connection->node->log) << boost::str (boost::format ("Error while receiving frontier %1%") % ec.message ());
}
}
}
void rai::frontier_req_client::insert_pull (rai::pull_info const & pull_a)
{
std::lock_guard<std::mutex> lock (connection->attempt->mutex);
connection->attempt->pulls.insert (connection->attempt->pulls.begin () + rai::random_pool.GenerateWord32 (0, connection->attempt->pulls.size ()), pull_a);
}
void rai::frontier_req_client::next (MDB_txn * transaction_a)
{
auto iterator (connection->node->store.latest_begin (transaction_a, rai::uint256_union (current.number () + 1)));
if (iterator != connection->node->store.latest_end ())
{
current = rai::account (iterator->first.uint256 ());
info = rai::account_info (iterator->second);
}
else
{
current.clear ();
}
}
rai::bulk_pull_client::bulk_pull_client (std::shared_ptr<rai::bootstrap_client> connection_a) :
connection (connection_a)
{
assert (!connection->attempt->mutex.try_lock ());
++connection->attempt->pulling;
connection->attempt->condition.notify_all ();
}
rai::bulk_pull_client::~bulk_pull_client ()
{
{
std::lock_guard<std::mutex> mutex (connection->attempt->mutex);
--connection->attempt->pulling;
connection->attempt->condition.notify_all ();
}
if (!pull.account.is_zero ())
{
connection->attempt->requeue_pull (pull);
}
}
void rai::bulk_pull_client::request (rai::pull_info const & pull_a)
{
pull = pull_a;
expected = pull_a.head;
rai::bulk_pull req;
req.start = pull_a.account;
req.end = pull_a.end;
auto buffer (std::make_shared<std::vector<uint8_t>> ());
{
rai::vectorstream stream (*buffer);
req.serialize (stream);
}
if (connection->node->config.logging.bulk_pull_logging ())
{
BOOST_LOG (connection->node->log) << boost::str (boost::format ("Requesting account %1% from %2%") % req.start.to_account () % connection->endpoint);
}
else if (connection->node->config.logging.network_logging () && connection->attempt->account_count++ % 256 == 0)
{
BOOST_LOG (connection->node->log) << boost::str (boost::format ("Requesting account %1% from %2%") % req.start.to_account () % connection->endpoint);
}
auto this_l (shared_from_this ());
connection->start_timeout ();
boost::asio::async_write (connection->socket, boost::asio::buffer (buffer->data (), buffer->size ()), [this_l, buffer](boost::system::error_code const & ec, size_t size_a) {
this_l->connection->stop_timeout ();
if (!ec)
{
this_l->receive_block ();
}
else
{
BOOST_LOG (this_l->connection->node->log) << boost::str (boost::format ("Error sending bulk pull request %1% to %2%") % ec.message () % this_l->connection->endpoint);
}
});
}
void rai::bulk_pull_client::receive_block ()
{
auto this_l (shared_from_this ());
connection->start_timeout ();
boost::asio::async_read (connection->socket, boost::asio::buffer (connection->receive_buffer.data (), 1), [this_l](boost::system::error_code const & ec, size_t size_a) {
this_l->connection->stop_timeout ();
if (!ec)
{
this_l->received_type ();
}
else
{
BOOST_LOG (this_l->connection->node->log) << boost::str (boost::format ("Error receiving block type %1%") % ec.message ());
}
});
}
void rai::bulk_pull_client::received_type ()
{
auto this_l (shared_from_this ());
rai::block_type type (static_cast<rai::block_type> (connection->receive_buffer[0]));
switch (type)
{
case rai::block_type::send:
{
connection->start_timeout ();
boost::asio::async_read (connection->socket, boost::asio::buffer (connection->receive_buffer.data () + 1, rai::send_block::size), [this_l](boost::system::error_code const & ec, size_t size_a) {
this_l->connection->stop_timeout ();
this_l->received_block (ec, size_a);
});
break;
}
case rai::block_type::receive:
{
connection->start_timeout ();
boost::asio::async_read (connection->socket, boost::asio::buffer (connection->receive_buffer.data () + 1, rai::receive_block::size), [this_l](boost::system::error_code const & ec, size_t size_a) {
this_l->connection->stop_timeout ();
this_l->received_block (ec, size_a);
});
break;
}
case rai::block_type::open:
{
connection->start_timeout ();
boost::asio::async_read (connection->socket, boost::asio::buffer (connection->receive_buffer.data () + 1, rai::open_block::size), [this_l](boost::system::error_code const & ec, size_t size_a) {
this_l->connection->stop_timeout ();
this_l->received_block (ec, size_a);
});
break;
}
case rai::block_type::change:
{
connection->start_timeout ();
boost::asio::async_read (connection->socket, boost::asio::buffer (connection->receive_buffer.data () + 1, rai::change_block::size), [this_l](boost::system::error_code const & ec, size_t size_a) {
this_l->connection->stop_timeout ();
this_l->received_block (ec, size_a);
});
break;
}
case rai::block_type::not_a_block:
{
connection->attempt->pool_connection (connection);
if (expected == pull.end)
{
pull = rai::pull_info ();
}
break;
}
default:
{
BOOST_LOG (connection->node->log) << boost::str (boost::format ("Unknown type received as block type: %1%") % static_cast<int> (type));
break;
}
}
}
void rai::bulk_pull_client::received_block (boost::system::error_code const & ec, size_t size_a)
{
if (!ec)
{
rai::bufferstream stream (connection->receive_buffer.data (), 1 + size_a);
std::shared_ptr<rai::block> block (rai::deserialize_block (stream));
if (block != nullptr)
{
auto hash (block->hash ());
if (connection->node->config.logging.bulk_pull_logging ())
{
std::string block_l;
block->serialize_json (block_l);
BOOST_LOG (connection->node->log) << boost::str (boost::format ("Pulled block %1% %2%") % hash.to_string () % block_l);
}
if (hash == expected)
{
expected = block->previous ();
}
auto attempt_l (connection->attempt);
auto pull_l (pull);
attempt_l->node->block_processor.add (rai::block_processor_item (block, [attempt_l, pull_l](MDB_txn * transaction_a, rai::process_return result_a, std::shared_ptr<rai::block> block_a) {
switch (result_a.code)
{
case rai::process_result::progress:
case rai::process_result::old:
break;
case rai::process_result::fork:
{
auto node_l (attempt_l->node);
std::shared_ptr<rai::block> block (node_l->ledger.forked_block (transaction_a, *block_a));
if (!node_l->active.start (transaction_a, block))
{
node_l->network.broadcast_confirm_req (block_a);
node_l->network.broadcast_confirm_req (block);
auto hash (block_a->hash ());
attempt_l->requeue_pull (rai::pull_info (pull_l.account, hash, hash));
							BOOST_LOG (node_l->log) << boost::str (boost::format ("While bootstrapping, fork between our block: %2% and block %1% both with root %3%") % block_a->hash ().to_string () % block->hash ().to_string () % block_a->root ().to_string ());
}
break;
}
default:
break;
}
}));
receive_block ();
}
else
{
BOOST_LOG (connection->node->log) << "Error deserializing block received from pull request";
}
}
else
{
BOOST_LOG (connection->node->log) << boost::str (boost::format ("Error bulk receiving block: %1%") % ec.message ());
}
}
rai::bulk_push_client::bulk_push_client (std::shared_ptr<rai::bootstrap_client> const & connection_a) :
connection (connection_a),
synchronization (*connection->node, [this](MDB_txn * transaction_a, rai::block const & block_a) {
push_block (block_a);
return rai::sync_result::success;
})
{
}
rai::bulk_push_client::~bulk_push_client ()
{
}
void rai::bulk_push_client::start ()
{
rai::bulk_push message;
auto buffer (std::make_shared<std::vector<uint8_t>> ());
{
rai::vectorstream stream (*buffer);
message.serialize (stream);
}
auto this_l (shared_from_this ());
connection->start_timeout ();
boost::asio::async_write (connection->socket, boost::asio::buffer (buffer->data (), buffer->size ()), [this_l, buffer](boost::system::error_code const & ec, size_t size_a) {
this_l->connection->stop_timeout ();
rai::transaction transaction (this_l->connection->node->store.environment, nullptr, true);
if (!ec)
{
this_l->push (transaction);
}
else
{
BOOST_LOG (this_l->connection->node->log) << boost::str (boost::format ("Unable to send bulk_push request %1%") % ec.message ());
}
});
}
void rai::bulk_push_client::push (MDB_txn * transaction_a)
{
auto finished (false);
{
auto first (connection->node->store.unsynced_begin (transaction_a));
if (first != rai::store_iterator (nullptr))
{
rai::block_hash hash (first->first.uint256 ());
if (!hash.is_zero ())
{
connection->node->store.unsynced_del (transaction_a, hash);
synchronization.blocks.push_back (hash);
synchronization.synchronize_one (transaction_a);
}
else
{
finished = true;
}
}
else
{
finished = true;
}
}
if (finished)
{
send_finished ();
}
}
void rai::bulk_push_client::send_finished ()
{
auto buffer (std::make_shared<std::vector<uint8_t>> ());
buffer->push_back (static_cast<uint8_t> (rai::block_type::not_a_block));
if (connection->node->config.logging.network_logging ())
{
BOOST_LOG (connection->node->log) << "Bulk push finished";
}
auto this_l (shared_from_this ());
async_write (connection->socket, boost::asio::buffer (buffer->data (), 1), [this_l](boost::system::error_code const & ec, size_t size_a) {
try
{
this_l->promise.set_value (false);
}
catch (std::future_error &)
{
}
});
}
void rai::bulk_push_client::push_block (rai::block const & block_a)
{
auto buffer (std::make_shared<std::vector<uint8_t>> ());
{
rai::vectorstream stream (*buffer);
rai::serialize_block (stream, block_a);
}
auto this_l (shared_from_this ());
connection->start_timeout ();
boost::asio::async_write (connection->socket, boost::asio::buffer (buffer->data (), buffer->size ()), [this_l, buffer](boost::system::error_code const & ec, size_t size_a) {
this_l->connection->stop_timeout ();
if (!ec)
{
rai::transaction transaction (this_l->connection->node->store.environment, nullptr, true);
if (!this_l->synchronization.blocks.empty ())
{
this_l->synchronization.synchronize_one (transaction);
}
else
{
this_l->push (transaction);
}
}
else
{
BOOST_LOG (this_l->connection->node->log) << boost::str (boost::format ("Error sending block during bulk push %1%") % ec.message ());
}
});
}
rai::pull_info::pull_info () :
account (0),
end (0),
attempts (0)
{
}
rai::pull_info::pull_info (rai::account const & account_a, rai::block_hash const & head_a, rai::block_hash const & end_a) :
account (account_a),
head (head_a),
end (end_a),
attempts (0)
{
}
rai::bootstrap_attempt::bootstrap_attempt (std::shared_ptr<rai::node> node_a) :
connections (0),
pulling (0),
node (node_a),
account_count (0),
stopped (false)
{
BOOST_LOG (node->log) << "Starting bootstrap attempt";
node->bootstrap_initiator.notify_listeners (true);
}
rai::bootstrap_attempt::~bootstrap_attempt ()
{
BOOST_LOG (node->log) << "Exiting bootstrap attempt";
node->bootstrap_initiator.notify_listeners (false);
}
bool rai::bootstrap_attempt::request_frontier (std::unique_lock<std::mutex> & lock_a)
{
auto result (true);
auto connection_l (connection (lock_a));
if (connection_l)
{
std::future<bool> future;
{
auto client (std::make_shared<rai::frontier_req_client> (connection_l));
client->run ();
frontiers = client;
future = client->promise.get_future ();
}
lock_a.unlock ();
result = consume_future (future);
lock_a.lock ();
if (result)
{
pulls.clear ();
}
if (node->config.logging.network_logging ())
{
if (!result)
{
BOOST_LOG (node->log) << boost::str (boost::format ("Completed frontier request, %1% out of sync accounts according to %2%") % pulls.size () % connection_l->endpoint);
}
else
{
BOOST_LOG (node->log) << "frontier_req failed, reattempting";
}
}
}
return result;
}
void rai::bootstrap_attempt::request_pull (std::unique_lock<std::mutex> & lock_a)
{
auto connection_l (connection (lock_a));
if (connection_l)
{
auto pull (pulls.front ());
pulls.pop_front ();
auto client (std::make_shared<rai::bulk_pull_client> (connection_l));
		// The bulk_pull_client destructor attempts to requeue_pull, which can cause a deadlock if this is the last reference
		// Dispatch the request in an external thread in case it needs to be destroyed
node->background ([client, pull]() {
client->request (pull);
});
}
}
bool rai::bootstrap_attempt::request_push (std::unique_lock<std::mutex> & lock_a)
{
auto result (true);
auto connection_l (connection (lock_a));
if (connection_l)
{
std::future<bool> future;
{
auto client (std::make_shared<rai::bulk_push_client> (connection_l));
client->start ();
push = client;
future = client->promise.get_future ();
}
lock_a.unlock ();
result = consume_future (future);
lock_a.lock ();
if (node->config.logging.network_logging ())
{
BOOST_LOG (node->log) << "Exiting bulk push client";
if (result)
{
BOOST_LOG (node->log) << "Bulk push client failed";
}
}
}
return result;
}
bool rai::bootstrap_attempt::still_pulling ()
{
assert (!mutex.try_lock ());
auto running (!stopped);
auto more_pulls (!pulls.empty ());
auto still_pulling (pulling > 0);
return running && (more_pulls || still_pulling);
}
void rai::bootstrap_attempt::run ()
{
populate_connections ();
std::unique_lock<std::mutex> lock (mutex);
auto frontier_failure (true);
while (!stopped && frontier_failure)
{
frontier_failure = request_frontier (lock);
}
while (still_pulling ())
{
while (still_pulling ())
{
if (!pulls.empty ())
{
request_pull (lock);
}
else
{
condition.wait (lock);
}
}
// Flushing may resolve forks which can add more pulls
BOOST_LOG (node->log) << "Flushing unchecked blocks";
lock.unlock ();
node->block_processor.flush ();
lock.lock ();
BOOST_LOG (node->log) << "Finished flushing unchecked blocks";
}
if (!stopped)
{
BOOST_LOG (node->log) << "Completed pulls";
}
auto push_failure (true);
while (!stopped && push_failure)
{
push_failure = request_push (lock);
}
stopped = true;
condition.notify_all ();
idle.clear ();
}
std::shared_ptr<rai::bootstrap_client> rai::bootstrap_attempt::connection (std::unique_lock<std::mutex> & lock_a)
{
while (!stopped && idle.empty ())
{
condition.wait (lock_a);
}
std::shared_ptr<rai::bootstrap_client> result;
if (!idle.empty ())
{
result = idle.back ();
idle.pop_back ();
}
return result;
}
bool rai::bootstrap_attempt::consume_future (std::future<bool> & future_a)
{
bool result;
try
{
result = future_a.get ();
}
catch (std::future_error &)
{
result = true;
}
return result;
}
void rai::bootstrap_attempt::populate_connections ()
{
if (connections < node->config.bootstrap_connections)
{
auto peer (node->peers.bootstrap_peer ());
if (peer != rai::endpoint (boost::asio::ip::address_v6::any (), 0))
{
auto client (std::make_shared<rai::bootstrap_client> (node, shared_from_this (), rai::tcp_endpoint (peer.address (), peer.port ())));
client->run ();
std::lock_guard<std::mutex> lock (mutex);
clients.push_back (client);
}
else
{
BOOST_LOG (node->log) << boost::str (boost::format ("Bootstrap stopped because there are no peers"));
stopped = true;
condition.notify_all ();
}
}
if (!stopped)
{
std::weak_ptr<rai::bootstrap_attempt> this_w (shared_from_this ());
node->alarm.add (std::chrono::system_clock::now () + std::chrono::seconds (5), [this_w]() {
if (auto this_l = this_w.lock ())
{
this_l->populate_connections ();
}
});
}
}
void rai::bootstrap_attempt::add_connection (rai::endpoint const & endpoint_a)
{
auto client (std::make_shared<rai::bootstrap_client> (node, shared_from_this (), rai::tcp_endpoint (endpoint_a.address (), endpoint_a.port ())));
client->run ();
}
void rai::bootstrap_attempt::pool_connection (std::shared_ptr<rai::bootstrap_client> client_a)
{
std::lock_guard<std::mutex> lock (mutex);
idle.push_back (client_a);
condition.notify_all ();
}
void rai::bootstrap_attempt::stop ()
{
std::lock_guard<std::mutex> lock (mutex);
stopped = true;
condition.notify_all ();
for (auto i : clients)
{
if (auto client = i.lock ())
{
client->socket.close ();
}
}
if (auto i = frontiers.lock ())
{
try
{
i->promise.set_value (true);
}
catch (std::future_error &)
{
}
}
if (auto i = push.lock ())
{
try
{
i->promise.set_value (true);
}
catch (std::future_error &)
{
}
}
}
void rai::bootstrap_attempt::requeue_pull (rai::pull_info const & pull_a)
{
auto pull (pull_a);
if (++pull.attempts < 4)
{
std::lock_guard<std::mutex> lock (mutex);
pulls.push_front (pull);
condition.notify_all ();
}
else
{
if (node->config.logging.bulk_pull_logging ())
{
BOOST_LOG (node->log) << boost::str (boost::format ("Failed to pull account %1% down to %2% after %3% attempts") % pull.account.to_account () % pull.end.to_string () % pull.attempts);
}
}
}
rai::bootstrap_initiator::bootstrap_initiator (rai::node & node_a) :
node (node_a),
stopped (false),
thread ([this]() { run_bootstrap (); })
{
}
rai::bootstrap_initiator::~bootstrap_initiator ()
{
stop ();
thread.join ();
}
void rai::bootstrap_initiator::bootstrap ()
{
std::unique_lock<std::mutex> lock (mutex);
if (!stopped && attempt == nullptr)
{
attempt = std::make_shared<rai::bootstrap_attempt> (node.shared ());
condition.notify_all ();
}
}
void rai::bootstrap_initiator::bootstrap (rai::endpoint const & endpoint_a)
{
node.peers.insert (endpoint_a, 0x5);
std::unique_lock<std::mutex> lock (mutex);
if (!stopped)
{
while (attempt != nullptr)
{
attempt->stop ();
condition.wait (lock);
}
attempt = std::make_shared<rai::bootstrap_attempt> (node.shared ());
attempt->add_connection (endpoint_a);
condition.notify_all ();
}
}
void rai::bootstrap_initiator::run_bootstrap ()
{
std::unique_lock<std::mutex> lock (mutex);
while (!stopped)
{
if (attempt != nullptr)
{
lock.unlock ();
attempt->run ();
lock.lock ();
attempt = nullptr;
condition.notify_all ();
}
else
{
condition.wait (lock);
}
}
}
void rai::bootstrap_initiator::add_observer (std::function<void(bool)> const & observer_a)
{
std::lock_guard<std::mutex> lock (mutex);
observers.push_back (observer_a);
}
bool rai::bootstrap_initiator::in_progress ()
{
std::lock_guard<std::mutex> lock (mutex);
return attempt != nullptr;
}
void rai::bootstrap_initiator::stop ()
{
std::unique_lock<std::mutex> lock (mutex);
stopped = true;
if (attempt != nullptr)
{
attempt->stop ();
}
condition.notify_all ();
}
void rai::bootstrap_initiator::notify_listeners (bool in_progress_a)
{
for (auto & i : observers)
{
i (in_progress_a);
}
}
rai::bootstrap_listener::bootstrap_listener (boost::asio::io_service & service_a, uint16_t port_a, rai::node & node_a) :
acceptor (service_a),
local (boost::asio::ip::tcp::endpoint (boost::asio::ip::address_v6::any (), port_a)),
service (service_a),
node (node_a)
{
}
void rai::bootstrap_listener::start ()
{
acceptor.open (local.protocol ());
acceptor.set_option (boost::asio::ip::tcp::acceptor::reuse_address (true));
boost::system::error_code ec;
acceptor.bind (local, ec);
if (ec)
{
BOOST_LOG (node.log) << boost::str (boost::format ("Error while binding for bootstrap on port %1%: %2%") % local.port () % ec.message ());
throw std::runtime_error (ec.message ());
}
acceptor.listen ();
accept_connection ();
}
void rai::bootstrap_listener::stop ()
{
on = false;
std::lock_guard<std::mutex> lock (mutex);
acceptor.close ();
for (auto & i : connections)
{
auto connection (i.second.lock ());
if (connection)
{
connection->socket->close ();
}
}
}
void rai::bootstrap_listener::accept_connection ()
{
auto socket (std::make_shared<boost::asio::ip::tcp::socket> (service));
acceptor.async_accept (*socket, [this, socket](boost::system::error_code const & ec) {
accept_action (ec, socket);
});
}
void rai::bootstrap_listener::accept_action (boost::system::error_code const & ec, std::shared_ptr<boost::asio::ip::tcp::socket> socket_a)
{
if (!ec)
{
accept_connection ();
auto connection (std::make_shared<rai::bootstrap_server> (socket_a, node.shared ()));
{
std::lock_guard<std::mutex> lock (mutex);
if (acceptor.is_open ())
{
connections[connection.get ()] = connection;
connection->receive ();
}
}
}
else
{
BOOST_LOG (node.log) << boost::str (boost::format ("Error while accepting bootstrap connections: %1%") % ec.message ());
}
}
boost::asio::ip::tcp::endpoint rai::bootstrap_listener::endpoint ()
{
return boost::asio::ip::tcp::endpoint (boost::asio::ip::address_v6::loopback (), local.port ());
}
rai::bootstrap_server::~bootstrap_server ()
{
if (node->config.logging.bulk_pull_logging ())
{
BOOST_LOG (node->log) << "Exiting bootstrap server";
}
std::lock_guard<std::mutex> lock (node->bootstrap.mutex);
node->bootstrap.connections.erase (this);
}
rai::bootstrap_server::bootstrap_server (std::shared_ptr<boost::asio::ip::tcp::socket> socket_a, std::shared_ptr<rai::node> node_a) :
socket (socket_a),
node (node_a)
{
}
void rai::bootstrap_server::receive ()
{
auto this_l (shared_from_this ());
boost::asio::async_read (*socket, boost::asio::buffer (receive_buffer.data (), 8), [this_l](boost::system::error_code const & ec, size_t size_a) {
this_l->receive_header_action (ec, size_a);
});
}
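// Editor's note (illustrative, not in the original source): the literal 8 read
// above, and again in receive_header_action below, is the number of bytes of
// the fixed-size message header that rai::message::read_header parses.  One
// way to make that explicit, sketched here under that assumption, is a named
// constant:
//
//   static constexpr size_t bootstrap_message_header_size = 8;
//   boost::asio::async_read (*socket,
//   boost::asio::buffer (receive_buffer.data (), bootstrap_message_header_size),
//   [this_l](boost::system::error_code const & ec, size_t size_a) {
//       this_l->receive_header_action (ec, size_a);
//   });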
void rai::bootstrap_server::receive_header_action (boost::system::error_code const & ec, size_t size_a)
{
if (!ec)
{
assert (size_a == 8);
rai::bufferstream type_stream (receive_buffer.data (), size_a);
uint8_t version_max;
uint8_t version_using;
uint8_t version_min;
rai::message_type type;
std::bitset<16> extensions;
if (!rai::message::read_header (type_stream, version_max, version_using, version_min, type, extensions))
{
switch (type)
{
case rai::message_type::bulk_pull:
{
auto this_l (shared_from_this ());
boost::asio::async_read (*socket, boost::asio::buffer (receive_buffer.data () + 8, sizeof (rai::uint256_union) + sizeof (rai::uint256_union)), [this_l](boost::system::error_code const & ec, size_t size_a) {
this_l->receive_bulk_pull_action (ec, size_a);
});
break;
}
case rai::message_type::frontier_req:
{
auto this_l (shared_from_this ());
boost::asio::async_read (*socket, boost::asio::buffer (receive_buffer.data () + 8, sizeof (rai::uint256_union) + sizeof (uint32_t) + sizeof (uint32_t)), [this_l](boost::system::error_code const & ec, size_t size_a) {
this_l->receive_frontier_req_action (ec, size_a);
});
break;
}
case rai::message_type::bulk_push:
{
add_request (std::unique_ptr<rai::message> (new rai::bulk_push));
break;
}
default:
{
if (node->config.logging.network_logging ())
{
BOOST_LOG (node->log) << boost::str (boost::format ("Received invalid type from bootstrap connection %1%") % static_cast<uint8_t> (type));
}
break;
}
}
}
}
else
{
if (node->config.logging.bulk_pull_logging ())
{
BOOST_LOG (node->log) << boost::str (boost::format ("Error while receiving type %1%") % ec.message ());
}
}
}
void rai::bootstrap_server::receive_bulk_pull_action (boost::system::error_code const & ec, size_t size_a)
{
if (!ec)
{
std::unique_ptr<rai::bulk_pull> request (new rai::bulk_pull);
rai::bufferstream stream (receive_buffer.data (), 8 + sizeof (rai::uint256_union) + sizeof (rai::uint256_union));
auto error (request->deserialize (stream));
if (!error)
{
if (node->config.logging.bulk_pull_logging ())
{
BOOST_LOG (node->log) << boost::str (boost::format ("Received bulk pull for %1% down to %2%") % request->start.to_string () % request->end.to_string ());
}
add_request (std::unique_ptr<rai::message> (request.release ()));
receive ();
}
}
}
void rai::bootstrap_server::receive_frontier_req_action (boost::system::error_code const & ec, size_t size_a)
{
if (!ec)
{
std::unique_ptr<rai::frontier_req> request (new rai::frontier_req);
rai::bufferstream stream (receive_buffer.data (), 8 + sizeof (rai::uint256_union) + sizeof (uint32_t) + sizeof (uint32_t));
auto error (request->deserialize (stream));
if (!error)
{
if (node->config.logging.bulk_pull_logging ())
{
BOOST_LOG (node->log) << boost::str (boost::format ("Received frontier request for %1% with age %2%") % request->start.to_string () % request->age);
}
add_request (std::unique_ptr<rai::message> (request.release ()));
receive ();
}
}
else
{
if (node->config.logging.network_logging ())
{
			BOOST_LOG (node->log) << boost::str (boost::format ("Error receiving frontier request %1%") % ec.message ());
}
}
}
void rai::bootstrap_server::add_request (std::unique_ptr<rai::message> message_a)
{
std::lock_guard<std::mutex> lock (mutex);
auto start (requests.empty ());
requests.push (std::move (message_a));
if (start)
{
run_next ();
}
}
void rai::bootstrap_server::finish_request ()
{
std::lock_guard<std::mutex> lock (mutex);
requests.pop ();
if (!requests.empty ())
{
run_next ();
}
}
namespace
{
class request_response_visitor : public rai::message_visitor
{
public:
request_response_visitor (std::shared_ptr<rai::bootstrap_server> connection_a) :
connection (connection_a)
{
}
void keepalive (rai::keepalive const &) override
{
assert (false);
}
void publish (rai::publish const &) override
{
assert (false);
}
void confirm_req (rai::confirm_req const &) override
{
assert (false);
}
void confirm_ack (rai::confirm_ack const &) override
{
assert (false);
}
void bulk_pull (rai::bulk_pull const &) override
{
auto response (std::make_shared<rai::bulk_pull_server> (connection, std::unique_ptr<rai::bulk_pull> (static_cast<rai::bulk_pull *> (connection->requests.front ().release ()))));
response->send_next ();
}
void bulk_push (rai::bulk_push const &) override
{
auto response (std::make_shared<rai::bulk_push_server> (connection));
response->receive ();
}
void frontier_req (rai::frontier_req const &) override
{
auto response (std::make_shared<rai::frontier_req_server> (connection, std::unique_ptr<rai::frontier_req> (static_cast<rai::frontier_req *> (connection->requests.front ().release ()))));
response->send_next ();
}
std::shared_ptr<rai::bootstrap_server> connection;
};
}
void rai::bootstrap_server::run_next ()
{
assert (!requests.empty ());
request_response_visitor visitor (shared_from_this ());
requests.front ()->visit (visitor);
}
void rai::bulk_pull_server::set_current_end ()
{
assert (request != nullptr);
rai::transaction transaction (connection->node->store.environment, nullptr, false);
if (!connection->node->store.block_exists (transaction, request->end))
{
if (connection->node->config.logging.bulk_pull_logging ())
{
BOOST_LOG (connection->node->log) << boost::str (boost::format ("Bulk pull end block doesn't exist: %1%, sending everything") % request->end.to_string ());
}
request->end.clear ();
}
rai::account_info info;
auto no_address (connection->node->store.account_get (transaction, request->start, info));
if (no_address)
{
if (connection->node->config.logging.bulk_pull_logging ())
{
BOOST_LOG (connection->node->log) << boost::str (boost::format ("Request for unknown account: %1%") % request->start.to_account ());
}
current = request->end;
}
else
{
if (!request->end.is_zero ())
{
auto account (connection->node->ledger.account (transaction, request->end));
if (account == request->start)
{
current = info.head;
}
else
{
current = request->end;
}
}
else
{
current = info.head;
}
}
}
void rai::bulk_pull_server::send_next ()
{
std::unique_ptr<rai::block> block (get_next ());
if (block != nullptr)
{
{
send_buffer.clear ();
rai::vectorstream stream (send_buffer);
rai::serialize_block (stream, *block);
}
auto this_l (shared_from_this ());
if (connection->node->config.logging.bulk_pull_logging ())
{
BOOST_LOG (connection->node->log) << boost::str (boost::format ("Sending block: %1%") % block->hash ().to_string ());
}
async_write (*connection->socket, boost::asio::buffer (send_buffer.data (), send_buffer.size ()), [this_l](boost::system::error_code const & ec, size_t size_a) {
this_l->sent_action (ec, size_a);
});
}
else
{
send_finished ();
}
}
std::unique_ptr<rai::block> rai::bulk_pull_server::get_next ()
{
std::unique_ptr<rai::block> result;
if (current != request->end)
{
rai::transaction transaction (connection->node->store.environment, nullptr, false);
result = connection->node->store.block_get (transaction, current);
if (result != nullptr)
{
auto previous (result->previous ());
if (!previous.is_zero ())
{
current = previous;
}
else
{
current = request->end;
}
}
else
{
current = request->end;
}
}
return result;
}
void rai::bulk_pull_server::sent_action (boost::system::error_code const & ec, size_t size_a)
{
if (!ec)
{
send_next ();
}
else
{
BOOST_LOG (connection->node->log) << boost::str (boost::format ("Unable to bulk send block: %1%") % ec.message ());
}
}
void rai::bulk_pull_server::send_finished ()
{
send_buffer.clear ();
send_buffer.push_back (static_cast<uint8_t> (rai::block_type::not_a_block));
auto this_l (shared_from_this ());
if (connection->node->config.logging.bulk_pull_logging ())
{
BOOST_LOG (connection->node->log) << "Bulk sending finished";
}
async_write (*connection->socket, boost::asio::buffer (send_buffer.data (), 1), [this_l](boost::system::error_code const & ec, size_t size_a) {
this_l->no_block_sent (ec, size_a);
});
}
void rai::bulk_pull_server::no_block_sent (boost::system::error_code const & ec, size_t size_a)
{
if (!ec)
{
assert (size_a == 1);
connection->finish_request ();
}
else
{
BOOST_LOG (connection->node->log) << "Unable to send not-a-block";
}
}
rai::bulk_pull_server::bulk_pull_server (std::shared_ptr<rai::bootstrap_server> const & connection_a, std::unique_ptr<rai::bulk_pull> request_a) :
connection (connection_a),
request (std::move (request_a))
{
set_current_end ();
}
rai::bulk_push_server::bulk_push_server (std::shared_ptr<rai::bootstrap_server> const & connection_a) :
connection (connection_a)
{
}
void rai::bulk_push_server::receive ()
{
auto this_l (shared_from_this ());
boost::asio::async_read (*connection->socket, boost::asio::buffer (receive_buffer.data (), 1), [this_l](boost::system::error_code const & ec, size_t size_a) {
if (!ec)
{
this_l->received_type ();
}
else
{
BOOST_LOG (this_l->connection->node->log) << boost::str (boost::format ("Error receiving block type %1%") % ec.message ());
}
});
}
void rai::bulk_push_server::received_type ()
{
auto this_l (shared_from_this ());
rai::block_type type (static_cast<rai::block_type> (receive_buffer[0]));
switch (type)
{
case rai::block_type::send:
{
boost::asio::async_read (*connection->socket, boost::asio::buffer (receive_buffer.data () + 1, rai::send_block::size), [this_l](boost::system::error_code const & ec, size_t size_a) {
this_l->received_block (ec, size_a);
});
break;
}
case rai::block_type::receive:
{
boost::asio::async_read (*connection->socket, boost::asio::buffer (receive_buffer.data () + 1, rai::receive_block::size), [this_l](boost::system::error_code const & ec, size_t size_a) {
this_l->received_block (ec, size_a);
});
break;
}
case rai::block_type::open:
{
boost::asio::async_read (*connection->socket, boost::asio::buffer (receive_buffer.data () + 1, rai::open_block::size), [this_l](boost::system::error_code const & ec, size_t size_a) {
this_l->received_block (ec, size_a);
});
break;
}
case rai::block_type::change:
{
boost::asio::async_read (*connection->socket, boost::asio::buffer (receive_buffer.data () + 1, rai::change_block::size), [this_l](boost::system::error_code const & ec, size_t size_a) {
this_l->received_block (ec, size_a);
});
break;
}
case rai::block_type::not_a_block:
{
connection->finish_request ();
break;
}
default:
{
BOOST_LOG (connection->node->log) << "Unknown type received as block type";
break;
}
}
}
void rai::bulk_push_server::received_block (boost::system::error_code const & ec, size_t size_a)
{
if (!ec)
{
rai::bufferstream stream (receive_buffer.data (), 1 + size_a);
auto block (rai::deserialize_block (stream));
if (block != nullptr)
{
if (!connection->node->bootstrap_initiator.in_progress ())
{
connection->node->process_active (std::move (block));
}
receive ();
}
else
{
BOOST_LOG (connection->node->log) << "Error deserializing block received from pull request";
}
}
}
rai::frontier_req_server::frontier_req_server (std::shared_ptr<rai::bootstrap_server> const & connection_a, std::unique_ptr<rai::frontier_req> request_a) :
connection (connection_a),
current (request_a->start.number () - 1),
info (0, 0, 0, 0, 0, 0),
request (std::move (request_a))
{
next ();
skip_old ();
}
void rai::frontier_req_server::skip_old ()
{
if (request->age != std::numeric_limits<decltype (request->age)>::max ())
{
auto now (connection->node->store.now ());
while (!current.is_zero () && (now - info.modified) >= request->age)
{
next ();
}
}
}
void rai::frontier_req_server::send_next ()
{
if (!current.is_zero ())
{
{
send_buffer.clear ();
rai::vectorstream stream (send_buffer);
write (stream, current.bytes);
write (stream, info.head.bytes);
}
auto this_l (shared_from_this ());
if (connection->node->config.logging.bulk_pull_logging ())
{
BOOST_LOG (connection->node->log) << boost::str (boost::format ("Sending frontier for %1% %2%") % current.to_account () % info.head.to_string ());
}
next ();
async_write (*connection->socket, boost::asio::buffer (send_buffer.data (), send_buffer.size ()), [this_l](boost::system::error_code const & ec, size_t size_a) {
this_l->sent_action (ec, size_a);
});
}
else
{
send_finished ();
}
}
void rai::frontier_req_server::send_finished ()
{
{
send_buffer.clear ();
rai::vectorstream stream (send_buffer);
rai::uint256_union zero (0);
write (stream, zero.bytes);
write (stream, zero.bytes);
}
auto this_l (shared_from_this ());
if (connection->node->config.logging.network_logging ())
{
BOOST_LOG (connection->node->log) << "Frontier sending finished";
}
async_write (*connection->socket, boost::asio::buffer (send_buffer.data (), send_buffer.size ()), [this_l](boost::system::error_code const & ec, size_t size_a) {
this_l->no_block_sent (ec, size_a);
});
}
void rai::frontier_req_server::no_block_sent (boost::system::error_code const & ec, size_t size_a)
{
if (!ec)
{
connection->finish_request ();
}
else
{
if (connection->node->config.logging.network_logging ())
{
BOOST_LOG (connection->node->log) << boost::str (boost::format ("Error sending frontier finish %1%") % ec.message ());
}
}
}
void rai::frontier_req_server::sent_action (boost::system::error_code const & ec, size_t size_a)
{
if (!ec)
{
send_next ();
}
else
{
if (connection->node->config.logging.network_logging ())
{
BOOST_LOG (connection->node->log) << boost::str (boost::format ("Error sending frontier pair %1%") % ec.message ());
}
}
}
void rai::frontier_req_server::next ()
{
rai::transaction transaction (connection->node->store.environment, nullptr, false);
auto iterator (connection->node->store.latest_begin (transaction, current.number () + 1));
if (iterator != connection->node->store.latest_end ())
{
current = rai::uint256_union (iterator->first.uint256 ());
info = rai::account_info (iterator->second);
}
else
{
current.clear ();
}
}
| 1 | 13,168 | What does the `8` here represent? Can you give it a name? | nanocurrency-nano-node | cpp |
@@ -0,0 +1,12 @@
+from pymatgen.ext.matproj import MPRester, TaskType
+import os
+material_ids = ["mp-32800", "mp-23494"]
+task_types = [TaskType.GGA_OPT, TaskType.GGA_UNIFORM]
+file_patterns = ["vasprun*", "OUTCAR*"]
+with MPRester(os.environ["MP_API_KEY"]) as mpr:
+ meta, urls = mpr.get_download_info(
+ material_ids, task_types=task_types, file_patterns=file_patterns
+ )
+
+print(meta)
+print(urls) | 1 | 1 | 19,257 | `MPRester(os.environ["MP_API_KEY"])` can be left simply as `MPRester()` and it will pick up the API key from the environment. What is the purpose of this file otherwise? Is it better suited for `dev_scripts`, or a test? | materialsproject-pymatgen | py |
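A minimal sketch of the reviewer's suggestion (editor's addition; it assumes the API key is configured in the environment so that MPRester() can pick it up, as the review comment notes):

from pymatgen.ext.matproj import MPRester, TaskType

with MPRester() as mpr:  # no explicit key; picked up from the environment
    meta, urls = mpr.get_download_info(
        ["mp-32800", "mp-23494"],
        task_types=[TaskType.GGA_OPT, TaskType.GGA_UNIFORM],
        file_patterns=["vasprun*", "OUTCAR*"],
    )
print(meta)
print(urls)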
|
@@ -11,7 +11,7 @@
#
# It's strongly recommended to check this file into your version control system.
-ActiveRecord::Schema.define(:version => 20121114193521) do
+ActiveRecord::Schema.define(:version => 20121212214215) do
create_table "announcements", :force => true do |t|
t.datetime "created_at", :null => false | 1 | # encoding: UTF-8
# This file is auto-generated from the current state of the database. Instead
# of editing this file, please use the migrations feature of Active Record to
# incrementally modify your database, and then regenerate this schema definition.
#
# Note that this schema.rb definition is the authoritative source for your
# database schema. If you need to create the application database on another
# system, you should be using db:schema:load, not running all the migrations
# from scratch. The latter is a flawed and unsustainable approach (the more migrations
# you'll amass, the slower it'll run and the greater likelihood for issues).
#
# It's strongly recommended to check this file into your version control system.
ActiveRecord::Schema.define(:version => 20121114193521) do
create_table "announcements", :force => true do |t|
t.datetime "created_at", :null => false
t.datetime "updated_at", :null => false
t.integer "announceable_id", :null => false
t.string "announceable_type", :null => false
t.text "message", :null => false
t.datetime "ends_at", :null => false
end
add_index "announcements", ["announceable_id", "announceable_type", "ends_at"], :name => "index_announcements_on_announceable_and_ends_at"
create_table "articles", :force => true do |t|
t.datetime "created_at", :null => false
t.datetime "updated_at", :null => false
t.string "title", :null => false
t.text "body_html", :null => false
t.string "tumblr_url", :null => false
t.integer "author_id"
t.date "published_on", :null => false
end
add_index "articles", ["author_id"], :name => "index_articles_on_author_id"
create_table "articles_topics", :id => false, :force => true do |t|
t.integer "article_id", :null => false
t.integer "topic_id", :null => false
end
add_index "articles_topics", ["article_id", "topic_id"], :name => "index_articles_topics_on_article_id_and_topic_id", :unique => true
create_table "audiences", :force => true do |t|
t.string "name"
t.integer "position"
t.datetime "created_at"
t.datetime "updated_at"
end
create_table "authors", :force => true do |t|
t.datetime "created_at", :null => false
t.datetime "updated_at", :null => false
t.string "tumblr_user_name", :null => false
t.string "first_name"
t.string "last_name"
t.string "email"
end
add_index "authors", ["tumblr_user_name"], :name => "index_authors_on_tumblr_user_name", :unique => true
create_table "classifications", :force => true do |t|
t.integer "topic_id"
t.string "classifiable_type"
t.integer "classifiable_id"
t.datetime "created_at", :null => false
t.datetime "updated_at", :null => false
end
create_table "coupons", :force => true do |t|
t.string "code"
t.integer "amount"
t.datetime "created_at"
t.datetime "updated_at"
t.boolean "active", :default => true, :null => false
t.string "discount_type", :default => "percentage", :null => false
t.boolean "one_time_use_only", :default => false, :null => false
end
add_index "coupons", ["code"], :name => "index_coupons_on_code"
create_table "courses", :force => true do |t|
t.string "name", :null => false
t.integer "price"
t.text "description"
t.time "start_at"
t.time "stop_at"
t.integer "maximum_students", :default => 12, :null => false
t.boolean "public", :default => true, :null => false
t.datetime "created_at"
t.datetime "updated_at"
t.string "short_description"
t.string "external_registration_url"
t.integer "position"
t.integer "audience_id"
t.string "course_image_file_name"
t.string "course_image_file_size"
t.string "course_image_content_type"
t.string "course_image_updated_at"
t.string "promo_location"
end
add_index "courses", ["audience_id"], :name => "index_courses_on_audience_id"
create_table "downloads", :force => true do |t|
t.integer "product_id"
t.string "download_file_name"
t.string "download_file_size"
t.string "download_content_type"
t.string "download_updated_at"
t.string "description"
t.datetime "created_at", :null => false
t.datetime "updated_at", :null => false
end
create_table "episodes", :force => true do |t|
t.string "title"
t.string "old_url"
t.string "file"
t.text "description"
t.text "notes"
t.date "published_on"
t.datetime "created_at", :null => false
t.datetime "updated_at", :null => false
t.integer "file_size"
t.integer "duration"
end
create_table "follow_ups", :force => true do |t|
t.string "email"
t.integer "course_id"
t.datetime "created_at"
t.datetime "updated_at"
t.datetime "notified_at"
end
add_index "follow_ups", ["course_id"], :name => "index_follow_ups_on_course_id"
create_table "products", :force => true do |t|
t.string "name"
t.string "sku"
t.string "tagline"
t.string "call_to_action"
t.string "short_description"
t.text "description"
t.integer "individual_price"
t.integer "company_price"
t.string "product_type"
t.boolean "active", :default => true, :null => false
t.datetime "created_at"
t.datetime "updated_at"
t.string "fulfillment_method"
t.integer "github_team"
t.string "github_url"
t.text "questions"
t.text "terms"
t.text "alternative_description"
t.string "product_image_file_name"
t.string "product_image_file_size"
t.string "product_image_content_type"
t.string "product_image_updated_at"
t.text "external_purchase_url"
t.string "external_purchase_name"
t.string "external_purchase_description"
t.string "promo_location"
end
create_table "purchases", :force => true do |t|
t.integer "product_id"
t.string "stripe_customer"
t.string "variant"
t.string "name"
t.string "email"
t.string "organization"
t.string "address1"
t.string "address2"
t.string "city"
t.string "state"
t.string "zip_code"
t.datetime "created_at"
t.datetime "updated_at"
t.string "lookup"
t.integer "coupon_id"
t.text "readers"
t.boolean "paid", :default => false, :null => false
t.string "payment_method", :default => "stripe", :null => false
t.string "country"
t.string "payment_transaction_id"
t.integer "user_id"
t.integer "paid_price"
end
add_index "purchases", ["lookup"], :name => "index_purchases_on_lookup"
add_index "purchases", ["product_id"], :name => "index_purchases_on_product_id"
add_index "purchases", ["stripe_customer"], :name => "index_purchases_on_stripe_customer"
create_table "questions", :force => true do |t|
t.integer "course_id"
t.string "question"
t.text "answer"
t.datetime "created_at"
t.datetime "updated_at"
end
add_index "questions", ["course_id"], :name => "index_questions_on_course_id"
create_table "rails_admin_histories", :force => true do |t|
t.text "message"
t.string "username"
t.integer "item"
t.string "table"
t.integer "month", :limit => 2
t.integer "year", :limit => 8
t.datetime "created_at", :null => false
t.datetime "updated_at", :null => false
end
add_index "rails_admin_histories", ["item", "table", "month", "year"], :name => "index_rails_admin_histories"
create_table "registrations", :force => true do |t|
t.integer "section_id"
t.datetime "created_at"
t.datetime "updated_at"
t.integer "freshbooks_invoice_id"
t.string "freshbooks_invoice_url"
t.integer "coupon_id"
t.string "email"
t.string "billing_email"
t.string "first_name"
t.string "last_name"
t.string "organization"
t.string "phone"
t.string "address1"
t.string "address2"
t.string "city"
t.string "state"
t.string "zip_code"
t.string "freshbooks_client_id"
t.text "comments"
t.boolean "paid", :default => false, :null => false
t.integer "user_id"
end
add_index "registrations", ["paid"], :name => "index_registrations_on_paid"
add_index "registrations", ["section_id"], :name => "index_registrations_on_section_id"
create_table "resources", :force => true do |t|
t.integer "course_id"
t.string "name"
t.string "url"
end
add_index "resources", ["course_id"], :name => "index_resources_on_course_id"
create_table "section_teachers", :force => true do |t|
t.integer "section_id"
t.integer "teacher_id"
t.datetime "created_at"
t.datetime "updated_at"
end
add_index "section_teachers", ["section_id", "teacher_id"], :name => "index_section_teachers_on_section_id_and_teacher_id", :unique => true
create_table "sections", :force => true do |t|
t.integer "course_id"
t.date "starts_on"
t.date "ends_on"
t.datetime "created_at"
t.datetime "updated_at"
t.integer "seats_available"
t.time "start_at"
t.time "stop_at"
t.string "address"
t.string "city"
t.string "state"
t.string "zip"
t.text "reminder_email"
end
add_index "sections", ["course_id"], :name => "index_sections_on_course_id"
create_table "teachers", :force => true do |t|
t.string "name"
t.string "gravatar_hash"
t.text "bio"
t.datetime "created_at"
t.datetime "updated_at"
t.string "email"
end
create_table "topics", :force => true do |t|
t.datetime "created_at", :null => false
t.datetime "updated_at", :null => false
t.string "keywords"
t.string "name", :null => false
t.string "slug", :null => false
t.text "summary"
t.integer "count"
t.boolean "featured", :default => false, :null => false
t.text "trail_map"
end
add_index "topics", ["slug"], :name => "index_topics_on_slug", :unique => true
create_table "users", :force => true do |t|
t.string "email"
t.string "encrypted_password", :limit => 128
t.string "salt", :limit => 128
t.string "confirmation_token", :limit => 128
t.string "remember_token", :limit => 128
t.boolean "email_confirmed", :default => true, :null => false
t.datetime "created_at"
t.datetime "updated_at"
t.string "customer_id"
t.string "first_name"
t.string "last_name"
t.string "reference"
t.boolean "admin", :default => false, :null => false
t.string "stripe_customer"
t.string "github_username"
t.string "auth_provider"
t.integer "auth_uid"
end
add_index "users", ["admin"], :name => "index_users_on_admin"
add_index "users", ["email"], :name => "index_users_on_email"
add_index "users", ["id", "confirmation_token"], :name => "index_users_on_id_and_confirmation_token"
add_index "users", ["remember_token"], :name => "index_users_on_remember_token"
create_table "videos", :force => true do |t|
t.integer "product_id"
t.string "wistia_id"
t.string "title"
t.datetime "created_at", :null => false
t.datetime "updated_at", :null => false
end
end
| 1 | 6,654 | Looks like there may be more changes to this file than intended? | thoughtbot-upcase | rb |
@@ -117,7 +117,7 @@ func (eb *eventbus) pubCloudMsgToEdge() {
body, ok := accessInfo.GetContent().(map[string]interface{})
if !ok {
klog.Errorf("Message is not map type")
- return
+ continue
}
message := body["message"].(map[string]interface{})
topic := message["topic"].(string) | 1 | package eventbus
import (
"encoding/json"
"fmt"
"os"
"github.com/astaxie/beego/orm"
"k8s.io/klog/v2"
"github.com/kubeedge/beehive/pkg/core"
beehiveContext "github.com/kubeedge/beehive/pkg/core/context"
messagepkg "github.com/kubeedge/kubeedge/edge/pkg/common/message"
"github.com/kubeedge/kubeedge/edge/pkg/common/modules"
"github.com/kubeedge/kubeedge/edge/pkg/eventbus/common/util"
eventconfig "github.com/kubeedge/kubeedge/edge/pkg/eventbus/config"
"github.com/kubeedge/kubeedge/edge/pkg/eventbus/dao"
mqttBus "github.com/kubeedge/kubeedge/edge/pkg/eventbus/mqtt"
"github.com/kubeedge/kubeedge/pkg/apis/componentconfig/edgecore/v1alpha1"
)
var mqttServer *mqttBus.Server
// eventbus struct
type eventbus struct {
enable bool
}
func newEventbus(enable bool) *eventbus {
return &eventbus{
enable: enable,
}
}
// Register register eventbus
func Register(eventbus *v1alpha1.EventBus, nodeName string) {
eventconfig.InitConfigure(eventbus, nodeName)
core.Register(newEventbus(eventbus.Enable))
orm.RegisterModel(new(dao.SubTopics))
}
func (*eventbus) Name() string {
return modules.EventBusModuleName
}
func (*eventbus) Group() string {
return modules.BusGroup
}
// Enable indicates whether this module is enabled
func (eb *eventbus) Enable() bool {
return eb.enable
}
func (eb *eventbus) Start() {
if eventconfig.Config.MqttMode >= v1alpha1.MqttModeBoth {
hub := &mqttBus.Client{
MQTTUrl: eventconfig.Config.MqttServerExternal,
}
mqttBus.MQTTHub = hub
hub.InitSubClient()
hub.InitPubClient()
klog.Infof("Init Sub And Pub Client for externel mqtt broker %v successfully", eventconfig.Config.MqttServerExternal)
}
if eventconfig.Config.MqttMode <= v1alpha1.MqttModeBoth {
// launch an internal mqtt server only
mqttServer = mqttBus.NewMqttServer(
int(eventconfig.Config.MqttSessionQueueSize),
eventconfig.Config.MqttServerInternal,
eventconfig.Config.MqttRetain,
int(eventconfig.Config.MqttQOS))
mqttServer.InitInternalTopics()
err := mqttServer.Run()
if err != nil {
klog.Errorf("Launch internel mqtt broker failed, %s", err.Error())
os.Exit(1)
}
klog.Infof("Launch internel mqtt broker %v successfully", eventconfig.Config.MqttServerInternal)
}
eb.pubCloudMsgToEdge()
}
func pubMQTT(topic string, payload []byte) {
token := mqttBus.MQTTHub.PubCli.Publish(topic, 1, false, payload)
if token.WaitTimeout(util.TokenWaitTime) && token.Error() != nil {
klog.Errorf("Error in pubMQTT with topic: %s, %v", topic, token.Error())
} else {
klog.Infof("Success in pubMQTT with topic: %s", topic)
}
}
func (eb *eventbus) pubCloudMsgToEdge() {
for {
select {
case <-beehiveContext.Done():
klog.Warning("EventBus PubCloudMsg To Edge stop")
return
default:
}
accessInfo, err := beehiveContext.Receive(eb.Name())
if err != nil {
klog.Errorf("Fail to get a message from channel: %v", err)
continue
}
operation := accessInfo.GetOperation()
resource := accessInfo.GetResource()
switch operation {
case messagepkg.OperationSubscribe:
eb.subscribe(resource)
klog.Infof("Edge-hub-cli subscribe topic to %s", resource)
case messagepkg.OperationUnsubscribe:
eb.unsubscribe(resource)
klog.Infof("Edge-hub-cli unsubscribe topic to %s", resource)
case messagepkg.OperationMessage:
body, ok := accessInfo.GetContent().(map[string]interface{})
if !ok {
klog.Errorf("Message is not map type")
return
}
message := body["message"].(map[string]interface{})
topic := message["topic"].(string)
payload, _ := json.Marshal(&message)
eb.publish(topic, payload)
case messagepkg.OperationPublish:
topic := resource
var ok bool
// cloud and edge will send different type of content, need to check
payload, ok := accessInfo.GetContent().([]byte)
if !ok {
content := accessInfo.GetContent().(string)
payload = []byte(content)
}
eb.publish(topic, payload)
case messagepkg.OperationGetResult:
if resource != "auth_info" {
klog.Info("Skip none auth_info get_result message")
return
}
topic := fmt.Sprintf("$hw/events/node/%s/authInfo/get/result", eventconfig.Config.NodeName)
payload, _ := json.Marshal(accessInfo.GetContent())
eb.publish(topic, payload)
default:
klog.Warningf("Action not found")
}
}
}
func (eb *eventbus) publish(topic string, payload []byte) {
if eventconfig.Config.MqttMode >= v1alpha1.MqttModeBoth {
// pub msg to external mqtt broker.
pubMQTT(topic, payload)
}
if eventconfig.Config.MqttMode <= v1alpha1.MqttModeBoth {
// pub msg to internal mqtt broker.
mqttServer.Publish(topic, payload)
}
}
func (eb *eventbus) subscribe(topic string) {
if eventconfig.Config.MqttMode <= v1alpha1.MqttModeBoth {
// set topic to internal mqtt broker.
mqttServer.SetTopic(topic)
}
if eventconfig.Config.MqttMode >= v1alpha1.MqttModeBoth {
// subscribe topic to external mqtt broker.
token := mqttBus.MQTTHub.SubCli.Subscribe(topic, 1, mqttBus.OnSubMessageReceived)
if rs, err := util.CheckClientToken(token); !rs {
klog.Errorf("Edge-hub-cli subscribe topic: %s, %v", topic, err)
return
}
}
err := dao.InsertTopics(topic)
if err != nil {
klog.Errorf("Insert topic %s failed, %v", topic, err)
}
}
func (eb *eventbus) unsubscribe(topic string) {
if eventconfig.Config.MqttMode <= v1alpha1.MqttModeBoth {
mqttServer.RemoveTopic(topic)
}
if eventconfig.Config.MqttMode >= v1alpha1.MqttModeBoth {
token := mqttBus.MQTTHub.SubCli.Unsubscribe(topic)
if rs, err := util.CheckClientToken(token); !rs {
klog.Errorf("Edge-hub-cli unsubscribe topic: %s, %v", topic, err)
return
}
}
err := dao.DeleteTopicsByKey(topic)
if err != nil {
klog.Errorf("Delete topic %s failed, %v", topic, err)
}
}
| 1 | 20,950 | when the message type is not expected, continue to the next loop iteration instead of returning, which would end the infinite loop | kubeedge-kubeedge | go
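An analogous sketch in Python (one language is used for all added examples here) of the point above: inside a long-running dispatch loop, a malformed message should be skipped with continue rather than ending the whole loop with return. The message shape and helper names are hypothetical:

def pub_cloud_msg_to_edge(receive, publish):
    # Long-running dispatch loop, analogous to the Go for/select above.
    while True:
        msg = receive()
        body = msg.get("content")
        if not isinstance(body, dict):
            # 'return' here would silently stop all future dispatching;
            # 'continue' only drops the malformed message, like the kubeedge fix.
            continue
        message = body["message"]
        publish(message["topic"], message)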
@@ -2,6 +2,13 @@ package cmd
import (
"fmt"
+ "os"
+ osexec "os/exec"
+ "path/filepath"
+ "strings"
+ "testing"
+ "time"
+
"github.com/drud/ddev/pkg/ddevapp"
"github.com/drud/ddev/pkg/exec"
"github.com/drud/ddev/pkg/fileutil" | 1 | package cmd
import (
"fmt"
"github.com/drud/ddev/pkg/ddevapp"
"github.com/drud/ddev/pkg/exec"
"github.com/drud/ddev/pkg/fileutil"
"github.com/drud/ddev/pkg/nodeps"
"github.com/drud/ddev/pkg/testcommon"
"github.com/drud/ddev/pkg/util"
asrt "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"os"
osexec "os/exec"
"path/filepath"
"strings"
"testing"
"time"
)
// TestCustomCommands does basic checks to make sure custom commands work OK.
func TestCustomCommands(t *testing.T) {
assert := asrt.New(t)
runTime := util.TimeTrack(time.Now(), "ddev list")
tmpHome := testcommon.CreateTmpDir(t.Name() + "tempHome")
origHome := os.Getenv("HOME")
origDebug := os.Getenv("DDEV_DEBUG")
// Change the homedir temporarily
err := os.Setenv("HOME", tmpHome)
require.NoError(t, err)
_ = os.Setenv("DDEV_DEBUG", "")
pwd, _ := os.Getwd()
testCustomCommandsDir := filepath.Join(pwd, "testdata", t.Name())
site := TestSites[0]
switchDir := TestSites[0].Chdir()
app, _ := ddevapp.NewApp(TestSites[0].Dir, false, "")
origType := app.Type
t.Cleanup(func() {
runTime()
app.Type = origType
_ = app.WriteConfig()
_ = os.RemoveAll(tmpHome)
_ = os.Setenv("HOME", origHome)
_ = os.Setenv("DDEV_DEBUG", origDebug)
_ = fileutil.PurgeDirectory(filepath.Join(site.Dir, ".ddev", "commands"))
_ = fileutil.PurgeDirectory(filepath.Join(site.Dir, ".ddev", ".global_commands"))
switchDir()
})
err = app.Start()
require.NoError(t, err)
// We can't use the standard getGlobalDDevDir here because *our* global hasn't changed.
// It's changed via $HOME for the ddev subprocess
err = os.MkdirAll(filepath.Join(tmpHome, ".ddev"), 0755)
assert.NoError(err)
tmpHomeGlobalCommandsDir := filepath.Join(tmpHome, ".ddev", "commands")
err = os.RemoveAll(tmpHomeGlobalCommandsDir)
assert.NoError(err)
projectCommandsDir := app.GetConfigPath("commands")
globalCommandsDir := app.GetConfigPath(".global_commands")
_ = os.RemoveAll(globalCommandsDir)
err = fileutil.CopyDir(filepath.Join(testCustomCommandsDir, "global_commands"), tmpHomeGlobalCommandsDir)
require.NoError(t, err)
assert.FileExists(filepath.Join(projectCommandsDir, "db", "mysql"))
assert.FileExists(filepath.Join(projectCommandsDir, "host", "mysqlworkbench.example"))
out, err := exec.RunCommand(DdevBin, []string{})
assert.NoError(err)
assert.Contains(out, "mysql client in db container")
// Test the `ddev mysql` command with stdin
inputFile := filepath.Join(testCustomCommandsDir, "select99.sql")
f, err := os.Open(inputFile)
require.NoError(t, err)
// nolint: errcheck
defer f.Close()
command := osexec.Command(DdevBin, "mysql")
command.Stdin = f
byteOut, err := command.CombinedOutput()
require.NoError(t, err, "Failed ddev mysql; output=%v", string(byteOut))
assert.Contains(string(byteOut), "99\n99\n")
_ = os.RemoveAll(projectCommandsDir)
_ = os.RemoveAll(globalCommandsDir)
// Now copy a project commands and global commands and make sure they show up and execute properly
err = fileutil.CopyDir(filepath.Join(testCustomCommandsDir, "project_commands"), projectCommandsDir)
assert.NoError(err)
out, err = exec.RunCommand(DdevBin, []string{})
assert.NoError(err)
assert.Contains(out, "testhostcmd project (shell host container command)")
assert.Contains(out, "testwebcmd project (shell web container command)")
assert.Contains(out, "testhostglobal global (global shell host container command)")
assert.Contains(out, "testwebglobal global (global shell web container command)")
assert.NotContains(out, "testhostcmd global") //the global testhostcmd should have been overridden by the projct one
assert.NotContains(out, "testwebcmd global") //the global testwebcmd should have been overridden by the projct one
for _, c := range []string{"testhostcmd", "testhostglobal", "testwebcmd", "testwebglobal"} {
args := []string{c, "hostarg1", "hostarg2", "--hostflag1"}
out, err = exec.RunCommand(DdevBin, args)
assert.NoError(err, "Failed to run ddev %s %v", c, args)
expectedHost, _ := os.Hostname()
if !strings.Contains(c, "host") {
expectedHost = site.Name + "-web"
}
assert.Contains(out, fmt.Sprintf("%s was executed with args=hostarg1 hostarg2 --hostflag1 on host %s", c, expectedHost))
}
app.Type = nodeps.AppTypePHP
err = app.WriteConfig()
assert.NoError(err)
// Make sure that all the official ddev-provided custom commands are usable by just checking help
for _, c := range []string{"launch", "live", "mysql", "xdebug"} {
_, err = exec.RunCommand(DdevBin, []string{c, "-h"})
assert.NoError(err, "Failed to run ddev %s -h", c)
}
// The various CMS commands should not be available here
for _, c := range []string{"artisan", "drush", "magento", "typo3", "typo3cms", "wp"} {
_, err = exec.RunCommand(DdevBin, []string{c, "-h"})
assert.Error(err, "found command %s when it should not have been there (no error) app.Type=%s", c, app.Type)
}
// TYPO3 commands should only be available for type typo3
app.Type = nodeps.AppTypeTYPO3
_ = app.WriteConfig()
_, _ = exec.RunCommand(DdevBin, nil)
for _, c := range []string{"typo3", "typo3cms"} {
_, err = exec.RunCommand(DdevBin, []string{c, "-h"})
assert.NoError(err)
}
// Drupal types should only be available for type drupal*
app.Type = nodeps.AppTypeDrupal9
_ = app.WriteConfig()
_, _ = exec.RunCommand(DdevBin, nil)
for _, c := range []string{"drush"} {
_, err = exec.RunCommand(DdevBin, []string{c, "-h"})
assert.NoError(err)
}
// Laravel types should only be available for type laravel
app.Type = nodeps.AppTypeLaravel
_ = app.WriteConfig()
_, _ = exec.RunCommand(DdevBin, nil)
for _, c := range []string{"artisan"} {
_, err = exec.RunCommand(DdevBin, []string{c, "-h"})
assert.NoError(err)
}
// Wordpress types should only be available for type drupal*
app.Type = nodeps.AppTypeWordPress
_ = app.WriteConfig()
_, _ = exec.RunCommand(DdevBin, nil)
for _, c := range []string{"wp"} {
_, err = exec.RunCommand(DdevBin, []string{c, "-h"})
assert.NoError(err, "expected to find command %s for app.Type=%s", c, app.Type)
}
// Make sure that the non-command stuff we installed is there
for _, f := range []string{"db/mysqldump.example", "db/README.txt", "web/README.txt", "host/README.txt", "host/phpstorm.example"} {
assert.FileExists(filepath.Join(projectCommandsDir, f))
assert.FileExists(filepath.Join(globalCommandsDir, f))
}
// Make sure we haven't accidentally created anything inappropriate in ~/.ddev
assert.False(fileutil.FileExists(filepath.Join(tmpHome, ".ddev", ".globalcommands")))
assert.False(fileutil.FileExists(filepath.Join(origHome, ".ddev", ".globalcommands")))
}
// TestLaunchCommand tests that the launch command behaves all the ways it should behave
func TestLaunchCommand(t *testing.T) {
assert := asrt.New(t)
pwd, _ := os.Getwd()
// Create a temporary directory and switch to it.
tmpdir := testcommon.CreateTmpDir(t.Name())
err := os.Chdir(tmpdir)
assert.NoError(err)
_ = os.Setenv("DDEV_DEBUG", "true")
app, err := ddevapp.NewApp(tmpdir, false, "")
require.NoError(t, err)
err = app.WriteConfig()
require.NoError(t, err)
t.Cleanup(func() {
err = app.Stop(true, false)
assert.NoError(err)
err = os.Chdir(pwd)
assert.NoError(err)
_ = os.RemoveAll(tmpdir)
})
// This only tests the https port changes, but that might be enough
app.RouterHTTPSPort = "8443"
_ = app.WriteConfig()
err = app.Start()
require.NoError(t, err)
desc, err := app.Describe(false)
require.NoError(t, err)
cases := map[string]string{
"": app.GetPrimaryURL(),
"-p": desc["phpmyadmin_https_url"].(string),
"-m": desc["mailhog_https_url"].(string),
}
for partialCommand, expect := range cases {
// Try with the base URL, simplest case
c := DdevBin + ` launch ` + partialCommand + ` | awk '/FULLURL/ {print $2}'`
out, err := exec.RunCommand("bash", []string{"-c", c})
out = strings.Trim(out, "\n")
assert.NoError(err, `couldn't run "%s"", output=%s`, c, out)
assert.Contains(out, expect, "ouptput of %s is incorrect with app.RouterHTTPSPort=%s: %s", c, app.RouterHTTPSPort, out)
}
}
// TestMysqlCommand tests `ddev mysql``
func TestMysqlCommand(t *testing.T) {
assert := asrt.New(t)
// Create a temporary directory and switch to it.
tmpdir := testcommon.CreateTmpDir(t.Name())
defer testcommon.CleanupDir(tmpdir)
defer testcommon.Chdir(tmpdir)()
app, err := ddevapp.NewApp(tmpdir, false, "")
require.NoError(t, err)
err = app.WriteConfig()
require.NoError(t, err)
err = app.Start()
require.NoError(t, err)
defer func() {
_ = app.Stop(true, false)
}()
// Test ddev mysql -uroot -proot mysql
command := osexec.Command("bash", "-c", "echo 'SHOW TABLES;' | "+DdevBin+" mysql --user=root --password=root --database=mysql")
byteOut, err := command.CombinedOutput()
assert.NoError(err, "byteOut=%v", string(byteOut))
assert.Contains(string(byteOut), `Tables_in_mysql
column_stats
columns_priv`)
}
| 1 | 14,675 | This change was not really intended; it was made by the VS Code linter. Looking at other packages, it seems to be best practice to place internal packages at the top and GitHub imports afterwards. | drud-ddev | php
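The grouping the reviewer describes, shown with Python's analogous PEP 8 convention (standard-library imports first, third-party packages after a blank line); the modules below are placeholders:

# Standard library first...
import os
import time

# ...then third-party / GitHub-hosted packages.
import requests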
@@ -84,6 +84,10 @@ func (r *AWSMachine) ValidateUpdate(old runtime.Object) error {
delete(oldAWSMachineSpec, "providerID")
delete(newAWSMachineSpec, "providerID")
+ // allow changes to instanceID
+ delete(oldAWSMachineSpec, "instanceID")
+ delete(newAWSMachineSpec, "instanceID")
+
// allow changes to additionalTags
delete(oldAWSMachineSpec, "additionalTags")
delete(newAWSMachineSpec, "additionalTags") | 1 | /*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha3
import (
"reflect"
"github.com/pkg/errors"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/validation/field"
ctrl "sigs.k8s.io/controller-runtime"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/webhook"
)
// log is for logging in this package.
var _ = logf.Log.WithName("awsmachine-resource")
func (r *AWSMachine) SetupWebhookWithManager(mgr ctrl.Manager) error {
return ctrl.NewWebhookManagedBy(mgr).
For(r).
Complete()
}
// +kubebuilder:webhook:verbs=create;update,path=/validate-infrastructure-cluster-x-k8s-io-v1alpha3-awsmachine,mutating=false,failurePolicy=fail,matchPolicy=Equivalent,groups=infrastructure.cluster.x-k8s.io,resources=awsmachines,versions=v1alpha3,name=validation.awsmachine.infrastructure.cluster.x-k8s.io,sideEffects=None
// +kubebuilder:webhook:verbs=create;update,path=/mutate-infrastructure-cluster-x-k8s-io-v1alpha3-awsmachine,mutating=true,failurePolicy=fail,groups=infrastructure.cluster.x-k8s.io,resources=awsmachines,versions=v1alpha3,name=mawsmachine.kb.io,name=mutation.awsmachine.infrastructure.cluster.x-k8s.io,sideEffects=None
var (
_ webhook.Validator = &AWSMachine{}
_ webhook.Defaulter = &AWSMachine{}
)
// ValidateCreate implements webhook.Validator so a webhook will be registered for the type
func (r *AWSMachine) ValidateCreate() error {
var allErrs field.ErrorList
allErrs = append(allErrs, r.validateCloudInitSecret()...)
allErrs = append(allErrs, r.validateRootVolume()...)
allErrs = append(allErrs, r.validateNonRootVolumes()...)
allErrs = append(allErrs, isValidSSHKey(r.Spec.SSHKeyName)...)
allErrs = append(allErrs, r.validateAdditionalSecurityGroups()...)
return aggregateObjErrors(r.GroupVersionKind().GroupKind(), r.Name, allErrs)
}
// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type
func (r *AWSMachine) ValidateUpdate(old runtime.Object) error {
newAWSMachine, err := runtime.DefaultUnstructuredConverter.ToUnstructured(r)
if err != nil {
return apierrors.NewInvalid(GroupVersion.WithKind("AWSMachine").GroupKind(), r.Name, field.ErrorList{
field.InternalError(nil, errors.Wrap(err, "failed to convert new AWSMachine to unstructured object")),
})
}
oldAWSMachine, err := runtime.DefaultUnstructuredConverter.ToUnstructured(old)
if err != nil {
return apierrors.NewInvalid(GroupVersion.WithKind("AWSMachine").GroupKind(), r.Name, field.ErrorList{
field.InternalError(nil, errors.Wrap(err, "failed to convert old AWSMachine to unstructured object")),
})
}
var allErrs field.ErrorList
allErrs = append(allErrs, r.validateCloudInitSecret()...)
newAWSMachineSpec := newAWSMachine["spec"].(map[string]interface{})
oldAWSMachineSpec := oldAWSMachine["spec"].(map[string]interface{})
// allow changes to providerID
delete(oldAWSMachineSpec, "providerID")
delete(newAWSMachineSpec, "providerID")
// allow changes to additionalTags
delete(oldAWSMachineSpec, "additionalTags")
delete(newAWSMachineSpec, "additionalTags")
// allow changes to additionalSecurityGroups
delete(oldAWSMachineSpec, "additionalSecurityGroups")
delete(newAWSMachineSpec, "additionalSecurityGroups")
// allow changes to secretPrefix, secretCount, and secureSecretsBackend
if cloudInit, ok := oldAWSMachineSpec["cloudInit"].(map[string]interface{}); ok {
delete(cloudInit, "secretPrefix")
delete(cloudInit, "secretCount")
delete(cloudInit, "secureSecretsBackend")
}
if cloudInit, ok := newAWSMachineSpec["cloudInit"].(map[string]interface{}); ok {
delete(cloudInit, "secretPrefix")
delete(cloudInit, "secretCount")
delete(cloudInit, "secureSecretsBackend")
}
if !reflect.DeepEqual(oldAWSMachineSpec, newAWSMachineSpec) {
allErrs = append(allErrs, field.Forbidden(field.NewPath("spec"), "cannot be modified"))
}
return aggregateObjErrors(r.GroupVersionKind().GroupKind(), r.Name, allErrs)
}
func (r *AWSMachine) validateCloudInitSecret() field.ErrorList {
var allErrs field.ErrorList
if r.Spec.CloudInit.InsecureSkipSecretsManager {
if r.Spec.CloudInit.SecretPrefix != "" {
allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "cloudInit", "secretPrefix"), "cannot be set if spec.cloudInit.insecureSkipSecretsManager is true"))
}
if r.Spec.CloudInit.SecretCount != 0 {
allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "cloudInit", "secretCount"), "cannot be set if spec.cloudInit.insecureSkipSecretsManager is true"))
}
if r.Spec.CloudInit.SecureSecretsBackend != "" {
allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "cloudInit", "secureSecretsBackend"), "cannot be set if spec.cloudInit.insecureSkipSecretsManager is true"))
}
}
if (r.Spec.CloudInit.SecretPrefix != "") != (r.Spec.CloudInit.SecretCount != 0) {
allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "cloudInit", "secretCount"), "must be set together with spec.CloudInit.SecretPrefix"))
}
return allErrs
}
func (r *AWSMachine) validateRootVolume() field.ErrorList {
var allErrs field.ErrorList
if r.Spec.RootVolume == nil {
return allErrs
}
if (r.Spec.RootVolume.Type == "io1" || r.Spec.RootVolume.Type == "io2") && r.Spec.RootVolume.IOPS == 0 {
allErrs = append(allErrs, field.Required(field.NewPath("spec.rootVolumeOptions.iops"), "iops required if type is 'io1' or 'io2'"))
}
if r.Spec.RootVolume.DeviceName != "" {
allErrs = append(allErrs, field.Forbidden(field.NewPath("spec.rootVolumeOptions.deviceName"), "root volume shouldn't have device name"))
}
return allErrs
}
func (r *AWSMachine) validateNonRootVolumes() field.ErrorList {
var allErrs field.ErrorList
if r.Spec.NonRootVolumes == nil {
return allErrs
}
for _, volume := range r.Spec.NonRootVolumes {
if (volume.Type == "io1" || volume.Type == "io2") && volume.IOPS == 0 {
allErrs = append(allErrs, field.Required(field.NewPath("spec.nonRootVolumes.volumeOptions.iops"), "iops required if type is 'io1' or 'io2'"))
}
if volume.DeviceName == "" {
allErrs = append(allErrs, field.Required(field.NewPath("spec.nonRootVolumes.volumeOptions.deviceName"), "non root volume should have device name"))
}
}
return allErrs
}
// ValidateDelete implements webhook.Validator so a webhook will be registered for the type
func (r *AWSMachine) ValidateDelete() error {
return nil
}
// Default implements webhook.Defaulter such that an empty CloudInit will be defined with a default
// SecureSecretsBackend as SecretBackendSecretsManager iff InsecureSkipSecretsManager is unset
func (r *AWSMachine) Default() {
if !r.Spec.CloudInit.InsecureSkipSecretsManager && r.Spec.CloudInit.SecureSecretsBackend == "" {
r.Spec.CloudInit.SecureSecretsBackend = SecretBackendSecretsManager
}
}
func (r *AWSMachine) validateAdditionalSecurityGroups() field.ErrorList {
var allErrs field.ErrorList
for _, additionalSecurityGroups := range r.Spec.AdditionalSecurityGroups {
if len(additionalSecurityGroups.Filters) > 0 {
allErrs = append(allErrs, field.Forbidden(field.NewPath("spec.additionalSecurityGroups"), "filters are not implemented for security groups and will be removed in a future release"))
}
}
return allErrs
}
| 1 | 16,861 | instanceID should stay the same for an AWSMachine? | kubernetes-sigs-cluster-api-provider-aws | go
@@ -77,6 +77,7 @@ def _launch_local_catalog():
env = dict(REGISTRY_URL="http://localhost:5000",
S3_PROXY_URL=open_config["s3Proxy"],
ALWAYS_REQUIRE_AUTH="false",
+ NO_DOWNLOAD="false",
CATALOG_MODE="LOCAL",
SSO_AUTH="DISABLED",
PASSWORD_AUTH="ENABLED", | 1 | """
Parses the command-line arguments and runs a command.
"""
import argparse
import subprocess
import time
import sys
import dns.resolver
import requests
from . import api, session
from . import __version__ as quilt3_version
from .session import open_url
from .util import get_from_config, catalog_s3_url, catalog_package_url, QuiltException, PhysicalKey, \
fix_url, get_package_registry
def cmd_config(catalog_url, **kwargs):
"""
Configure quilt3 to a Quilt stack
"""
config_values = kwargs['set'] if kwargs['set'] else {}
if catalog_url and config_values:
raise QuiltException("Expected either an auto-config URL or key=value pairs, but got both.")
if config_values:
api.config(**config_values)
else:
if catalog_url is None:
existing_catalog_url = get_from_config('navigator_url')
if existing_catalog_url is not None:
print(existing_catalog_url)
else:
print('<None>')
else:
api.config(catalog_url)
class ParseConfigDict(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
d = {}
if values:
for item in values:
split_items = item.split("=", 1)
key, value = split_items[0].strip(), split_items[1]
d[key] = value
setattr(namespace, self.dest, d)
def cmd_config_default_registry(default_remote_registry):
"""
Configure the default remote registry for quilt3
"""
api.config(default_remote_registry=default_remote_registry)
print(f"Successfully set the default remote registry to {default_remote_registry}")
def _test_url(url):
try:
response = requests.get(url)
if response.ok:
return True
return False
except requests.exceptions.ConnectionError:
return False
def _launch_local_catalog():
""""
Launches a docker container to run nginx hosting
the Quilt catalog on localhost:3000
"""
open_config = api._config()
command = ["docker", "run", "--rm"]
env = dict(REGISTRY_URL="http://localhost:5000",
S3_PROXY_URL=open_config["s3Proxy"],
ALWAYS_REQUIRE_AUTH="false",
CATALOG_MODE="LOCAL",
SSO_AUTH="DISABLED",
PASSWORD_AUTH="ENABLED",
API_GATEWAY=open_config["apiGatewayEndpoint"],
BINARY_API_GATEWAY=open_config["binaryApiGatewayEndpoint"])
for var in [f"{key}={value}" for key, value in env.items()]:
command += ["-e", var]
command += ["-p", "3000:80", "quiltdata/catalog"]
subprocess.Popen(command)
def _launch_local_s3proxy():
""""
Launches an s3 proxy (via docker)
on localhost:5002
"""
dns_resolver = dns.resolver.Resolver()
command = ["docker", "run", "--rm"]
# Workaround for a Docker-for-Mac bug in which the container
# ends up with a different DNS server than the host.
# Workaround #2: use only IPv4 addresses.
# Note: leaving this code in though it isn't called so that it
# can be reintroduced once Docker-for-Mac DNS works reliably.
# TODO: switch back to this local s3proxy or remove this function
if sys.platform == 'darwin':
nameservers = [ip for ip in dns_resolver.nameservers if ip.count('.') == 3]
command += ["--dns", nameservers[0]]
command += ["-p", "5002:80", "quiltdata/s3proxy"]
subprocess.Popen(command)
catalog_cmd_detailed_help = """
Run the Quilt catalog on your machine (requires Docker). Running
`quilt3 catalog` launches a webserver on your local machine using
Docker and a Python microservice that supplies temporary AWS
credentials to the catalog. Temporary credentials are derived from
your default AWS credentials (or active `AWS_PROFILE`) using
`boto3.sts.get_session_token`. For more details about configuring and
using AWS credentials in `boto3`, see the AWS documentation:
https://boto3.amazonaws.com/v1/documentation/api/latest/guide/configuration.html
#### Previewing files in S3
The Quilt catalog allows users to preview files in S3 without
downloading. It relies on a API Gateway and AWS Lambda to generate
certain previews in the cloud. The catalog launched by `quilt3
catalog` sends preview requests to https://open.quiltdata.com. Preview
requests contain short-lived signed URLs generated using your AWS
credentials. Data is encrypted in transit and no data is retained by Quilt.
Nevertheless, it is recommended that you use `quilt3 catalog` only for public data.
We strongly encourage users with
sensitive data in S3 to run a private Quilt deployment. Visit
https://quiltdata.com for more information.
"""
def cmd_catalog(navigation_target=None, detailed_help=False):
"""
Run the Quilt catalog locally. If navigation_targets starts with 's3://', open file view. Otherwise assume it
refers to a package, following the pattern: BUCKET:USER/PKG
If detailed_help=True, display detailed information about the `quilt3 catalog` command and then exit
"""
from .registry import app # Delay importing it cause it's expensive.
if detailed_help:
print(catalog_cmd_detailed_help)
return
local_catalog_url = "http://localhost:3000"
# Build the catalog URL - we do this at the beginning so simple syntax errors return immediately
if navigation_target is None:
catalog_url = local_catalog_url
elif navigation_target.startswith("s3://"):
catalog_url = catalog_s3_url(local_catalog_url, navigation_target)
else:
num_colons = navigation_target.count(":")
assert num_colons == 1, f"To go to Package view, the input should follow the pattern BUCKET:USER/PKG. " \
f"However the input {navigation_target} has {num_colons} colons when it should have exactly one."
num_slashes = navigation_target.count("/")
assert num_slashes == 1, f"To go to Package view, the input should follow the pattern BUCKET:USER/PKG. " \
f"However the input {navigation_target} has {num_slashes} backslashes when it should have exactly one."
bucket, package_name = navigation_target.split(":")
catalog_url = catalog_package_url(local_catalog_url, bucket, package_name)
if not _test_url(local_catalog_url):
_launch_local_catalog()
# Make sure the containers are running and available before opening the browser window
print("Waiting for containers to launch...")
failure_timeout_secs = 15
poll_interval_secs = 0.5
start_time = time.time()
while True:
if time.time() - start_time > failure_timeout_secs:
catalog_failed = _test_url(local_catalog_url)
if not catalog_failed:
# Succeeded at the last second, let it proceed
break
raise QuiltException(f"The backend containers needed to run the catalog did not both successfully launch. "
f"Status:\n"
f"\tCATALOG: {'FAILED' if catalog_failed else 'SUCCEEDED'}")
if _test_url(local_catalog_url):
# Everything is working, proceed
break
else:
time.sleep(poll_interval_secs) # The containers can take a moment to launch
open_url(catalog_url)
app.run()
def cmd_disable_telemetry():
api._disable_telemetry()
print("Successfully disabled telemetry.")
def cmd_list_packages(registry):
registry_parsed = PhysicalKey.from_url(get_package_registry(fix_url(registry)))
for package_name in api._list_packages(registry=registry_parsed):
print(package_name)
def cmd_verify(name, registry, top_hash, dir, extra_files_ok):
pkg = api.Package._browse(name, registry, top_hash)
if pkg.verify(dir, extra_files_ok):
print("Verification succeeded")
return 0
else:
print("Verification failed")
return 1
def cmd_push(name, dir, registry, dest, message):
pkg = api.Package()
pkg.set_dir('.', dir)
pkg.push(name, registry=registry, dest=dest, message=message)
print("Successfully pushed the new package")
def create_parser():
parser = argparse.ArgumentParser(allow_abbrev=False)
parser.add_argument(
"--version",
help="Show quilt3 version and exit",
action="version",
version=quilt3_version.strip()
)
subparsers = parser.add_subparsers(metavar="<command>")
subparsers.required = True
# login
shorthelp = "Log in to configured Quilt server"
login_p = subparsers.add_parser("login", description=shorthelp, help=shorthelp, allow_abbrev=False)
login_p.set_defaults(func=session.login)
# logout
shorthelp = "Log out of current Quilt server"
logout_p = subparsers.add_parser("logout", description=shorthelp, help=shorthelp, allow_abbrev=False)
logout_p.set_defaults(func=session.logout)
# config
shorthelp = "Configure Quilt"
config_p = subparsers.add_parser("config", description=shorthelp, help=shorthelp, allow_abbrev=False)
config_p.add_argument(
"catalog_url",
help="URL of catalog to config with, or empty string to reset the config",
type=str,
nargs="?"
)
config_p.add_argument(
"--set",
metavar="KEY=VALUE",
nargs="+",
help="Set a number of key-value pairs for config_values"
"(do not put spaces before or after the = sign). "
"If a value contains spaces, you should define "
"it with double quotes: "
'foo="this is a sentence". Note that '
"values are always treated as strings.",
action=ParseConfigDict,
)
config_p.set_defaults(func=cmd_config)
# config-default-registry
shorthelp = "Configure default remote registry for Quilt"
config_p = subparsers.add_parser("config-default-remote-registry",
description=shorthelp, help=shorthelp, allow_abbrev=False)
config_p.add_argument(
"default_remote_registry",
help="The default remote registry to use, e.g. s3://quilt-ml",
type=str
)
config_p.set_defaults(func=cmd_config_default_registry)
# catalog
shorthelp = "Run Quilt catalog locally"
catalog_p = subparsers.add_parser("catalog", description=shorthelp, help=shorthelp, allow_abbrev=False)
catalog_p.add_argument(
"navigation_target",
help="Which page in the local catalog to open. Leave blank to go to the catalog landing page, pass in an "
"s3 url (e.g. 's3://bucket/myfile.txt') to go to file viewer, or pass in a package name in the form "
"'BUCKET:USER/PKG' to go to the package viewer.",
type=str,
nargs="?"
)
catalog_p.add_argument(
"--detailed_help",
help="Display detailed information about this command and then exit",
action="store_true",
)
catalog_p.set_defaults(func=cmd_catalog)
# disable-telemetry
shorthelp = "Disable anonymous usage metrics"
disable_telemetry_p = subparsers.add_parser("disable-telemetry",
description=shorthelp, help=shorthelp, allow_abbrev=False)
disable_telemetry_p.set_defaults(func=cmd_disable_telemetry)
# install
shorthelp = "Install a package"
install_p = subparsers.add_parser("install", description=shorthelp, help=shorthelp, allow_abbrev=False)
install_p.add_argument(
"name",
help="Name of package, in the USER/PKG[/PATH] format",
type=str,
)
install_p.add_argument(
"--registry",
help="Registry where package is located, usually s3://MY-BUCKET. Defaults to the default remote registry.",
type=str,
required=False,
)
install_p.add_argument(
"--top-hash",
help="Hash of package to install. Defaults to latest.",
type=str,
required=False,
)
install_p.add_argument(
"--dest",
help="Local path to download files to.",
type=str,
required=False,
)
install_p.add_argument(
"--dest-registry",
help="Registry to install package to. Defaults to local registry.",
type=str,
required=False,
)
install_p.set_defaults(func=api.Package.install)
# list-packages
shorthelp = "List all packages in a registry"
list_packages_p = subparsers.add_parser("list-packages", description=shorthelp, help=shorthelp, allow_abbrev=False)
list_packages_p.add_argument(
"registry",
help="Registry for packages, e.g. s3://quilt-example",
type=str,
)
list_packages_p.set_defaults(func=cmd_list_packages)
# verify
shorthelp = "Verify that package contents matches a given directory"
verify_p = subparsers.add_parser("verify", description=shorthelp, help=shorthelp, allow_abbrev=False)
verify_p.add_argument(
"name",
help="Name of package, in the USER/PKG format",
type=str,
)
verify_p.add_argument(
"--registry",
help="Registry where package is located, usually s3://MY-BUCKET",
type=str,
required=True,
)
verify_p.add_argument(
"--top-hash",
help="Hash of package to verify",
type=str,
required=True,
)
verify_p.add_argument(
"--dir",
help="Directory to verify",
type=str,
required=True,
)
verify_p.add_argument(
"--extra-files-ok",
help="Whether extra files in the directory should cause a failure",
action="store_true"
)
verify_p.set_defaults(func=cmd_verify)
# push
shorthelp = "Pushes the new package to the remote registry"
push_p = subparsers.add_parser("push", description=shorthelp, help=shorthelp, allow_abbrev=False)
push_p.add_argument(
"name",
help="Name of package, in the USER/PKG format",
type=str,
)
push_p.add_argument(
"--dir",
help="Directory to add to the new package",
type=str,
required=True,
)
push_p.add_argument(
"--registry",
help="Registry where to create the new package. Defaults to the default remote registry.",
type=str,
)
push_p.add_argument(
"--dest",
help="Where to copy the objects in the package",
type=str,
)
push_p.add_argument(
"--message",
help="The commit message for the new package",
type=str,
)
push_p.set_defaults(func=cmd_push)
return parser
def main(args=None):
parser = create_parser()
args = parser.parse_args(args)
kwargs = vars(args)
func = kwargs.pop('func')
try:
return func(**kwargs)
except QuiltException as ex:
print(ex.message, file=sys.stderr)
return 1
| 1 | 18,644 | Since this is a dict, don't you want `False`, or do we clean that up elsewhere? | quiltdata-quilt | py
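A small sketch of why the lowercase string "false" (rather than the Python literal False) is plausible here: the dict is flattened into docker -e KEY=VALUE flags, so the value lands on the command line as text either way, and the container sees whatever string was written. The values below are illustrative:

env = dict(ALWAYS_REQUIRE_AUTH="false", NO_DOWNLOAD="false", CATALOG_MODE="LOCAL")

command = ["docker", "run", "--rm"]
for var in [f"{key}={value}" for key, value in env.items()]:
    command += ["-e", var]

# With False instead of "false" the flag would read NO_DOWNLOAD=False, and the
# container's consumers may not treat the capitalized string as falsy.
print(command)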
@@ -23,6 +23,7 @@ from luigi.scheduler import DISABLED, DONE, FAILED, CentralPlannerScheduler
luigi.notifications.DEBUG = True
WORKER = 'myworker'
+HOST = 'localhost'
class CentralPlannerTest(unittest.TestCase): | 1 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import time
from helpers import unittest
import luigi.notifications
from luigi.scheduler import DISABLED, DONE, FAILED, CentralPlannerScheduler
luigi.notifications.DEBUG = True
WORKER = 'myworker'
class CentralPlannerTest(unittest.TestCase):
def setUp(self):
self.sch = CentralPlannerScheduler(retry_delay=100,
remove_delay=1000,
worker_disconnect_delay=10,
disable_persist=10,
disable_window=10,
disable_failures=3)
self.time = time.time
def tearDown(self):
if time.time != self.time:
time.time = self.time
def setTime(self, t):
time.time = lambda: t
def test_dep(self):
self.sch.add_task(WORKER, 'B', deps=('A',))
self.sch.add_task(WORKER, 'A')
self.assertEqual(self.sch.get_work(WORKER)['task_id'], 'A')
self.sch.add_task(WORKER, 'A', status=DONE)
self.assertEqual(self.sch.get_work(WORKER)['task_id'], 'B')
self.sch.add_task(WORKER, 'B', status=DONE)
self.assertEqual(self.sch.get_work(WORKER)['task_id'], None)
def test_failed_dep(self):
self.sch.add_task(WORKER, 'B', deps=('A',))
self.sch.add_task(WORKER, 'A')
self.assertEqual(self.sch.get_work(WORKER)['task_id'], 'A')
self.sch.add_task(WORKER, 'A', status=FAILED)
self.assertEqual(self.sch.get_work(WORKER)['task_id'], None) # can still wait and retry: TODO: do we want this?
self.sch.add_task(WORKER, 'A', DONE)
self.assertEqual(self.sch.get_work(WORKER)['task_id'], 'B')
self.sch.add_task(WORKER, 'B', DONE)
self.assertEqual(self.sch.get_work(WORKER)['task_id'], None)
def test_broken_dep(self):
self.sch.add_task(WORKER, 'B', deps=('A',))
self.sch.add_task(WORKER, 'A', runnable=False)
self.assertEqual(self.sch.get_work(WORKER)['task_id'], None) # can still wait and retry: TODO: do we want this?
self.sch.add_task(WORKER, 'A', DONE)
self.assertEqual(self.sch.get_work(WORKER)['task_id'], 'B')
self.sch.add_task(WORKER, 'B', DONE)
self.assertEqual(self.sch.get_work(WORKER)['task_id'], None)
def test_two_workers(self):
# Worker X wants to build A -> B
# Worker Y wants to build A -> C
self.sch.add_task(worker='X', task_id='A')
self.sch.add_task(worker='Y', task_id='A')
self.sch.add_task(task_id='B', deps=('A',), worker='X')
self.sch.add_task(task_id='C', deps=('A',), worker='Y')
self.assertEqual(self.sch.get_work(worker='X')['task_id'], 'A')
self.assertEqual(self.sch.get_work(worker='Y')['task_id'], None) # Worker Y is pending on A to be done
self.sch.add_task(worker='X', task_id='A', status=DONE)
self.assertEqual(self.sch.get_work(worker='Y')['task_id'], 'C')
self.assertEqual(self.sch.get_work(worker='X')['task_id'], 'B')
def test_retry(self):
# Try to build A but fails, will retry after 100s
self.setTime(0)
self.sch.add_task(WORKER, 'A')
self.assertEqual(self.sch.get_work(WORKER)['task_id'], 'A')
self.sch.add_task(WORKER, 'A', FAILED)
for t in range(100):
self.setTime(t)
self.assertEqual(self.sch.get_work(WORKER)['task_id'], None)
self.sch.ping(WORKER)
if t % 10 == 0:
self.sch.prune()
self.setTime(101)
self.sch.prune()
self.assertEqual(self.sch.get_work(WORKER)['task_id'], 'A')
def test_disconnect_running(self):
# X and Y wants to run A.
# X starts but does not report back. Y does.
# After some timeout, Y will build it instead
self.setTime(0)
self.sch.add_task(task_id='A', worker='X')
self.sch.add_task(task_id='A', worker='Y')
self.assertEqual(self.sch.get_work(worker='X')['task_id'], 'A')
for t in range(200):
self.setTime(t)
self.sch.ping(worker='Y')
if t % 10 == 0:
self.sch.prune()
self.assertEqual(self.sch.get_work(worker='Y')['task_id'], 'A')
def test_remove_dep(self):
# X schedules A -> B, A is broken
# Y schedules C -> B: this should remove A as a dep of B
self.sch.add_task(task_id='A', worker='X', runnable=False)
self.sch.add_task(task_id='B', deps=('A',), worker='X')
# X can't build anything
self.assertEqual(self.sch.get_work(worker='X')['task_id'], None)
self.sch.add_task(task_id='B', deps=('C',), worker='Y') # should reset dependencies for A
self.sch.add_task(task_id='C', worker='Y', status=DONE)
self.assertEqual(self.sch.get_work(worker='Y')['task_id'], 'B')
def test_timeout(self):
# A bug that was earlier present when restarting the same flow
self.setTime(0)
self.sch.add_task(task_id='A', worker='X')
self.assertEqual(self.sch.get_work(worker='X')['task_id'], 'A')
self.setTime(10000)
self.sch.add_task(task_id='A', worker='Y') # Will timeout X but not schedule A for removal
for i in range(2000):
self.setTime(10000 + i)
self.sch.ping(worker='Y')
self.sch.add_task(task_id='A', status=DONE, worker='Y') # This used to raise an exception since A was removed
def test_disallowed_state_changes(self):
# Test that we can not schedule an already running task
t = 'A'
self.sch.add_task(task_id=t, worker='X')
self.assertEqual(self.sch.get_work(worker='X')['task_id'], t)
self.sch.add_task(task_id=t, worker='Y')
self.assertEqual(self.sch.get_work(worker='Y')['task_id'], None)
def test_two_worker_info(self):
# Make sure the scheduler returns info that some other worker is running task A
self.sch.add_task(worker='X', task_id='A')
self.sch.add_task(worker='Y', task_id='A')
self.assertEqual(self.sch.get_work(worker='X')['task_id'], 'A')
r = self.sch.get_work(worker='Y')
self.assertEqual(r['task_id'], None) # Worker Y is pending on A to be done
s = r['running_tasks'][0]
self.assertEqual(s['task_id'], 'A')
self.assertEqual(s['worker'], 'X')
def test_scheduler_resources_none_allow_one(self):
self.sch.add_task(worker='X', task_id='A', resources={'R1': 1})
self.assertEqual(self.sch.get_work(worker='X')['task_id'], 'A')
def test_scheduler_resources_none_disallow_two(self):
self.sch.add_task(worker='X', task_id='A', resources={'R1': 2})
self.assertFalse(self.sch.get_work(worker='X')['task_id'], 'A')
def test_scheduler_with_insufficient_resources(self):
self.sch.add_task(worker='X', task_id='A', resources={'R1': 3})
self.sch.update_resources(R1=2)
self.assertFalse(self.sch.get_work(worker='X')['task_id'])
def test_scheduler_with_sufficient_resources(self):
self.sch.add_task(worker='X', task_id='A', resources={'R1': 3})
self.sch.update_resources(R1=3)
self.assertEqual(self.sch.get_work(worker='X')['task_id'], 'A')
def test_scheduler_with_resources_used(self):
self.sch.add_task(worker='X', task_id='A', resources={'R1': 1})
self.assertEqual(self.sch.get_work(worker='X')['task_id'], 'A')
self.sch.add_task(worker='Y', task_id='B', resources={'R1': 1})
self.sch.update_resources(R1=1)
self.assertFalse(self.sch.get_work(worker='Y')['task_id'])
def test_scheduler_overprovisioned_on_other_resource(self):
self.sch.add_task(worker='X', task_id='A', resources={'R1': 2})
self.sch.update_resources(R1=2)
self.assertEqual(self.sch.get_work(worker='X')['task_id'], 'A')
self.sch.add_task(worker='Y', task_id='B', resources={'R2': 2})
self.sch.update_resources(R1=1, R2=2)
self.assertEqual(self.sch.get_work(worker='Y')['task_id'], 'B')
def test_scheduler_with_priority_and_competing_resources(self):
self.sch.add_task(worker='X', task_id='A')
self.assertEqual(self.sch.get_work(worker='X')['task_id'], 'A')
self.sch.add_task(worker='X', task_id='B', resources={'R': 1}, priority=10)
self.sch.add_task(worker='Y', task_id='C', resources={'R': 1}, priority=1)
self.sch.update_resources(R=1)
self.assertFalse(self.sch.get_work(worker='Y')['task_id'])
self.sch.add_task(worker='Y', task_id='D', priority=0)
self.assertEqual(self.sch.get_work(worker='Y')['task_id'], 'D')
def test_do_not_lock_resources_when_not_ready(self):
""" Test to make sure that resources won't go unused waiting on workers """
self.sch.add_task(worker='X', task_id='A', priority=10)
self.sch.add_task(worker='X', task_id='B', resources={'R': 1}, priority=5)
self.sch.add_task(worker='Y', task_id='C', resources={'R': 1}, priority=1)
self.sch.update_resources(R=1)
self.sch.add_worker('X', [('workers', 1)])
self.assertEqual('C', self.sch.get_work(worker='Y')['task_id'])
def test_lock_resources_when_one_of_multiple_workers_is_ready(self):
self.sch.add_task(worker='X', task_id='A', priority=10)
self.sch.add_task(worker='X', task_id='B', resources={'R': 1}, priority=5)
self.sch.add_task(worker='Y', task_id='C', resources={'R': 1}, priority=1)
self.sch.update_resources(R=1)
self.sch.add_worker('X', [('workers', 2)])
self.sch.add_worker('Y', [])
self.assertFalse(self.sch.get_work('Y')['task_id'])
def test_do_not_lock_resources_while_running_higher_priority(self):
""" Test to make sure that resources won't go unused waiting on workers """
self.sch.add_task(worker='X', task_id='A', priority=10)
self.sch.add_task(worker='X', task_id='B', resources={'R': 1}, priority=5)
self.sch.add_task(worker='Y', task_id='C', resources={'R': 1}, priority=1)
self.sch.update_resources(R=1)
self.sch.add_worker('X', [('workers', 1)])
self.assertEqual('A', self.sch.get_work('X')['task_id'])
self.assertEqual('C', self.sch.get_work('Y')['task_id'])
def test_lock_resources_while_running_lower_priority(self):
""" Make sure resources will be made available while working on lower priority tasks """
self.sch.add_task(worker='X', task_id='A', priority=4)
self.assertEqual('A', self.sch.get_work('X')['task_id'])
self.sch.add_task(worker='X', task_id='B', resources={'R': 1}, priority=5)
self.sch.add_task(worker='Y', task_id='C', resources={'R': 1}, priority=1)
self.sch.update_resources(R=1)
self.sch.add_worker('X', [('workers', 1)])
self.assertFalse(self.sch.get_work('Y')['task_id'])
def test_lock_resources_for_second_worker(self):
self.sch.add_task(worker='X', task_id='A', resources={'R': 1})
self.sch.add_task(worker='X', task_id='B', resources={'R': 1})
self.sch.add_task(worker='Y', task_id='C', resources={'R': 1}, priority=10)
self.sch.add_worker('X', {'workers': 2})
self.sch.add_worker('Y', {'workers': 1})
self.sch.update_resources(R=2)
self.assertEqual('A', self.sch.get_work('X')['task_id'])
self.assertFalse(self.sch.get_work('X')['task_id'])
def test_can_work_on_lower_priority_while_waiting_for_resources(self):
self.sch.add_task(worker='X', task_id='A', resources={'R': 1}, priority=0)
self.assertEqual('A', self.sch.get_work('X')['task_id'])
self.sch.add_task(worker='Y', task_id='B', resources={'R': 1}, priority=10)
self.sch.add_task(worker='Y', task_id='C', priority=0)
self.sch.update_resources(R=1)
self.assertEqual('C', self.sch.get_work('Y')['task_id'])
def test_priority_update_with_pruning(self):
self.setTime(0)
self.sch.add_task(task_id='A', worker='X')
self.setTime(50) # after worker disconnects
self.sch.prune()
self.sch.add_task(task_id='B', deps=['A'], worker='X')
self.setTime(2000) # after remove for task A
self.sch.prune()
# Here task A that B depends on is missing
self.sch.add_task(WORKER, task_id='C', deps=['B'], priority=100)
self.sch.add_task(WORKER, task_id='B', deps=['A'])
self.sch.add_task(WORKER, task_id='A')
self.sch.add_task(WORKER, task_id='D', priority=10)
self.check_task_order('ABCD')
def test_update_resources(self):
self.sch.add_task(WORKER, task_id='A', deps=['B'])
self.sch.add_task(WORKER, task_id='B', resources={'r': 2})
self.sch.update_resources(r=1)
# B requires too many resources, we can't schedule
self.check_task_order([])
self.sch.add_task(WORKER, task_id='B', resources={'r': 1})
# now we have enough resources
self.check_task_order(['B', 'A'])
def test_handle_multiple_resources(self):
self.sch.add_task(WORKER, task_id='A', resources={'r1': 1, 'r2': 1})
self.sch.add_task(WORKER, task_id='B', resources={'r1': 1, 'r2': 1})
self.sch.add_task(WORKER, task_id='C', resources={'r1': 1})
self.sch.update_resources(r1=2, r2=1)
self.assertEqual('A', self.sch.get_work(WORKER)['task_id'])
self.check_task_order('C')
def test_single_resource_lock(self):
self.sch.add_task('X', task_id='A', resources={'r': 1})
self.assertEqual('A', self.sch.get_work('X')['task_id'])
self.sch.add_task(WORKER, task_id='B', resources={'r': 2}, priority=10)
self.sch.add_task(WORKER, task_id='C', resources={'r': 1})
self.sch.update_resources(r=2)
# Should wait for 2 units of r to be available for B before scheduling C
self.check_task_order([])
def test_no_lock_if_too_many_resources_required(self):
self.sch.add_task(WORKER, task_id='A', resources={'r': 2}, priority=10)
self.sch.add_task(WORKER, task_id='B', resources={'r': 1})
self.sch.update_resources(r=1)
self.check_task_order('B')
def test_multiple_resources_lock(self):
self.sch.add_task('X', task_id='A', resources={'r1': 1, 'r2': 1}, priority=10)
self.sch.add_task(WORKER, task_id='B', resources={'r2': 1})
self.sch.add_task(WORKER, task_id='C', resources={'r1': 1})
self.sch.update_resources(r1=1, r2=1)
# should preserve both resources for worker 'X'
self.check_task_order([])
def test_multiple_resources_no_lock(self):
self.sch.add_task(WORKER, task_id='A', resources={'r1': 1}, priority=10)
self.sch.add_task(WORKER, task_id='B', resources={'r1': 1, 'r2': 1}, priority=10)
self.sch.add_task(WORKER, task_id='C', resources={'r2': 1})
self.sch.update_resources(r1=1, r2=2)
self.assertEqual('A', self.sch.get_work(WORKER)['task_id'])
# C doesn't block B, so it can go first
self.check_task_order('C')
def check_task_order(self, order):
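# Helper: repeatedly requests work for WORKER, asserting that tasks are handed
# out in exactly the given order (each one is marked DONE in turn) and that no
# work remains once the order is exhausted.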
for expected_id in order:
self.assertEqual(self.sch.get_work(WORKER)['task_id'], expected_id)
self.sch.add_task(WORKER, expected_id, status=DONE)
self.assertEqual(self.sch.get_work(WORKER)['task_id'], None)
def test_priorities(self):
self.sch.add_task(WORKER, 'A', priority=10)
self.sch.add_task(WORKER, 'B', priority=5)
self.sch.add_task(WORKER, 'C', priority=15)
self.sch.add_task(WORKER, 'D', priority=9)
self.check_task_order(['C', 'A', 'D', 'B'])
def test_priorities_default_and_negative(self):
self.sch.add_task(WORKER, 'A', priority=10)
self.sch.add_task(WORKER, 'B')
self.sch.add_task(WORKER, 'C', priority=15)
self.sch.add_task(WORKER, 'D', priority=-20)
self.sch.add_task(WORKER, 'E', priority=1)
self.check_task_order(['C', 'A', 'E', 'B', 'D'])
def test_priorities_and_dependencies(self):
self.sch.add_task(WORKER, 'A', deps=['Z'], priority=10)
self.sch.add_task(WORKER, 'B', priority=5)
self.sch.add_task(WORKER, 'C', deps=['Z'], priority=3)
self.sch.add_task(WORKER, 'D', priority=2)
self.sch.add_task(WORKER, 'Z', priority=1)
self.check_task_order(['Z', 'A', 'B', 'C', 'D'])
def test_priority_update_dependency_after_scheduling(self):
self.sch.add_task(WORKER, 'A', priority=1)
self.sch.add_task(WORKER, 'B', priority=5, deps=['A'])
self.sch.add_task(WORKER, 'C', priority=10, deps=['B'])
self.sch.add_task(WORKER, 'D', priority=6)
self.check_task_order(['A', 'B', 'C', 'D'])
def test_disable(self):
self.sch.add_task(WORKER, 'A')
self.sch.add_task(WORKER, 'A', status=FAILED)
self.sch.add_task(WORKER, 'A', status=FAILED)
self.sch.add_task(WORKER, 'A', status=FAILED)
# should be disabled at this point
self.assertEqual(len(self.sch.task_list('DISABLED', '')), 1)
self.assertEqual(len(self.sch.task_list('FAILED', '')), 0)
self.sch.add_task(WORKER, 'A')
self.assertEqual(self.sch.get_work(WORKER)['task_id'], None)
def test_disable_and_reenable(self):
self.sch.add_task(WORKER, 'A')
self.sch.add_task(WORKER, 'A', status=FAILED)
self.sch.add_task(WORKER, 'A', status=FAILED)
self.sch.add_task(WORKER, 'A', status=FAILED)
# should be disabled at this point
self.assertEqual(len(self.sch.task_list('DISABLED', '')), 1)
self.assertEqual(len(self.sch.task_list('FAILED', '')), 0)
self.sch.re_enable_task('A')
# should be enabled at this point
self.assertEqual(len(self.sch.task_list('DISABLED', '')), 0)
self.assertEqual(len(self.sch.task_list('FAILED', '')), 1)
self.sch.add_task(WORKER, 'A')
self.assertEqual(self.sch.get_work(WORKER)['task_id'], 'A')
def test_disable_and_reenable_and_disable_again(self):
self.sch.add_task(WORKER, 'A')
self.sch.add_task(WORKER, 'A', status=FAILED)
self.sch.add_task(WORKER, 'A', status=FAILED)
self.sch.add_task(WORKER, 'A', status=FAILED)
# should be disabled at this point
self.assertEqual(len(self.sch.task_list('DISABLED', '')), 1)
self.assertEqual(len(self.sch.task_list('FAILED', '')), 0)
self.sch.re_enable_task('A')
# should be enabled at this point
self.assertEqual(len(self.sch.task_list('DISABLED', '')), 0)
self.assertEqual(len(self.sch.task_list('FAILED', '')), 1)
self.sch.add_task(WORKER, 'A')
self.assertEqual(self.sch.get_work(WORKER)['task_id'], 'A')
self.sch.add_task(WORKER, 'A', status=FAILED)
# should be still enabled
self.assertEqual(len(self.sch.task_list('DISABLED', '')), 0)
self.assertEqual(len(self.sch.task_list('FAILED', '')), 1)
self.sch.add_task(WORKER, 'A')
self.assertEqual(self.sch.get_work(WORKER)['task_id'], 'A')
self.sch.add_task(WORKER, 'A', status=FAILED)
self.sch.add_task(WORKER, 'A', status=FAILED)
# should be disabled now
self.assertEqual(len(self.sch.task_list('DISABLED', '')), 1)
self.assertEqual(len(self.sch.task_list('FAILED', '')), 0)
self.sch.add_task(WORKER, 'A')
self.assertEqual(self.sch.get_work(WORKER)['task_id'], None)
def test_disable_and_done(self):
self.sch.add_task(WORKER, 'A')
self.sch.add_task(WORKER, 'A', status=FAILED)
self.sch.add_task(WORKER, 'A', status=FAILED)
self.sch.add_task(WORKER, 'A', status=FAILED)
# should be disabled at this point
self.assertEqual(len(self.sch.task_list('DISABLED', '')), 1)
self.assertEqual(len(self.sch.task_list('FAILED', '')), 0)
self.sch.add_task(WORKER, 'A', status=DONE)
# should be enabled at this point
self.assertEqual(len(self.sch.task_list('DISABLED', '')), 0)
self.assertEqual(len(self.sch.task_list('DONE', '')), 1)
self.sch.add_task(WORKER, 'A')
self.assertEqual(self.sch.get_work(WORKER)['task_id'], 'A')
def test_disable_by_worker(self):
self.sch.add_task(WORKER, 'A', status=DISABLED)
self.assertEqual(len(self.sch.task_list('DISABLED', '')), 1)
self.sch.add_task(WORKER, 'A')
# should be enabled at this point
self.assertEqual(len(self.sch.task_list('DISABLED', '')), 0)
self.sch.add_task(WORKER, 'A')
self.assertEqual(self.sch.get_work(WORKER)['task_id'], 'A')
def test_task_list_beyond_limit(self):
sch = CentralPlannerScheduler(max_shown_tasks=3)
for c in 'ABCD':
sch.add_task(WORKER, c)
self.assertEqual(set('ABCD'), set(sch.task_list('PENDING', '', False).keys()))
self.assertEqual({'num_tasks': 4}, sch.task_list('PENDING', ''))
def test_task_list_within_limit(self):
sch = CentralPlannerScheduler(max_shown_tasks=4)
for c in 'ABCD':
sch.add_task(WORKER, c)
self.assertEqual(set('ABCD'), set(sch.task_list('PENDING', '').keys()))
def test_task_lists_some_beyond_limit(self):
sch = CentralPlannerScheduler(max_shown_tasks=3)
for c in 'ABCD':
sch.add_task(WORKER, c, 'DONE')
for c in 'EFG':
sch.add_task(WORKER, c)
self.assertEqual(set('EFG'), set(sch.task_list('PENDING', '').keys()))
self.assertEqual({'num_tasks': 4}, sch.task_list('DONE', ''))
def test_priority_update_dependency_chain(self):
self.sch.add_task(WORKER, 'A', priority=10, deps=['B'])
self.sch.add_task(WORKER, 'B', priority=5, deps=['C'])
self.sch.add_task(WORKER, 'C', priority=1)
self.sch.add_task(WORKER, 'D', priority=6)
self.check_task_order(['C', 'B', 'A', 'D'])
def test_priority_no_decrease_with_multiple_updates(self):
self.sch.add_task(WORKER, 'A', priority=1)
self.sch.add_task(WORKER, 'B', priority=10, deps=['A'])
self.sch.add_task(WORKER, 'C', priority=5, deps=['A'])
self.sch.add_task(WORKER, 'D', priority=6)
self.check_task_order(['A', 'B', 'D', 'C'])
def test_unique_tasks(self):
self.sch.add_task(WORKER, 'A')
self.sch.add_task(WORKER, 'B')
self.sch.add_task(WORKER, 'C')
self.sch.add_task(WORKER + "_2", 'B')
response = self.sch.get_work(WORKER)
self.assertEqual(3, response['n_pending_tasks'])
self.assertEqual(2, response['n_unique_pending'])
def test_prefer_more_dependents(self):
self.sch.add_task(WORKER, 'A')
self.sch.add_task(WORKER, 'B')
self.sch.add_task(WORKER, 'C', deps=['B'])
self.sch.add_task(WORKER, 'D', deps=['B'])
self.sch.add_task(WORKER, 'E', deps=['A'])
self.check_task_order('BACDE')
def test_prefer_readier_dependents(self):
self.sch.add_task(WORKER, 'A')
self.sch.add_task(WORKER, 'B')
self.sch.add_task(WORKER, 'C')
self.sch.add_task(WORKER, 'D')
self.sch.add_task(WORKER, 'F', deps=['A', 'B', 'C'])
self.sch.add_task(WORKER, 'G', deps=['A', 'B', 'C'])
self.sch.add_task(WORKER, 'E', deps=['D'])
self.check_task_order('DABCFGE')
def test_ignore_done_dependents(self):
self.sch.add_task(WORKER, 'A')
self.sch.add_task(WORKER, 'B')
self.sch.add_task(WORKER, 'C')
self.sch.add_task(WORKER, 'D', priority=1)
self.sch.add_task(WORKER, 'E', deps=['C', 'D'])
self.sch.add_task(WORKER, 'F', deps=['A', 'B'])
self.check_task_order('DCABEF')
if __name__ == '__main__':
unittest.main()
| 1 | 11,220 | what's the reason you had to add the `host` argument to all `get_work` calls? | spotify-luigi | py |
@@ -37,6 +37,13 @@ public class UpdatePermissionRootCommand extends AbstractCommand<Dataverse> {
}
}
+ @Override
+ public boolean onSuccess(CommandContext ctxt, Object r) {
+ return ctxt.dataverses().index((Dataverse) r,true);
+ }
+
+
+ //TODO: Review this as this will never be an instance of Dataset, will it?
@Override
public Map<String, Set<Permission>> getRequiredPermissions() {
// for data file check permission on owning dataset | 1 | package edu.harvard.iq.dataverse.engine.command.impl;
import edu.harvard.iq.dataverse.Dataverse;
import edu.harvard.iq.dataverse.authorization.Permission;
import edu.harvard.iq.dataverse.engine.command.AbstractCommand;
import edu.harvard.iq.dataverse.engine.command.CommandContext;
import edu.harvard.iq.dataverse.engine.command.DataverseRequest;
import edu.harvard.iq.dataverse.engine.command.exception.CommandException;
import java.util.Collections;
import java.util.Map;
import java.util.Set;
/**
* Updates the permission root-ness of a DvObjectContainer.
* @author michael
*/
// no annotations here, since permissions are dynamically decided
public class UpdatePermissionRootCommand extends AbstractCommand<Dataverse> {
private final boolean newValue;
private final Dataverse dvoc;
public UpdatePermissionRootCommand(boolean newValue, DataverseRequest aRequest, Dataverse anAffectedDataverse) {
super(aRequest, anAffectedDataverse);
this.newValue = newValue;
dvoc = anAffectedDataverse;
}
@Override
public Dataverse execute( final CommandContext ctxt) throws CommandException {
if ( dvoc.isPermissionRoot() == newValue ) {
return dvoc;
} else {
dvoc.setPermissionRoot(newValue);
return ctxt.dataverses().save(dvoc);
}
}
@Override
public Map<String, Set<Permission>> getRequiredPermissions() {
// for data file check permission on owning dataset
return Collections.singletonMap("",
dvoc instanceof Dataverse ? Collections.singleton(Permission.ManageDataversePermissions)
: Collections.singleton(Permission.ManageDatasetPermissions));
}
}
| 1 | 42,564 | This will never be a dataset so we can simplify the code here | IQSS-dataverse | java |
@@ -89,6 +89,15 @@ public abstract class AbstractBlockProcessor implements BlockProcessor {
private final MiningBeneficiaryCalculator miningBeneficiaryCalculator;
+ public AbstractBlockProcessor(final AbstractBlockProcessor blockProcessor) {
+ this(
+ blockProcessor.transactionProcessor,
+ blockProcessor.transactionReceiptFactory,
+ blockProcessor.blockReward,
+ blockProcessor.miningBeneficiaryCalculator,
+ blockProcessor.skipZeroBlockRewards);
+ }
+
public AbstractBlockProcessor(
final TransactionProcessor transactionProcessor,
final MainnetBlockProcessor.TransactionReceiptFactory transactionReceiptFactory, | 1 | /*
* Copyright ConsenSys AG.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.ethereum.mainnet;
import org.hyperledger.besu.ethereum.chain.Blockchain;
import org.hyperledger.besu.ethereum.core.Address;
import org.hyperledger.besu.ethereum.core.BlockHeader;
import org.hyperledger.besu.ethereum.core.MutableWorldState;
import org.hyperledger.besu.ethereum.core.ProcessableBlockHeader;
import org.hyperledger.besu.ethereum.core.Transaction;
import org.hyperledger.besu.ethereum.core.TransactionReceipt;
import org.hyperledger.besu.ethereum.core.Wei;
import org.hyperledger.besu.ethereum.core.WorldState;
import org.hyperledger.besu.ethereum.core.WorldUpdater;
import org.hyperledger.besu.ethereum.vm.BlockHashLookup;
import java.util.ArrayList;
import java.util.List;
import com.google.common.collect.ImmutableList;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
public abstract class AbstractBlockProcessor implements BlockProcessor {
@FunctionalInterface
public interface TransactionReceiptFactory {
TransactionReceipt create(
TransactionProcessor.Result result, WorldState worldState, long gasUsed);
}
private static final Logger LOG = LogManager.getLogger();
static final int MAX_GENERATION = 6;
public static class Result implements BlockProcessor.Result {
private static final AbstractBlockProcessor.Result FAILED =
new AbstractBlockProcessor.Result(false, null);
private final boolean successful;
private final List<TransactionReceipt> receipts;
public static AbstractBlockProcessor.Result successful(
final List<TransactionReceipt> receipts) {
return new AbstractBlockProcessor.Result(true, ImmutableList.copyOf(receipts));
}
public static AbstractBlockProcessor.Result failed() {
return FAILED;
}
Result(final boolean successful, final List<TransactionReceipt> receipts) {
this.successful = successful;
this.receipts = receipts;
}
@Override
public List<TransactionReceipt> getReceipts() {
return receipts;
}
@Override
public boolean isSuccessful() {
return successful;
}
}
private final TransactionProcessor transactionProcessor;
private final MainnetBlockProcessor.TransactionReceiptFactory transactionReceiptFactory;
final Wei blockReward;
private final boolean skipZeroBlockRewards;
private final MiningBeneficiaryCalculator miningBeneficiaryCalculator;
public AbstractBlockProcessor(
final TransactionProcessor transactionProcessor,
final MainnetBlockProcessor.TransactionReceiptFactory transactionReceiptFactory,
final Wei blockReward,
final MiningBeneficiaryCalculator miningBeneficiaryCalculator,
final boolean skipZeroBlockRewards) {
this.transactionProcessor = transactionProcessor;
this.transactionReceiptFactory = transactionReceiptFactory;
this.blockReward = blockReward;
this.miningBeneficiaryCalculator = miningBeneficiaryCalculator;
this.skipZeroBlockRewards = skipZeroBlockRewards;
}
@Override
public AbstractBlockProcessor.Result processBlock(
final Blockchain blockchain,
final MutableWorldState worldState,
final BlockHeader blockHeader,
final List<Transaction> transactions,
final List<BlockHeader> ommers) {
long gasUsed = 0;
final List<TransactionReceipt> receipts = new ArrayList<>();
for (final Transaction transaction : transactions) {
final long remainingGasBudget = blockHeader.getGasLimit() - gasUsed;
if (Long.compareUnsigned(transaction.getGasLimit(), remainingGasBudget) > 0) {
LOG.warn(
"Transaction processing error: transaction gas limit {} exceeds available block budget remaining {}",
transaction.getGasLimit(),
remainingGasBudget);
return AbstractBlockProcessor.Result.failed();
}
final WorldUpdater worldStateUpdater = worldState.updater();
final BlockHashLookup blockHashLookup = new BlockHashLookup(blockHeader, blockchain);
final Address miningBeneficiary =
miningBeneficiaryCalculator.calculateBeneficiary(blockHeader);
final TransactionProcessor.Result result =
transactionProcessor.processTransaction(
blockchain,
worldStateUpdater,
blockHeader,
transaction,
miningBeneficiary,
blockHashLookup,
true,
TransactionValidationParams.processingBlock());
if (result.isInvalid()) {
return AbstractBlockProcessor.Result.failed();
}
worldStateUpdater.commit();
gasUsed = transaction.getGasLimit() - result.getGasRemaining() + gasUsed;
final TransactionReceipt transactionReceipt =
transactionReceiptFactory.create(result, worldState, gasUsed);
receipts.add(transactionReceipt);
}
if (!rewardCoinbase(worldState, blockHeader, ommers, skipZeroBlockRewards)) {
return AbstractBlockProcessor.Result.failed();
}
worldState.persist();
return AbstractBlockProcessor.Result.successful(receipts);
}
abstract boolean rewardCoinbase(
final MutableWorldState worldState,
final ProcessableBlockHeader header,
final List<BlockHeader> ommers,
final boolean skipZeroBlockRewards);
}
| 1 | 19,840 | This should be moved into the PrivacyBlockProcessor instead of adding the constructor here. | hyperledger-besu | java |
@@ -38,7 +38,7 @@ import (
"github.com/pipe-cd/pipe/pkg/crypto"
"github.com/pipe-cd/pipe/pkg/datastore"
"github.com/pipe-cd/pipe/pkg/git"
- "github.com/pipe-cd/pipe/pkg/insight"
+ insightfilestore "github.com/pipe-cd/pipe/pkg/insight/insightstore"
"github.com/pipe-cd/pipe/pkg/model"
"github.com/pipe-cd/pipe/pkg/redis"
"github.com/pipe-cd/pipe/pkg/rpc/rpcauth" | 1 | // Copyright 2020 The PipeCD Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package grpcapi
import (
"context"
"encoding/base64"
"errors"
"fmt"
"time"
"github.com/google/uuid"
"go.uber.org/zap"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"github.com/pipe-cd/pipe/pkg/app/api/applicationlivestatestore"
"github.com/pipe-cd/pipe/pkg/app/api/commandstore"
"github.com/pipe-cd/pipe/pkg/app/api/service/webservice"
"github.com/pipe-cd/pipe/pkg/app/api/stagelogstore"
"github.com/pipe-cd/pipe/pkg/cache"
"github.com/pipe-cd/pipe/pkg/cache/memorycache"
"github.com/pipe-cd/pipe/pkg/cache/rediscache"
"github.com/pipe-cd/pipe/pkg/config"
"github.com/pipe-cd/pipe/pkg/crypto"
"github.com/pipe-cd/pipe/pkg/datastore"
"github.com/pipe-cd/pipe/pkg/git"
"github.com/pipe-cd/pipe/pkg/insight"
"github.com/pipe-cd/pipe/pkg/model"
"github.com/pipe-cd/pipe/pkg/redis"
"github.com/pipe-cd/pipe/pkg/rpc/rpcauth"
)
type encrypter interface {
Encrypt(text string) (string, error)
}
// WebAPI implements the behaviors for the gRPC definitions of WebAPI.
type WebAPI struct {
applicationStore datastore.ApplicationStore
environmentStore datastore.EnvironmentStore
deploymentStore datastore.DeploymentStore
pipedStore datastore.PipedStore
projectStore datastore.ProjectStore
apiKeyStore datastore.APIKeyStore
stageLogStore stagelogstore.Store
applicationLiveStateStore applicationlivestatestore.Store
insightstore insight.Store
commandStore commandstore.Store
encrypter encrypter
appProjectCache cache.Cache
deploymentProjectCache cache.Cache
pipedProjectCache cache.Cache
insightCache cache.Cache
projectsInConfig map[string]config.ControlPlaneProject
logger *zap.Logger
}
// NewWebAPI creates a new WebAPI instance.
func NewWebAPI(
ctx context.Context,
ds datastore.DataStore,
sls stagelogstore.Store,
alss applicationlivestatestore.Store,
cmds commandstore.Store,
is insight.Store,
rd redis.Redis,
projs map[string]config.ControlPlaneProject,
encrypter encrypter,
logger *zap.Logger) *WebAPI {
a := &WebAPI{
applicationStore: datastore.NewApplicationStore(ds),
environmentStore: datastore.NewEnvironmentStore(ds),
deploymentStore: datastore.NewDeploymentStore(ds),
pipedStore: datastore.NewPipedStore(ds),
projectStore: datastore.NewProjectStore(ds),
apiKeyStore: datastore.NewAPIKeyStore(ds),
stageLogStore: sls,
insightstore: is,
applicationLiveStateStore: alss,
commandStore: cmds,
projectsInConfig: projs,
encrypter: encrypter,
appProjectCache: memorycache.NewTTLCache(ctx, 24*time.Hour, 3*time.Hour),
deploymentProjectCache: memorycache.NewTTLCache(ctx, 24*time.Hour, 3*time.Hour),
pipedProjectCache: memorycache.NewTTLCache(ctx, 24*time.Hour, 3*time.Hour),
insightCache: rediscache.NewTTLCache(rd, 3*time.Hour),
logger: logger.Named("web-api"),
}
return a
}
// Register registers all handling of this service into the specified gRPC server.
func (a *WebAPI) Register(server *grpc.Server) {
webservice.RegisterWebServiceServer(server, a)
}
func (a *WebAPI) AddEnvironment(ctx context.Context, req *webservice.AddEnvironmentRequest) (*webservice.AddEnvironmentResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
env := model.Environment{
Id: uuid.New().String(),
Name: req.Name,
Desc: req.Desc,
ProjectId: claims.Role.ProjectId,
}
err = a.environmentStore.AddEnvironment(ctx, &env)
if errors.Is(err, datastore.ErrAlreadyExists) {
return nil, status.Error(codes.AlreadyExists, "The environment already exists")
}
if err != nil {
a.logger.Error("failed to create environment", zap.Error(err))
return nil, status.Error(codes.Internal, "Failed to create environment")
}
return &webservice.AddEnvironmentResponse{}, nil
}
func (a *WebAPI) UpdateEnvironmentDesc(ctx context.Context, req *webservice.UpdateEnvironmentDescRequest) (*webservice.UpdateEnvironmentDescResponse, error) {
return nil, status.Error(codes.Unimplemented, "")
}
func (a *WebAPI) ListEnvironments(ctx context.Context, req *webservice.ListEnvironmentsRequest) (*webservice.ListEnvironmentsResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
opts := datastore.ListOptions{
Filters: []datastore.ListFilter{
{
Field: "ProjectId",
Operator: "==",
Value: claims.Role.ProjectId,
},
},
}
envs, err := a.environmentStore.ListEnvironments(ctx, opts)
if err != nil {
a.logger.Error("failed to get environments", zap.Error(err))
return nil, status.Error(codes.Internal, "Failed to get environments")
}
return &webservice.ListEnvironmentsResponse{
Environments: envs,
}, nil
}
func (a *WebAPI) RegisterPiped(ctx context.Context, req *webservice.RegisterPipedRequest) (*webservice.RegisterPipedResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
key, keyHash, err := model.GeneratePipedKey()
if err != nil {
a.logger.Error("failed to generate piped key", zap.Error(err))
return nil, status.Error(codes.Internal, "Failed to generate the piped key")
}
piped := model.Piped{
Id: uuid.New().String(),
Name: req.Name,
Desc: req.Desc,
ProjectId: claims.Role.ProjectId,
EnvIds: req.EnvIds,
Status: model.Piped_OFFLINE,
}
piped.AddKey(keyHash, claims.Subject, time.Now())
err = a.pipedStore.AddPiped(ctx, &piped)
if errors.Is(err, datastore.ErrAlreadyExists) {
return nil, status.Error(codes.AlreadyExists, "The piped already exists")
}
if err != nil {
a.logger.Error("failed to register piped", zap.Error(err))
return nil, status.Error(codes.Internal, "Failed to register piped")
}
return &webservice.RegisterPipedResponse{
Id: piped.Id,
Key: key,
}, nil
}
func (a *WebAPI) UpdatePiped(ctx context.Context, req *webservice.UpdatePipedRequest) (*webservice.UpdatePipedResponse, error) {
updater := func(ctx context.Context, pipedID string) error {
return a.pipedStore.UpdatePiped(ctx, req.PipedId, func(p *model.Piped) error {
p.Name = req.Name
p.Desc = req.Desc
p.EnvIds = req.EnvIds
return nil
})
}
if err := a.updatePiped(ctx, req.PipedId, updater); err != nil {
return nil, err
}
return &webservice.UpdatePipedResponse{}, nil
}
func (a *WebAPI) RecreatePipedKey(ctx context.Context, req *webservice.RecreatePipedKeyRequest) (*webservice.RecreatePipedKeyResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
key, keyHash, err := model.GeneratePipedKey()
if err != nil {
a.logger.Error("failed to generate piped key", zap.Error(err))
return nil, status.Error(codes.Internal, "Failed to generate the piped key")
}
updater := func(ctx context.Context, pipedID string) error {
return a.pipedStore.AddKey(ctx, pipedID, keyHash, claims.Subject, time.Now())
}
if err := a.updatePiped(ctx, req.Id, updater); err != nil {
return nil, err
}
return &webservice.RecreatePipedKeyResponse{
Key: key,
}, nil
}
func (a *WebAPI) EnablePiped(ctx context.Context, req *webservice.EnablePipedRequest) (*webservice.EnablePipedResponse, error) {
if err := a.updatePiped(ctx, req.PipedId, a.pipedStore.EnablePiped); err != nil {
return nil, err
}
return &webservice.EnablePipedResponse{}, nil
}
func (a *WebAPI) DisablePiped(ctx context.Context, req *webservice.DisablePipedRequest) (*webservice.DisablePipedResponse, error) {
if err := a.updatePiped(ctx, req.PipedId, a.pipedStore.DisablePiped); err != nil {
return nil, err
}
return &webservice.DisablePipedResponse{}, nil
}
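// updatePiped authorizes the logged-in user against the piped's project and then
// applies the given updater, translating datastore errors into gRPC status codes.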
func (a *WebAPI) updatePiped(ctx context.Context, pipedID string, updater func(context.Context, string) error) error {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return err
}
if err := a.validatePipedBelongsToProject(ctx, pipedID, claims.Role.ProjectId); err != nil {
return err
}
if err := updater(ctx, pipedID); err != nil {
switch err {
case datastore.ErrNotFound:
return status.Error(codes.InvalidArgument, "The piped is not found")
case datastore.ErrInvalidArgument:
return status.Error(codes.InvalidArgument, "Invalid value for update")
default:
a.logger.Error("failed to update the piped",
zap.String("piped-id", pipedID),
zap.Error(err),
)
return status.Error(codes.Internal, "Failed to update the piped")
}
}
return nil
}
// TODO: Consider using piped-stats to decide piped connection status.
func (a *WebAPI) ListPipeds(ctx context.Context, req *webservice.ListPipedsRequest) (*webservice.ListPipedsResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
opts := datastore.ListOptions{
Filters: []datastore.ListFilter{
{
Field: "ProjectId",
Operator: "==",
Value: claims.Role.ProjectId,
},
},
}
if req.Options != nil {
if req.Options.Enabled != nil {
opts.Filters = append(opts.Filters, datastore.ListFilter{
Field: "Disabled",
Operator: "==",
Value: !req.Options.Enabled.GetValue(),
})
}
}
pipeds, err := a.pipedStore.ListPipeds(ctx, opts)
if err != nil {
a.logger.Error("failed to get pipeds", zap.Error(err))
return nil, status.Error(codes.Internal, "Failed to get pipeds")
}
// Redact all sensitive data inside piped message before sending to the client.
for i := range pipeds {
pipeds[i].RedactSensitiveData()
}
return &webservice.ListPipedsResponse{
Pipeds: pipeds,
}, nil
}
func (a *WebAPI) GetPiped(ctx context.Context, req *webservice.GetPipedRequest) (*webservice.GetPipedResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
piped, err := getPiped(ctx, a.pipedStore, req.PipedId, a.logger)
if err != nil {
return nil, err
}
if err := a.validatePipedBelongsToProject(ctx, req.PipedId, claims.Role.ProjectId); err != nil {
return nil, err
}
// Redact all sensitive data inside piped message before sending to the client.
piped.RedactSensitiveData()
return &webservice.GetPipedResponse{
Piped: piped,
}, nil
}
// validatePipedBelongsToProject checks if the given piped belongs to the given project.
// It returns an error unless the piped belongs to the project.
func (a *WebAPI) validatePipedBelongsToProject(ctx context.Context, pipedID, projectID string) error {
pid, err := a.pipedProjectCache.Get(pipedID)
if err == nil {
if pid != projectID {
return status.Error(codes.PermissionDenied, "Requested piped doesn't belong to the project you logged in")
}
return nil
}
piped, err := getPiped(ctx, a.pipedStore, pipedID, a.logger)
if err != nil {
return err
}
a.pipedProjectCache.Put(pipedID, piped.ProjectId)
if piped.ProjectId != projectID {
return status.Error(codes.PermissionDenied, "Requested piped doesn't belong to the project you logged in")
}
return nil
}
// TODO: Validate the specified piped to ensure that it belongs to the specified environment.
func (a *WebAPI) AddApplication(ctx context.Context, req *webservice.AddApplicationRequest) (*webservice.AddApplicationResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
piped, err := getPiped(ctx, a.pipedStore, req.PipedId, a.logger)
if err != nil {
return nil, err
}
if piped.ProjectId != claims.Role.ProjectId {
return nil, status.Error(codes.InvalidArgument, "Requested piped does not belong to your project")
}
gitpath, err := makeGitPath(
req.GitPath.Repo.Id,
req.GitPath.Path,
req.GitPath.ConfigFilename,
piped,
a.logger,
)
if err != nil {
return nil, err
}
app := model.Application{
Id: uuid.New().String(),
Name: req.Name,
EnvId: req.EnvId,
PipedId: req.PipedId,
ProjectId: claims.Role.ProjectId,
GitPath: gitpath,
Kind: req.Kind,
CloudProvider: req.CloudProvider,
}
err = a.applicationStore.AddApplication(ctx, &app)
if errors.Is(err, datastore.ErrAlreadyExists) {
return nil, status.Error(codes.AlreadyExists, "The application already exists")
}
if err != nil {
a.logger.Error("failed to create application", zap.Error(err))
return nil, status.Error(codes.Internal, "Failed to create application")
}
return &webservice.AddApplicationResponse{
ApplicationId: app.Id,
}, nil
}
func (a *WebAPI) UpdateApplication(ctx context.Context, req *webservice.UpdateApplicationRequest) (*webservice.UpdateApplicationResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
piped, err := getPiped(ctx, a.pipedStore, req.PipedId, a.logger)
if err != nil {
return nil, err
}
if piped.ProjectId != claims.Role.ProjectId {
return nil, status.Error(codes.InvalidArgument, "Requested piped does not belong to your project")
}
gitpath, err := makeGitPath(
req.GitPath.Repo.Id,
req.GitPath.Path,
req.GitPath.ConfigFilename,
piped,
a.logger,
)
if err != nil {
return nil, err
}
err = a.applicationStore.UpdateApplication(ctx, req.ApplicationId, func(app *model.Application) error {
app.Name = req.Name
app.EnvId = req.EnvId
app.PipedId = req.PipedId
app.GitPath = gitpath
app.Kind = req.Kind
app.CloudProvider = req.CloudProvider
return nil
})
if err != nil {
a.logger.Error("failed to update application", zap.Error(err))
return nil, status.Error(codes.Internal, "Failed to update application")
}
return &webservice.UpdateApplicationResponse{}, nil
}
func (a *WebAPI) EnableApplication(ctx context.Context, req *webservice.EnableApplicationRequest) (*webservice.EnableApplicationResponse, error) {
if err := a.updateApplicationEnable(ctx, req.ApplicationId, true); err != nil {
return nil, err
}
return &webservice.EnableApplicationResponse{}, nil
}
func (a *WebAPI) DisableApplication(ctx context.Context, req *webservice.DisableApplicationRequest) (*webservice.DisableApplicationResponse, error) {
if err := a.updateApplicationEnable(ctx, req.ApplicationId, false); err != nil {
return nil, err
}
return &webservice.DisableApplicationResponse{}, nil
}
func (a *WebAPI) DeleteApplication(ctx context.Context, req *webservice.DeleteApplicationRequest) (*webservice.DeleteApplicationResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
if err := a.validateAppBelongsToProject(ctx, req.ApplicationId, claims.Role.ProjectId); err != nil {
return nil, err
}
if err := a.applicationStore.DeleteApplication(ctx, req.ApplicationId); err != nil {
switch err {
case datastore.ErrNotFound:
return nil, status.Error(codes.NotFound, "The application is not found")
case datastore.ErrInvalidArgument:
return nil, status.Error(codes.InvalidArgument, "Invalid value to delete")
default:
a.logger.Error("failed to delete the application",
zap.String("application-id", req.ApplicationId),
zap.Error(err),
)
return nil, status.Error(codes.Internal, "Failed to delete the application")
}
}
return &webservice.DeleteApplicationResponse{}, nil
}
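// updateApplicationEnable authorizes the request and then enables or disables the
// given application, translating datastore errors into gRPC status codes.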
func (a *WebAPI) updateApplicationEnable(ctx context.Context, appID string, enable bool) error {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return err
}
if err := a.validateAppBelongsToProject(ctx, appID, claims.Role.ProjectId); err != nil {
return err
}
var updater func(context.Context, string) error
if enable {
updater = a.applicationStore.EnableApplication
} else {
updater = a.applicationStore.DisableApplication
}
if err := updater(ctx, appID); err != nil {
switch err {
case datastore.ErrNotFound:
return status.Error(codes.InvalidArgument, "The application is not found")
case datastore.ErrInvalidArgument:
return status.Error(codes.InvalidArgument, "Invalid value for update")
default:
a.logger.Error("failed to update the application",
zap.String("application-id", appID),
zap.Error(err),
)
return status.Error(codes.Internal, "Failed to update the application")
}
}
return nil
}
func (a *WebAPI) ListApplications(ctx context.Context, req *webservice.ListApplicationsRequest) (*webservice.ListApplicationsResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
orders := []datastore.Order{
{
Field: "UpdatedAt",
Direction: datastore.Desc,
},
}
filters := []datastore.ListFilter{
{
Field: "ProjectId",
Operator: "==",
Value: claims.Role.ProjectId,
},
}
if o := req.Options; o != nil {
if o.Enabled != nil {
filters = append(filters, datastore.ListFilter{
Field: "Disabled",
Operator: "==",
Value: !o.Enabled.GetValue(),
})
}
// Allowing multiple so that it can do In Query later.
// Currently only the first value is used.
if len(o.Kinds) > 0 {
filters = append(filters, datastore.ListFilter{
Field: "Kind",
Operator: "==",
Value: o.Kinds[0],
})
}
if len(o.SyncStatuses) > 0 {
filters = append(filters, datastore.ListFilter{
Field: "SyncState.Status",
Operator: "==",
Value: o.SyncStatuses[0],
})
}
if len(o.EnvIds) > 0 {
filters = append(filters, datastore.ListFilter{
Field: "EnvId",
Operator: "==",
Value: o.EnvIds[0],
})
}
}
apps, err := a.applicationStore.ListApplications(ctx, datastore.ListOptions{
Filters: filters,
Orders: orders,
})
if err != nil {
a.logger.Error("failed to get applications", zap.Error(err))
return nil, status.Error(codes.Internal, "Failed to get applications")
}
return &webservice.ListApplicationsResponse{
Applications: apps,
}, nil
}
func (a *WebAPI) SyncApplication(ctx context.Context, req *webservice.SyncApplicationRequest) (*webservice.SyncApplicationResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
app, err := getApplication(ctx, a.applicationStore, req.ApplicationId, a.logger)
if err != nil {
return nil, err
}
if claims.Role.ProjectId != app.ProjectId {
return nil, status.Error(codes.InvalidArgument, "Requested application does not belong to your project")
}
cmd := model.Command{
Id: uuid.New().String(),
PipedId: app.PipedId,
ApplicationId: app.Id,
Type: model.Command_SYNC_APPLICATION,
Commander: claims.Subject,
SyncApplication: &model.Command_SyncApplication{
ApplicationId: app.Id,
SyncStrategy: req.SyncStrategy,
},
}
if err := addCommand(ctx, a.commandStore, &cmd, a.logger); err != nil {
return nil, err
}
return &webservice.SyncApplicationResponse{
CommandId: cmd.Id,
}, nil
}
func (a *WebAPI) GetApplication(ctx context.Context, req *webservice.GetApplicationRequest) (*webservice.GetApplicationResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
app, err := getApplication(ctx, a.applicationStore, req.ApplicationId, a.logger)
if err != nil {
return nil, err
}
if app.ProjectId != claims.Role.ProjectId {
return nil, status.Error(codes.InvalidArgument, "Requested application does not belong to your project")
}
return &webservice.GetApplicationResponse{
Application: app,
}, nil
}
func (a *WebAPI) GenerateApplicationSealedSecret(ctx context.Context, req *webservice.GenerateApplicationSealedSecretRequest) (*webservice.GenerateApplicationSealedSecretResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
piped, err := getPiped(ctx, a.pipedStore, req.PipedId, a.logger)
if err != nil {
return nil, err
}
if err := a.validatePipedBelongsToProject(ctx, req.PipedId, claims.Role.ProjectId); err != nil {
return nil, err
}
sse := piped.SealedSecretEncryption
if sse == nil {
return nil, status.Error(codes.FailedPrecondition, "The piped does not contain the encryption configuration")
}
data := req.Data
if req.Base64Encoding {
data = base64.StdEncoding.EncodeToString([]byte(data))
}
var enc encrypter
switch model.SealedSecretManagementType(sse.Type) {
case model.SealedSecretManagementSealingKey:
if sse.PublicKey == "" {
return nil, status.Error(codes.FailedPrecondition, "The piped does not contain a public key")
}
enc, err = crypto.NewHybridEncrypter(sse.PublicKey)
if err != nil {
a.logger.Error("failed to initialize the crypter", zap.Error(err))
return nil, status.Error(codes.FailedPrecondition, "Failed to initialize the encrypter")
}
default:
return nil, status.Error(codes.FailedPrecondition, "The piped does not contain a valid encryption type")
}
encryptedText, err := enc.Encrypt(data)
if err != nil {
a.logger.Error("failed to encrypt the secret", zap.Error(err))
return nil, status.Error(codes.FailedPrecondition, "Failed to encrypt the secret")
}
return &webservice.GenerateApplicationSealedSecretResponse{
Data: encryptedText,
}, nil
}
// validateAppBelongsToProject checks if the given application belongs to the given project.
// It returns an error unless the application belongs to the project.
func (a *WebAPI) validateAppBelongsToProject(ctx context.Context, appID, projectID string) error {
pid, err := a.appProjectCache.Get(appID)
if err == nil {
if pid != projectID {
return status.Error(codes.PermissionDenied, "Requested application doesn't belong to the project you logged in")
}
return nil
}
app, err := getApplication(ctx, a.applicationStore, appID, a.logger)
if err != nil {
return err
}
a.appProjectCache.Put(appID, app.ProjectId)
if app.ProjectId != projectID {
return status.Error(codes.PermissionDenied, "Requested application doesn't belong to the project you logged in")
}
return nil
}
func (a *WebAPI) ListDeployments(ctx context.Context, req *webservice.ListDeploymentsRequest) (*webservice.ListDeploymentsResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
orders := []datastore.Order{
{
Field: "UpdatedAt",
Direction: datastore.Desc,
},
}
filters := []datastore.ListFilter{
{
Field: "ProjectId",
Operator: "==",
Value: claims.Role.ProjectId,
},
}
if o := req.Options; o != nil {
// Allowing multiple so that it can do In Query later.
// Currently only the first value is used.
if len(o.Statuses) > 0 {
filters = append(filters, datastore.ListFilter{
Field: "Status",
Operator: "==",
Value: o.Statuses[0],
})
}
if len(o.Kinds) > 0 {
filters = append(filters, datastore.ListFilter{
Field: "Kind",
Operator: "==",
Value: o.Kinds[0],
})
}
if len(o.ApplicationIds) > 0 {
filters = append(filters, datastore.ListFilter{
Field: "ApplicationId",
Operator: "==",
Value: o.ApplicationIds[0],
})
}
if len(o.EnvIds) > 0 {
filters = append(filters, datastore.ListFilter{
Field: "EnvId",
Operator: "==",
Value: o.EnvIds[0],
})
}
if o.MaxUpdatedAt != 0 {
filters = append(filters, datastore.ListFilter{
Field: "UpdatedAt",
Operator: "<=",
Value: o.MaxUpdatedAt,
})
}
}
deployments, err := a.deploymentStore.ListDeployments(ctx, datastore.ListOptions{
Filters: filters,
Orders: orders,
PageSize: int(req.PageSize),
})
if err != nil {
a.logger.Error("failed to get deployments", zap.Error(err))
return nil, status.Error(codes.Internal, "Failed to get deployments")
}
return &webservice.ListDeploymentsResponse{
Deployments: deployments,
}, nil
}
func (a *WebAPI) GetDeployment(ctx context.Context, req *webservice.GetDeploymentRequest) (*webservice.GetDeploymentResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
deployment, err := getDeployment(ctx, a.deploymentStore, req.DeploymentId, a.logger)
if err != nil {
return nil, err
}
if claims.Role.ProjectId != deployment.ProjectId {
return nil, status.Error(codes.InvalidArgument, "Requested deployment does not belong to your project")
}
return &webservice.GetDeploymentResponse{
Deployment: deployment,
}, nil
}
// validateDeploymentBelongsToProject checks if the given deployment belongs to the given project.
// It returns an error unless the deployment belongs to the project.
func (a *WebAPI) validateDeploymentBelongsToProject(ctx context.Context, deploymentID, projectID string) error {
pid, err := a.deploymentProjectCache.Get(deploymentID)
if err == nil {
if pid != projectID {
return status.Error(codes.PermissionDenied, "Requested deployment doesn't belong to the project you logged in")
}
return nil
}
deployment, err := getDeployment(ctx, a.deploymentStore, deploymentID, a.logger)
if err != nil {
return err
}
a.deploymentProjectCache.Put(deploymentID, deployment.ProjectId)
if deployment.ProjectId != projectID {
return status.Error(codes.PermissionDenied, "Requested deployment doesn't belong to the project you logged in")
}
return nil
}
func (a *WebAPI) GetStageLog(ctx context.Context, req *webservice.GetStageLogRequest) (*webservice.GetStageLogResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
if err := a.validateDeploymentBelongsToProject(ctx, req.DeploymentId, claims.Role.ProjectId); err != nil {
return nil, err
}
blocks, completed, err := a.stageLogStore.FetchLogs(ctx, req.DeploymentId, req.StageId, req.RetriedCount, req.OffsetIndex)
if errors.Is(err, stagelogstore.ErrNotFound) {
return nil, status.Error(codes.NotFound, "The stage log not found")
}
if err != nil {
a.logger.Error("failed to get stage logs", zap.Error(err))
return nil, status.Error(codes.Internal, "Failed to get stage logs")
}
return &webservice.GetStageLogResponse{
Blocks: blocks,
Completed: completed,
}, nil
}
func (a *WebAPI) CancelDeployment(ctx context.Context, req *webservice.CancelDeploymentRequest) (*webservice.CancelDeploymentResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
deployment, err := getDeployment(ctx, a.deploymentStore, req.DeploymentId, a.logger)
if err != nil {
return nil, err
}
if claims.Role.ProjectId != deployment.ProjectId {
return nil, status.Error(codes.InvalidArgument, "Requested deployment does not belong to your project")
}
if model.IsCompletedDeployment(deployment.Status) {
return nil, status.Errorf(codes.FailedPrecondition, "could not cancel the deployment because it was already completed")
}
cmd := model.Command{
Id: uuid.New().String(),
PipedId: deployment.PipedId,
ApplicationId: deployment.ApplicationId,
DeploymentId: req.DeploymentId,
Type: model.Command_CANCEL_DEPLOYMENT,
Commander: claims.Subject,
CancelDeployment: &model.Command_CancelDeployment{
DeploymentId: req.DeploymentId,
ForceRollback: req.ForceRollback,
ForceNoRollback: req.ForceNoRollback,
},
}
if err := addCommand(ctx, a.commandStore, &cmd, a.logger); err != nil {
return nil, err
}
return &webservice.CancelDeploymentResponse{
CommandId: cmd.Id,
}, nil
}
func (a *WebAPI) ApproveStage(ctx context.Context, req *webservice.ApproveStageRequest) (*webservice.ApproveStageResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
deployment, err := getDeployment(ctx, a.deploymentStore, req.DeploymentId, a.logger)
if err != nil {
return nil, err
}
if err := a.validateDeploymentBelongsToProject(ctx, req.DeploymentId, claims.Role.ProjectId); err != nil {
return nil, err
}
stage, ok := deployment.StageStatusMap()[req.StageId]
if !ok {
return nil, status.Error(codes.FailedPrecondition, "The stage was not found in the deployment")
}
if model.IsCompletedStage(stage) {
return nil, status.Errorf(codes.FailedPrecondition, "Could not approve the stage because it was already completed")
}
commandID := uuid.New().String()
cmd := model.Command{
Id: commandID,
PipedId: deployment.PipedId,
ApplicationId: deployment.ApplicationId,
DeploymentId: req.DeploymentId,
StageId: req.StageId,
Type: model.Command_APPROVE_STAGE,
Commander: claims.Subject,
ApproveStage: &model.Command_ApproveStage{
DeploymentId: req.DeploymentId,
StageId: req.StageId,
},
}
if err := addCommand(ctx, a.commandStore, &cmd, a.logger); err != nil {
return nil, err
}
return &webservice.ApproveStageResponse{
CommandId: commandID,
}, nil
}
func (a *WebAPI) GetApplicationLiveState(ctx context.Context, req *webservice.GetApplicationLiveStateRequest) (*webservice.GetApplicationLiveStateResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
if err := a.validateAppBelongsToProject(ctx, req.ApplicationId, claims.Role.ProjectId); err != nil {
return nil, err
}
snapshot, err := a.applicationLiveStateStore.GetStateSnapshot(ctx, req.ApplicationId)
if err != nil {
a.logger.Error("failed to get application live state", zap.Error(err))
return nil, status.Error(codes.Internal, "Failed to get application live state")
}
return &webservice.GetApplicationLiveStateResponse{
Snapshot: snapshot,
}, nil
}
// GetProject gets the specified project without sensitive data.
func (a *WebAPI) GetProject(ctx context.Context, req *webservice.GetProjectRequest) (*webservice.GetProjectResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
project, err := a.getProject(ctx, claims.Role.ProjectId)
if err != nil {
return nil, err
}
// Redact all sensitive data inside project message before sending to the client.
project.RedactSensitiveData()
return &webservice.GetProjectResponse{
Project: project,
}, nil
}
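// getProject returns the project from the control-plane configuration when it is
// defined there; otherwise it is loaded from the project datastore.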
func (a *WebAPI) getProject(ctx context.Context, projectID string) (*model.Project, error) {
if p, ok := a.projectsInConfig[projectID]; ok {
return &model.Project{
Id: p.Id,
Desc: p.Desc,
StaticAdmin: &model.ProjectStaticUser{
Username: p.StaticAdmin.Username,
PasswordHash: p.StaticAdmin.PasswordHash,
},
}, nil
}
project, err := a.projectStore.GetProject(ctx, projectID)
if errors.Is(err, datastore.ErrNotFound) {
return nil, status.Error(codes.NotFound, "The project is not found")
}
if err != nil {
a.logger.Error("failed to get project", zap.Error(err))
return nil, status.Error(codes.Internal, "Failed to get project")
}
return project, nil
}
// UpdateProjectStaticAdmin updates the static admin user settings.
func (a *WebAPI) UpdateProjectStaticAdmin(ctx context.Context, req *webservice.UpdateProjectStaticAdminRequest) (*webservice.UpdateProjectStaticAdminResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
if _, ok := a.projectsInConfig[claims.Role.ProjectId]; ok {
return nil, status.Error(codes.FailedPrecondition, "Failed to update a debug project specified in the control-plane configuration")
}
if err := a.projectStore.UpdateProjectStaticAdmin(ctx, claims.Role.ProjectId, req.Username, req.Password); err != nil {
a.logger.Error("failed to update static admin", zap.Error(err))
return nil, status.Error(codes.Internal, "Failed to update static admin")
}
return &webservice.UpdateProjectStaticAdminResponse{}, nil
}
// EnableStaticAdmin enables static admin login.
func (a *WebAPI) EnableStaticAdmin(ctx context.Context, req *webservice.EnableStaticAdminRequest) (*webservice.EnableStaticAdminResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
if _, ok := a.projectsInConfig[claims.Role.ProjectId]; ok {
return nil, status.Error(codes.FailedPrecondition, "Failed to update a debug project specified in the control-plane configuration")
}
if err := a.projectStore.EnableStaticAdmin(ctx, claims.Role.ProjectId); err != nil {
a.logger.Error("failed to enable static admin login", zap.Error(err))
return nil, status.Error(codes.Internal, "Failed to enable static admin login")
}
return &webservice.EnableStaticAdminResponse{}, nil
}
// DisableStaticAdmin disables static admin login.
func (a *WebAPI) DisableStaticAdmin(ctx context.Context, req *webservice.DisableStaticAdminRequest) (*webservice.DisableStaticAdminResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
if _, ok := a.projectsInConfig[claims.Role.ProjectId]; ok {
return nil, status.Error(codes.FailedPrecondition, "Failed to update a debug project specified in the control-plane configuration")
}
if err := a.projectStore.DisableStaticAdmin(ctx, claims.Role.ProjectId); err != nil {
a.logger.Error("failed to disable static admin login", zap.Error(err))
return nil, status.Error(codes.Internal, "Failed to disable static admin login")
}
return &webservice.DisableStaticAdminResponse{}, nil
}
// UpdateProjectSSOConfig updates the sso settings.
func (a *WebAPI) UpdateProjectSSOConfig(ctx context.Context, req *webservice.UpdateProjectSSOConfigRequest) (*webservice.UpdateProjectSSOConfigResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
if _, ok := a.projectsInConfig[claims.Role.ProjectId]; ok {
return nil, status.Error(codes.FailedPrecondition, "Failed to update a debug project specified in the control-plane configuration")
}
if err := req.Sso.Encrypt(a.encrypter); err != nil {
a.logger.Error("failed to encrypt sensitive data in sso configurations", zap.Error(err))
return nil, status.Error(codes.Internal, "Failed to encrypt sensitive data in sso configurations")
}
if err := a.projectStore.UpdateProjectSSOConfig(ctx, claims.Role.ProjectId, req.Sso); err != nil {
a.logger.Error("failed to update project single sign on settings", zap.Error(err))
return nil, status.Error(codes.Internal, "Failed to update project single sign on settings")
}
return &webservice.UpdateProjectSSOConfigResponse{}, nil
}
// UpdateProjectRBACConfig updates the RBAC settings.
func (a *WebAPI) UpdateProjectRBACConfig(ctx context.Context, req *webservice.UpdateProjectRBACConfigRequest) (*webservice.UpdateProjectRBACConfigResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
if _, ok := a.projectsInConfig[claims.Role.ProjectId]; ok {
return nil, status.Error(codes.FailedPrecondition, "Failed to update a debug project specified in the control-plane configuration")
}
if err := a.projectStore.UpdateProjectRBACConfig(ctx, claims.Role.ProjectId, req.Rbac); err != nil {
a.logger.Error("failed to update project RBAC settings", zap.Error(err))
return nil, status.Error(codes.Internal, "Failed to update project RBAC settings")
}
return &webservice.UpdateProjectRBACConfigResponse{}, nil
}
// GetMe gets information about the current user.
func (a *WebAPI) GetMe(ctx context.Context, req *webservice.GetMeRequest) (*webservice.GetMeResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
return &webservice.GetMeResponse{
Subject: claims.Subject,
AvatarUrl: claims.AvatarURL,
ProjectId: claims.Role.ProjectId,
ProjectRole: claims.Role.ProjectRole,
}, nil
}
func (a *WebAPI) GetCommand(ctx context.Context, req *webservice.GetCommandRequest) (*webservice.GetCommandResponse, error) {
cmd, err := getCommand(ctx, a.commandStore, req.CommandId, a.logger)
if err != nil {
return nil, err
}
// TODO: Add check if requested command belongs to logged-in project, after adding project id field to model.Command.
return &webservice.GetCommandResponse{
Command: cmd,
}, nil
}
func (a *WebAPI) ListDeploymentConfigTemplates(ctx context.Context, req *webservice.ListDeploymentConfigTemplatesRequest) (*webservice.ListDeploymentConfigTemplatesResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
app, err := getApplication(ctx, a.applicationStore, req.ApplicationId, a.logger)
if err != nil {
return nil, err
}
if err := a.validateAppBelongsToProject(ctx, req.ApplicationId, claims.Role.ProjectId); err != nil {
return nil, err
}
var templates []*webservice.DeploymentConfigTemplate
switch app.Kind {
case model.ApplicationKind_KUBERNETES:
templates = k8sDeploymentConfigTemplates
case model.ApplicationKind_TERRAFORM:
templates = terraformDeploymentConfigTemplates
case model.ApplicationKind_CROSSPLANE:
templates = crossplaneDeploymentConfigTemplates
case model.ApplicationKind_LAMBDA:
templates = lambdaDeploymentConfigTemplates
case model.ApplicationKind_CLOUDRUN:
templates = cloudrunDeploymentConfigTemplates
default:
return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("Unknown application kind %v", app.Kind))
}
for _, t := range templates {
g := app.GetGitPath()
filename := g.ConfigFilename
if filename == "" {
filename = ".pipe.yaml"
}
t.FileCreationUrl, err = git.MakeFileCreationURL(g.Repo.Remote, g.Path, g.Repo.Branch, filename, t.Content)
if err != nil {
a.logger.Error("failed to make a link to create a file", zap.Error(err))
return nil, status.Error(codes.Internal, "Failed to make a link to create a file")
}
}
if len(req.Labels) == 0 {
return &webservice.ListDeploymentConfigTemplatesResponse{Templates: templates}, nil
}
filtered := filterDeploymentConfigTemplates(templates, req.Labels)
return &webservice.ListDeploymentConfigTemplatesResponse{Templates: filtered}, nil
}
// Returns the templates, among the given ones, that have all the specified labels.
func filterDeploymentConfigTemplates(templates []*webservice.DeploymentConfigTemplate, labels []webservice.DeploymentConfigTemplateLabel) []*webservice.DeploymentConfigTemplate {
filtered := make([]*webservice.DeploymentConfigTemplate, 0, len(templates))
L:
for _, template := range templates {
for _, l := range labels {
if !template.HasLabel(l) {
continue L
}
}
filtered = append(filtered, template)
}
return filtered
}
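// GenerateAPIKey generates a new API key for the logged-in project and returns the generated key string.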
func (a *WebAPI) GenerateAPIKey(ctx context.Context, req *webservice.GenerateAPIKeyRequest) (*webservice.GenerateAPIKeyResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
id := uuid.New().String()
key, hash, err := model.GenerateAPIKey(id)
if err != nil {
a.logger.Error("failed to generate API key", zap.Error(err))
return nil, status.Error(codes.Internal, "Failed to generate API key")
}
apiKey := model.APIKey{
Id: id,
Name: req.Name,
KeyHash: hash,
ProjectId: claims.Role.ProjectId,
Role: req.Role,
Creator: claims.Subject,
}
err = a.apiKeyStore.AddAPIKey(ctx, &apiKey)
if errors.Is(err, datastore.ErrAlreadyExists) {
return nil, status.Error(codes.AlreadyExists, "The API key already exists")
}
if err != nil {
a.logger.Error("failed to create API key", zap.Error(err))
return nil, status.Error(codes.Internal, "Failed to create API key")
}
return &webservice.GenerateAPIKeyResponse{
Key: key,
}, nil
}
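// DisableAPIKey disables the API key specified by the request ID in the logged-in project.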
func (a *WebAPI) DisableAPIKey(ctx context.Context, req *webservice.DisableAPIKeyRequest) (*webservice.DisableAPIKeyResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
if err := a.apiKeyStore.DisableAPIKey(ctx, req.Id, claims.Role.ProjectId); err != nil {
switch err {
case datastore.ErrNotFound:
return nil, status.Error(codes.InvalidArgument, "The API key is not found")
case datastore.ErrInvalidArgument:
return nil, status.Error(codes.InvalidArgument, "Invalid value for update")
default:
a.logger.Error("failed to disable the API key",
zap.String("apikey-id", req.Id),
zap.Error(err),
)
return nil, status.Error(codes.Internal, "Failed to disable the API key")
}
}
return &webservice.DisableAPIKeyResponse{}, nil
}
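// ListAPIKeys lists the API keys of the logged-in project, optionally filtered by their enabled state, with sensitive data redacted.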
func (a *WebAPI) ListAPIKeys(ctx context.Context, req *webservice.ListAPIKeysRequest) (*webservice.ListAPIKeysResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
opts := datastore.ListOptions{
Filters: []datastore.ListFilter{
{
Field: "ProjectId",
Operator: "==",
Value: claims.Role.ProjectId,
},
},
}
if req.Options != nil {
if req.Options.Enabled != nil {
opts.Filters = append(opts.Filters, datastore.ListFilter{
Field: "Disabled",
Operator: "==",
Value: !req.Options.Enabled.GetValue(),
})
}
}
apiKeys, err := a.apiKeyStore.ListAPIKeys(ctx, opts)
if err != nil {
a.logger.Error("failed to list API keys", zap.Error(err))
return nil, status.Error(codes.Internal, "Failed to list API keys")
}
// Redact all sensitive data inside API key before sending to the client.
for i := range apiKeys {
apiKeys[i].RedactSensitiveData()
}
return &webservice.ListAPIKeysResponse{
Keys: apiKeys,
}, nil
}
// GetInsightData returns the accumulated insight data.
func (a *WebAPI) GetInsightData(ctx context.Context, req *webservice.GetInsightDataRequest) (*webservice.GetInsightDataResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
count := int(req.DataPointCount)
from := time.Unix(req.RangeFrom, 0)
chunks, err := insight.LoadChunksFromCache(a.insightCache, claims.Role.ProjectId, req.ApplicationId, req.MetricsKind, req.Step, from, count)
if err != nil {
a.logger.Error("failed to load chunks from cache", zap.Error(err))
chunks, err = a.insightstore.LoadChunks(ctx, claims.Role.ProjectId, req.ApplicationId, req.MetricsKind, req.Step, from, count)
if err != nil {
a.logger.Error("failed to load chunks from insightstore", zap.Error(err))
return nil, err
}
if err := insight.PutChunksToCache(a.insightCache, chunks); err != nil {
a.logger.Error("failed to put chunks to cache", zap.Error(err))
}
}
idp, err := chunks.ExtractDataPoints(req.Step, from, count)
if err != nil {
a.logger.Error("failed to extract data points from chunks", zap.Error(err))
}
var updateAt int64
for _, c := range chunks {
accumulatedTo := c.GetAccumulatedTo()
if accumulatedTo > updateAt {
updateAt = accumulatedTo
}
}
return &webservice.GetInsightDataResponse{
UpdatedAt: updateAt,
DataPoints: idp,
}, nil
}
| 1 | 14,065 | nit: Remove "insightfilestore". I think "insightstore" is good enough and as I see we are using that name at other places too. | pipe-cd-pipe | go |
@@ -5,8 +5,8 @@ module RSpec::Core::Formatters
BisectProgressFormatter = Class.new(remove_const :BisectProgressFormatter) do
RSpec::Core::Formatters.register self
- def bisect_round_finished(notification)
- return super unless notification.round == 3
+ def bisect_round_started(notification)
+ return super unless @round_count == 3
Process.kill("INT", Process.pid)
# Process.kill is not a synchronous call, so to ensure the output | 1 | require 'rspec/core'
RSpec::Support.require_rspec_core "formatters/bisect_progress_formatter"
module RSpec::Core::Formatters
BisectProgressFormatter = Class.new(remove_const :BisectProgressFormatter) do
RSpec::Core::Formatters.register self
def bisect_round_finished(notification)
return super unless notification.round == 3
Process.kill("INT", Process.pid)
# Process.kill is not a synchronous call, so to ensure the output
# below aborts at a deterministic place, we need to block here.
# The sleep will be interrupted by the signal once the OS sends it.
# For the most part, this is only needed on JRuby, but we saw
# the asynchronous behavior on an MRI 2.0 travis build as well.
sleep 5
end
end
end
| 1 | 15,218 | It looks like you've removed `bisect_round_finished` but kept `bisect_round_started`. Seems a little odd that they aren't paired. Not sure if there's anything to do about that though... | rspec-rspec-core | rb |
@@ -29,6 +29,8 @@ from qutebrowser.mainwindow.statusbar.percentage import Percentage
def percentage(qtbot):
"""Fixture providing a Percentage widget."""
widget = Percentage()
+ # Force immediate update of percentage widget
+ widget.set_perc.throttle_set(-1)
qtbot.add_widget(widget)
return widget
| 1 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2018 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Test Percentage widget."""
import pytest
from qutebrowser.mainwindow.statusbar.percentage import Percentage
@pytest.fixture
def percentage(qtbot):
"""Fixture providing a Percentage widget."""
widget = Percentage()
qtbot.add_widget(widget)
return widget
@pytest.mark.parametrize('y, raw, expected', [
(0, False, '[top]'),
(100, False, '[bot]'),
(75, False, '[75%]'),
(25, False, '[25%]'),
(5, False, '[05%]'),
(None, False, '[???]'),
(0, True, '[top]'),
(100, True, '[bot]'),
(75, True, '[75]'),
(25, True, '[25]'),
(5, True, '[05]'),
(None, True, '[???]'),
])
def test_percentage_text(percentage, y, raw, expected):
"""Test text displayed by the widget based on the y position of a page.
Args:
y: y position of the page as an int in the range [0, 100].
parametrized.
expected: expected text given y position. parametrized.
"""
percentage.raw = raw
percentage.set_perc(x=None, y=y)
assert percentage.text() == expected
def test_tab_change(percentage, fake_web_tab):
"""Make sure the percentage gets changed correctly when switching tabs."""
percentage.set_perc(x=None, y=10)
tab = fake_web_tab(scroll_pos_perc=(0, 20))
percentage.on_tab_changed(tab)
assert percentage.text() == '[20%]'
| 1 | 22,923 | I'm not sure how to fix the lint and mypy warnings on this line. I could just ignore them, but I feel like the linter should know this exists. | qutebrowser-qutebrowser | py |
@@ -1,7 +1,7 @@
'use strict';
const Aspect = {
- SKIP_SESSION: Symbol('SKIP_SESSION')
+ SKIP_SESSION: 'SKIP_SESSION'
};
/** | 1 | 'use strict';
const Aspect = {
SKIP_SESSION: Symbol('SKIP_SESSION')
};
/**
* This class acts as a parent class for any operation and is responsible for setting this.options,
* as well as setting and getting a session.
* Additionally, this class implements `hasAspect`, which determines whether an operation has
* a specific aspect, including `SKIP_SESSION` and other aspects to encode retryability
* and other functionality.
*/
class OperationBase {
constructor(options) {
this.options = options || {};
}
hasAspect(aspect) {
if (this.constructor.aspects == null) {
return false;
}
return this.constructor.aspects.has(aspect);
}
set session(session) {
Object.assign(this.options, { session });
}
get session() {
return this.options.session;
}
clearSession() {
delete this.options.session;
}
execute() {
throw new TypeError('`execute` must be implemented for OperationBase subclasses');
}
}
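// Attaches the given set of aspects to an operation class as a non-writable static 'aspects' property.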
function defineAspects(operation, aspects) {
aspects = new Set(aspects);
Object.defineProperty(operation, 'aspects', {
value: aspects,
writable: false
});
return aspects;
}
module.exports = {
Aspect,
defineAspects,
OperationBase
};
| 1 | 15,369 | why remove `Symbol`? | mongodb-node-mongodb-native | js |
@@ -121,7 +121,7 @@ function getPunctuationRegExp() {
* Reference: https://en.wikipedia.org/wiki/Supplemental_Punctuation
* -> \u2E00-\u2E7F Reference
*/
- return /[\u2000-\u206F\u2E00-\u2E7F\\'!"#$%&()*+,\-.\/:;<=>?@\[\]^_`{|}~]/g;
+ return /[\u2000-\u206F\u2E00-\u2E7F\\'!"#$%&£¢¥§€()*+,\-.\/:;<=>?@\[\]^_`{|}~±]/g;
}
/** | 1 | /* global text */
/**
* Determine if a given string contains unicode characters, specified in options
*
* @method hasUnicode
* @memberof axe.commons.text
* @instance
* @param {String} str string to verify
* @param {Object} options config containing which unicode character sets to verify
* @property {Boolean} options.emoji verify emoji unicode
* @property {Boolean} options.nonBmp verify nonBmp unicode
* @property {Boolean} options.punctuations verify punctuations unicode
* @returns {Boolean}
*/
text.hasUnicode = function hasUnicode(str, options) {
const { emoji, nonBmp, punctuations } = options;
if (emoji) {
return axe.imports.emojiRegexText().test(str);
}
if (nonBmp) {
return (
getUnicodeNonBmpRegExp().test(str) ||
getSupplementaryPrivateUseRegExp().test(str)
);
}
if (punctuations) {
return getPunctuationRegExp().test(str);
}
return false;
};
/**
* Remove specified type(s) unicode characters
*
* @method removeUnicode
* @memberof axe.commons.text
* @instance
* @param {String} str string to operate on
* @param {Object} options config containing which unicode character sets to remove
* @property {Boolean} options.emoji remove emoji unicode
* @property {Boolean} options.nonBmp remove nonBmp unicode
* @property {Boolean} options.punctuations remove punctuations unicode
* @returns {String}
*/
text.removeUnicode = function removeUnicode(str, options) {
const { emoji, nonBmp, punctuations } = options;
if (emoji) {
str = str.replace(axe.imports.emojiRegexText(), '');
}
if (nonBmp) {
str = str.replace(getUnicodeNonBmpRegExp(), '');
str = str.replace(getSupplementaryPrivateUseRegExp(), '');
}
if (punctuations) {
str = str.replace(getPunctuationRegExp(), '');
}
return str;
};
/**
* Regex for matching unicode values out of Basic Multilingual Plane (BMP)
* Reference:
* - https://github.com/mathiasbynens/regenerate
* - https://unicode-table.com/
* - https://mathiasbynens.be/notes/javascript-unicode
*
* @returns {RegExp}
*/
function getUnicodeNonBmpRegExp() {
/**
* Regex for matching astral plane unicode
* - http://kourge.net/projects/regexp-unicode-block
*/
/**
* Notes on various unicode planes being used in the regex below:
* '\u1D00-\u1D7F' Phonetic Extensions
* '\u1D80-\u1DBF' Phonetic Extensions Supplement
* '\u1DC0-\u1DFF' Combining Diacritical Marks Supplement
* '\u20A0-\u20CF' Currency symbols
* '\u20D0-\u20FF' Combining Diacritical Marks for Symbols
* '\u2100-\u214F' Letter like symbols
* '\u2150-\u218F' Number forms (eg: Roman numbers)
* '\u2190-\u21FF' Arrows
* '\u2200-\u22FF' Mathematical operators
* '\u2300-\u23FF' Misc Technical
* '\u2400-\u243F' Control pictures
* '\u2440-\u245F' OCR
* '\u2460-\u24FF' Enclosed alpha numerics
* '\u2500-\u257F' Box Drawing
* '\u2580-\u259F' Block Elements
* '\u25A0-\u25FF' Geometric Shapes
* '\u2600-\u26FF' Misc Symbols
* '\u2700-\u27BF' Dingbats
* '\uE000-\uF8FF' Private Use
*
* Note: plane '\u2000-\u206F' used for General punctuation is excluded as it is handled in -> getPunctuationRegExp
*/
return /[\u1D00-\u1D7F\u1D80-\u1DBF\u1DC0-\u1DFF\u20A0-\u20CF\u20D0-\u20FF\u2100-\u214F\u2150-\u218F\u2190-\u21FF\u2200-\u22FF\u2300-\u23FF\u2400-\u243F\u2440-\u245F\u2460-\u24FF\u2500-\u257F\u2580-\u259F\u25A0-\u25FF\u2600-\u26FF\u2700-\u27BF\uE000-\uF8FF]/g;
}
/**
* Get regular expression for matching punctuations
*
* @returns {RegExp}
*/
function getPunctuationRegExp() {
/**
* Reference: http://kunststube.net/encoding/
* US-ASCII
* -> !"#$%&'()*+,-./:;<=>?@[\]^_`{|}~
*
* General Punctuation block
* -> \u2000-\u206F
*
* Supplemental Punctuation block
* Reference: https://en.wikipedia.org/wiki/Supplemental_Punctuation
* -> \u2E00-\u2E7F Reference
*/
return /[\u2000-\u206F\u2E00-\u2E7F\\'!"#$%&()*+,\-.\/:;<=>?@\[\]^_`{|}~]/g;
}
/**
* Get regular expression for supplementary private use
*
* @returns {RegExp}
*/
function getSupplementaryPrivateUseRegExp() {
// 1. High surrogate area (https://www.unicode.org/charts/PDF/UD800.pdf)
// 2. Low surrogate area (https://www.unicode.org/charts/PDF/UDC00.pdf)
// 3. Supplementary private use area A (https://www.unicode.org/charts/PDF/UF0000.pdf)
//
// 1 2 3
// ┏━━━━━━┻━━━━━━┓┏━━━━━━┻━━━━━━┓ ┏━━━━━━━━━━━━━━━━━━━━━━━━━┻━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓
return /[\uDB80-\uDBBF][\uDC00-\uDFFD]|(?:[\uDB80-\uDBBE][\uDC00-\uDFFF]|\uDBBF[\uDC00-\uDFFD])/g;
}
| 1 | 15,408 | Why are currency signs considered punctuation? They technically fall under the BMP range for Currency Symbols `\u20A0-\u20CF` (covered in `getUnicodeNonBmpRegExp`, which is itself a misnomer) | dequelabs-axe-core | js |
@@ -71,6 +71,7 @@ setup(
# Metadata
author='Philippe BIONDI',
author_email='phil(at)secdev.org',
+ maintainer='Pierre LALET, Guillaume VALADON',
description='Scapy: interactive packet manipulation tool',
license='GPLv2',
url='http://www.secdev.org/projects/scapy', | 1 | #! /usr/bin/env python
"""
Distutils setup file for Scapy.
"""
from distutils import archive_util
from distutils import sysconfig
from distutils.core import setup
from distutils.command.sdist import sdist
import os
EZIP_HEADER = """#! /bin/sh
PYTHONPATH=$0/%s exec python -m scapy.__init__
"""
def make_ezipfile(base_name, base_dir, verbose=0, dry_run=0, **kwargs):
fname = archive_util.make_zipfile(base_name, base_dir, verbose, dry_run)
ofname = fname + ".old"
os.rename(fname, ofname)
of = open(ofname)
f = open(fname, "w")
f.write(EZIP_HEADER % base_dir)
while True:
data = of.read(8192)
if not data:
break
f.write(data)
f.close()
os.system("zip -A '%s'" % fname)
of.close()
os.unlink(ofname)
os.chmod(fname, 0o755)
return fname
archive_util.ARCHIVE_FORMATS["ezip"] = (
make_ezipfile, [], 'Executable ZIP file')
SCRIPTS = ['bin/scapy', 'bin/UTscapy']
# On Windows we also need additional batch files to run the above scripts
if os.name == "nt":
SCRIPTS += ['bin/scapy.bat', 'bin/UTscapy.bat']
setup(
name='scapy',
version=__import__('scapy').VERSION,
packages=[
'scapy',
'scapy/arch',
'scapy/arch/bpf',
'scapy/arch/windows',
'scapy/contrib',
'scapy/layers',
'scapy/layers/tls',
'scapy/layers/tls/crypto',
'scapy/modules',
'scapy/modules/krack',
'scapy/asn1',
'scapy/tools',
],
scripts=SCRIPTS,
data_files=[('share/man/man1', ["doc/scapy.1.gz"])],
package_data={
'scapy': ['VERSION'],
},
# Metadata
author='Philippe BIONDI',
author_email='phil(at)secdev.org',
description='Scapy: interactive packet manipulation tool',
license='GPLv2',
url='http://www.secdev.org/projects/scapy',
download_url='https://github.com/secdev/scapy/tarball/master',
keywords=["network"],
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Console",
"Intended Audience :: Developers",
"Intended Audience :: Information Technology",
"Intended Audience :: Science/Research",
"Intended Audience :: System Administrators",
"Intended Audience :: Telecommunications Industry",
"License :: OSI Approved :: GNU General Public License v2 (GPLv2)",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Topic :: Security",
"Topic :: System :: Networking",
"Topic :: System :: Networking :: Monitoring",
]
)
| 1 | 11,189 | You have forgotten a coma `,` | secdev-scapy | py |
@@ -35,7 +35,8 @@ def single_gpu_test(model, data_loader, show=False, out_dir=None):
img_show = mmcv.imresize(img_show, (ori_w, ori_h))
if out_dir:
- out_file = osp.join(out_dir, img_meta['filename'])
+ out_file = osp.join(out_dir,
+ osp.basename(img_meta['filename']))
else:
out_file = None
| 1 | import os.path as osp
import pickle
import shutil
import tempfile
import mmcv
import torch
import torch.distributed as dist
from mmcv.runner import get_dist_info
from mmdet.core import tensor2imgs
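# Run evaluation on a single GPU, optionally displaying results or saving rendered images to out_dir.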
def single_gpu_test(model, data_loader, show=False, out_dir=None):
model.eval()
results = []
dataset = data_loader.dataset
prog_bar = mmcv.ProgressBar(len(dataset))
for i, data in enumerate(data_loader):
with torch.no_grad():
result = model(return_loss=False, rescale=True, **data)
results.append(result)
if show or out_dir:
img_tensor = data['img'][0]
img_metas = data['img_metas'][0].data[0]
imgs = tensor2imgs(img_tensor, **img_metas[0]['img_norm_cfg'])
assert len(imgs) == len(img_metas)
for img, img_meta in zip(imgs, img_metas):
h, w, _ = img_meta['img_shape']
img_show = img[:h, :w, :]
ori_h, ori_w = img_meta['ori_shape'][:-1]
img_show = mmcv.imresize(img_show, (ori_w, ori_h))
if out_dir:
out_file = osp.join(out_dir, img_meta['filename'])
else:
out_file = None
model.module.show_result(
img_show, result, show=show, out_file=out_file)
batch_size = data['img'][0].size(0)
for _ in range(batch_size):
prog_bar.update()
return results
def multi_gpu_test(model, data_loader, tmpdir=None, gpu_collect=False):
"""Test model with multiple gpus.
This method tests model with multiple gpus and collects the results
under two different modes: gpu and cpu modes. By setting 'gpu_collect=True'
it encodes results to gpu tensors and use gpu communication for results
collection. On cpu mode it saves the results on different gpus to 'tmpdir'
and collects them by the rank 0 worker.
Args:
model (nn.Module): Model to be tested.
data_loader (nn.Dataloader): Pytorch data loader.
tmpdir (str): Path of directory to save the temporary results from
different gpus under cpu mode.
gpu_collect (bool): Option to use either gpu or cpu to collect results.
Returns:
list: The prediction results.
"""
model.eval()
results = []
dataset = data_loader.dataset
rank, world_size = get_dist_info()
if rank == 0:
prog_bar = mmcv.ProgressBar(len(dataset))
for i, data in enumerate(data_loader):
with torch.no_grad():
result = model(return_loss=False, rescale=True, **data)
results.append(result)
if rank == 0:
batch_size = (
len(data['img_meta']._data)
if 'img_meta' in data else data['img'][0].size(0))
for _ in range(batch_size * world_size):
prog_bar.update()
# collect results from all ranks
if gpu_collect:
results = collect_results_gpu(results, len(dataset))
else:
results = collect_results_cpu(results, len(dataset), tmpdir)
return results
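# Collect partial results from all ranks by dumping each part to a shared temporary directory
# and merging them on rank 0.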
def collect_results_cpu(result_part, size, tmpdir=None):
rank, world_size = get_dist_info()
# create a tmp dir if it is not specified
if tmpdir is None:
MAX_LEN = 512
# 32 is whitespace
dir_tensor = torch.full((MAX_LEN, ),
32,
dtype=torch.uint8,
device='cuda')
if rank == 0:
tmpdir = tempfile.mkdtemp()
tmpdir = torch.tensor(
bytearray(tmpdir.encode()), dtype=torch.uint8, device='cuda')
dir_tensor[:len(tmpdir)] = tmpdir
dist.broadcast(dir_tensor, 0)
tmpdir = dir_tensor.cpu().numpy().tobytes().decode().rstrip()
else:
mmcv.mkdir_or_exist(tmpdir)
# dump the part result to the dir
mmcv.dump(result_part, osp.join(tmpdir, f'part_{rank}.pkl'))
dist.barrier()
# collect all parts
if rank != 0:
return None
else:
# load results of all parts from tmp dir
part_list = []
for i in range(world_size):
part_file = osp.join(tmpdir, f'part_{i}.pkl')
part_list.append(mmcv.load(part_file))
# sort the results
ordered_results = []
for res in zip(*part_list):
ordered_results.extend(list(res))
# the dataloader may pad some samples
ordered_results = ordered_results[:size]
# remove tmp dir
shutil.rmtree(tmpdir)
return ordered_results
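# Collect partial results from all ranks by pickling them into padded CUDA tensors, gathering
# them with all_gather, and merging them on rank 0.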
def collect_results_gpu(result_part, size):
rank, world_size = get_dist_info()
# dump result part to tensor with pickle
part_tensor = torch.tensor(
bytearray(pickle.dumps(result_part)), dtype=torch.uint8, device='cuda')
# gather all result part tensor shape
shape_tensor = torch.tensor(part_tensor.shape, device='cuda')
shape_list = [shape_tensor.clone() for _ in range(world_size)]
dist.all_gather(shape_list, shape_tensor)
# padding result part tensor to max length
shape_max = torch.tensor(shape_list).max()
part_send = torch.zeros(shape_max, dtype=torch.uint8, device='cuda')
part_send[:shape_tensor[0]] = part_tensor
part_recv_list = [
part_tensor.new_zeros(shape_max) for _ in range(world_size)
]
# gather all result part
dist.all_gather(part_recv_list, part_send)
if rank == 0:
part_list = []
for recv, shape in zip(part_recv_list, shape_list):
part_list.append(
pickle.loads(recv[:shape[0]].cpu().numpy().tobytes()))
# sort the results
ordered_results = []
for res in zip(*part_list):
ordered_results.extend(list(res))
# the dataloader may pad some samples
ordered_results = ordered_results[:size]
return ordered_results
| 1 | 19,406 | For different datasets, we may want to save the images differently. Sometimes the datasets are categorized into several subfolders, and we want to keep it when saving visualization results, and sometimes we just want to save all images in `out_dir`. An argument may be added to control the behavior. | open-mmlab-mmdetection | py |
@@ -788,7 +788,12 @@ public class FlexBuffers {
if (io == other.length) {
// in our buffer we have an additional \0 byte
// but this does not exist in regular Java strings, so we return now
- return c1 - c2;
+ int cmp = c1 - c2;
+ if (cmp != 0 || bb.get(ia) == '\0') {
+ return cmp;
+ } else {
+ return 1;
+ }
}
}
while (c1 == c2); | 1 | /*
* Copyright 2014 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.flatbuffers;
import static com.google.flatbuffers.FlexBuffers.Unsigned.byteToUnsignedInt;
import static com.google.flatbuffers.FlexBuffers.Unsigned.intToUnsignedLong;
import static com.google.flatbuffers.FlexBuffers.Unsigned.shortToUnsignedInt;
import java.math.BigInteger;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
/// @file
/// @addtogroup flatbuffers_java_api
/// @{
/**
* This class can be used to parse FlexBuffer messages.
* <p>
* For generating FlexBuffer messages, use {@link FlexBuffersBuilder}.
* <p>
* Example of usage:
* <pre>
* ReadBuf bb = ... // load message from file or network
* FlexBuffers.Reference r = FlexBuffers.getRoot(bb); // Reads the root element
* FlexBuffers.Map map = r.asMap(); // We assumed root object is a map
* System.out.println(map.get("name").asString()); // prints element with key "name"
* </pre>
*/
public class FlexBuffers {
// These are used as the upper 6 bits of a type field to indicate the actual
// type.
/** Represent a null type */
public static final int FBT_NULL = 0;
/** Represent a signed integer type */
public static final int FBT_INT = 1;
/** Represent a unsigned type */
public static final int FBT_UINT = 2;
/** Represent a float type */
public static final int FBT_FLOAT = 3; // Types above stored inline, types below store an offset.
/** Represent a key to a map type */
public static final int FBT_KEY = 4;
/** Represent a string type */
public static final int FBT_STRING = 5;
/** Represent a indirect signed integer type */
public static final int FBT_INDIRECT_INT = 6;
/** Represent a indirect unsigned integer type */
public static final int FBT_INDIRECT_UINT = 7;
/** Represent a indirect float type */
public static final int FBT_INDIRECT_FLOAT = 8;
/** Represent a map type */
public static final int FBT_MAP = 9;
/** Represent a vector type */
public static final int FBT_VECTOR = 10; // Untyped.
/** Represent a vector of signed integers type */
public static final int FBT_VECTOR_INT = 11; // Typed any size (stores no type table).
/** Represent a vector of unsigned integers type */
public static final int FBT_VECTOR_UINT = 12;
/** Represent a vector of floats type */
public static final int FBT_VECTOR_FLOAT = 13;
/** Represent a vector of keys type */
public static final int FBT_VECTOR_KEY = 14;
/** Represent a vector of strings type */
// DEPRECATED, use FBT_VECTOR or FBT_VECTOR_KEY instead.
// more info on thttps://github.com/google/flatbuffers/issues/5627.
public static final int FBT_VECTOR_STRING_DEPRECATED = 15;
/// @cond FLATBUFFERS_INTERNAL
public static final int FBT_VECTOR_INT2 = 16; // Typed tuple (no type table, no size field).
public static final int FBT_VECTOR_UINT2 = 17;
public static final int FBT_VECTOR_FLOAT2 = 18;
public static final int FBT_VECTOR_INT3 = 19; // Typed triple (no type table, no size field).
public static final int FBT_VECTOR_UINT3 = 20;
public static final int FBT_VECTOR_FLOAT3 = 21;
public static final int FBT_VECTOR_INT4 = 22; // Typed quad (no type table, no size field).
public static final int FBT_VECTOR_UINT4 = 23;
public static final int FBT_VECTOR_FLOAT4 = 24;
/// @endcond FLATBUFFERS_INTERNAL
/** Represent a blob type */
public static final int FBT_BLOB = 25;
/** Represent a boolean type */
public static final int FBT_BOOL = 26;
/** Represent a vector of booleans type */
public static final int FBT_VECTOR_BOOL = 36; // To Allow the same type of conversion of type to vector type
private static final ReadBuf EMPTY_BB = new ArrayReadWriteBuf(new byte[] {0}, 1);
/**
* Checks whether a type is a typed vector
*
* @param type type to be checked
* @return true if typed vector
*/
static boolean isTypedVector(int type) {
return (type >= FBT_VECTOR_INT && type <= FBT_VECTOR_STRING_DEPRECATED) || type == FBT_VECTOR_BOOL;
}
/**
* Check whether you can access type directly (no indirection) or not.
*
* @param type type to be checked
* @return true if inline type
*/
static boolean isTypeInline(int type) {
return type <= FBT_FLOAT || type == FBT_BOOL;
}
static int toTypedVectorElementType(int original_type) {
return original_type - FBT_VECTOR_INT + FBT_INT;
}
/**
* Return a vector type out of an original element type
*
* @param type element type
* @param fixedLength size of element
* @return typed vector type
*/
static int toTypedVector(int type, int fixedLength) {
assert (isTypedVectorElementType(type));
switch (fixedLength) {
case 0: return type - FBT_INT + FBT_VECTOR_INT;
case 2: return type - FBT_INT + FBT_VECTOR_INT2;
case 3: return type - FBT_INT + FBT_VECTOR_INT3;
case 4: return type - FBT_INT + FBT_VECTOR_INT4;
default:
assert (false);
return FBT_NULL;
}
}
static boolean isTypedVectorElementType(int type) {
return (type >= FBT_INT && type <= FBT_KEY) || type == FBT_BOOL;
}
// return position of the element that the offset is pointing to
private static int indirect(ReadBuf bb, int offset, int byteWidth) {
// we assume all offsets fit in an int, since ReadBuf operates with that assumption
return (int) (offset - readUInt(bb, offset, byteWidth));
}
// read unsigned int with size byteWidth and return as a 64-bit integer
private static long readUInt(ReadBuf buff, int end, int byteWidth) {
switch (byteWidth) {
case 1: return byteToUnsignedInt(buff.get(end));
case 2: return shortToUnsignedInt(buff.getShort(end));
case 4: return intToUnsignedLong(buff.getInt(end));
case 8: return buff.getLong(end); // We are passing signed long here. Losing information (user should know)
default: return -1; // we should never reach here
}
}
// read signed int of size byteWidth and return as 32-bit int
private static int readInt(ReadBuf buff, int end, int byteWidth) {
return (int) readLong(buff, end, byteWidth);
}
// read signed int of size byteWidth and return as 64-bit int
private static long readLong(ReadBuf buff, int end, int byteWidth) {
switch (byteWidth) {
case 1: return buff.get(end);
case 2: return buff.getShort(end);
case 4: return buff.getInt(end);
case 8: return buff.getLong(end);
default: return -1; // we should never reach here
}
}
private static double readDouble(ReadBuf buff, int end, int byteWidth) {
switch (byteWidth) {
case 4: return buff.getFloat(end);
case 8: return buff.getDouble(end);
default: return -1; // we should never reach here
}
}
/**
* Reads a FlexBuffer message in a ByteBuffer and returns {@link Reference} to
* the root element.
* @param buffer ByteBuffer containing FlexBuffer message
* @return {@link Reference} to the root object
*/
@Deprecated
public static Reference getRoot(ByteBuffer buffer) {
return getRoot( buffer.hasArray() ? new ArrayReadWriteBuf(buffer.array(), buffer.limit()) : new ByteBufferReadWriteBuf(buffer));
}
/**
* Reads a FlexBuffer message in ReadBuf and returns {@link Reference} to
* the root element.
* @param buffer ReadBuf containing FlexBuffer message
* @return {@link Reference} to the root object
*/
public static Reference getRoot(ReadBuf buffer) {
// See Finish() below for the serialization counterpart of this.
// The root ends at the end of the buffer, so we parse backwards from there.
int end = buffer.limit();
int byteWidth = buffer.get(--end);
int packetType = byteToUnsignedInt(buffer.get(--end));
end -= byteWidth; // The root data item.
return new Reference(buffer, end, byteWidth, packetType);
}
/**
* Represents a generic element in the buffer.
*/
public static class Reference {
private static final Reference NULL_REFERENCE = new Reference(EMPTY_BB, 0, 1, 0);
private ReadBuf bb;
private int end;
private int parentWidth;
private int byteWidth;
private int type;
Reference(ReadBuf bb, int end, int parentWidth, int packedType) {
this(bb, end, parentWidth, (1 << (packedType & 3)), packedType >> 2);
}
Reference(ReadBuf bb, int end, int parentWidth, int byteWidth, int type) {
this.bb = bb;
this.end = end;
this.parentWidth = parentWidth;
this.byteWidth = byteWidth;
this.type = type;
}
/**
* Return element type
* @return element type as integer
*/
public int getType() {
return type;
}
/**
* Checks whether the element is null type
* @return true if null type
*/
public boolean isNull() {
return type == FBT_NULL;
}
/**
* Checks whether the element is boolean type
* @return true if boolean type
*/
public boolean isBoolean() {
return type == FBT_BOOL;
}
/**
* Checks whether the element type is numeric (signed/unsigned integers and floats)
* @return true if numeric type
*/
public boolean isNumeric() {
return isIntOrUInt() || isFloat();
}
/**
* Checks whether the element type is signed or unsigned integers
* @return true if an integer type
*/
public boolean isIntOrUInt() {
return isInt() || isUInt();
}
/**
* Checks whether the element type is float
* @return true if a float type
*/
public boolean isFloat() {
return type == FBT_FLOAT || type == FBT_INDIRECT_FLOAT;
}
/**
* Checks whether the element type is signed integer
* @return true if a signed integer type
*/
public boolean isInt() {
return type == FBT_INT || type == FBT_INDIRECT_INT;
}
/**
* Checks whether the element type is unsigned integer
* @return true if an unsigned integer type
*/
public boolean isUInt() {
return type == FBT_UINT || type == FBT_INDIRECT_UINT;
}
/**
* Checks whether the element type is string
* @return true if a string type
*/
public boolean isString() {
return type == FBT_STRING;
}
/**
* Checks whether the element type is key
* @return true if a key type
*/
public boolean isKey() {
return type == FBT_KEY;
}
/**
* Checks whether the element type is vector
* @return true if a vector type
*/
public boolean isVector() {
return type == FBT_VECTOR || type == FBT_MAP;
}
/**
* Checks whether the element type is typed vector
* @return true if a typed vector type
*/
public boolean isTypedVector() {
return FlexBuffers.isTypedVector(type);
}
/**
* Checks whether the element type is a map
* @return true if a map type
*/
public boolean isMap() {
return type == FBT_MAP;
}
/**
* Checks whether the element type is a blob
* @return true if a blob type
*/
public boolean isBlob() {
return type == FBT_BLOB;
}
/**
* Returns element as 32-bit integer.
* <p> For vector element, it will return size of the vector</p>
* <p> For String element, it will type to be parsed as integer</p>
* <p> Unsigned elements will become negative</p>
* <p> Float elements will be casted to integer </p>
* @return 32-bit integer or 0 if fail to convert element to integer.
*/
public int asInt() {
if (type == FBT_INT) {
// A fast path for the common case.
return readInt(bb, end, parentWidth);
} else
switch (type) {
case FBT_INDIRECT_INT: return readInt(bb, indirect(bb, end, parentWidth), byteWidth);
case FBT_UINT: return (int) readUInt(bb, end, parentWidth);
case FBT_INDIRECT_UINT: return (int) readUInt(bb, indirect(bb, end, parentWidth), parentWidth);
case FBT_FLOAT: return (int) readDouble(bb, end, parentWidth);
case FBT_INDIRECT_FLOAT: return (int) readDouble(bb, indirect(bb, end, parentWidth), byteWidth);
case FBT_NULL: return 0;
case FBT_STRING: return Integer.parseInt(asString());
case FBT_VECTOR: return asVector().size();
case FBT_BOOL: return readInt(bb, end, parentWidth);
default:
// Convert other things to int.
return 0;
}
}
/**
* Returns element as unsigned 64-bit integer.
* <p> For vector element, it will return size of the vector</p>
* <p> For String element, it will type to be parsed as integer</p>
* <p> Negative signed elements will become unsigned counterpart</p>
* <p> Float elements will be casted to integer </p>
* @return 64-bit integer or 0 if fail to convert element to integer.
*/
public long asUInt() {
if (type == FBT_UINT) {
// A fast path for the common case.
return readUInt(bb, end, parentWidth);
} else
switch (type) {
case FBT_INDIRECT_UINT: return readUInt(bb, indirect(bb, end, parentWidth), byteWidth);
case FBT_INT: return readLong(bb, end, parentWidth);
case FBT_INDIRECT_INT: return readLong(bb, indirect(bb, end, parentWidth), byteWidth);
case FBT_FLOAT: return (long) readDouble(bb, end, parentWidth);
case FBT_INDIRECT_FLOAT: return (long) readDouble(bb, indirect(bb, end, parentWidth), parentWidth);
case FBT_NULL: return 0;
case FBT_STRING: return Long.parseLong(asString());
case FBT_VECTOR: return asVector().size();
case FBT_BOOL: return readInt(bb, end, parentWidth);
default:
// Convert other things to uint.
return 0;
}
}
/**
* Returns element as 64-bit integer.
* <p> For vector element, it will return size of the vector</p>
* <p> For String element, it will type to be parsed as integer</p>
* <p> Unsigned elements will become negative</p>
* <p> Float elements will be casted to integer </p>
* @return 64-bit integer or 0 if fail to convert element to long.
*/
public long asLong() {
if (type == FBT_INT) {
// A fast path for the common case.
return readLong(bb, end, parentWidth);
} else
switch (type) {
case FBT_INDIRECT_INT: return readLong(bb, indirect(bb, end, parentWidth), byteWidth);
case FBT_UINT: return readUInt(bb, end, parentWidth);
case FBT_INDIRECT_UINT: return readUInt(bb, indirect(bb, end, parentWidth), parentWidth);
case FBT_FLOAT: return (long) readDouble(bb, end, parentWidth);
case FBT_INDIRECT_FLOAT: return (long) readDouble(bb, indirect(bb, end, parentWidth), byteWidth);
case FBT_NULL: return 0;
case FBT_STRING: {
try {
return Long.parseLong(asString());
} catch (NumberFormatException nfe) {
return 0; //same as C++ implementation
}
}
case FBT_VECTOR: return asVector().size();
case FBT_BOOL: return readInt(bb, end, parentWidth);
default:
// Convert other things to int.
return 0;
}
}
/**
* Returns element as a 64-bit floating point number.
* <p> For vector element, it will return size of the vector</p>
* <p> For String element, it will try to be parsed as a double</p>
* @return 64-bit floating point number or 0 if fail to convert element to double.
*/
public double asFloat() {
if (type == FBT_FLOAT) {
// A fast path for the common case.
return readDouble(bb, end, parentWidth);
} else
switch (type) {
case FBT_INDIRECT_FLOAT: return readDouble(bb, indirect(bb, end, parentWidth), byteWidth);
case FBT_INT: return readInt(bb, end, parentWidth);
case FBT_UINT:
case FBT_BOOL:
return readUInt(bb, end, parentWidth);
case FBT_INDIRECT_INT: return readInt(bb, indirect(bb, end, parentWidth), byteWidth);
case FBT_INDIRECT_UINT: return readUInt(bb, indirect(bb, end, parentWidth), byteWidth);
case FBT_NULL: return 0.0;
case FBT_STRING: return Double.parseDouble(asString());
case FBT_VECTOR: return asVector().size();
default:
// Convert strings and other things to float.
return 0;
}
}
/**
* Returns element as a {@link Key}
* @return key or {@link Key#empty()} if element is not a key
*/
public Key asKey() {
if (isKey()) {
return new Key(bb, indirect(bb, end, parentWidth), byteWidth);
} else {
return Key.empty();
}
}
/**
* Returns element as a `String`
* @return element as `String` or empty `String` if fail
*/
public String asString() {
if (isString()) {
int start = indirect(bb, end, parentWidth);
int size = (int) readUInt(bb, start - byteWidth, byteWidth);
return bb.getString(start, size);
}
else if (isKey()){
int start = indirect(bb, end, byteWidth);
for (int i = start; ; i++) {
if (bb.get(i) == 0) {
return bb.getString(start, i - start);
}
}
} else {
return "";
}
}
/**
* Returns element as a {@link Map}
* @return element as {@link Map} or empty {@link Map} if fail
*/
public Map asMap() {
if (isMap()) {
return new Map(bb, indirect(bb, end, parentWidth), byteWidth);
} else {
return Map.empty();
}
}
/**
* Returns element as a {@link Vector}
* @return element as {@link Vector} or empty {@link Vector} if fail
*/
public Vector asVector() {
if (isVector()) {
return new Vector(bb, indirect(bb, end, parentWidth), byteWidth);
} else if(type == FlexBuffers.FBT_VECTOR_STRING_DEPRECATED) {
// deprecated. Should be treated as key vector
return new TypedVector(bb, indirect(bb, end, parentWidth), byteWidth, FlexBuffers.FBT_KEY);
} else if (FlexBuffers.isTypedVector(type)) {
return new TypedVector(bb, indirect(bb, end, parentWidth), byteWidth, FlexBuffers.toTypedVectorElementType(type));
} else {
return Vector.empty();
}
}
/**
* Returns element as a {@link Blob}
* @return element as {@link Blob} or empty {@link Blob} if fail
*/
public Blob asBlob() {
if (isBlob() || isString()) {
return new Blob(bb, indirect(bb, end, parentWidth), byteWidth);
} else {
return Blob.empty();
}
}
/**
* Returns element as a boolean
* <p>If element type is not boolean, it will be casted to integer and compared against 0</p>
* @return element as boolean
*/
public boolean asBoolean() {
if (isBoolean()) {
return bb.get(end) != 0;
}
return asUInt() != 0;
}
/**
* Returns text representation of the element (JSON)
* @return String containing text representation of the element
*/
@Override
public String toString() {
return toString(new StringBuilder(128)).toString();
}
/**
* Appends a text(JSON) representation to a `StringBuilder`
*/
StringBuilder toString(StringBuilder sb) {
//TODO: Original C++ implementation escape strings.
// probably we should do it as well.
switch (type) {
case FBT_NULL:
return sb.append("null");
case FBT_INT:
case FBT_INDIRECT_INT:
return sb.append(asLong());
case FBT_UINT:
case FBT_INDIRECT_UINT:
return sb.append(asUInt());
case FBT_INDIRECT_FLOAT:
case FBT_FLOAT:
return sb.append(asFloat());
case FBT_KEY:
return asKey().toString(sb.append('"')).append('"');
case FBT_STRING:
return sb.append('"').append(asString()).append('"');
case FBT_MAP:
return asMap().toString(sb);
case FBT_VECTOR:
return asVector().toString(sb);
case FBT_BLOB:
return asBlob().toString(sb);
case FBT_BOOL:
return sb.append(asBoolean());
case FBT_VECTOR_INT:
case FBT_VECTOR_UINT:
case FBT_VECTOR_FLOAT:
case FBT_VECTOR_KEY:
case FBT_VECTOR_STRING_DEPRECATED:
case FBT_VECTOR_BOOL:
return sb.append(asVector());
case FBT_VECTOR_INT2:
case FBT_VECTOR_UINT2:
case FBT_VECTOR_FLOAT2:
case FBT_VECTOR_INT3:
case FBT_VECTOR_UINT3:
case FBT_VECTOR_FLOAT3:
case FBT_VECTOR_INT4:
case FBT_VECTOR_UINT4:
case FBT_VECTOR_FLOAT4:
throw new FlexBufferException("not_implemented:" + type);
default:
return sb;
}
}
}
/**
* Base class of all types below.
* Points into the data buffer and allows access to one type.
*/
private static abstract class Object {
ReadBuf bb;
int end;
int byteWidth;
Object(ReadBuf buff, int end, int byteWidth) {
this.bb = buff;
this.end = end;
this.byteWidth = byteWidth;
}
@Override
public String toString() {
return toString(new StringBuilder(128)).toString();
}
public abstract StringBuilder toString(StringBuilder sb);
}
// Stores size in `byte_width_` bytes before end position.
private static abstract class Sized extends Object {
protected final int size;
Sized(ReadBuf buff, int end, int byteWidth) {
super(buff, end, byteWidth);
size = (int) readUInt(bb, end - byteWidth, byteWidth);
}
public int size() {
return size;
}
}
/**
* Represents an array-of-bytes element in the buffer
*
* <p>It can be converted to `ByteBuffer` using {@link data()},
* copied into a byte[] using {@link getBytes()} or
* have individual bytes accessed using {@link get(int)}</p>
*/
public static class Blob extends Sized {
static final Blob EMPTY = new Blob(EMPTY_BB, 1, 1);
Blob(ReadBuf buff, int end, int byteWidth) {
super(buff, end, byteWidth);
}
/** Return an empty {@link Blob} */
public static Blob empty() {
return EMPTY;
}
/**
* Return {@link Blob} as `ByteBuffer`
* @return blob as `ByteBuffer`
*/
public ByteBuffer data() {
ByteBuffer dup = ByteBuffer.wrap(bb.data());
dup.position(end);
dup.limit(end + size());
return dup.asReadOnlyBuffer().slice();
}
/**
* Copy blob into a byte[]
* @return blob as a byte[]
*/
public byte[] getBytes() {
int size = size();
byte[] result = new byte[size];
for (int i = 0; i < size; i++) {
result[i] = bb.get(end + i);
}
return result;
}
/**
* Return individual byte at a given position
* @param pos position of the byte to be read
*/
public byte get(int pos) {
assert pos >=0 && pos <= size();
return bb.get(end + pos);
}
/**
* Returns a text(JSON) representation of the {@link Blob}
*/
@Override
public String toString() {
return bb.getString(end, size());
}
/**
* Append a text(JSON) representation of the {@link Blob} into a `StringBuilder`
*/
@Override
public StringBuilder toString(StringBuilder sb) {
sb.append('"');
sb.append(bb.getString(end, size()));
return sb.append('"');
}
}
/**
* Represents a key element in the buffer. Keys are
* used to reference objects in a {@link Map}
*/
public static class Key extends Object {
private static final Key EMPTY = new Key(EMPTY_BB, 0, 0);
Key(ReadBuf buff, int end, int byteWidth) {
super(buff, end, byteWidth);
}
/**
* Return an empty {@link Key}
* @return empty {@link Key}
* */
public static Key empty() {
return Key.EMPTY;
}
/**
* Appends a text(JSON) representation to a `StringBuilder`
*/
@Override
public StringBuilder toString(StringBuilder sb) {
return sb.append(toString());
}
@Override
public String toString() {
int size;
for (int i = end; ; i++) {
if (bb.get(i) == 0) {
size = i - end;
break;
}
}
return bb.getString(end, size);
}
int compareTo(byte[] other) {
int ia = end;
int io = 0;
byte c1, c2;
do {
c1 = bb.get(ia);
c2 = other[io];
if (c1 == '\0')
return c1 - c2;
ia++;
io++;
if (io == other.length) {
// in our buffer we have an additional \0 byte
// but this does not exist in regular Java strings, so we return now
return c1 - c2;
}
}
while (c1 == c2);
return c1 - c2;
}
/**
* Compare keys
* @param obj other key to compare
* @return true if keys are the same
*/
@Override
public boolean equals(java.lang.Object obj) {
if (!(obj instanceof Key))
return false;
return ((Key) obj).end == end && ((Key) obj).byteWidth == byteWidth;
}
public int hashCode() {
return end ^ byteWidth;
}
}
/**
* Map object representing a set of key-value pairs.
*/
public static class Map extends Vector {
private static final Map EMPTY_MAP = new Map(EMPTY_BB, 1, 1);
// cache for converting UTF-8 codepoints into
// Java chars. Used to speed up String comparison
private final byte[] comparisonBuffer = new byte[4];
Map(ReadBuf bb, int end, int byteWidth) {
super(bb, end, byteWidth);
}
/**
* Returns an empty {@link Map}
* @return an empty {@link Map}
*/
public static Map empty() {
return EMPTY_MAP;
}
/**
* @param key access key to element on map
* @return reference to value in map
*/
public Reference get(String key) {
int index = binarySearch(key);
if (index >= 0 && index < size) {
return get(index);
}
return Reference.NULL_REFERENCE;
}
/**
* @param key access key to element on map. Keys are assumed to be encoded in UTF-8
* @return reference to value in map
*/
public Reference get(byte[] key) {
int index = binarySearch(key);
if (index >= 0 && index < size) {
return get(index);
}
return Reference.NULL_REFERENCE;
}
/**
* Get a vector of keys in the map
*
* @return vector of keys
*/
public KeyVector keys() {
final int num_prefixed_fields = 3;
int keysOffset = end - (byteWidth * num_prefixed_fields);
return new KeyVector(new TypedVector(bb,
indirect(bb, keysOffset, byteWidth),
readInt(bb, keysOffset + byteWidth, byteWidth),
FBT_KEY));
}
/**
* @return {@code Vector} of values from map
*/
public Vector values() {
return new Vector(bb, end, byteWidth);
}
/**
* Writes text (json) representation of map in a {@code StringBuilder}.
*
* @param builder {@code StringBuilder} to be appended to
* @return Same {@code StringBuilder} with appended text
*/
public StringBuilder toString(StringBuilder builder) {
builder.append("{ ");
KeyVector keys = keys();
int size = size();
Vector vals = values();
for (int i = 0; i < size; i++) {
builder.append('"')
.append(keys.get(i).toString())
.append("\" : ");
builder.append(vals.get(i).toString());
if (i != size - 1)
builder.append(", ");
}
builder.append(" }");
return builder;
}
// Performs a binary search on a key vector and return index of the key in key vector
private int binarySearch(CharSequence searchedKey) {
int low = 0;
int high = size - 1;
final int num_prefixed_fields = 3;
int keysOffset = end - (byteWidth * num_prefixed_fields);
int keysStart = indirect(bb, keysOffset, byteWidth);
int keyByteWidth = readInt(bb, keysOffset + byteWidth, byteWidth);
while (low <= high) {
int mid = (low + high) >>> 1;
int keyPos = indirect(bb, keysStart + mid * keyByteWidth, keyByteWidth);
int cmp = compareCharSequence(keyPos, searchedKey);
if (cmp < 0)
low = mid + 1;
else if (cmp > 0)
high = mid - 1;
else
return mid; // key found
}
return -(low + 1); // key not found
}
private int binarySearch(byte[] searchedKey) {
int low = 0;
int high = size - 1;
final int num_prefixed_fields = 3;
int keysOffset = end - (byteWidth * num_prefixed_fields);
int keysStart = indirect(bb, keysOffset, byteWidth);
int keyByteWidth = readInt(bb, keysOffset + byteWidth, byteWidth);
while (low <= high) {
int mid = (low + high) >>> 1;
int keyPos = indirect(bb, keysStart + mid * keyByteWidth, keyByteWidth);
int cmp = compareBytes(bb, keyPos, searchedKey);
if (cmp < 0)
low = mid + 1;
else if (cmp > 0)
high = mid - 1;
else
return mid; // key found
}
return -(low + 1); // key not found
}
// compares a byte[] against a FBT_KEY
private int compareBytes(ReadBuf bb, int start, byte[] other) {
int l1 = start;
int l2 = 0;
byte c1, c2;
do {
c1 = bb.get(l1);
c2 = other[l2];
if (c1 == '\0')
return c1 - c2;
l1++;
l2++;
if (l2 == other.length) {
// in our buffer we have an additional \0 byte
// but this does not exist in regular Java strings, so we return now
return c1 - c2;
}
}
while (c1 == c2);
return c1 - c2;
}
// compares a CharSequence against a FBT_KEY
private int compareCharSequence(int start, CharSequence other) {
int bufferPos = start;
int otherPos = 0;
int limit = bb.limit();
int otherLimit = other.length();
// special loop for ASCII characters. Most keys should be ASCII only, so this
// loop should be optimized for that.
// breaks if a multi-byte character is found
while (otherPos < otherLimit) {
char c2 = other.charAt(otherPos);
if (c2 >= 0x80) {
// not a single byte codepoint
break;
}
byte b = bb.get(bufferPos);
if (b == 0) {
return -c2;
} else if (b < 0) {
break;
} else if ((char) b != c2) {
return b - c2;
}
++bufferPos;
++otherPos;
}
while (bufferPos < limit) {
int sizeInBuff = Utf8.encodeUtf8CodePoint(other, otherPos, comparisonBuffer);
if (sizeInBuff == 0) {
// That means we finished with other and there are no more chars to
// compare. String in the buffer is bigger.
return bb.get(bufferPos);
}
for (int i = 0; i < sizeInBuff; i++) {
byte bufferByte = bb.get(bufferPos++);
byte otherByte = comparisonBuffer[i];
if (bufferByte == 0) {
// Our key is finished, so other is bigger
return -otherByte;
} else if (bufferByte != otherByte) {
return bufferByte - otherByte;
}
}
otherPos += sizeInBuff == 4 ? 2 : 1;
}
return 0;
}
}
/**
* Object that represents a set of elements in the buffer
*/
public static class Vector extends Sized {
private static final Vector EMPTY_VECTOR = new Vector(EMPTY_BB, 1, 1);
Vector(ReadBuf bb, int end, int byteWidth) {
super(bb, end, byteWidth);
}
/**
* Returns an empty {@link Vector}
* @return an empty {@link Vector}
*/
public static Vector empty() {
return EMPTY_VECTOR;
}
/**
* Checks if the vector is empty
* @return true if vector is empty
*/
public boolean isEmpty() {
return this == EMPTY_VECTOR;
}
/**
* Appends a text(JSON) representation to a `StringBuilder`
*/
@Override
public StringBuilder toString(StringBuilder sb) {
sb.append("[ ");
int size = size();
for (int i = 0; i < size; i++) {
get(i).toString(sb);
if (i != size - 1) {
sb.append(", ");
}
}
sb.append(" ]");
return sb;
}
/**
* Get a element in a vector by index
*
* @param index position of the element
* @return {@code Reference} to the element
*/
public Reference get(int index) {
long len = size();
if (index >= len) {
return Reference.NULL_REFERENCE;
}
int packedType = byteToUnsignedInt(bb.get((int) (end + (len * byteWidth) + index)));
int obj_end = end + index * byteWidth;
return new Reference(bb, obj_end, byteWidth, packedType);
}
}
/**
* Object that represents a set of elements with the same type
*/
public static class TypedVector extends Vector {
private static final TypedVector EMPTY_VECTOR = new TypedVector(EMPTY_BB, 1, 1, FBT_INT);
private final int elemType;
TypedVector(ReadBuf bb, int end, int byteWidth, int elemType) {
super(bb, end, byteWidth);
this.elemType = elemType;
}
public static TypedVector empty() {
return EMPTY_VECTOR;
}
/**
* Returns whether the vector is empty
*
* @return true if empty
*/
public boolean isEmptyVector() {
return this == EMPTY_VECTOR;
}
/**
* Return element type for all elements in the vector
*
* @return element type
*/
public int getElemType() {
return elemType;
}
/**
* Get reference to an object in the {@code Vector}
*
* @param pos position of the object in {@code Vector}
* @return reference to element
*/
@Override
public Reference get(int pos) {
int len = size();
if (pos >= len) return Reference.NULL_REFERENCE;
int childPos = end + pos * byteWidth;
return new Reference(bb, childPos, byteWidth, 1, elemType);
}
}
/**
* Represent a vector of keys in a map
*/
public static class KeyVector {
private final TypedVector vec;
KeyVector(TypedVector vec) {
this.vec = vec;
}
/**
* Return key
*
* @param pos position of the key in key vector
* @return key
*/
public Key get(int pos) {
int len = size();
if (pos >= len) return Key.EMPTY;
int childPos = vec.end + pos * vec.byteWidth;
return new Key(vec.bb, indirect(vec.bb, childPos, vec.byteWidth), 1);
}
/**
* Returns size of key vector
*
* @return size
*/
public int size() {
return vec.size();
}
/**
* Returns a text(JSON) representation
*/
public String toString() {
StringBuilder b = new StringBuilder();
b.append('[');
for (int i = 0; i < vec.size(); i++) {
vec.get(i).toString(b);
if (i != vec.size() - 1) {
b.append(", ");
}
}
return b.append("]").toString();
}
}
public static class FlexBufferException extends RuntimeException {
FlexBufferException(String msg) {
super(msg);
}
}
static class Unsigned {
static int byteToUnsignedInt(byte x) {
return ((int) x) & 0xff;
}
static int shortToUnsignedInt(short x) {
return ((int) x) & 0xffff;
}
static long intToUnsignedLong(int x) {
return ((long) x) & 0xffffffffL;
}
}
}
/// @}
| 1 | 21,946 | if `other` could have a guaranteed `0` byte in it, this whole `if` could be removed, and above you could just add `|| c2 == '\0'` to get the same effect? | google-flatbuffers | java |
@@ -33,7 +33,11 @@ import org.apache.iceberg.exceptions.AlreadyExistsException;
public class CachingCatalog implements Catalog {
public static Catalog wrap(Catalog catalog) {
- return new CachingCatalog(catalog);
+ return wrap(catalog, true);
+ }
+
+ public static Catalog wrap(Catalog catalog, Boolean caseSensitive) {
+ return new CachingCatalog(catalog, caseSensitive);
}
private final Cache<TableIdentifier, Table> tableCache = Caffeine.newBuilder() | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg;
import com.github.benmanes.caffeine.cache.Cache;
import com.github.benmanes.caffeine.cache.Caffeine;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.iceberg.catalog.Catalog;
import org.apache.iceberg.catalog.Namespace;
import org.apache.iceberg.catalog.TableIdentifier;
import org.apache.iceberg.exceptions.AlreadyExistsException;
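/**
 * Catalog wrapper that caches loaded tables by identifier. Cached entries are soft-valued,
 * expire one minute after last access, and are invalidated when a table is dropped or renamed.
 * Illustrative usage: {@code Catalog cached = CachingCatalog.wrap(catalog); }
 */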
public class CachingCatalog implements Catalog {
public static Catalog wrap(Catalog catalog) {
return new CachingCatalog(catalog);
}
private final Cache<TableIdentifier, Table> tableCache = Caffeine.newBuilder()
.softValues()
.expireAfterAccess(1, TimeUnit.MINUTES)
.build();
private final Catalog catalog;
private CachingCatalog(Catalog catalog) {
this.catalog = catalog;
}
@Override
public List<TableIdentifier> listTables(Namespace namespace) {
return catalog.listTables(namespace);
}
@Override
public Table loadTable(TableIdentifier ident) {
return tableCache.get(ident, catalog::loadTable);
}
@Override
public Table createTable(TableIdentifier ident, Schema schema, PartitionSpec spec, String location,
Map<String, String> properties) {
AtomicBoolean created = new AtomicBoolean(false);
Table table = tableCache.get(ident, identifier -> {
created.set(true);
return catalog.createTable(identifier, schema, spec, location, properties);
});
if (!created.get()) {
throw new AlreadyExistsException("Table already exists: %s", ident);
}
return table;
}
@Override
public Transaction newCreateTableTransaction(TableIdentifier ident, Schema schema, PartitionSpec spec,
String location, Map<String, String> properties) {
// create a new transaction without altering the cache. the table doesn't exist until the transaction is committed.
// if the table is created before the transaction commits, any cached version is correct and the transaction create
// will fail. if the transaction commits before another create, then the cache will be empty.
return catalog.newCreateTableTransaction(ident, schema, spec, location, properties);
}
@Override
public Transaction newReplaceTableTransaction(TableIdentifier ident, Schema schema, PartitionSpec spec,
String location, Map<String, String> properties, boolean orCreate) {
// create a new transaction without altering the cache. the table doesn't change until the transaction is committed.
// when the transaction commits, invalidate the table in the cache if it is present.
return CommitCallbackTransaction.addCallback(
catalog.newReplaceTableTransaction(ident, schema, spec, location, properties, orCreate),
() -> tableCache.invalidate(ident));
}
@Override
public boolean dropTable(TableIdentifier ident, boolean purge) {
boolean dropped = catalog.dropTable(ident, false);
tableCache.invalidate(ident);
return dropped;
}
@Override
public void renameTable(TableIdentifier from, TableIdentifier to) {
catalog.renameTable(from, to);
tableCache.invalidate(from);
}
}
| 1 | 16,743 | nit: can this be a primitive value? | apache-iceberg | java |
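A short sketch of the primitive-typed variant the nit above is asking for, assuming the corresponding `CachingCatalog(Catalog, boolean)` constructor is adjusted to match. Using `boolean` instead of the boxed `Boolean` rules out an accidental null argument and avoids autoboxing.

  public static Catalog wrap(Catalog catalog) {
    return wrap(catalog, true);
  }

  // Primitive parameter: callers cannot pass null and no boxing takes place.
  public static Catalog wrap(Catalog catalog, boolean caseSensitive) {
    return new CachingCatalog(catalog, caseSensitive);
  }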
@@ -333,9 +333,14 @@ func (b Browse) ServeHTTP(w http.ResponseWriter, r *http.Request) (int, error) {
// Browsing navigation gets messed up if browsing a directory
// that doesn't end in "/" (which it should, anyway)
- if !strings.HasSuffix(r.URL.Path, "/") {
- staticfiles.RedirectToDir(w, r)
- return 0, nil
+ u := r.Context().Value(httpserver.OriginalURLCtxKey).(url.URL)
+ if u.Path == "" {
+ u.Path = "/"
+ }
+ if u.Path[len(u.Path)-1] != '/' {
+ u.Path += "/"
+ http.Redirect(w, r, u.String(), http.StatusMovedPermanently)
+ return http.StatusMovedPermanently, nil
}
return b.ServeListing(w, r, requestedFilepath, bc) | 1 | // Package browse provides middleware for listing files in a directory
// when directory path is requested instead of a specific file.
package browse
import (
"bytes"
"encoding/json"
"net/http"
"net/url"
"os"
"path"
"sort"
"strconv"
"strings"
"text/template"
"time"
"github.com/dustin/go-humanize"
"github.com/mholt/caddy/caddyhttp/httpserver"
"github.com/mholt/caddy/caddyhttp/staticfiles"
)
const (
sortByName = "name"
sortByNameDirFirst = "namedirfirst"
sortBySize = "size"
sortByTime = "time"
)
// Browse is an http.Handler that can show a file listing when
// directories in the given paths are specified.
type Browse struct {
Next httpserver.Handler
Configs []Config
IgnoreIndexes bool
}
// Config is a configuration for browsing in a particular path.
type Config struct {
PathScope string // the base path the URL must match to enable browsing
Fs staticfiles.FileServer
Variables interface{}
Template *template.Template
}
// A Listing is the context used to fill out a template.
type Listing struct {
// The name of the directory (the last element of the path)
Name string
// The full path of the request
Path string
// Whether the parent directory is browsable
CanGoUp bool
// The items (files and folders) in the path
Items []FileInfo
// The number of directories in the listing
NumDirs int
// The number of files (items that aren't directories) in the listing
NumFiles int
// Which sorting order is used
Sort string
// And which order
Order string
// If ≠0 then Items have been limited to that many elements
ItemsLimitedTo int
// Optional custom variables for use in browse templates
User interface{}
httpserver.Context
}
// Crumb represents part of a breadcrumb menu.
type Crumb struct {
Link, Text string
}
// Breadcrumbs returns l.Path where every element maps
// the link to the text to display.
func (l Listing) Breadcrumbs() []Crumb {
var result []Crumb
if len(l.Path) == 0 {
return result
}
// skip trailing slash
lpath := l.Path
if lpath[len(lpath)-1] == '/' {
lpath = lpath[:len(lpath)-1]
}
parts := strings.Split(lpath, "/")
for i := range parts {
txt := parts[i]
if i == 0 && parts[i] == "" {
txt = "/"
}
result = append(result, Crumb{Link: strings.Repeat("../", len(parts)-i-1), Text: txt})
}
return result
}
// FileInfo is the info about a particular file or directory
type FileInfo struct {
Name string
Size int64
URL string
ModTime time.Time
Mode os.FileMode
IsDir bool
}
// HumanSize returns the size of the file as a human-readable string
// in IEC format (i.e. power of 2 or base 1024).
func (fi FileInfo) HumanSize() string {
return humanize.IBytes(uint64(fi.Size))
}
// HumanModTime returns the modified time of the file as a human-readable string.
func (fi FileInfo) HumanModTime(format string) string {
return fi.ModTime.Format(format)
}
// Implement sorting for Listing
type byName Listing
type byNameDirFirst Listing
type bySize Listing
type byTime Listing
// By Name
func (l byName) Len() int { return len(l.Items) }
func (l byName) Swap(i, j int) { l.Items[i], l.Items[j] = l.Items[j], l.Items[i] }
// Treat upper and lower case equally
func (l byName) Less(i, j int) bool {
return strings.ToLower(l.Items[i].Name) < strings.ToLower(l.Items[j].Name)
}
// By Name Dir First
func (l byNameDirFirst) Len() int { return len(l.Items) }
func (l byNameDirFirst) Swap(i, j int) { l.Items[i], l.Items[j] = l.Items[j], l.Items[i] }
// Treat upper and lower case equally
func (l byNameDirFirst) Less(i, j int) bool {
// if both are dir or file sort normally
if l.Items[i].IsDir == l.Items[j].IsDir {
return strings.ToLower(l.Items[i].Name) < strings.ToLower(l.Items[j].Name)
} else {
// always sort dir ahead of file
return l.Items[i].IsDir
}
}
// By Size
func (l bySize) Len() int { return len(l.Items) }
func (l bySize) Swap(i, j int) { l.Items[i], l.Items[j] = l.Items[j], l.Items[i] }
const directoryOffset = -1 << 31 // = math.MinInt32
func (l bySize) Less(i, j int) bool {
iSize, jSize := l.Items[i].Size, l.Items[j].Size
// Directory sizes depend on the filesystem implementation,
	// which is opaque to a visitor, and indeed should not change if the operator chooses to change the fs.
// For a consistent user experience directories are pulled to the front…
if l.Items[i].IsDir {
iSize = directoryOffset
}
if l.Items[j].IsDir {
jSize = directoryOffset
}
// … and sorted by name.
if l.Items[i].IsDir && l.Items[j].IsDir {
return strings.ToLower(l.Items[i].Name) < strings.ToLower(l.Items[j].Name)
}
return iSize < jSize
}
// By Time
func (l byTime) Len() int { return len(l.Items) }
func (l byTime) Swap(i, j int) { l.Items[i], l.Items[j] = l.Items[j], l.Items[i] }
func (l byTime) Less(i, j int) bool { return l.Items[i].ModTime.Before(l.Items[j].ModTime) }
// Add sorting method to "Listing"
// it will apply what's in ".Sort" and ".Order"
func (l Listing) applySort() {
// Check '.Order' to know how to sort
if l.Order == "desc" {
switch l.Sort {
case sortByName:
sort.Sort(sort.Reverse(byName(l)))
case sortByNameDirFirst:
sort.Sort(sort.Reverse(byNameDirFirst(l)))
case sortBySize:
sort.Sort(sort.Reverse(bySize(l)))
case sortByTime:
sort.Sort(sort.Reverse(byTime(l)))
default:
// If not one of the above, do nothing
return
}
} else { // If we had more Orderings we could add them here
switch l.Sort {
case sortByName:
sort.Sort(byName(l))
case sortByNameDirFirst:
sort.Sort(byNameDirFirst(l))
case sortBySize:
sort.Sort(bySize(l))
case sortByTime:
sort.Sort(byTime(l))
default:
// If not one of the above, do nothing
return
}
}
}
func directoryListing(files []os.FileInfo, canGoUp bool, urlPath string, config *Config) (Listing, bool) {
var (
fileinfos []FileInfo
dirCount, fileCount int
hasIndexFile bool
)
for _, f := range files {
name := f.Name()
for _, indexName := range staticfiles.IndexPages {
if name == indexName {
hasIndexFile = true
break
}
}
if f.IsDir() {
name += "/"
dirCount++
} else {
fileCount++
}
if config.Fs.IsHidden(f) {
continue
}
url := url.URL{Path: "./" + name} // prepend with "./" to fix paths with ':' in the name
fileinfos = append(fileinfos, FileInfo{
IsDir: f.IsDir(),
Name: f.Name(),
Size: f.Size(),
URL: url.String(),
ModTime: f.ModTime().UTC(),
Mode: f.Mode(),
})
}
return Listing{
Name: path.Base(urlPath),
Path: urlPath,
CanGoUp: canGoUp,
Items: fileinfos,
NumDirs: dirCount,
NumFiles: fileCount,
}, hasIndexFile
}
// ServeHTTP determines if the request is for this plugin, and if all prerequisites are met.
// If so, control is handed over to ServeListing.
func (b Browse) ServeHTTP(w http.ResponseWriter, r *http.Request) (int, error) {
// See if there's a browse configuration to match the path
var bc *Config
for i := range b.Configs {
if httpserver.Path(r.URL.Path).Matches(b.Configs[i].PathScope) {
bc = &b.Configs[i]
break
}
}
if bc == nil {
return b.Next.ServeHTTP(w, r)
}
// Browse works on existing directories; delegate everything else
requestedFilepath, err := bc.Fs.Root.Open(r.URL.Path)
if err != nil {
switch {
case os.IsPermission(err):
return http.StatusForbidden, err
case os.IsExist(err):
return http.StatusNotFound, err
default:
return b.Next.ServeHTTP(w, r)
}
}
defer requestedFilepath.Close()
info, err := requestedFilepath.Stat()
if err != nil {
switch {
case os.IsPermission(err):
return http.StatusForbidden, err
case os.IsExist(err):
return http.StatusGone, err
default:
return b.Next.ServeHTTP(w, r)
}
}
if !info.IsDir() {
return b.Next.ServeHTTP(w, r)
}
// Do not reply to anything else because it might be nonsensical
switch r.Method {
case http.MethodGet, http.MethodHead:
// proceed, noop
case "PROPFIND", http.MethodOptions:
return http.StatusNotImplemented, nil
default:
return b.Next.ServeHTTP(w, r)
}
// Browsing navigation gets messed up if browsing a directory
// that doesn't end in "/" (which it should, anyway)
if !strings.HasSuffix(r.URL.Path, "/") {
staticfiles.RedirectToDir(w, r)
return 0, nil
}
return b.ServeListing(w, r, requestedFilepath, bc)
}
func (b Browse) loadDirectoryContents(requestedFilepath http.File, urlPath string, config *Config) (*Listing, bool, error) {
files, err := requestedFilepath.Readdir(-1)
if err != nil {
return nil, false, err
}
// Determine if user can browse up another folder
var canGoUp bool
curPathDir := path.Dir(strings.TrimSuffix(urlPath, "/"))
for _, other := range b.Configs {
if strings.HasPrefix(curPathDir, other.PathScope) {
canGoUp = true
break
}
}
// Assemble listing of directory contents
listing, hasIndex := directoryListing(files, canGoUp, urlPath, config)
return &listing, hasIndex, nil
}
// handleSortOrder gets and stores for a Listing the 'sort' and 'order',
// and reads 'limit' if given. The latter is 0 if not given.
//
// This sets Cookies.
func (b Browse) handleSortOrder(w http.ResponseWriter, r *http.Request, scope string) (sort string, order string, limit int, err error) {
sort, order, limitQuery := r.URL.Query().Get("sort"), r.URL.Query().Get("order"), r.URL.Query().Get("limit")
// If the query 'sort' or 'order' is empty, use defaults or any values previously saved in Cookies
switch sort {
case "":
sort = sortByNameDirFirst
if sortCookie, sortErr := r.Cookie("sort"); sortErr == nil {
sort = sortCookie.Value
}
case sortByName, sortByNameDirFirst, sortBySize, sortByTime:
http.SetCookie(w, &http.Cookie{Name: "sort", Value: sort, Path: scope, Secure: r.TLS != nil})
}
switch order {
case "":
order = "asc"
if orderCookie, orderErr := r.Cookie("order"); orderErr == nil {
order = orderCookie.Value
}
case "asc", "desc":
http.SetCookie(w, &http.Cookie{Name: "order", Value: order, Path: scope, Secure: r.TLS != nil})
}
if limitQuery != "" {
limit, err = strconv.Atoi(limitQuery)
if err != nil { // if the 'limit' query can't be interpreted as a number, return err
return
}
}
return
}
// ServeListing returns a formatted view of 'requestedFilepath' contents.
func (b Browse) ServeListing(w http.ResponseWriter, r *http.Request, requestedFilepath http.File, bc *Config) (int, error) {
listing, containsIndex, err := b.loadDirectoryContents(requestedFilepath, r.URL.Path, bc)
if err != nil {
switch {
case os.IsPermission(err):
return http.StatusForbidden, err
case os.IsExist(err):
return http.StatusGone, err
default:
return http.StatusInternalServerError, err
}
}
if containsIndex && !b.IgnoreIndexes { // directory isn't browsable
return b.Next.ServeHTTP(w, r)
}
listing.Context = httpserver.Context{
Root: bc.Fs.Root,
Req: r,
URL: r.URL,
}
listing.User = bc.Variables
// Copy the query values into the Listing struct
var limit int
listing.Sort, listing.Order, limit, err = b.handleSortOrder(w, r, bc.PathScope)
if err != nil {
return http.StatusBadRequest, err
}
listing.applySort()
if limit > 0 && limit <= len(listing.Items) {
listing.Items = listing.Items[:limit]
listing.ItemsLimitedTo = limit
}
var buf *bytes.Buffer
acceptHeader := strings.ToLower(strings.Join(r.Header["Accept"], ","))
switch {
case strings.Contains(acceptHeader, "application/json"):
if buf, err = b.formatAsJSON(listing, bc); err != nil {
return http.StatusInternalServerError, err
}
w.Header().Set("Content-Type", "application/json; charset=utf-8")
default: // There's no 'application/json' in the 'Accept' header; browse normally
if buf, err = b.formatAsHTML(listing, bc); err != nil {
return http.StatusInternalServerError, err
}
w.Header().Set("Content-Type", "text/html; charset=utf-8")
}
buf.WriteTo(w)
return http.StatusOK, nil
}
func (b Browse) formatAsJSON(listing *Listing, bc *Config) (*bytes.Buffer, error) {
marsh, err := json.Marshal(listing.Items)
if err != nil {
return nil, err
}
buf := new(bytes.Buffer)
_, err = buf.Write(marsh)
return buf, err
}
func (b Browse) formatAsHTML(listing *Listing, bc *Config) (*bytes.Buffer, error) {
buf := new(bytes.Buffer)
err := bc.Template.Execute(buf, listing)
return buf, err
}
| 1 | 10,696 | This may not be what we want. If `rewrite` occurred before, `r.URL.Path` contains the result and we should use it. Otherwise the `rewrite` middleware will malfunction when combined with `browse`. | caddyserver-caddy | go
@@ -197,7 +197,7 @@ class Engine(object):
# :type exception: BaseException
exception = None
try:
- modules = [self.provisioning, self.aggregator] + self.reporters
+ modules = [self.aggregator, self.provisioning] + self.reporters
modules += self.services
for module in modules:
try: | 1 | """
Main BZT classes
Copyright 2015 BlazeMeter Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from abc import abstractmethod
import copy
import datetime
from distutils.version import LooseVersion
import json
import logging
import os
import shutil
import time
import traceback
from collections import namedtuple, defaultdict
from json import encoder
import psutil
import yaml
from yaml.representer import SafeRepresenter
from bzt import ManualShutdown, NormalShutdown, get_configs_dir
import bzt
from bzt.utils import load_class, to_json, BetterDict, ensure_is_dict, dehumanize_time, is_int
from bzt.six import iteritems, string_types, text_type, PY2, UserDict, configparser, parse, ProxyHandler, build_opener, \
install_opener, urlopen, request
class Engine(object):
"""
Core entity of the technology, used to coordinate whole process
:type reporters: list[Reporter]
:type services: list[EngineModule]
:type log: logging.Logger
:type aggregator: bzt.modules.aggregator.ConsolidatingAggregator
:type stopping_reason: BaseException
"""
def __init__(self, parent_logger):
"""
:type parent_logger: logging.Logger
"""
self.file_search_paths = []
self.services = []
self.__artifacts = []
self.reporters = []
self.artifacts_dir = None
self.log = parent_logger.getChild(self.__class__.__name__)
self.config = Configuration()
self.config.log = self.log.getChild(Configuration.__name__)
self.modules = {}
self.provisioning = Provisioning()
self.aggregator = EngineModule()
self.interrupted = False
self.check_interval = 1
self.stopping_reason = None
self.__disk_counters = None
self.__net_counters = None
self.__counters_ts = None
def configure(self, user_configs):
"""
Load configuration files
"""
self.log.info("Configuring...")
self._load_base_configs()
merged_config = self._load_user_configs(user_configs)
self._create_artifacts_dir()
dump = self.create_artifact("effective", "") # FIXME: not good since this file not exists
self.config.set_dump_file(dump)
self.config.dump()
merged_config.dump(self.create_artifact("merged", ".yml"), Configuration.YAML)
merged_config.dump(self.create_artifact("merged", ".json"), Configuration.JSON)
for config in user_configs:
self.existing_artifact(config)
self._load_included_configs()
self.config.merge({"version": bzt.VERSION})
self._set_up_proxy()
self._check_updates()
def prepare(self):
"""
Prepare engine for work, will call preparing of Provisioning and add
downstream EngineModule instances
"""
self.log.info("Preparing...")
self.__prepare_services()
self.__prepare_aggregator()
self.__prepare_provisioning()
self.__prepare_reporters()
interval = self.config.get("settings").get("check-interval", self.check_interval)
self.check_interval = dehumanize_time(interval)
self.config.dump()
def run(self):
"""
Run the job. Calls `startup`, does periodic `check`,
calls `shutdown` in any case
"""
self.log.info("Starting...")
try:
self._startup()
self._wait()
except NormalShutdown as exc:
self.log.debug("Normal shutdown called: %s", traceback.format_exc())
self.stopping_reason = exc if not self.stopping_reason else self.stopping_reason
except BaseException as exc:
self.stopping_reason = exc if not self.stopping_reason else self.stopping_reason
raise
finally:
self._shutdown()
def _startup(self):
modules = self.services + [self.aggregator] + self.reporters + [self.provisioning]
for _module in modules:
_module.startup()
self.config.dump()
def _wait(self):
"""
        Wait for modules to finish
:return:
"""
self.log.info("Waiting for finish...")
prev = time.time()
modules = []
if self.provisioning:
modules.append(self.provisioning)
if self.aggregator:
modules.append(self.aggregator)
modules += self.services + self.reporters
while not EngineModule.check_modules_list(modules):
now = time.time()
diff = now - prev
delay = self.check_interval - diff
self.log.debug("Iteration took %.3f sec, sleeping for %.3f sec...", diff, delay)
if delay > 0:
time.sleep(delay)
prev = time.time()
if self.interrupted:
raise ManualShutdown()
self.config.dump()
def _shutdown(self):
"""
Shutdown modules
:return:
"""
self.log.info("Shutting down...")
try:
exception = None
modules = [self.provisioning, self.aggregator]
modules += self.reporters
modules += self.services
for module in modules:
try:
module.shutdown()
except BaseException as exc:
self.log.error("Error while shutting down: %s", traceback.format_exc())
self.stopping_reason = exc if not self.stopping_reason else self.stopping_reason
if not exception:
exception = exc
if exception:
raise exception
except BaseException as exc:
self.log.error("Error while shutting down: %s", traceback.format_exc())
self.stopping_reason = exc if not self.stopping_reason else self.stopping_reason
raise
finally:
self.config.dump()
def post_process(self):
"""
Do post-run analysis and processing for the results.
"""
self.log.info("Post-processing...")
# :type exception: BaseException
exception = None
try:
modules = [self.provisioning, self.aggregator] + self.reporters
modules += self.services
for module in modules:
try:
module.post_process()
except KeyboardInterrupt as exc:
self.log.error("Shutdown: %s", exc)
self.stopping_reason = exc if not self.stopping_reason else self.stopping_reason
if not exception:
exception = exc
except BaseException as exc:
self.log.error("Error while post-processing: %s", traceback.format_exc())
self.stopping_reason = exc if not self.stopping_reason else self.stopping_reason
if not exception:
exception = exc
finally:
self.__finalize()
self.config.dump()
if exception:
self.log.debug("Exception in post-process: %s", exception)
self.stopping_reason = exception if not self.stopping_reason else self.stopping_reason
if isinstance(exception, KeyboardInterrupt):
raise exception
elif exception:
raise RuntimeError("Failed post-processing, see errors above")
def __finalize(self):
"""
Finalize the Engine. For example, copy artifacts
into artifacts directory
"""
pass
def create_artifact(self, prefix, suffix):
"""
Create new artifact in artifacts dir with given prefix and suffix
:type prefix: str
:type suffix: str
:return: Path to created file
:rtype: str
:raise ValueError: if no artifacts dir set
"""
if not self.artifacts_dir:
raise ValueError("Cannot create artifact: no artifacts_dir set up")
diff = ""
base = os.path.join(self.artifacts_dir, prefix)
while os.path.exists(base + diff + suffix) or base + diff + suffix in self.__artifacts:
if diff:
diff = "-%s" % (int(diff[1:]) + 1)
else:
diff = "-1"
filename = base + diff + suffix
self.log.debug("New artifact filename: %s", filename)
self.__artifacts.append(filename)
return filename
def existing_artifact(self, filename, move=False):
"""
Add existing artifact, it will be collected at the end of job. If
move=True, the original file will be deleted
:type filename: str
:type move: bool
"""
self.log.debug("Add existing artifact (move=%s): %s", move, filename)
if self.artifacts_dir is None:
self.log.warning("Artifacts dir has not been set, will not copy %s", filename)
return
newname = os.path.join(self.artifacts_dir, os.path.basename(filename))
self.__artifacts.append(newname)
if os.path.realpath(filename) == os.path.realpath(newname):
self.log.debug("No need to copy %s", filename)
return
if not os.path.exists(filename):
self.log.warning("Artifact file not exists: %s", filename)
return
if move:
self.log.debug("Moving %s to %s", filename, newname)
shutil.move(filename, newname)
else:
self.log.debug("Copying %s to %s", filename, newname)
shutil.copy(filename, newname)
def _create_artifacts_dir(self):
"""
Create directory for artifacts, directory name based on datetime.now()
"""
if self.artifacts_dir:
self.artifacts_dir = os.path.expanduser(self.artifacts_dir)
else:
default = "%Y-%m-%d_%H-%M-%S.%f"
artifacts_dir = self.config.get("settings").get("artifacts-dir", default)
self.artifacts_dir = datetime.datetime.now().strftime(artifacts_dir)
self.artifacts_dir = os.path.expanduser(self.artifacts_dir)
self.artifacts_dir = os.path.abspath(self.artifacts_dir)
self.log.info("Artifacts dir: %s", self.artifacts_dir)
if not os.path.isdir(self.artifacts_dir):
os.makedirs(self.artifacts_dir)
def __load_module(self, alias):
"""
Load module class by alias
:param alias: str
:return: class
"""
if alias in self.modules:
return self.modules[alias]
mod_conf = self.config.get('modules')
if alias not in mod_conf:
self.log.info("Possible module aliases: %s", [str(x) for x in sorted(mod_conf.keys())])
raise ValueError("Module alias '%s' not found in module settings" % alias)
settings = ensure_is_dict(mod_conf, alias, "class")
acopy = copy.deepcopy(settings)
BetterDict.traverse(acopy, Configuration.masq_sensitive)
self.log.debug("Module config: %s %s", alias, acopy)
clsname = settings.get('class', None)
if clsname is None:
raise ValueError("Class name not found in module settings: %s" % settings)
try:
self.modules[alias] = load_class(clsname)
if not issubclass(self.modules[alias], EngineModule):
raise TypeError("Module class does not inherit from EngineModule: %s" % clsname)
except BaseException:
self.log.debug("Failed to load class %s: %s", clsname, traceback.format_exc())
raise ValueError("Cannot load module '%s' with class %s" % (alias, clsname))
return self.modules[alias]
def instantiate_module(self, alias):
"""
Create new instance for module using its alias from module settings
section of config. Thus, to instantiate module it should be mentioned
in settings.
:type alias: str
:rtype: EngineModule
"""
classobj = self.__load_module(alias)
instance = classobj()
assert isinstance(instance, EngineModule)
instance.log = self.log.getChild(alias)
instance.engine = self
settings = self.config.get("modules")
instance.settings = settings.get(alias)
return instance
def find_file(self, filename): # TODO: use it everywhere when it makes sense
"""
Try to find file in search_path if it was specified. Helps finding files
in non-CLI environments or relative to config path
"""
if os.path.isfile(filename):
return filename
elif filename.lower().startswith("http://") or filename.lower().startswith("https://"):
downloader = request.FancyURLopener()
dest = self.create_artifact("downloaded", ".file") # TODO: make it smart to get name from URL if possible
self.log.info("Downloading %s into %s", filename, dest)
downloader.retrieve(filename, dest)
return dest
elif self.file_search_paths:
for dirname in self.file_search_paths:
location = os.path.join(dirname, os.path.basename(filename))
if os.path.isfile(location):
self.log.warning("Guessed location from search paths for file %s: %s", filename, location)
return location
else:
return filename
def _load_base_configs(self):
base_configs = []
machine_dir = get_configs_dir() # can't refactor machine_dir out - see setup.py
if os.path.isdir(machine_dir):
self.log.debug("Reading machine configs from: %s", machine_dir)
for cfile in sorted(os.listdir(machine_dir)):
fname = os.path.join(machine_dir, cfile)
if os.path.isfile(fname):
base_configs.append(fname)
else:
self.log.info("No machine configs dir: %s", machine_dir)
user_file = os.path.expanduser(os.path.join('~', ".bzt-rc"))
if os.path.isfile(user_file):
self.log.debug("Adding personal config: %s", user_file)
base_configs.append(user_file)
else:
self.log.info("No personal config: %s", user_file)
self.config.load(base_configs)
def _load_user_configs(self, user_configs):
"""
:type user_configs: list[str]
:rtype: Configuration
"""
user_config = Configuration()
user_config.load(user_configs, self.__config_loaded)
self.config.merge(user_config)
return user_config
def __config_loaded(self, config):
self.file_search_paths.append(os.path.dirname(os.path.realpath(config)))
def __prepare_provisioning(self):
"""
Instantiate provisioning class
"""
cls = self.config.get(Provisioning.PROV, "")
if not cls:
raise ValueError("Please configure provisioning settings")
self.provisioning = self.instantiate_module(cls)
self.provisioning.prepare()
def __prepare_reporters(self):
"""
Instantiate reporters, then prepare them in case they would like to interact
"""
reporting = self.config.get(Reporter.REP, [])
for index, reporter in enumerate(reporting):
reporter = ensure_is_dict(reporting, index, "module")
cls = reporter.get('module', '')
instance = self.instantiate_module(cls)
instance.parameters = reporter
if isinstance(instance, AggregatorListener):
self.aggregator.add_listener(instance) # NOTE: bad design, add_listener method is unknown
assert isinstance(instance, Reporter)
self.reporters.append(instance)
# prepare reporters
for module in self.reporters:
module.prepare()
def __prepare_services(self):
"""
Instantiate service modules, then prepare them
"""
services = self.config.get("services", [])
for index, config in enumerate(services):
config = ensure_is_dict(services, index, "module")
cls = config.get('module', '')
instance = self.instantiate_module(cls)
instance.parameters = config
self.services.append(instance)
for module in self.services:
module.prepare()
def __prepare_aggregator(self):
"""
Instantiate aggregators
:return:
"""
cls = self.config.get("settings").get("aggregator", "")
if not cls:
self.log.warning("Proceeding without aggregator, no results analysis")
self.aggregator = EngineModule()
else:
self.aggregator = self.instantiate_module(cls)
self.aggregator.prepare()
def engine_resource_stats(self):
"""
Get local resource stats
:return: namedtuple
"""
stats = namedtuple("ResourceStats", ('cpu', 'disk_usage', 'mem_usage',
'rx', 'tx', 'dru', 'dwu'))
rx_bytes, tx_bytes, dru, dwu = self.__get_resource_stats()
# TODO: measure and report check loop utilization
return stats(
cpu=psutil.cpu_percent(),
disk_usage=psutil.disk_usage(self.artifacts_dir).percent,
mem_usage=psutil.virtual_memory().percent,
rx=rx_bytes, tx=tx_bytes, dru=dru, dwu=dwu
)
def __get_resource_stats(self):
"""
Get network and disk counters
:return: tuple
"""
if not self.__counters_ts:
self.__disk_counters = psutil.disk_io_counters()
self.__net_counters = psutil.net_io_counters()
self.__counters_ts = datetime.datetime.now()
time.sleep(0.2) # small enough for human, big enough for machine
now = datetime.datetime.now()
interval = (now - self.__counters_ts).total_seconds()
net = psutil.net_io_counters()
tx_bytes = (net.bytes_sent - self.__net_counters.bytes_sent) / interval
rx_bytes = (net.bytes_recv - self.__net_counters.bytes_recv) / interval
self.__net_counters = net
disk = psutil.disk_io_counters()
dru = (disk.read_bytes - self.__disk_counters.read_bytes) / interval
dwu = (disk.write_bytes - self.__disk_counters.write_bytes) / interval
self.__disk_counters = disk
self.__counters_ts = now
return rx_bytes, tx_bytes, dru, dwu
def _set_up_proxy(self):
proxy_settings = self.config.get("settings").get("proxy")
if proxy_settings and proxy_settings.get("address"):
proxy_url = parse.urlsplit(proxy_settings.get("address"))
self.log.debug("Using proxy settings: %s", proxy_url)
username = proxy_settings.get("username")
pwd = proxy_settings.get("password")
if username and pwd:
proxy_uri = "%s://%s:%s@%s" % (proxy_url.scheme, username, pwd, proxy_url.netloc)
else:
proxy_uri = "%s://%s" % (proxy_url.scheme, proxy_url.netloc)
proxy_handler = ProxyHandler({"https": proxy_uri, "http": proxy_uri})
opener = build_opener(proxy_handler)
install_opener(opener)
def _check_updates(self):
if self.config.get("settings").get("check-updates", True):
try:
params = (bzt.VERSION, self.config.get("install-id", "N/A"))
req = "http://gettaurus.org/updates/?version=%s&installID=%s" % params
self.log.debug("Requesting updates info: %s", req)
response = urlopen(req, timeout=1)
resp = response.read()
if not isinstance(resp, str):
resp = resp.decode()
self.log.debug("Result: %s", resp)
data = json.loads(resp)
mine = LooseVersion(bzt.VERSION)
latest = LooseVersion(data['latest'])
if mine < latest or data['needsUpgrade']:
self.log.warning("There is newer version of Taurus %s available, consider upgrading", latest)
else:
self.log.debug("Installation is up-to-date")
except BaseException:
self.log.debug("Failed to check for updates: %s", traceback.format_exc())
self.log.debug("Failed to check for updates")
def _load_included_configs(self):
for config in self.config.get("included-configs", []):
fname = os.path.abspath(self.find_file(config))
self.existing_artifact(fname)
self.config.load([fname])
class Configuration(BetterDict):
"""
    Loads and merges JSON and YAML configs, applies .properties-like overrides,
    and dumps the effective config into files.
    The first config should not contain action prefixes.
"""
JSON = "JSON"
YAML = "YAML"
INI = "INI"
def __init__(self):
super(Configuration, self).__init__()
self.log = logging.getLogger('')
self.dump_filename = None
def load(self, configs, callback=None):
"""
Load and merge JSON/YAML files into current dict
:type configs: list[str]
"""
self.log.debug("Configs: %s", configs)
for config_file in configs:
config = self.__read_file(config_file)[0]
if isinstance(config, list):
self.__apply_overrides(config)
else:
self.merge(config)
if callback is not None:
callback(config_file)
def __read_file(self, filename):
"""
Read and parse config file
:param filename: str
:return: list
"""
with open(filename) as fds:
first_line = "#"
while first_line.startswith("#"):
first_line = fds.readline().strip()
fds.seek(0)
if first_line.startswith('---'):
self.log.debug("Reading %s as YAML", filename)
return yaml.load(fds), self.YAML
elif first_line.startswith('{'):
self.log.debug("Reading %s as JSON", filename)
return json.loads(fds.read()), self.JSON
elif first_line.startswith('['):
self.log.debug("Reading %s as INI", filename)
parser = configparser.RawConfigParser()
parser.read(filename)
res = []
for option in parser.options("BZT"):
res.append((option, parser.get("BZT", option)))
return res, self.INI
else:
raise ValueError("Cannot detect file format for %s" % filename)
def set_dump_file(self, filename):
"""
Set default file and format to be used by `dump` method
:type filename: str
"""
self.dump_filename = filename
def write(self, fds, fmt):
"""
Write config into opened file
:type fds: file
:type fmt: str
:raise ValueError:
"""
if fmt == self.JSON:
fds.write(to_json(self))
elif fmt == self.YAML:
yml = yaml.dump(self, default_flow_style=False,
explicit_start=True, canonical=False)
fds.write(yml)
elif fmt == self.INI:
fds.write("[DEFAULT]\n") # TODO: switch to write it with ConfigParser like done in CLI
fds.write(self.__dict_to_overrides(self))
else:
raise ValueError("Unknown dump format: %s" % fmt)
fds.write("\n")
@classmethod
def __dict_to_overrides(cls, obj, path=''):
"""
Converts dict into OVERRIDES format, which is properties-like format
:type path: str or unicode
:return:
"""
if isinstance(obj, dict):
result = ''
for key, val in iteritems(obj):
result += cls.__dict_to_overrides(val, '%s.%s' % (path, key))
return result
elif isinstance(obj, list):
result = ''
for key, val in enumerate(obj):
result += cls.__dict_to_overrides(val, '%s.%s' % (path, key))
return result
else:
return "%s=%s\n" % (path[1:], obj)
def dump(self, filename=None, fmt=None):
"""
Dump current state of dict into file. If no filename or format
specified, defaults are used
:type filename: str or NoneType
:type fmt: str or NoneType
:raise ValueError:
"""
if not filename:
filename = self.dump_filename
if filename:
if not fmt:
self.dump(filename + ".yml", self.YAML)
self.dump(filename + ".json", self.JSON)
return
acopy = copy.deepcopy(self)
BetterDict.traverse(acopy, self.masq_sensitive)
with open(filename, "w") as fhd:
self.log.debug("Dumping %s config into %s", fmt, filename)
acopy.write(fhd, fmt)
@staticmethod
def masq_sensitive(config):
"""
Remove sensitive data from config
"""
for key in config.keys():
if key in ('password', 'secret', 'token') and config[key]:
config[key] = '*' * 8
def __ensure_list_capacity(self, pointer, part, next_part=None):
"""
Extend pointer list to hold additional item
:type pointer: list
:type part: int
"""
if isinstance(pointer, list) and isinstance(part, int):
while len(pointer) <= part:
self.log.debug("Len %s less than %s", len(pointer), part)
if isinstance(next_part, int):
pointer.append([])
else:
pointer.append(BetterDict())
def __apply_single_override(self, name, value):
"""
Apply single override
:type name: str
:type value: str
"""
self.log.debug("Applying %s=%s", name, value)
parts = [(int(x) if is_int(x) else x) for x in name.split(".")]
pointer = self
for index, part in enumerate(parts[:-1]):
self.__ensure_list_capacity(pointer, part, parts[index + 1])
if isinstance(part, int):
pointer = pointer[part]
elif isinstance(parts[index + 1], int) and isinstance(pointer, dict):
pointer = pointer.get(part, [])
else:
pointer = pointer.get(part)
self.__ensure_list_capacity(pointer, parts[-1])
self.log.debug("Applying: [%s]=%s", parts[-1], value)
if isinstance(parts[-1], string_types) and parts[-1][0] == '^':
del pointer[parts[-1][1:]]
else:
if value.isdigit():
value = float(value)
if isinstance(pointer, list) and parts[-1] < 0:
pointer.append(value)
else:
pointer[parts[-1]] = value
def __apply_overrides(self, opts):
"""
Apply overrides
:type opts: dict
"""
for name, value in opts:
try:
self.__apply_single_override(name, value)
except:
self.log.debug("Failed override: %s", traceback.format_exc())
self.log.error("Failed to apply override %s=%s", name, value)
raise
self.dump()
yaml.add_representer(Configuration, SafeRepresenter.represent_dict)
yaml.add_representer(BetterDict, SafeRepresenter.represent_dict)
if PY2:
yaml.add_representer(text_type, SafeRepresenter.represent_unicode)
# dirty hack from http://stackoverflow.com/questions/1447287/format-floats-with-standard-json-module
encoder.FLOAT_REPR = lambda o: format(o, '.3g')
class EngineModule(object):
"""
Base class for any BZT engine module
:type engine: engine.Engine
:type settings: BetterDict
"""
def __init__(self):
self.log = logging.getLogger('')
self.engine = None
self.settings = BetterDict()
self.parameters = BetterDict()
def prepare(self):
"""
Preparation stage, at which configuration is being read, configs
and tools being prepared. All long preparations and checks should be
made here, to make `startup` stage as fast as possible.
"""
pass
def startup(self):
"""
Startup should be as fast as possible. Launch background processes,
do some API calls for initiation of actual work. Consider making all
checks and preparations on `prepare` stage.
"""
pass
def check(self):
"""
Check if work should be finished
:rtype: bool
:return: True if should be finished
"""
return False
def shutdown(self):
"""
Stop all processes that were started in `startup` stage.
Should also be as fast as possible, deferring all long operations to
`post_process` stage.
"""
pass
def post_process(self):
"""
Do all possibly long analysis and processing on run results
"""
pass
@staticmethod
def check_modules_list(modules, require_all=False):
"""
Helper for bulk check
:type modules: list
:param require_all:
:return:
"""
finished = require_all
for module in modules:
logging.debug("Checking %s", module)
if require_all:
finished &= module.check()
else:
finished |= module.check()
return finished
class Provisioning(EngineModule):
"""
Base class for any provisioning type. Provisioning is the way to
    get the resources that will run the job. For example, local provisioning
means using local machine to run executors, remote means using
remote machines with BZT API nodes on them.
:type executors: list[ScenarioExecutor]
"""
PROV = "provisioning"
def __init__(self):
super(Provisioning, self).__init__()
self.executors = []
def prepare(self):
"""
Preparation in provisioning begins with reading executions list
and instantiating ScenarioExecutor classes for them
"""
super(Provisioning, self).prepare()
esettings = self.engine.config.get("settings")
default_executor = esettings.get("default-executor", None)
if ScenarioExecutor.EXEC not in self.engine.config:
raise ValueError("No execution is configured")
executions = self.engine.config.get(ScenarioExecutor.EXEC)
if not isinstance(executions, list):
executions = [executions]
if not executions:
raise ValueError("No execution is configured")
for execution in executions:
executor = execution.get("executor", default_executor)
if not executor:
msg = "Cannot determine executor type and no default executor"
raise RuntimeError(msg)
instance = self.engine.instantiate_module(executor)
instance.provisioning = self
instance.execution = execution
self.executors.append(instance)
class FileLister(object):
"""
A mixin to get required files info from executor
"""
@abstractmethod
def resource_files(self):
"""
Get list of resource files
:rtype: list
"""
pass
class ScenarioExecutor(EngineModule):
"""
:type provisioning: engine.Provisioning
:type execution: BetterDict
"""
RAMP_UP = "ramp-up"
HOLD_FOR = "hold-for"
CONCURR = "concurrency"
THRPT = "throughput"
EXEC = "execution"
STEPS = "steps"
def __init__(self):
super(ScenarioExecutor, self).__init__()
self.provisioning = None
self.execution = BetterDict()
self.__scenario = None
def get_scenario(self):
"""
Returns scenario dict, either inlined, or referenced by alias
:return: DictOfDicts
"""
if self.__scenario is None:
scenario = self.execution.get('scenario', BetterDict())
if isinstance(scenario, string_types):
scenarios = self.engine.config.get("scenarios")
if scenario not in scenarios:
raise ValueError("Scenario not found in scenarios: %s" % scenario)
scenario = scenarios.get(scenario)
self.__scenario = Scenario(scenario)
elif isinstance(scenario, dict):
self.__scenario = Scenario(scenario)
else:
raise ValueError("Scenario not configured properly: %s" % scenario)
return self.__scenario
def get_load(self):
"""
Helper method to read load specification
:return:
"""
prov_type = self.engine.config.get(Provisioning.PROV, None)
ensure_is_dict(self.execution, ScenarioExecutor.THRPT, prov_type)
throughput = self.execution[ScenarioExecutor.THRPT].get(prov_type, 0)
ensure_is_dict(self.execution, ScenarioExecutor.CONCURR, prov_type)
concurrency = self.execution[ScenarioExecutor.CONCURR].get(prov_type, 0)
iterations = self.execution.get("iterations", None)
ramp_up = self.execution.get(ScenarioExecutor.RAMP_UP, None)
steps = self.execution.get(ScenarioExecutor.STEPS, None)
hold = dehumanize_time(self.execution.get(ScenarioExecutor.HOLD_FOR, 0))
if ramp_up is None:
ramp_up = None
duration = hold
else:
ramp_up = dehumanize_time(ramp_up)
duration = hold + ramp_up
if duration and not iterations:
iterations = 0 # which means infinite
res = namedtuple("LoadSpec",
('concurrency', "throughput", 'ramp_up', 'hold', 'iterations', 'duration', 'steps'))
return res(concurrency=concurrency, ramp_up=ramp_up,
throughput=throughput, hold=hold, iterations=iterations,
duration=duration, steps=steps)
def get_resource_files(self):
"""
Return resource files list
"""
files_list = self.execution.get("files", [])
if isinstance(self, FileLister):
files_list.extend(self.resource_files())
return files_list
def __repr__(self):
return "%s-%s" % (self.execution.get("executor", None), id(self))
class Reporter(EngineModule):
"""
This type of modules is responsible for
in-test and post-test results analysis
"""
REP = "reporting"
class Scenario(UserDict, object):
"""
Test scenario entity
"""
SCRIPT = "script"
def __init__(self, scenario=None):
super(Scenario, self).__init__()
self.data = scenario
def get(self, key, default=defaultdict):
"""
:param key:
:type default: object
:return:
"""
return self.data.get(key, default)
def __getitem__(self, item):
return self.data[item]
def __setitem__(self, key, value):
self.data[key] = value
def __iter__(self):
for item in self.data:
yield item
def __len__(self):
return len(self.data)
def __delitem__(self, key):
return self.data.pop(key)
def get_headers(self):
"""
Returns global headers
:rtype: dict[str,str]
"""
scenario = self
headers = scenario.get("headers")
return headers
def get_requests(self):
"""
Generator object to read requests
"""
scenario = self
requests = scenario.get("requests", [])
for key in range(len(requests)):
req = ensure_is_dict(requests, key, "url")
res = namedtuple("HTTPReq",
('url', 'label', 'method', 'headers', 'timeout', 'think_time', 'config', "body"))
url = req["url"]
label = req.get("label", url)
method = req.get("method", "GET")
headers = req.get("headers", {})
timeout = req.get("timeout", None)
think_time = req.get("think-time", None)
body = None
bodyfile = req.get("body-file", None)
if bodyfile:
with open(bodyfile) as fhd:
body = fhd.read()
body = req.get("body", body)
yield res(config=req, label=label,
url=url, method=method, headers=headers,
timeout=timeout, think_time=think_time, body=body)
class AggregatorListener(object):
"""
Mixin for listeners of aggregator data
"""
@abstractmethod
def aggregated_second(self, data):
"""
Notification about new data point
:param data: bzt.modules.reporting.DataPoint
"""
pass
def finalize(self):
"""
This method is called at the end of run
to close open file descriptors etc.
"""
pass
| 1 | 13,547 | Why change this? | Blazemeter-taurus | py |
@@ -5,9 +5,9 @@ declare(strict_types=1);
namespace Shopsys\FrameworkBundle\Component\Error;
use Psr\Log\LoggerInterface;
-use Symfony\Component\HttpKernel\EventListener\ExceptionListener;
+use Symfony\Component\HttpKernel\EventListener\ErrorListener;
-class NotLogFakeHttpExceptionsExceptionListener extends ExceptionListener
+class NotLogFakeHttpExceptionsExceptionListener extends ErrorListener
{
/**
* @var \Shopsys\FrameworkBundle\Component\Error\ErrorIdProvider|null | 1 | <?php
declare(strict_types=1);
namespace Shopsys\FrameworkBundle\Component\Error;
use Psr\Log\LoggerInterface;
use Symfony\Component\HttpKernel\EventListener\ExceptionListener;
class NotLogFakeHttpExceptionsExceptionListener extends ExceptionListener
{
/**
* @var \Shopsys\FrameworkBundle\Component\Error\ErrorIdProvider|null
*/
protected $errorIdProvider;
/**
* @param mixed $controller
* @param \Psr\Log\LoggerInterface|null $logger
* @param bool $debug
* @param \Shopsys\FrameworkBundle\Component\Error\ErrorIdProvider|null $errorIdProvider
*/
public function __construct($controller, ?LoggerInterface $logger = null, bool $debug = false, ?ErrorIdProvider $errorIdProvider = null)
{
parent::__construct($controller, $logger, $debug);
$this->errorIdProvider = $errorIdProvider;
}
/**
* @inheritDoc
*/
protected function logException(\Exception $exception, $message)
{
if (!$exception instanceof \Shopsys\FrameworkBundle\Component\Error\Exception\FakeHttpException) {
$message .= sprintf(' Error ID: %s', $this->errorIdProvider->getErrorId());
parent::logException($exception, $message);
}
}
/**
* @required
* @param \Shopsys\FrameworkBundle\Component\Error\ErrorIdProvider $errorIdProvider
* @internal This function will be replaced by constructor injection in next major
*/
public function setErrorIdProvider(ErrorIdProvider $errorIdProvider): void
{
if ($this->errorIdProvider && $this->errorIdProvider !== $errorIdProvider) {
throw new \BadMethodCallException(
sprintf('Method "%s" has been already called and cannot be called multiple times.', __METHOD__)
);
}
if (!$this->errorIdProvider) {
@trigger_error(
sprintf(
'The %s() method is deprecated and will be removed in the next major. Use the constructor injection instead.',
__METHOD__
),
E_USER_DEPRECATED
);
$this->errorIdProvider = $errorIdProvider;
}
}
}
| 1 | 21,569 | _nitpick_ Isn't it now actually NotLogFakeHttpExceptions**Error**Listener ? | shopsys-shopsys | php |
@@ -82,6 +82,7 @@ namespace AutoRest.AzureResourceSchema
JsonSchema resourceDefinition = new JsonSchema();
resourceDefinition.JsonType = "object";
+ resourceDefinition.ResourceType = resourceType;
resourceDefinition.AddProperty("type", JsonSchema.CreateStringEnum(resourceType), true);
resourceDefinition.AddProperty("apiVersion", JsonSchema.CreateStringEnum(apiVersion), true);
| 1 | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.Globalization;
using System.Linq;
using AutoRest.Core.Model;
namespace AutoRest.AzureResourceSchema
{
/// <summary>
/// The ResourceSchemaParser class is responsible for converting a ServiceClient object into a
/// ResourceSchemaModel.
/// </summary>
public static class ResourceSchemaParser
{
private const string resourceMethodPrefix = "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/";
/// <summary>
/// Parse a ResourceSchemaModel from the provided ServiceClient.
/// </summary>
/// <param name="serviceClient"></param>
/// <returns></returns>
public static IDictionary<string, ResourceSchema> Parse(CodeModel serviceClient)
{
if (serviceClient == null)
{
throw new ArgumentNullException(nameof(serviceClient));
}
string apiVersion = serviceClient.ApiVersion;
if (string.IsNullOrWhiteSpace(apiVersion))
{
throw new ArgumentException("No API version is provided in the swagger document.");
}
Dictionary<string, ResourceSchema> resourceSchemas = new Dictionary<string, ResourceSchema>();
foreach (Method method in serviceClient.Methods)
{
if (method.HttpMethod != HttpMethod.Put ||
string.IsNullOrWhiteSpace(method.Url) ||
!method.Url.Value.StartsWith(resourceMethodPrefix, StringComparison.OrdinalIgnoreCase) ||
!method.Url.Value.EndsWith("}", StringComparison.OrdinalIgnoreCase))
{
continue;
}
string afterPrefix = method.Url.Value.Substring(resourceMethodPrefix.Length);
int forwardSlashIndexAfterProvider = afterPrefix.IndexOf('/');
string resourceProvider = afterPrefix.Substring(0, forwardSlashIndexAfterProvider);
if (IsPathVariable(resourceProvider))
{
// If the resourceProvider is a path variable, such as {someValue}, then this
// is not a create resource method. Skip it.
continue;
}
ResourceSchema resourceSchema;
if (!resourceSchemas.ContainsKey(resourceProvider))
{
resourceSchema = new ResourceSchema();
resourceSchema.Id = string.Format(CultureInfo.InvariantCulture, "http://schema.management.azure.com/schemas/{0}/{1}.json#", apiVersion, resourceProvider);
resourceSchema.Title = resourceProvider;
resourceSchema.Description = resourceProvider.Replace('.', ' ') + " Resource Types";
resourceSchema.Schema = "http://json-schema.org/draft-04/schema#";
resourceSchemas.Add(resourceProvider, resourceSchema);
}
else
{
resourceSchema = resourceSchemas[resourceProvider];
}
string methodUrlPathAfterProvider = afterPrefix.Substring(forwardSlashIndexAfterProvider + 1);
string[] resourceTypes = ParseResourceTypes(resourceProvider, methodUrlPathAfterProvider, method);
foreach (string resourceType in resourceTypes)
{
JsonSchema resourceDefinition = new JsonSchema();
resourceDefinition.JsonType = "object";
resourceDefinition.AddProperty("type", JsonSchema.CreateStringEnum(resourceType), true);
resourceDefinition.AddProperty("apiVersion", JsonSchema.CreateStringEnum(apiVersion), true);
if (method.Body != null)
{
CompositeType body = method.Body.ModelType as CompositeType;
// Debug.Assert(body != null, "The create resource method's body must be a CompositeType and cannot be null.");
if (body != null)
{
foreach (Property property in body.ComposedProperties)
{
if (!resourceDefinition.Properties.Keys.Contains(property.Name.RawValue))
{
JsonSchema propertyDefinition = ParseType(property, property.ModelType, resourceSchema.Definitions, serviceClient.ModelTypes);
if (propertyDefinition != null)
{
resourceDefinition.AddProperty(property.Name.RawValue, propertyDefinition, property.IsRequired || property.Name.RawValue == "properties");
}
}
}
}
}
resourceDefinition.Description = resourceType;
string resourcePropertyName = resourceType.Substring(resourceProvider.Length + 1).Replace('/', '_');
Debug.Assert(!resourceSchema.ResourceDefinitions.ContainsKey(resourcePropertyName));
resourceSchema.AddResourceDefinition(resourcePropertyName, resourceDefinition);
}
}
// This loop adds child resource schemas to their parent resource schemas. We can't do
// this until we're done adding all resources as top level resources, though, because
// it's possible that we will parse a child resource before we parse the parent
// resource.
foreach (ResourceSchema resourceSchema in resourceSchemas.Values)
{
// By iterating over the reverse order of the defined resource definitions, I'm
// counting on the resource definitions being in sorted order. That way I'm
// guaranteed to visit child resource definitions before I visit their parent
// resource definitions. By doing this, I've guaranteed that grandchildren resource
// definitions will be added to their grandparent (and beyond) ancestor
// resource definitions.
foreach (string resourcePropertyName in resourceSchema.ResourceDefinitions.Keys.Reverse())
{
JsonSchema resourceDefinition = resourceSchema.ResourceDefinitions[resourcePropertyName];
string resourceType = resourceDefinition.ResourceType;
int lastSlashIndex = resourceType.LastIndexOf('/');
string parentResourceType = resourceType.Substring(0, lastSlashIndex);
JsonSchema parentResourceDefinition = resourceSchema.GetResourceDefinitionByResourceType(parentResourceType);
if (parentResourceDefinition != null)
{
string childResourceType = resourceType.Substring(lastSlashIndex + 1);
JsonSchema childResourceDefinition = resourceDefinition.Clone();
childResourceDefinition.ResourceType = childResourceType;
string childResourceDefinitionPropertyName = string.Join("_", resourcePropertyName, "childResource");
resourceSchema.AddDefinition(childResourceDefinitionPropertyName, childResourceDefinition);
JsonSchema childResources;
if (parentResourceDefinition.Properties.ContainsKey("resources"))
{
childResources = parentResourceDefinition.Properties["resources"];
}
else
{
childResources = new JsonSchema()
{
JsonType = "array",
Items = new JsonSchema()
};
parentResourceDefinition.AddProperty("resources", childResources);
}
childResources.Items.AddOneOf(new JsonSchema()
{
Ref = "#/definitions/" + childResourceDefinitionPropertyName,
});
}
}
}
return resourceSchemas;
}
private static string[] ParseResourceTypes(string resourceProvider, string methodUrlPathAfterProvider, Method method)
{
// Gather the list of resource types defined by this method url. Usually this will
// result in only one resource type, but if the method url contains an enumerated
// resource type parameter, then multiple resource types could be declared from a
// single method url.
List<string> resourceTypes = new List<string>();
resourceTypes.Add(resourceProvider);
string[] pathSegments = methodUrlPathAfterProvider.Split(new char[] { '/' });
for (int i = 0; i < pathSegments.Length; i += 2)
{
string pathSegment = pathSegments[i];
if (IsPathVariable(pathSegment))
{
string parameterName = pathSegment.Substring(1, pathSegment.Length - 2);
Parameter parameter = method.Parameters.FirstOrDefault(methodParameter => methodParameter.Name.RawValue == parameterName);
if (parameter == null)
{
throw new ArgumentException(string.Format(CultureInfo.CurrentCulture, "Found undefined parameter reference {0} in create resource method \"{1}/{2}/{3}\".", pathSegment, resourceMethodPrefix, resourceProvider, methodUrlPathAfterProvider));
}
if (parameter.ModelType == null)
{
throw new ArgumentException(string.Format(CultureInfo.CurrentCulture, "Parameter reference {0} has no defined type.", pathSegment));
}
EnumType parameterType = parameter.ModelType as EnumType;
if (parameterType == null)
{
// If we encounter a parameter in the URL that isn't an enumeration, then
// we can't create a resource from this URL.
resourceTypes.Clear();
break;
}
if (parameterType.Values == null || parameterType.Values.Count == 0)
{
string errorMessage = string.Format(CultureInfo.CurrentCulture, "Parameter reference {0} is defined as an enum type, but it doesn't have any specified values.", pathSegment);
throw new ArgumentException(errorMessage);
}
List<string> newResourceTypes = new List<string>();
foreach (string resourceType in resourceTypes)
{
foreach (EnumValue parameterValue in parameterType.Values)
{
newResourceTypes.Add(string.Join("/", resourceType, parameterValue.Name));
}
}
resourceTypes = newResourceTypes;
}
else
{
for (int j = 0; j < resourceTypes.Count; ++j)
{
resourceTypes[j] = string.Join("/", resourceTypes[j], pathSegment);
}
}
}
return resourceTypes.ToArray();
}
private static JsonSchema ParseType(Property property, IModelType type, IDictionary<string, JsonSchema> definitions, IEnumerable<CompositeType> modelTypes)
{
JsonSchema result = null;
if (property == null || !property.IsReadOnly)
{
// A schema that matches a JSON object with specific properties, such as
// { "name": { "type": "string" }, "age": { "type": "number" } }
CompositeType compositeType = type as CompositeType;
if (compositeType != null)
{
result = ParseCompositeType(property, compositeType, definitions, modelTypes);
}
else
{
// A schema that matches a "dictionary" JSON object, such as
// { "additionalProperties": { "type": "string" } }
DictionaryType dictionaryType = type as DictionaryType;
if (dictionaryType != null)
{
result = ParseDictionaryType(property, dictionaryType, definitions, modelTypes);
}
else
{
// A schema that matches a single value from a given set of values, such as
// { "enum": [ "a", "b" ] }
EnumType enumType = type as EnumType;
if (enumType != null)
{
result = ParseEnumType(property, enumType);
}
else
{
// A schema that matches simple values, such as { "type": "number" }
PrimaryType primaryType = type as PrimaryType;
if (primaryType != null)
{
result = ParsePrimaryType(property, primaryType);
}
else
{
// A schema that matches an array of values, such as
// { "items": { "type": "number" } }
SequenceType sequenceType = type as SequenceType;
if (sequenceType != null)
{
result = ParseSequenceType(property, sequenceType, definitions, modelTypes);
}
else
{
Debug.Fail("Unrecognized property type: " + type.GetType());
}
}
}
}
}
}
return result;
}
private static JsonSchema ParseCompositeType(Property property, CompositeType compositeType, IDictionary<string, JsonSchema> definitions, IEnumerable<CompositeType> modelTypes)
{
string definitionName = compositeType.Name.RawValue;
if (!definitions.ContainsKey(definitionName))
{
JsonSchema definition = new JsonSchema()
{
JsonType = "object",
Description = compositeType.Documentation
};
// This definition must be added to the definition map before we start parsing
// its properties because its properties may recursively reference back to this
// definition.
definitions.Add(definitionName, definition);
foreach (Property subProperty in compositeType.ComposedProperties)
{
JsonSchema subPropertyDefinition = ParseType(subProperty, subProperty.ModelType, definitions, modelTypes);
if (subPropertyDefinition != null)
{
definition.AddProperty(subProperty.Name.RawValue, subPropertyDefinition, subProperty.IsRequired);
}
}
string discriminatorPropertyName = compositeType.PolymorphicDiscriminator;
if (!string.IsNullOrWhiteSpace(discriminatorPropertyName))
{
CompositeType[] subTypes = modelTypes.Where(modelType => modelType.BaseModelType == compositeType).ToArray();
if (subTypes != null && subTypes.Length > 0)
{
JsonSchema discriminatorDefinition = new JsonSchema()
{
JsonType = "string"
};
if (subTypes.Length == 1)
{
CompositeType subType = subTypes[0];
if (subType != null)
{
foreach (Property subTypeProperty in subType.Properties)
{
JsonSchema subTypePropertyDefinition = ParseType(subTypeProperty, subTypeProperty.ModelType, definitions, modelTypes);
if (subTypePropertyDefinition != null)
{
definition.AddProperty(subTypeProperty.Name.RawValue, subTypePropertyDefinition, subTypeProperty.IsRequired);
}
}
const string discriminatorValueExtensionName = "x-ms-discriminator-value";
if (subType.ComposedExtensions.ContainsKey(discriminatorValueExtensionName))
{
string discriminatorValue = subType.ComposedExtensions[discriminatorValueExtensionName] as string;
if (!string.IsNullOrWhiteSpace(discriminatorValue))
{
discriminatorDefinition.AddEnum(discriminatorValue);
}
}
}
definition.AddProperty(discriminatorPropertyName, discriminatorDefinition);
}
else
{
string errorMessage = string.Format(
CultureInfo.CurrentCulture,
"Multiple sub-types ({0}) of a polymorphic discriminated type ({1}) are not currently supported.",
string.Join(", ", subTypes.Select(subType => subType.Name.RawValue)),
compositeType.Name.RawValue);
throw new NotSupportedException(errorMessage);
}
}
}
}
JsonSchema result = new JsonSchema()
{
Ref = "#/definitions/" + definitionName
};
if (property != null)
{
result.Description = RemovePossibleValuesFromDescription(property.Documentation);
}
return result;
}
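As the comment inside ParseCompositeType notes, the composite definition is registered in the definitions map before its properties are parsed, so a property that refers back to its own type finds the entry and stops the recursion. A small Python sketch of the same guard, using hypothetical model data:

    def composite_to_schema(name, models, definitions):
        """Register the definition first so self-referential models terminate."""
        if name not in definitions:
            definition = {"type": "object", "properties": {}}
            definitions[name] = definition                      # add before recursing
            for prop, prop_type in models[name].items():
                if prop_type in models:
                    definition["properties"][prop] = composite_to_schema(
                        prop_type, models, definitions)
                else:
                    definition["properties"][prop] = {"type": prop_type}
        return {"$ref": "#/definitions/" + name}

    # models = {"Node": {"value": "string", "next": "Node"}}
    # composite_to_schema("Node", models, {}) terminates: "Node" is already
    # registered when the recursive reference is reached.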
private static JsonSchema ParseDictionaryType(Property property, DictionaryType dictionaryType, IDictionary<string, JsonSchema> definitions, IEnumerable<CompositeType> modelTypes)
{
JsonSchema result = new JsonSchema()
{
JsonType = "object",
AdditionalProperties = ParseType(null, dictionaryType.ValueType, definitions, modelTypes)
};
if (property != null)
{
result.Description = RemovePossibleValuesFromDescription(property.Documentation);
}
return result;
}
private static JsonSchema ParseEnumType(Property property, EnumType enumType)
{
JsonSchema result = new JsonSchema()
{
JsonType = "string"
};
foreach (EnumValue enumValue in enumType.Values)
{
result.AddEnum(enumValue.Name);
}
if (property != null)
{
result.Description = RemovePossibleValuesFromDescription(property.Documentation);
}
return result;
}
private static JsonSchema ParsePrimaryType(Property property, PrimaryType primaryType)
{
JsonSchema result = new JsonSchema()
{
Format = primaryType.Format
};
switch (primaryType.KnownPrimaryType)
{
case KnownPrimaryType.Boolean:
result.JsonType = "boolean";
break;
case KnownPrimaryType.Int:
case KnownPrimaryType.Long:
result.JsonType = "integer";
break;
case KnownPrimaryType.Double:
result.JsonType = "number";
break;
case KnownPrimaryType.Object:
result.JsonType = "object";
break;
case KnownPrimaryType.DateTime:
case KnownPrimaryType.String:
case KnownPrimaryType.TimeSpan:
result.JsonType = "string";
break;
default:
Debug.Assert(false, "Unrecognized known property type: " + primaryType.KnownPrimaryType);
break;
}
if (property != null)
{
result.Description = property.Documentation;
if (property.DefaultValue != null)
{
result.AddEnum(property.DefaultValue);
}
if (property.Constraints.Count > 0)
{
foreach (KeyValuePair<Constraint, string> entry in property.Constraints)
{
switch (entry.Key)
{
case Constraint.InclusiveMinimum:
Debug.Assert(result.JsonType == "integer" || result.JsonType == "number", "Expected to only find an InclusiveMinimum constraint on an integer or number property.");
result.Minimum = Double.Parse(entry.Value, CultureInfo.CurrentCulture);
break;
case Constraint.InclusiveMaximum:
Debug.Assert(result.JsonType == "integer" || result.JsonType == "number", "Expected to only find an InclusiveMaximum constraint on an integer or number property.");
result.Maximum = Double.Parse(entry.Value, CultureInfo.CurrentCulture);
break;
case Constraint.Pattern:
Debug.Assert(result.JsonType == "string", "Expected to only find a Pattern constraint on a string property.");
result.Pattern = entry.Value;
break;
case Constraint.MinLength:
Debug.Assert(result.JsonType == "string" || result.JsonType == "array", "Expected to only find a MinLength constraint on a string or array property.");
result.MinLength = Double.Parse(entry.Value, CultureInfo.CurrentCulture);
break;
case Constraint.MaxLength:
Debug.Assert(result.JsonType == "string" || result.JsonType == "array", "Expected to only find a MaxLength constraint on a string or array property.");
result.MaxLength = Double.Parse(entry.Value, CultureInfo.CurrentCulture);
break;
default:
Debug.Fail("Unrecognized property Constraint: " + entry.Key);
break;
}
}
}
}
return result;
}
private static JsonSchema ParseSequenceType(Property property, SequenceType sequenceType, IDictionary<string, JsonSchema> definitions, IEnumerable<CompositeType> modelTypes)
{
JsonSchema result = new JsonSchema()
{
JsonType = "array",
Items = ParseType(null, sequenceType.ElementType, definitions, modelTypes)
};
if (property != null)
{
result.Description = RemovePossibleValuesFromDescription(property.Documentation);
}
return result;
}
/// <summary>
/// AutoRest has no way of indicating that you don't want Enum properties to have a
/// "Possible values include: ..." string appended at the end of their descriptions. This
/// function removes the "Possible values" suffix if it exists.
/// </summary>
/// <param name="description">The description to remove the "Possible values" suffix from.</param>
        /// <returns>The description with the "Possible values" suffix removed, if it was present.</returns>
private static string RemovePossibleValuesFromDescription(string description)
{
if (!string.IsNullOrEmpty(description))
{
int possibleValuesIndex = description.IndexOf("Possible values include: ", StringComparison.OrdinalIgnoreCase);
if (possibleValuesIndex > -1)
{
description = description.Substring(0, possibleValuesIndex).TrimEnd();
}
}
return description;
}
private static bool IsPathVariable(string pathSegment)
{
Debug.Assert(pathSegment != null);
return pathSegment.StartsWith("{", StringComparison.Ordinal) && pathSegment.EndsWith("}", StringComparison.Ordinal);
}
}
}
| 1 | 23,284 | Why not just look for the "type" property instead of creating a ResourceType property? | Azure-autorest | java |
@@ -360,7 +360,8 @@ func (bc *blockchain) context(ctx context.Context, tipInfoFlag bool) (context.Co
protocol.WithBlockchainCtx(
ctx,
protocol.BlockchainCtx{
- Tip: tip,
+ Tip: tip,
+ ChainID: config.ChainID(),
},
),
bc.config.Genesis, | 1 | // Copyright (c) 2019 IoTeX Foundation
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package blockchain
import (
"context"
"strconv"
"sync"
"sync/atomic"
"time"
"github.com/facebookgo/clock"
"github.com/iotexproject/go-pkgs/hash"
"github.com/iotexproject/iotex-address/address"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"go.uber.org/zap"
"github.com/iotexproject/iotex-core/action"
"github.com/iotexproject/iotex-core/action/protocol"
"github.com/iotexproject/iotex-core/blockchain/block"
"github.com/iotexproject/iotex-core/blockchain/blockdao"
"github.com/iotexproject/iotex-core/blockchain/filedao"
"github.com/iotexproject/iotex-core/blockchain/genesis"
"github.com/iotexproject/iotex-core/config"
"github.com/iotexproject/iotex-core/pkg/lifecycle"
"github.com/iotexproject/iotex-core/pkg/log"
"github.com/iotexproject/iotex-core/pkg/prometheustimer"
)
var (
blockMtc = prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Name: "iotex_block_metrics",
Help: "Block metrics.",
},
[]string{"type"},
)
// ErrInvalidTipHeight is the error returned when the block height is not valid
ErrInvalidTipHeight = errors.New("invalid tip height")
// ErrInvalidBlock is the error returned when the block is not valid
ErrInvalidBlock = errors.New("failed to validate the block")
// ErrActionNonce is the error when the nonce of the action is wrong
ErrActionNonce = errors.New("invalid action nonce")
// ErrInsufficientGas indicates the error of insufficient gas value for data storage
ErrInsufficientGas = errors.New("insufficient intrinsic gas value")
// ErrBalance indicates the error of balance
ErrBalance = errors.New("invalid balance")
)
func init() {
prometheus.MustRegister(blockMtc)
}
type (
// Blockchain represents the blockchain data structure and hosts the APIs to access it
Blockchain interface {
lifecycle.StartStopper
// For exposing blockchain states
// BlockHeaderByHeight return block header by height
BlockHeaderByHeight(height uint64) (*block.Header, error)
// BlockFooterByHeight return block footer by height
BlockFooterByHeight(height uint64) (*block.Footer, error)
// ChainID returns the chain ID
ChainID() uint32
		// ChainAddress returns the chain address on the parent chain; the root chain returns an empty string.
ChainAddress() string
// TipHash returns tip block's hash
TipHash() hash.Hash256
// TipHeight returns tip block's height
TipHeight() uint64
// Genesis returns the genesis
Genesis() genesis.Genesis
// Context returns current context
Context(context.Context) (context.Context, error)
// For block operations
// MintNewBlock creates a new block with given actions
// Note: the coinbase transfer will be added to the given transfers when minting a new block
MintNewBlock(timestamp time.Time) (*block.Block, error)
// CommitBlock validates and appends a block to the chain
CommitBlock(blk *block.Block) error
// ValidateBlock validates a new block before adding it to the blockchain
ValidateBlock(blk *block.Block) error
		// AddSubscriber registers a subscriber that is notified of every produced block
AddSubscriber(BlockCreationSubscriber) error
		// RemoveSubscriber unregisters a previously added block subscriber
RemoveSubscriber(BlockCreationSubscriber) error
}
// BlockBuilderFactory is the factory interface of block builder
BlockBuilderFactory interface {
// NewBlockBuilder creates block builder
NewBlockBuilder(context.Context, func(action.Envelope) (action.SealedEnvelope, error)) (*block.Builder, error)
}
)
// Productivity returns the map of the number of blocks produced per delegate in the given epoch
func Productivity(bc Blockchain, startHeight uint64, endHeight uint64) (map[string]uint64, error) {
stats := make(map[string]uint64)
for i := startHeight; i <= endHeight; i++ {
header, err := bc.BlockHeaderByHeight(i)
if err != nil {
return nil, err
}
producer := header.ProducerAddress()
stats[producer]++
}
return stats, nil
}
// blockchain implements the Blockchain interface
type blockchain struct {
mu sync.RWMutex // mutex to protect utk, tipHeight and tipHash
dao blockdao.BlockDAO
config config.Config
blockValidator block.Validator
lifecycle lifecycle.Lifecycle
clk clock.Clock
pubSubManager PubSubManager
timerFactory *prometheustimer.TimerFactory
// used by account-based model
bbf BlockBuilderFactory
}
// Option sets blockchain construction parameter
type Option func(*blockchain, config.Config) error
// BlockValidatorOption sets block validator
func BlockValidatorOption(blockValidator block.Validator) Option {
return func(bc *blockchain, cfg config.Config) error {
bc.blockValidator = blockValidator
return nil
}
}
// BoltDBDaoOption sets blockchain's dao with BoltDB from config.Chain.ChainDBPath
func BoltDBDaoOption(indexers ...blockdao.BlockIndexer) Option {
return func(bc *blockchain, cfg config.Config) error {
if bc.dao != nil {
return nil
}
cfg.DB.DbPath = cfg.Chain.ChainDBPath // TODO: remove this after moving TrieDBPath from cfg.Chain to cfg.DB
cfg.DB.CompressLegacy = cfg.Chain.CompressBlock
bc.dao = blockdao.NewBlockDAO(indexers, cfg.DB)
return nil
}
}
// InMemDaoOption sets blockchain's dao with MemKVStore
func InMemDaoOption(indexers ...blockdao.BlockIndexer) Option {
return func(bc *blockchain, cfg config.Config) error {
if bc.dao != nil {
return nil
}
bc.dao = blockdao.NewBlockDAOInMemForTest(indexers)
return nil
}
}
// ClockOption overrides the default clock
func ClockOption(clk clock.Clock) Option {
return func(bc *blockchain, conf config.Config) error {
bc.clk = clk
return nil
}
}
// NewBlockchain creates a new blockchain and DB instance
// TODO: replace sf with blockbuilderfactory
func NewBlockchain(cfg config.Config, dao blockdao.BlockDAO, bbf BlockBuilderFactory, opts ...Option) Blockchain {
// create the Blockchain
chain := &blockchain{
config: cfg,
dao: dao,
bbf: bbf,
clk: clock.New(),
pubSubManager: NewPubSub(cfg.BlockSync.BufferSize),
}
for _, opt := range opts {
if err := opt(chain, cfg); err != nil {
log.S().Panicf("Failed to execute blockchain creation option %p: %v", opt, err)
}
}
timerFactory, err := prometheustimer.New(
"iotex_blockchain_perf",
"Performance of blockchain module",
[]string{"topic", "chainID"},
[]string{"default", strconv.FormatUint(uint64(cfg.Chain.ID), 10)},
)
if err != nil {
log.L().Panic("Failed to generate prometheus timer factory.", zap.Error(err))
}
chain.timerFactory = timerFactory
if chain.dao == nil {
log.L().Panic("blockdao is nil")
}
chain.lifecycle.Add(chain.dao)
return chain
}
func (bc *blockchain) ChainID() uint32 {
return atomic.LoadUint32(&bc.config.Chain.ID)
}
func (bc *blockchain) ChainAddress() string {
return bc.config.Chain.Address
}
// Start starts the blockchain
func (bc *blockchain) Start(ctx context.Context) error {
bc.mu.Lock()
defer bc.mu.Unlock()
// pass registry to be used by state factory's initialization
ctx, err := bc.context(ctx, false)
if err != nil {
return err
}
return bc.lifecycle.OnStart(ctx)
}
// Stop stops the blockchain.
func (bc *blockchain) Stop(ctx context.Context) error {
bc.mu.Lock()
defer bc.mu.Unlock()
return bc.lifecycle.OnStop(ctx)
}
func (bc *blockchain) BlockHeaderByHeight(height uint64) (*block.Header, error) {
return bc.dao.HeaderByHeight(height)
}
func (bc *blockchain) BlockFooterByHeight(height uint64) (*block.Footer, error) {
return bc.dao.FooterByHeight(height)
}
// TipHash returns tip block's hash
func (bc *blockchain) TipHash() hash.Hash256 {
tipHeight, err := bc.dao.Height()
if err != nil {
return hash.ZeroHash256
}
tipHash, err := bc.dao.GetBlockHash(tipHeight)
if err != nil {
return hash.ZeroHash256
}
return tipHash
}
// TipHeight returns tip block's height
func (bc *blockchain) TipHeight() uint64 {
tipHeight, err := bc.dao.Height()
if err != nil {
log.L().Panic("failed to get tip height", zap.Error(err))
}
return tipHeight
}
// ValidateBlock validates a new block before adding it to the blockchain
func (bc *blockchain) ValidateBlock(blk *block.Block) error {
bc.mu.RLock()
defer bc.mu.RUnlock()
timer := bc.timerFactory.NewTimer("ValidateBlock")
defer timer.End()
if blk == nil {
return ErrInvalidBlock
}
tip, err := bc.tipInfo()
if err != nil {
return err
}
// verify new block has height incremented by 1
if blk.Height() != 0 && blk.Height() != tip.Height+1 {
return errors.Wrapf(
ErrInvalidTipHeight,
"wrong block height %d, expecting %d",
blk.Height(),
tip.Height+1,
)
}
// verify new block has correctly linked to current tip
if blk.PrevHash() != tip.Hash {
blk.HeaderLogger(log.L()).Error("Previous block hash doesn't match.",
log.Hex("expectedBlockHash", tip.Hash[:]))
return errors.Wrapf(
ErrInvalidBlock,
"wrong prev hash %x, expecting %x",
blk.PrevHash(),
tip.Hash,
)
}
if err := block.VerifyBlock(blk); err != nil {
return errors.Wrap(err, "failed to verify block's signature and merkle root")
}
producerAddr := blk.PublicKey().Address()
if producerAddr == nil {
return errors.New("failed to get address")
}
ctx, err := bc.context(context.Background(), true)
if err != nil {
return err
}
ctx = protocol.WithBlockCtx(ctx,
protocol.BlockCtx{
BlockHeight: blk.Height(),
BlockTimeStamp: blk.Timestamp(),
GasLimit: bc.config.Genesis.BlockGasLimit,
Producer: producerAddr,
},
)
if bc.blockValidator == nil {
return nil
}
return bc.blockValidator.Validate(ctx, blk)
}
func (bc *blockchain) Context(ctx context.Context) (context.Context, error) {
bc.mu.RLock()
defer bc.mu.RUnlock()
return bc.context(ctx, true)
}
func (bc *blockchain) contextWithBlock(ctx context.Context, producer address.Address, height uint64, timestamp time.Time) context.Context {
return protocol.WithBlockCtx(
ctx,
protocol.BlockCtx{
BlockHeight: height,
BlockTimeStamp: timestamp,
Producer: producer,
GasLimit: bc.config.Genesis.BlockGasLimit,
})
}
func (bc *blockchain) context(ctx context.Context, tipInfoFlag bool) (context.Context, error) {
var tip protocol.TipInfo
if tipInfoFlag {
if tipInfoValue, err := bc.tipInfo(); err == nil {
tip = *tipInfoValue
} else {
return nil, err
}
}
return genesis.WithGenesisContext(
protocol.WithBlockchainCtx(
ctx,
protocol.BlockchainCtx{
Tip: tip,
},
),
bc.config.Genesis,
), nil
}
func (bc *blockchain) MintNewBlock(timestamp time.Time) (*block.Block, error) {
bc.mu.RLock()
defer bc.mu.RUnlock()
mintNewBlockTimer := bc.timerFactory.NewTimer("MintNewBlock")
defer mintNewBlockTimer.End()
tipHeight, err := bc.dao.Height()
if err != nil {
return nil, err
}
newblockHeight := tipHeight + 1
ctx, err := bc.context(context.Background(), true)
if err != nil {
return nil, err
}
ctx = bc.contextWithBlock(ctx, bc.config.ProducerAddress(), newblockHeight, timestamp)
// run execution and update state trie root hash
minterPrivateKey := bc.config.ProducerPrivateKey()
blockBuilder, err := bc.bbf.NewBlockBuilder(
ctx,
func(elp action.Envelope) (action.SealedEnvelope, error) {
return action.Sign(elp, minterPrivateKey)
},
)
if err != nil {
return nil, errors.Wrapf(err, "failed to create block builder at new block height %d", newblockHeight)
}
blk, err := blockBuilder.SignAndBuild(minterPrivateKey)
if err != nil {
return nil, errors.Wrapf(err, "failed to create block")
}
return &blk, nil
}
// CommitBlock validates and appends a block to the chain
func (bc *blockchain) CommitBlock(blk *block.Block) error {
bc.mu.Lock()
defer bc.mu.Unlock()
timer := bc.timerFactory.NewTimer("CommitBlock")
defer timer.End()
return bc.commitBlock(blk)
}
func (bc *blockchain) AddSubscriber(s BlockCreationSubscriber) error {
bc.mu.Lock()
defer bc.mu.Unlock()
log.L().Info("Add a subscriber.")
if s == nil {
return errors.New("subscriber could not be nil")
}
return bc.pubSubManager.AddBlockListener(s)
}
func (bc *blockchain) RemoveSubscriber(s BlockCreationSubscriber) error {
bc.mu.Lock()
defer bc.mu.Unlock()
return bc.pubSubManager.RemoveBlockListener(s)
}
//======================================
// internal functions
//=====================================
func (bc *blockchain) Genesis() genesis.Genesis {
return bc.config.Genesis
}
//======================================
// private functions
//=====================================
func (bc *blockchain) tipInfo() (*protocol.TipInfo, error) {
tipHeight, err := bc.dao.Height()
if err != nil {
return nil, err
}
if tipHeight == 0 {
return &protocol.TipInfo{
Height: 0,
Hash: bc.config.Genesis.Hash(),
Timestamp: time.Unix(bc.config.Genesis.Timestamp, 0),
}, nil
}
header, err := bc.dao.HeaderByHeight(tipHeight)
if err != nil {
return nil, err
}
return &protocol.TipInfo{
Height: tipHeight,
Hash: header.HashBlock(),
Timestamp: header.Timestamp(),
}, nil
}
// commitBlock commits a block to the chain
func (bc *blockchain) commitBlock(blk *block.Block) error {
ctx, err := bc.context(context.Background(), false)
if err != nil {
return err
}
// write block into DB
putTimer := bc.timerFactory.NewTimer("putBlock")
err = bc.dao.PutBlock(ctx, blk)
putTimer.End()
switch {
case errors.Cause(err) == filedao.ErrAlreadyExist:
return nil
case err != nil:
return err
}
blkHash := blk.HashBlock()
if blk.Height()%100 == 0 {
blk.HeaderLogger(log.L()).Info("Committed a block.", log.Hex("tipHash", blkHash[:]))
}
blockMtc.WithLabelValues("numActions").Set(float64(len(blk.Actions)))
// emit block to all block subscribers
bc.emitToSubscribers(blk)
return nil
}
func (bc *blockchain) emitToSubscribers(blk *block.Block) {
if bc.pubSubManager == nil {
return
}
bc.pubSubManager.SendBlockToSubscribers(blk)
}
| 1 | 23,645 | should use `bc.config.Blockchain.ChainID` | iotexproject-iotex-core | go |
@@ -17,13 +17,12 @@ import java.util.Collections;
import java.util.List;
import org.springframework.boot.context.properties.ConfigurationProperties;
import zipkin.storage.elasticsearch.ElasticsearchStorage;
-import zipkin.storage.elasticsearch.NativeClient;
@ConfigurationProperties("zipkin.storage.elasticsearch")
public class ZipkinElasticsearchStorageProperties {
- /** @see NativeClient.Builder#cluster(String) */
+ /** @see ElasticsearchStorage.Builder#cluster(String) */
private String cluster = "elasticsearch";
- /** @see NativeClient.Builder#hosts(List) */
+ /** @see ElasticsearchStorage.Builder#hosts(List) */
private List<String> hosts = Collections.singletonList("localhost:9300");
/** @see ElasticsearchStorage.Builder#index(String) */
private String index = "zipkin"; | 1 | /**
* Copyright 2015-2016 The OpenZipkin Authors
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package zipkin.autoconfigure.storage.elasticsearch;
import java.util.Collections;
import java.util.List;
import org.springframework.boot.context.properties.ConfigurationProperties;
import zipkin.storage.elasticsearch.ElasticsearchStorage;
import zipkin.storage.elasticsearch.NativeClient;
@ConfigurationProperties("zipkin.storage.elasticsearch")
public class ZipkinElasticsearchStorageProperties {
/** @see NativeClient.Builder#cluster(String) */
private String cluster = "elasticsearch";
/** @see NativeClient.Builder#hosts(List) */
private List<String> hosts = Collections.singletonList("localhost:9300");
/** @see ElasticsearchStorage.Builder#index(String) */
private String index = "zipkin";
/** @see ElasticsearchStorage.Builder#indexShards(int) */
private int indexShards = 5;
/** @see ElasticsearchStorage.Builder#indexReplicas(int) */
private int indexReplicas = 1;
public String getCluster() {
return cluster;
}
public ZipkinElasticsearchStorageProperties setCluster(String cluster) {
this.cluster = cluster;
return this;
}
public List<String> getHosts() {
return hosts;
}
public ZipkinElasticsearchStorageProperties setHosts(List<String> hosts) {
this.hosts = hosts;
return this;
}
public String getIndex() {
return index;
}
public ZipkinElasticsearchStorageProperties setIndex(String index) {
this.index = index;
return this;
}
public int getIndexShards() {
return indexShards;
}
public void setIndexShards(int indexShards) {
this.indexShards = indexShards;
}
public int getIndexReplicas() {
return indexReplicas;
}
public void setIndexReplicas(int indexReplicas) {
this.indexReplicas = indexReplicas;
}
public ElasticsearchStorage.Builder toBuilder() {
return ElasticsearchStorage.builder()
.client(NativeClient.builder()
.cluster(cluster)
.hosts(hosts).build())
.index(index)
.indexShards(indexShards)
.indexReplicas(indexReplicas);
}
}
| 1 | 11,722 | this doesn't need to know about NativeClient | openzipkin-zipkin | java |
@@ -233,7 +233,8 @@ class ViolationAccess(object):
violation.get('full_name', ''),
violation.get('resource_data', ''),
violation.get('violation_data', ''),
- violation.get('rule_name', '')
+ violation.get('rule_name', ''),
+ violation.get('resource_name', '')
)
violation = Violation( | 1 | # Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Database access objects for Forseti Scanner. """
from builtins import object
from collections import defaultdict
import hashlib
import json
import re
from sqlalchemy import BigInteger
from sqlalchemy import Column
from sqlalchemy import DateTime
from sqlalchemy import Integer
from sqlalchemy import String
from sqlalchemy import Text
from sqlalchemy import and_
from sqlalchemy import inspect
from sqlalchemy.ext.declarative import declarative_base
from google.cloud.forseti.common.data_access import violation_map as vm
from google.cloud.forseti.common.util import date_time
from google.cloud.forseti.common.util import logger
from google.cloud.forseti.common.util.index_state import IndexState
LOGGER = logger.get_logger(__name__)
BASE = declarative_base()
CURRENT_SCHEMA = 1
SUCCESS_STATES = [IndexState.SUCCESS, IndexState.PARTIAL_SUCCESS]
CV_VIOLATION_PATTERN = re.compile('^cv', re.I)
class ScannerIndex(BASE):
"""Represents a scanner run."""
__tablename__ = 'scanner_index'
id = Column(BigInteger, primary_key=True)
inventory_index_id = Column(BigInteger)
created_at_datetime = Column(DateTime())
completed_at_datetime = Column(DateTime())
scanner_status = Column(Text())
schema_version = Column(Integer())
scanner_index_warnings = Column(Text(16777215))
scanner_index_errors = Column(Text())
message = Column(Text())
def __repr__(self):
"""Object string representation.
Returns:
str: String representation of the object.
"""
return """<{}(id='{}', version='{}', timestamp='{}')>""".format(
self.__class__.__name__,
self.id,
self.schema_version,
self.created_at_datetime)
@classmethod
def create(cls, inv_index_id):
"""Create a new scanner index row.
Args:
inv_index_id (str): Id of the inventory index.
Returns:
object: ScannerIndex row object.
"""
utc_now = date_time.get_utc_now_datetime()
micro_timestamp = date_time.get_utc_now_microtimestamp(utc_now)
return ScannerIndex(
id=micro_timestamp,
inventory_index_id=inv_index_id,
created_at_datetime=utc_now,
scanner_status=IndexState.CREATED,
schema_version=CURRENT_SCHEMA)
def complete(self, status=IndexState.SUCCESS):
"""Mark the scanner as completed with a final scanner_status.
Args:
status (str): Final scanner_status.
"""
self.completed_at_datetime = date_time.get_utc_now_datetime()
self.scanner_status = status
def add_warning(self, session, warning):
"""Add a warning to the scanner.
Args:
session (object): session object to work on.
warning (str): Warning message
"""
warning_message = '{}\n'.format(warning)
if not self.scanner_index_warnings:
self.scanner_index_warnings = warning_message
else:
self.scanner_index_warnings += warning_message
session.add(self)
session.flush()
def set_error(self, session, message):
"""Indicate a broken scanner run.
Args:
session (object): session object to work on.
message (str): Error message to set.
"""
self.scanner_index_errors = message
session.add(self)
session.flush()
def get_latest_scanner_index_id(session, inv_index_id, index_state=None):
"""Return last `ScannerIndex` row with the given state or `None`.
Either return the latest `ScannerIndex` row where the `scanner_status`
matches the given `index_state` parameter (if passed) or the latest row
that represents a (partially) successful scanner run.
Args:
session (object): session object to work on.
inv_index_id (str): Id of the inventory index.
index_state (str): we want the latest `ScannerIndex` with this state
Returns:
sqlalchemy_object: the latest `ScannerIndex` row or `None`
"""
scanner_index = None
if not index_state:
scanner_index = (
session.query(ScannerIndex)
.filter(and_(
ScannerIndex.scanner_status.in_(SUCCESS_STATES),
ScannerIndex.inventory_index_id == inv_index_id))
.order_by(ScannerIndex.id.desc()).first())
else:
scanner_index = (
session.query(ScannerIndex)
.filter(and_(
ScannerIndex.scanner_status == index_state,
ScannerIndex.inventory_index_id == inv_index_id))
.order_by(ScannerIndex.created_at_datetime.desc()).first())
return scanner_index.id if scanner_index else None
class Violation(BASE):
"""Row entry for a violation."""
__tablename__ = 'violations'
id = Column(Integer, primary_key=True)
created_at_datetime = Column(DateTime())
full_name = Column(String(1024))
resource_data = Column(Text(16777215))
resource_name = Column(String(256), default='')
resource_id = Column(String(256), nullable=False)
resource_type = Column(String(256), nullable=False)
rule_index = Column(Integer, default=0)
rule_name = Column(String(256))
scanner_index_id = Column(BigInteger)
violation_data = Column(Text(16777215))
violation_hash = Column(String(256))
violation_message = Column(Text)
violation_type = Column(String(256), nullable=False)
def __repr__(self):
"""String representation.
Returns:
str: string representation of the Violation row entry.
"""
string = ('<Violation(violation_type={}, resource_type={} '
'rule_name={})>')
return string.format(
self.violation_type, self.resource_type, self.rule_name)
@staticmethod
def get_schema_update_actions():
"""Maintain all the schema changes for this table.
Returns:
dict: A mapping of Action: Column.
"""
columns_to_alter = {
Column('violation_data', Text()):
Column('violation_data', Text(16777215))
}
columns_to_create = [
Column('resource_name', String(256), default=''),
Column('violation_message', Text(), default='')
]
return {'ALTER': columns_to_alter, 'CREATE': columns_to_create}
class ViolationAccess(object):
"""Facade for violations, implement APIs against violations table."""
def __init__(self, session):
"""Constructor for the Violation Access.
Args:
session (Session): SQLAlchemy session object.
"""
self.session = session
def create(self, violations, scanner_index_id):
"""Save violations to the db table.
Args:
violations (list): A list of violations.
scanner_index_id (int): id of the `ScannerIndex` row for this
scanner run.
"""
created_at_datetime = date_time.get_utc_now_datetime()
for violation in violations:
violation_hash = _create_violation_hash(
violation.get('full_name', ''),
violation.get('resource_data', ''),
violation.get('violation_data', ''),
violation.get('rule_name', '')
)
violation = Violation(
created_at_datetime=created_at_datetime,
full_name=violation.get('full_name'),
resource_data=violation.get('resource_data'),
resource_name=violation.get('resource_name'),
resource_id=violation.get('resource_id'),
resource_type=violation.get('resource_type'),
rule_index=violation.get('rule_index'),
rule_name=violation.get('rule_name'),
scanner_index_id=scanner_index_id,
violation_data=json.dumps(
violation.get('violation_data'), sort_keys=True),
violation_hash=violation_hash,
violation_message=violation.get('violation_message', ''),
violation_type=violation.get('violation_type')
)
self.session.add(violation)
def list(self, inv_index_id=None, scanner_index_id=None):
"""List all violations from the db table.
If
* neither index is passed we return all violations.
* the `inv_index_id` is passed the violations from all scanner
runs for that inventory index will be returned.
* the `scanner_index_id` is passed the violations from that
specific scanner run will be returned.
NOTA BENE: do *NOT* call this method with both indices!
Args:
inv_index_id (str): Id of the inventory index.
scanner_index_id (int): Id of the scanner index.
Returns:
list: List of Violation row entry objects.
Raises:
ValueError: if called with both the inventory and the scanner index
"""
if not (inv_index_id or scanner_index_id):
return self.session.query(Violation).all()
if inv_index_id and scanner_index_id:
raise ValueError(
'Please call list() with the inventory index XOR the scanner '
'index, not both.')
results = []
if inv_index_id:
results = (
self.session.query(Violation, ScannerIndex)
.filter(and_(
ScannerIndex.scanner_status.in_(SUCCESS_STATES),
ScannerIndex.inventory_index_id == inv_index_id))
.filter(Violation.scanner_index_id == ScannerIndex.id)
.all())
if scanner_index_id:
results = (
self.session.query(Violation, ScannerIndex)
.filter(and_(
ScannerIndex.scanner_status.in_(SUCCESS_STATES),
ScannerIndex.id == scanner_index_id))
.filter(Violation.scanner_index_id == ScannerIndex.id)
.all())
violations = []
for violation, _ in results:
violations.append(violation)
return violations
# pylint: disable=invalid-name
def convert_sqlalchemy_object_to_dict(sqlalchemy_obj):
"""Convert a sqlalchemy row/record object to a dictionary.
Args:
sqlalchemy_obj (sqlalchemy_object): A sqlalchemy row/record object
Returns:
dict: A dict of sqlalchemy object's attributes.
"""
return {c.key: getattr(sqlalchemy_obj, c.key)
for c in inspect(sqlalchemy_obj).mapper.column_attrs}
def map_by_resource(violation_rows):
"""Create a map of violation types to violations of that resource.
Args:
violation_rows (list): A list of dict of violation data.
Returns:
dict: A dict of violation types mapped to the list of corresponding
violation types, i.e. { resource => [violation_data...] }.
"""
# The defaultdict makes it easy to add a value to a key without having
# to check if the key exists.
v_by_type = defaultdict(list)
for v_data in violation_rows:
try:
v_data['violation_data'] = json.loads(v_data['violation_data'])
except ValueError:
LOGGER.warning('Invalid violation data, unable to parse json '
'for %s',
v_data['violation_data'])
# resource_data can be regular python string
try:
v_data['resource_data'] = json.loads(v_data['resource_data'])
except ValueError:
v_data['resource_data'] = json.loads(
json.dumps(v_data['resource_data']))
violation_type = vm.VIOLATION_RESOURCES.get(v_data['violation_type'])
if not violation_type:
if bool(CV_VIOLATION_PATTERN.match(v_data['violation_type'])):
violation_type = vm.CV_VIOLATION_TYPE
if violation_type:
v_by_type[violation_type].append(v_data)
return dict(v_by_type)
def _create_violation_hash(violation_full_name, resource_data, violation_data, rule_name):
"""Create a hash of violation data.
Args:
violation_full_name (str): The full name of the violation.
resource_data (str): The inventory data.
violation_data (dict): A violation.
rule_name (str): Rule or constraint name.
Returns:
str: The resulting hex digest or '' if we can't successfully create
a hash.
"""
# TODO: Intelligently choose from hashlib.algorithms_guaranteed if our
# desired one is not available.
algorithm = 'sha512'
try:
violation_hash = hashlib.new(algorithm)
except ValueError:
LOGGER.exception('Cannot create hash for a violation with algorithm: '
'%s', algorithm)
return ''
try:
# Group resources do not have full name. Issue #1072
violation_hash.update(
json.dumps(violation_full_name).encode() +
json.dumps(resource_data, sort_keys=True).encode() +
json.dumps(violation_data, sort_keys=True).encode() +
json.dumps(rule_name).encode()
)
except TypeError:
LOGGER.exception('Cannot create hash for a violation: %s',
violation_full_name)
return ''
return violation_hash.hexdigest()
def initialize(engine):
"""Create all tables in the database if not existing.
Args:
engine (object): Database engine to operate on.
"""
# Create tables if not exists.
BASE.metadata.create_all(engine)
| 1 | 35,849 | I know we discussed whether we should add the resource_name to the hash, and I actually don't think it's required. The violation hash was recently updated (not yet released) to include the violated rule, so when the next release goes out, users' findings will get updated. The resource name doesn't hurt to have here, but it isn't required. | forseti-security-forseti-security | py |
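To illustrate the point in the comment above: the stored digest covers every input passed to `_create_violation_hash`, so appending a new input such as `resource_name` changes the hash of otherwise identical violations and causes existing findings to be regenerated on the next run. A minimal sketch with made-up values:

    import hashlib
    import json

    def violation_hash(*parts):
        """Hash the JSON-serialized parts, mirroring _create_violation_hash."""
        digest = hashlib.new('sha512')
        digest.update(b''.join(json.dumps(p, sort_keys=True).encode() for p in parts))
        return digest.hexdigest()

    old = violation_hash('org/123/project/p1', {'k': 'v'}, {'member': 'x'}, 'rule-1')
    new = violation_hash('org/123/project/p1', {'k': 'v'}, {'member': 'x'}, 'rule-1', 'p1')
    assert old != new   # adding resource_name to the inputs invalidates stored hashes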
@@ -14,8 +14,11 @@ static void surface_attach(struct wl_client *client,
struct wl_resource *resource,
struct wl_resource *buffer, int32_t sx, int32_t sy) {
struct wlr_surface *surface = wl_resource_get_user_data(resource);
+ int scale = surface->current.scale;
surface->pending.invalid |= WLR_SURFACE_INVALID_BUFFER;
surface->pending.buffer = buffer;
+ surface->texture->height_from_buffer = surface->texture->height / scale;
+ surface->texture->width_from_buffer = surface->texture->width / scale;
}
static void surface_damage(struct wl_client *client, | 1 | #include <assert.h>
#include <stdlib.h>
#include <wayland-server.h>
#include <wlr/util/log.h>
#include <wlr/egl.h>
#include <wlr/render/interface.h>
#include <wlr/types/wlr_surface.h>
static void surface_destroy(struct wl_client *client, struct wl_resource *resource) {
wl_resource_destroy(resource);
}
static void surface_attach(struct wl_client *client,
struct wl_resource *resource,
struct wl_resource *buffer, int32_t sx, int32_t sy) {
struct wlr_surface *surface = wl_resource_get_user_data(resource);
surface->pending.invalid |= WLR_SURFACE_INVALID_BUFFER;
surface->pending.buffer = buffer;
}
static void surface_damage(struct wl_client *client,
struct wl_resource *resource,
int32_t x, int32_t y, int32_t width, int32_t height) {
struct wlr_surface *surface = wl_resource_get_user_data(resource);
if (width < 0 || height < 0) {
return;
}
surface->pending.invalid |= WLR_SURFACE_INVALID_SURFACE_DAMAGE;
pixman_region32_union_rect(&surface->pending.surface_damage,
&surface->pending.surface_damage,
x, y, width, height);
}
static void destroy_frame_callback(struct wl_resource *resource) {
struct wlr_frame_callback *cb = wl_resource_get_user_data(resource);
wl_list_remove(&cb->link);
free(cb);
}
static void surface_frame(struct wl_client *client,
struct wl_resource *resource, uint32_t callback) {
struct wlr_frame_callback *cb;
struct wlr_surface *surface = wl_resource_get_user_data(resource);
cb = malloc(sizeof(struct wlr_frame_callback));
if (cb == NULL) {
wl_resource_post_no_memory(resource);
return;
}
cb->resource = wl_resource_create(client,
&wl_callback_interface, 1, callback);
if (cb->resource == NULL) {
free(cb);
wl_resource_post_no_memory(resource);
return;
}
wl_resource_set_implementation(cb->resource,
NULL, cb, destroy_frame_callback);
wl_list_insert(surface->frame_callback_list.prev, &cb->link);
}
static void surface_set_opaque_region(struct wl_client *client,
struct wl_resource *resource,
struct wl_resource *region_resource) {
struct wlr_surface *surface = wl_resource_get_user_data(resource);
surface->pending.invalid |= WLR_SURFACE_INVALID_OPAQUE_REGION;
if (region_resource) {
pixman_region32_t *region = wl_resource_get_user_data(region_resource);
pixman_region32_copy(&surface->pending.opaque, region);
} else {
pixman_region32_clear(&surface->pending.opaque);
}
}
static void surface_set_input_region(struct wl_client *client,
struct wl_resource *resource,
struct wl_resource *region_resource) {
struct wlr_surface *surface = wl_resource_get_user_data(resource);
surface->pending.invalid |= WLR_SURFACE_INVALID_INPUT_REGION;
if (region_resource) {
pixman_region32_t *region = wl_resource_get_user_data(region_resource);
pixman_region32_copy(&surface->pending.input, region);
} else {
pixman_region32_fini(&surface->pending.input);
pixman_region32_init_rect(&surface->pending.input,
INT32_MIN, INT32_MIN, UINT32_MAX, UINT32_MAX);
}
}
static void surface_commit(struct wl_client *client,
struct wl_resource *resource) {
struct wlr_surface *surface = wl_resource_get_user_data(resource);
if ((surface->pending.invalid & WLR_SURFACE_INVALID_BUFFER)) {
surface->current.buffer = surface->pending.buffer;
}
if ((surface->pending.invalid & WLR_SURFACE_INVALID_SURFACE_DAMAGE)) {
// TODO: Sort out buffer damage too
pixman_region32_union(&surface->current.surface_damage,
&surface->current.surface_damage,
&surface->pending.surface_damage);
// TODO: Surface sizing is complicated
//pixman_region32_intersect_rect(&surface->current.surface_damage,
// &surface->current.surface_damage,
// 0, 0, surface->width, surface->height);
pixman_region32_clear(&surface->pending.surface_damage);
}
// TODO: Commit other changes
surface->pending.invalid = 0;
// TODO: add the invalid bitfield to this callback
wl_signal_emit(&surface->signals.commit, surface);
}
void wlr_surface_flush_damage(struct wlr_surface *surface) {
if (!surface->current.buffer) {
if (surface->texture->valid) {
// TODO: Detach buffers
}
return;
}
struct wl_shm_buffer *buffer = wl_shm_buffer_get(surface->current.buffer);
if (!buffer) {
if (wlr_renderer_buffer_is_drm(surface->renderer, surface->pending.buffer)) {
wlr_texture_upload_drm(surface->texture, surface->pending.buffer);
goto release;
} else {
wlr_log(L_INFO, "Unknown buffer handle attached");
return;
}
}
pixman_region32_t damage = surface->current.surface_damage;
if (!pixman_region32_not_empty(&damage)) {
goto release;
}
int n;
pixman_box32_t *rects = pixman_region32_rectangles(&damage, &n);
uint32_t format = wl_shm_buffer_get_format(buffer);
for (int i = 0; i < n; ++i) {
pixman_box32_t rect = rects[i];
if (!wlr_texture_update_shm(surface->texture, format,
rect.x1, rect.y1,
rect.x2 - rect.x1,
rect.y2 - rect.y1,
buffer)) {
break;
}
}
pixman_region32_fini(&surface->current.surface_damage);
pixman_region32_init(&surface->current.surface_damage);
release:
wl_resource_queue_event(surface->current.buffer, WL_BUFFER_RELEASE);
}
static void surface_set_buffer_transform(struct wl_client *client,
struct wl_resource *resource, int transform) {
wlr_log(L_DEBUG, "TODO: surface surface buffer transform");
}
static void surface_set_buffer_scale(struct wl_client *client,
struct wl_resource *resource,
int32_t scale) {
wlr_log(L_DEBUG, "TODO: surface set buffer scale");
}
static void surface_damage_buffer(struct wl_client *client,
struct wl_resource *resource,
int32_t x, int32_t y, int32_t width,
int32_t height) {
wlr_log(L_DEBUG, "TODO: surface damage buffer");
}
const struct wl_surface_interface surface_interface = {
surface_destroy,
surface_attach,
surface_damage,
surface_frame,
surface_set_opaque_region,
surface_set_input_region,
surface_commit,
surface_set_buffer_transform,
surface_set_buffer_scale,
surface_damage_buffer
};
static void destroy_surface(struct wl_resource *resource) {
struct wlr_surface *surface = wl_resource_get_user_data(resource);
wlr_texture_destroy(surface->texture);
struct wlr_frame_callback *cb, *next;
wl_list_for_each_safe(cb, next, &surface->frame_callback_list, link) {
wl_resource_destroy(cb->resource);
}
free(surface);
}
struct wlr_surface *wlr_surface_create(struct wl_resource *res,
struct wlr_renderer *renderer) {
struct wlr_surface *surface = calloc(1, sizeof(struct wlr_surface));
surface->renderer = renderer;
surface->texture = wlr_render_texture_init(renderer);
surface->resource = res;
wl_signal_init(&surface->signals.commit);
wl_list_init(&surface->frame_callback_list);
wl_resource_set_implementation(res, &surface_interface,
surface, destroy_surface);
return surface;
}
| 1 | 7,682 | The variable name is also misleading imo. The width from the buffer comes from `wl_shm_buffer_get_width` (or a similar EGL thing), which is what gets encoded into `surface->texture->width`. | swaywm-wlroots | c |
@@ -179,6 +179,8 @@ class Listen(object):
'time' : self.ts_since_epoch,
'tags' : {
'user_name' : escape(self.user_name),
+ 'artist_msid' : self.artist_msid,
+ 'recording_msid' : self.recording_msid,
},
'fields' : {
'artist_name' : self.data['artist_name'], | 1 | # coding=utf-8
from datetime import datetime
import calendar
from listenbrainz.utils import escape, convert_to_unix_timestamp
def flatten_dict(d, seperator='', parent_key=''):
"""
Flattens a nested dictionary structure into a single dict.
Args:
d: the dict to be flattened
seperator: the seperator used in keys in the flattened dict
parent_key: the key that is prefixed to all keys generated during flattening
Returns:
Flattened dict with keys such as key1.key2
"""
result = []
for key, value in d.items():
new_key = "{}{}{}".format(parent_key, seperator, str(key))
if isinstance(value, dict):
result.extend(list(flatten_dict(value, '.', new_key).items()))
else:
result.append((new_key, value))
return dict(result)
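A quick usage sketch of flatten_dict with hypothetical values, showing the dotted keys described in the docstring:

    nested = {'track_metadata': {'artist_name': 'A', 'additional_info': {'isrc': 'X'}}}
    flatten_dict(nested)
    # -> {'track_metadata.artist_name': 'A',
    #     'track_metadata.additional_info.isrc': 'X'}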
class Listen(object):
""" Represents a listen object """
# keys in additional_info that we support explicitly and are not superfluous
SUPPORTED_KEYS = [
'artist_mbids',
'release_group_mbid',
'release_mbid',
'recording_mbid',
'track_mbid',
'work_mbids',
'tracknumber',
'isrc',
'spotify_id',
'tags',
'artist_msid',
'release_msid',
'recording_msid',
]
def __init__(self, user_id=None, user_name=None, timestamp=None, artist_msid=None, release_msid=None,
recording_msid=None, data=None):
self.user_id = user_id
self.user_name = user_name
# determine the type of timestamp and do the right thing
if isinstance(timestamp, int) or isinstance(timestamp, float):
self.ts_since_epoch = int(timestamp)
self.timestamp = datetime.utcfromtimestamp(self.ts_since_epoch)
else:
if timestamp:
self.timestamp = timestamp
self.ts_since_epoch = calendar.timegm(self.timestamp.utctimetuple())
else:
self.timestamp = 0
self.ts_since_epoch = 0
self.artist_msid = artist_msid
self.release_msid = release_msid
self.recording_msid = recording_msid
if data is None:
self.data = {'additional_info': {}}
else:
try:
data['additional_info'] = flatten_dict(data['additional_info'])
except TypeError:
# TypeError may occur here because PostgresListenStore passes strings
# to data sometimes. If that occurs, we don't need to do anything.
pass
self.data = data
@classmethod
def from_json(cls, j):
"""Factory to make Listen() objects from a dict"""
return cls(user_id=j['user_id'],
user_name=j.get('user_name', ""),
timestamp=datetime.utcfromtimestamp(float(j['listened_at'])),
artist_msid=j['track_metadata']['additional_info'].get('artist_msid'),
release_msid=j['track_metadata']['additional_info'].get('release_msid'),
recording_msid=j.get('recording_msid'),
data=j.get('track_metadata')
)
@classmethod
def from_influx(cls, row):
""" Factory to make Listen objects from an influx row
"""
t = convert_to_unix_timestamp(row['time'])
mbids = []
artist_mbids = row.get('artist_mbids')
if artist_mbids:
for mbid in artist_mbids.split(','):
mbids.append(mbid)
tags = []
influx_tags = row.get('tags')
if influx_tags:
for tag in influx_tags.split(','):
tags.append(tag)
data = {
'artist_mbids': mbids,
'release_msid': row.get('release_msid'),
'release_mbid': row.get('release_mbid'),
'release_name': row.get('release_name'),
'recording_mbid': row.get('recording_mbid'),
'tags': tags,
}
# The influx row can contain many fields that are user-generated.
# We only need to add those fields which have some value in them to additional_info.
# Also, we need to make sure that we don't add fields like time, user_name etc. into
# the additional_info.
for key, value in row.items():
if key not in ['time', 'user_name', 'recording_msid', 'artist_mbids', 'tags'] and value is not None:
data[key] = value
return cls(
timestamp=t,
user_name=row.get('user_name'),
artist_msid=row.get('artist_msid'),
recording_msid=row.get('recording_msid'),
release_msid=row.get('release_msid'),
data={
'additional_info': data,
'artist_name': row.get('artist_name'),
'track_name': row.get('track_name'),
}
)
def to_api(self):
"""
Converts listen into the format in which listens are returned in the payload by the api
on get_listen requests
Returns:
dict with fields 'track_metadata', 'listened_at' and 'recording_msid'
"""
track_metadata = self.data.copy()
track_metadata['additional_info']['artist_msid'] = self.artist_msid
track_metadata['additional_info']['release_msid'] = self.release_msid
data = {
'track_metadata': track_metadata,
'listened_at': self.ts_since_epoch,
'recording_msid': self.recording_msid,
}
return data
def to_json(self):
return {
'user_id': self.user_id,
'user_name': self.user_name,
'timestamp': self.timestamp,
'track_metadata': self.data,
'recording_msid': self.recording_msid
}
def to_influx(self, measurement):
"""
Converts listen into dict that can be submitted to influx directly.
Returns:
            a dict with appropriate values for measurement, time, tags and fields
"""
data = {
'measurement' : measurement,
'time' : self.ts_since_epoch,
'tags' : {
'user_name' : escape(self.user_name),
},
'fields' : {
'artist_name' : self.data['artist_name'],
'artist_msid' : self.artist_msid,
'artist_mbids' : ",".join(self.data['additional_info'].get('artist_mbids', [])),
'release_name' : self.data.get('release_name', ''),
'release_msid' : self.release_msid,
'release_mbid' : self.data['additional_info'].get('release_mbid', ''),
'track_name' : self.data['track_name'],
'recording_msid' : self.recording_msid,
'recording_mbid' : self.data['additional_info'].get('recording_mbid', ''),
'tags' : ",".join(self.data['additional_info'].get('tags', [])),
}
}
# add the user generated keys present in additional info to fields
for key, value in self.data['additional_info'].items():
if key not in Listen.SUPPORTED_KEYS:
data['fields'][key] = escape(str(value))
return data
def validate(self):
return (self.user_id is not None and self.timestamp is not None and self.artist_msid is not None
and self.recording_msid is not None and self.data is not None)
@property
def date(self):
return self.timestamp
def __repr__(self):
return str(self).encode("utf-8")
def __unicode__(self):
return "<Listen: user_name: %s, time: %s, artist_msid: %s, release_msid: %s, recording_msid: %s, artist_name: %s, track_name: %s>" % \
(self.user_name, self.ts_since_epoch, self.artist_msid, self.release_msid, self.recording_msid, self.data['artist_name'], self.data['track_name'])
| 1 | 14,418 | Why are you using these tags to carry these msids? | metabrainz-listenbrainz-server | py |
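For context on the question above: InfluxDB indexes tag values and generally expects them to be low cardinality, while per-listen identifiers such as MSIDs are unique to each point and are normally stored as fields. A rough sketch of the distinction (illustrative point only):

    point = {
        'measurement': 'listen',
        'time': 1514764800,
        'tags':   {'user_name': 'rob'},                # indexed, low cardinality
        'fields': {'track_name': 'Song',
                   'recording_msid': 'c0ffee-...',     # unique per listen -> field
                   'artist_msid': 'deadbeef-...'},
    }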
@@ -439,8 +439,8 @@ int flb_config_set_property(struct flb_config *config,
flb_free(*s_val); /* release before overwriting */
}
- *s_val = malloc(flb_sds_len(tmp) * sizeof(char));
- strncpy(*s_val, tmp, flb_sds_len(tmp));
+ *s_val = malloc(flb_sds_len(tmp) * sizeof(char) + 1);
+ strcpy(*s_val, tmp);
flb_sds_destroy(tmp);
break;
default: | 1 | /* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
/* Fluent Bit
* ==========
* Copyright (C) 2019 The Fluent Bit Authors
* Copyright (C) 2015-2018 Treasure Data Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <stddef.h>
#include <monkey/mk_core.h>
#include <fluent-bit/flb_info.h>
#include <fluent-bit/flb_mem.h>
#include <fluent-bit/flb_str.h>
#include <fluent-bit/flb_kv.h>
#include <fluent-bit/flb_env.h>
#include <fluent-bit/flb_macros.h>
#include <fluent-bit/flb_config.h>
#include <fluent-bit/flb_parser.h>
#include <fluent-bit/flb_plugin.h>
#include <fluent-bit/flb_plugins.h>
#include <fluent-bit/flb_slist.h>
#include <fluent-bit/flb_io_tls.h>
#include <fluent-bit/flb_kernel.h>
#include <fluent-bit/flb_worker.h>
#include <fluent-bit/flb_scheduler.h>
#include <fluent-bit/flb_http_server.h>
#include <fluent-bit/flb_plugin.h>
#include <fluent-bit/flb_utils.h>
int flb_regex_init();
struct flb_service_config service_configs[] = {
{FLB_CONF_STR_FLUSH,
FLB_CONF_TYPE_DOUBLE,
offsetof(struct flb_config, flush)},
{FLB_CONF_STR_GRACE,
FLB_CONF_TYPE_INT,
offsetof(struct flb_config, grace)},
{FLB_CONF_STR_DAEMON,
FLB_CONF_TYPE_BOOL,
offsetof(struct flb_config, daemon)},
{FLB_CONF_STR_LOGFILE,
FLB_CONF_TYPE_STR,
offsetof(struct flb_config, log_file)},
{FLB_CONF_STR_PARSERS_FILE,
FLB_CONF_TYPE_STR,
offsetof(struct flb_config, parsers_file)},
{FLB_CONF_STR_PLUGINS_FILE,
FLB_CONF_TYPE_STR,
offsetof(struct flb_config, plugins_file)},
{FLB_CONF_STR_LOGLEVEL,
FLB_CONF_TYPE_STR,
offsetof(struct flb_config, log)},
#ifdef FLB_HAVE_HTTP_SERVER
{FLB_CONF_STR_HTTP_SERVER,
FLB_CONF_TYPE_BOOL,
offsetof(struct flb_config, http_server)},
{FLB_CONF_STR_HTTP_LISTEN,
FLB_CONF_TYPE_STR,
offsetof(struct flb_config, http_listen)},
{FLB_CONF_STR_HTTP_PORT,
FLB_CONF_TYPE_STR,
offsetof(struct flb_config, http_port)},
#endif
/* Storage */
{FLB_CONF_STORAGE_PATH,
FLB_CONF_TYPE_STR,
offsetof(struct flb_config, storage_path)},
{FLB_CONF_STORAGE_SYNC,
FLB_CONF_TYPE_STR,
offsetof(struct flb_config, storage_sync)},
{FLB_CONF_STORAGE_CHECKSUM,
FLB_CONF_TYPE_BOOL,
offsetof(struct flb_config, storage_checksum)},
{FLB_CONF_STORAGE_BL_MEM_LIMIT,
FLB_CONF_TYPE_STR,
offsetof(struct flb_config, storage_bl_mem_limit)},
{FLB_CONF_STORAGE_MAX_CHUNKS_UP,
FLB_CONF_TYPE_INT,
offsetof(struct flb_config, storage_max_chunks_up)},
/* Coroutines */
{FLB_CONF_STR_CORO_STACK_SIZE,
FLB_CONF_TYPE_INT,
offsetof(struct flb_config, coro_stack_size)},
#ifdef FLB_HAVE_STREAM_PROCESSOR
{FLB_CONF_STR_STREAMS_FILE,
FLB_CONF_TYPE_STR,
offsetof(struct flb_config, stream_processor_file)},
#endif
{NULL, FLB_CONF_TYPE_OTHER, 0} /* end of array */
};
struct flb_config *flb_config_init()
{
struct flb_config *config;
config = flb_calloc(1, sizeof(struct flb_config));
if (!config) {
flb_errno();
return NULL;
}
MK_EVENT_ZERO(&config->ch_event);
MK_EVENT_ZERO(&config->event_flush);
MK_EVENT_ZERO(&config->event_shutdown);
config->is_running = FLB_TRUE;
/* Flush */
config->flush = FLB_CONFIG_FLUSH_SECS;
config->daemon = FLB_FALSE;
config->init_time = time(NULL);
config->kernel = flb_kernel_info();
config->verbose = 3;
config->grace = 5;
#ifdef FLB_HAVE_HTTP_SERVER
config->http_ctx = NULL;
config->http_server = FLB_FALSE;
config->http_listen = flb_strdup(FLB_CONFIG_HTTP_LISTEN);
config->http_port = flb_strdup(FLB_CONFIG_HTTP_PORT);
#endif
config->cio = NULL;
config->storage_path = NULL;
config->storage_input_plugin = NULL;
#ifdef FLB_HAVE_SQLDB
mk_list_init(&config->sqldb_list);
#endif
#ifdef FLB_HAVE_LUAJIT
mk_list_init(&config->luajit_list);
#endif
#ifdef FLB_HAVE_STREAM_PROCESSOR
flb_slist_create(&config->stream_processor_tasks);
#endif
/* Set default coroutines stack size */
config->coro_stack_size = FLB_THREAD_STACK_SIZE;
/* Initialize linked lists */
mk_list_init(&config->collectors);
mk_list_init(&config->in_plugins);
mk_list_init(&config->parser_plugins);
mk_list_init(&config->filter_plugins);
mk_list_init(&config->out_plugins);
mk_list_init(&config->inputs);
mk_list_init(&config->parsers);
mk_list_init(&config->filters);
mk_list_init(&config->outputs);
mk_list_init(&config->proxies);
mk_list_init(&config->workers);
memset(&config->tasks_map, '\0', sizeof(config->tasks_map));
/* Environment */
config->env = flb_env_create();
/* Register static plugins */
flb_register_plugins(config);
/* Create environment for dynamic plugins */
config->dso_plugins = flb_plugin_create();
    /* SIGPIPE does not exist on Windows; ignore it on other platforms */
#ifndef _WIN32
/* Ignore SIGPIPE */
signal(SIGPIPE, SIG_IGN);
#endif
/* Prepare worker interface */
flb_worker_init(config);
#ifdef FLB_HAVE_REGEX
/* Regex support */
flb_regex_init();
#endif
return config;
}
void flb_config_exit(struct flb_config *config)
{
struct mk_list *tmp;
struct mk_list *head;
struct flb_input_collector *collector;
if (config->log_file) {
flb_free(config->log_file);
}
if (config->log) {
flb_log_stop(config->log, config);
}
if (config->parsers_file) {
flb_free(config->parsers_file);
}
if (config->plugins_file) {
flb_free(config->plugins_file);
}
if (config->kernel) {
flb_free(config->kernel->s_version.data);
flb_free(config->kernel);
}
/* release resources */
if (config->ch_event.fd) {
mk_event_closesocket(config->ch_event.fd);
}
/* Pipe */
if (config->ch_data[0]) {
mk_event_closesocket(config->ch_data[0]);
mk_event_closesocket(config->ch_data[1]);
}
/* Channel manager */
if (config->ch_manager[0] > 0) {
mk_event_closesocket(config->ch_manager[0]);
if (config->ch_manager[0] != config->ch_manager[1]) {
mk_event_closesocket(config->ch_manager[1]);
}
}
/* Channel notifications */
if (config->ch_notif[0] > 0) {
mk_event_closesocket(config->ch_notif[0]);
if (config->ch_notif[0] != config->ch_notif[1]) {
mk_event_closesocket(config->ch_notif[1]);
}
}
/* Collectors */
mk_list_foreach_safe(head, tmp, &config->collectors) {
collector = mk_list_entry(head, struct flb_input_collector, _head);
if (collector->type == FLB_COLLECT_TIME) {
if (collector->fd_timer > 0) {
mk_event_timeout_destroy(config->evl, &collector->event);
mk_event_closesocket(collector->fd_timer);
}
} else {
mk_event_del(config->evl, &collector->event);
}
mk_list_del(&collector->_head);
flb_free(collector);
}
flb_env_destroy(config->env);
/* Conf path */
if (config->conf_path) {
flb_free(config->conf_path);
}
/* Destroy any DSO context */
flb_plugin_destroy(config->dso_plugins);
/* Workers */
flb_worker_exit(config);
/* Event flush */
if (config->evl) {
mk_event_del(config->evl, &config->event_flush);
}
mk_event_closesocket(config->flush_fd);
/* Release scheduler */
flb_sched_exit(config);
#ifdef FLB_HAVE_HTTP_SERVER
if (config->http_listen) {
flb_free(config->http_listen);
}
if (config->http_port) {
flb_free(config->http_port);
}
#endif
if (config->storage_path) {
flb_free(config->storage_path);
}
#ifdef FLB_HAVE_STREAM_PROCESSOR
if (config->stream_processor_file) {
flb_free(config->stream_processor_file);
}
flb_slist_destroy(&config->stream_processor_tasks);
#endif
if (config->evl) {
mk_event_loop_destroy(config->evl);
}
flb_free(config);
}
const char *flb_config_prop_get(const char *key, struct mk_list *list)
{
return flb_kv_get_key_value(key, list);
}
static inline int prop_key_check(const char *key, const char *kv, int k_len)
{
size_t len;
len = strnlen(key,256);
if (strncasecmp(key, kv, k_len) == 0 && len == k_len) {
return 0;
}
return -1;
}
static int set_log_level(struct flb_config *config, const char *v_str)
{
if (v_str != NULL) {
if (strcasecmp(v_str, "error") == 0) {
config->verbose = 1;
}
else if (strcasecmp(v_str, "warning") == 0) {
config->verbose = 2;
}
else if (strcasecmp(v_str, "info") == 0) {
config->verbose = 3;
}
else if (strcasecmp(v_str, "debug") == 0) {
config->verbose = 4;
}
else if (strcasecmp(v_str, "trace") == 0) {
config->verbose = 5;
}
else {
return -1;
}
}
else if (config->log) {
config->verbose = 3;
}
return 0;
}
int flb_config_set_property(struct flb_config *config,
const char *k, const char *v)
{
int i=0;
int ret = -1;
int *i_val;
double *d_val;
char **s_val;
size_t len = strnlen(k, 256);
char *key = service_configs[0].key;
flb_sds_t tmp = NULL;
while (key != NULL) {
if (prop_key_check(key, k,len) == 0) {
if (!strncasecmp(key, FLB_CONF_STR_LOGLEVEL, 256)) {
tmp = flb_env_var_translate(config->env, v);
if (tmp) {
ret = set_log_level(config, tmp);
flb_sds_destroy(tmp);
tmp = NULL;
}
else {
ret = set_log_level(config, v);
}
}
else if (!strncasecmp(key, FLB_CONF_STR_PARSERS_FILE, 32)) {
#ifdef FLB_HAVE_PARSER
tmp = flb_env_var_translate(config->env, v);
ret = flb_parser_conf_file(tmp, config);
flb_sds_destroy(tmp);
tmp = NULL;
#endif
}
else if (!strncasecmp(key, FLB_CONF_STR_PLUGINS_FILE, 32)) {
tmp = flb_env_var_translate(config->env, v);
ret = flb_plugin_load_config_file(tmp, config);
flb_sds_destroy(tmp);
tmp = NULL;
}
else {
ret = 0;
tmp = flb_env_var_translate(config->env, v);
switch(service_configs[i].type) {
case FLB_CONF_TYPE_INT:
i_val = (int*)((char*)config + service_configs[i].offset);
*i_val = atoi(tmp);
flb_sds_destroy(tmp);
break;
case FLB_CONF_TYPE_DOUBLE:
d_val = (double*)((char*)config + service_configs[i].offset);
*d_val = atof(tmp);
flb_sds_destroy(tmp);
break;
case FLB_CONF_TYPE_BOOL:
i_val = (int*)((char*)config+service_configs[i].offset);
*i_val = flb_utils_bool(tmp);
flb_sds_destroy(tmp);
break;
case FLB_CONF_TYPE_STR:
s_val = (char**)((char*)config+service_configs[i].offset);
if ( *s_val != NULL ) {
flb_free(*s_val); /* release before overwriting */
}
*s_val = malloc(flb_sds_len(tmp) * sizeof(char));
strncpy(*s_val, tmp, flb_sds_len(tmp));
flb_sds_destroy(tmp);
break;
default:
ret = -1;
}
}
if (ret < 0) {
if (tmp) {
flb_sds_destroy(tmp);
}
return -1;
}
return 0;
}
key = service_configs[++i].key;
}
return 0;
}
| 1 | 10,313 | flb_strdup() should do the work here. note: use Fluent Bit memory wrappers | fluent-fluent-bit | c |
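The review above refers to the FLB_CONF_TYPE_STR branch of flb_config_set_property(), where malloc() plus strncpy() leaves the copied string without a NUL terminator and bypasses the project's memory wrappers. A minimal sketch of the suggested direction, assuming the usual Fluent Bit wrappers flb_strdup() and flb_free(); the helper name set_str_property() is illustrative and not part of the codebase:

#include <fluent-bit/flb_mem.h>
#include <fluent-bit/flb_str.h>

/* Replace an existing string property with a NUL-terminated copy of value. */
static int set_str_property(char **slot, const char *value)
{
    if (*slot != NULL) {
        flb_free(*slot);            /* release the previous value */
    }
    *slot = flb_strdup(value);      /* allocates and NUL-terminates the copy */
    return (*slot != NULL) ? 0 : -1;
}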
@@ -0,0 +1,6 @@
+// Copyright 2020 The Swarm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:generate sh -c "protoc -I . -I \"$(go list -f '{{ .Dir }}' -m github.com/gogo/protobuf)/protobuf\" --gogofaster_out=. retrieval.proto"
+package pb | 1 | 1 | 8,698 | This line should be separated from the package declaration by one line as it is not the package description. | ethersphere-bee | go |
|
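The comment above concerns how Go associates comments: a comment block sitting directly above the package clause is taken as the package documentation. A minimal sketch of the layout being requested, with a blank line so the go:generate directive is not treated as the package doc (the protoc flags are shortened here for illustration):

// Copyright 2020 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:generate sh -c "protoc -I . --gogofaster_out=. retrieval.proto"

package pb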
@@ -411,7 +411,7 @@ func TestRollDPoSConsensus(t *testing.T) {
require.NoError(t, err)
require.NoError(t, sf.Start(ctx))
for j := 0; j < numNodes; j++ {
- ws, err := sf.NewWorkingSet(nil)
+ ws, err := sf.NewWorkingSet()
require.NoError(t, err)
_, err = accountutil.LoadOrCreateAccount(ws, chainRawAddrs[j], big.NewInt(0))
require.NoError(t, err) | 1 | // Copyright (c) 2019 IoTeX Foundation
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package rolldpos
import (
"encoding/hex"
"fmt"
"math/big"
"net"
"sync"
"testing"
"time"
"github.com/facebookgo/clock"
"github.com/golang/mock/gomock"
"github.com/golang/protobuf/proto"
"github.com/golang/protobuf/ptypes"
"github.com/iotexproject/go-pkgs/crypto"
"github.com/iotexproject/go-pkgs/hash"
"github.com/iotexproject/iotex-proto/golang/iotextypes"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"golang.org/x/net/context"
"github.com/iotexproject/iotex-core/action/protocol"
"github.com/iotexproject/iotex-core/action/protocol/account"
accountutil "github.com/iotexproject/iotex-core/action/protocol/account/util"
"github.com/iotexproject/iotex-core/action/protocol/rolldpos"
"github.com/iotexproject/iotex-core/actpool"
"github.com/iotexproject/iotex-core/blockchain"
"github.com/iotexproject/iotex-core/blockchain/block"
"github.com/iotexproject/iotex-core/config"
cp "github.com/iotexproject/iotex-core/crypto"
"github.com/iotexproject/iotex-core/endorsement"
"github.com/iotexproject/iotex-core/p2p/node"
"github.com/iotexproject/iotex-core/state"
"github.com/iotexproject/iotex-core/state/factory"
"github.com/iotexproject/iotex-core/test/identityset"
"github.com/iotexproject/iotex-core/test/mock/mock_actpool"
"github.com/iotexproject/iotex-core/test/mock/mock_blockchain"
"github.com/iotexproject/iotex-core/testutil"
)
type addrKeyPair struct {
priKey crypto.PrivateKey
encodedAddr string
}
func TestNewRollDPoS(t *testing.T) {
t.Parallel()
ctrl := gomock.NewController(t)
defer ctrl.Finish()
cfg := config.Default
rp := rolldpos.NewProtocol(
cfg.Genesis.NumCandidateDelegates,
cfg.Genesis.NumDelegates,
cfg.Genesis.NumSubEpochs,
)
t.Run("normal", func(t *testing.T) {
sk := identityset.PrivateKey(0)
r, err := NewRollDPoSBuilder().
SetConfig(cfg).
SetAddr(identityset.Address(0).String()).
SetPriKey(sk).
SetChainManager(mock_blockchain.NewMockBlockchain(ctrl)).
SetActPool(mock_actpool.NewMockActPool(ctrl)).
SetBroadcast(func(_ proto.Message) error {
return nil
}).
RegisterProtocol(rp).
Build()
assert.NoError(t, err)
assert.NotNil(t, r)
})
t.Run("mock-clock", func(t *testing.T) {
sk := identityset.PrivateKey(0)
r, err := NewRollDPoSBuilder().
SetConfig(cfg).
SetAddr(identityset.Address(0).String()).
SetPriKey(sk).
SetChainManager(mock_blockchain.NewMockBlockchain(ctrl)).
SetActPool(mock_actpool.NewMockActPool(ctrl)).
SetBroadcast(func(_ proto.Message) error {
return nil
}).
SetClock(clock.NewMock()).
RegisterProtocol(rp).
Build()
assert.NoError(t, err)
assert.NotNil(t, r)
_, ok := r.ctx.clock.(*clock.Mock)
assert.True(t, ok)
})
t.Run("root chain API", func(t *testing.T) {
sk := identityset.PrivateKey(0)
r, err := NewRollDPoSBuilder().
SetConfig(cfg).
SetAddr(identityset.Address(0).String()).
SetPriKey(sk).
SetChainManager(mock_blockchain.NewMockBlockchain(ctrl)).
SetActPool(mock_actpool.NewMockActPool(ctrl)).
SetBroadcast(func(_ proto.Message) error {
return nil
}).
SetClock(clock.NewMock()).
RegisterProtocol(rp).
Build()
assert.NoError(t, err)
assert.NotNil(t, r)
})
t.Run("missing-dep", func(t *testing.T) {
sk := identityset.PrivateKey(0)
r, err := NewRollDPoSBuilder().
SetConfig(cfg).
SetAddr(identityset.Address(0).String()).
SetPriKey(sk).
SetActPool(mock_actpool.NewMockActPool(ctrl)).
SetBroadcast(func(_ proto.Message) error {
return nil
}).
RegisterProtocol(rp).
Build()
assert.Error(t, err)
assert.Nil(t, r)
})
}
func makeBlock(t *testing.T, accountIndex, numOfEndosements int, makeInvalidEndorse bool, height int) *block.Block {
unixTime := 1500000000
blkTime := int64(-1)
if height != 9 {
height = 9
blkTime = int64(-7723372030)
}
timeT := time.Unix(blkTime, 0)
rap := block.RunnableActionsBuilder{}
ra := rap.
SetHeight(uint64(height)).
SetTimeStamp(timeT).
Build(identityset.PrivateKey(accountIndex).PublicKey())
blk, err := block.NewBuilder(ra).
SetVersion(1).
SetReceiptRoot(hash.Hash256b([]byte("hello, world!"))).
SetDeltaStateDigest(hash.Hash256b([]byte("world, hello!"))).
SetPrevBlockHash(hash.Hash256b([]byte("hello, block!"))).
SignAndBuild(identityset.PrivateKey(accountIndex))
require.NoError(t, err)
footerForBlk := &block.Footer{}
typesFooter := iotextypes.BlockFooter{}
for i := 0; i < numOfEndosements; i++ {
timeTime := time.Unix(int64(unixTime), 0)
hs := blk.HashBlock()
var consensusVote *ConsensusVote
if makeInvalidEndorse {
consensusVote = NewConsensusVote(hs[:], LOCK)
} else {
consensusVote = NewConsensusVote(hs[:], COMMIT)
}
en, err := endorsement.Endorse(identityset.PrivateKey(i), consensusVote, timeTime)
require.NoError(t, err)
enProto, err := en.Proto()
require.NoError(t, err)
typesFooter.Endorsements = append(typesFooter.Endorsements, enProto)
}
ts, err := ptypes.TimestampProto(time.Unix(int64(unixTime), 0))
require.NoError(t, err)
typesFooter.Timestamp = ts
require.NotNil(t, typesFooter.Timestamp)
err = footerForBlk.ConvertFromBlockFooterPb(&typesFooter)
require.NoError(t, err)
blk.Footer = *footerForBlk
return &blk
}
func TestValidateBlockFooter(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
candidates := make([]string, 5)
for i := 0; i < len(candidates); i++ {
candidates[i] = identityset.Address(i).String()
}
clock := clock.NewMock()
blockHeight := uint64(8)
footer := &block.Footer{}
blockchain := mock_blockchain.NewMockBlockchain(ctrl)
blockchain.EXPECT().BlockFooterByHeight(blockHeight).Return(footer, nil).Times(5)
blockchain.EXPECT().CandidatesByHeight(gomock.Any()).Return([]*state.Candidate{
{Address: candidates[0]},
{Address: candidates[1]},
{Address: candidates[2]},
{Address: candidates[3]},
{Address: candidates[4]},
}, nil).AnyTimes()
sk1 := identityset.PrivateKey(1)
cfg := config.Default
cfg.Genesis.NumDelegates = 4
cfg.Genesis.NumSubEpochs = 1
cfg.Genesis.BlockInterval = 10 * time.Second
cfg.Genesis.Timestamp = int64(1500000000)
blockchain.EXPECT().Genesis().Return(cfg.Genesis).Times(5)
rp := rolldpos.NewProtocol(
cfg.Genesis.NumCandidateDelegates,
cfg.Genesis.NumDelegates,
cfg.Genesis.NumSubEpochs,
)
r, err := NewRollDPoSBuilder().
SetConfig(cfg).
SetAddr(identityset.Address(1).String()).
SetPriKey(sk1).
SetChainManager(blockchain).
SetActPool(mock_actpool.NewMockActPool(ctrl)).
SetBroadcast(func(_ proto.Message) error {
return nil
}).
SetClock(clock).
RegisterProtocol(rp).
Build()
require.NoError(t, err)
require.NotNil(t, r)
// all right
blk := makeBlock(t, 1, 4, false, 9)
err = r.ValidateBlockFooter(blk)
require.NoError(t, err)
// Proposer is wrong
blk = makeBlock(t, 0, 4, false, 9)
err = r.ValidateBlockFooter(blk)
require.Error(t, err)
// Not enough endorsements
blk = makeBlock(t, 1, 2, false, 9)
err = r.ValidateBlockFooter(blk)
require.Error(t, err)
// round information is wrong
blk = makeBlock(t, 1, 4, false, 0)
err = r.ValidateBlockFooter(blk)
require.Error(t, err)
// Some endorsement is invalid
blk = makeBlock(t, 1, 4, true, 9)
err = r.ValidateBlockFooter(blk)
require.Error(t, err)
}
func TestRollDPoS_Metrics(t *testing.T) {
t.Parallel()
ctrl := gomock.NewController(t)
defer ctrl.Finish()
candidates := make([]string, 5)
for i := 0; i < len(candidates); i++ {
candidates[i] = identityset.Address(i).String()
}
clock := clock.NewMock()
blockHeight := uint64(8)
footer := &block.Footer{}
blockchain := mock_blockchain.NewMockBlockchain(ctrl)
blockchain.EXPECT().TipHeight().Return(blockHeight).Times(1)
blockchain.EXPECT().BlockFooterByHeight(blockHeight).Return(footer, nil).Times(2)
blockchain.EXPECT().CandidatesByHeight(gomock.Any()).Return([]*state.Candidate{
{Address: candidates[0]},
{Address: candidates[1]},
{Address: candidates[2]},
{Address: candidates[3]},
{Address: candidates[4]},
}, nil).AnyTimes()
sk1 := identityset.PrivateKey(1)
cfg := config.Default
cfg.Genesis.NumDelegates = 4
cfg.Genesis.NumSubEpochs = 1
cfg.Genesis.BlockInterval = 10 * time.Second
cfg.Genesis.Timestamp = int64(1500000000)
blockchain.EXPECT().Genesis().Return(cfg.Genesis).Times(2)
rp := rolldpos.NewProtocol(
cfg.Genesis.NumCandidateDelegates,
cfg.Genesis.NumDelegates,
cfg.Genesis.NumSubEpochs,
)
r, err := NewRollDPoSBuilder().
SetConfig(cfg).
SetAddr(identityset.Address(1).String()).
SetPriKey(sk1).
SetChainManager(blockchain).
SetActPool(mock_actpool.NewMockActPool(ctrl)).
SetBroadcast(func(_ proto.Message) error {
return nil
}).
SetClock(clock).
RegisterProtocol(rp).
Build()
require.NoError(t, err)
require.NotNil(t, r)
clock.Add(r.ctx.BlockInterval(blockHeight))
require.NoError(t, r.ctx.Start(context.Background()))
r.ctx.round, err = r.ctx.RoundCalc().UpdateRound(r.ctx.round, blockHeight+1, r.ctx.BlockInterval(blockHeight+1), clock.Now(), 2*time.Second)
require.NoError(t, err)
m, err := r.Metrics()
require.NoError(t, err)
assert.Equal(t, uint64(3), m.LatestEpoch)
cp.SortCandidates(candidates, rp.GetEpochHeight(m.LatestEpoch), cp.CryptoSeed)
assert.Equal(t, candidates[:4], m.LatestDelegates)
assert.Equal(t, candidates[1], m.LatestBlockProducer)
}
// E2E RollDPoS tests below
type directOverlay struct {
addr net.Addr
peers map[net.Addr]*RollDPoS
}
func (o *directOverlay) Start(_ context.Context) error { return nil }
func (o *directOverlay) Stop(_ context.Context) error { return nil }
func (o *directOverlay) Broadcast(msg proto.Message) error {
// Only broadcast consensus message
if cMsg, ok := msg.(*iotextypes.ConsensusMessage); ok {
for _, r := range o.peers {
if err := r.HandleConsensusMsg(cMsg); err != nil {
return errors.Wrap(err, "error when handling consensus message directly")
}
}
}
return nil
}
func (o *directOverlay) Tell(uint32, net.Addr, proto.Message) error { return nil }
func (o *directOverlay) Self() net.Addr { return o.addr }
func (o *directOverlay) GetPeers() []net.Addr {
addrs := make([]net.Addr, 0, len(o.peers))
for addr := range o.peers {
addrs = append(addrs, addr)
}
return addrs
}
func TestRollDPoSConsensus(t *testing.T) {
newConsensusComponents := func(numNodes int) ([]*RollDPoS, []*directOverlay, []blockchain.Blockchain) {
cfg := config.Default
cfg.Consensus.RollDPoS.ConsensusDBPath = ""
cfg.Consensus.RollDPoS.Delay = 300 * time.Millisecond
cfg.Consensus.RollDPoS.FSM.AcceptBlockTTL = 800 * time.Millisecond
cfg.Consensus.RollDPoS.FSM.AcceptProposalEndorsementTTL = 400 * time.Millisecond
cfg.Consensus.RollDPoS.FSM.AcceptLockEndorsementTTL = 400 * time.Millisecond
cfg.Consensus.RollDPoS.FSM.CommitTTL = 400 * time.Millisecond
cfg.Consensus.RollDPoS.FSM.UnmatchedEventTTL = time.Second
cfg.Consensus.RollDPoS.FSM.UnmatchedEventInterval = 10 * time.Millisecond
cfg.Consensus.RollDPoS.ToleratedOvertime = 200 * time.Millisecond
cfg.Genesis.BlockInterval = 2 * time.Second
cfg.Genesis.Blockchain.NumDelegates = uint64(numNodes)
cfg.Genesis.Blockchain.NumSubEpochs = 1
cfg.Genesis.EnableGravityChainVoting = false
chainAddrs := make([]*addrKeyPair, 0, numNodes)
networkAddrs := make([]net.Addr, 0, numNodes)
for i := 0; i < numNodes; i++ {
sk := identityset.PrivateKey(i)
addr := addrKeyPair{
encodedAddr: identityset.Address(i).String(),
priKey: sk,
}
chainAddrs = append(chainAddrs, &addr)
networkAddrs = append(networkAddrs, node.NewTCPNode(fmt.Sprintf("127.0.0.%d:4689", i+1)))
}
chainRawAddrs := make([]string, 0, numNodes)
addressMap := make(map[string]*addrKeyPair)
for _, addr := range chainAddrs {
chainRawAddrs = append(chainRawAddrs, addr.encodedAddr)
addressMap[addr.encodedAddr] = addr
}
cp.SortCandidates(chainRawAddrs, 1, cp.CryptoSeed)
for i, rawAddress := range chainRawAddrs {
chainAddrs[i] = addressMap[rawAddress]
}
candidatesByHeightFunc := func(_ uint64) ([]*state.Candidate, error) {
candidates := make([]*state.Candidate, 0, numNodes)
for _, addr := range chainAddrs {
candidates = append(candidates, &state.Candidate{Address: addr.encodedAddr})
}
return candidates, nil
}
chains := make([]blockchain.Blockchain, 0, numNodes)
p2ps := make([]*directOverlay, 0, numNodes)
cs := make([]*RollDPoS, 0, numNodes)
for i := 0; i < numNodes; i++ {
ctx := context.Background()
cfg.Chain.ProducerPrivKey = hex.EncodeToString(chainAddrs[i].priKey.Bytes())
sf, err := factory.NewFactory(cfg, factory.InMemTrieOption())
require.NoError(t, err)
require.NoError(t, sf.Start(ctx))
for j := 0; j < numNodes; j++ {
ws, err := sf.NewWorkingSet(nil)
require.NoError(t, err)
_, err = accountutil.LoadOrCreateAccount(ws, chainRawAddrs[j], big.NewInt(0))
require.NoError(t, err)
gasLimit := testutil.TestGasLimit
wsctx := protocol.WithRunActionsCtx(ctx,
protocol.RunActionsCtx{
Producer: identityset.Address(27),
GasLimit: gasLimit,
Genesis: cfg.Genesis,
})
_, err = ws.RunActions(wsctx, 0, nil)
require.NoError(t, err)
require.NoError(t, sf.Commit(ws))
}
registry := protocol.Registry{}
acc := account.NewProtocol()
require.NoError(t, registry.Register(account.ProtocolID, acc))
rp := rolldpos.NewProtocol(cfg.Genesis.NumCandidateDelegates, cfg.Genesis.NumDelegates, cfg.Genesis.NumSubEpochs)
require.NoError(t, registry.Register(rolldpos.ProtocolID, rp))
chain := blockchain.NewBlockchain(
cfg,
nil,
blockchain.InMemDaoOption(),
blockchain.PrecreatedStateFactoryOption(sf),
blockchain.RegistryOption(®istry),
)
chain.Validator().AddActionEnvelopeValidators(protocol.NewGenericValidator(chain.Factory().Nonce))
chains = append(chains, chain)
actPool, err := actpool.NewActPool(chain, cfg.ActPool, actpool.EnableExperimentalActions())
require.NoError(t, err)
p2p := &directOverlay{
addr: networkAddrs[i],
peers: make(map[net.Addr]*RollDPoS),
}
p2ps = append(p2ps, p2p)
consensus, err := NewRollDPoSBuilder().
SetAddr(chainAddrs[i].encodedAddr).
SetPriKey(chainAddrs[i].priKey).
SetConfig(cfg).
SetChainManager(chain).
SetActPool(actPool).
SetBroadcast(p2p.Broadcast).
SetCandidatesByHeightFunc(candidatesByHeightFunc).
RegisterProtocol(rp).
Build()
require.NoError(t, err)
cs = append(cs, consensus)
}
for i := 0; i < numNodes; i++ {
for j := 0; j < numNodes; j++ {
if i != j {
p2ps[i].peers[p2ps[j].addr] = cs[j]
}
}
}
return cs, p2ps, chains
}
t.Run("1-block", func(t *testing.T) {
// TODO: fix and enable the test
t.Skip()
ctx := context.Background()
cs, p2ps, chains := newConsensusComponents(24)
for i := 0; i < 24; i++ {
require.NoError(t, chains[i].Start(ctx))
require.NoError(t, p2ps[i].Start(ctx))
}
wg := sync.WaitGroup{}
wg.Add(24)
for i := 0; i < 24; i++ {
go func(idx int) {
defer wg.Done()
err := cs[idx].Start(ctx)
require.NoError(t, err)
}(i)
}
wg.Wait()
defer func() {
for i := 0; i < 24; i++ {
require.NoError(t, cs[i].Stop(ctx))
require.NoError(t, p2ps[i].Stop(ctx))
require.NoError(t, chains[i].Stop(ctx))
}
}()
assert.NoError(t, testutil.WaitUntil(200*time.Millisecond, 10*time.Second, func() (bool, error) {
for _, chain := range chains {
if chain.TipHeight() < 1 {
return false, nil
}
}
return true, nil
}))
})
t.Run("1-epoch", func(t *testing.T) {
if testing.Short() {
t.Skip("Skip the 1-epoch test in short mode.")
}
ctx := context.Background()
cs, p2ps, chains := newConsensusComponents(24)
for i := 0; i < 24; i++ {
require.NoError(t, chains[i].Start(ctx))
require.NoError(t, p2ps[i].Start(ctx))
}
wg := sync.WaitGroup{}
wg.Add(24)
for i := 0; i < 24; i++ {
go func(idx int) {
defer wg.Done()
err := cs[idx].Start(ctx)
require.NoError(t, err)
}(i)
}
wg.Wait()
defer func() {
for i := 0; i < 24; i++ {
require.NoError(t, cs[i].Stop(ctx))
require.NoError(t, p2ps[i].Stop(ctx))
require.NoError(t, chains[i].Stop(ctx))
}
}()
assert.NoError(t, testutil.WaitUntil(200*time.Millisecond, 100*time.Second, func() (bool, error) {
for _, chain := range chains {
if chain.TipHeight() < 48 {
return false, nil
}
}
return true, nil
}))
})
t.Run("network-partition-time-rotation", func(t *testing.T) {
// TODO: fix and enable the test
t.Skip()
ctx := context.Background()
cs, p2ps, chains := newConsensusComponents(24)
// 1 should be the block 1's proposer
for i, p2p := range p2ps {
if i == 1 {
p2p.peers = make(map[net.Addr]*RollDPoS)
} else {
delete(p2p.peers, p2ps[1].addr)
}
}
for i := 0; i < 24; i++ {
require.NoError(t, chains[i].Start(ctx))
require.NoError(t, p2ps[i].Start(ctx))
}
wg := sync.WaitGroup{}
wg.Add(24)
for i := 0; i < 24; i++ {
go func(idx int) {
defer wg.Done()
cs[idx].ctx.roundCalc.timeBasedRotation = true
err := cs[idx].Start(ctx)
require.NoError(t, err)
}(i)
}
wg.Wait()
defer func() {
for i := 0; i < 24; i++ {
require.NoError(t, cs[i].Stop(ctx))
require.NoError(t, p2ps[i].Stop(ctx))
require.NoError(t, chains[i].Stop(ctx))
}
}()
assert.NoError(t, testutil.WaitUntil(200*time.Millisecond, 60*time.Second, func() (bool, error) {
for i, chain := range chains {
if i == 1 {
continue
}
if chain.TipHeight() < 4 {
return false, nil
}
}
return true, nil
}))
})
t.Run("proposer-network-partition-blocking", func(t *testing.T) {
ctx := context.Background()
cs, p2ps, chains := newConsensusComponents(24)
// 1 should be the block 1's proposer
for i, p2p := range p2ps {
if i == 1 {
p2p.peers = make(map[net.Addr]*RollDPoS)
} else {
delete(p2p.peers, p2ps[1].addr)
}
}
for i := 0; i < 24; i++ {
require.NoError(t, chains[i].Start(ctx))
require.NoError(t, p2ps[i].Start(ctx))
}
wg := sync.WaitGroup{}
wg.Add(24)
for i := 0; i < 24; i++ {
go func(idx int) {
defer wg.Done()
err := cs[idx].Start(ctx)
require.NoError(t, err)
}(i)
}
wg.Wait()
defer func() {
for i := 0; i < 24; i++ {
require.NoError(t, cs[i].Stop(ctx))
require.NoError(t, p2ps[i].Stop(ctx))
require.NoError(t, chains[i].Stop(ctx))
}
}()
time.Sleep(5 * time.Second)
for _, chain := range chains {
header, err := chain.BlockHeaderByHeight(1)
assert.Nil(t, header)
assert.Error(t, err)
}
})
t.Run("non-proposer-network-partition-blocking", func(t *testing.T) {
ctx := context.Background()
cs, p2ps, chains := newConsensusComponents(24)
// 1 should be the block 1's proposer
for i, p2p := range p2ps {
if i == 0 {
p2p.peers = make(map[net.Addr]*RollDPoS)
} else {
delete(p2p.peers, p2ps[0].addr)
}
}
for i := 0; i < 24; i++ {
require.NoError(t, chains[i].Start(ctx))
require.NoError(t, p2ps[i].Start(ctx))
}
wg := sync.WaitGroup{}
wg.Add(24)
for i := 0; i < 24; i++ {
go func(idx int) {
defer wg.Done()
err := cs[idx].Start(ctx)
require.NoError(t, err)
}(i)
}
wg.Wait()
defer func() {
for i := 0; i < 24; i++ {
require.NoError(t, cs[i].Stop(ctx))
require.NoError(t, p2ps[i].Stop(ctx))
require.NoError(t, chains[i].Stop(ctx))
}
}()
assert.NoError(t, testutil.WaitUntil(200*time.Millisecond, 60*time.Second, func() (bool, error) {
for i, chain := range chains {
if i == 0 {
continue
}
if chain.TipHeight() < 2 {
return false, nil
}
}
return true, nil
}))
for i, chain := range chains {
header, err := chain.BlockHeaderByHeight(1)
if i == 0 {
assert.Nil(t, header)
assert.Error(t, err)
} else {
assert.NotNil(t, header)
assert.NoError(t, err)
}
}
})
}
| 1 | 20,068 | shadow: declaration of "err" shadows declaration at line 410 (from `govet`) | iotexproject-iotex-core | go |
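The govet finding above flags a ":=" inside the loop that declares a new err shadowing the one already declared in the enclosing scope. A self-contained sketch of the pattern and the usual fix, using illustrative names rather than the actual test code: assign with "=" to the outer variable, or give the inner one a distinct name.

package main

import (
	"errors"
	"fmt"
)

func step(i int) error {
	if i == 2 {
		return errors.New("step failed")
	}
	return nil
}

func main() {
	var err error
	for i := 0; i < 3; i++ {
		// "err := step(i)" would shadow the outer err declared above;
		// plain "=" assigns to it instead, which is what govet asks for.
		err = step(i)
		if err != nil {
			fmt.Println("stopping:", err)
			break
		}
	}
}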
@@ -1,15 +1,14 @@
package task_config
import (
+ "strings"
+ "time"
+
+ ds "github.com/c2h5oh/datasize"
"github.com/sonm-io/core/insonmnia/structs"
"github.com/sonm-io/core/proto"
)
-type DurationConfig struct {
- Since string `yaml:"since" required:"true"`
- Until string `yaml:"until" required:"true"`
-}
-
type RatingConfig struct {
Buyer int64 `yaml:"buyer" required:"true"`
Supplier int64 `yaml:"supplier" required:"true"` | 1 | package task_config
import (
"github.com/sonm-io/core/insonmnia/structs"
"github.com/sonm-io/core/proto"
)
type DurationConfig struct {
Since string `yaml:"since" required:"true"`
Until string `yaml:"until" required:"true"`
}
type RatingConfig struct {
Buyer int64 `yaml:"buyer" required:"true"`
Supplier int64 `yaml:"supplier" required:"true"`
}
type ResourcesConfig struct {
Cpu uint64 `yaml:"cpu_cores" required:"true"`
Ram uint64 `yaml:"ram_bytes" required:"true"`
Gpu string `yaml:"gpu_count" required:"true"`
Storage uint64 `yaml:"storage" required:"true"`
Network NetworkConfig `yaml:"network" required:"true"`
Properties map[string]float64 `yaml:"properties" required:"true"`
}
type NetworkConfig struct {
In uint64 `yaml:"in" required:"true"`
Out uint64 `yaml:"out" required:"true"`
Type string `yaml:"type" required:"true"`
}
type SlotConfig struct {
Duration DurationConfig `yaml:"duration" required:"true"`
Rating RatingConfig `yaml:"rating" required:"true"`
Resources ResourcesConfig `yaml:"resources" required:"true"`
}
func (c *SlotConfig) IntoSlot() (*structs.Slot, error) {
networkType, err := structs.ParseNetworkType(c.Resources.Network.Type)
if err != nil {
return nil, err
}
gpuCount, err := structs.ParseGPUCount(c.Resources.Gpu)
if err != nil {
return nil, err
}
return structs.NewSlot(&sonm.Slot{
BuyerRating: c.Rating.Buyer,
SupplierRating: c.Rating.Supplier,
Resources: &sonm.Resources{
CpuCores: c.Resources.Cpu,
RamBytes: c.Resources.Ram,
GpuCount: gpuCount,
Storage: c.Resources.Storage,
NetTrafficIn: c.Resources.Network.In,
NetTrafficOut: c.Resources.Network.Out,
NetworkType: networkType,
Properties: c.Resources.Properties,
},
})
}
| 1 | 6,065 | Maybe next time better to make separate PR? | sonm-io-core | go |
@@ -254,6 +254,14 @@ class MaskTestMixin(object):
ori_shapes = tuple(meta['ori_shape'] for meta in img_metas)
scale_factors = tuple(meta['scale_factor'] for meta in img_metas)
+ if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes):
+ if torch.onnx.is_in_onnx_export():
+ raise RuntimeError('[ONNX Error] Can not record MaskHead '
+ 'as it has not been executed this time')
+ segm_results = [[[] for _ in range(self.mask_head.num_classes)]
+ for _ in range(len(det_bboxes))]
+ return segm_results
+
# The length of proposals of different batches may be different.
# In order to form a batch, a padding operation is required.
if isinstance(det_bboxes, list): | 1 | import logging
import sys
import torch
from mmdet.core import (bbox2roi, bbox_mapping, merge_aug_bboxes,
merge_aug_masks, multiclass_nms)
logger = logging.getLogger(__name__)
if sys.version_info >= (3, 7):
from mmdet.utils.contextmanagers import completed
class BBoxTestMixin(object):
if sys.version_info >= (3, 7):
async def async_test_bboxes(self,
x,
img_metas,
proposals,
rcnn_test_cfg,
rescale=False,
bbox_semaphore=None,
global_lock=None):
"""Asynchronized test for box head without augmentation."""
rois = bbox2roi(proposals)
roi_feats = self.bbox_roi_extractor(
x[:len(self.bbox_roi_extractor.featmap_strides)], rois)
if self.with_shared_head:
roi_feats = self.shared_head(roi_feats)
sleep_interval = rcnn_test_cfg.get('async_sleep_interval', 0.017)
async with completed(
__name__, 'bbox_head_forward',
sleep_interval=sleep_interval):
cls_score, bbox_pred = self.bbox_head(roi_feats)
img_shape = img_metas[0]['img_shape']
scale_factor = img_metas[0]['scale_factor']
det_bboxes, det_labels = self.bbox_head.get_bboxes(
rois,
cls_score,
bbox_pred,
img_shape,
scale_factor,
rescale=rescale,
cfg=rcnn_test_cfg)
return det_bboxes, det_labels
def simple_test_bboxes(self,
x,
img_metas,
proposals,
rcnn_test_cfg,
rescale=False):
"""Test only det bboxes without augmentation.
Args:
x (tuple[Tensor]): Feature maps of all scale level.
img_metas (list[dict]): Image meta info.
proposals (Tensor or List[Tensor]): Region proposals.
rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of R-CNN.
rescale (bool): If True, return boxes in original image space.
Default: False.
Returns:
tuple[list[Tensor], list[Tensor]]: The first list contains
the boxes of the corresponding image in a batch, each
tensor has the shape (num_boxes, 5) and last dimension
5 represent (tl_x, tl_y, br_x, br_y, score). Each Tensor
in the second list is the labels with shape (num_boxes, ).
The length of both lists should be equal to batch_size.
"""
        # get original input shape to support onnx dynamic input shape
if torch.onnx.is_in_onnx_export():
assert len(
img_metas
) == 1, 'Only support one input image while in exporting to ONNX'
img_shapes = img_metas[0]['img_shape_for_onnx']
else:
img_shapes = tuple(meta['img_shape'] for meta in img_metas)
scale_factors = tuple(meta['scale_factor'] for meta in img_metas)
# The length of proposals of different batches may be different.
# In order to form a batch, a padding operation is required.
if isinstance(proposals, list):
# padding to form a batch
max_size = max([proposal.size(0) for proposal in proposals])
for i, proposal in enumerate(proposals):
supplement = proposal.new_full(
(max_size - proposal.size(0), proposal.size(1)), 0)
proposals[i] = torch.cat((supplement, proposal), dim=0)
rois = torch.stack(proposals, dim=0)
else:
rois = proposals
batch_index = torch.arange(
rois.size(0), device=rois.device).float().view(-1, 1, 1).expand(
rois.size(0), rois.size(1), 1)
rois = torch.cat([batch_index, rois[..., :4]], dim=-1)
batch_size = rois.shape[0]
num_proposals_per_img = rois.shape[1]
# Eliminate the batch dimension
rois = rois.view(-1, 5)
bbox_results = self._bbox_forward(x, rois)
cls_score = bbox_results['cls_score']
bbox_pred = bbox_results['bbox_pred']
# Recover the batch dimension
rois = rois.reshape(batch_size, num_proposals_per_img, -1)
cls_score = cls_score.reshape(batch_size, num_proposals_per_img, -1)
if not torch.onnx.is_in_onnx_export():
# remove padding
supplement_mask = rois[..., -1] == 0
cls_score[supplement_mask, :] = 0
# bbox_pred would be None in some detector when with_reg is False,
# e.g. Grid R-CNN.
if bbox_pred is not None:
# the bbox prediction of some detectors like SABL is not Tensor
if isinstance(bbox_pred, torch.Tensor):
bbox_pred = bbox_pred.reshape(batch_size,
num_proposals_per_img, -1)
if not torch.onnx.is_in_onnx_export():
bbox_pred[supplement_mask, :] = 0
else:
# TODO: Looking forward to a better way
# For SABL
bbox_preds = self.bbox_head.bbox_pred_split(
bbox_pred, num_proposals_per_img)
# apply bbox post-processing to each image individually
det_bboxes = []
det_labels = []
for i in range(len(proposals)):
# remove padding
supplement_mask = proposals[i][..., -1] == 0
for bbox in bbox_preds[i]:
bbox[supplement_mask] = 0
det_bbox, det_label = self.bbox_head.get_bboxes(
rois[i],
cls_score[i],
bbox_preds[i],
img_shapes[i],
scale_factors[i],
rescale=rescale,
cfg=rcnn_test_cfg)
det_bboxes.append(det_bbox)
det_labels.append(det_label)
return det_bboxes, det_labels
else:
bbox_pred = None
return self.bbox_head.get_bboxes(
rois,
cls_score,
bbox_pred,
img_shapes,
scale_factors,
rescale=rescale,
cfg=rcnn_test_cfg)
def aug_test_bboxes(self, feats, img_metas, proposal_list, rcnn_test_cfg):
"""Test det bboxes with test time augmentation."""
aug_bboxes = []
aug_scores = []
for x, img_meta in zip(feats, img_metas):
# only one image in the batch
img_shape = img_meta[0]['img_shape']
scale_factor = img_meta[0]['scale_factor']
flip = img_meta[0]['flip']
flip_direction = img_meta[0]['flip_direction']
# TODO more flexible
proposals = bbox_mapping(proposal_list[0][:, :4], img_shape,
scale_factor, flip, flip_direction)
rois = bbox2roi([proposals])
bbox_results = self._bbox_forward(x, rois)
bboxes, scores = self.bbox_head.get_bboxes(
rois,
bbox_results['cls_score'],
bbox_results['bbox_pred'],
img_shape,
scale_factor,
rescale=False,
cfg=None)
aug_bboxes.append(bboxes)
aug_scores.append(scores)
# after merging, bboxes will be rescaled to the original image size
merged_bboxes, merged_scores = merge_aug_bboxes(
aug_bboxes, aug_scores, img_metas, rcnn_test_cfg)
det_bboxes, det_labels = multiclass_nms(merged_bboxes, merged_scores,
rcnn_test_cfg.score_thr,
rcnn_test_cfg.nms,
rcnn_test_cfg.max_per_img)
return det_bboxes, det_labels
class MaskTestMixin(object):
if sys.version_info >= (3, 7):
async def async_test_mask(self,
x,
img_metas,
det_bboxes,
det_labels,
rescale=False,
mask_test_cfg=None):
"""Asynchronized test for mask head without augmentation."""
# image shape of the first image in the batch (only one)
ori_shape = img_metas[0]['ori_shape']
scale_factor = img_metas[0]['scale_factor']
if det_bboxes.shape[0] == 0:
segm_result = [[] for _ in range(self.mask_head.num_classes)]
else:
if rescale and not isinstance(scale_factor,
(float, torch.Tensor)):
scale_factor = det_bboxes.new_tensor(scale_factor)
_bboxes = (
det_bboxes[:, :4] *
scale_factor if rescale else det_bboxes)
mask_rois = bbox2roi([_bboxes])
mask_feats = self.mask_roi_extractor(
x[:len(self.mask_roi_extractor.featmap_strides)],
mask_rois)
if self.with_shared_head:
mask_feats = self.shared_head(mask_feats)
if mask_test_cfg and mask_test_cfg.get('async_sleep_interval'):
sleep_interval = mask_test_cfg['async_sleep_interval']
else:
sleep_interval = 0.035
async with completed(
__name__,
'mask_head_forward',
sleep_interval=sleep_interval):
mask_pred = self.mask_head(mask_feats)
segm_result = self.mask_head.get_seg_masks(
mask_pred, _bboxes, det_labels, self.test_cfg, ori_shape,
scale_factor, rescale)
return segm_result
def simple_test_mask(self,
x,
img_metas,
det_bboxes,
det_labels,
rescale=False):
"""Simple test for mask head without augmentation."""
# image shapes of images in the batch
ori_shapes = tuple(meta['ori_shape'] for meta in img_metas)
scale_factors = tuple(meta['scale_factor'] for meta in img_metas)
# The length of proposals of different batches may be different.
# In order to form a batch, a padding operation is required.
if isinstance(det_bboxes, list):
# padding to form a batch
max_size = max([bboxes.size(0) for bboxes in det_bboxes])
for i, (bbox, label) in enumerate(zip(det_bboxes, det_labels)):
supplement_bbox = bbox.new_full(
(max_size - bbox.size(0), bbox.size(1)), 0)
supplement_label = label.new_full((max_size - label.size(0), ),
0)
det_bboxes[i] = torch.cat((supplement_bbox, bbox), dim=0)
det_labels[i] = torch.cat((supplement_label, label), dim=0)
det_bboxes = torch.stack(det_bboxes, dim=0)
det_labels = torch.stack(det_labels, dim=0)
batch_size = det_bboxes.size(0)
num_proposals_per_img = det_bboxes.shape[1]
# if det_bboxes is rescaled to the original image size, we need to
# rescale it back to the testing scale to obtain RoIs.
det_bboxes = det_bboxes[..., :4]
if rescale:
if not isinstance(scale_factors[0], float):
scale_factors = det_bboxes.new_tensor(scale_factors)
det_bboxes = det_bboxes * scale_factors.unsqueeze(1)
batch_index = torch.arange(
det_bboxes.size(0), device=det_bboxes.device).float().view(
-1, 1, 1).expand(det_bboxes.size(0), det_bboxes.size(1), 1)
mask_rois = torch.cat([batch_index, det_bboxes], dim=-1)
mask_rois = mask_rois.view(-1, 5)
mask_results = self._mask_forward(x, mask_rois)
mask_pred = mask_results['mask_pred']
# Recover the batch dimension
mask_preds = mask_pred.reshape(batch_size, num_proposals_per_img,
*mask_pred.shape[1:])
# apply mask post-processing to each image individually
segm_results = []
for i in range(batch_size):
mask_pred = mask_preds[i]
det_bbox = det_bboxes[i]
det_label = det_labels[i]
# remove padding
supplement_mask = det_bbox[..., -1] != 0
mask_pred = mask_pred[supplement_mask]
det_bbox = det_bbox[supplement_mask]
det_label = det_label[supplement_mask]
if det_label.shape[0] == 0:
segm_results.append([[]
for _ in range(self.mask_head.num_classes)
])
else:
segm_result = self.mask_head.get_seg_masks(
mask_pred, det_bbox, det_label, self.test_cfg,
ori_shapes[i], scale_factors[i], rescale)
segm_results.append(segm_result)
return segm_results
def aug_test_mask(self, feats, img_metas, det_bboxes, det_labels):
"""Test for mask head with test time augmentation."""
if det_bboxes.shape[0] == 0:
segm_result = [[] for _ in range(self.mask_head.num_classes)]
else:
aug_masks = []
for x, img_meta in zip(feats, img_metas):
img_shape = img_meta[0]['img_shape']
scale_factor = img_meta[0]['scale_factor']
flip = img_meta[0]['flip']
flip_direction = img_meta[0]['flip_direction']
_bboxes = bbox_mapping(det_bboxes[:, :4], img_shape,
scale_factor, flip, flip_direction)
mask_rois = bbox2roi([_bboxes])
mask_results = self._mask_forward(x, mask_rois)
# convert to numpy array to save memory
aug_masks.append(
mask_results['mask_pred'].sigmoid().cpu().numpy())
merged_masks = merge_aug_masks(aug_masks, img_metas, self.test_cfg)
ori_shape = img_metas[0][0]['ori_shape']
segm_result = self.mask_head.get_seg_masks(
merged_masks,
det_bboxes,
det_labels,
self.test_cfg,
ori_shape,
scale_factor=1.0,
rescale=False)
return segm_result
| 1 | 23,417 | line 316 also handles this case, need to simplify the logic. | open-mmlab-mmdetection | py |
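The review above notes that the early return added for an all-empty det_bboxes duplicates the per-image empty branch further down in simple_test_mask (the "line 316" case). A rough sketch, not the actual mmdet code, of how one small helper could back both places instead of repeating the list comprehension:

def empty_segm_result(num_classes):
    """Empty mask result for one image: one empty list per class."""
    return [[] for _ in range(num_classes)]


def batch_empty_segm_results(det_bboxes, num_classes):
    """Empty mask results for a batch in which nothing was detected."""
    return [empty_segm_result(num_classes) for _ in det_bboxes]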
@@ -122,9 +122,12 @@ type Options struct {
}
// openBucket returns a GCS Bucket that communicates using the given HTTP client.
-func openBucket(ctx context.Context, bucketName string, client *gcp.HTTPClient, opts *Options) (driver.Bucket, error) {
+func openBucket(ctx context.Context, bucketName string, client *gcp.HTTPClient, opts *Options) (*bucket, error) {
if client == nil {
- return nil, errors.New("OpenBucket requires an HTTP client")
+ return nil, errors.New("gcsblob.OpenBucket: client is required")
+ }
+ if bucketName == "" {
+ return nil, errors.New("gcsblob.OpenBucket: bucketName is required")
}
c, err := storage.NewClient(ctx, option.WithHTTPClient(&client.Client))
if err != nil { | 1 | // Copyright 2018 The Go Cloud Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package gcsblob provides an implementation of blob that uses GCS.
//
// For blob.Open URLs, gcsblob registers for the "gs" protocol.
// The URL's Host is used as the bucket name.
// The following query options are supported:
// - cred_path: Sets path to the Google credentials file. If unset, default
// credentials are loaded.
// See https://cloud.google.com/docs/authentication/production.
// - access_id: Sets Options.GoogleAccessID.
// - private_key_path: Sets path to a private key, which is read and used
// to set Options.PrivateKey.
// Example URL: blob.Open("gs://mybucket")
//
// It exposes the following types for As:
// Bucket: *storage.Client
// Error: *googleapi.Error
// ListObject: storage.ObjectAttrs
// ListOptions.BeforeList: *storage.Query
// Reader: storage.Reader
// Attributes: storage.ObjectAttrs
// WriterOptions.BeforeWrite: *storage.Writer
package gcsblob
import (
"context"
"errors"
"io"
"io/ioutil"
"net/url"
"sort"
"strings"
"time"
"github.com/google/go-cloud/blob"
"github.com/google/go-cloud/blob/driver"
"github.com/google/go-cloud/gcp"
"cloud.google.com/go/storage"
"golang.org/x/oauth2/google"
"google.golang.org/api/googleapi"
"google.golang.org/api/iterator"
"google.golang.org/api/option"
)
const defaultPageSize = 1000
func init() {
blob.Register("gs", openURL)
}
func openURL(ctx context.Context, u *url.URL) (driver.Bucket, error) {
q := u.Query()
opts := &Options{}
if accessID := q["access_id"]; len(accessID) > 0 {
opts.GoogleAccessID = accessID[0]
}
if keyPath := q["private_key_path"]; len(keyPath) > 0 {
pk, err := ioutil.ReadFile(keyPath[0])
if err != nil {
return nil, err
}
opts.PrivateKey = pk
}
var creds *google.Credentials
if credPath := q["cred_path"]; len(credPath) == 0 {
var err error
creds, err = gcp.DefaultCredentials(ctx)
if err != nil {
return nil, err
}
} else {
jsonCreds, err := ioutil.ReadFile(credPath[0])
if err != nil {
return nil, err
}
creds, err = google.CredentialsFromJSON(ctx, jsonCreds)
if err != nil {
return nil, err
}
}
client, err := gcp.NewHTTPClient(gcp.DefaultTransport(), gcp.CredentialsTokenSource(creds))
if err != nil {
return nil, err
}
return openBucket(ctx, u.Host, client, opts)
}
// Options sets options for constructing a *blob.Bucket backed by GCS.
type Options struct {
// GoogleAccessID represents the authorizer for SignedURL.
// Required to use SignedURL.
// See https://godoc.org/cloud.google.com/go/storage#SignedURLOptions.
GoogleAccessID string
// PrivateKey is the Google service account private key.
// Exactly one of PrivateKey or SignBytes must be non-nil to use SignedURL.
// See https://godoc.org/cloud.google.com/go/storage#SignedURLOptions.
PrivateKey []byte
// SignBytes is a function for implementing custom signing.
// Exactly one of PrivateKey or SignBytes must be non-nil to use SignedURL.
// See https://godoc.org/cloud.google.com/go/storage#SignedURLOptions.
SignBytes func([]byte) ([]byte, error)
}
// openBucket returns a GCS Bucket that communicates using the given HTTP client.
func openBucket(ctx context.Context, bucketName string, client *gcp.HTTPClient, opts *Options) (driver.Bucket, error) {
if client == nil {
return nil, errors.New("OpenBucket requires an HTTP client")
}
c, err := storage.NewClient(ctx, option.WithHTTPClient(&client.Client))
if err != nil {
return nil, err
}
if opts == nil {
opts = &Options{}
}
return &bucket{name: bucketName, client: c, opts: opts}, nil
}
// OpenBucket returns a GCS Bucket that communicates using the given HTTP client.
func OpenBucket(ctx context.Context, bucketName string, client *gcp.HTTPClient, opts *Options) (*blob.Bucket, error) {
drv, err := openBucket(ctx, bucketName, client, opts)
if err != nil {
return nil, err
}
return blob.NewBucket(drv), nil
}
// bucket represents a GCS bucket, which handles read, write and delete operations
// on objects within it.
type bucket struct {
name string
client *storage.Client
opts *Options
}
var emptyBody = ioutil.NopCloser(strings.NewReader(""))
// reader reads a GCS object. It implements driver.Reader.
type reader struct {
body io.ReadCloser
attrs driver.ReaderAttributes
raw *storage.Reader
}
func (r *reader) Read(p []byte) (int, error) {
return r.body.Read(p)
}
// Close closes the reader itself. It must be called when done reading.
func (r *reader) Close() error {
return r.body.Close()
}
func (r *reader) Attributes() driver.ReaderAttributes {
return r.attrs
}
func (r *reader) As(i interface{}) bool {
p, ok := i.(*storage.Reader)
if !ok {
return false
}
*p = *r.raw
return true
}
// IsNotExist implements driver.IsNotExist.
func (b *bucket) IsNotExist(err error) bool {
return err == storage.ErrObjectNotExist
}
// IsNotImplemented implements driver.IsNotImplemented.
func (b *bucket) IsNotImplemented(err error) bool {
return false
}
// ListPaged implements driver.ListPaged.
func (b *bucket) ListPaged(ctx context.Context, opts *driver.ListOptions) (*driver.ListPage, error) {
bkt := b.client.Bucket(b.name)
query := &storage.Query{
Prefix: opts.Prefix,
Delimiter: opts.Delimiter,
}
if opts.BeforeList != nil {
asFunc := func(i interface{}) bool {
p, ok := i.(**storage.Query)
if !ok {
return false
}
*p = query
return true
}
if err := opts.BeforeList(asFunc); err != nil {
return nil, err
}
}
pageSize := opts.PageSize
if pageSize == 0 {
pageSize = defaultPageSize
}
iter := bkt.Objects(ctx, query)
pager := iterator.NewPager(iter, pageSize, string(opts.PageToken))
var objects []*storage.ObjectAttrs
nextPageToken, err := pager.NextPage(&objects)
if err != nil {
return nil, err
}
page := driver.ListPage{NextPageToken: []byte(nextPageToken)}
if len(objects) > 0 {
page.Objects = make([]*driver.ListObject, len(objects))
for i, obj := range objects {
asFunc := func(i interface{}) bool {
p, ok := i.(*storage.ObjectAttrs)
if !ok {
return false
}
*p = *obj
return true
}
if obj.Prefix == "" {
// Regular blob.
page.Objects[i] = &driver.ListObject{
Key: obj.Name,
ModTime: obj.Updated,
Size: obj.Size,
AsFunc: asFunc,
}
} else {
// "Directory".
page.Objects[i] = &driver.ListObject{
Key: obj.Prefix,
IsDir: true,
AsFunc: asFunc,
}
}
}
// GCS always returns "directories" at the end; sort them.
sort.Slice(page.Objects, func(i, j int) bool {
return page.Objects[i].Key < page.Objects[j].Key
})
}
return &page, nil
}
// As implements driver.As.
func (b *bucket) As(i interface{}) bool {
p, ok := i.(**storage.Client)
if !ok {
return false
}
*p = b.client
return true
}
// As implements driver.ErrorAs.
func (b *bucket) ErrorAs(err error, i interface{}) bool {
switch v := err.(type) {
case *googleapi.Error:
if p, ok := i.(**googleapi.Error); ok {
*p = v
return true
}
}
return false
}
// Attributes implements driver.Attributes.
func (b *bucket) Attributes(ctx context.Context, key string) (driver.Attributes, error) {
bkt := b.client.Bucket(b.name)
obj := bkt.Object(key)
attrs, err := obj.Attrs(ctx)
if err != nil {
return driver.Attributes{}, err
}
return driver.Attributes{
ContentType: attrs.ContentType,
Metadata: attrs.Metadata,
ModTime: attrs.Updated,
Size: attrs.Size,
AsFunc: func(i interface{}) bool {
p, ok := i.(*storage.ObjectAttrs)
if !ok {
return false
}
*p = *attrs
return true
},
}, nil
}
// NewRangeReader implements driver.NewRangeReader.
func (b *bucket) NewRangeReader(ctx context.Context, key string, offset, length int64, opts *driver.ReaderOptions) (driver.Reader, error) {
bkt := b.client.Bucket(b.name)
obj := bkt.Object(key)
r, err := obj.NewRangeReader(ctx, offset, length)
if err != nil {
return nil, err
}
modTime, _ := r.LastModified()
return &reader{
body: r,
attrs: driver.ReaderAttributes{
ContentType: r.ContentType(),
ModTime: modTime,
Size: r.Size(),
},
raw: r,
}, nil
}
// NewTypedWriter implements driver.NewTypedWriter.
func (b *bucket) NewTypedWriter(ctx context.Context, key string, contentType string, opts *driver.WriterOptions) (driver.Writer, error) {
bkt := b.client.Bucket(b.name)
obj := bkt.Object(key)
w := obj.NewWriter(ctx)
w.ContentType = contentType
w.ChunkSize = bufferSize(opts.BufferSize)
w.Metadata = opts.Metadata
w.MD5 = opts.ContentMD5
if opts.BeforeWrite != nil {
asFunc := func(i interface{}) bool {
p, ok := i.(**storage.Writer)
if !ok {
return false
}
*p = w
return true
}
if err := opts.BeforeWrite(asFunc); err != nil {
return nil, err
}
}
return w, nil
}
// Delete implements driver.Delete.
func (b *bucket) Delete(ctx context.Context, key string) error {
bkt := b.client.Bucket(b.name)
obj := bkt.Object(key)
return obj.Delete(ctx)
}
func (b *bucket) SignedURL(ctx context.Context, key string, dopts *driver.SignedURLOptions) (string, error) {
if b.opts.GoogleAccessID == "" || (b.opts.PrivateKey == nil && b.opts.SignBytes == nil) {
return "", errors.New("to use SignedURL, you must call OpenBucket with a valid Options.GoogleAccessID and exactly one of Options.PrivateKey or Options.SignBytes")
}
opts := &storage.SignedURLOptions{
Expires: time.Now().Add(dopts.Expiry),
Method: "GET",
GoogleAccessID: b.opts.GoogleAccessID,
PrivateKey: b.opts.PrivateKey,
SignBytes: b.opts.SignBytes,
}
return storage.SignedURL(b.name, key, opts)
}
func bufferSize(size int) int {
if size == 0 {
return googleapi.DefaultUploadChunkSize
} else if size > 0 {
return size
}
return 0 // disable buffering
}
| 1 | 12,571 | We used to have a simple check for bucket and object names, we then decided to remove it and rely on the backend service to check for the name. Is there any reason to add an empty name check now? | google-go-cloud | go |
@@ -983,7 +983,11 @@ bool RTPSParticipantImpl::sendSync(
for (auto& send_resource : send_resource_list_)
{
- send_resource->send(msg->buffer, msg->length, destination_loc);
+ // Calculate next timeout.
+ std::chrono::microseconds timeout =
+ std::chrono::duration_cast<std::chrono::microseconds>(max_blocking_time_point - std::chrono::steady_clock::now());
+
+ send_resource->send(msg->buffer, msg->length, destination_loc, timeout);
}
}
| 1 | // Copyright 2016 Proyectos y Sistemas de Mantenimiento SL (eProsima).
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/**
* @file RTPSParticipant.cpp
*
*/
#include "RTPSParticipantImpl.h"
#include "../flowcontrol/ThroughputController.h"
#include "../persistence/PersistenceService.h"
#include <fastrtps/rtps/resources/AsyncWriterThread.h>
#include <fastrtps/rtps/messages/MessageReceiver.h>
#include <fastrtps/rtps/writer/StatelessWriter.h>
#include <fastrtps/rtps/writer/StatefulWriter.h>
#include <fastrtps/rtps/writer/StatelessPersistentWriter.h>
#include <fastrtps/rtps/writer/StatefulPersistentWriter.h>
#include <fastrtps/rtps/reader/StatelessReader.h>
#include <fastrtps/rtps/reader/StatefulReader.h>
#include <fastrtps/rtps/reader/StatelessPersistentReader.h>
#include <fastrtps/rtps/reader/StatefulPersistentReader.h>
#include <fastrtps/rtps/participant/RTPSParticipant.h>
#include <fastrtps/transport/UDPv4TransportDescriptor.h>
#include <fastrtps/transport/TCPv4TransportDescriptor.h>
#include <fastrtps/rtps/RTPSDomain.h>
#include <fastrtps/rtps/builtin/BuiltinProtocols.h>
#include <fastrtps/rtps/builtin/discovery/participant/PDPSimple.h>
#include <fastrtps/rtps/builtin/data/ParticipantProxyData.h>
#include <fastrtps/rtps/builtin/liveliness/WLP.h>
#include <fastrtps/utils/IPFinder.h>
#include <fastrtps/utils/eClock.h>
#include <fastrtps/utils/Semaphore.h>
#include <fastrtps/utils/System.h>
#include <mutex>
#include <algorithm>
#include <fastrtps/log/Log.h>
namespace eprosima {
namespace fastrtps{
namespace rtps {
static EntityId_t TrustedWriter(const EntityId_t& reader)
{
return
(reader == c_EntityId_SPDPReader) ? c_EntityId_SPDPWriter :
(reader == c_EntityId_SEDPPubReader) ? c_EntityId_SEDPPubWriter :
(reader == c_EntityId_SEDPSubReader) ? c_EntityId_SEDPSubWriter :
(reader == c_EntityId_ReaderLiveliness) ? c_EntityId_WriterLiveliness :
c_EntityId_Unknown;
}
Locator_t& RTPSParticipantImpl::applyLocatorAdaptRule(Locator_t& loc)
{
// This is a completely made up rule
    // It is the transport's responsibility to interpret this new port.
loc.port += m_att.port.participantIDGain;
return loc;
}
RTPSParticipantImpl::RTPSParticipantImpl(const RTPSParticipantAttributes& PParam, const GuidPrefix_t& guidP,
RTPSParticipant* par, RTPSParticipantListener* plisten)
: m_att(PParam)
, m_guid(guidP ,c_EntityId_RTPSParticipant)
, mp_builtinProtocols(nullptr)
, mp_ResourceSemaphore(new Semaphore(0))
, IdCounter(0)
#if HAVE_SECURITY
, m_security_manager(this)
#endif
, mp_participantListener(plisten)
, mp_userParticipant(par)
, mp_mutex(new std::recursive_mutex())
{
// Builtin transport by default
if (PParam.useBuiltinTransports)
{
UDPv4TransportDescriptor descriptor;
descriptor.sendBufferSize = m_att.sendSocketBufferSize;
descriptor.receiveBufferSize = m_att.listenSocketBufferSize;
m_network_Factory.RegisterTransport(&descriptor);
}
// Workaround TCP discovery issues when register
switch (PParam.builtin.discovery_config.discoveryProtocol)
{
case DiscoveryProtocol::CLIENT:
case DiscoveryProtocol::SERVER:
case DiscoveryProtocol::BACKUP:
// Verify if listening ports are provided
for (auto& transportDescriptor : PParam.userTransports)
{
TCPTransportDescriptor * pT = dynamic_cast<TCPTransportDescriptor *>(transportDescriptor.get());
if (pT && pT->listening_ports.empty())
{
logError(RTPS_PARTICIPANT, "Participant " << m_att.getName() << " with GUID " << m_guid
<< " tries to use discovery server over TCP without providing a proper listening port");
}
}
default:
break;
}
// User defined transports
for (const auto& transportDescriptor : PParam.userTransports)
{
m_network_Factory.RegisterTransport(transportDescriptor.get());
}
mp_userParticipant->mp_impl = this;
mp_event_thr.init_thread();
// Throughput controller, if the descriptor has valid values
if (PParam.throughputController.bytesPerPeriod != UINT32_MAX && PParam.throughputController.periodMillisecs != 0)
{
std::unique_ptr<FlowController> controller(new ThroughputController(PParam.throughputController, this));
m_controllers.push_back(std::move(controller));
}
/* If metatrafficMulticastLocatorList is empty, add mandatory default Locators
Else -> Take them */
// Creation of metatraffic locator and receiver resources
uint32_t metatraffic_multicast_port = m_att.port.getMulticastPort(m_att.builtin.domainId);
uint32_t metatraffic_unicast_port = m_att.port.getUnicastPort(m_att.builtin.domainId,
static_cast<uint32_t>(m_att.participantID));
/* INSERT DEFAULT MANDATORY MULTICAST LOCATORS HERE */
if(m_att.builtin.metatrafficMulticastLocatorList.empty() && m_att.builtin.metatrafficUnicastLocatorList.empty())
{
m_network_Factory.getDefaultMetatrafficMulticastLocators(m_att.builtin.metatrafficMulticastLocatorList,
metatraffic_multicast_port);
m_network_Factory.NormalizeLocators(m_att.builtin.metatrafficMulticastLocatorList);
m_network_Factory.getDefaultMetatrafficUnicastLocators(m_att.builtin.metatrafficUnicastLocatorList,
metatraffic_unicast_port);
m_network_Factory.NormalizeLocators(m_att.builtin.metatrafficUnicastLocatorList);
}
else
{
std::for_each(m_att.builtin.metatrafficMulticastLocatorList.begin(),
m_att.builtin.metatrafficMulticastLocatorList.end(), [&](Locator_t& locator)
{
m_network_Factory.fillMetatrafficMulticastLocator(locator, metatraffic_multicast_port);
});
m_network_Factory.NormalizeLocators(m_att.builtin.metatrafficMulticastLocatorList);
std::for_each(m_att.builtin.metatrafficUnicastLocatorList.begin(),
m_att.builtin.metatrafficUnicastLocatorList.end(), [&](Locator_t& locator)
{
m_network_Factory.fillMetatrafficUnicastLocator(locator, metatraffic_unicast_port);
});
m_network_Factory.NormalizeLocators(m_att.builtin.metatrafficUnicastLocatorList);
}
createReceiverResources(m_att.builtin.metatrafficMulticastLocatorList, true);
createReceiverResources(m_att.builtin.metatrafficUnicastLocatorList, true);
// Initial peers
if(m_att.builtin.initialPeersList.empty())
{
m_att.builtin.initialPeersList = m_att.builtin.metatrafficMulticastLocatorList;
}
else
{
LocatorList_t initial_peers;
initial_peers.swap(m_att.builtin.initialPeersList);
std::for_each(initial_peers.begin(), initial_peers.end(),
[&](Locator_t& locator) {
m_network_Factory.configureInitialPeerLocator(locator, m_att);
});
}
// Creation of user locator and receiver resources
bool hasLocatorsDefined = true;
//If no default locators are defined we define some.
/* The reasoning here is the following.
If the parameters of the RTPS Participant don't hold default listening locators for the creation
of Endpoints, we make some for Unicast only.
If there is at least one listen locator of any kind, we do not create any default ones.
If there are no sending locators defined, we create default ones for the transports we implement.
*/
if (m_att.defaultUnicastLocatorList.empty() && m_att.defaultMulticastLocatorList.empty())
{
//Default Unicast Locators in case they have not been provided
/* INSERT DEFAULT UNICAST LOCATORS FOR THE PARTICIPANT */
hasLocatorsDefined = false;
m_network_Factory.getDefaultUnicastLocators(m_att.defaultUnicastLocatorList, m_att);
}
else
{
// Locator with port 0, calculate port.
std::for_each(m_att.defaultUnicastLocatorList.begin(), m_att.defaultUnicastLocatorList.end(),
[&](Locator_t& loc)
{
m_network_Factory.fillDefaultUnicastLocator(loc, m_att);
});
}
// Normalize unicast locators.
m_network_Factory.NormalizeLocators(m_att.defaultUnicastLocatorList);
createReceiverResources(m_att.defaultUnicastLocatorList, true);
if (!hasLocatorsDefined)
{
logInfo(RTPS_PARTICIPANT, m_att.getName() << " Created with NO default Unicast Locator List, adding Locators:"
<< m_att.defaultUnicastLocatorList);
}
createReceiverResources(m_att.defaultMulticastLocatorList, true);
#if HAVE_SECURITY
// Start security
// TODO(Ricardo) Get returned value in future.
m_security_manager_initialized = m_security_manager.init(security_attributes_, PParam.properties, m_is_security_active);
#endif
//START BUILTIN PROTOCOLS
mp_builtinProtocols = new BuiltinProtocols();
if(!mp_builtinProtocols->initBuiltinProtocols(this,m_att.builtin))
{
logError(RTPS_PARTICIPANT, "The builtin protocols were not correctly initialized");
}
logInfo(RTPS_PARTICIPANT,"RTPSParticipant \"" << m_att.getName() << "\" with guidPrefix: " <<m_guid.guidPrefix);
}
const std::vector<RTPSWriter*>& RTPSParticipantImpl::getAllWriters() const
{
return m_allWriterList;
}
const std::vector<RTPSReader*>& RTPSParticipantImpl::getAllReaders() const
{
return m_allReaderList;
}
RTPSParticipantImpl::~RTPSParticipantImpl()
{
// Disable Retries on Transports
m_network_Factory.Shutdown();
// Safely abort threads.
for(auto& block : m_receiverResourcelist)
{
block.Receiver->UnregisterReceiver(block.mp_receiver);
block.disable();
}
while(m_userReaderList.size() > 0)
{
deleteUserEndpoint(static_cast<Endpoint*>(*m_userReaderList.begin()));
}
while(m_userWriterList.size() > 0)
{
deleteUserEndpoint(static_cast<Endpoint*>(*m_userWriterList.begin()));
}
delete(this->mp_builtinProtocols);
#if HAVE_SECURITY
m_security_manager.destroy();
#endif
// Destruct message receivers
for (auto& block : m_receiverResourcelist)
{
delete block.mp_receiver;
}
m_receiverResourcelist.clear();
delete(this->mp_ResourceSemaphore);
delete(this->mp_userParticipant);
send_resource_list_.clear();
delete(this->mp_mutex);
}
/*
*
* MAIN RTPSParticipant IMPL API
*
*/
bool RTPSParticipantImpl::createWriter(
RTPSWriter** WriterOut,
WriterAttributes& param,
WriterHistory* hist,
WriterListener* listen,
const EntityId_t& entityId,
bool isBuiltin)
{
std::string type = (param.endpoint.reliabilityKind == RELIABLE) ? "RELIABLE" :"BEST_EFFORT";
logInfo(RTPS_PARTICIPANT," of type " << type);
EntityId_t entId;
if(entityId == c_EntityId_Unknown)
{
if(param.endpoint.topicKind == NO_KEY)
{
entId.value[3] = 0x03;
}
else if(param.endpoint.topicKind == WITH_KEY)
{
entId.value[3] = 0x02;
}
uint32_t idnum;
if(param.endpoint.getEntityID() > 0)
{
idnum = static_cast<uint32_t>(param.endpoint.getEntityID());
}
else
{
IdCounter++;
idnum = IdCounter;
}
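// Write the 24-bit entity key into value[0..2] in big-endian order; assuming a little-endian host, c[0] is the least significant byte of idnum.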
octet* c = reinterpret_cast<octet*>(&idnum);
entId.value[2] = c[0];
entId.value[1] = c[1];
entId.value[0] = c[2];
if(this->existsEntityId(entId, WRITER))
{
logError(RTPS_PARTICIPANT,"A writer with the same entityId already exists in this RTPSParticipant");
return false;
}
}
else
{
entId = entityId;
}
if(!param.endpoint.unicastLocatorList.isValid())
{
logError(RTPS_PARTICIPANT,"Unicast Locator List for Writer contains invalid Locator");
return false;
}
if(!param.endpoint.multicastLocatorList.isValid())
{
logError(RTPS_PARTICIPANT,"Multicast Locator List for Writer contains invalid Locator");
return false;
}
if(!param.endpoint.remoteLocatorList.isValid())
{
logError(RTPS_PARTICIPANT,"Remote Locator List for Writer contains invalid Locator");
return false;
}
if (((param.throughputController.bytesPerPeriod != UINT32_MAX && param.throughputController.periodMillisecs != 0) ||
(m_att.throughputController.bytesPerPeriod != UINT32_MAX && m_att.throughputController.periodMillisecs != 0))
&& param.mode != ASYNCHRONOUS_WRITER)
{
logError(RTPS_PARTICIPANT, "Writer has to be configured to publish asynchronously, because a flowcontroller was configured");
return false;
}
// Get persistence service
IPersistenceService* persistence = nullptr;
if (param.endpoint.durabilityKind >= TRANSIENT)
{
persistence = get_persistence_service(param.endpoint);
if (persistence == nullptr)
{
logError(RTPS_PARTICIPANT, "Couldn't create persistence service for transient/persistent writer");
return false;
}
}
normalize_endpoint_locators(param.endpoint);
RTPSWriter* SWriter = nullptr;
GUID_t guid(m_guid.guidPrefix,entId);
if (param.endpoint.reliabilityKind == BEST_EFFORT)
{
SWriter = (persistence == nullptr) ?
new StatelessWriter(this, guid, param, hist, listen) :
new StatelessPersistentWriter(this, guid, param, hist, listen, persistence);
}
else if (param.endpoint.reliabilityKind == RELIABLE)
{
SWriter = (persistence == nullptr) ?
new StatefulWriter(this, guid, param, hist, listen) :
new StatefulPersistentWriter(this, guid, param, hist, listen, persistence);
}
if (SWriter == nullptr)
{
return false;
}
#if HAVE_SECURITY
if(!isBuiltin)
{
if(!m_security_manager.register_local_writer(SWriter->getGuid(),
param.endpoint.properties, SWriter->getAttributes().security_attributes()))
{
delete(SWriter);
return false;
}
}
else
{
if(!m_security_manager.register_local_builtin_writer(SWriter->getGuid(),
SWriter->getAttributes().security_attributes()))
{
delete(SWriter);
return false;
}
}
#endif
createSendResources(SWriter);
if (param.endpoint.reliabilityKind == RELIABLE)
{
if (!createAndAssociateReceiverswithEndpoint(SWriter))
{
delete(SWriter);
return false;
}
}
// Asynchronous thread runs regardless of mode because of
// nack response duties.
AsyncWriterThread::addWriter(*SWriter);
std::lock_guard<std::recursive_mutex> guard(*mp_mutex);
m_allWriterList.push_back(SWriter);
if (!isBuiltin)
{
m_userWriterList.push_back(SWriter);
}
*WriterOut = SWriter;
// If the terminal throughput controller has proper user defined values, instantiate it
if (param.throughputController.bytesPerPeriod != UINT32_MAX && param.throughputController.periodMillisecs != 0)
{
std::unique_ptr<FlowController> controller(new ThroughputController(param.throughputController, SWriter));
SWriter->add_flow_controller(std::move(controller));
}
return true;
}
bool RTPSParticipantImpl::createReader(
RTPSReader** ReaderOut,
ReaderAttributes& param,
ReaderHistory* hist,
ReaderListener* listen,
const EntityId_t& entityId,
bool isBuiltin,
bool enable)
{
std::string type = (param.endpoint.reliabilityKind == RELIABLE) ? "RELIABLE" :"BEST_EFFORT";
logInfo(RTPS_PARTICIPANT," of type " << type);
EntityId_t entId;
if(entityId== c_EntityId_Unknown)
{
if (param.endpoint.topicKind == NO_KEY)
{
entId.value[3] = 0x04;
}
else if (param.endpoint.topicKind == WITH_KEY)
{
entId.value[3] = 0x07;
}
uint32_t idnum;
if (param.endpoint.getEntityID() > 0)
{
idnum = static_cast<uint32_t>(param.endpoint.getEntityID());
}
else
{
IdCounter++;
idnum = IdCounter;
}
octet* c = reinterpret_cast<octet*>(&idnum);
entId.value[2] = c[0];
entId.value[1] = c[1];
entId.value[0] = c[2];
if(this->existsEntityId(entId,READER))
{
logError(RTPS_PARTICIPANT,"A reader with the same entityId already exists in this RTPSParticipant");
return false;
}
}
else
{
entId = entityId;
}
if(!param.endpoint.unicastLocatorList.isValid())
{
logError(RTPS_PARTICIPANT,"Unicast Locator List for Reader contains invalid Locator");
return false;
}
if(!param.endpoint.multicastLocatorList.isValid())
{
logError(RTPS_PARTICIPANT,"Multicast Locator List for Reader contains invalid Locator");
return false;
}
if(!param.endpoint.remoteLocatorList.isValid())
{
logError(RTPS_PARTICIPANT,"Remote Locator List for Reader contains invalid Locator");
return false;
}
// Get persistence service
IPersistenceService* persistence = nullptr;
if (param.endpoint.durabilityKind >= TRANSIENT)
{
persistence = get_persistence_service(param.endpoint);
if (persistence == nullptr)
{
logError(RTPS_PARTICIPANT, "Couldn't create persistence service for transient/persistent reader");
return false;
}
}
normalize_endpoint_locators(param.endpoint);
RTPSReader* SReader = nullptr;
GUID_t guid(m_guid.guidPrefix,entId);
if (param.endpoint.reliabilityKind == BEST_EFFORT)
{
SReader = (persistence == nullptr) ?
new StatelessReader(this, guid, param, hist, listen) :
new StatelessPersistentReader(this, guid, param, hist, listen, persistence);
}
else if (param.endpoint.reliabilityKind == RELIABLE)
{
SReader = (persistence == nullptr) ?
new StatefulReader(this, guid, param, hist, listen) :
new StatefulPersistentReader(this, guid, param, hist, listen, persistence);
}
if (SReader == nullptr)
{
return false;
}
#if HAVE_SECURITY
if(!isBuiltin)
{
if(!m_security_manager.register_local_reader(SReader->getGuid(),
param.endpoint.properties, SReader->getAttributes().security_attributes()))
{
delete(SReader);
return false;
}
}
else
{
if(!m_security_manager.register_local_builtin_reader(SReader->getGuid(),
SReader->getAttributes().security_attributes()))
{
delete(SReader);
return false;
}
}
#endif
if (param.endpoint.reliabilityKind == RELIABLE)
{
createSendResources(SReader);
}
if (isBuiltin)
{
SReader->setTrustedWriter(TrustedWriter(SReader->getGuid().entityId));
}
if (enable)
{
if (!createAndAssociateReceiverswithEndpoint(SReader))
{
delete(SReader);
return false;
}
}
std::lock_guard<std::recursive_mutex> guard(*mp_mutex);
m_allReaderList.push_back(SReader);
if (!isBuiltin)
{
m_userReaderList.push_back(SReader);
}
*ReaderOut = SReader;
return true;
}
bool RTPSParticipantImpl::enableReader(RTPSReader *reader)
{
if (!assignEndpointListenResources(reader))
{
return false;
}
return true;
}
// Avoid the PDPSimple reader receiving a DATA message while ~PDPSimple is being called and the EDP has already been destroyed.
void RTPSParticipantImpl::disableReader(RTPSReader *reader)
{
m_receiverResourcelistMutex.lock();
for (auto it = m_receiverResourcelist.begin(); it != m_receiverResourcelist.end(); ++it)
{
it->mp_receiver->removeEndpoint(reader);
}
m_receiverResourcelistMutex.unlock();
}
bool RTPSParticipantImpl::registerWriter(RTPSWriter* Writer, const TopicAttributes& topicAtt, const WriterQos& wqos)
{
return this->mp_builtinProtocols->addLocalWriter(Writer, topicAtt, wqos);
}
bool RTPSParticipantImpl::registerReader(RTPSReader* reader, const TopicAttributes& topicAtt, const ReaderQos& rqos)
{
return this->mp_builtinProtocols->addLocalReader(reader, topicAtt, rqos);
}
bool RTPSParticipantImpl::updateLocalWriter(RTPSWriter* Writer, const TopicAttributes& topicAtt, const WriterQos& wqos)
{
return this->mp_builtinProtocols->updateLocalWriter(Writer, topicAtt, wqos);
}
bool RTPSParticipantImpl::updateLocalReader(RTPSReader* reader, const TopicAttributes& topicAtt, const ReaderQos& rqos)
{
return this->mp_builtinProtocols->updateLocalReader(reader, topicAtt, rqos);
}
/*
*
* AUXILIARY METHODS
*
*
*/
bool RTPSParticipantImpl::existsEntityId(const EntityId_t& ent,EndpointKind_t kind) const
{
if (kind == WRITER)
{
for (std::vector<RTPSWriter*>::const_iterator it = m_userWriterList.begin(); it != m_userWriterList.end(); ++it)
{
if (ent == (*it)->getGuid().entityId)
{
return true;
}
}
}
else
{
for (std::vector<RTPSReader*>::const_iterator it = m_userReaderList.begin(); it != m_userReaderList.end(); ++it)
{
if (ent == (*it)->getGuid().entityId)
{
return true;
}
}
}
return false;
}
/*
*
* RECEIVER RESOURCE METHODS
*
*/
bool RTPSParticipantImpl::assignEndpointListenResources(Endpoint* endp)
{
//Tag the endpoint with the ReceiverResources
bool valid = true;
/* No need to check the lists for emptiness, as that was already done in the calling function.
If the default list of Locators is being used, it has already been embedded into the parameters.
*/
//UNICAST
assignEndpoint2LocatorList(endp, endp->getAttributes().unicastLocatorList);
//MULTICAST
assignEndpoint2LocatorList(endp, endp->getAttributes().multicastLocatorList);
return valid;
}
bool RTPSParticipantImpl::createAndAssociateReceiverswithEndpoint(Endpoint * pend)
{
/* This function...
- Asks the network factory for new resources
- Encapsulates the new resources within the ReceiverControlBlock list
- Associates the endpoint with the new elements in the list
- Launches the listener thread
*/
// 1 - Ask the network factory to generate the elements that do not yet exist
std::vector<ReceiverResource> newItems; //Store the newly created elements
std::vector<ReceiverResource> newItemsBuffer; //Store intermediate results
//Iterate through the list of unicast and multicast locators the endpoint has... unless it's empty.
//In that case, just use the default unicast locators.
if (pend->getAttributes().unicastLocatorList.empty() && pend->getAttributes().multicastLocatorList.empty())
{
//Default unicast
pend->getAttributes().unicastLocatorList = m_att.defaultUnicastLocatorList;
}
createReceiverResources(pend->getAttributes().unicastLocatorList, false);
createReceiverResources(pend->getAttributes().multicastLocatorList, false);
// Associate the Endpoint with ReceiverControlBlock
assignEndpointListenResources(pend);
return true;
}
bool RTPSParticipantImpl::assignEndpoint2LocatorList(Endpoint* endp, LocatorList_t& list)
{
/* Note:
The previous version of this function associated (or created) ListenResources and added the endpoint to them.
It then requested the list of Locators the Listener is listening to and appended to the LocatorList_t from the parameters.
This has been removed because it is considered redundant. For ReceiveResources that listen on multiple interfaces, only
one of the supported Locators is needed to make the match, and the case of new ListenResources being created has been removed
since the NetworkFactory is the one that takes care of Resource creation.
*/
LocatorList_t finalList;
for (auto lit = list.begin(); lit != list.end(); ++lit)
{
//Iteration of all Locators within the Locator list passed down as argument
std::lock_guard<std::mutex> guard(m_receiverResourcelistMutex);
//Check among ReceiverResources whether the locator is supported or not
for (auto it = m_receiverResourcelist.begin(); it != m_receiverResourcelist.end(); ++it)
{
//Take mutex for the resource since we are going to interact with shared resources
//std::lock_guard<std::mutex> guard((*it).mtx);
if (it->Receiver->SupportsLocator(*lit))
{
//Supported! Take mutex and update lists - We maintain reader/writer discrimination just in case
it->mp_receiver->associateEndpoint(endp);
// end association between reader/writer and the receive resources
}
}
//Finished iterating through all ListenResources for a single Locator (from the parameter list).
//Since this function is called after checking with the NetworkFactory, we do not have to create any more resources.
}
return true;
}
bool RTPSParticipantImpl::createSendResources(Endpoint *pend)
{
if (pend->m_att.remoteLocatorList.empty())
{
// Adds the default locators of every registered transport.
m_network_Factory.GetDefaultOutputLocators(pend->m_att.remoteLocatorList);
}
std::lock_guard<std::timed_mutex> guard(m_send_resources_mutex_);
//Output locators have been specified, create them
for (auto it = pend->m_att.remoteLocatorList.begin(); it != pend->m_att.remoteLocatorList.end(); ++it)
{
if(!m_network_Factory.build_send_resources(send_resource_list_, (*it)))
{
logWarning(RTPS_PARTICIPANT, "Cannot create send resource for endpoint remote locator (" <<
pend->getGuid() << ", " << (*it) << ")");
}
}
return true;
}
void RTPSParticipantImpl::createReceiverResources(LocatorList_t& Locator_list, bool ApplyMutation)
{
std::vector<std::shared_ptr<ReceiverResource>> newItemsBuffer;
uint32_t size = m_network_Factory.get_max_message_size_between_transports();
for (auto it_loc = Locator_list.begin(); it_loc != Locator_list.end(); ++it_loc)
{
bool ret = m_network_Factory.BuildReceiverResources(*it_loc, size, newItemsBuffer);
if (!ret && ApplyMutation)
{
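// If the locator could not be opened (e.g. the port is already in use), mutate it with applyLocatorAdaptRule and retry up to mutation_tries times.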
uint32_t tries = 0;
while (!ret && (tries < m_att.builtin.mutation_tries))
{
tries++;
*it_loc = applyLocatorAdaptRule(*it_loc);
ret = m_network_Factory.BuildReceiverResources(*it_loc, size, newItemsBuffer);
}
}
for (auto it_buffer = newItemsBuffer.begin(); it_buffer != newItemsBuffer.end(); ++it_buffer)
{
std::lock_guard<std::mutex> lock(m_receiverResourcelistMutex);
//Push the new items into the ReceiverResource buffer
m_receiverResourcelist.emplace_back(*it_buffer);
//Create and init the MessageReceiver
auto mr = new MessageReceiver(this, size);
m_receiverResourcelist.back().mp_receiver = mr;
//Start reception
m_receiverResourcelist.back().Receiver->RegisterReceiver(mr);
}
newItemsBuffer.clear();
}
}
void RTPSParticipantImpl::createSenderResources(const LocatorList_t& locator_list)
{
std::unique_lock<std::timed_mutex> lock(m_send_resources_mutex_);
for (auto it_loc = locator_list.begin(); it_loc != locator_list.end(); ++it_loc)
{
m_network_Factory.build_send_resources(send_resource_list_, *it_loc);
}
}
void RTPSParticipantImpl::createSenderResources(const Locator_t& locator)
{
std::unique_lock<std::timed_mutex> lock(m_send_resources_mutex_);
m_network_Factory.build_send_resources(send_resource_list_, locator);
}
bool RTPSParticipantImpl::deleteUserEndpoint(Endpoint* p_endpoint)
{
m_receiverResourcelistMutex.lock();
for (auto it = m_receiverResourcelist.begin(); it != m_receiverResourcelist.end(); ++it)
{
it->mp_receiver->removeEndpoint(p_endpoint);
}
m_receiverResourcelistMutex.unlock();
bool found = false, found_in_users = false;
{
if(p_endpoint->getAttributes().endpointKind == WRITER)
{
std::lock_guard<std::recursive_mutex> guard(*mp_mutex);
for (auto wit = m_userWriterList.begin(); wit != m_userWriterList.end(); ++wit)
{
if ((*wit)->getGuid().entityId == p_endpoint->getGuid().entityId) //Found it
{
m_userWriterList.erase(wit);
found_in_users = true;
break;
}
}
for (auto wit = m_allWriterList.begin(); wit != m_allWriterList.end(); ++wit)
{
if ((*wit)->getGuid().entityId == p_endpoint->getGuid().entityId) //Found it
{
m_allWriterList.erase(wit);
found = true;
break;
}
}
}
else
{
std::lock_guard<std::recursive_mutex> guard(*mp_mutex);
for (auto rit = m_userReaderList.begin(); rit != m_userReaderList.end(); ++rit)
{
if ((*rit)->getGuid().entityId == p_endpoint->getGuid().entityId) //Found it
{
m_userReaderList.erase(rit);
found_in_users = true;
break;
}
}
for (auto rit = m_allReaderList.begin(); rit != m_allReaderList.end(); ++rit)
{
if ((*rit)->getGuid().entityId == p_endpoint->getGuid().entityId) //Found it
{
m_allReaderList.erase(rit);
found = true;
break;
}
}
}
if (!found)
{
return false;
}
//REMOVE FROM BUILTINPROTOCOLS
if(p_endpoint->getAttributes().endpointKind == WRITER)
{
if (found_in_users)
{
mp_builtinProtocols->removeLocalWriter(static_cast<RTPSWriter*>(p_endpoint));
}
#if HAVE_SECURITY
if(p_endpoint->getAttributes().security_attributes().is_submessage_protected ||
p_endpoint->getAttributes().security_attributes().is_payload_protected)
{
m_security_manager.unregister_local_writer(p_endpoint->getGuid());
}
#endif
}
else
{
if (found_in_users)
{
mp_builtinProtocols->removeLocalReader(static_cast<RTPSReader*>(p_endpoint));
}
#if HAVE_SECURITY
if(p_endpoint->getAttributes().security_attributes().is_submessage_protected ||
p_endpoint->getAttributes().security_attributes().is_payload_protected)
{
m_security_manager.unregister_local_reader(p_endpoint->getGuid());
}
#endif
}
}
// std::lock_guard<std::recursive_mutex> guardEndpoint(*p_endpoint->getMutex());
delete(p_endpoint);
return true;
}
void RTPSParticipantImpl::normalize_endpoint_locators(EndpointAttributes& endpoint_att)
{
// Locators with port 0, calculate port.
for (Locator_t& loc : endpoint_att.unicastLocatorList)
{
m_network_Factory.fillDefaultUnicastLocator(loc, m_att);
}
for (Locator_t& loc : endpoint_att.multicastLocatorList)
{
m_network_Factory.fillDefaultUnicastLocator(loc, m_att);
}
// Normalize unicast locators
if (!endpoint_att.unicastLocatorList.empty())
{
m_network_Factory.NormalizeLocators(endpoint_att.unicastLocatorList);
}
}
std::vector<std::string> RTPSParticipantImpl::getParticipantNames() const
{
std::vector<std::string> participant_names;
auto pdp = mp_builtinProtocols->mp_PDP;
for (auto it = pdp->ParticipantProxiesBegin(); it != pdp->ParticipantProxiesEnd(); ++it)
{
participant_names.emplace_back((*it)->m_participantName.to_string());
}
return participant_names;
}
bool RTPSParticipantImpl::sendSync(
CDRMessage_t* msg,
const Locator_t& destination_loc,
std::chrono::steady_clock::time_point& max_blocking_time_point)
{
bool ret_code = false;
std::unique_lock<std::timed_mutex> lock(m_send_resources_mutex_, std::defer_lock);
if(lock.try_lock_until(max_blocking_time_point))
{
ret_code = true;
for (auto& send_resource : send_resource_list_)
{
send_resource->send(msg->buffer, msg->length, destination_loc);
}
}
return ret_code;
}
void RTPSParticipantImpl::setGuid(GUID_t& guid)
{
m_guid = guid;
}
void RTPSParticipantImpl::announceRTPSParticipantState()
{
return mp_builtinProtocols->announceRTPSParticipantState();
}
void RTPSParticipantImpl::stopRTPSParticipantAnnouncement()
{
return mp_builtinProtocols->stopRTPSParticipantAnnouncement();
}
void RTPSParticipantImpl::resetRTPSParticipantAnnouncement()
{
return mp_builtinProtocols->resetRTPSParticipantAnnouncement();
}
void RTPSParticipantImpl::loose_next_change()
{
//NOTE: This is replaced by the test transport
//this->mp_send_thr->loose_next_change();
}
bool RTPSParticipantImpl::newRemoteEndpointDiscovered(const GUID_t& pguid, int16_t userDefinedId, EndpointKind_t kind)
{
if (m_att.builtin.discovery_config.discoveryProtocol != DiscoveryProtocol::SIMPLE ||
m_att.builtin.discovery_config.use_STATIC_EndpointDiscoveryProtocol == false)
{
logWarning(RTPS_PARTICIPANT, "Remote Endpoints can only be activated with static discovery protocol over PDP simple protocol");
return false;
}
if (PDPSimple * pS = dynamic_cast<PDPSimple*>(mp_builtinProtocols->mp_PDP))
{
return pS->newRemoteEndpointStaticallyDiscovered(pguid, userDefinedId, kind);
}
return false;
}
void RTPSParticipantImpl::ResourceSemaphorePost()
{
if (mp_ResourceSemaphore != nullptr)
{
mp_ResourceSemaphore->post();
}
}
void RTPSParticipantImpl::ResourceSemaphoreWait()
{
if (mp_ResourceSemaphore != nullptr)
{
mp_ResourceSemaphore->wait();
}
}
void RTPSParticipantImpl::assertRemoteRTPSParticipantLiveliness(const GuidPrefix_t& guidP)
{
this->mp_builtinProtocols->mp_PDP->assertRemoteParticipantLiveliness(guidP);
}
uint32_t RTPSParticipantImpl::getMaxMessageSize() const
{
return m_network_Factory.get_max_message_size_between_transports();
}
uint32_t RTPSParticipantImpl::getMaxDataSize()
{
return calculateMaxDataSize(getMaxMessageSize());
}
uint32_t RTPSParticipantImpl::calculateMaxDataSize(uint32_t length)
{
uint32_t maxDataSize = length;
#if HAVE_SECURITY
// If there is RTPS message protection, reduce the max size for messages,
// because extra data is added on encryption.
if(security_attributes_.is_rtps_protected)
{
maxDataSize -= m_security_manager.calculate_extra_size_for_rtps_message();
}
#endif
// RTPS header
maxDataSize -= RTPSMESSAGE_HEADER_SIZE;
return maxDataSize;
}
bool RTPSParticipantImpl::networkFactoryHasRegisteredTransports() const
{
return m_network_Factory.numberOfRegisteredTransports() > 0;
}
#if HAVE_SECURITY
bool RTPSParticipantImpl::pairing_remote_reader_with_local_writer_after_security(const GUID_t& local_writer,
const ReaderProxyData& remote_reader_data)
{
bool return_value;
return_value = mp_builtinProtocols->mp_PDP->getEDP()->pairing_remote_reader_with_local_writer_after_security(
local_writer, remote_reader_data);
if (!return_value && mp_builtinProtocols->mp_WLP != nullptr)
{
return_value = mp_builtinProtocols->mp_WLP->pairing_remote_reader_with_local_writer_after_security(
local_writer, remote_reader_data);
}
return return_value;
}
bool RTPSParticipantImpl::pairing_remote_writer_with_local_reader_after_security(const GUID_t& local_reader,
const WriterProxyData& remote_writer_data)
{
bool return_value;
return_value = mp_builtinProtocols->mp_PDP->getEDP()->pairing_remote_writer_with_local_reader_after_security(
local_reader, remote_writer_data);
if (!return_value && mp_builtinProtocols->mp_WLP != nullptr)
{
return_value = mp_builtinProtocols->mp_WLP->pairing_remote_writer_with_local_reader_after_security(
local_reader, remote_writer_data);
}
return return_value;
}
#endif
PDPSimple* RTPSParticipantImpl::pdpsimple()
{
return dynamic_cast<PDPSimple*>(mp_builtinProtocols->mp_PDP);
}
WLP* RTPSParticipantImpl::wlp()
{
return mp_builtinProtocols->mp_WLP;
}
bool RTPSParticipantImpl::get_remote_writer_info(const GUID_t& writerGuid, WriterProxyData& returnedInfo)
{
if (this->mp_builtinProtocols->mp_PDP->lookupWriterProxyData(writerGuid, returnedInfo))
{
return true;
}
return false;
}
bool RTPSParticipantImpl::get_remote_reader_info(const GUID_t& readerGuid, ReaderProxyData& returnedInfo)
{
if (this->mp_builtinProtocols->mp_PDP->lookupReaderProxyData(readerGuid, returnedInfo))
{
return true;
}
return false;
}
IPersistenceService* RTPSParticipantImpl::get_persistence_service(const EndpointAttributes& param)
{
IPersistenceService* ret_val;
ret_val = PersistenceFactory::create_persistence_service(param.properties);
return ret_val != nullptr ?
ret_val :
PersistenceFactory::create_persistence_service(m_att.properties);
}
} /* namespace rtps */
} /* namespace fastrtps */
} /* namespace eprosima */
| 1 | 15,445 | Line too long | eProsima-Fast-DDS | cpp |
@@ -638,6 +638,9 @@ MagicEffectNames magicEffectNames = {
{"orangeteleport", CONST_ME_ORANGETELEPORT},
{"greyteleport", CONST_ME_GREYTELEPORT},
{"lightblueteleport", CONST_ME_LIGHTBLUETELEPORT},
+ {"fatal", CONST_ME_FATAL},
+ {"dodge", CONST_ME_DODGE},
+ {"hourglass", CONST_ME_HOURGLASS},
};
ShootTypeNames shootTypeNames = { | 1 | /**
* The Forgotten Server - a free and open-source MMORPG server emulator
* Copyright (C) 2019 Mark Samman <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include "otpch.h"
#include "tools.h"
#include "configmanager.h"
extern ConfigManager g_config;
void printXMLError(const std::string& where, const std::string& fileName, const pugi::xml_parse_result& result)
{
std::cout << '[' << where << "] Failed to load " << fileName << ": " << result.description() << std::endl;
FILE* file = fopen(fileName.c_str(), "rb");
if (!file) {
return;
}
char buffer[32768];
uint32_t currentLine = 1;
std::string line;
auto offset = static_cast<size_t>(result.offset);
size_t lineOffsetPosition = 0;
size_t index = 0;
size_t bytes;
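// Scan the file in 32 KiB chunks to locate the line and column that contain the parse error offset.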
do {
bytes = fread(buffer, 1, 32768, file);
for (size_t i = 0; i < bytes; ++i) {
char ch = buffer[i];
if (ch == '\n') {
if ((index + i) >= offset) {
lineOffsetPosition = line.length() - ((index + i) - offset);
bytes = 0;
break;
}
++currentLine;
line.clear();
} else {
line.push_back(ch);
}
}
index += bytes;
} while (bytes == 32768);
fclose(file);
std::cout << "Line " << currentLine << ':' << std::endl;
std::cout << line << std::endl;
for (size_t i = 0; i < lineOffsetPosition; i++) {
if (line[i] == '\t') {
std::cout << '\t';
} else {
std::cout << ' ';
}
}
std::cout << '^' << std::endl;
}
static uint32_t circularShift(int bits, uint32_t value)
{
return (value << bits) | (value >> (32 - bits));
}
static void processSHA1MessageBlock(const uint8_t* messageBlock, uint32_t* H)
{
uint32_t W[80];
for (int i = 0; i < 16; ++i) {
const size_t offset = i << 2;
W[i] = messageBlock[offset] << 24 | messageBlock[offset + 1] << 16 | messageBlock[offset + 2] << 8 | messageBlock[offset + 3];
}
for (int i = 16; i < 80; ++i) {
W[i] = circularShift(1, W[i - 3] ^ W[i - 8] ^ W[i - 14] ^ W[i - 16]);
}
uint32_t A = H[0], B = H[1], C = H[2], D = H[3], E = H[4];
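// Four rounds of 20 operations each, using the standard SHA-1 round functions (Ch, Parity, Maj, Parity) and round constants.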
for (int i = 0; i < 20; ++i) {
const uint32_t tmp = circularShift(5, A) + ((B & C) | ((~B) & D)) + E + W[i] + 0x5A827999;
E = D; D = C; C = circularShift(30, B); B = A; A = tmp;
}
for (int i = 20; i < 40; ++i) {
const uint32_t tmp = circularShift(5, A) + (B ^ C ^ D) + E + W[i] + 0x6ED9EBA1;
E = D; D = C; C = circularShift(30, B); B = A; A = tmp;
}
for (int i = 40; i < 60; ++i) {
const uint32_t tmp = circularShift(5, A) + ((B & C) | (B & D) | (C & D)) + E + W[i] + 0x8F1BBCDC;
E = D; D = C; C = circularShift(30, B); B = A; A = tmp;
}
for (int i = 60; i < 80; ++i) {
const uint32_t tmp = circularShift(5, A) + (B ^ C ^ D) + E + W[i] + 0xCA62C1D6;
E = D; D = C; C = circularShift(30, B); B = A; A = tmp;
}
H[0] += A;
H[1] += B;
H[2] += C;
H[3] += D;
H[4] += E;
}
std::string transformToSHA1(const std::string& input)
{
uint32_t H[] = {
0x67452301,
0xEFCDAB89,
0x98BADCFE,
0x10325476,
0xC3D2E1F0
};
uint8_t messageBlock[64];
size_t index = 0;
uint32_t length_low = 0;
uint32_t length_high = 0;
for (char ch : input) {
messageBlock[index++] = ch;
length_low += 8;
if (length_low == 0) {
length_high++;
}
if (index == 64) {
processSHA1MessageBlock(messageBlock, H);
index = 0;
}
}
messageBlock[index++] = 0x80;
if (index > 56) {
while (index < 64) {
messageBlock[index++] = 0;
}
processSHA1MessageBlock(messageBlock, H);
index = 0;
}
while (index < 56) {
messageBlock[index++] = 0;
}
messageBlock[56] = length_high >> 24;
messageBlock[57] = length_high >> 16;
messageBlock[58] = length_high >> 8;
messageBlock[59] = length_high;
messageBlock[60] = length_low >> 24;
messageBlock[61] = length_low >> 16;
messageBlock[62] = length_low >> 8;
messageBlock[63] = length_low;
processSHA1MessageBlock(messageBlock, H);
char hexstring[41];
static const char hexDigits[] = "0123456789abcdef";
for (int hashByte = 20; --hashByte >= 0;) {
const uint8_t byte = H[hashByte >> 2] >> (((3 - hashByte) & 3) << 3);
index = hashByte << 1;
hexstring[index] = hexDigits[byte >> 4];
hexstring[index + 1] = hexDigits[byte & 15];
}
return std::string(hexstring, 40);
}
std::string generateToken(const std::string& key, uint32_t ticks)
{
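// HMAC-SHA1 one-time token with RFC 4226-style dynamic truncation; the result is reduced to AUTHENTICATOR_DIGITS digits below.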
// generate message from ticks
std::string message(8, 0);
for (uint8_t i = 8; --i; ticks >>= 8) {
message[i] = static_cast<char>(ticks & 0xFF);
}
// hmac key pad generation
std::string iKeyPad(64, 0x36), oKeyPad(64, 0x5C);
for (uint8_t i = 0; i < key.length(); ++i) {
iKeyPad[i] ^= key[i];
oKeyPad[i] ^= key[i];
}
oKeyPad.reserve(84);
// hmac concat inner pad with message
iKeyPad.append(message);
// hmac first pass
message.assign(transformToSHA1(iKeyPad));
// hmac concat outer pad with message, conversion from hex to int needed
for (uint8_t i = 0; i < message.length(); i += 2) {
oKeyPad.push_back(static_cast<char>(std::strtoul(message.substr(i, 2).c_str(), nullptr, 16)));
}
// hmac second pass
message.assign(transformToSHA1(oKeyPad));
// calculate hmac offset
uint32_t offset = static_cast<uint32_t>(std::strtoul(message.substr(39, 1).c_str(), nullptr, 16) & 0xF);
// get truncated hash
uint32_t truncHash = static_cast<uint32_t>(std::strtoul(message.substr(2 * offset, 8).c_str(), nullptr, 16)) & 0x7FFFFFFF;
message.assign(std::to_string(truncHash));
// return only the last AUTHENTICATOR_DIGITS (default 6) digits, left-padding with zeros so the result is exactly that length
uint32_t hashLen = message.length();
message.assign(message.substr(hashLen - std::min(hashLen, AUTHENTICATOR_DIGITS)));
message.insert(0, AUTHENTICATOR_DIGITS - std::min(hashLen, AUTHENTICATOR_DIGITS), '0');
return message;
}
void replaceString(std::string& str, const std::string& sought, const std::string& replacement)
{
size_t pos = 0;
size_t start = 0;
size_t soughtLen = sought.length();
size_t replaceLen = replacement.length();
while ((pos = str.find(sought, start)) != std::string::npos) {
str = str.substr(0, pos) + replacement + str.substr(pos + soughtLen);
start = pos + replaceLen;
}
}
void trim_right(std::string& source, char t)
{
source.erase(source.find_last_not_of(t) + 1);
}
void trim_left(std::string& source, char t)
{
source.erase(0, source.find_first_not_of(t));
}
void toLowerCaseString(std::string& source)
{
std::transform(source.begin(), source.end(), source.begin(), tolower);
}
std::string asLowerCaseString(std::string source)
{
toLowerCaseString(source);
return source;
}
std::string asUpperCaseString(std::string source)
{
std::transform(source.begin(), source.end(), source.begin(), toupper);
return source;
}
StringVector explodeString(const std::string& inString, const std::string& separator, int32_t limit/* = -1*/)
{
StringVector returnVector;
std::string::size_type start = 0, end = 0;
while (--limit != -1 && (end = inString.find(separator, start)) != std::string::npos) {
returnVector.push_back(inString.substr(start, end - start));
start = end + separator.size();
}
returnVector.push_back(inString.substr(start));
return returnVector;
}
IntegerVector vectorAtoi(const StringVector& stringVector)
{
IntegerVector returnVector;
for (const auto& string : stringVector) {
returnVector.push_back(std::stoi(string));
}
return returnVector;
}
std::mt19937& getRandomGenerator()
{
static std::random_device rd;
static std::mt19937 generator(rd());
return generator;
}
int32_t uniform_random(int32_t minNumber, int32_t maxNumber)
{
static std::uniform_int_distribution<int32_t> uniformRand;
if (minNumber == maxNumber) {
return minNumber;
} else if (minNumber > maxNumber) {
std::swap(minNumber, maxNumber);
}
return uniformRand(getRandomGenerator(), std::uniform_int_distribution<int32_t>::param_type(minNumber, maxNumber));
}
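// normal_random samples a normal distribution (mean 0.5, sd 0.25) scaled onto [minNumber, maxNumber]; samples falling outside [0, 1] are mapped to the midpoint of the range.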
int32_t normal_random(int32_t minNumber, int32_t maxNumber)
{
static std::normal_distribution<float> normalRand(0.5f, 0.25f);
if (minNumber == maxNumber) {
return minNumber;
} else if (minNumber > maxNumber) {
std::swap(minNumber, maxNumber);
}
int32_t increment;
const int32_t diff = maxNumber - minNumber;
const float v = normalRand(getRandomGenerator());
if (v < 0.0) {
increment = diff / 2;
} else if (v > 1.0) {
increment = (diff + 1) / 2;
} else {
increment = round(v * diff);
}
return minNumber + increment;
}
bool boolean_random(double probability/* = 0.5*/)
{
static std::bernoulli_distribution booleanRand;
return booleanRand(getRandomGenerator(), std::bernoulli_distribution::param_type(probability));
}
void trimString(std::string& str)
{
str.erase(str.find_last_not_of(' ') + 1);
str.erase(0, str.find_first_not_of(' '));
}
std::string convertIPToString(uint32_t ip)
{
char buffer[17];
int res = sprintf(buffer, "%u.%u.%u.%u", ip & 0xFF, (ip >> 8) & 0xFF, (ip >> 16) & 0xFF, (ip >> 24));
if (res < 0) {
return {};
}
return buffer;
}
std::string formatDate(time_t time)
{
const tm* tms = localtime(&time);
if (!tms) {
return {};
}
char buffer[20];
int res = sprintf(buffer, "%02d/%02d/%04d %02d:%02d:%02d", tms->tm_mday, tms->tm_mon + 1, tms->tm_year + 1900, tms->tm_hour, tms->tm_min, tms->tm_sec);
if (res < 0) {
return {};
}
return {buffer, 19};
}
std::string formatDateShort(time_t time)
{
const tm* tms = localtime(&time);
if (!tms) {
return {};
}
char buffer[12];
size_t res = strftime(buffer, 12, "%d %b %Y", tms);
if (res == 0) {
return {};
}
return {buffer, 11};
}
Direction getDirection(const std::string& string)
{
Direction direction = DIRECTION_NORTH;
if (string == "north" || string == "n" || string == "0") {
direction = DIRECTION_NORTH;
} else if (string == "east" || string == "e" || string == "1") {
direction = DIRECTION_EAST;
} else if (string == "south" || string == "s" || string == "2") {
direction = DIRECTION_SOUTH;
} else if (string == "west" || string == "w" || string == "3") {
direction = DIRECTION_WEST;
} else if (string == "southwest" || string == "south west" || string == "south-west" || string == "sw" || string == "4") {
direction = DIRECTION_SOUTHWEST;
} else if (string == "southeast" || string == "south east" || string == "south-east" || string == "se" || string == "5") {
direction = DIRECTION_SOUTHEAST;
} else if (string == "northwest" || string == "north west" || string == "north-west" || string == "nw" || string == "6") {
direction = DIRECTION_NORTHWEST;
} else if (string == "northeast" || string == "north east" || string == "north-east" || string == "ne" || string == "7") {
direction = DIRECTION_NORTHEAST;
}
return direction;
}
Position getNextPosition(Direction direction, Position pos)
{
switch (direction) {
case DIRECTION_NORTH:
pos.y--;
break;
case DIRECTION_SOUTH:
pos.y++;
break;
case DIRECTION_WEST:
pos.x--;
break;
case DIRECTION_EAST:
pos.x++;
break;
case DIRECTION_SOUTHWEST:
pos.x--;
pos.y++;
break;
case DIRECTION_NORTHWEST:
pos.x--;
pos.y--;
break;
case DIRECTION_NORTHEAST:
pos.x++;
pos.y--;
break;
case DIRECTION_SOUTHEAST:
pos.x++;
pos.y++;
break;
default:
break;
}
return pos;
}
Direction getDirectionTo(const Position& from, const Position& to)
{
if (from == to) {
return DIRECTION_NONE;
}
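// Assuming Position::getOffsetX/Y return (from - to): a negative X offset means the target lies to the east, and a negative Y offset means it lies to the south.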
Direction dir;
int32_t x_offset = Position::getOffsetX(from, to);
if (x_offset < 0) {
dir = DIRECTION_EAST;
x_offset = std::abs(x_offset);
} else {
dir = DIRECTION_WEST;
}
int32_t y_offset = Position::getOffsetY(from, to);
if (y_offset >= 0) {
if (y_offset > x_offset) {
dir = DIRECTION_NORTH;
} else if (y_offset == x_offset) {
if (dir == DIRECTION_EAST) {
dir = DIRECTION_NORTHEAST;
} else {
dir = DIRECTION_NORTHWEST;
}
}
} else {
y_offset = std::abs(y_offset);
if (y_offset > x_offset) {
dir = DIRECTION_SOUTH;
} else if (y_offset == x_offset) {
if (dir == DIRECTION_EAST) {
dir = DIRECTION_SOUTHEAST;
} else {
dir = DIRECTION_SOUTHWEST;
}
}
}
return dir;
}
using MagicEffectNames = std::unordered_map<std::string, MagicEffectClasses>;
using ShootTypeNames = std::unordered_map<std::string, ShootType_t>;
using CombatTypeNames = std::unordered_map<CombatType_t, std::string, std::hash<int32_t>>;
using AmmoTypeNames = std::unordered_map<std::string, Ammo_t>;
using WeaponActionNames = std::unordered_map<std::string, WeaponAction_t>;
using SkullNames = std::unordered_map<std::string, Skulls_t>;
MagicEffectNames magicEffectNames = {
{"redspark", CONST_ME_DRAWBLOOD},
{"bluebubble", CONST_ME_LOSEENERGY},
{"poff", CONST_ME_POFF},
{"yellowspark", CONST_ME_BLOCKHIT},
{"explosionarea", CONST_ME_EXPLOSIONAREA},
{"explosion", CONST_ME_EXPLOSIONHIT},
{"firearea", CONST_ME_FIREAREA},
{"yellowbubble", CONST_ME_YELLOW_RINGS},
{"greenbubble", CONST_ME_GREEN_RINGS},
{"blackspark", CONST_ME_HITAREA},
{"teleport", CONST_ME_TELEPORT},
{"energy", CONST_ME_ENERGYHIT},
{"blueshimmer", CONST_ME_MAGIC_BLUE},
{"redshimmer", CONST_ME_MAGIC_RED},
{"greenshimmer", CONST_ME_MAGIC_GREEN},
{"fire", CONST_ME_HITBYFIRE},
{"greenspark", CONST_ME_HITBYPOISON},
{"mortarea", CONST_ME_MORTAREA},
{"greennote", CONST_ME_SOUND_GREEN},
{"rednote", CONST_ME_SOUND_RED},
{"poison", CONST_ME_POISONAREA},
{"yellownote", CONST_ME_SOUND_YELLOW},
{"purplenote", CONST_ME_SOUND_PURPLE},
{"bluenote", CONST_ME_SOUND_BLUE},
{"whitenote", CONST_ME_SOUND_WHITE},
{"bubbles", CONST_ME_BUBBLES},
{"dice", CONST_ME_CRAPS},
{"giftwraps", CONST_ME_GIFT_WRAPS},
{"yellowfirework", CONST_ME_FIREWORK_YELLOW},
{"redfirework", CONST_ME_FIREWORK_RED},
{"bluefirework", CONST_ME_FIREWORK_BLUE},
{"stun", CONST_ME_STUN},
{"sleep", CONST_ME_SLEEP},
{"watercreature", CONST_ME_WATERCREATURE},
{"groundshaker", CONST_ME_GROUNDSHAKER},
{"hearts", CONST_ME_HEARTS},
{"fireattack", CONST_ME_FIREATTACK},
{"energyarea", CONST_ME_ENERGYAREA},
{"smallclouds", CONST_ME_SMALLCLOUDS},
{"holydamage", CONST_ME_HOLYDAMAGE},
{"bigclouds", CONST_ME_BIGCLOUDS},
{"icearea", CONST_ME_ICEAREA},
{"icetornado", CONST_ME_ICETORNADO},
{"iceattack", CONST_ME_ICEATTACK},
{"stones", CONST_ME_STONES},
{"smallplants", CONST_ME_SMALLPLANTS},
{"carniphila", CONST_ME_CARNIPHILA},
{"purpleenergy", CONST_ME_PURPLEENERGY},
{"yellowenergy", CONST_ME_YELLOWENERGY},
{"holyarea", CONST_ME_HOLYAREA},
{"bigplants", CONST_ME_BIGPLANTS},
{"cake", CONST_ME_CAKE},
{"giantice", CONST_ME_GIANTICE},
{"watersplash", CONST_ME_WATERSPLASH},
{"plantattack", CONST_ME_PLANTATTACK},
{"tutorialarrow", CONST_ME_TUTORIALARROW},
{"tutorialsquare", CONST_ME_TUTORIALSQUARE},
{"mirrorhorizontal", CONST_ME_MIRRORHORIZONTAL},
{"mirrorvertical", CONST_ME_MIRRORVERTICAL},
{"skullhorizontal", CONST_ME_SKULLHORIZONTAL},
{"skullvertical", CONST_ME_SKULLVERTICAL},
{"assassin", CONST_ME_ASSASSIN},
{"stepshorizontal", CONST_ME_STEPSHORIZONTAL},
{"bloodysteps", CONST_ME_BLOODYSTEPS},
{"stepsvertical", CONST_ME_STEPSVERTICAL},
{"yalaharighost", CONST_ME_YALAHARIGHOST},
{"bats", CONST_ME_BATS},
{"smoke", CONST_ME_SMOKE},
{"insects", CONST_ME_INSECTS},
{"dragonhead", CONST_ME_DRAGONHEAD},
{"orcshaman", CONST_ME_ORCSHAMAN},
{"orcshamanfire", CONST_ME_ORCSHAMAN_FIRE},
{"thunder", CONST_ME_THUNDER},
{"ferumbras", CONST_ME_FERUMBRAS},
{"confettihorizontal", CONST_ME_CONFETTI_HORIZONTAL},
{"confettivertical", CONST_ME_CONFETTI_VERTICAL},
{"blacksmoke", CONST_ME_BLACKSMOKE},
{"redsmoke", CONST_ME_REDSMOKE},
{"yellowsmoke", CONST_ME_YELLOWSMOKE},
{"greensmoke", CONST_ME_GREENSMOKE},
{"purplesmoke", CONST_ME_PURPLESMOKE},
{"earlythunder", CONST_ME_EARLY_THUNDER},
{"bonecapsule", CONST_ME_RAGIAZ_BONECAPSULE},
{"criticaldamage", CONST_ME_CRITICAL_DAMAGE},
{"plungingfish", CONST_ME_PLUNGING_FISH},
{"bluechain", CONST_ME_BLUECHAIN},
{"orangechain", CONST_ME_ORANGECHAIN},
{"greenchain", CONST_ME_GREENCHAIN},
{"purplechain", CONST_ME_PURPLECHAIN},
{"greychain", CONST_ME_GREYCHAIN},
{"yellowchain", CONST_ME_YELLOWCHAIN},
{"yellowsparkles", CONST_ME_YELLOWSPARKLES},
{"faeexplosion", CONST_ME_FAEEXPLOSION},
{"faecoming", CONST_ME_FAECOMING},
{"faegoing", CONST_ME_FAEGOING},
{"bigcloudssinglespace", CONST_ME_BIGCLOUDSSINGLESPACE},
{"stonessinglespace", CONST_ME_STONESSINGLESPACE},
{"blueghost", CONST_ME_BLUEGHOST},
{"pointofinterest", CONST_ME_POINTOFINTEREST},
{"mapeffect", CONST_ME_MAPEFFECT},
{"pinkspark", CONST_ME_PINKSPARK},
{"greenfirework", CONST_ME_FIREWORK_GREEN},
{"orangefirework", CONST_ME_FIREWORK_ORANGE},
{"purplefirework", CONST_ME_FIREWORK_PURPLE},
{"turquoisefirework", CONST_ME_FIREWORK_TURQUOISE},
{"thecube", CONST_ME_THECUBE},
{"drawink", CONST_ME_DRAWINK},
{"prismaticsparkles", CONST_ME_PRISMATICSPARKLES},
{"thaian", CONST_ME_THAIAN},
{"thaianghost", CONST_ME_THAIANGHOST},
{"ghostsmoke", CONST_ME_GHOSTSMOKE},
{"floatingblock", CONST_ME_FLOATINGBLOCK},
{"block", CONST_ME_BLOCK},
{"rooting", CONST_ME_ROOTING},
{"sunpriest", CONST_ME_SUNPRIEST},
{"werelion", CONST_ME_WERELION},
{"ghostlyscratch", CONST_ME_GHOSTLYSCRATCH},
{"ghostlybite", CONST_ME_GHOSTLYBITE},
{"bigscratching", CONST_ME_BIGSCRATCHING},
{"slash", CONST_ME_SLASH},
{"bite", CONST_ME_BITE},
{"chivalriouschallenge", CONST_ME_CHIVALRIOUSCHALLENGE},
{"divinedazzle", CONST_ME_DIVINEDAZZLE},
{"electricalspark", CONST_ME_ELECTRICALSPARK},
{"purpleteleport", CONST_ME_PURPLETELEPORT},
{"redteleport", CONST_ME_REDTELEPORT},
{"orangeteleport", CONST_ME_ORANGETELEPORT},
{"greyteleport", CONST_ME_GREYTELEPORT},
{"lightblueteleport", CONST_ME_LIGHTBLUETELEPORT},
};
ShootTypeNames shootTypeNames = {
{"spear", CONST_ANI_SPEAR},
{"bolt", CONST_ANI_BOLT},
{"arrow", CONST_ANI_ARROW},
{"fire", CONST_ANI_FIRE},
{"energy", CONST_ANI_ENERGY},
{"poisonarrow", CONST_ANI_POISONARROW},
{"burstarrow", CONST_ANI_BURSTARROW},
{"throwingstar", CONST_ANI_THROWINGSTAR},
{"throwingknife", CONST_ANI_THROWINGKNIFE},
{"smallstone", CONST_ANI_SMALLSTONE},
{"death", CONST_ANI_DEATH},
{"largerock", CONST_ANI_LARGEROCK},
{"snowball", CONST_ANI_SNOWBALL},
{"powerbolt", CONST_ANI_POWERBOLT},
{"poison", CONST_ANI_POISON},
{"infernalbolt", CONST_ANI_INFERNALBOLT},
{"huntingspear", CONST_ANI_HUNTINGSPEAR},
{"enchantedspear", CONST_ANI_ENCHANTEDSPEAR},
{"redstar", CONST_ANI_REDSTAR},
{"greenstar", CONST_ANI_GREENSTAR},
{"royalspear", CONST_ANI_ROYALSPEAR},
{"sniperarrow", CONST_ANI_SNIPERARROW},
{"onyxarrow", CONST_ANI_ONYXARROW},
{"piercingbolt", CONST_ANI_PIERCINGBOLT},
{"whirlwindsword", CONST_ANI_WHIRLWINDSWORD},
{"whirlwindaxe", CONST_ANI_WHIRLWINDAXE},
{"whirlwindclub", CONST_ANI_WHIRLWINDCLUB},
{"etherealspear", CONST_ANI_ETHEREALSPEAR},
{"ice", CONST_ANI_ICE},
{"earth", CONST_ANI_EARTH},
{"holy", CONST_ANI_HOLY},
{"suddendeath", CONST_ANI_SUDDENDEATH},
{"flasharrow", CONST_ANI_FLASHARROW},
{"flammingarrow", CONST_ANI_FLAMMINGARROW},
{"shiverarrow", CONST_ANI_SHIVERARROW},
{"energyball", CONST_ANI_ENERGYBALL},
{"smallice", CONST_ANI_SMALLICE},
{"smallholy", CONST_ANI_SMALLHOLY},
{"smallearth", CONST_ANI_SMALLEARTH},
{"eartharrow", CONST_ANI_EARTHARROW},
{"explosion", CONST_ANI_EXPLOSION},
{"cake", CONST_ANI_CAKE},
{"tarsalarrow", CONST_ANI_TARSALARROW},
{"vortexbolt", CONST_ANI_VORTEXBOLT},
{"prismaticbolt", CONST_ANI_PRISMATICBOLT},
{"crystallinearrow", CONST_ANI_CRYSTALLINEARROW},
{"drillbolt", CONST_ANI_DRILLBOLT},
{"envenomedarrow", CONST_ANI_ENVENOMEDARROW},
{"gloothspear", CONST_ANI_GLOOTHSPEAR},
{"simplearrow", CONST_ANI_SIMPLEARROW},
{"leafstar", CONST_ANI_LEAFSTAR},
{"diamondarrow", CONST_ANI_DIAMONDARROW},
{"spectralbolt", CONST_ANI_SPECTRALBOLT},
{"royalstar", CONST_ANI_ROYALSTAR},
};
CombatTypeNames combatTypeNames = {
{COMBAT_PHYSICALDAMAGE, "physical"},
{COMBAT_ENERGYDAMAGE, "energy"},
{COMBAT_EARTHDAMAGE, "earth"},
{COMBAT_FIREDAMAGE, "fire"},
{COMBAT_UNDEFINEDDAMAGE, "undefined"},
{COMBAT_LIFEDRAIN, "lifedrain"},
{COMBAT_MANADRAIN, "manadrain"},
{COMBAT_HEALING, "healing"},
{COMBAT_DROWNDAMAGE, "drown"},
{COMBAT_ICEDAMAGE, "ice"},
{COMBAT_HOLYDAMAGE, "holy"},
{COMBAT_DEATHDAMAGE, "death"},
};
AmmoTypeNames ammoTypeNames = {
{"spear", AMMO_SPEAR},
{"bolt", AMMO_BOLT},
{"arrow", AMMO_ARROW},
{"poisonarrow", AMMO_ARROW},
{"burstarrow", AMMO_ARROW},
{"throwingstar", AMMO_THROWINGSTAR},
{"throwingknife", AMMO_THROWINGKNIFE},
{"smallstone", AMMO_STONE},
{"largerock", AMMO_STONE},
{"snowball", AMMO_SNOWBALL},
{"powerbolt", AMMO_BOLT},
{"infernalbolt", AMMO_BOLT},
{"huntingspear", AMMO_SPEAR},
{"enchantedspear", AMMO_SPEAR},
{"royalspear", AMMO_SPEAR},
{"sniperarrow", AMMO_ARROW},
{"onyxarrow", AMMO_ARROW},
{"piercingbolt", AMMO_BOLT},
{"etherealspear", AMMO_SPEAR},
{"flasharrow", AMMO_ARROW},
{"flammingarrow", AMMO_ARROW},
{"shiverarrow", AMMO_ARROW},
{"eartharrow", AMMO_ARROW},
{"leafstar", AMMO_THROWINGSTAR},
{"diamondarrow", AMMO_ARROW},
{"spectralbolt", AMMO_BOLT},
{"royalstar", AMMO_THROWINGSTAR},
};
WeaponActionNames weaponActionNames = {
{"move", WEAPONACTION_MOVE},
{"removecharge", WEAPONACTION_REMOVECHARGE},
{"removecount", WEAPONACTION_REMOVECOUNT},
};
SkullNames skullNames = {
{"none", SKULL_NONE},
{"yellow", SKULL_YELLOW},
{"green", SKULL_GREEN},
{"white", SKULL_WHITE},
{"red", SKULL_RED},
{"black", SKULL_BLACK},
{"orange", SKULL_ORANGE},
};
MagicEffectClasses getMagicEffect(const std::string& strValue)
{
auto magicEffect = magicEffectNames.find(strValue);
if (magicEffect != magicEffectNames.end()) {
return magicEffect->second;
}
return CONST_ME_NONE;
}
ShootType_t getShootType(const std::string& strValue)
{
auto shootType = shootTypeNames.find(strValue);
if (shootType != shootTypeNames.end()) {
return shootType->second;
}
return CONST_ANI_NONE;
}
std::string getCombatName(CombatType_t combatType)
{
auto combatName = combatTypeNames.find(combatType);
if (combatName != combatTypeNames.end()) {
return combatName->second;
}
return "unknown";
}
Ammo_t getAmmoType(const std::string& strValue)
{
auto ammoType = ammoTypeNames.find(strValue);
if (ammoType != ammoTypeNames.end()) {
return ammoType->second;
}
return AMMO_NONE;
}
WeaponAction_t getWeaponAction(const std::string& strValue)
{
auto weaponAction = weaponActionNames.find(strValue);
if (weaponAction != weaponActionNames.end()) {
return weaponAction->second;
}
return WEAPONACTION_NONE;
}
Skulls_t getSkullType(const std::string& strValue)
{
auto skullType = skullNames.find(strValue);
if (skullType != skullNames.end()) {
return skullType->second;
}
return SKULL_NONE;
}
std::string getSpecialSkillName(uint8_t skillid)
{
switch (skillid) {
case SPECIALSKILL_CRITICALHITCHANCE:
return "critical hit chance";
case SPECIALSKILL_CRITICALHITAMOUNT:
return "critical extra damage";
case SPECIALSKILL_LIFELEECHCHANCE:
return "hitpoints leech chance";
case SPECIALSKILL_LIFELEECHAMOUNT:
return "hitpoints leech amount";
case SPECIALSKILL_MANALEECHCHANCE:
return "manapoints leech chance";
case SPECIALSKILL_MANALEECHAMOUNT:
return "mana points leech amount";
default:
return "unknown";
}
}
std::string getSkillName(uint8_t skillid)
{
switch (skillid) {
case SKILL_FIST:
return "fist fighting";
case SKILL_CLUB:
return "club fighting";
case SKILL_SWORD:
return "sword fighting";
case SKILL_AXE:
return "axe fighting";
case SKILL_DISTANCE:
return "distance fighting";
case SKILL_SHIELD:
return "shielding";
case SKILL_FISHING:
return "fishing";
case SKILL_MAGLEVEL:
return "magic level";
case SKILL_LEVEL:
return "level";
default:
return "unknown";
}
}
uint32_t adlerChecksum(const uint8_t* data, size_t length)
{
if (length > NETWORKMESSAGE_MAXSIZE) {
return 0;
}
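// 65521 is the largest prime below 2^16 (the standard Adler-32 modulus); processing at most 5552 bytes per block keeps the 32-bit sums from overflowing before the modulo.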
const uint16_t adler = 65521;
uint32_t a = 1, b = 0;
while (length > 0) {
size_t tmp = length > 5552 ? 5552 : length;
length -= tmp;
do {
a += *data++;
b += a;
} while (--tmp);
a %= adler;
b %= adler;
}
return (b << 16) | a;
}
std::string ucfirst(std::string str)
{
for (char& i : str) {
if (i != ' ') {
i = toupper(i);
break;
}
}
return str;
}
std::string ucwords(std::string str)
{
size_t strLength = str.length();
if (strLength == 0) {
return str;
}
str[0] = toupper(str.front());
for (size_t i = 1; i < strLength; ++i) {
if (str[i - 1] == ' ') {
str[i] = toupper(str[i]);
}
}
return str;
}
bool booleanString(const std::string& str)
{
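// Treats any non-empty string not starting with 'f', 'n' or '0' (case-insensitive) as true.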
if (str.empty()) {
return false;
}
char ch = tolower(str.front());
return ch != 'f' && ch != 'n' && ch != '0';
}
std::string getWeaponName(WeaponType_t weaponType)
{
switch (weaponType) {
case WEAPON_SWORD: return "sword";
case WEAPON_CLUB: return "club";
case WEAPON_AXE: return "axe";
case WEAPON_DISTANCE: return "distance";
case WEAPON_WAND: return "wand";
case WEAPON_AMMO: return "ammunition";
default: return std::string();
}
}
size_t combatTypeToIndex(CombatType_t combatType)
{
switch (combatType) {
case COMBAT_PHYSICALDAMAGE:
return 0;
case COMBAT_ENERGYDAMAGE:
return 1;
case COMBAT_EARTHDAMAGE:
return 2;
case COMBAT_FIREDAMAGE:
return 3;
case COMBAT_UNDEFINEDDAMAGE:
return 4;
case COMBAT_LIFEDRAIN:
return 5;
case COMBAT_MANADRAIN:
return 6;
case COMBAT_HEALING:
return 7;
case COMBAT_DROWNDAMAGE:
return 8;
case COMBAT_ICEDAMAGE:
return 9;
case COMBAT_HOLYDAMAGE:
return 10;
case COMBAT_DEATHDAMAGE:
return 11;
default:
return 0;
}
}
CombatType_t indexToCombatType(size_t v)
{
return static_cast<CombatType_t>(1 << v);
}
uint8_t serverFluidToClient(uint8_t serverFluid)
{
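// Reverse lookup: find the client fluid index that maps to the given server fluid (0 if not found).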
uint8_t size = sizeof(clientToServerFluidMap) / sizeof(uint8_t);
for (uint8_t i = 0; i < size; ++i) {
if (clientToServerFluidMap[i] == serverFluid) {
return i;
}
}
return 0;
}
uint8_t clientFluidToServer(uint8_t clientFluid)
{
uint8_t size = sizeof(clientToServerFluidMap) / sizeof(uint8_t);
if (clientFluid >= size) {
return 0;
}
return clientToServerFluidMap[clientFluid];
}
itemAttrTypes stringToItemAttribute(const std::string& str)
{
if (str == "aid") {
return ITEM_ATTRIBUTE_ACTIONID;
} else if (str == "uid") {
return ITEM_ATTRIBUTE_UNIQUEID;
} else if (str == "description") {
return ITEM_ATTRIBUTE_DESCRIPTION;
} else if (str == "text") {
return ITEM_ATTRIBUTE_TEXT;
} else if (str == "date") {
return ITEM_ATTRIBUTE_DATE;
} else if (str == "writer") {
return ITEM_ATTRIBUTE_WRITER;
} else if (str == "name") {
return ITEM_ATTRIBUTE_NAME;
} else if (str == "article") {
return ITEM_ATTRIBUTE_ARTICLE;
} else if (str == "pluralname") {
return ITEM_ATTRIBUTE_PLURALNAME;
} else if (str == "weight") {
return ITEM_ATTRIBUTE_WEIGHT;
} else if (str == "attack") {
return ITEM_ATTRIBUTE_ATTACK;
} else if (str == "defense") {
return ITEM_ATTRIBUTE_DEFENSE;
} else if (str == "extradefense") {
return ITEM_ATTRIBUTE_EXTRADEFENSE;
} else if (str == "armor") {
return ITEM_ATTRIBUTE_ARMOR;
} else if (str == "hitchance") {
return ITEM_ATTRIBUTE_HITCHANCE;
} else if (str == "shootrange") {
return ITEM_ATTRIBUTE_SHOOTRANGE;
} else if (str == "owner") {
return ITEM_ATTRIBUTE_OWNER;
} else if (str == "duration") {
return ITEM_ATTRIBUTE_DURATION;
} else if (str == "decaystate") {
return ITEM_ATTRIBUTE_DECAYSTATE;
} else if (str == "corpseowner") {
return ITEM_ATTRIBUTE_CORPSEOWNER;
} else if (str == "charges") {
return ITEM_ATTRIBUTE_CHARGES;
} else if (str == "fluidtype") {
return ITEM_ATTRIBUTE_FLUIDTYPE;
} else if (str == "doorid") {
return ITEM_ATTRIBUTE_DOORID;
} else if (str == "decayto") {
return ITEM_ATTRIBUTE_DECAYTO;
} else if (str == "wrapid") {
return ITEM_ATTRIBUTE_WRAPID;
} else if (str == "storeitem") {
return ITEM_ATTRIBUTE_STOREITEM;
} else if (str == "attackspeed") {
return ITEM_ATTRIBUTE_ATTACK_SPEED;
}
return ITEM_ATTRIBUTE_NONE;
}
std::string getFirstLine(const std::string& str)
{
std::string firstLine;
firstLine.reserve(str.length());
for (const char c : str) {
if (c == '\n') {
break;
}
firstLine.push_back(c);
}
return firstLine;
}
const char* getReturnMessage(ReturnValue value)
{
switch (value) {
case RETURNVALUE_DESTINATIONOUTOFREACH:
return "Destination is out of range.";
case RETURNVALUE_NOTMOVEABLE:
return "You cannot move this object.";
case RETURNVALUE_DROPTWOHANDEDITEM:
return "Drop the double-handed object first.";
case RETURNVALUE_BOTHHANDSNEEDTOBEFREE:
return "Both hands need to be free.";
case RETURNVALUE_CANNOTBEDRESSED:
return "You cannot dress this object there.";
case RETURNVALUE_PUTTHISOBJECTINYOURHAND:
return "Put this object in your hand.";
case RETURNVALUE_PUTTHISOBJECTINBOTHHANDS:
return "Put this object in both hands.";
case RETURNVALUE_CANONLYUSEONEWEAPON:
return "You may only use one weapon.";
case RETURNVALUE_TOOFARAWAY:
return "You are too far away.";
case RETURNVALUE_FIRSTGODOWNSTAIRS:
return "First go downstairs.";
case RETURNVALUE_FIRSTGOUPSTAIRS:
return "First go upstairs.";
case RETURNVALUE_NOTENOUGHCAPACITY:
return "This object is too heavy for you to carry.";
case RETURNVALUE_CONTAINERNOTENOUGHROOM:
return "You cannot put more objects in this container.";
case RETURNVALUE_NEEDEXCHANGE:
case RETURNVALUE_NOTENOUGHROOM:
return "There is not enough room.";
case RETURNVALUE_CANNOTPICKUP:
return "You cannot take this object.";
case RETURNVALUE_CANNOTTHROW:
return "You cannot throw there.";
case RETURNVALUE_THEREISNOWAY:
return "There is no way.";
case RETURNVALUE_THISISIMPOSSIBLE:
return "This is impossible.";
case RETURNVALUE_PLAYERISPZLOCKED:
return "You can not enter a protection zone after attacking another player.";
case RETURNVALUE_PLAYERISNOTINVITED:
return "You are not invited.";
case RETURNVALUE_CREATUREDOESNOTEXIST:
return "Creature does not exist.";
case RETURNVALUE_DEPOTISFULL:
return "You cannot put more items in this depot.";
case RETURNVALUE_CANNOTUSETHISOBJECT:
return "You cannot use this object.";
case RETURNVALUE_PLAYERWITHTHISNAMEISNOTONLINE:
return "A player with this name is not online.";
case RETURNVALUE_NOTREQUIREDLEVELTOUSERUNE:
return "You do not have the required magic level to use this rune.";
case RETURNVALUE_YOUAREALREADYTRADING:
return "You are already trading. Finish this trade first.";
case RETURNVALUE_THISPLAYERISALREADYTRADING:
return "This player is already trading.";
case RETURNVALUE_YOUMAYNOTLOGOUTDURINGAFIGHT:
return "You may not logout during or immediately after a fight!";
case RETURNVALUE_DIRECTPLAYERSHOOT:
return "You are not allowed to shoot directly on players.";
case RETURNVALUE_NOTENOUGHLEVEL:
return "Your level is too low.";
case RETURNVALUE_NOTENOUGHMAGICLEVEL:
return "You do not have enough magic level.";
case RETURNVALUE_NOTENOUGHMANA:
return "You do not have enough mana.";
case RETURNVALUE_NOTENOUGHSOUL:
return "You do not have enough soul.";
case RETURNVALUE_YOUAREEXHAUSTED:
return "You are exhausted.";
case RETURNVALUE_YOUCANNOTUSEOBJECTSTHATFAST:
return "You cannot use objects that fast.";
case RETURNVALUE_CANONLYUSETHISRUNEONCREATURES:
return "You can only use it on creatures.";
case RETURNVALUE_PLAYERISNOTREACHABLE:
return "Player is not reachable.";
case RETURNVALUE_CREATUREISNOTREACHABLE:
return "Creature is not reachable.";
case RETURNVALUE_ACTIONNOTPERMITTEDINPROTECTIONZONE:
return "This action is not permitted in a protection zone.";
case RETURNVALUE_YOUMAYNOTATTACKTHISPLAYER:
return "You may not attack this person.";
case RETURNVALUE_YOUMAYNOTATTACKTHISCREATURE:
return "You may not attack this creature.";
case RETURNVALUE_YOUMAYNOTATTACKAPERSONINPROTECTIONZONE:
return "You may not attack a person in a protection zone.";
case RETURNVALUE_YOUMAYNOTATTACKAPERSONWHILEINPROTECTIONZONE:
return "You may not attack a person while you are in a protection zone.";
case RETURNVALUE_YOUCANONLYUSEITONCREATURES:
return "You can only use it on creatures.";
case RETURNVALUE_TURNSECUREMODETOATTACKUNMARKEDPLAYERS:
return "Turn secure mode off if you really want to attack unmarked players.";
case RETURNVALUE_YOUNEEDPREMIUMACCOUNT:
return "You need a premium account.";
case RETURNVALUE_YOUNEEDTOLEARNTHISSPELL:
return "You must learn this spell first.";
case RETURNVALUE_YOURVOCATIONCANNOTUSETHISSPELL:
return "You have the wrong vocation to cast this spell.";
case RETURNVALUE_YOUNEEDAWEAPONTOUSETHISSPELL:
return "You need to equip a weapon to use this spell.";
case RETURNVALUE_PLAYERISPZLOCKEDLEAVEPVPZONE:
return "You can not leave a pvp zone after attacking another player.";
case RETURNVALUE_PLAYERISPZLOCKEDENTERPVPZONE:
return "You can not enter a pvp zone after attacking another player.";
case RETURNVALUE_ACTIONNOTPERMITTEDINANOPVPZONE:
return "This action is not permitted in a non pvp zone.";
case RETURNVALUE_YOUCANNOTLOGOUTHERE:
return "You can not logout here.";
case RETURNVALUE_YOUNEEDAMAGICITEMTOCASTSPELL:
return "You need a magic item to cast this spell.";
case RETURNVALUE_CANNOTCONJUREITEMHERE:
return "You cannot conjure items here.";
case RETURNVALUE_YOUNEEDTOSPLITYOURSPEARS:
return "You need to split your spears first.";
case RETURNVALUE_NAMEISTOOAMBIGUOUS:
return "Player name is ambiguous.";
case RETURNVALUE_CANONLYUSEONESHIELD:
return "You may use only one shield.";
case RETURNVALUE_NOPARTYMEMBERSINRANGE:
return "No party members in range.";
case RETURNVALUE_YOUARENOTTHEOWNER:
return "You are not the owner.";
case RETURNVALUE_NOSUCHRAIDEXISTS:
return "No such raid exists.";
case RETURNVALUE_ANOTHERRAIDISALREADYEXECUTING:
return "Another raid is already executing.";
case RETURNVALUE_TRADEPLAYERFARAWAY:
return "Trade player is too far away.";
case RETURNVALUE_YOUDONTOWNTHISHOUSE:
return "You don't own this house.";
case RETURNVALUE_TRADEPLAYERALREADYOWNSAHOUSE:
return "Trade player already owns a house.";
case RETURNVALUE_TRADEPLAYERHIGHESTBIDDER:
return "Trade player is currently the highest bidder of an auctioned house.";
case RETURNVALUE_YOUCANNOTTRADETHISHOUSE:
return "You can not trade this house.";
case RETURNVALUE_YOUDONTHAVEREQUIREDPROFESSION:
return "You don't have the required profession.";
case RETURNVALUE_CANNOTMOVEITEMISNOTSTOREITEM:
return "You cannot move this item into your Store inbox as it was not bought in the Store.";
case RETURNVALUE_ITEMCANNOTBEMOVEDTHERE:
return "This item cannot be moved there.";
case RETURNVALUE_YOUCANNOTUSETHISBED:
return "This bed can't be used, but Premium Account players can rent houses and sleep in beds there to regain health and mana.";
default: // RETURNVALUE_NOTPOSSIBLE, etc
return "Sorry, not possible.";
}
}
int64_t OTSYS_TIME()
{
return std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::system_clock::now().time_since_epoch()).count();
}
SpellGroup_t stringToSpellGroup(const std::string& value)
{
std::string tmpStr = asLowerCaseString(value);
if (tmpStr == "attack" || tmpStr == "1") {
return SPELLGROUP_ATTACK;
} else if (tmpStr == "healing" || tmpStr == "2") {
return SPELLGROUP_HEALING;
} else if (tmpStr == "support" || tmpStr == "3") {
return SPELLGROUP_SUPPORT;
} else if (tmpStr == "special" || tmpStr == "4") {
return SPELLGROUP_SPECIAL;
}
return SPELLGROUP_NONE;
}
| 1 | 19,773 | This isn't aligned though. | otland-forgottenserver | cpp |
@@ -533,7 +533,8 @@ class PAAHead(ATSSHead):
cls_scores. Besides, score voting is used when `` score_voting``
is set to True.
"""
- assert with_nms, 'PAA only supports "with_nms=True" now'
+ assert with_nms, 'PAA only supports "with_nms=True" now and it is ' \
+ 'mean PAAHead does not support test-time augmentation'
assert len(cls_scores) == len(bbox_preds) == len(mlvl_anchors)
batch_size = cls_scores[0].shape[0]
| 1 | import numpy as np
import torch
from mmcv.runner import force_fp32
from mmdet.core import multi_apply, multiclass_nms
from mmdet.core.bbox.iou_calculators import bbox_overlaps
from mmdet.models import HEADS
from mmdet.models.dense_heads import ATSSHead
EPS = 1e-12
try:
import sklearn.mixture as skm
except ImportError:
skm = None
def levels_to_images(mlvl_tensor):
"""Concat multi-level feature maps by image.
[feature_level0, feature_level1...] -> [feature_image0, feature_image1...]
Convert the shape of each element in mlvl_tensor from (N, C, H, W) to
    (N, H*W, C), then split each element into N elements of shape (H*W, C),
    and concatenate the elements belonging to the same image across all
    levels along the first dimension.
Args:
mlvl_tensor (list[torch.Tensor]): list of Tensor which collect from
corresponding level. Each element is of shape (N, C, H, W)
Returns:
list[torch.Tensor]: A list that contains N tensors and each tensor is
of shape (num_elements, C)
"""
batch_size = mlvl_tensor[0].size(0)
batch_list = [[] for _ in range(batch_size)]
channels = mlvl_tensor[0].size(1)
for t in mlvl_tensor:
t = t.permute(0, 2, 3, 1)
t = t.view(batch_size, -1, channels).contiguous()
for img in range(batch_size):
batch_list[img].append(t[img])
return [torch.cat(item, 0) for item in batch_list]
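# Example (hypothetical shapes): given two feature levels of shape
# (2, 4, 8, 8) and (2, 4, 4, 4) -- i.e. N=2 images with C=4 channels --
# levels_to_images returns a list of two tensors, each of shape
# (8 * 8 + 4 * 4, 4) = (80, 4): all locations of one image concatenated
# across levels.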
@HEADS.register_module()
class PAAHead(ATSSHead):
"""Head of PAAAssignment: Probabilistic Anchor Assignment with IoU
Prediction for Object Detection.
Code is modified from the `official github repo
<https://github.com/kkhoot/PAA/blob/master/paa_core
/modeling/rpn/paa/loss.py>`_.
More details can be found in the `paper
<https://arxiv.org/abs/2007.08103>`_ .
Args:
topk (int): Select topk samples with smallest loss in
each level.
score_voting (bool): Whether to use score voting in post-process.
covariance_type : String describing the type of covariance parameters
to be used in :class:`sklearn.mixture.GaussianMixture`.
It must be one of:
- 'full': each component has its own general covariance matrix
- 'tied': all components share the same general covariance matrix
- 'diag': each component has its own diagonal covariance matrix
- 'spherical': each component has its own single variance
Default: 'diag'. From 'full' to 'spherical', the gmm fitting
process is faster yet the performance could be influenced. For most
cases, 'diag' should be a good choice.
"""
def __init__(self,
*args,
topk=9,
score_voting=True,
covariance_type='diag',
**kwargs):
# topk used in paa reassign process
self.topk = topk
self.with_score_voting = score_voting
self.covariance_type = covariance_type
super(PAAHead, self).__init__(*args, **kwargs)
@force_fp32(apply_to=('cls_scores', 'bbox_preds', 'iou_preds'))
def loss(self,
cls_scores,
bbox_preds,
iou_preds,
gt_bboxes,
gt_labels,
img_metas,
gt_bboxes_ignore=None):
"""Compute losses of the head.
Args:
cls_scores (list[Tensor]): Box scores for each scale level
Has shape (N, num_anchors * num_classes, H, W)
bbox_preds (list[Tensor]): Box energies / deltas for each scale
level with shape (N, num_anchors * 4, H, W)
iou_preds (list[Tensor]): iou_preds for each scale
level with shape (N, num_anchors * 1, H, W)
gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
gt_labels (list[Tensor]): class indices corresponding to each box
img_metas (list[dict]): Meta information of each image, e.g.,
image size, scaling factor, etc.
gt_bboxes_ignore (list[Tensor] | None): Specify which bounding
                boxes can be ignored when computing the loss.
Returns:
dict[str, Tensor]: A dictionary of loss gmm_assignment.
"""
featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
assert len(featmap_sizes) == self.anchor_generator.num_levels
device = cls_scores[0].device
anchor_list, valid_flag_list = self.get_anchors(
featmap_sizes, img_metas, device=device)
label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1
cls_reg_targets = self.get_targets(
anchor_list,
valid_flag_list,
gt_bboxes,
img_metas,
gt_bboxes_ignore_list=gt_bboxes_ignore,
gt_labels_list=gt_labels,
label_channels=label_channels,
)
(labels, labels_weight, bboxes_target, bboxes_weight, pos_inds,
pos_gt_index) = cls_reg_targets
cls_scores = levels_to_images(cls_scores)
cls_scores = [
item.reshape(-1, self.cls_out_channels) for item in cls_scores
]
bbox_preds = levels_to_images(bbox_preds)
bbox_preds = [item.reshape(-1, 4) for item in bbox_preds]
iou_preds = levels_to_images(iou_preds)
iou_preds = [item.reshape(-1, 1) for item in iou_preds]
pos_losses_list, = multi_apply(self.get_pos_loss, anchor_list,
cls_scores, bbox_preds, labels,
labels_weight, bboxes_target,
bboxes_weight, pos_inds)
with torch.no_grad():
reassign_labels, reassign_label_weight, \
reassign_bbox_weights, num_pos = multi_apply(
self.paa_reassign,
pos_losses_list,
labels,
labels_weight,
bboxes_weight,
pos_inds,
pos_gt_index,
anchor_list)
num_pos = sum(num_pos)
        # convert all tensor lists to a flattened tensor
cls_scores = torch.cat(cls_scores, 0).view(-1, cls_scores[0].size(-1))
bbox_preds = torch.cat(bbox_preds, 0).view(-1, bbox_preds[0].size(-1))
iou_preds = torch.cat(iou_preds, 0).view(-1, iou_preds[0].size(-1))
labels = torch.cat(reassign_labels, 0).view(-1)
flatten_anchors = torch.cat(
[torch.cat(item, 0) for item in anchor_list])
labels_weight = torch.cat(reassign_label_weight, 0).view(-1)
bboxes_target = torch.cat(bboxes_target,
0).view(-1, bboxes_target[0].size(-1))
pos_inds_flatten = ((labels >= 0)
&
(labels < self.num_classes)).nonzero().reshape(-1)
losses_cls = self.loss_cls(
cls_scores,
labels,
labels_weight,
avg_factor=max(num_pos, len(img_metas))) # avoid num_pos=0
if num_pos:
pos_bbox_pred = self.bbox_coder.decode(
flatten_anchors[pos_inds_flatten],
bbox_preds[pos_inds_flatten])
pos_bbox_target = bboxes_target[pos_inds_flatten]
iou_target = bbox_overlaps(
pos_bbox_pred.detach(), pos_bbox_target, is_aligned=True)
losses_iou = self.loss_centerness(
iou_preds[pos_inds_flatten],
iou_target.unsqueeze(-1),
avg_factor=num_pos)
losses_bbox = self.loss_bbox(
pos_bbox_pred,
pos_bbox_target,
iou_target.clamp(min=EPS),
avg_factor=iou_target.sum())
else:
losses_iou = iou_preds.sum() * 0
losses_bbox = bbox_preds.sum() * 0
return dict(
loss_cls=losses_cls, loss_bbox=losses_bbox, loss_iou=losses_iou)
def get_pos_loss(self, anchors, cls_score, bbox_pred, label, label_weight,
bbox_target, bbox_weight, pos_inds):
"""Calculate loss of all potential positive samples obtained from first
match process.
Args:
anchors (list[Tensor]): Anchors of each scale.
cls_score (Tensor): Box scores of single image with shape
(num_anchors, num_classes)
bbox_pred (Tensor): Box energies / deltas of single image
with shape (num_anchors, 4)
label (Tensor): classification target of each anchor with
shape (num_anchors,)
label_weight (Tensor): Classification loss weight of each
anchor with shape (num_anchors).
bbox_target (dict): Regression target of each anchor with
shape (num_anchors, 4).
bbox_weight (Tensor): Bbox weight of each anchor with shape
(num_anchors, 4).
pos_inds (Tensor): Index of all positive samples got from
first assign process.
Returns:
Tensor: Losses of all positive samples in single image.
"""
if not len(pos_inds):
return cls_score.new([]),
anchors_all_level = torch.cat(anchors, 0)
pos_scores = cls_score[pos_inds]
pos_bbox_pred = bbox_pred[pos_inds]
pos_label = label[pos_inds]
pos_label_weight = label_weight[pos_inds]
pos_bbox_target = bbox_target[pos_inds]
pos_bbox_weight = bbox_weight[pos_inds]
pos_anchors = anchors_all_level[pos_inds]
pos_bbox_pred = self.bbox_coder.decode(pos_anchors, pos_bbox_pred)
# to keep loss dimension
loss_cls = self.loss_cls(
pos_scores,
pos_label,
pos_label_weight,
avg_factor=self.loss_cls.loss_weight,
reduction_override='none')
loss_bbox = self.loss_bbox(
pos_bbox_pred,
pos_bbox_target,
pos_bbox_weight,
avg_factor=self.loss_cls.loss_weight,
reduction_override='none')
loss_cls = loss_cls.sum(-1)
pos_loss = loss_bbox + loss_cls
return pos_loss,
def paa_reassign(self, pos_losses, label, label_weight, bbox_weight,
pos_inds, pos_gt_inds, anchors):
"""Fit loss to GMM distribution and separate positive, ignore, negative
samples again with GMM model.
Args:
pos_losses (Tensor): Losses of all positive samples in
single image.
label (Tensor): classification target of each anchor with
shape (num_anchors,)
label_weight (Tensor): Classification loss weight of each
anchor with shape (num_anchors).
bbox_weight (Tensor): Bbox weight of each anchor with shape
(num_anchors, 4).
pos_inds (Tensor): Index of all positive samples got from
first assign process.
pos_gt_inds (Tensor): Gt_index of all positive samples got
from first assign process.
anchors (list[Tensor]): Anchors of each scale.
Returns:
tuple: Usually returns a tuple containing learning targets.
- label (Tensor): classification target of each anchor after
paa assign, with shape (num_anchors,)
- label_weight (Tensor): Classification loss weight of each
anchor after paa assign, with shape (num_anchors).
- bbox_weight (Tensor): Bbox weight of each anchor with shape
(num_anchors, 4).
- num_pos (int): The number of positive samples after paa
assign.
"""
if not len(pos_inds):
return label, label_weight, bbox_weight, 0
label = label.clone()
label_weight = label_weight.clone()
bbox_weight = bbox_weight.clone()
num_gt = pos_gt_inds.max() + 1
num_level = len(anchors)
num_anchors_each_level = [item.size(0) for item in anchors]
num_anchors_each_level.insert(0, 0)
inds_level_interval = np.cumsum(num_anchors_each_level)
pos_level_mask = []
for i in range(num_level):
mask = (pos_inds >= inds_level_interval[i]) & (
pos_inds < inds_level_interval[i + 1])
pos_level_mask.append(mask)
pos_inds_after_paa = [label.new_tensor([])]
ignore_inds_after_paa = [label.new_tensor([])]
for gt_ind in range(num_gt):
pos_inds_gmm = []
pos_loss_gmm = []
gt_mask = pos_gt_inds == gt_ind
for level in range(num_level):
level_mask = pos_level_mask[level]
level_gt_mask = level_mask & gt_mask
value, topk_inds = pos_losses[level_gt_mask].topk(
min(level_gt_mask.sum(), self.topk), largest=False)
pos_inds_gmm.append(pos_inds[level_gt_mask][topk_inds])
pos_loss_gmm.append(value)
pos_inds_gmm = torch.cat(pos_inds_gmm)
pos_loss_gmm = torch.cat(pos_loss_gmm)
            # the GMM needs at least two samples to be fitted; skip otherwise
if len(pos_inds_gmm) < 2:
continue
device = pos_inds_gmm.device
pos_loss_gmm, sort_inds = pos_loss_gmm.sort()
pos_inds_gmm = pos_inds_gmm[sort_inds]
pos_loss_gmm = pos_loss_gmm.view(-1, 1).cpu().numpy()
min_loss, max_loss = pos_loss_gmm.min(), pos_loss_gmm.max()
means_init = np.array([min_loss, max_loss]).reshape(2, 1)
weights_init = np.array([0.5, 0.5])
precisions_init = np.array([1.0, 1.0]).reshape(2, 1, 1) # full
if self.covariance_type == 'spherical':
precisions_init = precisions_init.reshape(2)
elif self.covariance_type == 'diag':
precisions_init = precisions_init.reshape(2, 1)
elif self.covariance_type == 'tied':
precisions_init = np.array([[1.0]])
if skm is None:
raise ImportError('Please run "pip install sklearn" '
'to install sklearn first.')
gmm = skm.GaussianMixture(
2,
weights_init=weights_init,
means_init=means_init,
precisions_init=precisions_init,
covariance_type=self.covariance_type)
gmm.fit(pos_loss_gmm)
gmm_assignment = gmm.predict(pos_loss_gmm)
scores = gmm.score_samples(pos_loss_gmm)
gmm_assignment = torch.from_numpy(gmm_assignment).to(device)
scores = torch.from_numpy(scores).to(device)
pos_inds_temp, ignore_inds_temp = self.gmm_separation_scheme(
gmm_assignment, scores, pos_inds_gmm)
pos_inds_after_paa.append(pos_inds_temp)
ignore_inds_after_paa.append(ignore_inds_temp)
pos_inds_after_paa = torch.cat(pos_inds_after_paa)
ignore_inds_after_paa = torch.cat(ignore_inds_after_paa)
reassign_mask = (pos_inds.unsqueeze(1) != pos_inds_after_paa).all(1)
reassign_ids = pos_inds[reassign_mask]
label[reassign_ids] = self.num_classes
label_weight[ignore_inds_after_paa] = 0
bbox_weight[reassign_ids] = 0
num_pos = len(pos_inds_after_paa)
return label, label_weight, bbox_weight, num_pos
def gmm_separation_scheme(self, gmm_assignment, scores, pos_inds_gmm):
"""A general separation scheme for gmm model.
It separates a GMM distribution of candidate samples into three
        parts, 0, 1 and uncertain areas, and you can implement other
separation schemes by rewriting this function.
Args:
gmm_assignment (Tensor): The prediction of GMM which is of shape
(num_samples,). The 0/1 value indicates the distribution
that each sample comes from.
scores (Tensor): The probability of sample coming from the
fit GMM distribution. The tensor is of shape (num_samples,).
pos_inds_gmm (Tensor): All the indexes of samples which are used
to fit GMM model. The tensor is of shape (num_samples,)
Returns:
tuple[Tensor]: The indices of positive and ignored samples.
- pos_inds_temp (Tensor): Indices of positive samples.
- ignore_inds_temp (Tensor): Indices of ignore samples.
"""
        # The implementation is (c) in Fig.3 of the original paper instead of (b).
# You can refer to issues such as
# https://github.com/kkhoot/PAA/issues/8 and
# https://github.com/kkhoot/PAA/issues/9.
fgs = gmm_assignment == 0
pos_inds_temp = fgs.new_tensor([], dtype=torch.long)
ignore_inds_temp = fgs.new_tensor([], dtype=torch.long)
if fgs.nonzero().numel():
_, pos_thr_ind = scores[fgs].topk(1)
pos_inds_temp = pos_inds_gmm[fgs][:pos_thr_ind + 1]
ignore_inds_temp = pos_inds_gmm.new_tensor([])
return pos_inds_temp, ignore_inds_temp
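    # Example (hypothetical values): with gmm_assignment = [0, 0, 0, 1, 1] and
    # the highest score among the component-0 samples at filtered index 1, the
    # first two candidates (in ascending-loss order) are returned as positives
    # and the ignore set stays empty.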
def get_targets(
self,
anchor_list,
valid_flag_list,
gt_bboxes_list,
img_metas,
gt_bboxes_ignore_list=None,
gt_labels_list=None,
label_channels=1,
unmap_outputs=True,
):
"""Get targets for PAA head.
        This method is almost the same as `AnchorHead.get_targets()`. We
        directly return the results from _get_targets_single instead of
        mapping them to levels by the images_to_levels function.
Args:
anchor_list (list[list[Tensor]]): Multi level anchors of each
image. The outer list indicates images, and the inner list
corresponds to feature levels of the image. Each element of
the inner list is a tensor of shape (num_anchors, 4).
valid_flag_list (list[list[Tensor]]): Multi level valid flags of
each image. The outer list indicates images, and the inner list
corresponds to feature levels of the image. Each element of
the inner list is a tensor of shape (num_anchors, )
gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image.
img_metas (list[dict]): Meta info of each image.
gt_bboxes_ignore_list (list[Tensor]): Ground truth bboxes to be
ignored.
gt_labels_list (list[Tensor]): Ground truth labels of each box.
label_channels (int): Channel of label.
unmap_outputs (bool): Whether to map outputs back to the original
set of anchors.
Returns:
tuple: Usually returns a tuple containing learning targets.
- labels (list[Tensor]): Labels of all anchors, each with
shape (num_anchors,).
- label_weights (list[Tensor]): Label weights of all anchor.
each with shape (num_anchors,).
- bbox_targets (list[Tensor]): BBox targets of all anchors.
each with shape (num_anchors, 4).
- bbox_weights (list[Tensor]): BBox weights of all anchors.
each with shape (num_anchors, 4).
- pos_inds (list[Tensor]): Contains all index of positive
sample in all anchor.
- gt_inds (list[Tensor]): Contains all gt_index of positive
sample in all anchor.
"""
num_imgs = len(img_metas)
assert len(anchor_list) == len(valid_flag_list) == num_imgs
concat_anchor_list = []
concat_valid_flag_list = []
for i in range(num_imgs):
assert len(anchor_list[i]) == len(valid_flag_list[i])
concat_anchor_list.append(torch.cat(anchor_list[i]))
concat_valid_flag_list.append(torch.cat(valid_flag_list[i]))
# compute targets for each image
if gt_bboxes_ignore_list is None:
gt_bboxes_ignore_list = [None for _ in range(num_imgs)]
if gt_labels_list is None:
gt_labels_list = [None for _ in range(num_imgs)]
results = multi_apply(
self._get_targets_single,
concat_anchor_list,
concat_valid_flag_list,
gt_bboxes_list,
gt_bboxes_ignore_list,
gt_labels_list,
img_metas,
label_channels=label_channels,
unmap_outputs=unmap_outputs)
(labels, label_weights, bbox_targets, bbox_weights, valid_pos_inds,
valid_neg_inds, sampling_result) = results
        # Due to the valid flags of anchors, we have to calculate the real
        # pos_inds in the original anchor set.
pos_inds = []
for i, single_labels in enumerate(labels):
pos_mask = (0 <= single_labels) & (
single_labels < self.num_classes)
pos_inds.append(pos_mask.nonzero().view(-1))
gt_inds = [item.pos_assigned_gt_inds for item in sampling_result]
return (labels, label_weights, bbox_targets, bbox_weights, pos_inds,
gt_inds)
def _get_targets_single(self,
flat_anchors,
valid_flags,
gt_bboxes,
gt_bboxes_ignore,
gt_labels,
img_meta,
label_channels=1,
unmap_outputs=True):
"""Compute regression and classification targets for anchors in a
single image.
        This method is the same as `AnchorHead._get_targets_single()`.
"""
        assert unmap_outputs, 'We must map outputs back to the original ' \
                              'set of anchors in PAAHead'
return super(ATSSHead, self)._get_targets_single(
flat_anchors,
valid_flags,
gt_bboxes,
gt_bboxes_ignore,
gt_labels,
img_meta,
label_channels=1,
unmap_outputs=True)
def _get_bboxes(self,
cls_scores,
bbox_preds,
iou_preds,
mlvl_anchors,
img_shapes,
scale_factors,
cfg,
rescale=False,
with_nms=True):
"""Transform outputs for a single batch item into labeled boxes.
        This method is almost the same as `ATSSHead._get_bboxes()`.
        We use sqrt(iou_preds * cls_scores) in the NMS process instead of just
cls_scores. Besides, score voting is used when `` score_voting``
is set to True.
"""
assert with_nms, 'PAA only supports "with_nms=True" now'
assert len(cls_scores) == len(bbox_preds) == len(mlvl_anchors)
batch_size = cls_scores[0].shape[0]
mlvl_bboxes = []
mlvl_scores = []
mlvl_iou_preds = []
for cls_score, bbox_pred, iou_preds, anchors in zip(
cls_scores, bbox_preds, iou_preds, mlvl_anchors):
assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
scores = cls_score.permute(0, 2, 3, 1).reshape(
batch_size, -1, self.cls_out_channels).sigmoid()
bbox_pred = bbox_pred.permute(0, 2, 3,
1).reshape(batch_size, -1, 4)
iou_preds = iou_preds.permute(0, 2, 3, 1).reshape(batch_size,
-1).sigmoid()
nms_pre = cfg.get('nms_pre', -1)
if nms_pre > 0 and scores.shape[1] > nms_pre:
max_scores, _ = (scores * iou_preds[..., None]).sqrt().max(-1)
_, topk_inds = max_scores.topk(nms_pre)
batch_inds = torch.arange(batch_size).view(
-1, 1).expand_as(topk_inds).long()
anchors = anchors[topk_inds, :]
bbox_pred = bbox_pred[batch_inds, topk_inds, :]
scores = scores[batch_inds, topk_inds, :]
iou_preds = iou_preds[batch_inds, topk_inds]
else:
anchors = anchors.expand_as(bbox_pred)
bboxes = self.bbox_coder.decode(
anchors, bbox_pred, max_shape=img_shapes)
mlvl_bboxes.append(bboxes)
mlvl_scores.append(scores)
mlvl_iou_preds.append(iou_preds)
batch_mlvl_bboxes = torch.cat(mlvl_bboxes, dim=1)
if rescale:
batch_mlvl_bboxes /= batch_mlvl_bboxes.new_tensor(
scale_factors).unsqueeze(1)
batch_mlvl_scores = torch.cat(mlvl_scores, dim=1)
# Add a dummy background class to the backend when using sigmoid
# remind that we set FG labels to [0, num_class-1] since mmdet v2.0
# BG cat_id: num_class
padding = batch_mlvl_scores.new_zeros(batch_size,
batch_mlvl_scores.shape[1], 1)
batch_mlvl_scores = torch.cat([batch_mlvl_scores, padding], dim=-1)
batch_mlvl_iou_preds = torch.cat(mlvl_iou_preds, dim=1)
batch_mlvl_nms_scores = (batch_mlvl_scores *
batch_mlvl_iou_preds[..., None]).sqrt()
det_results = []
for (mlvl_bboxes, mlvl_scores) in zip(batch_mlvl_bboxes,
batch_mlvl_nms_scores):
det_bbox, det_label = multiclass_nms(
mlvl_bboxes,
mlvl_scores,
cfg.score_thr,
cfg.nms,
cfg.max_per_img,
score_factors=None)
if self.with_score_voting and len(det_bbox) > 0:
det_bbox, det_label = self.score_voting(
det_bbox, det_label, mlvl_bboxes, mlvl_scores,
cfg.score_thr)
det_results.append(tuple([det_bbox, det_label]))
return det_results
def score_voting(self, det_bboxes, det_labels, mlvl_bboxes,
mlvl_nms_scores, score_thr):
"""Implementation of score voting method works on each remaining boxes
after NMS procedure.
Args:
det_bboxes (Tensor): Remaining boxes after NMS procedure,
with shape (k, 5), each dimension means
(x1, y1, x2, y2, score).
det_labels (Tensor): The label of remaining boxes, with shape
                (k, 1). Labels are 0-based.
mlvl_bboxes (Tensor): All boxes before the NMS procedure,
with shape (num_anchors,4).
mlvl_nms_scores (Tensor): The scores of all boxes which is used
in the NMS procedure, with shape (num_anchors, num_class)
mlvl_iou_preds (Tensor): The predictions of IOU of all boxes
before the NMS procedure, with shape (num_anchors, 1)
score_thr (float): The score threshold of bboxes.
Returns:
tuple: Usually returns a tuple containing voting results.
- det_bboxes_voted (Tensor): Remaining boxes after
score voting procedure, with shape (k, 5), each
dimension means (x1, y1, x2, y2, score).
- det_labels_voted (Tensor): Label of remaining bboxes
after voting, with shape (num_anchors,).
"""
candidate_mask = mlvl_nms_scores > score_thr
candidate_mask_nonzeros = candidate_mask.nonzero()
candidate_inds = candidate_mask_nonzeros[:, 0]
candidate_labels = candidate_mask_nonzeros[:, 1]
candidate_bboxes = mlvl_bboxes[candidate_inds]
candidate_scores = mlvl_nms_scores[candidate_mask]
det_bboxes_voted = []
det_labels_voted = []
for cls in range(self.cls_out_channels):
candidate_cls_mask = candidate_labels == cls
if not candidate_cls_mask.any():
continue
candidate_cls_scores = candidate_scores[candidate_cls_mask]
candidate_cls_bboxes = candidate_bboxes[candidate_cls_mask]
det_cls_mask = det_labels == cls
det_cls_bboxes = det_bboxes[det_cls_mask].view(
-1, det_bboxes.size(-1))
det_candidate_ious = bbox_overlaps(det_cls_bboxes[:, :4],
candidate_cls_bboxes)
for det_ind in range(len(det_cls_bboxes)):
single_det_ious = det_candidate_ious[det_ind]
pos_ious_mask = single_det_ious > 0.01
pos_ious = single_det_ious[pos_ious_mask]
pos_bboxes = candidate_cls_bboxes[pos_ious_mask]
pos_scores = candidate_cls_scores[pos_ious_mask]
pis = (torch.exp(-(1 - pos_ious)**2 / 0.025) *
pos_scores)[:, None]
voted_box = torch.sum(
pis * pos_bboxes, dim=0) / torch.sum(
pis, dim=0)
voted_score = det_cls_bboxes[det_ind][-1:][None, :]
det_bboxes_voted.append(
torch.cat((voted_box[None, :], voted_score), dim=1))
det_labels_voted.append(cls)
det_bboxes_voted = torch.cat(det_bboxes_voted, dim=0)
det_labels_voted = det_labels.new_tensor(det_labels_voted)
return det_bboxes_voted, det_labels_voted
| 1 | 23,412 | it is mean -> it means PAAHead does not support test-time augmentation. | open-mmlab-mmdetection | py |
@@ -119,6 +119,7 @@ from invenio.legacy.bibcatalog.api import BIBCATALOG_SYSTEM
from invenio.legacy.bibdocfile.config import CFG_BIBDOCFILE_ICON_SUBFORMAT_RE, \
CFG_BIBDOCFILE_DEFAULT_ICON_SUBFORMAT
from invenio.utils.hash import md5
+from invenio.legacy.bibdocfile.registry import plugins
import invenio.legacy.template
| 1 | ## This file is part of Invenio.
## Copyright (C) 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
from __future__ import print_function
"""
This module implements the low-level API for dealing with fulltext files.
- All the files associated to a I{record} (identified by a I{recid}) can be
managed via an instance of the C{BibRecDocs} class.
- A C{BibRecDocs} is a wrapper of the list of I{documents} attached to the
record.
- Each document is represented by an instance of the C{BibDoc} class.
- A document is identified by a C{docid} and name (C{docname}). The docname
must be unique within the record. A document is the set of all the
formats and revisions of a piece of information.
- A document has a type called C{doctype} and can have a restriction.
- Each physical file, i.e. the concretization of a document into a
particular I{version} and I{format} is represented by an instance of the
C{BibDocFile} class.
  - The format is in fact the extension of the physical file.
- A comment and a description and other information can be associated to a
BibDocFile.
  - A C{bibdoc} is a synonym for a document, while a C{bibdocfile} is a
    synonym for a physical file.
@group Main classes: BibRecDocs,BibDoc,BibDocFile
@group Other classes: BibDocMoreInfo,Md5Folder,InvenioBibDocFileError
@group Main functions: decompose_file,stream_file,bibdocfile_*,download_url
@group Configuration Variables: CFG_*
"""
__revision__ = "$Id$"
import os
import re
import shutil
import filecmp
import time
import random
import socket
import urllib2
import urllib
import tempfile
from six.moves import cPickle
import base64
import binascii
import cgi
import sys
try:
import magic
if hasattr(magic, "open"):
CFG_HAS_MAGIC = 1
if not hasattr(magic, "MAGIC_MIME_TYPE"):
## Patching RHEL6/CentOS6 version
magic.MAGIC_MIME_TYPE = 16
elif hasattr(magic, "Magic"):
CFG_HAS_MAGIC = 2
except ImportError:
CFG_HAS_MAGIC = 0
from flask import current_app
from datetime import datetime
from mimetypes import MimeTypes
from thread import get_ident
from six import iteritems
from invenio.utils import apache
## Let's set a reasonable timeout for URL request (e.g. FFT)
socket.setdefaulttimeout(40)
if sys.hexversion < 0x2040000:
# pylint: disable=W0622
from sets import Set as set
# pylint: enable=W0622
from invenio.utils.shell import escape_shell_arg, run_shell_command
from invenio.legacy.dbquery import run_sql, DatabaseError
from invenio.ext.logging import register_exception
from invenio.legacy.bibrecord import record_get_field_instances, \
field_get_subfield_values, field_get_subfield_instances, \
encode_for_xml
from invenio.utils.url import create_url, make_user_agent_string
from invenio.utils.text import nice_size
from invenio.modules.access.engine import acc_authorize_action
from invenio.modules.access.control import acc_is_user_in_role, acc_get_role_id
from invenio.modules.access.firerole import compile_role_definition, acc_firerole_check_user
from invenio.modules.access.local_config import SUPERADMINROLE, CFG_WEBACCESS_WARNING_MSGS
from invenio.config import CFG_SITE_URL, \
CFG_WEBDIR, CFG_BIBDOCFILE_FILEDIR,\
CFG_BIBDOCFILE_ADDITIONAL_KNOWN_FILE_EXTENSIONS, \
CFG_BIBDOCFILE_FILESYSTEM_BIBDOC_GROUP_LIMIT, CFG_SITE_SECURE_URL, \
CFG_BIBUPLOAD_FFT_ALLOWED_LOCAL_PATHS, \
CFG_TMPDIR, CFG_TMPSHAREDDIR, CFG_PATH_MD5SUM, \
CFG_WEBSUBMIT_STORAGEDIR, \
CFG_BIBDOCFILE_USE_XSENDFILE, \
CFG_BIBDOCFILE_MD5_CHECK_PROBABILITY, \
CFG_SITE_RECORD, CFG_PYLIBDIR, \
CFG_BIBUPLOAD_FFT_ALLOWED_EXTERNAL_URLS, \
CFG_BIBDOCFILE_ENABLE_BIBDOCFSINFO_CACHE, \
CFG_BIBDOCFILE_ADDITIONAL_KNOWN_MIMETYPES, \
CFG_BIBDOCFILE_PREFERRED_MIMETYPES_MAPPING, \
CFG_BIBCATALOG_SYSTEM
from invenio.legacy.bibcatalog.api import BIBCATALOG_SYSTEM
from invenio.legacy.bibdocfile.config import CFG_BIBDOCFILE_ICON_SUBFORMAT_RE, \
CFG_BIBDOCFILE_DEFAULT_ICON_SUBFORMAT
from invenio.utils.hash import md5
import invenio.legacy.template
def _plugin_bldr(plugin_code):
"""Preparing the plugin dictionary structure."""
if not plugin_code.__name__.split('.')[-1].startswith('bom_'):
return
ret = {}
ret['create_instance'] = getattr(plugin_code, "create_instance", None)
ret['supports'] = getattr(plugin_code, "supports", None)
return ret
_CFG_BIBDOC_PLUGINS = None
def get_plugins():
"""Lazy loading of plugins."""
global _CFG_BIBDOC_PLUGINS
if _CFG_BIBDOC_PLUGINS is None:
_CFG_BIBDOC_PLUGINS = filter(None, map(
_plugin_bldr,
plugins))
return _CFG_BIBDOC_PLUGINS
bibdocfile_templates = invenio.legacy.template.load('bibdocfile')
## The following flag controls whether HTTP range requests are supported or
## not when serving static files via Python. This is disabled by default as
## it currently breaks support for opening PDF files on Windows platforms
## using the Acrobat reader browser plugin.
CFG_ENABLE_HTTP_RANGE_REQUESTS = False
#: block size when performing I/O.
CFG_BIBDOCFILE_BLOCK_SIZE = 1024 * 8
#: threshold used to decide when to use the Python MD5 or the CLI MD5 algorithm.
CFG_BIBDOCFILE_MD5_THRESHOLD = 256 * 1024
#: chunks loaded by the Python MD5 algorithm.
CFG_BIBDOCFILE_MD5_BUFFER = 1024 * 1024
#: whether to normalize e.g. ".JPEG" and ".jpg" into .jpeg.
CFG_BIBDOCFILE_STRONG_FORMAT_NORMALIZATION = False
#: flags that can be associated to files.
CFG_BIBDOCFILE_AVAILABLE_FLAGS = (
'PDF/A',
'STAMPED',
'PDFOPT',
'HIDDEN',
'CONVERTED',
'PERFORM_HIDE_PREVIOUS',
'OCRED'
)
DBG_LOG_QUERIES = False
#: constant used in FFT correct mode, with the obvious meaning.
KEEP_OLD_VALUE = 'KEEP-OLD-VALUE'
_CFG_BIBUPLOAD_FFT_ALLOWED_EXTERNAL_URLS = [(re.compile(_regex), _headers)
for _regex, _headers in CFG_BIBUPLOAD_FFT_ALLOWED_EXTERNAL_URLS]
_mimes = MimeTypes(strict=False)
_mimes.suffix_map.update({'.tbz2' : '.tar.bz2'})
_mimes.encodings_map.update({'.bz2' : 'bzip2'})
if CFG_BIBDOCFILE_ADDITIONAL_KNOWN_MIMETYPES:
for key, value in iteritems(CFG_BIBDOCFILE_ADDITIONAL_KNOWN_MIMETYPES):
_mimes.add_type(key, value)
del key, value
_magic_cookies = {}
if CFG_HAS_MAGIC == 1:
def _get_magic_cookies():
"""
@return: a tuple of magic object.
@rtype: (MAGIC_NONE, MAGIC_COMPRESS, MAGIC_MIME, MAGIC_COMPRESS + MAGIC_MIME)
@note: ... not real magic. Just see: man file(1)
"""
thread_id = get_ident()
if thread_id not in _magic_cookies:
_magic_cookies[thread_id] = {
magic.MAGIC_NONE: magic.open(magic.MAGIC_NONE),
magic.MAGIC_COMPRESS: magic.open(magic.MAGIC_COMPRESS),
magic.MAGIC_MIME: magic.open(magic.MAGIC_MIME),
magic.MAGIC_COMPRESS + magic.MAGIC_MIME: magic.open(magic.MAGIC_COMPRESS + magic.MAGIC_MIME),
magic.MAGIC_MIME_TYPE: magic.open(magic.MAGIC_MIME_TYPE),
}
for key in _magic_cookies[thread_id].keys():
_magic_cookies[thread_id][key].load()
return _magic_cookies[thread_id]
elif CFG_HAS_MAGIC == 2:
def _magic_wrapper(local_path, mime=True, mime_encoding=False):
thread_id = get_ident()
if (thread_id, mime, mime_encoding) not in _magic_cookies:
magic_object = _magic_cookies[thread_id, mime, mime_encoding] = magic.Magic(mime=mime, mime_encoding=mime_encoding)
else:
magic_object = _magic_cookies[thread_id, mime, mime_encoding]
return magic_object.from_file(local_path) # pylint: disable=E1103
def _generate_extensions():
"""
Generate the regular expression to match all the known extensions.
@return: the regular expression.
@rtype: regular expression object
"""
_tmp_extensions = _mimes.encodings_map.keys() + \
_mimes.suffix_map.keys() + \
_mimes.types_map[1].keys() + \
CFG_BIBDOCFILE_ADDITIONAL_KNOWN_FILE_EXTENSIONS
extensions = []
for ext in _tmp_extensions:
if ext.startswith('.'):
extensions.append(ext)
else:
extensions.append('.' + ext)
extensions.sort()
extensions.reverse()
extensions = set([ext.lower() for ext in extensions])
extensions = '\\' + '$|\\'.join(extensions) + '$'
extensions = extensions.replace('+', '\\+')
return re.compile(extensions, re.I)
#: Regular expression to recognized extensions.
_extensions = _generate_extensions()
class InvenioBibDocFileError(Exception):
"""
Exception raised in case of errors related to fulltext files.
"""
pass
class InvenioBibdocfileUnauthorizedURL(InvenioBibDocFileError):
"""
Exception raised in case of errors related to fulltext files.
"""
## NOTE: this is a legacy Exception
pass
def _val_or_null(val, eq_name = None, q_str = None, q_args = None):
"""
Auxiliary function helpful while building WHERE clauses of SQL queries
that should contain field=val or field is val
If optional parameters q_str and q_args are provided, lists are updated
    If val is None, a statement of the form "eq_name is NULL" is returned;
    otherwise the function returns a parametrised comparison
"eq_name=%s" with val as an argument added to the query args list.
Using parametrised queries diminishes the likelihood of having
SQL injection.
@param val Value to compare with
@type val
@param eq_name The name of the database column
@type eq_name string
@param q_str Query string builder - list of clauses
that should be connected by AND operator
@type q_str list
@param q_args Query arguments list. This list will be applied as
a second argument of run_sql command
@type q_args list
@result string of a single part of WHERE clause
@rtype string
"""
res = ""
if eq_name != None:
res += eq_name
if val == None:
if eq_name != None:
res += " is "
res += "NULL"
if q_str != None:
q_str.append(res)
return res
else:
if eq_name != None:
res += "="
res += "%s"
if q_str != None:
q_str.append(res)
if q_args != None:
q_args.append(str(val))
return res
def _sql_generate_conjunctive_where(to_process):
"""Generating WHERE clause of a SQL statement, consisting of conjunction
of declared terms. Terms are defined by the to_process argument.
the method creates appropriate entries different in the case, value
should be NULL (None in the list) and in the case of not-none arguments.
In the second case, parametrised query is generated decreasing the
chance of an SQL-injection.
@param to_process List of tuples (value, database_column)
@type to_process list"""
q_str = []
q_args = []
for entry in to_process:
q_str.append(_val_or_null(entry[0], eq_name = entry[1], q_args = q_args))
return (" AND ".join(q_str), q_args)
def file_strip_ext(afile, skip_version=False, only_known_extensions=False, allow_subformat=True):
"""
Strip in the best way the extension from a filename.
>>> file_strip_ext("foo.tar.gz")
'foo'
>>> file_strip_ext("foo.buz.gz")
'foo.buz'
>>> file_strip_ext("foo.buz")
'foo'
>>> file_strip_ext("foo.buz", only_known_extensions=True)
'foo.buz'
>>> file_strip_ext("foo.buz;1", skip_version=False,
... only_known_extensions=True)
'foo.buz;1'
>>> file_strip_ext("foo.gif;icon")
'foo'
>>> file_strip_ext("foo.gif:icon", allow_subformat=False)
'foo.gif:icon'
@param afile: the path/name of a file.
@type afile: string
@param skip_version: whether to skip a trailing ";version".
@type skip_version: bool
@param only_known_extensions: whether to strip out only known extensions or
to consider as extension anything that follows a dot.
@type only_known_extensions: bool
@param allow_subformat: whether to consider also subformats as part of
the extension.
@type allow_subformat: bool
@return: the name/path without the extension (and version).
@rtype: string
"""
if skip_version or allow_subformat:
afile = afile.split(';')[0]
nextfile = _extensions.sub('', afile)
if nextfile == afile and not only_known_extensions:
nextfile = os.path.splitext(afile)[0]
while nextfile != afile:
afile = nextfile
nextfile = _extensions.sub('', afile)
return nextfile
def normalize_format(docformat, allow_subformat=True):
"""
Normalize the format, e.g. by adding a dot in front.
@param format: the format/extension to be normalized.
@type format: string
@param allow_subformat: whether to consider also subformats as part of
the extension.
@type allow_subformat: bool
@return: the normalized format.
    @rtype: string
"""
if not docformat:
return ''
if allow_subformat:
subformat = docformat[docformat.rfind(';'):]
docformat = docformat[:docformat.rfind(';')]
else:
subformat = ''
if docformat and docformat[0] != '.':
docformat = '.' + docformat
if CFG_BIBDOCFILE_STRONG_FORMAT_NORMALIZATION:
if docformat not in ('.Z', '.H', '.C', '.CC'):
docformat = docformat.lower()
docformat = {
'.jpg' : '.jpeg',
'.htm' : '.html',
'.tif' : '.tiff'
}.get(docformat, docformat)
return docformat + subformat
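# Example (hypothetical value): normalize_format('JPG;icon') returns
# '.JPG;icon' with the default configuration, or '.jpeg;icon' when
# CFG_BIBDOCFILE_STRONG_FORMAT_NORMALIZATION is enabled.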
def guess_format_from_url(url):
"""
    Given a URL, tries to guess its extension.
    Different methods will be used, including an HTTP HEAD query,
    downloading the resource and using mime detection.
@param url: the URL for which the extension should be guessed.
@type url: string
@return: the recognized extension or '.bin' if it's impossible to
recognize it.
@rtype: string
"""
def guess_via_magic(local_path):
try:
if CFG_HAS_MAGIC == 1:
magic_cookie = _get_magic_cookies()[magic.MAGIC_MIME_TYPE]
mimetype = magic_cookie.file(local_path)
elif CFG_HAS_MAGIC == 2:
mimetype = _magic_wrapper(local_path, mime=True, mime_encoding=False)
if CFG_HAS_MAGIC:
if mimetype in CFG_BIBDOCFILE_PREFERRED_MIMETYPES_MAPPING:
return normalize_format(CFG_BIBDOCFILE_PREFERRED_MIMETYPES_MAPPING[mimetype])
else:
return normalize_format(_mimes.guess_extension(mimetype))
except Exception:
pass
## Let's try to guess the extension by considering the URL as a filename
ext = decompose_file(url, skip_version=True, only_known_extensions=True)[2]
if ext.startswith('.'):
return ext
if is_url_a_local_file(url):
## The URL corresponds to a local file, so we can safely consider
## traditional extensions after the dot.
ext = decompose_file(url, skip_version=True, only_known_extensions=False)[2]
if ext.startswith('.'):
return ext
## No extensions? Let's use Magic.
ext = guess_via_magic(url)
if ext:
return ext
else:
## Since the URL is remote, let's try to perform a HEAD request
## and see the corresponding headers
try:
response = open_url(url, head_request=True)
except (InvenioBibdocfileUnauthorizedURL, urllib2.URLError):
return ".bin"
ext = get_format_from_http_response(response)
if ext:
return ext
if CFG_HAS_MAGIC:
## Last solution: let's download the remote resource
## and use the Python magic library to guess the extension
filename = ""
try:
try:
filename = download_url(url, docformat='')
ext = guess_via_magic(filename)
if ext:
return ext
except Exception:
pass
finally:
if os.path.exists(filename):
## Let's free space
os.remove(filename)
return ".bin"
_docname_re = re.compile(r'[^-\w.]*')
def normalize_docname(docname):
"""
Normalize the docname.
At the moment the normalization is just returning the same string.
@param docname: the docname to be normalized.
@type docname: string
@return: the normalized docname.
@rtype: string
"""
#return _docname_re.sub('', docname)
return docname
def normalize_version(version):
"""
Normalize the version.
The version can be either an integer or the keyword 'all'. Any other
value will be transformed into the empty string.
@param version: the version (either a number or 'all').
@type version: integer or string
@return: the normalized version.
@rtype: string
"""
try:
int(version)
except ValueError:
if version.lower().strip() == 'all':
return 'all'
else:
return ''
return str(version)
def compose_file(dirname, extension, subformat=None, version=None, storagename=None):
"""
Construct back a fullpath given the separate components.
    @param dirname: the directory that should contain the file
    @param extension: the file extension (e.g. ".pdf")
    @param subformat: the optional subformat
    @param version: the optional version number
    @param storagename: name under which the file should be stored in the filesystem
    @type storagename: string
    @return: a fullpath to the file
    @rtype: string
"""
if version:
version = ";%i" % int(version)
else:
version = ""
if subformat:
if not subformat.startswith(";"):
subformat = ";%s" % subformat
else:
subformat = ""
if extension and not extension.startswith("."):
extension = ".%s" % extension
if not storagename:
storagename = "content"
return os.path.join(dirname, storagename + extension + subformat + version)
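# Example (hypothetical path): compose_file('/opt/files', '.pdf',
# subformat='icon', version=2) returns '/opt/files/content.pdf;icon;2'
# (the default storagename being "content").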
def compose_format(extension, subformat=None):
"""
Construct the format string
"""
if not extension.startswith("."):
extension = ".%s" % extension
if subformat:
if not subformat.startswith(";"):
subformat = ";%s" % subformat
else:
subformat = ""
return extension + subformat
def decompose_file(afile, skip_version=False, only_known_extensions=False,
allow_subformat=True):
"""
Decompose a file/path into its components dirname, basename and extension.
>>> decompose_file('/tmp/foo.tar.gz')
('/tmp', 'foo', '.tar.gz')
>>> decompose_file('/tmp/foo.tar.gz;1', skip_version=True)
('/tmp', 'foo', '.tar.gz')
>>> decompose_file('http://www.google.com/index.html')
('http://www.google.com', 'index', '.html')
@param afile: the path/name of a file.
@type afile: string
@param skip_version: whether to skip a trailing ";version".
@type skip_version: bool
@param only_known_extensions: whether to strip out only known extensions or
to consider as extension anything that follows a dot.
@type only_known_extensions: bool
@param allow_subformat: whether to consider also subformats as part of
the extension.
@type allow_subformat: bool
@return: a tuple with the directory name, the basename and extension.
@rtype: (dirname, basename, extension)
@note: if a URL is provided, the scheme will be part of the dirname.
@see: L{file_strip_ext} for the algorithm used to retrieve the extension.
"""
if skip_version:
version = afile.split(';')[-1]
try:
int(version)
afile = afile[:-len(version)-1]
except ValueError:
pass
basename = os.path.basename(afile)
dirname = afile[:-len(basename)-1]
base = file_strip_ext(
basename,
only_known_extensions=only_known_extensions,
allow_subformat=allow_subformat)
extension = basename[len(base) + 1:]
if extension:
extension = '.' + extension
return (dirname, base, extension)
def decompose_file_with_version(afile):
"""
Decompose a file into dirname, basename, extension and version.
>>> decompose_file_with_version('/tmp/foo.tar.gz;1')
('/tmp', 'foo', '.tar.gz', 1)
@param afile: the path/name of a file.
@type afile: string
@return: a tuple with the directory name, the basename, extension and
version.
@rtype: (dirname, basename, extension, version)
    @raise ValueError: in case the version does not exist.
@note: if a URL is provided, the scheme will be part of the dirname.
"""
version_str = afile.split(';')[-1]
version = int(version_str)
afile = afile[:-len(version_str)-1]
basename = os.path.basename(afile)
dirname = afile[:-len(basename)-1]
base = file_strip_ext(basename)
extension = basename[len(base) + 1:]
if extension:
extension = '.' + extension
return (dirname, base, extension, version)
def get_subformat_from_format(docformat):
"""
@return the subformat if any.
@rtype: string
>>> get_subformat_from_format('foo;bar')
'bar'
>>> get_subformat_from_format('foo')
''
"""
try:
return docformat[docformat.rindex(';') + 1:]
except ValueError:
return ''
def get_superformat_from_format(docformat):
"""
@return the superformat if any.
@rtype: string
>>> get_superformat_from_format('foo;bar')
'foo'
>>> get_superformat_from_format('foo')
'foo'
"""
try:
return docformat[:docformat.rindex(';')]
except ValueError:
return docformat
def propose_next_docname(docname):
"""
Given a I{docname}, suggest a new I{docname} (useful when trying to generate
a unique I{docname}).
>>> propose_next_docname('foo')
'foo_1'
>>> propose_next_docname('foo_1')
'foo_2'
>>> propose_next_docname('foo_10')
'foo_11'
@param docname: the base docname.
@type docname: string
@return: the next possible docname based on the given one.
@rtype: string
"""
if '_' in docname:
split_docname = docname.split('_')
try:
split_docname[-1] = str(int(split_docname[-1]) + 1)
docname = '_'.join(split_docname)
except ValueError:
docname += '_1'
else:
docname += '_1'
return docname
class BibRecDocs(object):
"""
This class represents all the files attached to one record.
@param recid: the record identifier.
@type recid: integer
@param deleted_too: whether to consider deleted documents as normal
documents (useful when trying to recover deleted information).
@type deleted_too: bool
@param human_readable: whether numbers should be printed in human readable
format (e.g. 2048 bytes -> 2Kb)
@ivar id: the record identifier as passed to the constructor.
@type id: integer
@ivar human_readable: the human_readable flag as passed to the constructor.
@type human_readable: bool
@ivar deleted_too: the deleted_too flag as passed to the constructor.
@type deleted_too: bool
@ivar bibdocs: the list of documents attached to the record.
@type bibdocs: list of BibDoc
"""
def __init__(self, recid, deleted_too=False, human_readable=False):
try:
self.id = int(recid)
except ValueError:
raise ValueError("BibRecDocs: recid is %s but must be an integer." % repr(recid))
self.human_readable = human_readable
self.deleted_too = deleted_too
self.attachment_types = {} # dictionary docname->attachment type
self._bibdocs = []
self.dirty = True
@property
def bibdocs(self):
if self.dirty:
self.build_bibdoc_list()
return self._bibdocs
def __repr__(self):
"""
@return: the canonical string representation of the C{BibRecDocs}.
@rtype: string
"""
return 'BibRecDocs(%s%s%s)' % (self.id,
self.deleted_too and ', True' or '',
self.human_readable and ', True' or ''
)
def __str__(self):
"""
@return: an easy to be I{grepped} string representation of the
whole C{BibRecDocs} content.
@rtype: string
"""
out = '%i::::total bibdocs attached=%i\n' % (self.id, len(self.bibdocs))
out += '%i::::total size latest version=%s\n' % (self.id, nice_size(self.get_total_size_latest_version()))
out += '%i::::total size all files=%s\n' % (self.id, nice_size(self.get_total_size()))
for (docname, (bibdoc, dummy)) in self.bibdocs.items():
out += str(docname) + ":" + str(bibdoc)
return out
def empty_p(self):
"""
@return: True when the record has no attached documents.
@rtype: bool
"""
return len(self.bibdocs) == 0
def deleted_p(self):
"""
        @return: True if the corresponding record has been deleted.
@rtype: bool
"""
from invenio.legacy.search_engine import record_exists
return record_exists(self.id) == -1
def get_xml_8564(self):
"""
Return a snippet of I{MARCXML} representing the I{8564} fields
corresponding to the current state.
@return: the MARCXML representation.
@rtype: string
"""
from invenio.legacy.search_engine import get_record
out = ''
record = get_record(self.id)
fields = record_get_field_instances(record, '856', '4', ' ')
for field in fields:
urls = field_get_subfield_values(field, 'u')
if urls and not bibdocfile_url_p(urls[0]):
out += '\t<datafield tag="856" ind1="4" ind2=" ">\n'
for subfield, value in field_get_subfield_instances(field):
out += '\t\t<subfield code="%s">%s</subfield>\n' % (subfield, encode_for_xml(value))
out += '\t</datafield>\n'
for afile in self.list_latest_files(list_hidden=False):
out += '\t<datafield tag="856" ind1="4" ind2=" ">\n'
url = afile.get_url()
description = afile.get_description()
comment = afile.get_comment()
if url:
out += '\t\t<subfield code="u">%s</subfield>\n' % encode_for_xml(url)
if description:
out += '\t\t<subfield code="y">%s</subfield>\n' % encode_for_xml(description)
if comment:
out += '\t\t<subfield code="z">%s</subfield>\n' % encode_for_xml(comment)
out += '\t</datafield>\n'
return out
def get_total_size_latest_version(self):
"""
Returns the total size used on disk by all the files belonging
to this record and corresponding to the latest version.
@return: the total size.
@rtype: integer
"""
size = 0
for (bibdoc, _) in self.bibdocs.values():
size += bibdoc.get_total_size_latest_version()
return size
def get_total_size(self):
"""
Return the total size used on disk of all the files belonging
to this record of any version (not only the last as in
L{get_total_size_latest_version}).
@return: the total size.
@rtype: integer
"""
size = 0
for (bibdoc, _) in self.bibdocs.values():
size += bibdoc.get_total_size()
return size
def build_bibdoc_list(self):
"""
        This method must be called every time a I{bibdoc} is added, removed or
modified.
"""
self._bibdocs = {}
if self.deleted_too:
res = run_sql("""SELECT brbd.id_bibdoc, brbd.docname, brbd.type FROM bibrec_bibdoc as brbd JOIN
bibdoc as bd ON bd.id=brbd.id_bibdoc WHERE brbd.id_bibrec=%s
ORDER BY brbd.docname ASC""", (self.id,))
else:
res = run_sql("""SELECT brbd.id_bibdoc, brbd.docname, brbd.type FROM bibrec_bibdoc as brbd JOIN
bibdoc as bd ON bd.id=brbd.id_bibdoc WHERE brbd.id_bibrec=%s AND
bd.status<>'DELETED' ORDER BY brbd.docname ASC""", (self.id,))
for row in res:
cur_doc = BibDoc.create_instance(docid=row[0], recid=self.id,
human_readable=self.human_readable)
self._bibdocs[row[1]] = (cur_doc, row[2])
self.dirty = False
def list_bibdocs_by_names(self, doctype=None):
"""
        Returns the dictionary of all bibdoc objects belonging to a recid.
        Keys in the dictionary are names of documents and values are BibDoc objects.
If C{doctype} is set, it returns just the bibdocs of that doctype.
@param doctype: the optional doctype.
@type doctype: string
@return: the dictionary of bibdocs.
        @rtype: dictionary of docname -> BibDoc
"""
if not doctype:
return dict((k, v) for (k, (v, _)) in iteritems(self.bibdocs))
res = {}
for (docname, (doc, attachmenttype)) in iteritems(self.bibdocs):
if attachmenttype == doctype:
res[docname] = doc
return res
def list_bibdocs(self, doctype=None, rel_type=None):
"""
        Returns the list of all bibdoc objects belonging to a recid.
If C{doctype} is set, it returns just the bibdocs of that doctype.
@param doctype: the optional doctype.
@type doctype: string
@return: the list of bibdocs.
@rtype: list of BibDoc
"""
return [bibdoc for (bibdoc, rtype) in self.bibdocs.values()
if (not doctype or doctype == bibdoc.doctype) and
(rel_type is None or rel_type == rtype)]
def get_bibdoc_names(self, doctype=None):
"""
Returns all the names of the documents associated with the bibrec.
If C{doctype} is set, restrict the result to all the matching doctype.
@param doctype: the optional doctype.
@type doctype: string
@return: the list of document names.
@rtype: list of string
"""
return [docname for (docname, dummy) in self.list_bibdocs_by_names(doctype).items()]
def check_file_exists(self, path, f_format):
"""
        Check if a file with the same content as the file pointed to by
        C{path} is already attached to this record.
@param path: the file to be checked against.
@type path: string
@return: True if a file with the requested content is already attached
to the record.
@rtype: bool
"""
size = os.path.getsize(path)
# Let's consider all the latest files
files = self.list_latest_files()
# Let's consider all the latest files with same size
potential = [afile for afile in files if afile.get_size() == size and afile.format == f_format]
if potential:
checksum = calculate_md5(path)
# Let's consider all the latest files with the same size and the
# same checksum
potential = [afile for afile in potential if afile.get_checksum() == checksum]
if potential:
potential = [afile for afile in potential if
filecmp.cmp(afile.get_full_path(), path)]
if potential:
return True
else:
# Gosh! How unlucky, same size, same checksum but not same
# content!
pass
return False
def propose_unique_docname(self, docname):
"""
Given C{docname}, return a new docname that is not already attached to
the record.
@param docname: the reference docname.
@type docname: string
@return: a docname not already attached.
@rtype: string
"""
docname = normalize_docname(docname)
goodname = docname
i = 1
while goodname in self.get_bibdoc_names():
i += 1
goodname = "%s_%s" % (docname, i)
return goodname
def merge_bibdocs(self, docname1, docname2):
"""
        This method merges C{docname2} into C{docname1}.
1. Given all the formats of the latest version of the files
attached to C{docname2}, these files are added as new formats
into C{docname1}.
2. C{docname2} is marked as deleted.
@raise InvenioBibDocFileError: if at least one format in C{docname2}
already exists in C{docname1}. (In this case the two bibdocs are
preserved)
@note: comments and descriptions are also copied.
        @note: if C{docname2} has a I{restriction} (i.e. if the I{status} is
set) and C{docname1} doesn't, the restriction is imported.
"""
bibdoc1 = self.get_bibdoc(docname1)
bibdoc2 = self.get_bibdoc(docname2)
## Check for possibility
for bibdocfile in bibdoc2.list_latest_files():
docformat = bibdocfile.get_format()
if bibdoc1.format_already_exists_p(docformat):
raise InvenioBibDocFileError('Format %s already exists in bibdoc %s of record %s. It\'s impossible to merge bibdoc %s into it.' % (docformat, docname1, self.id, docname2))
## Importing restriction if needed.
restriction1 = bibdoc1.get_status()
restriction2 = bibdoc2.get_status()
if restriction2 and not restriction1:
bibdoc1.set_status(restriction2)
## Importing formats
for bibdocfile in bibdoc2.list_latest_files():
docformat = bibdocfile.get_format()
comment = bibdocfile.get_comment()
description = bibdocfile.get_description()
bibdoc1.add_file_new_format(bibdocfile.get_full_path(),
description=description,
comment=comment, docformat=docformat)
## Finally deleting old bibdoc2
bibdoc2.delete()
self.dirty = True
def get_docid(self, docname):
"""
@param docname: the document name.
@type docname: string
@return: the identifier corresponding to the given C{docname}.
@rtype: integer
@raise InvenioBibDocFileError: if the C{docname} does not
            correspond to a document attached to this record.
"""
if docname in self.bibdocs:
return self.bibdocs[docname][0].id
raise InvenioBibDocFileError, "Recid '%s' is not connected with a " \
"docname '%s'" % (self.id, docname)
def get_docname(self, docid):
"""
@param docid: the document identifier.
@type docid: integer
@return: the name of the document corresponding to the given document
identifier.
@rtype: string
@raise InvenioBibDocFileError: if the C{docid} does not
            correspond to a document attached to this record.
"""
for (docname, (bibdoc, _)) in self.bibdocs.items():
if bibdoc.id == docid:
return docname
raise InvenioBibDocFileError, "Recid '%s' is not connected with a " \
"docid '%s'" % (self.id, docid)
def change_name(self, newname, oldname=None, docid=None):
"""
Renames document of a given name.
@param newname: the new name.
@type newname: string
@raise InvenioBibDocFileError: if the new name corresponds to
a document already attached to the record owning this document.
"""
if not oldname and not docid:
raise StandardError("Trying to rename unspecified document")
if not oldname:
oldname = self.get_docname(docid)
if not docid:
docid = self.get_docid(oldname)
doc, atttype = self.bibdocs[oldname]
newname = normalize_docname(newname)
res = run_sql("SELECT id_bibdoc FROM bibrec_bibdoc WHERE id_bibrec=%s AND docname=%s", (self.id, newname))
if res:
raise InvenioBibDocFileError, "A bibdoc called %s already exists for recid %s" % (newname, self.id)
doc.change_name(self.id, newname)
# updating the record structure
del self._bibdocs[oldname]
self._bibdocs[newname] = (doc, atttype)
def has_docname_p(self, docname):
"""
@param docname: the document name.
@type docname: string
@return: True if a document with the given name is attached to this
record.
@rtype: bool
"""
return docname in self.bibdocs.keys()
def get_bibdoc(self, docname):
"""
@return: the bibdoc with a particular docname associated with
this recid"""
if docname in self.bibdocs:
return self.bibdocs[docname][0]
raise InvenioBibDocFileError, "Recid '%s' is not connected with " \
" docname '%s'" % (self.id, docname)
def delete_bibdoc(self, docname):
"""
Deletes the document with the specified I{docname}.
@param docname: the document name.
@type docname: string
"""
if docname in self.bibdocs:
self.bibdocs[docname][0].delete()
self.dirty = True
def add_bibdoc(self, doctype="Main", docname='file', never_fail=False):
"""
Add a new empty document object (a I{bibdoc}) to the list of
documents of this record.
@param doctype: the document type.
@type doctype: string
@param docname: the document name.
@type docname: string
@param never_fail: if True, this procedure will not fail, even if
a document with the given name is already attached to this
record. In this case a new name will be generated (see
L{propose_unique_docname}).
@type never_fail: bool
@return: the newly created document object.
@rtype: BibDoc
@raise InvenioBibDocFileError: in case of any error.
"""
try:
docname = normalize_docname(docname)
if never_fail:
docname = self.propose_unique_docname(docname)
if docname in self.get_bibdoc_names():
raise InvenioBibDocFileError, \
"%s has already a bibdoc with docname %s" % (self.id, docname)
else:
bibdoc = BibDoc.create_instance(recid=self.id, doctype=doctype,
docname=docname,
human_readable=self.human_readable)
self.dirty = True
return bibdoc
except Exception as e:
register_exception()
raise InvenioBibDocFileError(str(e))
def add_new_file(self, fullpath, doctype="Main", docname=None,
never_fail=False, description=None, comment=None,
docformat=None, flags=None, modification_date=None):
"""
Directly add a new file to this record.
Adds a new file with the following policy:
- if the C{docname} is not set it is retrieved from the name of the
file.
- If a bibdoc with the given docname doesn't already exist, it is
created and the file is added to it.
- If it exists but doesn't contain the format that is being
added, the new format is added.
- If the format already exists then if C{never_fail} is True a new
bibdoc is created with a similar name but with a progressive
number as a suffix and the file is added to it (see
L{propose_unique_docname}).
@param fullpath: the filesystem path of the document to be added.
@type fullpath: string
@param doctype: the type of the document.
@type doctype: string
@param docname: the document name.
@type docname: string
@param never_fail: if True, this procedure will not fail, even if
a document with the given name is already attached to this
record. In this case a new name will be generated (see
L{propose_unique_docname}).
@type never_fail: bool
@param description: an optional description of the file.
@type description: string
@param comment: an optional comment to the file.
@type comment: string
@param format: the extension of the file. If not specified it will
be guessed (see L{guess_format_from_url}).
@type format: string
@param flags: a set of flags to be associated with the file (see
L{CFG_BIBDOCFILE_AVAILABLE_FLAGS})
@type flags: list of string
@return: the elaborated document object.
@rtype: BibDoc
@raise InvenioBibDocFileError: in case of error.
"""
if docname is None:
docname = decompose_file(fullpath)[1]
if docformat is None:
docformat = decompose_file(fullpath)[2]
docname = normalize_docname(docname)
try:
bibdoc = self.get_bibdoc(docname)
except InvenioBibDocFileError:
# bibdoc doesn't exist yet!
bibdoc = self.add_bibdoc(doctype, docname, False)
bibdoc.add_file_new_version(fullpath, description=description, comment=comment, docformat=docformat, flags=flags, modification_date=modification_date)
else:
try:
bibdoc.add_file_new_format(fullpath, description=description, comment=comment, docformat=docformat, flags=flags, modification_date=modification_date)
except InvenioBibDocFileError as dummy:
# Format already exist!
if never_fail:
bibdoc = self.add_bibdoc(doctype, docname, True)
bibdoc.add_file_new_version(fullpath, description=description, comment=comment, docformat=docformat, flags=flags, modification_date=modification_date)
else:
raise
return bibdoc
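# Illustrative sketch of the add_new_file policy (paths and names are
# hypothetical): the first call creates the bibdoc 'report' from report.pdf;
# the second call adds report.txt as a new format of the same bibdoc; the
# third call, adding another .pdf with never_fail=True, creates 'report_2'.
#   bibrecdocs.add_new_file('/tmp/report.pdf')
#   bibrecdocs.add_new_file('/tmp/report.txt')
#   bibrecdocs.add_new_file('/tmp/report.pdf', never_fail=True)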
def add_new_version(self, fullpath, docname=None, description=None, comment=None, docformat=None, flags=None):
"""
Adds a new file to an already existent document object as a new
version.
@param fullpath: the filesystem path of the file to be added.
@type fullpath: string
@param docname: the document name. If not specified it will be
extracted from C{fullpath} (see L{decompose_file}).
@type docname: string
@param description: an optional description for the file.
@type description: string
@param comment: an optional comment to the file.
@type comment: string
@param format: the extension of the file. If not specified it will
be guessed (see L{guess_format_from_url}).
@type format: string
@param flags: a set of flags to be associated with the file (see
L{CFG_BIBDOCFILE_AVAILABLE_FLAGS})
@type flags: list of string
@return: the elaborated document object.
@rtype: BibDoc
@raise InvenioBibDocFileError: in case of error.
@note: previous files associated with the same document will be
considered obsolete.
"""
if docname is None:
docname = decompose_file(fullpath)[1]
if docformat is None:
docformat = decompose_file(fullpath)[2]
if flags is None:
flags = []
if 'pdfa' in get_subformat_from_format(docformat).split(';') and not 'PDF/A' in flags:
flags.append('PDF/A')
bibdoc = self.get_bibdoc(docname=docname)
bibdoc.add_file_new_version(fullpath, description=description, comment=comment, docformat=docformat, flags=flags)
return bibdoc
def add_new_format(self, fullpath, docname=None, description=None, comment=None, docformat=None, flags=None, modification_date=None):
"""
Adds a new file to an already existent document object as a new
format.
@param fullpath: the filesystem path of the file to be added.
@type fullpath: string
@param docname: the document name. If not specified it will be
extracted from C{fullpath} (see L{decompose_file}).
@type docname: string
@param description: an optional description for the file.
@type description: string
@param comment: an optional comment to the file.
@type comment: string
@param format: the extension of the file. If not specified it will
be guessed (see L{guess_format_from_url}).
@type format: string
@param flags: a set of flags to be associated with the file (see
L{CFG_BIBDOCFILE_AVAILABLE_FLAGS})
@type flags: list of string
@return: the elaborated document object.
@rtype: BibDoc
@raise InvenioBibDocFileError: in case the same format already
exists.
"""
if docname is None:
docname = decompose_file(fullpath)[1]
if docformat is None:
docformat = decompose_file(fullpath)[2]
if flags is None:
flags = []
if 'pdfa' in get_subformat_from_format(docformat).split(';') and not 'PDF/A' in flags:
flags.append('PDF/A')
bibdoc = self.get_bibdoc(docname=docname)
bibdoc.add_file_new_format(fullpath, description=description, comment=comment, docformat=docformat, flags=flags, modification_date=modification_date)
return bibdoc
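# Sketch of the version/format distinction (file paths are hypothetical):
# add_new_version() obsoletes previous files by bumping the version number,
# while add_new_format() attaches an additional format to the latest version.
#   bibrecdocs.add_new_version('/tmp/report-v2.pdf', docname='report')
#   bibrecdocs.add_new_format('/tmp/report-v2.ps', docname='report')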
def list_latest_files(self, doctype=None, list_hidden=True):
"""
Returns a list of the latest files.
@param doctype: if set, only document of the given type will be listed.
@type doctype: string
@param list_hidden: if True, will list also files with the C{HIDDEN}
flag being set.
@type list_hidden: bool
@return: the list of latest files.
@rtype: list of BibDocFile
"""
docfiles = []
for bibdoc in self.list_bibdocs(doctype):
docfiles += bibdoc.list_latest_files(list_hidden=list_hidden)
return docfiles
def fix(self, docname):
"""
Algorithm that transforms a broken/old bibdoc into a coherent one.
Think of it as being the fsck of BibDocs.
- All the files in the bibdoc directory will be renamed according
to the document name. Proper .recid, .type, .md5 files will be
created/updated.
- In case of more than one file with the same format and version, a new
bibdoc will be created in order to host those files.
@param docname: the document name that needs to be fixed.
@type docname: string
@return: the list of newly created bibdocs if any.
@rtype: list of BibDoc
@raise InvenioBibDocFileError: in case of issues that can not be
fixed automatically.
"""
bibdoc = self.get_bibdoc(docname)
versions = {}
res = []
new_bibdocs = [] # List of files with the same version/format of
# existing file which need new bibdoc.
counter = 0
zero_version_bug = False
if os.path.exists(bibdoc.basedir):
from invenio.config import CFG_CERN_SITE, CFG_INSPIRE_SITE, CFG_BIBDOCFILE_AFS_VOLUME_PATTERN, CFG_BIBDOCFILE_AFS_VOLUME_QUOTA
if os.path.realpath(bibdoc.basedir).startswith('/afs') and (CFG_CERN_SITE or CFG_INSPIRE_SITE):
## We are on AFS at CERN! Let's allocate directories the CERN/AFS way. E.g.
## $ afs_admin create -q 1000000 /afs/cern.ch/project/cds/files/g40 p.cds.g40
## NOTE: This might be extended to use low-level OpenAFS CLI tools
## so that this technique could be extended to other AFS users outside CERN.
mount_point = os.path.dirname(os.path.realpath(bibdoc.basedir))
if not os.path.exists(mount_point):
volume = CFG_BIBDOCFILE_AFS_VOLUME_PATTERN % os.path.basename(mount_point)
quota = str(CFG_BIBDOCFILE_AFS_VOLUME_QUOTA)
exit_code, stdout, stderr = run_shell_command("afs_admin create -q %s %s %s", (quota, mount_point, volume))
if exit_code or stderr:
raise IOError("Error in creating AFS mount point %s with quota %s and volume %s: exit_code=%s. Captured stdout:\n: %s\nCaptured stderr:\n: %s" % (mount_point, quota, volume, exit_code, stdout, stderr))
for filename in os.listdir(bibdoc.basedir):
if filename[0] != '.' and ';' in filename:
name, version = filename.rsplit(';', 1)
try:
version = int(version)
except ValueError:
# Strange name
register_exception()
raise InvenioBibDocFileError, "A file called %s exists under %s. This is not a valid name. After the ';' there must be an integer representing the file version. Please, manually fix this file either by renaming or by deleting it." % (filename, bibdoc.basedir)
if version == 0:
zero_version_bug = True
docformat = name[len(file_strip_ext(name)):]
docformat = normalize_format(docformat)
if version not in versions:
versions[version] = {}
new_name = 'FIXING-%s-%s' % (str(counter), name)
try:
shutil.move('%s/%s' % (bibdoc.basedir, filename), '%s/%s' % (bibdoc.basedir, new_name))
except Exception as e:
register_exception()
raise InvenioBibDocFileError, "Error in renaming '%s' to '%s': '%s'" % ('%s/%s' % (bibdoc.basedir, filename), '%s/%s' % (bibdoc.basedir, new_name), e)
if docformat in versions[version]:
new_bibdocs.append((new_name, version))
else:
versions[version][docformat] = new_name
counter += 1
elif filename[0] != '.':
# Strange name
register_exception()
raise InvenioBibDocFileError, "A file called %s exists under %s. This is not a valid name. There should be a ';' followed by an integer representing the file version. Please, manually fix this file either by renaming or by deleting it." % (filename, bibdoc.basedir)
else:
# we create the corresponding storage directory
old_umask = os.umask(0o022)
os.makedirs(bibdoc.basedir)
# and save the father record id if it exists
try:
if self.id != "":
recid_fd = open("%s/.recid" % bibdoc.basedir, "w")
recid_fd.write(str(self.id))
recid_fd.close()
if bibdoc.doctype != "":
type_fd = open("%s/.type" % bibdoc.basedir, "w")
type_fd.write(str(bibdoc.doctype))
type_fd.close()
except Exception as e:
register_exception()
raise InvenioBibDocFileError, e
os.umask(old_umask)
if not versions:
bibdoc.delete()
self.dirty = True
else:
for version, formats in iteritems(versions):
if zero_version_bug:
version += 1
for docformat, filename in iteritems(formats):
destination = '%s%s;%i' % (docname, docformat, version)
try:
shutil.move('%s/%s' % (bibdoc.basedir, filename), '%s/%s' % (bibdoc.basedir, destination))
except Exception as e:
register_exception()
raise InvenioBibDocFileError, "Error in renaming '%s' to '%s': '%s'" % ('%s/%s' % (bibdoc.basedir, filename), '%s/%s' % (bibdoc.basedir, destination), e)
try:
recid_fd = open("%s/.recid" % bibdoc.basedir, "w")
recid_fd.write(str(self.id))
recid_fd.close()
type_fd = open("%s/.type" % bibdoc.basedir, "w")
type_fd.write(str(bibdoc.doctype))
type_fd.close()
except Exception as e:
register_exception()
raise InvenioBibDocFileError, "Error in creating .recid and .type file for '%s' folder: '%s'" % (bibdoc.basedir, e)
res = []
for (filename, version) in new_bibdocs:
if zero_version_bug:
version += 1
new_bibdoc = self.add_bibdoc(doctype=bibdoc.doctype, docname=docname, never_fail=True)
new_bibdoc.add_file_new_format('%s/%s' % (bibdoc.basedir, filename), version)
res.append(new_bibdoc)
try:
os.remove('%s/%s' % (bibdoc.basedir, filename))
except Exception as e:
register_exception()
raise InvenioBibDocFileError, "Error in removing '%s': '%s'" % ('%s/%s' % (bibdoc.basedir, filename), e)
Md5Folder(bibdoc.basedir).update(only_new=False)
bibdoc._build_file_list()
for (bibdoc, dummyatttype) in self.bibdocs.values():
if not run_sql('SELECT data_value FROM bibdocmoreinfo WHERE id_bibdoc=%s', (bibdoc.id,)):
## Import from MARC only if the bibdoc has never had
## its more_info initialized.
try:
bibdoc.import_descriptions_and_comments_from_marc()
except Exception as e:
register_exception()
raise InvenioBibDocFileError, "Error in importing description and comment from %s for record %s: %s" % (repr(bibdoc), self.id, e)
return res
def check_format(self, docname):
"""
Check for any format related issue.
In case L{CFG_BIBDOCFILE_ADDITIONAL_KNOWN_FILE_EXTENSIONS} is
altered or Python version changes, it might happen that a docname
contains files which are no longer docname + .format ; version, simply
because the .format is now recognized (and it was not before, so
it was contained in the docname).
This algorithm verifies whether it is necessary to fix (see L{fix_format}).
@param docname: the document name whose formats should be verified.
@type docname: string
@return: True if format is correct. False if a fix is needed.
@rtype: bool
@raise InvenioBibDocFileError: in case of any error.
"""
bibdoc = self.get_bibdoc(docname)
correct_docname = decompose_file(docname + '.pdf')[1]
if docname != correct_docname:
return False
for filename in os.listdir(bibdoc.basedir):
if not filename.startswith('.'):
try:
dummy, dummy, docformat, version = decompose_file_with_version(filename)
except Exception:
raise InvenioBibDocFileError('Incorrect filename "%s" for docname %s for recid %i' % (filename, docname, self.id))
if '%s%s;%i' % (correct_docname, docformat, version) != filename:
return False
return True
def check_duplicate_docnames(self):
"""
Check whether the record is connected with at least two documents
with the same name.
@return: True if everything is fine.
@rtype: bool
"""
docnames = set()
for docname in self.get_bibdoc_names():
if docname in docnames:
return False
else:
docnames.add(docname)
return True
def uniformize_bibdoc(self, docname):
"""
This algorithm corrects wrong file names belonging to a bibdoc.
@param docname: the document name whose formats should be verified.
@type docname: string
"""
bibdoc = self.get_bibdoc(docname)
for filename in os.listdir(bibdoc.basedir):
if not filename.startswith('.'):
try:
dummy, dummy, docformat, version = decompose_file_with_version(filename)
except ValueError:
register_exception(alert_admin=True, prefix= "Strange file '%s' is stored in %s" % (filename, bibdoc.basedir))
else:
os.rename(os.path.join(bibdoc.basedir, filename), os.path.join(bibdoc.basedir, '%s%s;%i' % (docname, docformat, version)))
Md5Folder(bibdoc.basedir).update()
bibdoc.touch('rename')
def fix_format(self, docname, skip_check=False):
"""
Fixes format related inconsistencies.
@param docname: the document name whose formats should be verified.
@type docname: string
@param skip_check: if True assume L{check_format} has already been
called and the need for fix has already been found.
If False, will implicitly call L{check_format} and skip fixing
if no error is found.
@type skip_check: bool
@return: False in case merging two bibdocs is needed but it is not possible; True otherwise.
@rtype: bool
"""
if not skip_check:
if self.check_format(docname):
return True
bibdoc = self.get_bibdoc(docname)
correct_docname = decompose_file(docname + '.pdf')[1]
need_merge = False
if correct_docname != docname:
need_merge = self.has_docname_p(correct_docname)
if need_merge:
proposed_docname = self.propose_unique_docname(correct_docname)
run_sql('UPDATE bibdoc SET docname=%s WHERE id=%s', (proposed_docname, bibdoc.id))
self.dirty = True
self.uniformize_bibdoc(proposed_docname)
try:
self.merge_bibdocs(docname, proposed_docname)
except InvenioBibDocFileError:
return False
else:
run_sql('UPDATE bibdoc SET docname=%s WHERE id=%s', (correct_docname, bibdoc.id))
self.dirty = True
self.uniformize_bibdoc(correct_docname)
else:
self.uniformize_bibdoc(docname)
return True
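# Typical check/fix sequence, sketched under the assumption that `bibrecdocs`
# wraps an existing record: fix_format() is only invoked when check_format()
# reports an inconsistency (fix_format() performs the same check internally
# unless skip_check=True).
#   for docname in bibrecdocs.get_bibdoc_names():
#       if not bibrecdocs.check_format(docname):
#           bibrecdocs.fix_format(docname, skip_check=True)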
def fix_duplicate_docnames(self, skip_check=False):
"""
Algorithm to fix duplicate docnames.
If a record is connected with at least two bibdoc having the same
docname, the algorithm will try to merge them.
@param skip_check: if True assume L{check_duplicate_docnames} has
already been called and the need for fix has already been found.
If False, will implicitly call L{check_duplicate_docnames} and skip
fixing if no error is found.
@type skip_check: bool
"""
if not skip_check:
if self.check_duplicate_docnames():
return
docnames = set()
for bibdoc in self.list_bibdocs():
docname = self.get_docname(bibdoc.id)
if docname in docnames:
new_docname = self.propose_unique_docname(self.get_docname(bibdoc.id))
self.change_name(docid=bibdoc.id, newname=new_docname)
self.merge_bibdocs(docname, new_docname)
docnames.add(docname)
def get_text(self, extract_text_if_necessary=True):
"""
@return: the concatenated texts of all bibdocs, separated by a space.
@rtype: string
"""
texts = []
for bibdoc in self.list_bibdocs():
if hasattr(bibdoc, 'has_text'):
if extract_text_if_necessary and not bibdoc.has_text(require_up_to_date=True):
perform_ocr = hasattr(bibdoc, 'is_ocr_required') and bibdoc.is_ocr_required()
from invenio.legacy.bibsched.bibtask import write_message
write_message("... will extract words from %s %s" % (bibdoc, perform_ocr and 'with OCR' or ''), verbose=2)
bibdoc.extract_text(perform_ocr=perform_ocr)
texts.append(bibdoc.get_text())
return " ".join(texts)
class BibDoc(object):
"""
This class represents one document (i.e. a set of files with different
formats and with versioning information) that constitutes a piece of
information.
To instantiate a new document, the recid and the docname are mandatory.
To instantiate an already existing document, either the recid and docname
or the docid alone are sufficient to retrieve it.
@param docid: the document identifier.
@type docid: integer
@param recid: the record identifier of the record to which this document
belongs to. If the C{docid} is specified the C{recid} is automatically
retrieved from the database.
@type recid: integer
@param docname: the document name.
@type docname: string
@param doctype: the document type (used when instantiating a new document).
@type doctype: string
@param human_readable: whether sizes should be represented in a human
readable format.
@type human_readable: bool
@raise InvenioBibDocFileError: in case of error.
"""
@staticmethod
def create_new_document(doc_type="Main", rec_links=None):
if rec_links is None:
rec_links = []
status = ''
doc_id = run_sql("INSERT INTO bibdoc (status, creation_date, modification_date, doctype) "
"values(%s,NOW(),NOW(), %s)", (status, doc_type))
if not doc_id:
raise InvenioBibDocFileError, "New docid cannot be created"
# creating the representation on disk ... preparing the directory
try:
BibDoc.prepare_basedir(doc_id)
except Exception as e:
run_sql('DELETE FROM bibdoc WHERE id=%s', (doc_id, ))
register_exception(alert_admin=True)
raise InvenioBibDocFileError, e
# the object has been created: linking to bibliographical records
doc = BibDoc(doc_id)
for link in rec_links:
if "rec_id" in link and link["rec_id"]:
rec_id = link["rec_id"]
doc_name = normalize_docname(link["doc_name"])
a_type = link["a_type"]
doc.attach_to_record(rec_id, str(a_type), str(doc_name))
return doc_id
def __init__(self, docid, human_readable=False, initial_data=None):
"""Constructor of a bibdoc. At least the docid or the recid/docname
pair is needed.
Specifying recid, docname and doctype without specifying docid results in
attaching a newly created document to a record.
"""
# docid is known, the document already exists
res2 = run_sql("SELECT id_bibrec, type, docname FROM bibrec_bibdoc WHERE id_bibdoc=%s", (docid,))
self.bibrec_types = [(r[0], r[1], r[2]) for r in res2 ] # just in case the result was behaving like tuples but was something else
if not res2:
# fake attachment
self.bibrec_types = [(0, None, "fake_name_for_unattached_document")]
if initial_data is None:
initial_data = BibDoc._retrieve_data(docid)
self._docfiles = []
self.__md5s = None
self._related_files = {}
self.human_readable = human_readable
self.cd = initial_data["cd"] # creation date
self.md = initial_data["md"] # modification date
self.td = initial_data["td"] # text extraction date # should be moved from here !!!!
self.bibrec_links = initial_data["bibrec_links"]
self.id = initial_data["id"]
self.status = initial_data["status"]
self.basedir = initial_data["basedir"]
self.doctype = initial_data["doctype"]
self.storagename = initial_data["storagename"] # the old docname -> now used as a storage name for old records
self.more_info = BibDocMoreInfo(self.id)
self.dirty = True
self.dirty_related_files = True
self.last_action = 'init'
def __del__(self):
if self.dirty and self.last_action != 'init':
## The object is dirty and we did something more than initializing it
self._build_file_list()
@property
def docfiles(self):
if self.dirty:
self._build_file_list(self.last_action)
self.dirty = False
return self._docfiles
@property
def related_files(self):
if self.dirty_related_files:
self._build_related_file_list()
self.dirty_related_files = False
return self._related_files
@staticmethod
def prepare_basedir(doc_id):
"""Prepares the directory serving as root of a BibDoc"""
basedir = _make_base_dir(doc_id)
# we create the corresponding storage directory
if not os.path.exists(basedir):
from invenio.config import CFG_CERN_SITE, CFG_INSPIRE_SITE, CFG_BIBDOCFILE_AFS_VOLUME_PATTERN, CFG_BIBDOCFILE_AFS_VOLUME_QUOTA
if os.path.realpath(basedir).startswith('/afs') and (CFG_CERN_SITE or CFG_INSPIRE_SITE):
## We are on AFS at CERN! Let's allocate directories the CERN/AFS way. E.g.
## $ afs_admin create -q 1000000 /afs/cern.ch/project/cds/files/g40 p.cds.g40
## NOTE: This might be extended to use low-level OpenAFS CLI tools
## so that this technique could be extended to other AFS users outside CERN.
mount_point = os.path.dirname(os.path.realpath(basedir))
if not os.path.exists(mount_point):
volume = CFG_BIBDOCFILE_AFS_VOLUME_PATTERN % os.path.basename(mount_point)
quota = str(CFG_BIBDOCFILE_AFS_VOLUME_QUOTA)
exit_code, stdout, stderr = run_shell_command("afs_admin create -q %s %s %s", (quota, mount_point, volume))
if exit_code or stderr:
raise IOError("Error in creating AFS mount point %s with quota %s and volume %s: exit_code=%s. Captured stdout:\n: %s\nCaptured stderr:\n: %s" % (mount_point, quota, volume, exit_code, stdout, stderr))
old_umask = os.umask(0o022)
os.makedirs(basedir)
os.umask(old_umask)
def _update_additional_info_files(self):
"""Update the hidden file in the document directory ... the file contains all links to records"""
try:
reclinks_fd = open("%s/.reclinks" % (self.basedir, ), "w")
reclinks_fd.write("RECID DOCNAME TYPE\n")
for link in self.bibrec_links:
reclinks_fd.write("%(recid)s %(docname)s %(doctype)s\n" % link)
reclinks_fd.close()
except Exception as e:
register_exception(alert_admin=True)
raise InvenioBibDocFileError, e
@staticmethod
def _retrieve_data(docid = None):
"""
Filling information about a document from the database entry
"""
container = {}
container["bibrec_links"] = []
container["id"] = docid
container["basedir"] = _make_base_dir(container["id"])
# retrieving links between records and documents
res = run_sql("SELECT id_bibrec, type, docname FROM bibrec_bibdoc WHERE id_bibdoc=%s", (str(docid),), 1)
if res:
for r in res:
container["bibrec_links"].append({"recid": r[0], "doctype": r[1], "docname": r[2]})
# gather the other information
res = run_sql("SELECT status, creation_date, modification_date, text_extraction_date, doctype, docname FROM bibdoc WHERE id=%s LIMIT 1", (docid,), 1)
if res:
container["status"] = res[0][0]
container["cd"] = res[0][1]
container["md"] = res[0][2]
container["td"] = res[0][3]
container["doctype"] = res[0][4]
container["storagename"] = res[0][5]
else:
# this bibdoc doesn't exist
raise InvenioBibDocFileError, "The docid %s does not exist." % docid
# retrieving all available formats
fprefix = container["storagename"] or "content"
try:
if CFG_BIBDOCFILE_ENABLE_BIBDOCFSINFO_CACHE:
## We take all extensions from the existing formats in the DB.
container["extensions"] = set([ext[0] for ext in run_sql("SELECT format FROM bibdocfsinfo WHERE id_bibdoc=%s", (docid, ))])
else:
## We take all the extensions by listing the directory content, stripping name
## and version.
container["extensions"] = set([fname[len(fprefix):].rsplit(";", 1)[0] for fname in filter(lambda x: x.startswith(fprefix), os.listdir(container["basedir"]))])
except OSError:
container["extensions"] = []
current_app.logger.warning("Could not retrieve available formats",
exc_info=True)
return container
@staticmethod
def create_instance(docid=None, recid=None, docname=None,
doctype='Fulltext', a_type = '', human_readable=False):
"""
Parameters of an attachment to the record:
a_type, recid, docname
@param a_type: type of the attachment to the record (by default Main)
@type a_type: string
@param doctype: type of the document itself (by default Fulltext)
@type doctype: string
"""
# first try to retrieve existing record based on obtained data
data = None
extensions = []
if docid is not None:
data = BibDoc._retrieve_data(docid)
doctype = data["doctype"]
extensions = data["extensions"]
# Loading an appropriate plugin (by default a generic BibDoc)
used_plugin = None
for plugin in get_plugins():
if plugin['supports'](doctype, extensions):
used_plugin = plugin
if not a_type:
a_type = doctype or 'Main'
if not docid:
rec_links = []
if recid:
rec_links.append({"rec_id": recid, "doc_name" : docname, "a_type": a_type})
if used_plugin and 'create_new' in used_plugin:
docid = used_plugin['create_new'](doctype, rec_links)
else:
docid = BibDoc.create_new_document(doctype, rec_links)
if used_plugin:
return used_plugin['create_instance'](docid=docid,
human_readable=human_readable,
initial_data=data)
return BibDoc(docid=docid,
human_readable=human_readable,
initial_data=data)
def attach_to_record(self, recid, a_type, docname):
""" Attaches given document to a record given by its identifier.
@param recid: the identifier of the record
@type recid: integer
@param a_type: function of a document in the record
@type a_type: string
@param docname: name of a document inside of a record
@type docname: string
"""
run_sql("INSERT INTO bibrec_bibdoc (id_bibrec, id_bibdoc, type, docname) VALUES (%s,%s,%s,%s)",
(str(recid), str(self.id), a_type, docname))
self._update_additional_info_files()
def __repr__(self):
"""
@return: the canonical string representation of the C{BibDoc}.
@rtype: string
"""
return 'BibDoc(%s, %s, %s)' % (repr(self.id), repr(self.doctype), repr(self.human_readable))
def format_recids(self):
"""Returns a string representation of related record ids"""
if len(self.bibrec_links) == 1:
return self.bibrec_links[0]["recid"]
return "[" + ",".join([str(el["recid"]) for el in self.bibrec_links]) + "]"
def __str__(self):
"""
@return: an easy-to-I{grep} string representation of the
whole C{BibDoc} content.
@rtype: string
"""
recids = self.format_recids()
out = '%s:%i:::doctype=%s\n' % (recids, self.id, self.doctype)
out += '%s:%i:::status=%s\n' % (recids, self.id, self.status)
out += '%s:%i:::basedir=%s\n' % (recids, self.id, self.basedir)
out += '%s:%i:::creation date=%s\n' % (recids, self.id, self.cd)
out += '%s:%i:::modification date=%s\n' % (recids, self.id, self.md)
out += '%s:%i:::text extraction date=%s\n' % (recids, self.id, self.td)
out += '%s:%i:::total file attached=%s\n' % (recids, self.id, len(self.docfiles))
if self.human_readable:
out += '%s:%i:::total size latest version=%s\n' % (recids, self.id, nice_size(self.get_total_size_latest_version()))
out += '%s:%i:::total size all files=%s\n' % (recids, self.id, nice_size(self.get_total_size()))
else:
out += '%s:%i:::total size latest version=%s\n' % (recids, self.id, self.get_total_size_latest_version())
out += '%s:%i:::total size all files=%s\n' % (recids, self.id, self.get_total_size())
for docfile in self.docfiles:
out += str(docfile)
return out
def get_md5s(self):
"""
@return: an instance of the Md5Folder class to access MD5 information
of the current BibDoc
@rtype: Md5Folder
"""
if self.__md5s is None:
self.__md5s = Md5Folder(self.basedir)
return self.__md5s
md5s = property(get_md5s)
def format_already_exists_p(self, docformat):
"""
@param format: a format to be checked.
@type format: string
@return: True if a file of the given format already exists among the
latest files.
@rtype: bool
"""
docformat = normalize_format(docformat)
for afile in self.list_latest_files():
if docformat == afile.get_format():
return True
return False
def get_status(self):
"""
@return: the status information.
@rtype: string
"""
return self.status
@staticmethod
def get_fileprefix(basedir, storagename=None):
fname = "%s" % (storagename or "content", )
return os.path.join(basedir, fname )
def get_filepath(self, docformat, version):
""" Generaters the path inside of the filesystem where the document should be stored.
@param format The format of the document
@type format string
@param version version to be stored in the file
@type version string
TODO: this should be completely replaced. File storage (and so, also path building)
should be abstracted from BibDoc and be using loadable extensions
@param format Format of the document to be stored
@type format string
@param version Version of the document to be stored
@type version String
@return Full path to the file encoding a particular version and format of the document
@trype string
"""
return "%s%s;%i" % (BibDoc.get_fileprefix(self.basedir, self.storagename), docformat, version)
def get_docname(self):
"""Obsolete !! (will return empty String for new format documents"""
return self.storagename
def get_doctype(self, recid):
"""Retrieves the type of this document in the scope of a given recid"""
link_types = [attachement["doctype"] for attachement in
self.bibrec_links
if str(attachement["recid"]) == str(recid)]
if link_types:
return link_types[0]
return ""
def touch(self, action=''):
"""
Update the modification time of the bibdoc (as in the UNIX command
C{touch}).
"""
run_sql('UPDATE bibdoc SET modification_date=NOW() WHERE id=%s', (self.id, ))
self.dirty = True
self.last_action = action
def change_doctype(self, new_doctype):
"""
Modify the doctype of a BibDoc
"""
run_sql('UPDATE bibdoc SET doctype=%s WHERE id=%s', (new_doctype, self.id))
run_sql('UPDATE bibrec_bibdoc SET type=%s WHERE id_bibdoc=%s', (new_doctype, self.id))
self.dirty = True
def set_status(self, new_status):
"""
Set a new status. A document with status information is a restricted
document that can be accessed only by users who have an authorization
for the I{viewrestrdoc} WebAccess action with keyword status and value
C{new_status}.
@param new_status: the new status. If empty the document will be
unrestricted.
@type new_status: string
@raise InvenioBibDocFileError: in case the reserved word
'DELETED' is used.
"""
if new_status != KEEP_OLD_VALUE:
if new_status == 'DELETED':
raise InvenioBibDocFileError('DELETED is a reserved word and can not be used for setting the status')
run_sql('UPDATE bibdoc SET status=%s WHERE id=%s', (new_status, self.id))
self.status = new_status
self.touch('status')
def add_file_new_version(self, filename, description=None, comment=None, docformat=None, flags=None, modification_date=None):
"""
Add a new version of a file. If no physical file is already attached
to the document, the given file will have version 1. Otherwise the
new file will have the current version number plus one.
@param filename: the local path of the file.
@type filename: string
@param description: an optional description for the file.
@type description: string
@param comment: an optional comment to the file.
@type comment: string
@param format: the extension of the file. If not specified it will
be retrieved from the filename (see L{decompose_file}).
@type format: string
@param flags: a set of flags to be associated with the file (see
L{CFG_BIBDOCFILE_AVAILABLE_FLAGS})
@type flags: list of string
@raise InvenioBibDocFileError: in case of error.
"""
latestVersion = self.get_latest_version()
if latestVersion == 0:
myversion = 1
else:
myversion = latestVersion + 1
if os.path.exists(filename):
if not os.path.getsize(filename) > 0:
raise InvenioBibDocFileError, "%s seems to be empty" % filename
if docformat is None:
docformat = decompose_file(filename)[2]
else:
docformat = normalize_format(docformat)
destination = self.get_filepath(docformat, myversion)
if run_sql("SELECT id_bibdoc FROM bibdocfsinfo WHERE id_bibdoc=%s AND version=%s AND format=%s", (self.id, myversion, docformat)):
raise InvenioBibDocFileError("According to the database a file of format %s is already attached to the docid %s" % (docformat, self.id))
try:
shutil.copyfile(filename, destination)
os.chmod(destination, 0o644)
if modification_date: # if the modification time of the file needs to be changed
update_modification_date_of_file(destination, modification_date)
except Exception as e:
register_exception()
raise InvenioBibDocFileError("Encountered an exception while copying '%s' to '%s': '%s'" % (filename, destination, e))
self.more_info.set_description(description, docformat, myversion)
self.more_info.set_comment(comment, docformat, myversion)
if flags is None:
flags = []
if 'pdfa' in get_subformat_from_format(docformat).split(';') and not 'PDF/A' in flags:
flags.append('PDF/A')
for flag in flags:
if flag == 'PERFORM_HIDE_PREVIOUS':
for afile in self.list_all_files():
docformat = afile.get_format()
version = afile.get_version()
if version < myversion:
self.more_info.set_flag('HIDDEN', docformat, myversion)
else:
self.more_info.set_flag(flag, docformat, myversion)
else:
raise InvenioBibDocFileError("'%s' does not exists!" % filename)
self.touch('newversion')
Md5Folder(self.basedir).update()
just_added_file = self.get_file(docformat, myversion)
run_sql("INSERT INTO bibdocfsinfo(id_bibdoc, version, format, last_version, cd, md, checksum, filesize, mime) VALUES(%s, %s, %s, true, %s, %s, %s, %s, %s)", (self.id, myversion, docformat, just_added_file.cd, just_added_file.md, just_added_file.get_checksum(), just_added_file.get_size(), just_added_file.mime))
run_sql("UPDATE bibdocfsinfo SET last_version=false WHERE id_bibdoc=%s AND version<%s", (self.id, myversion))
def add_file_new_format(self, filename, version=None, description=None, comment=None, docformat=None, flags=None, modification_date=None):
"""
Add a file as a new format.
@param filename: the local path of the file.
@type filename: string
@param version: an optional specific version to which the new format
should be added. If None, the last version will be used.
@type version: integer
@param description: an optional description for the file.
@type description: string
@param comment: an optional comment to the file.
@type comment: string
@param format: the extension of the file. If not specified it will
be retrieved from the filename (see L{decompose_file}).
@type format: string
@param flags: a set of flags to be associated with the file (see
L{CFG_BIBDOCFILE_AVAILABLE_FLAGS})
@type flags: list of string
@raise InvenioBibDocFileError: if the given format already exists.
"""
if version is None:
version = self.get_latest_version()
if version == 0:
version = 1
if os.path.exists(filename):
if not os.path.getsize(filename) > 0:
raise InvenioBibDocFileError, "%s seems to be empty" % filename
if docformat is None:
docformat = decompose_file(filename)[2]
else:
docformat = normalize_format(docformat)
if run_sql("SELECT id_bibdoc FROM bibdocfsinfo WHERE id_bibdoc=%s AND version=%s AND format=%s", (self.id, version, docformat)):
raise InvenioBibDocFileError("According to the database a file of format %s is already attached to the docid %s" % (docformat, self.id))
destination = self.get_filepath(docformat, version)
if os.path.exists(destination):
raise InvenioBibDocFileError, "A file for docid '%s' already exists for the format '%s'" % (str(self.id), docformat)
try:
shutil.copyfile(filename, destination)
os.chmod(destination, 0o644)
if modification_date: # if the modification time of the file needs to be changed
update_modification_date_of_file(destination, modification_date)
except Exception as e:
register_exception()
raise InvenioBibDocFileError, "Encountered an exception while copying '%s' to '%s': '%s'" % (filename, destination, e)
self.more_info.set_comment(comment, docformat, version)
self.more_info.set_description(description, docformat, version)
if flags is None:
flags = []
if 'pdfa' in get_subformat_from_format(docformat).split(';') and not 'PDF/A' in flags:
flags.append('PDF/A')
for flag in flags:
if flag != 'PERFORM_HIDE_PREVIOUS':
self.more_info.set_flag(flag, docformat, version)
else:
raise InvenioBibDocFileError, "'%s' does not exists!" % filename
Md5Folder(self.basedir).update()
self.touch('newformat')
just_added_file = self.get_file(docformat, version)
run_sql("INSERT INTO bibdocfsinfo(id_bibdoc, version, format, last_version, cd, md, checksum, filesize, mime) VALUES(%s, %s, %s, true, %s, %s, %s, %s, %s)", (self.id, version, docformat, just_added_file.cd, just_added_file.md, just_added_file.get_checksum(), just_added_file.get_size(), just_added_file.mime))
def change_docformat(self, oldformat, newformat):
"""
Renames a format name on disk and in all BibDoc structures.
The change will touch only the last version files.
The change will take place only if the newformat doesn't already exist.
@param oldformat: the format that needs to be renamed
@type oldformat: string
@param newformat: the format new name
@type newformat: string
"""
oldformat = normalize_format(oldformat)
newformat = normalize_format(newformat)
if self.format_already_exists_p(newformat):
# same format already exists in the latest files, abort
return
for bibdocfile in self.list_latest_files():
if bibdocfile.get_format() == oldformat:
# change format -> rename x.oldformat -> x.newformat
dirname, base, docformat, version = decompose_file_with_version(bibdocfile.get_full_path())
os.rename(bibdocfile.get_full_path(), os.path.join(dirname, '%s%s;%i' %(base, newformat, version)))
Md5Folder(self.basedir).update()
self.touch('rename')
self._sync_to_db()
return
def purge(self):
"""
Physically removes all the previous version of the given bibdoc.
Everything but the last formats will be erased.
"""
version = self.get_latest_version()
if version > 1:
for afile in self.docfiles:
if afile.get_version() < version:
self.more_info.unset_comment(afile.get_format(), afile.get_version())
self.more_info.unset_description(afile.get_format(), afile.get_version())
for flag in CFG_BIBDOCFILE_AVAILABLE_FLAGS:
self.more_info.unset_flag(flag, afile.get_format(), afile.get_version())
try:
os.remove(afile.get_full_path())
except Exception as dummy:
register_exception()
Md5Folder(self.basedir).update()
self.touch('purge')
run_sql("DELETE FROM bibdocfsinfo WHERE id_bibdoc=%s AND version<%s", (self.id, version))
def expunge(self):
"""
Physically remove all the traces of a given document.
@note: an expunged BibDoc object shouldn't be used anymore or the
results might be unpredictable.
"""
self.more_info.delete()
del self.more_info
os.system('rm -rf %s' % escape_shell_arg(self.basedir))
run_sql('DELETE FROM bibrec_bibdoc WHERE id_bibdoc=%s', (self.id, ))
run_sql('DELETE FROM bibdoc_bibdoc WHERE id_bibdoc1=%s OR id_bibdoc2=%s', (self.id, self.id))
run_sql('DELETE FROM bibdoc WHERE id=%s', (self.id, ))
run_sql('INSERT INTO hstDOCUMENT(action, docname, docformat, docversion, docsize, docchecksum, id_bibdoc, doctimestamp) VALUES("EXPUNGE", %s, %s, %s, %s, %s, %s, NOW())',
('', self.doctype, self.get_latest_version(), self.get_total_size_latest_version(), '', self.id, ))
run_sql('DELETE FROM bibdocfsinfo WHERE id_bibdoc=%s', (self.id, ))
del self._docfiles
del self.id
del self.cd
del self.md
del self.td
del self.basedir
del self.doctype
del self.bibrec_links
def revert(self, version):
"""
Revert the document to a given version. All the formats corresponding
to that version are copied forward to a new version.
@param version: the version to revert to.
@type version: integer
@raise InvenioBibDocFileError: in case of errors
"""
version = int(version)
docfiles = self.list_version_files(version)
if docfiles:
self.add_file_new_version(docfiles[0].get_full_path(), description=docfiles[0].get_description(), comment=docfiles[0].get_comment(), docformat=docfiles[0].get_format(), flags=docfiles[0].flags)
for docfile in docfiles[1:]:
self.add_file_new_format(docfile.filename, description=docfile.get_description(), comment=docfile.get_comment(), docformat=docfile.get_format(), flags=docfile.flags)
def import_descriptions_and_comments_from_marc(self, record=None):
"""
Import descriptions and comments from the corresponding MARC metadata.
@param record: the record (if None it will be calculated).
@type record: bibrecord recstruct
@note: If record is passed it is directly used, otherwise it is retrieved
from the MARCXML stored in the database.
"""
## Let's get the record
from invenio.legacy.search_engine import get_record
if record is None:
record = get_record(self.id)
fields = record_get_field_instances(record, '856', '4', ' ')
global_comment = None
global_description = None
local_comment = {}
local_description = {}
for field in fields:
url = field_get_subfield_values(field, 'u')
if url:
## Given a url
url = url[0]
if re.match('%s/%s/[0-9]+/files/' % (CFG_SITE_URL, CFG_SITE_RECORD), url):
## If it is a traditional /CFG_SITE_RECORD/1/files/ one
## We have global description/comment for all the formats
description = field_get_subfield_values(field, 'y')
if description:
global_description = description[0]
comment = field_get_subfield_values(field, 'z')
if comment:
global_comment = comment[0]
elif bibdocfile_url_p(url):
## Otherwise we have description/comment per format
dummy, docname, docformat = decompose_bibdocfile_url(url)
brd = BibRecDocs(self.id)
if docname == brd.get_docname(self.id):
description = field_get_subfield_values(field, 'y')
if description:
local_description[docformat] = description[0]
comment = field_get_subfield_values(field, 'z')
if comment:
local_comment[docformat] = comment[0]
## Let's update the tables
version = self.get_latest_version()
for docfile in self.list_latest_files():
docformat = docfile.get_format()
if docformat in local_comment:
self.set_comment(local_comment[docformat], docformat, version)
else:
self.set_comment(global_comment, docformat, version)
if docformat in local_description:
self.set_description(local_description[docformat], docformat, version)
else:
self.set_description(global_description, docformat, version)
self.dirty = True
def get_icon(self, subformat_re=CFG_BIBDOCFILE_ICON_SUBFORMAT_RE, display_hidden=True):
"""
@param subformat_re: by default the convention is that
L{CFG_BIBDOCFILE_ICON_SUBFORMAT_RE} is used as a subformat indicator to
mean that a particular format is to be used as an icon.
Specify a different subformat if you need to use a different
convention.
@type subformat_re: compiled regular expression
@return: the bibdocfile corresponding to CFG_BIBDOCFILE_DEFAULT_ICON_SUBFORMAT
or, if this does not exist, the smallest size icon of this
document, or None if no icon exists for this document.
@rtype: BibDocFile
@warning: before I{subformat} was introduced this method used to
return a BibDoc, while now it returns a BibDocFile. Check
that your client code is compatible with this.
"""
icons = []
for docfile in self.list_latest_files(list_hidden=display_hidden):
subformat = docfile.get_subformat()
if subformat.lower() == CFG_BIBDOCFILE_DEFAULT_ICON_SUBFORMAT.lower():
# If it's the default icon subformat, return it
return docfile
if subformat_re.match(subformat):
icons.append((docfile.get_size(), docfile))
if icons:
# Sort by size, retrieve the smallest one
icons.sort()
return icons[0][1]
return None
def add_icon(self, filename, docformat=None, subformat=CFG_BIBDOCFILE_DEFAULT_ICON_SUBFORMAT, modification_date=None):
"""
Attaches icon to this document.
@param filename: the local filesystem path to the icon.
@type filename: string
@param format: an optional format for the icon. If not specified it
will be derived from the filesystem path.
@type format: string
@param subformat: by default the convention is that
CFG_BIBDOCFILE_DEFAULT_ICON_SUBFORMAT is used as a subformat indicator to
mean that a particular format is to be used as an icon.
Specify a different subformat if you need to use a different
convention.
@type subformat: string
@raise InvenioBibDocFileError: in case of errors.
"""
#first check if an icon already exists
if not docformat:
docformat = decompose_file(filename)[2]
if subformat:
docformat += ";%s" % subformat
self.add_file_new_format(filename, docformat=docformat, modification_date=modification_date)
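# Icon handling sketch (the icon path is hypothetical): add_icon() stores the
# file as an extra format whose subformat marks it as an icon, and get_icon()
# later returns the corresponding BibDocFile (or None when no icon exists).
#   bibdoc.add_icon('/tmp/report-icon.png')
#   icon = bibdoc.get_icon()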
def delete_icon(self, subformat_re=CFG_BIBDOCFILE_ICON_SUBFORMAT_RE):
"""
Removes the icon attached to the document if it exists.
@param subformat_re: by default the convention is that
L{CFG_BIBDOCFILE_ICON_SUBFORMAT_RE} is used as a subformat indicator to
mean that a particular format is to be used as an icon.
Specify a different subformat if you need to use a different
convention.
@type subformat_re: compiled regular expression
"""
for docfile in self.list_latest_files():
if subformat_re.match(docfile.get_subformat()):
self.delete_file(docfile.get_format(), docfile.get_version())
def change_name(self, recid, newname):
"""
Renames this document in connection with a given record.
@param newname: the new name.
@type newname: string
@raise InvenioBibDocFileError: if the new name corresponds to
a document already attached to the record owning this document or
if the name was not changed.
"""
newname = normalize_docname(newname)
res = run_sql("SELECT id_bibdoc FROM bibrec_bibdoc WHERE id_bibrec=%s AND docname=%s", (recid, newname))
if res:
raise InvenioBibDocFileError("A bibdoc called %s already exists for recid %s" % (newname, recid))
updated = run_sql("update bibrec_bibdoc set docname=%s where id_bibdoc=%s and id_bibrec=%s", (newname, self.id, recid))
if not updated:
raise InvenioBibDocFileError("Docname for bibdoc %s in record %s was not changed" % (self.id, recid))
# docid is known, the document already exists
res2 = run_sql("SELECT id_bibrec, type, docname FROM bibrec_bibdoc WHERE id_bibdoc=%s", (self.id,))
## Refreshing names and types.
self.bibrec_types = [(r[0], r[1], r[2]) for r in res2 ] # just in case the result was behaving like tuples but was something else
if not res2:
# fake attachment
self.bibrec_types = [(0, None, "fake_name_for_unattached_document")]
self.touch('rename')
def set_comment(self, comment, docformat, version=None):
"""
Updates the comment of a specific format/version of the document.
@param comment: the new comment.
@type comment: string
@param format: the specific format for which the comment should be
updated.
@type format: string
@param version: the specific version for which the comment should be
updated. If not specified the last version will be used.
@type version: integer
"""
if version is None:
version = self.get_latest_version()
docformat = normalize_format(docformat)
self.more_info.set_comment(comment, docformat, version)
self.dirty = True
def set_description(self, description, docformat, version=None):
"""
Updates the description of a specific format/version of the document.
@param description: the new description.
@type description: string
@param format: the specific format for which the description should be
updated.
@type format: string
@param version: the specific version for which the description should be
updated. If not specified the last version will be used.
@type version: integer
"""
if version is None:
version = self.get_latest_version()
docformat = normalize_format(docformat)
self.more_info.set_description(description, docformat, version)
self.dirty = True
def set_flag(self, flagname, docformat, version=None):
"""
Sets a flag for a specific format/version of the document.
@param flagname: a flag from L{CFG_BIBDOCFILE_AVAILABLE_FLAGS}.
@type flagname: string
@param format: the specific format for which the flag should be
set.
@type format: string
@param version: the specific version for which the flag should be
set. If not specified the last version will be used.
@type version: integer
"""
if version is None:
version = self.get_latest_version()
docformat = normalize_format(docformat)
self.more_info.set_flag(flagname, docformat, version)
self.dirty = True
def has_flag(self, flagname, docformat, version=None):
"""
Checks if a particular flag for a format/version is set.
@param flagname: a flag from L{CFG_BIBDOCFILE_AVAILABLE_FLAGS}.
@type flagname: string
@param format: the specific format for which the flag should be
set.
@type format: string
@param version: the specific version for which the flag should be
set. If not specified the last version will be used.
@type version: integer
@return: True if the flag is set.
@rtype: bool
"""
if version is None:
version = self.get_latest_version()
docformat = normalize_format(docformat)
return self.more_info.has_flag(flagname, docformat, version)
def unset_flag(self, flagname, docformat, version=None):
"""
Unsets a flag for a specific format/version of the document.
@param flagname: a flag from L{CFG_BIBDOCFILE_AVAILABLE_FLAGS}.
@type flagname: string
@param format: the specific format for which the flag should be
unset.
@type format: string
@param version: the specific version for which the flag should be
unset. If not specified the last version will be used.
@type version: integer
"""
if version is None:
version = self.get_latest_version()
docformat = normalize_format(docformat)
self.more_info.unset_flag(flagname, docformat, version)
self.dirty = True
def get_comment(self, docformat, version=None):
"""
Retrieve the comment of a specific format/version of the document.
@param format: the specific format for which the comment should be
retrieved.
@type format: string
@param version: the specific version for which the comment should be
retrieved. If not specified the last version will be used.
@type version: integer
@return: the comment.
@rtype: string
"""
if version is None:
version = self.get_latest_version()
docformat = normalize_format(docformat)
return self.more_info.get_comment(docformat, version)
def get_description(self, docformat, version=None):
"""
Retrieve the description of a specific format/version of the document.
@param format: the specific format for which the description should be
retrieved.
@type format: string
@param version: the specific version for which the description should
be retrieved. If not specified the last version will be used.
@type version: integer
@return: the description.
@rtype: string
"""
if version is None:
version = self.get_latest_version()
docformat = normalize_format(docformat)
return self.more_info.get_description(docformat, version)
def hidden_p(self, docformat, version=None):
"""
Returns True if the file specified by the given format/version is
hidden.
@param format: the specific format for which the description should be
retrieved.
@type format: string
@param version: the specific version for which the description should
be retrieved. If not specified the last version will be used.
@type version: integer
@return: True if hidden.
@rtype: bool
"""
if version is None:
version = self.get_latest_version()
return self.more_info.has_flag('HIDDEN', docformat, version)
def get_base_dir(self):
"""
@return: the base directory on the local filesystem for this document
(e.g. C{/soft/cdsweb/var/data/files/g0/123})
@rtype: string
"""
return self.basedir
def get_type(self):
"""
@return: the type of this document.
@rtype: string"""
return self.doctype
def get_id(self):
"""
@return: the id of this document.
@rtype: integer
"""
return self.id
def get_file(self, docformat, version="", exact_docformat=False):
"""
Returns a L{BibDocFile} instance of this document corresponding to the
specific format and version.
@param format: the specific format.
@type format: string
@param version: the specific version of the file to retrieve.
If not specified the last version will be used.
@type version: integer
@param exact_docformat: if True, consider always the
complete docformat (including subformat if any)
@type exact_docformat: bool
@return: the L{BibDocFile} instance.
@rtype: BibDocFile
"""
if version == "":
docfiles = self.list_latest_files()
else:
version = int(version)
docfiles = self.list_version_files(version)
docformat = normalize_format(docformat)
for docfile in docfiles:
if (docfile.get_format() == docformat or not docformat):
return docfile
## Let's skip the subformat specification and consider just the
## superformat
if not exact_docformat:
superformat = get_superformat_from_format(docformat)
for docfile in docfiles:
if get_superformat_from_format(docfile.get_format()) == superformat:
return docfile
raise InvenioBibDocFileError("No file for doc %i of format '%s', version '%s'" % (self.id, docformat, version))
def list_versions(self):
"""
@return: the list of existing version numbers for this document.
@rtype: list of integer
"""
versions = []
for docfile in self.docfiles:
if not docfile.get_version() in versions:
versions.append(docfile.get_version())
versions.sort()
return versions
def delete(self, recid=None):
"""
Delete this document.
@see: L{undelete} for how to undelete the document.
@raise InvenioBibDocFileError: in case of errors.
"""
try:
today = datetime.today()
recids = []
if recid:
recids = [recid]
else:
recids = [link["recid"] for link in self.bibrec_links]
for rid in recids:
brd = BibRecDocs(rid)
docname = brd.get_docname(self.id)
# if the document is attached to some records
brd.change_name(docid=self.id, newname = 'DELETED-%s%s-%s' % (today.strftime('%Y%m%d%H%M%S'), today.microsecond, docname))
run_sql("UPDATE bibdoc SET status='DELETED' WHERE id=%s", (self.id,))
self.status = 'DELETED'
except Exception as e:
register_exception(alert_admin=True)
raise InvenioBibDocFileError, "It's impossible to delete bibdoc %s: %s" % (self.id, e)
def deleted_p(self):
"""
@return: True if this document has been deleted.
@rtype: bool
"""
return self.status == 'DELETED'
def empty_p(self):
"""
@return: True if this document is empty, i.e. it has no bibdocfile
connected.
@rtype: bool
"""
return len(self.docfiles) == 0
def undelete(self, previous_status='', recid=None):
"""
Undelete a deleted file (only if it was actually deleted via L{delete}).
The previous C{status}, i.e. the restriction key can be provided.
Otherwise the undeleted document will be public.
@param previous_status: the previous status that should be restored.
@type previous_status: string
@raise InvenioBibDocFileError: in case of any error.
"""
try:
run_sql("UPDATE bibdoc SET status=%s WHERE id=%s AND status='DELETED'", (previous_status, self.id))
except Exception as e:
raise InvenioBibDocFileError, "It's impossible to undelete bibdoc %s: %s" % (self.id, e)
if recid:
bibrecdocs = BibRecDocs(recid)
docname = bibrecdocs.get_docname(self.id)
if docname.startswith('DELETED-'):
try:
# Let's remove DELETED-20080214144322- in front of the docname
original_name = '-'.join(docname.split('-')[2:])
original_name = bibrecdocs.propose_unique_docname(original_name)
bibrecdocs.change_name(docid=self.id, newname=original_name)
except Exception as e:
raise InvenioBibDocFileError, "It's impossible to restore the previous docname %s. %s kept as docname because: %s" % (original_name, docname, e)
else:
raise InvenioBibDocFileError, "Strange just undeleted docname isn't called DELETED-somedate-docname but %s" % docname
def delete_file(self, docformat, version):
"""
Delete a specific format/version of this document on the filesystem.
@param format: the particular format to be deleted.
@type format: string
@param version: the particular version to be deleted.
@type version: integer
@note: this operation is not reversible!"""
try:
afile = self.get_file(docformat, version)
except InvenioBibDocFileError:
return
try:
os.remove(afile.get_full_path())
run_sql("DELETE FROM bibdocfsinfo WHERE id_bibdoc=%s AND version=%s AND format=%s", (self.id, afile.get_version(), afile.get_format()))
last_version = run_sql("SELECT max(version) FROM bibdocfsinfo WHERE id_bibdoc=%s", (self.id, ))[0][0]
if last_version:
## Updating information about last version
run_sql("UPDATE bibdocfsinfo SET last_version=true WHERE id_bibdoc=%s AND version=%s", (self.id, last_version))
run_sql("UPDATE bibdocfsinfo SET last_version=false WHERE id_bibdoc=%s AND version<>%s", (self.id, last_version))
except OSError:
pass
self.touch('delete')
def get_history(self):
"""
@return: a human readable and parsable string that represent the
history of this document.
@rtype: string
"""
ret = []
hst = run_sql("""SELECT action, docname, docformat, docversion,
docsize, docchecksum, doctimestamp
FROM hstDOCUMENT
WHERE id_bibdoc=%s ORDER BY doctimestamp ASC""", (self.id, ))
for row in hst:
ret.append("%s %s '%s', format: '%s', version: %i, size: %s, checksum: '%s'" % (row[6].strftime('%Y-%m-%d %H:%M:%S'), row[0], row[1], row[2], row[3], nice_size(row[4]), row[5]))
return ret
def _build_file_list(self, context=''):
"""
        Lists all files attached to the bibdoc. This function should be
        called every time the bibdoc is modified.
        As a side effect it logs everything that has happened to the bibdocfiles
        in the log facility, according to the context:
        "init": means that the function has been called for the first time
        by a constructor, hence no logging is performed;
        "": by default means to log every deleted file as deleted and every
        added file as added;
        "rename": means that every apparently deleted file is logged as
        RENAMEDFROM and every new file as RENAMEDTO.
"""
def log_action(action, docid, docname, docformat, version, size, checksum, timestamp=''):
"""Log an action into the bibdoclog table."""
try:
if timestamp:
run_sql('INSERT INTO hstDOCUMENT(action, id_bibdoc, docname, docformat, docversion, docsize, docchecksum, doctimestamp) VALUES(%s, %s, %s, %s, %s, %s, %s, %s)', (action, docid, docname, docformat, version, size, checksum, timestamp))
else:
run_sql('INSERT INTO hstDOCUMENT(action, id_bibdoc, docname, docformat, docversion, docsize, docchecksum, doctimestamp) VALUES(%s, %s, %s, %s, %s, %s, %s, NOW())', (action, docid, docname, docformat, version, size, checksum))
except DatabaseError:
register_exception()
def make_removed_added_bibdocfiles(previous_file_list):
"""Internal function for build the log of changed files."""
# Let's rebuild the previous situation
old_files = {}
for bibdocfile in previous_file_list:
old_files[(bibdocfile.name, bibdocfile.format, bibdocfile.version)] = (bibdocfile.size, bibdocfile.checksum, bibdocfile.md)
# Let's rebuild the new situation
new_files = {}
for bibdocfile in self._docfiles:
new_files[(bibdocfile.name, bibdocfile.format, bibdocfile.version)] = (bibdocfile.size, bibdocfile.checksum, bibdocfile.md)
            # Let's subtract from the added files all the files that are
            # present in the old list, and let's add to the deleted files
            # those that are not present in the new list.
added_files = dict(new_files)
deleted_files = {}
for key, value in iteritems(old_files):
if key in added_files:
del added_files[key]
else:
deleted_files[key] = value
return (added_files, deleted_files)
        if context not in ('init', 'init_from_disk'):
previous_file_list = list(self._docfiles)
res = run_sql("SELECT status, creation_date,"
"modification_date FROM bibdoc WHERE id=%s", (self.id,))
self.cd = res[0][1]
self.md = res[0][2]
self.status = res[0][0]
self.more_info = BibDocMoreInfo(self.id)
self._docfiles = []
if CFG_BIBDOCFILE_ENABLE_BIBDOCFSINFO_CACHE and context == 'init':
## In normal init context we read from DB
res = run_sql("SELECT version, format, cd, md, checksum, filesize FROM bibdocfsinfo WHERE id_bibdoc=%s", (self.id, ))
for version, docformat, cd, md, checksum, size in res:
filepath = self.get_filepath(docformat, version)
self._docfiles.append(BibDocFile(
filepath, self.bibrec_types,
version, docformat, self.id, self.status, checksum,
self.more_info, human_readable=self.human_readable, cd=cd, md=md, size=size, bibdoc=self))
else:
if os.path.exists(self.basedir):
files = os.listdir(self.basedir)
files.sort()
for afile in files:
if not afile.startswith('.'):
try:
filepath = os.path.join(self.basedir, afile)
dummy, dummy, docformat, fileversion = decompose_file_with_version(filepath)
checksum = self.md5s.get_checksum(afile)
self._docfiles.append(BibDocFile(filepath, self.bibrec_types,
fileversion, docformat,
self.id, self.status, checksum,
self.more_info, human_readable=self.human_readable, bibdoc=self))
except Exception as e:
register_exception()
raise InvenioBibDocFileError, e
if context in ('init', 'init_from_disk'):
return
else:
added_files, deleted_files = make_removed_added_bibdocfiles(previous_file_list)
deletedstr = "DELETED"
addedstr = "ADDED"
if context == 'rename':
deletedstr = "RENAMEDFROM"
addedstr = "RENAMEDTO"
for (docname, docformat, version), (size, checksum, md) in iteritems(added_files):
if context == 'rename':
md = '' # No modification time
log_action(addedstr, self.id, docname, docformat, version, size, checksum, md)
for (docname, docformat, version), (size, checksum, md) in iteritems(deleted_files):
if context == 'rename':
md = '' # No modification time
log_action(deletedstr, self.id, docname, docformat, version, size, checksum, md)
def _sync_to_db(self):
"""
Update the content of the bibdocfile table by taking what is available on the filesystem.
"""
self._build_file_list('init_from_disk')
run_sql("DELETE FROM bibdocfsinfo WHERE id_bibdoc=%s", (self.id,))
for afile in self.docfiles:
run_sql("INSERT INTO bibdocfsinfo(id_bibdoc, version, format, last_version, cd, md, checksum, filesize, mime) VALUES(%s, %s, %s, false, %s, %s, %s, %s, %s)", (self.id, afile.get_version(), afile.get_format(), afile.cd, afile.md, afile.get_checksum(), afile.get_size(), afile.mime))
run_sql("UPDATE bibdocfsinfo SET last_version=true WHERE id_bibdoc=%s AND version=%s", (self.id, self.get_latest_version()))
def _build_related_file_list(self):
"""Lists all files attached to the bibdoc. This function should be
called everytime the bibdoc is modified within e.g. its icon.
@deprecated: use subformats instead.
"""
self.related_files = {}
res = run_sql("SELECT ln.id_bibdoc2,ln.rel_type,bibdoc.status FROM "
"bibdoc_bibdoc AS ln,bibdoc WHERE bibdoc.id=ln.id_bibdoc2 AND "
"ln.id_bibdoc1=%s", (str(self.id),))
for row in res:
docid = row[0]
doctype = row[1]
if row[2] != 'DELETED':
if doctype not in self.related_files:
self.related_files[doctype] = []
cur_doc = BibDoc.create_instance(docid=docid, human_readable=self.human_readable)
self.related_files[doctype].append(cur_doc)
def get_total_size_latest_version(self):
"""Return the total size used on disk of all the files belonging
to this bibdoc and corresponding to the latest version."""
ret = 0
for bibdocfile in self.list_latest_files():
ret += bibdocfile.get_size()
return ret
def get_total_size(self):
"""Return the total size used on disk of all the files belonging
to this bibdoc."""
ret = 0
for bibdocfile in self.list_all_files():
ret += bibdocfile.get_size()
return ret
def list_all_files(self, list_hidden=True):
"""Returns all the docfiles linked with the given bibdoc."""
if list_hidden:
return self.docfiles
else:
return [afile for afile in self.docfiles if not afile.hidden_p()]
def list_latest_files(self, list_hidden=True):
"""Returns all the docfiles within the last version."""
return self.list_version_files(self.get_latest_version(), list_hidden=list_hidden)
def list_version_files(self, version, list_hidden=True):
"""Return all the docfiles of a particular version."""
version = int(version)
return [docfile for docfile in self.docfiles if docfile.get_version() == version and (list_hidden or not docfile.hidden_p())]
def get_latest_version(self):
""" Returns the latest existing version number for the given bibdoc.
        If no file is associated with this bibdoc, returns 0.
"""
version = 0
for bibdocfile in self.docfiles:
if bibdocfile.get_version() > version:
version = bibdocfile.get_version()
return version
def get_file_number(self):
"""Return the total number of files."""
return len(self.docfiles)
def register_download(self, ip_address, version, docformat, userid=0, recid=0):
"""Register the information about a download of a particular file."""
docformat = normalize_format(docformat)
if docformat[:1] == '.':
docformat = docformat[1:]
docformat = docformat.upper()
if not version:
version = self.get_latest_version()
return run_sql("INSERT INTO rnkDOWNLOADS "
"(id_bibrec,id_bibdoc,file_version,file_format,"
"id_user,client_host,download_time) VALUES "
"(%s,%s,%s,%s,%s,INET_ATON(%s),NOW())",
(recid, self.id, version, docformat,
userid, ip_address,))
def get_incoming_relations(self, rel_type=None):
"""Return all relations in which this BibDoc appears on target position
@param rel_type: Type of the relation, to which we want to limit our search. None = any type
@type rel_type: string
@return: List of BibRelation instances
@rtype: list
"""
return BibRelation.get_relations(rel_type = rel_type,
bibdoc2_id = self.id)
def get_outgoing_relations(self, rel_type=None):
"""Return all relations in which this BibDoc appears on target position
@param rel_type: Type of the relation, to which we want to limit our search. None = any type
@type rel_type: string
@return: List of BibRelation instances
@rtype: list
"""
return BibRelation.get_relations(rel_type = rel_type,
bibdoc1_id = self.id)
def create_outgoing_relation(self, bibdoc2, rel_type):
"""
Create an outgoing relation between current BibDoc and a different one
"""
return BibRelation.create(bibdoc1_id = self.id, bibdoc2_id = bibdoc2.id, rel_type = rel_type)
def create_incoming_relation(self, bibdoc1, rel_type):
"""
        Create an incoming relation between a different BibDoc and the
        current BibDoc
"""
return BibRelation.create(bibdoc1_id = bibdoc1.id, bibdoc2_id = self.id, rel_type = rel_type)
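## Illustrative sketch: linking two existing BibDoc objects with a relation.
## ``main_doc`` and ``extracted_doc`` are assumed to be BibDoc instances and
## 'is_extracted_from' an application-defined relation type.
#   rel = main_doc.create_outgoing_relation(extracted_doc, 'is_extracted_from')
#   assert extracted_doc.get_incoming_relations(rel_type='is_extracted_from')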
def generic_path2bidocfile(fullpath):
"""
    Returns a BibDocFile object that wraps the given fullpath.
@note: the object will contain the minimum information that can be
guessed from the fullpath (e.g. docname, format, subformat, version,
md5, creation_date, modification_date). It won't contain for example
a comment, a description, a doctype, a restriction.
"""
fullpath = os.path.abspath(fullpath)
try:
path, name, docformat, version = decompose_file_with_version(fullpath)
except ValueError:
## There is no version
version = 0
path, name, docformat = decompose_file(fullpath)
md5folder = Md5Folder(path)
checksum = md5folder.get_checksum(os.path.basename(fullpath))
return BibDocFile(fullpath=fullpath,
recid_doctypes=[(0, None, name)],
version=version,
docformat=docformat,
docid=0,
status=None,
checksum=checksum,
more_info=None)
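## Illustrative sketch (hypothetical path): wrapping an arbitrary file on disk
## without any database interaction.
#   docfile = generic_path2bidocfile('/tmp/demo/report.pdf;1')
#   print("%s v%s md5=%s" % (docfile.get_format(), docfile.get_version(),
#                            docfile.get_checksum()))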
class BibDocFile(object):
"""This class represents a physical file in the Invenio filesystem.
It should never be instantiated directly"""
def __init__(self, fullpath, recid_doctypes, version, docformat, docid, status, checksum, more_info=None, human_readable=False, cd=None, md=None, size=None, bibdoc = None):
self.fullpath = os.path.abspath(fullpath)
self.docid = docid
self.recids_doctypes = recid_doctypes
self.version = version
self.status = status
self.checksum = checksum
self.human_readable = human_readable
self.name = recid_doctypes[0][2]
self.bibdoc = bibdoc
if more_info:
self.description = more_info.get_description(docformat, version)
self.comment = more_info.get_comment(docformat, version)
self.flags = more_info.get_flags(docformat, version)
else:
self.description = None
self.comment = None
self.flags = []
self.format = normalize_format(docformat)
self.superformat = get_superformat_from_format(self.format)
self.subformat = get_subformat_from_format(self.format)
if docformat:
self.recids_doctypes = [(a,b,c+self.superformat) for (a,b,c) in self.recids_doctypes]
self.mime, self.encoding = _mimes.guess_type(self.recids_doctypes[0][2])
if self.mime is None:
self.mime = "application/octet-stream"
self.more_info = more_info
self.hidden = 'HIDDEN' in self.flags
self.size = size or os.path.getsize(fullpath)
self.md = md or datetime.fromtimestamp(os.path.getmtime(fullpath))
try:
self.cd = cd or datetime.fromtimestamp(os.path.getctime(fullpath))
except OSError:
self.cd = self.md
self.dir = os.path.dirname(fullpath)
if self.subformat:
self.url = create_url('%s/%s/%s/files/%s%s' % (CFG_SITE_URL, CFG_SITE_RECORD, self.recids_doctypes[0][0], self.name, self.superformat), {'subformat' : self.subformat})
self.fullurl = create_url('%s/%s/%s/files/%s%s' % (CFG_SITE_URL, CFG_SITE_RECORD, self.recids_doctypes[0][0], self.name, self.superformat), {'subformat' : self.subformat, 'version' : self.version})
else:
self.url = create_url('%s/%s/%s/files/%s%s' % (CFG_SITE_URL, CFG_SITE_RECORD, self.recids_doctypes[0][0], self.name, self.superformat), {})
self.fullurl = create_url('%s/%s/%s/files/%s%s' % (CFG_SITE_URL, CFG_SITE_RECORD, self.recids_doctypes[0][0], self.name, self.superformat), {'version' : self.version})
self.etag = '"%i%s%i"' % (self.docid, self.format, self.version)
self.magic = None
def __repr__(self):
return ('BibDocFile(%s, %i, %s, %s, %i, %i, %s, %s, %s, %s)' % (repr(self.fullpath), self.version, repr(self.name), repr(self.format), self.recids_doctypes[0][0], self.docid, repr(self.status), repr(self.checksum), repr(self.more_info), repr(self.human_readable)))
def format_recids(self):
if self.bibdoc:
return self.bibdoc.format_recids()
return "0"
def __str__(self):
recids = self.format_recids()
out = '%s:%s:%s:%s:fullpath=%s\n' % (recids, self.docid, self.version, self.format, self.fullpath)
out += '%s:%s:%s:%s:name=%s\n' % (recids, self.docid, self.version, self.format, self.name)
out += '%s:%s:%s:%s:subformat=%s\n' % (recids, self.docid, self.version, self.format, get_subformat_from_format(self.format))
out += '%s:%s:%s:%s:status=%s\n' % (recids, self.docid, self.version, self.format, self.status)
out += '%s:%s:%s:%s:checksum=%s\n' % (recids, self.docid, self.version, self.format, self.checksum)
if self.human_readable:
out += '%s:%s:%s:%s:size=%s\n' % (recids, self.docid, self.version, self.format, nice_size(self.size))
else:
out += '%s:%s:%s:%s:size=%s\n' % (recids, self.docid, self.version, self.format, self.size)
out += '%s:%s:%s:%s:creation time=%s\n' % (recids, self.docid, self.version, self.format, self.cd)
out += '%s:%s:%s:%s:modification time=%s\n' % (recids, self.docid, self.version, self.format, self.md)
out += '%s:%s:%s:%s:magic=%s\n' % (recids, self.docid, self.version, self.format, self.get_magic())
out += '%s:%s:%s:%s:mime=%s\n' % (recids, self.docid, self.version, self.format, self.mime)
out += '%s:%s:%s:%s:encoding=%s\n' % (recids, self.docid, self.version, self.format, self.encoding)
out += '%s:%s:%s:%s:url=%s\n' % (recids, self.docid, self.version, self.format, self.url)
out += '%s:%s:%s:%s:fullurl=%s\n' % (recids, self.docid, self.version, self.format, self.fullurl)
out += '%s:%s:%s:%s:description=%s\n' % (recids, self.docid, self.version, self.format, self.description)
out += '%s:%s:%s:%s:comment=%s\n' % (recids, self.docid, self.version, self.format, self.comment)
out += '%s:%s:%s:%s:hidden=%s\n' % (recids, self.docid, self.version, self.format, self.hidden)
out += '%s:%s:%s:%s:flags=%s\n' % (recids, self.docid, self.version, self.format, self.flags)
out += '%s:%s:%s:%s:etag=%s\n' % (recids, self.docid, self.version, self.format, self.etag)
return out
def is_restricted(self, user_info):
"""Returns restriction state. (see acc_authorize_action return values)"""
if self.status not in ('', 'DELETED'):
return check_bibdoc_authorization(user_info, status=self.status)
elif self.status == 'DELETED':
            return (1, 'File has been deleted')
else:
return (0, '')
def is_icon(self, subformat_re=CFG_BIBDOCFILE_ICON_SUBFORMAT_RE):
"""
@param subformat_re: by default the convention is that
L{CFG_BIBDOCFILE_ICON_SUBFORMAT_RE} is used as a subformat indicator to
mean that a particular format is to be used as an icon.
        Specify a different subformat if you need to use a different
        convention.
        @type subformat_re: compiled regular expression
@return: True if this file is an icon.
@rtype: bool
"""
return bool(subformat_re.match(self.subformat))
def hidden_p(self):
return self.hidden
def get_url(self):
return self.url
def get_type(self):
"""Returns the first type connected with the bibdoc of this file."""
return self.recids_doctypes[0][1]
def get_path(self):
return self.fullpath
def get_bibdocid(self):
return self.docid
def get_name(self):
return self.name
def get_full_name(self):
"""Returns the first name connected with the bibdoc of this file."""
return self.recids_doctypes[0][2]
def get_full_path(self):
return self.fullpath
def get_format(self):
return self.format
def get_subformat(self):
return self.subformat
def get_superformat(self):
return self.superformat
def get_size(self):
return self.size
def get_version(self):
return self.version
def get_checksum(self):
return self.checksum
def get_description(self):
return self.description
def get_comment(self):
return self.comment
def get_content(self):
"""Returns the binary content of the file."""
content_fd = open(self.fullpath, 'rb')
content = content_fd.read()
content_fd.close()
return content
def get_recid(self):
"""Returns the first recid connected with the bibdoc of this file."""
return self.recids_doctypes[0][0]
def get_status(self):
"""Returns the status of the file, i.e. either '', 'DELETED' or a
restriction keyword."""
return self.status
def get_magic(self):
"""Return all the possible guesses from the magic library about
the content of the file."""
if self.magic is None:
if CFG_HAS_MAGIC == 1:
magic_cookies = _get_magic_cookies()
magic_result = []
for key in magic_cookies.keys():
magic_result.append(magic_cookies[key].file(self.fullpath))
self.magic = tuple(magic_result)
elif CFG_HAS_MAGIC == 2:
magic_result = []
for key in ({'mime': False, 'mime_encoding': False},
{'mime': True, 'mime_encoding': False},
{'mime': False, 'mime_encoding': True}):
magic_result.append(_magic_wrapper(self.fullpath, **key))
self.magic = tuple(magic_result)
return self.magic
def check(self):
"""Return True if the checksum corresponds to the file."""
return calculate_md5(self.fullpath) == self.checksum
def stream(self, req, download=False):
"""Stream the file. Note that no restriction check is being
done here, since restrictions have been checked previously
inside websubmit_webinterface.py."""
if os.path.exists(self.fullpath):
if random.random() < CFG_BIBDOCFILE_MD5_CHECK_PROBABILITY and calculate_md5(self.fullpath) != self.checksum:
raise InvenioBibDocFileError, "File %s, version %i, is corrupted!" % (self.recids_doctypes[0][2], self.version)
stream_file(req, self.fullpath, "%s%s" % (self.name, self.superformat), self.mime, self.encoding, self.etag, self.checksum, self.fullurl, download=download)
raise apache.SERVER_RETURN, apache.DONE
else:
req.status = apache.HTTP_NOT_FOUND
raise InvenioBibDocFileError, "%s does not exists!" % self.fullpath
_RE_STATUS_PARSER = re.compile(r'^(?P<type>email|group|egroup|role|firerole|status):\s*(?P<value>.*)$', re.S + re.I)
def check_bibdoc_authorization(user_info, status):
"""
Check if the user is authorized to access a document protected with the given status.
L{status} is a string of the form::
auth_type: auth_value
where C{auth_type} can have values in::
email, group, role, firerole, status
    and C{auth_value} has a value interpreted against C{auth_type}:
- C{email}: the user can access the document if his/her email matches C{auth_value}
- C{group}: the user can access the document if one of the groups (local or
external) of which he/she is member matches C{auth_value}
- C{role}: the user can access the document if he/she belongs to the WebAccess
role specified in C{auth_value}
- C{firerole}: the user can access the document if he/she is implicitly matched
by the role described by the firewall like role definition in C{auth_value}
    - C{status}: the user can access the document if he/she is authorized
      for the action C{viewrestrdoc} with the C{status} parameter having value
      C{auth_value}
@note: If no C{auth_type} is specified or if C{auth_type} is not one of the
above, C{auth_value} will be set to the value contained in the
parameter C{status}, and C{auth_type} will be considered to be C{status}.
@param user_info: the user_info dictionary
    @type user_info: dict
@param status: the status of the document.
@type status: string
@return: a tuple, of the form C{(auth_code, auth_message)} where auth_code is 0
if the authorization is granted and greater than 0 otherwise.
@rtype: (int, string)
@raise ValueError: in case of unexpected parsing error.
"""
if not status:
return (0, CFG_WEBACCESS_WARNING_MSGS[0])
def parse_status(status):
g = _RE_STATUS_PARSER.match(status)
if g:
return (g.group('type').lower(), g.group('value'))
else:
return ('status', status)
if acc_is_user_in_role(user_info, acc_get_role_id(SUPERADMINROLE)):
return (0, CFG_WEBACCESS_WARNING_MSGS[0])
auth_type, auth_value = parse_status(status)
if auth_type == 'status':
return acc_authorize_action(user_info, 'viewrestrdoc', status=auth_value)
elif auth_type == 'email':
if not auth_value.lower().strip() == user_info['email'].lower().strip():
            return (1, 'You must be the user with email %s in order to access this document' % repr(auth_value))
elif auth_type == 'group':
if not auth_value in user_info['group']:
            return (1, 'You must be a member of the group %s in order to access this document' % repr(auth_value))
elif auth_type == 'role':
if not acc_is_user_in_role(user_info, acc_get_role_id(auth_value)):
            return (1, 'You must be a member of the role %s in order to access this document' % repr(auth_value))
elif auth_type == 'firerole':
if not acc_firerole_check_user(user_info, compile_role_definition(auth_value)):
return (1, 'You must be authorized in order to access this document')
else:
raise ValueError, 'Unexpected authorization type %s for %s' % (repr(auth_type), repr(auth_value))
return (0, CFG_WEBACCESS_WARNING_MSGS[0])
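## Illustrative sketch: typical ``status`` values and how they are checked.
## ``user_info`` is assumed to be the dictionary normally produced by
## collect_user_info(req) in the web layer; the firerole definition shown is
## only indicative.
#   check_bibdoc_authorization(user_info, '')                           # public document
#   check_bibdoc_authorization(user_info, 'email: john.doe@example.org')
#   check_bibdoc_authorization(user_info, 'firerole: allow any')
#   auth_code, auth_msg = check_bibdoc_authorization(user_info, 'role: curator')
#   if auth_code != 0:
#       pass  # access denied; auth_msg explains why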
## TODO for future reimplementation of stream_file
#class StreamFileException(Exception):
# def __init__(self, value):
# self.value = value
_RE_BAD_MSIE = re.compile("MSIE\s+(\d+\.\d+)")
def stream_file(req, fullpath, fullname=None, mime=None, encoding=None, etag=None, md5str=None, location=None, download=False):
"""This is a generic function to stream a file to the user.
If fullname, mime, encoding, and location are not provided they will be
guessed based on req and fullpath.
    md5str should be passed as a hexadecimal string.
"""
## TODO for future reimplementation of stream_file
# from flask import send_file
# if fullname is None:
# fullname = fullpath.split('/')[-1]
# response = send_file(fullpath,
# attachment_filename=fullname.replace('"', '\\"'),
# as_attachment=False)
# if not download:
# response.headers['Content-Disposition'] = 'inline; filename="%s"' % fullname.replace('"', '\\"')
#
# raise StreamFileException(response)
def normal_streaming(size):
req.set_content_length(size)
req.send_http_header()
if req.method != 'HEAD':
req.sendfile(fullpath)
return ""
def single_range(size, the_range):
req.set_content_length(the_range[1])
req.headers_out['Content-Range'] = 'bytes %d-%d/%d' % (the_range[0], the_range[0] + the_range[1] - 1, size)
req.status = apache.HTTP_PARTIAL_CONTENT
req.send_http_header()
if req.method != 'HEAD':
req.sendfile(fullpath, the_range[0], the_range[1])
return ""
def multiple_ranges(size, ranges, mime):
req.status = apache.HTTP_PARTIAL_CONTENT
boundary = '%s%04d' % (time.strftime('THIS_STRING_SEPARATES_%Y%m%d%H%M%S'), random.randint(0, 9999))
req.content_type = 'multipart/byteranges; boundary=%s' % boundary
content_length = 0
for arange in ranges:
content_length += len('--%s\r\n' % boundary)
content_length += len('Content-Type: %s\r\n' % mime)
content_length += len('Content-Range: bytes %d-%d/%d\r\n' % (arange[0], arange[0] + arange[1] - 1, size))
content_length += len('\r\n')
content_length += arange[1]
content_length += len('\r\n')
content_length += len('--%s--\r\n' % boundary)
req.set_content_length(content_length)
req.send_http_header()
if req.method != 'HEAD':
for arange in ranges:
req.write('--%s\r\n' % boundary, 0)
req.write('Content-Type: %s\r\n' % mime, 0)
req.write('Content-Range: bytes %d-%d/%d\r\n' % (arange[0], arange[0] + arange[1] - 1, size), 0)
req.write('\r\n', 0)
req.sendfile(fullpath, arange[0], arange[1])
req.write('\r\n', 0)
req.write('--%s--\r\n' % boundary)
req.flush()
return ""
def parse_date(date):
"""According to <http://www.w3.org/Protocols/rfc2616/rfc2616-sec3.html#sec3.3>
a date can come in three formats (in order of preference):
Sun, 06 Nov 1994 08:49:37 GMT ; RFC 822, updated by RFC 1123
Sunday, 06-Nov-94 08:49:37 GMT ; RFC 850, obsoleted by RFC 1036
Sun Nov 6 08:49:37 1994 ; ANSI C's asctime() format
        Moreover IE adds some trailing information after a ';'.
        Wrong dates are simply ignored.
        This function returns the time in seconds since the epoch GMT or None
in case of errors."""
if not date:
return None
try:
date = date.split(';')[0].strip() # Because of IE
## Sun, 06 Nov 1994 08:49:37 GMT
return time.mktime(time.strptime(date, '%a, %d %b %Y %X %Z'))
except:
try:
                ## Sunday, 06-Nov-94 08:49:37 GMT
                return time.mktime(time.strptime(date, '%A, %d-%b-%y %H:%M:%S %Z'))
except:
try:
                    ## Sun Nov  6 08:49:37 1994 (ANSI C asctime())
                    return time.mktime(time.strptime(date))
except:
return None
def parse_ranges(ranges):
"""According to <http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35>
a (multiple) range request comes in the form:
bytes=20-30,40-60,70-,-80
with the meaning:
        from byte 20 to 30 inclusive (11 bytes)
        from byte 40 to 60 inclusive (21 bytes)
from byte 70 to (size - 1) inclusive (size - 70 bytes)
from byte size - 80 to (size - 1) inclusive (80 bytes)
This function will return the list of ranges in the form:
[[first_byte, last_byte], ...]
If first_byte or last_byte aren't specified they'll be set to None
If the list is not well formatted it will return None
"""
try:
if ranges.startswith('bytes') and '=' in ranges:
ranges = ranges.split('=')[1].strip()
else:
return None
ret = []
for arange in ranges.split(','):
arange = arange.strip()
if arange.startswith('-'):
ret.append([None, int(arange[1:])])
elif arange.endswith('-'):
ret.append([int(arange[:-1]), None])
else:
ret.append(map(int, arange.split('-')))
return ret
except:
return None
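    ## For instance (illustrative): parse_ranges('bytes=0-499,-200,9500-')
    ## returns [[0, 499], [None, 200], [9500, None]]; fix_ranges() below then
    ## turns those entries into (first_byte, length) pairs clipped to the
    ## actual file size.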
def parse_tags(tags):
"""Return a list of tags starting from a comma separated list."""
return [tag.strip() for tag in tags.split(',')]
def fix_ranges(ranges, size):
"""Complementary to parse_ranges it will transform all the ranges
into (first_byte, length), adjusting all the value based on the
actual size provided.
"""
ret = []
for arange in ranges:
if (arange[0] is None and arange[1] > 0) or arange[0] < size:
if arange[0] is None:
arange[0] = size - arange[1]
elif arange[1] is None:
arange[1] = size - arange[0]
else:
arange[1] = arange[1] - arange[0] + 1
arange[0] = max(0, arange[0])
arange[1] = min(size - arange[0], arange[1])
if arange[1] > 0:
ret.append(arange)
return ret
def get_normalized_headers():
"""Strip and lowerize all the keys of the headers dictionary plus
strip, lowerize and transform known headers value into their value."""
ret = {
'if-match' : None,
'unless-modified-since' : None,
'if-modified-since' : None,
'range' : None,
'if-range' : None,
'if-none-match' : None,
}
for key, value in iteritems(req.headers_in):
key = key.strip().lower()
value = value.strip()
if key in ('unless-modified-since', 'if-modified-since'):
value = parse_date(value)
elif key == 'range':
value = parse_ranges(value)
elif key == 'if-range':
value = parse_date(value) or parse_tags(value)
elif key in ('if-match', 'if-none-match'):
value = parse_tags(value)
if value:
ret[key] = value
return ret
headers = get_normalized_headers()
g = _RE_BAD_MSIE.search(headers.get('user-agent', "MSIE 6.0"))
bad_msie = g and float(g.group(1)) < 9.0
if CFG_BIBDOCFILE_USE_XSENDFILE:
## If XSendFile is supported by the server, let's use it.
if os.path.exists(fullpath):
if fullname is None:
fullname = os.path.basename(fullpath)
if bad_msie:
## IE is confused by quotes
req.headers_out["Content-Disposition"] = 'attachment; filename=%s' % fullname.replace('"', '\\"')
elif download:
req.headers_out["Content-Disposition"] = 'attachment; filename="%s"' % fullname.replace('"', '\\"')
else:
## IE is confused by inline
req.headers_out["Content-Disposition"] = 'inline; filename="%s"' % fullname.replace('"', '\\"')
req.headers_out["X-Sendfile"] = fullpath
if mime is None:
(mime, encoding) = _mimes.guess_type(fullpath)
if mime is None:
mime = "application/octet-stream"
if not bad_msie:
## IE is confused by not supported mimetypes
req.content_type = mime
return ""
else:
raise apache.SERVER_RETURN, apache.HTTP_NOT_FOUND
if headers['if-match']:
if etag is not None and etag not in headers['if-match']:
raise apache.SERVER_RETURN, apache.HTTP_PRECONDITION_FAILED
if os.path.exists(fullpath):
mtime = os.path.getmtime(fullpath)
if fullname is None:
fullname = os.path.basename(fullpath)
if mime is None:
(mime, encoding) = _mimes.guess_type(fullpath)
if mime is None:
mime = "application/octet-stream"
if location is None:
location = req.uri
if not bad_msie:
## IE is confused by not supported mimetypes
req.content_type = mime
req.encoding = encoding
req.filename = fullname
req.headers_out["Last-Modified"] = time.strftime('%a, %d %b %Y %X GMT', time.gmtime(mtime))
if CFG_ENABLE_HTTP_RANGE_REQUESTS:
req.headers_out["Accept-Ranges"] = "bytes"
else:
req.headers_out["Accept-Ranges"] = "none"
req.headers_out["Content-Location"] = location
if etag is not None:
req.headers_out["ETag"] = etag
if md5str is not None:
req.headers_out["Content-MD5"] = base64.encodestring(binascii.unhexlify(md5str.upper()))[:-1]
if bad_msie:
## IE is confused by quotes
req.headers_out["Content-Disposition"] = 'attachment; filename=%s' % fullname.replace('"', '\\"')
elif download:
req.headers_out["Content-Disposition"] = 'attachment; filename="%s"' % fullname.replace('"', '\\"')
else:
## IE is confused by inline
req.headers_out["Content-Disposition"] = 'inline; filename="%s"' % fullname.replace('"', '\\"')
size = os.path.getsize(fullpath)
if not size:
try:
raise Exception, '%s exists but is empty' % fullpath
except Exception:
register_exception(req=req, alert_admin=True)
raise apache.SERVER_RETURN, apache.HTTP_NOT_FOUND
if headers['if-modified-since'] and headers['if-modified-since'] >= mtime:
raise apache.SERVER_RETURN, apache.HTTP_NOT_MODIFIED
if headers['if-none-match']:
if etag is not None and etag in headers['if-none-match']:
raise apache.SERVER_RETURN, apache.HTTP_NOT_MODIFIED
if headers['unless-modified-since'] and headers['unless-modified-since'] < mtime:
return normal_streaming(size)
if CFG_ENABLE_HTTP_RANGE_REQUESTS and headers['range']:
try:
if headers['if-range']:
if etag is None or etag not in headers['if-range']:
return normal_streaming(size)
ranges = fix_ranges(headers['range'], size)
except:
return normal_streaming(size)
if len(ranges) > 1:
return multiple_ranges(size, ranges, mime)
elif ranges:
return single_range(size, ranges[0])
else:
raise apache.SERVER_RETURN, apache.HTTP_RANGE_NOT_SATISFIABLE
else:
return normal_streaming(size)
else:
raise apache.SERVER_RETURN, apache.HTTP_NOT_FOUND
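## Illustrative sketch: serving a file from a legacy request handler.  ``req``
## is the mod_python-style request object used throughout this module; the
## path and names below are hypothetical.
#   return stream_file(req, '/opt/invenio/var/data/files/g0/123/demo.pdf;1',
#                      fullname='demo.pdf', mime='application/pdf', download=True)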
def stream_restricted_icon(req):
"""Return the content of the "Restricted Icon" file."""
stream_file(req, '%s/img/restricted.gif' % CFG_WEBDIR)
raise apache.SERVER_RETURN, apache.DONE
#def list_versions_from_array(docfiles):
# """Retrieve the list of existing versions from the given docfiles list."""
# versions = []
# for docfile in docfiles:
# if not docfile.get_version() in versions:
# versions.append(docfile.get_version())
# versions.sort()
# versions.reverse()
# return versions
def _make_base_dir(docid):
"""Given a docid it returns the complete path that should host its files."""
group = "g" + str(int(int(docid) / CFG_BIBDOCFILE_FILESYSTEM_BIBDOC_GROUP_LIMIT))
return os.path.join(CFG_BIBDOCFILE_FILEDIR, group, str(docid))
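## For example (illustrative, assuming the default
## CFG_BIBDOCFILE_FILESYSTEM_BIBDOC_GROUP_LIMIT of 5000):
#   _make_base_dir(123)    ->  CFG_BIBDOCFILE_FILEDIR/g0/123
#   _make_base_dir(12345)  ->  CFG_BIBDOCFILE_FILEDIR/g2/12345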
class Md5Folder(object):
"""Manage all the Md5 checksum about a folder"""
def __init__(self, folder):
"""Initialize the class from the md5 checksum of a given path"""
self.folder = folder
self.load()
def update(self, only_new=True):
"""Update the .md5 file with the current files. If only_new
is specified then only not already calculated file are calculated."""
if not only_new:
self.md5s = {}
if os.path.exists(self.folder):
for filename in os.listdir(self.folder):
if filename not in self.md5s and not filename.startswith('.'):
self.md5s[filename] = calculate_md5(os.path.join(self.folder, filename))
self.store()
def store(self):
"""Store the current md5 dictionary into .md5"""
try:
old_umask = os.umask(0o022)
md5file = open(os.path.join(self.folder, ".md5"), "w")
for key, value in self.md5s.items():
md5file.write('%s *%s\n' % (value, key))
md5file.close()
os.umask(old_umask)
except Exception as e:
register_exception(alert_admin=True)
raise InvenioBibDocFileError("Encountered an exception while storing .md5 for folder '%s': '%s'" % (self.folder, e))
def load(self):
"""Load .md5 into the md5 dictionary"""
self.md5s = {}
md5_path = os.path.join(self.folder, ".md5")
if os.path.exists(md5_path):
for row in open(md5_path, "r"):
md5hash = row[:32]
filename = row[34:].strip()
self.md5s[filename] = md5hash
else:
self.update()
def check(self, filename=''):
"""Check the specified file or all the files for which it exists a hash
for being coherent with the stored hash."""
if filename and filename in self.md5s.keys():
try:
return self.md5s[filename] == calculate_md5(os.path.join(self.folder, filename))
except Exception as e:
register_exception(alert_admin=True)
raise InvenioBibDocFileError("Encountered an exception while loading '%s': '%s'" % (os.path.join(self.folder, filename), e))
else:
for filename, md5hash in self.md5s.items():
try:
if calculate_md5(os.path.join(self.folder, filename)) != md5hash:
return False
except Exception as e:
register_exception(alert_admin=True)
raise InvenioBibDocFileError("Encountered an exception while loading '%s': '%s'" % (os.path.join(self.folder, filename), e))
return True
def get_checksum(self, filename):
"""Return the checksum of a physical file."""
md5hash = self.md5s.get(filename, None)
if md5hash is None:
self.update()
# Now it should not fail!
md5hash = self.md5s[filename]
return md5hash
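## Illustrative sketch (hypothetical path): verifying the integrity of a bibdoc
## directory and refreshing its .md5 file.
#   md5s = Md5Folder('/opt/invenio/var/data/files/g0/123')
#   if not md5s.check():         # compare every stored checksum against the disk
#       pass                      # resync / raise the alarm
#   md5s.update(only_new=True)    # add checksums for files that appeared since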
def calculate_md5_external(filename):
"""Calculate the md5 of a physical file through md5sum Command Line Tool.
This is suitable for file larger than 256Kb."""
try:
md5_result = os.popen(CFG_PATH_MD5SUM + ' -b %s' % escape_shell_arg(filename))
ret = md5_result.read()[:32]
md5_result.close()
if len(ret) != 32:
# Error in running md5sum. Let's fallback to internal
# algorithm.
return calculate_md5(filename, force_internal=True)
else:
return ret
except Exception as e:
raise InvenioBibDocFileError("Encountered an exception while calculating md5 for file '%s': '%s'" % (filename, e))
def calculate_md5(filename, force_internal=False):
"""Calculate the md5 of a physical file. This is suitable for files smaller
than 256Kb."""
if not CFG_PATH_MD5SUM or force_internal or os.path.getsize(filename) < CFG_BIBDOCFILE_MD5_THRESHOLD:
try:
to_be_read = open(filename, "rb")
computed_md5 = md5()
while True:
buf = to_be_read.read(CFG_BIBDOCFILE_MD5_BUFFER)
if buf:
computed_md5.update(buf)
else:
break
to_be_read.close()
return computed_md5.hexdigest()
except Exception as e:
register_exception(alert_admin=True)
raise InvenioBibDocFileError("Encountered an exception while calculating md5 for file '%s': '%s'" % (filename, e))
else:
return calculate_md5_external(filename)
def bibdocfile_url_to_bibrecdocs(url):
"""Given an URL in the form CFG_SITE_[SECURE_]URL/CFG_SITE_RECORD/xxx/files/... it returns
a BibRecDocs object for the corresponding recid."""
recid = decompose_bibdocfile_url(url)[0]
return BibRecDocs(recid)
def bibdocfile_url_to_bibdoc(url):
"""Given an URL in the form CFG_SITE_[SECURE_]URL/CFG_SITE_RECORD/xxx/files/... it returns
a BibDoc object for the corresponding recid/docname."""
docname = decompose_bibdocfile_url(url)[1]
return bibdocfile_url_to_bibrecdocs(url).get_bibdoc(docname)
def bibdocfile_url_to_bibdocfile(url):
"""Given an URL in the form CFG_SITE_[SECURE_]URL/CFG_SITE_RECORD/xxx/files/... it returns
a BibDocFile object for the corresponding recid/docname/format."""
docformat = decompose_bibdocfile_url(url)[2]
return bibdocfile_url_to_bibdoc(url).get_file(docformat)
def bibdocfile_url_to_fullpath(url):
"""Given an URL in the form CFG_SITE_[SECURE_]URL/CFG_SITE_RECORD/xxx/files/... it returns
the fullpath for the corresponding recid/docname/format."""
return bibdocfile_url_to_bibdocfile(url).get_full_path()
def bibdocfile_url_p(url):
"""Return True when the url is a potential valid url pointing to a
fulltext owned by a system."""
if url.startswith('%s/getfile.py' % CFG_SITE_URL) or url.startswith('%s/getfile.py' % CFG_SITE_SECURE_URL):
return True
if not (url.startswith('%s/%s/' % (CFG_SITE_URL, CFG_SITE_RECORD)) or url.startswith('%s/%s/' % (CFG_SITE_SECURE_URL, CFG_SITE_RECORD))):
return False
splitted_url = url.split('/files/')
return len(splitted_url) == 2 and splitted_url[0] != '' and splitted_url[1] != ''
def get_docid_from_bibdocfile_fullpath(fullpath):
"""Given a bibdocfile fullpath (e.g. "CFG_BIBDOCFILE_FILEDIR/g0/123/bar.pdf;1")
returns the docid (e.g. 123)."""
if not fullpath.startswith(os.path.join(CFG_BIBDOCFILE_FILEDIR, 'g')):
raise InvenioBibDocFileError, "Fullpath %s doesn't correspond to a valid bibdocfile fullpath" % fullpath
dirname = decompose_file_with_version(fullpath)[0]
try:
return int(dirname.split('/')[-1])
except:
raise InvenioBibDocFileError, "Fullpath %s doesn't correspond to a valid bibdocfile fullpath" % fullpath
def decompose_bibdocfile_fullpath(fullpath):
"""Given a bibdocfile fullpath (e.g. "CFG_BIBDOCFILE_FILEDIR/g0/123/bar.pdf;1")
returns a quadruple (recid, docname, format, version)."""
if not fullpath.startswith(os.path.join(CFG_BIBDOCFILE_FILEDIR, 'g')):
raise InvenioBibDocFileError, "Fullpath %s doesn't correspond to a valid bibdocfile fullpath" % fullpath
dirname, dummy, extension, version = decompose_file_with_version(fullpath)
try:
docid = int(dirname.split('/')[-1])
return {"doc_id" : docid, "extension": extension, "version": version}
except:
raise InvenioBibDocFileError, "Fullpath %s doesn't correspond to a valid bibdocfile fullpath" % fullpath
_RE_BIBDOCFILE_URL = re.compile("(%s|%s)/%s/(?P<recid>\d+)(?P<rest>.*)" % (re.escape(CFG_SITE_URL), re.escape(CFG_SITE_SECURE_URL), re.escape(CFG_SITE_RECORD)))
def decompose_bibdocfile_url(url):
"""Given a bibdocfile_url return a triple (recid, docname, format)."""
if url.startswith('%s/getfile.py' % CFG_SITE_URL) or url.startswith('%s/getfile.py' % CFG_SITE_SECURE_URL):
return decompose_bibdocfile_very_old_url(url)
g = _RE_BIBDOCFILE_URL.match(urllib.unquote(url))
if g:
recid = int(g.group('recid'))
rest = g.group('rest')
dummy, docname, docformat = decompose_file(rest)
return recid, docname, docformat
else:
raise InvenioBibDocFileError, "Url %s doesn't correspond to a valid record inside the system." % url
re_bibdocfile_old_url = re.compile(r'/%s/(\d*)/files/' % CFG_SITE_RECORD)
def decompose_bibdocfile_old_url(url):
"""Given a bibdocfile old url (e.g. CFG_SITE_URL/CFG_SITE_RECORD/123/files)
it returns the recid."""
g = re_bibdocfile_old_url.search(url)
if g:
return int(g.group(1))
raise InvenioBibDocFileError('%s is not a valid old bibdocfile url' % url)
def decompose_bibdocfile_very_old_url(url):
"""Decompose an old /getfile.py? URL"""
if url.startswith('%s/getfile.py' % CFG_SITE_URL) or url.startswith('%s/getfile.py' % CFG_SITE_SECURE_URL):
params = urllib.splitquery(url)[1]
if params:
try:
params = cgi.parse_qs(params)
if 'docid' in params:
docid = int(params['docid'][0])
bibdoc = BibDoc.create_instance(docid)
if bibdoc.bibrec_links:
recid = bibdoc.bibrec_links[0]["rec_id"]
docname = bibdoc.bibrec_links[0]["doc_name"]
else:
raise InvenioBibDocFileError("Old style URL pointing to an unattached document")
elif 'recid' in params:
recid = int(params['recid'][0])
if 'name' in params:
docname = params['name'][0]
else:
docname = ''
else:
raise InvenioBibDocFileError('%s has not enough params to correspond to a bibdocfile.' % url)
docformat = normalize_format(params.get('format', [''])[0])
return (recid, docname, docformat)
except Exception as e:
raise InvenioBibDocFileError('Problem with %s: %s' % (url, e))
else:
raise InvenioBibDocFileError('%s has no params to correspond to a bibdocfile.' % url)
else:
raise InvenioBibDocFileError('%s is not a valid very old bibdocfile url' % url)
def get_docname_from_url(url):
"""Return a potential docname given a url"""
path = urllib2.urlparse.urlsplit(urllib.unquote(url))[2]
filename = os.path.split(path)[-1]
return file_strip_ext(filename)
def get_format_from_url(url):
"""Return a potential format given a url"""
path = urllib2.urlparse.urlsplit(urllib.unquote(url))[2]
filename = os.path.split(path)[-1]
return filename[len(file_strip_ext(filename)):]
def clean_url(url):
"""Given a local url e.g. a local path it render it a realpath."""
if is_url_a_local_file(url):
path = urllib2.urlparse.urlsplit(urllib.unquote(url))[2]
return os.path.abspath(path)
else:
return url
def is_url_a_local_file(url):
"""Return True if the given URL is pointing to a local file."""
protocol = urllib2.urlparse.urlsplit(url)[0]
return protocol in ('', 'file')
def check_valid_url(url):
"""
Check for validity of a url or a file.
@param url: the URL to check
@type url: string
@raise StandardError: if the URL is not a valid URL.
"""
try:
if is_url_a_local_file(url):
path = urllib2.urlparse.urlsplit(urllib.unquote(url))[2]
if os.path.abspath(path) != path:
raise StandardError, "%s is not a normalized path (would be %s)." % (path, os.path.normpath(path))
for allowed_path in CFG_BIBUPLOAD_FFT_ALLOWED_LOCAL_PATHS + [CFG_TMPDIR, CFG_TMPSHAREDDIR, CFG_WEBSUBMIT_STORAGEDIR]:
if path.startswith(allowed_path):
dummy_fd = open(path)
dummy_fd.close()
return
raise StandardError, "%s is not in one of the allowed paths." % path
else:
try:
open_url(url)
except InvenioBibdocfileUnauthorizedURL as e:
raise StandardError, str(e)
except Exception as e:
raise StandardError, "%s is not a correct url: %s" % (url, e)
def safe_mkstemp(suffix, prefix='bibdocfile_'):
"""Create a temporary filename that don't have any '.' inside a part
from the suffix."""
tmpfd, tmppath = tempfile.mkstemp(suffix=suffix, prefix=prefix, dir=CFG_TMPDIR)
    # Close the file and leave the responsibility to the client code to
    # correctly open/close it.
os.close(tmpfd)
if '.' not in suffix:
# Just in case format is empty
return tmppath
while '.' in os.path.basename(tmppath)[:-len(suffix)]:
os.remove(tmppath)
tmpfd, tmppath = tempfile.mkstemp(suffix=suffix, prefix=prefix, dir=CFG_TMPDIR)
os.close(tmpfd)
return tmppath
def download_local_file(filename, docformat=None):
"""
Copies a local file to Invenio's temporary directory.
@param filename: the name of the file to copy
@type filename: string
    @param docformat: the format of the file to copy (will be found if not
    specified)
    @type docformat: string
@return: the path of the temporary file created
@rtype: string
@raise StandardError: if something went wrong
"""
# Make sure the format is OK.
if docformat is None:
docformat = guess_format_from_url(filename)
else:
docformat = normalize_format(docformat)
tmppath = ''
# Now try to copy.
try:
path = urllib2.urlparse.urlsplit(urllib.unquote(filename))[2]
if os.path.abspath(path) != path:
raise StandardError, "%s is not a normalized path (would be %s)." \
% (path, os.path.normpath(path))
for allowed_path in CFG_BIBUPLOAD_FFT_ALLOWED_LOCAL_PATHS + [CFG_TMPDIR,
CFG_WEBSUBMIT_STORAGEDIR]:
if path.startswith(allowed_path):
tmppath = safe_mkstemp(docformat)
shutil.copy(path, tmppath)
if os.path.getsize(tmppath) == 0:
os.remove(tmppath)
raise StandardError, "%s seems to be empty" % filename
break
else:
raise StandardError, "%s is not in one of the allowed paths." % path
except Exception as e:
raise StandardError, "Impossible to copy the local file '%s': %s" % \
(filename, str(e))
return tmppath
def download_external_url(url, docformat=None, progress_callback=None):
"""
Download a url (if it corresponds to a remote file) and return a
local url to it.
@param url: the URL to download
@type url: string
    @param docformat: the format of the file (will be found if not specified)
    @type docformat: string
    @return: the path to the downloaded local file
@rtype: string
@raise StandardError: if the download failed
"""
tmppath = None
# Make sure the format is OK.
if docformat is None:
# First try to find a known extension to the URL
docformat = decompose_file(url, skip_version=True,
only_known_extensions=True)[2]
if not docformat:
# No correct format could be found. Will try to get it from the
# HTTP message headers.
docformat = ''
else:
docformat = normalize_format(docformat)
from_file, to_file, tmppath = None, None, ''
try:
from_file = open_url(url)
except InvenioBibdocfileUnauthorizedURL as e:
raise StandardError, str(e)
except urllib2.URLError as e:
raise StandardError, 'URL could not be opened: %s' % str(e)
if not docformat:
# We could not determine the format from the URL, so let's try
# to read it from the HTTP headers.
docformat = get_format_from_http_response(from_file)
try:
tmppath = safe_mkstemp(docformat)
if progress_callback:
total_size = int(from_file.info().getheader('Content-Length').strip())
progress_size = 0
to_file = open(tmppath, 'w')
while True:
block = from_file.read(CFG_BIBDOCFILE_BLOCK_SIZE)
if not block:
break
to_file.write(block)
if progress_callback:
progress_size += CFG_BIBDOCFILE_BLOCK_SIZE
progress_callback(progress_size, CFG_BIBDOCFILE_BLOCK_SIZE,
total_size)
to_file.close()
from_file.close()
if os.path.getsize(tmppath) == 0:
raise StandardError, "%s seems to be empty" % url
except Exception as e:
# Try to close and remove the temporary file.
try:
to_file.close()
except Exception:
pass
try:
os.remove(tmppath)
except Exception:
pass
raise StandardError, "Error when downloading %s into %s: %s" % \
(url, tmppath, e)
return tmppath
def get_format_from_http_response(response):
"""
Tries to retrieve the format of the file from the message headers of the
HTTP response.
@param response: the HTTP response
@type response: file-like object (as returned by urllib.urlopen)
@return: the format of the remote resource
@rtype: string
"""
def parse_content_type(text):
return text.split(';')[0].strip()
def parse_content_disposition(text):
for item in text.split(';'):
item = item.strip()
if item.strip().startswith('filename='):
return item[len('filename="'):-len('"')]
info = response.info()
docformat = ''
content_disposition = info.getheader('Content-Disposition')
if content_disposition:
filename = parse_content_disposition(content_disposition)
if filename:
docformat = decompose_file(filename, only_known_extensions=False)[2]
if docformat:
return docformat
content_type = info.getheader('Content-Type')
if content_type:
content_type = parse_content_type(content_type)
if content_type not in ('text/plain', 'application/octet-stream'):
## We actually ignore these mimetypes since they are the
## defaults often returned by Apache in case the mimetype
## was not known
if content_type in CFG_BIBDOCFILE_PREFERRED_MIMETYPES_MAPPING:
docformat = normalize_format(CFG_BIBDOCFILE_PREFERRED_MIMETYPES_MAPPING[content_type])
else:
ext = _mimes.guess_extension(content_type)
if ext:
docformat = normalize_format(ext)
return docformat
def download_url(url, docformat=None):
"""
Download a url (if it corresponds to a remote file) and return a
local url to it.
"""
tmppath = None
try:
if is_url_a_local_file(url):
tmppath = download_local_file(url, docformat = docformat)
else:
tmppath = download_external_url(url, docformat = docformat)
except StandardError:
raise
return tmppath
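## Illustrative sketch (hypothetical URL): fetching a remote file into a local
## temporary copy before attaching it to a record.
#   tmppath = download_url('http://example.org/papers/report.pdf')
#   try:
#       pass  # e.g. hand tmppath over to the attaching code
#   finally:
#       os.remove(tmppath)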
class MoreInfo(object):
"""This class represents a genering MoreInfo dictionary.
MoreInfo object can be attached to bibdoc, bibversion, format or BibRelation.
The entity where a particular MoreInfo object is attached has to be specified using the
constructor parametes.
This class is a thin wrapper around the database table.
"""
def __init__(self, docid = None, version = None, docformat = None,
relation = None, cache_only = False, cache_reads = True, initial_data = None):
"""
@param cache_only Determines if MoreInfo object should be created in
memory only or reflected in the database
@type cache_only boolean
@param cache_reads Determines if reads should be executed on the
in-memory cache or should be redirected to the
database. If this is true, cache can be entirely
regenerated from the database only upon an explicit
request. If the value is not present in the cache,
the database is queried
@type cache_reads boolean
        @param initial_data Allows specifying the initial content of the cache.
This parameter is useful when we create an in-memory
instance from serialised value
@type initial_data string
"""
self.docid = docid
self.version = version
self.format = docformat
self.relation = relation
self.cache_only = cache_only
if initial_data != None:
self.cache = initial_data
self.dirty = initial_data
if not self.cache_only:
self._flush_cache() #inserts new entries
else:
self.cache = {}
self.dirty = {}
self.cache_reads = cache_reads
if not self.cache_only:
self.populate_from_database()
@staticmethod
def create_from_serialised(ser_str, docid = None, version = None, docformat = None,
relation = None, cache_only = False, cache_reads = True):
"""Creates an instance of MoreInfo
using serialised data as the cache content"""
data = cPickle.loads(base64.b64decode(ser_str))
return MoreInfo(docid = docid, version = version, docformat = docformat,
relation = relation, cache_only = cache_only,
cache_reads = cache_reads, initial_data = data);
def serialise_cache(self):
"""Returns a serialised representation of the cache"""
return base64.b64encode(cPickle.dumps(self.get_cache()))
def populate_from_database(self):
"""Retrieves all values of MoreInfo and places them in the cache"""
where_str, where_args = self._generate_where_query_args()
query_str = "SELECT namespace, data_key, data_value FROM bibdocmoreinfo WHERE %s" % (where_str, )
res = run_sql(query_str, where_args)
if res:
for row in res:
namespace, data_key, data_value_ser = row
data_value = cPickle.loads(data_value_ser)
if not namespace in self.cache:
self.cache[namespace] = {}
self.cache[namespace][data_key] = data_value
def _mark_dirty(self, namespace, data_key):
"""Marks a data key dirty - that should be saved into the database"""
if not namespace in self.dirty:
self.dirty[namespace] = {}
self.dirty[namespace][data_key] = True
def _database_get_distinct_string_list(self, column, namespace = None):
"""A private method reading an unique list of strings from the
moreinfo database table"""
where_str, where_args = self._generate_where_query_args(
namespace = namespace)
query_str = "SELECT DISTINCT %s FROM bibdocmoreinfo WHERE %s" % \
( column, where_str, )
if DBG_LOG_QUERIES:
from invenio.legacy.bibsched.bibtask import write_message
write_message("Executing query: " + query_str + " ARGS: " + repr(where_args))
print("Executing query: " + query_str + " ARGS: " + repr(where_args))
res = run_sql(query_str, where_args)
return (res and [x[0] for x in res]) or [] # after migrating to python 2.6, can be rewritten using x if y else z syntax: return [x[0] for x in res] if res else []
def _database_get_namespaces(self):
"""Read the database to discover namespaces declared in a given MoreInfo"""
return self._database_get_distinct_string_list("namespace")
def _database_get_keys(self, namespace):
"""Returns all keys assigned in a given namespace of a MoreInfo instance"""
return self._database_get_distinct_string_list("data_key", namespace=namespace)
def _database_contains_key(self, namespace, key):
return self._database_read_value(namespace, key) != None
def _database_save_value(self, namespace, key, value):
"""Write changes into the database"""
#TODO: this should happen within one transaction
serialised_val = cPickle.dumps(value)
        # ON DUPLICATE KEY will not work here as multiple NULL values are permitted by the index
if not self._database_contains_key(namespace, key):
#insert new value
query_parts = []
query_args = []
to_process = [(self.docid, "id_bibdoc"), (self.version, "version"),
(self.format, "format"), (self.relation, "id_rel"),
(str(namespace), "namespace"), (str(key), "data_key"),
(str(serialised_val), "data_value")]
for entry in to_process:
_val_or_null(entry[0], q_str = query_parts, q_args = query_args)
columns_str = ", ".join(map(lambda x: x[1], to_process))
values_str = ", ".join(query_parts)
query_str = "INSERT INTO bibdocmoreinfo (%s) VALUES(%s)" % \
(columns_str, values_str)
if DBG_LOG_QUERIES:
from invenio.legacy.bibsched.bibtask import write_message
write_message("Executing query: " + query_str + " ARGS: " + repr(query_args))
print("Executing query: " + query_str + " ARGS: " + repr(query_args))
run_sql(query_str, query_args)
else:
#Update existing value
where_str, where_args = self._generate_where_query_args(namespace, key)
query_str = "UPDATE bibdocmoreinfo SET data_value=%s WHERE " + where_str
query_args = [str(serialised_val)] + where_args
if DBG_LOG_QUERIES:
from invenio.legacy.bibsched.bibtask import write_message
write_message("Executing query: " + query_str + " ARGS: " + repr(query_args))
print("Executing query: " + query_str + " ARGS: " + repr(query_args))
run_sql(query_str, query_args )
def _database_read_value(self, namespace, key):
"""Reads a value directly from the database
@param namespace - namespace of the data to be read
@param key - key of the data to be read
"""
where_str, where_args = self._generate_where_query_args(namespace = namespace, data_key = key)
query_str = "SELECT data_value FROM bibdocmoreinfo WHERE " + where_str
res = run_sql(query_str, where_args)
if DBG_LOG_QUERIES:
from invenio.legacy.bibsched.bibtask import write_message
write_message("Executing query: " + query_str + " ARGS: " + repr(where_args) + "WITH THE RESULT: " + str(res))
s_ = ""
if res:
s_ = cPickle.loads(res[0][0])
print("Executing query: " + query_str + " ARGS: " + repr(where_args) + " WITH THE RESULT: " + str(s_))
if res and res[0][0]:
try:
return cPickle.loads(res[0][0])
except:
raise Exception("Error when deserialising value for %s key=%s retrieved value=%s" % (repr(self), str(key), str(res[0][0])))
return None
def _database_remove_value(self, namespace, key):
"""Removes an entry directly in the database"""
where_str, where_args = self._generate_where_query_args(namespace = namespace, data_key = key)
query_str = "DELETE FROM bibdocmoreinfo WHERE " + where_str
if DBG_LOG_QUERIES:
from invenio.legacy.bibsched.bibtask import write_message
write_message("Executing query: " + query_str + " ARGS: " + repr(where_args))
print("Executing query: " + query_str + " ARGS: " + repr(where_args))
run_sql(query_str, where_args)
return None
def _flush_cache(self):
"""Writes all the dirty cache entries into the database"""
for namespace in self.dirty:
for data_key in self.dirty[namespace]:
if namespace in self.cache and data_key in self.cache[namespace]\
and not self.cache[namespace][data_key] is None:
self._database_save_value(namespace, data_key, self.cache[namespace][data_key])
else:
# This might happen if a value has been removed from the cache
self._database_remove_value(namespace, data_key)
self.dirty = {}
def _generate_where_query_args(self, namespace = None, data_key = None):
"""Private method generating WHERE clause of SQL statements"""
ns = []
if namespace != None:
ns = [(namespace, "namespace")]
dk = []
if data_key != None:
dk = [(data_key, "data_key")]
to_process = [(self.docid, "id_bibdoc"), (self.version, "version"),
(self.format, "format"), (self.relation, "id_rel")] + \
ns + dk
return _sql_generate_conjunctive_where(to_process)
def set_data(self, namespace, key, value):
"""setting data directly in the database dictionary"""
if not namespace in self.cache:
self.cache[namespace] = {}
self.cache[namespace][key] = value
self._mark_dirty(namespace, key)
if not self.cache_only:
self._flush_cache()
def get_data(self, namespace, key):
"""retrieving data from the database"""
if self.cache_reads or self.cache_only:
if namespace in self.cache and key in self.cache[namespace]:
return self.cache[namespace][key]
if not self.cache_only:
# we have a permission to read from the database
value = self._database_read_value(namespace, key)
if value:
if not namespace in self.cache:
self.cache[namespace] = {}
self.cache[namespace][key] = value
return value
return None
def del_key(self, namespace, key):
"""retrieving data from the database"""
if not namespace in self.cache:
return None
del self.cache[namespace][key]
self._mark_dirty(namespace, key)
if not self.cache_only:
self._flush_cache()
def contains_key(self, namespace, key):
return self.get_data(namespace, key) != None
# the dictionary interface -> updating the default namespace
def __setitem__(self, key, value):
self.set_data("", key, value) #the default value
def __getitem__(self, key):
return self.get_data("", key)
def __delitem__(self, key):
self.del_key("", key)
def __contains__(self, key):
return self.contains_key("", key)
def __repr__(self):
return "MoreInfo(docid=%s, version=%s, docformat=%s, relation=%s)" % \
(self.docid, self.version, self.format, self.relation)
def delete(self):
"""Remove all entries associated with this MoreInfo"""
self.cache = {}
if not self.cache_only:
where_str, query_args = self._generate_where_query_args()
query_str = "DELETE FROM bibdocmoreinfo WHERE %s" % (where_str, )
if DBG_LOG_QUERIES:
from invenio.legacy.bibsched.bibtask import write_message
write_message("Executing query: " + query_str + " ARGS: " + repr(query_args))
print("Executing query: " + query_str + " ARGS: " + repr(query_args))
run_sql(query_str, query_args)
def get_cache(self):
"""Returns the content of the cache
@return The content of the MoreInfo cache
@rtype dictionary {namespace: {key1: value1, ... }, namespace2: {}}
"""
return self.cache
def get_namespaces(self):
"""Returns a list of namespaces present in the MoreInfo structure.
        If the object is permitted access to the database, the data should
        always be read from there. Unlike when reading a particular value,
        we can not check if a value is missing in the cache.
"""
if self.cache_only and self.cache_reads:
return self.cache.keys()
return self._database_get_namespaces()
def get_keys(self, namespace):
"""Returns a list of keys present in a given namespace"""
if self.cache_only and self.cache_reads:
res = []
if namespace in self.cache:
res = self.cache[namespace].keys()
return res
else:
return self._database_get_keys(namespace)
def flush(self):
"""Flush the content into the database"""
self._flush_cache()
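# Illustrative usage sketch (not part of the original module): a hypothetical
# helper showing how the MoreInfo namespace/key API above is intended to be
# combined; the docid, namespace and key names used here are placeholders.
def _example_moreinfo_roundtrip(docid):
    """Store a value in a namespace, read it back and flush it explicitly."""
    info = MoreInfo(docid, cache_only=True)  # operate on the in-memory cache only
    info.set_data("demo", "payload", {"answer": 42})  # namespaced write (kept dirty in the cache)
    info["plain_key"] = "stored in the default namespace"  # dict interface uses namespace ""
    info.flush()  # explicitly write dirty cache entries to bibdocmoreinfo
    return info.get_data("demo", "payload")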
class BibDocMoreInfo(MoreInfo):
"""
This class wraps contextual information of the documents, such as the
- comments
- descriptions
- flags.
Such information is kept separately per every format/version instance of
    the corresponding document and is serialized in the database, ready
to be retrieved (but not searched).
@param docid: the document identifier.
@type docid: integer
@param more_info: a serialized version of an already existing more_info
        object. If not specified, this information will be read from the
        database; otherwise an empty dictionary will be allocated.
@raise ValueError: if docid is not a positive integer.
@ivar docid: the document identifier as passed to the constructor.
@type docid: integer
@ivar more_info: the more_info dictionary that will hold all the
additional document information.
@type more_info: dict of dict of dict
    @note: in general this class is never instantiated in client code and
        never used outside the bibdocfile module.
@note: this class will be extended in the future to hold all the new auxiliary
information about a document.
"""
def __init__(self, docid, cache_only = False, initial_data = None):
if not (type(docid) in (long, int) and docid > 0):
raise ValueError("docid is not a positive integer, but %s." % docid)
MoreInfo.__init__(self, docid, cache_only = cache_only, initial_data = initial_data)
if 'descriptions' not in self:
self['descriptions'] = {}
if 'comments' not in self:
self['comments'] = {}
if 'flags' not in self:
self['flags'] = {}
if DBG_LOG_QUERIES:
from invenio.legacy.bibsched.bibtask import write_message
write_message("Creating BibDocMoreInfo :" + repr(self["comments"]))
print("Creating BibdocMoreInfo :" + repr(self["comments"]))
def __repr__(self):
"""
@return: the canonical string representation of the C{BibDocMoreInfo}.
@rtype: string
"""
return 'BibDocMoreInfo(%i, %s)' % (self.docid, repr(cPickle.dumps(self)))
def set_flag(self, flagname, docformat, version):
"""
Sets a flag.
@param flagname: the flag to set (see
L{CFG_BIBDOCFILE_AVAILABLE_FLAGS}).
@type flagname: string
        @param format: the format for which the flag should be set.
@type format: string
        @param version: the version for which the flag should be set.
@type version: integer
@raise ValueError: if the flag is not in
L{CFG_BIBDOCFILE_AVAILABLE_FLAGS}
"""
if flagname in CFG_BIBDOCFILE_AVAILABLE_FLAGS:
flags = self['flags']
if not flagname in flags:
flags[flagname] = {}
if not version in flags[flagname]:
flags[flagname][version] = {}
if not docformat in flags[flagname][version]:
flags[flagname][version][docformat] = {}
flags[flagname][version][docformat] = True
self['flags'] = flags
else:
raise ValueError, "%s is not in %s" % \
(flagname, CFG_BIBDOCFILE_AVAILABLE_FLAGS)
def get_comment(self, docformat, version):
"""
Returns the specified comment.
@param format: the format for which the comment should be
retrieved.
@type format: string
@param version: the version for which the comment should be
retrieved.
@type version: integer
@return: the specified comment.
@rtype: string
"""
try:
assert(type(version) is int)
docformat = normalize_format(docformat)
return self['comments'].get(version, {}).get(docformat)
except:
register_exception()
raise
def get_description(self, docformat, version):
"""
Returns the specified description.
@param format: the format for which the description should be
retrieved.
@type format: string
@param version: the version for which the description should be
retrieved.
@type version: integer
@return: the specified description.
@rtype: string
"""
try:
assert(type(version) is int)
docformat = normalize_format(docformat)
return self['descriptions'].get(version, {}).get(docformat)
except:
register_exception()
raise
def has_flag(self, flagname, docformat, version):
"""
        Return True if the corresponding flag has been set.
@param flagname: the name of the flag (see
L{CFG_BIBDOCFILE_AVAILABLE_FLAGS}).
@type flagname: string
@param format: the format for which the flag should be checked.
@type format: string
@param version: the version for which the flag should be checked.
@type version: integer
@return: True if the flag is set for the given format/version.
@rtype: bool
@raise ValueError: if the flagname is not in
L{CFG_BIBDOCFILE_AVAILABLE_FLAGS}
"""
if flagname in CFG_BIBDOCFILE_AVAILABLE_FLAGS:
return self['flags'].get(flagname, {}).get(version, {}).get(docformat, False)
else:
raise ValueError, "%s is not in %s" % (flagname, CFG_BIBDOCFILE_AVAILABLE_FLAGS)
def get_flags(self, docformat, version):
"""
Return the list of all the enabled flags.
@param format: the format for which the list should be returned.
@type format: string
@param version: the version for which the list should be returned.
@type version: integer
@return: the list of enabled flags (from
L{CFG_BIBDOCFILE_AVAILABLE_FLAGS}).
@rtype: list of string
"""
return [flag for flag in self['flags'] if docformat in self['flags'][flag].get(version, {})]
def set_comment(self, comment, docformat, version):
"""
Set a comment.
@param comment: the comment to be set.
@type comment: string
@param format: the format for which the comment should be set.
@type format: string
@param version: the version for which the comment should be set:
@type version: integer
"""
try:
assert(type(version) is int and version > 0)
docformat = normalize_format(docformat)
if comment == KEEP_OLD_VALUE:
comment = self.get_comment(docformat, version) or self.get_comment(docformat, version - 1)
if not comment:
self.unset_comment(docformat, version)
return
if not version in self['comments']:
comments = self['comments']
comments[version] = {}
self['comments'] = comments
comments = self['comments']
comments[version][docformat] = comment
self['comments'] = comments
except:
register_exception()
raise
def set_description(self, description, docformat, version):
"""
Set a description.
@param description: the description to be set.
@type description: string
@param format: the format for which the description should be set.
@type format: string
@param version: the version for which the description should be set:
@type version: integer
"""
try:
assert(type(version) is int and version > 0)
docformat = normalize_format(docformat)
if description == KEEP_OLD_VALUE:
description = self.get_description(docformat, version) or self.get_description(docformat, version - 1)
if not description:
self.unset_description(docformat, version)
return
descriptions = self['descriptions']
if not version in descriptions:
descriptions[version] = {}
descriptions[version][docformat] = description
self.set_data("", 'descriptions', descriptions)
except:
register_exception()
raise
def unset_comment(self, docformat, version):
"""
Unset a comment.
@param format: the format for which the comment should be unset.
@type format: string
@param version: the version for which the comment should be unset:
@type version: integer
"""
try:
assert(type(version) is int and version > 0)
comments = self['comments']
del comments[version][docformat]
self['comments'] = comments
except KeyError:
pass
except:
register_exception()
raise
def unset_description(self, docformat, version):
"""
Unset a description.
@param format: the format for which the description should be unset.
@type format: string
@param version: the version for which the description should be unset:
@type version: integer
"""
try:
assert(type(version) is int and version > 0)
descriptions = self['descriptions']
del descriptions[version][docformat]
self['descriptions'] = descriptions
except KeyError:
pass
except:
register_exception()
raise
def unset_flag(self, flagname, docformat, version):
"""
Unset a flag.
@param flagname: the flag to be unset (see
L{CFG_BIBDOCFILE_AVAILABLE_FLAGS}).
@type flagname: string
@param format: the format for which the flag should be unset.
@type format: string
@param version: the version for which the flag should be unset:
@type version: integer
@raise ValueError: if the flag is not in
L{CFG_BIBDOCFILE_AVAILABLE_FLAGS}
"""
if flagname in CFG_BIBDOCFILE_AVAILABLE_FLAGS:
try:
flags = self['flags']
del flags[flagname][version][docformat]
self['flags'] = flags
except KeyError:
pass
else:
raise ValueError, "%s is not in %s" % (flagname, CFG_BIBDOCFILE_AVAILABLE_FLAGS)
_bib_relation__any_value = -1
class BibRelation(object):
"""
A representation of a relation between documents or their particular versions
"""
def __init__(self, rel_type = None,
bibdoc1_id = None, bibdoc2_id = None,
bibdoc1_ver = None, bibdoc2_ver = None,
bibdoc1_fmt = None, bibdoc2_fmt = None,
rel_id = None):
"""
The constructor of the class representing a relation between two
documents.
If the more_info parameter is specified, no data is retrieved from
the database and the internal dictionary is initialised with
the passed value. If the more_info is not provided, the value is
read from the database. In the case of non-existing record, an
empty dictionary is assigned.
        If a version of whichever record is not specified, the resulting
        object describes a relation of all versions of a given BibDoc.
@param bibdoc1
@type bibdoc1 BibDoc
@param bibdoc1_ver
        @type bibdoc1_ver int
@param bibdoc2
        @type bibdoc2 BibDoc
@param bibdoc2_ver
@type bibdoc2_ver int
@param bibdoc1_fmt format of the first document
@type bibdoc1_fmt string
@param bibdoc2_fmt format of the second document
@type bibdoc2_fmt string
@param rel_type
@type rel_type string
@param more_info The serialised representation of the more_info
@type more_info string
@param rel_id allows to specify the identifier of the newly created relation
        @type rel_id unsigned int
"""
self.id = rel_id
self.bibdoc1_id = bibdoc1_id
self.bibdoc2_id = bibdoc2_id
self.bibdoc1_ver = bibdoc1_ver
self.bibdoc2_ver = bibdoc2_ver
self.bibdoc1_fmt = bibdoc1_fmt
self.bibdoc2_fmt = bibdoc2_fmt
self.rel_type = rel_type
if rel_id == None:
self._fill_id_from_data()
else:
self._fill_data_from_id()
self.more_info = MoreInfo(relation = self.id)
def _fill_data_from_id(self):
"""Fill all the relation data from the relation identifier
"""
query = "SELECT id_bibdoc1, version1, format1, id_bibdoc2, version2, format2, rel_type FROM bibdoc_bibdoc WHERE id=%s"
res = run_sql(query, (str(self.id), ))
if res != None and res[0] != None:
self.bibdoc1_id = res[0][0]
self.bibdoc1_ver = res[0][1]
self.bibdoc1_fmt = res[0][2]
self.bibdoc2_id = res[0][3]
self.bibdoc2_ver = res[0][4]
self.bibdoc2_fmt = res[0][5]
self.rel_type = res[0][6]
def _fill_id_from_data(self):
"""Fill the relation identifier based on the data provided"""
where_str, where_args = self._get_where_clauses()
query = "SELECT id FROM bibdoc_bibdoc WHERE %s" % (where_str, )
res = run_sql(query, where_args)
if res and res[0][0]:
self.id = int(res[0][0])
def _get_value_column_mapping(self):
"""
        Returns a list of tuples. Each tuple consists of a value and the name
        of the database column where this value should fit.
"""
return [(self.rel_type, "rel_type"), (self.bibdoc1_id, "id_bibdoc1"),
(self.bibdoc1_ver, "version1"),
(self.bibdoc1_fmt, "format1"),
(self.bibdoc2_id, "id_bibdoc2"),
(self.bibdoc2_ver, "version2"),
(self.bibdoc2_fmt, "format2")]
def _get_where_clauses(self):
"""Private function returning part of the SQL statement identifying
current relation
@return
@rtype tuple
"""
return _sql_generate_conjunctive_where(self._get_value_column_mapping())
@staticmethod
def create(bibdoc1_id = None, bibdoc1_ver = None,
bibdoc1_fmt = None, bibdoc2_id = None,
bibdoc2_ver = None, bibdoc2_fmt = None,
rel_type = ""):
"""
Create a relation and return instance.
        Omitting an argument means that a particular relation concerns any value of the parameter.
"""
# check if there is already entry corresponding to parameters
existing = BibRelation.get_relations(rel_type = rel_type,
bibdoc1_id = bibdoc1_id,
bibdoc2_id = bibdoc2_id,
bibdoc1_ver = bibdoc1_ver,
bibdoc2_ver = bibdoc2_ver,
bibdoc1_fmt = bibdoc1_fmt,
bibdoc2_fmt = bibdoc2_fmt)
if len(existing) > 0:
return existing[0]
# build the insert query and execute it
to_process = [(rel_type, "rel_type"), (bibdoc1_id, "id_bibdoc1"),
(bibdoc1_ver, "version1"), (bibdoc1_fmt, "format1"),
(bibdoc2_id, "id_bibdoc2"), (bibdoc2_ver, "version2"),
(bibdoc2_fmt, "format2")]
values_list = []
args_list = []
columns_list = []
for entry in to_process:
columns_list.append(entry[1])
if entry[0] == None:
values_list.append("NULL")
else:
values_list.append("%s")
args_list.append(entry[0])
query = "INSERT INTO bibdoc_bibdoc (%s) VALUES (%s)" % (", ".join(columns_list), ", ".join(values_list))
# print "Query: %s Args: %s" % (query, str(args_list))
rel_id = run_sql(query, args_list)
return BibRelation(rel_id = rel_id)
def delete(self):
""" Removes a relation between objects from the database.
        Executing the flush function on the same object will restore
        the relation.
"""
where_str, where_args = self._get_where_clauses()
run_sql("DELETE FROM bibdoc_bibdoc WHERE %s" % (where_str,), where_args) # kwalitee: disable=sql
# removing associated MoreInfo
self.more_info.delete()
def get_more_info(self):
return self.more_info
@staticmethod
def get_relations(rel_type = _bib_relation__any_value,
bibdoc1_id = _bib_relation__any_value,
bibdoc2_id = _bib_relation__any_value,
bibdoc1_ver = _bib_relation__any_value,
bibdoc2_ver = _bib_relation__any_value,
bibdoc1_fmt = _bib_relation__any_value,
bibdoc2_fmt = _bib_relation__any_value):
"""Retrieves list of relations satisfying condtions.
If a parameter is specified, its value has to match exactly.
If a parameter is ommited, any of its values will be accepted"""
to_process = [(rel_type, "rel_type"), (bibdoc1_id, "id_bibdoc1"),
(bibdoc1_ver, "version1"), (bibdoc1_fmt, "format1"),
(bibdoc2_id, "id_bibdoc2"), (bibdoc2_ver, "version2"),
(bibdoc2_fmt, "format2")]
where_str, where_args = _sql_generate_conjunctive_where(
filter(lambda x: x[0] != _bib_relation__any_value, to_process))
if where_str:
where_str = "WHERE " + where_str # in case of nonempty where, we need a where clause
query_str = "SELECT id FROM bibdoc_bibdoc %s" % (where_str, )
# print "running query : %s with arguments %s on the object %s" % (query_str, str(where_args), repr(self))
try:
res = run_sql(query_str, where_args)
except:
raise Exception(query_str + " " + str(where_args))
results = []
if res != None:
for res_row in res:
results.append(BibRelation(rel_id=res_row[0]))
return results
# Access to MoreInfo
def set_data(self, category, key, value):
"""assign additional information to this relation"""
self.more_info.set_data(category, key, value)
def get_data(self, category, key):
"""read additional information assigned to this relation"""
return self.more_info.get_data(category, key)
#the dictionary interface allowing to set data bypassing the namespaces
def __setitem__(self, key, value):
self.more_info[key] = value
def __getitem__(self, key):
return self.more_info[key]
def __contains__(self, key):
return self.more_info.__contains__(key)
def __repr__(self):
return "BibRelation(id_bibdoc1 = %s, version1 = %s, format1 = %s, id_bibdoc2 = %s, version2 = %s, format2 = %s, rel_type = %s)" % \
(self.bibdoc1_id, self.bibdoc1_ver, self.bibdoc1_fmt,
self.bibdoc2_id, self.bibdoc2_ver, self.bibdoc2_fmt,
self.rel_type)
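# Illustrative usage sketch (not part of the original module): creating and
# querying a relation with the BibRelation helpers above. The document ids and
# the 'is_extracted_from' relation type used here are placeholders.
def _example_link_documents(docid1, docid2):
    """Create (or reuse) a relation between two bibdocs and annotate it."""
    rel = BibRelation.create(bibdoc1_id=docid1, bibdoc2_id=docid2,
                             rel_type="is_extracted_from")
    rel["note"] = "created by _example_link_documents"  # stored via the attached MoreInfo
    return BibRelation.get_relations(rel_type="is_extracted_from",
                                     bibdoc1_id=docid1)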
def readfile(filename):
"""
Read a file.
@param filename: the name of the file to be read.
@type filename: string
@return: the text contained in the file.
@rtype: string
@note: Returns empty string in case of any error.
@note: this function is useful for quick implementation of websubmit
functions.
"""
try:
return open(filename).read()
except Exception:
return ''
class HeadRequest(urllib2.Request):
"""
A request object to perform a HEAD request.
"""
def get_method(self):
return 'HEAD'
def read_cookie(cookiefile):
"""
    Parses a cookie file and returns a string as needed for the urllib2 headers.
    The file should respect the Netscape cookie specifications.
"""
cookie_data = ''
cfile = open(cookiefile, 'r')
for line in cfile.readlines():
tokens = line.split('\t')
if len(tokens) == 7: # we are on a cookie line
cookie_data += '%s=%s; ' % (tokens[5], tokens[6].replace('\n', ''))
cfile.close()
return cookie_data
def open_url(url, headers=None, head_request=False):
"""
Opens a URL. If headers are passed as argument, no check is performed and
the URL will be opened. Otherwise checks if the URL is present in
CFG_BIBUPLOAD_FFT_ALLOWED_EXTERNAL_URLS and uses the headers specified in
the config variable.
@param url: the URL to open
@type url: string
@param headers: the headers to use
@type headers: dictionary
    @param head_request: if True, perform a HEAD request, otherwise a GET
        request.
@type head_request: boolean
@return: a file-like object as returned by urllib2.urlopen.
"""
headers_to_use = None
if headers is None:
for regex, headers in _CFG_BIBUPLOAD_FFT_ALLOWED_EXTERNAL_URLS:
if regex.match(url) is not None:
headers_to_use = headers
break
if headers_to_use is None:
# URL is not allowed.
raise InvenioBibdocfileUnauthorizedURL, "%s is not an authorized " \
"external URL." % url
else:
headers_to_use = headers
request_obj = head_request and HeadRequest or urllib2.Request
request = request_obj(url)
request.add_header('User-Agent', make_user_agent_string('bibdocfile'))
for key, value in headers_to_use.items():
try:
value = globals()[value['fnc']](**value['args'])
except (KeyError, TypeError):
pass
request.add_header(key, value)
return urllib2.urlopen(request)
def update_modification_date_of_file(filepath, modification_date):
"""Update the modification time and date of the file with the modification_date
@param filepath: the full path of the file that needs to be updated
@type filepath: string
@param modification_date: the new modification date and time
@type modification_date: datetime.datetime object
"""
try:
modif_date_in_seconds = time.mktime(modification_date.timetuple()) # try to get the time in seconds
except (AttributeError, TypeError):
modif_date_in_seconds = 0
if modif_date_in_seconds:
statinfo = os.stat(filepath) # we need to keep the same access time
os.utime(filepath, (statinfo.st_atime, modif_date_in_seconds)) #update the modification time
| 1 | 13,224 | @lnielsen-cern thanks. I missed it :( | inveniosoftware-invenio | py |
@@ -97,7 +97,7 @@ const AnalyticsAdSenseDashboardWidgetTopPagesTable = ( { data } ) => {
chartsEnabled: false,
links: rows.map( ( row ) => row.dimensions[ 1 ] || '/' ),
PrimaryLink: withSelect( ( select, { href = '/' } ) => {
- const serviceURL = select( STORE_NAME ).getServiceReportURL( 'content-pages', {
+ const serviceURL = select( STORE_NAME ).getServiceReportURL( 'content-drilldown', {
'explorer-table.plotKeys': '[]',
'_r.drilldown': `analytics.pagePath:${ href }`,
} ); | 1 | /**
* AnalyticsAdSenseDashboardWidgetTopPagesTable component.
*
* Site Kit by Google, Copyright 2019 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* WordPress dependencies
*/
import { __ } from '@wordpress/i18n';
/**
* Internal dependencies
*/
import Data from 'googlesitekit-data';
import { getTimeInSeconds, numberFormat } from '../../../../util';
import withData from '../../../../components/higherorder/withdata';
import { TYPE_MODULES } from '../../../../components/data';
import { getDataTableFromData } from '../../../../components/data-table';
import PreviewTable from '../../../../components/PreviewTable';
import ctaWrapper from '../../../../components/legacy-notifications/cta-wrapper';
import AdSenseLinkCTA from '../common/AdSenseLinkCTA';
import { analyticsAdsenseReportDataDefaults, isDataZeroForReporting } from '../../util';
import { STORE_NAME } from '../../datastore/constants';
import AnalyticsAdSenseDashboardWidgetLayout from './AnalyticsAdSenseDashboardWidgetLayout';
import TableOverflowContainer from '../../../../components/TableOverflowContainer';
import Link from '../../../../components/Link';
const { withSelect } = Data;
const AnalyticsAdSenseDashboardWidgetTopPagesTable = ( { data } ) => {
// Do not return zero data callout here since it will already be
// present on the page from other sources.
if ( isDataZeroForReporting( data ) ) {
return null;
}
const { rows } = data?.[ 0 ]?.data || {};
if ( ! Array.isArray( rows ) ) {
return null;
}
const headers = [
{
title: __( 'Page Title', 'google-site-kit' ),
tooltip: __( 'Page Title', 'google-site-kit' ),
primary: true,
},
{
title: __( 'Earnings', 'google-site-kit' ),
tooltip: __( 'Earnings', 'google-site-kit' ),
},
{
title: __( 'Page RPM', 'google-site-kit' ),
tooltip: __( 'Page RPM', 'google-site-kit' ),
},
{
title: __( 'Impressions', 'google-site-kit' ),
tooltip: __( 'Impressions', 'google-site-kit' ),
},
];
const dataMapped = rows.map( ( row ) => {
/**
* The shape of the dimensions and metrics objects:
*
* ```
* dimensions[0] = ga:pageTitle
* dimensions[1] = ga:pagePath
*
* metrics[0] = ga:adsenseECPM
* metrics[1] = ga:adsensePageImpressions
* metrics[2] = ga:adsenseRevenue
* ```
*/
return [
row.dimensions[ 0 ],
Number( row.metrics[ 0 ].values[ 0 ] ).toFixed( 2 ),
Number( row.metrics[ 0 ].values[ 1 ] ).toFixed( 2 ),
numberFormat( row.metrics[ 0 ].values[ 2 ] ),
];
} );
const options = {
hideHeader: false,
chartsEnabled: false,
links: rows.map( ( row ) => row.dimensions[ 1 ] || '/' ),
PrimaryLink: withSelect( ( select, { href = '/' } ) => {
const serviceURL = select( STORE_NAME ).getServiceReportURL( 'content-pages', {
'explorer-table.plotKeys': '[]',
'_r.drilldown': `analytics.pagePath:${ href }`,
} );
return {
href: serviceURL,
external: true,
};
} )( Link ),
};
const dataTable = getDataTableFromData( dataMapped, headers, options );
return (
<AnalyticsAdSenseDashboardWidgetLayout>
<TableOverflowContainer>
{ dataTable }
</TableOverflowContainer>
</AnalyticsAdSenseDashboardWidgetLayout>
);
};
/**
* Checks error data response, and handle the INVALID_ARGUMENT specifically.
*
* @since 1.0.0
*
* @param {Object} data Response data.
 * @return {(string|boolean|null)} A string with an error message if there is an error,
 *   `false` when there is no data and no error message, or `null` when the arguments are invalid.
 *
*/
const getDataError = ( data ) => {
if ( data.code && data.message && data.data && data.data.status ) {
// Specifically looking for string "badRequest"
if ( 'badRequest' === data.data.reason ) {
return (
<AnalyticsAdSenseDashboardWidgetLayout>
{ ctaWrapper( <AdSenseLinkCTA />, false, false, true ) }
</AnalyticsAdSenseDashboardWidgetLayout>
);
}
return data.message;
}
// Legacy errors? Maybe this is never hit but better be safe than sorry.
if ( data.error ) {
if ( data.error.message ) {
return data.error.message;
}
if ( data.error.errors && data.error.errors[ 0 ] && data.error.errors[ 0 ].message ) {
return data.error.errors[ 0 ].message;
}
return __( 'Unidentified error', 'google-site-kit' );
}
return false;
};
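// Illustrative sketch only (not part of the original file): how a caller could
// branch on the getDataError() contract described above. The function name is
// hypothetical and the value is unused in this module.
// eslint-disable-next-line no-unused-vars
const exampleRenderErrorOrNothing = ( data ) => {
	const maybeError = getDataError( data );
	if ( maybeError ) {
		// Either a message string or a React element (the AdSense CTA layout).
		return maybeError;
	}
	return null;
};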
export default withData(
AnalyticsAdSenseDashboardWidgetTopPagesTable,
[
{
type: TYPE_MODULES,
identifier: 'analytics',
datapoint: 'report',
data: analyticsAdsenseReportDataDefaults,
priority: 1,
maxAge: getTimeInSeconds( 'day' ),
context: 'Single',
},
],
<AnalyticsAdSenseDashboardWidgetLayout>
<PreviewTable padding />
</AnalyticsAdSenseDashboardWidgetLayout>,
{ createGrid: true },
// Force isDataZero to false since it is handled within the component.
() => false,
getDataError
);
| 1 | 34,669 | @tofumatt, has it been changed intentionally? It doesn't seem to be required in IB, plus we haven't had `content-drilldown` before... If it has been changed intentionally, then should we update the `AnalyticsDashboardWidget` component to be use `content-drilldown` instead of `content-pages` as well? | google-site-kit-wp | js |
@@ -16,6 +16,7 @@
from apiclient import discovery
from oauth2client.client import GoogleCredentials
+
from retrying import retry
from google.cloud.security.common.gcp_api._supported_apis import SUPPORTED_APIS | 1 | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base GCP client which uses the discovery API."""
from apiclient import discovery
from oauth2client.client import GoogleCredentials
from retrying import retry
from google.cloud.security.common.gcp_api._supported_apis import SUPPORTED_APIS
from google.cloud.security.common.util import retryable_exceptions
# pylint: disable=too-few-public-methods
# TODO: Look into improving to prevent using the disable.
class _BaseClient(object):
"""Base client for a specified GCP API and credentials."""
def __init__(self, credentials=None, **kwargs):
if not credentials:
credentials = GoogleCredentials.get_application_default()
self._credentials = credentials
if not kwargs or not kwargs.get('api_name'):
raise UnsupportedApiError('Unsupported API {}'.format(kwargs))
self.name = kwargs['api_name']
if not SUPPORTED_APIS[self.name] or \
not SUPPORTED_APIS[self.name]['version']:
raise UnsupportedApiVersionError(
'Unsupported version {}'.format(SUPPORTED_APIS[self.name]))
self.version = SUPPORTED_APIS[self.name]['version']
self.service = discovery.build(self.name, self.version,
credentials=self._credentials)
def __repr__(self):
return 'API: name={}, version={}'.format(self.name, self.version)
# The wait time is (2^X * multiplier) milliseconds, where X is the retry
# number.
@retry(retry_on_exception=retryable_exceptions.is_retryable_exception,
wait_exponential_multiplier=1000, wait_exponential_max=10000,
stop_max_attempt_number=5)
# pylint: disable=no-self-use
    # TODO: Investigate if this could be a standalone method to remove the disable.
def _execute(self, request):
"""Executes requests in a rate-limited way.
Args:
request: GCP API client request object.
Returns:
API response object.
Raises:
When the retry is exceeded, exception will be thrown. This
exception is not wrapped by the retry library, and will be handled
upstream.
"""
return request.execute()
# Eventually, move these to the errors module
class Error(Exception):
"""Base Error class."""
class ApiExecutionError(Error):
"""Error for API executions."""
CUSTOM_ERROR_MESSAGE = 'GCP API Error: unable to get {0} from GCP:\n{1}'
def __init__(self, resource_name, e):
super(ApiExecutionError, self).__init__(
self.CUSTOM_ERROR_MESSAGE.format(resource_name, e))
class UnsupportedApiError(Error):
"""Error for unsupported API."""
pass
class UnsupportedApiVersionError(Error):
"""Error for unsupported API version."""
pass
| 1 | 25,282 | nit: don't think an extra line is needed here; as retrying is an installed library and should be grouped with the rest of the installed library. | forseti-security-forseti-security | py |
@@ -123,6 +123,13 @@ type PrometheusSpec struct {
// The labels to add to any time series or alerts when communicating with
// external systems (federation, remote storage, Alertmanager).
ExternalLabels map[string]string `json:"externalLabels,omitempty"`
+ // Enable access to prometheus web admin API. Defaults to the value of `false`.
+ // WARNING: Enabling the admin APIs enables mutating endpoints, to delete data,
+ // shutdown Prometheus, and more. Enabling this should be done with care and the
+ // user is advised to add additional authentication authorization via a proxy to
+ // ensure only clients authorized to perform these actions can do so.
+ // For more information see https://prometheus.io/docs/prometheus/latest/querying/api/#tsdb-admin-apis
+ EnableAdminAPI string `json:"enableAdminAPI,omitempty"`
// The external URL the Prometheus instances will be available under. This is
// necessary to generate correct URLs. This is necessary if Prometheus is not
// served from root of a DNS name. | 1 | // Copyright 2018 The prometheus-operator Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v1
import (
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/intstr"
)
const (
Version = "v1"
PrometheusesKind = "Prometheus"
PrometheusName = "prometheuses"
PrometheusKindKey = "prometheus"
AlertmanagersKind = "Alertmanager"
AlertmanagerName = "alertmanagers"
AlertManagerKindKey = "alertmanager"
ServiceMonitorsKind = "ServiceMonitor"
ServiceMonitorName = "servicemonitors"
ServiceMonitorKindKey = "servicemonitor"
PrometheusRuleKind = "PrometheusRule"
PrometheusRuleName = "prometheusrules"
PrometheusRuleKindKey = "prometheusrule"
)
// Prometheus defines a Prometheus deployment.
// +genclient
// +k8s:openapi-gen=true
type Prometheus struct {
metav1.TypeMeta `json:",inline"`
// Standard object’s metadata. More info:
// https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#metadata
// +k8s:openapi-gen=false
metav1.ObjectMeta `json:"metadata,omitempty"`
// Specification of the desired behavior of the Prometheus cluster. More info:
// https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#spec-and-status
Spec PrometheusSpec `json:"spec"`
// Most recent observed status of the Prometheus cluster. Read-only. Not
// included when requesting from the apiserver, only from the Prometheus
// Operator API itself. More info:
// https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#spec-and-status
Status *PrometheusStatus `json:"status,omitempty"`
}
// PrometheusList is a list of Prometheuses.
// +k8s:openapi-gen=true
type PrometheusList struct {
metav1.TypeMeta `json:",inline"`
// Standard list metadata
// More info: https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#metadata
metav1.ListMeta `json:"metadata,omitempty"`
// List of Prometheuses
Items []*Prometheus `json:"items"`
}
// PrometheusSpec is a specification of the desired behavior of the Prometheus cluster. More info:
// https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#spec-and-status
// +k8s:openapi-gen=true
type PrometheusSpec struct {
// Standard object’s metadata. More info:
// https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#metadata
// Metadata Labels and Annotations gets propagated to the prometheus pods.
PodMetadata *metav1.ObjectMeta `json:"podMetadata,omitempty"`
// ServiceMonitors to be selected for target discovery.
ServiceMonitorSelector *metav1.LabelSelector `json:"serviceMonitorSelector,omitempty"`
// Namespaces to be selected for ServiceMonitor discovery. If nil, only
// check own namespace.
ServiceMonitorNamespaceSelector *metav1.LabelSelector `json:"serviceMonitorNamespaceSelector,omitempty"`
// Version of Prometheus to be deployed.
Version string `json:"version,omitempty"`
// Tag of Prometheus container image to be deployed. Defaults to the value of `version`.
// Version is ignored if Tag is set.
Tag string `json:"tag,omitempty"`
// SHA of Prometheus container image to be deployed. Defaults to the value of `version`.
// Similar to a tag, but the SHA explicitly deploys an immutable container image.
// Version and Tag are ignored if SHA is set.
SHA string `json:"sha,omitempty"`
// When a Prometheus deployment is paused, no actions except for deletion
// will be performed on the underlying objects.
Paused bool `json:"paused,omitempty"`
// Image if specified has precedence over baseImage, tag and sha
// combinations. Specifying the version is still necessary to ensure the
// Prometheus Operator knows what version of Prometheus is being
// configured.
Image *string `json:"image,omitempty"`
// Base image to use for a Prometheus deployment.
BaseImage string `json:"baseImage,omitempty"`
// An optional list of references to secrets in the same namespace
// to use for pulling prometheus and alertmanager images from registries
// see http://kubernetes.io/docs/user-guide/images#specifying-imagepullsecrets-on-a-pod
ImagePullSecrets []v1.LocalObjectReference `json:"imagePullSecrets,omitempty"`
// Number of instances to deploy for a Prometheus deployment.
Replicas *int32 `json:"replicas,omitempty"`
// Time duration Prometheus shall retain data for. Default is '24h',
// and must match the regular expression `[0-9]+(ms|s|m|h|d|w|y)` (milliseconds seconds minutes hours days weeks years).
Retention string `json:"retention,omitempty"`
// Log level for Prometheus to be configured with.
LogLevel string `json:"logLevel,omitempty"`
// Interval between consecutive scrapes.
ScrapeInterval string `json:"scrapeInterval,omitempty"`
// Interval between consecutive evaluations.
EvaluationInterval string `json:"evaluationInterval,omitempty"`
// /--rules.*/ command-line arguments.
Rules Rules `json:"rules,omitempty"`
// The labels to add to any time series or alerts when communicating with
// external systems (federation, remote storage, Alertmanager).
ExternalLabels map[string]string `json:"externalLabels,omitempty"`
// The external URL the Prometheus instances will be available under. This is
// necessary to generate correct URLs. This is necessary if Prometheus is not
// served from root of a DNS name.
ExternalURL string `json:"externalUrl,omitempty"`
// The route prefix Prometheus registers HTTP handlers for. This is useful,
// if using ExternalURL and a proxy is rewriting HTTP routes of a request,
// and the actual ExternalURL is still true, but the server serves requests
// under a different route prefix. For example for use with `kubectl proxy`.
RoutePrefix string `json:"routePrefix,omitempty"`
// QuerySpec defines the query command line flags when starting Prometheus.
Query *QuerySpec `json:"query,omitempty"`
// Storage spec to specify how storage shall be used.
Storage *StorageSpec `json:"storage,omitempty"`
// A selector to select which PrometheusRules to mount for loading alerting
// rules from. Until (excluding) Prometheus Operator v0.24.0 Prometheus
// Operator will migrate any legacy rule ConfigMaps to PrometheusRule custom
// resources selected by RuleSelector. Make sure it does not match any config
// maps that you do not want to be migrated.
RuleSelector *metav1.LabelSelector `json:"ruleSelector,omitempty"`
// Namespaces to be selected for PrometheusRules discovery. If unspecified, only
// the same namespace as the Prometheus object is in is used.
RuleNamespaceSelector *metav1.LabelSelector `json:"ruleNamespaceSelector,omitempty"`
// Define details regarding alerting.
Alerting *AlertingSpec `json:"alerting,omitempty"`
// Define resources requests and limits for single Pods.
Resources v1.ResourceRequirements `json:"resources,omitempty"`
// Define which Nodes the Pods are scheduled on.
NodeSelector map[string]string `json:"nodeSelector,omitempty"`
// ServiceAccountName is the name of the ServiceAccount to use to run the
// Prometheus Pods.
ServiceAccountName string `json:"serviceAccountName,omitempty"`
// Secrets is a list of Secrets in the same namespace as the Prometheus
// object, which shall be mounted into the Prometheus Pods.
// The Secrets are mounted into /etc/prometheus/secrets/<secret-name>.
Secrets []string `json:"secrets,omitempty"`
// ConfigMaps is a list of ConfigMaps in the same namespace as the Prometheus
// object, which shall be mounted into the Prometheus Pods.
// The ConfigMaps are mounted into /etc/prometheus/configmaps/<configmap-name>.
ConfigMaps []string `json:"configMaps,omitempty"`
// If specified, the pod's scheduling constraints.
Affinity *v1.Affinity `json:"affinity,omitempty"`
// If specified, the pod's tolerations.
Tolerations []v1.Toleration `json:"tolerations,omitempty"`
// If specified, the remote_write spec. This is an experimental feature, it may change in any upcoming release in a breaking way.
RemoteWrite []RemoteWriteSpec `json:"remoteWrite,omitempty"`
// If specified, the remote_read spec. This is an experimental feature, it may change in any upcoming release in a breaking way.
RemoteRead []RemoteReadSpec `json:"remoteRead,omitempty"`
// SecurityContext holds pod-level security attributes and common container settings.
// This defaults to non root user with uid 1000 and gid 2000 for Prometheus >v2.0 and
// default PodSecurityContext for other versions.
SecurityContext *v1.PodSecurityContext `json:"securityContext,omitempty"`
// ListenLocal makes the Prometheus server listen on loopback, so that it
// does not bind against the Pod IP.
ListenLocal bool `json:"listenLocal,omitempty"`
// Containers allows injecting additional containers. This is meant to
// allow adding an authentication proxy to a Prometheus pod.
Containers []v1.Container `json:"containers,omitempty"`
// AdditionalScrapeConfigs allows specifying a key of a Secret containing
// additional Prometheus scrape configurations. Scrape configurations
// specified are appended to the configurations generated by the Prometheus
// Operator. Job configurations specified must have the form as specified
// in the official Prometheus documentation:
// https://prometheus.io/docs/prometheus/latest/configuration/configuration/#<scrape_config>.
// As scrape configs are appended, the user is responsible to make sure it
// is valid. Note that using this feature may expose the possibility to
// break upgrades of Prometheus. It is advised to review Prometheus release
// notes to ensure that no incompatible scrape configs are going to break
// Prometheus after the upgrade.
AdditionalScrapeConfigs *v1.SecretKeySelector `json:"additionalScrapeConfigs,omitempty"`
// AdditionalAlertRelabelConfigs allows specifying a key of a Secret containing
// additional Prometheus alert relabel configurations. Alert relabel configurations
// specified are appended to the configurations generated by the Prometheus
// Operator. Alert relabel configurations specified must have the form as specified
// in the official Prometheus documentation:
// https://prometheus.io/docs/prometheus/latest/configuration/configuration/#alert_relabel_configs.
// As alert relabel configs are appended, the user is responsible to make sure it
// is valid. Note that using this feature may expose the possibility to
// break upgrades of Prometheus. It is advised to review Prometheus release
// notes to ensure that no incompatible alert relabel configs are going to break
// Prometheus after the upgrade.
AdditionalAlertRelabelConfigs *v1.SecretKeySelector `json:"additionalAlertRelabelConfigs,omitempty"`
// AdditionalAlertManagerConfigs allows specifying a key of a Secret containing
// additional Prometheus AlertManager configurations. AlertManager configurations
// specified are appended to the configurations generated by the Prometheus
// Operator. Job configurations specified must have the form as specified
// in the official Prometheus documentation:
// https://prometheus.io/docs/prometheus/latest/configuration/configuration/#<alertmanager_config>.
// As AlertManager configs are appended, the user is responsible to make sure it
// is valid. Note that using this feature may expose the possibility to
// break upgrades of Prometheus. It is advised to review Prometheus release
// notes to ensure that no incompatible AlertManager configs are going to break
// Prometheus after the upgrade.
AdditionalAlertManagerConfigs *v1.SecretKeySelector `json:"additionalAlertManagerConfigs,omitempty"`
// APIServerConfig allows specifying a host and auth methods to access apiserver.
// If left empty, Prometheus is assumed to run inside of the cluster
// and will discover API servers automatically and use the pod's CA certificate
// and bearer token file at /var/run/secrets/kubernetes.io/serviceaccount/.
APIServerConfig *APIServerConfig `json:"apiserverConfig,omitempty"`
// Thanos configuration allows configuring various aspects of a Prometheus
// server in a Thanos environment.
//
// This section is experimental, it may change significantly without
// deprecation notice in any release.
//
// This is experimental and may change significantly without backward
// compatibility in any release.
Thanos *ThanosSpec `json:"thanos,omitempty"`
// Priority class assigned to the Pods
PriorityClassName string `json:"priorityClassName,omitempty"`
}
// PrometheusStatus is the most recent observed status of the Prometheus cluster. Read-only. Not
// included when requesting from the apiserver, only from the Prometheus
// Operator API itself. More info:
// https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#spec-and-status
// +k8s:openapi-gen=true
type PrometheusStatus struct {
	// Represents whether any actions on the underlying managed objects are
	// being performed. Only delete actions will be performed.
Paused bool `json:"paused"`
// Total number of non-terminated pods targeted by this Prometheus deployment
// (their labels match the selector).
Replicas int32 `json:"replicas"`
// Total number of non-terminated pods targeted by this Prometheus deployment
// that have the desired version spec.
UpdatedReplicas int32 `json:"updatedReplicas"`
// Total number of available pods (ready for at least minReadySeconds)
// targeted by this Prometheus deployment.
AvailableReplicas int32 `json:"availableReplicas"`
// Total number of unavailable pods targeted by this Prometheus deployment.
UnavailableReplicas int32 `json:"unavailableReplicas"`
}
// AlertingSpec defines parameters for alerting configuration of Prometheus servers.
// +k8s:openapi-gen=true
type AlertingSpec struct {
// AlertmanagerEndpoints Prometheus should fire alerts against.
Alertmanagers []AlertmanagerEndpoints `json:"alertmanagers"`
}
// StorageSpec defines the configured storage for a group Prometheus servers.
// If neither `emptyDir` nor `volumeClaimTemplate` is specified, then by default an [EmptyDir](https://kubernetes.io/docs/concepts/storage/volumes/#emptydir) will be used.
// +k8s:openapi-gen=true
type StorageSpec struct {
// EmptyDirVolumeSource to be used by the Prometheus StatefulSets. If specified, used in place of any volumeClaimTemplate. More
// info: https://kubernetes.io/docs/concepts/storage/volumes/#emptydir
EmptyDir *v1.EmptyDirVolumeSource `json:"emptyDir,omitempty"`
// A PVC spec to be used by the Prometheus StatefulSets.
VolumeClaimTemplate v1.PersistentVolumeClaim `json:"volumeClaimTemplate,omitempty"`
}
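// exampleEphemeralStorageSpec is an illustrative sketch only (not part of the
// original file): it builds a StorageSpec that falls back to an EmptyDir
// volume, i.e. data is lost whenever the Prometheus pod is rescheduled.
func exampleEphemeralStorageSpec() *StorageSpec {
	return &StorageSpec{
		EmptyDir: &v1.EmptyDirVolumeSource{},
	}
}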
// QuerySpec defines the query command line flags when starting Prometheus.
// +k8s:openapi-gen=true
type QuerySpec struct {
// The delta difference allowed for retrieving metrics during expression evaluations.
LookbackDelta *string `json:"lookbackDelta,omitempty"`
// Number of concurrent queries that can be run at once.
MaxConcurrency *int32 `json:"maxConcurrency,omitempty"`
// Maximum time a query may take before being aborted.
Timeout *string `json:"timeout,omitempty"`
}
// ThanosSpec defines parameters for a Prometheus server within a Thanos deployment.
// +k8s:openapi-gen=true
type ThanosSpec struct {
// Peers is a DNS name for Thanos to discover peers through.
Peers *string `json:"peers,omitempty"`
// Image if specified has precedence over baseImage, tag and sha
// combinations. Specifying the version is still necessary to ensure the
// Prometheus Operator knows what version of Thanos is being
// configured.
Image *string `json:"image,omitempty"`
// Version describes the version of Thanos to use.
Version *string `json:"version,omitempty"`
// Tag of Thanos sidecar container image to be deployed. Defaults to the value of `version`.
// Version is ignored if Tag is set.
Tag *string `json:"tag,omitempty"`
// SHA of Thanos container image to be deployed. Defaults to the value of `version`.
// Similar to a tag, but the SHA explicitly deploys an immutable container image.
// Version and Tag are ignored if SHA is set.
SHA *string `json:"sha,omitempty"`
// Thanos base image if other than default.
BaseImage *string `json:"baseImage,omitempty"`
// Resources defines the resource requirements for the Thanos sidecar.
// If not provided, no requests/limits will be set
Resources v1.ResourceRequirements `json:"resources,omitempty"`
// Deprecated: GCS should be configured with an ObjectStorageConfig secret
// starting with Thanos v0.2.0. This field will be removed.
GCS *ThanosGCSSpec `json:"gcs,omitempty"`
// Deprecated: S3 should be configured with an ObjectStorageConfig secret
// starting with Thanos v0.2.0. This field will be removed.
S3 *ThanosS3Spec `json:"s3,omitempty"`
// ObjectStorageConfig configures object storage in Thanos.
ObjectStorageConfig *v1.SecretKeySelector `json:"objectStorageConfig,omitempty"`
}
// Deprecated: ThanosGCSSpec should be configured with an ObjectStorageConfig
// secret starting with Thanos v0.2.0. ThanosGCSSpec will be removed.
//
// +k8s:openapi-gen=true
type ThanosGCSSpec struct {
// Google Cloud Storage bucket name for stored blocks. If empty it won't
// store any block inside Google Cloud Storage.
Bucket *string `json:"bucket,omitempty"`
// Secret to access our Bucket.
SecretKey *v1.SecretKeySelector `json:"credentials,omitempty"`
}
// Deprecated: ThanosS3Spec should be configured with an ObjectStorageConfig
// secret starting with Thanos v0.2.0. ThanosS3Spec will be removed.
//
// +k8s:openapi-gen=true
type ThanosS3Spec struct {
// S3-Compatible API bucket name for stored blocks.
Bucket *string `json:"bucket,omitempty"`
// S3-Compatible API endpoint for stored blocks.
Endpoint *string `json:"endpoint,omitempty"`
// AccessKey for an S3-Compatible API.
AccessKey *v1.SecretKeySelector `json:"accessKey,omitempty"`
// SecretKey for an S3-Compatible API.
SecretKey *v1.SecretKeySelector `json:"secretKey,omitempty"`
// Whether to use an insecure connection with an S3-Compatible API.
Insecure *bool `json:"insecure,omitempty"`
// Whether to use S3 Signature Version 2; otherwise Signature Version 4 will be used.
SignatureVersion2 *bool `json:"signatureVersion2,omitempty"`
// Whether to use Server Side Encryption
EncryptSSE *bool `json:"encryptsse,omitempty"`
}
// RemoteWriteSpec defines the remote_write configuration for prometheus.
// +k8s:openapi-gen=true
type RemoteWriteSpec struct {
//The URL of the endpoint to send samples to.
URL string `json:"url"`
//Timeout for requests to the remote write endpoint.
RemoteTimeout string `json:"remoteTimeout,omitempty"`
//The list of remote write relabel configurations.
WriteRelabelConfigs []RelabelConfig `json:"writeRelabelConfigs,omitempty"`
//BasicAuth for the URL.
BasicAuth *BasicAuth `json:"basicAuth,omitempty"`
// File to read bearer token for remote write.
BearerToken string `json:"bearerToken,omitempty"`
// File to read bearer token for remote write.
BearerTokenFile string `json:"bearerTokenFile,omitempty"`
// TLS Config to use for remote write.
TLSConfig *TLSConfig `json:"tlsConfig,omitempty"`
//Optional ProxyURL
ProxyURL string `json:"proxyUrl,omitempty"`
// QueueConfig allows tuning of the remote write queue parameters.
QueueConfig *QueueConfig `json:"queueConfig,omitempty"`
}
// QueueConfig allows the tuning of remote_write queue_config parameters. This object
// is referenced in the RemoteWriteSpec object.
// +k8s:openapi-gen=true
type QueueConfig struct {
// Capacity is the number of samples to buffer per shard before we start dropping them.
Capacity int `json:"capacity,omitempty"`
// MinShards is the minimum number of shards, i.e. amount of concurrency.
MinShards int `json:"minShards,omitempty"`
// MaxShards is the maximum number of shards, i.e. amount of concurrency.
MaxShards int `json:"maxShards,omitempty"`
// MaxSamplesPerSend is the maximum number of samples per send.
MaxSamplesPerSend int `json:"maxSamplesPerSend,omitempty"`
// BatchSendDeadline is the maximum time a sample will wait in buffer.
BatchSendDeadline string `json:"batchSendDeadline,omitempty"`
// MaxRetries is the maximum number of times to retry a batch on recoverable errors.
MaxRetries int `json:"maxRetries,omitempty"`
// MinBackoff is the initial retry delay. Gets doubled for every retry.
MinBackoff string `json:"minBackoff,omitempty"`
// MaxBackoff is the maximum retry delay.
MaxBackoff string `json:"maxBackoff,omitempty"`
}
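// exampleRemoteWriteWithQueue is an illustrative sketch only (not part of the
// original file): it shows how the RemoteWriteSpec and QueueConfig types above
// compose. The endpoint URL and queue values are placeholders, not recommendations.
func exampleRemoteWriteWithQueue() RemoteWriteSpec {
	return RemoteWriteSpec{
		URL: "http://remote-storage.example.com/api/v1/write",
		QueueConfig: &QueueConfig{
			Capacity:          2500,
			MaxSamplesPerSend: 500,
			BatchSendDeadline: "5s",
		},
	}
}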
// RemoteReadSpec defines the remote_read configuration for prometheus.
// +k8s:openapi-gen=true
type RemoteReadSpec struct {
	//The URL of the endpoint to read samples from.
URL string `json:"url"`
//An optional list of equality matchers which have to be present
// in a selector to query the remote read endpoint.
RequiredMatchers map[string]string `json:"requiredMatchers,omitempty"`
//Timeout for requests to the remote read endpoint.
RemoteTimeout string `json:"remoteTimeout,omitempty"`
//Whether reads should be made for queries for time ranges that
// the local storage should have complete data for.
ReadRecent bool `json:"readRecent,omitempty"`
//BasicAuth for the URL.
BasicAuth *BasicAuth `json:"basicAuth,omitempty"`
// bearer token for remote read.
BearerToken string `json:"bearerToken,omitempty"`
// File to read bearer token for remote read.
BearerTokenFile string `json:"bearerTokenFile,omitempty"`
// TLS Config to use for remote read.
TLSConfig *TLSConfig `json:"tlsConfig,omitempty"`
//Optional ProxyURL
ProxyURL string `json:"proxyUrl,omitempty"`
}
// RelabelConfig allows dynamic rewriting of the label set, being applied to samples before ingestion.
// It defines `<metric_relabel_configs>`-section of Prometheus configuration.
// More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs
// +k8s:openapi-gen=true
type RelabelConfig struct {
//The source labels select values from existing labels. Their content is concatenated
//using the configured separator and matched against the configured regular expression
//for the replace, keep, and drop actions.
SourceLabels []string `json:"sourceLabels,omitempty"`
//Separator placed between concatenated source label values. default is ';'.
Separator string `json:"separator,omitempty"`
//Label to which the resulting value is written in a replace action.
//It is mandatory for replace actions. Regex capture groups are available.
TargetLabel string `json:"targetLabel,omitempty"`
	//Regular expression against which the extracted value is matched. Default is '(.*)'
Regex string `json:"regex,omitempty"`
// Modulus to take of the hash of the source label values.
Modulus uint64 `json:"modulus,omitempty"`
//Replacement value against which a regex replace is performed if the
//regular expression matches. Regex capture groups are available. Default is '$1'
Replacement string `json:"replacement,omitempty"`
// Action to perform based on regex matching. Default is 'replace'
Action string `json:"action,omitempty"`
}
// APIServerConfig defines a host and auth methods to access apiserver.
// More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#kubernetes_sd_config
// +k8s:openapi-gen=true
type APIServerConfig struct {
// Host of apiserver.
// A valid string consisting of a hostname or IP followed by an optional port number
Host string `json:"host"`
// BasicAuth allow an endpoint to authenticate over basic authentication
BasicAuth *BasicAuth `json:"basicAuth,omitempty"`
// Bearer token for accessing apiserver.
BearerToken string `json:"bearerToken,omitempty"`
// File to read bearer token for accessing apiserver.
BearerTokenFile string `json:"bearerTokenFile,omitempty"`
// TLS Config to use for accessing apiserver.
TLSConfig *TLSConfig `json:"tlsConfig,omitempty"`
}
// AlertmanagerEndpoints defines a selection of a single Endpoints object
// containing alertmanager IPs to fire alerts against.
// +k8s:openapi-gen=true
type AlertmanagerEndpoints struct {
// Namespace of Endpoints object.
Namespace string `json:"namespace"`
// Name of Endpoints object in Namespace.
Name string `json:"name"`
// Port the Alertmanager API is exposed on.
Port intstr.IntOrString `json:"port"`
// Scheme to use when firing alerts.
Scheme string `json:"scheme,omitempty"`
// Prefix for the HTTP path alerts are pushed to.
PathPrefix string `json:"pathPrefix,omitempty"`
// TLS Config to use for alertmanager connection.
TLSConfig *TLSConfig `json:"tlsConfig,omitempty"`
// BearerTokenFile to read from filesystem to use when authenticating to
// Alertmanager.
BearerTokenFile string `json:"bearerTokenFile,omitempty"`
}
// ServiceMonitor defines monitoring for a set of services.
// +genclient
// +k8s:openapi-gen=true
type ServiceMonitor struct {
metav1.TypeMeta `json:",inline"`
// Standard object’s metadata. More info:
// https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#metadata
// +k8s:openapi-gen=false
metav1.ObjectMeta `json:"metadata,omitempty"`
	// Specification of desired Service selection for target discovery by
// Prometheus.
Spec ServiceMonitorSpec `json:"spec"`
}
// ServiceMonitorSpec contains specification parameters for a ServiceMonitor.
// +k8s:openapi-gen=true
type ServiceMonitorSpec struct {
// The label to use to retrieve the job name from.
JobLabel string `json:"jobLabel,omitempty"`
// TargetLabels transfers labels on the Kubernetes Service onto the target.
TargetLabels []string `json:"targetLabels,omitempty"`
// PodTargetLabels transfers labels on the Kubernetes Pod onto the target.
PodTargetLabels []string `json:"podTargetLabels,omitempty"`
// A list of endpoints allowed as part of this ServiceMonitor.
Endpoints []Endpoint `json:"endpoints"`
// Selector to select Endpoints objects.
Selector metav1.LabelSelector `json:"selector"`
// Selector to select which namespaces the Endpoints objects are discovered from.
NamespaceSelector NamespaceSelector `json:"namespaceSelector,omitempty"`
// SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
SampleLimit uint64 `json:"sampleLimit,omitempty"`
}
// Endpoint defines a scrapeable endpoint serving Prometheus metrics.
// +k8s:openapi-gen=true
type Endpoint struct {
// Name of the service port this endpoint refers to. Mutually exclusive with targetPort.
Port string `json:"port,omitempty"`
// Name or number of the target port of the endpoint. Mutually exclusive with port.
TargetPort *intstr.IntOrString `json:"targetPort,omitempty"`
// HTTP path to scrape for metrics.
Path string `json:"path,omitempty"`
// HTTP scheme to use for scraping.
Scheme string `json:"scheme,omitempty"`
// Optional HTTP URL parameters
Params map[string][]string `json:"params,omitempty"`
// Interval at which metrics should be scraped
Interval string `json:"interval,omitempty"`
// Timeout after which the scrape is ended
ScrapeTimeout string `json:"scrapeTimeout,omitempty"`
// TLS configuration to use when scraping the endpoint
TLSConfig *TLSConfig `json:"tlsConfig,omitempty"`
// File to read bearer token for scraping targets.
BearerTokenFile string `json:"bearerTokenFile,omitempty"`
// HonorLabels chooses the metric's labels on collisions with target labels.
HonorLabels bool `json:"honorLabels,omitempty"`
// BasicAuth allows an endpoint to authenticate over basic authentication
// More info: https://prometheus.io/docs/operating/configuration/#endpoints
BasicAuth *BasicAuth `json:"basicAuth,omitempty"`
// MetricRelabelConfigs to apply to samples before ingestion.
MetricRelabelConfigs []*RelabelConfig `json:"metricRelabelings,omitempty"`
// RelabelConfigs to apply to samples before ingestion.
// More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#<relabel_config>
RelabelConfigs []*RelabelConfig `json:"relabelings,omitempty"`
// ProxyURL, e.g. http://proxyserver:2195, directs scrapes to be proxied through this endpoint.
ProxyURL *string `json:"proxyUrl,omitempty"`
}
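// exampleServiceMonitor is an illustrative sketch, not part of the original
// file. It assembles a minimal ServiceMonitor from the types above: one
// endpoint scraped on the named port "metrics", selecting Services labeled
// app=example; the NamespaceSelector is left at its zero value. All names are
// hypothetical.
func exampleServiceMonitor() ServiceMonitor {
  return ServiceMonitor{
    ObjectMeta: metav1.ObjectMeta{Name: "example", Namespace: "monitoring"},
    Spec: ServiceMonitorSpec{
      Selector: metav1.LabelSelector{
        MatchLabels: map[string]string{"app": "example"},
      },
      Endpoints: []Endpoint{
        {Port: "metrics", Path: "/metrics", Interval: "30s"},
      },
    },
  }
}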
// BasicAuth allows an endpoint to authenticate over basic authentication
// More info: https://prometheus.io/docs/operating/configuration/#endpoints
// +k8s:openapi-gen=true
type BasicAuth struct {
// The secret that contains the username for authentication
Username v1.SecretKeySelector `json:"username,omitempty"`
// The secret that contains the password for authentication
Password v1.SecretKeySelector `json:"password,omitempty"`
}
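// exampleBasicAuth is an illustrative sketch, not part of the original file.
// It shows how the username and password are referenced as keys of a Kubernetes
// Secret via v1.SecretKeySelector; the Secret name and keys are hypothetical.
func exampleBasicAuth() *BasicAuth {
  return &BasicAuth{
    Username: v1.SecretKeySelector{
      LocalObjectReference: v1.LocalObjectReference{Name: "scrape-credentials"},
      Key:                  "user",
    },
    Password: v1.SecretKeySelector{
      LocalObjectReference: v1.LocalObjectReference{Name: "scrape-credentials"},
      Key:                  "password",
    },
  }
}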
// TLSConfig specifies TLS configuration parameters.
// +k8s:openapi-gen=true
type TLSConfig struct {
// The CA cert to use for the targets.
CAFile string `json:"caFile,omitempty"`
// The client cert file for the targets.
CertFile string `json:"certFile,omitempty"`
// The client key file for the targets.
KeyFile string `json:"keyFile,omitempty"`
// Used to verify the hostname for the targets.
ServerName string `json:"serverName,omitempty"`
// Disable target certificate validation.
InsecureSkipVerify bool `json:"insecureSkipVerify,omitempty"`
}
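// exampleTLSConfig is an illustrative sketch, not part of the original file.
// The file paths are hypothetical and stand for files available inside the
// scraping container.
func exampleTLSConfig() *TLSConfig {
  return &TLSConfig{
    CAFile:     "/etc/prometheus/tls/ca.crt",
    CertFile:   "/etc/prometheus/tls/tls.crt",
    KeyFile:    "/etc/prometheus/tls/tls.key",
    ServerName: "example.internal",
  }
}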
// ServiceMonitorList is a list of ServiceMonitors.
// +k8s:openapi-gen=true
type ServiceMonitorList struct {
metav1.TypeMeta `json:",inline"`
// Standard list metadata
// More info: https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#metadata
metav1.ListMeta `json:"metadata,omitempty"`
// List of ServiceMonitors
Items []*ServiceMonitor `json:"items"`
}
// PrometheusRuleList is a list of PrometheusRules.
// +k8s:openapi-gen=true
type PrometheusRuleList struct {
metav1.TypeMeta `json:",inline"`
// Standard list metadata
// More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
metav1.ListMeta `json:"metadata,omitempty"`
// List of Rules
Items []*PrometheusRule `json:"items"`
}
// PrometheusRule defines alerting rules for a Prometheus instance
// +genclient
// +k8s:openapi-gen=true
type PrometheusRule struct {
metav1.TypeMeta `json:",inline"`
// Standard object’s metadata. More info:
// http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
metav1.ObjectMeta `json:"metadata,omitempty"`
// Specification of desired alerting rule definitions for Prometheus.
Spec PrometheusRuleSpec `json:"spec"`
}
// PrometheusRuleSpec contains specification parameters for a Rule.
// +k8s:openapi-gen=true
type PrometheusRuleSpec struct {
// Content of Prometheus rule file
Groups []RuleGroup `json:"groups,omitempty"`
}
// RuleGroup and Rule are copied instead of vendored because the
// upstream Prometheus struct definitions don't have json struct tags.
// RuleGroup is a list of sequentially evaluated recording and alerting rules.
// +k8s:openapi-gen=true
type RuleGroup struct {
Name string `json:"name"`
Interval string `json:"interval,omitempty"`
Rules []Rule `json:"rules"`
}
// Rule describes an alerting or recording rule.
// +k8s:openapi-gen=true
type Rule struct {
Record string `json:"record,omitempty"`
Alert string `json:"alert,omitempty"`
Expr intstr.IntOrString `json:"expr"`
For string `json:"for,omitempty"`
Labels map[string]string `json:"labels,omitempty"`
Annotations map[string]string `json:"annotations,omitempty"`
}
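// exampleRuleGroup is an illustrative sketch, not part of the original file.
// It shows how Expr, declared above as an intstr.IntOrString, carries a PromQL
// string via intstr.FromString. The group name, expression, and labels are
// hypothetical.
func exampleRuleGroup() RuleGroup {
  return RuleGroup{
    Name:     "example.rules",
    Interval: "1m",
    Rules: []Rule{
      {
        Alert:       "TargetDown",
        Expr:        intstr.FromString("up == 0"),
        For:         "10m",
        Labels:      map[string]string{"severity": "warning"},
        Annotations: map[string]string{"summary": "A scrape target is down"},
      },
    },
  }
}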
// Alertmanager describes an Alertmanager cluster.
// +genclient
// +k8s:openapi-gen=true
type Alertmanager struct {
metav1.TypeMeta `json:",inline"`
// Standard object’s metadata. More info:
// https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#metadata
// +k8s:openapi-gen=false
metav1.ObjectMeta `json:"metadata,omitempty"`
// Specification of the desired behavior of the Alertmanager cluster. More info:
// https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#spec-and-status
Spec AlertmanagerSpec `json:"spec"`
// Most recent observed status of the Alertmanager cluster. Read-only. Not
// included when requesting from the apiserver, only from the Prometheus
// Operator API itself. More info:
// https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#spec-and-status
Status *AlertmanagerStatus `json:"status,omitempty"`
}
// AlertmanagerSpec is a specification of the desired behavior of the Alertmanager cluster. More info:
// https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#spec-and-status
// +k8s:openapi-gen=true
type AlertmanagerSpec struct {
// Standard object’s metadata. More info:
// https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#metadata
// Metadata Labels and Annotations gets propagated to the prometheus pods.
PodMetadata *metav1.ObjectMeta `json:"podMetadata,omitempty"`
// Image if specified has precedence over baseImage, tag and sha
// combinations. Specifying the version is still necessary to ensure the
// Prometheus Operator knows what version of Alertmanager is being
// configured.
Image *string `json:"image,omitempty"`
// Version the cluster should be on.
Version string `json:"version,omitempty"`
// Tag of Alertmanager container image to be deployed. Defaults to the value of `version`.
// Version is ignored if Tag is set.
Tag string `json:"tag,omitempty"`
// SHA of Alertmanager container image to be deployed. Defaults to the value of `version`.
// Similar to a tag, but the SHA explicitly deploys an immutable container image.
// Version and Tag are ignored if SHA is set.
SHA string `json:"sha,omitempty"`
// Base image that is used to deploy pods, without tag.
BaseImage string `json:"baseImage,omitempty"`
// An optional list of references to secrets in the same namespace
// to use for pulling Prometheus and Alertmanager images from registries.
// See http://kubernetes.io/docs/user-guide/images#specifying-imagepullsecrets-on-a-pod
ImagePullSecrets []v1.LocalObjectReference `json:"imagePullSecrets,omitempty"`
// Secrets is a list of Secrets in the same namespace as the Alertmanager
// object, which shall be mounted into the Alertmanager Pods.
// The Secrets are mounted into /etc/alertmanager/secrets/<secret-name>.
Secrets []string `json:"secrets,omitempty"`
// ConfigMaps is a list of ConfigMaps in the same namespace as the Alertmanager
// object, which shall be mounted into the Alertmanager Pods.
// The ConfigMaps are mounted into /etc/alertmanager/configmaps/<configmap-name>.
ConfigMaps []string `json:"configMaps,omitempty"`
// Log level for Alertmanager to be configured with.
LogLevel string `json:"logLevel,omitempty"`
// Size is the expected size of the alertmanager cluster. The controller will
// eventually make the size of the running cluster equal to the expected
// size.
Replicas *int32 `json:"replicas,omitempty"`
// Time duration Alertmanager shall retain data for. Default is '120h',
// and must match the regular expression `[0-9]+(ms|s|m|h)` (milliseconds, seconds, minutes, hours).
Retention string `json:"retention,omitempty"`
// Storage is the definition of how storage will be used by the Alertmanager
// instances.
Storage *StorageSpec `json:"storage,omitempty"`
// The external URL the Alertmanager instances will be available under. This is
// necessary to generate correct URLs, and is required if Alertmanager is not
// served from the root of a DNS name.
ExternalURL string `json:"externalUrl,omitempty"`
// The route prefix Alertmanager registers HTTP handlers for. This is useful
// if using ExternalURL and a proxy is rewriting HTTP routes of a request,
// and the actual ExternalURL is still true, but the server serves requests
// under a different route prefix. For example for use with `kubectl proxy`.
RoutePrefix string `json:"routePrefix,omitempty"`
// If set to true all actions on the underlying managed objects are not
// going to be performed, except for delete actions.
Paused bool `json:"paused,omitempty"`
// Define which Nodes the Pods are scheduled on.
NodeSelector map[string]string `json:"nodeSelector,omitempty"`
// Define resources requests and limits for single Pods.
Resources v1.ResourceRequirements `json:"resources,omitempty"`
// If specified, the pod's scheduling constraints.
Affinity *v1.Affinity `json:"affinity,omitempty"`
// If specified, the pod's tolerations.
Tolerations []v1.Toleration `json:"tolerations,omitempty"`
// SecurityContext holds pod-level security attributes and common container settings.
// This defaults to non root user with uid 1000 and gid 2000.
SecurityContext *v1.PodSecurityContext `json:"securityContext,omitempty"`
// ServiceAccountName is the name of the ServiceAccount to use to run the
// Alertmanager Pods.
ServiceAccountName string `json:"serviceAccountName,omitempty"`
// ListenLocal makes the Alertmanager server listen on loopback, so that it
// does not bind against the Pod IP. Note this is only for the Alertmanager
// UI, not the gossip communication.
ListenLocal bool `json:"listenLocal,omitempty"`
// Containers allows injecting additional containers. This is meant to
// allow adding an authentication proxy to an Alertmanager pod.
Containers []v1.Container `json:"containers,omitempty"`
// Priority class assigned to the Pods
PriorityClassName string `json:"priorityClassName,omitempty"`
// AdditionalPeers allows injecting a set of additional Alertmanagers to peer with to form a highly available cluster.
AdditionalPeers []string `json:"additionalPeers,omitempty"`
}
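// exampleAlertmanagerSpec is an illustrative sketch, not part of the original
// file. It exercises a few of the fields above, in particular the *int32
// Replicas pointer; the version and retention values are hypothetical.
func exampleAlertmanagerSpec() AlertmanagerSpec {
  replicas := int32(3)
  return AlertmanagerSpec{
    Version:   "v0.15.3",
    Replicas:  &replicas, // a pointer distinguishes "unset" from an explicit 0
    Retention: "120h",    // must match [0-9]+(ms|s|m|h) as documented above
  }
}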
// AlertmanagerList is a list of Alertmanagers.
// +k8s:openapi-gen=true
type AlertmanagerList struct {
metav1.TypeMeta `json:",inline"`
// Standard list metadata
// More info: https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#metadata
metav1.ListMeta `json:"metadata,omitempty"`
// List of Alertmanagers
Items []Alertmanager `json:"items"`
}
// AlertmanagerStatus is the most recent observed status of the Alertmanager cluster. Read-only. Not
// included when requesting from the apiserver, only from the Prometheus
// Operator API itself. More info:
// https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#spec-and-status
// +k8s:openapi-gen=true
type AlertmanagerStatus struct {
// Represents whether any actions on the underlying managed objects are
// being performed. Only delete actions will be performed.
Paused bool `json:"paused"`
// Total number of non-terminated pods targeted by this Alertmanager
// cluster (their labels match the selector).
Replicas int32 `json:"replicas"`
// Total number of non-terminated pods targeted by this Alertmanager
// cluster that have the desired version spec.
UpdatedReplicas int32 `json:"updatedReplicas"`
// Total number of available pods (ready for at least minReadySeconds)
// targeted by this Alertmanager cluster.
AvailableReplicas int32 `json:"availableReplicas"`
// Total number of unavailable pods targeted by this Alertmanager cluster.
UnavailableReplicas int32 `json:"unavailableReplicas"`
}
// NamespaceSelector is a selector for selecting either all namespaces or a
// list of namespaces.
// +k8s:openapi-gen=true
type NamespaceSelector struct {
// Boolean describing whether all namespaces are selected in contrast to a
// list restricting them.
Any bool `json:"any,omitempty"`
// List of namespace names.
MatchNames []string `json:"matchNames,omitempty"`
// TODO(fabxc): this should embed metav1.LabelSelector eventually.
// Currently the selector is only used for namespaces which require more complex
// implementation to support label selections.
}
// /--rules.*/ command-line arguments
// +k8s:openapi-gen=true
type Rules struct {
Alert RulesAlert `json:"alert,omitempty"`
}
// /--rules.alert.*/ command-line arguments
// +k8s:openapi-gen=true
type RulesAlert struct {
// Max time to tolerate prometheus outage for restoring 'for' state of alert.
ForOutageTolerance string `json:"forOutageTolerance,omitempty"`
// Minimum duration between alert and restored 'for' state.
// This is maintained only for alerts with configured 'for' time greater than grace period.
ForGracePeriod string `json:"forGracePeriod,omitempty"`
// Minimum amount of time to wait before resending an alert to Alertmanager.
ResendDelay string `json:"resendDelay,omitempty"`
}
// DeepCopyObject implements the runtime.Object interface.
func (l *Alertmanager) DeepCopyObject() runtime.Object {
return l.DeepCopy()
}
// DeepCopyObject implements the runtime.Object interface.
func (l *AlertmanagerList) DeepCopyObject() runtime.Object {
return l.DeepCopy()
}
// DeepCopyObject implements the runtime.Object interface.
func (l *Prometheus) DeepCopyObject() runtime.Object {
return l.DeepCopy()
}
// DeepCopyObject implements the runtime.Object interface.
func (l *PrometheusList) DeepCopyObject() runtime.Object {
return l.DeepCopy()
}
// DeepCopyObject implements the runtime.Object interface.
func (l *ServiceMonitor) DeepCopyObject() runtime.Object {
return l.DeepCopy()
}
// DeepCopyObject implements the runtime.Object interface.
func (l *ServiceMonitorList) DeepCopyObject() runtime.Object {
return l.DeepCopy()
}
// DeepCopyObject implements the runtime.Object interface.
func (f *PrometheusRule) DeepCopyObject() runtime.Object {
return f.DeepCopy()
}
// DeepCopyObject implements the runtime.Object interface.
func (l *PrometheusRuleList) DeepCopyObject() runtime.Object {
return l.DeepCopy()
}
| 1 | 12,182 | This should be a bool | prometheus-operator-prometheus-operator | go |
@@ -5129,12 +5129,12 @@ SwiftASTContext::GetReferentType(const CompilerType &compiler_type) {
if (compiler_type.IsValid() &&
llvm::dyn_cast_or_null<SwiftASTContext>(compiler_type.GetTypeSystem())) {
- swift::CanType swift_can_type(GetCanonicalSwiftType(compiler_type));
- swift::TypeBase *swift_type = swift_can_type.getPointer();
- if (swift_type && llvm::isa<swift::WeakStorageType>(swift_type))
+ swift::Type swift_type(GetSwiftType(compiler_type));
+ swift::TypeBase *swift_typebase = swift_type.getPointer();
+ if (swift_type && llvm::isa<swift::WeakStorageType>(swift_typebase))
return compiler_type;
- auto ref_type = swift_can_type->getReferenceStorageReferent();
+ auto ref_type = swift_type->getReferenceStorageReferent();
return CompilerType(GetASTContext(), ref_type);
}
| 1 | //===-- SwiftASTContext.cpp -------------------------------------*- C++ -*-===//
//
// This source file is part of the Swift.org open source project
//
// Copyright (c) 2014 - 2016 Apple Inc. and the Swift project authors
// Licensed under Apache License v2.0 with Runtime Library Exception
//
// See https://swift.org/LICENSE.txt for license information
// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
//
//===----------------------------------------------------------------------===//
#include "lldb/Symbol/SwiftASTContext.h"
// C++ Includes
#include <mutex> // std::once
#include <queue>
#include <set>
#include <sstream>
#include "swift/ABI/MetadataValues.h"
#include "swift/AST/ASTContext.h"
#include "swift/AST/ASTMangler.h"
#include "swift/AST/DebuggerClient.h"
#include "swift/AST/Decl.h"
#include "swift/AST/DiagnosticEngine.h"
#include "swift/AST/ExistentialLayout.h"
#include "swift/AST/GenericSignature.h"
#include "swift/AST/IRGenOptions.h"
#include "swift/AST/NameLookup.h"
#include "swift/AST/SearchPathOptions.h"
#include "swift/AST/SubstitutionMap.h"
#include "swift/AST/Type.h"
#include "swift/AST/Types.h"
#include "swift/ASTSectionImporter/ASTSectionImporter.h"
#include "swift/Basic/Dwarf.h"
#include "swift/Basic/LangOptions.h"
#include "swift/Basic/Platform.h"
#include "swift/Basic/PrimarySpecificPaths.h"
#include "swift/Basic/SourceManager.h"
#include "swift/ClangImporter/ClangImporter.h"
#include "swift/ClangImporter/ClangImporterOptions.h"
#include "swift/Demangling/Demangle.h"
#include "swift/Driver/Util.h"
#include "swift/Frontend/Frontend.h"
#include "swift/Frontend/PrintingDiagnosticConsumer.h"
#include "swift/IDE/Utils.h"
#include "swift/IRGen/Linking.h"
#include "swift/SIL/SILModule.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/TargetOptions.h"
#include "clang/Driver/Driver.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/Process.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/TargetSelect.h"
#include "llvm/Support/ThreadPool.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "swift/../../lib/IRGen/FixedTypeInfo.h"
#include "swift/../../lib/IRGen/GenEnum.h"
#include "swift/../../lib/IRGen/GenHeap.h"
#include "swift/../../lib/IRGen/IRGenModule.h"
#include "swift/../../lib/IRGen/TypeInfo.h"
#include "swift/Serialization/SerializedModuleLoader.h"
#include "swift/Strings.h"
#include "Plugins/ExpressionParser/Clang/ClangHost.h"
#include "Plugins/ExpressionParser/Swift/SwiftDiagnostic.h"
#include "Plugins/ExpressionParser/Swift/SwiftUserExpression.h"
#include "lldb/Core/Debugger.h"
#include "lldb/Core/DumpDataExtractor.h"
#include "lldb/Core/Module.h"
#include "lldb/Core/ModuleSpec.h"
#include "lldb/Core/PluginManager.h"
#include "lldb/Core/Section.h"
#include "lldb/Core/StreamFile.h"
#include "lldb/Core/ThreadSafeDenseMap.h"
#include "lldb/Expression/DiagnosticManager.h"
#include "lldb/Host/Host.h"
#include "lldb/Host/HostInfo.h"
#include "lldb/Host/StringConvert.h"
#include "lldb/Symbol/ClangASTContext.h"
#include "lldb/Symbol/CompileUnit.h"
#include "lldb/Symbol/ObjectFile.h"
#include "lldb/Symbol/SymbolFile.h"
#include "lldb/Symbol/SymbolVendor.h"
#include "lldb/Target/Platform.h"
#include "lldb/Target/Process.h"
#include "lldb/Target/SwiftLanguageRuntime.h"
#include "lldb/Target/Target.h"
#include "lldb/Utility/ArchSpec.h"
#include "lldb/Utility/CleanUp.h"
#include "lldb/Utility/FileSpec.h"
#include "lldb/Utility/LLDBAssert.h"
#include "lldb/Utility/Log.h"
#include "lldb/Utility/Status.h"
#include "Plugins/Platform/MacOSX/PlatformDarwin.h"
#include "Plugins/SymbolFile/DWARF/DWARFASTParserSwift.h"
#define VALID_OR_RETURN(value) \
do { \
if (HasFatalErrors()) { \
return (value); \
} \
} while (0)
#define VALID_OR_RETURN_VOID() \
do { \
if (HasFatalErrors()) { \
return; \
} \
} while (0)
using namespace lldb;
using namespace lldb_private;
typedef lldb_private::ThreadSafeDenseMap<swift::ASTContext *, SwiftASTContext *>
ThreadSafeSwiftASTMap;
static ThreadSafeSwiftASTMap &GetASTMap() {
// The global destructor list will tear down all of the modules when the LLDB
// shared library is being unloaded and this needs to live beyond all of those
// and not be destructed before they have all gone away. So we will leak this
// list intentionally so we can avoid global destructor problems.
static ThreadSafeSwiftASTMap *g_map_ptr = NULL;
static std::once_flag g_once_flag;
std::call_once(g_once_flag, []() {
g_map_ptr = new ThreadSafeSwiftASTMap(); // NOTE: Intentional leak
});
return *g_map_ptr;
}
static inline swift::Type GetSwiftType(void *opaque_ptr) {
return swift::Type((swift::TypeBase *)opaque_ptr);
}
static inline swift::CanType GetCanonicalSwiftType(void *opaque_ptr) {
return ((swift::TypeBase *)opaque_ptr)->getCanonicalType();
}
static inline swift::Type GetSwiftType(CompilerType type) {
return swift::Type((swift::TypeBase *)type.GetOpaqueQualType());
}
static inline swift::CanType GetCanonicalSwiftType(CompilerType type) {
return ((swift::TypeBase *)type.GetOpaqueQualType())->getCanonicalType();
}
class SwiftEnumDescriptor;
typedef std::shared_ptr<SwiftEnumDescriptor> SwiftEnumDescriptorSP;
typedef llvm::DenseMap<lldb::opaque_compiler_type_t, SwiftEnumDescriptorSP>
EnumInfoCache;
typedef std::shared_ptr<EnumInfoCache> EnumInfoCacheSP;
typedef llvm::DenseMap<const swift::ASTContext *, EnumInfoCacheSP>
ASTEnumInfoCacheMap;
static EnumInfoCache *GetEnumInfoCache(const swift::ASTContext *a) {
static ASTEnumInfoCacheMap g_cache;
static std::mutex g_mutex;
std::lock_guard<std::mutex> locker(g_mutex);
ASTEnumInfoCacheMap::iterator pos = g_cache.find(a);
if (pos == g_cache.end()) {
g_cache.insert(
std::make_pair(a, std::shared_ptr<EnumInfoCache>(new EnumInfoCache())));
return g_cache.find(a)->second.get();
}
return pos->second.get();
}
namespace {
bool IsDirectory(const FileSpec &spec) {
return llvm::sys::fs::is_directory(spec.GetPath());
}
bool IsRegularFile(const FileSpec &spec) {
return llvm::sys::fs::is_regular_file(spec.GetPath());
}
}
llvm::LLVMContext &SwiftASTContext::GetGlobalLLVMContext() {
// TODO check with Sean. Do we really want this to be static across
// an LLDB managing multiple Swift processes?
static llvm::LLVMContext s_global_context;
return s_global_context;
}
llvm::ArrayRef<swift::VarDecl *> SwiftASTContext::GetStoredProperties(
swift::NominalTypeDecl *nominal) {
VALID_OR_RETURN(llvm::ArrayRef<swift::VarDecl *>());
// Check whether we already have the stored properties for this
// nominal type.
auto known = m_stored_properties.find(nominal);
if (known != m_stored_properties.end()) return known->second;
// Collect the stored properties from the AST and put them in the
// cache.
auto stored_properties = nominal->getStoredProperties();
auto &stored = m_stored_properties[nominal];
stored = std::vector<swift::VarDecl *>(stored_properties.begin(),
stored_properties.end());
return stored;
}
class SwiftEnumDescriptor {
public:
enum class Kind {
Empty, // no cases in this enum
CStyle, // no cases have payloads
AllPayload, // all cases have payloads
Mixed // some cases have payloads
};
struct ElementInfo {
lldb_private::ConstString name;
CompilerType payload_type;
bool has_payload : 1;
bool is_indirect : 1;
};
Kind GetKind() const { return m_kind; }
ConstString GetTypeName() { return m_type_name; }
virtual ElementInfo *
GetElementFromData(const lldb_private::DataExtractor &data) = 0;
virtual size_t GetNumElements() {
return GetNumElementsWithPayload() + GetNumCStyleElements();
}
virtual size_t GetNumElementsWithPayload() = 0;
virtual size_t GetNumCStyleElements() = 0;
virtual ElementInfo *GetElementWithPayloadAtIndex(size_t idx) = 0;
virtual ElementInfo *GetElementWithNoPayloadAtIndex(size_t idx) = 0;
virtual ~SwiftEnumDescriptor() = default;
static SwiftEnumDescriptor *CreateDescriptor(swift::ASTContext *ast,
swift::CanType swift_can_type,
swift::EnumDecl *enum_decl);
protected:
SwiftEnumDescriptor(swift::ASTContext *ast, swift::CanType swift_can_type,
swift::EnumDecl *enum_decl, SwiftEnumDescriptor::Kind k)
: m_kind(k), m_type_name() {
if (swift_can_type.getPointer()) {
if (auto nominal = swift_can_type->getAnyNominal()) {
swift::Identifier name(nominal->getName());
if (name.get())
m_type_name.SetCString(name.get());
}
}
}
private:
Kind m_kind;
ConstString m_type_name;
};
class SwiftEmptyEnumDescriptor : public SwiftEnumDescriptor {
public:
SwiftEmptyEnumDescriptor(swift::ASTContext *ast,
swift::CanType swift_can_type,
swift::EnumDecl *enum_decl)
: SwiftEnumDescriptor(ast, swift_can_type, enum_decl,
SwiftEnumDescriptor::Kind::Empty) {}
virtual ElementInfo *
GetElementFromData(const lldb_private::DataExtractor &data) {
return nullptr;
}
virtual size_t GetNumElementsWithPayload() { return 0; }
virtual size_t GetNumCStyleElements() { return 0; }
virtual ElementInfo *GetElementWithPayloadAtIndex(size_t idx) {
return nullptr;
}
virtual ElementInfo *GetElementWithNoPayloadAtIndex(size_t idx) {
return nullptr;
}
static bool classof(const SwiftEnumDescriptor *S) {
return S->GetKind() == SwiftEnumDescriptor::Kind::Empty;
}
virtual ~SwiftEmptyEnumDescriptor() = default;
};
namespace std {
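// Comparator so ClusteredBitVector can be used as an ordered std::map key by
// the enum descriptors below; bits are compared pairwise starting from the
// most significant end.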
template <> struct less<swift::ClusteredBitVector> {
bool operator()(const swift::ClusteredBitVector &lhs,
const swift::ClusteredBitVector &rhs) const {
int iL = lhs.size() - 1;
int iR = rhs.size() - 1;
for (; iL >= 0 && iR >= 0; --iL, --iR) {
bool bL = lhs[iL];
bool bR = rhs[iR];
if (bL and not bR)
return false;
if (bR and not bL)
return true;
}
return false;
}
};
}
static std::string Dump(const swift::ClusteredBitVector &bit_vector) {
std::string buffer;
llvm::raw_string_ostream ostream(buffer);
for (size_t i = 0; i < bit_vector.size(); i++) {
if (bit_vector[i])
ostream << '1';
else
ostream << '0';
if ((i % 4) == 3)
ostream << ' ';
}
ostream.flush();
return buffer;
}
class SwiftCStyleEnumDescriptor : public SwiftEnumDescriptor {
public:
SwiftCStyleEnumDescriptor(swift::ASTContext *ast,
swift::CanType swift_can_type,
swift::EnumDecl *enum_decl)
: SwiftEnumDescriptor(ast, swift_can_type, enum_decl,
SwiftEnumDescriptor::Kind::CStyle),
m_nopayload_elems_bitmask(), m_elements(), m_element_indexes() {
Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_TYPES));
if (log)
log->Printf("doing C-style enum layout for %s",
GetTypeName().AsCString());
SwiftASTContext *swift_ast_ctx = SwiftASTContext::GetSwiftASTContext(ast);
swift::irgen::IRGenModule &irgen_module = swift_ast_ctx->GetIRGenModule();
const swift::irgen::EnumImplStrategy &enum_impl_strategy =
swift::irgen::getEnumImplStrategy(irgen_module, swift_can_type);
llvm::ArrayRef<swift::irgen::EnumImplStrategy::Element>
elements_with_no_payload =
enum_impl_strategy.getElementsWithNoPayload();
const bool has_payload = false;
const bool is_indirect = false;
uint64_t case_counter = 0;
m_nopayload_elems_bitmask =
enum_impl_strategy.getBitMaskForNoPayloadElements();
if (log)
log->Printf("m_nopayload_elems_bitmask = %s",
Dump(m_nopayload_elems_bitmask).c_str());
for (auto enum_case : elements_with_no_payload) {
ConstString case_name(enum_case.decl->getName().str().data());
swift::ClusteredBitVector case_value =
enum_impl_strategy.getBitPatternForNoPayloadElement(enum_case.decl);
if (log)
log->Printf("case_name = %s, unmasked value = %s",
case_name.AsCString(), Dump(case_value).c_str());
case_value &= m_nopayload_elems_bitmask;
if (log)
log->Printf("case_name = %s, masked value = %s", case_name.AsCString(),
Dump(case_value).c_str());
std::unique_ptr<ElementInfo> elem_info(
new ElementInfo{case_name, CompilerType(), has_payload, is_indirect});
m_element_indexes.emplace(case_counter, elem_info.get());
case_counter++;
m_elements.emplace(case_value, std::move(elem_info));
}
}
virtual ElementInfo *
GetElementFromData(const lldb_private::DataExtractor &data) {
Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_TYPES));
if (log)
log->Printf(
"C-style enum - inspecting data to find enum case for type %s",
GetTypeName().AsCString());
swift::ClusteredBitVector current_payload;
lldb::offset_t offset = 0;
for (size_t idx = 0; idx < data.GetByteSize(); idx++) {
uint64_t byte = data.GetU8(&offset);
current_payload.add(8, byte);
}
if (log) {
log->Printf("m_nopayload_elems_bitmask = %s",
Dump(m_nopayload_elems_bitmask).c_str());
log->Printf("current_payload = %s",
Dump(current_payload).c_str());
}
if (current_payload.size() != m_nopayload_elems_bitmask.size()) {
if (log)
log->Printf("sizes don't match; getting out with an error");
return nullptr;
}
current_payload &= m_nopayload_elems_bitmask;
if (log)
log->Printf("masked current_payload = %s",
Dump(current_payload).c_str());
auto iter = m_elements.find(current_payload), end = m_elements.end();
if (iter == end) {
if (log)
log->Printf("bitmask search failed");
return nullptr;
}
if (log)
log->Printf("bitmask search success - found case %s",
iter->second.get()->name.AsCString());
return iter->second.get();
}
virtual size_t GetNumElementsWithPayload() { return 0; }
virtual size_t GetNumCStyleElements() { return m_elements.size(); }
virtual ElementInfo *GetElementWithPayloadAtIndex(size_t idx) {
return nullptr;
}
virtual ElementInfo *GetElementWithNoPayloadAtIndex(size_t idx) {
if (idx >= m_element_indexes.size())
return nullptr;
return m_element_indexes[idx];
}
static bool classof(const SwiftEnumDescriptor *S) {
return S->GetKind() == SwiftEnumDescriptor::Kind::CStyle;
}
virtual ~SwiftCStyleEnumDescriptor() = default;
private:
swift::ClusteredBitVector m_nopayload_elems_bitmask;
std::map<swift::ClusteredBitVector, std::unique_ptr<ElementInfo>> m_elements;
std::map<uint64_t, ElementInfo *> m_element_indexes;
};
static CompilerType
GetFunctionArgumentTuple(const CompilerType &compiler_type) {
if (compiler_type.IsValid() &&
llvm::dyn_cast_or_null<SwiftASTContext>(compiler_type.GetTypeSystem())) {
swift::CanType swift_can_type(
GetCanonicalSwiftType(compiler_type.GetOpaqueQualType()));
auto func =
swift::dyn_cast_or_null<swift::AnyFunctionType>(
swift_can_type);
if (func) {
auto input = func.getInput();
// See comment in swift::AnyFunctionType for rationale here:
// A function can take either a tuple or a ParenType, but if it is a
// ParenType (i.e. (Foo)), it will be reduced down to just Foo, so if the
// input is not a tuple, that must mean there is only one input.
auto tuple = swift::dyn_cast<swift::TupleType>(input);
if (tuple)
return CompilerType(compiler_type.GetTypeSystem(), tuple);
else
return CompilerType(compiler_type.GetTypeSystem(), input.getPointer());
}
}
return CompilerType();
}
class SwiftAllPayloadEnumDescriptor : public SwiftEnumDescriptor {
public:
SwiftAllPayloadEnumDescriptor(swift::ASTContext *ast,
swift::CanType swift_can_type,
swift::EnumDecl *enum_decl)
: SwiftEnumDescriptor(ast, swift_can_type, enum_decl,
SwiftEnumDescriptor::Kind::AllPayload),
m_tag_bits(), m_elements() {
Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_TYPES));
if (log)
log->Printf("doing ADT-style enum layout for %s",
GetTypeName().AsCString());
SwiftASTContext *swift_ast_ctx = SwiftASTContext::GetSwiftASTContext(ast);
swift::irgen::IRGenModule &irgen_module = swift_ast_ctx->GetIRGenModule();
const swift::irgen::EnumImplStrategy &enum_impl_strategy =
swift::irgen::getEnumImplStrategy(irgen_module, swift_can_type);
llvm::ArrayRef<swift::irgen::EnumImplStrategy::Element>
elements_with_payload = enum_impl_strategy.getElementsWithPayload();
m_tag_bits = enum_impl_strategy.getTagBitsForPayloads();
if (log)
log->Printf("tag_bits = %s", Dump(m_tag_bits).c_str());
auto module_ctx = enum_decl->getModuleContext();
const bool has_payload = true;
for (auto enum_case : elements_with_payload) {
ConstString case_name(enum_case.decl->getName().str().data());
swift::EnumElementDecl *case_decl = enum_case.decl;
assert(case_decl);
CompilerType case_type(
ast, swift_can_type->getTypeOfMember(module_ctx, case_decl, nullptr)
.getPointer());
case_type = GetFunctionArgumentTuple(case_type.GetFunctionReturnType());
const bool is_indirect = case_decl->isIndirect()
|| case_decl->getParentEnum()->isIndirect();
if (log)
log->Printf("case_name = %s, type = %s, is_indirect = %s",
case_name.AsCString(), case_type.GetTypeName().AsCString(),
is_indirect ? "yes" : "no");
std::unique_ptr<ElementInfo> elem_info(
new ElementInfo{case_name, case_type, has_payload, is_indirect});
m_elements.push_back(std::move(elem_info));
}
}
virtual ElementInfo *
GetElementFromData(const lldb_private::DataExtractor &data) {
Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_TYPES));
if (log)
log->Printf(
"ADT-style enum - inspecting data to find enum case for type %s",
GetTypeName().AsCString());
if (m_elements.size() == 0) // no elements, just fail
{
if (log)
log->Printf("enum with no cases. getting out");
return nullptr;
}
if (m_elements.size() == 1) // one element, so it's gotta be it
{
if (log)
log->Printf("enum with one case. getting out easy with %s",
m_elements.front().get()->name.AsCString());
return m_elements.front().get();
}
swift::ClusteredBitVector current_payload;
lldb::offset_t offset = 0;
for (size_t idx = 0; idx < data.GetByteSize(); idx++) {
uint64_t byte = data.GetU8(&offset);
current_payload.add(8, byte);
}
if (log) {
log->Printf("tag_bits = %s", Dump(m_tag_bits).c_str());
log->Printf("current_payload = %s", Dump(current_payload).c_str());
}
if (current_payload.size() != m_tag_bits.size()) {
if (log)
log->Printf("sizes don't match; getting out with an error");
return nullptr;
}
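// Decode the case discriminator by reading the payload bit at each tag-bit
// position; the first enumerated tag bit is treated as the least significant.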
size_t discriminator = 0;
size_t power_of_2 = 1;
auto enumerator = m_tag_bits.enumerateSetBits();
for (llvm::Optional<size_t> next = enumerator.findNext(); next.hasValue();
next = enumerator.findNext()) {
discriminator =
discriminator + (current_payload[next.getValue()] ? power_of_2 : 0);
power_of_2 <<= 1;
}
if (discriminator >= m_elements.size()) // discriminator too large, get out
{
if (log)
log->Printf("discriminator value of %" PRIu64 " too large, getting out",
(uint64_t)discriminator);
return nullptr;
} else {
auto ptr = m_elements[discriminator].get();
if (log) {
if (!ptr)
log->Printf("discriminator value of %" PRIu64
" acceptable, but null case matched - that's bad",
(uint64_t)discriminator);
else
log->Printf("discriminator value of %" PRIu64
" acceptable, case %s matched",
(uint64_t)discriminator, ptr->name.AsCString());
}
return ptr;
}
}
virtual size_t GetNumElementsWithPayload() { return m_elements.size(); }
virtual size_t GetNumCStyleElements() { return 0; }
virtual ElementInfo *GetElementWithPayloadAtIndex(size_t idx) {
if (idx >= m_elements.size())
return nullptr;
return m_elements[idx].get();
}
virtual ElementInfo *GetElementWithNoPayloadAtIndex(size_t idx) {
return nullptr;
}
static bool classof(const SwiftEnumDescriptor *S) {
return S->GetKind() == SwiftEnumDescriptor::Kind::AllPayload;
}
virtual ~SwiftAllPayloadEnumDescriptor() = default;
private:
swift::ClusteredBitVector m_tag_bits;
std::vector<std::unique_ptr<ElementInfo>> m_elements;
};
class SwiftMixedEnumDescriptor : public SwiftEnumDescriptor {
public:
SwiftMixedEnumDescriptor(swift::ASTContext *ast,
swift::CanType swift_can_type,
swift::EnumDecl *enum_decl)
: SwiftEnumDescriptor(ast, swift_can_type, enum_decl,
SwiftEnumDescriptor::Kind::Mixed),
m_non_payload_cases(ast, swift_can_type, enum_decl),
m_payload_cases(ast, swift_can_type, enum_decl) {}
virtual ElementInfo *
GetElementFromData(const lldb_private::DataExtractor &data) {
ElementInfo *elem_info = m_non_payload_cases.GetElementFromData(data);
return elem_info ? elem_info : m_payload_cases.GetElementFromData(data);
}
static bool classof(const SwiftEnumDescriptor *S) {
return S->GetKind() == SwiftEnumDescriptor::Kind::Mixed;
}
virtual size_t GetNumElementsWithPayload() {
return m_payload_cases.GetNumElementsWithPayload();
}
virtual size_t GetNumCStyleElements() {
return m_non_payload_cases.GetNumCStyleElements();
}
virtual ElementInfo *GetElementWithPayloadAtIndex(size_t idx) {
return m_payload_cases.GetElementWithPayloadAtIndex(idx);
}
virtual ElementInfo *GetElementWithNoPayloadAtIndex(size_t idx) {
return m_non_payload_cases.GetElementWithNoPayloadAtIndex(idx);
}
virtual ~SwiftMixedEnumDescriptor() = default;
private:
SwiftCStyleEnumDescriptor m_non_payload_cases;
SwiftAllPayloadEnumDescriptor m_payload_cases;
};
SwiftEnumDescriptor *
SwiftEnumDescriptor::CreateDescriptor(swift::ASTContext *ast,
swift::CanType swift_can_type,
swift::EnumDecl *enum_decl) {
assert(ast);
assert(enum_decl);
assert(swift_can_type.getPointer());
SwiftASTContext *swift_ast_ctx = SwiftASTContext::GetSwiftASTContext(ast);
assert(swift_ast_ctx);
swift::irgen::IRGenModule &irgen_module = swift_ast_ctx->GetIRGenModule();
const swift::irgen::EnumImplStrategy &enum_impl_strategy =
swift::irgen::getEnumImplStrategy(irgen_module, swift_can_type);
llvm::ArrayRef<swift::irgen::EnumImplStrategy::Element>
elements_with_payload = enum_impl_strategy.getElementsWithPayload();
llvm::ArrayRef<swift::irgen::EnumImplStrategy::Element>
elements_with_no_payload = enum_impl_strategy.getElementsWithNoPayload();
if (elements_with_no_payload.size() == 0) {
// nothing with no payload.. empty or all payloads?
if (elements_with_payload.size() == 0)
return new SwiftEmptyEnumDescriptor(ast, swift_can_type, enum_decl);
else
return new SwiftAllPayloadEnumDescriptor(ast, swift_can_type, enum_decl);
} else {
// something with no payload.. mixed or C-style?
if (elements_with_payload.size() == 0)
return new SwiftCStyleEnumDescriptor(ast, swift_can_type, enum_decl);
else
return new SwiftMixedEnumDescriptor(ast, swift_can_type, enum_decl);
}
}
static SwiftEnumDescriptor *
GetEnumInfoFromEnumDecl(swift::ASTContext *ast, swift::CanType swift_can_type,
swift::EnumDecl *enum_decl) {
return SwiftEnumDescriptor::CreateDescriptor(ast, swift_can_type, enum_decl);
}
SwiftEnumDescriptor *SwiftASTContext::GetCachedEnumInfo(void *type) {
VALID_OR_RETURN(nullptr);
if (type) {
EnumInfoCache *enum_info_cache = GetEnumInfoCache(GetASTContext());
EnumInfoCache::const_iterator pos = enum_info_cache->find(type);
if (pos != enum_info_cache->end())
return pos->second.get();
swift::CanType swift_can_type(GetCanonicalSwiftType(type));
if (!SwiftASTContext::IsFullyRealized(
CompilerType(GetASTContext(), swift_can_type)))
return nullptr;
SwiftEnumDescriptorSP enum_info_sp;
if (auto *enum_type = swift_can_type->getAs<swift::EnumType>()) {
enum_info_sp.reset(GetEnumInfoFromEnumDecl(
GetASTContext(), swift_can_type, enum_type->getDecl()));
} else if (auto *bound_enum_type =
swift_can_type->getAs<swift::BoundGenericEnumType>()) {
enum_info_sp.reset(GetEnumInfoFromEnumDecl(
GetASTContext(), swift_can_type, bound_enum_type->getDecl()));
}
if (enum_info_sp.get())
enum_info_cache->insert(std::make_pair(type, enum_info_sp));
return enum_info_sp.get();
}
return nullptr;
}
namespace {
static inline bool
SwiftASTContextSupportsLanguage(lldb::LanguageType language) {
return language == eLanguageTypeSwift;
}
static bool IsDeviceSupport(const char *path) {
// The old-style check, which we preserve for safety.
if (path && strstr(path, "iOS DeviceSupport"))
return true;
// The new-style check, which should cover more devices.
if (path)
if (const char *Developer_Xcode = strstr(path, "Developer"))
if (const char *DeviceSupport = strstr(Developer_Xcode, "DeviceSupport"))
if (strstr(DeviceSupport, "Symbols"))
return true;
// Don't look in the simulator runtime frameworks either. They either
// duplicate what the SDK has, or for older simulators conflict with them.
if (path && strstr(path, ".simruntime/Contents/Resources/"))
return true;
return false;
}
}
SwiftASTContext::SwiftASTContext(const char *triple, Target *target)
: TypeSystem(TypeSystem::eKindSwift), m_source_manager_ap(),
m_diagnostic_engine_ap(), m_ast_context_ap(), m_ir_gen_module_ap(),
m_compiler_invocation_ap(new swift::CompilerInvocation()),
m_dwarf_ast_parser_ap(), m_scratch_module(NULL), m_sil_module_ap(),
m_serialized_module_loader(NULL), m_clang_importer(NULL),
m_swift_module_cache(), m_mangled_name_to_type_map(),
m_type_to_mangled_name_map(), m_pointer_byte_size(0),
m_pointer_bit_align(0), m_void_function_type(), m_target_wp(),
m_process(NULL), m_platform_sdk_path(), m_resource_dir(),
m_ast_file_data_map(), m_initialized_language_options(false),
m_initialized_search_path_options(false),
m_initialized_clang_importer_options(false),
m_reported_fatal_error(false), m_fatal_errors(), m_negative_type_cache(),
m_extra_type_info_cache(), m_swift_type_map() {
// Set the clang modules cache path.
llvm::SmallString<128> path;
auto props = ModuleList::GetGlobalModuleListProperties();
props.GetClangModulesCachePath().GetPath(path);
m_compiler_invocation_ap->setClangModuleCachePath(path);
if (target)
m_target_wp = target->shared_from_this();
if (triple)
SetTriple(triple);
swift::IRGenOptions &ir_gen_opts =
m_compiler_invocation_ap->getIRGenOptions();
ir_gen_opts.OutputKind = swift::IRGenOutputKind::Module;
ir_gen_opts.UseJIT = true;
ir_gen_opts.DWARFVersion = swift::DWARFVersion;
// FIXME: lldb does not support resilience yet.
ir_gen_opts.EnableResilienceBypass = true;
}
SwiftASTContext::SwiftASTContext(const SwiftASTContext &rhs)
: TypeSystem(rhs.getKind()), m_source_manager_ap(),
m_diagnostic_engine_ap(), m_ast_context_ap(), m_ir_gen_module_ap(),
m_compiler_invocation_ap(new swift::CompilerInvocation()),
m_dwarf_ast_parser_ap(), m_scratch_module(NULL), m_sil_module_ap(),
m_serialized_module_loader(NULL), m_clang_importer(NULL),
m_swift_module_cache(), m_mangled_name_to_type_map(),
m_type_to_mangled_name_map(), m_pointer_byte_size(0),
m_pointer_bit_align(0), m_void_function_type(), m_target_wp(),
m_process(NULL), m_platform_sdk_path(), m_resource_dir(),
m_ast_file_data_map(), m_initialized_language_options(false),
m_initialized_search_path_options(false),
m_initialized_clang_importer_options(false),
m_reported_fatal_error(false), m_fatal_errors(), m_negative_type_cache(),
m_extra_type_info_cache(), m_swift_type_map() {
if (rhs.m_compiler_invocation_ap) {
std::string rhs_triple = rhs.GetTriple();
if (!rhs_triple.empty()) {
SetTriple(rhs_triple.c_str());
}
llvm::StringRef module_cache_path =
rhs.m_compiler_invocation_ap->getClangModuleCachePath();
m_compiler_invocation_ap->setClangModuleCachePath(module_cache_path);
}
swift::IRGenOptions &ir_gen_opts =
m_compiler_invocation_ap->getIRGenOptions();
ir_gen_opts.OutputKind = swift::IRGenOutputKind::Module;
ir_gen_opts.UseJIT = true;
TargetSP target_sp = rhs.m_target_wp.lock();
if (target_sp)
m_target_wp = target_sp;
m_platform_sdk_path = rhs.m_platform_sdk_path;
m_resource_dir = rhs.m_resource_dir;
swift::ASTContext *lhs_ast = GetASTContext();
swift::ASTContext *rhs_ast =
const_cast<SwiftASTContext &>(rhs).GetASTContext();
if (lhs_ast && rhs_ast) {
lhs_ast->SearchPathOpts = rhs_ast->SearchPathOpts;
}
GetClangImporter();
}
SwiftASTContext::~SwiftASTContext() {
if (swift::ASTContext *ctx = m_ast_context_ap.get()) {
// A RemoteASTContext associated with this swift::ASTContext has to be
// destroyed before the swift::ASTContext is destroyed.
if (TargetSP target_sp = m_target_wp.lock())
if (ProcessSP process_sp = target_sp->GetProcessSP())
if (auto *runtime = process_sp->GetSwiftLanguageRuntime())
runtime->ReleaseAssociatedRemoteASTContext(ctx);
GetASTMap().Erase(ctx);
}
}
ConstString SwiftASTContext::GetPluginNameStatic() {
return ConstString("swift");
}
ConstString SwiftASTContext::GetPluginName() {
return ClangASTContext::GetPluginNameStatic();
}
uint32_t SwiftASTContext::GetPluginVersion() { return 1; }
static std::string &GetDefaultResourceDir() {
static std::string s_resource_dir;
return s_resource_dir;
}
/// Initialize the compiler invocation with the search paths from a
/// serialized AST.
/// \returns true on success.
static bool DeserializeCompilerFlags(swift::CompilerInvocation &invocation,
StringRef section_data_ref, StringRef name,
llvm::raw_ostream &error) {
auto result = invocation.loadFromSerializedAST(section_data_ref);
if (result == swift::serialization::Status::Valid)
return true;
error << "While deserializing" << name << ":\n";
switch (result) {
case swift::serialization::Status::Valid:
llvm_unreachable("already checked");
case swift::serialization::Status::FormatTooOld:
error << "The swift module file format is too old to be used by the "
"version of the swift compiler in LLDB\n";
break;
case swift::serialization::Status::FormatTooNew:
error << "the swift module file format is too new to be used by this "
"version of the swift compiler in LLDB\n";
break;
case swift::serialization::Status::MissingDependency:
error << "the swift module file depends on another module that can't be "
"loaded\n";
break;
case swift::serialization::Status::MissingShadowedModule:
error << "the swift module file is an overlay for a clang module, which "
"can't be found\n";
break;
case swift::serialization::Status::FailedToLoadBridgingHeader:
error << "the swift module file depends on a bridging header that can't "
"be loaded\n";
break;
case swift::serialization::Status::Malformed:
error << "the swift module file is malformed\n";
break;
case swift::serialization::Status::MalformedDocumentation:
error << "the swift module documentation file is malformed in some way\n";
break;
case swift::serialization::Status::NameMismatch:
error << "the swift module file's name does not match the module it is "
"being loaded into\n";
break;
case swift::serialization::Status::TargetIncompatible:
error << "the swift module file was built for a different target "
"platform\n";
break;
case swift::serialization::Status::TargetTooNew:
error << "the swift module file was built for a target newer than the "
"current target\n";
break;
}
return false;
}
/// Retrieve the serialized AST data blobs and initialize the compiler
/// invocation with the concatenated search paths from the blobs.
/// \returns true if an error was encountered.
static bool DeserializeAllCompilerFlags(SwiftASTContext &swift_ast,
Module &module,
llvm::raw_ostream &error,
bool &got_serialized_options) {
got_serialized_options = false;
auto &invocation = swift_ast.GetCompilerInvocation();
SymbolVendor *sym_vendor = module.GetSymbolVendor();
if (!sym_vendor)
return false;
auto ast_file_datas = sym_vendor->GetASTData(eLanguageTypeSwift);
Log *log(GetLogIfAllCategoriesSet(LIBLLDB_LOG_TYPES));
if (log)
log->Printf("Found %d AST file data entries for library: %s.",
(int)ast_file_datas.size(),
module.GetSpecificationDescription().c_str());
// If no N_AST symbols exist, this is not an error.
if (ast_file_datas.empty())
return false;
// An AST section consists of one or more AST modules, optionally
// with headers. Iterate over all AST modules.
for (auto ast_file_data_sp : ast_file_datas) {
llvm::StringRef buf((const char *)ast_file_data_sp->GetBytes(),
ast_file_data_sp->GetByteSize());
while (!buf.empty()) {
std::string last_sdk_path;
auto info = swift::serialization::validateSerializedAST(buf);
if ((info.status != swift::serialization::Status::Valid) ||
(info.bytes == 0) || (info.bytes > buf.size())) {
if (log)
log->Printf("Unable to load AST for module %s from library: %s.",
info.name.str().c_str(),
module.GetSpecificationDescription().c_str());
return true;
}
if (info.name.empty())
continue;
StringRef moduleData = buf.substr(0, info.bytes);
if (log)
last_sdk_path = invocation.getSDKPath();
got_serialized_options |=
DeserializeCompilerFlags(invocation, moduleData, info.name, error);
if (log && !last_sdk_path.empty() &&
invocation.getSDKPath() != last_sdk_path)
log->Printf("SDK path mismatch!\n"
"Was \"%s\", found \"%s\" in module %s.",
last_sdk_path.c_str(),
invocation.getSDKPath().str().c_str(),
info.name.str().c_str());
buf = buf.substr(info.bytes);
}
}
return false;
}
/// Return whether this module contains any serialized Swift ASTs.
bool HasSwiftModules(Module &module) {
SymbolVendor *sym_vendor = module.GetSymbolVendor();
if (!sym_vendor)
return false;
auto ast_file_datas = sym_vendor->GetASTData(eLanguageTypeSwift);
return !ast_file_datas.empty();
}
void SwiftASTContext::RemapClangImporterOptions(
const PathMappingList &path_map) {
Log *log(GetLogIfAllCategoriesSet(LIBLLDB_LOG_TYPES));
auto &options = GetClangImporterOptions();
std::string remapped;
if (path_map.RemapPath(options.BridgingHeader, remapped)) {
if (log)
log->Printf("remapped %s -> %s", options.BridgingHeader.c_str(),
remapped.c_str());
options.BridgingHeader = remapped;
}
for (auto &arg_string : options.ExtraArgs) {
StringRef prefix;
StringRef arg = arg_string;
if (arg.consume_front("-I"))
prefix = "-I";
if (path_map.RemapPath(arg, remapped)) {
if (log)
log->Printf("remapped %s -> %s%s", arg.str().c_str(),
prefix.str().c_str(), remapped.c_str());
arg_string = prefix.str()+remapped;
}
}
}
lldb::TypeSystemSP SwiftASTContext::CreateInstance(lldb::LanguageType language,
Module &module,
Target *target) {
if (!SwiftASTContextSupportsLanguage(language))
return lldb::TypeSystemSP();
ArchSpec arch = module.GetArchitecture();
ObjectFile *objfile = module.GetObjectFile();
ArchSpec object_arch;
if (!objfile || !objfile->GetArchitecture(object_arch))
return TypeSystemSP();
lldb::CompUnitSP main_compile_unit_sp = module.GetCompileUnitAtIndex(0);
Log *log(GetLogIfAllCategoriesSet(LIBLLDB_LOG_TYPES));
if (main_compile_unit_sp && !main_compile_unit_sp->Exists()) {
if (log) {
StreamString ss;
module.GetDescription(&ss);
log->Printf("Corresponding source not found for %s, loading module "
"%s is unlikely to succeed",
main_compile_unit_sp->GetCString(), ss.GetData());
}
}
std::shared_ptr<SwiftASTContext> swift_ast_sp(
target ? (new SwiftASTContextForExpressions(*target))
: new SwiftASTContext());
swift_ast_sp->GetLanguageOptions().DebuggerSupport = true;
swift_ast_sp->GetLanguageOptions().EnableAccessControl = false;
swift_ast_sp->GetLanguageOptions().EnableTargetOSChecking = false;
if (!arch.IsValid())
return TypeSystemSP();
llvm::Triple triple = arch.GetTriple();
if (triple.getOS() == llvm::Triple::UnknownOS) {
// cl_kernels are the only binaries that don't have an LC_MIN_VERSION_xxx
// load command. This avoids a Swift assertion.
#if defined(__APPLE__)
switch (triple.getArch()) {
default:
triple.setOS(llvm::Triple::MacOSX);
break;
case llvm::Triple::arm:
case llvm::Triple::armeb:
case llvm::Triple::aarch64:
case llvm::Triple::aarch64_be:
triple.setOS(llvm::Triple::IOS);
break;
}
#else
// Not an elegant hack on OS X, not an elegant hack elsewhere.
// But we shouldn't be claiming things are Mac binaries when they are
// not.
triple.setOS(HostInfo::GetArchitecture().GetTriple().getOS());
#endif
}
swift_ast_sp->SetTriple(triple.getTriple().c_str(), &module);
bool set_triple = false;
SymbolVendor *sym_vendor = module.GetSymbolVendor();
std::string resource_dir;
std::string target_triple;
if (sym_vendor) {
bool got_serialized_options;
llvm::SmallString<0> error;
llvm::raw_svector_ostream errs(error);
if (DeserializeAllCompilerFlags(*swift_ast_sp, module, errs,
got_serialized_options)) {
swift_ast_sp->m_fatal_errors.SetErrorString(error.str());
return swift_ast_sp;
}
// Some of the bits in the compiler options we keep separately, so we
// need to populate them from the serialized options:
llvm::StringRef serialized_triple =
swift_ast_sp->GetCompilerInvocation().getTargetTriple();
if (serialized_triple.empty()) {
if (log)
log->Printf("\tSerialized triple for %s was empty.",
module.GetSpecificationDescription().c_str());
} else {
if (log)
log->Printf("\tFound serialized triple for %s: %s.",
module.GetSpecificationDescription().c_str(),
serialized_triple.data());
swift_ast_sp->SetTriple(serialized_triple.data(), &module);
set_triple = true;
}
llvm::StringRef serialized_sdk_path =
swift_ast_sp->GetCompilerInvocation().getSDKPath();
if (serialized_sdk_path.empty()) {
if (log)
log->Printf("\tNo serialized SDK path.");
} else {
if (log)
log->Printf("\tGot serialized SDK path %s.",
serialized_sdk_path.data());
FileSpec sdk_spec(serialized_sdk_path.data(), false);
if (sdk_spec.Exists()) {
swift_ast_sp->SetPlatformSDKPath(serialized_sdk_path.data());
}
}
if (!got_serialized_options || !swift_ast_sp->GetPlatformSDKPath()) {
std::string platform_sdk_path;
if (sym_vendor->GetCompileOption("-sdk", platform_sdk_path)) {
FileSpec sdk_spec(platform_sdk_path.c_str(), false);
if (sdk_spec.Exists()) {
swift_ast_sp->SetPlatformSDKPath(platform_sdk_path.c_str());
}
if (sym_vendor->GetCompileOption("-target", target_triple)) {
llvm::StringRef parsed_triple(target_triple);
swift_ast_sp->SetTriple(target_triple.c_str(), &module);
set_triple = true;
}
}
}
if (sym_vendor->GetCompileOption("-resource-dir", resource_dir)) {
swift_ast_sp->SetResourceDir(resource_dir.c_str());
} else if (!GetDefaultResourceDir().empty()) {
// Use the first resource dir we found when setting up a target.
swift_ast_sp->SetResourceDir(GetDefaultResourceDir().c_str());
} else {
if (log)
log->Printf("No resource dir available for module's SwiftASTContext.");
}
if (!got_serialized_options) {
std::vector<std::string> framework_search_paths;
if (sym_vendor->GetCompileOptions("-F", framework_search_paths)) {
for (std::string &search_path : framework_search_paths) {
swift_ast_sp->AddFrameworkSearchPath(search_path.c_str());
}
}
std::vector<std::string> include_paths;
if (sym_vendor->GetCompileOptions("-I", include_paths)) {
for (std::string &search_path : include_paths) {
const FileSpec path_spec(search_path.c_str(), false);
if (path_spec.Exists()) {
static const ConstString s_hmap_extension("hmap");
if (IsDirectory(path_spec)) {
swift_ast_sp->AddModuleSearchPath(search_path.c_str());
} else if (IsRegularFile(path_spec) &&
path_spec.GetFileNameExtension() == s_hmap_extension) {
std::string argument("-I");
argument.append(search_path);
swift_ast_sp->AddClangArgument(argument.c_str());
}
}
}
}
std::vector<std::string> cc_options;
if (sym_vendor->GetCompileOptions("-Xcc", cc_options)) {
for (int i = 0; i < cc_options.size(); ++i) {
if (!cc_options[i].compare("-iquote") && i + 1 < cc_options.size()) {
swift_ast_sp->AddClangArgumentPair("-iquote",
cc_options[i + 1].c_str());
}
}
}
}
}
if (!set_triple) {
llvm::Triple llvm_triple(swift_ast_sp->GetTriple());
// LLVM wants this to be set to iOS or MacOSX; if we're working on
// a bare-boards type image, change the triple for LLVM's benefit.
if (llvm_triple.getVendor() == llvm::Triple::Apple &&
llvm_triple.getOS() == llvm::Triple::UnknownOS) {
if (llvm_triple.getArch() == llvm::Triple::arm ||
llvm_triple.getArch() == llvm::Triple::thumb) {
llvm_triple.setOS(llvm::Triple::IOS);
} else {
llvm_triple.setOS(llvm::Triple::MacOSX);
}
swift_ast_sp->SetTriple(llvm_triple.str().c_str(), &module);
}
}
// Apply source path remappings found in the module's dSYM.
swift_ast_sp->RemapClangImporterOptions(module.GetSourceMappingList());
if (!swift_ast_sp->GetClangImporter()) {
if (log) {
log->Printf("((Module*)%p) [%s]->GetSwiftASTContext() returning NULL "
"- couldn't create a ClangImporter",
&module,
module.GetFileSpec().GetFilename().AsCString("<anonymous>"));
}
return TypeSystemSP();
}
std::vector<std::string> module_names;
swift_ast_sp->RegisterSectionModules(module, module_names);
swift_ast_sp->ValidateSectionModules(module, module_names);
if (log) {
log->Printf("((Module*)%p) [%s]->GetSwiftASTContext() = %p", &module,
module.GetFileSpec().GetFilename().AsCString("<anonymous>"),
swift_ast_sp.get());
swift_ast_sp->DumpConfiguration(log);
}
return swift_ast_sp;
}
lldb::TypeSystemSP SwiftASTContext::CreateInstance(lldb::LanguageType language,
Target &target,
const char *extra_options) {
if (!SwiftASTContextSupportsLanguage(language))
return lldb::TypeSystemSP();
ArchSpec arch = target.GetArchitecture();
// Make an AST but don't set the triple yet. We need to try to detect
// if we have an iOS simulator...
std::shared_ptr<SwiftASTContextForExpressions> swift_ast_sp(
new SwiftASTContextForExpressions(target));
Log *log(GetLogIfAllCategoriesSet(LIBLLDB_LOG_TYPES));
if (log)
log->Printf("SwiftASTContext::CreateInstance(Target)");
auto logError = [&](const char *message) {
if (log)
log->Printf("((Target*)%p)->GetSwiftASTContext() returning NULL - %s",
&target, message);
};
if (!arch.IsValid()) {
logError("invalid target architecture");
return TypeSystemSP();
}
swift_ast_sp->GetLanguageOptions().EnableTargetOSChecking = false;
bool handled_sdk_path = false;
bool handled_resource_dir = false;
const size_t num_images = target.GetImages().GetSize();
// Set the SDK path and resource dir prior to doing search paths.
// Otherwise when we create search path options we put in the wrong SDK
// path.
FileSpec &target_sdk_spec = target.GetSDKPath();
if (target_sdk_spec && target_sdk_spec.Exists()) {
std::string platform_sdk_path(target_sdk_spec.GetPath());
swift_ast_sp->SetPlatformSDKPath(std::move(platform_sdk_path));
handled_sdk_path = true;
}
if (target.GetSwiftCreateModuleContextsInParallel()) {
// The first call to GetTypeSystemForLanguage() on a module will
// trigger the import (and thus most likely the rebuild) of all
// the Clang modules that were imported in this module. This can
// be a lot of work (potentially ten seconds per module), but it
// can be performed in parallel.
llvm::ThreadPool pool;
for (size_t mi = 0; mi != num_images; ++mi) {
auto module_sp = target.GetImages().GetModuleAtIndex(mi);
pool.async([=] {
module_sp->GetTypeSystemForLanguage(lldb::eLanguageTypeSwift);
});
}
pool.wait();
}
Status module_error;
for (size_t mi = 0; mi != num_images; ++mi) {
ModuleSP module_sp = target.GetImages().GetModuleAtIndex(mi);
// Skip images without a serialized Swift AST. This avoids
// spurious warning messages.
if (!HasSwiftModules(*module_sp))
continue;
SwiftASTContext *module_swift_ast = llvm::dyn_cast_or_null<SwiftASTContext>(
module_sp->GetTypeSystemForLanguage(lldb::eLanguageTypeSwift));
if (!module_swift_ast || module_swift_ast->HasFatalErrors() ||
!module_swift_ast->GetClangImporter()) {
      // Make sure we warn about this module load failure; the one that
      // comes from loading types often gets swallowed up and not seen.
      // This is the only reliable point where we can show it. But only do
      // it once per UUID so we don't overwhelm the user with warnings.
      // The set is static so it persists across calls.
      static std::unordered_set<std::string> g_swift_warnings_issued;
      UUID module_uuid(module_sp->GetUUID());
      std::pair<std::unordered_set<std::string>::iterator, bool> result(
          g_swift_warnings_issued.insert(module_uuid.GetAsString()));
if (result.second) {
StreamString ss;
module_sp->GetDescription(&ss, eDescriptionLevelBrief);
if (module_swift_ast && module_swift_ast->HasFatalErrors())
ss << ": "
<< module_swift_ast->GetFatalErrors().AsCString("unknown error");
target.GetDebugger().GetErrorFile()->Printf(
"warning: Swift error in module %s.\n"
"Debug info from this module will be unavailable in the "
"debugger.\n\n",
ss.GetData());
}
continue;
}
if (!handled_sdk_path) {
const char *platform_sdk_path = module_swift_ast->GetPlatformSDKPath();
if (platform_sdk_path) {
handled_sdk_path = true;
swift_ast_sp->SetPlatformSDKPath(platform_sdk_path);
}
}
if (!handled_resource_dir) {
const char *resource_dir = module_swift_ast->GetResourceDir();
if (resource_dir) {
handled_resource_dir = true;
swift_ast_sp->SetResourceDir(resource_dir);
if (GetDefaultResourceDir().empty()) {
// Tuck this away as a reasonable default resource dir
// for contexts that don't have one. The Swift parser
// will assert without one.
GetDefaultResourceDir() = resource_dir;
}
}
}
if (handled_sdk_path && handled_resource_dir)
break;
}
// First, prime the compiler with the options from the main executable:
bool got_serialized_options = false;
ModuleSP exe_module_sp(target.GetExecutableModule());
// If we're debugging a testsuite, then treat the main test bundle as the
// executable.
if (exe_module_sp && PlatformDarwin::IsUnitTestExecutable(*exe_module_sp)) {
ModuleSP unit_test_module =
PlatformDarwin::GetUnitTestModule(target.GetImages());
if (unit_test_module) {
exe_module_sp = unit_test_module;
}
}
// Attempt to deserialize the compiler flags from the AST.
if (exe_module_sp) {
llvm::SmallString<0> error;
llvm::raw_svector_ostream errs(error);
bool failed = DeserializeAllCompilerFlags(*swift_ast_sp, *exe_module_sp,
errs, got_serialized_options);
if (log && failed)
log->Printf(
"Attempt to load compiler options from serialized AST failed: %s",
error.c_str());
}
// Now if the user fully specified the triple, let that override the one
// we got from executable's options:
if (target.GetArchitecture().IsFullySpecifiedTriple()) {
swift_ast_sp->SetTriple(
target.GetArchitecture().GetTriple().str().c_str());
} else {
    // Otherwise, derive the triple from the platform's OS version, or fall
    // back to the executable module's Swift AST triple.
bool set_triple = false;
PlatformSP platform_sp(target.GetPlatform());
if (platform_sp &&
!target.GetArchitecture().GetTriple().hasEnvironment()) {
llvm::VersionTuple version = platform_sp->GetOSVersion(
target.GetProcessSP().get());
StreamString full_triple_name;
full_triple_name.PutCString(target.GetArchitecture().GetTriple().str());
full_triple_name.PutCString(version.getAsString());
swift_ast_sp->SetTriple(full_triple_name.GetString().data());
set_triple = true;
}
if (!set_triple) {
ModuleSP exe_module_sp(target.GetExecutableModule());
if (exe_module_sp) {
Status exe_error;
SwiftASTContext *exe_swift_ctx =
llvm::dyn_cast_or_null<SwiftASTContext>(
exe_module_sp->GetTypeSystemForLanguage(
lldb::eLanguageTypeSwift));
if (exe_swift_ctx) {
swift_ast_sp->SetTriple(
exe_swift_ctx->GetLanguageOptions().Target.str().c_str());
}
}
}
}
const bool use_all_compiler_flags =
!got_serialized_options || target.GetUseAllCompilerFlags();
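  // For each image: add the enclosing framework's parent directory to the
  // framework search paths (when appropriate), fold in the module's own
  // serialized module/framework search paths and Clang arguments, and
  // register any Swift modules serialized into its sections.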
std::function<void(ModuleSP &&)> process_one_module =
[&target, &swift_ast_sp, use_all_compiler_flags](ModuleSP &&module_sp) {
const FileSpec &module_file = module_sp->GetFileSpec();
std::string module_path = module_file.GetPath();
// Add the containing framework to the framework search path. Don't
// do that if this is the executable module, since it might be
// buried in some framework that we don't care about.
if (use_all_compiler_flags &&
target.GetExecutableModulePointer() != module_sp.get()) {
size_t framework_offset = module_path.rfind(".framework/");
if (framework_offset != std::string::npos) {
          // Sometimes the version of the framework that got loaded has been
          // stripped of its headers, and in that case adding it to the
          // framework search path would just short-circuit a clang search
          // that might otherwise find the needed headers elsewhere. So only
          // add paths that still have a Headers or Modules subdirectory.
std::string framework_path =
module_path.substr(0, framework_offset);
framework_path.append(".framework");
FileSpec path_spec(framework_path, true);
FileSpec headers_spec =
path_spec.CopyByAppendingPathComponent("Headers");
bool add_it = false;
if (headers_spec.Exists())
add_it = true;
if (!add_it) {
FileSpec module_spec =
path_spec.CopyByAppendingPathComponent("Modules");
if (module_spec.Exists())
add_it = true;
}
if (!add_it) {
Log *log(GetLogIfAllCategoriesSet(LIBLLDB_LOG_TYPES));
if (log)
log->Printf("process_one_module rejecting framework path"
" \"%s\" as it has no Headers "
"or Modules subdirectories.",
framework_path.c_str());
}
if (add_it) {
while (framework_offset && (module_path[framework_offset] != '/'))
framework_offset--;
if (module_path[framework_offset] == '/') {
// framework_offset now points to the '/';
std::string parent_path =
module_path.substr(0, framework_offset);
if (strncmp(parent_path.c_str(), "/System/Library",
strlen("/System/Library")) &&
!IsDeviceSupport(parent_path.c_str())) {
swift_ast_sp->AddFrameworkSearchPath(parent_path.c_str());
}
}
}
}
}
// Skip images without a serialized Swift AST.
if (!HasSwiftModules(*module_sp))
return;
SymbolVendor *sym_vendor = module_sp->GetSymbolVendor();
if (!sym_vendor)
return;
std::vector<std::string> module_names;
SymbolFile *sym_file = sym_vendor->GetSymbolFile();
if (!sym_file)
return;
Status sym_file_error;
SwiftASTContext *ast_context = llvm::dyn_cast_or_null<SwiftASTContext>(
sym_file->GetTypeSystemForLanguage(lldb::eLanguageTypeSwift));
if (ast_context && !ast_context->HasErrors()) {
if (use_all_compiler_flags ||
target.GetExecutableModulePointer() == module_sp.get()) {
for (size_t msi = 0, mse = ast_context->GetNumModuleSearchPaths();
msi < mse; ++msi) {
const char *search_path =
ast_context->GetModuleSearchPathAtIndex(msi);
swift_ast_sp->AddModuleSearchPath(search_path);
}
for (size_t fsi = 0,
fse = ast_context->GetNumFrameworkSearchPaths();
fsi < fse; ++fsi) {
const char *search_path =
ast_context->GetFrameworkSearchPathAtIndex(fsi);
swift_ast_sp->AddFrameworkSearchPath(search_path);
}
std::string clang_argument;
for (size_t osi = 0, ose = ast_context->GetNumClangArguments();
osi < ose; ++osi) {
// Join multi-arg -D and -U options for uniquing.
clang_argument += ast_context->GetClangArgumentAtIndex(osi);
if (clang_argument == "-D" || clang_argument == "-U")
continue;
// Enable uniquing for -D and -U options.
bool force = true;
if (clang_argument.size() >= 2 && clang_argument[0] == '-' &&
(clang_argument[1] == 'D' || clang_argument[1] == 'U'))
force = false;
swift_ast_sp->AddClangArgument(clang_argument, force);
clang_argument.clear();
}
}
swift_ast_sp->RegisterSectionModules(*module_sp, module_names);
}
};
for (size_t mi = 0; mi != num_images; ++mi) {
process_one_module(target.GetImages().GetModuleAtIndex(mi));
}
FileSpecList &framework_search_paths = target.GetSwiftFrameworkSearchPaths();
FileSpecList &module_search_paths = target.GetSwiftModuleSearchPaths();
for (size_t fi = 0, fe = framework_search_paths.GetSize(); fi != fe; ++fi) {
swift_ast_sp->AddFrameworkSearchPath(
framework_search_paths.GetFileSpecAtIndex(fi).GetPath().c_str());
}
for (size_t mi = 0, me = module_search_paths.GetSize(); mi != me; ++mi) {
swift_ast_sp->AddModuleSearchPath(
module_search_paths.GetFileSpecAtIndex(mi).GetPath().c_str());
}
// Now fold any extra options we were passed. This has to be done BEFORE
// the ClangImporter is made by calling GetClangImporter or these options
// will be ignored.
if (extra_options) {
swift::CompilerInvocation &compiler_invocation =
swift_ast_sp->GetCompilerInvocation();
Args extra_args(extra_options);
llvm::ArrayRef<const char *> extra_args_ref(extra_args.GetArgumentVector(),
extra_args.GetArgumentCount());
compiler_invocation.parseArgs(extra_args_ref,
swift_ast_sp->GetDiagnosticEngine());
}
  // Apply source path remappings found in the target settings.
swift_ast_sp->RemapClangImporterOptions(target.GetSourcePathMap());
// This needs to happen once all the import paths are set, or otherwise no
// modules will be found.
if (!swift_ast_sp->GetClangImporter()) {
logError("couldn't create a ClangImporter");
return TypeSystemSP();
}
if (log) {
log->Printf("((Target*)%p)->GetSwiftASTContext() = %p", &target,
swift_ast_sp.get());
swift_ast_sp->DumpConfiguration(log);
}
if (swift_ast_sp->HasFatalErrors()) {
const char *errors = swift_ast_sp->GetFatalErrors().AsCString();
swift_ast_sp->m_error.SetErrorStringWithFormat(
"Error creating target Swift AST context: %s", errors);
logError(errors);
return lldb::TypeSystemSP();
}
const bool can_create = true;
if (!swift_ast_sp->m_ast_context_ap->getStdlibModule(can_create)) {
logError("couldn't load the Swift stdlib");
return lldb::TypeSystemSP();
}
return swift_ast_sp;
}
void SwiftASTContext::EnumerateSupportedLanguages(
std::set<lldb::LanguageType> &languages_for_types,
std::set<lldb::LanguageType> &languages_for_expressions) {
static std::vector<lldb::LanguageType> s_supported_languages_for_types(
{lldb::eLanguageTypeSwift});
static std::vector<lldb::LanguageType> s_supported_languages_for_expressions(
{lldb::eLanguageTypeSwift});
languages_for_types.insert(s_supported_languages_for_types.begin(),
s_supported_languages_for_types.end());
languages_for_expressions.insert(
s_supported_languages_for_expressions.begin(),
s_supported_languages_for_expressions.end());
}
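// Plugin factory entry point: dispatches to the module-based or target-based
// SwiftASTContext::CreateInstance overload depending on which argument the
// plugin manager supplies.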
static lldb::TypeSystemSP CreateTypeSystemInstance(lldb::LanguageType language,
Module *module,
Target *target,
const char *extra_options) {
// This should be called with either a target or a module.
if (module) {
assert(!target);
assert(StringRef(extra_options).empty());
return SwiftASTContext::CreateInstance(language, *module);
} else if (target) {
assert(!module);
return SwiftASTContext::CreateInstance(language, *target, extra_options);
}
  llvm_unreachable(
      "CreateTypeSystemInstance called with neither a module nor a target");
}
void SwiftASTContext::Initialize() {
PluginManager::RegisterPlugin(
GetPluginNameStatic(), "swift AST context plug-in",
CreateTypeSystemInstance, EnumerateSupportedLanguages);
}
void SwiftASTContext::Terminate() {
PluginManager::UnregisterPlugin(CreateTypeSystemInstance);
}
bool SwiftASTContext::SupportsLanguage(lldb::LanguageType language) {
return SwiftASTContextSupportsLanguage(language);
}
Status SwiftASTContext::IsCompatible() { return GetFatalErrors(); }
Status SwiftASTContext::GetFatalErrors() {
Status error;
if (HasFatalErrors()) {
error = m_fatal_errors;
if (error.Success()) {
// Retrieve the error message from the DiagnosticConsumer.
DiagnosticManager diagnostic_manager;
PrintDiagnostics(diagnostic_manager);
error.SetErrorString(diagnostic_manager.GetString());
}
}
return error;
}
swift::IRGenOptions &SwiftASTContext::GetIRGenOptions() {
return m_compiler_invocation_ap->getIRGenOptions();
}
std::string SwiftASTContext::GetTriple() const {
return m_compiler_invocation_ap->getTargetTriple();
}
// Conditions a triple string to be safe for use with Swift.
// Right now this just strips the Haswell marker off the CPU name,
// e.g. "x86_64h-apple-macosx" becomes "x86_64-apple-macosx".
// TODO: make Swift more robust.
static std::string GetSwiftFriendlyTriple(const std::string &triple) {
static std::string s_x86_64h("x86_64h");
static std::string::size_type s_x86_64h_size = s_x86_64h.size();
if (0 == triple.compare(0, s_x86_64h_size, s_x86_64h)) {
std::string fixed_triple("x86_64");
fixed_triple.append(
triple.substr(s_x86_64h_size, triple.size() - s_x86_64h_size));
return fixed_triple;
}
return triple;
}
bool SwiftASTContext::SetTriple(const char *triple_cstr, Module *module) {
if (triple_cstr && triple_cstr[0]) {
Log *log(GetLogIfAllCategoriesSet(LIBLLDB_LOG_TYPES));
// We can change our triple up until we create the swift::irgen::IRGenModule
if (m_ir_gen_module_ap.get() == NULL) {
std::string raw_triple(triple_cstr);
std::string triple = GetSwiftFriendlyTriple(raw_triple);
llvm::Triple llvm_triple(triple);
const unsigned unspecified = 0;
      // If the OS version is unspecified, fill it in from the executable
      // module's minimum OS version below.
if (llvm_triple.getOSMajorVersion() == unspecified) {
// If a triple is "<arch>-apple-darwin" change it to be
// "<arch>-apple-macosx" otherwise the major and minor OS version we
// append below would be wrong.
if (llvm_triple.getVendor() == llvm::Triple::VendorType::Apple &&
llvm_triple.getOS() == llvm::Triple::OSType::Darwin) {
llvm_triple.setOS(llvm::Triple::OSType::MacOSX);
triple = llvm_triple.str();
}
// Append the min OS to the triple if we have a target
ModuleSP module_sp;
if (module == NULL) {
TargetSP target_sp(m_target_wp.lock());
if (target_sp) {
module_sp = target_sp->GetExecutableModule();
if (module_sp)
module = module_sp.get();
}
}
if (module) {
ObjectFile *objfile = module->GetObjectFile();
uint32_t versions[3];
if (objfile) {
StreamString strm;
if (llvm::VersionTuple version = objfile->GetMinimumOSVersion()) {
strm.PutCString(llvm_triple.getOSName().str());
strm.PutCString(version.getAsString());
llvm_triple.setOSName(strm.GetString());
triple = llvm_triple.str();
}
}
}
}
if (log)
log->Printf("%p: SwiftASTContext::SetTriple('%s') setting to '%s'%s",
this, triple_cstr, triple.c_str(),
m_target_wp.lock() ? " (target)" : "");
if (llvm::Triple(triple).getOS() == llvm::Triple::UnknownOS) {
// This case triggers an llvm_unreachable() in the Swift compiler.
if (log)
log->Printf("Cannot initialize Swift with an unknown OS");
return false;
}
m_compiler_invocation_ap->setTargetTriple(triple);
// Every time the triple is changed the LangOpts must be
// updated too, because Swift default-initializes the
// EnableObjCInterop flag based on the triple.
GetLanguageOptions().EnableObjCInterop = llvm_triple.isOSDarwin();
return true;
} else {
if (log)
log->Printf("%p: SwiftASTContext::SetTriple('%s') ignoring triple "
"since the IRGenModule has already been created",
this, triple_cstr);
}
}
return false;
}
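// Returns the path of the Xcode "Contents" directory that this LLDB is
// running from (typically something like "/Applications/Xcode.app/Contents/"),
// first by inspecting the shared library's own location and then by falling
// back to "xcrun". Returns an empty string if neither approach succeeds.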
static std::string GetXcodeContentsPath() {
const char substr[] = ".app/Contents/";
// First, try based on the current shlib's location
{
if (FileSpec fspec = HostInfo::GetShlibDir()) {
std::string path_to_shlib = fspec.GetPath();
size_t pos = path_to_shlib.rfind(substr);
if (pos != std::string::npos) {
path_to_shlib.erase(pos + strlen(substr));
return path_to_shlib;
}
}
}
// Fall back to using xcrun
{
int status = 0;
int signo = 0;
std::string output;
const char *command = "xcrun -sdk macosx --show-sdk-path";
lldb_private::Status error = Host::RunShellCommand(
command, // shell command to run
NULL, // current working directory
&status, // Put the exit status of the process in here
&signo, // Put the signal that caused the process to exit in here
&output, // Get the output from the command and place it in this string
std::chrono::seconds(3)); // Timeout in seconds to wait for shell program to finish
if (status == 0 && !output.empty()) {
      // Strip trailing newlines from the command output.
      size_t last_non_newline = output.find_last_not_of("\r\n");
      if (last_non_newline != std::string::npos) {
        output.erase(last_non_newline + 1);
      }
}
size_t pos = output.rfind(substr);
if (pos != std::string::npos) {
output.erase(pos + strlen(substr));
return output;
}
}
}
return std::string();
}
static std::string GetCurrentToolchainPath() {
const char substr[] = ".xctoolchain/";
{
if (FileSpec fspec = HostInfo::GetShlibDir()) {
std::string path_to_shlib = fspec.GetPath();
size_t pos = path_to_shlib.rfind(substr);
if (pos != std::string::npos) {
path_to_shlib.erase(pos + strlen(substr));
return path_to_shlib;
}
}
}
return std::string();
}
static std::string GetCurrentCLToolsPath() {
const char substr[] = "/CommandLineTools/";
{
if (FileSpec fspec = HostInfo::GetShlibDir()) {
std::string path_to_shlib = fspec.GetPath();
size_t pos = path_to_shlib.rfind(substr);
if (pos != std::string::npos) {
path_to_shlib.erase(pos + strlen(substr));
return path_to_shlib;
}
}
}
return std::string();
}
namespace {
enum class SDKType {
MacOSX = 0,
iPhoneSimulator,
iPhoneOS,
AppleTVSimulator,
AppleTVOS,
WatchSimulator,
watchOS,
numSDKTypes,
unknown = -1
};
const char *const sdk_strings[] = {
"macosx", "iphonesimulator", "iphoneos", "appletvsimulator",
"appletvos", "watchsimulator", "watchos",
};
struct SDKEnumeratorInfo {
FileSpec found_path;
SDKType sdk_type;
uint32_t least_major;
uint32_t least_minor;
};
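// Returns true if the SDK at sdk_path is recent enough to support Swift for
// the desired SDK type, judging by the version encoded in the directory name
// (e.g. "MacOSX10.14.sdk"). The minimums checked below are macOS 10.10,
// iOS 8, tvOS 9, and watchOS 2; non-Darwin SDKs are assumed to support Swift.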
static bool SDKSupportsSwift(const FileSpec &sdk_path, SDKType desired_type) {
ConstString last_path_component = sdk_path.GetLastPathComponent();
if (last_path_component) {
const llvm::StringRef sdk_name_raw = last_path_component.GetStringRef();
std::string sdk_name_lower = sdk_name_raw.lower();
const llvm::StringRef sdk_name(sdk_name_lower);
llvm::StringRef version_part;
SDKType sdk_type = SDKType::unknown;
if (desired_type == SDKType::unknown) {
for (int i = (int)SDKType::MacOSX; i < (int)SDKType::numSDKTypes; ++i) {
if (sdk_name.startswith(sdk_strings[i])) {
version_part = sdk_name.drop_front(strlen(sdk_strings[i]));
sdk_type = (SDKType)i;
break;
}
}
// For non-Darwin SDKs assume Swift is supported
if (sdk_type == SDKType::unknown)
return true;
} else {
if (sdk_name.startswith(sdk_strings[(int)desired_type])) {
version_part =
sdk_name.drop_front(strlen(sdk_strings[(int)desired_type]));
sdk_type = desired_type;
} else {
return false;
}
}
const size_t major_dot_offset = version_part.find('.');
if (major_dot_offset == llvm::StringRef::npos)
return false;
const llvm::StringRef major_version =
version_part.slice(0, major_dot_offset);
const llvm::StringRef minor_part =
version_part.drop_front(major_dot_offset + 1);
const size_t minor_dot_offset = minor_part.find('.');
if (minor_dot_offset == llvm::StringRef::npos)
return false;
const llvm::StringRef minor_version = minor_part.slice(0, minor_dot_offset);
unsigned int major = 0;
unsigned int minor = 0;
if (major_version.getAsInteger(10, major))
return false;
if (minor_version.getAsInteger(10, minor))
return false;
switch (sdk_type) {
case SDKType::MacOSX:
if (major > 10 || (major == 10 && minor >= 10))
return true;
break;
case SDKType::iPhoneOS:
case SDKType::iPhoneSimulator:
if (major >= 8)
return true;
break;
case SDKType::AppleTVSimulator:
case SDKType::AppleTVOS:
if (major >= 9)
return true;
break;
case SDKType::WatchSimulator:
case SDKType::watchOS:
if (major >= 2)
return true;
break;
default:
return false;
}
}
return false;
}
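// FileSpec::EnumerateDirectory callback: records any Swift-capable SDK it
// encounters in the SDKEnumeratorInfo baton and keeps enumerating.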
FileSpec::EnumerateDirectoryResult
DirectoryEnumerator(void *baton, llvm::sys::fs::file_type file_type,
const FileSpec &spec) {
SDKEnumeratorInfo *enumerator_info = static_cast<SDKEnumeratorInfo *>(baton);
if (SDKSupportsSwift(spec, enumerator_info->sdk_type)) {
enumerator_info->found_path = spec;
return FileSpec::EnumerateDirectoryResult::eEnumerateDirectoryResultNext;
}
return FileSpec::EnumerateDirectoryResult::eEnumerateDirectoryResultNext;
}
static ConstString EnumerateSDKsForVersion(FileSpec sdks_spec, SDKType sdk_type,
uint32_t least_major,
uint32_t least_minor) {
if (!IsDirectory(sdks_spec))
return ConstString();
const bool find_directories = true;
const bool find_files = false;
const bool find_other = true; // include symlinks
SDKEnumeratorInfo enumerator_info;
enumerator_info.sdk_type = sdk_type;
enumerator_info.least_major = least_major;
enumerator_info.least_minor = least_minor;
FileSpec::EnumerateDirectory(sdks_spec.GetPath().c_str(), find_directories,
find_files, find_other, DirectoryEnumerator,
&enumerator_info);
if (IsDirectory(enumerator_info.found_path))
return ConstString(enumerator_info.found_path.GetPath());
else
return ConstString();
}
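// Locates an installed SDK directory of the requested type inside the active
// Xcode installation. For macOS the host (or minimum required) OS version is
// used to build the SDK name and results are cached per major/minor version;
// for the other platforms the platform's SDKs directory is enumerated for a
// Swift-capable SDK.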
static ConstString GetSDKDirectory(SDKType sdk_type, uint32_t least_major,
uint32_t least_minor) {
if (sdk_type != SDKType::MacOSX) {
// Look inside Xcode for the required installed iOS SDK version
std::string sdks_path = GetXcodeContentsPath();
sdks_path.append("Developer/Platforms");
if (sdk_type == SDKType::iPhoneSimulator) {
sdks_path.append("/iPhoneSimulator.platform/");
} else if (sdk_type == SDKType::AppleTVSimulator) {
sdks_path.append("/AppleTVSimulator.platform/");
} else if (sdk_type == SDKType::AppleTVOS) {
sdks_path.append("/AppleTVOS.platform/");
} else if (sdk_type == SDKType::WatchSimulator) {
sdks_path.append("/WatchSimulator.platform/");
} else if (sdk_type == SDKType::watchOS) {
// For now, we need to be prepared to handle either capitalization of this
// path.
std::string WatchOS_candidate_path = sdks_path + "/WatchOS.platform/";
if (IsDirectory(FileSpec(WatchOS_candidate_path.c_str(), false))) {
sdks_path = WatchOS_candidate_path;
} else {
std::string watchOS_candidate_path = sdks_path + "/watchOS.platform/";
if (IsDirectory(FileSpec(watchOS_candidate_path.c_str(), false))) {
sdks_path = watchOS_candidate_path;
} else {
return ConstString();
}
}
} else {
sdks_path.append("/iPhoneOS.platform/");
}
sdks_path.append("Developer/SDKs/");
FileSpec sdks_spec(sdks_path.c_str(), false);
    return EnumerateSDKsForVersion(sdks_spec, sdk_type, least_major,
                                   least_minor);
}
// The SDK type is Mac OS X
llvm::VersionTuple version = HostInfo::GetOSVersion();
if (!version)
return ConstString();
uint32_t major = version.getMajor();
uint32_t minor = version.getMinor().getValueOr(0);
uint32_t update = version.getSubminor().getValueOr(0);
// If there are minimum requirements that exceed the current OS, apply those
if (least_major > major) {
major = least_major;
minor = least_minor;
} else if (least_major == major) {
if (least_minor > minor)
minor = least_minor;
}
typedef std::map<uint64_t, ConstString> SDKDirectoryCache;
static std::mutex g_mutex;
static SDKDirectoryCache g_sdk_cache;
std::lock_guard<std::mutex> locker(g_mutex);
const uint64_t major_minor = (uint64_t)major << 32 | (uint64_t)minor;
SDKDirectoryCache::iterator pos = g_sdk_cache.find(major_minor);
if (pos != g_sdk_cache.end())
return pos->second;
FileSpec fspec;
  std::string xcode_contents_path = GetXcodeContentsPath();
if (!xcode_contents_path.empty()) {
StreamString sdk_path;
sdk_path.Printf(
"%sDeveloper/Platforms/MacOSX.platform/Developer/SDKs/MacOSX%u.%u.sdk",
xcode_contents_path.c_str(), major, minor);
fspec.SetFile(sdk_path.GetString(), false, FileSpec::Style::native);
if (fspec.Exists()) {
ConstString path(sdk_path.GetString());
// Cache results
g_sdk_cache[major_minor] = path;
return path;
} else if ((least_major != major) || (least_minor != minor)) {
// Try the required SDK
sdk_path.Clear();
sdk_path.Printf("%sDeveloper/Platforms/MacOSX.platform/Developer/SDKs/"
"MacOSX%u.%u.sdk",
xcode_contents_path.c_str(), least_major, least_minor);
fspec.SetFile(sdk_path.GetString(), false, FileSpec::Style::native);
if (fspec.Exists()) {
ConstString path(sdk_path.GetString());
// Cache results
g_sdk_cache[major_minor] = path;
return path;
} else {
// Okay, we're going to do an exhaustive search for *any* SDK that has
// an adequate version.
std::string sdks_path = GetXcodeContentsPath();
sdks_path.append("Developer/Platforms/MacOSX.platform/Developer/SDKs");
FileSpec sdks_spec(sdks_path.c_str(), false);
        ConstString sdk_path = EnumerateSDKsForVersion(
            sdks_spec, sdk_type, least_major, least_minor);
if (sdk_path) {
g_sdk_cache[major_minor] = sdk_path;
return sdk_path;
}
}
}
}
// Cache results
g_sdk_cache[major_minor] = ConstString();
return ConstString();
}
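// Computes, once, the Swift compiler resource directory to use, trying in
// order: the "clang" directory next to LLDB's own Swift directory, the
// enclosing .xctoolchain, the default Xcode toolchain, the command-line
// tools, and finally a build-tree layout where a swift- build directory sits
// next to the lldb- build directory.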
static ConstString GetResourceDir() {
static ConstString g_cached_resource_dir;
static std::once_flag g_once_flag;
std::call_once(g_once_flag, []() {
Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_TYPES));
// First, check if there's something in our bundle
{
      if (FileSpec swift_dir_spec = HostInfo::GetSwiftDir()) {
if (log)
log->Printf("%s: trying ePathTypeSwiftDir: %s", __FUNCTION__,
swift_dir_spec.GetCString());
// We can't just check for the Swift directory, because that
// always exists. We have to look for "clang" inside that.
FileSpec swift_clang_dir_spec = swift_dir_spec;
swift_clang_dir_spec.AppendPathComponent("clang");
if (IsDirectory(swift_clang_dir_spec)) {
g_cached_resource_dir = ConstString(swift_dir_spec.GetPath());
if (log)
log->Printf("%s: found Swift resource dir via "
"ePathTypeSwiftDir': %s",
__FUNCTION__, g_cached_resource_dir.AsCString());
return;
}
}
}
// Nothing in our bundle. Are we in a toolchain that has its own Swift
// compiler resource dir?
{
std::string xcode_toolchain_path = GetCurrentToolchainPath();
if (log)
log->Printf("%s: trying toolchain path: %s", __FUNCTION__,
xcode_toolchain_path.c_str());
if (!xcode_toolchain_path.empty()) {
xcode_toolchain_path.append("usr/lib/swift");
if (log)
log->Printf("%s: trying toolchain-based lib path: %s", __FUNCTION__,
xcode_toolchain_path.c_str());
if (IsDirectory(FileSpec(xcode_toolchain_path, false))) {
g_cached_resource_dir = ConstString(xcode_toolchain_path);
if (log)
log->Printf("%s: found Swift resource dir via "
"toolchain path + 'usr/lib/swift': %s",
__FUNCTION__, g_cached_resource_dir.AsCString());
return;
}
}
}
// We're not in a toolchain that has one. Use the Xcode default toolchain.
{
std::string xcode_contents_path = GetXcodeContentsPath();
if (log)
log->Printf("%s: trying Xcode path: %s", __FUNCTION__,
xcode_contents_path.c_str());
if (!xcode_contents_path.empty()) {
xcode_contents_path.append("Developer/Toolchains/"
"XcodeDefault.xctoolchain"
"/usr/lib/swift");
if (log)
log->Printf("%s: trying Xcode-based lib path: %s", __FUNCTION__,
xcode_contents_path.c_str());
if (IsDirectory(FileSpec(xcode_contents_path, false))) {
g_cached_resource_dir = ConstString(xcode_contents_path);
if (log)
log->Printf("%s: found Swift resource dir via "
"Xcode contents path + default toolchain "
"relative dir: %s",
__FUNCTION__, g_cached_resource_dir.AsCString());
return;
}
}
}
// We're not in Xcode. We might be in the command-line tools.
{
std::string cl_tools_path = GetCurrentCLToolsPath();
if (log)
log->Printf("%s: trying command-line tools path: %s", __FUNCTION__,
cl_tools_path.c_str());
if (!cl_tools_path.empty()) {
cl_tools_path.append("usr/lib/swift");
if (log)
log->Printf("%s: trying command-line tools-based lib "
"path: %s",
__FUNCTION__, cl_tools_path.c_str());
if (IsDirectory(FileSpec(cl_tools_path, false))) {
g_cached_resource_dir = ConstString(cl_tools_path);
if (log)
log->Printf("%s: found Swift resource dir via "
"command-line tools path + "
"usr/lib/swift: %s",
__FUNCTION__, g_cached_resource_dir.AsCString());
return;
}
}
}
// We might be in the build-dir configuration for a build-script-driven
// LLDB build, which has the Swift build dir as a sibling directory
    // to the lldb build dir. This looks very different from the install-dir
    // layout that the previous checks would try.
{
if (FileSpec faux_swift_dir_spec = HostInfo::GetSwiftDir()) {
// We can't use a C++11 stdlib regex feature here because it
// doesn't work on Ubuntu 14.04 x86_64. Once we don't care
// about supporting that anymore, let's pull the code below
// back in since it is a simpler implementation using
// std::regex.
#if 0
// Let's try to regex this.
// We're looking for /some/path/lldb-{os}-{arch}, and want to
// build the following:
// /some/path/swift-{os}-{arch}/lib/swift/{os}/{arch}
// In a match, these are the following assignments for
// backrefs:
// $1 - first part of path before swift build dir
// $2 - the host OS path separator character
// $3 - all the stuff that should come after changing
// lldb to swift for the lib dir.
auto match_regex =
std::regex("^(.+([/\\\\]))lldb-(.+)$");
const std::string replace_format = "$1swift-$3";
const std::string faux_swift_dir =
faux_swift_dir_spec.GetCString();
const std::string build_tree_resource_dir =
std::regex_replace(faux_swift_dir, match_regex,
replace_format);
#else
            const std::string faux_swift_dir =
                faux_swift_dir_spec.GetCString();
// Find something that matches lldb- (particularly,
// the last one).
const std::string lldb_dash("lldb-");
auto lldb_pos = faux_swift_dir.rfind(lldb_dash);
if ((lldb_pos != std::string::npos) &&
(lldb_pos > 0) &&
((faux_swift_dir[lldb_pos - 1] == '\\') ||
(faux_swift_dir[lldb_pos - 1] == '/')))
{
// We found something that matches ^.+[/\\]lldb-.+$
std::ostringstream stream;
// Take everything before lldb- (the path leading up to
// the lldb dir).
stream << faux_swift_dir.substr(0, lldb_pos);
// replace lldb- with swift-.
stream << "swift-";
// and now tack on the same components from after
// the lldb- part.
stream << faux_swift_dir.substr(lldb_pos +
lldb_dash.length());
const std::string build_tree_resource_dir = stream.str();
if (log)
log->Printf("%s: trying ePathTypeSwiftDir regex-based "
"build dir: %s",
__FUNCTION__,
build_tree_resource_dir.c_str());
FileSpec swift_resource_dir_spec(
build_tree_resource_dir.c_str(), false);
if (IsDirectory(swift_resource_dir_spec))
{
g_cached_resource_dir =
ConstString(swift_resource_dir_spec.GetPath());
if (log)
log->Printf("%s: found Swift resource dir via "
"ePathTypeSwiftDir + inferred "
"build-tree dir: %s", __FUNCTION__,
g_cached_resource_dir.AsCString());
return;
}
}
#endif
}
}
// We failed to find a reasonable Swift resource dir.
if (log)
log->Printf("%s: failed to find a Swift resource dir", __FUNCTION__);
});
return g_cached_resource_dir;
}
} // anonymous namespace
swift::CompilerInvocation &SwiftASTContext::GetCompilerInvocation() {
return *m_compiler_invocation_ap;
}
swift::SourceManager &SwiftASTContext::GetSourceManager() {
if (m_source_manager_ap.get() == NULL)
m_source_manager_ap.reset(new swift::SourceManager());
return *m_source_manager_ap;
}
swift::LangOptions &SwiftASTContext::GetLanguageOptions() {
return GetCompilerInvocation().getLangOptions();
}
swift::DiagnosticEngine &SwiftASTContext::GetDiagnosticEngine() {
if (m_diagnostic_engine_ap.get() == NULL)
m_diagnostic_engine_ap.reset(
new swift::DiagnosticEngine(GetSourceManager()));
return *m_diagnostic_engine_ap;
}
// This code comes from CompilerInvocation.cpp (setRuntimeResourcePath)
static void ConfigureResourceDirs(swift::CompilerInvocation &invocation,
FileSpec resource_dir, llvm::Triple triple) {
// Make sure the triple is right:
invocation.setTargetTriple(triple.str());
invocation.setRuntimeResourcePath(resource_dir.GetPath().c_str());
}
swift::SILOptions &SwiftASTContext::GetSILOptions() {
return GetCompilerInvocation().getSILOptions();
}
bool SwiftASTContext::TargetHasNoSDK() {
llvm::Triple triple(GetTriple());
switch (triple.getOS()) {
case llvm::Triple::OSType::MacOSX:
case llvm::Triple::OSType::Darwin:
case llvm::Triple::OSType::IOS:
return false;
default:
return true;
}
}
swift::ClangImporterOptions &SwiftASTContext::GetClangImporterOptions() {
swift::ClangImporterOptions &clang_importer_options =
GetCompilerInvocation().getClangImporterOptions();
if (!m_initialized_clang_importer_options) {
m_initialized_clang_importer_options = true;
// Set the Clang module search path.
llvm::SmallString<128> path;
auto props = ModuleList::GetGlobalModuleListProperties();
props.GetClangModulesCachePath().GetPath(path);
clang_importer_options.ModuleCachePath = path.str();
FileSpec clang_dir_spec;
clang_dir_spec = GetClangResourceDir();
if (clang_dir_spec.Exists())
clang_importer_options.OverrideResourceDir =
std::move(clang_dir_spec.GetPath());
clang_importer_options.DebuggerSupport = true;
}
return clang_importer_options;
}
swift::SearchPathOptions &SwiftASTContext::GetSearchPathOptions() {
swift::SearchPathOptions &search_path_opts =
GetCompilerInvocation().getSearchPathOptions();
if (!m_initialized_search_path_options) {
m_initialized_search_path_options = true;
bool set_sdk = false;
bool set_resource_dir = false;
if (!search_path_opts.SDKPath.empty()) {
FileSpec provided_sdk_path(search_path_opts.SDKPath, false);
if (provided_sdk_path.Exists()) {
// We don't check whether the SDK supports swift because we figure if
// someone is passing this to us on the command line (e.g., for the
// REPL), they probably know what they're doing.
set_sdk = true;
}
} else if (!m_platform_sdk_path.empty()) {
FileSpec platform_sdk(m_platform_sdk_path.c_str(), false);
if (platform_sdk.Exists() &&
SDKSupportsSwift(platform_sdk, SDKType::unknown)) {
search_path_opts.SDKPath = m_platform_sdk_path.c_str();
set_sdk = true;
}
}
llvm::Triple triple(GetTriple());
if (!m_resource_dir.empty()) {
FileSpec resource_dir(m_resource_dir.c_str(), false);
if (resource_dir.Exists()) {
ConfigureResourceDirs(GetCompilerInvocation(), resource_dir, triple);
set_resource_dir = true;
}
}
auto is_simulator = [&]() -> bool {
return triple.getEnvironment() == llvm::Triple::Simulator ||
!triple.getArchName().startswith("arm");
};
if (!set_sdk) {
switch (triple.getOS()) {
case llvm::Triple::OSType::MacOSX:
case llvm::Triple::OSType::Darwin:
search_path_opts.SDKPath = GetSDKDirectory(SDKType::MacOSX, 10, 10)
.AsCString("");
break;
case llvm::Triple::OSType::IOS:
search_path_opts.SDKPath =
is_simulator()
? GetSDKDirectory(SDKType::iPhoneSimulator, 8, 0).AsCString("")
: GetSDKDirectory(SDKType::iPhoneOS, 8, 0).AsCString("");
break;
case llvm::Triple::OSType::TvOS:
search_path_opts.SDKPath =
is_simulator()
? GetSDKDirectory(SDKType::AppleTVSimulator, 9, 0).AsCString("")
: GetSDKDirectory(SDKType::AppleTVOS, 9, 0).AsCString("");
break;
case llvm::Triple::OSType::WatchOS:
search_path_opts.SDKPath =
is_simulator()
? GetSDKDirectory(SDKType::WatchSimulator, 2, 0).AsCString("")
: GetSDKDirectory(SDKType::watchOS, 2, 0).AsCString("");
break;
default:
// Explicitly leave the SDKPath blank on other platforms.
break;
}
}
if (!set_resource_dir) {
FileSpec resource_dir(::GetResourceDir().AsCString(""), false);
if (resource_dir.Exists())
ConfigureResourceDirs(GetCompilerInvocation(), resource_dir, triple);
}
}
return search_path_opts;
}
namespace lldb_private {
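// A raw_string_ostream that implements the llvm::raw_ostream color interface
// and writes ANSI escape codes into its string buffer when colorization is
// enabled.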
class ANSIColorStringStream : public llvm::raw_string_ostream {
public:
ANSIColorStringStream(bool colorize)
: llvm::raw_string_ostream(m_buffer), m_colorize(colorize) {}
/// Changes the foreground color of text that will be output from this point
/// forward.
/// @param Color ANSI color to use, the special SAVEDCOLOR can be used to
/// change only the bold attribute, and keep colors untouched
/// @param Bold bold/brighter text, default false
/// @param BG if true change the background, default: change foreground
/// @returns itself so it can be used within << invocations
virtual raw_ostream &changeColor(enum Colors colors, bool bold = false,
bool bg = false) {
if (llvm::sys::Process::ColorNeedsFlush())
flush();
const char *colorcode;
if (colors == SAVEDCOLOR)
colorcode = llvm::sys::Process::OutputBold(bg);
else
colorcode = llvm::sys::Process::OutputColor(colors, bold, bg);
if (colorcode) {
size_t len = strlen(colorcode);
write(colorcode, len);
}
return *this;
}
/// Resets the colors to terminal defaults. Call this when you are done
/// outputting colored text, or before program exit.
virtual raw_ostream &resetColor() {
if (llvm::sys::Process::ColorNeedsFlush())
flush();
const char *colorcode = llvm::sys::Process::ResetColor();
if (colorcode) {
size_t len = strlen(colorcode);
write(colorcode, len);
}
return *this;
}
  /// Reverses the foreground and background colors.
virtual raw_ostream &reverseColor() {
if (llvm::sys::Process::ColorNeedsFlush())
flush();
const char *colorcode = llvm::sys::Process::OutputReverse();
if (colorcode) {
size_t len = strlen(colorcode);
write(colorcode, len);
}
return *this;
}
/// This function determines if this stream is connected to a "tty" or
/// "console" window. That is, the output would be displayed to the user
/// rather than being put on a pipe or stored in a file.
virtual bool is_displayed() const { return m_colorize; }
/// This function determines if this stream is displayed and supports colors.
virtual bool has_colors() const { return m_colorize; }
protected:
std::string m_buffer;
bool m_colorize;
};
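// DiagnosticConsumer that buffers Swift diagnostics as RawDiagnostics so that
// PrintDiagnostics() can later remap their line numbers into the user's
// expression text and forward them to LLDB's DiagnosticManager.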
class StoringDiagnosticConsumer : public swift::DiagnosticConsumer {
public:
StoringDiagnosticConsumer(SwiftASTContext &ast_context)
: m_ast_context(ast_context), m_diagnostics(), m_num_errors(0),
m_colorize(false) {
m_ast_context.GetDiagnosticEngine().resetHadAnyError();
m_ast_context.GetDiagnosticEngine().addConsumer(*this);
}
~StoringDiagnosticConsumer() {
m_ast_context.GetDiagnosticEngine().takeConsumers();
}
virtual void handleDiagnostic(swift::SourceManager &source_mgr,
swift::SourceLoc source_loc,
swift::DiagnosticKind kind,
llvm::StringRef formatString,
llvm::ArrayRef<swift::DiagnosticArgument> formatArgs,
const swift::DiagnosticInfo &info) {
llvm::StringRef bufferName = "<anonymous>";
unsigned bufferID = 0;
std::pair<unsigned, unsigned> line_col = {0, 0};
llvm::SmallString<256> text;
{
llvm::raw_svector_ostream out(text);
swift::DiagnosticEngine::formatDiagnosticText(out,
formatString,
formatArgs);
}
if (source_loc.isValid()) {
bufferID = source_mgr.findBufferContainingLoc(source_loc);
bufferName = source_mgr.getDisplayNameForLoc(source_loc);
line_col = source_mgr.getLineAndColumn(source_loc);
}
if (line_col.first != 0) {
ANSIColorStringStream os(m_colorize);
// Determine what kind of diagnostic we're emitting, and whether we want
// to use its fixits:
bool use_fixits = false;
llvm::SourceMgr::DiagKind source_mgr_kind;
switch (kind) {
default:
case swift::DiagnosticKind::Error:
source_mgr_kind = llvm::SourceMgr::DK_Error;
use_fixits = true;
break;
case swift::DiagnosticKind::Warning:
source_mgr_kind = llvm::SourceMgr::DK_Warning;
break;
case swift::DiagnosticKind::Note:
source_mgr_kind = llvm::SourceMgr::DK_Note;
break;
}
// Translate ranges.
llvm::SmallVector<llvm::SMRange, 2> ranges;
for (auto R : info.Ranges)
ranges.push_back(getRawRange(source_mgr, R));
// Translate fix-its.
llvm::SmallVector<llvm::SMFixIt, 2> fix_its;
for (swift::DiagnosticInfo::FixIt F : info.FixIts)
fix_its.push_back(getRawFixIt(source_mgr, F));
// Display the diagnostic.
auto message = source_mgr.GetMessage(source_loc, source_mgr_kind, text,
ranges, fix_its);
source_mgr.getLLVMSourceMgr().PrintMessage(os, message);
// Use the llvm::raw_string_ostream::str() accessor as it will flush
// the stream into our "message" and return us a reference to "message".
std::string &message_ref = os.str();
if (message_ref.empty())
m_diagnostics.push_back(RawDiagnostic(
text.str(), kind, bufferName, bufferID, line_col.first,
line_col.second,
use_fixits ? info.FixIts
: llvm::ArrayRef<swift::Diagnostic::FixIt>()));
else
m_diagnostics.push_back(RawDiagnostic(
message_ref, kind, bufferName, bufferID, line_col.first,
line_col.second,
use_fixits ? info.FixIts
: llvm::ArrayRef<swift::Diagnostic::FixIt>()));
} else {
m_diagnostics.push_back(RawDiagnostic(
text.str(), kind, bufferName, bufferID, line_col.first,
line_col.second, llvm::ArrayRef<swift::Diagnostic::FixIt>()));
}
if (kind == swift::DiagnosticKind::Error)
m_num_errors++;
}
void Clear() {
m_ast_context.GetDiagnosticEngine().resetHadAnyError();
m_diagnostics.clear();
m_num_errors = 0;
}
unsigned NumErrors() {
if (m_num_errors)
return m_num_errors;
else if (m_ast_context.GetASTContext()->hadError())
return 1;
else
return 0;
}
static DiagnosticSeverity SeverityForKind(swift::DiagnosticKind kind) {
switch (kind) {
case swift::DiagnosticKind::Error:
return eDiagnosticSeverityError;
case swift::DiagnosticKind::Warning:
return eDiagnosticSeverityWarning;
case swift::DiagnosticKind::Note:
return eDiagnosticSeverityRemark;
}
llvm_unreachable("Unhandled DiagnosticKind in switch.");
}
void PrintDiagnostics(DiagnosticManager &diagnostic_manager,
uint32_t bufferID = UINT32_MAX, uint32_t first_line = 0,
uint32_t last_line = UINT32_MAX,
uint32_t line_offset = 0) {
bool added_one_diagnostic = false;
for (const RawDiagnostic &diagnostic : m_diagnostics) {
// We often make expressions and wrap them in some code.
// When we see errors we want the line numbers to be correct so
      // we correct them below. LLVM stores SourceLocs as character offsets,
      // so there is no way to get LLVM to move its error line numbers around
      // by adjusting the source location; we must do it manually. We also
      // want to use the same error formatting as LLVM and Clang, so we must
      // muck with the string.
const DiagnosticSeverity severity = SeverityForKind(diagnostic.kind);
const DiagnosticOrigin origin = eDiagnosticOriginSwift;
if (first_line > 0 && bufferID != UINT32_MAX &&
diagnostic.bufferID == bufferID && !diagnostic.bufferName.empty()) {
// Make sure the error line is in range
if (diagnostic.line >= first_line && diagnostic.line <= last_line) {
// Need to remap the error/warning to a different line
StreamString match;
match.Printf("%s:%u:", diagnostic.bufferName.str().c_str(),
diagnostic.line);
const size_t match_len = match.GetString().size();
size_t match_pos = diagnostic.description.find(match.GetString());
if (match_pos != std::string::npos) {
// We have some <file>:<line>:" instances that need to be updated
StreamString fixed_description;
size_t start_pos = 0;
do {
            if (match_pos > start_pos)
              fixed_description.Printf(
                  "%s", diagnostic.description
                            .substr(start_pos, match_pos - start_pos)
                            .c_str());
fixed_description.Printf("%s:%u:",
diagnostic.bufferName.str().c_str(),
diagnostic.line - first_line +
line_offset + 1);
start_pos = match_pos + match_len;
match_pos =
diagnostic.description.find(match.GetString(), start_pos);
} while (match_pos != std::string::npos);
            // Append any remaining text
if (start_pos < diagnostic.description.size())
fixed_description.Printf(
"%s",
diagnostic.description.substr(start_pos,
diagnostic.description.size() -
start_pos)
.c_str());
SwiftDiagnostic *new_diagnostic =
new SwiftDiagnostic(fixed_description.GetString().data(),
severity, origin, bufferID);
for (auto fixit : diagnostic.fixits)
new_diagnostic->AddFixIt(fixit);
diagnostic_manager.AddDiagnostic(new_diagnostic);
added_one_diagnostic = true;
continue;
}
}
}
}
// In general, we don't want to see diagnostics from outside of the source
// text range of the actual user expression. But if we didn't find any
// diagnostics in the text range, it's probably because the source range was
// not specified correctly, and we don't want to lose legit errors because
// of that. So in that case we'll add them all here:
if (!added_one_diagnostic) {
      // This will report diagnostic errors from outside the expression's
      // source range as well, so that real errors aren't silently dropped
      // when the range bookkeeping is off.
for (const RawDiagnostic &diagnostic : m_diagnostics) {
const DiagnosticSeverity severity = SeverityForKind(diagnostic.kind);
const DiagnosticOrigin origin = eDiagnosticOriginSwift;
diagnostic_manager.AddDiagnostic(diagnostic.description.c_str(),
severity, origin);
}
}
}
bool GetColorize() const { return m_colorize; }
bool SetColorize(bool b) {
const bool old = m_colorize;
m_colorize = b;
return old;
}
private:
  // We don't currently use lldb_private::Diagnostic or any of the lldb
  // DiagnosticManager machinery to store diagnostics as they occur. Instead,
  // we store them in raw form using this struct, then transcode them to
  // SwiftDiagnostics in PrintDiagnostics().
struct RawDiagnostic {
RawDiagnostic(std::string in_desc, swift::DiagnosticKind in_kind,
llvm::StringRef in_bufferName, unsigned in_bufferID,
uint32_t in_line, uint32_t in_column,
llvm::ArrayRef<swift::Diagnostic::FixIt> in_fixits)
: description(in_desc), kind(in_kind), bufferName(in_bufferName),
bufferID(in_bufferID), line(in_line), column(in_column) {
for (auto fixit : in_fixits) {
fixits.push_back(fixit);
}
}
std::string description;
swift::DiagnosticKind kind;
const llvm::StringRef bufferName;
unsigned bufferID;
uint32_t line;
uint32_t column;
std::vector<swift::DiagnosticInfo::FixIt> fixits;
};
typedef std::vector<RawDiagnostic> RawDiagnosticBuffer;
SwiftASTContext &m_ast_context;
RawDiagnosticBuffer m_diagnostics;
unsigned m_num_errors = 0;
bool m_colorize;
};
}
swift::ASTContext *SwiftASTContext::GetASTContext() {
if (m_ast_context_ap.get() == NULL) {
m_ast_context_ap.reset(
swift::ASTContext::get(GetLanguageOptions(), GetSearchPathOptions(),
GetSourceManager(), GetDiagnosticEngine()));
m_diagnostic_consumer_ap.reset(new StoringDiagnosticConsumer(*this));
if (getenv("LLDB_SWIFT_DUMP_DIAGS")) {
// NOTE: leaking a swift::PrintingDiagnosticConsumer() here, but this only
// gets enabled when the above environment variable is set.
GetDiagnosticEngine().addConsumer(
*new swift::PrintingDiagnosticConsumer());
}
// Install the serialized module loader
std::unique_ptr<swift::ModuleLoader> serialized_module_loader_ap(
swift::SerializedModuleLoader::create(*m_ast_context_ap));
if (serialized_module_loader_ap) {
m_serialized_module_loader =
(swift::SerializedModuleLoader *)serialized_module_loader_ap.get();
m_ast_context_ap->addModuleLoader(std::move(serialized_module_loader_ap));
}
// Set up the required state for the evaluator in the TypeChecker.
registerTypeCheckerRequestFunctions(m_ast_context_ap->evaluator);
GetASTMap().Insert(m_ast_context_ap.get(), this);
}
VALID_OR_RETURN(nullptr);
return m_ast_context_ap.get();
}
swift::SerializedModuleLoader *SwiftASTContext::GetSerializeModuleLoader() {
VALID_OR_RETURN(nullptr);
GetASTContext();
return m_serialized_module_loader;
}
swift::ClangImporter *SwiftASTContext::GetClangImporter() {
VALID_OR_RETURN(nullptr);
if (m_clang_importer == NULL) {
swift::ASTContext *ast_ctx = GetASTContext();
if (!ast_ctx) {
return nullptr;
}
// Install the Clang module loader
TargetSP target_sp(m_target_wp.lock());
if (true /*target_sp*/) {
// PlatformSP platform_sp = target_sp->GetPlatform();
if (true /*platform_sp*/) {
if (!ast_ctx->SearchPathOpts.SDKPath.empty() || TargetHasNoSDK()) {
swift::ClangImporterOptions &clang_importer_options =
GetClangImporterOptions();
if (!clang_importer_options.OverrideResourceDir.empty()) {
std::unique_ptr<swift::ModuleLoader> clang_importer_ap(
swift::ClangImporter::create(*m_ast_context_ap,
clang_importer_options));
if (clang_importer_ap) {
const bool isClang = true;
m_clang_importer =
(swift::ClangImporter *)clang_importer_ap.get();
m_ast_context_ap->addModuleLoader(std::move(clang_importer_ap),
isClang);
}
}
}
}
}
}
return m_clang_importer;
}
bool SwiftASTContext::AddModuleSearchPath(const char *path) {
VALID_OR_RETURN(false);
if (path && path[0]) {
swift::ASTContext *ast = GetASTContext();
std::string path_str(path);
bool add_search_path = true;
for (auto path : ast->SearchPathOpts.ImportSearchPaths) {
if (path == path_str) {
add_search_path = false;
break;
}
}
if (add_search_path) {
ast->SearchPathOpts.ImportSearchPaths.push_back(path);
return true;
}
}
return false;
}
bool SwiftASTContext::AddFrameworkSearchPath(const char *path) {
VALID_OR_RETURN(false);
if (path && path[0]) {
swift::ASTContext *ast = GetASTContext();
std::string path_str(path);
bool add_search_path = true;
for (const auto &swift_path : ast->SearchPathOpts.FrameworkSearchPaths) {
if (swift_path.Path == path_str) {
add_search_path = false;
break;
}
}
if (add_search_path) {
ast->SearchPathOpts.FrameworkSearchPaths.push_back({path, /*isSystem=*/false});
return true;
}
}
return false;
}
bool SwiftASTContext::AddClangArgument(std::string clang_arg, bool force) {
if (!clang_arg.empty()) {
swift::ClangImporterOptions &importer_options = GetClangImporterOptions();
bool add_hmap = true;
if (!force) {
for (std::string &arg : importer_options.ExtraArgs) {
if (!arg.compare(clang_arg)) {
add_hmap = false;
break;
}
}
}
if (add_hmap) {
importer_options.ExtraArgs.push_back(clang_arg);
return true;
}
}
return false;
}
bool SwiftASTContext::AddClangArgumentPair(const char *clang_arg_1,
const char *clang_arg_2) {
if (clang_arg_1 && clang_arg_2 && clang_arg_1[0] && clang_arg_2[0]) {
swift::ClangImporterOptions &importer_options = GetClangImporterOptions();
bool add_hmap = true;
for (ssize_t ai = 0, ae = importer_options.ExtraArgs.size() -
1; // -1 because we look at the next one too
ai < ae;
++ai) {
if (!importer_options.ExtraArgs[ai].compare(clang_arg_1) &&
!importer_options.ExtraArgs[ai + 1].compare(clang_arg_2)) {
add_hmap = false;
break;
}
}
if (add_hmap) {
importer_options.ExtraArgs.push_back(clang_arg_1);
importer_options.ExtraArgs.push_back(clang_arg_2);
return true;
}
}
return false;
}
size_t SwiftASTContext::GetNumModuleSearchPaths() const {
VALID_OR_RETURN(0);
if (m_ast_context_ap.get())
return m_ast_context_ap->SearchPathOpts.ImportSearchPaths.size();
return 0;
}
const char *SwiftASTContext::GetModuleSearchPathAtIndex(size_t idx) const {
VALID_OR_RETURN(nullptr);
if (m_ast_context_ap.get()) {
if (idx < m_ast_context_ap->SearchPathOpts.ImportSearchPaths.size())
return m_ast_context_ap->SearchPathOpts.ImportSearchPaths[idx].c_str();
}
return NULL;
}
size_t SwiftASTContext::GetNumFrameworkSearchPaths() const {
VALID_OR_RETURN(0);
if (m_ast_context_ap.get())
return m_ast_context_ap->SearchPathOpts.FrameworkSearchPaths.size();
return 0;
}
const char *SwiftASTContext::GetFrameworkSearchPathAtIndex(size_t idx) const {
VALID_OR_RETURN(nullptr);
if (m_ast_context_ap.get()) {
if (idx < m_ast_context_ap->SearchPathOpts.FrameworkSearchPaths.size())
return m_ast_context_ap->SearchPathOpts.FrameworkSearchPaths[idx].Path.c_str();
}
return NULL;
}
size_t SwiftASTContext::GetNumClangArguments() {
swift::ClangImporterOptions &importer_options = GetClangImporterOptions();
return importer_options.ExtraArgs.size();
}
const char *SwiftASTContext::GetClangArgumentAtIndex(size_t idx) {
swift::ClangImporterOptions &importer_options = GetClangImporterOptions();
if (idx < importer_options.ExtraArgs.size())
return importer_options.ExtraArgs[idx].c_str();
return NULL;
}
swift::ModuleDecl *
SwiftASTContext::GetCachedModule(const ConstString &module_name) {
VALID_OR_RETURN(nullptr);
SwiftModuleMap::const_iterator iter =
m_swift_module_cache.find(module_name.GetCString());
if (iter != m_swift_module_cache.end())
return iter->second;
return NULL;
}
swift::ModuleDecl *
SwiftASTContext::CreateModule(const ConstString &module_basename,
Status &error) {
VALID_OR_RETURN(nullptr);
if (module_basename) {
swift::ModuleDecl *module = GetCachedModule(module_basename);
if (module) {
error.SetErrorStringWithFormat("module already exists for '%s'",
module_basename.GetCString());
return NULL;
}
swift::ASTContext *ast = GetASTContext();
if (ast) {
swift::Identifier module_id(
ast->getIdentifier(module_basename.GetCString()));
module = swift::ModuleDecl::create(module_id, *ast);
if (module) {
m_swift_module_cache[module_basename.GetCString()] = module;
return module;
} else {
error.SetErrorStringWithFormat("invalid swift AST (NULL)");
}
} else {
error.SetErrorStringWithFormat("invalid swift AST (NULL)");
}
} else {
error.SetErrorStringWithFormat("invalid module name (empty)");
}
return NULL;
}
void SwiftASTContext::CacheModule(swift::ModuleDecl *module) {
VALID_OR_RETURN_VOID();
if (!module)
return;
auto ID = module->getName().get();
if (nullptr == ID || 0 == ID[0])
return;
if (m_swift_module_cache.find(ID) != m_swift_module_cache.end())
return;
m_swift_module_cache.insert({ID, module});
}
swift::ModuleDecl *
SwiftASTContext::GetModule(const ConstString &module_basename, Status &error) {
VALID_OR_RETURN(nullptr);
Log *log(GetLogIfAllCategoriesSet(LIBLLDB_LOG_TYPES));
if (log)
log->Printf("((SwiftASTContext*)%p)->GetModule('%s')", this,
module_basename.AsCString("<no name>"));
if (module_basename) {
swift::ModuleDecl *module = GetCachedModule(module_basename);
if (module)
return module;
if (swift::ASTContext *ast = GetASTContext()) {
typedef std::pair<swift::Identifier, swift::SourceLoc> ModuleNameSpec;
llvm::StringRef module_basename_sref(module_basename.GetCString());
ModuleNameSpec name_pair(ast->getIdentifier(module_basename_sref),
swift::SourceLoc());
if (HasFatalErrors()) {
error.SetErrorStringWithFormat("failed to get module '%s' from AST "
"context:\nAST context is in a fatal "
"error state",
module_basename.GetCString());
printf("error in SwiftASTContext::GetModule(%s): AST context is in a "
"fatal error stat",
module_basename.GetCString());
return nullptr;
}
ClearDiagnostics();
module = ast->getModuleByName(module_basename_sref);
if (HasErrors()) {
DiagnosticManager diagnostic_manager;
PrintDiagnostics(diagnostic_manager);
error.SetErrorStringWithFormat(
"failed to get module '%s' from AST context:\n%s",
module_basename.GetCString(),
diagnostic_manager.GetString().data());
#ifdef LLDB_CONFIGURATION_DEBUG
printf("error in SwiftASTContext::GetModule(%s): '%s'",
module_basename.GetCString(),
diagnostic_manager.GetString().data());
#endif
if (log)
log->Printf("((SwiftASTContext*)%p)->GetModule('%s') -- error: %s",
this, module_basename.GetCString(),
diagnostic_manager.GetString().data());
} else if (module) {
if (log)
log->Printf("((SwiftASTContext*)%p)->GetModule('%s') -- found %s",
this, module_basename.GetCString(),
module->getName().str().str().c_str());
m_swift_module_cache[module_basename.GetCString()] = module;
return module;
} else {
if (log)
log->Printf(
"((SwiftASTContext*)%p)->GetModule('%s') -- failed with no error",
this, module_basename.GetCString());
error.SetErrorStringWithFormat(
"failed to get module '%s' from AST context",
module_basename.GetCString());
}
} else {
if (log)
log->Printf(
"((SwiftASTContext*)%p)->GetModule('%s') -- invalid ASTContext",
this, module_basename.GetCString());
error.SetErrorString("invalid swift::ASTContext");
}
} else {
if (log)
log->Printf(
"((SwiftASTContext*)%p)->GetModule('%s') -- empty module name", this,
module_basename.GetCString());
error.SetErrorString("invalid module name (empty)");
}
return NULL;
}
swift::ModuleDecl *SwiftASTContext::GetModule(const FileSpec &module_spec,
Status &error) {
VALID_OR_RETURN(nullptr);
ConstString module_basename(module_spec.GetFileNameStrippingExtension());
Log *log(GetLogIfAllCategoriesSet(LIBLLDB_LOG_TYPES));
if (log)
log->Printf("((SwiftASTContext*)%p)->GetModule((FileSpec)'%s')", this,
module_spec.GetPath().c_str());
if (module_basename) {
SwiftModuleMap::const_iterator iter =
m_swift_module_cache.find(module_basename.GetCString());
if (iter != m_swift_module_cache.end())
return iter->second;
if (module_spec.Exists()) {
swift::ASTContext *ast = GetASTContext();
if (!GetClangImporter()) {
if (log)
log->Printf("((SwiftASTContext*)%p)->GetModule((FileSpec)'%s') -- no "
"ClangImporter so giving up",
this, module_spec.GetPath().c_str());
error.SetErrorStringWithFormat("couldn't get a ClangImporter");
return nullptr;
}
std::string module_directory(module_spec.GetDirectory().GetCString());
bool add_search_path = true;
for (auto path : ast->SearchPathOpts.ImportSearchPaths) {
if (path == module_directory) {
add_search_path = false;
break;
}
}
// Add the search path if needed so we can find the module by basename
if (add_search_path)
ast->SearchPathOpts.ImportSearchPaths.push_back(
std::move(module_directory));
typedef std::pair<swift::Identifier, swift::SourceLoc> ModuleNameSpec;
llvm::StringRef module_basename_sref(module_basename.GetCString());
ModuleNameSpec name_pair(ast->getIdentifier(module_basename_sref),
swift::SourceLoc());
swift::ModuleDecl *module =
ast->getModule(llvm::ArrayRef<ModuleNameSpec>(name_pair));
if (module) {
if (log)
log->Printf(
"((SwiftASTContext*)%p)->GetModule((FileSpec)'%s') -- found %s",
this, module_spec.GetPath().c_str(),
module->getName().str().str().c_str());
m_swift_module_cache[module_basename.GetCString()] = module;
return module;
} else {
if (log)
log->Printf("((SwiftASTContext*)%p)->GetModule((FileSpec)'%s') -- "
"couldn't get from AST context",
this, module_spec.GetPath().c_str());
error.SetErrorStringWithFormat(
"failed to get module '%s' from AST context",
module_basename.GetCString());
}
} else {
if (log)
log->Printf("((SwiftASTContext*)%p)->GetModule((FileSpec)'%s') -- "
"doesn't exist",
this, module_spec.GetPath().c_str());
error.SetErrorStringWithFormat("module '%s' doesn't exist",
module_spec.GetPath().c_str());
}
} else {
if (log)
log->Printf(
"((SwiftASTContext*)%p)->GetModule((FileSpec)'%s') -- no basename",
this, module_spec.GetPath().c_str());
error.SetErrorStringWithFormat("no module basename in '%s'",
module_spec.GetPath().c_str());
}
return NULL;
}
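// Convenience wrapper: find the named module and then load its link
// libraries into the given process.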
swift::ModuleDecl *
SwiftASTContext::FindAndLoadModule(const ConstString &module_basename,
Process &process, Status &error) {
VALID_OR_RETURN(nullptr);
swift::ModuleDecl *swift_module = GetModule(module_basename, error);
if (!swift_module)
return nullptr;
LoadModule(swift_module, process, error);
return swift_module;
}
swift::ModuleDecl *
SwiftASTContext::FindAndLoadModule(const FileSpec &module_spec,
Process &process, Status &error) {
VALID_OR_RETURN(nullptr);
swift::ModuleDecl *swift_module = GetModule(module_spec, error);
if (!swift_module)
return nullptr;
LoadModule(swift_module, process, error);
return swift_module;
}
bool SwiftASTContext::LoadOneImage(Process &process, FileSpec &link_lib_spec,
Status &error) {
VALID_OR_RETURN(false);
error.Clear();
PlatformSP platform_sp = process.GetTarget().GetPlatform();
if (platform_sp)
return platform_sp->LoadImage(&process, FileSpec(), link_lib_spec, error) !=
LLDB_INVALID_IMAGE_TOKEN;
else
return false;
}
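// Collect the compiler's library search paths plus the runtime library path
// into a single list for the load helpers below.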
static void
GetLibrarySearchPaths(std::vector<std::string> &paths,
const swift::SearchPathOptions &search_path_opts) {
paths.clear();
paths.assign(search_path_opts.LibrarySearchPaths.begin(),
search_path_opts.LibrarySearchPaths.end());
paths.push_back(search_path_opts.RuntimeLibraryPath);
}
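// Walk the link libraries of swift_module and of every module it imports,
// and try to load each framework or dylib into the process, accumulating any
// dlopen errors into a single Status.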
void SwiftASTContext::LoadModule(swift::ModuleDecl *swift_module,
Process &process, Status &error) {
VALID_OR_RETURN_VOID();
Status current_error;
auto addLinkLibrary = [&](swift::LinkLibrary link_lib) {
Status load_image_error;
StreamString all_dlopen_errors;
const char *library_name = link_lib.getName().data();
if (library_name == NULL || library_name[0] == '\0') {
error.SetErrorString("Empty library name passed to addLinkLibrary");
return;
}
SwiftLanguageRuntime *runtime = process.GetSwiftLanguageRuntime();
if (runtime && runtime->IsInLibraryNegativeCache(library_name))
return;
swift::LibraryKind library_kind = link_lib.getKind();
Log *log(GetLogIfAnyCategoriesSet(LIBLLDB_LOG_TYPES));
if (log)
log->Printf("\nLoading link library \"%s\" of kind: %d.", library_name,
library_kind);
switch (library_kind) {
case swift::LibraryKind::Framework: {
// First make sure the library isn't already loaded. Since this is a
// framework, we make sure the file name and the framework name are the
// same, and that we are contained in FileName.framework with no other
// intervening frameworks. We can get more restrictive if this gives
// false positives.
ConstString library_cstr(library_name);
std::string framework_name(library_name);
framework_name.append(".framework");
// Lookup the module by file basename and make sure that basename has
// "<basename>.framework" in the path.
ModuleSpec module_spec;
module_spec.GetFileSpec().GetFilename() = library_cstr;
lldb_private::ModuleList matching_module_list;
bool module_already_loaded = false;
if (process.GetTarget().GetImages().FindModules(module_spec,
matching_module_list)) {
matching_module_list.ForEach(
[&module_already_loaded, &module_spec,
&framework_name](const ModuleSP &module_sp) -> bool {
module_already_loaded = module_spec.GetFileSpec().GetPath().find(
framework_name) != std::string::npos;
                  // Keep iterating if we didn't find the right module.
                  return !module_already_loaded;
});
}
// If we already have this library loaded, don't try and load it again.
if (module_already_loaded) {
if (log)
log->Printf("Skipping load of %s as it is already loaded.",
framework_name.c_str());
return;
}
for (auto module : process.GetTarget().GetImages().Modules()) {
FileSpec module_file = module->GetFileSpec();
if (module_file.GetFilename() == library_cstr) {
std::string module_path = module_file.GetPath();
size_t framework_offset = module_path.rfind(framework_name);
if (framework_offset != std::string::npos) {
// The Framework is already loaded, so we don't need to try to load
// it again.
if (log)
log->Printf("Skipping load of %s as it is already loaded.",
framework_name.c_str());
return;
}
}
}
std::string framework_path("@rpath/");
framework_path.append(library_name);
framework_path.append(".framework/");
framework_path.append(library_name);
FileSpec framework_spec(framework_path.c_str(), false);
if (LoadOneImage(process, framework_spec, load_image_error)) {
if (log)
log->Printf("Found framework at: %s.", framework_path.c_str());
return;
} else
all_dlopen_errors.Printf("Looking for \"%s\", error: %s\n",
framework_path.c_str(),
load_image_error.AsCString());
// And then in the various framework search paths.
std::unordered_set<std::string> seen_paths;
std::vector<std::string> uniqued_paths;
for (const auto &framework_search_dir :
swift_module->getASTContext().SearchPathOpts.FrameworkSearchPaths) {
// The framework search dir as it comes from the AST context often has
// duplicate entries, don't try to load along the same path twice.
std::pair<std::unordered_set<std::string>::iterator, bool>
insert_result = seen_paths.insert(framework_search_dir.Path);
        if (insert_result.second) {
framework_path = framework_search_dir.Path;
framework_path.append("/");
framework_path.append(library_name);
framework_path.append(".framework/");
uniqued_paths.push_back(framework_path);
}
}
uint32_t token = LLDB_INVALID_IMAGE_TOKEN;
PlatformSP platform_sp = process.GetTarget().GetPlatform();
Status error;
FileSpec library_spec(library_name, false);
FileSpec found_path;
if (platform_sp)
token = platform_sp->LoadImageUsingPaths(&process, library_spec,
uniqued_paths, error,
&found_path);
if (token != LLDB_INVALID_IMAGE_TOKEN) {
if (log)
log->Printf("Found framework at: %s.", framework_path.c_str());
return;
} else {
all_dlopen_errors.Printf("Failed to find framework for \"%s\" looking"
" along paths:\n",
library_name);
for (const std::string &path : uniqued_paths)
all_dlopen_errors.Printf(" %s\n", path.c_str());
}
      // Maybe we were told to add a link library that lives in the system
      // frameworks. Specifying Foo.framework/Foo and letting the system
      // search resolve it doesn't work when DYLD_FRAMEWORK_FALLBACK_PATH is
      // set (e.g. in Xcode's test scheme), so for now spell the system path
      // out explicitly:
std::string system_path("/System/Library/Frameworks/");
system_path.append(library_name);
system_path.append(".framework/");
system_path.append(library_name);
framework_spec.SetFile(system_path.c_str(), true, FileSpec::Style::native);
if (LoadOneImage(process, framework_spec, load_image_error))
return;
else
all_dlopen_errors.Printf("Looking for \"%s\"\n, error: %s\n",
framework_path.c_str(),
load_image_error.AsCString());
} break;
case swift::LibraryKind::Library: {
std::vector<std::string> search_paths;
GetLibrarySearchPaths(search_paths,
swift_module->getASTContext().SearchPathOpts);
if (LoadLibraryUsingPaths(process, library_name, search_paths, true,
all_dlopen_errors))
return;
} break;
}
// If we get here, we aren't going to find this image, so add it to a
// negative cache:
if (runtime)
runtime->AddToLibraryNegativeCache(library_name);
current_error.SetErrorStringWithFormat(
"Failed to load linked library %s of module %s - errors:\n%s\n",
library_name, swift_module->getName().str().str().c_str(),
all_dlopen_errors.GetData());
};
swift_module->forAllVisibleModules(
{}, [&](swift::ModuleDecl::ImportedModule import) {
import.second->collectLinkLibraries(addLinkLibrary);
return true;
});
error = current_error;
}
bool SwiftASTContext::LoadLibraryUsingPaths(
Process &process, llvm::StringRef library_name,
std::vector<std::string> &search_paths, bool check_rpath,
StreamString &all_dlopen_errors) {
VALID_OR_RETURN(false);
Log *log(GetLogIfAnyCategoriesSet(LIBLLDB_LOG_TYPES));
SwiftLanguageRuntime *runtime = process.GetSwiftLanguageRuntime();
if (!runtime) {
all_dlopen_errors.PutCString(
"Can't load Swift libraries without a language runtime.");
return false;
}
if (ConstString::Equals(runtime->GetStandardLibraryBaseName(),
ConstString(library_name))) {
// Never dlopen the standard library. Some binaries statically link to the
// Swift standard library and dlopening it here will cause ObjC runtime
// conflicts.
// If you want to run Swift expressions you have to arrange to load the
// Swift standard library by hand before doing so.
if (log)
log->Printf("Skipping swift standard library \"%s\" - we don't hand load "
"that one.",
runtime->GetStandardLibraryBaseName().AsCString());
return true;
}
PlatformSP platform_sp(process.GetTarget().GetPlatform());
std::string library_fullname;
if (platform_sp) {
library_fullname =
platform_sp->GetFullNameForDylib(ConstString(library_name)).AsCString();
  } else {
    // This is the old way, and we shouldn't use it except on Mac OS.
#ifdef __APPLE__
library_fullname = "lib";
library_fullname.append(library_name);
library_fullname.append(".dylib");
#else
return false;
#endif
}
ModuleSpec module_spec;
module_spec.GetFileSpec().GetFilename().SetCString(library_fullname.c_str());
lldb_private::ModuleList matching_module_list;
if (process.GetTarget().GetImages().FindModules(module_spec,
matching_module_list) > 0) {
if (log)
log->Printf("Skipping module %s as it is already loaded.",
library_fullname.c_str());
return true;
}
std::string library_path;
std::unordered_set<std::string> seen_paths;
Status load_image_error;
std::vector<std::string> uniqued_paths;
for (const std::string &library_search_dir : search_paths) {
// The library search dir as it comes from the AST context often has
// duplicate entries, so lets unique the path list before we send it
// down to the target.
std::pair<std::unordered_set<std::string>::iterator, bool> insert_result =
seen_paths.insert(library_search_dir);
if (insert_result.second)
uniqued_paths.push_back(library_search_dir);
}
FileSpec library_spec(library_fullname, false);
FileSpec found_library;
uint32_t token = LLDB_INVALID_IMAGE_TOKEN;
Status error;
if (platform_sp)
token = platform_sp->LoadImageUsingPaths(&process, library_spec,
uniqued_paths,
error,
&found_library);
if (token != LLDB_INVALID_IMAGE_TOKEN) {
if (log)
log->Printf("Found library at: %s.", found_library.GetCString());
return true;
} else {
all_dlopen_errors.Printf("Failed to find \"%s\" in paths:\n,",
library_fullname.c_str());
for (const std::string &search_dir : uniqued_paths)
all_dlopen_errors.Printf(" %s\n", search_dir.c_str());
}
if (check_rpath) {
// Let our RPATH help us out when finding the right library
library_path = "@rpath/";
library_path += library_fullname;
FileSpec link_lib_spec(library_path.c_str(), false);
if (LoadOneImage(process, link_lib_spec, load_image_error)) {
if (log)
log->Printf("Found library using RPATH at: %s.", library_path.c_str());
return true;
} else
all_dlopen_errors.Printf("Failed to find \"%s\" on RPATH, error: %s\n",
library_fullname.c_str(),
load_image_error.AsCString());
}
return false;
}
void SwiftASTContext::LoadExtraDylibs(Process &process, Status &error) {
VALID_OR_RETURN_VOID();
error.Clear();
swift::IRGenOptions &irgen_options = GetIRGenOptions();
for (const swift::LinkLibrary &link_lib : irgen_options.LinkLibraries) {
// We don't have to do frameworks here, they actually record their link
// libraries properly.
if (link_lib.getKind() == swift::LibraryKind::Library) {
const char *library_name = link_lib.getName().data();
StreamString errors;
std::vector<std::string> search_paths;
GetLibrarySearchPaths(search_paths,
m_compiler_invocation_ap->getSearchPathOptions());
bool success = LoadLibraryUsingPaths(process, library_name, search_paths,
false, errors);
if (!success) {
error.SetErrorString(errors.GetData());
}
}
}
}
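// Scan a binary's Swift AST section (or the AST blobs provided by the symbol
// vendor) and collect the names of the serialized Swift modules it contains.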
bool SwiftASTContext::RegisterSectionModules(
Module &module, std::vector<std::string> &module_names) {
VALID_OR_RETURN(false);
Log *log(GetLogIfAllCategoriesSet(LIBLLDB_LOG_TYPES));
swift::SerializedModuleLoader *sml = GetSerializeModuleLoader();
if (sml) {
SectionList *section_list = module.GetSectionList();
if (section_list) {
SectionSP section_sp(
section_list->FindSectionByType(eSectionTypeSwiftModules, true));
if (section_sp) {
DataExtractor section_data;
if (section_sp->GetSectionData(section_data)) {
llvm::StringRef section_data_ref(
(const char *)section_data.GetDataStart(),
section_data.GetByteSize());
llvm::SmallVector<std::string, 4> llvm_modules;
if (swift::parseASTSection(sml, section_data_ref, llvm_modules)) {
for (auto module_name : llvm_modules)
module_names.push_back(module_name);
return true;
}
}
} else {
if (m_ast_file_data_map.find(&module) != m_ast_file_data_map.end())
return true;
SymbolVendor *sym_vendor = module.GetSymbolVendor();
if (sym_vendor) {
// Grab all the AST blobs from the symbol vendor.
auto ast_file_datas = sym_vendor->GetASTData(eLanguageTypeSwift);
if (log)
log->Printf("SwiftASTContext::%s() retrieved %zu AST Data blobs "
"from the symbol vendor.",
__FUNCTION__, ast_file_datas.size());
// Add each of the AST blobs to the vector of AST blobs for the
// module.
auto &ast_vector = GetASTVectorForModule(&module);
ast_vector.insert(ast_vector.end(), ast_file_datas.begin(),
ast_file_datas.end());
// Retrieve the module names from the AST blobs retrieved from the
// symbol vendor.
size_t parse_fail_count = 0;
size_t ast_number = 0;
for (auto ast_file_data_sp : ast_file_datas) {
// Parse the AST section info from the AST blob.
++ast_number;
llvm::StringRef section_data_ref(
(const char *)ast_file_data_sp->GetBytes(),
ast_file_data_sp->GetByteSize());
llvm::SmallVector<std::string, 4> llvm_modules;
if (swift::parseASTSection(sml, section_data_ref, llvm_modules)) {
// Collect the LLVM module names referenced by the AST.
for (auto module_name : llvm_modules)
module_names.push_back(module_name);
if (log)
log->Printf("SwiftASTContext::%s() - parsed %zu llvm modules "
"from Swift AST section %zu of %zu.",
__FUNCTION__, llvm_modules.size(), ast_number,
ast_file_datas.size());
} else {
// Keep track of the fact that we failed to parse the AST
// section info.
if (log)
log->Printf("SwiftASTContext::%s() - failed to parse AST "
"section %zu of %zu.",
__FUNCTION__, ast_number, ast_file_datas.size());
++parse_fail_count;
}
}
if (!ast_file_datas.empty() && (parse_fail_count == 0)) {
// We found AST data entries and we successfully parsed all of
// them.
return true;
}
}
}
}
}
return false;
}
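// Try to load each module name gathered by RegisterSectionModules, reporting
// a warning on the Module for any that fail.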
void SwiftASTContext::ValidateSectionModules(
Module &module, const std::vector<std::string> &module_names) {
VALID_OR_RETURN_VOID();
Status error;
for (const std::string &module_name : module_names)
if (!GetModule(ConstString(module_name.c_str()), error))
module.ReportWarning("unable to load swift module '%s' (%s)",
module_name.c_str(), error.AsCString());
}
swift::Identifier SwiftASTContext::GetIdentifier(const char *name) {
VALID_OR_RETURN(swift::Identifier());
return GetASTContext()->getIdentifier(llvm::StringRef(name));
}
swift::Identifier SwiftASTContext::GetIdentifier(const llvm::StringRef &name) {
VALID_OR_RETURN(swift::Identifier());
return GetASTContext()->getIdentifier(name);
}
ConstString SwiftASTContext::GetMangledTypeName(swift::TypeBase *type_base) {
VALID_OR_RETURN(ConstString());
auto iter = m_type_to_mangled_name_map.find(type_base),
end = m_type_to_mangled_name_map.end();
if (iter != end)
return ConstString(iter->second);
swift::Type swift_type(type_base);
assert(!swift_type->hasArchetype() && "type has not been mapped out of context");
swift::Mangle::ASTMangler mangler(true);
std::string s = mangler.mangleTypeForDebugger(swift_type, nullptr, nullptr);
if (s.empty())
return ConstString();
ConstString mangled_cs(s.c_str());
CacheDemangledType(mangled_cs.AsCString(), type_base);
return mangled_cs;
}
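// Record a successful mangled-name <-> TypeBase mapping in both directions so
// later lookups can skip the demangler.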
void SwiftASTContext::CacheDemangledType(const char *name,
swift::TypeBase *found_type) {
VALID_OR_RETURN_VOID();
m_type_to_mangled_name_map.insert(std::make_pair(found_type, name));
m_mangled_name_to_type_map.insert(std::make_pair(name, found_type));
}
void SwiftASTContext::CacheDemangledTypeFailure(const char *name) {
VALID_OR_RETURN_VOID();
m_negative_type_cache.Insert(name);
}
CompilerType
SwiftASTContext::GetTypeFromMangledTypename(const char *mangled_typename,
Status &error) {
VALID_OR_RETURN(CompilerType());
if (!mangled_typename ||
!SwiftLanguageRuntime::IsSwiftMangledName(mangled_typename)) {
error.SetErrorStringWithFormat("typename '%s' is not a valid Swift mangled "
"typename, it should begin with $S",
mangled_typename);
return CompilerType();
}
Log *log(GetLogIfAllCategoriesSet(LIBLLDB_LOG_TYPES));
if (log)
log->Printf("((SwiftASTContext*)%p)->GetTypeFromMangledTypename('%s')",
this, mangled_typename);
swift::ASTContext *ast_ctx = GetASTContext();
if (!ast_ctx) {
if (log)
log->Printf("((SwiftASTContext*)%p)->GetTypeFromMangledTypename('%s') "
"-- null Swift AST Context",
this, mangled_typename);
error.SetErrorString("null Swift AST Context");
return CompilerType();
}
error.Clear();
// If we were to crash doing this, remember what type caused it
llvm::PrettyStackTraceFormat PST("error finding type for %s",
mangled_typename);
ConstString mangled_name(mangled_typename);
swift::TypeBase *found_type =
m_mangled_name_to_type_map.lookup(mangled_name.GetCString());
if (found_type) {
if (log)
log->Printf("((SwiftASTContext*)%p)->GetTypeFromMangledTypename('%s') "
"-- found in the positive cache",
this, mangled_typename);
return CompilerType(ast_ctx, found_type);
}
if (m_negative_type_cache.Lookup(mangled_name.GetCString())) {
if (log)
log->Printf("((SwiftASTContext*)%p)->GetTypeFromMangledTypename('%s') "
"-- found in the negative cache",
this, mangled_typename);
return CompilerType();
}
if (log)
log->Printf("((SwiftASTContext*)%p)->GetTypeFromMangledTypename('%s') -- "
"not cached, searching",
this, mangled_typename);
std::string swift_error;
found_type = swift::ide::getTypeFromMangledSymbolname(
*ast_ctx, mangled_typename, swift_error)
.getPointer();
if (found_type) {
CacheDemangledType(mangled_name.GetCString(), found_type);
CompilerType result_type(ast_ctx, found_type);
if (log)
log->Printf("((SwiftASTContext*)%p)->GetTypeFromMangledTypename('%s') "
"-- found %s",
this, mangled_typename,
result_type.GetTypeName().GetCString());
return result_type;
}
if (log)
log->Printf("((SwiftASTContext*)%p)->GetTypeFromMangledTypename('%s') "
"-- error: %s",
this, mangled_typename, swift_error.c_str());
error.SetErrorStringWithFormat("type for typename '%s' was not found",
mangled_typename);
CacheDemangledTypeFailure(mangled_name.GetCString());
return CompilerType();
}
CompilerType SwiftASTContext::GetAnyObjectType() {
VALID_OR_RETURN(CompilerType());
swift::ASTContext *ast = GetASTContext();
return CompilerType(ast, ast->getAnyObjectType());
}
CompilerType SwiftASTContext::GetVoidFunctionType() {
VALID_OR_RETURN(CompilerType());
if (!m_void_function_type) {
swift::ASTContext *ast = GetASTContext();
swift::Type empty_tuple_type(swift::TupleType::getEmpty(*ast));
m_void_function_type = CompilerType(
ast, swift::FunctionType::get({}, empty_tuple_type));
}
return m_void_function_type;
}
static CompilerType ValueDeclToType(swift::ValueDecl *decl,
swift::ASTContext *ast) {
if (decl) {
switch (decl->getKind()) {
case swift::DeclKind::TypeAlias: {
swift::TypeAliasDecl *alias_decl = swift::cast<swift::TypeAliasDecl>(decl);
if (alias_decl->hasInterfaceType()) {
swift::Type swift_type =
swift::NameAliasType::get(
alias_decl, swift::Type(),
swift::SubstitutionMap(),
alias_decl->getUnderlyingTypeLoc().getType());
return CompilerType(ast, swift_type.getPointer());
}
break;
}
case swift::DeclKind::Enum:
case swift::DeclKind::Struct:
case swift::DeclKind::Protocol:
case swift::DeclKind::Class: {
swift::NominalTypeDecl *nominal_decl = swift::cast<swift::NominalTypeDecl>(decl);
if (nominal_decl->hasInterfaceType()) {
swift::Type swift_type = nominal_decl->getDeclaredType();
return CompilerType(ast, swift_type.getPointer());
}
} break;
default:
break;
}
}
return CompilerType();
}
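// Resolve a "Module.TypeName" string against the modules already cached in
// this context.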
CompilerType SwiftASTContext::FindQualifiedType(const char *qualified_name) {
VALID_OR_RETURN(CompilerType());
if (qualified_name && qualified_name[0]) {
const char *dot_pos = strchr(qualified_name, '.');
if (dot_pos) {
ConstString module_name(qualified_name, dot_pos - qualified_name);
swift::ModuleDecl *swift_module = GetCachedModule(module_name);
if (swift_module) {
swift::ModuleDecl::AccessPathTy access_path;
llvm::SmallVector<swift::ValueDecl *, 4> decls;
const char *module_type_name = dot_pos + 1;
swift_module->lookupValue(access_path, GetIdentifier(module_type_name),
swift::NLKind::UnqualifiedLookup, decls);
for (auto decl : decls) {
CompilerType type = ValueDeclToType(decl, GetASTContext());
if (type)
return type;
}
}
}
}
return CompilerType();
}
static CompilerType DeclToType(swift::Decl *decl, swift::ASTContext *ast) {
if (swift::ValueDecl *value_decl =
swift::dyn_cast_or_null<swift::ValueDecl>(decl))
return ValueDeclToType(value_decl, ast);
return CompilerType();
}
static SwiftASTContext::TypeOrDecl DeclToTypeOrDecl(swift::ASTContext *ast,
swift::Decl *decl) {
if (decl) {
switch (decl->getKind()) {
case swift::DeclKind::Import:
case swift::DeclKind::Extension:
case swift::DeclKind::PatternBinding:
case swift::DeclKind::TopLevelCode:
case swift::DeclKind::GenericTypeParam:
case swift::DeclKind::AssociatedType:
case swift::DeclKind::EnumElement:
case swift::DeclKind::EnumCase:
case swift::DeclKind::IfConfig:
case swift::DeclKind::Param:
case swift::DeclKind::Module:
case swift::DeclKind::MissingMember:
break;
case swift::DeclKind::InfixOperator:
case swift::DeclKind::PrefixOperator:
case swift::DeclKind::PostfixOperator:
case swift::DeclKind::PrecedenceGroup:
return decl;
case swift::DeclKind::TypeAlias: {
swift::TypeAliasDecl *alias_decl =
swift::cast<swift::TypeAliasDecl>(decl);
if (alias_decl->hasInterfaceType()) {
swift::Type swift_type =
swift::NameAliasType::get(
alias_decl, swift::Type(),
swift::SubstitutionMap(),
alias_decl->getUnderlyingTypeLoc().getType());
return CompilerType(ast, swift_type.getPointer());
}
} break;
case swift::DeclKind::Enum:
case swift::DeclKind::Struct:
case swift::DeclKind::Class:
case swift::DeclKind::Protocol: {
swift::NominalTypeDecl *nominal_decl =
swift::cast<swift::NominalTypeDecl>(decl);
if (nominal_decl->hasInterfaceType()) {
swift::Type swift_type = nominal_decl->getDeclaredType();
return CompilerType(ast, swift_type.getPointer());
}
} break;
case swift::DeclKind::Func:
case swift::DeclKind::Var:
return decl;
case swift::DeclKind::Subscript:
case swift::DeclKind::Constructor:
case swift::DeclKind::Destructor:
break;
}
}
return CompilerType();
}
size_t
SwiftASTContext::FindContainedTypeOrDecl(llvm::StringRef name,
TypeOrDecl container_type_or_decl,
TypesOrDecls &results, bool append) {
VALID_OR_RETURN(0);
if (!append)
results.clear();
size_t size_before = results.size();
CompilerType container_type = container_type_or_decl.Apply<CompilerType>(
[](CompilerType type) -> CompilerType { return type; },
[this](swift::Decl *decl) -> CompilerType {
return DeclToType(decl, GetASTContext());
});
  if (!name.empty() &&
llvm::dyn_cast_or_null<SwiftASTContext>(container_type.GetTypeSystem())) {
swift::Type swift_type(GetSwiftType(container_type));
if (!swift_type)
return 0;
swift::CanType swift_can_type(swift_type->getCanonicalType());
swift::NominalType *nominal_type =
swift_can_type->getAs<swift::NominalType>();
if (!nominal_type)
return 0;
swift::NominalTypeDecl *nominal_decl = nominal_type->getDecl();
llvm::ArrayRef<swift::ValueDecl *> decls =
nominal_decl->lookupDirect(
swift::DeclName(m_ast_context_ap->getIdentifier(name)));
for (auto decl : decls)
results.emplace(DeclToTypeOrDecl(GetASTContext(), decl));
}
return results.size() - size_before;
}
CompilerType SwiftASTContext::FindType(const char *name,
swift::ModuleDecl *swift_module) {
VALID_OR_RETURN(CompilerType());
std::set<CompilerType> search_results;
FindTypes(name, swift_module, search_results, false);
if (search_results.empty())
return CompilerType();
else
return *search_results.begin();
}
llvm::Optional<SwiftASTContext::TypeOrDecl>
SwiftASTContext::FindTypeOrDecl(const char *name,
swift::ModuleDecl *swift_module) {
VALID_OR_RETURN(llvm::Optional<SwiftASTContext::TypeOrDecl>());
TypesOrDecls search_results;
FindTypesOrDecls(name, swift_module, search_results, false);
if (search_results.empty())
return llvm::Optional<SwiftASTContext::TypeOrDecl>();
else
return *search_results.begin();
}
size_t SwiftASTContext::FindTypes(const char *name,
swift::ModuleDecl *swift_module,
std::set<CompilerType> &results,
bool append) {
VALID_OR_RETURN(0);
if (!append)
results.clear();
size_t before = results.size();
TypesOrDecls types_or_decls_results;
FindTypesOrDecls(name, swift_module, types_or_decls_results);
for (const auto &result : types_or_decls_results) {
CompilerType type = result.Apply<CompilerType>(
[](CompilerType type) -> CompilerType { return type; },
[this](swift::Decl *decl) -> CompilerType {
if (swift::ValueDecl *value_decl =
swift::dyn_cast_or_null<swift::ValueDecl>(decl)) {
if (value_decl->hasInterfaceType()) {
swift::Type swift_type = value_decl->getInterfaceType();
swift::MetatypeType *meta_type =
swift_type->getAs<swift::MetatypeType>();
swift::ASTContext *ast = GetASTContext();
if (meta_type)
return CompilerType(ast,
meta_type->getInstanceType().getPointer());
else
return CompilerType(ast, swift_type.getPointer());
}
}
return CompilerType();
});
results.emplace(type);
}
return results.size() - before;
}
size_t SwiftASTContext::FindTypesOrDecls(const char *name,
swift::ModuleDecl *swift_module,
TypesOrDecls &results, bool append) {
VALID_OR_RETURN(0);
if (!append)
results.clear();
size_t before = results.size();
if (name && name[0] && swift_module) {
swift::ModuleDecl::AccessPathTy access_path;
llvm::SmallVector<swift::ValueDecl *, 4> value_decls;
swift::Identifier identifier(GetIdentifier(name));
if (strchr(name, '.'))
swift_module->lookupValue(access_path, identifier,
swift::NLKind::QualifiedLookup, value_decls);
else
swift_module->lookupValue(access_path, identifier,
swift::NLKind::UnqualifiedLookup, value_decls);
if (identifier.isOperator()) {
swift::OperatorDecl *op_decl =
swift_module->lookupPrefixOperator(identifier);
if (op_decl)
results.emplace(DeclToTypeOrDecl(GetASTContext(), op_decl));
if ((op_decl = swift_module->lookupInfixOperator(identifier)))
results.emplace(DeclToTypeOrDecl(GetASTContext(), op_decl));
if ((op_decl = swift_module->lookupPostfixOperator(identifier)))
results.emplace(DeclToTypeOrDecl(GetASTContext(), op_decl));
}
if (swift::PrecedenceGroupDecl *pg_decl =
swift_module->lookupPrecedenceGroup(identifier))
results.emplace(DeclToTypeOrDecl(GetASTContext(), pg_decl));
for (auto decl : value_decls)
results.emplace(DeclToTypeOrDecl(GetASTContext(), decl));
}
return results.size() - before;
}
size_t SwiftASTContext::FindType(const char *name,
std::set<CompilerType> &results, bool append) {
VALID_OR_RETURN(0);
if (!append)
results.clear();
auto iter = m_swift_module_cache.begin(), end = m_swift_module_cache.end();
size_t count = 0;
std::function<void(swift::ModuleDecl *)> lookup_func =
[this, name, &results, &count](swift::ModuleDecl *module) -> void {
CompilerType candidate(this->FindType(name, module));
if (candidate) {
++count;
results.insert(candidate);
}
};
for (; iter != end; iter++)
lookup_func(iter->second);
if (m_scratch_module)
lookup_func(m_scratch_module);
return count;
}
CompilerType SwiftASTContext::FindFirstType(const char *name,
const ConstString &module_name) {
VALID_OR_RETURN(CompilerType());
if (name && name[0]) {
if (module_name) {
return FindType(name, GetCachedModule(module_name));
} else {
std::set<CompilerType> types;
FindType(name, types);
if (!types.empty())
return *types.begin();
}
}
return CompilerType();
}
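// Import a type that lives in another SwiftASTContext into this one by
// round-tripping through its mangled name.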
CompilerType SwiftASTContext::ImportType(CompilerType &type, Status &error) {
VALID_OR_RETURN(CompilerType());
if (m_ast_context_ap.get() == NULL)
return CompilerType();
SwiftASTContext *swift_ast_ctx =
llvm::dyn_cast_or_null<SwiftASTContext>(type.GetTypeSystem());
if (swift_ast_ctx == nullptr) {
error.SetErrorString("Can't import clang type into a Swift ASTContext.");
return CompilerType();
} else if (swift_ast_ctx == this) {
// This is the same AST context, so the type is already imported...
return type;
}
// For now we're going to do this all using mangled names. If we find that is
// too slow, we can use the TypeBase * in the CompilerType to match this to
// the version of the type we got from the mangled name in the original
// swift::ASTContext.
ConstString mangled_name(type.GetMangledTypeName());
if (mangled_name) {
swift::TypeBase *our_type_base =
m_mangled_name_to_type_map.lookup(mangled_name.GetCString());
if (our_type_base)
return CompilerType(m_ast_context_ap.get(), our_type_base);
else {
Status error;
CompilerType our_type(
GetTypeFromMangledTypename(mangled_name.GetCString(), error));
if (error.Success())
return our_type;
}
}
return CompilerType();
}
swift::IRGenDebugInfoLevel SwiftASTContext::GetGenerateDebugInfo() {
return GetIRGenOptions().DebugInfoLevel;
}
swift::PrintOptions SwiftASTContext::GetUserVisibleTypePrintingOptions(
bool print_help_if_available) {
swift::PrintOptions print_options;
print_options.SynthesizeSugarOnTypes = true;
print_options.VarInitializers = true;
print_options.TypeDefinitions = true;
print_options.PrintGetSetOnRWProperties = true;
print_options.SkipImplicit = false;
print_options.PreferTypeRepr = true;
print_options.FunctionDefinitions = true;
print_options.FullyQualifiedTypesIfAmbiguous = true;
print_options.FullyQualifiedTypes = true;
print_options.ExplodePatternBindingDecls = false;
print_options.PrintDocumentationComments =
print_options.PrintRegularClangComments = print_help_if_available;
return print_options;
}
void SwiftASTContext::SetGenerateDebugInfo(swift::IRGenDebugInfoLevel b) {
GetIRGenOptions().DebugInfoLevel = b;
}
llvm::TargetOptions *SwiftASTContext::getTargetOptions() {
if (m_target_options_ap.get() == NULL) {
m_target_options_ap.reset(new llvm::TargetOptions());
}
return m_target_options_ap.get();
}
swift::ModuleDecl *SwiftASTContext::GetScratchModule() {
VALID_OR_RETURN(nullptr);
if (m_scratch_module == nullptr)
m_scratch_module = swift::ModuleDecl::create(
GetASTContext()->getIdentifier("__lldb_scratch_module"),
*GetASTContext());
return m_scratch_module;
}
swift::SILModule *SwiftASTContext::GetSILModule() {
VALID_OR_RETURN(nullptr);
if (m_sil_module_ap.get() == NULL)
m_sil_module_ap = swift::SILModule::createEmptyModule(GetScratchModule(),
GetSILOptions());
return m_sil_module_ap.get();
}
swift::irgen::IRGenerator &
SwiftASTContext::GetIRGenerator(swift::IRGenOptions &opts,
swift::SILModule &module) {
if (m_ir_generator_ap.get() == nullptr) {
m_ir_generator_ap.reset(new swift::irgen::IRGenerator(opts, module));
}
return *m_ir_generator_ap.get();
}
swift::irgen::IRGenModule &SwiftASTContext::GetIRGenModule() {
VALID_OR_RETURN(*m_ir_gen_module_ap);
llvm::call_once(m_ir_gen_module_once, [this]() {
// Make sure we have a good ClangImporter.
GetClangImporter();
swift::IRGenOptions &ir_gen_opts = GetIRGenOptions();
std::string error_str;
std::string triple = GetTriple();
const llvm::Target *llvm_target =
llvm::TargetRegistry::lookupTarget(triple, error_str);
llvm::CodeGenOpt::Level optimization_level = llvm::CodeGenOpt::Level::None;
// Create a target machine.
llvm::TargetMachine *target_machine = llvm_target->createTargetMachine(
triple,
"generic", // cpu
"", // features
*getTargetOptions(),
llvm::Reloc::Static, // TODO verify with Sean, Default went away
llvm::None, optimization_level);
if (target_machine) {
// Set the module's string representation.
const llvm::DataLayout data_layout = target_machine->createDataLayout();
llvm::Triple llvm_triple(triple);
swift::SILModule *sil_module = GetSILModule();
if (sil_module != nullptr) {
swift::irgen::IRGenerator &ir_generator =
GetIRGenerator(ir_gen_opts, *sil_module);
swift::PrimarySpecificPaths PSPs =
GetCompilerInvocation()
.getFrontendOptions()
.InputsAndOutputs.getPrimarySpecificPathsForAtMostOnePrimary();
m_ir_gen_module_ap.reset(new swift::irgen::IRGenModule(
ir_generator, ir_generator.createTargetMachine(), nullptr,
GetGlobalLLVMContext(), ir_gen_opts.ModuleName, PSPs.OutputFilename,
PSPs.MainInputFilenameForDebugInfo));
llvm::Module *llvm_module = m_ir_gen_module_ap->getModule();
llvm_module->setDataLayout(data_layout.getStringRepresentation());
llvm_module->setTargetTriple(triple);
}
}
});
return *m_ir_gen_module_ap;
}
CompilerType
SwiftASTContext::CreateTupleType(const std::vector<CompilerType> &elements) {
VALID_OR_RETURN(CompilerType());
Status error;
if (elements.size() == 0)
return CompilerType(GetASTContext(), GetASTContext()->TheEmptyTupleType);
else {
std::vector<swift::TupleTypeElt> tuple_elems;
for (const CompilerType &type : elements) {
if (auto swift_type = GetSwiftType(type))
tuple_elems.push_back(swift::TupleTypeElt(swift_type));
else
return CompilerType();
}
llvm::ArrayRef<swift::TupleTypeElt> fields(tuple_elems);
return CompilerType(
GetASTContext(),
swift::TupleType::get(fields, *GetASTContext()).getPointer());
}
}
CompilerType
SwiftASTContext::CreateTupleType(const std::vector<TupleElement> &elements) {
VALID_OR_RETURN(CompilerType());
Status error;
if (elements.size() == 0)
return CompilerType(GetASTContext(), GetASTContext()->TheEmptyTupleType);
else {
std::vector<swift::TupleTypeElt> tuple_elems;
for (const TupleElement &element : elements) {
if (auto swift_type = GetSwiftType(element.element_type)) {
if (element.element_name.IsEmpty())
tuple_elems.push_back(swift::TupleTypeElt(swift_type));
else
tuple_elems.push_back(swift::TupleTypeElt(
swift_type, m_ast_context_ap->getIdentifier(
element.element_name.GetCString())));
} else
return CompilerType();
}
llvm::ArrayRef<swift::TupleTypeElt> fields(tuple_elems);
return CompilerType(
GetASTContext(),
swift::TupleType::get(fields, *GetASTContext()).getPointer());
}
}
CompilerType SwiftASTContext::GetErrorType() {
VALID_OR_RETURN(CompilerType());
swift::ASTContext *swift_ctx = GetASTContext();
if (swift_ctx) {
// Getting the error type requires the Stdlib module be loaded, but doesn't
// cause it to be loaded.
// Do that here:
swift_ctx->getStdlibModule(true);
swift::NominalTypeDecl *error_type_decl = GetASTContext()->getErrorDecl();
if (error_type_decl) {
auto error_type = error_type_decl->getDeclaredType().getPointer();
return CompilerType(GetASTContext(), error_type);
}
}
return CompilerType();
}
CompilerType SwiftASTContext::GetNSErrorType(Status &error) {
VALID_OR_RETURN(CompilerType());
  return GetTypeFromMangledTypename(
      SwiftLanguageRuntime::GetCurrentMangledName("_TtC10Foundation7NSError")
          .c_str(),
      error);
}
CompilerType SwiftASTContext::CreateMetatypeType(CompilerType instance_type) {
VALID_OR_RETURN(CompilerType());
if (llvm::dyn_cast_or_null<SwiftASTContext>(instance_type.GetTypeSystem()))
return CompilerType(GetASTContext(),
swift::MetatypeType::get(GetSwiftType(instance_type),
*GetASTContext()));
return CompilerType();
}
SwiftASTContext *SwiftASTContext::GetSwiftASTContext(swift::ASTContext *ast) {
SwiftASTContext *swift_ast = GetASTMap().Lookup(ast);
return swift_ast;
}
uint32_t SwiftASTContext::GetPointerByteSize() {
VALID_OR_RETURN(0);
if (m_pointer_byte_size == 0) {
swift::ASTContext *ast = GetASTContext();
m_pointer_byte_size = CompilerType(ast, ast->TheRawPointerType.getPointer())
.GetByteSize(nullptr);
}
return m_pointer_byte_size;
}
uint32_t SwiftASTContext::GetPointerBitAlignment() {
VALID_OR_RETURN(0);
if (m_pointer_bit_align == 0) {
swift::ASTContext *ast = GetASTContext();
m_pointer_bit_align = CompilerType(ast, ast->TheRawPointerType.getPointer())
.GetAlignedBitSize();
}
return m_pointer_bit_align;
}
bool SwiftASTContext::HasErrors() {
if (m_diagnostic_consumer_ap.get())
return (
static_cast<StoringDiagnosticConsumer *>(m_diagnostic_consumer_ap.get())
->NumErrors() != 0);
else
return false;
}
bool SwiftASTContext::HasFatalErrors(swift::ASTContext *ast_context) {
return (ast_context && ast_context->Diags.hasFatalErrorOccurred());
}
void SwiftASTContext::ClearDiagnostics() {
assert(!HasFatalErrors() && "Never clear a fatal diagnostic!");
if (m_diagnostic_consumer_ap.get())
static_cast<StoringDiagnosticConsumer *>(m_diagnostic_consumer_ap.get())
->Clear();
}
bool SwiftASTContext::SetColorizeDiagnostics(bool b) {
if (m_diagnostic_consumer_ap.get())
return static_cast<StoringDiagnosticConsumer *>(
m_diagnostic_consumer_ap.get())
->SetColorize(b);
return false;
}
void SwiftASTContext::PrintDiagnostics(DiagnosticManager &diagnostic_manager,
uint32_t bufferID, uint32_t first_line,
uint32_t last_line,
uint32_t line_offset) {
// If this is a fatal error, copy the error into the AST context's fatal error
// field, and then put it to the stream, otherwise just dump the diagnostics
// to the stream.
// N.B. you cannot use VALID_OR_RETURN_VOID here since that exits if you have
// fatal errors, which are what we are trying to print here.
if (!m_ast_context_ap.get()) {
SymbolFile *sym_file = GetSymbolFile();
if (sym_file) {
ConstString name
= sym_file->GetObjectFile()->GetModule()->GetObjectName();
m_fatal_errors.SetErrorStringWithFormat(
"Null context for %s.", name.AsCString());
} else {
m_fatal_errors.SetErrorString("Unknown fatal error occurred.");
}
return;
}
if (m_ast_context_ap->Diags.hasFatalErrorOccurred() &&
!m_reported_fatal_error) {
DiagnosticManager fatal_diagnostics;
if (m_diagnostic_consumer_ap.get())
static_cast<StoringDiagnosticConsumer *>(m_diagnostic_consumer_ap.get())
->PrintDiagnostics(fatal_diagnostics, bufferID, first_line, last_line,
line_offset);
if (fatal_diagnostics.Diagnostics().size())
m_fatal_errors.SetErrorString(fatal_diagnostics.GetString().data());
else
m_fatal_errors.SetErrorString("Unknown fatal error occurred.");
m_reported_fatal_error = true;
for (const DiagnosticList::value_type &fatal_diagnostic :
fatal_diagnostics.Diagnostics()) {
// FIXME: need to add a CopyDiagnostic operation for copying diagnostics
// from one manager to another.
diagnostic_manager.AddDiagnostic(
fatal_diagnostic->GetMessage(), fatal_diagnostic->GetSeverity(),
fatal_diagnostic->getKind(), fatal_diagnostic->GetCompilerID());
}
} else {
if (m_diagnostic_consumer_ap.get())
static_cast<StoringDiagnosticConsumer *>(m_diagnostic_consumer_ap.get())
->PrintDiagnostics(diagnostic_manager, bufferID, first_line,
last_line, line_offset);
}
}
void SwiftASTContext::ModulesDidLoad(ModuleList &module_list) {
ClearModuleDependentCaches();
}
void SwiftASTContext::ClearModuleDependentCaches() {
m_negative_type_cache.Clear();
m_extra_type_info_cache.Clear();
}
void SwiftASTContext::DumpConfiguration(Log *log) {
VALID_OR_RETURN_VOID();
if (!log)
return;
log->Printf("(SwiftASTContext*)%p:", this);
  if (!m_ast_context_ap) {
    log->Printf(" (no AST context)");
    return;
  }
log->Printf(" Architecture : %s",
m_ast_context_ap->LangOpts.Target.getTriple().c_str());
log->Printf(" SDK path : %s",
m_ast_context_ap->SearchPathOpts.SDKPath.c_str());
log->Printf(" Runtime resource path : %s",
m_ast_context_ap->SearchPathOpts.RuntimeResourcePath.c_str());
log->Printf(" Runtime library path : %s",
m_ast_context_ap->SearchPathOpts.RuntimeLibraryPath.c_str());
log->Printf(
" Runtime library import path : %s",
m_ast_context_ap->SearchPathOpts.RuntimeLibraryImportPath.c_str());
log->Printf(" Framework search paths : (%llu items)",
(unsigned long long)
m_ast_context_ap->SearchPathOpts.FrameworkSearchPaths.size());
for (const auto &framework_search_path :
m_ast_context_ap->SearchPathOpts.FrameworkSearchPaths) {
log->Printf(" %s", framework_search_path.Path.c_str());
}
log->Printf(" Import search paths : (%llu items)",
(unsigned long long)
m_ast_context_ap->SearchPathOpts.ImportSearchPaths.size());
for (std::string &import_search_path :
m_ast_context_ap->SearchPathOpts.ImportSearchPaths) {
log->Printf(" %s", import_search_path.c_str());
}
swift::ClangImporterOptions &clang_importer_options =
GetClangImporterOptions();
log->Printf(" Extra clang arguments : (%llu items)",
(unsigned long long)clang_importer_options.ExtraArgs.size());
for (std::string &extra_arg : clang_importer_options.ExtraArgs) {
log->Printf(" %s", extra_arg.c_str());
}
}
bool SwiftASTContext::HasTarget() const {
lldb::TargetWP empty_wp;
  // If either call to std::weak_ptr::owner_before(...) returns true, then
  // m_target_wp once contained (and possibly still contains) a reference to a
  // valid shared pointer. This helps us know if we had a valid reference to a
  // target which is now invalid because the target was deleted.
return empty_wp.owner_before(m_target_wp) ||
m_target_wp.owner_before(empty_wp);
}
bool SwiftASTContext::CheckProcessChanged() {
if (HasTarget()) {
TargetSP target_sp(m_target_wp.lock());
if (target_sp) {
Process *process = target_sp->GetProcessSP().get();
if (m_process == NULL) {
if (process)
m_process = process;
} else {
if (m_process != process)
return true;
}
}
}
return false;
}
void SwiftASTContext::AddDebuggerClient(
swift::DebuggerClient *debugger_client) {
m_debugger_clients.push_back(
std::unique_ptr<swift::DebuggerClient>(debugger_client));
}
SwiftASTContext::ExtraTypeInformation::ExtraTypeInformation()
: m_flags(false) {}
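// Determine whether the type is a "trivial option set": a nominal type that
// conforms to OptionSet and stores its value in a property named rawValue.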
SwiftASTContext::ExtraTypeInformation::ExtraTypeInformation(
swift::CanType swift_can_type)
: m_flags(false) {
static ConstString g_rawValue("rawValue");
swift::ASTContext &ast_ctx = swift_can_type->getASTContext();
SwiftASTContext *swift_ast = SwiftASTContext::GetSwiftASTContext(&ast_ctx);
if (swift_ast) {
swift::ProtocolDecl *option_set =
ast_ctx.getProtocol(swift::KnownProtocolKind::OptionSet);
if (option_set) {
if (auto nominal_decl =
swift_can_type.getNominalOrBoundGenericNominal()) {
for (swift::ProtocolDecl *protocol_decl :
nominal_decl->getAllProtocols()) {
if (protocol_decl == option_set) {
for (swift::VarDecl *stored_property :
nominal_decl->getStoredProperties()) {
swift::Identifier name = stored_property->getName();
if (name.str() == g_rawValue.GetStringRef()) {
m_flags.m_is_trivial_option_set = true;
break;
}
}
}
}
}
}
}
}
SwiftASTContext::ExtraTypeInformation
SwiftASTContext::GetExtraTypeInformation(void *type) {
if (!type)
return ExtraTypeInformation();
swift::CanType swift_can_type;
void *swift_can_type_ptr = nullptr;
if (auto swift_type = GetSwiftType(type)) {
swift_can_type = swift_type->getCanonicalType();
swift_can_type_ptr = swift_can_type.getPointer();
}
if (!swift_can_type_ptr)
return ExtraTypeInformation();
ExtraTypeInformation eti;
if (!m_extra_type_info_cache.Lookup(swift_can_type_ptr, eti)) {
ExtraTypeInformation extra_info(swift_can_type);
m_extra_type_info_cache.Insert(swift_can_type_ptr, extra_info);
return extra_info;
} else {
return eti;
}
}
bool SwiftASTContext::DeclContextIsStructUnionOrClass(void *opaque_decl_ctx) {
return false;
}
ConstString SwiftASTContext::DeclContextGetName(void *opaque_decl_ctx) {
return ConstString();
}
ConstString
SwiftASTContext::DeclContextGetScopeQualifiedName(void *opaque_decl_ctx) {
return ConstString();
}
bool SwiftASTContext::DeclContextIsClassMethod(
void *opaque_decl_ctx, lldb::LanguageType *language_ptr,
bool *is_instance_method_ptr, ConstString *language_object_name_ptr) {
return false;
}
//----------------------------------------------------------------------
// Tests
//----------------------------------------------------------------------
bool SwiftASTContext::IsArrayType(void *type, CompilerType *element_type_ptr,
uint64_t *size, bool *is_incomplete) {
VALID_OR_RETURN(false);
swift::CanType swift_can_type(GetCanonicalSwiftType(type));
swift::BoundGenericStructType *struct_type =
swift_can_type->getAs<swift::BoundGenericStructType>();
if (struct_type) {
swift::StructDecl *struct_decl = struct_type->getDecl();
if (strcmp(struct_decl->getName().get(), "Array") != 0)
return false;
if (!struct_decl->getModuleContext()->isStdlibModule())
return false;
const llvm::ArrayRef<swift::Type> &args = struct_type->getGenericArgs();
if (args.size() != 1)
return false;
if (is_incomplete)
*is_incomplete = true;
if (size)
*size = 0;
if (element_type_ptr)
*element_type_ptr =
CompilerType(GetASTContext(), args[0].getPointer());
return true;
}
return false;
}
bool SwiftASTContext::IsAggregateType(void *type) {
if (type) {
swift::CanType swift_can_type(GetCanonicalSwiftType(type));
auto referent_type = swift_can_type->getReferenceStorageReferent();
return (referent_type->is<swift::TupleType>() ||
referent_type->is<swift::BuiltinVectorType>() ||
referent_type->getAnyNominal());
}
return false;
}
bool SwiftASTContext::IsVectorType(void *type, CompilerType *element_type,
uint64_t *size) {
return false;
}
bool SwiftASTContext::IsRuntimeGeneratedType(void *type) { return false; }
bool SwiftASTContext::IsCharType(void *type) { return false; }
bool SwiftASTContext::IsCompleteType(void *type) { return true; }
bool SwiftASTContext::IsConst(void *type) { return false; }
bool SwiftASTContext::IsCStringType(void *type, uint32_t &length) {
return false;
}
bool SwiftASTContext::IsFunctionType(void *type, bool *is_variadic_ptr) {
if (type) {
swift::CanType swift_can_type(GetCanonicalSwiftType(type));
const swift::TypeKind type_kind = swift_can_type->getKind();
switch (type_kind) {
case swift::TypeKind::Function:
case swift::TypeKind::GenericFunction:
return true;
case swift::TypeKind::SILFunction:
return false; // TODO: is this correct?
default:
return false;
}
}
return false;
}
// Used to detect "Homogeneous Floating-point Aggregates"
uint32_t SwiftASTContext::IsHomogeneousAggregate(void *type,
CompilerType *base_type_ptr) {
return 0;
}
size_t SwiftASTContext::GetNumberOfFunctionArguments(void *type) {
if (type) {
swift::CanType swift_can_type(GetCanonicalSwiftType(type));
auto func =
swift::dyn_cast_or_null<swift::AnyFunctionType>(
swift_can_type);
if (func) {
auto input = func.getInput();
      // See the comment on swift::AnyFunctionType for the rationale here:
      // a function takes either a tuple or a ParenType, and a ParenType
      // (i.e. "(Foo)") is reduced down to just Foo, so if the input is not a
      // tuple there is exactly one input.
auto tuple = swift::dyn_cast<swift::TupleType>(input);
if (tuple)
return tuple->getNumElements();
else
return 1;
}
}
return 0;
}
CompilerType SwiftASTContext::GetFunctionArgumentAtIndex(void *type,
const size_t index) {
VALID_OR_RETURN(CompilerType());
if (type) {
swift::CanType swift_can_type(GetCanonicalSwiftType(type));
auto func =
swift::dyn_cast<swift::AnyFunctionType>(
swift_can_type);
if (func) {
auto input = func.getInput();
      // See the comment on swift::AnyFunctionType for the rationale here:
      // a function takes either a tuple or a ParenType, and a ParenType
      // (i.e. "(Foo)") is reduced down to just Foo, so if the input is not a
      // tuple there is exactly one input.
auto tuple = swift::dyn_cast<swift::TupleType>(input);
if (tuple) {
if (index < tuple->getNumElements())
return CompilerType(GetASTContext(),
tuple->getElementType(index));
} else
return CompilerType(GetASTContext(), input);
}
}
return CompilerType();
}
bool SwiftASTContext::IsFunctionPointerType(void *type) {
return IsFunctionType(type, nullptr); // FIXME: think about this
}
bool SwiftASTContext::IsBlockPointerType(
void *type, CompilerType *function_pointer_type_ptr) {
return false;
}
bool SwiftASTContext::IsIntegerType(void *type, bool &is_signed) {
return (GetTypeInfo(type, nullptr) & eTypeIsInteger);
}
bool SwiftASTContext::IsPointerType(void *type, CompilerType *pointee_type) {
VALID_OR_RETURN(false);
if (type) {
swift::CanType swift_can_type(GetCanonicalSwiftType(type));
auto referent_type = swift_can_type->getReferenceStorageReferent();
return (referent_type->is<swift::BuiltinRawPointerType>() ||
referent_type->is<swift::BuiltinNativeObjectType>() ||
referent_type->is<swift::BuiltinUnsafeValueBufferType>() ||
referent_type->is<swift::BuiltinUnknownObjectType>() ||
referent_type->is<swift::BuiltinBridgeObjectType>());
}
if (pointee_type)
pointee_type->Clear();
return false;
}
bool SwiftASTContext::IsPointerOrReferenceType(void *type,
CompilerType *pointee_type) {
return IsPointerType(type, pointee_type) ||
IsReferenceType(type, pointee_type, nullptr);
}
bool SwiftASTContext::ShouldTreatScalarValueAsAddress(
lldb::opaque_compiler_type_t type) {
return Flags(GetTypeInfo(type, nullptr))
.AnySet(eTypeInstanceIsPointer | eTypeIsReference);
}
bool SwiftASTContext::IsReferenceType(void *type, CompilerType *pointee_type,
bool *is_rvalue) {
if (type) {
swift::CanType swift_can_type(GetCanonicalSwiftType(type));
const swift::TypeKind type_kind = swift_can_type->getKind();
switch (type_kind) {
case swift::TypeKind::InOut:
case swift::TypeKind::LValue:
if (pointee_type)
*pointee_type = GetNonReferenceType(type);
return true;
default:
break;
}
}
if (pointee_type)
pointee_type->Clear();
return false;
}
bool SwiftASTContext::IsFloatingPointType(void *type, uint32_t &count,
bool &is_complex) {
if (type) {
if (GetTypeInfo(type, nullptr) & eTypeIsFloat) {
count = 1;
is_complex = false;
return true;
}
}
count = 0;
is_complex = false;
return false;
}
bool SwiftASTContext::IsDefined(void *type) {
if (!type)
return false;
return true;
}
bool SwiftASTContext::IsPolymorphicClass(void *type) { return false; }
bool SwiftASTContext::IsPossibleDynamicType(void *type,
CompilerType *dynamic_pointee_type,
bool check_cplusplus,
bool check_objc, bool check_swift) {
VALID_OR_RETURN(false);
if (type && check_swift) {
// FIXME: use the dynamic_pointee_type
Flags type_flags(GetTypeInfo(type, nullptr));
if (type_flags.AnySet(eTypeIsGenericTypeParam | eTypeIsClass |
eTypeIsProtocol))
return true;
if (type_flags.AnySet(eTypeIsStructUnion | eTypeIsEnumeration |
eTypeIsTuple)) {
CompilerType compiler_type(GetASTContext(), GetCanonicalSwiftType(type));
return !SwiftASTContext::IsFullyRealized(compiler_type);
}
auto can_type = GetCanonicalSwiftType(type).getPointer();
if (can_type == GetASTContext()->TheRawPointerType.getPointer())
return true;
if (can_type == GetASTContext()->TheUnknownObjectType.getPointer())
return true;
if (can_type == GetASTContext()->TheNativeObjectType.getPointer())
return true;
if (can_type == GetASTContext()->TheBridgeObjectType.getPointer())
return true;
}
if (dynamic_pointee_type)
dynamic_pointee_type->Clear();
return false;
}
bool SwiftASTContext::IsScalarType(void *type) {
if (!type)
return false;
return (GetTypeInfo(type, nullptr) & eTypeIsScalar) != 0;
}
bool SwiftASTContext::IsTypedefType(void *type) {
if (!type)
return false;
swift::Type swift_type(GetSwiftType(type));
return swift::isa<swift::NameAliasType>(swift_type.getPointer());
}
bool SwiftASTContext::IsVoidType(void *type) {
VALID_OR_RETURN(false);
if (!type)
return false;
return type == GetASTContext()->TheEmptyTupleType.getPointer();
}
bool SwiftASTContext::IsGenericType(const CompilerType &compiler_type) {
if (!compiler_type.IsValid())
return false;
if (llvm::dyn_cast_or_null<SwiftASTContext>(compiler_type.GetTypeSystem())) {
swift::Type swift_type(GetSwiftType(compiler_type));
    return swift_type->hasTypeParameter();
}
return false;
}
bool SwiftASTContext::IsSelfArchetypeType(const CompilerType &compiler_type) {
if (!compiler_type.IsValid())
return false;
if (llvm::dyn_cast_or_null<SwiftASTContext>(compiler_type.GetTypeSystem())) {
if (swift::isa<swift::GenericTypeParamType>(
(swift::TypeBase *)compiler_type.GetOpaqueQualType())) {
      // Hack: Just assume that if we have a generic parameter as the type of
      // 'self', it's going to be a protocol 'Self' type.
return true;
}
}
return false;
}
bool SwiftASTContext::IsPossibleZeroSizeType(
const CompilerType &compiler_type) {
if (!SwiftASTContext::IsFullyRealized(compiler_type))
return false;
auto ast =
llvm::dyn_cast_or_null<SwiftASTContext>(compiler_type.GetTypeSystem());
if (!ast)
return false;
const swift::irgen::TypeInfo *type_info =
ast->GetSwiftTypeInfo(compiler_type.GetOpaqueQualType());
  if (!type_info || !type_info->isFixedSize())
return false;
auto *fixed_type_info =
swift::cast<const swift::irgen::FixedTypeInfo>(type_info);
return fixed_type_info->getFixedSize().getValue() == 0;
}
bool SwiftASTContext::IsErrorType(const CompilerType &compiler_type) {
if (compiler_type.IsValid() &&
llvm::dyn_cast_or_null<SwiftASTContext>(compiler_type.GetTypeSystem())) {
ProtocolInfo protocol_info;
if (GetProtocolTypeInfo(compiler_type, protocol_info))
return protocol_info.m_is_errortype;
return false;
}
return false;
}
CompilerType
SwiftASTContext::GetReferentType(const CompilerType &compiler_type) {
VALID_OR_RETURN(CompilerType());
if (compiler_type.IsValid() &&
llvm::dyn_cast_or_null<SwiftASTContext>(compiler_type.GetTypeSystem())) {
swift::CanType swift_can_type(GetCanonicalSwiftType(compiler_type));
swift::TypeBase *swift_type = swift_can_type.getPointer();
if (swift_type && llvm::isa<swift::WeakStorageType>(swift_type))
return compiler_type;
auto ref_type = swift_can_type->getReferenceStorageReferent();
return CompilerType(GetASTContext(), ref_type);
}
return CompilerType();
}
bool SwiftASTContext::IsTrivialOptionSetType(
const CompilerType &compiler_type) {
if (compiler_type.IsValid() &&
llvm::dyn_cast_or_null<SwiftASTContext>(compiler_type.GetTypeSystem()))
return GetExtraTypeInformation(compiler_type.GetOpaqueQualType())
.m_flags.m_is_trivial_option_set;
return false;
}
bool SwiftASTContext::IsFullyRealized(const CompilerType &compiler_type) {
if (!compiler_type.IsValid())
return false;
if (auto ast = llvm::dyn_cast_or_null<SwiftASTContext>(
compiler_type.GetTypeSystem())) {
swift::CanType swift_can_type(GetCanonicalSwiftType(compiler_type));
if (swift::isa<swift::MetatypeType>(swift_can_type))
return true;
return !swift_can_type->hasArchetype() && !swift_can_type->hasTypeParameter();
}
return false;
}
bool SwiftASTContext::GetProtocolTypeInfo(const CompilerType &type,
ProtocolInfo &protocol_info) {
if (auto ast =
llvm::dyn_cast_or_null<SwiftASTContext>(type.GetTypeSystem())) {
swift::CanType swift_can_type(GetCanonicalSwiftType(type));
if (!swift_can_type.isExistentialType())
return false;
swift::ExistentialLayout layout = swift_can_type.getExistentialLayout();
protocol_info.m_is_class_only = layout.requiresClass();
protocol_info.m_num_protocols = layout.getProtocols().size();
protocol_info.m_is_objc = layout.isObjC();
protocol_info.m_is_anyobject = layout.isAnyObject();
protocol_info.m_is_errortype = layout.isErrorExistential();
if (auto superclass = layout.explicitSuperclass) {
protocol_info.m_superclass =
CompilerType(ast->GetASTContext(), superclass.getPointer());
}
unsigned num_witness_tables = 0;
for (auto protoTy : layout.getProtocols()) {
if (!protoTy->getDecl()->isObjC())
num_witness_tables++;
}
if (layout.isErrorExistential()) {
// Error existential -- instance pointer only
protocol_info.m_num_payload_words = 0;
protocol_info.m_num_storage_words = 1;
} else if (layout.requiresClass()) {
// Class-constrained existential -- instance pointer plus witness tables
protocol_info.m_num_payload_words = 0;
protocol_info.m_num_storage_words = 1 + num_witness_tables;
} else {
// Opaque existential -- three words of inline storage, metadata and
// witness tables
protocol_info.m_num_payload_words = swift::NumWords_ValueBuffer;
protocol_info.m_num_storage_words =
swift::NumWords_ValueBuffer + 1 + num_witness_tables;
}
return true;
}
return false;
}
SwiftASTContext::TypeAllocationStrategy
SwiftASTContext::GetAllocationStrategy(const CompilerType &type) {
if (auto ast =
llvm::dyn_cast_or_null<SwiftASTContext>(type.GetTypeSystem())) {
const swift::irgen::TypeInfo *type_info =
ast->GetSwiftTypeInfo(type.GetOpaqueQualType());
if (!type_info)
return TypeAllocationStrategy::eUnknown;
switch (type_info->getFixedPacking(ast->GetIRGenModule())) {
case swift::irgen::FixedPacking::OffsetZero:
return TypeAllocationStrategy::eInline;
case swift::irgen::FixedPacking::Allocate:
return TypeAllocationStrategy::ePointer;
case swift::irgen::FixedPacking::Dynamic:
return TypeAllocationStrategy::eDynamic;
default:
break;
}
}
return TypeAllocationStrategy::eUnknown;
}
bool SwiftASTContext::IsBeingDefined(void *type) { return false; }
bool SwiftASTContext::IsObjCObjectPointerType(const CompilerType &type,
CompilerType *class_type_ptr) {
if (!type)
return false;
swift::CanType swift_can_type(GetCanonicalSwiftType(type));
const swift::TypeKind type_kind = swift_can_type->getKind();
if (type_kind == swift::TypeKind::BuiltinNativeObject ||
type_kind == swift::TypeKind::BuiltinUnknownObject)
return true;
if (class_type_ptr)
class_type_ptr->Clear();
return false;
}
//----------------------------------------------------------------------
// Type Completion
//----------------------------------------------------------------------
bool SwiftASTContext::GetCompleteType(void *type) { return true; }
ConstString SwiftASTContext::GetTypeName(void *type) {
std::string type_name;
if (type) {
swift::Type swift_type(GetSwiftType(type));
swift::Type normalized_type =
swift_type.transform([](swift::Type type) -> swift::Type {
if (swift::SyntaxSugarType *syntax_sugar_type =
swift::dyn_cast<swift::SyntaxSugarType>(type.getPointer())) {
return syntax_sugar_type->getSinglyDesugaredType();
}
if (swift::DictionaryType *dictionary_type =
swift::dyn_cast<swift::DictionaryType>(type.getPointer())) {
return dictionary_type->getSinglyDesugaredType();
}
return type;
});
swift::PrintOptions print_options;
print_options.FullyQualifiedTypes = true;
print_options.SynthesizeSugarOnTypes = false;
type_name = normalized_type.getString(print_options);
}
return ConstString(type_name);
}
ConstString SwiftASTContext::GetDisplayTypeName(void *type) {
std::string type_name(GetTypeName(type).AsCString(""));
if (type) {
swift::Type swift_type(GetSwiftType(type));
swift::PrintOptions print_options;
print_options.FullyQualifiedTypes = false;
print_options.SynthesizeSugarOnTypes = true;
print_options.FullyQualifiedTypesIfAmbiguous = true;
type_name = swift_type.getString(print_options);
}
return ConstString(type_name);
}
ConstString SwiftASTContext::GetTypeSymbolName(void *type) {
swift::Type swift_type(GetSwiftType(type));
return GetTypeName(swift_type->getWithoutParens().getPointer());
}
ConstString SwiftASTContext::GetMangledTypeName(void *type) {
return GetMangledTypeName(GetSwiftType(type).getPointer());
}
uint32_t
SwiftASTContext::GetTypeInfo(void *type,
CompilerType *pointee_or_element_clang_type) {
VALID_OR_RETURN(0);
if (!type)
return 0;
if (pointee_or_element_clang_type)
pointee_or_element_clang_type->Clear();
swift::CanType swift_can_type(GetCanonicalSwiftType(type));
const swift::TypeKind type_kind = swift_can_type->getKind();
uint32_t swift_flags = eTypeIsSwift;
switch (type_kind) {
case swift::TypeKind::Archetype:
case swift::TypeKind::Error:
case swift::TypeKind::Module:
case swift::TypeKind::TypeVariable:
break;
case swift::TypeKind::UnboundGeneric:
swift_flags |= eTypeIsGeneric;
break;
case swift::TypeKind::GenericFunction:
    swift_flags |= eTypeIsGeneric;
    LLVM_FALLTHROUGH;
case swift::TypeKind::Function:
swift_flags |=
eTypeIsBuiltIn | eTypeHasValue | eTypeIsScalar | eTypeInstanceIsPointer;
break;
case swift::TypeKind::BuiltinInteger:
swift_flags |=
eTypeIsBuiltIn | eTypeHasValue | eTypeIsScalar | eTypeIsInteger;
break;
case swift::TypeKind::BuiltinFloat:
swift_flags |=
eTypeIsBuiltIn | eTypeHasValue | eTypeIsScalar | eTypeIsFloat;
break;
case swift::TypeKind::BuiltinRawPointer:
swift_flags |= eTypeIsBuiltIn | eTypeHasChildren | eTypeIsPointer |
eTypeIsScalar | eTypeHasValue;
break;
case swift::TypeKind::BuiltinNativeObject:
swift_flags |= eTypeIsBuiltIn | eTypeHasChildren | eTypeIsPointer |
eTypeIsScalar | eTypeHasValue;
break;
case swift::TypeKind::BuiltinUnknownObject:
swift_flags |= eTypeIsBuiltIn | eTypeHasChildren | eTypeIsPointer |
eTypeIsScalar | eTypeHasValue | eTypeIsObjC;
break;
case swift::TypeKind::BuiltinBridgeObject:
swift_flags |= eTypeIsBuiltIn | eTypeHasChildren | eTypeIsPointer |
eTypeIsScalar | eTypeHasValue | eTypeIsObjC;
break;
case swift::TypeKind::BuiltinUnsafeValueBuffer:
swift_flags |=
eTypeIsBuiltIn | eTypeIsPointer | eTypeIsScalar | eTypeHasValue;
break;
case swift::TypeKind::BuiltinVector:
// TODO: OR in eTypeIsFloat or eTypeIsInteger as needed
return eTypeIsBuiltIn | eTypeHasChildren | eTypeIsVector;
case swift::TypeKind::Tuple:
swift_flags |= eTypeHasChildren | eTypeIsTuple;
break;
case swift::TypeKind::UnmanagedStorage:
case swift::TypeKind::UnownedStorage:
case swift::TypeKind::WeakStorage:
swift_flags |=
CompilerType(GetASTContext(),
swift_can_type->getReferenceStorageReferent())
.GetTypeInfo(pointee_or_element_clang_type);
break;
case swift::TypeKind::BoundGenericEnum:
    swift_flags |= eTypeIsGeneric | eTypeIsBound;
    LLVM_FALLTHROUGH;
case swift::TypeKind::Enum: {
SwiftEnumDescriptor *cached_enum_info = GetCachedEnumInfo(type);
if (cached_enum_info) {
if (cached_enum_info->GetNumElementsWithPayload() == 0)
swift_flags |= eTypeHasValue | eTypeIsEnumeration;
else
swift_flags |= eTypeHasValue | eTypeIsEnumeration | eTypeHasChildren;
} else
swift_flags |= eTypeIsEnumeration;
} break;
case swift::TypeKind::BoundGenericStruct:
    swift_flags |= eTypeIsGeneric | eTypeIsBound;
    LLVM_FALLTHROUGH;
case swift::TypeKind::Struct:
swift_flags |= eTypeHasChildren | eTypeIsStructUnion;
break;
case swift::TypeKind::BoundGenericClass:
    swift_flags |= eTypeIsGeneric | eTypeIsBound;
    LLVM_FALLTHROUGH;
case swift::TypeKind::Class:
swift_flags |= eTypeHasChildren | eTypeIsClass | eTypeHasValue |
eTypeInstanceIsPointer;
break;
case swift::TypeKind::Protocol:
case swift::TypeKind::ProtocolComposition:
swift_flags |= eTypeHasChildren | eTypeIsStructUnion | eTypeIsProtocol;
break;
case swift::TypeKind::ExistentialMetatype:
case swift::TypeKind::Metatype:
swift_flags |= eTypeIsMetatype | eTypeHasValue;
break;
case swift::TypeKind::DependentMember:
case swift::TypeKind::GenericTypeParam:
swift_flags |=
eTypeHasValue | eTypeIsScalar | eTypeIsPointer | eTypeIsGenericTypeParam;
break;
case swift::TypeKind::LValue:
if (pointee_or_element_clang_type)
*pointee_or_element_clang_type = GetNonReferenceType(type);
swift_flags |= eTypeHasChildren | eTypeIsReference | eTypeHasValue;
break;
case swift::TypeKind::InOut:
case swift::TypeKind::DynamicSelf:
case swift::TypeKind::SILBox:
case swift::TypeKind::SILFunction:
case swift::TypeKind::SILBlockStorage:
case swift::TypeKind::Unresolved:
break;
case swift::TypeKind::Optional:
case swift::TypeKind::NameAlias:
case swift::TypeKind::Paren:
case swift::TypeKind::Dictionary:
case swift::TypeKind::ArraySlice:
assert(false && "Not a canonical type");
break;
}
return swift_flags;
}
lldb::LanguageType SwiftASTContext::GetMinimumLanguage(void *type) {
if (!type)
return lldb::eLanguageTypeC;
return lldb::eLanguageTypeSwift;
}
lldb::TypeClass SwiftASTContext::GetTypeClass(void *type) {
VALID_OR_RETURN(lldb::eTypeClassInvalid);
if (!type)
return lldb::eTypeClassInvalid;
swift::CanType swift_can_type(GetCanonicalSwiftType(type));
const swift::TypeKind type_kind = swift_can_type->getKind();
switch (type_kind) {
case swift::TypeKind::Error:
return lldb::eTypeClassOther;
case swift::TypeKind::BuiltinInteger:
return lldb::eTypeClassBuiltin;
case swift::TypeKind::BuiltinFloat:
return lldb::eTypeClassBuiltin;
case swift::TypeKind::BuiltinRawPointer:
return lldb::eTypeClassBuiltin;
case swift::TypeKind::BuiltinNativeObject:
return lldb::eTypeClassBuiltin;
case swift::TypeKind::BuiltinUnsafeValueBuffer:
return lldb::eTypeClassBuiltin;
case swift::TypeKind::BuiltinUnknownObject:
return lldb::eTypeClassBuiltin;
case swift::TypeKind::BuiltinBridgeObject:
return lldb::eTypeClassBuiltin;
case swift::TypeKind::BuiltinVector:
return lldb::eTypeClassVector;
case swift::TypeKind::Tuple:
return lldb::eTypeClassArray;
case swift::TypeKind::UnmanagedStorage:
case swift::TypeKind::UnownedStorage:
case swift::TypeKind::WeakStorage:
return CompilerType(GetASTContext(),
swift_can_type->getReferenceStorageReferent())
.GetTypeClass();
case swift::TypeKind::GenericTypeParam:
return lldb::eTypeClassOther;
case swift::TypeKind::DependentMember:
return lldb::eTypeClassOther;
case swift::TypeKind::Enum:
return lldb::eTypeClassUnion;
case swift::TypeKind::Struct:
return lldb::eTypeClassStruct;
case swift::TypeKind::Class:
return lldb::eTypeClassClass;
case swift::TypeKind::Protocol:
return lldb::eTypeClassOther;
case swift::TypeKind::Metatype:
return lldb::eTypeClassOther;
case swift::TypeKind::Module:
return lldb::eTypeClassOther;
case swift::TypeKind::Archetype:
return lldb::eTypeClassOther;
case swift::TypeKind::Function:
return lldb::eTypeClassFunction;
case swift::TypeKind::GenericFunction:
return lldb::eTypeClassFunction;
case swift::TypeKind::ProtocolComposition:
return lldb::eTypeClassOther;
case swift::TypeKind::LValue:
return lldb::eTypeClassReference;
case swift::TypeKind::UnboundGeneric:
return lldb::eTypeClassOther;
case swift::TypeKind::BoundGenericClass:
return lldb::eTypeClassClass;
case swift::TypeKind::BoundGenericEnum:
return lldb::eTypeClassUnion;
case swift::TypeKind::BoundGenericStruct:
return lldb::eTypeClassStruct;
case swift::TypeKind::TypeVariable:
return lldb::eTypeClassOther;
case swift::TypeKind::ExistentialMetatype:
return lldb::eTypeClassOther;
case swift::TypeKind::DynamicSelf:
return lldb::eTypeClassOther;
case swift::TypeKind::SILBox:
return lldb::eTypeClassOther;
case swift::TypeKind::SILFunction:
return lldb::eTypeClassFunction;
case swift::TypeKind::SILBlockStorage:
return lldb::eTypeClassOther;
case swift::TypeKind::InOut:
return lldb::eTypeClassOther;
case swift::TypeKind::Unresolved:
return lldb::eTypeClassOther;
case swift::TypeKind::Optional:
case swift::TypeKind::NameAlias:
case swift::TypeKind::Paren:
case swift::TypeKind::Dictionary:
case swift::TypeKind::ArraySlice:
assert(false && "Not a canonical type");
break;
}
return lldb::eTypeClassOther;
}
unsigned SwiftASTContext::GetTypeQualifiers(void *type) { return 0; }
//----------------------------------------------------------------------
// Creating related types
//----------------------------------------------------------------------
CompilerType SwiftASTContext::GetArrayElementType(void *type,
uint64_t *stride) {
VALID_OR_RETURN(CompilerType());
CompilerType element_type;
if (type) {
swift::CanType swift_type(GetCanonicalSwiftType(type));
    // There are a couple of structs that mean "Array" in Swift:
    //   Array<T>
    //   NativeArray<T>
    //   ArraySlice<T>
    // Treat them all as arrays for convenience's sake.
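    // For example, a value typed as Swift's Array<Int> yields Int as the
    // element type here. Note that this implementation never populates the
    // optional `stride` out-parameter.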
swift::BoundGenericStructType *boundGenericStructType(
swift_type->getAs<swift::BoundGenericStructType>());
if (boundGenericStructType) {
auto args = boundGenericStructType->getGenericArgs();
swift::StructDecl *decl = boundGenericStructType->getDecl();
if (args.size() == 1 &&
decl->getModuleContext()->isStdlibModule()) {
const char *declname = decl->getName().get();
if (0 == strcmp(declname, "NativeArray") ||
0 == strcmp(declname, "Array") || 0 == strcmp(declname, "ArraySlice"))
element_type = CompilerType(GetASTContext(), args[0].getPointer());
}
}
}
return element_type;
}
CompilerType SwiftASTContext::GetCanonicalType(void *type) {
VALID_OR_RETURN(CompilerType());
if (type)
return CompilerType(GetASTContext(),
GetCanonicalSwiftType(type).getPointer());
return CompilerType();
}
CompilerType SwiftASTContext::GetInstanceType(void *type) {
VALID_OR_RETURN(CompilerType());
if (!type)
return CompilerType();
swift::CanType swift_can_type(GetCanonicalSwiftType(type));
switch (swift_can_type->getKind()) {
case swift::TypeKind::ExistentialMetatype:
case swift::TypeKind::Metatype: {
auto metatype_type =
swift::dyn_cast<swift::AnyMetatypeType>(swift_can_type);
if (metatype_type)
return CompilerType(GetASTContext(),
metatype_type.getInstanceType().getPointer());
return CompilerType();
}
default:
break;
}
return CompilerType(GetASTContext(), GetSwiftType(type));
}
CompilerType SwiftASTContext::GetFullyUnqualifiedType(void *type) {
VALID_OR_RETURN(CompilerType());
return CompilerType(GetASTContext(), GetSwiftType(type));
}
int SwiftASTContext::GetFunctionArgumentCount(void *type) {
return GetNumberOfFunctionArguments(type);
}
CompilerType SwiftASTContext::GetFunctionArgumentTypeAtIndex(void *type,
size_t idx) {
return GetFunctionArgumentAtIndex(type, idx);
}
CompilerType SwiftASTContext::GetFunctionReturnType(void *type) {
VALID_OR_RETURN(CompilerType());
if (type) {
auto func = swift::dyn_cast<swift::AnyFunctionType>(
GetCanonicalSwiftType(type));
if (func)
return CompilerType(GetASTContext(), func.getResult().getPointer());
}
return CompilerType();
}
size_t SwiftASTContext::GetNumMemberFunctions(void *type) {
size_t num_functions = 0;
if (type) {
swift::CanType swift_can_type(GetCanonicalSwiftType(type));
auto nominal_decl = swift_can_type.getAnyNominal();
if (nominal_decl) {
auto iter = nominal_decl->getMembers().begin();
auto end = nominal_decl->getMembers().end();
for (; iter != end; iter++) {
switch (iter->getKind()) {
case swift::DeclKind::Constructor:
case swift::DeclKind::Destructor:
case swift::DeclKind::Func:
num_functions += 1;
break;
default:
break;
}
}
}
}
return num_functions;
}
TypeMemberFunctionImpl SwiftASTContext::GetMemberFunctionAtIndex(void *type,
size_t idx) {
VALID_OR_RETURN(TypeMemberFunctionImpl());
std::string name("");
CompilerType result_type;
MemberFunctionKind kind(MemberFunctionKind::eMemberFunctionKindUnknown);
swift::AbstractFunctionDecl *the_decl_we_care_about = nullptr;
if (type) {
swift::CanType swift_can_type(GetCanonicalSwiftType(type));
auto nominal_decl = swift_can_type.getAnyNominal();
if (nominal_decl) {
auto iter = nominal_decl->getMembers().begin();
auto end = nominal_decl->getMembers().end();
for (; iter != end; iter++) {
auto decl_kind = iter->getKind();
switch (decl_kind) {
case swift::DeclKind::Constructor:
case swift::DeclKind::Destructor:
case swift::DeclKind::Func: {
if (idx == 0) {
swift::AbstractFunctionDecl *abstract_func_decl =
llvm::dyn_cast_or_null<swift::AbstractFunctionDecl>(*iter);
if (abstract_func_decl) {
switch (decl_kind) {
case swift::DeclKind::Constructor:
name.clear();
kind = lldb::eMemberFunctionKindConstructor;
the_decl_we_care_about = abstract_func_decl;
break;
case swift::DeclKind::Destructor:
name.clear();
kind = lldb::eMemberFunctionKindDestructor;
the_decl_we_care_about = abstract_func_decl;
break;
case swift::DeclKind::Func:
                default: // The enclosing switch guarantees this is one of the
                         // three function-like decl kinds; treat it as a Func.
{
swift::FuncDecl *func_decl =
llvm::dyn_cast<swift::FuncDecl>(*iter);
if (func_decl) {
if (func_decl->getName().empty())
name.clear();
else
name.assign(func_decl->getName().get());
if (func_decl->isStatic())
kind = lldb::eMemberFunctionKindStaticMethod;
else
kind = lldb::eMemberFunctionKindInstanceMethod;
the_decl_we_care_about = func_decl;
}
}
}
result_type =
CompilerType(GetASTContext(),
abstract_func_decl->getInterfaceType().getPointer());
}
} else
--idx;
} break;
default:
break;
}
}
}
}
if (type && the_decl_we_care_about && (kind != eMemberFunctionKindUnknown))
return TypeMemberFunctionImpl(
result_type, CompilerDecl(this, the_decl_we_care_about), name, kind);
return TypeMemberFunctionImpl();
}
CompilerType SwiftASTContext::GetLValueReferenceType(void *type) {
VALID_OR_RETURN(CompilerType());
if (type)
return CompilerType(GetASTContext(),
swift::LValueType::get(GetSwiftType(type)));
return CompilerType();
}
CompilerType SwiftASTContext::GetRValueReferenceType(void *type) {
return CompilerType();
}
CompilerType SwiftASTContext::GetNonReferenceType(void *type) {
VALID_OR_RETURN(CompilerType());
if (type) {
swift::CanType swift_can_type(GetCanonicalSwiftType(type));
swift::LValueType *lvalue = swift_can_type->getAs<swift::LValueType>();
if (lvalue)
return CompilerType(GetASTContext(),
lvalue->getObjectType().getPointer());
swift::InOutType *inout = swift_can_type->getAs<swift::InOutType>();
if (inout)
return CompilerType(GetASTContext(),
inout->getObjectType().getPointer());
}
return CompilerType();
}
CompilerType SwiftASTContext::GetPointeeType(void *type) {
return CompilerType();
}
CompilerType SwiftASTContext::GetPointerType(void *type) {
VALID_OR_RETURN(CompilerType());
if (type) {
swift::Type swift_type(::GetSwiftType(type));
const swift::TypeKind type_kind = swift_type->getKind();
if (type_kind == swift::TypeKind::BuiltinRawPointer)
return CompilerType(GetASTContext(), swift_type);
}
return CompilerType();
}
CompilerType SwiftASTContext::GetTypedefedType(void *type) {
VALID_OR_RETURN(CompilerType());
if (type) {
swift::Type swift_type(::GetSwiftType(type));
swift::NameAliasType *name_alias_type =
swift::dyn_cast<swift::NameAliasType>(swift_type.getPointer());
if (name_alias_type) {
return CompilerType(GetASTContext(),
name_alias_type->getSinglyDesugaredType());
}
}
return CompilerType();
}
CompilerType
SwiftASTContext::GetUnboundType(lldb::opaque_compiler_type_t type) {
VALID_OR_RETURN(CompilerType());
if (type) {
swift::CanType swift_can_type(GetCanonicalSwiftType(type));
swift::BoundGenericType *bound_generic_type =
swift_can_type->getAs<swift::BoundGenericType>();
if (bound_generic_type) {
swift::NominalTypeDecl *nominal_type_decl = bound_generic_type->getDecl();
if (nominal_type_decl)
return CompilerType(GetASTContext(),
nominal_type_decl->getDeclaredType());
}
}
return CompilerType(GetASTContext(), GetSwiftType(type));
}
CompilerType SwiftASTContext::MapIntoContext(lldb::StackFrameSP &frame_sp,
lldb::opaque_compiler_type_t type) {
VALID_OR_RETURN(CompilerType());
if (!type)
return CompilerType(GetASTContext(), nullptr);
if (!frame_sp)
return CompilerType(GetASTContext(), GetSwiftType(type));
swift::CanType swift_can_type(GetCanonicalSwiftType(type));
const SymbolContext &sc(frame_sp->GetSymbolContext(eSymbolContextFunction));
if (!sc.function || (swift_can_type && !swift_can_type->hasTypeParameter()))
return CompilerType(GetASTContext(), GetSwiftType(type));
auto *ctx = llvm::dyn_cast_or_null<SwiftASTContext>(
sc.function->GetCompilerType().GetTypeSystem());
if (!ctx)
return CompilerType(GetASTContext(), GetSwiftType(type));
// FIXME: we need the innermost non-inlined function.
auto function_name = sc.GetFunctionName(Mangled::ePreferMangled);
std::string error;
swift::Decl *func_decl =
swift::ide::getDeclFromMangledSymbolName(*ctx->GetASTContext(),
function_name.GetStringRef(),
error);
if (!error.empty()) {
Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_TYPES));
if (log)
log->Printf("Failed to getDeclFromMangledSymbolName(\"%s\"): %s\n",
function_name.AsCString(), error.c_str());
}
if (auto *dc = llvm::dyn_cast_or_null<swift::DeclContext>(func_decl))
return {GetASTContext(), dc->mapTypeIntoContext(swift_can_type)};
return CompilerType(GetASTContext(), GetSwiftType(type));
}
//----------------------------------------------------------------------
// Create related types using the current type's AST
//----------------------------------------------------------------------
CompilerType SwiftASTContext::GetBasicTypeFromAST(lldb::BasicType basic_type) {
return CompilerType();
}
//----------------------------------------------------------------------
// Exploring the type
//----------------------------------------------------------------------
const swift::irgen::TypeInfo *SwiftASTContext::GetSwiftTypeInfo(void *type) {
VALID_OR_RETURN(nullptr);
if (type) {
auto &irgen_module = GetIRGenModule();
swift::CanType swift_can_type(GetCanonicalSwiftType(type));
swift::SILType swift_sil_type = irgen_module.getLoweredType(
swift_can_type);
return &irgen_module.getTypeInfo(swift_sil_type);
}
return nullptr;
}
const swift::irgen::FixedTypeInfo *
SwiftASTContext::GetSwiftFixedTypeInfo(void *type) {
VALID_OR_RETURN(nullptr);
const swift::irgen::TypeInfo *type_info = GetSwiftTypeInfo(type);
if (type_info) {
if (type_info->isFixedSize())
return swift::cast<const swift::irgen::FixedTypeInfo>(type_info);
}
return nullptr;
}
uint64_t SwiftASTContext::GetBitSize(lldb::opaque_compiler_type_t type,
ExecutionContextScope *exe_scope) {
if (type) {
swift::CanType swift_can_type(GetCanonicalSwiftType(type));
// FIXME: Query remote mirrors for this.
if (swift_can_type->hasTypeParameter())
return GetPointerByteSize() * 8;
const swift::TypeKind type_kind = swift_can_type->getKind();
switch (type_kind) {
case swift::TypeKind::LValue:
case swift::TypeKind::UnboundGeneric:
case swift::TypeKind::GenericFunction:
case swift::TypeKind::Function:
return GetPointerByteSize() * 8;
default:
break;
}
const swift::irgen::FixedTypeInfo *fixed_type_info =
GetSwiftFixedTypeInfo(type);
if (fixed_type_info)
return fixed_type_info->getFixedSize().getValue() * 8;
}
return 0;
}
uint64_t SwiftASTContext::GetByteStride(lldb::opaque_compiler_type_t type) {
if (type) {
const swift::irgen::FixedTypeInfo *fixed_type_info =
GetSwiftFixedTypeInfo(type);
if (fixed_type_info)
return fixed_type_info->getFixedStride().getValue();
}
return 0;
}
size_t SwiftASTContext::GetTypeBitAlign(void *type) {
if (type) {
const swift::irgen::FixedTypeInfo *fixed_type_info =
GetSwiftFixedTypeInfo(type);
if (fixed_type_info)
return fixed_type_info->getFixedAlignment().getValue();
}
return 0;
}
lldb::Encoding SwiftASTContext::GetEncoding(void *type, uint64_t &count) {
VALID_OR_RETURN(lldb::eEncodingInvalid);
if (!type)
return lldb::eEncodingInvalid;
count = 1;
swift::CanType swift_can_type(GetCanonicalSwiftType(type));
const swift::TypeKind type_kind = swift_can_type->getKind();
switch (type_kind) {
case swift::TypeKind::Error:
break;
case swift::TypeKind::BuiltinInteger:
return lldb::eEncodingSint; // TODO: detect if an integer is unsigned
case swift::TypeKind::BuiltinFloat:
    return lldb::eEncodingIEEE754;
case swift::TypeKind::Archetype:
case swift::TypeKind::BuiltinRawPointer:
case swift::TypeKind::BuiltinNativeObject:
case swift::TypeKind::BuiltinUnsafeValueBuffer:
case swift::TypeKind::BuiltinUnknownObject:
case swift::TypeKind::BuiltinBridgeObject:
case swift::TypeKind::Class: // Classes are pointers in swift...
case swift::TypeKind::BoundGenericClass:
case swift::TypeKind::GenericTypeParam:
case swift::TypeKind::DependentMember:
return lldb::eEncodingUint;
case swift::TypeKind::BuiltinVector:
break;
case swift::TypeKind::Tuple:
break;
case swift::TypeKind::UnmanagedStorage:
case swift::TypeKind::UnownedStorage:
case swift::TypeKind::WeakStorage:
return CompilerType(GetASTContext(),
swift_can_type->getReferenceStorageReferent())
.GetEncoding(count);
case swift::TypeKind::ExistentialMetatype:
case swift::TypeKind::Metatype:
return lldb::eEncodingUint;
case swift::TypeKind::GenericFunction:
case swift::TypeKind::Function:
return lldb::eEncodingUint;
case swift::TypeKind::Enum:
case swift::TypeKind::BoundGenericEnum:
break;
case swift::TypeKind::Struct:
case swift::TypeKind::Protocol:
case swift::TypeKind::Module:
case swift::TypeKind::ProtocolComposition:
break;
case swift::TypeKind::LValue:
return lldb::eEncodingUint;
case swift::TypeKind::UnboundGeneric:
case swift::TypeKind::BoundGenericStruct:
case swift::TypeKind::TypeVariable:
case swift::TypeKind::DynamicSelf:
case swift::TypeKind::SILBox:
case swift::TypeKind::SILFunction:
case swift::TypeKind::SILBlockStorage:
case swift::TypeKind::InOut:
case swift::TypeKind::Unresolved:
break;
case swift::TypeKind::Optional:
case swift::TypeKind::NameAlias:
case swift::TypeKind::Paren:
case swift::TypeKind::Dictionary:
case swift::TypeKind::ArraySlice:
assert(false && "Not a canonical type");
break;
}
count = 0;
return lldb::eEncodingInvalid;
}
lldb::Format SwiftASTContext::GetFormat(void *type) {
VALID_OR_RETURN(lldb::eFormatInvalid);
if (!type)
return lldb::eFormatDefault;
swift::CanType swift_can_type(GetCanonicalSwiftType(type));
const swift::TypeKind type_kind = swift_can_type->getKind();
switch (type_kind) {
case swift::TypeKind::Error:
break;
case swift::TypeKind::BuiltinInteger:
return eFormatDecimal; // TODO: detect if an integer is unsigned
case swift::TypeKind::BuiltinFloat:
    return eFormatFloat;
case swift::TypeKind::BuiltinRawPointer:
case swift::TypeKind::BuiltinNativeObject:
case swift::TypeKind::BuiltinUnknownObject:
case swift::TypeKind::BuiltinUnsafeValueBuffer:
case swift::TypeKind::BuiltinBridgeObject:
case swift::TypeKind::Archetype:
case swift::TypeKind::GenericTypeParam:
case swift::TypeKind::DependentMember:
return eFormatAddressInfo;
// Classes are always pointers in swift...
case swift::TypeKind::Class:
case swift::TypeKind::BoundGenericClass:
return eFormatHex;
case swift::TypeKind::BuiltinVector:
break;
case swift::TypeKind::Tuple:
break;
case swift::TypeKind::UnmanagedStorage:
case swift::TypeKind::UnownedStorage:
case swift::TypeKind::WeakStorage:
return CompilerType(GetASTContext(),
swift_can_type->getReferenceStorageReferent())
.GetFormat();
case swift::TypeKind::Enum:
case swift::TypeKind::BoundGenericEnum:
return eFormatUnsigned;
case swift::TypeKind::GenericFunction:
case swift::TypeKind::Function:
return lldb::eFormatAddressInfo;
case swift::TypeKind::Struct:
case swift::TypeKind::Protocol:
case swift::TypeKind::Metatype:
case swift::TypeKind::Module:
case swift::TypeKind::ProtocolComposition:
break;
case swift::TypeKind::LValue:
return lldb::eFormatHex;
case swift::TypeKind::UnboundGeneric:
case swift::TypeKind::BoundGenericStruct:
case swift::TypeKind::TypeVariable:
case swift::TypeKind::ExistentialMetatype:
case swift::TypeKind::DynamicSelf:
case swift::TypeKind::SILBox:
case swift::TypeKind::SILFunction:
case swift::TypeKind::SILBlockStorage:
case swift::TypeKind::InOut:
case swift::TypeKind::Unresolved:
break;
case swift::TypeKind::Optional:
case swift::TypeKind::NameAlias:
case swift::TypeKind::Paren:
case swift::TypeKind::Dictionary:
case swift::TypeKind::ArraySlice:
assert(false && "Not a canonical type");
break;
}
  // We don't know how to display this type...
return lldb::eFormatBytes;
}
uint32_t SwiftASTContext::GetNumChildren(void *type,
bool omit_empty_base_classes) {
VALID_OR_RETURN(0);
if (!type)
return 0;
uint32_t num_children = 0;
swift::CanType swift_can_type(GetCanonicalSwiftType(type));
const swift::TypeKind type_kind = swift_can_type->getKind();
switch (type_kind) {
case swift::TypeKind::Error:
case swift::TypeKind::BuiltinInteger:
case swift::TypeKind::BuiltinFloat:
case swift::TypeKind::BuiltinRawPointer:
case swift::TypeKind::BuiltinNativeObject:
case swift::TypeKind::BuiltinUnknownObject:
case swift::TypeKind::BuiltinUnsafeValueBuffer:
case swift::TypeKind::BuiltinBridgeObject:
case swift::TypeKind::BuiltinVector:
case swift::TypeKind::Module:
case swift::TypeKind::Function:
case swift::TypeKind::GenericFunction:
case swift::TypeKind::DynamicSelf:
case swift::TypeKind::SILBox:
case swift::TypeKind::SILFunction:
case swift::TypeKind::InOut:
break;
case swift::TypeKind::UnmanagedStorage:
case swift::TypeKind::UnownedStorage:
case swift::TypeKind::WeakStorage:
return CompilerType(GetASTContext(),
swift_can_type->getReferenceStorageReferent())
.GetNumChildren(omit_empty_base_classes);
case swift::TypeKind::GenericTypeParam:
case swift::TypeKind::DependentMember:
break;
case swift::TypeKind::Enum:
case swift::TypeKind::BoundGenericEnum: {
SwiftEnumDescriptor *cached_enum_info = GetCachedEnumInfo(type);
if (cached_enum_info)
return cached_enum_info->GetNumElementsWithPayload();
} break;
case swift::TypeKind::Tuple:
case swift::TypeKind::Struct:
case swift::TypeKind::BoundGenericStruct:
return GetNumFields(type);
case swift::TypeKind::Class:
case swift::TypeKind::BoundGenericClass: {
auto class_decl = swift_can_type->getClassOrBoundGenericClass();
return (class_decl->hasSuperclass() ? 1 : 0) + GetNumFields(type);
}
case swift::TypeKind::Protocol:
case swift::TypeKind::ProtocolComposition: {
ProtocolInfo protocol_info;
if (!GetProtocolTypeInfo(
CompilerType(GetASTContext(), GetSwiftType(type)), protocol_info))
break;
return protocol_info.m_num_storage_words;
}
case swift::TypeKind::ExistentialMetatype:
case swift::TypeKind::Metatype:
case swift::TypeKind::Archetype:
return 0;
case swift::TypeKind::LValue: {
swift::LValueType *lvalue_type = swift_can_type->castTo<swift::LValueType>();
swift::TypeBase *deref_type = lvalue_type->getObjectType().getPointer();
uint32_t num_pointee_children =
CompilerType(GetASTContext(), deref_type)
.GetNumChildren(omit_empty_base_classes);
// If this type points to a simple type (or to a class), then it has 1 child
if (num_pointee_children == 0 || deref_type->getClassOrBoundGenericClass())
num_children = 1;
else
num_children = num_pointee_children;
} break;
case swift::TypeKind::UnboundGeneric:
break;
case swift::TypeKind::TypeVariable:
break;
case swift::TypeKind::SILBlockStorage:
case swift::TypeKind::Unresolved:
break;
case swift::TypeKind::Optional:
case swift::TypeKind::NameAlias:
case swift::TypeKind::Paren:
case swift::TypeKind::Dictionary:
case swift::TypeKind::ArraySlice:
assert(false && "Not a canonical type");
break;
}
return num_children;
}
lldb::BasicType SwiftASTContext::GetBasicTypeEnumeration(void *type) {
return eBasicTypeInvalid;
}
#pragma mark Aggregate Types
uint32_t SwiftASTContext::GetNumDirectBaseClasses(void *opaque_type) {
if (!opaque_type)
return 0;
swift::CanType swift_can_type(GetCanonicalSwiftType(opaque_type));
swift::ClassDecl *class_decl = swift_can_type->getClassOrBoundGenericClass();
if (class_decl) {
if (class_decl->hasSuperclass())
return 1;
}
return 0;
}
uint32_t SwiftASTContext::GetNumVirtualBaseClasses(void *opaque_type) {
return 0;
}
uint32_t SwiftASTContext::GetNumFields(void *type) {
VALID_OR_RETURN(0);
if (!type)
return 0;
uint32_t count = 0;
swift::CanType swift_can_type(GetCanonicalSwiftType(type));
const swift::TypeKind type_kind = swift_can_type->getKind();
switch (type_kind) {
case swift::TypeKind::Error:
case swift::TypeKind::BuiltinInteger:
case swift::TypeKind::BuiltinFloat:
case swift::TypeKind::BuiltinRawPointer:
case swift::TypeKind::BuiltinNativeObject:
case swift::TypeKind::BuiltinUnknownObject:
case swift::TypeKind::BuiltinUnsafeValueBuffer:
case swift::TypeKind::BuiltinBridgeObject:
case swift::TypeKind::BuiltinVector:
break;
case swift::TypeKind::UnmanagedStorage:
case swift::TypeKind::UnownedStorage:
case swift::TypeKind::WeakStorage:
return CompilerType(GetASTContext(),
swift_can_type->getReferenceStorageReferent())
.GetNumFields();
case swift::TypeKind::GenericTypeParam:
case swift::TypeKind::DependentMember:
break;
case swift::TypeKind::Enum:
case swift::TypeKind::BoundGenericEnum: {
SwiftEnumDescriptor *cached_enum_info = GetCachedEnumInfo(type);
if (cached_enum_info)
return cached_enum_info->GetNumElementsWithPayload();
} break;
case swift::TypeKind::Tuple:
return cast<swift::TupleType>(swift_can_type)->getNumElements();
case swift::TypeKind::Struct:
case swift::TypeKind::Class:
case swift::TypeKind::BoundGenericClass:
case swift::TypeKind::BoundGenericStruct: {
auto nominal = swift_can_type->getAnyNominal();
return GetStoredProperties(nominal).size();
}
case swift::TypeKind::Protocol:
case swift::TypeKind::ProtocolComposition:
return GetNumChildren(type, /*omit_empty_base_classes=*/false);
case swift::TypeKind::ExistentialMetatype:
case swift::TypeKind::Metatype:
return 0;
case swift::TypeKind::Module:
case swift::TypeKind::Archetype:
case swift::TypeKind::Function:
case swift::TypeKind::GenericFunction:
case swift::TypeKind::LValue:
case swift::TypeKind::UnboundGeneric:
case swift::TypeKind::TypeVariable:
case swift::TypeKind::DynamicSelf:
case swift::TypeKind::SILBox:
case swift::TypeKind::SILFunction:
case swift::TypeKind::SILBlockStorage:
case swift::TypeKind::InOut:
case swift::TypeKind::Unresolved:
break;
case swift::TypeKind::Optional:
case swift::TypeKind::NameAlias:
case swift::TypeKind::Paren:
case swift::TypeKind::Dictionary:
case swift::TypeKind::ArraySlice:
assert(false && "Not a canonical type");
break;
}
return count;
}
CompilerType
SwiftASTContext::GetDirectBaseClassAtIndex(void *opaque_type, size_t idx,
uint32_t *bit_offset_ptr) {
VALID_OR_RETURN(CompilerType());
if (opaque_type) {
swift::CanType swift_can_type(GetCanonicalSwiftType(opaque_type));
swift::ClassDecl *class_decl =
swift_can_type->getClassOrBoundGenericClass();
if (class_decl) {
swift::Type base_class_type = class_decl->getSuperclass();
if (base_class_type)
return CompilerType(GetASTContext(), base_class_type.getPointer());
}
}
return CompilerType();
}
CompilerType
SwiftASTContext::GetVirtualBaseClassAtIndex(void *opaque_type, size_t idx,
uint32_t *bit_offset_ptr) {
return CompilerType();
}
/// Retrieve the printable name of a tuple element.
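/// For example, for the tuple type (x: Int, String), index 0 yields "x" and
/// index 1 yields "1" (the printed index).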
static std::string GetTupleElementName(const swift::TupleType *tuple_type,
unsigned index,
llvm::StringRef printed_index = "") {
const auto &element = tuple_type->getElement(index);
// Use the element name if there is one.
if (!element.getName().empty()) return element.getName().str();
// If we know the printed index already, use that.
if (!printed_index.empty()) return printed_index;
// Print the index and return that.
std::string str;
llvm::raw_string_ostream(str) << index;
return str;
}
/// Retrieve the printable name of a type referenced as a superclass.
static std::string GetSuperclassName(const CompilerType &superclass_type) {
return superclass_type.GetUnboundType().GetTypeName()
.AsCString("<no type name>");
}
/// Retrieve the type and name of a child of an existential type.
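/// The index maps onto the existential container layout computed by
/// GetProtocolTypeInfo: for an opaque existential the children are
/// payload_data_0..N-1, then "instance_type" (the metatype), then one
/// "witness_table_<Proto>" per non-ObjC protocol; for class-bound and error
/// existentials, child 0 is the instance pointer ("instance" /
/// "error_instance"), followed by any witness tables.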
static std::pair<CompilerType, std::string>
GetExistentialTypeChild(swift::ASTContext *swift_ast_ctx,
CompilerType type,
const SwiftASTContext::ProtocolInfo &protocol_info,
unsigned idx) {
assert(idx < protocol_info.m_num_storage_words &&
"caller is responsible for validating index");
// A payload word for a non-class, non-error existential.
if (idx < protocol_info.m_num_payload_words) {
std::string name;
llvm::raw_string_ostream(name) << "payload_data_" << idx;
auto raw_pointer = swift_ast_ctx->TheRawPointerType;
return { CompilerType(swift_ast_ctx, raw_pointer.getPointer()),
std::move(name) };
}
// The instance for a class-bound existential.
if (idx == 0 && protocol_info.m_is_class_only) {
CompilerType class_type;
if (protocol_info.m_superclass) {
class_type = protocol_info.m_superclass;
} else {
auto raw_pointer = swift_ast_ctx->TheRawPointerType;
class_type = CompilerType(swift_ast_ctx, raw_pointer.getPointer());
}
return { class_type, "instance" };
}
// The instance for an error existential.
if (idx == 0 && protocol_info.m_is_errortype) {
auto raw_pointer = swift_ast_ctx->TheRawPointerType;
return { CompilerType(swift_ast_ctx, raw_pointer.getPointer()),
"error_instance" };
}
  // The metatype for a non-class, non-error existential.
  if (idx && idx == protocol_info.m_num_payload_words) {
auto any_metatype =
swift::ExistentialMetatypeType::get(swift_ast_ctx->TheAnyType);
return { CompilerType(swift_ast_ctx, any_metatype), "instance_type" };
}
// A witness table. Figure out which protocol it corresponds to.
unsigned witness_table_idx = idx - protocol_info.m_num_payload_words - 1;
swift::CanType swift_can_type(GetCanonicalSwiftType(type));
swift::ExistentialLayout layout = swift_can_type.getExistentialLayout();
std::string name;
for (auto protoType : layout.getProtocols()) {
auto proto = protoType->getDecl();
if (proto->isObjC()) continue;
if (witness_table_idx == 0) {
llvm::raw_string_ostream(name) << "witness_table_"
<< proto->getBaseName().userFacingName();
break;
}
--witness_table_idx;
}
auto raw_pointer = swift_ast_ctx->TheRawPointerType;
return { CompilerType(swift_ast_ctx, raw_pointer.getPointer()),
std::move(name) };
}
CompilerType SwiftASTContext::GetFieldAtIndex(void *type, size_t idx,
std::string &name,
uint64_t *bit_offset_ptr,
uint32_t *bitfield_bit_size_ptr,
bool *is_bitfield_ptr) {
VALID_OR_RETURN(CompilerType());
if (!type)
return CompilerType();
swift::CanType swift_can_type(GetCanonicalSwiftType(type));
const swift::TypeKind type_kind = swift_can_type->getKind();
switch (type_kind) {
case swift::TypeKind::Error:
case swift::TypeKind::BuiltinInteger:
case swift::TypeKind::BuiltinFloat:
case swift::TypeKind::BuiltinRawPointer:
case swift::TypeKind::BuiltinNativeObject:
case swift::TypeKind::BuiltinUnsafeValueBuffer:
case swift::TypeKind::BuiltinUnknownObject:
case swift::TypeKind::BuiltinBridgeObject:
case swift::TypeKind::BuiltinVector:
break;
case swift::TypeKind::UnmanagedStorage:
case swift::TypeKind::UnownedStorage:
case swift::TypeKind::WeakStorage:
return CompilerType(GetASTContext(),
swift_can_type->getReferenceStorageReferent())
.GetFieldAtIndex(idx, name, bit_offset_ptr, bitfield_bit_size_ptr,
is_bitfield_ptr);
case swift::TypeKind::GenericTypeParam:
case swift::TypeKind::DependentMember:
break;
case swift::TypeKind::Enum:
case swift::TypeKind::BoundGenericEnum: {
SwiftEnumDescriptor *cached_enum_info = GetCachedEnumInfo(type);
if (cached_enum_info &&
idx < cached_enum_info->GetNumElementsWithPayload()) {
const SwiftEnumDescriptor::ElementInfo *enum_element_info =
cached_enum_info->GetElementWithPayloadAtIndex(idx);
name.assign(enum_element_info->name.GetCString());
if (bit_offset_ptr)
*bit_offset_ptr = 0;
if (bitfield_bit_size_ptr)
*bitfield_bit_size_ptr = 0;
if (is_bitfield_ptr)
*is_bitfield_ptr = false;
return enum_element_info->payload_type;
}
} break;
case swift::TypeKind::Tuple: {
auto tuple_type = cast<swift::TupleType>(swift_can_type);
if (idx >= tuple_type->getNumElements()) break;
// We cannot reliably get layout information without an execution
// context.
if (bit_offset_ptr)
*bit_offset_ptr = LLDB_INVALID_IVAR_OFFSET;
if (bitfield_bit_size_ptr)
*bitfield_bit_size_ptr = 0;
if (is_bitfield_ptr)
*is_bitfield_ptr = false;
name = GetTupleElementName(tuple_type, idx);
const auto &child = tuple_type->getElement(idx);
return CompilerType(GetASTContext(), child.getType().getPointer());
}
case swift::TypeKind::Class:
case swift::TypeKind::BoundGenericClass: {
auto class_decl = swift_can_type->getClassOrBoundGenericClass();
if (class_decl->hasSuperclass()) {
if (idx == 0) {
swift::Type superclass_swift_type = swift_can_type->getSuperclass();
CompilerType superclass_type(GetASTContext(),
superclass_swift_type.getPointer());
name = GetSuperclassName(superclass_type);
// We cannot reliably get layout information without an execution
// context.
if (bit_offset_ptr)
*bit_offset_ptr = LLDB_INVALID_IVAR_OFFSET;
if (bitfield_bit_size_ptr)
*bitfield_bit_size_ptr = 0;
if (is_bitfield_ptr)
*is_bitfield_ptr = false;
return superclass_type;
}
// Adjust the index to refer into the stored properties.
--idx;
}
LLVM_FALLTHROUGH;
}
case swift::TypeKind::Struct:
case swift::TypeKind::BoundGenericStruct: {
auto nominal = swift_can_type->getAnyNominal();
auto stored_properties = GetStoredProperties(nominal);
if (idx >= stored_properties.size()) break;
auto property = stored_properties[idx];
name = property->getBaseName().userFacingName();
// We cannot reliably get layout information without an execution
// context.
if (bit_offset_ptr)
*bit_offset_ptr = LLDB_INVALID_IVAR_OFFSET;
if (bitfield_bit_size_ptr)
*bitfield_bit_size_ptr = 0;
if (is_bitfield_ptr)
*is_bitfield_ptr = false;
swift::Type child_swift_type = swift_can_type->getTypeOfMember(
nominal->getModuleContext(), property, nullptr);
return CompilerType(GetASTContext(), child_swift_type.getPointer());
}
case swift::TypeKind::Protocol:
case swift::TypeKind::ProtocolComposition: {
ProtocolInfo protocol_info;
if (!GetProtocolTypeInfo(
CompilerType(GetASTContext(), GetSwiftType(type)), protocol_info))
break;
if (idx >= protocol_info.m_num_storage_words) break;
CompilerType compiler_type(GetASTContext(), GetSwiftType(type));
CompilerType child_type;
std::tie(child_type, name) =
GetExistentialTypeChild(GetASTContext(), compiler_type, protocol_info,
idx);
uint64_t child_size = child_type.GetByteSize(nullptr);
if (bit_offset_ptr)
*bit_offset_ptr = idx * child_size * 8;
if (bitfield_bit_size_ptr)
*bitfield_bit_size_ptr = 0;
if (is_bitfield_ptr)
*is_bitfield_ptr = false;
return child_type;
}
case swift::TypeKind::ExistentialMetatype:
case swift::TypeKind::Metatype:
break;
case swift::TypeKind::Module:
case swift::TypeKind::Archetype:
case swift::TypeKind::Function:
case swift::TypeKind::GenericFunction:
case swift::TypeKind::LValue:
case swift::TypeKind::UnboundGeneric:
case swift::TypeKind::TypeVariable:
case swift::TypeKind::DynamicSelf:
case swift::TypeKind::SILBox:
case swift::TypeKind::SILFunction:
case swift::TypeKind::SILBlockStorage:
case swift::TypeKind::InOut:
case swift::TypeKind::Unresolved:
break;
case swift::TypeKind::Optional:
case swift::TypeKind::NameAlias:
case swift::TypeKind::Paren:
case swift::TypeKind::Dictionary:
case swift::TypeKind::ArraySlice:
assert(false && "Not a canonical type");
break;
}
return CompilerType();
}
// If a pointer's pointee type (the opaque type argument) says that it has no
// children, then we either need to trust it or override it and return a
// different result. For example, an "int *" has one child that is an integer,
// but a function pointer doesn't have any children. Likewise, if a record type
// claims it has no children, then there really is nothing to show.
uint32_t SwiftASTContext::GetNumPointeeChildren(void *type) {
if (!type)
return 0;
swift::CanType swift_can_type(GetCanonicalSwiftType(type));
const swift::TypeKind type_kind = swift_can_type->getKind();
switch (type_kind) {
case swift::TypeKind::Error:
return 0;
case swift::TypeKind::BuiltinInteger:
return 1;
case swift::TypeKind::BuiltinFloat:
return 1;
case swift::TypeKind::BuiltinRawPointer:
return 1;
case swift::TypeKind::BuiltinUnsafeValueBuffer:
return 1;
case swift::TypeKind::BuiltinNativeObject:
return 1;
case swift::TypeKind::BuiltinUnknownObject:
return 1;
case swift::TypeKind::BuiltinBridgeObject:
return 1;
case swift::TypeKind::BuiltinVector:
return 0;
case swift::TypeKind::UnmanagedStorage:
case swift::TypeKind::UnownedStorage:
case swift::TypeKind::WeakStorage:
return GetNumPointeeChildren(
swift::cast<swift::ReferenceStorageType>(swift_can_type)
.getPointer());
case swift::TypeKind::Tuple:
return 0;
case swift::TypeKind::GenericTypeParam:
return 0;
case swift::TypeKind::DependentMember:
return 0;
case swift::TypeKind::Enum:
return 0;
case swift::TypeKind::Struct:
return 0;
case swift::TypeKind::Class:
return 0;
case swift::TypeKind::Protocol:
return 0;
case swift::TypeKind::Metatype:
return 0;
case swift::TypeKind::Module:
return 0;
case swift::TypeKind::Archetype:
return 0;
case swift::TypeKind::Function:
return 0;
case swift::TypeKind::GenericFunction:
return 0;
case swift::TypeKind::ProtocolComposition:
return 0;
case swift::TypeKind::LValue:
return 1;
case swift::TypeKind::UnboundGeneric:
return 0;
case swift::TypeKind::BoundGenericClass:
return 0;
case swift::TypeKind::BoundGenericEnum:
return 0;
case swift::TypeKind::BoundGenericStruct:
return 0;
case swift::TypeKind::TypeVariable:
return 0;
case swift::TypeKind::ExistentialMetatype:
return 0;
case swift::TypeKind::DynamicSelf:
return 0;
case swift::TypeKind::SILBox:
return 0;
case swift::TypeKind::SILFunction:
return 0;
case swift::TypeKind::SILBlockStorage:
return 0;
case swift::TypeKind::InOut:
return 0;
case swift::TypeKind::Unresolved:
return 0;
case swift::TypeKind::Optional:
case swift::TypeKind::NameAlias:
case swift::TypeKind::Paren:
case swift::TypeKind::Dictionary:
case swift::TypeKind::ArraySlice:
assert(false && "Not a canonical type");
break;
}
return 0;
}
static int64_t GetInstanceVariableOffset_Metadata(
ValueObject *valobj, ExecutionContext *exe_ctx, const CompilerType &type,
ConstString ivar_name, const CompilerType &ivar_type) {
Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_TYPES));
if (log)
log->Printf(
"[GetInstanceVariableOffset_Metadata] ivar_name = %s, type = %s",
ivar_name.AsCString(), type.GetTypeName().AsCString());
Process *process = exe_ctx->GetProcessPtr();
if (process) {
SwiftLanguageRuntime *runtime = process->GetSwiftLanguageRuntime();
if (runtime) {
Status error;
if (auto offset =
runtime->GetMemberVariableOffset(type, valobj, ivar_name, &error)) {
if (log)
log->Printf("[GetInstanceVariableOffset_Metadata] for %s: %llu",
ivar_name.AsCString(), *offset);
return *offset;
}
else if (log) {
log->Printf("[GetInstanceVariableOffset_Metadata] resolver failure: %s",
error.AsCString());
}
} else if (log)
log->Printf("[GetInstanceVariableOffset_Metadata] no runtime");
} else if (log)
log->Printf("[GetInstanceVariableOffset_Metadata] no process");
return LLDB_INVALID_IVAR_OFFSET;
}
static int64_t GetInstanceVariableOffset(ValueObject *valobj,
ExecutionContext *exe_ctx,
const CompilerType &class_type,
const char *ivar_name,
const CompilerType &ivar_type) {
int64_t offset = LLDB_INVALID_IVAR_OFFSET;
if (ivar_name && ivar_name[0]) {
if (exe_ctx) {
Target *target = exe_ctx->GetTargetPtr();
if (target) {
offset = GetInstanceVariableOffset_Metadata(
valobj, exe_ctx, class_type, ConstString(ivar_name), ivar_type);
}
}
}
return offset;
}
bool SwiftASTContext::IsNonTriviallyManagedReferenceType(
const CompilerType &type, NonTriviallyManagedReferenceStrategy &strategy,
CompilerType *underlying_type) {
if (auto ast =
llvm::dyn_cast_or_null<SwiftASTContext>(type.GetTypeSystem())) {
swift::CanType swift_can_type(GetCanonicalSwiftType(type));
const swift::TypeKind type_kind = swift_can_type->getKind();
switch (type_kind) {
default:
break;
case swift::TypeKind::UnmanagedStorage: {
strategy = NonTriviallyManagedReferenceStrategy::eUnmanaged;
if (underlying_type)
*underlying_type = CompilerType(
ast, swift_can_type->getReferenceStorageReferent()
.getPointer());
}
return true;
case swift::TypeKind::UnownedStorage: {
strategy = NonTriviallyManagedReferenceStrategy::eUnowned;
if (underlying_type)
*underlying_type = CompilerType(
ast, swift_can_type->getReferenceStorageReferent()
.getPointer());
}
return true;
case swift::TypeKind::WeakStorage: {
strategy = NonTriviallyManagedReferenceStrategy::eWeak;
if (underlying_type)
*underlying_type = CompilerType(
ast, swift_can_type->getReferenceStorageReferent()
.getPointer());
}
return true;
}
}
return false;
}
CompilerType SwiftASTContext::GetChildCompilerTypeAtIndex(
void *type, ExecutionContext *exe_ctx, size_t idx,
bool transparent_pointers, bool omit_empty_base_classes,
bool ignore_array_bounds, std::string &child_name,
uint32_t &child_byte_size, int32_t &child_byte_offset,
uint32_t &child_bitfield_bit_size, uint32_t &child_bitfield_bit_offset,
bool &child_is_base_class, bool &child_is_deref_of_parent,
ValueObject *valobj, uint64_t &language_flags) {
VALID_OR_RETURN(CompilerType());
if (!type)
return CompilerType();
language_flags = 0;
swift::CanType swift_can_type(GetCanonicalSwiftType(type));
const swift::TypeKind type_kind = swift_can_type->getKind();
switch (type_kind) {
case swift::TypeKind::Error:
case swift::TypeKind::BuiltinInteger:
case swift::TypeKind::BuiltinFloat:
case swift::TypeKind::BuiltinRawPointer:
case swift::TypeKind::BuiltinNativeObject:
case swift::TypeKind::BuiltinUnknownObject:
case swift::TypeKind::BuiltinUnsafeValueBuffer:
case swift::TypeKind::BuiltinBridgeObject:
case swift::TypeKind::BuiltinVector:
break;
case swift::TypeKind::UnmanagedStorage:
case swift::TypeKind::UnownedStorage:
case swift::TypeKind::WeakStorage:
return CompilerType(GetASTContext(),
swift_can_type->getReferenceStorageReferent())
.GetChildCompilerTypeAtIndex(
exe_ctx, idx, transparent_pointers, omit_empty_base_classes,
ignore_array_bounds, child_name, child_byte_size, child_byte_offset,
child_bitfield_bit_size, child_bitfield_bit_offset,
child_is_base_class, child_is_deref_of_parent, valobj,
language_flags);
case swift::TypeKind::GenericTypeParam:
case swift::TypeKind::DependentMember:
break;
case swift::TypeKind::Enum:
case swift::TypeKind::BoundGenericEnum: {
SwiftEnumDescriptor *cached_enum_info = GetCachedEnumInfo(type);
if (cached_enum_info &&
idx < cached_enum_info->GetNumElementsWithPayload()) {
const SwiftEnumDescriptor::ElementInfo *element_info =
cached_enum_info->GetElementWithPayloadAtIndex(idx);
child_name.assign(element_info->name.GetCString());
child_byte_size = element_info->payload_type.GetByteSize(
exe_ctx ? exe_ctx->GetBestExecutionContextScope() : NULL);
child_byte_offset = 0;
child_bitfield_bit_size = 0;
child_bitfield_bit_offset = 0;
child_is_base_class = false;
child_is_deref_of_parent = false;
if (element_info->is_indirect) {
language_flags |= LanguageFlags::eIsIndirectEnumCase;
return CompilerType(GetASTContext(),
GetASTContext()->TheRawPointerType.getPointer());
} else
return element_info->payload_type;
}
} break;
case swift::TypeKind::Tuple: {
auto tuple_type = cast<swift::TupleType>(swift_can_type);
if (idx >= tuple_type->getNumElements()) break;
const auto &child = tuple_type->getElement(idx);
// Format the integer.
llvm::SmallString<16> printed_idx;
llvm::raw_svector_ostream(printed_idx) << idx;
CompilerType child_type(GetASTContext(), child.getType().getPointer());
auto exe_ctx_scope =
exe_ctx ? exe_ctx->GetBestExecutionContextScope() : NULL;
child_name = GetTupleElementName(tuple_type, idx, printed_idx);
child_byte_size = child_type.GetByteSize(exe_ctx_scope);
child_is_base_class = false;
child_is_deref_of_parent = false;
CompilerType compiler_type(GetASTContext(), GetSwiftType(type));
child_byte_offset =
GetInstanceVariableOffset(valobj, exe_ctx, compiler_type,
printed_idx.c_str(), child_type);
child_bitfield_bit_size = 0;
child_bitfield_bit_offset = 0;
return child_type;
}
case swift::TypeKind::Class:
case swift::TypeKind::BoundGenericClass: {
auto class_decl = swift_can_type->getClassOrBoundGenericClass();
// Child 0 is the superclass, if there is one.
if (class_decl->hasSuperclass()) {
if (idx == 0) {
swift::Type superclass_swift_type = swift_can_type->getSuperclass();
CompilerType superclass_type(GetASTContext(),
superclass_swift_type.getPointer());
child_name = GetSuperclassName(superclass_type);
auto exe_ctx_scope =
exe_ctx ? exe_ctx->GetBestExecutionContextScope() : NULL;
child_byte_size = superclass_type.GetByteSize(exe_ctx_scope);
child_is_base_class = true;
child_is_deref_of_parent = false;
child_byte_offset = 0;
child_bitfield_bit_size = 0;
child_bitfield_bit_offset = 0;
language_flags |= LanguageFlags::eIgnoreInstancePointerness;
return superclass_type;
}
// Adjust the index to refer into the stored properties.
--idx;
}
LLVM_FALLTHROUGH;
}
case swift::TypeKind::Struct:
case swift::TypeKind::BoundGenericStruct: {
auto nominal = swift_can_type->getAnyNominal();
auto stored_properties = GetStoredProperties(nominal);
if (idx >= stored_properties.size()) break;
// Find the stored property with this index.
auto property = stored_properties[idx];
swift::Type child_swift_type = swift_can_type->getTypeOfMember(
nominal->getModuleContext(), property, nullptr);
CompilerType child_type(GetASTContext(), child_swift_type.getPointer());
auto exe_ctx_scope =
exe_ctx ? exe_ctx->GetBestExecutionContextScope() : NULL;
child_name = property->getBaseName().userFacingName();
child_byte_size = child_type.GetByteSize(exe_ctx_scope);
child_is_base_class = false;
child_is_deref_of_parent = false;
CompilerType compiler_type(GetASTContext(), GetSwiftType(type));
child_byte_offset =
GetInstanceVariableOffset(valobj, exe_ctx, compiler_type,
child_name.c_str(), child_type);
child_bitfield_bit_size = 0;
child_bitfield_bit_offset = 0;
return child_type;
}
case swift::TypeKind::Protocol:
case swift::TypeKind::ProtocolComposition: {
ProtocolInfo protocol_info;
if (!GetProtocolTypeInfo(
CompilerType(GetASTContext(), GetSwiftType(type)), protocol_info))
break;
if (idx >= protocol_info.m_num_storage_words) break;
CompilerType compiler_type(GetASTContext(), GetSwiftType(type));
CompilerType child_type;
std::tie(child_type, child_name) =
GetExistentialTypeChild(GetASTContext(), compiler_type, protocol_info,
idx);
auto exe_ctx_scope =
exe_ctx ? exe_ctx->GetBestExecutionContextScope() : nullptr;
child_byte_size = child_type.GetByteSize(exe_ctx_scope);
child_byte_offset = idx * child_byte_size;
child_bitfield_bit_size = 0;
child_bitfield_bit_offset = 0;
child_is_base_class = false;
child_is_deref_of_parent = false;
return child_type;
}
case swift::TypeKind::ExistentialMetatype:
case swift::TypeKind::Metatype:
break;
case swift::TypeKind::Module:
case swift::TypeKind::Archetype:
case swift::TypeKind::Function:
case swift::TypeKind::GenericFunction:
break;
case swift::TypeKind::LValue:
if (idx < GetNumChildren(type, omit_empty_base_classes)) {
CompilerType pointee_clang_type(GetNonReferenceType(type));
Flags pointee_clang_type_flags(pointee_clang_type.GetTypeInfo());
const char *parent_name = valobj ? valobj->GetName().GetCString() : NULL;
if (parent_name) {
child_name.assign(1, '&');
child_name += parent_name;
}
// We have a pointer to a simple type
if (idx == 0) {
child_byte_size = pointee_clang_type.GetByteSize(
exe_ctx ? exe_ctx->GetBestExecutionContextScope() : NULL);
child_byte_offset = 0;
return pointee_clang_type;
}
}
break;
case swift::TypeKind::UnboundGeneric:
break;
case swift::TypeKind::TypeVariable:
break;
case swift::TypeKind::DynamicSelf:
case swift::TypeKind::SILBox:
case swift::TypeKind::SILFunction:
case swift::TypeKind::SILBlockStorage:
case swift::TypeKind::InOut:
case swift::TypeKind::Unresolved:
break;
case swift::TypeKind::Optional:
case swift::TypeKind::NameAlias:
case swift::TypeKind::Paren:
case swift::TypeKind::Dictionary:
case swift::TypeKind::ArraySlice:
assert(false && "Not a canonical type");
break;
}
return CompilerType();
}
// Look for a child member (doesn't include base classes, but it does include
// their members) in the type hierarchy. Returns an index path into "type"
// describing how to reach the appropriate member.
//
// class A
// {
// public:
// int m_a;
// int m_b;
// };
//
// class B
// {
// };
//
// class C :
// public B,
// public A
// {
// };
//
// If we have a clang type that describes "class C", and we wanted to look for
// "m_b" in it:
//
// With omit_empty_base_classes == false we would get an integer array back
// with:
// { 1, 1 }
// The first index 1 is the child index for "class A" within class C.
// The second index 1 is the child index for "m_b" within class A.
//
// With omit_empty_base_classes == true we would get an integer array back with:
// { 0, 1 }
// The first index 0 is the child index for "class A" within class C (since
// class B doesn't have any members it doesn't count).
// The second index 1 is the child index for "m_b" within class A.
size_t SwiftASTContext::GetIndexOfChildMemberWithName(
void *type, const char *name, bool omit_empty_base_classes,
std::vector<uint32_t> &child_indexes) {
VALID_OR_RETURN(0);
if (type && name && name[0]) {
swift::CanType swift_can_type(GetCanonicalSwiftType(type));
const swift::TypeKind type_kind = swift_can_type->getKind();
switch (type_kind) {
case swift::TypeKind::Error:
case swift::TypeKind::BuiltinInteger:
case swift::TypeKind::BuiltinFloat:
case swift::TypeKind::BuiltinRawPointer:
case swift::TypeKind::BuiltinNativeObject:
case swift::TypeKind::BuiltinUnknownObject:
case swift::TypeKind::BuiltinUnsafeValueBuffer:
case swift::TypeKind::BuiltinBridgeObject:
case swift::TypeKind::BuiltinVector:
break;
case swift::TypeKind::UnmanagedStorage:
case swift::TypeKind::UnownedStorage:
case swift::TypeKind::WeakStorage:
return CompilerType(GetASTContext(),
swift_can_type->getReferenceStorageReferent())
.GetIndexOfChildMemberWithName(name, omit_empty_base_classes,
child_indexes);
case swift::TypeKind::GenericTypeParam:
case swift::TypeKind::DependentMember:
break;
case swift::TypeKind::Enum:
case swift::TypeKind::BoundGenericEnum: {
SwiftEnumDescriptor *cached_enum_info = GetCachedEnumInfo(type);
if (cached_enum_info) {
ConstString const_name(name);
const size_t num_sized_elements =
cached_enum_info->GetNumElementsWithPayload();
for (size_t i = 0; i < num_sized_elements; ++i) {
if (cached_enum_info->GetElementWithPayloadAtIndex(i)->name ==
const_name) {
child_indexes.push_back(i);
return child_indexes.size();
}
}
}
} break;
case swift::TypeKind::Tuple: {
      // For tuples, always look for the member by number first, since a tuple
      // element can be named yet still be accessed by its number.
swift::TupleType *tuple_type = swift_can_type->castTo<swift::TupleType>();
uint32_t tuple_idx = StringConvert::ToUInt32(name, UINT32_MAX);
if (tuple_idx != UINT32_MAX) {
if (tuple_idx < tuple_type->getNumElements()) {
child_indexes.push_back(tuple_idx);
return child_indexes.size();
} else
return 0;
}
// Otherwise, perform lookup by name.
for (uint32_t tuple_idx : swift::range(tuple_type->getNumElements())) {
if (tuple_type->getElement(tuple_idx).getName().str() == name) {
child_indexes.push_back(tuple_idx);
return child_indexes.size();
}
}
return 0;
}
case swift::TypeKind::Struct:
case swift::TypeKind::Class:
case swift::TypeKind::BoundGenericClass:
case swift::TypeKind::BoundGenericStruct: {
auto nominal = swift_can_type->getAnyNominal();
auto stored_properties = GetStoredProperties(nominal);
auto class_decl = llvm::dyn_cast<swift::ClassDecl>(nominal);
// Search the stored properties.
for (unsigned idx : indices(stored_properties)) {
auto property = stored_properties[idx];
if (property->getBaseName().userFacingName() == name) {
// We found it!
// If we have a superclass, adjust the index accordingly.
if (class_decl && class_decl->hasSuperclass())
++idx;
child_indexes.push_back(idx);
return child_indexes.size();
}
}
// Search the superclass, if there is one.
if (class_decl && class_decl->hasSuperclass()) {
// Push index zero for the base class
child_indexes.push_back(0);
// Look in the superclass.
swift::Type superclass_swift_type = swift_can_type->getSuperclass();
CompilerType superclass_type(GetASTContext(),
superclass_swift_type.getPointer());
if (superclass_type.GetIndexOfChildMemberWithName(
name, omit_empty_base_classes, child_indexes))
return child_indexes.size();
// We didn't find a stored property matching "name" in our
// superclass, pop the superclass zero index that
// we pushed on above.
child_indexes.pop_back();
}
} break;
case swift::TypeKind::Protocol:
case swift::TypeKind::ProtocolComposition: {
ProtocolInfo protocol_info;
if (!GetProtocolTypeInfo(CompilerType(GetASTContext(),
GetSwiftType(type)), protocol_info))
break;
CompilerType compiler_type(GetASTContext(), GetSwiftType(type));
for (unsigned idx : swift::range(protocol_info.m_num_storage_words)) {
CompilerType child_type;
std::string child_name;
std::tie(child_type, child_name) =
GetExistentialTypeChild(GetASTContext(), compiler_type,
protocol_info, idx);
if (name == child_name) {
child_indexes.push_back(idx);
return child_indexes.size();
}
}
} break;
case swift::TypeKind::ExistentialMetatype:
case swift::TypeKind::Metatype:
break;
case swift::TypeKind::Module:
case swift::TypeKind::Archetype:
case swift::TypeKind::Function:
case swift::TypeKind::GenericFunction:
break;
case swift::TypeKind::InOut:
case swift::TypeKind::LValue: {
CompilerType pointee_clang_type(GetNonReferenceType(type));
if (pointee_clang_type.IsAggregateType()) {
return pointee_clang_type.GetIndexOfChildMemberWithName(
name, omit_empty_base_classes, child_indexes);
}
} break;
case swift::TypeKind::UnboundGeneric:
break;
case swift::TypeKind::TypeVariable:
break;
case swift::TypeKind::DynamicSelf:
case swift::TypeKind::SILBox:
case swift::TypeKind::SILFunction:
case swift::TypeKind::SILBlockStorage:
case swift::TypeKind::Unresolved:
break;
case swift::TypeKind::Optional:
case swift::TypeKind::NameAlias:
case swift::TypeKind::Paren:
case swift::TypeKind::Dictionary:
case swift::TypeKind::ArraySlice:
assert(false && "Not a canonical type");
break;
}
}
return 0;
}
// Get the index of the child of "clang_type" whose name matches. This function
// doesn't descend into the children, but only looks one level deep and name
// matches can include base class names.
uint32_t
SwiftASTContext::GetIndexOfChildWithName(void *type, const char *name,
bool omit_empty_base_classes) {
VALID_OR_RETURN(UINT32_MAX);
std::vector<uint32_t> child_indexes;
size_t num_child_indexes =
GetIndexOfChildMemberWithName(type, name, omit_empty_base_classes,
child_indexes);
return num_child_indexes == 1 ? child_indexes.front() : UINT32_MAX;
}
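// Note (illustrative): because the helper above returns a path of indexes,
// this wrapper only reports a result when the match is exactly one level
// deep; empty or nested matches yield UINT32_MAX.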
size_t SwiftASTContext::GetNumTemplateArguments(void *type) {
if (!type)
return 0;
swift::CanType swift_can_type(GetCanonicalSwiftType(type));
const swift::TypeKind type_kind = swift_can_type->getKind();
switch (type_kind) {
case swift::TypeKind::UnboundGeneric: {
swift::UnboundGenericType *unbound_generic_type =
swift_can_type->castTo<swift::UnboundGenericType>();
auto *nominal_type_decl = unbound_generic_type->getDecl();
swift::GenericParamList *generic_param_list =
nominal_type_decl->getGenericParams();
return generic_param_list->getParams().size();
} break;
case swift::TypeKind::BoundGenericClass:
case swift::TypeKind::BoundGenericStruct:
case swift::TypeKind::BoundGenericEnum: {
swift::BoundGenericType *bound_generic_type =
swift_can_type->castTo<swift::BoundGenericType>();
return bound_generic_type->getGenericArgs().size();
}
default:
break;
}
return 0;
}
bool SwiftASTContext::GetSelectedEnumCase(const CompilerType &type,
const DataExtractor &data,
ConstString *name, bool *has_payload,
CompilerType *payload,
bool *is_indirect) {
if (auto ast =
llvm::dyn_cast_or_null<SwiftASTContext>(type.GetTypeSystem())) {
swift::CanType swift_can_type(GetCanonicalSwiftType(type));
const swift::TypeKind type_kind = swift_can_type->getKind();
switch (type_kind) {
default:
break;
case swift::TypeKind::Enum:
case swift::TypeKind::BoundGenericEnum: {
SwiftEnumDescriptor *cached_enum_info =
ast->GetCachedEnumInfo(swift_can_type.getPointer());
if (cached_enum_info) {
auto enum_elem_info = cached_enum_info->GetElementFromData(data);
if (enum_elem_info) {
if (name)
*name = enum_elem_info->name;
if (has_payload)
*has_payload = enum_elem_info->has_payload;
if (payload)
*payload = enum_elem_info->payload_type;
if (is_indirect)
*is_indirect = enum_elem_info->is_indirect;
return true;
}
}
} break;
}
}
return false;
}
lldb::GenericKind SwiftASTContext::GetGenericArgumentKind(void *type,
size_t idx) {
if (type) {
swift::CanType swift_can_type(GetCanonicalSwiftType(type));
if (auto *unbound_generic_type =
swift_can_type->getAs<swift::UnboundGenericType>())
return eUnboundGenericKindType;
if (auto *bound_generic_type =
swift_can_type->getAs<swift::BoundGenericType>())
if (idx < bound_generic_type->getGenericArgs().size())
return eBoundGenericKindType;
}
return eNullGenericKindType;
}
CompilerType SwiftASTContext::GetBoundGenericType(void *type, size_t idx) {
VALID_OR_RETURN(CompilerType());
if (type) {
swift::CanType swift_can_type(GetCanonicalSwiftType(type));
if (auto *bound_generic_type =
swift_can_type->getAs<swift::BoundGenericType>())
if (idx < bound_generic_type->getGenericArgs().size())
return CompilerType(
GetASTContext(),
bound_generic_type->getGenericArgs()[idx].getPointer());
}
return CompilerType();
}
CompilerType SwiftASTContext::GetUnboundGenericType(void *type, size_t idx) {
VALID_OR_RETURN(CompilerType());
if (type) {
swift::CanType swift_can_type(GetCanonicalSwiftType(type));
if (auto *unbound_generic_type =
swift_can_type->getAs<swift::UnboundGenericType>()) {
auto *nominal_type_decl = unbound_generic_type->getDecl();
swift::GenericSignature *generic_sig =
nominal_type_decl->getGenericSignature();
auto depTy = generic_sig->getGenericParams()[idx];
return CompilerType(GetASTContext(),
nominal_type_decl->mapTypeIntoContext(depTy)
->castTo<swift::ArchetypeType>());
}
}
return CompilerType();
}
CompilerType SwiftASTContext::GetGenericArgumentType(void *type, size_t idx) {
VALID_OR_RETURN(CompilerType());
switch (GetGenericArgumentKind(type, idx)) {
case eBoundGenericKindType:
return GetBoundGenericType(type, idx);
case eUnboundGenericKindType:
return GetUnboundGenericType(type, idx);
default:
break;
}
return CompilerType();
}
CompilerType SwiftASTContext::GetTypeForFormatters(void *type) {
VALID_OR_RETURN(CompilerType());
if (type) {
swift::Type swift_type(GetSwiftType(type));
return CompilerType(GetASTContext(), swift_type);
}
return CompilerType();
}
LazyBool SwiftASTContext::ShouldPrintAsOneLiner(void *type,
ValueObject *valobj) {
if (type) {
CompilerType can_compiler_type(GetCanonicalType(type));
if (IsImportedType(can_compiler_type, nullptr))
return eLazyBoolNo;
}
if (valobj) {
if (valobj->IsBaseClass())
return eLazyBoolNo;
if ((valobj->GetLanguageFlags() & LanguageFlags::eIsIndirectEnumCase) ==
LanguageFlags::eIsIndirectEnumCase)
return eLazyBoolNo;
}
return eLazyBoolCalculate;
}
bool SwiftASTContext::IsMeaninglessWithoutDynamicResolution(void *type) {
// ((swift::TypeBase*)type)->dump();
if (type) {
swift::CanType swift_can_type(GetCanonicalSwiftType(type));
return swift_can_type->hasTypeParameter();
const swift::TypeKind type_kind = swift_can_type->getKind();
switch (type_kind) {
case swift::TypeKind::GenericTypeParam:
return true;
default:
return false;
}
}
return false;
}
//----------------------------------------------------------------------
// Dumping types
//----------------------------------------------------------------------
#define DEPTH_INCREMENT 2
void SwiftASTContext::DumpValue(
void *type, ExecutionContext *exe_ctx, Stream *s, lldb::Format format,
const lldb_private::DataExtractor &data, lldb::offset_t data_byte_offset,
size_t data_byte_size, uint32_t bitfield_bit_size,
uint32_t bitfield_bit_offset, bool show_types, bool show_summary,
bool verbose, uint32_t depth) {}
bool SwiftASTContext::DumpTypeValue(
void *type, Stream *s, lldb::Format format,
const lldb_private::DataExtractor &data, lldb::offset_t byte_offset,
size_t byte_size, uint32_t bitfield_bit_size, uint32_t bitfield_bit_offset,
ExecutionContextScope *exe_scope, bool is_base_class) {
VALID_OR_RETURN(false);
if (!type)
return false;
swift::CanType swift_can_type(GetCanonicalSwiftType(type));
const swift::TypeKind type_kind = swift_can_type->getKind();
switch (type_kind) {
case swift::TypeKind::Error:
break;
case swift::TypeKind::Class:
case swift::TypeKind::BoundGenericClass:
// If we have a class that is in a variable then it is a pointer,
// else if it is a base class, it has no value.
if (is_base_class)
break;
// Fall through to case below
case swift::TypeKind::BuiltinInteger:
case swift::TypeKind::BuiltinFloat:
case swift::TypeKind::BuiltinRawPointer:
case swift::TypeKind::BuiltinNativeObject:
case swift::TypeKind::BuiltinUnsafeValueBuffer:
case swift::TypeKind::BuiltinUnknownObject:
case swift::TypeKind::BuiltinBridgeObject:
case swift::TypeKind::Archetype:
case swift::TypeKind::Function:
case swift::TypeKind::GenericFunction:
case swift::TypeKind::GenericTypeParam:
case swift::TypeKind::DependentMember:
case swift::TypeKind::LValue: {
uint32_t item_count = 1;
    // For a few formats we might need to modify our size and count depending
    // on how we are trying to display the value...
switch (format) {
default:
case eFormatBoolean:
case eFormatBinary:
case eFormatComplex:
case eFormatCString: // NULL terminated C strings
case eFormatDecimal:
case eFormatEnum:
case eFormatHex:
case eFormatHexUppercase:
case eFormatFloat:
case eFormatOctal:
case eFormatOSType:
case eFormatUnsigned:
case eFormatPointer:
case eFormatVectorOfChar:
case eFormatVectorOfSInt8:
case eFormatVectorOfUInt8:
case eFormatVectorOfSInt16:
case eFormatVectorOfUInt16:
case eFormatVectorOfSInt32:
case eFormatVectorOfUInt32:
case eFormatVectorOfSInt64:
case eFormatVectorOfUInt64:
case eFormatVectorOfFloat32:
case eFormatVectorOfFloat64:
case eFormatVectorOfUInt128:
break;
case eFormatAddressInfo:
if (byte_size == 0) {
byte_size = exe_scope->CalculateTarget()
->GetArchitecture()
.GetAddressByteSize();
item_count = 1;
}
break;
case eFormatChar:
case eFormatCharPrintable:
case eFormatCharArray:
case eFormatBytes:
case eFormatBytesWithASCII:
item_count = byte_size;
byte_size = 1;
break;
case eFormatUnicode16:
item_count = byte_size / 2;
byte_size = 2;
break;
case eFormatUnicode32:
item_count = byte_size / 4;
byte_size = 4;
break;
}
return DumpDataExtractor(data, s, byte_offset, format, byte_size, item_count, UINT32_MAX,
LLDB_INVALID_ADDRESS, bitfield_bit_size,
bitfield_bit_offset, exe_scope);
} break;
case swift::TypeKind::BuiltinVector:
break;
case swift::TypeKind::Tuple:
break;
case swift::TypeKind::UnmanagedStorage:
case swift::TypeKind::UnownedStorage:
case swift::TypeKind::WeakStorage:
return CompilerType(GetASTContext(),
swift_can_type->getReferenceStorageReferent())
.DumpTypeValue(s, format, data, byte_offset, byte_size,
bitfield_bit_size, bitfield_bit_offset, exe_scope,
is_base_class);
case swift::TypeKind::Enum:
case swift::TypeKind::BoundGenericEnum: {
SwiftEnumDescriptor *cached_enum_info = GetCachedEnumInfo(type);
if (cached_enum_info) {
auto enum_elem_info = cached_enum_info->GetElementFromData(data);
if (enum_elem_info)
s->Printf("%s", enum_elem_info->name.GetCString());
else {
lldb::offset_t ptr = 0;
if (data.GetByteSize())
s->Printf("<invalid> (0x%" PRIx8 ")", data.GetU8(&ptr));
else
s->Printf("<empty>");
}
return true;
} else
s->Printf("<unknown type>");
} break;
case swift::TypeKind::Struct:
case swift::TypeKind::Protocol:
return false;
case swift::TypeKind::ExistentialMetatype:
case swift::TypeKind::Metatype: {
return DumpDataExtractor(data, s, byte_offset, eFormatPointer, byte_size, 1, UINT32_MAX,
LLDB_INVALID_ADDRESS, bitfield_bit_size,
bitfield_bit_offset, exe_scope);
} break;
case swift::TypeKind::Module:
case swift::TypeKind::ProtocolComposition:
case swift::TypeKind::UnboundGeneric:
case swift::TypeKind::BoundGenericStruct:
case swift::TypeKind::TypeVariable:
case swift::TypeKind::DynamicSelf:
case swift::TypeKind::SILBox:
case swift::TypeKind::SILFunction:
case swift::TypeKind::SILBlockStorage:
case swift::TypeKind::InOut:
case swift::TypeKind::Unresolved:
break;
case swift::TypeKind::Optional:
case swift::TypeKind::NameAlias:
case swift::TypeKind::Paren:
case swift::TypeKind::Dictionary:
case swift::TypeKind::ArraySlice:
assert(false && "Not a canonical type");
break;
}
return 0;
}
bool SwiftASTContext::IsImportedType(const CompilerType &type,
CompilerType *original_type) {
bool success = false;
if (llvm::dyn_cast_or_null<SwiftASTContext>(type.GetTypeSystem())) {
do {
swift::CanType swift_can_type(GetCanonicalSwiftType(type));
swift::NominalType *nominal_type =
swift_can_type->getAs<swift::NominalType>();
if (!nominal_type)
break;
swift::NominalTypeDecl *nominal_type_decl = nominal_type->getDecl();
if (nominal_type_decl && nominal_type_decl->hasClangNode()) {
const clang::Decl *clang_decl = nominal_type_decl->getClangDecl();
if (!clang_decl)
break;
success = true;
if (!original_type)
break;
if (const clang::ObjCInterfaceDecl *objc_interface_decl =
llvm::dyn_cast<clang::ObjCInterfaceDecl>(
clang_decl)) // ObjCInterfaceDecl is not a TypeDecl
{
*original_type =
CompilerType(&objc_interface_decl->getASTContext(),
clang::QualType::getFromOpaquePtr(
objc_interface_decl->getTypeForDecl()));
} else if (const clang::TypeDecl *type_decl =
llvm::dyn_cast<clang::TypeDecl>(clang_decl)) {
*original_type = CompilerType(
&type_decl->getASTContext(),
clang::QualType::getFromOpaquePtr(type_decl->getTypeForDecl()));
} else // TODO: any more cases that we care about?
{
*original_type = CompilerType();
}
}
} while (0);
}
return success;
}
bool SwiftASTContext::IsImportedObjectiveCType(const CompilerType &type,
CompilerType *original_type) {
bool success = false;
if (llvm::dyn_cast_or_null<SwiftASTContext>(type.GetTypeSystem())) {
CompilerType local_original_type;
if (IsImportedType(type, &local_original_type)) {
if (local_original_type.IsValid()) {
ClangASTContext *clang_ast = llvm::dyn_cast_or_null<ClangASTContext>(
local_original_type.GetTypeSystem());
if (clang_ast &&
clang_ast->IsObjCObjectOrInterfaceType(local_original_type)) {
if (original_type)
*original_type = local_original_type;
success = true;
}
}
}
}
return success;
}
void SwiftASTContext::DumpSummary(void *type, ExecutionContext *exe_ctx,
Stream *s,
const lldb_private::DataExtractor &data,
lldb::offset_t data_byte_offset,
size_t data_byte_size) {}
size_t SwiftASTContext::ConvertStringToFloatValue(void *type, const char *s,
uint8_t *dst,
size_t dst_size) {
return 0;
}
void SwiftASTContext::DumpTypeDescription(void *type) {
StreamFile s(stdout, false);
DumpTypeDescription(type, &s);
}
void SwiftASTContext::DumpTypeDescription(void *type, Stream *s) {
DumpTypeDescription(type, s, false, true);
}
void SwiftASTContext::DumpTypeDescription(void *type,
bool print_help_if_available,
bool print_extensions_if_available) {
StreamFile s(stdout, false);
DumpTypeDescription(type, &s, print_help_if_available,
print_extensions_if_available);
}
static void PrintSwiftNominalType(swift::NominalTypeDecl *nominal_type_decl,
Stream *s, bool print_help_if_available,
bool print_extensions_if_available) {
if (nominal_type_decl && s) {
std::string buffer;
llvm::raw_string_ostream ostream(buffer);
const swift::PrintOptions &print_options(
SwiftASTContext::GetUserVisibleTypePrintingOptions(
print_help_if_available));
nominal_type_decl->print(ostream, print_options);
ostream.flush();
if (buffer.empty() == false)
s->Printf("%s\n", buffer.c_str());
if (print_extensions_if_available) {
for (auto ext : nominal_type_decl->getExtensions()) {
if (ext) {
buffer.clear();
llvm::raw_string_ostream ext_ostream(buffer);
ext->print(ext_ostream, print_options);
ext_ostream.flush();
if (buffer.empty() == false)
s->Printf("%s\n", buffer.c_str());
}
}
}
}
}
void SwiftASTContext::DumpTypeDescription(void *type, Stream *s,
bool print_help_if_available,
bool print_extensions_if_available) {
llvm::SmallVector<char, 1024> buf;
llvm::raw_svector_ostream llvm_ostrm(buf);
if (type) {
swift::CanType swift_can_type(GetCanonicalSwiftType(type));
switch (swift_can_type->getKind()) {
case swift::TypeKind::Module: {
swift::ModuleType *module_type =
swift_can_type->castTo<swift::ModuleType>();
swift::ModuleDecl *module = module_type->getModule();
llvm::SmallVector<swift::Decl *, 10> decls;
module->getDisplayDecls(decls);
for (swift::Decl *decl : decls) {
swift::DeclKind kind = decl->getKind();
if (kind >= swift::DeclKind::First_TypeDecl &&
kind <= swift::DeclKind::Last_TypeDecl) {
swift::TypeDecl *type_decl =
llvm::dyn_cast_or_null<swift::TypeDecl>(decl);
if (type_decl) {
CompilerType clang_type(&module->getASTContext(),
type_decl->getDeclaredInterfaceType().getPointer());
if (clang_type) {
Flags clang_type_flags(clang_type.GetTypeInfo());
DumpTypeDescription(clang_type.GetOpaqueQualType(), s,
print_help_if_available,
print_extensions_if_available);
}
}
} else if (kind == swift::DeclKind::Func ||
kind == swift::DeclKind::Var) {
std::string buffer;
llvm::raw_string_ostream stream(buffer);
decl->print(stream,
SwiftASTContext::GetUserVisibleTypePrintingOptions(
print_help_if_available));
stream.flush();
s->Printf("%s\n", buffer.c_str());
} else if (kind == swift::DeclKind::Import) {
swift::ImportDecl *import_decl =
llvm::dyn_cast_or_null<swift::ImportDecl>(decl);
if (import_decl) {
switch (import_decl->getImportKind()) {
case swift::ImportKind::Module: {
swift::ModuleDecl *imported_module = import_decl->getModule();
if (imported_module) {
s->Printf("import %s\n", imported_module->getName().get());
}
} break;
default: {
for (swift::Decl *imported_decl : import_decl->getDecls()) {
// all of the non-module things you can import should be a
// ValueDecl
if (swift::ValueDecl *imported_value_decl =
llvm::dyn_cast_or_null<swift::ValueDecl>(
imported_decl)) {
if (swift::TypeBase *decl_type =
imported_value_decl->getInterfaceType().getPointer()) {
DumpTypeDescription(decl_type, s,
print_help_if_available,
print_extensions_if_available);
}
}
}
} break;
}
}
}
}
break;
}
case swift::TypeKind::Metatype: {
s->PutCString("metatype ");
swift::MetatypeType *metatype_type =
swift_can_type->castTo<swift::MetatypeType>();
DumpTypeDescription(metatype_type->getInstanceType().getPointer(),
print_help_if_available,
print_extensions_if_available);
} break;
case swift::TypeKind::UnboundGeneric: {
swift::UnboundGenericType *unbound_generic_type =
swift_can_type->castTo<swift::UnboundGenericType>();
auto nominal_type_decl = llvm::dyn_cast<swift::NominalTypeDecl>(
unbound_generic_type->getDecl());
if (nominal_type_decl) {
PrintSwiftNominalType(nominal_type_decl, s, print_help_if_available,
print_extensions_if_available);
}
} break;
case swift::TypeKind::GenericFunction:
case swift::TypeKind::Function: {
swift::AnyFunctionType *any_function_type =
swift_can_type->castTo<swift::AnyFunctionType>();
std::string buffer;
llvm::raw_string_ostream ostream(buffer);
const swift::PrintOptions &print_options(
SwiftASTContext::GetUserVisibleTypePrintingOptions(
print_help_if_available));
any_function_type->print(ostream, print_options);
ostream.flush();
if (buffer.empty() == false)
s->Printf("%s\n", buffer.c_str());
} break;
case swift::TypeKind::Tuple: {
swift::TupleType *tuple_type = swift_can_type->castTo<swift::TupleType>();
std::string buffer;
llvm::raw_string_ostream ostream(buffer);
const swift::PrintOptions &print_options(
SwiftASTContext::GetUserVisibleTypePrintingOptions(
print_help_if_available));
tuple_type->print(ostream, print_options);
ostream.flush();
if (buffer.empty() == false)
s->Printf("%s\n", buffer.c_str());
} break;
case swift::TypeKind::BoundGenericClass:
case swift::TypeKind::BoundGenericEnum:
case swift::TypeKind::BoundGenericStruct: {
swift::BoundGenericType *bound_generic_type =
swift_can_type->castTo<swift::BoundGenericType>();
swift::NominalTypeDecl *nominal_type_decl =
bound_generic_type->getDecl();
PrintSwiftNominalType(nominal_type_decl, s, print_help_if_available,
print_extensions_if_available);
} break;
case swift::TypeKind::BuiltinInteger: {
swift::BuiltinIntegerType *builtin_integer_type =
swift_can_type->castTo<swift::BuiltinIntegerType>();
s->Printf("builtin integer type of width %u bits\n",
builtin_integer_type->getWidth().getGreatestWidth());
break;
}
case swift::TypeKind::BuiltinFloat: {
swift::BuiltinFloatType *builtin_float_type =
swift_can_type->castTo<swift::BuiltinFloatType>();
s->Printf("builtin floating-point type of width %u bits\n",
builtin_float_type->getBitWidth());
break;
}
case swift::TypeKind::ProtocolComposition: {
swift::ProtocolCompositionType *protocol_composition_type =
swift_can_type->castTo<swift::ProtocolCompositionType>();
std::string buffer;
llvm::raw_string_ostream ostream(buffer);
const swift::PrintOptions &print_options(
SwiftASTContext::GetUserVisibleTypePrintingOptions(
print_help_if_available));
protocol_composition_type->print(ostream, print_options);
ostream.flush();
if (buffer.empty() == false)
s->Printf("%s\n", buffer.c_str());
break;
}
default: {
swift::NominalType *nominal_type =
llvm::dyn_cast_or_null<swift::NominalType>(
swift_can_type.getPointer());
if (nominal_type) {
swift::NominalTypeDecl *nominal_type_decl = nominal_type->getDecl();
PrintSwiftNominalType(nominal_type_decl, s, print_help_if_available,
print_extensions_if_available);
}
} break;
}
if (buf.size() > 0) {
s->Write(buf.data(), buf.size());
}
}
}
TypeSP SwiftASTContext::GetCachedType(const ConstString &mangled) {
TypeSP type_sp;
if (m_swift_type_map.Lookup(mangled.GetCString(), type_sp))
return type_sp;
else
return TypeSP();
}
void SwiftASTContext::SetCachedType(const ConstString &mangled,
const TypeSP &type_sp) {
m_swift_type_map.Insert(mangled.GetCString(), type_sp);
}
DWARFASTParser *SwiftASTContext::GetDWARFParser() {
if (!m_dwarf_ast_parser_ap)
m_dwarf_ast_parser_ap.reset(new DWARFASTParserSwift(*this));
return m_dwarf_ast_parser_ap.get();
}
std::vector<lldb::DataBufferSP> &
SwiftASTContext::GetASTVectorForModule(const Module *module) {
return m_ast_file_data_map[const_cast<Module *>(module)];
}
SwiftASTContextForExpressions::SwiftASTContextForExpressions(Target &target)
: SwiftASTContext(target.GetArchitecture().GetTriple().getTriple().c_str(),
&target),
m_persistent_state_up(new SwiftPersistentExpressionState) {}
UserExpression *SwiftASTContextForExpressions::GetUserExpression(
llvm::StringRef expr, llvm::StringRef prefix, lldb::LanguageType language,
Expression::ResultType desired_type,
const EvaluateExpressionOptions &options) {
TargetSP target_sp = m_target_wp.lock();
if (!target_sp)
return nullptr;
return new SwiftUserExpression(*target_sp.get(), expr, prefix, language,
desired_type, options);
}
PersistentExpressionState *
SwiftASTContextForExpressions::GetPersistentExpressionState() {
return m_persistent_state_up.get();
}
| 1 | 17,075 | `if (llvm::dyn_cast_or_null<swift::WeakStorageType>(GetSwiftType(compiler_type).getPointer())` ? | apple-swift-lldb | cpp |
@@ -144,13 +144,13 @@ func (e *deployExecutor) generateCanaryManifests(manifests []provider.Manifest,
// Find config map manifests and duplicate them for CANARY variant.
configMaps := findConfigMapManifests(manifests)
- configMaps = duplicateManifests(configMaps, suffix)
- canaryManifests = append(canaryManifests, configMaps...)
+ canaryConfigMaps := duplicateManifests(configMaps, suffix)
+ canaryManifests = append(canaryManifests, canaryConfigMaps...)
// Find secret manifests and duplicate them for CANARY variant.
secrets := findSecretManifests(manifests)
- secrets = duplicateManifests(secrets, suffix)
- canaryManifests = append(canaryManifests, secrets...)
+ canarySecrets := duplicateManifests(secrets, suffix)
+ canaryManifests = append(canaryManifests, canarySecrets...)
// Generate new workload manifests for CANARY variant.
// The generated ones will mount to the new ConfigMaps and Secrets. | 1 | // Copyright 2020 The PipeCD Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package kubernetes
import (
"context"
"fmt"
"strings"
provider "github.com/pipe-cd/pipe/pkg/app/piped/cloudprovider/kubernetes"
"github.com/pipe-cd/pipe/pkg/app/piped/executor"
"github.com/pipe-cd/pipe/pkg/config"
"github.com/pipe-cd/pipe/pkg/model"
)
const (
canaryVariant = "canary"
addedCanaryResourcesMetadataKey = "canary-resources"
)
func (e *deployExecutor) ensureCanaryRollout(ctx context.Context) model.StageStatus {
options := e.StageConfig.K8sCanaryRolloutStageOptions
if options == nil {
e.LogPersister.Errorf("Malformed configuration for stage %s", e.Stage.Name)
return model.StageStatus_STAGE_FAILURE
}
// Load the manifests at the triggered commit.
e.LogPersister.Infof("Loading manifests at commit %s for handling", e.commit)
manifests, err := loadManifests(
ctx,
e.Deployment.ApplicationId,
e.commit,
e.AppManifestsCache,
e.provider,
e.Logger,
)
if err != nil {
e.LogPersister.Errorf("Failed while loading manifests (%v)", err)
return model.StageStatus_STAGE_FAILURE
}
e.LogPersister.Successf("Successfully loaded %d manifests", len(manifests))
if len(manifests) == 0 {
e.LogPersister.Error("This application has no Kubernetes manifests to handle")
return model.StageStatus_STAGE_FAILURE
}
// Find and generate workload & service manifests for CANARY variant.
canaryManifests, err := e.generateCanaryManifests(manifests, *options)
if err != nil {
e.LogPersister.Errorf("Unable to generate manifests for CANARY variant (%v)", err)
return model.StageStatus_STAGE_FAILURE
}
// Add builtin annotations for tracking application live state.
addBuiltinAnnontations(
canaryManifests,
canaryVariant,
e.commit,
e.PipedConfig.PipedID,
e.Deployment.ApplicationId,
)
// Store added resource keys into metadata for cleaning later.
addedResources := make([]string, 0, len(canaryManifests))
for _, m := range canaryManifests {
addedResources = append(addedResources, m.Key.String())
}
metadata := strings.Join(addedResources, ",")
err = e.MetadataStore.Set(ctx, addedCanaryResourcesMetadataKey, metadata)
if err != nil {
e.LogPersister.Errorf("Unable to save deployment metadata (%v)", err)
return model.StageStatus_STAGE_FAILURE
}
// Start rolling out the resources for CANARY variant.
e.LogPersister.Info("Start rolling out CANARY variant...")
if err := applyManifests(ctx, e.provider, canaryManifests, e.deployCfg.Input.Namespace, e.LogPersister); err != nil {
return model.StageStatus_STAGE_FAILURE
}
e.LogPersister.Success("Successfully rolled out CANARY variant")
return model.StageStatus_STAGE_SUCCESS
}
func (e *deployExecutor) ensureCanaryClean(ctx context.Context) model.StageStatus {
value, ok := e.MetadataStore.Get(addedCanaryResourcesMetadataKey)
if !ok {
e.LogPersister.Error("Unable to determine the applied CANARY resources")
return model.StageStatus_STAGE_FAILURE
}
resources := strings.Split(value, ",")
if err := removeCanaryResources(ctx, e.provider, resources, e.LogPersister); err != nil {
e.LogPersister.Errorf("Unable to remove canary resources: %v", err)
return model.StageStatus_STAGE_FAILURE
}
return model.StageStatus_STAGE_SUCCESS
}
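// Illustrative note: ensureCanaryRollout above records every created CANARY
// resource key as a single comma-joined metadata value, and ensureCanaryClean
// simply splits that value back into keys before asking the provider to
// delete them (the exact ResourceKey string format is defined elsewhere).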
func (e *deployExecutor) generateCanaryManifests(manifests []provider.Manifest, opts config.K8sCanaryRolloutStageOptions) ([]provider.Manifest, error) {
suffix := canaryVariant
if opts.Suffix != "" {
suffix = opts.Suffix
}
workloads := findWorkloadManifests(manifests, e.deployCfg.Workloads)
if len(workloads) == 0 {
return nil, fmt.Errorf("unable to find any workload manifests for CANARY variant")
}
var canaryManifests []provider.Manifest
// Find service manifests and duplicate them for CANARY variant.
if opts.CreateService {
serviceName := e.deployCfg.Service.Name
services := findManifests(provider.KindService, serviceName, manifests)
if len(services) == 0 {
return nil, fmt.Errorf("unable to find any service for name=%q", serviceName)
}
		// Because the loaded manifests are read-only,
		// we duplicate them to avoid updating the shared manifests data in the cache.
services = duplicateManifests(services, "")
generatedServices, err := generateVariantServiceManifests(services, canaryVariant, suffix)
if err != nil {
return nil, err
}
canaryManifests = append(canaryManifests, generatedServices...)
}
// Find config map manifests and duplicate them for CANARY variant.
configMaps := findConfigMapManifests(manifests)
configMaps = duplicateManifests(configMaps, suffix)
canaryManifests = append(canaryManifests, configMaps...)
// Find secret manifests and duplicate them for CANARY variant.
secrets := findSecretManifests(manifests)
secrets = duplicateManifests(secrets, suffix)
canaryManifests = append(canaryManifests, secrets...)
// Generate new workload manifests for CANARY variant.
// The generated ones will mount to the new ConfigMaps and Secrets.
replicasCalculator := func(cur *int32) int32 {
if cur == nil {
return 1
}
num := opts.Replicas.Calculate(int(*cur), 1)
return int32(num)
}
// We don't need to duplicate the workload manifests
	// because the generateVariantWorkloadManifests function already makes a duplicate while decoding.
// workloads = duplicateManifests(workloads, suffix)
generatedWorkloads, err := generateVariantWorkloadManifests(workloads, configMaps, secrets, canaryVariant, suffix, replicasCalculator)
if err != nil {
return nil, err
}
canaryManifests = append(canaryManifests, generatedWorkloads...)
return canaryManifests, nil
}
func removeCanaryResources(ctx context.Context, applier provider.Applier, resources []string, lp executor.LogPersister) error {
if len(resources) == 0 {
return nil
}
var (
workloadKeys = make([]provider.ResourceKey, 0)
serviceKeys = make([]provider.ResourceKey, 0)
)
for _, r := range resources {
key, err := provider.DecodeResourceKey(r)
if err != nil {
lp.Errorf("Had an error while decoding CANARY resource key: %s, %v", r, err)
continue
}
if key.IsWorkload() {
workloadKeys = append(workloadKeys, key)
} else {
serviceKeys = append(serviceKeys, key)
}
}
// We delete the service first to close all incoming connections.
lp.Info("Starting finding and deleting service resources of CANARY variant")
if err := deleteResources(ctx, applier, serviceKeys, lp); err != nil {
return err
}
// Next, delete all workloads.
lp.Info("Starting finding and deleting workload resources of CANARY variant")
if err := deleteResources(ctx, applier, workloadKeys, lp); err != nil {
return err
}
return nil
}
| 1 | 17,332 | The `generateVariantWorkloadManifests` function below relies on the config manifests specified in Git, but it was passed with the newly created canary's manifests where their name was updated. | pipe-cd-pipe | go |
@@ -19,9 +19,10 @@ use Symfony\Component\DependencyInjection\Loader\YamlFileLoader;
class ErgonodeAccountExtension extends Extension implements PrependExtensionInterface
{
/**
+ * @throws \Exception
+ *
* @param array $configs
*
- * @throws \Exception
*/
public function load(array $configs, ContainerBuilder $container): void
{ | 1 | <?php
/**
* Copyright © Bold Brand Commerce Sp. z o.o. All rights reserved.
* See LICENSE.txt for license details.
*/
declare(strict_types=1);
namespace Ergonode\Account\Application\DependencyInjection;
use Nelmio\ApiDocBundle\NelmioApiDocBundle;
use Symfony\Component\Config\FileLocator;
use Symfony\Component\DependencyInjection\ContainerBuilder;
use Symfony\Component\DependencyInjection\Extension\Extension;
use Symfony\Component\DependencyInjection\Extension\PrependExtensionInterface;
use Symfony\Component\DependencyInjection\Loader\YamlFileLoader;
class ErgonodeAccountExtension extends Extension implements PrependExtensionInterface
{
/**
* @param array $configs
*
* @throws \Exception
*/
public function load(array $configs, ContainerBuilder $container): void
{
$loader = new YamlFileLoader(
$container,
new FileLocator(__DIR__.'/../../Resources/config')
);
$loader->load('services.yml');
}
/**
* {@inheritDoc}
*/
public function prepend(ContainerBuilder $container): void
{
if (!in_array(NelmioApiDocBundle::class, $container->getParameter('kernel.bundles'), true)) {
return;
}
$loader = new YamlFileLoader($container, new FileLocator(__DIR__.'/../../Resources/config'));
$loader->load('nelmio_api_doc.yaml');
}
}
| 1 | 9,204 | are this docs neccesary ? | ergonode-backend | php |
@@ -46,8 +46,8 @@ import java.util.List;
public class HttpCommandProcessor implements CommandProcessor {
private String pathToServlet;
- private String browserStartCommand;
- private String browserURL;
+ private final String browserStartCommand;
+ private final String browserURL;
private String sessionId;
private String extensionJs;
private String rcServerLocation; | 1 | // Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.thoughtworks.selenium;
import static java.nio.charset.StandardCharsets.UTF_8;
import org.openqa.selenium.net.Urls;
import java.io.BufferedWriter;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.OutputStreamWriter;
import java.io.Reader;
import java.io.Writer;
import java.net.ConnectException;
import java.net.HttpURLConnection;
import java.net.URL;
import java.text.NumberFormat;
import java.text.ParseException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
/**
* Sends commands and retrieves results via HTTP.
*
* @author Ben Griffiths, Jez Humble
* @deprecated The RC interface will be removed in Selenium 3.0. Please migrate to using WebDriver.
*/
@Deprecated
public class HttpCommandProcessor implements CommandProcessor {
private String pathToServlet;
private String browserStartCommand;
private String browserURL;
private String sessionId;
private String extensionJs;
private String rcServerLocation;
/**
* Specifies a server host/port, a command to launch the browser, and a starting URL for the
* browser.
*
* @param serverHost - the host name on which the Selenium Server resides
* @param serverPort - the port on which the Selenium Server is listening
* @param browserStartCommand - the command string used to launch the browser, e.g. "*firefox" or
* "c:\\program files\\internet explorer\\iexplore.exe"
* @param browserURL - the starting URL including just a domain name. We'll start the browser
* pointing at the Selenium resources on this URL,
*/
public HttpCommandProcessor(String serverHost, int serverPort, String browserStartCommand,
String browserURL) {
rcServerLocation = serverHost +
":" + Integer.toString(serverPort);
this.pathToServlet = "http://" + rcServerLocation + "/selenium-server/driver/";
this.browserStartCommand = browserStartCommand;
this.browserURL = browserURL;
this.extensionJs = "";
}
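  // Illustrative usage sketch (host, port and start command are assumptions):
  //   CommandProcessor processor =
  //       new HttpCommandProcessor("localhost", 4444, "*firefox", "http://www.example.com");
  //   processor.start();
  //   processor.doCommand("open", new String[] {"/"});
  //   processor.stop();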
/**
* Specifies the URL to the CommandBridge servlet, a command to launch the browser, and a starting
* URL for the browser.
*
* @param pathToServlet - the URL of the Selenium Server Driver, e.g.
* "http://localhost:4444/selenium-server/driver/" (don't forget the final slash!)
* @param browserStartCommand - the command string used to launch the browser, e.g. "*firefox" or
* "c:\\program files\\internet explorer\\iexplore.exe"
* @param browserURL - the starting URL including just a domain name. We'll start the browser
* pointing at the Selenium resources on this URL,
*/
public HttpCommandProcessor(String pathToServlet, String browserStartCommand, String browserURL) {
this.pathToServlet = pathToServlet;
this.browserStartCommand = browserStartCommand;
this.browserURL = browserURL;
this.extensionJs = "";
}
@Override
public String getRemoteControlServerLocation() {
return rcServerLocation;
}
@Override
public String doCommand(String commandName, String[] args) {
DefaultRemoteCommand command = new DefaultRemoteCommand(commandName, args);
String result = executeCommandOnServlet(command.getCommandURLString());
if (result == null) {
throw new NullPointerException("Selenium Bug! result must not be null");
}
if (!result.startsWith("OK")) {
return throwAssertionFailureExceptionOrError(result);
}
return result;
}
protected String throwAssertionFailureExceptionOrError(String message) {
throw new SeleniumException(message);
}
/** Sends the specified command string to the bridge servlet
* @param command command to execute
* @return response from the command execution
*/
public String executeCommandOnServlet(String command) {
try {
return getCommandResponseAsString(command);
} catch (IOException e) {
if (e instanceof ConnectException) {
throw new SeleniumException(e.getMessage(), e);
}
e.printStackTrace();
throw new UnsupportedOperationException("Catch body broken: IOException from " + command +
" -> " + e, e);
}
}
private String stringContentsOfInputStream(Reader rdr) throws IOException {
StringBuffer sb = new StringBuffer();
int c;
try {
while ((c = rdr.read()) != -1) {
sb.append((char) c);
}
return sb.toString();
} finally {
rdr.close();
}
}
// for testing
protected HttpURLConnection getHttpUrlConnection(URL urlForServlet) throws IOException {
return (HttpURLConnection) urlForServlet.openConnection();
}
// for testing
protected Writer getOutputStreamWriter(HttpURLConnection conn) throws IOException {
return new BufferedWriter(new OutputStreamWriter(conn.getOutputStream(), UTF_8));
}
// for testing
protected Reader getInputStreamReader(HttpURLConnection conn) throws IOException {
return new InputStreamReader(conn.getInputStream(), UTF_8);
}
// for testing
protected int getResponseCode(HttpURLConnection conn) throws IOException {
return conn.getResponseCode();
}
protected String getCommandResponseAsString(String command) throws IOException {
String responseString = null;
int responseCode = HttpURLConnection.HTTP_MOVED_PERM;
HttpURLConnection uc = null;
Writer wr = null;
Reader rdr = null;
while (responseCode == HttpURLConnection.HTTP_MOVED_PERM) {
URL result = new URL(pathToServlet);
String body = buildCommandBody(command);
try {
uc = getHttpUrlConnection(result);
uc.setRequestProperty("Content-Type", "application/x-www-form-urlencoded; charset=utf-8");
uc.setInstanceFollowRedirects(false);
uc.setDoOutput(true);
wr = getOutputStreamWriter(uc);
wr.write(body);
wr.flush();
responseCode = getResponseCode(uc);
if (responseCode == HttpURLConnection.HTTP_MOVED_PERM) {
pathToServlet = uc.getHeaderField("Location");
} else if (responseCode != HttpURLConnection.HTTP_OK) {
throwAssertionFailureExceptionOrError(uc.getResponseMessage() + " URL: " + result);
} else {
rdr = getInputStreamReader(uc);
responseString = stringContentsOfInputStream(rdr);
}
} finally {
closeResources(uc, wr, rdr);
}
}
return responseString;
}
protected void closeResources(HttpURLConnection conn, Writer wr, Reader rdr) {
try {
if (null != wr) {
wr.close();
}
} catch (IOException ioe) {
// ignore
}
try {
if (null != rdr) {
rdr.close();
}
} catch (IOException ioe) {
// ignore
}
if (null != conn) {
conn.disconnect();
}
}
private String buildCommandBody(String command) {
StringBuffer sb = new StringBuffer();
sb.append(command);
if (sessionId != null) {
sb.append("&sessionId=");
sb.append(Urls.urlEncode(sessionId));
}
return sb.toString();
}
/**
* This should be invoked before start().
*
* @param extensionJs the extra extension Javascript to include in this browser session.
*/
@Override
public void setExtensionJs(String extensionJs) {
this.extensionJs = extensionJs;
}
@Override
public void start() {
String result = getString("getNewBrowserSession",
new String[] {browserStartCommand, browserURL, extensionJs});
setSessionInProgress(result);
}
@Override
public void start(String optionsString) {
String result = getString("getNewBrowserSession",
new String[] {browserStartCommand, browserURL,
extensionJs, optionsString});
setSessionInProgress(result);
}
/**
* Wraps the version of start() that takes a String parameter, sending it the result of calling
* toString() on optionsObject, which will likely be a BrowserConfigurationOptions instance.
*
* @param optionsObject start options
*/
@Override
public void start(Object optionsObject) {
start(optionsObject.toString());
}
protected void setSessionInProgress(String result) {
sessionId = result;
}
@Override
public void stop() {
if (hasSessionInProgress()) {
doCommand("testComplete", new String[0]);
}
setSessionInProgress(null);
}
public boolean hasSessionInProgress() {
return null != sessionId;
}
@Override
public String getString(String commandName, String[] args) {
String result = doCommand(commandName, args);
if (result.length() >= "OK,".length()) {
return result.substring("OK,".length());
}
System.err.println("WARNING: getString(" + commandName + ") saw a bad result " + result);
return "";
}
@Override
public String[] getStringArray(String commandName, String[] args) {
String result = getString(commandName, args);
return parseCSV(result);
}
/**
* Convert backslash-escaped comma-delimited string into String array. As described in SRC-CDP
* spec section 5.2.1.2, these strings are comma-delimited, but commas can be escaped with a
* backslash "\". Backslashes can also be escaped as a double-backslash.
*
* @param input the unparsed string, e.g. "veni\, vidi\, vici,c:\\foo\\bar,c:\\I came\, I
* \\saw\\\, I conquered"
* @return the string array resulting from parsing this string
*/
public static String[] parseCSV(String input) {
List<String> output = new ArrayList<>();
StringBuffer sb = new StringBuffer();
for (int i = 0; i < input.length(); i++) {
char c = input.charAt(i);
switch (c) {
case ',':
output.add(sb.toString());
sb = new StringBuffer();
continue;
case '\\':
i++;
c = input.charAt(i);
// fall through to:
default:
sb.append(c);
}
}
output.add(sb.toString());
return output.toArray(new String[output.size()]);
}
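  // Worked example (illustrative): for the raw input
  //   veni\, vidi\, vici,c:\\foo\\bar
  // the loop above treats "\," as a literal comma and "\\" as a literal
  // backslash, so the result is {"veni, vidi, vici", "c:\foo\bar"}.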
@Override
public Number getNumber(String commandName, String[] args) {
String result = getString(commandName, args);
Number n;
try {
n = NumberFormat.getInstance().parse(result);
} catch (ParseException e) {
throw new RuntimeException(e);
}
if (n instanceof Long && n.intValue() == n.longValue()) {
// SRC-315 we should return Integers if possible
return Integer.valueOf(n.intValue());
}
return n;
}
@Override
public Number[] getNumberArray(String commandName, String[] args) {
String[] result = getStringArray(commandName, args);
Number[] n = new Number[result.length];
for (int i = 0; i < result.length; i++) {
try {
n[i] = NumberFormat.getInstance().parse(result[i]);
} catch (ParseException e) {
throw new RuntimeException(e);
}
}
return n;
}
@Override
public boolean getBoolean(String commandName, String[] args) {
String result = getString(commandName, args);
boolean b;
if ("true".equals(result)) {
b = true;
return b;
}
if ("false".equals(result)) {
b = false;
return b;
}
throw new RuntimeException("result was neither 'true' nor 'false': " + result);
}
@Override
public boolean[] getBooleanArray(String commandName, String[] args) {
String[] result = getStringArray(commandName, args);
boolean[] b = new boolean[result.length];
for (int i = 0; i < result.length; i++) {
if ("true".equals(result[i])) {
b[i] = true;
continue;
}
if ("false".equals(result[i])) {
b[i] = false;
continue;
}
throw new RuntimeException("result was neither 'true' nor 'false': " +
Arrays.toString(result));
}
return b;
}
}
| 1 | 19,387 | Can you please revert changes to files in the `thoughtworks` package? This is legacy code and we will eventually phase out RC. | SeleniumHQ-selenium | js |
@@ -13,6 +13,8 @@ void SoftmaxWithLossLayer<Dtype>::LayerSetUp(
LossLayer<Dtype>::LayerSetUp(bottom, top);
LayerParameter softmax_param(this->layer_param_);
softmax_param.set_type("Softmax");
+ // no loss weight for the Softmax internal layer.
+ softmax_param.clear_loss_weight();
softmax_layer_ = LayerRegistry<Dtype>::CreateLayer(softmax_param);
softmax_bottom_vec_.clear();
softmax_bottom_vec_.push_back(bottom[0]); | 1 | #include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layers/softmax_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
void SoftmaxWithLossLayer<Dtype>::LayerSetUp(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
LossLayer<Dtype>::LayerSetUp(bottom, top);
LayerParameter softmax_param(this->layer_param_);
softmax_param.set_type("Softmax");
softmax_layer_ = LayerRegistry<Dtype>::CreateLayer(softmax_param);
softmax_bottom_vec_.clear();
softmax_bottom_vec_.push_back(bottom[0]);
softmax_top_vec_.clear();
softmax_top_vec_.push_back(&prob_);
softmax_layer_->SetUp(softmax_bottom_vec_, softmax_top_vec_);
has_ignore_label_ =
this->layer_param_.loss_param().has_ignore_label();
if (has_ignore_label_) {
ignore_label_ = this->layer_param_.loss_param().ignore_label();
}
if (!this->layer_param_.loss_param().has_normalization() &&
this->layer_param_.loss_param().has_normalize()) {
normalization_ = this->layer_param_.loss_param().normalize() ?
LossParameter_NormalizationMode_VALID :
LossParameter_NormalizationMode_BATCH_SIZE;
} else {
normalization_ = this->layer_param_.loss_param().normalization();
}
}
template <typename Dtype>
void SoftmaxWithLossLayer<Dtype>::Reshape(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
LossLayer<Dtype>::Reshape(bottom, top);
softmax_layer_->Reshape(softmax_bottom_vec_, softmax_top_vec_);
softmax_axis_ =
bottom[0]->CanonicalAxisIndex(this->layer_param_.softmax_param().axis());
outer_num_ = bottom[0]->count(0, softmax_axis_);
inner_num_ = bottom[0]->count(softmax_axis_ + 1);
CHECK_EQ(outer_num_ * inner_num_, bottom[1]->count())
<< "Number of labels must match number of predictions; "
<< "e.g., if softmax axis == 1 and prediction shape is (N, C, H, W), "
<< "label count (number of labels) must be N*H*W, "
<< "with integer values in {0, 1, ..., C-1}.";
if (top.size() >= 2) {
// softmax output
top[1]->ReshapeLike(*bottom[0]);
}
}
template <typename Dtype>
Dtype SoftmaxWithLossLayer<Dtype>::get_normalizer(
LossParameter_NormalizationMode normalization_mode, int valid_count) {
Dtype normalizer;
switch (normalization_mode) {
case LossParameter_NormalizationMode_FULL:
normalizer = Dtype(outer_num_ * inner_num_);
break;
case LossParameter_NormalizationMode_VALID:
if (valid_count == -1) {
normalizer = Dtype(outer_num_ * inner_num_);
} else {
normalizer = Dtype(valid_count);
}
break;
case LossParameter_NormalizationMode_BATCH_SIZE:
normalizer = Dtype(outer_num_);
break;
case LossParameter_NormalizationMode_NONE:
normalizer = Dtype(1);
break;
default:
LOG(FATAL) << "Unknown normalization mode: "
<< LossParameter_NormalizationMode_Name(normalization_mode);
}
// Some users will have no labels for some examples in order to 'turn off' a
// particular loss in a multi-task setup. The max prevents NaNs in that case.
return std::max(Dtype(1.0), normalizer);
}
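// Illustrative note: for an (N, C, H, W) prediction with softmax axis 1,
// outer_num_ == N and inner_num_ == H * W, so FULL divides the loss by
// N * H * W, VALID by the number of non-ignored labels, BATCH_SIZE by N,
// and NONE by 1.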
template <typename Dtype>
void SoftmaxWithLossLayer<Dtype>::Forward_cpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
// The forward pass computes the softmax prob values.
softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_);
const Dtype* prob_data = prob_.cpu_data();
const Dtype* label = bottom[1]->cpu_data();
int dim = prob_.count() / outer_num_;
int count = 0;
Dtype loss = 0;
for (int i = 0; i < outer_num_; ++i) {
for (int j = 0; j < inner_num_; j++) {
const int label_value = static_cast<int>(label[i * inner_num_ + j]);
if (has_ignore_label_ && label_value == ignore_label_) {
continue;
}
DCHECK_GE(label_value, 0);
DCHECK_LT(label_value, prob_.shape(softmax_axis_));
loss -= log(std::max(prob_data[i * dim + label_value * inner_num_ + j],
Dtype(FLT_MIN)));
++count;
}
}
top[0]->mutable_cpu_data()[0] = loss / get_normalizer(normalization_, count);
if (top.size() == 2) {
top[1]->ShareData(prob_);
}
}
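// Illustrative note: the forward loop above computes
//   loss = -sum_i log(max(prob[i, label_i], FLT_MIN)) / normalizer,
// i.e. multinomial cross-entropy averaged according to the normalization mode.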
template <typename Dtype>
void SoftmaxWithLossLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[1]) {
LOG(FATAL) << this->type()
<< " Layer cannot backpropagate to label inputs.";
}
if (propagate_down[0]) {
Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
const Dtype* prob_data = prob_.cpu_data();
caffe_copy(prob_.count(), prob_data, bottom_diff);
const Dtype* label = bottom[1]->cpu_data();
int dim = prob_.count() / outer_num_;
int count = 0;
for (int i = 0; i < outer_num_; ++i) {
for (int j = 0; j < inner_num_; ++j) {
const int label_value = static_cast<int>(label[i * inner_num_ + j]);
if (has_ignore_label_ && label_value == ignore_label_) {
for (int c = 0; c < bottom[0]->shape(softmax_axis_); ++c) {
bottom_diff[i * dim + c * inner_num_ + j] = 0;
}
} else {
bottom_diff[i * dim + label_value * inner_num_ + j] -= 1;
++count;
}
}
}
// Scale gradient
Dtype loss_weight = top[0]->cpu_diff()[0] /
get_normalizer(normalization_, count);
caffe_scal(prob_.count(), loss_weight, bottom_diff);
}
}
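// Illustrative note: the backward pass above applies the standard softmax
// cross-entropy gradient, d(loss)/d(logit) = prob - one_hot(label), zeroes it
// for ignored labels, and scales it by top[0]->cpu_diff()[0] / normalizer.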
#ifdef CPU_ONLY
STUB_GPU(SoftmaxWithLossLayer);
#endif
INSTANTIATE_CLASS(SoftmaxWithLossLayer);
REGISTER_LAYER_CLASS(SoftmaxWithLoss);
} // namespace caffe
| 1 | 38,381 | This change suppose to resolve issue #2968. `loss_weight` should be of length 2, the second entry is ignored. | BVLC-caffe | cpp |
@@ -242,7 +242,17 @@ var filesToIgnore = map[string]bool{
func ignoreFile(filename string) bool {
_, base := path.Split(filename)
- return filesToIgnore[base] || strings.HasPrefix(base, "._")
+ if filesToIgnore[base] || strings.HasPrefix(base, "._") {
+ return true
+ }
+ // Treat the files to ignore as prefixes, since if they ever
+ // conflict they'll have the conflict suffix.
+ for prefix := range filesToIgnore {
+ if strings.HasPrefix(base, prefix) {
+ return true
+ }
+ }
+ return false
}
// processNotification adds the notification to the recomputer's | 1 | // Copyright 2018 Keybase Inc. All rights reserved.
// Use of this source code is governed by a BSD
// license that can be found in the LICENSE file.
package kbfsedits
import (
"container/heap"
"encoding/json"
"fmt"
"path"
"sort"
"strings"
"sync"
"github.com/keybase/kbfs/kbfsmd"
)
const (
// The max number of edits needed for each writer.
maxEditsPerWriter = 10
maxWritersPerHistory = 10
)
type writerNotifications struct {
writerName string
notifications notificationsByRevision
}
// writersByRevision sorts sets of per-writer notifications in reverse
// order by the revision of the latest notification for each writer.
type writersByRevision []*writerNotifications
func (wbr writersByRevision) Len() int {
return len(wbr)
}
func (wbr writersByRevision) Less(i, j int) bool {
// Some revisions come before no revisions.
iHasZero := len(wbr[i].notifications) == 0
jHasZero := len(wbr[j].notifications) == 0
if jHasZero {
return iHasZero
} else if iHasZero {
return false
}
// Reverse sort, so latest revisions come first.
return wbr[i].notifications[0].Revision > wbr[j].notifications[0].Revision
}
func (wbr writersByRevision) Swap(i, j int) {
wbr[i], wbr[j] = wbr[j], wbr[i]
}
func (wbr *writersByRevision) Push(x interface{}) {
wn := x.(*writerNotifications)
*wbr = append(*wbr, wn)
}
func (wbr *writersByRevision) Pop() interface{} {
// The item to remove is the last item; heap has already swapped
// it to the end.
old := *wbr
n := len(old)
item := old[n-1]
*wbr = old[0 : n-1]
return item
}
// TlfHistory maintains a history of the last N file edits from each
// writer in the TLF.
//
// There will be two users of a TlfHistory instance:
//
// * One user (likely something outside of the kbfsedits package,
// e.g. libkbfs.folderBranchOps) will read notifications from the
// corresponding TLF and add them to this history. After adding a
// batch or several batches of messages, it should call
// `Recompute()`, and if some writers need more, earlier revisions,
// it should fetch more notifications for the indicated writer and
// repeat.
//
// * The other user (within the kbfsedits package) will collate the
// histories from multiple TlfHistory instances together using
// `getHistory()` from each one. It may also construct pretty
// versions of individual edit histories for a particular TLF.
type TlfHistory struct {
lock sync.RWMutex
byWriter map[string]*writerNotifications
unflushed *writerNotifications
computed bool
cachedHistory writersByRevision
cachedLoggedInUser string
}
// NewTlfHistory constructs a new TlfHistory instance.
func NewTlfHistory() *TlfHistory {
return &TlfHistory{
byWriter: make(map[string]*writerNotifications),
}
}
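// Illustrative call sequence (a sketch based on the comment above; the exact
// signatures of Recompute and getHistory are not shown in this excerpt):
//
//	th := NewTlfHistory()
//	th.AddNotifications(writerName, messages) // once per fetched batch
//	// then call Recompute(), fetch more notifications for any writer that
//	// still needs earlier revisions, and repeat; collators read the result
//	// via getHistory().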
// AddNotifications takes in a set of messages in this TLF by
// `writer`, and adds them to the history. Once done adding messages,
// the caller should call `Recompute` to find out if more messages
// should be added for any particular writer.
func (th *TlfHistory) AddNotifications(
writerName string, messages []string) (err error) {
newEdits := make(notificationsByRevision, 0, len(messages))
// Unmarshal and sort the new messages.
for _, msg := range messages {
var revList []NotificationMessage
err := json.Unmarshal([]byte(msg), &revList)
if err != nil {
// The messages might be from a new version we don't
// understand, so swallow the error.
continue
}
for j := len(revList) - 1; j >= 0; j-- {
revMsg := revList[j]
if revMsg.Version != NotificationV2 {
// Ignore messages that are too new for us to understand.
continue
}
revMsg.numWithinRevision = j
newEdits = append(newEdits, revMsg)
}
}
th.lock.Lock()
defer th.lock.Unlock()
wn, existed := th.byWriter[writerName]
if !existed {
wn = &writerNotifications{writerName, nil}
}
oldLen := len(wn.notifications)
newEdits = append(newEdits, wn.notifications...)
sort.Sort(newEdits)
wn.notifications = newEdits.uniquify()
if len(wn.notifications) == oldLen {
// No new messages.
return nil
}
if !existed {
th.byWriter[writerName] = wn
}
// Invalidate the cached results.
th.computed = false
th.cachedLoggedInUser = ""
return nil
}
// AddUnflushedNotifications adds notifications to a special
// "unflushed" list that takes precedences over the regular
// notifications with revision numbers equal or greater to the minimum
// unflushed revision.
func (th *TlfHistory) AddUnflushedNotifications(
loggedInUser string, msgs []NotificationMessage) {
th.lock.Lock()
defer th.lock.Unlock()
if th.unflushed == nil {
th.unflushed = &writerNotifications{loggedInUser, nil}
}
if th.unflushed.writerName != loggedInUser {
panic(fmt.Sprintf("Logged-in user %s doesn't match unflushed user %s",
loggedInUser, th.unflushed.writerName))
}
newEdits := append(
notificationsByRevision(msgs), th.unflushed.notifications...)
sort.Sort(newEdits)
th.unflushed.notifications = newEdits.uniquify()
// Invalidate the cached results.
th.computed = false
th.cachedLoggedInUser = ""
}
// FlushRevision clears any unflushed notifications with a
// revision equal to or less than `rev`.
func (th *TlfHistory) FlushRevision(rev kbfsmd.Revision) {
th.lock.Lock()
defer th.lock.Unlock()
if th.unflushed == nil {
return
}
lastToKeep := len(th.unflushed.notifications) - 1
for ; lastToKeep >= 0; lastToKeep-- {
if th.unflushed.notifications[lastToKeep].Revision > rev {
break
}
}
if lastToKeep < len(th.unflushed.notifications)-1 {
th.unflushed.notifications = th.unflushed.notifications[:lastToKeep+1]
// Invalidate the cached results.
th.computed = false
th.cachedLoggedInUser = ""
}
}
// ClearAllUnflushed clears all unflushed notifications.
func (th *TlfHistory) ClearAllUnflushed() {
th.lock.Lock()
defer th.lock.Unlock()
if th.unflushed != nil {
// Invalidate the cached results.
th.computed = false
th.cachedLoggedInUser = ""
}
th.unflushed = nil
}
type fileEvent struct {
delete bool
newName string
}
type recomputer struct {
byWriter map[string]*writerNotifications
modifiedFiles map[string]map[string]bool // writer -> file -> bool
fileEvents map[string]fileEvent // currentName -> ultimate fate
numProcessed map[string]int // writer name -> num
minUnflushed kbfsmd.Revision
}
func newRecomputer() *recomputer {
return &recomputer{
byWriter: make(map[string]*writerNotifications),
modifiedFiles: make(map[string]map[string]bool),
fileEvents: make(map[string]fileEvent),
numProcessed: make(map[string]int),
minUnflushed: kbfsmd.RevisionUninitialized,
}
}
var filesToIgnore = map[string]bool{
".Trashes": true,
".fseventsd": true,
".DS_Store": true,
}
func ignoreFile(filename string) bool {
_, base := path.Split(filename)
return filesToIgnore[base] || strings.HasPrefix(base, "._")
}
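// Illustrative note: with the set above, ignoreFile returns true for
// basenames such as ".DS_Store" or ".fseventsd", and for any basename that
// starts with "._" (for example "._photo.jpg").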
// processNotification adds the notification to the recomputer's
// history if it is a create/modify for a file that hasn't yet been
// deleted. If the file is renamed in a future revision, the added
// notification has the new name of the file. processNotification
// should be called with notifications in reverse order of their
// revision number.
//
// It returns true if it has added enough notifications for the given
// writer, and the caller should not send any more for that writer.
func (r *recomputer) processNotification(
writer string, notification NotificationMessage) (doTrim bool) {
// Ignore notifications that come after any present unflushed
// notifications, as the local client won't be able to see them.
if r.minUnflushed != kbfsmd.RevisionUninitialized &&
notification.Revision >= r.minUnflushed {
return false
}
filename := notification.Filename
r.numProcessed[writer]++
// If the file is renamed in a future revision, rename it in the
// notification.
//
// TODO(KBFS-3073): maybe we should check all the parent
// directories for renames as well, so we can give a full, updated
// path for older edits. That would also help avoid showing edits
// for files that were later deleted, but in a renamed directory.
eventFilename := filename
event, hasEvent := r.fileEvents[filename]
if hasEvent && event.newName != "" {
notification.Filename = event.newName
filename = event.newName
}
// Keep only the creates and modifies for non-deleted files,
// but remember the renames and deletes.
switch notification.Type {
case NotificationCreate, NotificationModify:
// Disregard any file that's already been deleted.
if hasEvent && event.delete {
return false
}
// We only care about files, so skip dir and sym creates.
if notification.FileType != EntryTypeFile {
return false
}
// Ignore macOS dotfiles.
if ignoreFile(filename) {
return false
}
// See if any of the parent directories were renamed, checking
// backwards until we get to the TLF name.
prefix := filename
suffix := ""
for strings.Count(prefix, "/") > 4 {
var finalElem string
prefix, finalElem = path.Split(prefix)
prefix = strings.TrimSuffix(prefix, "/")
suffix = path.Clean(path.Join(finalElem, suffix))
event, hasEvent := r.fileEvents[prefix]
if hasEvent && event.newName != "" {
prefix = event.newName
}
}
filename = path.Clean(path.Join(prefix, suffix))
notification.Filename = filename
// We only need one modify message per writer per file.
if r.modifiedFiles[writer][filename] {
return false
}
wn, ok := r.byWriter[writer]
if !ok {
wn = &writerNotifications{writer, nil}
r.byWriter[writer] = wn
}
wn.notifications = append(wn.notifications, notification)
modified, ok := r.modifiedFiles[writer]
if !ok {
modified = make(map[string]bool)
r.modifiedFiles[writer] = modified
}
modified[filename] = true
if len(wn.notifications) == maxEditsPerWriter {
// We have enough edits for this user.
return true
}
case NotificationRename:
// If the file already has a final event, move that to the old
// filename. Otherwise, this is the final event.
if hasEvent {
r.fileEvents[notification.Params.OldFilename] = event
delete(r.fileEvents, eventFilename)
} else {
r.fileEvents[notification.Params.OldFilename] =
fileEvent{newName: eventFilename}
}
// If renaming a directory, check whether there are any events
// for children of the directory, and rename them
// accordingly. TODO: there's probably a better data structure
// for doing this when storing events, maybe a multi-layer map
// structured like a file system.
if notification.FileType == EntryTypeDir {
for f, event := range r.fileEvents {
if strings.HasPrefix(f, eventFilename) {
oldF := strings.Replace(
f, eventFilename, notification.Params.OldFilename, -1)
r.fileEvents[oldF] = event
delete(r.fileEvents, f)
}
}
}
// The renamed file overwrote any existing file with the new
// name.
r.fileEvents[eventFilename] = fileEvent{delete: true}
case NotificationDelete:
r.fileEvents[eventFilename] = fileEvent{delete: true}
}
return false
}
func (th *TlfHistory) recomputeLocked(loggedInUser string) (
history writersByRevision, writersWhoNeedMore map[string]bool) {
writersWhoNeedMore = make(map[string]bool)
r := newRecomputer()
// First add all of the unflushed notifications for the logged-in
// writer.
skipLoggedIn := false
loggedInProcessed := 0
if th.unflushed != nil {
if th.unflushed.writerName != loggedInUser {
panic(fmt.Sprintf(
"Logged-in user %s doesn't match unflushed user %s",
loggedInUser, th.unflushed.writerName))
}
for _, n := range th.unflushed.notifications {
doTrim := r.processNotification(th.unflushed.writerName, n)
if doTrim {
skipLoggedIn = true
break
}
}
if ln := len(th.unflushed.notifications); ln > 0 {
r.minUnflushed = th.unflushed.notifications[ln-1].Revision
}
loggedInProcessed = r.numProcessed[th.unflushed.writerName]
}
// Copy the writer notifications into a heap.
var writersHeap writersByRevision
for _, wn := range th.byWriter {
if skipLoggedIn && wn.writerName == loggedInUser {
// There are enough unflushed notifications already, so
// skip the logged-in user.
continue
}
wnCopy := writerNotifications{
writerName: wn.writerName,
notifications: make(notificationsByRevision, len(wn.notifications)),
}
copy(wnCopy.notifications, wn.notifications)
writersHeap = append(writersHeap, &wnCopy)
}
heap.Init(&writersHeap)
// Iterate through the heap. The writer with the next highest
// revision will always be at index 0. Process that writer's
// first notification, then remove it and fix the heap so that the
// next highest revision is at index 0. That way events that
// happen more recently (like deletes and renames) can be taken
// into account when looking at older events.
for writersHeap.Len() > 0 {
nextWriter := writersHeap[0].writerName
nextNotification := writersHeap[0].notifications[0]
doTrim := r.processNotification(nextWriter, nextNotification)
// Remove that notification, and fix the heap because this
// writer has a different newest revision.
if doTrim {
// Trim all earlier revisions because they won't be needed
// for the cached history.
numProcessed := r.numProcessed[nextWriter]
if loggedInUser == nextWriter {
numProcessed -= loggedInProcessed
}
th.byWriter[nextWriter].notifications =
th.byWriter[nextWriter].notifications[:numProcessed]
} else {
writersHeap[0].notifications = writersHeap[0].notifications[1:]
}
if len(writersHeap[0].notifications) == 0 || doTrim {
heap.Pop(&writersHeap)
} else {
heap.Fix(&writersHeap, 0)
}
}
history = make(writersByRevision, 0, len(r.byWriter))
for writerName := range th.byWriter {
wn := r.byWriter[writerName]
if wn != nil && len(wn.notifications) > 0 {
history = append(history, wn)
}
if wn == nil || len(wn.notifications) < maxEditsPerWriter {
writersWhoNeedMore[writerName] = true
}
}
if _, ok := th.byWriter[loggedInUser]; !ok {
// The logged-in user only has unflushed edits.
wn := r.byWriter[loggedInUser]
if wn != nil && len(wn.notifications) > 0 {
history = append(history, wn)
}
}
sort.Sort(history)
if len(history) > maxWritersPerHistory {
// Garbage-collect any writers that don't appear in the history.
loggedInIndex := -1
for i := maxWritersPerHistory; i < len(history); i++ {
if history[i].writerName == loggedInUser {
// Don't purge the logged-in user.
loggedInIndex = i
continue
}
delete(th.byWriter, history[i].writerName)
delete(writersWhoNeedMore, history[i].writerName)
}
if loggedInIndex > 0 {
// Keep the logged-in user as the last entry. Note that
// `loggedInIndex` is guaranteed to be greater or equal to
// `maxWritersPerHistory`, so this logic swaps in the
// loggedIn entry (and doesn't duplicate it).
history = append(
history[:maxWritersPerHistory-1], history[loggedInIndex])
} else {
history = history[:maxWritersPerHistory]
}
}
th.computed = true
th.cachedHistory = history
th.cachedLoggedInUser = loggedInUser
return history, writersWhoNeedMore
}
func (th *TlfHistory) getHistoryIfCached() (
cached bool, history writersByRevision, loggedInUser string) {
th.lock.RLock()
defer th.lock.RUnlock()
if th.computed {
return true, th.cachedHistory, th.cachedLoggedInUser
}
return false, nil, ""
}
func (th *TlfHistory) getHistory(loggedInUser string) writersByRevision {
cached, history, cachedLoggedInUser := th.getHistoryIfCached()
if cached && loggedInUser == cachedLoggedInUser {
return history
}
th.lock.Lock()
defer th.lock.Unlock()
if th.computed {
		// Maybe another goroutine got the lock and recomputed the
// history since we checked above.
return th.cachedHistory
}
history, _ = th.recomputeLocked(loggedInUser)
return history
}
// Recompute processes (and caches) the history so that it reflects
// all recently-added notifications, and returns the names of writers
// which don't yet have the maximum number of edits in the history.
func (th *TlfHistory) Recompute(loggedInUser string) (
writersWhoNeedMore map[string]bool) {
th.lock.Lock()
defer th.lock.Unlock()
_, writersWhoNeedMore = th.recomputeLocked(loggedInUser)
return writersWhoNeedMore
}
| 1 | 20,579 | Is there an easy way to check for conflict suffix to avoid false positives here? | keybase-kbfs | go |
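It is hard to tell from the file above exactly which check the question about a conflict suffix points at; one candidate is the bare `strings.HasPrefix(f, eventFilename)` test in the directory-rename branch, where plain prefix matching can over-match (renaming `/tlf/dir` would also capture a sibling such as `/tlf/dir2`). A minimal, hypothetical boundary-aware helper — an illustration only, not the project's actual fix, and it assumes the `strings` package is imported:

func isUnderDir(child, dir string) bool {
	// True only when child is dir itself or lies strictly inside it,
	// so "/tlf/dir2" does not count as being under "/tlf/dir".
	return child == dir || strings.HasPrefix(child, dir+"/")
}

The loop over `r.fileEvents` could then call `isUnderDir(f, eventFilename)` instead of the bare prefix test.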
@@ -59,7 +59,7 @@ module Bolt
run_opts = {}
run_opts[:run_as] = opts['_run_as'] if opts['_run_as']
begin
- task = apply_prep.get_task(opts['task'], params)
+ task = @context.get_validated_task(opts['task'], params)
rescue Bolt::Error => e
raise Bolt::Plugin::PluginError::ExecutionError.new(e.message, name, 'puppet_library')
end | 1 | # frozen_string_literal: true
module Bolt
class Plugin
class Task
def hooks
hook_descriptions.keys
end
def hook_descriptions
{
puppet_library: 'Run a task to install the Puppet agent package.',
resolve_reference: 'Run a task as a plugin.',
validate_resolve_reference: nil
}
end
def name
'task'
end
attr_accessor :pal, :executor, :inventory
def initialize(context:, **_opts)
@context = context
end
def run_task(opts)
params = opts['parameters'] || {}
options = { catch_errors: true }
raise Bolt::ValidationError, "Task plugin requires that the 'task' is specified" unless opts['task']
task = @context.get_validated_task(opts['task'], params)
result = @context.run_local_task(task, params, options).first
raise Bolt::Error.new(result.error_hash['msg'], result.error_hash['kind']) if result.error_hash
result
end
def validate_options(opts)
raise Bolt::ValidationError, "Task plugin requires that the 'task' is specified" unless opts['task']
@context.get_validated_task(opts['task'], opts['parameters'] || {})
end
alias validate_resolve_reference validate_options
def resolve_reference(opts)
result = run_task(opts)
unless result.value.include?('value')
raise Bolt::ValidationError, "Task result did not return 'value': #{result.value}"
end
result['value']
end
def puppet_library(opts, target, apply_prep)
params = opts['parameters'] || {}
run_opts = {}
run_opts[:run_as] = opts['_run_as'] if opts['_run_as']
begin
task = apply_prep.get_task(opts['task'], params)
rescue Bolt::Error => e
raise Bolt::Plugin::PluginError::ExecutionError.new(e.message, name, 'puppet_library')
end
proc do
apply_prep.run_task([target], task, params, run_opts).first
end
end
end
end
end
| 1 | 18,938 | `apply_prep.run_task` also seems like it can just use `@context.run_task` | puppetlabs-bolt | rb |
@@ -36,6 +36,9 @@ define(["events", "layoutManager", "inputManager", "userSettings", "libraryMenu"
showYear: true,
centerText: true
});
+
+ // FIXME: Wait for all sections to load
+ autoFocus(page);
});
}
| 1 | define(["events", "layoutManager", "inputManager", "userSettings", "libraryMenu", "mainTabsManager", "cardBuilder", "dom", "imageLoader", "playbackManager", "emby-itemscontainer", "emby-tabs", "emby-button"], function (events, layoutManager, inputManager, userSettings, libraryMenu, mainTabsManager, cardBuilder, dom, imageLoader, playbackManager) {
"use strict";
function enableScrollX() {
return !layoutManager.desktop;
}
function getPortraitShape() {
return enableScrollX() ? "overflowPortrait" : "portrait";
}
function getThumbShape() {
return enableScrollX() ? "overflowBackdrop" : "backdrop";
}
function loadLatest(page, userId, parentId) {
var options = {
IncludeItemTypes: "Movie",
Limit: 18,
Fields: "PrimaryImageAspectRatio,MediaSourceCount,BasicSyncInfo",
ParentId: parentId,
ImageTypeLimit: 1,
EnableImageTypes: "Primary,Backdrop,Banner,Thumb",
EnableTotalRecordCount: false
};
ApiClient.getJSON(ApiClient.getUrl("Users/" + userId + "/Items/Latest", options)).then(function (items) {
var allowBottomPadding = !enableScrollX();
var container = page.querySelector("#recentlyAddedItems");
cardBuilder.buildCards(items, {
itemsContainer: container,
shape: getPortraitShape(),
scalable: true,
overlayPlayButton: true,
allowBottomPadding: allowBottomPadding,
showTitle: true,
showYear: true,
centerText: true
});
});
}
function loadResume(page, userId, parentId) {
var screenWidth = dom.getWindowSize().innerWidth;
var options = {
SortBy: "DatePlayed",
SortOrder: "Descending",
IncludeItemTypes: "Movie",
Filters: "IsResumable",
Limit: screenWidth >= 1920 ? 5 : screenWidth >= 1600 ? 5 : 3,
Recursive: true,
Fields: "PrimaryImageAspectRatio,MediaSourceCount,BasicSyncInfo",
CollapseBoxSetItems: false,
ParentId: parentId,
ImageTypeLimit: 1,
EnableImageTypes: "Primary,Backdrop,Banner,Thumb",
EnableTotalRecordCount: false
};
ApiClient.getItems(userId, options).then(function (result) {
if (result.Items.length) {
page.querySelector("#resumableSection").classList.remove("hide");
} else {
page.querySelector("#resumableSection").classList.add("hide");
}
var allowBottomPadding = !enableScrollX();
var container = page.querySelector("#resumableItems");
cardBuilder.buildCards(result.Items, {
itemsContainer: container,
preferThumb: true,
shape: getThumbShape(),
scalable: true,
overlayPlayButton: true,
allowBottomPadding: allowBottomPadding,
cardLayout: false,
showTitle: true,
showYear: true,
centerText: true
});
});
}
function getRecommendationHtml(recommendation) {
var html = "";
var title = "";
switch (recommendation.RecommendationType) {
case "SimilarToRecentlyPlayed":
title = Globalize.translate("RecommendationBecauseYouWatched").replace("{0}", recommendation.BaselineItemName);
break;
case "SimilarToLikedItem":
title = Globalize.translate("RecommendationBecauseYouLike").replace("{0}", recommendation.BaselineItemName);
break;
case "HasDirectorFromRecentlyPlayed":
case "HasLikedDirector":
title = Globalize.translate("RecommendationDirectedBy").replace("{0}", recommendation.BaselineItemName);
break;
case "HasActorFromRecentlyPlayed":
case "HasLikedActor":
title = Globalize.translate("RecommendationStarring").replace("{0}", recommendation.BaselineItemName);
break;
}
html += '<div class="verticalSection">';
html += '<h2 class="sectionTitle sectionTitle-cards padded-left">' + title + "</h2>";
var allowBottomPadding = true;
if (enableScrollX()) {
allowBottomPadding = false;
html += '<div is="emby-itemscontainer" class="itemsContainer hiddenScrollX padded-left padded-right">';
} else {
html += '<div is="emby-itemscontainer" class="itemsContainer vertical-wrap padded-left padded-right">';
}
html += cardBuilder.getCardsHtml(recommendation.Items, {
shape: getPortraitShape(),
scalable: true,
overlayPlayButton: true,
allowBottomPadding: allowBottomPadding
});
html += "</div>";
html += "</div>";
return html;
}
function loadSuggestions(page, userId, parentId) {
var screenWidth = dom.getWindowSize().innerWidth;
var url = ApiClient.getUrl("Movies/Recommendations", {
userId: userId,
categoryLimit: 6,
ItemLimit: screenWidth >= 1920 ? 8 : screenWidth >= 1600 ? 8 : screenWidth >= 1200 ? 6 : 5,
Fields: "PrimaryImageAspectRatio,MediaSourceCount,BasicSyncInfo",
ImageTypeLimit: 1,
EnableImageTypes: "Primary,Backdrop,Banner,Thumb"
});
ApiClient.getJSON(url).then(function (recommendations) {
if (!recommendations.length) {
page.querySelector(".noItemsMessage").classList.remove("hide");
page.querySelector(".recommendations").innerHTML = "";
return;
}
var html = recommendations.map(getRecommendationHtml).join("");
page.querySelector(".noItemsMessage").classList.add("hide");
var recs = page.querySelector(".recommendations");
recs.innerHTML = html;
imageLoader.lazyChildren(recs);
});
}
function setScrollClasses(elem, scrollX) {
if (scrollX) {
elem.classList.add("hiddenScrollX");
if (layoutManager.tv) {
elem.classList.add("smoothScrollX");
}
elem.classList.add("scrollX");
elem.classList.remove("vertical-wrap");
} else {
elem.classList.remove("hiddenScrollX");
elem.classList.remove("smoothScrollX");
elem.classList.remove("scrollX");
elem.classList.add("vertical-wrap");
}
}
function initSuggestedTab(page, tabContent) {
var containers = tabContent.querySelectorAll(".itemsContainer");
for (var i = 0, length = containers.length; i < length; i++) {
setScrollClasses(containers[i], enableScrollX());
}
}
function loadSuggestionsTab(view, params, tabContent) {
var parentId = params.topParentId;
var userId = ApiClient.getCurrentUserId();
console.log("loadSuggestionsTab");
loadResume(tabContent, userId, parentId);
loadLatest(tabContent, userId, parentId);
loadSuggestions(tabContent, userId, parentId);
}
function getTabs() {
return [{
name: Globalize.translate("Movies")
}, {
name: Globalize.translate("TabSuggestions")
}, {
name: Globalize.translate("TabTrailers")
}, {
name: Globalize.translate("TabFavorites")
}, {
name: Globalize.translate("TabCollections")
}, {
name: Globalize.translate("TabGenres")
}, {
name: Globalize.translate("ButtonSearch"),
cssClass: "searchTabButton"
}];
}
function getDefaultTabIndex(folderId) {
switch (userSettings.get("landing-" + folderId)) {
case "suggestions":
return 1;
case "favorites":
return 3;
case "collections":
return 4;
case "genres":
return 5;
default:
return 0;
}
}
return function (view, params) {
function onBeforeTabChange(e) {
preLoadTab(view, parseInt(e.detail.selectedTabIndex));
}
function onTabChange(e) {
var newIndex = parseInt(e.detail.selectedTabIndex);
loadTab(view, newIndex);
}
function getTabContainers() {
return view.querySelectorAll(".pageTabContent");
}
function initTabs() {
mainTabsManager.setTabs(view, currentTabIndex, getTabs, getTabContainers, onBeforeTabChange, onTabChange);
}
function getTabController(page, index, callback) {
var depends = [];
switch (index) {
case 0:
depends.push("controllers/movies/movies");
break;
case 1:
break;
case 2:
depends.push("controllers/movies/movietrailers");
break;
case 3:
depends.push("controllers/movies/movies");
break;
case 4:
depends.push("controllers/movies/moviecollections");
break;
case 5:
depends.push("controllers/movies/moviegenres");
break;
case 6:
depends.push("scripts/searchtab");
}
require(depends, function (controllerFactory) {
var tabContent;
if (index === suggestionsTabIndex) {
tabContent = view.querySelector(".pageTabContent[data-index='" + index + "']");
self.tabContent = tabContent;
}
var controller = tabControllers[index];
if (!controller) {
tabContent = view.querySelector(".pageTabContent[data-index='" + index + "']");
if (index === suggestionsTabIndex) {
controller = self;
} else if (index === 6) {
controller = new controllerFactory(view, tabContent, {
collectionType: "movies",
parentId: params.topParentId
});
} else if (index == 0 || index == 3) {
controller = new controllerFactory(view, params, tabContent, {
mode: index ? "favorites" : "movies"
});
} else {
controller = new controllerFactory(view, params, tabContent);
}
tabControllers[index] = controller;
if (controller.initTab) {
controller.initTab();
}
}
callback(controller);
});
}
function preLoadTab(page, index) {
getTabController(page, index, function (controller) {
if (renderedTabs.indexOf(index) == -1 && controller.preRender) {
controller.preRender();
}
});
}
function loadTab(page, index) {
currentTabIndex = index;
getTabController(page, index, function (controller) {
initialTabIndex = null;
if (renderedTabs.indexOf(index) == -1) {
renderedTabs.push(index);
controller.renderTab();
}
});
}
function onPlaybackStop(e, state) {
if (state.NowPlayingItem && state.NowPlayingItem.MediaType == "Video") {
renderedTabs = [];
mainTabsManager.getTabsElement().triggerTabChange();
}
}
function onInputCommand(e) {
switch (e.detail.command) {
case "search":
e.preventDefault();
Dashboard.navigate("search.html?collectionType=movies&parentId=" + params.topParentId);
}
}
var isViewRestored;
var self = this;
var currentTabIndex = parseInt(params.tab || getDefaultTabIndex(params.topParentId));
var initialTabIndex = currentTabIndex;
var suggestionsTabIndex = 1;
self.initTab = function () {
var tabContent = view.querySelector(".pageTabContent[data-index='" + suggestionsTabIndex + "']");
initSuggestedTab(view, tabContent);
};
self.renderTab = function () {
var tabContent = view.querySelector(".pageTabContent[data-index='" + suggestionsTabIndex + "']");
loadSuggestionsTab(view, params, tabContent);
};
var tabControllers = [];
var renderedTabs = [];
view.addEventListener("viewshow", function (e) {
if (isViewRestored = e.detail.isRestored, initTabs(), !view.getAttribute("data-title")) {
var parentId = params.topParentId;
if (parentId) {
ApiClient.getItem(ApiClient.getCurrentUserId(), parentId).then(function (item) {
view.setAttribute("data-title", item.Name);
libraryMenu.setTitle(item.Name);
});
} else {
view.setAttribute("data-title", Globalize.translate("TabMovies"));
libraryMenu.setTitle(Globalize.translate("TabMovies"));
}
}
events.on(playbackManager, "playbackstop", onPlaybackStop);
inputManager.on(window, onInputCommand);
});
view.addEventListener("viewbeforehide", function (e) {
inputManager.off(window, onInputCommand);
});
view.addEventListener("viewdestroy", function (e) {
tabControllers.forEach(function (t) {
if (t.destroy) {
t.destroy();
}
});
});
};
});
| 1 | 12,179 | do you have any idea on how to fix it? | jellyfin-jellyfin-web | js |
@@ -159,3 +159,19 @@ class GCSTargetTest(_GCSBaseTestCase, FileSystemTargetTestMixin):
def create_target(self, format=None):
return gcs.GCSTarget(bucket_url(self.id()), format=format, client=self.client)
+
+ def test_close_twice(self):
+ # Ensure gcs._DeleteOnCloseFile().close() can be called multiple times
+ tgt = self.create_target()
+
+ with tgt.open('w') as dst:
+ dst.write('data')
+ assert dst.closed
+ dst.close()
+ assert dst.closed
+
+ with tgt.open() as src:
+ assert src.read().strip() == 'data'
+ assert src.closed
+ src.close()
+ assert src.closed | 1 | # -*- coding: utf-8 -*-
#
# Copyright 2015 Twitter Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""This is an integration test for the GCS-luigi binding.
This test requires credentials that can access GCS & access to a bucket below.
Follow the directions in the gcloud tools to set up local credentials.
"""
from helpers import unittest
try:
import googleapiclient.errors
import oauth2client
except ImportError:
raise unittest.SkipTest('Unable to load googleapiclient module')
import os
import tempfile
import unittest
from luigi.contrib import gcs
from target_test import FileSystemTargetTestMixin
from nose.plugins.attrib import attr
# In order to run this test, you should set these to your GCS project/bucket.
# Unfortunately there's no mock
PROJECT_ID = os.environ.get('GCS_TEST_PROJECT_ID', 'your_project_id_here')
BUCKET_NAME = os.environ.get('GCS_TEST_BUCKET', 'your_test_bucket_here')
TEST_FOLDER = os.environ.get('TRAVIS_BUILD_ID', 'gcs_test_folder')
CREDENTIALS = oauth2client.client.GoogleCredentials.get_application_default()
ATTEMPTED_BUCKET_CREATE = False
def bucket_url(suffix):
"""
Actually it's bucket + test folder name
"""
return 'gs://{}/{}/{}'.format(BUCKET_NAME, TEST_FOLDER, suffix)
class _GCSBaseTestCase(unittest.TestCase):
def setUp(self):
self.client = gcs.GCSClient(CREDENTIALS)
global ATTEMPTED_BUCKET_CREATE
if not ATTEMPTED_BUCKET_CREATE:
try:
self.client.client.buckets().insert(
project=PROJECT_ID, body={'name': BUCKET_NAME}).execute()
except googleapiclient.errors.HttpError as ex:
if ex.resp.status != 409: # bucket already exists
raise
ATTEMPTED_BUCKET_CREATE = True
self.client.remove(bucket_url(''), recursive=True)
self.client.mkdir(bucket_url(''))
def tearDown(self):
self.client.remove(bucket_url(''), recursive=True)
@attr('gcloud')
class GCSClientTest(_GCSBaseTestCase):
def test_not_exists(self):
self.assertFalse(self.client.exists(bucket_url('does_not_exist')))
self.assertFalse(self.client.isdir(bucket_url('does_not_exist')))
def test_exists(self):
self.client.put_string('hello', bucket_url('exists_test'))
self.assertTrue(self.client.exists(bucket_url('exists_test')))
self.assertFalse(self.client.isdir(bucket_url('exists_test')))
def test_mkdir(self):
self.client.mkdir(bucket_url('exists_dir_test'))
self.assertTrue(self.client.exists(bucket_url('exists_dir_test')))
self.assertTrue(self.client.isdir(bucket_url('exists_dir_test')))
def test_mkdir_by_upload(self):
self.client.put_string('hello', bucket_url('test_dir_recursive/yep/file'))
self.assertTrue(self.client.exists(bucket_url('test_dir_recursive')))
self.assertTrue(self.client.isdir(bucket_url('test_dir_recursive')))
def test_download(self):
self.client.put_string('hello', bucket_url('test_download'))
fp = self.client.download(bucket_url('test_download'))
self.assertEquals(b'hello', fp.read())
def test_rename(self):
self.client.put_string('hello', bucket_url('test_rename_1'))
self.client.rename(bucket_url('test_rename_1'), bucket_url('test_rename_2'))
self.assertFalse(self.client.exists(bucket_url('test_rename_1')))
self.assertTrue(self.client.exists(bucket_url('test_rename_2')))
def test_rename_recursive(self):
self.client.mkdir(bucket_url('test_rename_recursive'))
self.client.put_string('hello', bucket_url('test_rename_recursive/1'))
self.client.put_string('hello', bucket_url('test_rename_recursive/2'))
self.client.rename(bucket_url('test_rename_recursive'), bucket_url('test_rename_recursive_dest'))
self.assertFalse(self.client.exists(bucket_url('test_rename_recursive')))
self.assertFalse(self.client.exists(bucket_url('test_rename_recursive/1')))
self.assertTrue(self.client.exists(bucket_url('test_rename_recursive_dest')))
self.assertTrue(self.client.exists(bucket_url('test_rename_recursive_dest/1')))
def test_remove(self):
self.client.put_string('hello', bucket_url('test_remove'))
self.client.remove(bucket_url('test_remove'))
self.assertFalse(self.client.exists(bucket_url('test_remove')))
def test_remove_recursive(self):
self.client.mkdir(bucket_url('test_remove_recursive'))
self.client.put_string('hello', bucket_url('test_remove_recursive/1'))
self.client.put_string('hello', bucket_url('test_remove_recursive/2'))
self.client.remove(bucket_url('test_remove_recursive'))
self.assertFalse(self.client.exists(bucket_url('test_remove_recursive')))
self.assertFalse(self.client.exists(bucket_url('test_remove_recursive/1')))
self.assertFalse(self.client.exists(bucket_url('test_remove_recursive/2')))
def test_listdir(self):
self.client.put_string('hello', bucket_url('test_listdir/1'))
self.client.put_string('hello', bucket_url('test_listdir/2'))
self.assertEqual([bucket_url('test_listdir/1'), bucket_url('test_listdir/2')],
list(self.client.listdir(bucket_url('test_listdir/'))))
self.assertEqual([bucket_url('test_listdir/1'), bucket_url('test_listdir/2')],
list(self.client.listdir(bucket_url('test_listdir'))))
def test_put_file(self):
with tempfile.NamedTemporaryFile() as fp:
lorem = 'Lorem ipsum dolor sit amet, consectetuer adipiscing elit, sed diam nonummy nibh euismod tincidunt\n'
# Larger file than chunk size, fails with incorrect progress set up
big = lorem * 41943
fp.write(big)
fp.flush()
self.client.put(fp.name, bucket_url('test_put_file'))
self.assertTrue(self.client.exists(bucket_url('test_put_file')))
self.assertEquals(big, self.client.download(bucket_url('test_put_file')).read())
@attr('gcloud')
class GCSTargetTest(_GCSBaseTestCase, FileSystemTargetTestMixin):
def create_target(self, format=None):
return gcs.GCSTarget(bucket_url(self.id()), format=format, client=self.client)
| 1 | 13,062 | You don't need to fix this (you've done so many iterations). But for next time, you can make this into a docstring so it'll have a nicer descriptive name when the tests are running. | spotify-luigi | py |
@@ -90,6 +90,15 @@ func (eni *ENIAttachment) StopAckTimer() {
eni.ackTimer.Stop()
}
+// HasExpired returns true if the ENI attachment object has exceeded the
+// threshold for notifying the backend of the attachment
+func (eni *ENIAttachment) HasExpired() bool {
+ eni.guard.RLock()
+ defer eni.guard.RUnlock()
+
+ return time.Now().After(eni.ExpiresAt)
+}
+
// String returns a string representation of the ENI Attachment
func (eni *ENIAttachment) String() string {
eni.guard.RLock() | 1 | // Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may
// not use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language governing
// permissions and limitations under the License.
package api
import (
"fmt"
"sync"
"time"
"github.com/aws/amazon-ecs-agent/agent/utils/ttime"
"github.com/cihub/seelog"
"github.com/pkg/errors"
)
// ENIAttachment contains the information of the eni attachment
type ENIAttachment struct {
// TaskARN is the task identifier from ecs
TaskARN string `json:"taskArn"`
// AttachmentARN is the identifier for the eni attachment
AttachmentARN string `json:"attachmentArn"`
// AttachStatusSent indicates whether the attached status has been sent to backend
AttachStatusSent bool `json:"attachSent"`
// MACAddress is the mac address of eni
MACAddress string `json:"macAddress"`
// Status is the status of the eni: none/attached/detached
Status ENIAttachmentStatus `json:"status"`
// ExpiresAt is the timestamp past which the ENI Attachment is considered
	// unsuccessful. The SubmitTaskStateChange API, with the attachment information,
// should be invoked before this timestamp.
ExpiresAt time.Time `json:"expiresAt"`
	// ackTimer is used to register the expiration timeout callback for unsuccessful
// ENI attachments
ackTimer ttime.Timer
// guard protects access to fields of this struct
guard sync.RWMutex
}
// StartTimer starts the ack timer to record the expiration of ENI attachment
func (eni *ENIAttachment) StartTimer(timeoutFunc func()) error {
eni.guard.Lock()
defer eni.guard.Unlock()
if eni.ackTimer != nil {
// The timer has already been initialized, do nothing
return nil
}
now := time.Now()
duration := eni.ExpiresAt.Sub(now)
if duration <= 0 {
return errors.Errorf("eni attachment: timer expiration is in the past; expiration [%s] < now [%s]",
eni.ExpiresAt.String(), now.String())
}
seelog.Infof("Starting ENI ack timer with duration=%s, %s", duration.String(), eni.stringUnsafe())
eni.ackTimer = time.AfterFunc(duration, timeoutFunc)
return nil
}
// IsSent checks if the eni attached status has been sent
func (eni *ENIAttachment) IsSent() bool {
eni.guard.RLock()
defer eni.guard.RUnlock()
return eni.AttachStatusSent
}
// SetSentStatus marks the eni attached status has been sent
func (eni *ENIAttachment) SetSentStatus() {
eni.guard.Lock()
defer eni.guard.Unlock()
eni.AttachStatusSent = true
}
// StopAckTimer stops the ack timer set on the ENI attachment
func (eni *ENIAttachment) StopAckTimer() {
eni.guard.Lock()
defer eni.guard.Unlock()
eni.ackTimer.Stop()
}
// String returns a string representation of the ENI Attachment
func (eni *ENIAttachment) String() string {
eni.guard.RLock()
defer eni.guard.RUnlock()
return eni.stringUnsafe()
}
// stringUnsafe returns a string representation of the ENI Attachment
func (eni *ENIAttachment) stringUnsafe() string {
return fmt.Sprintf(
"ENI Attachment: task=%s;attachment=%s;attachmentSent=%t;mac=%s;status=%s;expiresAt=%s",
eni.TaskARN, eni.AttachmentARN, eni.AttachStatusSent, eni.MACAddress, eni.Status.String(), eni.ExpiresAt.String())
}
| 1 | 17,682 | When does 'expiresAt' change? Do you need this? | aws-amazon-ecs-agent | go |
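As a rough illustration of how the ack-timer API above is meant to be driven, here is a hypothetical caller sketch. It uses only the methods shown in this file (plus `HasExpired` from the patch); the ARNs, MAC address, and timeout are placeholders, and the import paths for `time` and the `api` package are assumed.

func trackENIAttachment() {
	attachment := &api.ENIAttachment{
		TaskARN:       "arn:aws:ecs:region:account:task/example",       // placeholder
		AttachmentARN: "arn:aws:ecs:region:account:attachment/example", // placeholder
		MACAddress:    "0a:1b:2c:3d:4e:5f",                             // placeholder
		ExpiresAt:     time.Now().Add(5 * time.Minute),
	}

	// Register a callback that fires if the attachment is never acknowledged
	// before ExpiresAt; StartTimer returns an error if ExpiresAt is already
	// in the past.
	if err := attachment.StartTimer(func() {
		// e.g. clean up local state for the expired attachment
	}); err != nil {
		return
	}

	// Once the attached status has been reported to the backend:
	attachment.SetSentStatus()
	attachment.StopAckTimer()

	// With the HasExpired method added in the patch, callers could also poll:
	if attachment.HasExpired() {
		// e.g. drop or garbage-collect the attachment
	}
}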
@@ -75,3 +75,15 @@ const Outfit* Outfits::getOutfitByLookType(PlayerSex_t sex, uint16_t lookType) c
}
return nullptr;
}
+
+const Outfit* Outfits::getOutfitByLookType(uint16_t lookType) const
+{
+ for (uint8_t sex = 0; sex <= 1; sex++) {
+ for (const Outfit& outfit : outfits[sex]) {
+ if (outfit.lookType == lookType) {
+ return &outfit;
+ }
+ }
+ }
+ return nullptr;
+} | 1 | /**
* The Forgotten Server - a free and open-source MMORPG server emulator
* Copyright (C) 2019 Mark Samman <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include "otpch.h"
#include "outfit.h"
#include "pugicast.h"
#include "tools.h"
bool Outfits::loadFromXml()
{
pugi::xml_document doc;
pugi::xml_parse_result result = doc.load_file("data/XML/outfits.xml");
if (!result) {
printXMLError("Error - Outfits::loadFromXml", "data/XML/outfits.xml", result);
return false;
}
for (auto outfitNode : doc.child("outfits").children()) {
pugi::xml_attribute attr;
if ((attr = outfitNode.attribute("enabled")) && !attr.as_bool()) {
continue;
}
if (!(attr = outfitNode.attribute("type"))) {
std::cout << "[Warning - Outfits::loadFromXml] Missing outfit type." << std::endl;
continue;
}
uint16_t type = pugi::cast<uint16_t>(attr.value());
if (type > PLAYERSEX_LAST) {
std::cout << "[Warning - Outfits::loadFromXml] Invalid outfit type " << type << "." << std::endl;
continue;
}
pugi::xml_attribute lookTypeAttribute = outfitNode.attribute("looktype");
if (!lookTypeAttribute) {
std::cout << "[Warning - Outfits::loadFromXml] Missing looktype on outfit." << std::endl;
continue;
}
outfits[type].emplace_back(
outfitNode.attribute("name").as_string(),
pugi::cast<uint16_t>(lookTypeAttribute.value()),
outfitNode.attribute("premium").as_bool(),
outfitNode.attribute("unlocked").as_bool(true)
);
}
return true;
}
const Outfit* Outfits::getOutfitByLookType(PlayerSex_t sex, uint16_t lookType) const
{
for (const Outfit& outfit : outfits[sex]) {
if (outfit.lookType == lookType) {
return &outfit;
}
}
return nullptr;
}
| 1 | 17,215 | There are enums for genders + `PLAYERSEX_LAST`. | otland-forgottenserver | cpp |
@@ -23,9 +23,14 @@ func TestCalculateRollupFee(t *testing.T) {
data := make([]byte, 0, tt.dataLen)
fee := CalculateRollupFee(data, tt.gasUsed, big.NewInt(tt.dataPrice), big.NewInt(tt.executionPrice))
- dataFee := uint64((RollupBaseTxSize + len(data)) * int(tt.dataPrice))
+ zeroes, ones := zeroesAndOnes(data)
+ zeroesCost := zeroes * 4
+ onesCost := (96 + ones) * 16
+ dataCost := zeroesCost + onesCost
+ dataFee := int64(dataCost) * tt.dataPrice
+
executionFee := uint64(tt.executionPrice) * tt.gasUsed
- expectedFee := dataFee + executionFee
+ expectedFee := uint64(dataFee) + executionFee
if fee.Cmp(big.NewInt(int64(expectedFee))) != 0 {
t.Errorf("rollup fee check failed: expected %d, got %s", expectedFee, fee.String())
} | 1 | package core
import (
"math/big"
"testing"
)
var feeTests = map[string]struct {
dataLen int
gasUsed uint64
dataPrice int64
executionPrice int64
}{
"simple": {10000, 10, 20, 30},
"zero gas used": {10000, 0, 20, 30},
"zero data price": {10000, 0, 0, 30},
"zero execution price": {10000, 0, 0, 0},
}
func TestCalculateRollupFee(t *testing.T) {
for name, tt := range feeTests {
t.Run(name, func(t *testing.T) {
data := make([]byte, 0, tt.dataLen)
fee := CalculateRollupFee(data, tt.gasUsed, big.NewInt(tt.dataPrice), big.NewInt(tt.executionPrice))
dataFee := uint64((RollupBaseTxSize + len(data)) * int(tt.dataPrice))
executionFee := uint64(tt.executionPrice) * tt.gasUsed
expectedFee := dataFee + executionFee
if fee.Cmp(big.NewInt(int64(expectedFee))) != 0 {
t.Errorf("rollup fee check failed: expected %d, got %s", expectedFee, fee.String())
}
})
}
}
| 1 | 15,226 | Should this use the constants as well? Or is this intentionally covering the fact that the constants may change? | ethereum-optimism | go
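One way to read the first question in the comment above is to name the magic numbers once and reuse them when building the expectation, so the test follows the production constants if they change. The identifiers below are assumptions made for illustration; if the codebase already exports equivalents (for example in a params or rollup-fee package), those should be used instead of redefining them.

// expectedRollupDataFee mirrors the arithmetic in the patch, but with named
// constants. zeroes and ones are the byte counts from zeroesAndOnes(data).
func expectedRollupDataFee(zeroes, ones uint64, dataPrice int64) int64 {
	const (
		zeroByteGas    = 4  // calldata gas per zero byte
		nonZeroByteGas = 16 // calldata gas per non-zero byte (EIP-2028)
		rollupOverhead = 96 // assumed fixed per-transaction byte overhead
	)
	dataCost := zeroes*zeroByteGas + (rollupOverhead+ones)*nonZeroByteGas
	return int64(dataCost) * dataPrice
}

The test body could then compute `dataFee := expectedRollupDataFee(zeroes, ones, tt.dataPrice)` and keep the execution-fee arithmetic unchanged.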
@@ -58,7 +58,10 @@ class TopNContainer(object):
return self.extras
def __len__(self):
- return self._size
+ if self._size >= 0:
+ return self._size
+ else:
+ return len(self.best)
def __getitem__(self, which):
return self.best[which], self.extras[which] | 1 | # $Id$
#
# Copyright (C) 2003-2013 Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
from __future__ import print_function
import bisect
class TopNContainer(object):
""" maintains a sorted list of a particular number of data elements.
"""
def __init__(self, size, mostNeg=-1e99):
"""
if size is negative, all entries will be kept in sorted order
"""
self._size = size
if (size >= 0):
self.best = [mostNeg] * self._size
self.extras = [None] * self._size
else:
self.best = []
self.extras = []
def Insert(self, val, extra=None):
""" only does the insertion if val fits """
if self._size >= 0:
if val > self.best[0]:
idx = bisect.bisect(self.best, val)
# insert the new element
if idx == self._size:
self.best.append(val)
self.extras.append(extra)
else:
self.best.insert(idx, val)
self.extras.insert(idx, extra)
# and pop off the head
self.best.pop(0)
self.extras.pop(0)
else:
idx = bisect.bisect(self.best, val)
self.best.insert(idx, val)
self.extras.insert(idx, extra)
def GetPts(self):
""" returns our set of points """
return self.best
def GetExtras(self):
""" returns our set of extras """
return self.extras
def __len__(self):
return self._size
def __getitem__(self, which):
return self.best[which], self.extras[which]
def reverse(self):
self.best.reverse()
self.extras.reverse()
if __name__ == '__main__':
import random
pts = [int(100 * random.random()) for x in range(10)]
c = TopNContainer(4)
for pt in pts:
c.Insert(pt, extra=str(pt))
print(c.GetPts())
print(c.GetExtras())
| 1 | 15,553 | The case of a negative size (= keep all elements) was not handled in this function. | rdkit-rdkit | cpp |
@@ -70,13 +70,13 @@ var keys = map[Key]string{
EnableNamespaceNotActiveAutoForwarding: "system.enableNamespaceNotActiveAutoForwarding",
TransactionSizeLimit: "system.transactionSizeLimit",
MinRetentionDays: "system.minRetentionDays",
- MaxWorkflowTaskTimeout: "system.maxWorkflowTaskTimeout",
DisallowQuery: "system.disallowQuery",
EnableBatcher: "worker.enableBatcher",
EnableParentClosePolicyWorker: "system.enableParentClosePolicyWorker",
EnableStickyQuery: "system.enableStickyQuery",
EnablePriorityTaskProcessor: "system.enablePriorityTaskProcessor",
EnableAuthorization: "system.enableAuthorization",
+ EnableInfiniteTimeout: "system.enableInfiniteTimeout",
// size limit
BlobSizeLimitError: "limit.blobSize.error", | 1 | // The MIT License
//
// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved.
//
// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package dynamicconfig
import (
enumspb "go.temporal.io/api/enums/v1"
)
// Key represents a key/property stored in dynamic config
type Key int
func (k Key) String() string {
keyName, ok := keys[k]
if !ok {
return keys[unknownKey]
}
return keyName
}
// Mapping from Key to keyName, where keyName are used dynamic config source.
var keys = map[Key]string{
unknownKey: "unknownKey",
// tests keys
testGetPropertyKey: "testGetPropertyKey",
testGetIntPropertyKey: "testGetIntPropertyKey",
testGetFloat64PropertyKey: "testGetFloat64PropertyKey",
testGetDurationPropertyKey: "testGetDurationPropertyKey",
testGetBoolPropertyKey: "testGetBoolPropertyKey",
testGetStringPropertyKey: "testGetStringPropertyKey",
testGetMapPropertyKey: "testGetMapPropertyKey",
testGetIntPropertyFilteredByNamespaceKey: "testGetIntPropertyFilteredByNamespaceKey",
testGetDurationPropertyFilteredByNamespaceKey: "testGetDurationPropertyFilteredByNamespaceKey",
testGetIntPropertyFilteredByTaskQueueInfoKey: "testGetIntPropertyFilteredByTaskQueueInfoKey",
testGetDurationPropertyFilteredByTaskQueueInfoKey: "testGetDurationPropertyFilteredByTaskQueueInfoKey",
testGetBoolPropertyFilteredByNamespaceIDKey: "testGetBoolPropertyFilteredByNamespaceIDKey",
testGetBoolPropertyFilteredByTaskQueueInfoKey: "testGetBoolPropertyFilteredByTaskQueueInfoKey",
// system settings
EnableGlobalNamespace: "system.enableGlobalNamespace",
EnableVisibilitySampling: "system.enableVisibilitySampling",
AdvancedVisibilityWritingMode: "system.advancedVisibilityWritingMode",
EnableReadVisibilityFromES: "system.enableReadVisibilityFromES",
HistoryArchivalState: "system.historyArchivalState",
EnableReadFromHistoryArchival: "system.enableReadFromHistoryArchival",
VisibilityArchivalState: "system.visibilityArchivalState",
EnableReadFromVisibilityArchival: "system.enableReadFromVisibilityArchival",
EnableNamespaceNotActiveAutoForwarding: "system.enableNamespaceNotActiveAutoForwarding",
TransactionSizeLimit: "system.transactionSizeLimit",
MinRetentionDays: "system.minRetentionDays",
MaxWorkflowTaskTimeout: "system.maxWorkflowTaskTimeout",
DisallowQuery: "system.disallowQuery",
EnableBatcher: "worker.enableBatcher",
EnableParentClosePolicyWorker: "system.enableParentClosePolicyWorker",
EnableStickyQuery: "system.enableStickyQuery",
EnablePriorityTaskProcessor: "system.enablePriorityTaskProcessor",
EnableAuthorization: "system.enableAuthorization",
// size limit
BlobSizeLimitError: "limit.blobSize.error",
BlobSizeLimitWarn: "limit.blobSize.warn",
HistorySizeLimitError: "limit.historySize.error",
HistorySizeLimitWarn: "limit.historySize.warn",
HistoryCountLimitError: "limit.historyCount.error",
HistoryCountLimitWarn: "limit.historyCount.warn",
MaxIDLengthLimit: "limit.maxIDLength",
// frontend settings
FrontendPersistenceMaxQPS: "frontend.persistenceMaxQPS",
FrontendPersistenceGlobalMaxQPS: "frontend.persistenceGlobalMaxQPS",
FrontendVisibilityMaxPageSize: "frontend.visibilityMaxPageSize",
FrontendVisibilityListMaxQPS: "frontend.visibilityListMaxQPS",
FrontendESVisibilityListMaxQPS: "frontend.esVisibilityListMaxQPS",
FrontendMaxBadBinaries: "frontend.maxBadBinaries",
FrontendESIndexMaxResultWindow: "frontend.esIndexMaxResultWindow",
FrontendHistoryMaxPageSize: "frontend.historyMaxPageSize",
FrontendRPS: "frontend.rps",
FrontendMaxNamespaceRPSPerInstance: "frontend.namespacerps",
FrontendGlobalNamespaceRPS: "frontend.globalNamespacerps",
FrontendHistoryMgrNumConns: "frontend.historyMgrNumConns",
FrontendShutdownDrainDuration: "frontend.shutdownDrainDuration",
DisableListVisibilityByFilter: "frontend.disableListVisibilityByFilter",
FrontendThrottledLogRPS: "frontend.throttledLogRPS",
EnableClientVersionCheck: "frontend.enableClientVersionCheck",
ValidSearchAttributes: "frontend.validSearchAttributes",
SendRawWorkflowHistory: "frontend.sendRawWorkflowHistory",
FrontendEnableRPCReplication: "frontend.enableRPCReplication",
FrontendEnableCleanupReplicationTask: "frontend.enableCleanupReplicationTask",
SearchAttributesNumberOfKeysLimit: "frontend.searchAttributesNumberOfKeysLimit",
SearchAttributesSizeOfValueLimit: "frontend.searchAttributesSizeOfValueLimit",
SearchAttributesTotalSizeLimit: "frontend.searchAttributesTotalSizeLimit",
VisibilityArchivalQueryMaxPageSize: "frontend.visibilityArchivalQueryMaxPageSize",
VisibilityArchivalQueryMaxRangeInDays: "frontend.visibilityArchivalQueryMaxRangeInDays",
VisibilityArchivalQueryMaxQPS: "frontend.visibilityArchivalQueryMaxQPS",
EnableServerVersionCheck: "frontend.enableServerVersionCheck",
// matching settings
MatchingRPS: "matching.rps",
MatchingPersistenceMaxQPS: "matching.persistenceMaxQPS",
MatchingPersistenceGlobalMaxQPS: "matching.persistenceGlobalMaxQPS",
MatchingMinTaskThrottlingBurstSize: "matching.minTaskThrottlingBurstSize",
MatchingGetTasksBatchSize: "matching.getTasksBatchSize",
MatchingLongPollExpirationInterval: "matching.longPollExpirationInterval",
MatchingEnableSyncMatch: "matching.enableSyncMatch",
MatchingUpdateAckInterval: "matching.updateAckInterval",
MatchingIdleTaskqueueCheckInterval: "matching.idleTaskqueueCheckInterval",
MaxTaskqueueIdleTime: "matching.maxTaskqueueIdleTime",
MatchingOutstandingTaskAppendsThreshold: "matching.outstandingTaskAppendsThreshold",
MatchingMaxTaskBatchSize: "matching.maxTaskBatchSize",
MatchingMaxTaskDeleteBatchSize: "matching.maxTaskDeleteBatchSize",
MatchingThrottledLogRPS: "matching.throttledLogRPS",
MatchingNumTaskqueueWritePartitions: "matching.numTaskqueueWritePartitions",
MatchingNumTaskqueueReadPartitions: "matching.numTaskqueueReadPartitions",
MatchingForwarderMaxOutstandingPolls: "matching.forwarderMaxOutstandingPolls",
MatchingForwarderMaxOutstandingTasks: "matching.forwarderMaxOutstandingTasks",
MatchingForwarderMaxRatePerSecond: "matching.forwarderMaxRatePerSecond",
MatchingForwarderMaxChildrenPerNode: "matching.forwarderMaxChildrenPerNode",
MatchingShutdownDrainDuration: "matching.shutdownDrainDuration",
// history settings
HistoryRPS: "history.rps",
HistoryPersistenceMaxQPS: "history.persistenceMaxQPS",
HistoryPersistenceGlobalMaxQPS: "history.persistenceGlobalMaxQPS",
HistoryVisibilityOpenMaxQPS: "history.historyVisibilityOpenMaxQPS",
HistoryVisibilityClosedMaxQPS: "history.historyVisibilityClosedMaxQPS",
HistoryLongPollExpirationInterval: "history.longPollExpirationInterval",
HistoryCacheInitialSize: "history.cacheInitialSize",
HistoryMaxAutoResetPoints: "history.historyMaxAutoResetPoints",
HistoryCacheMaxSize: "history.cacheMaxSize",
HistoryCacheTTL: "history.cacheTTL",
HistoryShutdownDrainDuration: "history.shutdownDrainDuration",
EventsCacheInitialSize: "history.eventsCacheInitialSize",
EventsCacheMaxSize: "history.eventsCacheMaxSize",
EventsCacheTTL: "history.eventsCacheTTL",
AcquireShardInterval: "history.acquireShardInterval",
AcquireShardConcurrency: "history.acquireShardConcurrency",
StandbyClusterDelay: "history.standbyClusterDelay",
StandbyTaskMissingEventsResendDelay: "history.standbyTaskMissingEventsResendDelay",
StandbyTaskMissingEventsDiscardDelay: "history.standbyTaskMissingEventsDiscardDelay",
TaskProcessRPS: "history.taskProcessRPS",
TaskSchedulerType: "history.taskSchedulerType",
TaskSchedulerWorkerCount: "history.taskSchedulerWorkerCount",
TaskSchedulerQueueSize: "history.taskSchedulerQueueSize",
TaskSchedulerRoundRobinWeights: "history.taskSchedulerRoundRobinWeight",
TimerTaskBatchSize: "history.timerTaskBatchSize",
TimerTaskWorkerCount: "history.timerTaskWorkerCount",
TimerTaskMaxRetryCount: "history.timerTaskMaxRetryCount",
TimerProcessorGetFailureRetryCount: "history.timerProcessorGetFailureRetryCount",
TimerProcessorCompleteTimerFailureRetryCount: "history.timerProcessorCompleteTimerFailureRetryCount",
TimerProcessorUpdateShardTaskCount: "history.timerProcessorUpdateShardTaskCount",
TimerProcessorUpdateAckInterval: "history.timerProcessorUpdateAckInterval",
TimerProcessorUpdateAckIntervalJitterCoefficient: "history.timerProcessorUpdateAckIntervalJitterCoefficient",
TimerProcessorCompleteTimerInterval: "history.timerProcessorCompleteTimerInterval",
TimerProcessorFailoverMaxPollRPS: "history.timerProcessorFailoverMaxPollRPS",
TimerProcessorMaxPollRPS: "history.timerProcessorMaxPollRPS",
TimerProcessorMaxPollInterval: "history.timerProcessorMaxPollInterval",
TimerProcessorMaxPollIntervalJitterCoefficient: "history.timerProcessorMaxPollIntervalJitterCoefficient",
TimerProcessorRedispatchInterval: "history.timerProcessorRedispatchInterval",
TimerProcessorRedispatchIntervalJitterCoefficient: "history.timerProcessorRedispatchIntervalJitterCoefficient",
TimerProcessorMaxRedispatchQueueSize: "history.timerProcessorMaxRedispatchQueueSize",
TimerProcessorEnablePriorityTaskProcessor: "history.timerProcessorEnablePriorityTaskProcessor",
TimerProcessorMaxTimeShift: "history.timerProcessorMaxTimeShift",
TimerProcessorHistoryArchivalSizeLimit: "history.timerProcessorHistoryArchivalSizeLimit",
TimerProcessorArchivalTimeLimit: "history.timerProcessorArchivalTimeLimit",
TransferTaskBatchSize: "history.transferTaskBatchSize",
TransferProcessorFailoverMaxPollRPS: "history.transferProcessorFailoverMaxPollRPS",
TransferProcessorMaxPollRPS: "history.transferProcessorMaxPollRPS",
TransferTaskWorkerCount: "history.transferTaskWorkerCount",
TransferTaskMaxRetryCount: "history.transferTaskMaxRetryCount",
TransferProcessorCompleteTransferFailureRetryCount: "history.transferProcessorCompleteTransferFailureRetryCount",
TransferProcessorUpdateShardTaskCount: "history.transferProcessorUpdateShardTaskCount",
TransferProcessorMaxPollInterval: "history.transferProcessorMaxPollInterval",
TransferProcessorMaxPollIntervalJitterCoefficient: "history.transferProcessorMaxPollIntervalJitterCoefficient",
TransferProcessorUpdateAckInterval: "history.transferProcessorUpdateAckInterval",
TransferProcessorUpdateAckIntervalJitterCoefficient: "history.transferProcessorUpdateAckIntervalJitterCoefficient",
TransferProcessorCompleteTransferInterval: "history.transferProcessorCompleteTransferInterval",
TransferProcessorRedispatchInterval: "history.transferProcessorRedispatchInterval",
TransferProcessorRedispatchIntervalJitterCoefficient: "history.transferProcessorRedispatchIntervalJitterCoefficient",
TransferProcessorMaxRedispatchQueueSize: "history.transferProcessorMaxRedispatchQueueSize",
TransferProcessorEnablePriorityTaskProcessor: "history.transferProcessorEnablePriorityTaskProcessor",
TransferProcessorVisibilityArchivalTimeLimit: "history.transferProcessorVisibilityArchivalTimeLimit",
ReplicatorTaskBatchSize: "history.replicatorTaskBatchSize",
ReplicatorTaskWorkerCount: "history.replicatorTaskWorkerCount",
ReplicatorTaskMaxRetryCount: "history.replicatorTaskMaxRetryCount",
ReplicatorProcessorMaxPollRPS: "history.replicatorProcessorMaxPollRPS",
ReplicatorProcessorUpdateShardTaskCount: "history.replicatorProcessorUpdateShardTaskCount",
ReplicatorProcessorMaxPollInterval: "history.replicatorProcessorMaxPollInterval",
ReplicatorProcessorMaxPollIntervalJitterCoefficient: "history.replicatorProcessorMaxPollIntervalJitterCoefficient",
ReplicatorProcessorUpdateAckInterval: "history.replicatorProcessorUpdateAckInterval",
ReplicatorProcessorUpdateAckIntervalJitterCoefficient: "history.replicatorProcessorUpdateAckIntervalJitterCoefficient",
ReplicatorProcessorRedispatchInterval: "history.replicatorProcessorRedispatchInterval",
ReplicatorProcessorRedispatchIntervalJitterCoefficient: "history.replicatorProcessorRedispatchIntervalJitterCoefficient",
ReplicatorProcessorMaxRedispatchQueueSize: "history.replicatorProcessorMaxRedispatchQueueSize",
ReplicatorProcessorEnablePriorityTaskProcessor: "history.replicatorProcessorEnablePriorityTaskProcessor",
ExecutionMgrNumConns: "history.executionMgrNumConns",
HistoryMgrNumConns: "history.historyMgrNumConns",
MaximumBufferedEventsBatch: "history.maximumBufferedEventsBatch",
MaximumSignalsPerExecution: "history.maximumSignalsPerExecution",
ShardUpdateMinInterval: "history.shardUpdateMinInterval",
ShardSyncMinInterval: "history.shardSyncMinInterval",
ShardSyncTimerJitterCoefficient: "history.shardSyncMinInterval",
DefaultEventEncoding: "history.defaultEventEncoding",
EnableAdminProtection: "history.enableAdminProtection",
AdminOperationToken: "history.adminOperationToken",
EnableParentClosePolicy: "history.enableParentClosePolicy",
NumArchiveSystemWorkflows: "history.numArchiveSystemWorkflows",
ArchiveRequestRPS: "history.archiveRequestRPS",
EmitShardDiffLog: "history.emitShardDiffLog",
HistoryThrottledLogRPS: "history.throttledLogRPS",
StickyTTL: "history.stickyTTL",
DefaultWorkflowExecutionTimeout: "history.defaultWorkflowExecutionTimeout",
DefaultWorkflowRunTimeout: "history.defaultWorkflowRunTimeout",
MaxWorkflowExecutionTimeout: "history.maximumWorkflowExecutionTimeout",
MaxWorkflowRunTimeout: "history.maximumWorkflowRunTimeout",
WorkflowTaskHeartbeatTimeout: "history.workflowTaskHeartbeatTimeout",
DefaultWorkflowTaskTimeout: "history.defaultWorkflowTaskTimeout",
ParentClosePolicyThreshold: "history.parentClosePolicyThreshold",
NumParentClosePolicySystemWorkflows: "history.numParentClosePolicySystemWorkflows",
ReplicationTaskFetcherParallelism: "history.ReplicationTaskFetcherParallelism",
ReplicationTaskFetcherAggregationInterval: "history.ReplicationTaskFetcherAggregationInterval",
ReplicationTaskFetcherTimerJitterCoefficient: "history.ReplicationTaskFetcherTimerJitterCoefficient",
ReplicationTaskFetcherErrorRetryWait: "history.ReplicationTaskFetcherErrorRetryWait",
ReplicationTaskProcessorErrorRetryWait: "history.ReplicationTaskProcessorErrorRetryWait",
ReplicationTaskProcessorErrorRetryMaxAttempts: "history.ReplicationTaskProcessorErrorRetryMaxAttempts",
ReplicationTaskProcessorNoTaskInitialWait: "history.ReplicationTaskProcessorNoTaskInitialWait",
ReplicationTaskProcessorCleanupInterval: "history.ReplicationTaskProcessorCleanupInterval",
ReplicationTaskProcessorCleanupJitterCoefficient: "history.ReplicationTaskProcessorCleanupJitterCoefficient",
ReplicationTaskProcessorStartWait: "history.ReplicationTaskProcessorStartWait",
ReplicationTaskProcessorStartWaitJitterCoefficient: "history.ReplicationTaskProcessorStartWaitJitterCoefficient",
ReplicationTaskProcessorHostQPS: "history.ReplicationTaskProcessorHostQPS",
ReplicationTaskProcessorShardQPS: "history.ReplicationTaskProcessorShardQPS",
HistoryEnableRPCReplication: "history.EnableRPCReplication",
HistoryEnableKafkaReplication: "history.EnableKafkaReplication",
HistoryEnableCleanupReplicationTask: "history.EnableCleanupReplicationTask",
MaxBufferedQueryCount: "history.MaxBufferedQueryCount",
MutableStateChecksumGenProbability: "history.mutableStateChecksumGenProbability",
MutableStateChecksumVerifyProbability: "history.mutableStateChecksumVerifyProbability",
MutableStateChecksumInvalidateBefore: "history.mutableStateChecksumInvalidateBefore",
ReplicationEventsFromCurrentCluster: "history.ReplicationEventsFromCurrentCluster",
StandbyTaskReReplicationContextTimeout: "history.standbyTaskReReplicationContextTimeout",
EnableDropStuckTaskByNamespaceID: "history.DropStuckTaskByNamespace",
SkipReapplicationByNamespaceId: "history.SkipReapplicationByNamespaceId",
DefaultActivityRetryPolicy: "history.defaultActivityRetryPolicy",
DefaultWorkflowRetryPolicy: "history.defaultWorkflowRetryPolicy",
WorkerPersistenceMaxQPS: "worker.persistenceMaxQPS",
WorkerPersistenceGlobalMaxQPS: "worker.persistenceGlobalMaxQPS",
WorkerReplicatorMetaTaskConcurrency: "worker.replicatorMetaTaskConcurrency",
WorkerReplicatorTaskConcurrency: "worker.replicatorTaskConcurrency",
WorkerReplicatorMessageConcurrency: "worker.replicatorMessageConcurrency",
WorkerReplicatorActivityBufferRetryCount: "worker.replicatorActivityBufferRetryCount",
WorkerReplicatorHistoryBufferRetryCount: "worker.replicatorHistoryBufferRetryCount",
WorkerReplicationTaskMaxRetryCount: "worker.replicationTaskMaxRetryCount",
WorkerReplicationTaskMaxRetryDuration: "worker.replicationTaskMaxRetryDuration",
WorkerReplicationTaskContextDuration: "worker.replicationTaskContextDuration",
WorkerReReplicationContextTimeout: "worker.workerReReplicationContextTimeout",
WorkerEnableRPCReplication: "worker.enableWorkerRPCReplication",
WorkerEnableKafkaReplication: "worker.enableKafkaReplication",
WorkerIndexerConcurrency: "worker.indexerConcurrency",
WorkerESProcessorNumOfWorkers: "worker.ESProcessorNumOfWorkers",
WorkerESProcessorBulkActions: "worker.ESProcessorBulkActions",
WorkerESProcessorBulkSize: "worker.ESProcessorBulkSize",
WorkerESProcessorFlushInterval: "worker.ESProcessorFlushInterval",
EnableArchivalCompression: "worker.EnableArchivalCompression",
WorkerHistoryPageSize: "worker.WorkerHistoryPageSize",
WorkerTargetArchivalBlobSize: "worker.WorkerTargetArchivalBlobSize",
WorkerArchiverConcurrency: "worker.ArchiverConcurrency",
WorkerArchivalsPerIteration: "worker.ArchivalsPerIteration",
WorkerDeterministicConstructionCheckProbability: "worker.DeterministicConstructionCheckProbability",
WorkerBlobIntegrityCheckProbability: "worker.BlobIntegrityCheckProbability",
WorkerTimeLimitPerArchivalIteration: "worker.TimeLimitPerArchivalIteration",
WorkerThrottledLogRPS: "worker.throttledLogRPS",
ScannerPersistenceMaxQPS: "worker.scannerPersistenceMaxQPS",
TaskQueueScannerEnabled: "worker.taskQueueScannerEnabled",
HistoryScannerEnabled: "worker.historyScannerEnabled",
ExecutionsScannerEnabled: "worker.executionsScannerEnabled",
}
const (
unknownKey Key = iota
// key for tests
testGetPropertyKey
testGetIntPropertyKey
testGetFloat64PropertyKey
testGetDurationPropertyKey
testGetBoolPropertyKey
testGetStringPropertyKey
testGetMapPropertyKey
testGetIntPropertyFilteredByNamespaceKey
testGetDurationPropertyFilteredByNamespaceKey
testGetIntPropertyFilteredByTaskQueueInfoKey
testGetDurationPropertyFilteredByTaskQueueInfoKey
testGetBoolPropertyFilteredByNamespaceIDKey
testGetBoolPropertyFilteredByTaskQueueInfoKey
// EnableGlobalNamespace is key for enable global namespace
EnableGlobalNamespace
// EnableVisibilitySampling is key for enable visibility sampling
EnableVisibilitySampling
// AdvancedVisibilityWritingMode is key for how to write to advanced visibility
AdvancedVisibilityWritingMode
	// EmitShardDiffLog controls whether to emit the shard diff log
EmitShardDiffLog
// EnableReadVisibilityFromES is key for enable read from elastic search
EnableReadVisibilityFromES
// DisableListVisibilityByFilter is config to disable list open/close workflow using filter
DisableListVisibilityByFilter
// HistoryArchivalState is key for the state of history archival
HistoryArchivalState
// EnableReadFromHistoryArchival is key for enabling reading history from archival store
EnableReadFromHistoryArchival
// VisibilityArchivalState is key for the state of visibility archival
VisibilityArchivalState
// EnableReadFromVisibilityArchival is key for enabling reading visibility from archival store
EnableReadFromVisibilityArchival
	// EnableNamespaceNotActiveAutoForwarding controls whether DC auto-forwarding to the active cluster is enabled
	// for the signal / start / signal-with-start APIs when the namespace is not active
	EnableNamespaceNotActiveAutoForwarding
// TransactionSizeLimit is the largest allowed transaction size to persistence
TransactionSizeLimit
// MinRetentionDays is the minimal allowed retention days for namespace
MinRetentionDays
// MaxWorkflowTaskTimeout is the maximum allowed workflow task start to close timeout
MaxWorkflowTaskTimeout
// DisallowQuery is the key to disallow query for a namespace
DisallowQuery
// EnablePriorityTaskProcessor is the key for enabling priority task processor
EnablePriorityTaskProcessor
// EnableAuthorization is the key to enable authorization for a namespace
EnableAuthorization
// BlobSizeLimitError is the per event blob size limit
BlobSizeLimitError
// BlobSizeLimitWarn is the per event blob size limit for warning
BlobSizeLimitWarn
// HistorySizeLimitError is the per workflow execution history size limit
HistorySizeLimitError
// HistorySizeLimitWarn is the per workflow execution history size limit for warning
HistorySizeLimitWarn
// HistoryCountLimitError is the per workflow execution history event count limit
HistoryCountLimitError
// HistoryCountLimitWarn is the per workflow execution history event count limit for warning
HistoryCountLimitWarn
// MaxIDLengthLimit is the length limit for various IDs, including: Namespace, TaskQueue, WorkflowID, ActivityID, TimerID,
// WorkflowType, ActivityType, SignalName, MarkerName, ErrorReason/FailureReason/CancelCause, Identity, RequestID
MaxIDLengthLimit
// key for frontend
// FrontendPersistenceMaxQPS is the max qps frontend host can query DB
FrontendPersistenceMaxQPS
// FrontendPersistenceGlobalMaxQPS is the max qps frontend cluster can query DB
FrontendPersistenceGlobalMaxQPS
// FrontendVisibilityMaxPageSize is default max size for ListWorkflowExecutions in one page
FrontendVisibilityMaxPageSize
// FrontendVisibilityListMaxQPS is max qps frontend can list open/close workflows
FrontendVisibilityListMaxQPS
// FrontendESVisibilityListMaxQPS is max qps frontend can list open/close workflows from ElasticSearch
FrontendESVisibilityListMaxQPS
// FrontendESIndexMaxResultWindow is ElasticSearch index setting max_result_window
FrontendESIndexMaxResultWindow
// FrontendHistoryMaxPageSize is default max size for GetWorkflowExecutionHistory in one page
FrontendHistoryMaxPageSize
// FrontendRPS is workflow rate limit per second
FrontendRPS
// FrontendMaxNamespaceRPSPerInstance is workflow namespace rate limit per second
FrontendMaxNamespaceRPSPerInstance
// FrontendGlobalNamespaceRPS is workflow namespace rate limit per second for the whole cluster
FrontendGlobalNamespaceRPS
// FrontendHistoryMgrNumConns is for persistence cluster.NumConns
FrontendHistoryMgrNumConns
// FrontendThrottledLogRPS is the rate limit on number of log messages emitted per second for throttled logger
FrontendThrottledLogRPS
// FrontendShutdownDrainDuration is the duration of traffic drain during shutdown
FrontendShutdownDrainDuration
// EnableClientVersionCheck enables client version check for frontend
EnableClientVersionCheck
// FrontendMaxBadBinaries is the max number of bad binaries in namespace config
FrontendMaxBadBinaries
// ValidSearchAttributes is legal indexed keys that can be used in list APIs
ValidSearchAttributes
	// SendRawWorkflowHistory is whether to enable raw history retrieval
SendRawWorkflowHistory
// FrontendEnableRPCReplication is a feature flag for rpc replication
FrontendEnableRPCReplication
// FrontendEnableCleanupReplicationTask is a feature flag for rpc replication cleanup
FrontendEnableCleanupReplicationTask
// SearchAttributesNumberOfKeysLimit is the limit of number of keys
SearchAttributesNumberOfKeysLimit
// SearchAttributesSizeOfValueLimit is the size limit of each value
SearchAttributesSizeOfValueLimit
// SearchAttributesTotalSizeLimit is the size limit of the whole map
SearchAttributesTotalSizeLimit
// VisibilityArchivalQueryMaxPageSize is the maximum page size for a visibility archival query
VisibilityArchivalQueryMaxPageSize
// VisibilityArchivalQueryMaxRangeInDays is the maximum number of days for a visibility archival query
VisibilityArchivalQueryMaxRangeInDays
	// VisibilityArchivalQueryMaxQPS is the max QPS for a visibility archival query
VisibilityArchivalQueryMaxQPS
// EnableServerVersionCheck is a flag that controls whether or not periodic version checking is enabled
EnableServerVersionCheck
// key for matching
// MatchingRPS is request rate per second for each matching host
MatchingRPS
// MatchingPersistenceMaxQPS is the max qps matching host can query DB
MatchingPersistenceMaxQPS
// MatchingPersistenceGlobalMaxQPS is the max qps matching cluster can query DB
MatchingPersistenceGlobalMaxQPS
// MatchingMinTaskThrottlingBurstSize is the minimum burst size for task queue throttling
MatchingMinTaskThrottlingBurstSize
// MatchingGetTasksBatchSize is the maximum batch size to fetch from the task buffer
MatchingGetTasksBatchSize
// MatchingLongPollExpirationInterval is the long poll expiration interval in the matching service
MatchingLongPollExpirationInterval
// MatchingEnableSyncMatch is to enable sync match
MatchingEnableSyncMatch
// MatchingUpdateAckInterval is the interval for update ack
MatchingUpdateAckInterval
// MatchingIdleTaskqueueCheckInterval is the IdleTaskqueueCheckInterval
MatchingIdleTaskqueueCheckInterval
	// MaxTaskqueueIdleTime is the max time a taskqueue can remain idle
MaxTaskqueueIdleTime
// MatchingOutstandingTaskAppendsThreshold is the threshold for outstanding task appends
MatchingOutstandingTaskAppendsThreshold
// MatchingMaxTaskBatchSize is max batch size for task writer
MatchingMaxTaskBatchSize
// MatchingMaxTaskDeleteBatchSize is the max batch size for range deletion of tasks
MatchingMaxTaskDeleteBatchSize
// MatchingThrottledLogRPS is the rate limit on number of log messages emitted per second for throttled logger
MatchingThrottledLogRPS
// MatchingNumTaskqueueWritePartitions is the number of write partitions for a task queue
MatchingNumTaskqueueWritePartitions
// MatchingNumTaskqueueReadPartitions is the number of read partitions for a task queue
MatchingNumTaskqueueReadPartitions
// MatchingForwarderMaxOutstandingPolls is the max number of inflight polls from the forwarder
MatchingForwarderMaxOutstandingPolls
// MatchingForwarderMaxOutstandingTasks is the max number of inflight addTask/queryTask from the forwarder
MatchingForwarderMaxOutstandingTasks
// MatchingForwarderMaxRatePerSecond is the max rate at which add/query can be forwarded
MatchingForwarderMaxRatePerSecond
// MatchingForwarderMaxChildrenPerNode is the max number of children per node in the task queue partition tree
MatchingForwarderMaxChildrenPerNode
// MatchingShutdownDrainDuration is the duration of traffic drain during shutdown
MatchingShutdownDrainDuration
// key for history
// HistoryRPS is request rate per second for each history host
HistoryRPS
// HistoryPersistenceMaxQPS is the max qps history host can query DB
HistoryPersistenceMaxQPS
// HistoryPersistenceGlobalMaxQPS is the max qps history cluster can query DB
HistoryPersistenceGlobalMaxQPS
// HistoryVisibilityOpenMaxQPS is max qps one history host can write visibility open_executions
HistoryVisibilityOpenMaxQPS
// HistoryVisibilityClosedMaxQPS is max qps one history host can write visibility closed_executions
HistoryVisibilityClosedMaxQPS
// HistoryLongPollExpirationInterval is the long poll expiration interval in the history service
HistoryLongPollExpirationInterval
// HistoryCacheInitialSize is initial size of history cache
HistoryCacheInitialSize
// HistoryCacheMaxSize is max size of history cache
HistoryCacheMaxSize
// HistoryCacheTTL is TTL of history cache
HistoryCacheTTL
// HistoryShutdownDrainDuration is the duration of traffic drain during shutdown
HistoryShutdownDrainDuration
// EventsCacheInitialSize is initial size of events cache
EventsCacheInitialSize
// EventsCacheMaxSize is max size of events cache
EventsCacheMaxSize
// EventsCacheTTL is TTL of events cache
EventsCacheTTL
// AcquireShardInterval is interval that timer used to acquire shard
AcquireShardInterval
// AcquireShardConcurrency is number of goroutines that can be used to acquire shards in the shard controller.
AcquireShardConcurrency
// StandbyClusterDelay is the artificial delay added to standby cluster's view of active cluster's time
StandbyClusterDelay
	// StandbyTaskMissingEventsResendDelay is the amount of time the standby cluster will wait (if events are missing)
	// before calling the remote cluster for the missing events
StandbyTaskMissingEventsResendDelay
	// StandbyTaskMissingEventsDiscardDelay is the amount of time the standby cluster will wait (if events are missing)
// before discarding the task
StandbyTaskMissingEventsDiscardDelay
// TaskProcessRPS is the task processing rate per second for each namespace
TaskProcessRPS
// TaskSchedulerType is the task scheduler type for priority task processor
TaskSchedulerType
// TaskSchedulerWorkerCount is the number of workers per shard in task scheduler
TaskSchedulerWorkerCount
// TaskSchedulerQueueSize is the size of task channel size in task scheduler
TaskSchedulerQueueSize
// TaskSchedulerRoundRobinWeights is the priority weight for weighted round robin task scheduler
TaskSchedulerRoundRobinWeights
// TimerTaskBatchSize is batch size for timer processor to process tasks
TimerTaskBatchSize
// TimerTaskWorkerCount is number of task workers for timer processor
TimerTaskWorkerCount
// TimerTaskMaxRetryCount is max retry count for timer processor
TimerTaskMaxRetryCount
// TimerProcessorGetFailureRetryCount is retry count for timer processor get failure operation
TimerProcessorGetFailureRetryCount
// TimerProcessorCompleteTimerFailureRetryCount is retry count for timer processor complete timer operation
TimerProcessorCompleteTimerFailureRetryCount
// TimerProcessorUpdateShardTaskCount is update shard count for timer processor
TimerProcessorUpdateShardTaskCount
// TimerProcessorUpdateAckInterval is update interval for timer processor
TimerProcessorUpdateAckInterval
// TimerProcessorUpdateAckIntervalJitterCoefficient is the update interval jitter coefficient
TimerProcessorUpdateAckIntervalJitterCoefficient
// TimerProcessorCompleteTimerInterval is complete timer interval for timer processor
TimerProcessorCompleteTimerInterval
// TimerProcessorFailoverMaxPollRPS is max poll rate per second for timer processor
TimerProcessorFailoverMaxPollRPS
// TimerProcessorMaxPollRPS is max poll rate per second for timer processor
TimerProcessorMaxPollRPS
// TimerProcessorMaxPollInterval is max poll interval for timer processor
TimerProcessorMaxPollInterval
// TimerProcessorMaxPollIntervalJitterCoefficient is the max poll interval jitter coefficient
TimerProcessorMaxPollIntervalJitterCoefficient
// TimerProcessorRedispatchInterval is the redispatch interval for timer processor
TimerProcessorRedispatchInterval
// TimerProcessorRedispatchIntervalJitterCoefficient is the redispatch interval jitter coefficient
TimerProcessorRedispatchIntervalJitterCoefficient
// TimerProcessorMaxRedispatchQueueSize is the threshold of the number of tasks in the redispatch queue for timer processor
TimerProcessorMaxRedispatchQueueSize
// TimerProcessorEnablePriorityTaskProcessor indicates whether priority task processor should be used for timer processor
TimerProcessorEnablePriorityTaskProcessor
// TimerProcessorMaxTimeShift is the max shift timer processor can have
TimerProcessorMaxTimeShift
// TimerProcessorHistoryArchivalSizeLimit is the max history size for inline archival
TimerProcessorHistoryArchivalSizeLimit
// TimerProcessorArchivalTimeLimit is the upper time limit for inline history archival
TimerProcessorArchivalTimeLimit
// TransferTaskBatchSize is batch size for transferQueueProcessor
TransferTaskBatchSize
// TransferProcessorFailoverMaxPollRPS is max poll rate per second for transferQueueProcessor
TransferProcessorFailoverMaxPollRPS
// TransferProcessorMaxPollRPS is max poll rate per second for transferQueueProcessor
TransferProcessorMaxPollRPS
// TransferTaskWorkerCount is number of worker for transferQueueProcessor
TransferTaskWorkerCount
// TransferTaskMaxRetryCount is max times of retry for transferQueueProcessor
TransferTaskMaxRetryCount
	// TransferProcessorCompleteTransferFailureRetryCount is the number of retries for a failed complete-transfer operation
TransferProcessorCompleteTransferFailureRetryCount
// TransferProcessorUpdateShardTaskCount is update shard count for transferQueueProcessor
TransferProcessorUpdateShardTaskCount
// TransferProcessorMaxPollInterval max poll interval for transferQueueProcessor
TransferProcessorMaxPollInterval
// TransferProcessorMaxPollIntervalJitterCoefficient is the max poll interval jitter coefficient
TransferProcessorMaxPollIntervalJitterCoefficient
// TransferProcessorUpdateAckInterval is update interval for transferQueueProcessor
TransferProcessorUpdateAckInterval
// TransferProcessorUpdateAckIntervalJitterCoefficient is the update interval jitter coefficient
TransferProcessorUpdateAckIntervalJitterCoefficient
// TransferProcessorCompleteTransferInterval is complete timer interval for transferQueueProcessor
TransferProcessorCompleteTransferInterval
// TransferProcessorRedispatchInterval is the redispatch interval for transferQueueProcessor
TransferProcessorRedispatchInterval
// TransferProcessorRedispatchIntervalJitterCoefficient is the redispatch interval jitter coefficient
TransferProcessorRedispatchIntervalJitterCoefficient
// TransferProcessorMaxRedispatchQueueSize is the threshold of the number of tasks in the redispatch queue for transferQueueProcessor
TransferProcessorMaxRedispatchQueueSize
// TransferProcessorEnablePriorityTaskProcessor indicates whether priority task processor should be used for transferQueueProcessor
TransferProcessorEnablePriorityTaskProcessor
// TransferProcessorVisibilityArchivalTimeLimit is the upper time limit for archiving visibility records
TransferProcessorVisibilityArchivalTimeLimit
// ReplicatorTaskBatchSize is batch size for ReplicatorProcessor
ReplicatorTaskBatchSize
// ReplicatorTaskWorkerCount is number of worker for ReplicatorProcessor
ReplicatorTaskWorkerCount
// ReplicatorTaskMaxRetryCount is max times of retry for ReplicatorProcessor
ReplicatorTaskMaxRetryCount
// ReplicatorProcessorMaxPollRPS is max poll rate per second for ReplicatorProcessor
ReplicatorProcessorMaxPollRPS
// ReplicatorProcessorUpdateShardTaskCount is update shard count for ReplicatorProcessor
ReplicatorProcessorUpdateShardTaskCount
// ReplicatorProcessorMaxPollInterval is max poll interval for ReplicatorProcessor
ReplicatorProcessorMaxPollInterval
// ReplicatorProcessorMaxPollIntervalJitterCoefficient is the max poll interval jitter coefficient
ReplicatorProcessorMaxPollIntervalJitterCoefficient
// ReplicatorProcessorUpdateAckInterval is update interval for ReplicatorProcessor
ReplicatorProcessorUpdateAckInterval
// ReplicatorProcessorUpdateAckIntervalJitterCoefficient is the update interval jitter coefficient
ReplicatorProcessorUpdateAckIntervalJitterCoefficient
// ReplicatorProcessorRedispatchInterval is the redispatch interval for ReplicatorProcessor
ReplicatorProcessorRedispatchInterval
// ReplicatorProcessorRedispatchIntervalJitterCoefficient is the redispatch interval jitter coefficient
ReplicatorProcessorRedispatchIntervalJitterCoefficient
// ReplicatorProcessorMaxRedispatchQueueSize is the threshold of the number of tasks in the redispatch queue for ReplicatorProcessor
ReplicatorProcessorMaxRedispatchQueueSize
// ReplicatorProcessorEnablePriorityTaskProcessor indicates whether priority task processor should be used for ReplicatorProcessor
ReplicatorProcessorEnablePriorityTaskProcessor
// ExecutionMgrNumConns is persistence connections number for ExecutionManager
ExecutionMgrNumConns
// HistoryMgrNumConns is persistence connections number for HistoryManager
HistoryMgrNumConns
// MaximumBufferedEventsBatch is max number of buffer event in mutable state
MaximumBufferedEventsBatch
// MaximumSignalsPerExecution is max number of signals supported by single execution
MaximumSignalsPerExecution
// ShardUpdateMinInterval is the minimal time interval which the shard info can be updated
ShardUpdateMinInterval
// ShardSyncMinInterval is the minimal time interval which the shard info should be sync to remote
ShardSyncMinInterval
// ShardSyncTimerJitterCoefficient is the sync shard jitter coefficient
ShardSyncTimerJitterCoefficient
// DefaultEventEncoding is the encoding type for history events
DefaultEventEncoding
// NumArchiveSystemWorkflows is key for number of archive system workflows running in total
NumArchiveSystemWorkflows
// ArchiveRequestRPS is the rate limit on the number of archive request per second
ArchiveRequestRPS
// DefaultActivityRetryPolicy represents the out-of-box retry policy for activities where
// the user has not specified an explicit RetryPolicy
DefaultActivityRetryPolicy
// DefaultWorkflowRetryPolicy represents the out-of-box retry policy for unset fields
// where the user has set an explicit RetryPolicy, but not specified all the fields
DefaultWorkflowRetryPolicy
// EnableAdminProtection is whether to enable admin checking
EnableAdminProtection
// AdminOperationToken is the token to pass admin checking
AdminOperationToken
// HistoryMaxAutoResetPoints is the key for max number of auto reset points stored in mutableState
HistoryMaxAutoResetPoints
	// EnableParentClosePolicy controls whether to process the parent close policy
EnableParentClosePolicy
	// ParentClosePolicyThreshold decides whether the parent close policy will be processed by system workers (if enabled) when
	// the number of children is greater than or equal to this threshold
ParentClosePolicyThreshold
// NumParentClosePolicySystemWorkflows is key for number of parentClosePolicy system workflows running in total
NumParentClosePolicySystemWorkflows
// HistoryThrottledLogRPS is the rate limit on number of log messages emitted per second for throttled logger
HistoryThrottledLogRPS
	// StickyTTL is the duration after which a sticky taskqueue expires if it has not been updated
StickyTTL
// WorkflowTaskHeartbeatTimeout for workflow task heartbeat
WorkflowTaskHeartbeatTimeout
// DefaultWorkflowExecutionTimeout for a workflow execution
DefaultWorkflowExecutionTimeout
// DefaultWorkflowRunTimeout for a workflow run
DefaultWorkflowRunTimeout
// MaxWorkflowExecutionTimeout maximum allowed workflow execution timeout
MaxWorkflowExecutionTimeout
// MaxWorkflowRunTimeout maximum allowed workflow run timeout
MaxWorkflowRunTimeout
// DefaultWorkflowTaskTimeout for a workflow task
DefaultWorkflowTaskTimeout
// EnableDropStuckTaskByNamespaceID is whether stuck timer/transfer task should be dropped for a namespace
EnableDropStuckTaskByNamespaceID
	// SkipReapplicationByNamespaceId is whether to skip event re-application for a namespace
SkipReapplicationByNamespaceId
// key for worker
// WorkerPersistenceMaxQPS is the max qps worker host can query DB
WorkerPersistenceMaxQPS
// WorkerPersistenceGlobalMaxQPS is the max qps worker cluster can query DB
WorkerPersistenceGlobalMaxQPS
// WorkerReplicatorMetaTaskConcurrency is the number of coroutine handling metadata related tasks
WorkerReplicatorMetaTaskConcurrency
// WorkerReplicatorTaskConcurrency is the number of coroutine handling non metadata related tasks
WorkerReplicatorTaskConcurrency
// WorkerReplicatorMessageConcurrency is the max concurrent tasks provided by messaging client
WorkerReplicatorMessageConcurrency
// WorkerReplicatorActivityBufferRetryCount is the retry attempt when encounter retry error on activity
WorkerReplicatorActivityBufferRetryCount
// WorkerReplicatorHistoryBufferRetryCount is the retry attempt when encounter retry error on history
WorkerReplicatorHistoryBufferRetryCount
// WorkerReplicationTaskMaxRetryCount is the max retry count for any task
WorkerReplicationTaskMaxRetryCount
// WorkerReplicationTaskMaxRetryDuration is the max retry duration for any task
WorkerReplicationTaskMaxRetryDuration
// WorkerReplicationTaskContextDuration is the context timeout for apply replication tasks
WorkerReplicationTaskContextDuration
// WorkerReReplicationContextTimeout is the context timeout for end to end re-replication process
WorkerReReplicationContextTimeout
// WorkerEnableRPCReplication is the feature flag for RPC replication
WorkerEnableRPCReplication
// WorkerEnableKafkaReplication is the feature flag for kafka replication
WorkerEnableKafkaReplication
// WorkerIndexerConcurrency is the max concurrent messages to be processed at any given time
WorkerIndexerConcurrency
// WorkerESProcessorNumOfWorkers is num of workers for esProcessor
WorkerESProcessorNumOfWorkers
// WorkerESProcessorBulkActions is max number of requests in bulk for esProcessor
WorkerESProcessorBulkActions
// WorkerESProcessorBulkSize is max total size of bulk in bytes for esProcessor
WorkerESProcessorBulkSize
// WorkerESProcessorFlushInterval is flush interval for esProcessor
WorkerESProcessorFlushInterval
// EnableArchivalCompression indicates whether blobs are compressed before they are archived
EnableArchivalCompression
// WorkerHistoryPageSize indicates the page size of history fetched from persistence for archival
WorkerHistoryPageSize
// WorkerTargetArchivalBlobSize indicates the target blob size in bytes for archival, actual blob size may vary
WorkerTargetArchivalBlobSize
// WorkerArchiverConcurrency controls the number of coroutines handling archival work per archival workflow
WorkerArchiverConcurrency
// WorkerArchivalsPerIteration controls the number of archivals handled in each iteration of archival workflow
WorkerArchivalsPerIteration
// WorkerDeterministicConstructionCheckProbability controls the probability of running a deterministic construction check for any given archival
WorkerDeterministicConstructionCheckProbability
// WorkerBlobIntegrityCheckProbability controls the probability of running an integrity check for any given archival
WorkerBlobIntegrityCheckProbability
// WorkerTimeLimitPerArchivalIteration controls the time limit of each iteration of archival workflow
WorkerTimeLimitPerArchivalIteration
// WorkerThrottledLogRPS is the rate limit on number of log messages emitted per second for throttled logger
WorkerThrottledLogRPS
// ScannerPersistenceMaxQPS is the maximum rate of persistence calls from worker.Scanner
ScannerPersistenceMaxQPS
// TaskQueueScannerEnabled indicates if task queue scanner should be started as part of worker.Scanner
TaskQueueScannerEnabled
// HistoryScannerEnabled indicates if history scanner should be started as part of worker.Scanner
HistoryScannerEnabled
// ExecutionsScannerEnabled indicates if executions scanner should be started as part of worker.Scanner
ExecutionsScannerEnabled
// EnableBatcher decides whether start batcher in our worker
EnableBatcher
// EnableParentClosePolicyWorker decides whether or not enable system workers for processing parent close policy task
EnableParentClosePolicyWorker
// EnableStickyQuery indicates if sticky query should be enabled per namespace
EnableStickyQuery
// ReplicationTaskFetcherParallelism determines how many go routines we spin up for fetching tasks
ReplicationTaskFetcherParallelism
// ReplicationTaskFetcherAggregationInterval determines how frequently the fetch requests are sent
ReplicationTaskFetcherAggregationInterval
// ReplicationTaskFetcherTimerJitterCoefficient is the jitter for fetcher timer
ReplicationTaskFetcherTimerJitterCoefficient
// ReplicationTaskFetcherErrorRetryWait is the wait time when fetcher encounters error
ReplicationTaskFetcherErrorRetryWait
// ReplicationTaskProcessorErrorRetryWait is the initial retry wait when we see errors in applying replication tasks
ReplicationTaskProcessorErrorRetryWait
// ReplicationTaskProcessorErrorRetryMaxAttempts is the max retry attempts for applying replication tasks
ReplicationTaskProcessorErrorRetryMaxAttempts
	// ReplicationTaskProcessorNoTaskInitialWait is the wait time when no task is returned
ReplicationTaskProcessorNoTaskInitialWait
	// ReplicationTaskProcessorCleanupInterval determines how frequently to clean up the replication queue
ReplicationTaskProcessorCleanupInterval
// ReplicationTaskProcessorCleanupJitterCoefficient is the jitter for cleanup timer
ReplicationTaskProcessorCleanupJitterCoefficient
// ReplicationTaskProcessorStartWait is the wait time before each task processing batch
ReplicationTaskProcessorStartWait
// ReplicationTaskProcessorStartWaitJitterCoefficient is the jitter for batch start wait timer
ReplicationTaskProcessorStartWaitJitterCoefficient
// ReplicationTaskProcessorHostQPS is the qps of task processing rate limiter on host level
ReplicationTaskProcessorHostQPS
// ReplicationTaskProcessorShardQPS is the qps of task processing rate limiter on shard level
ReplicationTaskProcessorShardQPS
// HistoryEnableRPCReplication is the feature flag for RPC replication
HistoryEnableRPCReplication
// HistoryEnableKafkaReplication is the migration flag for Kafka replication
HistoryEnableKafkaReplication
// HistoryEnableCleanupReplicationTask is the migration flag for Kafka replication
HistoryEnableCleanupReplicationTask
	// MaxBufferedQueryCount is the maximum number of queries that can be buffered for a single workflow execution
MaxBufferedQueryCount
// MutableStateChecksumGenProbability is the probability [0-100] that checksum will be generated for mutable state
MutableStateChecksumGenProbability
// MutableStateChecksumVerifyProbability is the probability [0-100] that checksum will be verified for mutable state
MutableStateChecksumVerifyProbability
// MutableStateChecksumInvalidateBefore is the epoch timestamp before which all checksums are to be discarded
MutableStateChecksumInvalidateBefore
	// ReplicationEventsFromCurrentCluster is a feature flag that allows cross-DC replication of events generated from the current cluster
ReplicationEventsFromCurrentCluster
// StandbyTaskReReplicationContextTimeout is the context timeout for standby task re-replication
StandbyTaskReReplicationContextTimeout
// lastKeyForTest must be the last one in this const group for testing purpose
lastKeyForTest
)
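// Illustrative sketch, not part of the original file: the iota block above gives each
// Key a stable integer value, and the map literal earlier in this file binds every Key
// to its dotted property name. A lookup helper might look like the following; the map
// variable name "keys" and the helper name "keyName" are assumptions introduced here
// for illustration only.
func keyName(k Key) string {
	if name, ok := keys[k]; ok {
		return name
	}
	return keys[unknownKey]
}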
// Filter represents a filter on the dynamic config key
type Filter int
func (f Filter) String() string {
	if f <= unknownFilter || f > ShardID {
return filters[unknownFilter]
}
return filters[f]
}
var filters = []string{
"unknownFilter",
"namespace",
"namespaceID",
"taskQueueName",
"taskType",
"shardID",
}
const (
unknownFilter Filter = iota
// Namespace is the namespace name
Namespace
// NamespaceID is the namespace Id
NamespaceID
// TaskQueueName is the taskqueue name
TaskQueueName
// TaskType is the task type (0:Workflow, 1:Activity)
TaskType
	// ShardID is the shard id
ShardID
// lastFilterTypeForTest must be the last one in this const group for testing purpose
lastFilterTypeForTest
)
const DefaultNumTaskQueuePartitions = 4
// FilterOption is used to provide filters for dynamic config keys
type FilterOption func(filterMap map[Filter]interface{})
// TaskQueueFilter filters by task queue name
func TaskQueueFilter(name string) FilterOption {
return func(filterMap map[Filter]interface{}) {
filterMap[TaskQueueName] = name
}
}
// NamespaceFilter filters by namespace name
func NamespaceFilter(name string) FilterOption {
return func(filterMap map[Filter]interface{}) {
filterMap[Namespace] = name
}
}
// NamespaceIDFilter filters by namespace id
func NamespaceIDFilter(namespaceID string) FilterOption {
return func(filterMap map[Filter]interface{}) {
filterMap[NamespaceID] = namespaceID
}
}
// TaskTypeFilter filters by task type
func TaskTypeFilter(taskType enumspb.TaskQueueType) FilterOption {
return func(filterMap map[Filter]interface{}) {
filterMap[TaskType] = enumspb.TaskQueueType_name[int32(taskType)]
}
}
// ShardIDFilter filters by shard id
func ShardIDFilter(shardID int32) FilterOption {
return func(filterMap map[Filter]interface{}) {
filterMap[ShardID] = shardID
}
}
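// Illustrative sketch, not part of the original file: each FilterOption helper above
// sets one entry in a filter map, so a caller can fold a list of options into a single
// map. The helper name newFilterMap and the example values are hypothetical,
// introduced here for illustration only.
func newFilterMap(opts ...FilterOption) map[Filter]interface{} {
	filterMap := make(map[Filter]interface{})
	for _, opt := range opts {
		opt(filterMap)
	}
	return filterMap
}
// Example (assumed call site):
//   filterMap := newFilterMap(NamespaceFilter("default"), TaskQueueFilter("orders"))
//   // filterMap now holds {Namespace: "default", TaskQueueName: "orders"}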
| 1 | 10,867 | I think we should keep this dynamic config knob for operations purpose. | temporalio-temporal | go |
@@ -60,7 +60,6 @@ class TestCharacterOffsets(unittest.TestCase):
obj = BasicTextProvider(text=u"\U0001f926\U0001f60a\U0001f44d") # 🤦😊👍
ti = obj.makeTextInfo(Offsets(5, 5))
ti.expand(textInfos.UNIT_CHARACTER) # Range at 👍
- self.assertEqual(ti.offsets, (4, 6)) # Two offsets
ti.move(textInfos.UNIT_CHARACTER, -1)
ti.expand(textInfos.UNIT_CHARACTER) # Range at 😊
self.assertEqual(ti.offsets, (2, 4)) # Two offsets | 1 | # -*- coding: UTF-8 -*-
#tests/unit/test_textInfos.py
#A part of NonVisual Desktop Access (NVDA)
#This file is covered by the GNU General Public License.
#See the file COPYING for more details.
#Copyright (C) 2018 NV Access Limited, Babbage B.V.
"""Unit tests for the textInfos module, its submodules and classes."""
import unittest
from .textProvider import BasicTextProvider
import textInfos
from textInfos.offsets import Offsets
class TestCharacterOffsets(unittest.TestCase):
"""
Tests for textInfos.offsets.OffsetsTextInfo for its ability to deal with
UTF-16 surrogate characters (i.e. whether a surrogate pair is treated as one character).
These tests are also implicit tests for the textUtils module,
as its logic is used for character offset calculation in wide character strings.
"""
def test_nonSurrogateForward(self):
obj = BasicTextProvider(text="abc")
ti = obj.makeTextInfo(Offsets(0, 0))
ti.expand(textInfos.UNIT_CHARACTER) # Range at a
self.assertEqual(ti.offsets, (0, 1)) # One offset
ti.move(textInfos.UNIT_CHARACTER, 1)
ti.expand(textInfos.UNIT_CHARACTER) # Range at b
self.assertEqual(ti.offsets, (1, 2)) # One offset
ti.move(textInfos.UNIT_CHARACTER, 1)
ti.expand(textInfos.UNIT_CHARACTER) # Range at c
self.assertEqual(ti.offsets, (2, 3)) # One offset
def test_nonSurrogateBackward(self):
obj = BasicTextProvider(text="abc")
ti = obj.makeTextInfo(Offsets(2, 2))
ti.expand(textInfos.UNIT_CHARACTER) # Range at c
self.assertEqual(ti.offsets, (2, 3)) # One offset
ti.move(textInfos.UNIT_CHARACTER, -1)
ti.expand(textInfos.UNIT_CHARACTER) # Range at b
self.assertEqual(ti.offsets, (1, 2)) # One offset
ti.move(textInfos.UNIT_CHARACTER, -1)
ti.expand(textInfos.UNIT_CHARACTER) # Range at a
self.assertEqual(ti.offsets, (0, 1)) # One offset
def test_surrogatePairsForward(self):
obj = BasicTextProvider(text=u"\U0001f926\U0001f60a\U0001f44d") # 🤦😊👍
ti = obj.makeTextInfo(Offsets(0, 0))
ti.expand(textInfos.UNIT_CHARACTER) # Range at 🤦
self.assertEqual(ti.offsets, (0, 2)) # Two offsets
ti.move(textInfos.UNIT_CHARACTER, 1)
ti.expand(textInfos.UNIT_CHARACTER) # Range at 😊
self.assertEqual(ti.offsets, (2, 4)) # Two offsets
ti.move(textInfos.UNIT_CHARACTER, 1)
ti.expand(textInfos.UNIT_CHARACTER) # Range at 👍
self.assertEqual(ti.offsets, (4, 6)) # Two offsets
def test_surrogatePairsBackward(self):
obj = BasicTextProvider(text=u"\U0001f926\U0001f60a\U0001f44d") # 🤦😊👍
ti = obj.makeTextInfo(Offsets(5, 5))
ti.expand(textInfos.UNIT_CHARACTER) # Range at 👍
self.assertEqual(ti.offsets, (4, 6)) # Two offsets
ti.move(textInfos.UNIT_CHARACTER, -1)
ti.expand(textInfos.UNIT_CHARACTER) # Range at 😊
self.assertEqual(ti.offsets, (2, 4)) # Two offsets
ti.move(textInfos.UNIT_CHARACTER, -1)
ti.expand(textInfos.UNIT_CHARACTER) # Range at 🤦
self.assertEqual(ti.offsets, (0, 2)) # Two offsets
def test_mixedSurrogatePairsAndNonSurrogatesForward(self):
obj = BasicTextProvider(text=u"a\U0001f926b") # a🤦b
ti = obj.makeTextInfo(Offsets(0, 0))
ti.expand(textInfos.UNIT_CHARACTER) # Range at a
self.assertEqual(ti.offsets, (0, 1)) # One offset
ti.move(textInfos.UNIT_CHARACTER, 1)
ti.expand(textInfos.UNIT_CHARACTER) # Range at 🤦
self.assertEqual(ti.offsets, (1, 3)) # Two offsets
ti.move(textInfos.UNIT_CHARACTER, 1)
		ti.expand(textInfos.UNIT_CHARACTER) # Range at b
self.assertEqual(ti.offsets, (3, 4)) # One offset
def test_mixedSurrogatePairsAndNonSurrogatesBackward(self):
obj = BasicTextProvider(text=u"a\U0001f926b") # a🤦b
ti = obj.makeTextInfo(Offsets(3, 3))
		ti.expand(textInfos.UNIT_CHARACTER) # Range at b
self.assertEqual(ti.offsets, (3, 4)) # One offset
ti.move(textInfos.UNIT_CHARACTER, -1)
ti.expand(textInfos.UNIT_CHARACTER) # Range at 🤦
self.assertEqual(ti.offsets, (1, 3)) # Two offsets
ti.move(textInfos.UNIT_CHARACTER, -1)
ti.expand(textInfos.UNIT_CHARACTER) # Range at a
self.assertEqual(ti.offsets, (0, 1)) # One offset
def test_mixedSurrogatePairsNonSurrogatesAndSingleSurrogatesForward(self):
"""
Tests surrogate pairs, non surrogates as well as
single surrogate characters (i.e. incomplete pairs)
"""
obj = BasicTextProvider(text=u"a\ud83e\U0001f926\udd26b")
ti = obj.makeTextInfo(Offsets(0, 0))
ti.expand(textInfos.UNIT_CHARACTER) # Range at a
self.assertEqual(ti.offsets, (0, 1)) # One offset
ti.move(textInfos.UNIT_CHARACTER, 1)
		ti.expand(textInfos.UNIT_CHARACTER) # Range at the lone leading surrogate
self.assertEqual(ti.offsets, (1, 2)) # Leading surrogate without a trailing surrogate
ti.move(textInfos.UNIT_CHARACTER, 1)
ti.expand(textInfos.UNIT_CHARACTER) # Range at 🤦
self.assertEqual(ti.offsets, (2, 4)) # Two offsets
ti.move(textInfos.UNIT_CHARACTER, 1)
		ti.expand(textInfos.UNIT_CHARACTER) # Range at the lone trailing surrogate
self.assertEqual(ti.offsets, (4, 5)) # Trailing surrogate without a leading surrogate.
ti.move(textInfos.UNIT_CHARACTER, 1)
		ti.expand(textInfos.UNIT_CHARACTER) # Range at b
self.assertEqual(ti.offsets, (5, 6)) # One offset
def test_mixedSurrogatePairsNonSurrogatesAndSingleSurrogatesBackward(self):
obj = BasicTextProvider(text=u"a\ud83e\U0001f926\udd26b")
ti = obj.makeTextInfo(Offsets(5, 5))
		ti.expand(textInfos.UNIT_CHARACTER) # Range at b
self.assertEqual(ti.offsets, (5, 6)) # One offset
ti.move(textInfos.UNIT_CHARACTER, -1)
		ti.expand(textInfos.UNIT_CHARACTER) # Range at the lone trailing surrogate
self.assertEqual(ti.offsets, (4, 5)) # Trailing surrogate without a leading surrogate.
ti.move(textInfos.UNIT_CHARACTER, -1)
ti.expand(textInfos.UNIT_CHARACTER) # Range at 🤦
self.assertEqual(ti.offsets, (2, 4)) # Two offsets
ti.move(textInfos.UNIT_CHARACTER, -1)
		ti.expand(textInfos.UNIT_CHARACTER) # Range at the lone leading surrogate
self.assertEqual(ti.offsets, (1, 2)) # Leading surrogate without a trailing surrogate
ti.move(textInfos.UNIT_CHARACTER, -1)
ti.expand(textInfos.UNIT_CHARACTER) # Range at a
self.assertEqual(ti.offsets, (0, 1)) # One offset
| 1 | 32,288 | Was this intentional? Perhaps this could be replaced with another way to check the offsets? | nvaccess-nvda | py |
@@ -86,6 +86,7 @@ type PipelineManifest struct {
Version PipelineSchemaMajorVersion `yaml:"version"`
Source *Source `yaml:"source"`
Stages []PipelineStage `yaml:"stages"`
+ // ArtifactBuckets?
}
// Source defines the source of the artifacts to be built and deployed. | 1 | // Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package manifest
import (
"bytes"
"errors"
"fmt"
"text/template"
"github.com/fatih/structs"
"gopkg.in/yaml.v3"
"github.com/aws/amazon-ecs-cli-v2/templates"
)
const (
GithubProviderName = "GitHub"
GithubSecretIdKeyName = "access_token_secret"
)
// Provider defines a source of the artifacts
// that will be built and deployed via a pipeline
type Provider interface {
fmt.Stringer
Name() string
Properties() map[string]interface{}
}
type githubProvider struct {
properties *GitHubProperties
}
func (p *githubProvider) Name() string {
return GithubProviderName
}
func (p *githubProvider) String() string {
return GithubProviderName
}
func (p *githubProvider) Properties() map[string]interface{} {
return structs.Map(p.properties)
}
// GitHubProperties contain information for configuring a Github
// source provider.
type GitHubProperties struct {
// use tag from https://godoc.org/github.com/fatih/structs#example-Map--Tags
// to specify the name of the field in the output properties
// An example for OwnerAndRepository would be: "aws/amazon-ecs-cli-v2"
OwnerAndRepository string `structs:"repository" yaml:"repository"`
Branch string `structs:"branch" yaml:"branch"`
	GithubSecretIdKeyName string `structs:"access_token_secret" yaml:"access_token_secret"` // TODO fix naming
}
// NewProvider creates a source provider based on the type of
// the provided provider-specific configurations
func NewProvider(configs interface{}) (Provider, error) {
switch props := configs.(type) {
case *GitHubProperties:
return &githubProvider{
properties: props,
}, nil
default:
return nil, &ErrUnknownProvider{unknownProviderProperties: props}
}
}
// PipelineSchemaMajorVersion is the major version number
// of the pipeline manifest schema
type PipelineSchemaMajorVersion int
const (
// Ver1 is the current schema major version of the pipeline.yml file.
Ver1 PipelineSchemaMajorVersion = iota + 1
)
// PipelineManifest contains information that defines the relationship
// and deployment ordering of your environments.
type PipelineManifest struct {
// Name of the pipeline
Name string `yaml:"name"`
Version PipelineSchemaMajorVersion `yaml:"version"`
Source *Source `yaml:"source"`
Stages []PipelineStage `yaml:"stages"`
}
// Source defines the source of the artifacts to be built and deployed.
type Source struct {
ProviderName string `yaml:"provider"`
Properties map[string]interface{} `yaml:"properties"`
}
// PipelineStage represents a stage in the pipeline manifest
type PipelineStage struct {
	Name string `yaml:"name"`
}
// CreatePipeline returns a pipeline manifest object.
func CreatePipeline(pipelineName string, provider Provider, stageNames []string) (*PipelineManifest, error) {
// TODO: #221 Do more validations
if len(stageNames) == 0 {
return nil, fmt.Errorf("a pipeline %s can not be created without a deployment stage",
pipelineName)
}
stages := make([]PipelineStage, 0, len(stageNames))
for _, name := range stageNames {
stages = append(stages, PipelineStage{Name: name})
}
return &PipelineManifest{
Name: pipelineName,
Version: Ver1,
Source: &Source{
ProviderName: provider.Name(),
Properties: provider.Properties(),
},
Stages: stages,
}, nil
}
// Marshal serializes the pipeline manifest object into byte array that
// represents the pipeline.yml document.
func (m *PipelineManifest) Marshal() ([]byte, error) {
box := templates.Box()
content, err := box.FindString("cicd/pipeline.yml")
if err != nil {
return nil, err
}
tpl, err := template.New("pipelineTemplate").Parse(content)
if err != nil {
return nil, err
}
var buf bytes.Buffer
if err := tpl.Execute(&buf, *m); err != nil {
return nil, err
}
return buf.Bytes(), nil
}
// UnmarshalPipeline deserializes the YAML input stream into a pipeline
// manifest object. It returns an error if any issue occurs during
// deserialization or the YAML input contains invalid fields.
func UnmarshalPipeline(in []byte) (*PipelineManifest, error) {
pm := PipelineManifest{}
err := yaml.Unmarshal(in, &pm)
if err != nil {
return nil, err
}
var version PipelineSchemaMajorVersion
if version, err = validateVersion(&pm); err != nil {
return nil, err
}
// TODO: #221 Do more validations
switch version {
case Ver1:
return &pm, nil
}
// we should never reach here, this is just to make the compiler happy
	return nil, errors.New("unexpected error occurred while unmarshalling pipeline.yml")
}
func validateVersion(pm *PipelineManifest) (PipelineSchemaMajorVersion, error) {
switch pm.Version {
case Ver1:
return Ver1, nil
default:
return pm.Version,
&ErrInvalidPipelineManifestVersion{
invalidVersion: pm.Version,
}
}
}
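// Illustrative sketch, not part of the original file: a hypothetical caller that wires
// the exported pieces above together — build a GitHub source provider, create a
// pipeline manifest with two stages, and serialize it to pipeline.yml bytes. The
// function name examplePipelineManifest and all literal values are assumptions for
// illustration only.
func examplePipelineManifest() ([]byte, error) {
	provider, err := NewProvider(&GitHubProperties{
		OwnerAndRepository:    "aws/amazon-ecs-cli-v2",
		Branch:                "master",
		GithubSecretIdKeyName: GithubSecretIdKeyName,
	})
	if err != nil {
		return nil, err
	}
	pipeline, err := CreatePipeline("my-pipeline", provider, []string{"test", "prod"})
	if err != nil {
		return nil, err
	}
	return pipeline.Marshal()
}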
| 1 | 11,149 | the buckets are created via the stackset, customers don't need to provide them | aws-copilot-cli | go |
@@ -25,7 +25,7 @@ module Blacklight
# Execute a search query against solr
# @param [Hash] solr query parameters
def search params = {}
- send_and_receive blacklight_config.solr_path, { qt: blacklight_config.qt }.merge(params)
+ send_and_receive blacklight_config.solr_path, params.merge({ qt: blacklight_config.qt })
end
## | 1 | module Blacklight
class SolrRepository
attr_accessor :blacklight_config, :blacklight_solr
# ActiveSupport::Benchmarkable requires a logger method
attr_accessor :logger
include ActiveSupport::Benchmarkable
def initialize blacklight_config
@blacklight_config = blacklight_config
end
##
# Find a single solr document result (by id) using the document configuration
# @param [String] document's unique key value
# @param [Hash] additional solr query parameters
def find id, params = {}
solr_response = send_and_receive blacklight_config.document_solr_path || blacklight_config.solr_path, {qt: blacklight_config.document_solr_request_handler}.merge(blacklight_config.default_document_solr_params.merge(params).merge(blacklight_config.document_unique_id_param => id))
raise Blacklight::Exceptions::InvalidSolrID.new if solr_response.documents.empty?
solr_response
end
##
# Execute a search query against solr
# @param [Hash] solr query parameters
def search params = {}
send_and_receive blacklight_config.solr_path, { qt: blacklight_config.qt }.merge(params)
end
##
# Execute a solr query
# @see [RSolr::Client#send_and_receive]
# @overload find(solr_path, params)
# Execute a solr query at the given path with the parameters
# @param [String] solr path (defaults to blacklight_config.solr_path)
# @param [Hash] parameters for RSolr::Client#send_and_receive
# @overload find(params)
# @param [Hash] parameters for RSolr::Client#send_and_receive
# @return [Blacklight::SolrResponse] the solr response object
def send_and_receive(path, solr_params = {})
benchmark("Solr fetch", level: :debug) do
key = blacklight_config.http_method == :post ? :data : :params
res = blacklight_solr.send_and_receive(path, {key=>solr_params.to_hash, method:blacklight_config.http_method})
solr_response = blacklight_config.solr_response_model.new(res, solr_params, solr_document_model: blacklight_config.solr_document_model)
Rails.logger.debug("Solr query: #{solr_params.inspect}")
Rails.logger.debug("Solr response: #{solr_response.inspect}") if defined?(::BLACKLIGHT_VERBOSE_LOGGING) and ::BLACKLIGHT_VERBOSE_LOGGING
solr_response
end
rescue Errno::ECONNREFUSED => e
raise Blacklight::Exceptions::ECONNREFUSED.new("Unable to connect to Solr instance using #{blacklight_solr.inspect}")
end
def blacklight_solr
@blacklight_solr ||= RSolr.connect(blacklight_solr_config)
end
protected
def blacklight_solr_config
@blacklight_solr_config ||= Blacklight.solr_config
end
def logger
@logger ||= Rails.logger if defined? Rails
end
end
end | 1 | 5,628 | This doesn't have the same behavior as before. Maybe you want `reverse_merge` (and it either just works or we implement it on `Blacklight::Solr::Request`) | projectblacklight-blacklight | rb |
@@ -6,6 +6,7 @@ module Travis
def setup
super
+ cmd "export PATH=/usr/local/ghc/#{ghc_version}/bin/:$PATH"
cmd 'cabal update', fold: 'cabal', retry: true
end
| 1 | module Travis
module Build
class Script
class Haskell < Script
DEFAULTS = {}
def setup
super
cmd 'cabal update', fold: 'cabal', retry: true
end
def announce
super
cmd 'ghc --version'
cmd 'cabal --version'
end
def install
cmd 'cabal install --only-dependencies --enable-tests', fold: 'install', retry: true
end
def script
cmd 'cabal configure --enable-tests && cabal build && cabal test'
end
end
end
end
end
| 1 | 11,071 | This should be ` /$(ghc_find #{config[:ghc]})/ ` instead of ` /#{ghc_version}/ `, I think. | travis-ci-travis-build | rb |
@@ -374,7 +374,7 @@ define(["appSettings", "browser", "events", "htmlMediaHelper"], function (appSet
return -1 !== supportedFeatures.indexOf(command.toLowerCase());
},
preferVisualCards: browser.android || browser.chrome,
- moreIcon: browser.android ? "dots-vert" : "dots-horiz",
+ moreIcon: browser.android ? "more_vert" : "more_horiz",
getSyncProfile: getSyncProfile,
getDefaultLayout: function () {
if (window.NativeShell) { | 1 | define(["appSettings", "browser", "events", "htmlMediaHelper"], function (appSettings, browser, events, htmlMediaHelper) {
"use strict";
function getBaseProfileOptions(item) {
var disableHlsVideoAudioCodecs = [];
if (item && htmlMediaHelper.enableHlsJsPlayer(item.RunTimeTicks, item.MediaType)) {
if (browser.edge || browser.msie) {
disableHlsVideoAudioCodecs.push("mp3");
}
disableHlsVideoAudioCodecs.push("ac3");
disableHlsVideoAudioCodecs.push("eac3");
disableHlsVideoAudioCodecs.push("opus");
}
return {
enableMkvProgressive: false,
disableHlsVideoAudioCodecs: disableHlsVideoAudioCodecs
};
}
function getDeviceProfileForWindowsUwp(item) {
return new Promise(function (resolve, reject) {
require(["browserdeviceprofile", "environments/windows-uwp/mediacaps"], function (profileBuilder, uwpMediaCaps) {
var profileOptions = getBaseProfileOptions(item);
profileOptions.supportsDts = uwpMediaCaps.supportsDTS();
profileOptions.supportsTrueHd = uwpMediaCaps.supportsDolby();
profileOptions.audioChannels = uwpMediaCaps.getAudioChannels();
resolve(profileBuilder(profileOptions));
});
});
}
function getDeviceProfile(item, options) {
options = options || {};
if (self.Windows) {
return getDeviceProfileForWindowsUwp(item);
}
return new Promise(function (resolve) {
require(["browserdeviceprofile"], function (profileBuilder) {
var profile;
if (window.NativeShell) {
profile = window.NativeShell.AppHost.getDeviceProfile(profileBuilder);
} else {
profile = profileBuilder(getBaseProfileOptions(item));
if (item && !options.isRetry && "allcomplexformats" !== appSettings.get("subtitleburnin")) {
if (!browser.orsay && !browser.tizen) {
profile.SubtitleProfiles.push({
Format: "ass",
Method: "External"
});
profile.SubtitleProfiles.push({
Format: "ssa",
Method: "External"
});
}
}
}
resolve(profile);
});
});
}
function escapeRegExp(str) {
return str.replace(/([.*+?^=!:${}()|\[\]\/\\])/g, "\\$1");
}
function replaceAll(originalString, strReplace, strWith) {
var strReplace2 = escapeRegExp(strReplace);
var reg = new RegExp(strReplace2, "ig");
return originalString.replace(reg, strWith);
}
function generateDeviceId() {
var keys = [];
if (keys.push(navigator.userAgent), keys.push(new Date().getTime()), self.btoa) {
var result = replaceAll(btoa(keys.join("|")), "=", "1");
return Promise.resolve(result);
}
return Promise.resolve(new Date().getTime());
}
function getDeviceId() {
var key = "_deviceId2";
var deviceId = appSettings.get(key);
if (deviceId) {
return Promise.resolve(deviceId);
}
return generateDeviceId().then(function (deviceId) {
appSettings.set(key, deviceId);
return deviceId;
});
}
function getDeviceName() {
var deviceName;
deviceName = browser.tizen ? "Samsung Smart TV" : browser.web0s ? "LG Smart TV" : browser.operaTv ? "Opera TV" : browser.xboxOne ? "Xbox One" : browser.ps4 ? "Sony PS4" : browser.chrome ? "Chrome" : browser.edge ? "Edge" : browser.firefox ? "Firefox" : browser.msie ? "Internet Explorer" : browser.opera ? "Opera" : browser.safari ? "Safari" : "Web Browser";
if (browser.ipad) {
deviceName += " iPad";
} else {
if (browser.iphone) {
deviceName += " iPhone";
} else {
if (browser.android) {
deviceName += " Android";
}
}
}
return deviceName;
}
function supportsVoiceInput() {
if (!browser.tv) {
return window.SpeechRecognition || window.webkitSpeechRecognition || window.mozSpeechRecognition || window.oSpeechRecognition || window.msSpeechRecognition;
}
return false;
}
function supportsFullscreen() {
if (browser.tv) {
return false;
}
var element = document.documentElement;
return (element.requestFullscreen || element.mozRequestFullScreen || element.webkitRequestFullscreen || element.msRequestFullscreen) || document.createElement("video").webkitEnterFullscreen;
}
function getSyncProfile() {
return new Promise(function (resolve) {
require(["browserdeviceprofile", "appSettings"], function (profileBuilder, appSettings) {
var profile;
if (window.NativeShell) {
profile = window.NativeShell.AppHost.getSyncProfile(profileBuilder, appSettings);
} else {
profile = profileBuilder();
profile.MaxStaticMusicBitrate = appSettings.maxStaticMusicBitrate();
}
resolve(profile);
});
});
}
function getDefaultLayout() {
return "desktop";
}
function supportsHtmlMediaAutoplay() {
if (browser.edgeUwp || browser.tizen || browser.web0s || browser.orsay || browser.operaTv || browser.ps4 || browser.xboxOne) {
return true;
}
if (browser.mobile) {
return false;
}
return true;
}
function supportsCue() {
try {
var video = document.createElement("video");
var style = document.createElement("style");
style.textContent = "video::cue {background: inherit}";
document.body.appendChild(style);
document.body.appendChild(video);
var cue = window.getComputedStyle(video, "::cue").background;
document.body.removeChild(style);
document.body.removeChild(video);
return !!cue.length;
} catch (err) {
console.log("error detecting cue support: " + err);
return false;
}
}
function onAppVisible() {
if (isHidden) {
isHidden = false;
console.log("triggering app resume event");
events.trigger(appHost, "resume");
}
}
function onAppHidden() {
if (!isHidden) {
isHidden = true;
console.log("app is hidden");
}
}
var supportedFeatures = function () {
var features = [];
if (navigator.share) {
features.push("sharing");
}
if (!browser.edgeUwp && !browser.tv && !browser.xboxOne && !browser.ps4) {
features.push("filedownload");
}
if (browser.operaTv || browser.tizen || browser.orsay || browser.web0s) {
features.push("exit");
} else {
features.push("exitmenu");
features.push("plugins");
}
if (!browser.operaTv && !browser.tizen && !browser.orsay && !browser.web0s && !browser.ps4) {
features.push("externallinks");
features.push("externalpremium");
}
if (!browser.operaTv) {
features.push("externallinkdisplay");
}
if (supportsVoiceInput()) {
features.push("voiceinput");
}
if (!browser.tv && !browser.xboxOne) {
browser.ps4;
}
if (supportsHtmlMediaAutoplay()) {
features.push("htmlaudioautoplay");
features.push("htmlvideoautoplay");
}
if (browser.edgeUwp) {
features.push("sync");
}
if (supportsFullscreen()) {
features.push("fullscreenchange");
}
if (browser.chrome || browser.edge && !browser.slow) {
if (!browser.noAnimation && !browser.edgeUwp && !browser.xboxOne) {
features.push("imageanalysis");
}
}
if (browser.tv || browser.xboxOne || browser.ps4 || browser.mobile) {
features.push("physicalvolumecontrol");
}
if (!browser.tv && !browser.xboxOne && !browser.ps4) {
features.push("remotecontrol");
}
if (!browser.operaTv && !browser.tizen && !browser.orsay && !browser.web0s && !browser.edgeUwp) {
features.push("remotevideo");
}
features.push("displaylanguage");
features.push("otherapppromotions");
features.push("displaymode");
features.push("targetblank");
// allows users to connect to more than one server
//features.push("multiserver");
features.push("screensaver");
if (!browser.orsay && !browser.tizen && !browser.msie && (browser.firefox || browser.ps4 || browser.edge || supportsCue())) {
features.push("subtitleappearancesettings");
}
if (!browser.orsay && !browser.tizen) {
features.push("subtitleburnsettings");
}
if (!browser.tv && !browser.ps4 && !browser.xboxOne) {
features.push("fileinput");
}
if (browser.chrome) {
features.push("chromecast");
}
return features;
}();
/**
* Do exit according to platform
*/
function doExit() {
try {
if (window.NativeShell) {
window.NativeShell.AppHost.exit();
} else if (browser.tizen) {
tizen.application.getCurrentApplication().exit();
} else if (browser.web0s) {
webOS.platformBack();
} else {
window.close();
}
} catch (err) {
console.log("error closing application: " + err);
}
}
var exitPromise;
/**
* Ask user for exit
*/
function askForExit() {
if (!!exitPromise) {
return;
}
require(["actionsheet"], function (actionsheet) {
exitPromise = actionsheet.show({
title: Globalize.translate("MessageConfirmAppExit"),
items: [
{id: "yes", name: Globalize.translate("Yes")},
{id: "no", name: Globalize.translate("No")}
]
}).then(function (value) {
if (value === "yes") {
doExit();
}
}).finally(function () {
exitPromise = null;
});
});
}
var deviceId;
var deviceName;
var appName = "Jellyfin Web";
var appVersion = "10.5.0";
var visibilityChange;
var visibilityState;
var appHost = {
getWindowState: function () {
return document.windowState || "Normal";
},
setWindowState: function (state) {
alert("setWindowState is not supported and should not be called");
},
exit: function () {
if (!!window.appMode && browser.tizen) {
askForExit();
} else {
doExit();
}
},
supports: function (command) {
if (window.NativeShell) {
return window.NativeShell.AppHost.supports(command);
}
return -1 !== supportedFeatures.indexOf(command.toLowerCase());
},
preferVisualCards: browser.android || browser.chrome,
moreIcon: browser.android ? "dots-vert" : "dots-horiz",
getSyncProfile: getSyncProfile,
getDefaultLayout: function () {
if (window.NativeShell) {
return window.NativeShell.AppHost.getDefaultLayout();
}
return getDefaultLayout()
},
getDeviceProfile: getDeviceProfile,
init: function () {
if (window.NativeShell) {
return window.NativeShell.AppHost.init();
}
deviceName = getDeviceName();
getDeviceId().then(function (id) {
deviceId = id;
});
},
deviceName: function () {
return window.NativeShell ? window.NativeShell.AppHost.deviceName() : deviceName;
},
deviceId: function () {
return window.NativeShell ? window.NativeShell.AppHost.deviceId() : deviceId;
},
appName: function () {
return window.NativeShell ? window.NativeShell.AppHost.appName() : appName;
},
appVersion: function () {
return window.NativeShell ? window.NativeShell.AppHost.appVersion() : appVersion;
},
getPushTokenInfo: function () {
return {};
},
setThemeColor: function (color) {
var metaThemeColor = document.querySelector("meta[name=theme-color]");
if (metaThemeColor) {
metaThemeColor.setAttribute("content", color);
}
},
setUserScalable: function (scalable) {
if (!browser.tv) {
var att = scalable ? "width=device-width, initial-scale=1, minimum-scale=1, user-scalable=yes" : "width=device-width, initial-scale=1, minimum-scale=1, maximum-scale=1, user-scalable=no";
document.querySelector("meta[name=viewport]").setAttribute("content", att);
}
}
};
var doc = self.document;
var isHidden = false;
if (doc) {
if (void 0 !== doc.visibilityState) {
visibilityChange = "visibilitychange";
visibilityState = "hidden";
} else {
if (void 0 !== doc.mozHidden) {
visibilityChange = "mozvisibilitychange";
visibilityState = "mozVisibilityState";
} else {
if (void 0 !== doc.msHidden) {
visibilityChange = "msvisibilitychange";
visibilityState = "msVisibilityState";
} else {
if (void 0 !== doc.webkitHidden) {
visibilityChange = "webkitvisibilitychange";
visibilityState = "webkitVisibilityState";
}
}
}
}
}
if (doc) {
doc.addEventListener(visibilityChange, function () {
if (document[visibilityState]) {
onAppHidden();
} else {
onAppVisible();
}
});
}
if (self.addEventListener) {
self.addEventListener("focus", onAppVisible);
self.addEventListener("blur", onAppHidden);
}
return appHost;
});
| 1 | 13,409 | We should deprecate this at some point. | jellyfin-jellyfin-web | js |
@@ -49,7 +49,9 @@ class FCOSHead(nn.Module):
use_sigmoid=True,
loss_weight=1.0),
conv_cfg=None,
- norm_cfg=dict(type='GN', num_groups=32, requires_grad=True)):
+ norm_cfg=dict(type='GN', num_groups=32, requires_grad=True),
+ center_sampling=False,
+ center_sample_radius=1.5):
super(FCOSHead, self).__init__()
self.num_classes = num_classes | 1 | import torch
import torch.nn as nn
from mmcv.cnn import normal_init
from mmdet.core import distance2bbox, force_fp32, multi_apply, multiclass_nms
from ..builder import build_loss
from ..registry import HEADS
from ..utils import ConvModule, Scale, bias_init_with_prob
INF = 1e8
@HEADS.register_module
class FCOSHead(nn.Module):
"""
Fully Convolutional One-Stage Object Detection head from [1]_.
The FCOS head does not use anchor boxes. Instead, bounding boxes are
predicted at each pixel and a centerness measure is used to suppress
low-quality predictions.
References:
.. [1] https://arxiv.org/abs/1904.01355
Example:
>>> self = FCOSHead(11, 7)
>>> feats = [torch.rand(1, 7, s, s) for s in [4, 8, 16, 32, 64]]
>>> cls_score, bbox_pred, centerness = self.forward(feats)
>>> assert len(cls_score) == len(self.scales)
"""
def __init__(self,
num_classes,
in_channels,
feat_channels=256,
stacked_convs=4,
strides=(4, 8, 16, 32, 64),
regress_ranges=((-1, 64), (64, 128), (128, 256), (256, 512),
(512, INF)),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='IoULoss', loss_weight=1.0),
loss_centerness=dict(
type='CrossEntropyLoss',
use_sigmoid=True,
loss_weight=1.0),
conv_cfg=None,
norm_cfg=dict(type='GN', num_groups=32, requires_grad=True)):
super(FCOSHead, self).__init__()
self.num_classes = num_classes
self.cls_out_channels = num_classes - 1
self.in_channels = in_channels
self.feat_channels = feat_channels
self.stacked_convs = stacked_convs
self.strides = strides
self.regress_ranges = regress_ranges
self.loss_cls = build_loss(loss_cls)
self.loss_bbox = build_loss(loss_bbox)
self.loss_centerness = build_loss(loss_centerness)
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.fp16_enabled = False
self._init_layers()
def _init_layers(self):
self.cls_convs = nn.ModuleList()
self.reg_convs = nn.ModuleList()
for i in range(self.stacked_convs):
chn = self.in_channels if i == 0 else self.feat_channels
self.cls_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
bias=self.norm_cfg is None))
self.reg_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
bias=self.norm_cfg is None))
self.fcos_cls = nn.Conv2d(
self.feat_channels, self.cls_out_channels, 3, padding=1)
self.fcos_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1)
self.fcos_centerness = nn.Conv2d(self.feat_channels, 1, 3, padding=1)
self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides])
def init_weights(self):
for m in self.cls_convs:
normal_init(m.conv, std=0.01)
for m in self.reg_convs:
normal_init(m.conv, std=0.01)
bias_cls = bias_init_with_prob(0.01)
normal_init(self.fcos_cls, std=0.01, bias=bias_cls)
normal_init(self.fcos_reg, std=0.01)
normal_init(self.fcos_centerness, std=0.01)
def forward(self, feats):
return multi_apply(self.forward_single, feats, self.scales)
def forward_single(self, x, scale):
cls_feat = x
reg_feat = x
for cls_layer in self.cls_convs:
cls_feat = cls_layer(cls_feat)
cls_score = self.fcos_cls(cls_feat)
centerness = self.fcos_centerness(cls_feat)
for reg_layer in self.reg_convs:
reg_feat = reg_layer(reg_feat)
# scale the bbox_pred of different level
# float to avoid overflow when enabling FP16
bbox_pred = scale(self.fcos_reg(reg_feat)).float().exp()
return cls_score, bbox_pred, centerness
@force_fp32(apply_to=('cls_scores', 'bbox_preds', 'centernesses'))
def loss(self,
cls_scores,
bbox_preds,
centernesses,
gt_bboxes,
gt_labels,
img_metas,
cfg,
gt_bboxes_ignore=None):
assert len(cls_scores) == len(bbox_preds) == len(centernesses)
featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
all_level_points = self.get_points(featmap_sizes, bbox_preds[0].dtype,
bbox_preds[0].device)
labels, bbox_targets = self.fcos_target(all_level_points, gt_bboxes,
gt_labels)
num_imgs = cls_scores[0].size(0)
# flatten cls_scores, bbox_preds and centerness
flatten_cls_scores = [
cls_score.permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels)
for cls_score in cls_scores
]
flatten_bbox_preds = [
bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4)
for bbox_pred in bbox_preds
]
flatten_centerness = [
centerness.permute(0, 2, 3, 1).reshape(-1)
for centerness in centernesses
]
flatten_cls_scores = torch.cat(flatten_cls_scores)
flatten_bbox_preds = torch.cat(flatten_bbox_preds)
flatten_centerness = torch.cat(flatten_centerness)
flatten_labels = torch.cat(labels)
flatten_bbox_targets = torch.cat(bbox_targets)
# repeat points to align with bbox_preds
flatten_points = torch.cat(
[points.repeat(num_imgs, 1) for points in all_level_points])
pos_inds = flatten_labels.nonzero().reshape(-1)
num_pos = len(pos_inds)
loss_cls = self.loss_cls(
flatten_cls_scores, flatten_labels,
avg_factor=num_pos + num_imgs)  # avoid a zero average factor when num_pos is 0
pos_bbox_preds = flatten_bbox_preds[pos_inds]
pos_centerness = flatten_centerness[pos_inds]
if num_pos > 0:
pos_bbox_targets = flatten_bbox_targets[pos_inds]
pos_centerness_targets = self.centerness_target(pos_bbox_targets)
pos_points = flatten_points[pos_inds]
pos_decoded_bbox_preds = distance2bbox(pos_points, pos_bbox_preds)
pos_decoded_target_preds = distance2bbox(pos_points,
pos_bbox_targets)
# centerness weighted iou loss
loss_bbox = self.loss_bbox(
pos_decoded_bbox_preds,
pos_decoded_target_preds,
weight=pos_centerness_targets,
avg_factor=pos_centerness_targets.sum())
loss_centerness = self.loss_centerness(pos_centerness,
pos_centerness_targets)
else:
loss_bbox = pos_bbox_preds.sum()
loss_centerness = pos_centerness.sum()
return dict(
loss_cls=loss_cls,
loss_bbox=loss_bbox,
loss_centerness=loss_centerness)
@force_fp32(apply_to=('cls_scores', 'bbox_preds', 'centernesses'))
def get_bboxes(self,
cls_scores,
bbox_preds,
centernesses,
img_metas,
cfg,
rescale=None):
assert len(cls_scores) == len(bbox_preds)
num_levels = len(cls_scores)
featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
mlvl_points = self.get_points(featmap_sizes, bbox_preds[0].dtype,
bbox_preds[0].device)
result_list = []
for img_id in range(len(img_metas)):
cls_score_list = [
cls_scores[i][img_id].detach() for i in range(num_levels)
]
bbox_pred_list = [
bbox_preds[i][img_id].detach() for i in range(num_levels)
]
centerness_pred_list = [
centernesses[i][img_id].detach() for i in range(num_levels)
]
img_shape = img_metas[img_id]['img_shape']
scale_factor = img_metas[img_id]['scale_factor']
det_bboxes = self.get_bboxes_single(cls_score_list, bbox_pred_list,
centerness_pred_list,
mlvl_points, img_shape,
scale_factor, cfg, rescale)
result_list.append(det_bboxes)
return result_list
def get_bboxes_single(self,
cls_scores,
bbox_preds,
centernesses,
mlvl_points,
img_shape,
scale_factor,
cfg,
rescale=False):
assert len(cls_scores) == len(bbox_preds) == len(mlvl_points)
mlvl_bboxes = []
mlvl_scores = []
mlvl_centerness = []
for cls_score, bbox_pred, centerness, points in zip(
cls_scores, bbox_preds, centernesses, mlvl_points):
assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
scores = cls_score.permute(1, 2, 0).reshape(
-1, self.cls_out_channels).sigmoid()
centerness = centerness.permute(1, 2, 0).reshape(-1).sigmoid()
bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4)
nms_pre = cfg.get('nms_pre', -1)
if nms_pre > 0 and scores.shape[0] > nms_pre:
max_scores, _ = (scores * centerness[:, None]).max(dim=1)
_, topk_inds = max_scores.topk(nms_pre)
points = points[topk_inds, :]
bbox_pred = bbox_pred[topk_inds, :]
scores = scores[topk_inds, :]
centerness = centerness[topk_inds]
bboxes = distance2bbox(points, bbox_pred, max_shape=img_shape)
mlvl_bboxes.append(bboxes)
mlvl_scores.append(scores)
mlvl_centerness.append(centerness)
mlvl_bboxes = torch.cat(mlvl_bboxes)
if rescale:
mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor)
mlvl_scores = torch.cat(mlvl_scores)
padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1)
mlvl_scores = torch.cat([padding, mlvl_scores], dim=1)
mlvl_centerness = torch.cat(mlvl_centerness)
det_bboxes, det_labels = multiclass_nms(
mlvl_bboxes,
mlvl_scores,
cfg.score_thr,
cfg.nms,
cfg.max_per_img,
score_factors=mlvl_centerness)
return det_bboxes, det_labels
def get_points(self, featmap_sizes, dtype, device):
"""Get points according to feature map sizes.
Args:
featmap_sizes (list[tuple]): Multi-level feature map sizes.
dtype (torch.dtype): Type of points.
device (torch.device): Device of points.
Returns:
tuple: points of each image.
"""
mlvl_points = []
for i in range(len(featmap_sizes)):
mlvl_points.append(
self.get_points_single(featmap_sizes[i], self.strides[i],
dtype, device))
return mlvl_points
def get_points_single(self, featmap_size, stride, dtype, device):
h, w = featmap_size
x_range = torch.arange(
0, w * stride, stride, dtype=dtype, device=device)
y_range = torch.arange(
0, h * stride, stride, dtype=dtype, device=device)
y, x = torch.meshgrid(y_range, x_range)
points = torch.stack(
(x.reshape(-1), y.reshape(-1)), dim=-1) + stride // 2
return points
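# Illustrative example (not part of the original file): for a 2x2 feature map
# with stride 8, x_range = y_range = [0, 8]; after the meshgrid, flattening and
# the "+ stride // 2" offset, the returned points are the cell centers in
# image coordinates: (4, 4), (12, 4), (4, 12), (12, 12).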
def fcos_target(self, points, gt_bboxes_list, gt_labels_list):
assert len(points) == len(self.regress_ranges)
num_levels = len(points)
# expand regress ranges to align with points
expanded_regress_ranges = [
points[i].new_tensor(self.regress_ranges[i])[None].expand_as(
points[i]) for i in range(num_levels)
]
# concat all levels points and regress ranges
concat_regress_ranges = torch.cat(expanded_regress_ranges, dim=0)
concat_points = torch.cat(points, dim=0)
# get labels and bbox_targets of each image
labels_list, bbox_targets_list = multi_apply(
self.fcos_target_single,
gt_bboxes_list,
gt_labels_list,
points=concat_points,
regress_ranges=concat_regress_ranges)
# split to per img, per level
num_points = [center.size(0) for center in points]
labels_list = [labels.split(num_points, 0) for labels in labels_list]
bbox_targets_list = [
bbox_targets.split(num_points, 0)
for bbox_targets in bbox_targets_list
]
# concat the per-level targets across images
concat_lvl_labels = []
concat_lvl_bbox_targets = []
for i in range(num_levels):
concat_lvl_labels.append(
torch.cat([labels[i] for labels in labels_list]))
concat_lvl_bbox_targets.append(
torch.cat(
[bbox_targets[i] for bbox_targets in bbox_targets_list]))
return concat_lvl_labels, concat_lvl_bbox_targets
def fcos_target_single(self, gt_bboxes, gt_labels, points, regress_ranges):
num_points = points.size(0)
num_gts = gt_labels.size(0)
if num_gts == 0:
return gt_labels.new_zeros(num_points), \
gt_bboxes.new_zeros((num_points, 4))
areas = (gt_bboxes[:, 2] - gt_bboxes[:, 0] + 1) * (
gt_bboxes[:, 3] - gt_bboxes[:, 1] + 1)
# TODO: figure out why these two are different
# areas = areas[None].expand(num_points, num_gts)
areas = areas[None].repeat(num_points, 1)
regress_ranges = regress_ranges[:, None, :].expand(
num_points, num_gts, 2)
gt_bboxes = gt_bboxes[None].expand(num_points, num_gts, 4)
xs, ys = points[:, 0], points[:, 1]
xs = xs[:, None].expand(num_points, num_gts)
ys = ys[:, None].expand(num_points, num_gts)
left = xs - gt_bboxes[..., 0]
right = gt_bboxes[..., 2] - xs
top = ys - gt_bboxes[..., 1]
bottom = gt_bboxes[..., 3] - ys
bbox_targets = torch.stack((left, top, right, bottom), -1)
# condition1: inside a gt bbox
inside_gt_bbox_mask = bbox_targets.min(-1)[0] > 0
# condition2: limit the regression range for each location
max_regress_distance = bbox_targets.max(-1)[0]
inside_regress_range = (
max_regress_distance >= regress_ranges[..., 0]) & (
max_regress_distance <= regress_ranges[..., 1])
# if there is still more than one object for a location,
# we choose the one with minimal area
areas[inside_gt_bbox_mask == 0] = INF
areas[inside_regress_range == 0] = INF
min_area, min_area_inds = areas.min(dim=1)
labels = gt_labels[min_area_inds]
labels[min_area == INF] = 0
bbox_targets = bbox_targets[range(num_points), min_area_inds]
return labels, bbox_targets
def centerness_target(self, pos_bbox_targets):
# only calculate pos centerness targets, otherwise there may be nan
left_right = pos_bbox_targets[:, [0, 2]]
top_bottom = pos_bbox_targets[:, [1, 3]]
centerness_targets = (
left_right.min(dim=-1)[0] / left_right.max(dim=-1)[0]) * (
top_bottom.min(dim=-1)[0] / top_bottom.max(dim=-1)[0])
return torch.sqrt(centerness_targets)
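# Illustrative note (not part of the original file): the target computed above
# is sqrt((min(l, r) / max(l, r)) * (min(t, b) / max(t, b))). At the exact
# center of a box l == r and t == b, so the target is 1; near a box edge one
# of the ratios approaches 0, so the target approaches 0. The sqrt slows the
# falloff toward the edges.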
| 1 | 18,677 | Move these two arguments before `loss_cls`. | open-mmlab-mmdetection | py |
@@ -93,10 +93,16 @@ func GenerateSelfSignedX509CA(commonName string, extUsage []x509.ExtKeyUsage, ke
}
// GenerateServerX509UsingCA generates a TLS serverCert that is self-signed
-func GenerateServerX509UsingCA(commonName string, ca *tls.Certificate) (*tls.Certificate, *rsa.PrivateKey, error) {
+func GenerateServerX509UsingCAAndSerialNumber(commonName string, serialNumber int64, ca *tls.Certificate) (*tls.Certificate, *rsa.PrivateKey, error) {
now := time.Now().UTC()
- i := mathrand.Int63n(100000000000000000)
+ var i int64
+ if serialNumber == 0 {
+ i = mathrand.Int63n(100000000000000000)
+ } else {
+ i = serialNumber
+ }
+
template := &x509.Certificate{
SerialNumber: big.NewInt(i),
Subject: pkix.Name{ | 1 | // The MIT License
//
// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved.
//
// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package encryption
import (
"crypto/rand"
"crypto/rsa"
"crypto/tls"
"crypto/x509"
"crypto/x509/pkix"
"math/big"
mathrand "math/rand"
"net"
"strings"
"time"
)
// GenerateSelfSignedUseEverywhereX509 generates a TLS serverCert that is self-signed
func GenerateSelfSignedUseEverywhereX509(commonName string, keyLengthBits int) (*tls.Certificate, error) {
return GenerateSelfSignedX509CA(commonName, []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth}, keyLengthBits)
}
// GenerateSelfSignedX509CA generates a TLS serverCert that is self-signed
func GenerateSelfSignedX509CA(commonName string, extUsage []x509.ExtKeyUsage, keyLengthBits int) (*tls.Certificate, error) {
now := time.Now().UTC()
template := &x509.Certificate{
SerialNumber: big.NewInt(now.Unix()),
Subject: pkix.Name{
CommonName: commonName,
Country: []string{"USA"},
Organization: []string{"TemporalTechnologiesTesting"},
},
NotBefore: now.Add(-time.Minute),
NotAfter: now.AddDate(3, 0, 0), // 3 year expiry
BasicConstraintsValid: true,
IsCA: true,
ExtKeyUsage: extUsage,
KeyUsage: x509.KeyUsageCertSign | x509.KeyUsageKeyEncipherment |
x509.KeyUsageDigitalSignature,
}
if ip := net.ParseIP(commonName).To4(); ip != nil {
template.IPAddresses = []net.IP{ip}
if ip.IsLoopback() {
template.DNSNames = []string{"localhost"}
}
}
if strings.ToLower(commonName) == "localhost" {
template.IPAddresses = []net.IP{net.IPv6loopback, net.IPv4(127, 0, 0, 1)}
template.DNSNames = []string{"localhost"}
}
privateKey, err := rsa.GenerateKey(rand.Reader, keyLengthBits)
if err != nil {
return &tls.Certificate{}, err
}
cert, err := x509.CreateCertificate(rand.Reader, template, template, privateKey.Public(), privateKey)
if err != nil {
return &tls.Certificate{}, err
}
var tlsCert tls.Certificate
tlsCert.Certificate = append(tlsCert.Certificate, cert)
tlsCert.PrivateKey = privateKey
return &tlsCert, nil
}
// GenerateServerX509UsingCA generates a TLS serverCert that is self-signed
func GenerateServerX509UsingCA(commonName string, ca *tls.Certificate) (*tls.Certificate, *rsa.PrivateKey, error) {
now := time.Now().UTC()
i := mathrand.Int63n(100000000000000000)
template := &x509.Certificate{
SerialNumber: big.NewInt(i),
Subject: pkix.Name{
CommonName: commonName,
Country: []string{"USA"},
Organization: []string{"TemporalTechnologiesTesting"},
},
NotBefore: now.Add(-time.Minute),
NotAfter: now.AddDate(3, 0, 0), // 3 year expiry
BasicConstraintsValid: true,
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth},
KeyUsage: x509.KeyUsageDigitalSignature,
}
if ip := net.ParseIP(commonName).To4(); ip != nil {
template.IPAddresses = []net.IP{ip}
if ip.IsLoopback() {
template.DNSNames = []string{"localhost"}
}
}
if strings.ToLower(commonName) == "localhost" {
template.IPAddresses = []net.IP{net.IPv6loopback, net.IPv4(127, 0, 0, 1)}
template.DNSNames = []string{"localhost"}
}
privateKey, err := rsa.GenerateKey(rand.Reader, 4096)
if err != nil {
return &tls.Certificate{}, nil, err
}
caCert, err := x509.ParseCertificate(ca.Certificate[0])
if err != nil {
return nil, nil, err
}
cert, err := x509.CreateCertificate(rand.Reader, template, caCert, privateKey.Public(), ca.PrivateKey)
if err != nil {
return &tls.Certificate{}, nil, err
}
var tlsCert tls.Certificate
tlsCert.Certificate = append(tlsCert.Certificate, cert)
tlsCert.PrivateKey = privateKey
return &tlsCert, privateKey, err
}
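// Illustrative sketch (not part of the original file): the serial-number
// selection added in the patch above could be written more compactly as:
//
//	i := serialNumber
//	if i == 0 {
//		i = mathrand.Int63n(100000000000000000)
//	}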
| 1 | 11,223 | [Super NIT]: i := serialNumber if i == 0 { i = mathrand.Int63n(...) } | temporalio-temporal | go |
@@ -14,8 +14,8 @@ namespace Microsoft.CodeAnalysis.Sarif.ConvertToSarif
public string InputFilePath { get; internal set; }
[Option(
- 'f',
- "format",
+ 't',
+ "tool",
HelpText = "The tool format of the input file.")]
public ToolFormat ToolFormat { get; internal set; }
| 1 | // Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
using CommandLine;
namespace Microsoft.CodeAnalysis.Sarif.ConvertToSarif
{
[Verb("convert", HelpText = "Convert a tool output log to SARIF format.")]
internal class ConvertOptions
{
[Value(0,
HelpText = "A file path to a tool log file that should be converted to the SARIF format.")]
public string InputFilePath { get; internal set; }
[Option(
'f',
"format",
HelpText = "The tool format of the input file.")]
public ToolFormat ToolFormat { get; internal set; }
[Option(
'o',
"output",
HelpText = "A file path to the converted SARIF log. Defaults to <input file name>.sarif.")]
public string OutputFilePath { get; internal set; }
[Option(
'p',
"pretty",
Default = false,
HelpText = "Produce pretty-printed JSON output rather than compact form.")]
public bool PrettyPrint { get; internal set; }
[Option(
'f',
"force",
Default = false,
HelpText = "Force overwrite of output file if it exists.")]
public bool Force { get; internal set; }
}
} | 1 | 10,158 | -f short form here collided with --force. :) | microsoft-sarif-sdk | .cs |
@@ -12,6 +12,5 @@ describe('preact', () => {
expect(cloneElement).to.be.a('function');
expect(createContext).to.be.a('function');
expect(options).to.exist.and.be.an('object');
- // expect(rerender).to.be.a('function');
});
}); | 1 | import { createElement, h, createContext, Component, Fragment, render, hydrate, cloneElement, options } from '../../src/index';
import { expect } from 'chai';
describe('preact', () => {
it('should be available as named exports', () => {
expect(h).to.be.a('function');
expect(createElement).to.be.a('function');
expect(Component).to.be.a('function');
expect(Fragment).to.exist;
expect(render).to.be.a('function');
expect(hydrate).to.be.a('function');
expect(cloneElement).to.be.a('function');
expect(createContext).to.be.a('function');
expect(options).to.exist.and.be.an('object');
// expect(rerender).to.be.a('function');
});
});
| 1 | 12,912 | I think it is better to describe in the release note that `rerender()` was removed. | preactjs-preact | js |
@@ -3,5 +3,5 @@ VERSION = ".".join(str(i) for i in IVERSION)
PATHOD = "pathod " + VERSION
MITMPROXY = "mitmproxy " + VERSION
-if __name__ == "__main__":
+if __name__ == "__main__": # pragma: no cover
print(VERSION) | 1 | IVERSION = (2, 0, 0)
VERSION = ".".join(str(i) for i in IVERSION)
PATHOD = "pathod " + VERSION
MITMPROXY = "mitmproxy " + VERSION
if __name__ == "__main__":
print(VERSION)
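# Illustrative sketch (not part of the original file): instead of excluding the
# __main__ block from coverage, it could be exercised with runpy in a test
# using pytest's capsys fixture (the module path "mitmproxy.version" is an
# assumption here):
#
#     import runpy
#     runpy.run_module("mitmproxy.version", run_name="__main__")
#     assert VERSION in capsys.readouterr().out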
| 1 | 12,749 | We could also test this easily with `runpy.run_module`, what do you think? | mitmproxy-mitmproxy | py |
@@ -388,8 +388,10 @@ func decode(v reflect.Value, d Decoder) error {
if err != nil {
return err
}
- v.Set(reflect.ValueOf(val))
- return nil
+ if v.Type().AssignableTo(reflect.TypeOf(val)) {
+ v.Set(reflect.ValueOf(val))
+ return nil
+ }
}
// Handle implemented interfaces first. | 1 | // Copyright 2019 The Go Cloud Development Kit Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// TODO(jba): support struct tags.
// TODO(jba): for efficiency, enable encoding of only a subset of field paths.
package driver
import (
"encoding"
"fmt"
"reflect"
"strconv"
"github.com/golang/protobuf/proto"
"gocloud.dev/docstore/internal/fields"
"gocloud.dev/internal/gcerr"
)
var (
binaryMarshalerType = reflect.TypeOf((*encoding.BinaryMarshaler)(nil)).Elem()
binaryUnmarshalerType = reflect.TypeOf((*encoding.BinaryUnmarshaler)(nil)).Elem()
textMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem()
textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
protoMessageType = reflect.TypeOf((*proto.Message)(nil)).Elem()
)
// An Encoder encodes Go values in some other form (e.g. JSON, protocol buffers).
// The encoding protocol is designed to avoid losing type information by passing
// values using interface{}. An Encoder is responsible for storing the value
// it is encoding.
//
// Because all drivers must support the same set of values, the encoding methods
// (with the exception of EncodeStruct) do not return errors. EncodeStruct is special
// because it is an escape hatch for arbitrary structs, not all of which may be
// encodable.
type Encoder interface {
// These methods all encode and store a single Go value.
EncodeNil()
EncodeBool(bool)
EncodeString(string)
EncodeInt(int64)
EncodeUint(uint64)
EncodeFloat(float64)
EncodeBytes([]byte)
// EncodeList is called when a slice or array is encountered (except for a
// []byte, which is handled by EncodeBytes). Its argument is the length of the
// slice or array. The encoding algorithm will call the returned Encoder that
// many times to encode the successive values of the list. After each such call,
// ListIndex will be called with the index of the element just encoded.
//
// For example, []string{"a", "b"} will result in these calls:
// enc2 := enc.EncodeList(2)
// enc2.EncodeString("a")
// enc2.ListIndex(0)
// enc2.EncodeString("b")
// enc2.ListIndex(1)
EncodeList(n int) Encoder
ListIndex(i int)
// EncodeMap is called when a map is encountered. Its argument is the number of
// fields in the map. The encoding algorithm will call the returned Encoder that
// many times to encode the successive values of the map. After each such call,
// MapKey will be called with the key of the element just encoded.
//
// For example, map[string]int{"A": 1, "B": 2} will result in these calls:
// enc2 := enc.EncodeMap(2)
// enc2.EncodeInt(1)
// enc2.MapKey("A")
// enc2.EncodeInt(2)
// enc2.MapKey("B")
//
// EncodeMap is also called for structs. The map then consists of the exported
// fields of the struct. For struct{A, B int}{1, 2}, if EncodeStruct returns
// false, the same sequence of calls as above will occur.
EncodeMap(n int) Encoder
MapKey(string)
// If the encoder wants to encode a value in a special way it should do so here
// and return true along with any error from the encoding. Otherwise, it should
// return false.
EncodeSpecial(v reflect.Value) (bool, error)
}
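// Illustrative sketch (not part of the original file): a minimal in-memory
// Encoder that follows the protocol described above, storing everything it is
// given in an interface{} tree. Real drivers encode into their native document
// representation instead; this only makes the call sequence concrete.
type memEncoder struct {
	val interface{} // the most recently encoded value
}

func (e *memEncoder) EncodeNil()            { e.val = nil }
func (e *memEncoder) EncodeBool(x bool)     { e.val = x }
func (e *memEncoder) EncodeString(x string) { e.val = x }
func (e *memEncoder) EncodeInt(x int64)     { e.val = x }
func (e *memEncoder) EncodeUint(x uint64)   { e.val = x }
func (e *memEncoder) EncodeFloat(x float64) { e.val = x }
func (e *memEncoder) EncodeBytes(x []byte)  { e.val = x }

// EncodeList returns a child encoder whose ListIndex calls place the most
// recently encoded value at the given slice index.
func (e *memEncoder) EncodeList(n int) Encoder {
	l := &memListEncoder{elems: make([]interface{}, n)}
	e.val = l.elems
	return l
}

// EncodeMap returns a child encoder whose MapKey calls place the most recently
// encoded value under the given key.
func (e *memEncoder) EncodeMap(n int) Encoder {
	m := &memMapEncoder{fields: make(map[string]interface{}, n)}
	e.val = m.fields
	return m
}

// ListIndex and MapKey are only meaningful on the child encoders returned above.
func (e *memEncoder) ListIndex(int) {}
func (e *memEncoder) MapKey(string) {}

// This sketch has no special-cased values.
func (e *memEncoder) EncodeSpecial(reflect.Value) (bool, error) { return false, nil }

type memListEncoder struct {
	memEncoder
	elems []interface{}
}

func (e *memListEncoder) ListIndex(i int) { e.elems[i] = e.val }

type memMapEncoder struct {
	memEncoder
	fields map[string]interface{}
}

func (e *memMapEncoder) MapKey(k string) { e.fields[k] = e.val }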
// Encode encodes the value using the given Encoder. It traverses the value,
// iterating over arrays, slices, maps and the exported fields of structs. If it
// encounters a non-nil pointer, it encodes the value that it points to.
// Encode treats a few interfaces specially:
//
// If the value implements encoding.BinaryMarshaler, Encode invokes MarshalBinary
// on it and encodes the resulting byte slice.
//
// If the value implements encoding.TextMarshaler, Encode invokes MarshalText on it
// and encodes the resulting string.
//
// If the value implements proto.Message, Encode invokes proto.Marshal on it and encodes
// the resulting byte slice. Here proto is the package "github.com/golang/protobuf/proto".
//
// Not every map key type can be encoded. Only strings, integers (signed or
// unsigned), and types that implement encoding.TextMarshaler are permitted as map
// keys. These restrictions match exactly those of the encoding/json package.
func Encode(v reflect.Value, e Encoder) error {
return wrap(encode(v, e), gcerr.InvalidArgument)
}
func encode(v reflect.Value, enc Encoder) error {
if !v.IsValid() {
enc.EncodeNil()
return nil
}
done, err := enc.EncodeSpecial(v)
if done {
return err
}
if v.Type().Implements(binaryMarshalerType) {
bytes, err := v.Interface().(encoding.BinaryMarshaler).MarshalBinary()
if err != nil {
return err
}
enc.EncodeBytes(bytes)
return nil
}
if v.Type().Implements(protoMessageType) {
if v.IsNil() {
enc.EncodeNil()
} else {
bytes, err := proto.Marshal(v.Interface().(proto.Message))
if err != nil {
return err
}
enc.EncodeBytes(bytes)
}
return nil
}
if v.Type().Implements(textMarshalerType) {
bytes, err := v.Interface().(encoding.TextMarshaler).MarshalText()
if err != nil {
return err
}
enc.EncodeString(string(bytes))
return nil
}
switch v.Kind() {
case reflect.Bool:
enc.EncodeBool(v.Bool())
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
enc.EncodeInt(v.Int())
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
enc.EncodeUint(v.Uint())
case reflect.Float32, reflect.Float64:
enc.EncodeFloat(v.Float())
case reflect.String:
enc.EncodeString(v.String())
case reflect.Slice:
if v.IsNil() {
enc.EncodeNil()
return nil
}
fallthrough
case reflect.Array:
return encodeList(v, enc)
case reflect.Map:
return encodeMap(v, enc)
case reflect.Ptr:
if v.IsNil() {
enc.EncodeNil()
return nil
}
return encode(v.Elem(), enc)
case reflect.Interface:
if v.IsNil() {
enc.EncodeNil()
return nil
}
return encode(v.Elem(), enc)
case reflect.Struct:
fields, err := fieldCache.Fields(v.Type())
if err != nil {
return err
}
return encodeStructWithFields(v, fields, enc)
default:
return gcerr.Newf(gcerr.InvalidArgument, nil, "cannot encode type %s", v.Type())
}
return nil
}
// Encode an array or non-nil slice.
func encodeList(v reflect.Value, enc Encoder) error {
// Byte slices encode specially.
if v.Type().Kind() == reflect.Slice && v.Type().Elem().Kind() == reflect.Uint8 {
enc.EncodeBytes(v.Bytes())
return nil
}
n := v.Len()
enc2 := enc.EncodeList(n)
for i := 0; i < n; i++ {
if err := encode(v.Index(i), enc2); err != nil {
return err
}
enc2.ListIndex(i)
}
return nil
}
// Encode a map.
func encodeMap(v reflect.Value, enc Encoder) error {
if v.IsNil() {
enc.EncodeNil()
return nil
}
keys := v.MapKeys()
enc2 := enc.EncodeMap(len(keys))
for _, k := range keys {
sk, err := stringifyMapKey(k)
if err != nil {
return err
}
if err := encode(v.MapIndex(k), enc2); err != nil {
return err
}
enc2.MapKey(sk)
}
return nil
}
// k is the key of a map. Encode it as a string.
// Only strings, integers (signed or unsigned), and types that implement
// encoding.TextMarshaler are supported.
func stringifyMapKey(k reflect.Value) (string, error) {
// This is basically reflectWithString.resolve, from encoding/json/encode.go.
if k.Kind() == reflect.String {
return k.String(), nil
}
if tm, ok := k.Interface().(encoding.TextMarshaler); ok {
b, err := tm.MarshalText()
if err != nil {
return "", err
}
return string(b), nil
}
switch k.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return strconv.FormatInt(k.Int(), 10), nil
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return strconv.FormatUint(k.Uint(), 10), nil
default:
return "", gcerr.Newf(gcerr.InvalidArgument, nil, "cannot encode key %v of type %s", k, k.Type())
}
}
func encodeStructWithFields(v reflect.Value, fields fields.List, e Encoder) error {
e2 := e.EncodeMap(len(fields))
for _, f := range fields {
fv, ok := fieldByIndex(v, f.Index)
if !ok {
// if !ok, then f is a field in an embedded pointer to struct, and that embedded pointer
// is nil in v. In other words, the field exists in the struct type, but not this particular
// struct value. So we just ignore it.
continue
}
if f.ParsedTag.(tagOptions).omitEmpty && IsEmptyValue(fv) {
continue
}
if err := encode(fv, e2); err != nil {
return err
}
e2.MapKey(f.Name)
}
return nil
}
// fieldByIndex retrieves the field of v at the given index if present.
// v must be a struct. index must refer to a valid field of v's type.
// The second return value is false if there is a nil embedded pointer
// along the path denoted by index.
//
// From encoding/json/encode.go.
func fieldByIndex(v reflect.Value, index []int) (reflect.Value, bool) {
for _, i := range index {
if v.Kind() == reflect.Ptr {
if v.IsNil() {
return reflect.Value{}, false
}
v = v.Elem()
}
v = v.Field(i)
}
return v, true
}
////////////////////////////////////////////////////////////////
// TODO(jba): consider a fast path: if we are decoding into a struct, assume the same struct
// was used to encode. Then we can build a map from field names to functions, where each
// function avoids all the tests of Decode and contains just the code for setting the field.
// TODO(jba): provide a way to override the check on missing fields.
// A Decoder decodes data that was produced by Encode back into Go values.
// Each Decoder instance is responsible for decoding one value.
type Decoder interface {
// The AsXXX methods each report whether the value being decoded can be represented as
// a particular Go type. If so, the method should return the value as that type, and true;
// otherwise it should return the zero value and false.
AsString() (string, bool)
AsInt() (int64, bool)
AsUint() (uint64, bool)
AsFloat() (float64, bool)
AsBytes() ([]byte, bool)
AsBool() (bool, bool)
AsNull() bool
// ListLen should return the length of the value being decoded and true, if the
// value can be decoded into a slice or array. Otherwise, ListLen should return
// (0, false).
ListLen() (int, bool)
// If ListLen returned true, then DecodeList will be called. It should iterate
// over the value being decoded in sequence from index 0, invoking the callback
// for each element with the element's index and a Decoder for the element.
// If the callback returns false, DecodeList should return immediately.
DecodeList(func(int, Decoder) bool)
// MapLen should return the number of fields of the value being decoded and true,
// if the value can be decoded into a map or struct. Otherwise, it should return
// (0, false).
MapLen() (int, bool)
// DecodeMap iterates over the fields of the value being decoded, invoking the
// callback on each with the field name, a Decoder for the field value, and a bool
// indicating whether to use exact matching for the field names. It will
// be called when MapLen returns true or when decoding a struct. If the callback
// returns false, DecodeMap should return immediately.
DecodeMap(func(string, Decoder, bool) bool)
// AsInterface should decode the value into the Go value that best represents it.
AsInterface() (interface{}, error)
// If the decoder wants to decode a value in a special way it should do so here
// and return true, the decoded value, and any error from the decoding.
// Otherwise, it should return false.
AsSpecial(reflect.Value) (bool, interface{}, error)
// String should return a human-readable representation of the Decoder, for error messages.
String() string
}
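// Illustrative example (not part of the original file): for a decoder holding
// the list ["a", "b"], ListLen reports (2, true) and DecodeList invokes the
// callback as f(0, <Decoder for "a">), then f(1, <Decoder for "b">).
// For a decoder holding {"A": 1, "B": 2}, MapLen reports (2, true) and
// DecodeMap invokes its callback once per field with the field name, a Decoder
// for the value, and the exact-match flag.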
// Decode decodes the value held in the Decoder d into v.
// Decode creates slices, maps and pointer elements as needed.
// It treats values that implement encoding.BinaryUnmarshaler,
// encoding.TextUnmarshaler and proto.Message specially; see Encode.
func Decode(v reflect.Value, d Decoder) error {
return wrap(decode(v, d), gcerr.InvalidArgument)
}
func decode(v reflect.Value, d Decoder) error {
if !v.CanSet() {
return fmt.Errorf("while decoding: cannot set %+v", v)
}
// A Null value sets anything nullable to nil.
// If the value isn't nullable, we keep going.
// TODO(jba): should we treat decoding a null into a non-nullable as an error, or
// ignore it like encoding/json does?
if d.AsNull() {
switch v.Kind() {
case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice:
v.Set(reflect.Zero(v.Type()))
return nil
}
}
if done, val, err := d.AsSpecial(v); done {
if err != nil {
return err
}
v.Set(reflect.ValueOf(val))
return nil
}
// Handle implemented interfaces first.
if reflect.PtrTo(v.Type()).Implements(binaryUnmarshalerType) {
if b, ok := d.AsBytes(); ok {
return v.Addr().Interface().(encoding.BinaryUnmarshaler).UnmarshalBinary(b)
}
return decodingError(v, d)
}
if reflect.PtrTo(v.Type()).Implements(protoMessageType) {
if b, ok := d.AsBytes(); ok {
return proto.Unmarshal(b, v.Addr().Interface().(proto.Message))
}
return decodingError(v, d)
}
if reflect.PtrTo(v.Type()).Implements(textUnmarshalerType) {
if s, ok := d.AsString(); ok {
return v.Addr().Interface().(encoding.TextUnmarshaler).UnmarshalText([]byte(s))
}
return decodingError(v, d)
}
switch v.Kind() {
case reflect.Bool:
if b, ok := d.AsBool(); ok {
v.SetBool(b)
return nil
}
case reflect.String:
if s, ok := d.AsString(); ok {
v.SetString(s)
return nil
}
case reflect.Float32, reflect.Float64:
if f, ok := d.AsFloat(); ok {
v.SetFloat(f)
return nil
}
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
i, ok := d.AsInt()
if !ok {
// Accept a floating-point number with integral value.
f, ok := d.AsFloat()
if !ok {
return decodingError(v, d)
}
i = int64(f)
if float64(i) != f {
return gcerr.Newf(gcerr.InvalidArgument, nil, "float %f does not fit into %s", f, v.Type())
}
}
if v.OverflowInt(i) {
return overflowError(i, v.Type())
}
v.SetInt(i)
return nil
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
u, ok := d.AsUint()
if !ok {
// Accept a floating-point number with integral value.
f, ok := d.AsFloat()
if !ok {
return decodingError(v, d)
}
u = uint64(f)
if float64(u) != f {
return gcerr.Newf(gcerr.InvalidArgument, nil, "float %f does not fit into %s", f, v.Type())
}
}
if v.OverflowUint(u) {
return overflowError(u, v.Type())
}
v.SetUint(u)
return nil
case reflect.Slice, reflect.Array:
return decodeList(v, d)
case reflect.Map:
return decodeMap(v, d)
case reflect.Ptr:
// If the pointer is nil, set it to a zero value.
if v.IsNil() {
v.Set(reflect.New(v.Type().Elem()))
}
return decode(v.Elem(), d)
case reflect.Struct:
return decodeStruct(v, d)
case reflect.Interface:
if v.NumMethod() == 0 { // empty interface
// If v holds a pointer, set the pointer.
if !v.IsNil() && v.Elem().Kind() == reflect.Ptr {
return decode(v.Elem(), d)
}
// Otherwise, create a fresh value.
x, err := d.AsInterface()
if err != nil {
return err
}
v.Set(reflect.ValueOf(x))
return nil
}
// Any other kind of interface is an error???
}
return decodingError(v, d)
}
func decodeList(v reflect.Value, d Decoder) error {
// If we're decoding into a byte slice or array, and the decoded value
// supports that, then do the decoding.
if v.Type().Elem().Kind() == reflect.Uint8 {
if b, ok := d.AsBytes(); ok {
v.SetBytes(b)
return nil
}
// Fall through to decode the []byte as an ordinary slice.
}
dlen, ok := d.ListLen()
if !ok {
return decodingError(v, d)
}
err := prepareLength(v, dlen)
if err != nil {
return err
}
d.DecodeList(func(i int, vd Decoder) bool {
if err != nil || i >= dlen {
return false
}
err = decode(v.Index(i), vd)
return err == nil
})
return err
}
// v must be a slice or array. We want it to be of length wantLen. Prepare it as
// necessary (details described in the code below).
// If an array is too short, return an error. This behavior differs from
// encoding/json, which just populates a short array with whatever it can and drops
// the rest. That can lose data.
func prepareLength(v reflect.Value, wantLen int) error {
vLen := v.Len()
if v.Kind() == reflect.Slice {
// Construct a slice of the right size, avoiding allocation if possible.
switch {
case vLen < wantLen: // v too short
if v.Cap() >= wantLen { // extend its length if there's room
v.SetLen(wantLen)
} else { // else make a new one
v.Set(reflect.MakeSlice(v.Type(), wantLen, wantLen))
}
case vLen > wantLen: // v too long; truncate it
v.SetLen(wantLen)
}
} else { // array
switch {
case vLen < wantLen: // v too short
return gcerr.Newf(gcerr.InvalidArgument, nil, "array length %d is too short for incoming list of length %d",
vLen, wantLen)
case vLen > wantLen: // v too long; set extra elements to zero
z := reflect.Zero(v.Type().Elem())
for i := wantLen; i < vLen; i++ {
v.Index(i).Set(z)
}
}
}
return nil
}
// Since a map value is not settable via reflection, this function always creates a
// new element for each corresponding map key. Existing values of v are overwritten.
// This happens even if the map value is something like a pointer to a struct, where
// we could in theory populate the existing struct value instead of discarding it.
// This behavior matches encoding/json.
func decodeMap(v reflect.Value, d Decoder) error {
mapLen, ok := d.MapLen()
if !ok {
return decodingError(v, d)
}
t := v.Type()
if v.IsNil() {
v.Set(reflect.MakeMapWithSize(t, mapLen))
}
et := t.Elem()
var err error
kt := v.Type().Key()
d.DecodeMap(func(key string, vd Decoder, _ bool) bool {
if err != nil {
return false
}
el := reflect.New(et).Elem()
err = decode(el, vd)
if err != nil {
return false
}
vk, e := unstringifyMapKey(key, kt)
if e != nil {
err = e
return false
}
v.SetMapIndex(vk, el)
return err == nil
})
return err
}
// Given a map key encoded as a string, and the type of the map key, convert the key
// into the type.
// For example, if we are decoding the key "3" for a map[int]interface{}, then key is "3"
// and keyType is reflect.Int.
func unstringifyMapKey(key string, keyType reflect.Type) (reflect.Value, error) {
// This code is mostly from the middle of decodeState.object in encoding/json/decode.go.
// Except for literalStore, which I don't understand.
// TODO(jba): understand literalStore.
switch {
case keyType.Kind() == reflect.String:
return reflect.ValueOf(key).Convert(keyType), nil
case reflect.PtrTo(keyType).Implements(textUnmarshalerType):
tu := reflect.New(keyType)
if err := tu.Interface().(encoding.TextUnmarshaler).UnmarshalText([]byte(key)); err != nil {
return reflect.Value{}, err
}
return tu.Elem(), nil
case keyType.Kind() == reflect.Interface && keyType.NumMethod() == 0:
// TODO: remove this case? encoding/json doesn't support it.
return reflect.ValueOf(key), nil
default:
switch keyType.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
n, err := strconv.ParseInt(key, 10, 64)
if err != nil {
return reflect.Value{}, err
}
if reflect.Zero(keyType).OverflowInt(n) {
return reflect.Value{}, overflowError(n, keyType)
}
return reflect.ValueOf(n).Convert(keyType), nil
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
n, err := strconv.ParseUint(key, 10, 64)
if err != nil {
return reflect.Value{}, err
}
if reflect.Zero(keyType).OverflowUint(n) {
return reflect.Value{}, overflowError(n, keyType)
}
return reflect.ValueOf(n).Convert(keyType), nil
default:
return reflect.Value{}, gcerr.Newf(gcerr.InvalidArgument, nil, "invalid key type %s", keyType)
}
}
}
func decodeStruct(v reflect.Value, d Decoder) error {
fs, err := fieldCache.Fields(v.Type())
if err != nil {
return err
}
d.DecodeMap(func(key string, d2 Decoder, exactMatch bool) bool {
if err != nil {
return false
}
var f *fields.Field
if exactMatch {
f = fs.MatchExact(key)
} else {
f = fs.MatchFold(key)
}
if f == nil {
err = gcerr.Newf(gcerr.InvalidArgument, nil, "no field matching %q in %s", key, v.Type())
return false
}
fv, ok := fieldByIndexCreate(v, f.Index)
if !ok {
err = gcerr.Newf(gcerr.InvalidArgument, nil,
"setting field %q in %s: cannot create embedded pointer field of unexported type",
key, v.Type())
return false
}
err = decode(fv, d2)
return err == nil
})
return err
}
// fieldByIndexCreate retrieves the field of v at the given index if present,
// creating embedded struct pointers where necessary.
// v must be a struct. index must refer to a valid field of v's type.
// The second return value is false if there is a nil embedded pointer of unexported
// type along the path denoted by index. (We cannot create such pointers.)
func fieldByIndexCreate(v reflect.Value, index []int) (reflect.Value, bool) {
for _, i := range index {
if v.Kind() == reflect.Ptr {
if v.IsNil() {
if !v.CanSet() {
return reflect.Value{}, false
}
v.Set(reflect.New(v.Type().Elem()))
}
v = v.Elem()
}
v = v.Field(i)
}
return v, true
}
func decodingError(v reflect.Value, d Decoder) error {
return gcerr.Newf(gcerr.InvalidArgument, nil, "cannot set type %s to %v", v.Type(), d)
}
func overflowError(x interface{}, t reflect.Type) error {
return gcerr.Newf(gcerr.InvalidArgument, nil, "value %v overflows type %s", x, t)
}
func wrap(err error, code gcerr.ErrorCode) error {
if _, ok := err.(*gcerr.Error); !ok && err != nil {
err = gcerr.New(code, err, 2, err.Error())
}
return err
}
var fieldCache = fields.NewCache(parseTag, nil, nil)
// IsEmptyValue returns whether or not v is a zero value of its type.
// Copied from encoding/json, go 1.12.
func IsEmptyValue(v reflect.Value) bool {
switch k := v.Kind(); k {
case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
return v.Len() == 0
case reflect.Bool:
return !v.Bool()
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return v.Int() == 0
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return v.Uint() == 0
case reflect.Float32, reflect.Float64:
return v.Float() == 0
case reflect.Interface, reflect.Ptr:
return v.IsNil()
}
return false
}
// Options for struct tags.
type tagOptions struct {
omitEmpty bool // do not encode value if empty
}
// parseTag interprets docstore struct field tags.
func parseTag(t reflect.StructTag) (name string, keep bool, other interface{}, err error) {
var opts []string
name, keep, opts = fields.ParseStandardTag("docstore", t)
tagOpts := tagOptions{}
for _, opt := range opts {
switch opt {
case "omitempty":
tagOpts.omitEmpty = true
default:
return "", false, nil, gcerr.Newf(gcerr.InvalidArgument, nil, "unknown tag option: %q", opt)
}
}
return name, keep, tagOpts, nil
}
| 1 | 20,363 | Is it OK to fall through if it's not assignable? I was kind of surprised when the tests passed. Maybe one of the options below ends up working? For the new tests, this panics because `**not assignable uuid.UUID to []uint8**`. `uuid.UUID` is an array, not a slice. | google-go-cloud | go |
@@ -133,7 +133,8 @@ func runAPIAndWait(ctx context.Context, nd *node.Node, config *config.Config, re
ctx: context.Background(),
api: api,
porcelainAPI: nd.PorcelainAPI,
- blockpAPI: nd.BlockAPI,
+ blockAPI: nd.BlockAPI,
+ retrievalAPI: nd.RetrievalAPI,
}
cfg := cmdhttp.NewServerConfig() | 1 | package commands
import (
"context"
"fmt"
"net/http"
_ "net/http/pprof" // nolint: golint
"os"
"os/signal"
"syscall"
"time"
ma "gx/ipfs/QmNTCey11oxhb1AxDnQBRHtdhap6Ctud872NjAYPYYXPuc/go-multiaddr"
"gx/ipfs/QmQtQrtNioesAWtrx8csBvfY37gTe94d6wQ3VikZUjxD39/go-ipfs-cmds"
cmdhttp "gx/ipfs/QmQtQrtNioesAWtrx8csBvfY37gTe94d6wQ3VikZUjxD39/go-ipfs-cmds/http"
"gx/ipfs/QmVmDhyTTUcQXFD1rRQ64fGLMSAoaQvNH3hwuaCFAPq2hy/errors"
"gx/ipfs/QmZcLBXKaFe8ND5YHPkJRAwmhJGrVsi1JqDZNyJ4nRK5Mj/go-multiaddr-net"
writer "gx/ipfs/QmbkT7eMTyXfpeyB3ZMxxcxg7XH8t6uXp49jqzz4HB7BGF/go-log/writer"
"gx/ipfs/Qmde5VP1qUkyQXKCfmEUA7bP64V2HAptbJ7phuPp7jXWwg/go-ipfs-cmdkit"
"github.com/filecoin-project/go-filecoin/api/impl"
"github.com/filecoin-project/go-filecoin/config"
"github.com/filecoin-project/go-filecoin/mining"
"github.com/filecoin-project/go-filecoin/node"
"github.com/filecoin-project/go-filecoin/repo"
)
// exposed here, to be available during testing
var sigCh = make(chan os.Signal, 1)
var daemonCmd = &cmds.Command{
Helptext: cmdkit.HelpText{
Tagline: "Start a long-running daemon process",
},
Options: []cmdkit.Option{
cmdkit.StringOption(SwarmAddress, "multiaddress to listen on for filecoin network connections"),
cmdkit.StringOption(SwarmPublicRelayAddress, "public multiaddress for routing circuit relay traffic. Necessary for relay nodes to provide this if they are not publically dialable"),
cmdkit.BoolOption(OfflineMode, "start the node without networking"),
cmdkit.BoolOption(ELStdout),
cmdkit.BoolOption(IsRelay, "advertise and allow filecoin network traffic to be relayed through this node"),
cmdkit.StringOption(BlockTime, "time a node waits before trying to mine the next block").WithDefault(mining.DefaultBlockTime.String()),
},
Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error {
return daemonRun(req, re, env)
},
Encoders: cmds.EncoderMap{
cmds.Text: cmds.Encoders[cmds.Text],
},
}
func daemonRun(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error {
// third precedence is config file.
rep, err := getRepo(req)
if err != nil {
return err
}
// second highest precedence is env vars.
if envapi := os.Getenv("FIL_API"); envapi != "" {
rep.Config().API.Address = envapi
}
// highest precedence is cmd line flag.
if apiAddress, ok := req.Options[OptionAPI].(string); ok && apiAddress != "" {
rep.Config().API.Address = apiAddress
}
if swarmAddress, ok := req.Options[SwarmAddress].(string); ok && swarmAddress != "" {
rep.Config().Swarm.Address = swarmAddress
}
if publicRelayAddress, ok := req.Options[SwarmPublicRelayAddress].(string); ok && publicRelayAddress != "" {
rep.Config().Swarm.PublicRelayAddress = publicRelayAddress
}
opts, err := node.OptionsFromRepo(rep)
if err != nil {
return err
}
if offlineMode, ok := req.Options[OfflineMode].(bool); ok {
opts = append(opts, node.OfflineMode(offlineMode))
}
if isRelay, ok := req.Options[IsRelay].(bool); ok && isRelay {
opts = append(opts, node.IsRelay())
}
durStr, ok := req.Options[BlockTime].(string)
if !ok {
return errors.New("Bad block time passed")
}
blockTime, err := time.ParseDuration(durStr)
if err != nil {
return errors.Wrap(err, "Bad block time passed")
}
opts = append(opts, node.BlockTime(blockTime))
fcn, err := node.New(req.Context, opts...)
if err != nil {
return err
}
if fcn.OfflineMode {
re.Emit("Filecoin node running in offline mode (libp2p is disabled)\n") // nolint: errcheck
} else {
re.Emit(fmt.Sprintf("My peer ID is %s\n", fcn.Host().ID().Pretty())) // nolint: errcheck
for _, a := range fcn.Host().Addrs() {
re.Emit(fmt.Sprintf("Swarm listening on: %s\n", a)) // nolint: errcheck
}
}
if _, ok := req.Options[ELStdout].(bool); ok {
writer.WriterGroup.AddWriter(os.Stdout)
}
return runAPIAndWait(req.Context, fcn, rep.Config(), req)
}
func getRepo(req *cmds.Request) (repo.Repo, error) {
return repo.OpenFSRepo(getRepoDir(req))
}
func runAPIAndWait(ctx context.Context, nd *node.Node, config *config.Config, req *cmds.Request) error {
api := impl.New(nd)
if err := api.Daemon().Start(ctx); err != nil {
return err
}
servenv := &Env{
// TODO: should this be the passed in context?
ctx: context.Background(),
api: api,
porcelainAPI: nd.PorcelainAPI,
blockpAPI: nd.BlockAPI,
}
cfg := cmdhttp.NewServerConfig()
cfg.APIPath = APIPrefix
cfg.SetAllowedOrigins(config.API.AccessControlAllowOrigin...)
cfg.SetAllowedMethods(config.API.AccessControlAllowMethods...)
cfg.SetAllowCredentials(config.API.AccessControlAllowCredentials)
signal.Notify(sigCh, os.Interrupt, syscall.SIGTERM)
defer signal.Stop(sigCh)
maddr, err := ma.NewMultiaddr(config.API.Address)
if err != nil {
return err
}
// For the case when /ip4/127.0.0.1/tcp/0 is passed,
// we want to fetch the new multiaddr from the listener, as it may (should)
// have resolved to some other value. i.e. resolve port zero to real value.
apiLis, err := manet.Listen(maddr)
if err != nil {
return err
}
config.API.Address = apiLis.Multiaddr().String()
handler := http.NewServeMux()
handler.Handle("/debug/pprof/", http.DefaultServeMux)
handler.Handle(APIPrefix+"/", cmdhttp.NewHandler(servenv, rootCmdDaemon, cfg))
apiserv := http.Server{
Handler: handler,
}
go func() {
err := apiserv.Serve(manet.NetListener(apiLis))
if err != nil && err != http.ErrServerClosed {
panic(err)
}
}()
// write our api address to file
// TODO: use api.Repo() once implemented
if err := nd.Repo.SetAPIAddr(config.API.Address); err != nil {
return errors.Wrap(err, "Could not save API address to repo")
}
signal := <-sigCh
fmt.Printf("Got %s, shutting down...\n", signal)
// allow 5 seconds for clean shutdown. Ideally it would never take this long.
ctx, cancel := context.WithTimeout(ctx, time.Second*5)
defer cancel()
if err := apiserv.Shutdown(ctx); err != nil {
fmt.Println("failed to shut down api server:", err)
}
return api.Daemon().Stop(ctx)
}
| 1 | 17,912 | I know this is after the fact but imo blockAPI should be blockMiningAPI. | filecoin-project-venus | go |
@@ -13,7 +13,11 @@ use Ergonode\SharedKernel\Domain\AbstractCode;
class AttributeCode extends AbstractCode
{
- public const PATTERN = '/^([a-zA-Z0-9_]+)$/';
+ public const NOT_SYSTEM_ATTRIBUTE_PATTERN = '/^(?!'.self::SYSTEM_ATTRIBUTE_PREFIX.')[a-zA-Z0-9_]+$/';
+
+ public const SYSTEM_ATTRIBUTE_PATTERN = '/^'.self::SYSTEM_ATTRIBUTE_PREFIX.'[a-zA-Z0-9_]+$/';
+
+ public const SYSTEM_ATTRIBUTE_PREFIX = 'esa_';
public function __construct(string $value)
{ | 1 | <?php
/**
* Copyright © Ergonode Sp. z o.o. All rights reserved.
* See LICENSE.txt for license details.
*/
declare(strict_types=1);
namespace Ergonode\Attribute\Domain\ValueObject;
use Ergonode\SharedKernel\Domain\AbstractCode;
class AttributeCode extends AbstractCode
{
public const PATTERN = '/^([a-zA-Z0-9_]+)$/';
public function __construct(string $value)
{
parent::__construct(strtolower($value));
}
public static function isValid(string $value): bool
{
$value = strtolower($value);
return parent::isValid($value)
&& preg_match(self::PATTERN, $value);
}
}
| 1 | 9,621 | it's just a pattern as it was. SystemAttributeCode should have it's own definition and validate by it | ergonode-backend | php |
@@ -0,0 +1,17 @@
+/* Copyright (c) 2019 vesoft inc. All rights reserved.
+ *
+ * This source code is licensed under Apache 2.0 License,
+ * attached with Common Clause Condition 1.0, found in the LICENSES directory.
+ */
+
+#include "LookUpEdgeIndexProcessor.h"
+
+namespace nebula {
+namespace storage {
+
+void LookUpEdgeIndexProcessor::process(const cpp2::LookUpIndexRequest& req) {
+ UNUSED(req);
+}
+} // namespace storage
+} // namespace nebula
+ | 1 | 1 | 26,081 | Similar to the code block when we use prefix, maybe we can unify them to base? | vesoft-inc-nebula | cpp |
|
@@ -100,6 +100,15 @@ func (in *PodChaos) ValidateScheduler(spec *field.Path) field.ErrorList {
allErrs = append(allErrs, err...)
}
break
+ case PodNotReadyAction:
+ // We choose to ignore the Duration property even user define it
+ if in.Spec.Scheduler == nil {
+ allErrs = append(allErrs, field.Invalid(schedulerField, in.Spec.Scheduler, ValidatePodchaosSchedulerError))
+ } else {
+ _, err := ParseCron(in.Spec.Scheduler.Cron, schedulerField.Child("cron"))
+ allErrs = append(allErrs, err...)
+ }
+ break
default:
err := fmt.Errorf("podchaos[%s/%s] have unknown action type", in.Namespace, in.Name)
log.Error(err, "Wrong PodChaos Action type") | 1 | // Copyright 2020 Chaos Mesh Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package v1alpha1
import (
"fmt"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/validation/field"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/webhook"
)
// log is for logging in this package.
var podchaoslog = logf.Log.WithName("podchaos-resource")
// +kubebuilder:webhook:path=/mutate-chaos-mesh-org-v1alpha1-podchaos,mutating=true,failurePolicy=fail,groups=chaos-mesh.org,resources=podchaos,verbs=create;update,versions=v1alpha1,name=mpodchaos.kb.io
var _ webhook.Defaulter = &PodChaos{}
// Default implements webhook.Defaulter so a webhook will be registered for the type
func (in *PodChaos) Default() {
podchaoslog.Info("default", "name", in.Name)
in.Spec.Selector.DefaultNamespace(in.GetNamespace())
}
// +kubebuilder:webhook:verbs=create;update,path=/validate-chaos-mesh-org-v1alpha1-podchaos,mutating=false,failurePolicy=fail,groups=chaos-mesh.org,resources=podchaos,versions=v1alpha1,name=vpodchaos.kb.io
var _ ChaosValidator = &PodChaos{}
// ValidateCreate implements webhook.Validator so a webhook will be registered for the type
func (in *PodChaos) ValidateCreate() error {
podchaoslog.Info("validate create", "name", in.Name)
return in.Validate()
}
// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type
func (in *PodChaos) ValidateUpdate(old runtime.Object) error {
podchaoslog.Info("validate update", "name", in.Name)
return in.Validate()
}
// ValidateDelete implements webhook.Validator so a webhook will be registered for the type
func (in *PodChaos) ValidateDelete() error {
podchaoslog.Info("validate delete", "name", in.Name)
// Nothing to do?
return nil
}
// Validate validates chaos object
func (in *PodChaos) Validate() error {
specField := field.NewPath("spec")
allErrs := in.ValidateScheduler(specField)
allErrs = append(allErrs, in.ValidatePodMode(specField)...)
allErrs = append(allErrs, in.Spec.validateContainerName(specField.Child("containerName"))...)
if len(allErrs) > 0 {
return fmt.Errorf(allErrs.ToAggregate().Error())
}
return nil
}
// ValidateScheduler validates the scheduler and duration
func (in *PodChaos) ValidateScheduler(spec *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
schedulerField := spec.Child("scheduler")
switch in.Spec.Action {
case PodFailureAction:
allErrs = append(allErrs, ValidateScheduler(in, spec)...)
break
case PodKillAction:
// We choose to ignore the Duration property even user define it
if in.Spec.Scheduler == nil {
allErrs = append(allErrs, field.Invalid(schedulerField, in.Spec.Scheduler, ValidatePodchaosSchedulerError))
} else {
_, err := ParseCron(in.Spec.Scheduler.Cron, schedulerField.Child("cron"))
allErrs = append(allErrs, err...)
}
break
case ContainerKillAction:
// We choose to ignore the Duration property even user define it
if in.Spec.Scheduler == nil {
allErrs = append(allErrs, field.Invalid(schedulerField, in.Spec.Scheduler, ValidatePodchaosSchedulerError))
} else {
_, err := ParseCron(in.Spec.Scheduler.Cron, schedulerField.Child("cron"))
allErrs = append(allErrs, err...)
}
break
default:
err := fmt.Errorf("podchaos[%s/%s] have unknown action type", in.Namespace, in.Name)
log.Error(err, "Wrong PodChaos Action type")
actionField := spec.Child("action")
allErrs = append(allErrs, field.Invalid(actionField, in.Spec.Action, err.Error()))
break
}
return allErrs
}
// ValidatePodMode validates the value with podmode
func (in *PodChaos) ValidatePodMode(spec *field.Path) field.ErrorList {
return ValidatePodMode(in.Spec.Value, in.Spec.Mode, spec.Child("value"))
}
// validateContainerName validates the ContainerName
func (in *PodChaosSpec) validateContainerName(containerField *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if in.Action == ContainerKillAction {
if in.ContainerName == "" {
err := fmt.Errorf("the name of container should not be empty on %s action", in.Action)
allErrs = append(allErrs, field.Invalid(containerField, in.ContainerName, err.Error()))
}
}
return allErrs
}
| 1 | 17,830 | I think `Duration` is useful for this action and we shouldn't ignore it. | chaos-mesh-chaos-mesh | go |
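A minimal sketch of what the review comment seems to ask for in `ValidateScheduler` above: rather than ignoring `Duration` for the pod-kill case, reuse the package-level `ValidateScheduler(in, spec)` helper that the `PodFailureAction` branch already calls. Only that helper's existence is taken from the file; the assumption that it checks `Duration` together with the cron `Scheduler` follows from how the pod-failure branch uses it and is not verified here. This is a fragment of the existing switch, not a complete function.

```go
// Hypothetical replacement for the PodKillAction branch of (*PodChaos).ValidateScheduler.
// Assumption: the package-level ValidateScheduler(in, spec) helper, already used for
// PodFailureAction above, validates Duration together with the Scheduler.
	case PodKillAction:
		// Validate Scheduler and Duration for this action as well, instead of
		// silently ignoring a user-supplied Duration.
		allErrs = append(allErrs, ValidateScheduler(in, spec)...)
```

If `Duration` does turn out to be meaningful for pod-kill, this keeps that action on the same validation path as pod-failure.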
@@ -243,13 +243,10 @@ func (s *stream) Write(p []byte) (int, error) {
s.mutex.Lock()
}
- if err != nil {
- return 0, err
- }
if s.err != nil {
- return len(p) - len(s.dataForWriting), s.err
+ err = s.err
}
- return len(p), nil
+ return len(p) - len(s.dataForWriting), err
}
func (s *stream) GetWriteOffset() protocol.ByteCount { | 1 | package quic
import (
"context"
"fmt"
"io"
"net"
"sync"
"time"
"github.com/lucas-clemente/quic-go/internal/flowcontrol"
"github.com/lucas-clemente/quic-go/internal/protocol"
"github.com/lucas-clemente/quic-go/internal/utils"
"github.com/lucas-clemente/quic-go/internal/wire"
)
type streamI interface {
Stream
HandleStreamFrame(*wire.StreamFrame) error
HandleRstStreamFrame(*wire.RstStreamFrame) error
PopStreamFrame(maxBytes protocol.ByteCount) *wire.StreamFrame
Finished() bool
Cancel(error)
// methods needed for flow control
GetWindowUpdate() protocol.ByteCount
HandleMaxStreamDataFrame(*wire.MaxStreamDataFrame)
IsFlowControlBlocked() bool
}
// A Stream assembles the data from StreamFrames and provides a super-convenient Read-Interface
//
// Read() and Write() may be called concurrently, but multiple calls to Read() or Write() individually must be synchronized manually.
type stream struct {
mutex sync.Mutex
ctx context.Context
ctxCancel context.CancelFunc
streamID protocol.StreamID
// onData tells the session that there's stuff to pack into a new packet
onData func()
// queueControlFrame queues a new control frame for sending
// it does not call onData
queueControlFrame func(wire.Frame)
readPosInFrame int
writeOffset protocol.ByteCount
readOffset protocol.ByteCount
// Once set, the errors must not be changed!
err error
// cancelled is set when Cancel() is called
cancelled utils.AtomicBool
// finishedReading is set once we read a frame with a FinBit
finishedReading utils.AtomicBool
	// finishedWriting is set once Close() is called
finishedWriting utils.AtomicBool
// resetLocally is set if Reset() is called
resetLocally utils.AtomicBool
// resetRemotely is set if HandleRstStreamFrame() is called
resetRemotely utils.AtomicBool
frameQueue *streamFrameSorter
readChan chan struct{}
readDeadline time.Time
dataForWriting []byte
finSent utils.AtomicBool
rstSent utils.AtomicBool
writeChan chan struct{}
writeDeadline time.Time
flowController flowcontrol.StreamFlowController
version protocol.VersionNumber
}
var _ Stream = &stream{}
var _ streamI = &stream{}
type deadlineError struct{}
func (deadlineError) Error() string { return "deadline exceeded" }
func (deadlineError) Temporary() bool { return true }
func (deadlineError) Timeout() bool { return true }
var errDeadline net.Error = &deadlineError{}
// newStream creates a new Stream
func newStream(StreamID protocol.StreamID,
onData func(),
queueControlFrame func(wire.Frame),
flowController flowcontrol.StreamFlowController,
version protocol.VersionNumber,
) *stream {
s := &stream{
onData: onData,
queueControlFrame: queueControlFrame,
streamID: StreamID,
flowController: flowController,
frameQueue: newStreamFrameSorter(),
readChan: make(chan struct{}, 1),
writeChan: make(chan struct{}, 1),
version: version,
}
s.ctx, s.ctxCancel = context.WithCancel(context.Background())
return s
}
// Read implements io.Reader. It is not thread safe!
func (s *stream) Read(p []byte) (int, error) {
s.mutex.Lock()
err := s.err
s.mutex.Unlock()
if s.cancelled.Get() || s.resetLocally.Get() {
return 0, err
}
if s.finishedReading.Get() {
return 0, io.EOF
}
bytesRead := 0
for bytesRead < len(p) {
s.mutex.Lock()
frame := s.frameQueue.Head()
if frame == nil && bytesRead > 0 {
err = s.err
s.mutex.Unlock()
return bytesRead, err
}
var err error
for {
// Stop waiting on errors
if s.resetLocally.Get() || s.cancelled.Get() {
err = s.err
break
}
deadline := s.readDeadline
if !deadline.IsZero() && !time.Now().Before(deadline) {
err = errDeadline
break
}
if frame != nil {
s.readPosInFrame = int(s.readOffset - frame.Offset)
break
}
s.mutex.Unlock()
if deadline.IsZero() {
<-s.readChan
} else {
select {
case <-s.readChan:
case <-time.After(deadline.Sub(time.Now())):
}
}
s.mutex.Lock()
frame = s.frameQueue.Head()
}
s.mutex.Unlock()
if err != nil {
return bytesRead, err
}
m := utils.Min(len(p)-bytesRead, int(frame.DataLen())-s.readPosInFrame)
if bytesRead > len(p) {
return bytesRead, fmt.Errorf("BUG: bytesRead (%d) > len(p) (%d) in stream.Read", bytesRead, len(p))
}
if s.readPosInFrame > int(frame.DataLen()) {
return bytesRead, fmt.Errorf("BUG: readPosInFrame (%d) > frame.DataLen (%d) in stream.Read", s.readPosInFrame, frame.DataLen())
}
copy(p[bytesRead:], frame.Data[s.readPosInFrame:])
s.readPosInFrame += m
bytesRead += m
s.readOffset += protocol.ByteCount(m)
		// when a RST_STREAM was received, the flow controller was already informed about the final byteOffset for this stream
if !s.resetRemotely.Get() {
s.flowController.AddBytesRead(protocol.ByteCount(m))
}
s.onData() // so that a possible WINDOW_UPDATE is sent
if s.readPosInFrame >= int(frame.DataLen()) {
fin := frame.FinBit
s.mutex.Lock()
s.frameQueue.Pop()
s.mutex.Unlock()
if fin {
s.finishedReading.Set(true)
return bytesRead, io.EOF
}
}
}
return bytesRead, nil
}
func (s *stream) Write(p []byte) (int, error) {
s.mutex.Lock()
defer s.mutex.Unlock()
if s.resetLocally.Get() || s.err != nil {
return 0, s.err
}
if s.finishedWriting.Get() {
return 0, fmt.Errorf("write on closed stream %d", s.streamID)
}
if len(p) == 0 {
return 0, nil
}
s.dataForWriting = make([]byte, len(p))
copy(s.dataForWriting, p)
s.onData()
var err error
for {
deadline := s.writeDeadline
if !deadline.IsZero() && !time.Now().Before(deadline) {
err = errDeadline
break
}
if s.dataForWriting == nil || s.err != nil {
break
}
s.mutex.Unlock()
if deadline.IsZero() {
<-s.writeChan
} else {
select {
case <-s.writeChan:
case <-time.After(deadline.Sub(time.Now())):
}
}
s.mutex.Lock()
}
if err != nil {
return 0, err
}
if s.err != nil {
return len(p) - len(s.dataForWriting), s.err
}
return len(p), nil
}
func (s *stream) GetWriteOffset() protocol.ByteCount {
return s.writeOffset
}
// PopStreamFrame returns the next STREAM frame that is supposed to be sent on this stream
// maxBytes is the maximum length this frame (including frame header) will have.
func (s *stream) PopStreamFrame(maxBytes protocol.ByteCount) *wire.StreamFrame {
s.mutex.Lock()
defer s.mutex.Unlock()
if s.err != nil {
return nil
}
frame := &wire.StreamFrame{
StreamID: s.streamID,
Offset: s.writeOffset,
DataLenPresent: true,
}
frameLen := frame.MinLength(s.version)
if frameLen >= maxBytes { // a STREAM frame must have at least one byte of data
return nil
}
frame.Data, frame.FinBit = s.getDataForWriting(maxBytes - frameLen)
if len(frame.Data) == 0 && !frame.FinBit {
return nil
}
if frame.FinBit {
s.finSent.Set(true)
}
return frame
}
func (s *stream) getDataForWriting(maxBytes protocol.ByteCount) ([]byte, bool /* should send FIN */) {
if s.dataForWriting == nil {
return nil, s.finishedWriting.Get() && !s.finSent.Get()
}
// TODO(#657): Flow control for the crypto stream
if s.streamID != s.version.CryptoStreamID() {
maxBytes = utils.MinByteCount(maxBytes, s.flowController.SendWindowSize())
}
if maxBytes == 0 {
return nil, false
}
var ret []byte
if protocol.ByteCount(len(s.dataForWriting)) > maxBytes {
ret = s.dataForWriting[:maxBytes]
s.dataForWriting = s.dataForWriting[maxBytes:]
} else {
ret = s.dataForWriting
s.dataForWriting = nil
s.signalWrite()
}
s.writeOffset += protocol.ByteCount(len(ret))
s.flowController.AddBytesSent(protocol.ByteCount(len(ret)))
return ret, s.finishedWriting.Get() && s.dataForWriting == nil && !s.finSent.Get()
}
// Close implements io.Closer
func (s *stream) Close() error {
s.finishedWriting.Set(true)
s.ctxCancel()
s.onData()
return nil
}
func (s *stream) shouldSendReset() bool {
if s.rstSent.Get() {
return false
}
return (s.resetLocally.Get() || s.resetRemotely.Get()) && !s.finishedWriteAndSentFin()
}
// HandleStreamFrame adds a new stream frame
func (s *stream) HandleStreamFrame(frame *wire.StreamFrame) error {
maxOffset := frame.Offset + frame.DataLen()
if err := s.flowController.UpdateHighestReceived(maxOffset, frame.FinBit); err != nil {
return err
}
s.mutex.Lock()
defer s.mutex.Unlock()
if err := s.frameQueue.Push(frame); err != nil && err != errDuplicateStreamData {
return err
}
s.signalRead()
return nil
}
// signalRead performs a non-blocking send on the readChan
func (s *stream) signalRead() {
select {
case s.readChan <- struct{}{}:
default:
}
}
// signalWrite performs a non-blocking send on the writeChan
func (s *stream) signalWrite() {
select {
case s.writeChan <- struct{}{}:
default:
}
}
func (s *stream) SetReadDeadline(t time.Time) error {
s.mutex.Lock()
oldDeadline := s.readDeadline
s.readDeadline = t
s.mutex.Unlock()
// if the new deadline is before the currently set deadline, wake up Read()
if t.Before(oldDeadline) {
s.signalRead()
}
return nil
}
func (s *stream) SetWriteDeadline(t time.Time) error {
s.mutex.Lock()
oldDeadline := s.writeDeadline
s.writeDeadline = t
s.mutex.Unlock()
if t.Before(oldDeadline) {
s.signalWrite()
}
return nil
}
func (s *stream) SetDeadline(t time.Time) error {
_ = s.SetReadDeadline(t) // SetReadDeadline never errors
_ = s.SetWriteDeadline(t) // SetWriteDeadline never errors
return nil
}
// CloseRemote makes the stream receive a "virtual" FIN stream frame at a given offset
func (s *stream) CloseRemote(offset protocol.ByteCount) {
s.HandleStreamFrame(&wire.StreamFrame{FinBit: true, Offset: offset})
}
// Cancel is called by session to indicate that an error occurred
// The stream will be closed immediately
func (s *stream) Cancel(err error) {
s.mutex.Lock()
s.cancelled.Set(true)
s.ctxCancel()
// errors must not be changed!
if s.err == nil {
s.err = err
s.signalRead()
s.signalWrite()
}
s.mutex.Unlock()
}
// resets the stream locally
func (s *stream) Reset(err error) {
if s.resetLocally.Get() {
return
}
s.mutex.Lock()
s.resetLocally.Set(true)
s.ctxCancel()
// errors must not be changed!
if s.err == nil {
s.err = err
s.signalRead()
s.signalWrite()
}
if s.shouldSendReset() {
s.queueControlFrame(&wire.RstStreamFrame{
StreamID: s.streamID,
ByteOffset: s.writeOffset,
})
s.onData()
s.rstSent.Set(true)
}
s.mutex.Unlock()
}
func (s *stream) HandleRstStreamFrame(frame *wire.RstStreamFrame) error {
if s.resetRemotely.Get() {
return nil
}
s.mutex.Lock()
s.resetRemotely.Set(true)
s.ctxCancel()
// errors must not be changed!
if s.err == nil {
s.err = fmt.Errorf("RST_STREAM received with code %d", frame.ErrorCode)
s.signalWrite()
}
if err := s.flowController.UpdateHighestReceived(frame.ByteOffset, true); err != nil {
return err
}
if s.shouldSendReset() {
s.queueControlFrame(&wire.RstStreamFrame{
StreamID: s.streamID,
ByteOffset: s.writeOffset,
})
s.onData()
s.rstSent.Set(true)
}
s.mutex.Unlock()
return nil
}
func (s *stream) finishedWriteAndSentFin() bool {
return s.finishedWriting.Get() && s.finSent.Get()
}
func (s *stream) Finished() bool {
return s.cancelled.Get() ||
(s.finishedReading.Get() && s.finishedWriteAndSentFin()) ||
(s.resetRemotely.Get() && s.rstSent.Get()) ||
(s.finishedReading.Get() && s.rstSent.Get()) ||
(s.finishedWriteAndSentFin() && s.resetRemotely.Get())
}
func (s *stream) Context() context.Context {
return s.ctx
}
func (s *stream) StreamID() protocol.StreamID {
return s.streamID
}
func (s *stream) HandleMaxStreamDataFrame(frame *wire.MaxStreamDataFrame) {
s.flowController.UpdateSendWindow(frame.ByteOffset)
}
func (s *stream) IsFlowControlBlocked() bool {
return s.flowController.IsBlocked()
}
func (s *stream) GetWindowUpdate() protocol.ByteCount {
return s.flowController.GetWindowUpdate()
}
| 1 | 7,110 | Everything below here can be replaced with `return s.err, len(p) - len(s.dataForWriting)`. | lucas-clemente-quic-go | go |
@@ -84,6 +84,12 @@ namespace Impl {
HIPLockArrays g_host_hip_lock_arrays = {nullptr, nullptr, 0};
void initialize_host_hip_lock_arrays() {
+#ifdef KOKKOS_ENABLE_IMPL_DESUL_ATOMICS
+ desul::Impl::init_lock_arrays();
+
+ DESUL_ENSURE_HIP_LOCK_ARRAYS_ON_DEVICE();
+#endif
+
if (g_host_hip_lock_arrays.atomic != nullptr) return;
HIP_SAFE_CALL(hipMalloc(
&g_host_hip_lock_arrays.atomic, | 1 | /*
//@HEADER
// ************************************************************************
//
// Kokkos v. 3.0
// Copyright (2020) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the Corporation nor the names of the
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Questions? Contact Christian R. Trott ([email protected])
//
// ************************************************************************
//@HEADER
*/
#include <Kokkos_Macros.hpp>
#include <HIP/Kokkos_HIP_Locks.hpp>
#include <HIP/Kokkos_HIP_Error.hpp>
#include <Kokkos_HIP_Space.hpp>
#include <hip/hip_runtime.h>
#include <iostream>
namespace Kokkos {
#ifdef KOKKOS_ENABLE_HIP_RELOCATABLE_DEVICE_CODE
namespace Impl {
__device__ __constant__ HIPLockArrays g_device_hip_lock_arrays = {nullptr,
nullptr, 0};
}
#endif
namespace {
__global__ void init_lock_array_kernel_atomic() {
unsigned i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < KOKKOS_IMPL_HIP_SPACE_ATOMIC_MASK + 1) {
Kokkos::Impl::g_device_hip_lock_arrays.atomic[i] = 0;
}
}
__global__ void init_lock_array_kernel_threadid(int N) {
unsigned i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < static_cast<unsigned>(N)) {
Kokkos::Impl::g_device_hip_lock_arrays.scratch[i] = 0;
}
}
} // namespace
namespace Impl {
HIPLockArrays g_host_hip_lock_arrays = {nullptr, nullptr, 0};
void initialize_host_hip_lock_arrays() {
if (g_host_hip_lock_arrays.atomic != nullptr) return;
HIP_SAFE_CALL(hipMalloc(
&g_host_hip_lock_arrays.atomic,
sizeof(std::int32_t) * (KOKKOS_IMPL_HIP_SPACE_ATOMIC_MASK + 1)));
HIP_SAFE_CALL(hipMalloc(
&g_host_hip_lock_arrays.scratch,
sizeof(std::int32_t) * (::Kokkos::Experimental::HIP::concurrency())));
g_host_hip_lock_arrays.n = ::Kokkos::Experimental::HIP::concurrency();
KOKKOS_COPY_HIP_LOCK_ARRAYS_TO_DEVICE();
init_lock_array_kernel_atomic<<<
(KOKKOS_IMPL_HIP_SPACE_ATOMIC_MASK + 1 + 255) / 256, 256, 0, nullptr>>>();
init_lock_array_kernel_threadid<<<
(::Kokkos::Experimental::HIP::concurrency() + 255) / 256, 256, 0,
nullptr>>>(::Kokkos::Experimental::HIP::concurrency());
}
void finalize_host_hip_lock_arrays() {
if (g_host_hip_lock_arrays.atomic == nullptr) return;
HIP_SAFE_CALL(hipFree(g_host_hip_lock_arrays.atomic));
g_host_hip_lock_arrays.atomic = nullptr;
HIP_SAFE_CALL(hipFree(g_host_hip_lock_arrays.scratch));
g_host_hip_lock_arrays.scratch = nullptr;
g_host_hip_lock_arrays.n = 0;
#ifdef KOKKOS_ENABLE_HIP_RELOCATABLE_DEVICE_CODE
KOKKOS_COPY_HIP_LOCK_ARRAYS_TO_DEVICE();
#endif
}
} // namespace Impl
} // namespace Kokkos
| 1 | 17,217 | Again, we can also guard `g_host_hip_lock_arrays.atomic`, can't we? | kokkos-kokkos | cpp |
@@ -69,12 +69,13 @@ class ImportSimpleProductCommandHandler
);
if (!empty($attributesToRedispatch)) {
- $this->commandBus->dispatch(new ImportProductAttributesValueCommand(
+ $ImportProductAttributesValueCommand = new ImportProductAttributesValueCommand(
$command->getId(),
$command->getImportId(),
$attributesToRedispatch,
$command->getSku()
- ));
+ );
+ $this->commandBus->dispatch($ImportProductAttributesValueCommand, true);
}
$this->repository->markLineAsSuccess($command->getId(), $product->getId());
} catch (ImportException $exception) { | 1 | <?php
/**
* Copyright © Ergonode Sp. z o.o. All rights reserved.
* See LICENSE.txt for license details.
*/
declare(strict_types=1);
namespace Ergonode\Importer\Infrastructure\Handler;
use Ergonode\Importer\Domain\Command\Import\Attribute\ImportProductAttributesValueCommand;
use Ergonode\Importer\Infrastructure\Filter\AttributeImportFilter;
use Ergonode\Importer\Infrastructure\Exception\ImportException;
use Ergonode\Importer\Domain\Repository\ImportRepositoryInterface;
use Ergonode\Importer\Domain\Command\Import\ImportSimpleProductCommand;
use Ergonode\Importer\Infrastructure\Action\SimpleProductImportAction;
use Ergonode\SharedKernel\Domain\Bus\CommandBusInterface;
use Psr\Log\LoggerInterface;
use Ergonode\Category\Domain\ValueObject\CategoryCode;
use Ergonode\Product\Domain\ValueObject\Sku;
class ImportSimpleProductCommandHandler
{
private SimpleProductImportAction $action;
private ImportRepositoryInterface $repository;
private LoggerInterface $logger;
private AttributeImportFilter $attributeImportFilter;
private CommandBusInterface $commandBus;
public function __construct(
SimpleProductImportAction $action,
ImportRepositoryInterface $repository,
LoggerInterface $logger,
AttributeImportFilter $attributeImportFilter,
CommandBusInterface $commandBus
) {
$this->action = $action;
$this->repository = $repository;
$this->logger = $logger;
$this->attributeImportFilter = $attributeImportFilter;
$this->commandBus = $commandBus;
}
public function __invoke(ImportSimpleProductCommand $command): void
{
try {
if (!Sku::isValid($command->getSku())) {
throw new ImportException('Sku {sku} is not valid', ['{sku}' => $command->getSku()]);
}
$categories = [];
foreach ($command->getCategories() as $category) {
if (!CategoryCode::isValid($category)) {
throw new ImportException('Category code {code} is not valid', ['{code}' => $category]);
}
$categories[] = new CategoryCode($category);
}
$attributesToRedispatch = $this->attributeImportFilter->filter($command->getAttributes());
$validatedAttributes = array_diff_key($command->getAttributes(), $attributesToRedispatch);
$product = $this->action->action(
new Sku($command->getSku()),
$command->getTemplate(),
$categories,
$validatedAttributes,
);
if (!empty($attributesToRedispatch)) {
$this->commandBus->dispatch(new ImportProductAttributesValueCommand(
$command->getId(),
$command->getImportId(),
$attributesToRedispatch,
$command->getSku()
));
}
$this->repository->markLineAsSuccess($command->getId(), $product->getId());
} catch (ImportException $exception) {
$this->repository->markLineAsFailure($command->getId());
$this->repository->addError($command->getImportId(), $exception->getMessage(), $exception->getParameters());
} catch (\Exception $exception) {
$message = 'Can\'t import simple product {sku}';
$this->repository->markLineAsFailure($command->getId());
$this->repository->addError($command->getImportId(), $message, ['{sku}' => $command->getSku()]);
$this->logger->error($exception);
}
}
}
| 1 | 9,526 | Variable name should start with a lowercase letter | ergonode-backend | php
@@ -47,12 +47,12 @@ import (
// certificate, ca, and private key is stored into the target Secret to
// complete Issuing the Certificate.
func TestIssuingController(t *testing.T) {
- config, stopFn := framework.RunControlPlane(t)
- defer stopFn()
-
- ctx, cancel := context.WithTimeout(context.Background(), time.Second*20)
+ ctx, cancel := context.WithTimeout(context.Background(), time.Second*40)
defer cancel()
+ config, stopFn := framework.RunControlPlane(t, ctx)
+ defer stopFn()
+
// Build, instantiate and run the issuing controller.
kubeClient, factory, cmCl, cmFactory := framework.NewClients(t, config)
controllerOptions := controllerpkg.CertificateOptions{ | 1 | /*
Copyright 2020 The cert-manager Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package certificates
import (
"bytes"
"context"
"encoding/pem"
"fmt"
"testing"
"time"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/utils/clock"
apiutil "github.com/jetstack/cert-manager/pkg/api/util"
cmapi "github.com/jetstack/cert-manager/pkg/apis/certmanager/v1"
cmmeta "github.com/jetstack/cert-manager/pkg/apis/meta/v1"
controllerpkg "github.com/jetstack/cert-manager/pkg/controller"
"github.com/jetstack/cert-manager/pkg/controller/certificates/issuing"
logf "github.com/jetstack/cert-manager/pkg/logs"
"github.com/jetstack/cert-manager/pkg/metrics"
utilpki "github.com/jetstack/cert-manager/pkg/util/pki"
"github.com/jetstack/cert-manager/test/integration/framework"
"github.com/jetstack/cert-manager/test/unit/gen"
)
// TestIssuingController performs a basic test to ensure that the issuing
// controller works when instantiated.
// This is not an exhaustive set of test cases. It only ensures that the signed
// certificate, ca, and private key is stored into the target Secret to
// complete Issuing the Certificate.
func TestIssuingController(t *testing.T) {
config, stopFn := framework.RunControlPlane(t)
defer stopFn()
ctx, cancel := context.WithTimeout(context.Background(), time.Second*20)
defer cancel()
// Build, instantiate and run the issuing controller.
kubeClient, factory, cmCl, cmFactory := framework.NewClients(t, config)
controllerOptions := controllerpkg.CertificateOptions{
EnableOwnerRef: true,
}
ctrl, queue, mustSync := issuing.NewController(logf.Log, kubeClient, cmCl, factory, cmFactory, framework.NewEventRecorder(t), clock.RealClock{}, controllerOptions)
c := controllerpkg.NewController(
ctx,
"issuing_test",
metrics.New(logf.Log, clock.RealClock{}),
ctrl.ProcessItem,
mustSync,
nil,
queue,
)
stopController := framework.StartInformersAndController(t, factory, cmFactory, c)
defer stopController()
var (
crtName = "testcrt"
revision = 1
namespace = "testns"
nextPrivateKeySecretName = "next-private-key-test-crt"
secretName = "test-crt-tls"
)
// Create Namespace
ns := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}
_, err := kubeClient.CoreV1().Namespaces().Create(ctx, ns, metav1.CreateOptions{})
if err != nil {
t.Fatal(err)
}
// Create a new private key
sk, err := utilpki.GenerateRSAPrivateKey(2048)
if err != nil {
t.Fatal(err)
}
// Encode the private key as PKCS#1, the default format
skBytes := utilpki.EncodePKCS1PrivateKey(sk)
// Store new private key in secret
_, err = kubeClient.CoreV1().Secrets(namespace).Create(ctx, &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: nextPrivateKeySecretName,
Namespace: namespace,
},
Data: map[string][]byte{
corev1.TLSPrivateKeyKey: skBytes,
},
}, metav1.CreateOptions{})
if err != nil {
t.Fatal(err)
}
// Create Certificate
crt := gen.Certificate(crtName,
gen.SetCertificateNamespace(namespace),
gen.SetCertificateCommonName("my-common-name"),
gen.SetCertificateDNSNames("example.com", "foo.example.com"),
gen.SetCertificateIPs("1.2.3.4", "5.6.7.8"),
gen.SetCertificateURIs("spiffe://hello.world"),
gen.SetCertificateKeyAlgorithm(cmapi.RSAKeyAlgorithm),
gen.SetCertificateKeySize(2048),
gen.SetCertificateSecretName(secretName),
gen.SetCertificateIssuer(cmmeta.ObjectReference{Name: "testissuer", Group: "foo.io", Kind: "Issuer"}),
)
crt, err = cmCl.CertmanagerV1().Certificates(namespace).Create(ctx, crt, metav1.CreateOptions{})
if err != nil {
t.Fatal(err)
}
// Create x509 CSR from Certificate
csr, err := utilpki.GenerateCSR(crt)
if err != nil {
t.Fatal(err)
}
// Encode CSR
csrDER, err := utilpki.EncodeCSR(csr, sk)
if err != nil {
t.Fatal(err)
}
csrPEM := pem.EncodeToMemory(&pem.Block{
Type: "CERTIFICATE REQUEST", Bytes: csrDER,
})
// Sign Certificate
certTemplate, err := utilpki.GenerateTemplate(crt)
if err != nil {
t.Fatal(err)
}
// Sign and encode the certificate
certPem, _, err := utilpki.SignCertificate(certTemplate, certTemplate, sk.Public(), sk)
if err != nil {
t.Fatal(err)
}
// Create CertificateRequest
req := gen.CertificateRequest(crtName,
gen.SetCertificateRequestNamespace(namespace),
gen.SetCertificateRequestCSR(csrPEM),
gen.SetCertificateRequestIssuer(crt.Spec.IssuerRef),
gen.SetCertificateRequestAnnotations(map[string]string{
cmapi.CertificateRequestRevisionAnnotationKey: fmt.Sprintf("%d", revision+1),
}),
gen.AddCertificateRequestOwnerReferences(*metav1.NewControllerRef(
crt,
cmapi.SchemeGroupVersion.WithKind("Certificate"),
)),
)
req, err = cmCl.CertmanagerV1().CertificateRequests(namespace).Create(ctx, req, metav1.CreateOptions{})
if err != nil {
t.Fatal(err)
}
// Set CertificateRequest as ready
req.Status.CA = certPem
req.Status.Certificate = certPem
apiutil.SetCertificateRequestCondition(req, cmapi.CertificateRequestConditionReady, cmmeta.ConditionTrue, cmapi.CertificateRequestReasonIssued, "")
req, err = cmCl.CertmanagerV1().CertificateRequests(namespace).UpdateStatus(ctx, req, metav1.UpdateOptions{})
if err != nil {
t.Fatal(err)
}
// Add Issuing condition to Certificate
apiutil.SetCertificateCondition(crt, crt.Generation, cmapi.CertificateConditionIssuing, cmmeta.ConditionTrue, "", "")
crt.Status.NextPrivateKeySecretName = &nextPrivateKeySecretName
crt.Status.Revision = &revision
crt, err = cmCl.CertmanagerV1().Certificates(namespace).UpdateStatus(ctx, crt, metav1.UpdateOptions{})
if err != nil {
t.Fatal(err)
}
// Wait for the Certificate to have the 'Issuing' condition removed, and for
// the signed certificate, ca, and private key stored in the Secret.
err = wait.Poll(time.Millisecond*100, time.Second*5, func() (done bool, err error) {
crt, err = cmCl.CertmanagerV1().Certificates(namespace).Get(ctx, crtName, metav1.GetOptions{})
if err != nil {
t.Logf("Failed to fetch Certificate resource, retrying: %v", err)
return false, nil
}
if cond := apiutil.GetCertificateCondition(crt, cmapi.CertificateConditionIssuing); cond != nil {
t.Logf("Certificate does not have expected condition, got=%#v", cond)
return false, nil
}
// If the condition is set, but the rest of the values are not there,
// error. This is to assert that all Secret data and metadata is pushed in
// a single resource update.
if crt.Status.Revision == nil ||
*crt.Status.Revision != 2 {
return false, fmt.Errorf("Certificate does not have a revision of 2: %v", crt.Status.Revision)
}
secret, err := kubeClient.CoreV1().Secrets(namespace).Get(ctx, crt.Spec.SecretName, metav1.GetOptions{})
if err != nil {
return false, fmt.Errorf("Failed to fetch Secret %s/%s: %s", namespace, crt.Spec.SecretName, err)
}
if !bytes.Equal(secret.Data[corev1.TLSPrivateKeyKey], skBytes) ||
!bytes.Equal(secret.Data[corev1.TLSCertKey], certPem) ||
!bytes.Equal(secret.Data[cmmeta.TLSCAKey], certPem) {
return false, fmt.Errorf("Contents of secret did not match expected: %+v", secret.Data)
}
for expKey, expV := range map[string]string{
cmapi.AltNamesAnnotationKey: "example.com,foo.example.com",
cmapi.IPSANAnnotationKey: "1.2.3.4,5.6.7.8",
cmapi.URISANAnnotationKey: "spiffe://hello.world",
cmapi.CommonNameAnnotationKey: "my-common-name",
cmapi.IssuerNameAnnotationKey: "testissuer",
cmapi.IssuerKindAnnotationKey: "Issuer",
cmapi.IssuerGroupAnnotationKey: "foo.io",
cmapi.CertificateNameKey: "testcrt",
} {
if v, ok := secret.Annotations[expKey]; !ok || expV != v {
return false, fmt.Errorf("expected Secret to have the annotation %s:%s, got %s:%s",
expKey, expV, expKey, v)
}
}
return true, nil
})
if err != nil {
t.Fatalf("Failed to wait for final state: %+v", crt)
}
}
func TestIssuingController_PKCS8_PrivateKey(t *testing.T) {
config, stopFn := framework.RunControlPlane(t)
defer stopFn()
ctx, cancel := context.WithTimeout(context.Background(), time.Second*20)
defer cancel()
// Build, instantiate and run the issuing controller.
kubeClient, factory, cmCl, cmFactory := framework.NewClients(t, config)
controllerOptions := controllerpkg.CertificateOptions{
EnableOwnerRef: true,
}
ctrl, queue, mustSync := issuing.NewController(logf.Log, kubeClient, cmCl, factory, cmFactory, framework.NewEventRecorder(t), clock.RealClock{}, controllerOptions)
c := controllerpkg.NewController(
ctx,
"issuing_test",
metrics.New(logf.Log, clock.RealClock{}),
ctrl.ProcessItem,
mustSync,
nil,
queue,
)
stopController := framework.StartInformersAndController(t, factory, cmFactory, c)
defer stopController()
var (
crtName = "testcrt"
revision = 1
namespace = "testns"
nextPrivateKeySecretName = "next-private-key-test-crt"
secretName = "test-crt-tls"
)
// Create Namespace
ns := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}
_, err := kubeClient.CoreV1().Namespaces().Create(ctx, ns, metav1.CreateOptions{})
if err != nil {
t.Fatal(err)
}
// Create a new private key
sk, err := utilpki.GenerateRSAPrivateKey(2048)
if err != nil {
t.Fatal(err)
}
// Encode the private key as PKCS#1, the default format
skBytesPKCS1 := utilpki.EncodePKCS1PrivateKey(sk)
skBytesPKCS8, err := utilpki.EncodePKCS8PrivateKey(sk)
if err != nil {
t.Fatal(err)
}
// Store new private key in secret
_, err = kubeClient.CoreV1().Secrets(namespace).Create(ctx, &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: nextPrivateKeySecretName,
Namespace: namespace,
},
Data: map[string][]byte{
// store PKCS#1 bytes so we can ensure they are correctly converted to
// PKCS#8 later on
corev1.TLSPrivateKeyKey: skBytesPKCS1,
},
}, metav1.CreateOptions{})
if err != nil {
t.Fatal(err)
}
// Create Certificate
crt := gen.Certificate(crtName,
gen.SetCertificateNamespace(namespace),
gen.SetCertificateCommonName("my-common-name"),
gen.SetCertificateDNSNames("example.com", "foo.example.com"),
gen.SetCertificateIPs("1.2.3.4", "5.6.7.8"),
gen.SetCertificateURIs("spiffe://hello.world"),
gen.SetCertificateKeyAlgorithm(cmapi.RSAKeyAlgorithm),
gen.SetCertificateKeyEncoding(cmapi.PKCS8),
gen.SetCertificateKeySize(2048),
gen.SetCertificateSecretName(secretName),
gen.SetCertificateIssuer(cmmeta.ObjectReference{Name: "testissuer", Group: "foo.io", Kind: "Issuer"}),
)
crt, err = cmCl.CertmanagerV1().Certificates(namespace).Create(ctx, crt, metav1.CreateOptions{})
if err != nil {
t.Fatal(err)
}
// Create x509 CSR from Certificate
csr, err := utilpki.GenerateCSR(crt)
if err != nil {
t.Fatal(err)
}
// Encode CSR
csrDER, err := utilpki.EncodeCSR(csr, sk)
if err != nil {
t.Fatal(err)
}
csrPEM := pem.EncodeToMemory(&pem.Block{
Type: "CERTIFICATE REQUEST", Bytes: csrDER,
})
// Sign Certificate
certTemplate, err := utilpki.GenerateTemplate(crt)
if err != nil {
t.Fatal(err)
}
// Sign and encode the certificate
certPem, _, err := utilpki.SignCertificate(certTemplate, certTemplate, sk.Public(), sk)
if err != nil {
t.Fatal(err)
}
// Create CertificateRequest
req := gen.CertificateRequest(crtName,
gen.SetCertificateRequestNamespace(namespace),
gen.SetCertificateRequestCSR(csrPEM),
gen.SetCertificateRequestIssuer(crt.Spec.IssuerRef),
gen.SetCertificateRequestAnnotations(map[string]string{
cmapi.CertificateRequestRevisionAnnotationKey: fmt.Sprintf("%d", revision+1),
}),
gen.AddCertificateRequestOwnerReferences(*metav1.NewControllerRef(
crt,
cmapi.SchemeGroupVersion.WithKind("Certificate"),
)),
)
req, err = cmCl.CertmanagerV1().CertificateRequests(namespace).Create(ctx, req, metav1.CreateOptions{})
if err != nil {
t.Fatal(err)
}
// Set CertificateRequest as ready
req.Status.CA = certPem
req.Status.Certificate = certPem
apiutil.SetCertificateRequestCondition(req, cmapi.CertificateRequestConditionReady, cmmeta.ConditionTrue, cmapi.CertificateRequestReasonIssued, "")
req, err = cmCl.CertmanagerV1().CertificateRequests(namespace).UpdateStatus(ctx, req, metav1.UpdateOptions{})
if err != nil {
t.Fatal(err)
}
// Add Issuing condition to Certificate
apiutil.SetCertificateCondition(crt, crt.Generation, cmapi.CertificateConditionIssuing, cmmeta.ConditionTrue, "", "")
crt.Status.NextPrivateKeySecretName = &nextPrivateKeySecretName
crt.Status.Revision = &revision
crt, err = cmCl.CertmanagerV1().Certificates(namespace).UpdateStatus(ctx, crt, metav1.UpdateOptions{})
if err != nil {
t.Fatal(err)
}
// Wait for the Certificate to have the 'Issuing' condition removed, and for
// the signed certificate, ca, and private key stored in the Secret.
err = wait.Poll(time.Millisecond*100, time.Second*5, func() (done bool, err error) {
crt, err = cmCl.CertmanagerV1().Certificates(namespace).Get(ctx, crtName, metav1.GetOptions{})
if err != nil {
t.Logf("Failed to fetch Certificate resource, retrying: %v", err)
return false, nil
}
if cond := apiutil.GetCertificateCondition(crt, cmapi.CertificateConditionIssuing); cond != nil {
t.Logf("Certificate does not have expected condition, got=%#v", cond)
return false, nil
}
// If the condition is set, but the rest of the values are not there,
// error. This is to assert that all Secret data and metadata is pushed in
// a single resource update.
if crt.Status.Revision == nil ||
*crt.Status.Revision != 2 {
return false, fmt.Errorf("Certificate does not have a revision of 2: %v", crt.Status.Revision)
}
secret, err := kubeClient.CoreV1().Secrets(namespace).Get(ctx, crt.Spec.SecretName, metav1.GetOptions{})
if err != nil {
return false, fmt.Errorf("Failed to fetch Secret %s/%s: %s", namespace, crt.Spec.SecretName, err)
}
if !bytes.Equal(secret.Data[corev1.TLSPrivateKeyKey], skBytesPKCS8) ||
!bytes.Equal(secret.Data[corev1.TLSCertKey], certPem) ||
!bytes.Equal(secret.Data[cmmeta.TLSCAKey], certPem) {
return false, fmt.Errorf("Contents of secret did not match expected: %+v", secret.Data)
}
for expKey, expV := range map[string]string{
cmapi.AltNamesAnnotationKey: "example.com,foo.example.com",
cmapi.IPSANAnnotationKey: "1.2.3.4,5.6.7.8",
cmapi.URISANAnnotationKey: "spiffe://hello.world",
cmapi.CommonNameAnnotationKey: "my-common-name",
cmapi.IssuerNameAnnotationKey: "testissuer",
cmapi.IssuerKindAnnotationKey: "Issuer",
cmapi.IssuerGroupAnnotationKey: "foo.io",
cmapi.CertificateNameKey: "testcrt",
} {
if v, ok := secret.Annotations[expKey]; !ok || expV != v {
return false, fmt.Errorf("expected Secret to have the annotation %s:%s, got %s:%s",
expKey, expV, expKey, v)
}
}
return true, nil
})
if err != nil {
t.Fatalf("Failed to wait for final state: %+v", crt)
}
}
| 1 | 29,104 | Why has this timeout doubled? | jetstack-cert-manager | go |
@@ -113,7 +113,7 @@ type Config struct {
IptablesRefreshInterval time.Duration `config:"seconds;90"`
IptablesPostWriteCheckIntervalSecs time.Duration `config:"seconds;30"`
IptablesLockFilePath string `config:"file;/run/xtables.lock"`
- IptablesLockTimeoutSecs time.Duration `config:"seconds;30"`
+ IptablesLockTimeoutSecs time.Duration `config:"seconds;0"`
IptablesLockProbeIntervalMillis time.Duration `config:"millis;50"`
IpsetsRefreshInterval time.Duration `config:"seconds;10"`
MaxIpsetSize int `config:"int;1048576;non-zero"` | 1 | // Copyright (c) 2016-2017 Tigera, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package config
import (
"errors"
"fmt"
"net"
"os"
"reflect"
"regexp"
"strconv"
"strings"
"time"
log "github.com/Sirupsen/logrus"
"github.com/projectcalico/libcalico-go/lib/api"
"github.com/projectcalico/libcalico-go/lib/client"
)
var (
IfaceListRegexp = regexp.MustCompile(`^[a-zA-Z0-9_-]{1,15}(,[a-zA-Z0-9_-]{1,15})*$`)
AuthorityRegexp = regexp.MustCompile(`^[^:/]+:\d+$`)
HostnameRegexp = regexp.MustCompile(`^[a-zA-Z0-9_.-]+$`)
StringRegexp = regexp.MustCompile(`^.*$`)
)
const (
maxUint = ^uint(0)
maxInt = int(maxUint >> 1)
minInt = -maxInt - 1
)
// Source of a config value. Values from higher-numbered sources override
// those from lower-numbered sources. Note: some parameters (such as those
// needed to connect to the datastore) can only be set from a local source.
type Source uint8
const (
Default = iota
DatastoreGlobal
DatastorePerHost
ConfigFile
EnvironmentVariable
)
var SourcesInDescendingOrder = []Source{EnvironmentVariable, ConfigFile, DatastorePerHost, DatastoreGlobal}
func (source Source) String() string {
switch source {
case Default:
return "<default>"
case DatastoreGlobal:
return "datastore (global)"
case DatastorePerHost:
return "datastore (per-host)"
case ConfigFile:
return "config file"
case EnvironmentVariable:
return "environment variable"
}
return fmt.Sprintf("<unknown(%v)>", uint8(source))
}
func (source Source) Local() bool {
switch source {
case Default, ConfigFile, EnvironmentVariable:
return true
default:
return false
}
}
// Config contains the best, parsed config values loaded from the various sources.
// We use tags to control the parsing and validation.
type Config struct {
// Configuration parameters.
UseInternalDataplaneDriver bool `config:"bool;true"`
DataplaneDriver string `config:"file(must-exist,executable);calico-iptables-plugin;non-zero,die-on-fail,skip-default-validation"`
DatastoreType string `config:"oneof(kubernetes,etcdv2);etcdv2;non-zero,die-on-fail"`
FelixHostname string `config:"hostname;;local,non-zero"`
EtcdAddr string `config:"authority;127.0.0.1:2379;local"`
EtcdScheme string `config:"oneof(http,https);http;local"`
EtcdKeyFile string `config:"file(must-exist);;local"`
EtcdCertFile string `config:"file(must-exist);;local"`
EtcdCaFile string `config:"file(must-exist);;local"`
EtcdEndpoints []string `config:"endpoint-list;;local"`
TyphaAddr string `config:"authority;;"`
TyphaK8sServiceName string `config:"string;"`
TyphaK8sNamespace string `config:"string;kube-system;non-zero"`
Ipv6Support bool `config:"bool;true"`
IgnoreLooseRPF bool `config:"bool;false"`
RouteRefreshInterval time.Duration `config:"seconds;90"`
IptablesRefreshInterval time.Duration `config:"seconds;90"`
IptablesPostWriteCheckIntervalSecs time.Duration `config:"seconds;30"`
IptablesLockFilePath string `config:"file;/run/xtables.lock"`
IptablesLockTimeoutSecs time.Duration `config:"seconds;30"`
IptablesLockProbeIntervalMillis time.Duration `config:"millis;50"`
IpsetsRefreshInterval time.Duration `config:"seconds;10"`
MaxIpsetSize int `config:"int;1048576;non-zero"`
MetadataAddr string `config:"hostname;127.0.0.1;die-on-fail"`
MetadataPort int `config:"int(0,65535);8775;die-on-fail"`
InterfacePrefix string `config:"iface-list;cali;non-zero,die-on-fail"`
ChainInsertMode string `config:"oneof(insert,append);insert;non-zero,die-on-fail"`
DefaultEndpointToHostAction string `config:"oneof(DROP,RETURN,ACCEPT);DROP;non-zero,die-on-fail"`
IptablesAllowAction string `config:"oneof(ACCEPT,RETURN);ACCEPT;non-zero,die-on-fail"`
LogPrefix string `config:"string;calico-packet"`
LogFilePath string `config:"file;/var/log/calico/felix.log;die-on-fail"`
LogSeverityFile string `config:"oneof(DEBUG,INFO,WARNING,ERROR,CRITICAL);INFO"`
LogSeverityScreen string `config:"oneof(DEBUG,INFO,WARNING,ERROR,CRITICAL);INFO"`
LogSeveritySys string `config:"oneof(DEBUG,INFO,WARNING,ERROR,CRITICAL);INFO"`
IpInIpEnabled bool `config:"bool;false"`
IpInIpMtu int `config:"int;1440;non-zero"`
IpInIpTunnelAddr net.IP `config:"ipv4;"`
ReportingIntervalSecs time.Duration `config:"seconds;30"`
ReportingTTLSecs time.Duration `config:"seconds;90"`
EndpointReportingEnabled bool `config:"bool;false"`
EndpointReportingDelaySecs time.Duration `config:"seconds;1"`
IptablesMarkMask uint32 `config:"mark-bitmask;0xff000000;non-zero,die-on-fail"`
DisableConntrackInvalidCheck bool `config:"bool;false"`
HealthEnabled bool `config:"bool;false"`
HealthPort int `config:"int(0,65535);9099"`
PrometheusMetricsEnabled bool `config:"bool;false"`
PrometheusMetricsPort int `config:"int(0,65535);9091"`
PrometheusGoMetricsEnabled bool `config:"bool;true"`
PrometheusProcessMetricsEnabled bool `config:"bool;true"`
FailsafeInboundHostPorts []ProtoPort `config:"port-list;tcp:22,udp:68;die-on-fail"`
FailsafeOutboundHostPorts []ProtoPort `config:"port-list;tcp:2379,tcp:2380,tcp:4001,tcp:7001,udp:53,udp:67;die-on-fail"`
UsageReportingEnabled bool `config:"bool;true"`
ClusterGUID string `config:"string;baddecaf"`
ClusterType string `config:"string;"`
CalicoVersion string `config:"string;"`
DebugMemoryProfilePath string `config:"file;;"`
DebugDisableLogDropping bool `config:"bool;false"`
// State tracking.
// nameToSource tracks where we loaded each config param from.
sourceToRawConfig map[Source]map[string]string
rawValues map[string]string
Err error
numIptablesBitsAllocated int
}
type ProtoPort struct {
Protocol string
Port uint16
}
// UpdateFrom parses and merges the rawData from one particular source into this config object.
// If there is a config value already loaded from a higher-priority source, then
// the new value will be ignored (after validation).
func (config *Config) UpdateFrom(rawData map[string]string, source Source) (changed bool, err error) {
log.Infof("Merging in config from %v: %v", source, rawData)
// Defensively take a copy of the raw data, in case we've been handed
// a mutable map by mistake.
rawDataCopy := make(map[string]string)
for k, v := range rawData {
if v == "" {
log.WithFields(log.Fields{
"name": k,
"source": source,
}).Info("Ignoring empty configuration parameter. Use value 'none' if " +
"your intention is to explicitly disable the default value.")
continue
}
rawDataCopy[k] = v
}
config.sourceToRawConfig[source] = rawDataCopy
changed, err = config.resolve()
return
}
func (c *Config) InterfacePrefixes() []string {
return strings.Split(c.InterfacePrefix, ",")
}
func (config *Config) OpenstackActive() bool {
if strings.Contains(strings.ToLower(config.ClusterType), "openstack") {
log.Debug("Cluster type contains OpenStack")
return true
}
if config.MetadataAddr != "127.0.0.1" {
log.Debug("OpenStack metadata IP set to non-default, assuming OpenStack active")
return true
}
if config.MetadataPort != 8775 {
log.Debug("OpenStack metadata port set to non-default, assuming OpenStack active")
return true
}
for _, prefix := range config.InterfacePrefixes() {
if prefix == "tap" {
log.Debug("Interface prefix list contains 'tap', assuming OpenStack")
return true
}
}
log.Debug("No evidence this is an OpenStack deployment; disabling OpenStack special-cases")
return false
}
func (config *Config) NextIptablesMark() uint32 {
mark := config.NthIPTablesMark(config.numIptablesBitsAllocated)
config.numIptablesBitsAllocated++
return mark
}
func (config *Config) NthIPTablesMark(n int) uint32 {
numBitsFound := 0
for shift := uint(0); shift < 32; shift++ {
candidate := uint32(1) << shift
if config.IptablesMarkMask&candidate > 0 {
if numBitsFound == n {
return candidate
}
numBitsFound += 1
}
}
log.WithFields(log.Fields{
"IptablesMarkMask": config.IptablesMarkMask,
"requestedMark": n,
}).Panic("Not enough iptables mark bits available.")
return 0
}
func (config *Config) resolve() (changed bool, err error) {
newRawValues := make(map[string]string)
nameToSource := make(map[string]Source)
for _, source := range SourcesInDescendingOrder {
valueLoop:
for rawName, rawValue := range config.sourceToRawConfig[source] {
currentSource := nameToSource[rawName]
param, ok := knownParams[strings.ToLower(rawName)]
if !ok {
if source >= currentSource {
// Stash the raw value in case it's useful for
// a plugin. Since we don't know the canonical
// name, use the raw name.
newRawValues[rawName] = rawValue
nameToSource[rawName] = source
}
log.WithField("raw name", rawName).Info(
"Ignoring unknown config param.")
continue valueLoop
}
metadata := param.GetMetadata()
name := metadata.Name
if metadata.Local && !source.Local() {
log.Warningf("Ignoring local-only configuration for %v from %v",
name, source)
continue valueLoop
}
log.Infof("Parsing value for %v: %v (from %v)",
name, rawValue, source)
var value interface{}
if strings.ToLower(rawValue) == "none" {
// Special case: we allow a value of "none" to force the value to
// the zero value for a field. The zero value often differs from
// the default value. Typically, the zero value means "turn off
// the feature".
if metadata.NonZero {
err = errors.New("Non-zero field cannot be set to none")
log.Errorf(
"Failed to parse value for %v: %v from source %v. %v",
name, rawValue, source, err)
config.Err = err
return
}
value = metadata.ZeroValue
log.Infof("Value set to 'none', replacing with zero-value: %#v.",
value)
} else {
value, err = param.Parse(rawValue)
if err != nil {
logCxt := log.WithError(err).WithField("source", source)
if metadata.DieOnParseFailure {
logCxt.Error("Invalid (required) config value.")
config.Err = err
return
} else {
logCxt.WithField("default", metadata.Default).Warn(
"Replacing invalid value with default")
value = metadata.Default
err = nil
}
}
}
log.Infof("Parsed value for %v: %v (from %v)",
name, value, source)
if source < currentSource {
log.Infof("Skipping config value for %v from %v; "+
"already have a value from %v", name,
source, currentSource)
continue
}
field := reflect.ValueOf(config).Elem().FieldByName(name)
field.Set(reflect.ValueOf(value))
newRawValues[name] = rawValue
nameToSource[name] = source
}
}
changed = !reflect.DeepEqual(newRawValues, config.rawValues)
config.rawValues = newRawValues
return
}
func (config *Config) DatastoreConfig() api.CalicoAPIConfig {
// Special case for etcdv2 datastore, where we want to honour established Felix-specific
// config mechanisms.
if config.DatastoreType == "etcdv2" {
// Build a CalicoAPIConfig with the etcd fields filled in from Felix-specific
// config.
var etcdEndpoints string
if len(config.EtcdEndpoints) == 0 {
etcdEndpoints = config.EtcdScheme + "://" + config.EtcdAddr
} else {
etcdEndpoints = strings.Join(config.EtcdEndpoints, ",")
}
etcdCfg := api.EtcdConfig{
EtcdEndpoints: etcdEndpoints,
EtcdKeyFile: config.EtcdKeyFile,
EtcdCertFile: config.EtcdCertFile,
EtcdCACertFile: config.EtcdCaFile,
}
return api.CalicoAPIConfig{
Spec: api.CalicoAPIConfigSpec{
DatastoreType: api.EtcdV2,
EtcdConfig: etcdCfg,
},
}
}
// Build CalicoAPIConfig from the environment. This means that any XxxYyy field in
// CalicoAPIConfigSpec can be set by a corresponding XXX_YYY or CALICO_XXX_YYY environment
// variable, and that the datastore type can be set by a DATASTORE_TYPE or
// CALICO_DATASTORE_TYPE variable. (Except in the etcdv2 case which is handled specially
// above.)
cfg, err := client.LoadClientConfigFromEnvironment()
if err != nil {
log.WithError(err).Panic("Failed to create datastore config")
}
// If that didn't set the datastore type (in which case the field will have been set to its
// default 'etcdv2' value), copy it from the Felix config.
if cfg.Spec.DatastoreType == "etcdv2" {
cfg.Spec.DatastoreType = api.DatastoreType(config.DatastoreType)
}
if !config.IpInIpEnabled {
// Polling k8s for node updates is expensive (because we get many superfluous
// updates) so disable if we don't need it.
log.Info("IPIP disabled, disabling node poll (if KDD is in use).")
cfg.Spec.K8sDisableNodePoll = true
}
return *cfg
}
// Validate() performs cross-field validation.
func (config *Config) Validate() (err error) {
if config.FelixHostname == "" {
err = errors.New("Failed to determine hostname")
}
if config.DatastoreType == "etcdv2" && len(config.EtcdEndpoints) == 0 {
if config.EtcdScheme == "" {
err = errors.New("EtcdEndpoints and EtcdScheme both missing")
}
if config.EtcdAddr == "" {
err = errors.New("EtcdEndpoints and EtcdAddr both missing")
}
}
if err != nil {
config.Err = err
}
return
}
var knownParams map[string]param
func loadParams() {
knownParams = make(map[string]param)
config := Config{}
kind := reflect.TypeOf(config)
metaRegexp := regexp.MustCompile(`^([^;(]+)(?:\(([^)]*)\))?;` +
`([^;]*)(?:;` +
`([^;]*))?$`)
for ii := 0; ii < kind.NumField(); ii++ {
field := kind.Field(ii)
tag := field.Tag.Get("config")
if tag == "" {
continue
}
captures := metaRegexp.FindStringSubmatch(tag)
if len(captures) == 0 {
log.Panicf("Failed to parse metadata for config param %v", field.Name)
}
log.Debugf("%v: metadata captures: %#v", field.Name, captures)
kind := captures[1] // Type: "int|oneof|bool|port-list|..."
kindParams := captures[2] // Parameters for the type: e.g. for oneof "http,https"
defaultStr := captures[3] // Default value e.g "1.0"
flags := captures[4]
var param param
var err error
switch kind {
case "bool":
param = &BoolParam{}
case "int":
min := minInt
max := maxInt
if kindParams != "" {
minAndMax := strings.Split(kindParams, ",")
min, err = strconv.Atoi(minAndMax[0])
if err != nil {
log.Panicf("Failed to parse min value for %v", field.Name)
}
max, err = strconv.Atoi(minAndMax[1])
if err != nil {
log.Panicf("Failed to parse max value for %v", field.Name)
}
}
param = &IntParam{Min: min, Max: max}
case "int32":
param = &Int32Param{}
case "mark-bitmask":
param = &MarkBitmaskParam{}
case "float":
param = &FloatParam{}
case "seconds":
param = &SecondsParam{}
case "millis":
param = &MillisParam{}
case "iface-list":
param = &RegexpParam{Regexp: IfaceListRegexp,
Msg: "invalid Linux interface name"}
case "file":
param = &FileParam{
MustExist: strings.Contains(kindParams, "must-exist"),
Executable: strings.Contains(kindParams, "executable"),
}
case "authority":
param = &RegexpParam{Regexp: AuthorityRegexp,
Msg: "invalid URL authority"}
case "ipv4":
param = &Ipv4Param{}
case "endpoint-list":
param = &EndpointListParam{}
case "port-list":
param = &PortListParam{}
case "hostname":
param = &RegexpParam{Regexp: HostnameRegexp,
Msg: "invalid hostname"}
case "oneof":
options := strings.Split(kindParams, ",")
lowerCaseToCanon := make(map[string]string)
for _, option := range options {
lowerCaseToCanon[strings.ToLower(option)] = option
}
param = &OneofListParam{
lowerCaseOptionsToCanonical: lowerCaseToCanon}
case "string":
param = &RegexpParam{Regexp: StringRegexp,
Msg: "invalid string"}
default:
log.Panicf("Unknown type of parameter: %v", kind)
}
metadata := param.GetMetadata()
metadata.Name = field.Name
metadata.ZeroValue = reflect.ValueOf(config).FieldByName(field.Name).Interface()
if strings.Index(flags, "non-zero") > -1 {
metadata.NonZero = true
}
if strings.Index(flags, "die-on-fail") > -1 {
metadata.DieOnParseFailure = true
}
if strings.Index(flags, "local") > -1 {
metadata.Local = true
}
if defaultStr != "" {
if strings.Index(flags, "skip-default-validation") > -1 {
metadata.Default = defaultStr
} else {
// Parse the default value and save it in the metadata. Doing
// that here ensures that we syntax-check the defaults now.
defaultVal, err := param.Parse(defaultStr)
if err != nil {
log.Panicf("Invalid default value: %v", err)
}
metadata.Default = defaultVal
}
} else {
metadata.Default = metadata.ZeroValue
}
knownParams[strings.ToLower(field.Name)] = param
}
}
func (config *Config) RawValues() map[string]string {
return config.rawValues
}
func New() *Config {
if knownParams == nil {
loadParams()
}
p := &Config{
rawValues: make(map[string]string),
sourceToRawConfig: make(map[Source]map[string]string),
}
for _, param := range knownParams {
param.setDefault(p)
}
hostname, err := os.Hostname()
if err != nil {
log.Warningf("Failed to get hostname from kernel, "+
"trying HOSTNAME variable: %v", err)
hostname = os.Getenv("HOSTNAME")
}
p.FelixHostname = hostname
return p
}
type param interface {
GetMetadata() *Metadata
Parse(raw string) (result interface{}, err error)
setDefault(*Config)
}
| 1 | 15,520 | Should IptablesPostWriteCheckIntervalSecs be set back to its previous smaller value, if use of the iptables lock is disabled? | projectcalico-felix | go |
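For context on the defaults discussion above: with the diff applied, `IptablesLockTimeoutSecs` compiles in as 0 (iptables lock disabled), but any higher-priority source still overrides the default through the same `UpdateFrom` merge shown in this file. A rough usage sketch follows; the import path and the `main` wrapper are assumptions, while `New`, `UpdateFrom` and `EnvironmentVariable` are taken from the file above.

```go
package main

import (
	"fmt"

	"github.com/projectcalico/felix/config" // assumed import path for the package above
)

func main() {
	// Compiled-in defaults; with the diff above applied, the iptables lock is
	// disabled by default (IptablesLockTimeoutSecs == 0).
	cfg := config.New()

	// A higher-priority source (here: environment-variable scope) re-enables it,
	// since values from higher-numbered sources override lower-numbered ones.
	changed, err := cfg.UpdateFrom(map[string]string{
		"IptablesLockTimeoutSecs": "30",
	}, config.EnvironmentVariable)
	if err != nil {
		panic(err)
	}
	fmt.Println(changed, cfg.IptablesLockTimeoutSecs) // true 30s
}
```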
@@ -335,6 +335,13 @@ class WebEngineCaret(browsertab.AbstractCaret):
"""
if js_elem is None:
return
+ if js_elem == "focused":
+ # we had a focused element, not a selected one. Just send <enter>
+ if tab:
+ self._tab.key_press(Qt.Key_Enter, modifier=Qt.ControlModifier)
+ else:
+ self._tab.key_press(Qt.Key_Enter)
+
assert isinstance(js_elem, dict), js_elem
elem = webengineelem.WebEngineElement(js_elem, tab=self._tab)
if tab: | 1 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2016-2018 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Wrapper over a QWebEngineView."""
import math
import functools
import sys
import re
import html as html_utils
import sip
from PyQt5.QtCore import (pyqtSignal, pyqtSlot, Qt, QEvent, QPoint, QPointF,
QUrl, QTimer)
from PyQt5.QtGui import QKeyEvent, QIcon
from PyQt5.QtNetwork import QAuthenticator
from PyQt5.QtWidgets import QApplication, QWidget
from PyQt5.QtWebEngineWidgets import QWebEnginePage, QWebEngineScript
from qutebrowser.config import configdata, config
from qutebrowser.browser import browsertab, mouse, shared
from qutebrowser.browser.webengine import (webview, webengineelem, tabhistory,
interceptor, webenginequtescheme,
webenginedownloads,
webenginesettings)
from qutebrowser.misc import miscwidgets
from qutebrowser.utils import (usertypes, qtutils, log, javascript, utils,
message, objreg, jinja, debug)
_qute_scheme_handler = None
def init():
"""Initialize QtWebEngine-specific modules."""
# For some reason we need to keep a reference, otherwise the scheme handler
# won't work...
# https://www.riverbankcomputing.com/pipermail/pyqt/2016-September/038075.html
global _qute_scheme_handler
app = QApplication.instance()
log.init.debug("Initializing qute://* handler...")
_qute_scheme_handler = webenginequtescheme.QuteSchemeHandler(parent=app)
_qute_scheme_handler.install(webenginesettings.default_profile)
_qute_scheme_handler.install(webenginesettings.private_profile)
log.init.debug("Initializing request interceptor...")
host_blocker = objreg.get('host-blocker')
args = objreg.get('args')
req_interceptor = interceptor.RequestInterceptor(
host_blocker, args=args, parent=app)
req_interceptor.install(webenginesettings.default_profile)
req_interceptor.install(webenginesettings.private_profile)
log.init.debug("Initializing QtWebEngine downloads...")
download_manager = webenginedownloads.DownloadManager(parent=app)
download_manager.install(webenginesettings.default_profile)
download_manager.install(webenginesettings.private_profile)
objreg.register('webengine-download-manager', download_manager)
# Clear visited links on web history clear
hist = objreg.get('web-history')
for p in [webenginesettings.default_profile,
webenginesettings.private_profile]:
hist.history_cleared.connect(p.clearAllVisitedLinks)
hist.url_cleared.connect(lambda url, profile=p:
profile.clearVisitedLinks([url]))
# Mapping worlds from usertypes.JsWorld to QWebEngineScript world IDs.
_JS_WORLD_MAP = {
usertypes.JsWorld.main: QWebEngineScript.MainWorld,
usertypes.JsWorld.application: QWebEngineScript.ApplicationWorld,
usertypes.JsWorld.user: QWebEngineScript.UserWorld,
usertypes.JsWorld.jseval: QWebEngineScript.UserWorld + 1,
}
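# Illustrative note (a sketch, not part of the original module): with this
# map, a call like tab.run_js_async(code, world=usertypes.JsWorld.jseval)
# (where `tab` is a WebEngineTab instance) resolves to
# QWebEngineScript.UserWorld + 1 in WebEngineTab.run_js_async() below, which
# then hands that world id to QWebEnginePage.runJavaScript().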
class WebEngineAction(browsertab.AbstractAction):
"""QtWebEngine implementations related to web actions."""
action_class = QWebEnginePage
action_base = QWebEnginePage.WebAction
def exit_fullscreen(self):
self._widget.triggerPageAction(QWebEnginePage.ExitFullScreen)
def save_page(self):
"""Save the current page."""
self._widget.triggerPageAction(QWebEnginePage.SavePage)
def show_source(self):
try:
self._widget.triggerPageAction(QWebEnginePage.ViewSource)
except AttributeError:
# Qt < 5.8
tb = objreg.get('tabbed-browser', scope='window',
window=self._tab.win_id)
urlstr = self._tab.url().toString(QUrl.RemoveUserInfo)
# The original URL becomes the path of a view-source: URL
# (without a host), but query/fragment should stay.
url = QUrl('view-source:' + urlstr)
tb.tabopen(url, background=False, related=True)
class WebEnginePrinting(browsertab.AbstractPrinting):
"""QtWebEngine implementations related to printing."""
def check_pdf_support(self):
return True
def check_printer_support(self):
if not hasattr(self._widget.page(), 'print'):
raise browsertab.WebTabError(
"Printing is unsupported with QtWebEngine on Qt < 5.8")
def check_preview_support(self):
raise browsertab.WebTabError(
"Print previews are unsupported with QtWebEngine")
def to_pdf(self, filename):
self._widget.page().printToPdf(filename)
def to_printer(self, printer, callback=None):
if callback is None:
callback = lambda _ok: None
self._widget.page().print(printer, callback)
class WebEngineSearch(browsertab.AbstractSearch):
"""QtWebEngine implementations related to searching on the page.
Attributes:
_flags: The QWebEnginePage.FindFlags of the last search.
_pending_searches: How many searches have been started but not called
back yet.
"""
def __init__(self, parent=None):
super().__init__(parent)
self._flags = QWebEnginePage.FindFlags(0)
self._pending_searches = 0
def _find(self, text, flags, callback, caller):
"""Call findText on the widget."""
self.search_displayed = True
self._pending_searches += 1
def wrapped_callback(found):
"""Wrap the callback to do debug logging."""
self._pending_searches -= 1
if self._pending_searches > 0:
# See https://github.com/qutebrowser/qutebrowser/issues/2442
# and https://github.com/qt/qtwebengine/blob/5.10/src/core/web_contents_adapter.cpp#L924-L934
log.webview.debug("Ignoring cancelled search callback with "
"{} pending searches".format(
self._pending_searches))
return
found_text = 'found' if found else "didn't find"
if flags:
flag_text = 'with flags {}'.format(debug.qflags_key(
QWebEnginePage, flags, klass=QWebEnginePage.FindFlag))
else:
flag_text = ''
log.webview.debug(' '.join([caller, found_text, text, flag_text])
.strip())
if callback is not None:
callback(found)
self._widget.findText(text, flags, wrapped_callback)
def search(self, text, *, ignore_case='never', reverse=False,
result_cb=None):
# Don't go to next entry on duplicate search
if self.text == text and self.search_displayed:
log.webview.debug("Ignoring duplicate search request"
" for {}".format(text))
return
self.text = text
self._flags = QWebEnginePage.FindFlags(0)
if self._is_case_sensitive(ignore_case):
self._flags |= QWebEnginePage.FindCaseSensitively
if reverse:
self._flags |= QWebEnginePage.FindBackward
self._find(text, self._flags, result_cb, 'search')
def clear(self):
self.search_displayed = False
self._widget.findText('')
def prev_result(self, *, result_cb=None):
# The int() here makes sure we get a copy of the flags.
flags = QWebEnginePage.FindFlags(int(self._flags))
if flags & QWebEnginePage.FindBackward:
flags &= ~QWebEnginePage.FindBackward
else:
flags |= QWebEnginePage.FindBackward
self._find(self.text, flags, result_cb, 'prev_result')
def next_result(self, *, result_cb=None):
self._find(self.text, self._flags, result_cb, 'next_result')
class WebEngineCaret(browsertab.AbstractCaret):
"""QtWebEngine implementations related to moving the cursor/selection."""
@pyqtSlot(usertypes.KeyMode)
def _on_mode_entered(self, mode):
if mode != usertypes.KeyMode.caret:
return
if self._tab.search.search_displayed:
# We are currently in search mode.
# convert the search to a blue selection so we can operate on it
# https://bugreports.qt.io/browse/QTBUG-60673
self._tab.search.clear()
self._tab.run_js_async(
javascript.assemble('caret', 'setPlatform', sys.platform))
self._js_call('setInitialCursor', self._selection_cb)
def _selection_cb(self, enabled):
"""Emit selection_toggled based on setInitialCursor."""
if enabled is None:
log.webview.debug("Ignoring selection status None")
return
self.selection_toggled.emit(enabled)
@pyqtSlot(usertypes.KeyMode)
def _on_mode_left(self, mode):
if mode != usertypes.KeyMode.caret:
return
self.drop_selection()
self._js_call('disableCaret')
def move_to_next_line(self, count=1):
for _ in range(count):
self._js_call('moveDown')
def move_to_prev_line(self, count=1):
for _ in range(count):
self._js_call('moveUp')
def move_to_next_char(self, count=1):
for _ in range(count):
self._js_call('moveRight')
def move_to_prev_char(self, count=1):
for _ in range(count):
self._js_call('moveLeft')
def move_to_end_of_word(self, count=1):
for _ in range(count):
self._js_call('moveToEndOfWord')
def move_to_next_word(self, count=1):
for _ in range(count):
self._js_call('moveToNextWord')
def move_to_prev_word(self, count=1):
for _ in range(count):
self._js_call('moveToPreviousWord')
def move_to_start_of_line(self):
self._js_call('moveToStartOfLine')
def move_to_end_of_line(self):
self._js_call('moveToEndOfLine')
def move_to_start_of_next_block(self, count=1):
for _ in range(count):
self._js_call('moveToStartOfNextBlock')
def move_to_start_of_prev_block(self, count=1):
for _ in range(count):
self._js_call('moveToStartOfPrevBlock')
def move_to_end_of_next_block(self, count=1):
for _ in range(count):
self._js_call('moveToEndOfNextBlock')
def move_to_end_of_prev_block(self, count=1):
for _ in range(count):
self._js_call('moveToEndOfPrevBlock')
def move_to_start_of_document(self):
self._js_call('moveToStartOfDocument')
def move_to_end_of_document(self):
self._js_call('moveToEndOfDocument')
def toggle_selection(self):
self._js_call('toggleSelection', self.selection_toggled.emit)
def drop_selection(self):
self._js_call('dropSelection')
def selection(self, callback):
# Not using selectedText() as WORKAROUND for
# https://bugreports.qt.io/browse/QTBUG-53134
# Even on Qt 5.10 selectedText() seems to work poorly, see
# https://github.com/qutebrowser/qutebrowser/issues/3523
self._tab.run_js_async(javascript.assemble('caret', 'getSelection'),
callback)
def _follow_selected_cb(self, js_elem, tab=False):
"""Callback for javascript which clicks the selected element.
Args:
js_elem: The element serialized from javascript.
tab: Open in a new tab.
"""
if js_elem is None:
return
assert isinstance(js_elem, dict), js_elem
elem = webengineelem.WebEngineElement(js_elem, tab=self._tab)
if tab:
click_type = usertypes.ClickTarget.tab
else:
click_type = usertypes.ClickTarget.normal
# Only click if we see a link
if elem.is_link():
log.webview.debug("Found link in selection, clicking. ClickTarget "
"{}, elem {}".format(click_type, elem))
elem.click(click_type)
def follow_selected(self, *, tab=False):
if self._tab.search.search_displayed:
# We are currently in search mode.
# let's click the link via a fake-click
# https://bugreports.qt.io/browse/QTBUG-60673
self._tab.search.clear()
log.webview.debug("Clicking a searched link via fake key press.")
# send a fake enter, clicking the orange selection box
if tab:
self._tab.key_press(Qt.Key_Enter, modifier=Qt.ControlModifier)
else:
self._tab.key_press(Qt.Key_Enter)
else:
# click an existing blue selection
js_code = javascript.assemble('webelem', 'find_selected_link')
self._tab.run_js_async(js_code, lambda jsret:
self._follow_selected_cb(jsret, tab))
def _js_call(self, command, callback=None):
self._tab.run_js_async(javascript.assemble('caret', command), callback)
class WebEngineScroller(browsertab.AbstractScroller):
"""QtWebEngine implementations related to scrolling."""
def __init__(self, tab, parent=None):
super().__init__(tab, parent)
self._args = objreg.get('args')
self._pos_perc = (0, 0)
self._pos_px = QPoint()
self._at_bottom = False
def _init_widget(self, widget):
super()._init_widget(widget)
page = widget.page()
page.scrollPositionChanged.connect(self._update_pos)
def _repeated_key_press(self, key, count=1, modifier=Qt.NoModifier):
"""Send count fake key presses to this scroller's WebEngineTab."""
for _ in range(min(count, 1000)):
self._tab.key_press(key, modifier)
@pyqtSlot(QPointF)
def _update_pos(self, pos):
"""Update the scroll position attributes when it changed."""
self._pos_px = pos.toPoint()
contents_size = self._widget.page().contentsSize()
scrollable_x = contents_size.width() - self._widget.width()
if scrollable_x == 0:
perc_x = 0
else:
try:
perc_x = min(100, round(100 / scrollable_x * pos.x()))
except ValueError:
# https://github.com/qutebrowser/qutebrowser/issues/3219
log.misc.debug("Got ValueError!")
log.misc.debug("contents_size.width(): {}".format(
contents_size.width()))
log.misc.debug("self._widget.width(): {}".format(
self._widget.width()))
log.misc.debug("scrollable_x: {}".format(scrollable_x))
log.misc.debug("pos.x(): {}".format(pos.x()))
raise
scrollable_y = contents_size.height() - self._widget.height()
if scrollable_y == 0:
perc_y = 0
else:
perc_y = min(100, round(100 / scrollable_y * pos.y()))
self._at_bottom = math.ceil(pos.y()) >= scrollable_y
if (self._pos_perc != (perc_x, perc_y) or
'no-scroll-filtering' in self._args.debug_flags):
self._pos_perc = perc_x, perc_y
self.perc_changed.emit(*self._pos_perc)
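# Worked example (illustrative numbers, not taken from the code): with a
# contents height of 3000px and a widget height of 1000px, scrollable_y is
# 2000; a scroll position of y=500 gives perc_y = round(100 / 2000 * 500)
# = 25, and _at_bottom only becomes True once ceil(pos.y()) reaches 2000.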
def pos_px(self):
return self._pos_px
def pos_perc(self):
return self._pos_perc
def to_perc(self, x=None, y=None):
js_code = javascript.assemble('scroll', 'to_perc', x, y)
self._tab.run_js_async(js_code)
def to_point(self, point):
js_code = javascript.assemble('window', 'scroll', point.x(), point.y())
self._tab.run_js_async(js_code)
def to_anchor(self, name):
url = self._tab.url()
url.setFragment(name)
self._tab.openurl(url)
def delta(self, x=0, y=0):
self._tab.run_js_async(javascript.assemble('window', 'scrollBy', x, y))
def delta_page(self, x=0, y=0):
js_code = javascript.assemble('scroll', 'delta_page', x, y)
self._tab.run_js_async(js_code)
def up(self, count=1):
self._repeated_key_press(Qt.Key_Up, count)
def down(self, count=1):
self._repeated_key_press(Qt.Key_Down, count)
def left(self, count=1):
self._repeated_key_press(Qt.Key_Left, count)
def right(self, count=1):
self._repeated_key_press(Qt.Key_Right, count)
def top(self):
self._tab.key_press(Qt.Key_Home)
def bottom(self):
self._tab.key_press(Qt.Key_End)
def page_up(self, count=1):
self._repeated_key_press(Qt.Key_PageUp, count)
def page_down(self, count=1):
self._repeated_key_press(Qt.Key_PageDown, count)
def at_top(self):
return self.pos_px().y() == 0
def at_bottom(self):
return self._at_bottom
class WebEngineHistory(browsertab.AbstractHistory):
"""QtWebEngine implementations related to page history."""
def current_idx(self):
return self._history.currentItemIndex()
def can_go_back(self):
return self._history.canGoBack()
def can_go_forward(self):
return self._history.canGoForward()
def _item_at(self, i):
return self._history.itemAt(i)
def _go_to_item(self, item):
self._tab.predicted_navigation.emit(item.url())
self._history.goToItem(item)
def serialize(self):
if not qtutils.version_check('5.9', compiled=False):
# WORKAROUND for
# https://github.com/qutebrowser/qutebrowser/issues/2289
# Don't use the history's currentItem here, because of
# https://bugreports.qt.io/browse/QTBUG-59599 and because it doesn't
# contain view-source.
scheme = self._tab.url().scheme()
if scheme in ['view-source', 'chrome']:
raise browsertab.WebTabError("Can't serialize special URL!")
return qtutils.serialize(self._history)
def deserialize(self, data):
return qtutils.deserialize(data, self._history)
def load_items(self, items):
if items:
self._tab.predicted_navigation.emit(items[-1].url)
stream, _data, cur_data = tabhistory.serialize(items)
qtutils.deserialize_stream(stream, self._history)
@pyqtSlot()
def _on_load_finished():
self._tab.scroller.to_point(cur_data['scroll-pos'])
self._tab.load_finished.disconnect(_on_load_finished)
if cur_data is not None:
if 'zoom' in cur_data:
self._tab.zoom.set_factor(cur_data['zoom'])
if ('scroll-pos' in cur_data and
self._tab.scroller.pos_px() == QPoint(0, 0)):
self._tab.load_finished.connect(_on_load_finished)
class WebEngineZoom(browsertab.AbstractZoom):
"""QtWebEngine implementations related to zooming."""
def _set_factor_internal(self, factor):
self._widget.setZoomFactor(factor)
class WebEngineElements(browsertab.AbstractElements):
"""QtWebEngine implemementations related to elements on the page."""
def _js_cb_multiple(self, callback, js_elems):
"""Handle found elements coming from JS and call the real callback.
Args:
callback: The callback to call with the found elements.
Called with None if there was an error.
js_elems: The elements serialized from javascript.
"""
if js_elems is None:
callback(None)
return
elems = []
for js_elem in js_elems:
elem = webengineelem.WebEngineElement(js_elem, tab=self._tab)
elems.append(elem)
callback(elems)
def _js_cb_single(self, callback, js_elem):
"""Handle a found focus elem coming from JS and call the real callback.
Args:
callback: The callback to call with the found element.
Called with a WebEngineElement or None.
js_elem: The element serialized from javascript.
"""
debug_str = ('None' if js_elem is None
else utils.elide(repr(js_elem), 1000))
log.webview.debug("Got element from JS: {}".format(debug_str))
if js_elem is None:
callback(None)
else:
elem = webengineelem.WebEngineElement(js_elem, tab=self._tab)
callback(elem)
def find_css(self, selector, callback, *, only_visible=False):
js_code = javascript.assemble('webelem', 'find_css', selector,
only_visible)
js_cb = functools.partial(self._js_cb_multiple, callback)
self._tab.run_js_async(js_code, js_cb)
def find_id(self, elem_id, callback):
js_code = javascript.assemble('webelem', 'find_id', elem_id)
js_cb = functools.partial(self._js_cb_single, callback)
self._tab.run_js_async(js_code, js_cb)
def find_focused(self, callback):
js_code = javascript.assemble('webelem', 'find_focused')
js_cb = functools.partial(self._js_cb_single, callback)
self._tab.run_js_async(js_code, js_cb)
def find_at_pos(self, pos, callback):
assert pos.x() >= 0
assert pos.y() >= 0
pos /= self._tab.zoom.factor()
js_code = javascript.assemble('webelem', 'find_at_pos',
pos.x(), pos.y())
js_cb = functools.partial(self._js_cb_single, callback)
self._tab.run_js_async(js_code, js_cb)
class WebEngineTab(browsertab.AbstractTab):
"""A QtWebEngine tab in the browser.
Signals:
_load_finished_fake:
Used in place of unreliable loadFinished
"""
# WORKAROUND for https://bugreports.qt.io/browse/QTBUG-65223
_load_finished_fake = pyqtSignal(bool)
def __init__(self, *, win_id, mode_manager, private, parent=None):
super().__init__(win_id=win_id, mode_manager=mode_manager,
private=private, parent=parent)
widget = webview.WebEngineView(tabdata=self.data, win_id=win_id,
private=private)
self.history = WebEngineHistory(self)
self.scroller = WebEngineScroller(self, parent=self)
self.caret = WebEngineCaret(mode_manager=mode_manager,
tab=self, parent=self)
self.zoom = WebEngineZoom(tab=self, parent=self)
self.search = WebEngineSearch(parent=self)
self.printing = WebEnginePrinting()
self.elements = WebEngineElements(tab=self)
self.action = WebEngineAction(tab=self)
# We're assigning settings in _set_widget
self.settings = webenginesettings.WebEngineSettings(settings=None)
self._set_widget(widget)
self._connect_signals()
self.backend = usertypes.Backend.QtWebEngine
self._child_event_filter = None
self._saved_zoom = None
self._reload_url = None
config.instance.changed.connect(self._on_config_changed)
self._init_js()
@pyqtSlot(str)
def _on_config_changed(self, option):
if option in ['scrolling.bar', 'content.user_stylesheets']:
self._init_stylesheet()
self._update_stylesheet()
def _update_stylesheet(self):
"""Update the custom stylesheet in existing tabs."""
css = shared.get_user_stylesheet()
code = javascript.assemble('stylesheet', 'set_css', css)
self.run_js_async(code)
def _inject_early_js(self, name, js_code, *,
world=QWebEngineScript.ApplicationWorld,
subframes=False):
"""Inject the given script to run early on a page load.
This runs the script both on DocumentCreation and DocumentReady as on
some internal pages, DocumentCreation will not work.
That is a WORKAROUND for https://bugreports.qt.io/browse/QTBUG-66011
"""
scripts = self._widget.page().scripts()
for injection in ['creation', 'ready']:
injection_points = {
'creation': QWebEngineScript.DocumentCreation,
'ready': QWebEngineScript.DocumentReady,
}
script = QWebEngineScript()
script.setInjectionPoint(injection_points[injection])
script.setSourceCode(js_code)
script.setWorldId(world)
script.setRunsOnSubFrames(subframes)
script.setName('_qute_{}_{}'.format(name, injection))
scripts.insert(script)
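# Illustrative note (names derived from the loop above): calling
# self._inject_early_js('js', code) registers two copies of the script,
# named '_qute_js_creation' and '_qute_js_ready', so it still runs on
# internal pages where the DocumentCreation injection point is ignored
# (QTBUG-66011); _remove_early_js() below can remove both again.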
def _remove_early_js(self, name):
"""Remove an early QWebEngineScript."""
scripts = self._widget.page().scripts()
for injection in ['creation', 'ready']:
full_name = '_qute_{}_{}'.format(name, injection)
script = scripts.findScript(full_name)
if not script.isNull():
scripts.remove(script)
def _init_js(self):
"""Initialize global qutebrowser JavaScript."""
js_code = javascript.wrap_global(
'scripts',
utils.read_file('javascript/scroll.js'),
utils.read_file('javascript/webelem.js'),
utils.read_file('javascript/caret.js'),
)
# FIXME:qtwebengine what about subframes=True?
self._inject_early_js('js', js_code, subframes=True)
self._init_stylesheet()
greasemonkey = objreg.get('greasemonkey')
greasemonkey.scripts_reloaded.connect(self._inject_userscripts)
self._inject_userscripts()
def _init_stylesheet(self):
"""Initialize custom stylesheets.
Partially inspired by QupZilla:
https://github.com/QupZilla/qupzilla/blob/v2.0/src/lib/app/mainapplication.cpp#L1063-L1101
"""
self._remove_early_js('stylesheet')
css = shared.get_user_stylesheet()
js_code = javascript.wrap_global(
'stylesheet',
utils.read_file('javascript/stylesheet.js'),
javascript.assemble('stylesheet', 'set_css', css),
)
self._inject_early_js('stylesheet', js_code, subframes=True)
def _inject_userscripts(self):
"""Register user JavaScript files with the global profiles."""
# The Greasemonkey metadata block support in QtWebEngine only starts at
# Qt 5.8. With 5.7.1, we need to inject the scripts ourselves in
# response to urlChanged.
if not qtutils.version_check('5.8'):
return
# Since we are inserting scripts into profile.scripts, they won't
# simply be replaced by new GM scripts as they would be if we were
# injecting them ourselves. So we need to remove all GM scripts first,
# while not removing anything else that might have been added, like the
# stylesheet script.
greasemonkey = objreg.get('greasemonkey')
scripts = self._widget.page().scripts()
for script in scripts.toList():
if script.name().startswith("GM-"):
log.greasemonkey.debug('Removing script: {}'
.format(script.name()))
removed = scripts.remove(script)
assert removed, script.name()
# Then add the new scripts.
for script in greasemonkey.all_scripts():
# @run-at (and @include/@exclude/@match) is parsed by
# QWebEngineScript.
new_script = QWebEngineScript()
new_script.setWorldId(QWebEngineScript.MainWorld)
new_script.setSourceCode(script.code())
new_script.setName("GM-{}".format(script.name))
new_script.setRunsOnSubFrames(script.runs_on_sub_frames)
log.greasemonkey.debug('adding script: {}'
.format(new_script.name()))
scripts.insert(new_script)
def _install_event_filter(self):
fp = self._widget.focusProxy()
if fp is not None:
fp.installEventFilter(self._mouse_event_filter)
self._child_event_filter = mouse.ChildEventFilter(
eventfilter=self._mouse_event_filter, widget=self._widget,
parent=self)
self._widget.installEventFilter(self._child_event_filter)
@pyqtSlot()
def _restore_zoom(self):
if sip.isdeleted(self._widget):
# https://github.com/qutebrowser/qutebrowser/issues/3498
return
if self._saved_zoom is None:
return
self.zoom.set_factor(self._saved_zoom)
self._saved_zoom = None
def openurl(self, url, *, predict=True):
"""Open the given URL in this tab.
Arguments:
url: The QUrl to open.
predict: If set to False, predicted_navigation is not emitted.
"""
self._saved_zoom = self.zoom.factor()
self._openurl_prepare(url, predict=predict)
self._widget.load(url)
def url(self, requested=False):
page = self._widget.page()
if requested:
return page.requestedUrl()
else:
return page.url()
def dump_async(self, callback, *, plain=False):
if plain:
self._widget.page().toPlainText(callback)
else:
self._widget.page().toHtml(callback)
def run_js_async(self, code, callback=None, *, world=None):
if world is None:
world_id = QWebEngineScript.ApplicationWorld
elif isinstance(world, int):
world_id = world
else:
world_id = _JS_WORLD_MAP[world]
if callback is None:
self._widget.page().runJavaScript(code, world_id)
else:
self._widget.page().runJavaScript(code, world_id, callback)
def shutdown(self):
self.shutting_down.emit()
self.action.exit_fullscreen()
self._widget.shutdown()
def reload(self, *, force=False):
if force:
action = QWebEnginePage.ReloadAndBypassCache
else:
action = QWebEnginePage.Reload
self._widget.triggerPageAction(action)
def stop(self):
self._widget.stop()
def title(self):
return self._widget.title()
def icon(self):
return self._widget.icon()
def set_html(self, html, base_url=QUrl()):
# FIXME:qtwebengine
# check this and raise an exception if too big:
# Warning: The content will be percent encoded before being sent to the
# renderer via IPC. This may increase its size. The maximum size of the
# percent encoded content is 2 megabytes minus 30 bytes.
self._widget.setHtml(html, base_url)
def networkaccessmanager(self):
return None
def user_agent(self):
return None
def clear_ssl_errors(self):
raise browsertab.UnsupportedOperationError
def key_press(self, key, modifier=Qt.NoModifier):
press_evt = QKeyEvent(QEvent.KeyPress, key, modifier, 0, 0, 0)
release_evt = QKeyEvent(QEvent.KeyRelease, key, modifier,
0, 0, 0)
self.send_event(press_evt)
self.send_event(release_evt)
def _show_error_page(self, url, error):
"""Show an error page in the tab."""
log.misc.debug("Showing error page for {}".format(error))
url_string = url.toDisplayString()
error_page = jinja.render(
'error.html',
title="Error loading page: {}".format(url_string),
url=url_string, error=error)
self.set_html(error_page)
@pyqtSlot()
def _on_history_trigger(self):
try:
self._widget.page()
except RuntimeError:
# Looks like this slot can be triggered on destroyed tabs:
# https://crashes.qutebrowser.org/view/3abffbed (Qt 5.9.1)
# wrapped C/C++ object of type WebEngineView has been deleted
log.misc.debug("Ignoring history trigger for destroyed tab")
return
url = self.url()
requested_url = self.url(requested=True)
# Don't save the title if it's generated from the URL
title = self.title()
title_url = QUrl(url)
title_url.setScheme('')
if title == title_url.toDisplayString(QUrl.RemoveScheme).strip('/'):
title = ""
# Don't add history entry if the URL is invalid anyways
if not url.isValid():
log.misc.debug("Ignoring invalid URL being added to history")
return
self.add_history_item.emit(url, requested_url, title)
@pyqtSlot(QUrl, 'QAuthenticator*', 'QString')
def _on_proxy_authentication_required(self, url, authenticator,
proxy_host):
"""Called when a proxy needs authentication."""
msg = "<b>{}</b> requires a username and password.".format(
html_utils.escape(proxy_host))
urlstr = url.toString(QUrl.RemovePassword | QUrl.FullyEncoded)
answer = message.ask(
title="Proxy authentication required", text=msg,
mode=usertypes.PromptMode.user_pwd,
abort_on=[self.shutting_down, self.load_started], url=urlstr)
if answer is not None:
authenticator.setUser(answer.user)
authenticator.setPassword(answer.password)
else:
try:
# pylint: disable=no-member, useless-suppression
sip.assign(authenticator, QAuthenticator())
# pylint: enable=no-member, useless-suppression
except AttributeError:
self._show_error_page(url, "Proxy authentication required")
@pyqtSlot(QUrl, 'QAuthenticator*')
def _on_authentication_required(self, url, authenticator):
netrc_success = False
if not self.data.netrc_used:
self.data.netrc_used = True
netrc_success = shared.netrc_authentication(url, authenticator)
if not netrc_success:
abort_on = [self.shutting_down, self.load_started]
answer = shared.authentication_required(url, authenticator,
abort_on)
if not netrc_success and answer is None:
try:
# pylint: disable=no-member, useless-suppression
sip.assign(authenticator, QAuthenticator())
# pylint: enable=no-member, useless-suppression
except AttributeError:
# WORKAROUND for
# https://www.riverbankcomputing.com/pipermail/pyqt/2016-December/038400.html
self._show_error_page(url, "Authentication required")
@pyqtSlot('QWebEngineFullScreenRequest')
def _on_fullscreen_requested(self, request):
request.accept()
on = request.toggleOn()
self.data.fullscreen = on
self.fullscreen_requested.emit(on)
if on:
notification = miscwidgets.FullscreenNotification(self)
notification.show()
notification.set_timeout(3000)
@pyqtSlot()
def _on_load_started(self):
"""Clear search when a new load is started if needed."""
if (qtutils.version_check('5.9', compiled=False) and
not qtutils.version_check('5.9.2', compiled=False)):
# WORKAROUND for
# https://bugreports.qt.io/browse/QTBUG-61506
self.search.clear()
super()._on_load_started()
self.data.netrc_used = False
@pyqtSlot(QWebEnginePage.RenderProcessTerminationStatus, int)
def _on_render_process_terminated(self, status, exitcode):
"""Show an error when the renderer process terminated."""
if (status == QWebEnginePage.AbnormalTerminationStatus and
exitcode == 256):
# WORKAROUND for https://bugreports.qt.io/browse/QTBUG-58697
status = QWebEnginePage.CrashedTerminationStatus
status_map = {
QWebEnginePage.NormalTerminationStatus:
browsertab.TerminationStatus.normal,
QWebEnginePage.AbnormalTerminationStatus:
browsertab.TerminationStatus.abnormal,
QWebEnginePage.CrashedTerminationStatus:
browsertab.TerminationStatus.crashed,
QWebEnginePage.KilledTerminationStatus:
browsertab.TerminationStatus.killed,
-1:
browsertab.TerminationStatus.unknown,
}
self.renderer_process_terminated.emit(status_map[status], exitcode)
@pyqtSlot(int)
def _on_load_progress_workaround(self, perc):
"""Use loadProgress(100) to emit loadFinished(True).
See https://bugreports.qt.io/browse/QTBUG-65223
"""
if perc == 100 and self.load_status() != usertypes.LoadStatus.error:
self._load_finished_fake.emit(True)
@pyqtSlot(bool)
def _on_load_finished_workaround(self, ok):
"""Use only loadFinished(False).
See https://bugreports.qt.io/browse/QTBUG-65223
"""
if not ok:
self._load_finished_fake.emit(False)
def _error_page_workaround(self, html):
"""Check if we're displaying a Chromium error page.
This gets only called if we got loadFinished(False) without JavaScript,
so we can display at least some error page.
WORKAROUND for https://bugreports.qt.io/browse/QTBUG-66643
Needs to check the page content as a WORKAROUND for
https://bugreports.qt.io/browse/QTBUG-66661
"""
match = re.search(r'"errorCode":"([^"]*)"', html)
if match is None:
return
self._show_error_page(self.url(), error=match.group(1))
@pyqtSlot(bool)
def _on_load_finished(self, ok):
"""Display a static error page if JavaScript is disabled."""
super()._on_load_finished(ok)
js_enabled = self.settings.test_attribute('content.javascript.enabled')
if not ok and not js_enabled:
self.dump_async(self._error_page_workaround)
if ok and self._reload_url is not None:
# WORKAROUND for https://bugreports.qt.io/browse/QTBUG-66656
log.config.debug(
"Loading {} again because of config change".format(
self._reload_url.toDisplayString()))
QTimer.singleShot(100, functools.partial(self.openurl,
self._reload_url,
predict=False))
self._reload_url = None
if not qtutils.version_check('5.10', compiled=False):
# We can't do this when we have the loadFinished workaround as that
# sometimes clears icons without loading a new page.
# In general, this is handled by Qt, but when loading takes long,
# the old icon is still displayed.
self.icon_changed.emit(QIcon())
@pyqtSlot(QUrl)
def _on_predicted_navigation(self, url):
"""If we know we're going to visit an URL soon, change the settings."""
super()._on_predicted_navigation(url)
self.settings.update_for_url(url)
@pyqtSlot(usertypes.NavigationRequest)
def _on_navigation_request(self, navigation):
super()._on_navigation_request(navigation)
if qtutils.version_check('5.11.0', exact=True, compiled=False):
# WORKAROUND for https://bugreports.qt.io/browse/QTBUG-68224
layout = self._widget.layout()
count = layout.count()
children = self._widget.findChildren(QWidget)
if not count and children:
log.webview.warning("Found children not in layout: {}, "
"focus proxy {} (QTBUG-68224)".format(
children, self._widget.focusProxy()))
if count > 1:
log.webview.debug("Found {} widgets! (QTBUG-68224)"
.format(count))
for i in range(count):
item = layout.itemAt(i)
if item is None:
continue
widget = item.widget()
if widget is not self._widget.focusProxy():
log.webview.debug("Removing widget {} (QTBUG-68224)"
.format(widget))
layout.removeWidget(widget)
if not navigation.accepted or not navigation.is_main_frame:
return
settings_needing_reload = {
'content.plugins',
'content.javascript.enabled',
'content.javascript.can_access_clipboard',
'content.print_element_backgrounds',
'input.spatial_navigation',
}
assert settings_needing_reload.issubset(configdata.DATA)
changed = self.settings.update_for_url(navigation.url)
reload_needed = changed & settings_needing_reload
# On Qt < 5.11, we don't need a reload when type == link_clicked.
# On Qt 5.11.0, we always need a reload.
# TODO on Qt > 5.11.0, we hopefully never need a reload:
# https://codereview.qt-project.org/#/c/229525/1
if not qtutils.version_check('5.11.0', exact=True, compiled=False):
if navigation.navigation_type != navigation.Type.link_clicked:
reload_needed = False
if reload_needed:
# WORKAROUND for https://bugreports.qt.io/browse/QTBUG-66656
self._reload_url = navigation.url
def _connect_signals(self):
view = self._widget
page = view.page()
page.windowCloseRequested.connect(self.window_close_requested)
page.linkHovered.connect(self.link_hovered)
page.loadProgress.connect(self._on_load_progress)
page.loadStarted.connect(self._on_load_started)
page.certificate_error.connect(self._on_ssl_errors)
page.authenticationRequired.connect(self._on_authentication_required)
page.proxyAuthenticationRequired.connect(
self._on_proxy_authentication_required)
page.fullScreenRequested.connect(self._on_fullscreen_requested)
page.contentsSizeChanged.connect(self.contents_size_changed)
page.navigation_request.connect(self._on_navigation_request)
view.titleChanged.connect(self.title_changed)
view.urlChanged.connect(self._on_url_changed)
view.renderProcessTerminated.connect(
self._on_render_process_terminated)
view.iconChanged.connect(self.icon_changed)
# WORKAROUND for https://bugreports.qt.io/browse/QTBUG-65223
if qtutils.version_check('5.10', compiled=False):
page.loadProgress.connect(self._on_load_progress_workaround)
self._load_finished_fake.connect(self._on_history_trigger)
self._load_finished_fake.connect(self._restore_zoom)
self._load_finished_fake.connect(self._on_load_finished)
page.loadFinished.connect(self._on_load_finished_workaround)
else:
# for older Qt versions which break with the above
page.loadProgress.connect(self._on_load_progress)
page.loadFinished.connect(self._on_history_trigger)
page.loadFinished.connect(self._restore_zoom)
page.loadFinished.connect(self._on_load_finished)
self.predicted_navigation.connect(self._on_predicted_navigation)
def event_target(self):
return self._widget.focusProxy()
| 1 | 21,464 | You're missing a `return` here, but I'll add it :smile: | qutebrowser-qutebrowser | py |
@@ -116,7 +116,7 @@ static bool gles2_render_texture_with_matrix(struct wlr_renderer *wlr_renderer,
struct wlr_gles2_renderer *renderer =
gles2_get_renderer_in_context(wlr_renderer);
struct wlr_gles2_texture *texture =
- get_gles2_texture_in_context(wlr_texture);
+ gles2_get_texture(wlr_texture);
struct wlr_gles2_tex_shader *shader = NULL;
GLenum target = 0; | 1 | #include <assert.h>
#include <GLES2/gl2.h>
#include <GLES2/gl2ext.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <wayland-server-protocol.h>
#include <wayland-util.h>
#include <wlr/render/egl.h>
#include <wlr/render/interface.h>
#include <wlr/render/wlr_renderer.h>
#include <wlr/types/wlr_matrix.h>
#include <wlr/util/log.h>
#include "glapi.h"
#include "render/gles2.h"
static const struct wlr_renderer_impl renderer_impl;
static struct wlr_gles2_renderer *gles2_get_renderer(
struct wlr_renderer *wlr_renderer) {
assert(wlr_renderer->impl == &renderer_impl);
return (struct wlr_gles2_renderer *)wlr_renderer;
}
static struct wlr_gles2_renderer *gles2_get_renderer_in_context(
struct wlr_renderer *wlr_renderer) {
struct wlr_gles2_renderer *renderer = gles2_get_renderer(wlr_renderer);
assert(wlr_egl_is_current(renderer->egl));
return renderer;
}
static void gles2_begin(struct wlr_renderer *wlr_renderer, uint32_t width,
uint32_t height) {
struct wlr_gles2_renderer *renderer =
gles2_get_renderer_in_context(wlr_renderer);
PUSH_GLES2_DEBUG;
glViewport(0, 0, width, height);
renderer->viewport_width = width;
renderer->viewport_height = height;
// enable transparency
glEnable(GL_BLEND);
glBlendFunc(GL_ONE, GL_ONE_MINUS_SRC_ALPHA);
// XXX: maybe we should save output projection and remove some of the need
// for users to sling matrices themselves
POP_GLES2_DEBUG;
}
static void gles2_end(struct wlr_renderer *wlr_renderer) {
gles2_get_renderer_in_context(wlr_renderer);
// no-op
}
static void gles2_clear(struct wlr_renderer *wlr_renderer,
const float color[static 4]) {
gles2_get_renderer_in_context(wlr_renderer);
PUSH_GLES2_DEBUG;
glClearColor(color[0], color[1], color[2], color[3]);
glClear(GL_COLOR_BUFFER_BIT);
POP_GLES2_DEBUG;
}
static void gles2_scissor(struct wlr_renderer *wlr_renderer,
struct wlr_box *box) {
struct wlr_gles2_renderer *renderer =
gles2_get_renderer_in_context(wlr_renderer);
PUSH_GLES2_DEBUG;
if (box != NULL) {
struct wlr_box gl_box;
wlr_box_transform(box, WL_OUTPUT_TRANSFORM_FLIPPED_180,
renderer->viewport_width, renderer->viewport_height, &gl_box);
glScissor(gl_box.x, gl_box.y, gl_box.width, gl_box.height);
glEnable(GL_SCISSOR_TEST);
} else {
glDisable(GL_SCISSOR_TEST);
}
POP_GLES2_DEBUG;
}
static void draw_quad(void) {
GLfloat verts[] = {
1, 0, // top right
0, 0, // top left
1, 1, // bottom right
0, 1, // bottom left
};
GLfloat texcoord[] = {
1, 0, // top right
0, 0, // top left
1, 1, // bottom right
0, 1, // bottom left
};
glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, 0, verts);
glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, 0, texcoord);
glEnableVertexAttribArray(0);
glEnableVertexAttribArray(1);
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
glDisableVertexAttribArray(0);
glDisableVertexAttribArray(1);
}
static bool gles2_render_texture_with_matrix(struct wlr_renderer *wlr_renderer,
struct wlr_texture *wlr_texture, const float matrix[static 9],
float alpha) {
struct wlr_gles2_renderer *renderer =
gles2_get_renderer_in_context(wlr_renderer);
struct wlr_gles2_texture *texture =
get_gles2_texture_in_context(wlr_texture);
struct wlr_gles2_tex_shader *shader = NULL;
GLenum target = 0;
switch (texture->type) {
case WLR_GLES2_TEXTURE_GLTEX:
case WLR_GLES2_TEXTURE_WL_DRM_GL:
if (texture->has_alpha) {
shader = &renderer->shaders.tex_rgba;
} else {
shader = &renderer->shaders.tex_rgbx;
}
target = GL_TEXTURE_2D;
break;
case WLR_GLES2_TEXTURE_WL_DRM_EXT:
case WLR_GLES2_TEXTURE_DMABUF:
shader = &renderer->shaders.tex_ext;
target = GL_TEXTURE_EXTERNAL_OES;
break;
}
// OpenGL ES 2 requires the glUniformMatrix3fv transpose parameter to be set
// to GL_FALSE
float transposition[9];
wlr_matrix_transpose(transposition, matrix);
PUSH_GLES2_DEBUG;
GLuint tex_id = texture->type == WLR_GLES2_TEXTURE_GLTEX ?
texture->gl_tex : texture->image_tex;
glActiveTexture(GL_TEXTURE0);
glBindTexture(target, tex_id);
glTexParameteri(target, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(target, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glUseProgram(shader->program);
glUniformMatrix3fv(shader->proj, 1, GL_FALSE, transposition);
glUniform1i(shader->invert_y, texture->inverted_y);
glUniform1i(shader->tex, 0);
glUniform1f(shader->alpha, alpha);
draw_quad();
POP_GLES2_DEBUG;
return true;
}
static void gles2_render_quad_with_matrix(struct wlr_renderer *wlr_renderer,
const float color[static 4], const float matrix[static 9]) {
struct wlr_gles2_renderer *renderer =
gles2_get_renderer_in_context(wlr_renderer);
// OpenGL ES 2 requires the glUniformMatrix3fv transpose parameter to be set
// to GL_FALSE
float transposition[9];
wlr_matrix_transpose(transposition, matrix);
PUSH_GLES2_DEBUG;
glUseProgram(renderer->shaders.quad.program);
glUniformMatrix3fv(renderer->shaders.quad.proj, 1, GL_FALSE, transposition);
glUniform4f(renderer->shaders.quad.color, color[0], color[1], color[2], color[3]);
draw_quad();
POP_GLES2_DEBUG;
}
static void gles2_render_ellipse_with_matrix(struct wlr_renderer *wlr_renderer,
const float color[static 4], const float matrix[static 9]) {
struct wlr_gles2_renderer *renderer =
gles2_get_renderer_in_context(wlr_renderer);
// OpenGL ES 2 requires the glUniformMatrix3fv transpose parameter to be set
// to GL_FALSE
float transposition[9];
wlr_matrix_transpose(transposition, matrix);
PUSH_GLES2_DEBUG;
glUseProgram(renderer->shaders.ellipse.program);
glUniformMatrix3fv(renderer->shaders.ellipse.proj, 1, GL_FALSE, transposition);
glUniform4f(renderer->shaders.ellipse.color, color[0], color[1], color[2], color[3]);
draw_quad();
POP_GLES2_DEBUG;
}
static const enum wl_shm_format *gles2_renderer_formats(
struct wlr_renderer *wlr_renderer, size_t *len) {
return get_gles2_formats(len);
}
static bool gles2_resource_is_wl_drm_buffer(struct wlr_renderer *wlr_renderer,
struct wl_resource *resource) {
struct wlr_gles2_renderer *renderer =
gles2_get_renderer_in_context(wlr_renderer);
if (!eglQueryWaylandBufferWL) {
return false;
}
EGLint fmt;
return eglQueryWaylandBufferWL(renderer->egl->display, resource,
EGL_TEXTURE_FORMAT, &fmt);
}
static void gles2_wl_drm_buffer_get_size(struct wlr_renderer *wlr_renderer,
struct wl_resource *buffer, int *width, int *height) {
struct wlr_gles2_renderer *renderer =
gles2_get_renderer_in_context(wlr_renderer);
if (!eglQueryWaylandBufferWL) {
return;
}
eglQueryWaylandBufferWL(renderer->egl->display, buffer, EGL_WIDTH, width);
eglQueryWaylandBufferWL(renderer->egl->display, buffer, EGL_HEIGHT, height);
}
static int gles2_get_dmabuf_formats(struct wlr_renderer *wlr_renderer,
int **formats) {
struct wlr_gles2_renderer *renderer = gles2_get_renderer(wlr_renderer);
return wlr_egl_get_dmabuf_formats(renderer->egl, formats);
}
static int gles2_get_dmabuf_modifiers(struct wlr_renderer *wlr_renderer,
int format, uint64_t **modifiers) {
struct wlr_gles2_renderer *renderer = gles2_get_renderer(wlr_renderer);
return wlr_egl_get_dmabuf_modifiers(renderer->egl, format, modifiers);
}
static bool gles2_read_pixels(struct wlr_renderer *wlr_renderer,
enum wl_shm_format wl_fmt, uint32_t *flags, uint32_t stride,
uint32_t width, uint32_t height, uint32_t src_x, uint32_t src_y,
uint32_t dst_x, uint32_t dst_y, void *data) {
struct wlr_gles2_renderer *renderer =
gles2_get_renderer_in_context(wlr_renderer);
const struct wlr_gles2_pixel_format *fmt = get_gles2_format_from_wl(wl_fmt);
if (fmt == NULL) {
wlr_log(WLR_ERROR, "Cannot read pixels: unsupported pixel format");
return false;
}
PUSH_GLES2_DEBUG;
// Make sure any pending drawing is finished before we try to read it
glFinish();
glGetError(); // Clear the error flag
unsigned char *p = data + dst_y * stride;
uint32_t pack_stride = width * fmt->bpp / 8;
if (pack_stride == stride && dst_x == 0 && flags != NULL) {
// Under these particular conditions, we can read the pixels with only
// one glReadPixels call
glReadPixels(src_x, renderer->viewport_height - height - src_y,
width, height, fmt->gl_format, fmt->gl_type, p);
*flags = WLR_RENDERER_READ_PIXELS_Y_INVERT;
} else {
// Unfortunately GLES2 doesn't support GL_PACK_*, so we have to read
// the lines out row by row
for (size_t i = src_y; i < src_y + height; ++i) {
glReadPixels(src_x, src_y + height - i - 1, width, 1, fmt->gl_format,
fmt->gl_type, p + i * stride + dst_x * fmt->bpp / 8);
}
if (flags != NULL) {
*flags = 0;
}
}
POP_GLES2_DEBUG;
return glGetError() == GL_NO_ERROR;
}
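/* Worked example (illustrative numbers, not from the function above): for
 * WL_SHM_FORMAT_ARGB8888 (32 bpp) and width == 640, pack_stride is
 * 640 * 32 / 8 == 2560. If the caller also passes stride == 2560, dst_x == 0
 * and a non-NULL flags pointer, the single-glReadPixels() fast path runs and
 * the caller is told to flip the result via WLR_RENDERER_READ_PIXELS_Y_INVERT;
 * otherwise the rows are read back one by one. */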
static bool gles2_format_supported(struct wlr_renderer *wlr_renderer,
enum wl_shm_format wl_fmt) {
return get_gles2_format_from_wl(wl_fmt) != NULL;
}
static struct wlr_texture *gles2_texture_from_pixels(
struct wlr_renderer *wlr_renderer, enum wl_shm_format wl_fmt,
uint32_t stride, uint32_t width, uint32_t height, const void *data) {
struct wlr_gles2_renderer *renderer = gles2_get_renderer(wlr_renderer);
return wlr_gles2_texture_from_pixels(renderer->egl, wl_fmt, stride, width,
height, data);
}
static struct wlr_texture *gles2_texture_from_wl_drm(
struct wlr_renderer *wlr_renderer, struct wl_resource *data) {
struct wlr_gles2_renderer *renderer = gles2_get_renderer(wlr_renderer);
return wlr_gles2_texture_from_wl_drm(renderer->egl, data);
}
static struct wlr_texture *gles2_texture_from_dmabuf(
struct wlr_renderer *wlr_renderer,
struct wlr_dmabuf_attributes *attribs) {
struct wlr_gles2_renderer *renderer = gles2_get_renderer(wlr_renderer);
return wlr_gles2_texture_from_dmabuf(renderer->egl, attribs);
}
static void gles2_init_wl_display(struct wlr_renderer *wlr_renderer,
struct wl_display *wl_display) {
struct wlr_gles2_renderer *renderer =
gles2_get_renderer_in_context(wlr_renderer);
if (!wlr_egl_bind_display(renderer->egl, wl_display)) {
wlr_log(WLR_INFO, "failed to bind wl_display to EGL");
}
}
static void gles2_destroy(struct wlr_renderer *wlr_renderer) {
struct wlr_gles2_renderer *renderer = gles2_get_renderer(wlr_renderer);
wlr_egl_make_current(renderer->egl, EGL_NO_SURFACE, NULL);
PUSH_GLES2_DEBUG;
glDeleteProgram(renderer->shaders.quad.program);
glDeleteProgram(renderer->shaders.ellipse.program);
glDeleteProgram(renderer->shaders.tex_rgba.program);
glDeleteProgram(renderer->shaders.tex_rgbx.program);
glDeleteProgram(renderer->shaders.tex_ext.program);
POP_GLES2_DEBUG;
if (glDebugMessageCallbackKHR) {
glDisable(GL_DEBUG_OUTPUT_KHR);
glDebugMessageCallbackKHR(NULL, NULL);
}
free(renderer);
}
static const struct wlr_renderer_impl renderer_impl = {
.destroy = gles2_destroy,
.begin = gles2_begin,
.end = gles2_end,
.clear = gles2_clear,
.scissor = gles2_scissor,
.render_texture_with_matrix = gles2_render_texture_with_matrix,
.render_quad_with_matrix = gles2_render_quad_with_matrix,
.render_ellipse_with_matrix = gles2_render_ellipse_with_matrix,
.formats = gles2_renderer_formats,
.resource_is_wl_drm_buffer = gles2_resource_is_wl_drm_buffer,
.wl_drm_buffer_get_size = gles2_wl_drm_buffer_get_size,
.get_dmabuf_formats = gles2_get_dmabuf_formats,
.get_dmabuf_modifiers = gles2_get_dmabuf_modifiers,
.read_pixels = gles2_read_pixels,
.format_supported = gles2_format_supported,
.texture_from_pixels = gles2_texture_from_pixels,
.texture_from_wl_drm = gles2_texture_from_wl_drm,
.texture_from_dmabuf = gles2_texture_from_dmabuf,
.init_wl_display = gles2_init_wl_display,
};
void push_gles2_marker(const char *file, const char *func) {
if (!glPushDebugGroupKHR) {
return;
}
int len = snprintf(NULL, 0, "%s:%s", file, func) + 1;
char str[len];
snprintf(str, len, "%s:%s", file, func);
glPushDebugGroupKHR(GL_DEBUG_SOURCE_APPLICATION_KHR, 1, -1, str);
}
void pop_gles2_marker(void) {
if (glPopDebugGroupKHR) {
glPopDebugGroupKHR();
}
}
static enum wlr_log_importance gles2_log_importance_to_wlr(GLenum type) {
switch (type) {
case GL_DEBUG_TYPE_ERROR_KHR: return WLR_ERROR;
case GL_DEBUG_TYPE_DEPRECATED_BEHAVIOR_KHR: return WLR_DEBUG;
case GL_DEBUG_TYPE_UNDEFINED_BEHAVIOR_KHR: return WLR_ERROR;
case GL_DEBUG_TYPE_PORTABILITY_KHR: return WLR_DEBUG;
case GL_DEBUG_TYPE_PERFORMANCE_KHR: return WLR_DEBUG;
case GL_DEBUG_TYPE_OTHER_KHR: return WLR_DEBUG;
case GL_DEBUG_TYPE_MARKER_KHR: return WLR_DEBUG;
case GL_DEBUG_TYPE_PUSH_GROUP_KHR: return WLR_DEBUG;
case GL_DEBUG_TYPE_POP_GROUP_KHR: return WLR_DEBUG;
default: return WLR_DEBUG;
}
}
static void gles2_log(GLenum src, GLenum type, GLuint id, GLenum severity,
GLsizei len, const GLchar *msg, const void *user) {
_wlr_log(gles2_log_importance_to_wlr(type), "[GLES2] %s", msg);
}
static GLuint compile_shader(GLuint type, const GLchar *src) {
PUSH_GLES2_DEBUG;
GLuint shader = glCreateShader(type);
glShaderSource(shader, 1, &src, NULL);
glCompileShader(shader);
GLint ok;
glGetShaderiv(shader, GL_COMPILE_STATUS, &ok);
if (ok == GL_FALSE) {
glDeleteShader(shader);
shader = 0;
}
POP_GLES2_DEBUG;
return shader;
}
static GLuint link_program(const GLchar *vert_src, const GLchar *frag_src) {
PUSH_GLES2_DEBUG;
GLuint vert = compile_shader(GL_VERTEX_SHADER, vert_src);
if (!vert) {
goto error;
}
GLuint frag = compile_shader(GL_FRAGMENT_SHADER, frag_src);
if (!frag) {
glDeleteShader(vert);
goto error;
}
GLuint prog = glCreateProgram();
glAttachShader(prog, vert);
glAttachShader(prog, frag);
glLinkProgram(prog);
glDetachShader(prog, vert);
glDetachShader(prog, frag);
glDeleteShader(vert);
glDeleteShader(frag);
GLint ok;
glGetProgramiv(prog, GL_LINK_STATUS, &ok);
if (ok == GL_FALSE) {
glDeleteProgram(prog);
goto error;
}
POP_GLES2_DEBUG;
return prog;
error:
POP_GLES2_DEBUG;
return 0;
}
extern const GLchar quad_vertex_src[];
extern const GLchar quad_fragment_src[];
extern const GLchar ellipse_fragment_src[];
extern const GLchar tex_vertex_src[];
extern const GLchar tex_fragment_src_rgba[];
extern const GLchar tex_fragment_src_rgbx[];
extern const GLchar tex_fragment_src_external[];
struct wlr_renderer *wlr_gles2_renderer_create(struct wlr_egl *egl) {
if (!load_glapi()) {
return NULL;
}
struct wlr_gles2_renderer *renderer =
calloc(1, sizeof(struct wlr_gles2_renderer));
if (renderer == NULL) {
return NULL;
}
wlr_renderer_init(&renderer->wlr_renderer, &renderer_impl);
renderer->egl = egl;
wlr_egl_make_current(renderer->egl, EGL_NO_SURFACE, NULL);
renderer->exts_str = (const char*) glGetString(GL_EXTENSIONS);
wlr_log(WLR_INFO, "Using %s", glGetString(GL_VERSION));
wlr_log(WLR_INFO, "GL vendor: %s", glGetString(GL_VENDOR));
wlr_log(WLR_INFO, "Supported GLES2 extensions: %s", renderer->exts_str);
if (glDebugMessageCallbackKHR && glDebugMessageControlKHR) {
glEnable(GL_DEBUG_OUTPUT_KHR);
glEnable(GL_DEBUG_OUTPUT_SYNCHRONOUS_KHR);
glDebugMessageCallbackKHR(gles2_log, NULL);
// Silence unwanted message types
glDebugMessageControlKHR(GL_DONT_CARE, GL_DEBUG_TYPE_POP_GROUP_KHR,
GL_DONT_CARE, 0, NULL, GL_FALSE);
glDebugMessageControlKHR(GL_DONT_CARE, GL_DEBUG_TYPE_PUSH_GROUP_KHR,
GL_DONT_CARE, 0, NULL, GL_FALSE);
}
PUSH_GLES2_DEBUG;
GLuint prog;
renderer->shaders.quad.program = prog =
link_program(quad_vertex_src, quad_fragment_src);
if (!renderer->shaders.quad.program) {
goto error;
}
renderer->shaders.quad.proj = glGetUniformLocation(prog, "proj");
renderer->shaders.quad.color = glGetUniformLocation(prog, "color");
renderer->shaders.ellipse.program = prog =
link_program(quad_vertex_src, ellipse_fragment_src);
if (!renderer->shaders.ellipse.program) {
goto error;
}
renderer->shaders.ellipse.proj = glGetUniformLocation(prog, "proj");
renderer->shaders.ellipse.color = glGetUniformLocation(prog, "color");
renderer->shaders.tex_rgba.program = prog =
link_program(tex_vertex_src, tex_fragment_src_rgba);
if (!renderer->shaders.tex_rgba.program) {
goto error;
}
renderer->shaders.tex_rgba.proj = glGetUniformLocation(prog, "proj");
renderer->shaders.tex_rgba.invert_y = glGetUniformLocation(prog, "invert_y");
renderer->shaders.tex_rgba.tex = glGetUniformLocation(prog, "tex");
renderer->shaders.tex_rgba.alpha = glGetUniformLocation(prog, "alpha");
renderer->shaders.tex_rgbx.program = prog =
link_program(tex_vertex_src, tex_fragment_src_rgbx);
if (!renderer->shaders.tex_rgbx.program) {
goto error;
}
renderer->shaders.tex_rgbx.proj = glGetUniformLocation(prog, "proj");
renderer->shaders.tex_rgbx.invert_y = glGetUniformLocation(prog, "invert_y");
renderer->shaders.tex_rgbx.tex = glGetUniformLocation(prog, "tex");
renderer->shaders.tex_rgbx.alpha = glGetUniformLocation(prog, "alpha");
if (glEGLImageTargetTexture2DOES) {
renderer->shaders.tex_ext.program = prog =
link_program(tex_vertex_src, tex_fragment_src_external);
if (!renderer->shaders.tex_ext.program) {
goto error;
}
renderer->shaders.tex_ext.proj = glGetUniformLocation(prog, "proj");
renderer->shaders.tex_ext.invert_y = glGetUniformLocation(prog, "invert_y");
renderer->shaders.tex_ext.tex = glGetUniformLocation(prog, "tex");
renderer->shaders.tex_ext.alpha = glGetUniformLocation(prog, "alpha");
}
POP_GLES2_DEBUG;
return &renderer->wlr_renderer;
error:
glDeleteProgram(renderer->shaders.quad.program);
glDeleteProgram(renderer->shaders.ellipse.program);
glDeleteProgram(renderer->shaders.tex_rgba.program);
glDeleteProgram(renderer->shaders.tex_rgbx.program);
glDeleteProgram(renderer->shaders.tex_ext.program);
POP_GLES2_DEBUG;
if (glDebugMessageCallbackKHR) {
glDisable(GL_DEBUG_OUTPUT_KHR);
glDebugMessageCallbackKHR(NULL, NULL);
}
free(renderer);
return NULL;
}
| 1 | 12,196 | Can you try adding back this assertion? I'd like to assert that the texture has been created in the same context as the renderer. | swaywm-wlroots | c |
@@ -234,7 +234,8 @@ class RouteFactory(object):
required_permission = self.method_permissions.get(method)
# For create permission, the object id is the plural endpoint.
- collection_path = service.collection_path.format(**request.matchdict)
+ plural_endpoint = service.collection_path.decode('utf-8')
+ collection_path = plural_endpoint.format(**request.matchdict)
# In the case of a "PUT", check if the targetted record already
# exists, return "write" if it does, "create" otherwise. | 1 | import functools
from pyramid.settings import aslist
from pyramid.security import IAuthorizationPolicy, Authenticated
from zope.interface import implementer
from kinto.core import utils
from kinto.core.storage import exceptions as storage_exceptions
from kinto.core.authentication import prefixed_userid
# A permission is called "dynamic" when it's computed at request time.
DYNAMIC = 'dynamic'
# When permission is set to "private", only the current user is allowed.
PRIVATE = 'private'
def groupfinder(userid, request):
"""Fetch principals from permission backend for the specified `userid`.
This is plugged by default using the ``multiauth.groupfinder`` setting.
"""
backend = getattr(request.registry, 'permission', None)
# Permission backend not configured. Ignore.
if not backend:
return []
# Safety check when Kinto-Core is used without pyramid_multiauth.
if request.prefixed_userid:
userid = request.prefixed_userid
# Query the permission backend only once per request (e.g. batch).
reify_key = userid + '_principals'
if reify_key not in request.bound_data:
principals = backend.get_user_principals(userid)
request.bound_data[reify_key] = principals
return request.bound_data[reify_key]
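# Illustrative note (the userid is an assumption, not from this module): for
# a prefixed userid such as "account:alice", the principals fetched from the
# permission backend are cached in request.bound_data under the key
# "account:alice_principals", so later calls within the same request (e.g.
# the sub-requests of a batch) do not hit the backend again.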
@implementer(IAuthorizationPolicy)
class AuthorizationPolicy(object):
"""Default authorization class, that leverages the permission backend
for shareable resources.
"""
get_bound_permissions = None
"""Callable that takes an object id and a permission and returns
a list of tuples (<object id>, <permission>). Useful when an object's
permissions depend on those of other objects."""
def permits(self, context, principals, permission):
if permission == PRIVATE:
return Authenticated in principals
# Add prefixed user id to principals.
prefixed_userid = context.get_prefixed_userid()
if prefixed_userid and ':' in prefixed_userid:
principals = principals + [prefixed_userid]
prefix, user_id = prefixed_userid.split(':', 1)
# Remove unprefixed user id to avoid conflicts.
# (it is added via Pyramid Authn policy effective principals)
if user_id in principals:
principals.remove(user_id)
# Retro-compatibility with cliquet 2.0 '_' user id prefixes.
# Just in case it was used in permissions definitions.
principals.append('%s_%s' % (prefix, user_id))
if permission == DYNAMIC:
permission = context.required_permission
create_permission = '%s:create' % context.resource_name
if permission == 'create':
permission = create_permission
object_id = context.permission_object_id
if self.get_bound_permissions is None:
bound_perms = [(object_id, permission)]
else:
bound_perms = self.get_bound_permissions(object_id, permission)
allowed = context.check_permission(principals, bound_perms)
# If not allowed on this collection, but some records are shared with
# the current user, then authorize.
# The ShareableResource class will take care of the filtering.
is_list_operation = (context.on_collection and not permission.endswith('create'))
if not allowed and is_list_operation:
shared = context.fetch_shared_records(permission,
principals,
self.get_bound_permissions)
# If allowed to create this kind of object on parent, then allow to obtain the list.
if len(bound_perms) > 0:
# Here we consider that parent URI is one path level above.
parent_uri = '/'.join(object_id.split('/')[:-1])
parent_create_perm = [(parent_uri, create_permission)]
else:
parent_create_perm = [('', 'create')] # Root object.
allowed_to_create = context.check_permission(principals, parent_create_perm)
allowed = shared or allowed_to_create
return allowed
def principals_allowed_by_permission(self, context, permission):
raise NotImplementedError() # PRAGMA NOCOVER
class RouteFactory(object):
resource_name = None
on_collection = False
required_permission = None
permission_object_id = None
current_record = None
shared_ids = None
method_permissions = {
"head": "read",
"get": "read",
"post": "create",
"delete": "write",
"patch": "write"
}
def __init__(self, request):
# Make it available for the authorization policy.
self.get_prefixed_userid = functools.partial(prefixed_userid, request)
# Store some shortcuts.
permission = request.registry.permission
self._check_permission = permission.check_permission
self._get_accessible_objects = permission.get_accessible_objects
# Store current resource and required permission.
service = utils.current_service(request)
is_on_resource = (service is not None and
hasattr(service, 'viewset') and
hasattr(service, 'resource'))
if is_on_resource:
self.resource_name = request.current_resource_name
self.on_collection = getattr(service, "type", None) == "collection"
self.permission_object_id, self.required_permission = (
self._find_required_permission(request, service))
# To obtain shared records on a collection endpoint, use a match:
self._object_id_match = self.get_permission_object_id(request, '*')
self._settings = request.registry.settings
def check_permission(self, principals, bound_perms):
"""Read allowed principals from settings, if not any, query the permission
backend to check if view is allowed.
"""
if not bound_perms:
bound_perms = [(self.resource_name, self.required_permission)]
for (_, permission) in bound_perms:
setting = '%s_%s_principals' % (self.resource_name, permission)
allowed_principals = aslist(self._settings.get(setting, ''))
if allowed_principals:
if bool(set(allowed_principals) & set(principals)):
return True
return self._check_permission(principals, bound_perms)
def fetch_shared_records(self, perm, principals, get_bound_permissions):
"""Fetch records that are readable or writable for the current
principals.
See :meth:`kinto.core.authorization.AuthorizationPolicy.permits`
If no record is shared, it returns None.
.. warning::
This sets the ``shared_ids`` attribute to the context with the
return value. The attribute is then read by
:class:`kinto.core.resource.ShareableResource`
"""
if get_bound_permissions:
bound_perms = get_bound_permissions(self._object_id_match, perm)
else:
bound_perms = [(self._object_id_match, perm)]
by_obj_id = self._get_accessible_objects(principals, bound_perms)
ids = by_obj_id.keys()
# Store for later use in ``ShareableResource``.
self.shared_ids = [self._extract_object_id(id_) for id_ in ids]
return self.shared_ids
def get_permission_object_id(self, request, object_id=None):
"""Returns the permission object id for the current request.
In the nominal case, it is just the current URI without version prefix.
For collections, it is the related record URI using the specified
`object_id`.
See :meth:`kinto.core.resource.model.SharableModel` and
:meth:`kinto.core.authorization.RouteFactory.__init__`
"""
object_uri = utils.strip_uri_prefix(request.path)
if self.on_collection and object_id is not None:
# With the current request on a collection, the record URI must
# be found out by inspecting the collection service and its sibling
# record service.
matchdict = request.matchdict.copy()
matchdict['id'] = object_id
try:
object_uri = utils.instance_uri(request,
self.resource_name,
**matchdict)
if object_id == '*':
object_uri = object_uri.replace('%2A', '*')
except KeyError:
# Maybe the resource has no single record endpoint.
# We consider that object URIs in permissions backend will
# be stored naively:
object_uri = object_uri + '/' + object_id
return object_uri
def _extract_object_id(self, object_uri):
# XXX: Rewrite using kinto.core.utils.view_lookup() and matchdict['id']
return object_uri.split('/')[-1]
def _find_required_permission(self, request, service):
"""Find out what is the permission object id and the required
permission.
.. note::
This method saves an attribute ``self.current_record`` used
in :class:`kinto.core.resource.UserResource`.
"""
        # By default, it's a URI and a permission associated with the method.
permission_object_id = self.get_permission_object_id(request)
method = request.method.lower()
required_permission = self.method_permissions.get(method)
# For create permission, the object id is the plural endpoint.
collection_path = service.collection_path.format(**request.matchdict)
        # In the case of a "PUT", check if the targeted record already
# exists, return "write" if it does, "create" otherwise.
if request.method.lower() == "put":
resource = service.resource(request=request, context=self)
try:
record = resource.model.get_record(resource.record_id)
# Save a reference, to avoid refetching from storage in
# resource.
self.current_record = record
except storage_exceptions.RecordNotFoundError:
# The record does not exist, the permission to create on
# the related collection is required.
permission_object_id = collection_path
required_permission = "create"
else:
# For safe creations, the user needs a create permission.
# See Kinto/kinto#792
if request.headers.get('If-None-Match') == '*':
permission_object_id = collection_path
required_permission = "create"
else:
required_permission = "write"
return (permission_object_id, required_permission)
| 1 | 10,212 | This won't work with Python 3 because it will already be a string. In that case we need to use `decode_header` from utils. | Kinto-kinto | py |
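The review note on this change concerns Python 2/3 header handling: a header value arrives as bytes under Python 2 but is already a str under Python 3, so decoding it unconditionally breaks on Python 3. A minimal sketch of the kind of helper the reviewer points to, assuming a bytes-or-str input (the real kinto.core.utils.decode_header may differ):

def decode_header(value, encoding='utf-8'):
    """Return a header value as text, decoding only when it is bytes.

    Hypothetical helper for illustration; Kinto's actual utility may behave
    differently.
    """
    if isinstance(value, bytes):   # Python 2, or raw WSGI bytes
        return value.decode(encoding)
    return value                   # Python 3: already a str


assert decode_header(b'account:bob') == 'account:bob'
assert decode_header('account:bob') == 'account:bob'
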
@@ -28,6 +28,8 @@ module Mongoid
raise Errors::InvalidDiscriminatorKeyTarget.new(self, self.superclass)
end
+ self._clear_types if self.respond_to?(:_clear_types)
+
if value
Mongoid::Fields::Validators::Macro.validate_field_name(self, value)
value = value.to_s | 1 | # frozen_string_literal: true
# encoding: utf-8
require "mongoid/fields/validators/macro"
module Mongoid
# Provides behavior around traversing the document graph.
#
# @since 4.0.0
module Traversable
extend ActiveSupport::Concern
def _parent
@__parent ||= nil
end
def _parent=(p)
@__parent = p
end
# Module used for prepending to the various discriminator_*= methods
#
# @api private
module DiscriminatorAssignment
def discriminator_key=(value)
if hereditary?
raise Errors::InvalidDiscriminatorKeyTarget.new(self, self.superclass)
end
if value
Mongoid::Fields::Validators::Macro.validate_field_name(self, value)
value = value.to_s
super
else
# When discriminator key is set to nil, replace the class's definition
# of the discriminator key reader (provided by class_attribute earlier)
# and re-delegate to Mongoid.
class << self
delegate :discriminator_key, to: ::Mongoid
end
end
# This condition checks if the new discriminator key would overwrite
# an existing field.
# This condition also checks if the class has any descendants, because
# if it doesn't then it doesn't need a discriminator key.
if !fields.has_key?(self.discriminator_key) && !descendants.empty?
default_proc = lambda { self.class.discriminator_value }
field(self.discriminator_key, default: default_proc, type: String)
end
end
def discriminator_value=(value)
value ||= self.name
add_discriminator_mapping(value)
super
end
end
included do
class_attribute :discriminator_key, instance_accessor: false
class_attribute :discriminator_value, instance_accessor: false
class << self
delegate :discriminator_key, to: ::Mongoid
prepend DiscriminatorAssignment
# @api private
#
# @return [ Hash<String, Class> ] The current mapping of discriminator_values to classes
attr_accessor :discriminator_mapping
end
# Add a discriminator mapping to the parent class. This mapping is used when
# receiving a document to identify its class.
#
# @param [ String ] value The discriminator_value that was just set
    # @param [ Class ] klass The class the discriminator_value was set on
#
# @api private
def self.add_discriminator_mapping(value, klass=self)
self.discriminator_mapping ||= {}
self.discriminator_mapping[value] = klass
superclass.add_discriminator_mapping(value, klass) if hereditary?
end
# Get the discriminator mapping from the parent class
#
# @param [ String ] value The discriminator_value to retrieve
#
# @return [ Class ] klass The class corresponding to the given discriminator_value
#
# @api private
def self.get_discriminator_mapping(value)
self.discriminator_mapping[value]
end
end
# Get all child +Documents+ to this +Document+, going n levels deep if
# necessary. This is used when calling update persistence operations from
# the root document, where changes in the entire tree need to be
# determined. Note that persistence from the embedded documents will
# always be preferred, since they are optimized calls... This operation
# can get expensive in domains with large hierarchies.
#
# @example Get all the document's children.
# person._children
#
# @return [ Array<Document> ] All child documents in the hierarchy.
def _children
@__children ||= collect_children
end
# Collect all the children of this document.
#
# @example Collect all the children.
# document.collect_children
#
# @return [ Array<Document> ] The children.
#
# @since 2.4.0
def collect_children
children = []
embedded_relations.each_pair do |name, association|
without_autobuild do
child = send(name)
Array.wrap(child).each do |doc|
children.push(doc)
children.concat(doc._children)
end if child
end
end
children
end
# Marks all children as being persisted.
#
# @example Flag all the children.
# document.flag_children_persisted
#
# @return [ Array<Document> ] The flagged children.
#
# @since 3.0.7
def flag_children_persisted
_children.each do |child|
child.new_record = false
end
end
# Determines if the document is a subclass of another document.
#
# @example Check if the document is a subclass
# Square.new.hereditary?
#
# @return [ true, false ] True if hereditary, false if not.
def hereditary?
self.class.hereditary?
end
# Sets up a child/parent association. This is used for newly created
# objects so they can be properly added to the graph.
#
# @example Set the parent document.
# document.parentize(parent)
#
# @param [ Document ] document The parent document.
#
# @return [ Document ] The parent document.
def parentize(document)
self._parent = document
end
# Remove a child document from this parent. If an embeds one then set to
# nil, otherwise remove from the embeds many.
#
# This is called from the +RemoveEmbedded+ persistence command.
#
# @example Remove the child.
# document.remove_child(child)
#
# @param [ Document ] child The child (embedded) document to remove.
#
# @since 2.0.0.beta.1
def remove_child(child)
name = child.association_name
if child.embedded_one?
remove_ivar(name)
else
relation = send(name)
relation.send(:delete_one, child)
end
end
# After children are persisted we can call this to move all their changes
# and flag them as persisted in one call.
#
# @example Reset the children.
# document.reset_persisted_children
#
# @return [ Array<Document> ] The children.
#
# @since 2.1.0
def reset_persisted_children
_children.each do |child|
child.move_changes
child.new_record = false
end
_reset_memoized_children!
end
# Resets the memoized children on the object. Called internally when an
# embedded array changes size.
#
# @api semiprivate
#
# @example Reset the memoized children.
# document._reset_memoized_children!
#
# @return [ nil ] nil.
#
# @since 5.0.0
def _reset_memoized_children!
_parent._reset_memoized_children! if _parent
@__children = nil
end
# Return the root document in the object graph. If the current document
# is the root object in the graph it will return self.
#
# @example Get the root document in the hierarchy.
# document._root
#
# @return [ Document ] The root document in the hierarchy.
def _root
object = self
while (object._parent) do object = object._parent; end
object
end
# Is this document the root document of the hierarchy?
#
# @example Is the document the root?
# document._root?
#
# @return [ true, false ] If the document is the root.
#
# @since 3.1.0
def _root?
_parent ? false : true
end
module ClassMethods
# Determines if the document is a subclass of another document.
#
# @example Check if the document is a subclass.
# Square.hereditary?
#
# @return [ true, false ] True if hereditary, false if not.
def hereditary?
!!(Mongoid::Document > superclass)
end
# When inheriting, we want to copy the fields from the parent class and
    # set them on the child to start, mimicking the behavior of the old
# class_inheritable_accessor that was deprecated in Rails edge.
#
# @example Inherit from this class.
# Person.inherited(Doctor)
#
# @param [ Class ] subclass The inheriting class.
#
# @since 2.0.0.rc.6
def inherited(subclass)
super
@_type = nil
subclass.aliased_fields = aliased_fields.dup
subclass.localized_fields = localized_fields.dup
subclass.fields = fields.dup
subclass.pre_processed_defaults = pre_processed_defaults.dup
subclass.post_processed_defaults = post_processed_defaults.dup
subclass._declared_scopes = Hash.new { |hash,key| self._declared_scopes[key] }
subclass.discriminator_value = subclass.name
# We only need the _type field if inheritance is in play, but need to
# add to the root class as well for backwards compatibility.
unless fields.has_key?(self.discriminator_key)
self.discriminator_value = self.name
default_proc = lambda { self.class.discriminator_value }
field(self.discriminator_key, default: default_proc, type: String)
end
end
end
end
end
| 1 | 12,715 | Why is the respond_to check here? | mongodb-mongoid | rb |
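The open question in this review is why the respond_to?(:_clear_types) guard is needed at all. A toy Ruby sketch of the situation such a guard protects against — calling a cache-clearing hook that only some classes define (the classes below are hypothetical, not Mongoid's):

# Hypothetical classes purely for illustration.
class PlainModel
  # Defines no _clear_types hook.
end

class CachedModel
  def self._clear_types
    @cached_types = nil
  end
end

[PlainModel, CachedModel].each do |klass|
  # Without the respond_to? guard this would raise NoMethodError for PlainModel.
  klass._clear_types if klass.respond_to?(:_clear_types)
end
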
@@ -229,7 +229,7 @@ class FunctionDocblockManipulator
continue;
}
- if ($chars[$i] === '\\' || preg_match('/\w/', $char)) {
+ if ($char === '\\' || preg_match('/\w/', $char)) {
if ($this->return_typehint_start === null) {
$this->return_typehint_start = $i + $end_bracket_position + 1;
} | 1 | <?php
namespace Psalm\Internal\FileManipulation;
use PhpParser;
use PhpParser\Node\Expr\ArrowFunction;
use PhpParser\Node\Expr\Closure;
use PhpParser\Node\FunctionLike;
use PhpParser\Node\Stmt\ClassMethod;
use PhpParser\Node\Stmt\Function_;
use Psalm\DocComment;
use Psalm\FileManipulation;
use Psalm\Internal\Analyzer\CommentAnalyzer;
use Psalm\Internal\Analyzer\ProjectAnalyzer;
use function array_merge;
use function count;
use function ltrim;
use function preg_match;
use function reset;
use function str_replace;
use function str_split;
use function strlen;
use function strpos;
use function strrpos;
use function substr;
/**
* @internal
*/
class FunctionDocblockManipulator
{
/**
* Manipulators ordered by line number
*
* @var array<string, array<int, FunctionDocblockManipulator>>
*/
private static $manipulators = [];
/** @var Closure|Function_|ClassMethod|ArrowFunction */
private $stmt;
/** @var int */
private $docblock_start;
/** @var int */
private $docblock_end;
/** @var int */
private $return_typehint_area_start;
/** @var null|int */
private $return_typehint_colon_start;
/** @var null|int */
private $return_typehint_start;
/** @var null|int */
private $return_typehint_end;
/** @var null|string */
private $new_php_return_type;
/** @var bool */
private $return_type_is_php_compatible = false;
/** @var null|string */
private $new_phpdoc_return_type;
/** @var null|string */
private $new_psalm_return_type;
/** @var array<string, string> */
private $new_php_param_types = [];
/** @var array<string, string> */
private $new_phpdoc_param_types = [];
/** @var array<string, string> */
private $new_psalm_param_types = [];
/** @var string */
private $indentation;
/** @var string|null */
private $return_type_description;
/** @var array<string, int> */
private $param_offsets = [];
/** @var array<string, array{int, int}> */
private $param_typehint_offsets = [];
/** @var bool */
private $is_pure = false;
/**
* @param Closure|Function_|ClassMethod|ArrowFunction $stmt
*/
public static function getForFunction(
ProjectAnalyzer $project_analyzer,
string $file_path,
FunctionLike $stmt
): FunctionDocblockManipulator {
if (isset(self::$manipulators[$file_path][$stmt->getLine()])) {
return self::$manipulators[$file_path][$stmt->getLine()];
}
$manipulator
= self::$manipulators[$file_path][$stmt->getLine()]
= new self($file_path, $stmt, $project_analyzer);
return $manipulator;
}
/**
* @param Closure|Function_|ClassMethod|ArrowFunction $stmt
*/
private function __construct(string $file_path, FunctionLike $stmt, ProjectAnalyzer $project_analyzer)
{
$this->stmt = $stmt;
$docblock = $stmt->getDocComment();
$this->docblock_start = $docblock ? $docblock->getStartFilePos() : (int)$stmt->getAttribute('startFilePos');
$this->docblock_end = $function_start = (int)$stmt->getAttribute('startFilePos');
$function_end = (int)$stmt->getAttribute('endFilePos');
foreach ($stmt->params as $param) {
if ($param->var instanceof PhpParser\Node\Expr\Variable
&& \is_string($param->var->name)
) {
$this->param_offsets[$param->var->name] = (int) $param->getAttribute('startFilePos');
if ($param->type) {
$this->param_typehint_offsets[$param->var->name] = [
(int) $param->type->getAttribute('startFilePos'),
(int) $param->type->getAttribute('endFilePos')
];
}
}
}
$codebase = $project_analyzer->getCodebase();
$file_contents = $codebase->getFileContents($file_path);
$last_arg_position = $stmt->params
? (int) $stmt->params[count($stmt->params) - 1]->getAttribute('endFilePos') + 1
: null;
if ($stmt instanceof Closure && $stmt->uses) {
$last_arg_position = (int) $stmt->uses[count($stmt->uses) - 1]->getAttribute('endFilePos') + 1;
}
$end_bracket_position = (int) strpos($file_contents, ')', $last_arg_position ?: $function_start);
$this->return_typehint_area_start = $end_bracket_position + 1;
$function_code = substr($file_contents, $function_start, $function_end);
$function_code_after_bracket = substr($function_code, $end_bracket_position + 1 - $function_start);
// do a little parsing here
$chars = str_split($function_code_after_bracket);
$in_single_line_comment = $in_multi_line_comment = false;
for ($i = 0, $iMax = count($chars); $i < $iMax; ++$i) {
$char = $chars[$i];
switch ($char) {
case "\n":
$in_single_line_comment = false;
continue 2;
case ':':
if ($in_multi_line_comment || $in_single_line_comment) {
continue 2;
}
$this->return_typehint_colon_start = $i + $end_bracket_position + 1;
continue 2;
case '/':
if ($in_multi_line_comment || $in_single_line_comment) {
continue 2;
}
if ($chars[$i + 1] === '*') {
$in_multi_line_comment = true;
++$i;
}
if ($chars[$i + 1] === '/') {
$in_single_line_comment = true;
++$i;
}
continue 2;
case '*':
if ($in_single_line_comment) {
continue 2;
}
if ($chars[$i + 1] === '/') {
$in_multi_line_comment = false;
++$i;
}
continue 2;
case '{':
if ($in_multi_line_comment || $in_single_line_comment) {
continue 2;
}
break 2;
case '?':
if ($in_multi_line_comment || $in_single_line_comment) {
continue 2;
}
$this->return_typehint_start = $i + $end_bracket_position + 1;
break;
}
if ($in_multi_line_comment || $in_single_line_comment) {
continue;
}
if ($chars[$i] === '\\' || preg_match('/\w/', $char)) {
if ($this->return_typehint_start === null) {
$this->return_typehint_start = $i + $end_bracket_position + 1;
}
if ($chars[$i + 1] !== '\\' && !preg_match('/[\w]/', $chars[$i + 1])) {
$this->return_typehint_end = $i + $end_bracket_position + 2;
break;
}
}
}
$preceding_newline_pos = strrpos($file_contents, "\n", $this->docblock_end - strlen($file_contents));
if ($preceding_newline_pos === false) {
$this->indentation = '';
return;
}
$first_line = substr($file_contents, $preceding_newline_pos + 1, $this->docblock_end - $preceding_newline_pos);
$this->indentation = str_replace(ltrim($first_line), '', $first_line);
}
/**
* Sets the new return type
*
*/
public function setReturnType(
?string $php_type,
string $new_type,
string $phpdoc_type,
bool $is_php_compatible,
?string $description
): void {
$new_type = str_replace(['<mixed, mixed>', '<array-key, mixed>'], '', $new_type);
$this->new_php_return_type = $php_type;
$this->new_phpdoc_return_type = $phpdoc_type;
$this->new_psalm_return_type = $new_type;
$this->return_type_is_php_compatible = $is_php_compatible;
$this->return_type_description = $description;
}
/**
* Sets a new param type
*/
public function setParamType(
string $param_name,
?string $php_type,
string $new_type,
string $phpdoc_type
): void {
$new_type = str_replace(['<mixed, mixed>', '<array-key, mixed>', '<empty, empty>'], '', $new_type);
if ($php_type) {
$this->new_php_param_types[$param_name] = $php_type;
}
if ($php_type !== $new_type) {
$this->new_phpdoc_param_types[$param_name] = $phpdoc_type;
$this->new_psalm_param_types[$param_name] = $new_type;
}
}
/**
* Gets a new docblock given the existing docblock, if one exists, and the updated return types
* and/or parameters
*
*/
private function getDocblock(): string
{
$docblock = $this->stmt->getDocComment();
if ($docblock) {
$parsed_docblock = DocComment::parsePreservingLength($docblock);
} else {
$parsed_docblock = new \Psalm\Internal\Scanner\ParsedDocblock('', []);
}
$modified_docblock = false;
foreach ($this->new_phpdoc_param_types as $param_name => $phpdoc_type) {
$found_in_params = false;
$new_param_block = $phpdoc_type . ' ' . '$' . $param_name;
if (isset($parsed_docblock->tags['param'])) {
foreach ($parsed_docblock->tags['param'] as &$param_block) {
$doc_parts = CommentAnalyzer::splitDocLine($param_block);
if (($doc_parts[1] ?? null) === '$' . $param_name) {
if ($param_block !== $new_param_block) {
$modified_docblock = true;
}
$param_block = $new_param_block;
$found_in_params = true;
break;
}
}
}
if (!$found_in_params) {
$modified_docblock = true;
$parsed_docblock->tags['param'][] = $new_param_block;
}
}
$old_phpdoc_return_type = null;
if (isset($parsed_docblock->tags['return'])) {
$old_phpdoc_return_type = reset($parsed_docblock->tags['return']);
}
if ($this->is_pure) {
$modified_docblock = true;
$parsed_docblock->tags['psalm-pure'] = [''];
}
if ($this->new_phpdoc_return_type
&& $this->new_phpdoc_return_type !== $old_phpdoc_return_type
) {
$modified_docblock = true;
$parsed_docblock->tags['return'] = [
$this->new_phpdoc_return_type
. ($this->return_type_description ? (' ' . $this->return_type_description) : ''),
];
}
$old_psalm_return_type = null;
if (isset($parsed_docblock->tags['psalm-return'])) {
$old_psalm_return_type = reset($parsed_docblock->tags['psalm-return']);
}
if ($this->new_psalm_return_type
&& $this->new_phpdoc_return_type !== $this->new_psalm_return_type
&& $this->new_psalm_return_type !== $old_psalm_return_type
) {
$modified_docblock = true;
$parsed_docblock->tags['psalm-return'] = [$this->new_psalm_return_type];
}
if (!$parsed_docblock->tags && !$parsed_docblock->description) {
return '';
}
if (!$modified_docblock) {
return (string)$docblock . "\n" . $this->indentation;
}
return $parsed_docblock->render($this->indentation);
}
/**
* @return array<int, FileManipulation>
*/
public static function getManipulationsForFile(string $file_path): array
{
if (!isset(self::$manipulators[$file_path])) {
return [];
}
$file_manipulations = [];
foreach (self::$manipulators[$file_path] as $manipulator) {
if ($manipulator->new_php_return_type) {
if ($manipulator->return_typehint_start && $manipulator->return_typehint_end) {
$file_manipulations[$manipulator->return_typehint_start] = new FileManipulation(
$manipulator->return_typehint_start,
$manipulator->return_typehint_end,
$manipulator->new_php_return_type
);
} else {
$file_manipulations[$manipulator->return_typehint_area_start] = new FileManipulation(
$manipulator->return_typehint_area_start,
$manipulator->return_typehint_area_start,
': ' . $manipulator->new_php_return_type
);
}
} elseif ($manipulator->new_php_return_type === ''
&& $manipulator->return_typehint_colon_start
&& $manipulator->new_phpdoc_return_type
&& $manipulator->return_typehint_start
&& $manipulator->return_typehint_end
) {
$file_manipulations[$manipulator->return_typehint_start] = new FileManipulation(
$manipulator->return_typehint_colon_start,
$manipulator->return_typehint_end,
''
);
}
if (!$manipulator->new_php_return_type
|| !$manipulator->return_type_is_php_compatible
|| $manipulator->docblock_start !== $manipulator->docblock_end
|| $manipulator->is_pure
) {
$file_manipulations[$manipulator->docblock_start] = new FileManipulation(
$manipulator->docblock_start,
$manipulator->docblock_end,
$manipulator->getDocblock()
);
}
foreach ($manipulator->new_php_param_types as $param_name => $new_php_param_type) {
if (!isset($manipulator->param_offsets[$param_name])) {
continue;
}
$param_offset = $manipulator->param_offsets[$param_name];
$typehint_offsets = $manipulator->param_typehint_offsets[$param_name] ?? null;
if ($new_php_param_type) {
if ($typehint_offsets) {
$file_manipulations[$typehint_offsets[0]] = new FileManipulation(
$typehint_offsets[0],
$typehint_offsets[1],
$new_php_param_type
);
} else {
$file_manipulations[$param_offset] = new FileManipulation(
$param_offset,
$param_offset,
$new_php_param_type . ' '
);
}
} elseif ($new_php_param_type === ''
&& $typehint_offsets
) {
$file_manipulations[$typehint_offsets[0]] = new FileManipulation(
$typehint_offsets[0],
$param_offset,
''
);
}
}
}
return $file_manipulations;
}
public function makePure() : void
{
$this->is_pure = true;
}
public static function clearCache(): void
{
self::$manipulators = [];
}
/**
* @param array<string, array<int, FunctionDocblockManipulator>> $manipulators
*/
public static function addManipulators(array $manipulators) : void
{
self::$manipulators = array_merge($manipulators, self::$manipulators);
}
/**
* @return array<string, array<int, FunctionDocblockManipulator>>
*/
public static function getManipulators(): array
{
return self::$manipulators;
}
}
| 1 | 11,324 | @orklah are you sure it's the same as `$chars[$i]`? It looks like `$i` is changed above, after `$char` assignment. | vimeo-psalm | php |
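The concern raised in this review is that $char is captured at the top of the iteration while $i can be incremented later in the same pass (as the loop does when it consumes a comment opener), so $char and $chars[$i] are not interchangeable afterwards. A stripped-down sketch of that hazard, not Psalm's actual parser:

<?php
// Minimal illustration only; not the FunctionDocblockManipulator loop itself.
$chars = str_split('/*x');

for ($i = 0, $n = count($chars); $i < $n; ++$i) {
    $char = $chars[$i];

    if ($char === '/' && ($chars[$i + 1] ?? '') === '*') {
        ++$i; // consume the '*', as the real loop does for comment openers
    }

    // After the ++$i above, $char still holds '/', while $chars[$i] is now '*'.
    echo $char . ' vs ' . $chars[$i] . PHP_EOL;
}
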
@@ -22,7 +22,7 @@ TEST(DeleteVertexTest, SimpleTest) {
std::unique_ptr<kvstore::KVStore> kv(TestUtils::initKV(rootPath.path()));
// Add vertices
{
- auto* processor = AddVerticesProcessor::instance(kv.get(), nullptr, nullptr);
+ auto* processor = AddVerticesProcessor::instance(kv.get(), nullptr, nullptr, nullptr);
cpp2::AddVerticesRequest req;
req.space_id = 0;
req.overwritable = false; | 1 | /* Copyright (c) 2019 vesoft inc. All rights reserved.
*
* This source code is licensed under Apache 2.0 License,
* attached with Common Clause Condition 1.0, found in the LICENSES directory.
*/
#include "base/Base.h"
#include <gtest/gtest.h>
#include <rocksdb/db.h>
#include "fs/TempDir.h"
#include "storage/test/TestUtils.h"
#include "storage/mutate/DeleteVertexProcessor.h"
#include "storage/mutate/AddVerticesProcessor.h"
#include "base/NebulaKeyUtils.h"
namespace nebula {
namespace storage {
TEST(DeleteVertexTest, SimpleTest) {
fs::TempDir rootPath("/tmp/DeleteVertexTest.XXXXXX");
std::unique_ptr<kvstore::KVStore> kv(TestUtils::initKV(rootPath.path()));
// Add vertices
{
auto* processor = AddVerticesProcessor::instance(kv.get(), nullptr, nullptr);
cpp2::AddVerticesRequest req;
req.space_id = 0;
req.overwritable = false;
// partId => List<Vertex>
for (auto partId = 0; partId < 3; partId++) {
std::vector<cpp2::Vertex> vertices;
for (auto vertexId = partId * 10; vertexId < 10 * (partId + 1); vertexId++) {
std::vector<cpp2::Tag> tags;
for (auto tagId = 0; tagId < 10; tagId++) {
tags.emplace_back(apache::thrift::FragileConstructor::FRAGILE,
tagId,
folly::stringPrintf("%d_%d_%d", partId, vertexId, tagId));
}
vertices.emplace_back(apache::thrift::FragileConstructor::FRAGILE,
vertexId,
std::move(tags));
}
req.parts.emplace(partId, std::move(vertices));
}
auto fut = processor->getFuture();
processor->process(req);
auto resp = std::move(fut).get();
EXPECT_EQ(0, resp.result.failed_codes.size());
for (auto partId = 0; partId < 3; partId++) {
for (auto vertexId = 10 * partId; vertexId < 10 * (partId + 1); vertexId++) {
auto prefix = NebulaKeyUtils::vertexPrefix(partId, vertexId);
std::unique_ptr<kvstore::KVIterator> iter;
EXPECT_EQ(kvstore::ResultCode::SUCCEEDED, kv->prefix(0, partId, prefix, &iter));
TagID tagId = 0;
while (iter->valid()) {
EXPECT_EQ(folly::stringPrintf("%d_%d_%d",
partId, vertexId, tagId), iter->val());
tagId++;
iter->next();
}
EXPECT_EQ(10, tagId);
}
}
}
// Delete vertices
{
for (auto partId = 0; partId < 3; partId++) {
for (auto vertexId = 10 * partId; vertexId < 10 * (partId + 1); vertexId++) {
auto* processor = DeleteVertexProcessor::instance(kv.get(), nullptr, nullptr);
cpp2::DeleteVertexRequest req;
req.set_space_id(0);
req.set_part_id(partId);
req.set_vid(vertexId);
auto fut = processor->getFuture();
processor->process(req);
auto resp = std::move(fut).get();
EXPECT_EQ(0, resp.result.failed_codes.size());
}
}
}
for (auto partId = 0; partId < 3; partId++) {
for (auto vertexId = 10 * partId; vertexId < 10 * (partId + 1); vertexId++) {
auto prefix = NebulaKeyUtils::vertexPrefix(partId, vertexId);
std::unique_ptr<kvstore::KVIterator> iter;
EXPECT_EQ(kvstore::ResultCode::SUCCEEDED, kv->prefix(0, partId, prefix, &iter));
CHECK(!iter->valid());
}
}
}
} // namespace storage
} // namespace nebula
int main(int argc, char** argv) {
testing::InitGoogleTest(&argc, argv);
folly::init(&argc, &argv, true);
google::SetStderrLogging(google::INFO);
return RUN_ALL_TESTS();
}
| 1 | 26,557 | I have a question? How do I construct a indexManager for testcase? | vesoft-inc-nebula | cpp |
@@ -73,6 +73,9 @@ int __FPGA_API__ opae_plugin_configure(opae_api_adapter_table *adapter,
{
UNUSED_PARAM(jsonConfig);
+ if (adapter == NULL || adapter->plugin.dl_handle == NULL)
+ return -1;
+
adapter->fpgaOpen = dlsym(adapter->plugin.dl_handle, "xfpga_fpgaOpen");
adapter->fpgaClose =
dlsym(adapter->plugin.dl_handle, "xfpga_fpgaClose"); | 1 | // Copyright(c) 2018, Intel Corporation
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Intel Corporation nor the names of its contributors
// may be used to endorse or promote products derived from this software
// without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif // HAVE_CONFIG_H
#include <dlfcn.h>
#include "xfpga.h"
#include "common_int.h"
#include "adapter.h"
#include "sysfs_int.h"
#include "opae_drv.h"
int __FPGA_API__ xfpga_plugin_initialize(void)
{
int res = sysfs_initialize();
if (res) {
return res;
}
res = opae_ioctl_initialize();
if (res) {
return res;
}
return 0;
}
int __FPGA_API__ xfpga_plugin_finalize(void)
{
sysfs_finalize();
return 0;
}
bool __FPGA_API__ xfpga_plugin_supports_device(const char *device_type)
{
UNUSED_PARAM(device_type);
return true;
}
bool __FPGA_API__ xfpga_plugin_supports_host(const char *hostname)
{
UNUSED_PARAM(hostname);
return true;
}
int __FPGA_API__ opae_plugin_configure(opae_api_adapter_table *adapter,
const char *jsonConfig)
{
UNUSED_PARAM(jsonConfig);
adapter->fpgaOpen = dlsym(adapter->plugin.dl_handle, "xfpga_fpgaOpen");
adapter->fpgaClose =
dlsym(adapter->plugin.dl_handle, "xfpga_fpgaClose");
adapter->fpgaReset =
dlsym(adapter->plugin.dl_handle, "xfpga_fpgaReset");
adapter->fpgaGetPropertiesFromHandle = dlsym(
adapter->plugin.dl_handle, "xfpga_fpgaGetPropertiesFromHandle");
adapter->fpgaGetProperties =
dlsym(adapter->plugin.dl_handle, "xfpga_fpgaGetProperties");
adapter->fpgaUpdateProperties =
dlsym(adapter->plugin.dl_handle, "xfpga_fpgaUpdateProperties");
adapter->fpgaWriteMMIO64 =
dlsym(adapter->plugin.dl_handle, "xfpga_fpgaWriteMMIO64");
adapter->fpgaReadMMIO64 =
dlsym(adapter->plugin.dl_handle, "xfpga_fpgaReadMMIO64");
adapter->fpgaWriteMMIO32 =
dlsym(adapter->plugin.dl_handle, "xfpga_fpgaWriteMMIO32");
adapter->fpgaReadMMIO32 =
dlsym(adapter->plugin.dl_handle, "xfpga_fpgaReadMMIO32");
adapter->fpgaMapMMIO =
dlsym(adapter->plugin.dl_handle, "xfpga_fpgaMapMMIO");
adapter->fpgaUnmapMMIO =
dlsym(adapter->plugin.dl_handle, "xfpga_fpgaUnmapMMIO");
adapter->fpgaEnumerate =
dlsym(adapter->plugin.dl_handle, "xfpga_fpgaEnumerate");
adapter->fpgaCloneToken =
dlsym(adapter->plugin.dl_handle, "xfpga_fpgaCloneToken");
adapter->fpgaDestroyToken =
dlsym(adapter->plugin.dl_handle, "xfpga_fpgaDestroyToken");
adapter->fpgaGetNumUmsg =
dlsym(adapter->plugin.dl_handle, "xfpga_fpgaGetNumUmsg");
adapter->fpgaSetUmsgAttributes =
dlsym(adapter->plugin.dl_handle, "xfpga_fpgaSetUmsgAttributes");
adapter->fpgaTriggerUmsg =
dlsym(adapter->plugin.dl_handle, "xfpga_fpgaTriggerUmsg");
adapter->fpgaGetUmsgPtr =
dlsym(adapter->plugin.dl_handle, "xfpga_fpgaGetUmsgPtr");
adapter->fpgaPrepareBuffer =
dlsym(adapter->plugin.dl_handle, "xfpga_fpgaPrepareBuffer");
adapter->fpgaReleaseBuffer =
dlsym(adapter->plugin.dl_handle, "xfpga_fpgaReleaseBuffer");
adapter->fpgaGetIOAddress =
dlsym(adapter->plugin.dl_handle, "xfpga_fpgaGetIOAddress");
/*
** adapter->fpgaGetOPAECVersion = dlsym(adapter->plugin.dl_handle,
*"xfpga_fpgaGetOPAECVersion");
** adapter->fpgaGetOPAECVersionString =
*dlsym(adapter->plugin.dl_handle, "xfpga_fpgaGetOPAECVersionString"); *
*adapter->fpgaGetOPAECBuildString = dlsym(adapter->plugin.dl_handle,
*"xfpga_fpgaGetOPAECBuildString");
*/
adapter->fpgaReadError =
dlsym(adapter->plugin.dl_handle, "xfpga_fpgaReadError");
adapter->fpgaClearError =
dlsym(adapter->plugin.dl_handle, "xfpga_fpgaClearError");
adapter->fpgaClearAllErrors =
dlsym(adapter->plugin.dl_handle, "xfpga_fpgaClearAllErrors");
adapter->fpgaGetErrorInfo =
dlsym(adapter->plugin.dl_handle, "xfpga_fpgaGetErrorInfo");
adapter->fpgaCreateEventHandle =
dlsym(adapter->plugin.dl_handle, "xfpga_fpgaCreateEventHandle");
adapter->fpgaDestroyEventHandle = dlsym(adapter->plugin.dl_handle,
"xfpga_fpgaDestroyEventHandle");
adapter->fpgaGetOSObjectFromEventHandle =
dlsym(adapter->plugin.dl_handle,
"xfpga_fpgaGetOSObjectFromEventHandle");
adapter->fpgaRegisterEvent =
dlsym(adapter->plugin.dl_handle, "xfpga_fpgaRegisterEvent");
adapter->fpgaUnregisterEvent =
dlsym(adapter->plugin.dl_handle, "xfpga_fpgaUnregisterEvent");
adapter->fpgaAssignPortToInterface = dlsym(
adapter->plugin.dl_handle, "xfpga_fpgaAssignPortToInterface");
adapter->fpgaAssignToInterface =
dlsym(adapter->plugin.dl_handle, "xfpga_fpgaAssignToInterface");
adapter->fpgaReleaseFromInterface = dlsym(
adapter->plugin.dl_handle, "xfpga_fpgaReleaseFromInterface");
adapter->fpgaReconfigureSlot =
dlsym(adapter->plugin.dl_handle, "xfpga_fpgaReconfigureSlot");
adapter->fpgaTokenGetObject =
dlsym(adapter->plugin.dl_handle, "xfpga_fpgaTokenGetObject");
adapter->fpgaHandleGetObject =
dlsym(adapter->plugin.dl_handle, "xfpga_fpgaHandleGetObject");
adapter->fpgaObjectGetObject =
dlsym(adapter->plugin.dl_handle, "xfpga_fpgaObjectGetObject");
adapter->fpgaObjectGetObjectAt =
dlsym(adapter->plugin.dl_handle, "xfpga_fpgaObjectGetObjectAt");
adapter->fpgaDestroyObject =
dlsym(adapter->plugin.dl_handle, "xfpga_fpgaDestroyObject");
adapter->fpgaObjectRead =
dlsym(adapter->plugin.dl_handle, "xfpga_fpgaObjectRead");
adapter->fpgaObjectRead64 =
dlsym(adapter->plugin.dl_handle, "xfpga_fpgaObjectRead64");
adapter->fpgaObjectGetSize =
dlsym(adapter->plugin.dl_handle, "xfpga_fpgaObjectGetSize");
adapter->fpgaObjectGetType =
dlsym(adapter->plugin.dl_handle, "xfpga_fpgaObjectGetType");
adapter->fpgaObjectWrite64 =
dlsym(adapter->plugin.dl_handle, "xfpga_fpgaObjectWrite64");
adapter->fpgaSetUserClock =
dlsym(adapter->plugin.dl_handle, "xfpga_fpgaSetUserClock");
adapter->fpgaGetUserClock =
dlsym(adapter->plugin.dl_handle, "xfpga_fpgaGetUserClock");
adapter->initialize =
dlsym(adapter->plugin.dl_handle, "xfpga_plugin_initialize");
adapter->finalize =
dlsym(adapter->plugin.dl_handle, "xfpga_plugin_finalize");
adapter->supports_device = dlsym(adapter->plugin.dl_handle,
"xfpga_plugin_supports_device");
adapter->supports_host =
dlsym(adapter->plugin.dl_handle, "xfpga_plugin_supports_host");
adapter->fpgaGetNumMetrics =
dlsym(adapter->plugin.dl_handle, "xfpga_fpgaGetNumMetrics");
adapter->fpgaGetMetricsInfo =
dlsym(adapter->plugin.dl_handle, "xfpga_fpgaGetMetricsInfo");
adapter->fpgaGetMetricsByIndex =
dlsym(adapter->plugin.dl_handle, "xfpga_fpgaGetMetricsByIndex");
adapter->fpgaGetMetricsByName =
dlsym(adapter->plugin.dl_handle, "xfpga_fpgaGetMetricsByName");
adapter->fpgaGetMetricsThresholdInfo =
dlsym(adapter->plugin.dl_handle, "xfpga_fpgaGetMetricsThresholdInfo");
return 0;
}
| 1 | 19,087 | These checks aren't needed. See pluginmgr.c : opae_plugin_mgr_load_dflt_plugins( ). | OPAE-opae-sdk | c |
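The reviewer's argument is that the plugin manager only calls opae_plugin_configure() after it has successfully dlopen()ed the plugin and filled in the adapter, so the added NULL checks duplicate a guarantee already made by the caller. A simplified, hypothetical sketch of that caller-side contract (types and names are stand-ins, not the real pluginmgr.c):

/* build: cc sketch.c -ldl */
#include <stddef.h>
#include <dlfcn.h>

/* Simplified stand-ins for the real adapter structures. */
struct plugin { void *dl_handle; };
struct adapter { struct plugin plugin; };

static int plugin_configure(struct adapter *adapter)
{
	/* Safe to dereference: the loader below never passes a NULL handle. */
	return dlsym(adapter->plugin.dl_handle, "xfpga_fpgaOpen") ? 0 : -1;
}

static int load_plugin(struct adapter *adapter, const char *path)
{
	adapter->plugin.dl_handle = dlopen(path, RTLD_LAZY | RTLD_LOCAL);
	if (!adapter->plugin.dl_handle)
		return -1; /* configure is only ever reached with a valid handle */
	return plugin_configure(adapter);
}

int main(void)
{
	struct adapter a;
	return load_plugin(&a, "libxfpga.so") == 0 ? 0 : 1;
}
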
@@ -554,15 +554,14 @@ class SearchRequest
/**
* Method to check if the query string is an empty string
- * (also empty string or whitespaces only are handled as empty).
+ * (also whitespaces only are handled as empty).
*
* When no query string is set (null) the method returns false.
* @return bool
*/
public function getRawUserQueryIsEmptyString()
{
- $path = $this->prefixWithNamespace('q');
- $query = $this->argumentsAccessor->get($path, null);
+ $query = $this->getRawUserQuery();
if ($query === null) {
return false; | 1 | <?php
namespace ApacheSolrForTypo3\Solr\Domain\Search;
/***************************************************************
* Copyright notice
*
* (c) 2015-2016 Timo Schmidt <[email protected]>
* All rights reserved
*
* This script is part of the TYPO3 project. The TYPO3 project is
* free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* The GNU General Public License can be found at
* http://www.gnu.org/copyleft/gpl.html.
*
* This script is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* This copyright notice MUST APPEAR in all copies of the script!
***************************************************************/
use ApacheSolrForTypo3\Solr\Domain\Search\ResultSet\Facets\UrlFacetContainer;
use ApacheSolrForTypo3\Solr\System\Configuration\TypoScriptConfiguration;
use ApacheSolrForTypo3\Solr\System\Util\ArrayAccessor;
use TYPO3\CMS\Core\Utility\ArrayUtility;
/**
* The searchRequest is used to act as an api to the arguments that have been passed
* with GET and POST.
*
* @author Timo Schmidt <[email protected]>
*/
class SearchRequest
{
/**
* The default plugin namespace.
*
* @var string
*/
const DEFAULT_PLUGIN_NAMESPACE = 'tx_solr';
/**
* @var string
*/
protected $id;
/**
* Default namespace overwritten with the configured plugin namespace.
*
* @var string
*/
protected $argumentNameSpace = self::DEFAULT_PLUGIN_NAMESPACE;
/**
* Arguments that should be kept for sub requests.
*
* Default values, overwritten in the constructor with the namespaced arguments
*
* @var array
*/
protected $persistentArgumentsPaths = ['tx_solr:q', 'tx_solr:filter', 'tx_solr:sort'];
/**
* @var bool
*/
protected $stateChanged = false;
/**
* @var ArrayAccessor
*/
protected $argumentsAccessor;
/**
     * The sys_language_uid that was used in the context where the request was built.
     * This could be different from the "L" parameter and not relevant for urls,
* because typolink itself will handle it.
*
* @var int
*/
protected $contextSystemLanguageUid;
/**
* The page_uid that was used in the context where the request was build.
*
* The pageUid is not relevant for the typolink additionalArguments and therefore
* a separate property.
*
* @var int
*/
protected $contextPageUid;
/**
* @var TypoScriptConfiguration
*/
protected $contextTypoScriptConfiguration;
/**
* Container for all active facets inside of the URL(TYPO3/FE)
*
* @var UrlFacetContainer
*/
protected $activeFacetContainer;
/**
* @var array
*/
protected $persistedArguments = [];
/**
* @param array $argumentsArray
* @param int $pageUid
* @param int $sysLanguageUid
* @param TypoScriptConfiguration|null $typoScriptConfiguration
*/
public function __construct(array $argumentsArray = [], int $pageUid = 0, int $sysLanguageUid = 0, TypoScriptConfiguration $typoScriptConfiguration = null)
{
$this->stateChanged = true;
$this->persistedArguments = $argumentsArray;
$this->contextPageUid = $pageUid;
$this->contextSystemLanguageUid = $sysLanguageUid;
$this->contextTypoScriptConfiguration = $typoScriptConfiguration;
$this->id = spl_object_hash($this);
// overwrite the plugin namespace and the persistentArgumentsPaths
if (!is_null($typoScriptConfiguration)) {
$this->argumentNameSpace = $typoScriptConfiguration->getSearchPluginNamespace() ?? self::DEFAULT_PLUGIN_NAMESPACE;
}
$this->persistentArgumentsPaths = [$this->argumentNameSpace . ':q', $this->argumentNameSpace . ':filter', $this->argumentNameSpace . ':sort', $this->argumentNameSpace . ':groupPage'];
if (!is_null($typoScriptConfiguration)) {
$additionalPersistentArgumentsNames = $typoScriptConfiguration->getSearchAdditionalPersistentArgumentNames();
foreach ($additionalPersistentArgumentsNames ?? [] as $additionalPersistentArgumentsName) {
$this->persistentArgumentsPaths[] = $this->argumentNameSpace . ':' . $additionalPersistentArgumentsName;
}
$this->persistentArgumentsPaths = array_unique($this->persistentArgumentsPaths);
}
$this->reset();
}
/**
* @return string
*/
public function getId()
{
return $this->id;
}
/**
     * Can be used to merge arguments into the request arguments
*
* @param array $argumentsToMerge
* @return SearchRequest
*/
public function mergeArguments(array $argumentsToMerge)
{
ArrayUtility::mergeRecursiveWithOverrule(
$this->persistedArguments,
$argumentsToMerge
);
$this->reset();
return $this;
}
/**
* Helper method to prefix an accessor with the arguments namespace.
*
* @param string $path
* @return string
*/
protected function prefixWithNamespace($path)
{
return $this->argumentNameSpace . ':' . $path;
}
/**
* @return array
*/
public function getActiveFacetNames()
{
return $this->activeFacetContainer->getActiveFacetNames();
}
/**
* Returns all facet values for a certain facetName
* @param string $facetName
* @return array
*/
public function getActiveFacetValuesByName(string $facetName)
{
return $this->activeFacetContainer->getActiveFacetValuesByName($facetName);
}
/**
* @return array
*/
public function getActiveFacets()
{
return $this->activeFacetContainer->getActiveFacets();
}
/**
* Enable sorting of URL parameters
*/
public function sortActiveFacets(): void
{
$this->activeFacetContainer->enableSort();
}
/**
* @return bool
*/
public function isActiveFacetsSorted(): bool
{
return $this->activeFacetContainer->isSorted();
}
/**
* @return string
*/
public function getActiveFacetsUrlParameterStyle(): string
{
return $this->activeFacetContainer->getParameterStyle();
}
/**
* Returns the active count of facets
*
* @return int
*/
public function getActiveFacetCount()
{
return $this->activeFacetContainer->count();
}
/**
* @param array $activeFacets
*
* @return SearchRequest
*/
protected function setActiveFacets($activeFacets = [])
{
$this->activeFacetContainer->setActiveFacets($activeFacets);
return $this;
}
/**
* Adds a facet value to the request.
*
* @param string $facetName
* @param mixed $facetValue
*
* @return SearchRequest
*/
public function addFacetValue(string $facetName, $facetValue)
{
$this->activeFacetContainer->addFacetValue($facetName, $facetValue);
if ($this->activeFacetContainer->hasChanged()) {
$this->stateChanged = true;
$this->activeFacetContainer->acknowledgeChange();
}
return $this;
}
/**
* Removes a facet value from the request.
*
* @param string $facetName
* @param mixed $facetValue
*
* @return SearchRequest
*/
public function removeFacetValue(string $facetName, $facetValue)
{
$this->activeFacetContainer->removeFacetValue($facetName, $facetValue);
if ($this->activeFacetContainer->hasChanged()) {
$this->stateChanged = true;
$this->activeFacetContainer->acknowledgeChange();
}
return $this;
}
/**
* Removes all facet values from the request by a certain facet name
*
* @param string $facetName
*
* @return SearchRequest
*/
public function removeAllFacetValuesByName(string $facetName)
{
$this->activeFacetContainer->removeAllFacetValuesByName($facetName);
if ($this->activeFacetContainer->hasChanged()) {
$this->stateChanged = true;
$this->activeFacetContainer->acknowledgeChange();
}
return $this;
}
/**
* Removes all active facets from the request.
*
* @return SearchRequest
*/
public function removeAllFacets()
{
$this->activeFacetContainer->removeAllFacets();
if ($this->activeFacetContainer->hasChanged()) {
$this->stateChanged = true;
$this->activeFacetContainer->acknowledgeChange();
}
return $this;
}
/**
* Check if an active facet has a given value
*
* @param string $facetName
* @param mixed $facetValue
* @return bool
*/
public function getHasFacetValue(string $facetName, $facetValue): bool
{
return $this->activeFacetContainer->hasFacetValue($facetName, $facetValue);
}
/**
* @return bool
*/
public function getHasSorting()
{
$path = $this->prefixWithNamespace('sort');
return $this->argumentsAccessor->has($path);
}
/**
* Returns the sorting string in the url e.g. title asc.
*
* @return string
*/
public function getSorting()
{
$path = $this->prefixWithNamespace('sort');
return $this->argumentsAccessor->get($path, '');
}
/**
* Helper function to get the sorting configuration name or direction.
*
* @param int $index
* @return string
*/
protected function getSortingPart($index)
{
$sorting = $this->getSorting();
if ($sorting === '') {
return null;
}
$parts = explode(' ', $sorting);
return isset($parts[$index]) ? $parts[$index] : null;
}
/**
* Returns the sorting configuration name that is currently used.
*
* @return string
*/
public function getSortingName()
{
return $this->getSortingPart(0);
}
/**
* Returns the sorting direction that is currently used.
*
* @return string
*/
public function getSortingDirection()
{
return mb_strtolower($this->getSortingPart(1));
}
/**
* @return SearchRequest
*/
public function removeSorting()
{
$path = $this->prefixWithNamespace('sort');
$this->argumentsAccessor->reset($path);
$this->stateChanged = true;
return $this;
}
/**
* @param string $sortingName
* @param string $direction (asc or desc)
*
* @return SearchRequest
*/
public function setSorting($sortingName, $direction = 'asc')
{
$value = $sortingName . ' ' . $direction;
$path = $this->prefixWithNamespace('sort');
$this->argumentsAccessor->set($path, $value);
$this->stateChanged = true;
return $this;
}
/**
* Method to set the paginated page of the search
*
* @param int $page
* @return SearchRequest
*/
public function setPage($page)
{
$this->stateChanged = true;
$path = $this->prefixWithNamespace('page');
$this->argumentsAccessor->set($path, $page);
// use initial url by switching back to page 0
if ($page === 0) {
$this->argumentsAccessor->reset($path);
}
return $this;
}
/**
* Returns the passed page.
*
* @return int|null
*/
public function getPage()
{
$path = $this->prefixWithNamespace('page');
return $this->argumentsAccessor->get($path);
}
/**
* Can be used to reset all groupPages.
*
* @return SearchRequest
*/
public function removeAllGroupItemPages(): SearchRequest
{
$path = $this->prefixWithNamespace('groupPage');
$this->argumentsAccessor->reset($path);
return $this;
}
/**
* Can be used to paginate within a groupItem.
*
* @param string $groupName e.g. type
* @param string $groupItemValue e.g. pages
* @param int $page
* @return SearchRequest
*/
public function setGroupItemPage(string $groupName, string $groupItemValue, int $page): SearchRequest
{
$this->stateChanged = true;
$escapedValue = $this->getEscapedGroupItemValue($groupItemValue);
$path = $this->prefixWithNamespace('groupPage:' . $groupName . ':' . $escapedValue);
$this->argumentsAccessor->set($path, $page);
return $this;
}
/**
* Retrieves the current page for this group item.
*
* @param string $groupName
* @param string $groupItemValue
* @return int
*/
public function getGroupItemPage(string $groupName, string $groupItemValue): int
{
$escapedValue = $this->getEscapedGroupItemValue($groupItemValue);
$path = $this->prefixWithNamespace('groupPage:' . $groupName . ':' . $escapedValue);
return max(1, (int)$this->argumentsAccessor->get($path));
}
/**
* Removes all non alphanumeric values from the groupItem value to have a valid array key.
*
* @param string $groupItemValue
* @return string
*/
protected function getEscapedGroupItemValue(string $groupItemValue)
{
return preg_replace("/[^A-Za-z0-9]/", '', $groupItemValue);
}
/**
* Retrieves the highest page of the groups.
*
* @return int
*/
public function getHighestGroupPage()
{
$max = 1;
$path = $this->prefixWithNamespace('groupPage');
$groupPages = $this->argumentsAccessor->get($path, []);
foreach ($groupPages as $groups) {
if (!is_array($groups)) continue;
foreach ($groups as $groupItemPage) {
if ($groupItemPage > $max) {
$max = $groupItemPage;
}
}
}
return $max;
}
/**
* Method to overwrite the query string.
*
* @param string $rawQueryString
* @return SearchRequest
*/
public function setRawQueryString($rawQueryString)
{
$this->stateChanged = true;
$path = $this->prefixWithNamespace('q');
$this->argumentsAccessor->set($path, $rawQueryString);
return $this;
}
/**
* Returns the passed rawQueryString.
*
* @return string|null
*/
public function getRawUserQuery()
{
$path = $this->prefixWithNamespace('q');
$query = $this->argumentsAccessor->get($path, null);
return is_null($query) ? $query : (string)$query;
}
/**
* Method to check if the query string is an empty string
* (also empty string or whitespaces only are handled as empty).
*
* When no query string is set (null) the method returns false.
* @return bool
*/
public function getRawUserQueryIsEmptyString()
{
$path = $this->prefixWithNamespace('q');
$query = $this->argumentsAccessor->get($path, null);
if ($query === null) {
return false;
}
if (trim($query) === '') {
return true;
}
return false;
}
/**
* This method returns true when no querystring is present at all.
* Which means no search by the user was triggered
*
* @return bool
*/
public function getRawUserQueryIsNull()
{
$path = $this->prefixWithNamespace('q');
$query = $this->argumentsAccessor->get($path, null);
return $query === null;
}
/**
* Sets the results per page that are used during search.
*
* @param int $resultsPerPage
* @return SearchRequest
*/
public function setResultsPerPage($resultsPerPage)
{
$path = $this->prefixWithNamespace('resultsPerPage');
$this->argumentsAccessor->set($path, $resultsPerPage);
$this->stateChanged = true;
return $this;
}
/**
* @return bool
*/
public function getStateChanged()
{
return $this->stateChanged;
}
/**
* Returns the passed resultsPerPage value
* @return int|null
*/
public function getResultsPerPage()
{
$path = $this->prefixWithNamespace('resultsPerPage');
return $this->argumentsAccessor->get($path);
}
/**
     * Allows setting additional filters that are applied at runtime and not transported during the request.
*
* @param array $additionalFilters
* @return SearchRequest
*/
public function setAdditionalFilters($additionalFilters)
{
$path = $this->prefixWithNamespace('additionalFilters');
$this->argumentsAccessor->set($path, $additionalFilters);
$this->stateChanged = true;
return $this;
}
/**
     * Retrieves the additional filters that have been set
*
* @return array
*/
public function getAdditionalFilters()
{
$path = $this->prefixWithNamespace('additionalFilters');
return $this->argumentsAccessor->get($path, []);
}
/**
* @return int
*/
public function getContextSystemLanguageUid()
{
return $this->contextSystemLanguageUid;
}
/**
* @return int
*/
public function getContextPageUid()
{
return $this->contextPageUid;
}
/**
* Get contextTypoScriptConfiguration
*
* @return TypoScriptConfiguration
*/
public function getContextTypoScriptConfiguration(): ?TypoScriptConfiguration
{
return $this->contextTypoScriptConfiguration;
}
/**
* Assigns the last known persistedArguments and restores their state.
*
* @return SearchRequest
*/
public function reset(): SearchRequest
{
$this->argumentsAccessor = new ArrayAccessor($this->persistedArguments);
$this->stateChanged = false;
$this->activeFacetContainer = new UrlFacetContainer(
$this->argumentsAccessor,
$this->argumentNameSpace ?? self::DEFAULT_PLUGIN_NAMESPACE,
$this->contextTypoScriptConfiguration === null ?
UrlFacetContainer::PARAMETER_STYLE_INDEX :
$this->contextTypoScriptConfiguration->getSearchFacetingUrlParameterStyle()
);
// If the default of sorting parameter should be true, a modification of this condition is needed.
// If instance of contextTypoScriptConfiguration is not TypoScriptConfiguration the sort should be enabled too
if ($this->contextTypoScriptConfiguration instanceof TypoScriptConfiguration &&
$this->contextTypoScriptConfiguration->getSearchFacetingUrlParameterSort(false)) {
$this->activeFacetContainer->enableSort();
}
return $this;
}
/**
* This can be used to start a new sub request, e.g. for a faceted search.
*
* @param bool $onlyPersistentArguments
* @return SearchRequest
*/
public function getCopyForSubRequest(bool $onlyPersistentArguments = true): SearchRequest
{
if (!$onlyPersistentArguments) {
// create a new request with all data
$argumentsArray = $this->argumentsAccessor->getData();
return new SearchRequest(
$argumentsArray,
$this->contextPageUid,
$this->contextSystemLanguageUid,
$this->contextTypoScriptConfiguration
);
}
$arguments = new ArrayAccessor();
foreach ($this->persistentArgumentsPaths as $persistentArgumentPath) {
if ($this->argumentsAccessor->has($persistentArgumentPath)) {
$arguments->set($persistentArgumentPath, $this->argumentsAccessor->get($persistentArgumentPath));
}
}
return new SearchRequest(
$arguments->getData(),
$this->contextPageUid,
$this->contextSystemLanguageUid,
$this->contextTypoScriptConfiguration
);
}
/**
* @return string
*/
public function getArgumentNamespace(): string
{
return $this->argumentNameSpace;
}
/**
* @return array
*/
public function getAsArray(): array
{
return $this->argumentsAccessor->getData();
}
/**
* Returns only the arguments as array.
*
* @return array
*/
public function getArguments(): array
{
return $this->argumentsAccessor->get($this->argumentNameSpace) ?? [];
}
}
| 1 | 6,937 | Please take a care about prefixing. | TYPO3-Solr-ext-solr | php |
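The prefixing the reviewer asks to watch is that the raw query lives under the plugin namespace (tx_solr by default), so it must always be read through the prefixed path — which getRawUserQuery() already does. A reduced sketch of that namespaced lookup using a hypothetical mini class, not the real SearchRequest:

<?php
// Hypothetical, reduced illustration of the namespaced lookup.
class MiniSearchRequest
{
    private $namespace = 'tx_solr';
    private $arguments;

    public function __construct(array $arguments)
    {
        $this->arguments = $arguments;
    }

    private function prefixWithNamespace(string $path): string
    {
        return $this->namespace . ':' . $path;
    }

    public function getRawUserQuery(): ?string
    {
        [$namespace, $key] = explode(':', $this->prefixWithNamespace('q'));
        return $this->arguments[$namespace][$key] ?? null;
    }

    public function getRawUserQueryIsEmptyString(): bool
    {
        $query = $this->getRawUserQuery();
        return $query !== null && trim($query) === '';
    }
}

$request = new MiniSearchRequest(['tx_solr' => ['q' => '   ']]);
var_dump($request->getRawUserQueryIsEmptyString()); // bool(true)
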
@@ -70,6 +70,17 @@ public interface WorldUpdater extends MutableWorldView {
return account == null ? createAccount(address) : account;
}
+ /**
+ * Retrieves the provided account for a sender of a transaction if it exists, or creates it if it
+ * doesn't.
+ *
+ * @param address the address of the account.
+ * @return the account {@code address}, or {@code null} if the account does not exist.
+ */
+ default EvmAccount getOrCreateSenderAccount(final Address address) {
+ return getOrCreate(address);
+ }
+
/**
* Retrieves the provided account, returning a modifiable object (whose updates are accumulated by
* this updater). | 1 | /*
* Copyright ConsenSys AG.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.ethereum.core;
import java.util.Collection;
import java.util.Optional;
/**
* An object that buffers updates made over a particular {@link WorldView}.
*
* <p>All changes made to this object, being it account creation/deletion or account modifications
* through {@link MutableAccount}, are immediately reflected on this object (so for instance,
* deleting an account and trying to get it afterwards will return {@code null}) but do not impact
* whichever {@link WorldView} this is an updater for until the {@link #commit} method is called.
*/
public interface WorldUpdater extends MutableWorldView {
/**
* Creates a new account, or reset it (that is, act as if it was deleted and created anew) if it
* already exists.
*
   * <p>After this call, the account will exist and will have the provided nonce and balance. Its
* code and storage will be empty.
*
* @param address the address of the account to create (or reset).
* @param nonce the nonce for created/reset account.
* @param balance the balance for created/reset account.
* @return the account {@code address}, which will have nonce {@code nonce}, balance {@code
* balance} and empty code and storage.
*/
EvmAccount createAccount(Address address, long nonce, Wei balance);
/**
* Creates a new account, or reset it (that is, act as if it was deleted and created anew) if it
* already exists.
*
* <p>This call is equivalent to {@link #createAccount(Address, long, Wei)} but defaults both the
* nonce and balance to zero.
*
* @param address the address of the account to create (or reset).
* @return the account {@code address}, which will have 0 for the nonce and balance and empty code
* and storage.
*/
default EvmAccount createAccount(final Address address) {
return createAccount(address, Account.DEFAULT_NONCE, Account.DEFAULT_BALANCE);
}
/**
   * Retrieves the provided account if it exists, or creates it if it doesn't.
*
* @param address the address of the account.
* @return the account {@code address}. If that account exists, it is returned as if by {@link
* #getAccount(Address)}, otherwise, it is created and returned as if by {@link
   *     #createAccount(Address)} (and thus all its fields will be zero/empty).
*/
default EvmAccount getOrCreate(final Address address) {
final EvmAccount account = getAccount(address);
return account == null ? createAccount(address) : account;
}
/**
* Retrieves the provided account, returning a modifiable object (whose updates are accumulated by
* this updater).
*
* @param address the address of the account.
* @return the account {@code address}, or {@code null} if the account does not exist.
*/
EvmAccount getAccount(Address address);
/**
* Deletes the provided account.
*
* @param address the address of the account to delete. If that account doesn't exists prior to
* this call, this is a no-op.
*/
void deleteAccount(Address address);
/**
* Returns the accounts that have been touched within the scope of this updater.
*
* @return the accounts that have been touched within the scope of this updater
*/
Collection<? extends Account> getTouchedAccounts();
/**
* Returns the account addresses that have been deleted within the scope of this updater.
*
* @return the account addresses that have been deleted within the scope of this updater
*/
Collection<Address> getDeletedAccountAddresses();
/** Removes the changes that were made to this updater. */
void revert();
/**
* Commits the changes made to this updater to the underlying {@link WorldView} this is an updater
* of.
*/
void commit();
/**
* The parent updater (if it exists).
*
* @return The parent WorldUpdater if this wraps another one, empty otherwise
*/
Optional<WorldUpdater> parentUpdater();
}
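The interface above documents the updater's semantics but shows no call sequence. The following sketch is purely illustrative and is not code from the Besu repository: the class and method names are made up, the import locations are assumed, and only the interface methods shown above are exercised.

// Assumed import locations, shown for completeness only:
// import org.hyperledger.besu.ethereum.core.Address;
// import org.hyperledger.besu.ethereum.core.EvmAccount;
// import org.hyperledger.besu.ethereum.core.Wei;
// import org.hyperledger.besu.ethereum.core.WorldUpdater;
final class WorldUpdaterSketch {
  /** Resets an account and publishes the change to the wrapped WorldView. */
  static void resetAndCommit(final WorldUpdater updater, final Address address, final Wei balance) {
    // Acts as if the account were deleted and created anew: nonce 0, the
    // given balance, empty code and storage.
    final EvmAccount account = updater.createAccount(address, 0L, balance);
    // The change is visible through this updater immediately...
    assert updater.getAccount(address) != null;
    // ...but the wrapped WorldView only sees it once commit() is called.
    updater.commit();
  }
  /** Deletes an account speculatively, then throws the change away. */
  static void speculate(final WorldUpdater updater, final Address address) {
    updater.deleteAccount(address);
    // Within this updater's scope the account is now gone...
    assert updater.getAccount(address) == null;
    // ...and revert() discards the deletion instead of committing it.
    updater.revert();
  }
}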
| 1 | 24,568 | this line doesn't match what the rest says | hyperledger-besu | java |
@@ -1,4 +1,4 @@
-// +build !linux linux,android
+// +build windows linux,android
/*
* Copyright (C) 2017 The "MysteriumNetwork/node" Authors. | 1 | // +build !linux linux,android
/*
* Copyright (C) 2017 The "MysteriumNetwork/node" Authors.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package service
import (
"github.com/mysteriumnetwork/node/core/service"
service_noop "github.com/mysteriumnetwork/node/services/noop"
service_openvpn "github.com/mysteriumnetwork/node/services/openvpn"
"github.com/urfave/cli"
)
var (
serviceTypesAvailable = []string{"openvpn", "noop"}
serviceTypesEnabled = []string{"openvpn", "noop"}
serviceTypesFlagsParser = map[string]func(ctx *cli.Context) service.Options{
service_noop.ServiceType: parseNoopFlags,
service_openvpn.ServiceType: parseOpenvpnFlags,
}
)
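The diff just above this file swaps !linux for windows in the build constraint. As a reference for reading such lines, here is a hypothetical file (not a proposed change to this package) spelling out how the terms combine: space-separated terms on one +build line are ORed, while comma-separated terms are ANDed.

// +build windows !linux linux,android

// Package buildtagdemo exists only to illustrate build-constraint semantics:
// with the line above, the file builds on Windows, on any non-Linux platform,
// or on Linux when the android build tag is also set.
package buildtagdemo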
| 1 | 13,152 | Since it's not working for linux, maybe leave the !linux in there as well? | mysteriumnetwork-node | go |
@@ -60,6 +60,9 @@ func (i *IncludeWorkflow) populate(ctx context.Context, s *Step) error {
// Copy Sources up to parent resolving relative paths as we go.
for k, v := range i.w.Sources {
+ if v == "" {
+ continue
+ }
if _, ok := s.w.Sources[k]; ok {
return fmt.Errorf("source %q already exists in workflow", k)
}
| 1 |
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package workflow
import (
"context"
"fmt"
"path/filepath"
"reflect"
"strings"
)
// IncludeWorkflow defines a Daisy workflow injection step. This step will
// 'include' the workflow found at the path given into the parent workflow. Unlike
// a Subworkflow the included workflow will exist in the same namespace
// as the parent and have access to all its resources.
type IncludeWorkflow struct {
Path string
Vars map[string]string `json:",omitempty"`
w *Workflow
}
func (i *IncludeWorkflow) populate(ctx context.Context, s *Step) error {
i.w.GCSPath = s.w.GCSPath
i.w.Name = s.name
i.w.Project = s.w.Project
i.w.Zone = s.w.Zone
i.w.autovars = s.w.autovars
for k, v := range i.Vars {
i.w.AddVar(k, v)
}
var replacements []string
for k, v := range i.w.autovars {
if k == "NAME" {
v = s.name
}
if k == "WFDIR" {
v = i.w.workflowDir
}
replacements = append(replacements, fmt.Sprintf("${%s}", k), v)
}
for k, v := range i.w.Vars {
replacements = append(replacements, fmt.Sprintf("${%s}", k), v.Value)
}
substitute(reflect.ValueOf(i.w).Elem(), strings.NewReplacer(replacements...))
// Copy Sources up to parent resolving relative paths as we go.
for k, v := range i.w.Sources {
if _, ok := s.w.Sources[k]; ok {
return fmt.Errorf("source %q already exists in workflow", k)
}
if s.w.Sources == nil {
s.w.Sources = map[string]string{}
}
if _, _, err := splitGCSPath(v); err != nil && !filepath.IsAbs(v) {
v = filepath.Join(i.w.workflowDir, v)
}
s.w.Sources[k] = v
}
for name, st := range i.w.Steps {
st.name = name
st.w = s.w
if err := st.w.populateStep(ctx, st); err != nil {
return err
}
}
return nil
}
func (i *IncludeWorkflow) validate(ctx context.Context, s *Step) error {
return i.w.validate(ctx)
}
func (i *IncludeWorkflow) run(ctx context.Context, s *Step) error {
return i.w.run(ctx)
}
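The hunk above adds an "if v == "" { continue }" guard to the source-copying loop in populate. Below is a self-contained sketch of that loop with the guard applied; the function name copySources and the standalone shape are illustrative only, and the GCS-path handling done by splitGCSPath in the real code is omitted.

package main

import (
	"fmt"
	"path/filepath"
)

// copySources mirrors the loop in populate: empty values are skipped (the
// behaviour the patch adds), duplicate keys are rejected, and relative local
// paths are resolved against the included workflow's directory.
func copySources(parent, child map[string]string, childDir string) error {
	for k, v := range child {
		if v == "" {
			continue
		}
		if _, ok := parent[k]; ok {
			return fmt.Errorf("source %q already exists in workflow", k)
		}
		if !filepath.IsAbs(v) {
			v = filepath.Join(childDir, v)
		}
		parent[k] = v
	}
	return nil
}

func main() {
	parent := map[string]string{}
	child := map[string]string{"startup": "scripts/startup.sh", "empty": ""}
	if err := copySources(parent, child, "/wf/include"); err != nil {
		fmt.Println(err)
	}
	fmt.Println(parent) // map[startup:/wf/include/scripts/startup.sh]
}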
| 1 | 6,618 | Should we remove the key-value pair entirely? Same for workflows in general. We have a "sourceExists" method that checks if a key is there if I remember correctly. | GoogleCloudPlatform-compute-image-tools | go |
@@ -96,7 +96,7 @@ public class ContentSoqlSyncDownTarget extends SoqlSyncDownTarget {
@Override
public JSONArray startFetch(SyncManager syncManager, long maxTimeStamp) throws IOException, JSONException {
- String queryToRun = maxTimeStamp > 0 ? SoqlSyncDownTarget.addFilterForReSync(getQuery(), maxTimeStamp) : getQuery();
+ String queryToRun = maxTimeStamp > 0 ? SoqlSyncDownTarget.addFilterForReSync(getQuery(), getModificationDateFieldName(), maxTimeStamp) : getQuery();
syncManager.getRestClient().sendSync(RestRequest.getRequestForResources(syncManager.apiVersion)); // cheap call to refresh session
RestRequest request = buildQueryRequest(syncManager.getRestClient().getAuthToken(), queryToRun);
RestResponse response = syncManager.sendSyncWithSmartSyncUserAgent(request);
| 1 |
/*
* Copyright (c) 2015, salesforce.com, inc.
* All rights reserved.
* Redistribution and use of this software in source and binary forms, with or
* without modification, are permitted provided that the following conditions
* are met:
* - Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* - Neither the name of salesforce.com, inc. nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission of salesforce.com, inc.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
package com.salesforce.samples.notesync;
import android.util.Log;
import android.util.Xml;
import com.salesforce.androidsdk.app.SalesforceSDKManager;
import com.salesforce.androidsdk.rest.ApiVersionStrings;
import com.salesforce.androidsdk.rest.RestRequest;
import com.salesforce.androidsdk.rest.RestResponse;
import com.salesforce.androidsdk.smartsync.manager.SyncManager;
import com.salesforce.androidsdk.smartsync.util.Constants;
import com.salesforce.androidsdk.smartsync.util.SoqlSyncDownTarget;
import org.json.JSONArray;
import org.json.JSONException;
import org.json.JSONObject;
import org.xmlpull.v1.XmlPullParser;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.util.HashMap;
import java.util.Map;
import okhttp3.MediaType;
import okhttp3.RequestBody;
/**
* Target for sync defined by a SOQL query
*/
public class ContentSoqlSyncDownTarget extends SoqlSyncDownTarget {
private static final MediaType MEDIA_TYPE_XML = MediaType.parse("text/xml; charset=utf-8");
private static final String REQUEST_TEMPLATE = "<?xml version=\"1.0\"?>" +
"<se:Envelope xmlns:se=\"http://schemas.xmlsoap.org/soap/envelope/\">" +
"<se:Header xmlns:sfns=\"urn:partner.soap.sforce.com\">" +
"<sfns:SessionHeader><sessionId>%s</sessionId></sfns:SessionHeader>" +
"</se:Header>" +
"<se:Body>%s</se:Body>" +
"</se:Envelope>";
private static final String QUERY_TEMPLATE = "<query xmlns=\"urn:partner.soap.sforce.com\" xmlns:ns1=\"sobject.partner.soap.sforce.com\">" +
"<queryString>%s</queryString></query>";
private static final String QUERY_MORE_TEMPLATE = "<queryMore xmlns=\"urn:partner.soap.sforce.com\" xmlns:ns1=\"sobject.partner.soap.sforce.com\">\n"+
" <queryLocator>%s</queryLocator>\n"+
" </queryMore>";
public static final String RESULT = "result";
public static final String RECORDS = "records";
public static final String SF = "sf:";
public static final String QUERY_LOCATOR = "queryLocator";
public static final String SIZE = "size";
public static final String DONE = "done";
public static final String TYPE = "type";
private String queryLocator;
/**
* Construct ContentSoqlSyncDownTarget from json
* @param target
* @throws JSONException
*/
public ContentSoqlSyncDownTarget(JSONObject target) throws JSONException {
super(target);
}
@Override
public JSONArray startFetch(SyncManager syncManager, long maxTimeStamp) throws IOException, JSONException {
String queryToRun = maxTimeStamp > 0 ? SoqlSyncDownTarget.addFilterForReSync(getQuery(), maxTimeStamp) : getQuery();
syncManager.getRestClient().sendSync(RestRequest.getRequestForResources(syncManager.apiVersion)); // cheap call to refresh session
RestRequest request = buildQueryRequest(syncManager.getRestClient().getAuthToken(), queryToRun);
RestResponse response = syncManager.sendSyncWithSmartSyncUserAgent(request);
JSONArray records = parseSoapResponse(response);
return records;
}
@Override
public JSONArray continueFetch(SyncManager syncManager) throws IOException, JSONException {
if (queryLocator == null) {
return null;
}
RestRequest request = buildQueryMoreRequest(syncManager.getRestClient().getAuthToken(), queryLocator);
RestResponse response = syncManager.sendSyncWithSmartSyncUserAgent(request);
JSONArray records = parseSoapResponse(response);
return records;
}
/**
* @param sessionId
* @param query
* @return request to run a soql query
* @throws UnsupportedEncodingException
*/
private RestRequest buildQueryRequest(String sessionId, String query) throws UnsupportedEncodingException {
return buildSoapRequest(sessionId, String.format(QUERY_TEMPLATE, query));
}
/**
* @param sessionId
* @param locator
* @return request to do queryMore
* @throws UnsupportedEncodingException
*/
private RestRequest buildQueryMoreRequest(String sessionId, String locator) throws UnsupportedEncodingException {
return buildSoapRequest(sessionId, String.format(QUERY_MORE_TEMPLATE, locator));
}
/**
*
* @param sessionId
* @param body
* @return request for soap call
* @throws UnsupportedEncodingException
*/
private RestRequest buildSoapRequest(String sessionId, String body) throws UnsupportedEncodingException {
Map<String, String> customHeaders = new HashMap<String, String>();
customHeaders.put("SOAPAction", "\"\"");
RequestBody requestBody = RequestBody.create(MEDIA_TYPE_XML, String.format(REQUEST_TEMPLATE, sessionId, body));
String version = ApiVersionStrings.getVersionNumber(SalesforceSDKManager.getInstance().getAppContext()).substring(1); /* no v */;
return new RestRequest(RestRequest.RestMethod.POST, "/services/Soap/u/" + version, requestBody, customHeaders);
}
/**
* @param response returned by soap soql request - also sets totalSize field
* @return
*/
private JSONArray parseSoapResponse(RestResponse response) {
JSONArray records = null;
try {
XmlPullParser parser = Xml.newPullParser();
parser.setFeature(XmlPullParser.FEATURE_PROCESS_NAMESPACES, false);
parser.setInput(new ByteArrayInputStream(response.asBytes()), null);
JSONObject record = null;
boolean inDocument = true;
boolean inResults = false;
boolean inRecord = false;
boolean queryDone = false;
while(inDocument) {
int next = parser.next();
switch (next) {
case XmlPullParser.START_TAG:
Log.i("----> Starting TAG", parser.getName());
if (parser.getName().equals(RESULT)) {
inResults = true;
records = new JSONArray();
}
else if (inResults && parser.getName().equals(RECORDS)) {
inRecord = true;
record = new JSONObject();
}
else if (inResults && parser.getName().equals(DONE)) {
queryDone = Boolean.parseBoolean(parser.nextText());
}
else if (inResults && parser.getName().equals(QUERY_LOCATOR)) {
queryLocator = queryDone ? null : parser.nextText();
}
else if (inResults && parser.getName().equals(SIZE)) {
totalSize = Integer.parseInt(parser.nextText());
}
else if (inRecord && parser.getName().startsWith(SF)) {
String attributeName = parser.getName().substring(SF.length());
String attributeValue = parser.nextText();
if (attributeName.equals(TYPE)) {
JSONObject t = new JSONObject();
t.put(TYPE, attributeValue);
record.put(Constants.ATTRIBUTES, t);
}
else {
record.put(attributeName, attributeValue);
}
}
break;
case XmlPullParser.END_TAG:
Log.i("----> Ending TAG", parser.getName());
if (inRecord && parser.getName().equals(RECORDS)) {
inRecord = false;
records.put(record);
}
else if (inResults && parser.getName().equals(RESULT)) {
inResults = false;
}
break;
case XmlPullParser.END_DOCUMENT:
inDocument = false;
break;
}
}
totalSize = records.length();
} catch (Exception e) {
Log.e("ContentSoqlSyncDownT..t", "parseSoapResponse - Parsing failed", e);
}
return records;
}
}
| 1 | 15,589 | Unrelated bug fix | forcedotcom-SalesforceMobileSDK-Android | java |
@@ -173,15 +173,15 @@ define(['serverNotifications', 'playbackManager', 'events', 'globalize', 'requir
};
if (status === 'completed') {
- notification.title = globalize.translate('PackageInstallCompleted').replace('{0}', installation.Name + ' ' + installation.Version);
+ notification.title = globalize.translate('PackageInstallCompleted', installation.Name + ' ' + installation.Version);
notification.vibrate = true;
} else if (status === 'cancelled') {
- notification.title = globalize.translate('PackageInstallCancelled').replace('{0}', installation.Name + ' ' + installation.Version);
+ notification.title = globalize.translate('PackageInstallCancelled', installation.Name + ' ' + installation.Version);
} else if (status === 'failed') {
- notification.title = globalize.translate('PackageInstallFailed').replace('{0}', installation.Name + ' ' + installation.Version);
+ notification.title = globalize.translate('PackageInstallFailed', installation.Name + ' ' + installation.Version);
notification.vibrate = true;
} else if (status === 'progress') {
- notification.title = globalize.translate('InstallingPackage').replace('{0}', installation.Name + ' ' + installation.Version);
+ notification.title = globalize.translate('InstallingPackage', installation.Name + ' ' + installation.Version);
notification.actions =
[
| 1 |
define(['serverNotifications', 'playbackManager', 'events', 'globalize', 'require'], function (serverNotifications, playbackManager, events, globalize, require) {
'use strict';
function onOneDocumentClick() {
document.removeEventListener('click', onOneDocumentClick);
document.removeEventListener('keydown', onOneDocumentClick);
if (window.Notification) {
Notification.requestPermission();
}
}
document.addEventListener('click', onOneDocumentClick);
document.addEventListener('keydown', onOneDocumentClick);
var serviceWorkerRegistration;
function closeAfter(notification, timeoutMs) {
setTimeout(function () {
if (notification.close) {
notification.close();
} else if (notification.cancel) {
notification.cancel();
}
}, timeoutMs);
}
function resetRegistration() {
var serviceWorker = navigator.serviceWorker;
if (serviceWorker) {
serviceWorker.ready.then(function (registration) {
serviceWorkerRegistration = registration;
});
}
}
resetRegistration();
function showPersistentNotification(title, options, timeoutMs) {
serviceWorkerRegistration.showNotification(title, options);
}
function showNonPersistentNotification(title, options, timeoutMs) {
try {
var notif = new Notification(title, options);
if (notif.show) {
notif.show();
}
if (timeoutMs) {
closeAfter(notif, timeoutMs);
}
} catch (err) {
if (options.actions) {
options.actions = [];
showNonPersistentNotification(title, options, timeoutMs);
} else {
throw err;
}
}
}
function showNotification(options, timeoutMs, apiClient) {
var title = options.title;
options.data = options.data || {};
options.data.serverId = apiClient.serverInfo().Id;
options.icon = options.icon || getIconUrl();
options.badge = options.badge || getIconUrl('badge.png');
resetRegistration();
if (serviceWorkerRegistration) {
showPersistentNotification(title, options, timeoutMs);
return;
}
showNonPersistentNotification(title, options, timeoutMs);
}
function showNewItemNotification(item, apiClient) {
if (playbackManager.isPlayingLocally(['Video'])) {
return;
}
var body = item.Name;
if (item.SeriesName) {
body = item.SeriesName + ' - ' + body;
}
var notification = {
title: "New " + item.Type,
body: body,
vibrate: true,
tag: "newItem" + item.Id,
data: {
//options: {
// url: LibraryBrowser.getHref(item)
//}
}
};
var imageTags = item.ImageTags || {};
if (imageTags.Primary) {
notification.icon = apiClient.getScaledImageUrl(item.Id, {
width: 80,
tag: imageTags.Primary,
type: "Primary"
});
}
showNotification(notification, 15000, apiClient);
}
function onLibraryChanged(data, apiClient) {
var newItems = data.ItemsAdded;
if (!newItems.length) {
return;
}
// Don't put a massive number of Id's onto the query string
if (newItems.length > 12) {
newItems.length = 12;
}
apiClient.getItems(apiClient.getCurrentUserId(), {
Recursive: true,
Limit: 3,
Filters: "IsNotFolder",
SortBy: "DateCreated",
SortOrder: "Descending",
Ids: newItems.join(','),
MediaTypes: "Audio,Video",
EnableTotalRecordCount: false
}).then(function (result) {
var items = result.Items;
for (var i = 0, length = items.length ; i < length; i++) {
showNewItemNotification(items[i], apiClient);
}
});
}
function getIconUrl(name) {
name = name || 'notificationicon.png';
return require.toUrl('.').split('?')[0] + '/' + name;
}
function showPackageInstallNotification(apiClient, installation, status) {
apiClient.getCurrentUser().then(function (user) {
if (!user.Policy.IsAdministrator) {
return;
}
var notification = {
tag: "install" + installation.Id,
data: {}
};
if (status === 'completed') {
notification.title = globalize.translate('PackageInstallCompleted').replace('{0}', installation.Name + ' ' + installation.Version);
notification.vibrate = true;
} else if (status === 'cancelled') {
notification.title = globalize.translate('PackageInstallCancelled').replace('{0}', installation.Name + ' ' + installation.Version);
} else if (status === 'failed') {
notification.title = globalize.translate('PackageInstallFailed').replace('{0}', installation.Name + ' ' + installation.Version);
notification.vibrate = true;
} else if (status === 'progress') {
notification.title = globalize.translate('InstallingPackage').replace('{0}', installation.Name + ' ' + installation.Version);
notification.actions =
[
{
action: 'cancel-install',
title: globalize.translate('ButtonCancel'),
icon: getIconUrl()
}
];
notification.data.id = installation.id;
}
if (status === 'progress') {
var percentComplete = Math.round(installation.PercentComplete || 0);
notification.body = percentComplete + '% complete.';
}
var timeout = status === 'cancelled' ? 5000 : 0;
showNotification(notification, timeout, apiClient);
});
}
events.on(serverNotifications, 'LibraryChanged', function (e, apiClient, data) {
onLibraryChanged(data, apiClient);
});
events.on(serverNotifications, 'PackageInstallationCompleted', function (e, apiClient, data) {
showPackageInstallNotification(apiClient, data, "completed");
});
events.on(serverNotifications, 'PackageInstallationFailed', function (e, apiClient, data) {
showPackageInstallNotification(apiClient, data, "failed");
});
events.on(serverNotifications, 'PackageInstallationCancelled', function (e, apiClient, data) {
showPackageInstallNotification(apiClient, data, "cancelled");
});
events.on(serverNotifications, 'PackageInstalling', function (e, apiClient, data) {
showPackageInstallNotification(apiClient, data, "progress");
});
events.on(serverNotifications, 'ServerShuttingDown', function (e, apiClient, data) {
var serverId = apiClient.serverInfo().Id;
var notification = {
tag: "restart" + serverId,
title: globalize.translate('ServerNameIsShuttingDown', apiClient.serverInfo().Name)
};
showNotification(notification, 0, apiClient);
});
events.on(serverNotifications, 'ServerRestarting', function (e, apiClient, data) {
var serverId = apiClient.serverInfo().Id;
var notification = {
tag: "restart" + serverId,
title: globalize.translate('ServerNameIsRestarting', apiClient.serverInfo().Name)
};
showNotification(notification, 0, apiClient);
});
events.on(serverNotifications, 'RestartRequired', function (e, apiClient) {
var serverId = apiClient.serverInfo().Id;
var notification = {
tag: "restart" + serverId,
title: globalize.translate('PleaseRestartServerName', apiClient.serverInfo().Name)
};
notification.actions =
[
{
action: 'restart',
title: globalize.translate('ButtonRestart'),
icon: getIconUrl()
}
];
showNotification(notification, 0, apiClient);
});
});
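The hunk above moves the placeholder substitution into the translation call itself. A small before/after sketch of the pattern (variable names are taken from the surrounding file and the key is one already used above):

var name = installation.Name + ' ' + installation.Version;

// Before: translate first, then patch the {0} placeholder in by hand.
notification.title = globalize.translate('PackageInstallCompleted').replace('{0}', name);

// After: pass the argument through so the translation layer performs the
// substitution itself, as the updated lines in the diff do.
notification.title = globalize.translate('PackageInstallCompleted', name);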
| 1 | 13,797 | We probably want to let the translation library handle the string replacement for these as well. | jellyfin-jellyfin-web | js |