| patch (stringlengths 17–31.2k) | y (int64 1–1) | oldf (stringlengths 0–2.21M) | idx (int64 1–1) | id (int64 4.29k–68.4k) | msg (stringlengths 8–843) | proj (stringclasses, 212 values) | lang (stringclasses, 9 values) |
---|---|---|---|---|---|---|---|
@@ -123,6 +123,12 @@ public class DatabaseHelper extends DaoMaster.OpenHelper {
}
}
break;
+ case 10:
+ AllergenDao.dropTable(db, true);
+ AllergenDao.createTable(db, true);
+ AllergenNameDao.dropTable(db, true);
+ AllergenNameDao.createTable(db, true);
+ break;
}
}
| 1 | package openfoodfacts.github.scrachx.openfood.models;
import android.content.Context;
import android.content.SharedPreferences;
import android.database.Cursor;
import android.database.sqlite.SQLiteDatabase;
import android.util.Log;
import org.greenrobot.greendao.database.Database;
import openfoodfacts.github.scrachx.openfood.utils.Utils;
public class DatabaseHelper extends DaoMaster.OpenHelper {
private SharedPreferences settings;
public DatabaseHelper(Context context, String name, SQLiteDatabase.CursorFactory factory) {
super(context, name, factory);
settings = context.getSharedPreferences("prefs", 0);
}
public DatabaseHelper(Context context, String name) {
super(context, name);
settings = context.getSharedPreferences("prefs", 0);
}
@Override
public void onCreate(Database db) {
Log.i("greenDAO", "Creating tables for schema version " + DaoMaster.SCHEMA_VERSION);
DaoMaster.createAllTables(db, true);
}
@Override
public void onUpgrade(Database db, int oldVersion, int newVersion) {
Log.i("greenDAO", "migrating schema from version " + oldVersion + " to " + newVersion);
//dropAllTables(db, true);
for (int migrateVersion = oldVersion + 1; migrateVersion <= newVersion; migrateVersion++) {
upgrade(db, migrateVersion);
}
//db model has changed we need to invalidate and reload taxonomies
if( settings != null && oldVersion != newVersion )
{
settings.edit().putLong( Utils.LAST_REFRESH_DATE, 0 ).apply();
}
}
/**
* in case of android.database.sqlite.SQLiteException, the schema version is
* left untouched just fix the code in the version case and push a new
* release
*
* @param db database
* @param migrateVersion
*/
private void upgrade(Database db, int migrateVersion) {
Log.e("MIGRATE VERSION", "" + migrateVersion);
switch (migrateVersion) {
case 2:
db.execSQL("ALTER TABLE send_product ADD COLUMN 'lang' TEXT NOT NULL DEFAULT 'fr';");
break;
case 3:
ToUploadProductDao.createTable(db, true);
break;
case 4:
TagDao.createTable(db, true);
break;
case 5: {
db.execSQL("ALTER TABLE history_product ADD COLUMN 'quantity' TEXT NOT NULL DEFAULT '';");
db.execSQL("ALTER TABLE history_product ADD COLUMN 'nutrition_grade' TEXT NOT NULL DEFAULT '';");
break;
}
case 6: {
LabelDao.createTable(db, true);
LabelNameDao.createTable(db, true);
AllergenDao.dropTable(db, true);
AllergenDao.createTable(db, true);
AllergenNameDao.createTable(db, true);
AdditiveDao.dropTable(db, true);
AdditiveDao.createTable(db, true);
AdditiveNameDao.createTable(db, true);
CountryDao.createTable(db, true);
CountryNameDao.createTable(db, true);
CategoryDao.createTable(db, true);
CategoryNameDao.createTable(db, true);
break;
}
case 7: {
String newColumns[] = new String[]{"wiki_data_id", "is_wiki_data_id_present"};
String updatedTables[] = new String[]{"additive_name", "additive", "category_name", "category", "label_name", "label"};
for (String table : updatedTables) {
for (String column : newColumns) {
if (!isFieldExist(db, table, column)) {
db.execSQL(String.format("ALTER TABLE %s ADD COLUMN '%s' TEXT NOT NULL DEFAULT '';", table, column));
}
}
}
break;
}
case 8:
OfflineSavedProductDao.createTable(db, true);
break;
case 9:
String newColumns[] = new String[]{ "overexposure_risk", "exposure_mean_greater_than_adi", "exposure_mean_greater_than_noael",
"exposure95_th_greater_than_adi", "exposure95_th_greater_than_noael" };
String updatedTables[] = new String[]{ "additive_name", "additive" };
for( String table : updatedTables )
{
for( String column : newColumns )
{
if (!isFieldExist(db, table, column))
{
db.execSQL( String.format( "ALTER TABLE %s ADD COLUMN '%s' TEXT;", table, column ) );
}
}
}
break;
}
}
private boolean isFieldExist(Database db, String tableName, String fieldName) {
boolean isExist = false;
String query = String.format("PRAGMA table_info(%s)", tableName);
Cursor res = db.rawQuery(query, null);
res.moveToFirst();
do {
String currentColumn = res.getString(1);
if (currentColumn.equals(fieldName)) {
isExist = true;
}
} while (res.moveToNext());
return isExist;
}
} | 1 | 65,977 | Is dropping the **Allergen table** necessary? Can't we just add the two new columns to the existing table using a raw query? | openfoodfacts-openfoodfacts-androidapp | java |
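A minimal sketch of the reviewer's alternative, assuming the allergen tables could be altered in place rather than dropped and recreated. The table names (`allergen`, `allergen_name`) and column names (`wiki_data_id`, `is_wiki_data_id_present`) below are assumptions for illustration only; the excerpt does not show which two columns the version-10 migration actually introduces.

```java
// Illustrative alternative for the "case 10" migration: add the new columns with raw
// ALTER TABLE statements instead of dropping and recreating the allergen tables.
// Table and column names are assumed, not taken from the diff.
private void upgradeAllergenTablesInPlace(Database db) {
    String[] tables = {"allergen", "allergen_name"};                   // assumed table names
    String[] newColumns = {"wiki_data_id", "is_wiki_data_id_present"}; // assumed column names
    for (String table : tables) {
        for (String column : newColumns) {
            if (!isFieldExist(db, table, column)) {                    // helper already defined in DatabaseHelper
                db.execSQL(String.format(
                        "ALTER TABLE %s ADD COLUMN '%s' TEXT NOT NULL DEFAULT '';", table, column));
            }
        }
    }
}
```

Note that the drop-and-recreate approach is still recoverable here, because `onUpgrade` already resets `Utils.LAST_REFRESH_DATE` so the taxonomies are re-downloaded; the in-place ALTER mainly avoids discarding the cached rows in the meantime.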
@@ -13,11 +13,11 @@ feature "Subscriber views completed trails" do
within ".completed-trails" do
expect(page).to have_content(just_finished_trail.name)
- expect(page).to have_no_content(incomplete_trail.name)
+ expect(page).not_to have_content(incomplete_trail.name)
end
within ".incomplete-trails" do
- expect(page).to have_no_content(just_finished_trail.name)
+ expect(page).not_to have_content(just_finished_trail.name)
expect(page).to have_content(incomplete_trail.name)
end
| 1 | require "rails_helper"
feature "Subscriber views completed trails" do
scenario "completed and incompleted trails are separated" do
completed_trail = create(:trail, :published, :completed)
user = completed_trail.users.last
just_finished_trail = create(:trail, :published)
create(:status, :completed, completeable: just_finished_trail, user: user)
incomplete_trail = create(:trail, :published)
sign_in_as user
visit practice_path
within ".completed-trails" do
expect(page).to have_content(just_finished_trail.name)
expect(page).to have_no_content(incomplete_trail.name)
end
within ".incomplete-trails" do
expect(page).to have_no_content(just_finished_trail.name)
expect(page).to have_content(incomplete_trail.name)
end
expect(page).to have_no_content(completed_trail.name)
end
end
| 1 | 17,450 | I'm confused why the expectations here were inverted? This seems unrelated to the topic changes. Can you clarify? | thoughtbot-upcase | rb |
@@ -127,12 +127,13 @@ std::string MetaServiceUtils::schemaEdgesPrefix(GraphSpaceID spaceId) {
std::string MetaServiceUtils::schemaEdgeKey(GraphSpaceID spaceId,
EdgeType edgeType,
int64_t version) {
+ int64_t storageVer = std::numeric_limits<int64_t>::max() - version;
std::string key;
key.reserve(128);
key.append(kEdgesTable.data(), kEdgesTable.size());
key.append(reinterpret_cast<const char*>(&spaceId), sizeof(spaceId));
key.append(reinterpret_cast<const char*>(&edgeType), sizeof(edgeType));
- key.append(reinterpret_cast<const char*>(&version), sizeof(version));
+ key.append(reinterpret_cast<const char*>(&storageVer), sizeof(storageVer));
return key;
}
| 1 | /* Copyright (c) 2018 - present, VE Software Inc. All rights reserved
*
* This source code is licensed under Apache 2.0 License
* (found in the LICENSE.Apache file in the root directory)
*/
#include "meta/MetaServiceUtils.h"
#include <thrift/lib/cpp2/protocol/Serializer.h>
#include <thrift/lib/cpp2/protocol/CompactProtocol.h>
namespace nebula {
namespace meta {
const std::string kSpacesTable = "__spaces__"; // NOLINT
const std::string kPartsTable = "__parts__"; // NOLINT
const std::string kHostsTable = "__hosts__"; // NOLINT
const std::string kTagsTable = "__tags__"; // NOLINT
const std::string kEdgesTable = "__edges__"; // NOLINT
const std::string kIndexTable = "__index__"; // NOLINT
std::string MetaServiceUtils::spaceKey(GraphSpaceID spaceId) {
std::string key;
key.reserve(256);
key.append(kSpacesTable.data(), kSpacesTable.size());
key.append(reinterpret_cast<const char*>(&spaceId), sizeof(spaceId));
return key;
}
std::string MetaServiceUtils::spaceVal(int32_t partsNum,
int32_t replicaFactor,
const std::string& name) {
std::string val;
val.reserve(256);
val.append(reinterpret_cast<const char*>(&partsNum), sizeof(partsNum));
val.append(reinterpret_cast<const char*>(&replicaFactor), sizeof(replicaFactor));
val.append(name);
return val;
}
const std::string& MetaServiceUtils::spacePrefix() {
return kSpacesTable;
}
GraphSpaceID MetaServiceUtils::spaceId(folly::StringPiece rawKey) {
return *reinterpret_cast<const GraphSpaceID*>(rawKey.data() + kSpacesTable.size());
}
folly::StringPiece MetaServiceUtils::spaceName(folly::StringPiece rawVal) {
return rawVal.subpiece(sizeof(int32_t)*2);
}
std::string MetaServiceUtils::partKey(GraphSpaceID spaceId, PartitionID partId) {
std::string key;
key.reserve(128);
key.append(kPartsTable.data(), kPartsTable.size());
key.append(reinterpret_cast<const char*>(&spaceId), sizeof(GraphSpaceID));
key.append(reinterpret_cast<const char*>(&partId), sizeof(PartitionID));
return key;
}
std::string MetaServiceUtils::partVal(const std::vector<nebula::cpp2::HostAddr>& hosts) {
std::string val;
val.reserve(128);
for (auto& h : hosts) {
val.append(reinterpret_cast<const char*>(&h.ip), sizeof(h.ip));
val.append(reinterpret_cast<const char*>(&h.port), sizeof(h.port));
}
return val;
}
std::string MetaServiceUtils::partPrefix(GraphSpaceID spaceId) {
std::string prefix;
prefix.reserve(128);
prefix.append(kPartsTable.data(), kPartsTable.size());
prefix.append(reinterpret_cast<const char*>(&spaceId), sizeof(GraphSpaceID));
return prefix;
}
std::vector<nebula::cpp2::HostAddr> MetaServiceUtils::parsePartVal(folly::StringPiece val) {
std::vector<nebula::cpp2::HostAddr> hosts;
static const size_t unitSize = sizeof(int32_t) * 2;
auto hostsNum = val.size() / unitSize;
hosts.reserve(hostsNum);
VLOG(3) << "Total size:" << val.size()
<< ", host size:" << unitSize
<< ", host num:" << hostsNum;
for (decltype(hostsNum) i = 0; i < hostsNum; i++) {
nebula::cpp2::HostAddr h;
h.set_ip(*reinterpret_cast<const int32_t*>(val.data() + i * unitSize));
h.set_port(*reinterpret_cast<const int32_t*>(val.data() + i * unitSize + sizeof(int32_t)));
hosts.emplace_back(std::move(h));
}
return hosts;
}
std::string MetaServiceUtils::hostKey(IPv4 ip, Port port) {
std::string key;
key.reserve(128);
key.append(kHostsTable.data(), kHostsTable.size());
key.append(reinterpret_cast<const char*>(&ip), sizeof(ip));
key.append(reinterpret_cast<const char*>(&port), sizeof(port));
return key;
}
std::string MetaServiceUtils::hostVal() {
return "";
}
const std::string& MetaServiceUtils::hostPrefix() {
return kHostsTable;
}
nebula::cpp2::HostAddr MetaServiceUtils::parseHostKey(folly::StringPiece key) {
nebula::cpp2::HostAddr host;
memcpy(&host, key.data() + kHostsTable.size(), sizeof(host));
return host;
}
std::string MetaServiceUtils::schemaEdgesPrefix(GraphSpaceID spaceId) {
std::string key;
key.reserve(kEdgesTable.size() + sizeof(GraphSpaceID));
key.append(kEdgesTable.data(), kEdgesTable.size());
key.append(reinterpret_cast<const char*>(&spaceId), sizeof(spaceId));
return key;
}
std::string MetaServiceUtils::schemaEdgeKey(GraphSpaceID spaceId,
EdgeType edgeType,
int64_t version) {
std::string key;
key.reserve(128);
key.append(kEdgesTable.data(), kEdgesTable.size());
key.append(reinterpret_cast<const char*>(&spaceId), sizeof(spaceId));
key.append(reinterpret_cast<const char*>(&edgeType), sizeof(edgeType));
key.append(reinterpret_cast<const char*>(&version), sizeof(version));
return key;
}
std::string MetaServiceUtils::schemaEdgeVal(const std::string& name, nebula::cpp2::Schema schema) {
int32_t len = name.size();
std::string val, sval;
apache::thrift::CompactSerializer::serialize(schema, &sval);
val.reserve(sizeof(int32_t) + name.size() + sval.size());
val.append(reinterpret_cast<const char*>(&len), sizeof(int32_t));
val.append(name);
val.append(sval);
return val;
}
int64_t MetaServiceUtils::parseEdgeVersion(folly::StringPiece key) {
auto offset = kEdgesTable.size() + sizeof(GraphSpaceID) + sizeof(EdgeType);
int64_t ver = std::numeric_limits<int64_t>::max() -
*reinterpret_cast<const int64_t*>(key.begin() + offset);
return ver;
}
std::string MetaServiceUtils::schemaTagKey(GraphSpaceID spaceId, TagID tagId, int64_t version) {
int64_t storageVer = std::numeric_limits<int64_t>::max() - version;
std::string key;
key.reserve(128);
key.append(kTagsTable.data(), kTagsTable.size());
key.append(reinterpret_cast<const char*>(&spaceId), sizeof(spaceId));
key.append(reinterpret_cast<const char*>(&tagId), sizeof(tagId));
key.append(reinterpret_cast<const char*>(&storageVer), sizeof(version));
return key;
}
int64_t MetaServiceUtils::parseTagVersion(folly::StringPiece key) {
auto offset = kTagsTable.size() + sizeof(GraphSpaceID) + sizeof(TagID);
int64_t ver = std::numeric_limits<int64_t>::max() -
*reinterpret_cast<const int64_t*>(key.begin() + offset);
return ver;
}
std::string MetaServiceUtils::schemaTagPrefix(GraphSpaceID spaceId, TagID tagId) {
std::string key;
key.reserve(128);
key.append(kTagsTable.data(), kTagsTable.size());
key.append(reinterpret_cast<const char*>(&spaceId), sizeof(spaceId));
key.append(reinterpret_cast<const char*>(&tagId), sizeof(tagId));
return key;
}
std::string MetaServiceUtils::schemaTagsPrefix(GraphSpaceID spaceId) {
std::string key;
key.reserve(kTagsTable.size() + sizeof(GraphSpaceID));
key.append(kTagsTable.data(), kTagsTable.size());
key.append(reinterpret_cast<const char*>(&spaceId), sizeof(spaceId));
return key;
}
std::string MetaServiceUtils::schemaTagVal(const std::string& name, nebula::cpp2::Schema schema) {
int32_t len = name.size();
std::string val, sval;
apache::thrift::CompactSerializer::serialize(schema, &sval);
val.reserve(sizeof(int32_t) + name.size() + sval.size());
val.append(reinterpret_cast<const char*>(&len), sizeof(int32_t));
val.append(name);
val.append(sval);
return val;
}
nebula::cpp2::Schema MetaServiceUtils::parseSchema(folly::StringPiece rawData) {
nebula::cpp2::Schema schema;
int32_t offset = sizeof(int32_t) + *reinterpret_cast<const int32_t *>(rawData.begin());
auto schval = rawData.subpiece(offset, rawData.size() - offset);
apache::thrift::CompactSerializer::deserialize(schval, schema);
return schema;
}
std::string MetaServiceUtils::indexSpaceKey(const std::string& name) {
std::string key;
key.reserve(128);
key.append(kIndexTable.data(), kIndexTable.size());
EntryType type = EntryType::SPACE;
key.append(reinterpret_cast<const char*>(&type), sizeof(type));
key.append(name);
return key;
}
std::string MetaServiceUtils::indexTagKey(GraphSpaceID spaceId,
const std::string& name) {
std::string key;
key.reserve(128);
key.append(kIndexTable.data(), kIndexTable.size());
EntryType type = EntryType::TAG;
key.append(reinterpret_cast<const char*>(&type), sizeof(type));
key.append(reinterpret_cast<const char*>(&spaceId), sizeof(GraphSpaceID));
key.append(name);
return key;
}
std::string MetaServiceUtils::indexEdgeKey(GraphSpaceID spaceId,
const std::string& name) {
std::string key;
key.reserve(128);
key.append(kIndexTable.data(), kIndexTable.size());
EntryType type = EntryType::EDGE;
key.append(reinterpret_cast<const char*>(&type), sizeof(type));
key.append(reinterpret_cast<const char*>(&spaceId), sizeof(GraphSpaceID));
key.append(name);
return key;
}
std::string MetaServiceUtils::assembleSegmentKey(const std::string& segment,
const std::string& key) {
std::string segmentKey;
segmentKey.reserve(64);
segmentKey.append(segment);
segmentKey.append(key.data(), key.size());
return segmentKey;
}
} // namespace meta
} // namespace nebula
| 1 | 16,857 | I am very sorry that I neglected to resolve the conflict and did not fully unit test the edge schema changes. | vesoft-inc-nebula | cpp |
@@ -25,13 +25,14 @@ import org.apache.iceberg.catalog.TableIdentifier;
import org.apache.iceberg.exceptions.AlreadyExistsException;
import org.apache.iceberg.exceptions.CommitFailedException;
import org.apache.iceberg.exceptions.NoSuchTableException;
+import org.apache.iceberg.io.CloseableGroup;
import org.apache.iceberg.relocated.com.google.common.base.MoreObjects;
import org.apache.iceberg.relocated.com.google.common.base.Preconditions;
import org.apache.iceberg.relocated.com.google.common.collect.ImmutableMap;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-public abstract class BaseMetastoreCatalog implements Catalog {
+public abstract class BaseMetastoreCatalog extends CloseableGroup implements Catalog {
private static final Logger LOG = LoggerFactory.getLogger(BaseMetastoreCatalog.class);
@Override | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg;
import java.util.Map;
import org.apache.iceberg.catalog.Catalog;
import org.apache.iceberg.catalog.TableIdentifier;
import org.apache.iceberg.exceptions.AlreadyExistsException;
import org.apache.iceberg.exceptions.CommitFailedException;
import org.apache.iceberg.exceptions.NoSuchTableException;
import org.apache.iceberg.relocated.com.google.common.base.MoreObjects;
import org.apache.iceberg.relocated.com.google.common.base.Preconditions;
import org.apache.iceberg.relocated.com.google.common.collect.ImmutableMap;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public abstract class BaseMetastoreCatalog implements Catalog {
private static final Logger LOG = LoggerFactory.getLogger(BaseMetastoreCatalog.class);
@Override
public Table createTable(
TableIdentifier identifier,
Schema schema,
PartitionSpec spec,
String location,
Map<String, String> properties) {
return buildTable(identifier, schema)
.withPartitionSpec(spec)
.withLocation(location)
.withProperties(properties)
.create();
}
@Override
public Transaction newCreateTableTransaction(
TableIdentifier identifier,
Schema schema,
PartitionSpec spec,
String location,
Map<String, String> properties) {
return buildTable(identifier, schema)
.withPartitionSpec(spec)
.withLocation(location)
.withProperties(properties)
.createTransaction();
}
@Override
public Transaction newReplaceTableTransaction(
TableIdentifier identifier,
Schema schema,
PartitionSpec spec,
String location,
Map<String, String> properties,
boolean orCreate) {
TableBuilder tableBuilder = buildTable(identifier, schema)
.withPartitionSpec(spec)
.withLocation(location)
.withProperties(properties);
if (orCreate) {
return tableBuilder.createOrReplaceTransaction();
} else {
return tableBuilder.replaceTransaction();
}
}
@Override
public Table loadTable(TableIdentifier identifier) {
Table result;
if (isValidIdentifier(identifier)) {
TableOperations ops = newTableOps(identifier);
if (ops.current() == null) {
// the identifier may be valid for both tables and metadata tables
if (isValidMetadataIdentifier(identifier)) {
result = loadMetadataTable(identifier);
} else {
throw new NoSuchTableException("Table does not exist: %s", identifier);
}
} else {
result = new BaseTable(ops, fullTableName(name(), identifier));
}
} else if (isValidMetadataIdentifier(identifier)) {
result = loadMetadataTable(identifier);
} else {
throw new NoSuchTableException("Invalid table identifier: %s", identifier);
}
LOG.info("Table loaded by catalog: {}", result);
return result;
}
@Override
public TableBuilder buildTable(TableIdentifier identifier, Schema schema) {
return new BaseMetastoreCatalogTableBuilder(identifier, schema);
}
private Table loadMetadataTable(TableIdentifier identifier) {
String tableName = identifier.name();
MetadataTableType type = MetadataTableType.from(tableName);
if (type != null) {
TableIdentifier baseTableIdentifier = TableIdentifier.of(identifier.namespace().levels());
TableOperations ops = newTableOps(baseTableIdentifier);
if (ops.current() == null) {
throw new NoSuchTableException("Table does not exist: %s", baseTableIdentifier);
}
return MetadataTableUtils.createMetadataTableInstance(ops, name(), baseTableIdentifier, identifier, type);
} else {
throw new NoSuchTableException("Table does not exist: %s", identifier);
}
}
private boolean isValidMetadataIdentifier(TableIdentifier identifier) {
return MetadataTableType.from(identifier.name()) != null &&
isValidIdentifier(TableIdentifier.of(identifier.namespace().levels()));
}
protected boolean isValidIdentifier(TableIdentifier tableIdentifier) {
// by default allow all identifiers
return true;
}
@Override
public String toString() {
return MoreObjects.toStringHelper(this).toString();
}
protected abstract TableOperations newTableOps(TableIdentifier tableIdentifier);
protected abstract String defaultWarehouseLocation(TableIdentifier tableIdentifier);
protected class BaseMetastoreCatalogTableBuilder implements TableBuilder {
private final TableIdentifier identifier;
private final Schema schema;
private final ImmutableMap.Builder<String, String> propertiesBuilder = ImmutableMap.builder();
private PartitionSpec spec = PartitionSpec.unpartitioned();
private SortOrder sortOrder = SortOrder.unsorted();
private String location = null;
public BaseMetastoreCatalogTableBuilder(TableIdentifier identifier, Schema schema) {
Preconditions.checkArgument(isValidIdentifier(identifier), "Invalid table identifier: %s", identifier);
this.identifier = identifier;
this.schema = schema;
}
@Override
public TableBuilder withPartitionSpec(PartitionSpec newSpec) {
this.spec = newSpec != null ? newSpec : PartitionSpec.unpartitioned();
return this;
}
@Override
public TableBuilder withSortOrder(SortOrder newSortOrder) {
this.sortOrder = newSortOrder != null ? newSortOrder : SortOrder.unsorted();
return this;
}
@Override
public TableBuilder withLocation(String newLocation) {
this.location = newLocation;
return this;
}
@Override
public TableBuilder withProperties(Map<String, String> properties) {
if (properties != null) {
propertiesBuilder.putAll(properties);
}
return this;
}
@Override
public TableBuilder withProperty(String key, String value) {
propertiesBuilder.put(key, value);
return this;
}
@Override
public Table create() {
TableOperations ops = newTableOps(identifier);
if (ops.current() != null) {
throw new AlreadyExistsException("Table already exists: %s", identifier);
}
String baseLocation = location != null ? location : defaultWarehouseLocation(identifier);
Map<String, String> properties = propertiesBuilder.build();
TableMetadata metadata = TableMetadata.newTableMetadata(schema, spec, sortOrder, baseLocation, properties);
try {
ops.commit(null, metadata);
} catch (CommitFailedException ignored) {
throw new AlreadyExistsException("Table was created concurrently: %s", identifier);
}
return new BaseTable(ops, fullTableName(name(), identifier));
}
@Override
public Transaction createTransaction() {
TableOperations ops = newTableOps(identifier);
if (ops.current() != null) {
throw new AlreadyExistsException("Table already exists: %s", identifier);
}
String baseLocation = location != null ? location : defaultWarehouseLocation(identifier);
Map<String, String> properties = propertiesBuilder.build();
TableMetadata metadata = TableMetadata.newTableMetadata(schema, spec, sortOrder, baseLocation, properties);
return Transactions.createTableTransaction(identifier.toString(), ops, metadata);
}
@Override
public Transaction replaceTransaction() {
return newReplaceTableTransaction(false);
}
@Override
public Transaction createOrReplaceTransaction() {
return newReplaceTableTransaction(true);
}
private Transaction newReplaceTableTransaction(boolean orCreate) {
TableOperations ops = newTableOps(identifier);
if (!orCreate && ops.current() == null) {
throw new NoSuchTableException("No such table: %s", identifier);
}
TableMetadata metadata;
if (ops.current() != null) {
String baseLocation = location != null ? location : ops.current().location();
metadata = ops.current().buildReplacement(schema, spec, sortOrder, baseLocation, propertiesBuilder.build());
} else {
String baseLocation = location != null ? location : defaultWarehouseLocation(identifier);
metadata = TableMetadata.newTableMetadata(schema, spec, sortOrder, baseLocation, propertiesBuilder.build());
}
if (orCreate) {
return Transactions.createOrReplaceTableTransaction(identifier.toString(), ops, metadata);
} else {
return Transactions.replaceTableTransaction(identifier.toString(), ops, metadata);
}
}
}
protected static String fullTableName(String catalogName, TableIdentifier identifier) {
StringBuilder sb = new StringBuilder();
if (catalogName.contains("/") || catalogName.contains(":")) {
// use / for URI-like names: thrift://host:port/db.table
sb.append(catalogName);
if (!catalogName.endsWith("/")) {
sb.append("/");
}
} else {
// use . for non-URI named catalogs: prod.db.table
sb.append(catalogName).append(".");
}
for (String level : identifier.namespace().levels()) {
sb.append(level).append(".");
}
sb.append(identifier.name());
return sb.toString();
}
}
| 1 | 39,812 | I think Closeable should be handled by the concrete impl of a catalog and not by the base class; I don't think all catalogs must implement Closeable by design. This should be done via composition rather than inheritance. Additionally, I don't know what the effect of this change is on other catalogs, so this feels a bit on the risky side. | apache-iceberg | java |
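A rough sketch of the composition approach the reviewer describes, in which only a concrete catalog that actually owns closeable resources implements `Closeable`, and `BaseMetastoreCatalog` stays as it is. This assumes `CloseableGroup` exposes `addCloseable(...)` and `close()`; the class name and tracked resources are hypothetical, and the remaining abstract `Catalog` methods are omitted, so this is not a complete implementation.

```java
import java.io.Closeable;
import java.io.IOException;

import org.apache.iceberg.catalog.TableIdentifier;
import org.apache.iceberg.io.CloseableGroup;

// Hypothetical concrete catalog: it composes a CloseableGroup instead of inheriting
// closeability from the base class, so catalogs with nothing to close are unaffected.
public class ExampleMetastoreCatalog extends BaseMetastoreCatalog implements Closeable {
  private final CloseableGroup closeables = new CloseableGroup();

  // Call this as clients, connection pools, etc. are created by this catalog.
  protected void track(Closeable resource) {
    closeables.addCloseable(resource);
  }

  @Override
  public void close() throws IOException {
    closeables.close(); // releases everything registered via track(...)
  }

  @Override
  protected TableOperations newTableOps(TableIdentifier identifier) {
    throw new UnsupportedOperationException("sketch only");
  }

  @Override
  protected String defaultWarehouseLocation(TableIdentifier identifier) {
    throw new UnsupportedOperationException("sketch only");
  }
}
```

This keeps the risk local: catalogs that own no resources simply never implement `Closeable`, and existing implementations are not changed by the base class.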
@@ -9,11 +9,13 @@ void ConvolutionLayer<Dtype>::compute_output_shape() {
const int* kernel_shape_data = this->kernel_shape_.cpu_data();
const int* stride_data = this->stride_.cpu_data();
const int* pad_data = this->pad_.cpu_data();
+ const int* dilation_data = this->dilation_.cpu_data();
this->output_shape_.clear();
for (int i = 0; i < this->num_spatial_axes_; ++i) {
// i + 1 to skip channel axis
const int input_dim = this->input_shape(i + 1);
- const int output_dim = (input_dim + 2 * pad_data[i] - kernel_shape_data[i])
+ const int kernel_extent = dilation_data[i] * (kernel_shape_data[i] - 1) + 1;
+ const int output_dim = (input_dim + 2 * pad_data[i] - kernel_extent)
/ stride_data[i] + 1;
this->output_shape_.push_back(output_dim);
} | 1 | #include <vector>
#include "caffe/layers/conv_layer.hpp"
namespace caffe {
template <typename Dtype>
void ConvolutionLayer<Dtype>::compute_output_shape() {
const int* kernel_shape_data = this->kernel_shape_.cpu_data();
const int* stride_data = this->stride_.cpu_data();
const int* pad_data = this->pad_.cpu_data();
this->output_shape_.clear();
for (int i = 0; i < this->num_spatial_axes_; ++i) {
// i + 1 to skip channel axis
const int input_dim = this->input_shape(i + 1);
const int output_dim = (input_dim + 2 * pad_data[i] - kernel_shape_data[i])
/ stride_data[i] + 1;
this->output_shape_.push_back(output_dim);
}
}
template <typename Dtype>
void ConvolutionLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* weight = this->blobs_[0]->cpu_data();
for (int i = 0; i < bottom.size(); ++i) {
const Dtype* bottom_data = bottom[i]->cpu_data();
Dtype* top_data = top[i]->mutable_cpu_data();
for (int n = 0; n < this->num_; ++n) {
this->forward_cpu_gemm(bottom_data + n * this->bottom_dim_, weight,
top_data + n * this->top_dim_);
if (this->bias_term_) {
const Dtype* bias = this->blobs_[1]->cpu_data();
this->forward_cpu_bias(top_data + n * this->top_dim_, bias);
}
}
}
}
template <typename Dtype>
void ConvolutionLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const Dtype* weight = this->blobs_[0]->cpu_data();
Dtype* weight_diff = this->blobs_[0]->mutable_cpu_diff();
for (int i = 0; i < top.size(); ++i) {
const Dtype* top_diff = top[i]->cpu_diff();
const Dtype* bottom_data = bottom[i]->cpu_data();
Dtype* bottom_diff = bottom[i]->mutable_cpu_diff();
// Bias gradient, if necessary.
if (this->bias_term_ && this->param_propagate_down_[1]) {
Dtype* bias_diff = this->blobs_[1]->mutable_cpu_diff();
for (int n = 0; n < this->num_; ++n) {
this->backward_cpu_bias(bias_diff, top_diff + n * this->top_dim_);
}
}
if (this->param_propagate_down_[0] || propagate_down[i]) {
for (int n = 0; n < this->num_; ++n) {
// gradient w.r.t. weight. Note that we will accumulate diffs.
if (this->param_propagate_down_[0]) {
this->weight_cpu_gemm(bottom_data + n * this->bottom_dim_,
top_diff + n * this->top_dim_, weight_diff);
}
// gradient w.r.t. bottom data, if necessary.
if (propagate_down[i]) {
this->backward_cpu_gemm(top_diff + n * this->top_dim_, weight,
bottom_diff + n * this->bottom_dim_);
}
}
}
}
}
#ifdef CPU_ONLY
STUB_GPU(ConvolutionLayer);
#endif
INSTANTIATE_CLASS(ConvolutionLayer);
} // namespace caffe
| 1 | 35,873 | Should this be `const`? | BVLC-caffe | cpp |
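As a worked example of the dilated output-shape formula in this patch (illustrative numbers, not from the source): with `input_dim = 7`, `pad = 0`, `kernel = 3`, `dilation = 2`, and `stride = 1`, the effective kernel extent is `2 * (3 - 1) + 1 = 5`, so `output_dim = (7 + 0 - 5) / 1 + 1 = 3`; with `dilation = 1` the extent reduces to the kernel size and the expression matches the original formula.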
@@ -6,7 +6,7 @@ namespace Benchmarks.Trace
{
private static void Main(string[] args)
{
- BenchmarkRunner.Run<SpanBenchmark>();
+ BenchmarkSwitcher.FromAssembly(typeof(Program).Assembly).Run(args);
}
}
} | 1 | using BenchmarkDotNet.Running;
namespace Benchmarks.Trace
{
internal class Program
{
private static void Main(string[] args)
{
BenchmarkRunner.Run<SpanBenchmark>();
}
}
}
| 1 | 17,518 | Can we also add the Exporters here by using `DefaultConfig.AddExporter(...)`? At least the Datadog exporter that can't be configured from the command line... | DataDog-dd-trace-dotnet | .cs |
@@ -77,7 +77,7 @@ int main(int argc, char *argv[]) {
// When using checkpoint states, skip training as those could be the result
// of checkpointing by steps.
- if (!opts->has_string("ckpt_dir")){
+ if (!opts->has_string("no_model1_train")){
model_1->train( pb_model.num_epochs() );
}
// Evaluate model 1 unless it is set to skip | 1 | ////////////////////////////////////////////////////////////////////////////////
// Copyright (c) 2014-2016, Lawrence Livermore National Security, LLC.
// Produced at the Lawrence Livermore National Laboratory.
// Written by the LBANN Research Team (B. Van Essen, et al.) listed in
// the CONTRIBUTORS file. <[email protected]>
//
// LLNL-CODE-697807.
// All rights reserved.
//
// This file is part of LBANN: Livermore Big Artificial Neural Network
// Toolkit. For details, see http://software.llnl.gov/LBANN or
// https://github.com/LLNL/LBANN.
//
// Licensed under the Apache License, Version 2.0 (the "Licensee"); you
// may not use this file except in compliance with the License. You may
// obtain a copy of the License at:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the license.
//
// lbann_proto.cpp - prototext application
////////////////////////////////////////////////////////////////////////////////
#include "lbann/lbann.hpp"
#include "lbann/proto/proto_common.hpp"
#include "lbann/utils/protobuf_utils.hpp"
#include <dirent.h>
#include <cstdlib>
using namespace lbann;
const int lbann_default_random_seed = 42;
model * build_model_from_prototext(int argc, char **argv, lbann_data::LbannPB &pb);
bool load_model_weights(std::string ckpt_dir, model * m);
int main(int argc, char *argv[]) {
int random_seed = lbann_default_random_seed;
lbann_comm *comm = initialize(argc, argv, random_seed);
bool master = comm->am_world_master();
#ifdef EL_USE_CUBLAS
El::GemmUseGPU(32,32,32);
#endif
try {
// Initialize options db (this parses the command line)
options *opts = options::get();
opts->init(argc, argv);
if (opts->has_string("h") or opts->has_string("help") or argc == 1) {
print_help(comm);
finalize(comm);
return 0;
}
std::stringstream err;
std::vector<lbann_data::LbannPB *> pbs;
protobuf_utils::load_prototext(master, argc, argv, pbs);
model *model_1 = build_model_from_prototext(argc, argv, *(pbs[0]));
model *model_2 = nullptr;
if (pbs.size() > 1) {
model_2 = build_model_from_prototext(argc, argv, *(pbs[1]));
}
// Load layer weights from checkpoint if checkpoint directory given
if(opts->has_string("ckpt_dir")){
load_model_weights(opts->get_string("ckpt_dir"), model_1);
}
// Train model
if (master) std::cerr << "\nSTARTING train - model 1\n\n";
const lbann_data::Model pb_model = pbs[0]->model();
// When using checkpoint states, skip training as those could be the result
// of checkpointing by steps.
if (!opts->has_string("ckpt_dir")){
model_1->train( pb_model.num_epochs() );
}
// Evaluate model 1 unless it is set to skip
if (!opts->has_string("no_model1_eval")){
model_1->evaluate(execution_mode::testing);
}
if (model_2 != nullptr) {
const auto layers1 = model_1->get_layers();
const auto layers2 = model_2->get_layers();
for(size_t l2=0; l2 < layers2.size(); l2++) {
for(size_t l1=0; l1 < layers1.size(); l1++) {
if(layers2[l2]->get_name() == layers1[l1]->get_name()){
if(master) std::cout << "Model 1 Layer " << layers1[l1]->get_name();
layers2[l2]->replace_weights(layers1[l1]);
if(master) std::cout << " copied to Model2 Layer " << std::endl;
}
}
}
if (master) std::cerr << "\n STARTING train - model 2\n\n";
const lbann_data::Model pb_model_2 = pbs[1]->model();
model_2->train( pb_model_2.num_epochs() );
model_2->evaluate(execution_mode::testing);
}
delete model_1;
if (model_2 != nullptr) {
delete model_2;
}
for (auto t : pbs) {
delete t;
}
} catch (lbann_exception& e) {
lbann_report_exception(e, comm);
} catch (std::exception& e) {
El::ReportException(e); // Elemental exceptions
}
// free all resources by El and MPI
finalize(comm);
return 0;
}
model * build_model_from_prototext(int argc, char **argv, lbann_data::LbannPB &pb) {
int random_seed = lbann_default_random_seed;
lbann_comm *comm = initialize(argc, argv, random_seed);
bool master = comm->am_world_master();
if (master) std::cerr << "starting build_model_from_prototext\n";
model *model = nullptr; //d hysom bad namimg! should fix
try {
std::stringstream err;
options *opts = options::get();
// Optionally over-ride some values in prototext
get_cmdline_overrides(comm, pb);
lbann_data::Model *pb_model = pb.mutable_model();
// Adjust the number of parallel readers; this may be adjusted
// after calling split_models()
set_num_parallel_readers(comm, pb);
// Set algorithmic blocksize
if (pb_model->block_size() == 0 and master) {
err << __FILE__ << " " << __LINE__ << " :: model does not provide a valid block size: " << pb_model->block_size();
throw lbann_exception(err.str());
}
El::SetBlocksize(pb_model->block_size());
// Change random seed if needed.
if (pb_model->random_seed() > 0) {
random_seed = pb_model->random_seed();
// Reseed here so that setup is done with this new seed.
init_random(random_seed);
init_data_seq_random(random_seed);
}
// Initialize models differently if needed.
#ifndef LBANN_SEQUENTIAL_CONSISTENCY
if (pb_model->random_init_models_differently()) {
random_seed = random_seed + comm->get_model_rank();
// Reseed here so that setup is done with this new seed.
init_random(random_seed);
init_data_seq_random(random_seed);
}
#else
if (pb_model->random_init_models_differently()) {
if (master) {
std::cout << "WARNING: Ignoring random_init_models_differently " <<
"due to sequential consistency" << std::endl;
}
}
#endif
// Set up the communicator and get the grid.
int procs_per_model = pb_model->procs_per_model();
if (procs_per_model == 0) {
procs_per_model = comm->get_procs_in_world();
}
comm->split_models(procs_per_model);
if (pb_model->num_parallel_readers() > procs_per_model) {
pb_model->set_num_parallel_readers(procs_per_model);
}
if (master) {
std::cout << "Model settings" << std::endl
<< " Models : " << comm->get_num_models() << std::endl
<< " Processes per model : " << procs_per_model << std::endl
<< " Grid dimensions : " << comm->get_model_grid().Height() << " x " << comm->get_model_grid().Width() << std::endl;
std::cout << std::endl;
}
// Save info to file; this includes the complete prototext (with any over-rides
// from the cmd line) and various other info
//save_session(comm, argc, argv, pb);
// Check for cudnn, with user feedback
cudnn::cudnn_manager *cudnn = nullptr;
#ifdef LBANN_HAS_CUDNN
const size_t workspace_size = 1 << 9; // 1 GB
if (! pb_model->disable_cuda()) {
if (master) {
std::cerr << "code was compiled with LBANN_HAS_CUDNN, and we are using cudnn\n";
}
cudnn = new cudnn::cudnn_manager(workspace_size);
} else {
if (master) {
std::cerr << "code was compiled with LBANN_HAS_CUDNN, but we are NOT USING cudnn\n";
}
}
#else
if (master) {
std::cerr << "code was NOT compiled with LBANN_HAS_CUDNN\n";
}
#endif
if (master) {
std::cout << "Hardware settings (for master process)" << std::endl
<< " Processes on node : " << comm->get_procs_per_node() << std::endl
<< " OpenMP threads per process : " << omp_get_max_threads() << std::endl;
#ifdef LBANN_HAS_GPU
if (cudnn != nullptr) {
std::cout << " GPUs on node : " << El::GPUManager::NumDevices() << std::endl;
const auto* env = std::getenv("MV2_USE_CUDA");
std::cout << " MV2_USE_CUDA : " << (env != nullptr ? env : "") << std::endl;
}
#endif // LBANN_HAS_GPU
std::cout << std::endl;
}
// Display how the OpenMP threads are provisioned
if (opts->has_string("print_affinity")) {
display_omp_setup();
}
// Initialize data readers
//@todo: code not in place for correctly handling image preprocessing
std::map<execution_mode, generic_data_reader *> data_readers;
init_data_readers(comm, pb, data_readers);
// User feedback
print_parameters(comm, pb);
// Initalize model
model = proto::construct_model(comm,
cudnn,
data_readers,
pb.optimizer(),
pb.model());
model->setup();
// restart model from checkpoint if we have one
//@todo
//model->restartShared();
if (comm->am_world_master()) {
std::cout << std::endl;
std::cout << "Callbacks:" << std::endl;
for (lbann_callback *cb : model->get_callbacks()) {
std::cout << cb->name() << std::endl;
}
std::cout << std::endl;
const std::vector<Layer *>& layers = model->get_layers();
for (size_t h=0; h<layers.size(); h++) {
std::cout << h << " " << layers[h]->get_description() << std::endl;
}
}
#ifndef LBANN_SEQUENTIAL_CONSISTENCY
// Under normal conditions, reinitialize the random number generator so
// that regularization techniques (e.g. dropout) generate unique patterns
// on different ranks.
init_random(random_seed + comm->get_rank_in_world());
#else
if(comm->am_world_master()) {
std::cout <<
"--------------------------------------------------------------------------------\n"
"ALERT: executing in sequentially consistent mode -- performance will suffer\n"
"--------------------------------------------------------------------------------\n";
}
#endif
} catch (lbann_exception& e) {
lbann_report_exception(e, comm);
} catch (std::exception& e) {
El::ReportException(e); // Elemental exceptions
}
return model;
}
bool load_model_weights(std::string ckpt_dir, model * m){
std::vector<std::string> weight_list = std::vector<std::string>();
int epochLast = -1;
int stepLast = -1;
// define filename
char latest[1024];
sprintf(latest, "%s/last.shared.checkpoint", ckpt_dir.c_str());
// get last epoch and step saved.
int fd = openread(latest);
if (fd != -1) {
char field[256];
read_string(fd, "shared.last", field, sizeof(field));
int ret = sscanf(field, "epoch=%d step=%d\n", &epochLast, &stepLast);
if(ret != 2) { return false; }
closeread(fd, latest);
sprintf(latest, "%s/shared.epoch.%d.step.%d/", ckpt_dir.c_str() ,epochLast, stepLast);
}
DIR *weight_dir;
struct dirent *weight_file;
if((weight_dir = opendir(latest)) == NULL)
{
std::cout << "error opening " << latest << "\n";
return false;
}
// Populate weight list
while ((weight_file = readdir(weight_dir)) != NULL){
if(!strncmp(weight_file->d_name,"model_weights_",14))
weight_list.push_back(std::string(weight_file->d_name));
}
closedir(weight_dir);
// load weights that appear in weight list.
for(weights *w : m->get_weights()) {
w->load_from_save(latest,weight_list);
}
return true;
}
| 1 | 12,910 | @JaeseungYeom I believe you had set this in a previous commit to, by default, not train model1 when loading from ckpt. Would it work for you to have this as a command line option `--no_model1_train` instead of the default behavior? | LLNL-lbann | cpp |
@@ -86,6 +86,11 @@ namespace Microsoft.Rest.Generator.Ruby.TemplateModels
return "Array<Integer>";
}
+ if (type == PrimaryType.TimeSpan)
+ {
+ return "Duration"; //TODO: Is this a real Ruby type...?
+ }
+
if (compositeType != null)
{
return compositeType.Name; | 1 | // Copyright (c) Microsoft Open Technologies, Inc. All rights reserved.
// Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information.
using System;
using Microsoft.Rest.Generator.ClientModel;
using System.Linq;
namespace Microsoft.Rest.Generator.Ruby.TemplateModels
{
using Utilities;
using System.Collections.Generic;
/// <summary>
/// Keeps a few aux method used across all templates/models.
/// </summary>
public static class ClientModelExtensions
{
/// <summary>
/// Determines if a type can be assigned the value null.
/// </summary>
/// <param name="type">The type to check.</param>
/// <returns>True if null can be assigned, otherwise false.</returns>
public static bool IsNullable(this IType type)
{
return true;
}
/// <summary>
/// Simple conversion of the type to string.
/// </summary>
/// <param name="type">The type to convert</param>
/// <param name="reference">a reference to an instance of the type</param>
/// <returns></returns>
public static string ToString(this IType type, string reference)
{
var known = type as PrimaryType;
return (known != null && known == PrimaryType.String)
? reference
: string.Format("{0}.to_s", reference);
}
/// <summary>
/// Internal method for generating Yard-compatible representation of given type.
/// </summary>
/// <param name="type">The type doc needs to be generated for.</param>
/// <returns>Doc in form of string.</returns>
private static string PrepareTypeForDocRecursively(IType type)
{
var sequenceType = type as SequenceType;
var compositeType = type as CompositeType;
var enumType = type as EnumType;
var dictionaryType = type as DictionaryType;
if (type == PrimaryType.String)
{
return "String";
}
if (type == PrimaryType.Int || type == PrimaryType.Long)
{
return "Integer";
}
if (type == PrimaryType.Boolean)
{
return "Boolean";
}
if (type == PrimaryType.Double)
{
return "Float";
}
if (type == PrimaryType.Date)
{
return "Date";
}
if (type == PrimaryType.DateTime)
{
return "DateTime";
}
if (type == PrimaryType.ByteArray)
{
return "Array<Integer>";
}
if (compositeType != null)
{
return compositeType.Name;
}
if (enumType != null)
{
return enumType.Name;
}
if (sequenceType != null)
{
string internalString = PrepareTypeForDocRecursively(sequenceType.ElementType);
if (!string.IsNullOrEmpty(internalString))
{
return string.Format("Array<{0}>", internalString);
}
return string.Empty;
}
if (dictionaryType != null)
{
string internalString = PrepareTypeForDocRecursively(dictionaryType.ValueType);
if (!string.IsNullOrEmpty(internalString))
{
return string.Format("Hash{{String => {0}}}", internalString);
}
return string.Empty;
}
return string.Empty;
}
/// <summary>
/// Return the separator associated with a given collectionFormat.
/// </summary>
/// <param name="format">The collection format.</param>
/// <returns>The separator.</returns>
private static string GetSeparator(this CollectionFormat format)
{
switch (format)
{
case CollectionFormat.Csv:
return ",";
case CollectionFormat.Pipes:
return "|";
case CollectionFormat.Ssv:
return " ";
case CollectionFormat.Tsv:
return "\t";
default:
throw new NotSupportedException(string.Format("Collection format {0} is not supported.", format));
}
}
/// <summary>
/// Format the value of a sequence given the modeled element format. Note that only sequences of strings are supported.
/// </summary>
/// <param name="parameter">The parameter to format.</param>
/// <returns>A reference to the formatted parameter value.</returns>
public static string GetFormattedReferenceValue(this Parameter parameter)
{
SequenceType sequence = parameter.Type as SequenceType;
if (sequence == null)
{
return parameter.Type.ToString(parameter.Name);
}
PrimaryType primaryType = sequence.ElementType as PrimaryType;
EnumType enumType = sequence.ElementType as EnumType;
if (enumType != null && enumType.IsExpandable)
{
primaryType = PrimaryType.String;
}
if (primaryType != PrimaryType.String)
{
throw new InvalidOperationException(
string.Format("Cannot generate a formatted sequence from a " +
"non-string array parameter {0}", parameter));
}
return string.Format("{0}.join('{1}')", parameter.Name, parameter.CollectionFormat.GetSeparator());
}
/// <summary>
/// Generates Yard-compatible representation of given type.
/// </summary>
/// <param name="type">The type doc needs to be generated for.</param>
/// <returns>Doc in form of string.</returns>
public static string GetYardDocumentation(this IType type)
{
string typeForDoc = PrepareTypeForDocRecursively(type);
if (string.IsNullOrEmpty(typeForDoc))
{
return string.Empty;
}
return string.Format("[{0}] ", typeForDoc);
}
/// <summary>
/// Generate code to perform required validation on a type.
/// </summary>
/// <param name="type">The type to validate.</param>
/// <param name="scope">A scope provider for generating variable names as necessary.</param>
/// <param name="valueReference">A reference to the value being validated.</param>
/// <returns>The code to validate the reference of the given type.</returns>
public static string ValidateType(this IType type, IScopeProvider scope, string valueReference)
{
CompositeType model = type as CompositeType;
SequenceType sequence = type as SequenceType;
DictionaryType dictionary = type as DictionaryType;
if (model != null && model.Properties.Any())
{
return string.Format("{0}.validate unless {0}.nil?", valueReference);
}
if (sequence != null || dictionary != null)
{
return string.Format("{0}.each{{ |e| e.validate if e.respond_to?(:validate) }} unless {0}.nil?\r\n", valueReference);
}
return null;
}
/// <summary>
/// Determine whether a model should be serializable.
/// </summary>
/// <param name="type">The type to check.</param>
public static bool IsSerializable(this IType type)
{
var known = type as PrimaryType;
return (known != PrimaryType.Object);
}
/// <summary>
/// Verifies whether client includes model types.
/// </summary>
/// <param name="client">The client.</param>
/// <returns>True if client contain model types, false otherwise.</returns>
public static bool HasModelTypes(this ServiceClient client)
{
return client.ModelTypes.Any(mt => mt.Extensions.Count == 0);
}
/// <summary>
/// Generates Ruby code in form of string for deserializing object of given type.
/// </summary>
/// <param name="type">Type of object needs to be deserialized.</param>
/// <param name="scope">Current scope.</param>
/// <param name="valueReference">Reference to object which needs to be deserialized.</param>
/// <param name="namespacesToLookForClasses">List of namespaces where classes for polymorphic serialization can be found.</param>
/// <returns>Generated Ruby code in form of string.</returns>
public static string DeserializeType(
this IType type,
IScopeProvider scope,
string valueReference,
List<string> namespacesToLookForClasses)
{
var composite = type as CompositeType;
var sequence = type as SequenceType;
var dictionary = type as DictionaryType;
var primary = type as PrimaryType;
var enumType = type as EnumType;
var builder = new IndentedStringBuilder(" ");
if (primary != null)
{
if (primary == PrimaryType.Int || primary == PrimaryType.Long)
{
return builder.AppendLine("{0} = Integer({0}) unless {0}.to_s.empty?", valueReference).ToString();
}
if (primary == PrimaryType.Double)
{
return builder.AppendLine("{0} = Float({0}) unless {0}.to_s.empty?", valueReference).ToString();
}
if (primary == PrimaryType.ByteArray)
{
return builder.AppendLine("{0} = Base64.strict_decode64({0}).unpack('C*') unless {0}.to_s.empty?", valueReference).ToString();
}
if (primary == PrimaryType.Date)
{
return builder.AppendLine("{0} = MsRest::Serialization.deserialize_date({0}) unless {0}.to_s.empty?", valueReference).ToString();
}
if (primary == PrimaryType.DateTime)
{
return builder.AppendLine("{0} = DateTime.parse({0}) unless {0}.to_s.empty?", valueReference).ToString();
}
}
else if (enumType != null && !string.IsNullOrEmpty(enumType.Name))
{
return builder
.AppendLine("if (!{0}.nil? && !{0}.empty?)", valueReference)
.AppendLine(
" enum_is_valid = {0}.constants.any? {{ |e| {0}.const_get(e).to_s.downcase == {1}.downcase }}",
enumType.Name, valueReference)
.AppendLine(
" fail MsRest::DeserializationError.new('Error occured while deserializing the enum', nil, nil, nil) unless enum_is_valid")
.AppendLine("end")
.ToString();
}
else if (sequence != null)
{
var elementVar = scope.GetVariableName("element");
var innerSerialization = sequence.ElementType.DeserializeType(scope, elementVar, namespacesToLookForClasses);
if (!string.IsNullOrEmpty(innerSerialization))
{
return
builder
.AppendLine("unless {0}.nil?", valueReference)
.Indent()
.AppendLine("deserialized{0} = [];", sequence.Name)
.AppendLine("{0}.each do |{1}|", valueReference, elementVar)
.Indent()
.AppendLine(innerSerialization)
.AppendLine("deserialized{0}.push({1});", sequence.Name.ToPascalCase(), elementVar)
.Outdent()
.AppendLine("end")
.AppendLine("{0} = deserialized{1};", valueReference, sequence.Name.ToPascalCase())
.Outdent()
.AppendLine("end")
.ToString();
}
}
else if (dictionary != null)
{
var valueVar = scope.GetVariableName("valueElement");
var innerSerialization = dictionary.ValueType.DeserializeType(scope, valueVar, namespacesToLookForClasses);
if (!string.IsNullOrEmpty(innerSerialization))
{
return builder.AppendLine("unless {0}.nil?", valueReference)
.Indent()
.AppendLine("{0}.each do |key, {1}|", valueReference, valueVar)
.Indent()
.AppendLine(innerSerialization)
.AppendLine("{0}[key] = {1}", valueReference, valueVar)
.Outdent()
.AppendLine("end")
.Outdent()
.AppendLine("end").ToString();
}
}
else if (composite != null)
{
if (!string.IsNullOrEmpty(composite.PolymorphicDiscriminator))
{
builder
.AppendLine("unless {0}['dtype'].nil?", valueReference)
.Indent()
.AppendLine("class_name = {0}['dtype'].capitalize", valueReference)
.AppendLine("class_instance = Models.const_get(class_name)");
foreach (var ns in namespacesToLookForClasses)
{
builder
.AppendLine("class_instance = {0}.const_get(class_name) if class_instance.nil?", ns);
}
builder
.AppendLine("{0} = class_instance.deserialize_object({0})", valueReference)
.Outdent()
.AppendLine("else")
.Indent()
.AppendLine("{0} = {1}.deserialize_object({0})", valueReference, composite.Name)
.Outdent()
.AppendLine("end");
return builder.ToString();
}
return builder.AppendLine("unless {0}.nil?", valueReference)
.Indent()
.AppendLine("{0} = {1}.deserialize_object({0})", valueReference, composite.Name)
.Outdent()
.AppendLine("end").ToString();
}
return string.Empty;
}
/// <summary>
/// Generates Ruby code in form of string for serializing object of given type.
/// </summary>
/// <param name="type">Type of object needs to be serialized.</param>
/// <param name="scope">Current scope.</param>
/// <param name="valueReference">Reference to object which needs to serialized.</param>
/// <param name="namespacesToLookForClasses">List of namespaces where classes for polymorphic deserialization can be found.</param>
/// <returns>Generated Ruby code in form of string.</returns>
public static string SerializeType(
this IType type,
IScopeProvider scope,
string valueReference,
List<string> namespacesToLookForClasses)
{
var composite = type as CompositeType;
var sequence = type as SequenceType;
var dictionary = type as DictionaryType;
var primary = type as PrimaryType;
var builder = new IndentedStringBuilder(" ");
if (primary != null)
{
if (primary == PrimaryType.ByteArray)
{
return builder.AppendLine("{0} = Base64.strict_encode64({0}.pack('c*'))", valueReference).ToString();
}
if (primary == PrimaryType.DateTime)
{
return builder.AppendLine("{0} = {0}.new_offset(0).strftime('%FT%TZ')", valueReference).ToString();
}
}
else if (sequence != null)
{
var elementVar = scope.GetVariableName("element");
var innerSerialization = sequence.ElementType.SerializeType(scope, elementVar, namespacesToLookForClasses);
if (!string.IsNullOrEmpty(innerSerialization))
{
return
builder
.AppendLine("unless {0}.nil?", valueReference)
.Indent()
.AppendLine("serialized{0} = []", sequence.Name)
.AppendLine("{0}.each do |{1}|", valueReference, elementVar)
.Indent()
.AppendLine(innerSerialization)
.AppendLine("serialized{0}.push({1})", sequence.Name.ToPascalCase(), elementVar)
.Outdent()
.AppendLine("end")
.AppendLine("{0} = serialized{1}", valueReference, sequence.Name.ToPascalCase())
.Outdent()
.AppendLine("end")
.ToString();
}
}
else if (dictionary != null)
{
var valueVar = scope.GetVariableName("valueElement");
var innerSerialization = dictionary.ValueType.SerializeType(scope, valueVar, namespacesToLookForClasses);
if (!string.IsNullOrEmpty(innerSerialization))
{
return builder.AppendLine("unless {0}.nil?", valueReference)
.Indent()
.AppendLine("{0}.each {{ |key, {1}|", valueReference, valueVar)
.Indent()
.AppendLine(innerSerialization)
.AppendLine("{0}[key] = {1}", valueReference, valueVar)
.Outdent()
.AppendLine("}")
.Outdent()
.AppendLine("end").ToString();
}
}
else if (composite != null)
{
if (!string.IsNullOrEmpty(composite.PolymorphicDiscriminator))
{
builder
.AppendLine("unless {0}.dtype.nil?", valueReference)
.Indent()
.AppendLine("class_name = {0}.dtype.capitalize", valueReference)
.AppendLine("class_instance = Models.const_get(class_name)");
foreach (var ns in namespacesToLookForClasses)
{
builder
.AppendLine("class_instance = {0}.const_get(class_name) if class_instance.nil?", ns);
}
builder
.AppendLine("{0} = class_instance.serialize_object({0})", valueReference)
.Outdent()
.AppendLine("else")
.Indent()
.AppendLine("{0} = {1}.serialize_object({0})", valueReference, composite.Name)
.Outdent()
.AppendLine("end");
return builder.ToString();
}
return builder.AppendLine("unless {0}.nil?", valueReference)
.Indent()
.AppendLine("{0} = {1}.serialize_object({0})", valueReference, composite.Name)
.Outdent()
.AppendLine("end").ToString();
}
return string.Empty;
}
}
}
| 1 | 21,148 | @devigned, please clarify | Azure-autorest | java |
@@ -19,6 +19,7 @@ import (
"reflect"
"testing"
"time"
+ "github.com/jmcvetta/randutil"
)
func TestConcurrentCommand_UnmarshalJSON(t *testing.T) { | 1 | // Copyright 2018 Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this currentFile except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package script
import (
"encoding/json"
"reflect"
"testing"
"time"
)
func TestConcurrentCommand_UnmarshalJSON(t *testing.T) {
tests := []struct {
input []byte
command ConcurrentCommand
err error
}{
{
[]byte(`[]`),
ConcurrentCommand{},
nil,
},
{
[]byte(`[{"sleep": "1s"}]`),
ConcurrentCommand{
SleepCommand(1 * time.Second),
},
nil,
},
{
[]byte(`[{"call": "A"}, {"sleep": "10ms"}]`),
ConcurrentCommand{
RequestCommand{ServiceName: "A"},
SleepCommand(10 * time.Millisecond),
},
nil,
},
}
for _, test := range tests {
test := test
t.Run("", func(t *testing.T) {
t.Parallel()
var command ConcurrentCommand
err := json.Unmarshal(test.input, &command)
if test.err != err {
t.Errorf("expected %v; actual %v", test.err, err)
}
if !reflect.DeepEqual(test.command, command) {
t.Errorf("expected %v; actual %v", test.command, command)
}
})
}
}
| 1 | 6,724 | File is not `gofmt`-ed with `-s` (from `gofmt`) | istio-tools | go |
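For reference, `gofmt`'s simplification mode is applied with the `-s` flag: `gofmt -s -l <path>` lists files that would change, and `gofmt -s -w <path>` rewrites them in place, which is presumably what the bot is asking for here.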
@@ -127,7 +127,7 @@ def GenerateConfig(context):
'items': [{
'key': 'startup-script',
'value': """#!/bin/bash
-sudo apt-get install -y unzip
+sudo apt-get install -y unzip git
sudo apt-get install -y libmysqlclient-dev
sudo apt-get install -y python-pip python-dev
| 1 | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Creates a GCE instance template for Forseti Security."""
def GenerateConfig(context):
"""Generate configuration."""
if context.properties.get('branch-name'):
DOWNLOAD_FORSETI = """
git clone {}.git --branch {} --single-branch forseti-security
cd forseti-security
""".format(
context.properties['src-path'],
context.properties['branch-name'])
else:
DOWNLOAD_FORSETI = """
wget -qO- {}/archive/v{}.tar.gz | tar xvz
cd forseti-security-{}
""".format(
context.properties['src-path'],
context.properties['release-version'],
context.properties['release-version'])
CLOUDSQL_CONN_STRING = '{}:{}:{}'.format(
context.env['project'],
'$(ref.cloudsql-instance.region)',
'$(ref.cloudsql-instance.name)')
SCANNER_BUCKET = context.properties['scanner-bucket']
DATABASE_NAME = context.properties['database-name']
SHOULD_INVENTORY_GROUPS = bool(context.properties['inventory-groups'])
SERVICE_ACCOUNT_SCOPES = context.properties['service-account-scopes']
inventory_command = '/usr/local/bin/forseti_inventory --organization_id {} --db_name {} '.format(
context.properties['organization-id'],
DATABASE_NAME,
)
scanner_command = '/usr/local/bin/forseti_scanner --rules {} --output_path {} --organization_id {} --db_name {} '.format(
'gs://{}/rules/rules.yaml'.format(SCANNER_BUCKET),
'gs://{}/scanner_violations'.format(SCANNER_BUCKET),
context.properties['organization-id'],
DATABASE_NAME,
)
# Extend the commands, based on whether email is required.
SENDGRID_API_KEY = context.properties.get('sendgrid-api-key')
EMAIL_SENDER = context.properties.get('email-sender')
EMAIL_RECIPIENT = context.properties.get('email-recipient')
if EMAIL_RECIPIENT is not None:
email_flags = '--sendgrid_api_key {} --email_sender {} --email_recipient {}'.format(
SENDGRID_API_KEY,
EMAIL_SENDER,
EMAIL_RECIPIENT,
)
inventory_command = inventory_command + email_flags
scanner_command = scanner_command + email_flags
# Extend the commands, based on whether inventory-groups is set.
if SHOULD_INVENTORY_GROUPS:
GROUPS_DOMAIN_SUPER_ADMIN_EMAIL = context.properties[
'groups-domain-super-admin-email']
GROUPS_SERVICE_ACCOUNT_KEY_FILE = context.properties[
'groups-service-account-key-file']
inventory_groups_flags = ' --inventory_groups --domain_super_admin_email {} --groups_service_account_key_file {}'.format(
GROUPS_DOMAIN_SUPER_ADMIN_EMAIL,
GROUPS_SERVICE_ACCOUNT_KEY_FILE,
)
inventory_command = inventory_command + inventory_groups_flags
resources = []
resources.append({
'name': '{}-vm'.format(context.env['deployment']),
'type': 'compute.v1.instance',
'properties': {
'zone': context.properties['zone'],
'machineType': (
'https://www.googleapis.com/compute/v1/projects/{}'
'/zones/{}/machineTypes/{}'.format(
context.env['project'], context.properties['zone'],
context.properties['instance-type'])),
'disks': [{
'deviceName': 'boot',
'type': 'PERSISTENT',
'boot': True,
'autoDelete': True,
'initializeParams': {
'sourceImage': (
'https://www.googleapis.com/compute/v1'
'/projects/{}/global/images/family/{}'.format(
context.properties['image-project'],
context.properties['image-family']
)
)
}
}],
'networkInterfaces': [{
'network': (
'https://www.googleapis.com/compute/v1/'
'projects/{}/global/networks/default'.format(
context.env['project'])),
'accessConfigs': [{
'name': 'External NAT',
'type': 'ONE_TO_ONE_NAT'
}]
}],
'serviceAccounts': [{
'email': context.properties['service-account'],
'scopes': SERVICE_ACCOUNT_SCOPES,
}],
'metadata': {
'items': [{
'key': 'startup-script',
'value': """#!/bin/bash
sudo apt-get install -y unzip
sudo apt-get install -y libmysqlclient-dev
sudo apt-get install -y python-pip python-dev
USER_HOME=/home/ubuntu
FORSETI_PROTOC_URL=https://raw.githubusercontent.com/GoogleCloudPlatform/forseti-security/master/data/protoc_url.txt
# Install fluentd if necessary
FLUENTD=$(ls /usr/sbin/google-fluentd)
if [ -z "$FLUENTD" ]; then
cd $USER_HOME
curl -sSO https://dl.google.com/cloudagents/install-logging-agent.sh
bash install-logging-agent.sh
fi
# Check whether Cloud SQL proxy is installed
CLOUD_SQL_PROXY=$(ls $USER_HOME/cloud_sql_proxy)
if [ -z "$CLOUD_SQL_PROXY" ]; then
cd $USER_HOME
wget https://dl.google.com/cloudsql/cloud_sql_proxy.{}
mv cloud_sql_proxy.{} cloud_sql_proxy
chmod +x cloud_sql_proxy
fi
$USER_HOME/cloud_sql_proxy -instances={}=tcp:{} &
# Check if rules.yaml exists
RULES_FILE=$(gsutil ls gs://{}/rules/rules.yaml)
if [ $? -eq 1 ]; then
cd $USER_HOME
read -d '' RULES_YAML << EOF
rules:
- name: sample whitelist
mode: whitelist
resource:
- type: organization
applies_to: self_and_children
resource_ids:
- {}
inherit_from_parents: true
bindings:
- role: roles/*
members:
- serviceAccount:*@*.gserviceaccount.com
EOF
echo "$RULES_YAML" > $USER_HOME/rules.yaml
gsutil cp $USER_HOME/rules.yaml gs://{}/rules/rules.yaml
fi
# Check whether protoc is installed
PROTOC_PATH=$(which protoc)
if [ -z "$PROTOC_PATH" ]; then
cd $USER_HOME
PROTOC_DOWNLOAD_URL=$(curl -s $FORSETI_PROTOC_URL)
if [ -z "$PROTOC_DOWNLOAD_URL" ]; then
echo "No PROTOC_DOWNLOAD_URL set: $PROTOC_DOWNLOAD_URL"
exit 1
else
wget $PROTOC_DOWNLOAD_URL
unzip -o $(basename $PROTOC_DOWNLOAD_URL)
sudo cp bin/protoc /usr/local/bin
fi
fi
# Install Forseti Security
cd $USER_HOME
rm -rf forseti-*
pip install --upgrade pip
pip install --upgrade setuptools
cd $USER_HOME
# Download Forseti src; see DOWNLOAD_FORSETI
{}
python setup.py install
# Create the startup run script
read -d '' RUN_FORSETI << EOF
#!/bin/bash
# inventory command
{}
# scanner command
{}
EOF
echo "$RUN_FORSETI" > $USER_HOME/run_forseti.sh
chmod +x $USER_HOME/run_forseti.sh
(echo "0 * * * * $USER_HOME/run_forseti.sh") | crontab -
""".format(
# cloud_sql_proxy
context.properties['cloudsqlproxy-os-arch'],
context.properties['cloudsqlproxy-os-arch'],
CLOUDSQL_CONN_STRING,
context.properties['db-port'],
# rules.yaml
SCANNER_BUCKET,
context.properties['organization-id'],
SCANNER_BUCKET,
# install forseti
DOWNLOAD_FORSETI,
# run_forseti.sh
# - forseti_inventory
inventory_command,
# - forseti_scanner
scanner_command,
)
}]
}
}
})
return {'resources': resources}
| 1 | 25,519 | Should we break this up into a separate line? | forseti-security-forseti-security | py |
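The question above is about the `sudo apt-get install -y unzip git` line added in the patch. A minimal sketch of the split-per-line alternative the reviewer is floating, written as the same kind of startup-script template string (illustrative only, not the deployed template):

```python
# Illustrative only: one package install per line, as the review question suggests.
STARTUP_SCRIPT = """#!/bin/bash
sudo apt-get install -y unzip
sudo apt-get install -y git
sudo apt-get install -y libmysqlclient-dev
sudo apt-get install -y python-pip python-dev
"""

if __name__ == "__main__":
    print(STARTUP_SCRIPT)
```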
@@ -57,6 +57,7 @@ var _ = Describe("conformance tests", func() {
Measure(specName, func(b Benchmarker) {
name := fmt.Sprintf("cluster-%s", util.RandomString(6))
+ setEnvVar("USE_CI_ARTIFACTS", "true", false)
kubernetesVersion := e2eConfig.GetVariable(KubernetesVersion)
flavor := clusterctl.DefaultFlavor
if useCIArtifacts {
| 1 |
// +build e2e
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"context"
"fmt"
"path/filepath"
"strconv"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
"k8s.io/utils/pointer"
"sigs.k8s.io/cluster-api/test/framework/clusterctl"
"sigs.k8s.io/cluster-api/test/framework/kubernetesversions"
"sigs.k8s.io/cluster-api/test/framework/kubetest"
"sigs.k8s.io/cluster-api/util"
)
const (
AMIPrefix = "capa-ami-ubuntu-18.04-"
DefaultImageLookupOrg = "258751437250"
)
var _ = Describe("conformance tests", func() {
var (
namespace *corev1.Namespace
ctx context.Context
specName = "conformance"
)
BeforeEach(func() {
Expect(bootstrapClusterProxy).ToNot(BeNil(), "Invalid argument. BootstrapClusterProxy can't be nil")
Expect(e2eConfig).ToNot(BeNil(), "Invalid argument. e2eConfig can't be nil when calling %s spec", specName)
Expect(e2eConfig.Variables).To(HaveKey(KubernetesVersion))
ctx = context.TODO()
// Setup a Namespace where to host objects for this spec and create a watcher for the namespace events.
namespace = setupSpecNamespace(ctx, specName, bootstrapClusterProxy, artifactFolder)
})
Measure(specName, func(b Benchmarker) {
name := fmt.Sprintf("cluster-%s", util.RandomString(6))
kubernetesVersion := e2eConfig.GetVariable(KubernetesVersion)
flavor := clusterctl.DefaultFlavor
if useCIArtifacts {
flavor = "conformance-ci-artifacts"
var err error
kubernetesVersion, err = kubernetesversions.LatestCIRelease()
Expect(err).NotTo(HaveOccurred())
}
workerMachineCount, err := strconv.ParseInt(e2eConfig.GetVariable("CONFORMANCE_WORKER_MACHINE_COUNT"), 10, 64)
Expect(err).NotTo(HaveOccurred())
controlPlaneMachineCount, err := strconv.ParseInt(e2eConfig.GetVariable("CONFORMANCE_CONTROL_PLANE_MACHINE_COUNT"), 10, 64)
Expect(err).NotTo(HaveOccurred())
runtime := b.Time("cluster creation", func() {
_ = clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{
ClusterProxy: bootstrapClusterProxy,
ConfigCluster: clusterctl.ConfigClusterInput{
LogFolder: filepath.Join(artifactFolder, "clusters", bootstrapClusterProxy.GetName()),
ClusterctlConfigPath: clusterctlConfigPath,
KubeconfigPath: bootstrapClusterProxy.GetKubeconfigPath(),
InfrastructureProvider: clusterctl.DefaultInfrastructureProvider,
Flavor: flavor,
Namespace: namespace.Name,
ClusterName: name,
KubernetesVersion: kubernetesVersion,
ControlPlaneMachineCount: pointer.Int64Ptr(controlPlaneMachineCount),
WorkerMachineCount: pointer.Int64Ptr(workerMachineCount),
},
WaitForClusterIntervals: e2eConfig.GetIntervals(specName, "wait-cluster"),
WaitForControlPlaneIntervals: e2eConfig.GetIntervals(specName, "wait-control-plane"),
WaitForMachineDeployments: e2eConfig.GetIntervals(specName, "wait-worker-nodes"),
})
})
b.RecordValue("cluster creation", runtime.Seconds())
workloadProxy := bootstrapClusterProxy.GetWorkloadCluster(ctx, namespace.Name, name)
runtime = b.Time("conformance suite", func() {
kubetest.Run(
kubetest.RunInput{
ClusterProxy: workloadProxy,
NumberOfNodes: int(workerMachineCount),
ConfigFilePath: kubetestConfigFilePath,
},
)
})
b.RecordValue("conformance suite run time", runtime.Seconds())
}, 1)
AfterEach(func() {
// Dumps all the resources in the spec namespace, then cleanups the cluster object and the spec namespace itself.
dumpSpecResourcesAndCleanup(ctx, "", bootstrapClusterProxy, artifactFolder, namespace, e2eConfig.GetIntervals, skipCleanup)
})
})
| 1 | 16,753 | Why the override on the env variable? Wouldn't we want to use the value set from Prow? | kubernetes-sigs-cluster-api-provider-aws | go
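The reviewer's point is that hard-coding `USE_CI_ARTIFACTS` inside the spec hides whatever value the CI job (e.g. Prow) already exports. Below is a hypothetical sketch of reading the variable instead of overriding it; the helper name and fallback behaviour are assumptions, not code from the repository.

```go
// Hypothetical sketch: honour the USE_CI_ARTIFACTS value exported by the CI job.
package main

import (
	"fmt"
	"os"
	"strconv"
)

func useCIArtifactsFromEnv(defaultValue bool) bool {
	raw, ok := os.LookupEnv("USE_CI_ARTIFACTS")
	if !ok {
		return defaultValue // nothing exported by the job, keep the default
	}
	parsed, err := strconv.ParseBool(raw)
	if err != nil {
		return defaultValue // malformed value, keep the default
	}
	return parsed
}

func main() {
	fmt.Println(useCIArtifactsFromEnv(false))
}
```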
@@ -0,0 +1,13 @@
+widget :projected_monthly_revenue do
+ key "74b333b94cd772ee15be863e646f8ca1e535f1c6"
+ type "number_and_secondary"
+ data do
+ {
+ value:
+ IndividualPlan.prime_basic.projected_monthly_revenue +
+ IndividualPlan.prime_workshops.projected_monthly_revenue +
+ IndividualPlan.prime_with_mentoring.projected_monthly_revenue +
+ TeamPlan.instance.projected_monthly_revenue
+ }
+ end
+end | 1 | 1 | 7,893 | I don't see anything here that excludes people on a Team plan from from the revenue report. I see that TeamPlans are counted below, but the individual subscribers on the plan would still be included. | thoughtbot-upcase | rb |
|
@@ -1078,11 +1078,8 @@ public final class Vector<T> implements IndexedSeq<T>, Serializable {
@Override
public Vector<T> subSequence(int beginIndex, int endIndex) {
- if ((beginIndex >= 0) && (beginIndex <= endIndex) && (endIndex <= length())) {
- return slice(beginIndex, endIndex);
- } else {
- throw new IndexOutOfBoundsException("subSequence(" + beginIndex + ", " + endIndex + ") on Vector of size " + length());
- }
+ Collections.subSequenceRangeCheck(beginIndex, endIndex, length());
+ return slice(beginIndex, endIndex);
}
@Override
| 1 |
/* __ __ __ __ __ ___
* \ \ / / \ \ / / __/
* \ \/ / /\ \ \/ / /
* \____/__/ \__\____/__/.ɪᴏ
* ᶜᵒᵖʸʳᶦᵍʰᵗ ᵇʸ ᵛᵃᵛʳ ⁻ ˡᶦᶜᵉⁿˢᵉᵈ ᵘⁿᵈᵉʳ ᵗʰᵉ ᵃᵖᵃᶜʰᵉ ˡᶦᶜᵉⁿˢᵉ ᵛᵉʳˢᶦᵒⁿ ᵗʷᵒ ᵈᵒᵗ ᶻᵉʳᵒ
*/
package io.vavr.collection;
import io.vavr.*;
import io.vavr.collection.VectorModule.Combinations;
import io.vavr.control.Option;
import java.io.Serializable;
import java.util.*;
import java.util.function.*;
import java.util.stream.Collector;
import static io.vavr.collection.Collections.withSize;
/**
* Vector is the default Seq implementation that provides effectively constant time access to any element.
* Many other operations (e.g. `tail`, `drop`, `slice`) are also effectively constant.
*
* The implementation is based on a `bit-mapped trie`, a very wide and shallow tree (i.e. depth ≤ 6).
*
* @param <T> Component type of the Vector.
* @author Ruslan Sennov, Pap Lőrinc
* @since 2.0.0
*/
@SuppressWarnings("deprecation")
public final class Vector<T> implements IndexedSeq<T>, Serializable {
private static final long serialVersionUID = 1L;
private static final Vector<?> EMPTY = new Vector<>(BitMappedTrie.empty());
final BitMappedTrie<T> trie;
private Vector(BitMappedTrie<T> trie) { this.trie = trie; }
@SuppressWarnings("ObjectEquality")
private Vector<T> wrap(BitMappedTrie<T> trie) {
return (trie == this.trie)
? this
: ofAll(trie);
}
private static <T> Vector<T> ofAll(BitMappedTrie<T> trie) {
return (trie.length() == 0)
? empty()
: new Vector<>(trie);
}
/**
* Returns the empty Vector.
*
* @param <T> Component type.
* @return The empty Vector.
*/
@SuppressWarnings("unchecked")
public static <T> Vector<T> empty() { return (Vector<T>) EMPTY; }
/**
* Returns a {@link Collector} which may be used in conjunction with
* {@link java.util.stream.Stream#collect(Collector)} to obtain a {@link Vector}.
*
* @param <T> Component type of the Vector.
* @return A io.vavr.collection.List Collector.
*/
public static <T> Collector<T, ArrayList<T>, Vector<T>> collector() {
final Supplier<ArrayList<T>> supplier = ArrayList::new;
final BiConsumer<ArrayList<T>, T> accumulator = ArrayList::add;
final BinaryOperator<ArrayList<T>> combiner = (left, right) -> {
left.addAll(right);
return left;
};
final Function<ArrayList<T>, Vector<T>> finisher = Vector::ofAll;
return Collector.of(supplier, accumulator, combiner, finisher);
}
/**
* Narrows a widened {@code Vector<? extends T>} to {@code Vector<T>}
* by performing a type-safe cast. This is eligible because immutable/read-only
* collections are covariant.
*
* @param vector An {@code Vector}.
* @param <T> Component type of the {@code Vector}.
* @return the given {@code vector} instance as narrowed type {@code Vector<T>}.
*/
@SuppressWarnings("unchecked")
public static <T> Vector<T> narrow(Vector<? extends T> vector) { return (Vector<T>) vector; }
/**
* Returns a singleton {@code Vector}, i.e. a {@code Vector} of one element.
*
* @param element An element.
* @param <T> The component type
* @return A new Vector instance containing the given element
*/
public static <T> Vector<T> of(T element) {
return ofAll(Iterator.of(element));
}
/**
* Creates a Vector of the given elements.
*
* @param <T> Component type of the Vector.
* @param elements Zero or more elements.
* @return A vector containing the given elements in the same order.
* @throws NullPointerException if {@code elements} is null
*/
@SafeVarargs
@SuppressWarnings("varargs")
public static <T> Vector<T> of(T... elements) {
Objects.requireNonNull(elements, "elements is null");
return ofAll(BitMappedTrie.ofAll(elements));
}
/**
* Returns a Vector containing {@code n} values of a given Function {@code f}
* over a range of integer values from 0 to {@code n - 1}.
*
* @param <T> Component type of the Vector
* @param n The number of elements in the Vector
* @param f The Function computing element values
* @return A Vector consisting of elements {@code f(0),f(1), ..., f(n - 1)}
* @throws NullPointerException if {@code f} is null
*/
public static <T> Vector<T> tabulate(int n, Function<? super Integer, ? extends T> f) {
Objects.requireNonNull(f, "f is null");
return io.vavr.collection.Collections.tabulate(n, f, empty(), Vector::of);
}
/**
* Returns a Vector containing {@code n} values supplied by a given Supplier {@code s}.
*
* @param <T> Component type of the Vector
* @param n The number of elements in the Vector
* @param s The Supplier computing element values
* @return A Vector of size {@code n}, where each element contains the result supplied by {@code s}.
* @throws NullPointerException if {@code s} is null
*/
public static <T> Vector<T> fill(int n, Supplier<? extends T> s) {
Objects.requireNonNull(s, "s is null");
return io.vavr.collection.Collections.fill(n, s, empty(), Vector::of);
}
/**
* Creates a Vector of the given elements.
* <p>
* The resulting vector has the same iteration order as the given iterable of elements
* if the iteration order of the elements is stable.
*
* @param <T> Component type of the Vector.
* @param iterable An Iterable of elements.
* @return A vector containing the given elements in the same order.
* @throws NullPointerException if {@code elements} is null
*/
@SuppressWarnings("unchecked")
public static <T> Vector<T> ofAll(Iterable<? extends T> iterable) {
Objects.requireNonNull(iterable, "iterable is null");
if (iterable instanceof Vector) {
return (Vector<T>) iterable;
} else {
final Object[] values = withSize(iterable).toArray();
return ofAll(BitMappedTrie.ofAll(values));
}
}
/**
* Creates a Vector that contains the elements of the given {@link java.util.stream.Stream}.
*
* @param javaStream A {@link java.util.stream.Stream}
* @param <T> Component type of the Stream.
* @return A Vector containing the given elements in the same order.
*/
public static <T> Vector<T> ofAll(java.util.stream.Stream<? extends T> javaStream) {
Objects.requireNonNull(javaStream, "javaStream is null");
return ofAll(Iterator.ofAll(javaStream.iterator()));
}
/**
* Creates a Vector from boolean values.
*
* @param elements boolean values
* @return A new Vector of Boolean values
* @throws NullPointerException if elements is null
*/
public static Vector<Boolean> ofAll(boolean... elements) {
Objects.requireNonNull(elements, "elements is null");
return ofAll(BitMappedTrie.ofAll(elements));
}
/**
* Creates a Vector from byte values.
*
* @param elements byte values
* @return A new Vector of Byte values
* @throws NullPointerException if elements is null
*/
public static Vector<Byte> ofAll(byte... elements) {
Objects.requireNonNull(elements, "elements is null");
return ofAll(BitMappedTrie.ofAll(elements));
}
/**
* Creates a Vector from char values.
*
* @param elements char values
* @return A new Vector of Character values
* @throws NullPointerException if elements is null
*/
public static Vector<Character> ofAll(char... elements) {
Objects.requireNonNull(elements, "elements is null");
return ofAll(BitMappedTrie.ofAll(elements));
}
/**
* Creates a Vector from double values.
*
* @param elements double values
* @return A new Vector of Double values
* @throws NullPointerException if elements is null
*/
public static Vector<Double> ofAll(double... elements) {
Objects.requireNonNull(elements, "elements is null");
return ofAll(BitMappedTrie.ofAll(elements));
}
/**
* Creates a Vector from float values.
*
* @param elements float values
* @return A new Vector of Float values
* @throws NullPointerException if elements is null
*/
public static Vector<Float> ofAll(float... elements) {
Objects.requireNonNull(elements, "elements is null");
return ofAll(BitMappedTrie.ofAll(elements));
}
/**
* Creates a Vector from int values.
*
* @param elements int values
* @return A new Vector of Integer values
* @throws NullPointerException if elements is null
*/
public static Vector<Integer> ofAll(int... elements) {
Objects.requireNonNull(elements, "elements is null");
return ofAll(BitMappedTrie.ofAll(elements));
}
/**
* Creates a Vector from long values.
*
* @param elements long values
* @return A new Vector of Long values
* @throws NullPointerException if elements is null
*/
public static Vector<Long> ofAll(long... elements) {
Objects.requireNonNull(elements, "elements is null");
return ofAll(BitMappedTrie.ofAll(elements));
}
/**
* Creates a Vector from short values.
*
* @param elements short values
* @return A new Vector of Short values
* @throws NullPointerException if elements is null
*/
public static Vector<Short> ofAll(short... elements) {
Objects.requireNonNull(elements, "elements is null");
return ofAll(BitMappedTrie.ofAll(elements));
}
public static Vector<Character> range(char from, char toExclusive) {
return ofAll(ArrayType.<char[]> asPrimitives(char.class, Iterator.range(from, toExclusive)));
}
public static Vector<Character> rangeBy(char from, char toExclusive, int step) {
return ofAll(ArrayType.<char[]> asPrimitives(char.class, Iterator.rangeBy(from, toExclusive, step)));
}
@GwtIncompatible
public static Vector<Double> rangeBy(double from, double toExclusive, double step) {
return ofAll(ArrayType.<double[]> asPrimitives(double.class, Iterator.rangeBy(from, toExclusive, step)));
}
/**
* Creates a Vector of int numbers starting from {@code from}, extending to {@code toExclusive - 1}.
* <p>
* Examples:
* <pre>
* <code>
* Vector.range(0, 0) // = Vector()
* Vector.range(2, 0) // = Vector()
* Vector.range(-2, 2) // = Vector(-2, -1, 0, 1)
* </code>
* </pre>
*
* @param from the first number
* @param toExclusive the last number + 1
* @return a range of int values as specified or the empty range if {@code from >= toExclusive}
*/
public static Vector<Integer> range(int from, int toExclusive) {
return ofAll(ArrayType.<int[]> asPrimitives(int.class, Iterator.range(from, toExclusive)));
}
/**
* Creates a Vector of int numbers starting from {@code from}, extending to {@code toExclusive - 1},
* with {@code step}.
* <p>
* Examples:
* <pre>
* <code>
* Vector.rangeBy(1, 3, 1) // = Vector(1, 2)
* Vector.rangeBy(1, 4, 2) // = Vector(1, 3)
* Vector.rangeBy(4, 1, -2) // = Vector(4, 2)
* Vector.rangeBy(4, 1, 2) // = Vector()
* </code>
* </pre>
*
* @param from the first number
* @param toExclusive the last number + 1
* @param step the step
* @return a range of long values as specified or the empty range if<br>
* {@code from >= toInclusive} and {@code step > 0} or<br>
* {@code from <= toInclusive} and {@code step < 0}
* @throws IllegalArgumentException if {@code step} is zero
*/
public static Vector<Integer> rangeBy(int from, int toExclusive, int step) {
return ofAll(ArrayType.<int[]> asPrimitives(int.class, Iterator.rangeBy(from, toExclusive, step)));
}
/**
* Creates a Vector of long numbers starting from {@code from}, extending to {@code toExclusive - 1}.
* <p>
* Examples:
* <pre>
* <code>
* Vector.range(0L, 0L) // = Vector()
* Vector.range(2L, 0L) // = Vector()
* Vector.range(-2L, 2L) // = Vector(-2L, -1L, 0L, 1L)
* </code>
* </pre>
*
* @param from the first number
* @param toExclusive the last number + 1
* @return a range of long values as specified or the empty range if {@code from >= toExclusive}
*/
public static Vector<Long> range(long from, long toExclusive) {
return ofAll(ArrayType.<long[]> asPrimitives(long.class, Iterator.range(from, toExclusive)));
}
/**
* Creates a Vector of long numbers starting from {@code from}, extending to {@code toExclusive - 1},
* with {@code step}.
* <p>
* Examples:
* <pre>
* <code>
* Vector.rangeBy(1L, 3L, 1L) // = Vector(1L, 2L)
* Vector.rangeBy(1L, 4L, 2L) // = Vector(1L, 3L)
* Vector.rangeBy(4L, 1L, -2L) // = Vector(4L, 2L)
* Vector.rangeBy(4L, 1L, 2L) // = Vector()
* </code>
* </pre>
*
* @param from the first number
* @param toExclusive the last number + 1
* @param step the step
* @return a range of long values as specified or the empty range if<br>
* {@code from >= toInclusive} and {@code step > 0} or<br>
* {@code from <= toInclusive} and {@code step < 0}
* @throws IllegalArgumentException if {@code step} is zero
*/
public static Vector<Long> rangeBy(long from, long toExclusive, long step) {
return ofAll(ArrayType.<long[]> asPrimitives(long.class, Iterator.rangeBy(from, toExclusive, step)));
}
public static Vector<Character> rangeClosed(char from, char toInclusive) {
return ofAll(ArrayType.<char[]> asPrimitives(char.class, Iterator.rangeClosed(from, toInclusive)));
}
public static Vector<Character> rangeClosedBy(char from, char toInclusive, int step) {
return ofAll(ArrayType.<char[]> asPrimitives(char.class, Iterator.rangeClosedBy(from, toInclusive, step)));
}
@GwtIncompatible
public static Vector<Double> rangeClosedBy(double from, double toInclusive, double step) {
return ofAll(ArrayType.<double[]> asPrimitives(double.class, Iterator.rangeClosedBy(from, toInclusive, step)));
}
/**
* Creates a Vector of int numbers starting from {@code from}, extending to {@code toInclusive}.
* <p>
* Examples:
* <pre>
* <code>
* Vector.rangeClosed(0, 0) // = Vector(0)
* Vector.rangeClosed(2, 0) // = Vector()
* Vector.rangeClosed(-2, 2) // = Vector(-2, -1, 0, 1, 2)
* </code>
* </pre>
*
* @param from the first number
* @param toInclusive the last number
* @return a range of int values as specified or the empty range if {@code from > toInclusive}
*/
public static Vector<Integer> rangeClosed(int from, int toInclusive) {
return ofAll(ArrayType.<int[]> asPrimitives(int.class, Iterator.rangeClosed(from, toInclusive)));
}
/**
* Creates a Vector of int numbers starting from {@code from}, extending to {@code toInclusive},
* with {@code step}.
* <p>
* Examples:
* <pre>
* <code>
* Vector.rangeClosedBy(1, 3, 1) // = Vector(1, 2, 3)
* Vector.rangeClosedBy(1, 4, 2) // = Vector(1, 3)
* Vector.rangeClosedBy(4, 1, -2) // = Vector(4, 2)
* Vector.rangeClosedBy(4, 1, 2) // = Vector()
* </code>
* </pre>
*
* @param from the first number
* @param toInclusive the last number
* @param step the step
* @return a range of int values as specified or the empty range if<br>
* {@code from > toInclusive} and {@code step > 0} or<br>
* {@code from < toInclusive} and {@code step < 0}
* @throws IllegalArgumentException if {@code step} is zero
*/
public static Vector<Integer> rangeClosedBy(int from, int toInclusive, int step) {
return ofAll(ArrayType.<int[]> asPrimitives(int.class, Iterator.rangeClosedBy(from, toInclusive, step)));
}
/**
* Creates a Vector of long numbers starting from {@code from}, extending to {@code toInclusive}.
* <p>
* Examples:
* <pre>
* <code>
* Vector.rangeClosed(0L, 0L) // = Vector(0L)
* Vector.rangeClosed(2L, 0L) // = Vector()
* Vector.rangeClosed(-2L, 2L) // = Vector(-2L, -1L, 0L, 1L, 2L)
* </code>
* </pre>
*
* @param from the first number
* @param toInclusive the last number
* @return a range of long values as specified or the empty range if {@code from > toInclusive}
*/
public static Vector<Long> rangeClosed(long from, long toInclusive) {
return ofAll(ArrayType.<long[]> asPrimitives(long.class, Iterator.rangeClosed(from, toInclusive)));
}
/**
* Creates a Vector of long numbers starting from {@code from}, extending to {@code toInclusive},
* with {@code step}.
* <p>
* Examples:
* <pre>
* <code>
* Vector.rangeClosedBy(1L, 3L, 1L) // = Vector(1L, 2L, 3L)
* Vector.rangeClosedBy(1L, 4L, 2L) // = Vector(1L, 3L)
* Vector.rangeClosedBy(4L, 1L, -2L) // = Vector(4L, 2L)
* Vector.rangeClosedBy(4L, 1L, 2L) // = Vector()
* </code>
* </pre>
*
* @param from the first number
* @param toInclusive the last number
* @param step the step
* @return a range of int values as specified or the empty range if<br>
* {@code from > toInclusive} and {@code step > 0} or<br>
* {@code from < toInclusive} and {@code step < 0}
* @throws IllegalArgumentException if {@code step} is zero
*/
public static Vector<Long> rangeClosedBy(long from, long toInclusive, long step) {
return ofAll(ArrayType.<long[]> asPrimitives(long.class, Iterator.rangeClosedBy(from, toInclusive, step)));
}
/**
* Transposes the rows and columns of a {@link Vector} matrix.
*
* @param <T> matrix element type
* @param matrix to be transposed.
* @return a transposed {@link Vector} matrix.
* @throws IllegalArgumentException if the row lengths of {@code matrix} differ.
* <p>
* ex: {@code
* Vector.transpose(Vector(Vector(1,2,3), Vector(4,5,6))) → Vector(Vector(1,4), Vector(2,5), Vector(3,6))
* }
*/
public static <T> Vector<Vector<T>> transpose(Vector<Vector<T>> matrix) {
return io.vavr.collection.Collections.transpose(matrix, Vector::ofAll, Vector::of);
}
/**
* Creates a Vector from a seed value and a function.
* The function takes the seed at first.
* The function should return {@code None} when it's
* done generating the Vector, otherwise {@code Some} {@code Tuple}
* of the element for the next call and the value to add to the
* resulting Vector.
* <p>
* Example:
* <pre>
* <code>
* Vector.unfoldRight(10, x -> x == 0
* ? Option.none()
* : Option.of(new Tuple2<>(x, x-1)));
* // Vector(10, 9, 8, 7, 6, 5, 4, 3, 2, 1))
* </code>
* </pre>
*
* @param <T> type of seeds
* @param <U> type of unfolded values
* @param seed the start value for the iteration
* @param f the function to get the next step of the iteration
* @return a Vector with the values built up by the iteration
* @throws NullPointerException if {@code f} is null
*/
public static <T, U> Vector<U> unfoldRight(T seed, Function<? super T, Option<Tuple2<? extends U, ? extends T>>> f) {
return Iterator.unfoldRight(seed, f).toVector();
}
/**
* Creates a Vector from a seed value and a function.
* The function takes the seed at first.
* The function should return {@code None} when it's
* done generating the Vector, otherwise {@code Some} {@code Tuple}
* of the value to add to the resulting Vector and
* the element for the next call.
* <p>
* Example:
* <pre>
* <code>
* Vector.unfoldLeft(10, x -> x == 0
* ? Option.none()
* : Option.of(new Tuple2<>(x-1, x)));
* // Vector(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))
* </code>
* </pre>
*
* @param <T> type of seeds
* @param <U> type of unfolded values
* @param seed the start value for the iteration
* @param f the function to get the next step of the iteration
* @return a Vector with the values built up by the iteration
* @throws NullPointerException if {@code f} is null
*/
public static <T, U> Vector<U> unfoldLeft(T seed, Function<? super T, Option<Tuple2<? extends T, ? extends U>>> f) {
return Iterator.unfoldLeft(seed, f).toVector();
}
/**
* Creates a Vector from a seed value and a function.
* The function takes the seed at first.
* The function should return {@code None} when it's
* done generating the Vector, otherwise {@code Some} {@code Tuple}
* of the value to add to the resulting Vector and
* the element for the next call.
* <p>
* Example:
* <pre>
* <code>
* Vector.unfold(10, x -> x == 0
* ? Option.none()
* : Option.of(new Tuple2<>(x-1, x)));
* // Vector(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))
* </code>
* </pre>
*
* @param <T> type of seeds and unfolded values
* @param seed the start value for the iteration
* @param f the function to get the next step of the iteration
* @return a Vector with the values built up by the iteration
* @throws NullPointerException if {@code f} is null
*/
public static <T> Vector<T> unfold(T seed, Function<? super T, Option<Tuple2<? extends T, ? extends T>>> f) {
return Iterator.unfold(seed, f).toVector();
}
@Override
public Vector<T> append(T element) { return appendAll(io.vavr.collection.List.of(element)); }
@Override
public Vector<T> appendAll(Iterable<? extends T> iterable) {
Objects.requireNonNull(iterable, "iterable is null");
if (isEmpty()) {
return ofAll(iterable);
} else {
final BitMappedTrie<T> that = trie.appendAll(iterable);
return (that == trie) ? this : new Vector<>(that);
}
}
@Override
public <R> Vector<R> collect(PartialFunction<? super T, ? extends R> partialFunction) {
return ofAll(iterator().<R> collect(partialFunction));
}
@Override
public Vector<Vector<T>> combinations() { return rangeClosed(0, length()).map(this::combinations).flatMap(Function.identity()); }
@Override
public Vector<Vector<T>> combinations(int k) { return Combinations.apply(this, Math.max(k, 0)); }
@Override
public Iterator<Vector<T>> crossProduct(int power) { return io.vavr.collection.Collections.crossProduct(empty(), this, power); }
@Override
public Vector<T> distinct() { return distinctBy(Function.identity()); }
@Override
public Vector<T> distinctBy(Comparator<? super T> comparator) {
Objects.requireNonNull(comparator, "comparator is null");
final java.util.Set<T> seen = new java.util.TreeSet<>(comparator);
return filter(seen::add);
}
@Override
public <U> Vector<T> distinctBy(Function<? super T, ? extends U> keyExtractor) {
Objects.requireNonNull(keyExtractor, "keyExtractor is null");
final java.util.Set<U> seen = new java.util.HashSet<>(length());
return filter(t -> seen.add(keyExtractor.apply(t)));
}
@Override
public Vector<T> drop(int n) {
return wrap(trie.drop(n));
}
@Override
public Vector<T> dropUntil(Predicate<? super T> predicate) {
return io.vavr.collection.Collections.dropUntil(this, predicate);
}
@Override
public Vector<T> dropWhile(Predicate<? super T> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
return dropUntil(predicate.negate());
}
@Override
public Vector<T> dropRight(int n) {
return take(length() - n);
}
@Override
public Vector<T> dropRightUntil(Predicate<? super T> predicate) {
return io.vavr.collection.Collections.dropRightUntil(this, predicate);
}
@Override
public Vector<T> dropRightWhile(Predicate<? super T> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
return dropRightUntil(predicate.negate());
}
@Override
public Vector<T> filter(Predicate<? super T> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
return wrap(trie.filter(predicate));
}
@Override
public <U> Vector<U> flatMap(Function<? super T, ? extends Iterable<? extends U>> mapper) {
Objects.requireNonNull(mapper, "mapper is null");
final Iterator<? extends U> results = iterator().flatMap(mapper);
return ofAll(results);
}
@Override
public T get(int index) {
if (isValid(index)) {
return trie.get(index);
} else {
throw new IndexOutOfBoundsException("get(" + index + ")");
}
}
private boolean isValid(int index) { return (index >= 0) && (index < length()); }
@Override
public T head() {
if (nonEmpty()) {
return get(0);
} else {
throw new NoSuchElementException("head of empty Vector");
}
}
@Override
public <C> Map<C, Vector<T>> groupBy(Function<? super T, ? extends C> classifier) { return io.vavr.collection.Collections.groupBy(this, classifier, Vector::ofAll); }
@Override
public Iterator<Vector<T>> grouped(int size) { return sliding(size, size); }
@Override
public boolean hasDefiniteSize() { return true; }
@Override
public int indexOf(T element, int from) {
for (int i = from; i < length(); i++) {
if (Objects.equals(get(i), element)) {
return i;
}
}
return -1;
}
@Override
public Vector<T> init() {
if (nonEmpty()) {
return dropRight(1);
} else {
throw new UnsupportedOperationException("init of empty Vector");
}
}
@Override
public Option<Vector<T>> initOption() { return isEmpty() ? Option.none() : Option.some(init()); }
@Override
public Vector<T> insert(int index, T element) { return insertAll(index, Iterator.of(element)); }
@Override
public Vector<T> insertAll(int index, Iterable<? extends T> elements) {
Objects.requireNonNull(elements, "elements is null");
if ((index >= 0) && (index <= length())) {
final Vector<T> begin = take(index).appendAll(elements);
final Vector<T> end = drop(index);
return (begin.size() > end.size())
? begin.appendAll(end)
: end.prependAll(begin);
} else {
throw new IndexOutOfBoundsException("insert(" + index + ", e) on Vector of length " + length());
}
}
@Override
public Vector<T> intersperse(T element) { return ofAll(iterator().intersperse(element)); }
@Override
public boolean isEmpty() { return length() == 0; }
@Override
public boolean isTraversableAgain() { return true; }
@Override
public Iterator<T> iterator() {
return isEmpty() ? Iterator.empty()
: trie.iterator();
}
@Override
public int lastIndexOf(T element, int end) {
for (int i = Math.min(end, length() - 1); i >= 0; i--) {
if (Objects.equals(get(i), element)) {
return i;
}
}
return -1;
}
@Override
public int length() { return trie.length(); }
@Override
public <U> Vector<U> map(Function<? super T, ? extends U> mapper) {
Objects.requireNonNull(mapper, "mapper is null");
return ofAll(trie.map(mapper));
}
@Override
public Vector<T> orElse(Iterable<? extends T> other) {
return isEmpty() ? ofAll(other) : this;
}
@Override
public Vector<T> orElse(Supplier<? extends Iterable<? extends T>> supplier) {
return isEmpty() ? ofAll(supplier.get()) : this;
}
@Override
public Vector<T> padTo(int length, T element) {
final int actualLength = length();
return (length <= actualLength)
? this
: appendAll(Iterator.continually(element)
.take(length - actualLength));
}
@Override
public Vector<T> leftPadTo(int length, T element) {
if (length <= length()) {
return this;
} else {
final Iterator<T> prefix = Iterator.continually(element).take(length - length());
return prependAll(prefix);
}
}
@Override
public Vector<T> patch(int from, Iterable<? extends T> that, int replaced) {
from = Math.max(from, 0);
replaced = Math.max(replaced, 0);
Vector<T> result = take(from).appendAll(that);
from += replaced;
result = result.appendAll(drop(from));
return result;
}
@Override
public Tuple2<Vector<T>, Vector<T>> partition(Predicate<? super T> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
final ArrayList<T> left = new ArrayList<>(), right = new ArrayList<>();
for (int i = 0; i < length(); i++) {
final T t = get(i);
(predicate.test(t) ? left : right).add(t);
}
return Tuple.of(ofAll(left), ofAll(right));
}
@Override
public Vector<T> peek(Consumer<? super T> action) {
Objects.requireNonNull(action, "action is null");
if (!isEmpty()) {
action.accept(head());
}
return this;
}
@Override
public Vector<Vector<T>> permutations() {
if (isEmpty()) {
return empty();
} else if (length() == 1) {
return of(this);
} else {
Vector<Vector<T>> results = empty();
for (T t : distinct()) {
for (Vector<T> ts : remove(t).permutations()) {
results = results.append(of(t).appendAll(ts));
}
}
return results;
}
}
@Override
public Vector<T> prepend(T element) { return prependAll(io.vavr.collection.List.of(element)); }
@Override
public Vector<T> prependAll(Iterable<? extends T> iterable) {
Objects.requireNonNull(iterable, "iterable is null");
if (isEmpty()) {
return ofAll(iterable);
} else {
final BitMappedTrie<T> that = trie.prependAll(iterable);
return (that == trie) ? this : new Vector<>(that);
}
}
@Override
public Vector<T> remove(T element) {
for (int i = 0; i < length(); i++) {
if (Objects.equals(get(i), element)) {
return removeAt(i);
}
}
return this;
}
@Override
public Vector<T> removeFirst(Predicate<T> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
for (int i = 0; i < length(); i++) {
if (predicate.test(get(i))) {
return removeAt(i);
}
}
return this;
}
@Override
public Vector<T> removeLast(Predicate<T> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
for (int i = length() - 1; i >= 0; i--) {
if (predicate.test(get(i))) {
return removeAt(i);
}
}
return this;
}
@Override
public Vector<T> removeAt(int index) {
if (isValid(index)) {
final Vector<T> begin = take(index);
final Vector<T> end = drop(index + 1);
return (begin.size() > end.size())
? begin.appendAll(end)
: end.prependAll(begin);
} else {
throw new IndexOutOfBoundsException("removeAt(" + index + ")");
}
}
@Override
public Vector<T> removeAll(T element) {
return io.vavr.collection.Collections.removeAll(this, element);
}
@Override
public Vector<T> removeAll(Iterable<? extends T> elements) {
return io.vavr.collection.Collections.removeAll(this, elements);
}
@Override
public Vector<T> removeAll(Predicate<? super T> predicate) {
return io.vavr.collection.Collections.removeAll(this, predicate);
}
@Override
public Vector<T> replace(T currentElement, T newElement) {
return indexOfOption(currentElement)
.map(i -> update(i, newElement))
.getOrElse(this);
}
@Override
public Vector<T> replaceAll(T currentElement, T newElement) {
Vector<T> result = this;
int index = 0;
for (T value : iterator()) {
if (Objects.equals(value, currentElement)) {
result = result.update(index, newElement);
}
index++;
}
return result;
}
@Override
public Vector<T> retainAll(Iterable<? extends T> elements) {
return io.vavr.collection.Collections.retainAll(this, elements);
}
@Override
public Vector<T> reverse() {
return (length() <= 1) ? this : ofAll(reverseIterator());
}
@Override
public Vector<T> scan(T zero, BiFunction<? super T, ? super T, ? extends T> operation) {
return scanLeft(zero, operation);
}
@Override
public <U> Vector<U> scanLeft(U zero, BiFunction<? super U, ? super T, ? extends U> operation) {
return io.vavr.collection.Collections.scanLeft(this, zero, operation, Iterator::toVector);
}
@Override
public <U> Vector<U> scanRight(U zero, BiFunction<? super T, ? super U, ? extends U> operation) {
return io.vavr.collection.Collections.scanRight(this, zero, operation, Iterator::toVector);
}
@Override
public Vector<T> shuffle() {
return io.vavr.collection.Collections.shuffle(this, Vector::ofAll);
}
@Override
public Vector<T> slice(int beginIndex, int endIndex) {
if ((beginIndex >= endIndex) || (beginIndex >= size()) || isEmpty()) {
return empty();
} else if ((beginIndex <= 0) && (endIndex >= length())) {
return this;
} else {
return take(endIndex).drop(beginIndex);
}
}
@Override
public Iterator<Vector<T>> slideBy(Function<? super T, ?> classifier) {
return iterator().slideBy(classifier).map(Vector::ofAll);
}
@Override
public Iterator<Vector<T>> sliding(int size) {
return sliding(size, 1);
}
@Override
public Iterator<Vector<T>> sliding(int size, int step) {
return iterator().sliding(size, step).map(Vector::ofAll);
}
@Override
public Vector<T> sorted() {
if (isEmpty()) {
return this;
} else {
@SuppressWarnings("unchecked")
final T[] list = (T[]) toJavaArray();
Arrays.sort(list);
return Vector.of(list);
}
}
@Override
public Vector<T> sorted(Comparator<? super T> comparator) {
Objects.requireNonNull(comparator, "comparator is null");
return isEmpty() ? this : toJavaStream().sorted(comparator).collect(collector());
}
@Override
public <U extends Comparable<? super U>> Vector<T> sortBy(Function<? super T, ? extends U> mapper) {
return sortBy(U::compareTo, mapper);
}
@Override
public <U> Vector<T> sortBy(Comparator<? super U> comparator, Function<? super T, ? extends U> mapper) {
Objects.requireNonNull(comparator, "comparator is null");
Objects.requireNonNull(mapper, "mapper is null");
final Function<? super T, ? extends U> domain = Function1.of(mapper::apply).memoized();
return toJavaStream()
.sorted((e1, e2) -> comparator.compare(domain.apply(e1), domain.apply(e2)))
.collect(collector());
}
@Override
public Tuple2<Vector<T>, Vector<T>> span(Predicate<? super T> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
return Tuple.of(takeWhile(predicate), dropWhile(predicate));
}
@Override
public Tuple2<Vector<T>, Vector<T>> splitAt(int n) {
return Tuple.of(take(n), drop(n));
}
@Override
public Tuple2<Vector<T>, Vector<T>> splitAt(Predicate<? super T> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
final Vector<T> init = takeWhile(predicate.negate());
return Tuple.of(init, drop(init.size()));
}
@Override
public Tuple2<Vector<T>, Vector<T>> splitAtInclusive(Predicate<? super T> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
for (int i = 0; i < length(); i++) {
final T value = get(i);
if (predicate.test(value)) {
return (i == (length() - 1)) ? Tuple.of(this, empty())
: Tuple.of(take(i + 1), drop(i + 1));
}
}
return Tuple.of(this, empty());
}
@Override
public Vector<T> subSequence(int beginIndex) {
if ((beginIndex >= 0) && (beginIndex <= length())) {
return drop(beginIndex);
} else {
throw new IndexOutOfBoundsException("subSequence(" + beginIndex + ")");
}
}
@Override
public Vector<T> subSequence(int beginIndex, int endIndex) {
if ((beginIndex >= 0) && (beginIndex <= endIndex) && (endIndex <= length())) {
return slice(beginIndex, endIndex);
} else {
throw new IndexOutOfBoundsException("subSequence(" + beginIndex + ", " + endIndex + ") on Vector of size " + length());
}
}
@Override
public Vector<T> tail() {
if (nonEmpty()) {
return drop(1);
} else {
throw new UnsupportedOperationException("tail of empty Vector");
}
}
@Override
public Option<Vector<T>> tailOption() { return isEmpty() ? Option.none() : Option.some(tail()); }
@Override
public Vector<T> take(int n) {
return wrap(trie.take(n));
}
@Override
public Vector<T> takeRight(int n) {
return drop(length() - n);
}
@Override
public Vector<T> takeUntil(Predicate<? super T> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
return takeWhile(predicate.negate());
}
@Override
public Vector<T> takeWhile(Predicate<? super T> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
for (int i = 0; i < length(); i++) {
final T value = get(i);
if (!predicate.test(value)) {
return take(i);
}
}
return this;
}
/**
* Transforms this {@code Vector}.
*
* @param f A transformation
* @param <U> Type of transformation result
* @return An instance of type {@code U}
* @throws NullPointerException if {@code f} is null
*/
public <U> U transform(Function<? super Vector<T>, ? extends U> f) {
Objects.requireNonNull(f, "f is null");
return f.apply(this);
}
@SuppressWarnings("deprecation")
@Override
public <U> Vector<U> unit(Iterable<? extends U> iterable) { return ofAll(iterable); }
@Override
public <T1, T2> Tuple2<Vector<T1>, Vector<T2>> unzip(Function<? super T, Tuple2<? extends T1, ? extends T2>> unzipper) {
Objects.requireNonNull(unzipper, "unzipper is null");
Vector<T1> xs = empty();
Vector<T2> ys = empty();
for (int i = 0; i < length(); i++) {
final Tuple2<? extends T1, ? extends T2> t = unzipper.apply(get(i));
xs = xs.append(t._1);
ys = ys.append(t._2);
}
return Tuple.of(xs, ys);
}
@Override
public <T1, T2, T3> Tuple3<Vector<T1>, Vector<T2>, Vector<T3>> unzip3(Function<? super T, Tuple3<? extends T1, ? extends T2, ? extends T3>> unzipper) {
Objects.requireNonNull(unzipper, "unzipper is null");
Vector<T1> xs = empty();
Vector<T2> ys = empty();
Vector<T3> zs = empty();
for (int i = 0; i < length(); i++) {
final Tuple3<? extends T1, ? extends T2, ? extends T3> t = unzipper.apply(get(i));
xs = xs.append(t._1);
ys = ys.append(t._2);
zs = zs.append(t._3);
}
return Tuple.of(xs, ys, zs);
}
@Override
public Vector<T> update(int index, T element) {
if (isValid(index)) {
return wrap(trie.update(index, element));
} else {
throw new IndexOutOfBoundsException("update(" + index + ")");
}
}
@Override
public Vector<T> update(int index, Function<? super T, ? extends T> updater) {
Objects.requireNonNull(updater, "updater is null");
return update(index, updater.apply(get(index)));
}
@Override
public <U> Vector<Tuple2<T, U>> zip(Iterable<? extends U> that) {
return zipWith(that, Tuple::of);
}
@Override
public <U, R> Vector<R> zipWith(Iterable<? extends U> that, BiFunction<? super T, ? super U, ? extends R> mapper) {
Objects.requireNonNull(that, "that is null");
Objects.requireNonNull(mapper, "mapper is null");
return ofAll(iterator().zipWith(that, mapper));
}
@Override
public <U> Vector<Tuple2<T, U>> zipAll(Iterable<? extends U> that, T thisElem, U thatElem) {
Objects.requireNonNull(that, "that is null");
return ofAll(iterator().zipAll(that, thisElem, thatElem));
}
@Override
public Vector<Tuple2<T, Integer>> zipWithIndex() {
return zipWithIndex(Tuple::of);
}
@Override
public <U> Vector<U> zipWithIndex(BiFunction<? super T, ? super Integer, ? extends U> mapper) {
Objects.requireNonNull(mapper, "mapper is null");
return ofAll(iterator().zipWithIndex(mapper));
}
private Object readResolve() { return isEmpty() ? EMPTY : this; }
@Override
public boolean equals(Object o) {
return io.vavr.collection.Collections.equals(this, o);
}
@Override
public int hashCode() {
return io.vavr.collection.Collections.hashOrdered(this);
}
@Override
public String stringPrefix() { return "Vector"; }
@Override
public String toString() { return mkString(stringPrefix() + "(", ", ", ")"); }
}
interface VectorModule {
final class Combinations {
static <T> Vector<Vector<T>> apply(Vector<T> elements, int k) {
return (k == 0)
? Vector.of(Vector.empty())
: elements.zipWithIndex().flatMap(
t -> apply(elements.drop(t._2 + 1), (k - 1)).map((Vector<T> c) -> c.prepend(t._1)));
}
}
}
| 1 | 12,242 | I know that we do not pull the last bit of performance out of the JVM but it is more robust and removes redundant code. | vavr-io-vavr | java |
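The comment above refers to replacing the inline bounds check in subSequence with the shared Collections.subSequenceRangeCheck call from the patch. Below is a hedged sketch of what such a helper might look like, reconstructed from the check that was removed; the actual vavr implementation is not shown here and may differ.

```java
// Sketch of a shared range-check helper; reconstructed from the removed inline
// check, not copied from vavr's Collections class.
public final class SubSequenceRangeCheckSketch {

    private SubSequenceRangeCheckSketch() {
    }

    static void subSequenceRangeCheck(int beginIndex, int endIndex, int length) {
        if (beginIndex < 0 || beginIndex > endIndex || endIndex > length) {
            throw new IndexOutOfBoundsException(
                    "subSequence(" + beginIndex + ", " + endIndex + ") on collection of length " + length);
        }
    }

    public static void main(String[] args) {
        subSequenceRangeCheck(0, 2, 3); // valid range, returns normally
        subSequenceRangeCheck(2, 1, 3); // throws IndexOutOfBoundsException
    }
}
```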
@@ -130,6 +130,11 @@ public abstract class ScheduledReporter implements Closeable, Reporter {
ScheduledExecutorService executor,
boolean shutdownExecutorOnStop,
Set<MetricAttribute> disabledMetricAttributes) {
+
+ if (registry == null) {
+ throw new IllegalArgumentException("MetricRegistry must not be null !!!");
+ }
+
this.registry = registry;
this.filter = filter;
this.executor = executor == null ? createDefaultExecutor(name) : executor;
| 1 |
package com.codahale.metrics;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.Closeable;
import java.util.Collections;
import java.util.Locale;
import java.util.Set;
import java.util.SortedMap;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
/**
* The abstract base class for all scheduled reporters (i.e., reporters which process a registry's
* metrics periodically).
*
* @see ConsoleReporter
* @see CsvReporter
* @see Slf4jReporter
*/
public abstract class ScheduledReporter implements Closeable, Reporter {
private static final Logger LOG = LoggerFactory.getLogger(ScheduledReporter.class);
/**
* A simple named thread factory.
*/
@SuppressWarnings("NullableProblems")
private static class NamedThreadFactory implements ThreadFactory {
private final ThreadGroup group;
private final AtomicInteger threadNumber = new AtomicInteger(1);
private final String namePrefix;
private NamedThreadFactory(String name) {
final SecurityManager s = System.getSecurityManager();
this.group = (s != null) ? s.getThreadGroup() : Thread.currentThread().getThreadGroup();
this.namePrefix = "metrics-" + name + "-thread-";
}
@Override
public Thread newThread(Runnable r) {
final Thread t = new Thread(group, r, namePrefix + threadNumber.getAndIncrement(), 0);
t.setDaemon(true);
if (t.getPriority() != Thread.NORM_PRIORITY) {
t.setPriority(Thread.NORM_PRIORITY);
}
return t;
}
}
private static final AtomicInteger FACTORY_ID = new AtomicInteger();
private final MetricRegistry registry;
private final ScheduledExecutorService executor;
private final boolean shutdownExecutorOnStop;
private final Set<MetricAttribute> disabledMetricAttributes;
private ScheduledFuture<?> scheduledFuture;
private final MetricFilter filter;
private final long durationFactor;
private final String durationUnit;
private final long rateFactor;
private final String rateUnit;
/**
* Creates a new {@link ScheduledReporter} instance.
*
* @param registry the {@link com.codahale.metrics.MetricRegistry} containing the metrics this
* reporter will report
* @param name the reporter's name
* @param filter the filter for which metrics to report
* @param rateUnit a unit of time
* @param durationUnit a unit of time
*/
protected ScheduledReporter(MetricRegistry registry,
String name,
MetricFilter filter,
TimeUnit rateUnit,
TimeUnit durationUnit) {
this(registry, name, filter, rateUnit, durationUnit, createDefaultExecutor(name));
}
/**
* Creates a new {@link ScheduledReporter} instance.
*
* @param registry the {@link com.codahale.metrics.MetricRegistry} containing the metrics this
* reporter will report
* @param name the reporter's name
* @param filter the filter for which metrics to report
* @param executor the executor to use while scheduling reporting of metrics.
*/
protected ScheduledReporter(MetricRegistry registry,
String name,
MetricFilter filter,
TimeUnit rateUnit,
TimeUnit durationUnit,
ScheduledExecutorService executor) {
this(registry, name, filter, rateUnit, durationUnit, executor, true);
}
/**
* Creates a new {@link ScheduledReporter} instance.
*
* @param registry the {@link com.codahale.metrics.MetricRegistry} containing the metrics this
* reporter will report
* @param name the reporter's name
* @param filter the filter for which metrics to report
* @param executor the executor to use while scheduling reporting of metrics.
* @param shutdownExecutorOnStop if true, then executor will be stopped in same time with this reporter
*/
protected ScheduledReporter(MetricRegistry registry,
String name,
MetricFilter filter,
TimeUnit rateUnit,
TimeUnit durationUnit,
ScheduledExecutorService executor,
boolean shutdownExecutorOnStop) {
this(registry, name, filter, rateUnit, durationUnit, executor, shutdownExecutorOnStop, Collections.emptySet());
}
protected ScheduledReporter(MetricRegistry registry,
String name,
MetricFilter filter,
TimeUnit rateUnit,
TimeUnit durationUnit,
ScheduledExecutorService executor,
boolean shutdownExecutorOnStop,
Set<MetricAttribute> disabledMetricAttributes) {
this.registry = registry;
this.filter = filter;
this.executor = executor == null ? createDefaultExecutor(name) : executor;
this.shutdownExecutorOnStop = shutdownExecutorOnStop;
this.rateFactor = rateUnit.toSeconds(1);
this.rateUnit = calculateRateUnit(rateUnit);
this.durationFactor = durationUnit.toNanos(1);
this.durationUnit = durationUnit.toString().toLowerCase(Locale.US);
this.disabledMetricAttributes = disabledMetricAttributes != null ? disabledMetricAttributes :
Collections.emptySet();
}
/**
* Starts the reporter polling at the given period.
*
* @param period the amount of time between polls
* @param unit the unit for {@code period}
*/
public void start(long period, TimeUnit unit) {
start(period, period, unit);
}
/**
* Starts the reporter polling at the given period with the specific runnable action.
* Visible only for testing.
*/
synchronized void start(long initialDelay, long period, TimeUnit unit, Runnable runnable) {
if (this.scheduledFuture != null) {
throw new IllegalArgumentException("Reporter already started");
}
this.scheduledFuture = executor.scheduleAtFixedRate(runnable, initialDelay, period, unit);
}
/**
* Starts the reporter polling at the given period.
*
* @param initialDelay the time to delay the first execution
* @param period the amount of time between polls
* @param unit the unit for {@code period} and {@code initialDelay}
*/
synchronized public void start(long initialDelay, long period, TimeUnit unit) {
start(initialDelay, period, unit, () -> {
try {
report();
} catch (Throwable ex) {
LOG.error("Exception thrown from {}#report. Exception was suppressed.", ScheduledReporter.this.getClass().getSimpleName(), ex);
}
});
}
/**
* Stops the reporter and if shutdownExecutorOnStop is true then shuts down its thread of execution.
* <p>
* Uses the shutdown pattern from http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/ExecutorService.html
*/
public void stop() {
if (shutdownExecutorOnStop) {
executor.shutdown(); // Disable new tasks from being submitted
try {
// Wait a while for existing tasks to terminate
if (!executor.awaitTermination(1, TimeUnit.SECONDS)) {
executor.shutdownNow(); // Cancel currently executing tasks
// Wait a while for tasks to respond to being cancelled
if (!executor.awaitTermination(1, TimeUnit.SECONDS)) {
System.err.println(getClass().getSimpleName() + ": ScheduledExecutorService did not terminate");
}
}
} catch (InterruptedException ie) {
// (Re-)Cancel if current thread also interrupted
executor.shutdownNow();
// Preserve interrupt status
Thread.currentThread().interrupt();
}
} else {
// The external manager(like JEE container) responsible for lifecycle of executor
synchronized (this) {
if (this.scheduledFuture == null) {
// was never started
return;
}
if (this.scheduledFuture.isCancelled()) {
// already cancelled
return;
}
// just cancel the scheduledFuture and exit
this.scheduledFuture.cancel(false);
}
}
}
/**
* Stops the reporter and shuts down its thread of execution.
*/
@Override
public void close() {
stop();
}
/**
* Report the current values of all metrics in the registry.
*/
public void report() {
synchronized (this) {
report(registry.getGauges(filter),
registry.getCounters(filter),
registry.getHistograms(filter),
registry.getMeters(filter),
registry.getTimers(filter));
}
}
/**
* Called periodically by the polling thread. Subclasses should report all the given metrics.
*
* @param gauges all of the gauges in the registry
* @param counters all of the counters in the registry
* @param histograms all of the histograms in the registry
* @param meters all of the meters in the registry
* @param timers all of the timers in the registry
*/
@SuppressWarnings("rawtypes")
public abstract void report(SortedMap<String, Gauge> gauges,
SortedMap<String, Counter> counters,
SortedMap<String, Histogram> histograms,
SortedMap<String, Meter> meters,
SortedMap<String, Timer> timers);
protected String getRateUnit() {
return rateUnit;
}
protected String getDurationUnit() {
return durationUnit;
}
protected double convertDuration(double duration) {
return duration / durationFactor;
}
protected double convertRate(double rate) {
return rate * rateFactor;
}
protected boolean isShutdownExecutorOnStop() {
return shutdownExecutorOnStop;
}
protected Set<MetricAttribute> getDisabledMetricAttributes() {
return disabledMetricAttributes;
}
private String calculateRateUnit(TimeUnit unit) {
final String s = unit.toString().toLowerCase(Locale.US);
return s.substring(0, s.length() - 1);
}
private static ScheduledExecutorService createDefaultExecutor(String name) {
return Executors.newSingleThreadScheduledExecutor(new NamedThreadFactory(name + '-' + FACTORY_ID.incrementAndGet()));
}
}
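// Illustrative sketch only (not part of the file above): the Javadoc on the abstract
// report(...) overload describes the whole subclass contract; implement report(...)
// and let start()/stop() drive the polling thread. The class below is hypothetical,
// and the super(...) call assumes ScheduledReporter's protected
// (registry, name, filter, rateUnit, durationUnit) constructor.
class CountersOnlyReporter extends ScheduledReporter {
    CountersOnlyReporter(MetricRegistry registry) {
        super(registry, "counters-only", MetricFilter.ALL, TimeUnit.SECONDS, TimeUnit.MILLISECONDS);
    }
    @Override
    @SuppressWarnings("rawtypes")
    public void report(SortedMap<String, Gauge> gauges,
                       SortedMap<String, Counter> counters,
                       SortedMap<String, Histogram> histograms,
                       SortedMap<String, Meter> meters,
                       SortedMap<String, Timer> timers) {
        // Print only the counters; a real reporter would format all five metric maps.
        counters.forEach((name, counter) -> System.out.println(name + "=" + counter.getCount()));
    }
}
// Typical lifecycle, as described by the Javadoc above:
//   reporter.start(10, TimeUnit.SECONDS);  // poll every 10 seconds
//   reporter.close();                      // stop(), shutting down the executor if owned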
| 1 | 7,297 | I'd make this a `throw new NullPointerException("registry == null");` instead | dropwizard-metrics | java |
@@ -163,10 +163,12 @@ type Config struct {
FailsafeInboundHostPorts []ProtoPort `config:"port-list;tcp:22,udp:68;die-on-fail"`
FailsafeOutboundHostPorts []ProtoPort `config:"port-list;tcp:2379,tcp:2380,tcp:4001,tcp:7001,udp:53,udp:67;die-on-fail"`
- UsageReportingEnabled bool `config:"bool;true"`
- ClusterGUID string `config:"string;baddecaf"`
- ClusterType string `config:"string;"`
- CalicoVersion string `config:"string;"`
+ UsageReportingEnabled bool `config:"bool;true"`
+ UsageReportingInitialDelaySecs time.Duration `config:"seconds;300"`
+ UsageReportingIntervalSecs time.Duration `config:"seconds;86400"`
+ ClusterGUID string `config:"string;baddecaf"`
+ ClusterType string `config:"string;"`
+ CalicoVersion string `config:"string;"`
DebugMemoryProfilePath string `config:"file;;"`
DebugDisableLogDropping bool `config:"bool;false"` | 1 | // Copyright (c) 2016-2017 Tigera, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package config
import (
"errors"
"fmt"
"net"
"os"
"reflect"
"regexp"
"strconv"
"strings"
"time"
log "github.com/sirupsen/logrus"
"github.com/projectcalico/libcalico-go/lib/api"
"github.com/projectcalico/libcalico-go/lib/client"
)
var (
IfaceListRegexp = regexp.MustCompile(`^[a-zA-Z0-9_-]{1,15}(,[a-zA-Z0-9_-]{1,15})*$`)
AuthorityRegexp = regexp.MustCompile(`^[^:/]+:\d+$`)
HostnameRegexp = regexp.MustCompile(`^[a-zA-Z0-9_.-]+$`)
StringRegexp = regexp.MustCompile(`^.*$`)
)
const (
maxUint = ^uint(0)
maxInt = int(maxUint >> 1)
minInt = -maxInt - 1
)
// Source of a config value. Values from higher-numbered sources override
// those from lower-numbered sources. Note: some parameters (such as those
// needed to connect to the datastore) can only be set from a local source.
type Source uint8
const (
Default = iota
DatastoreGlobal
DatastorePerHost
ConfigFile
EnvironmentVariable
)
var SourcesInDescendingOrder = []Source{EnvironmentVariable, ConfigFile, DatastorePerHost, DatastoreGlobal}
func (source Source) String() string {
switch source {
case Default:
return "<default>"
case DatastoreGlobal:
return "datastore (global)"
case DatastorePerHost:
return "datastore (per-host)"
case ConfigFile:
return "config file"
case EnvironmentVariable:
return "environment variable"
}
return fmt.Sprintf("<unknown(%v)>", uint8(source))
}
func (source Source) Local() bool {
switch source {
case Default, ConfigFile, EnvironmentVariable:
return true
default:
return false
}
}
// Config contains the best, parsed config values loaded from the various sources.
// We use tags to control the parsing and validation.
type Config struct {
// Configuration parameters.
UseInternalDataplaneDriver bool `config:"bool;true"`
DataplaneDriver string `config:"file(must-exist,executable);calico-iptables-plugin;non-zero,die-on-fail,skip-default-validation"`
DatastoreType string `config:"oneof(kubernetes,etcdv2);etcdv2;non-zero,die-on-fail"`
FelixHostname string `config:"hostname;;local,non-zero"`
EtcdAddr string `config:"authority;127.0.0.1:2379;local"`
EtcdScheme string `config:"oneof(http,https);http;local"`
EtcdKeyFile string `config:"file(must-exist);;local"`
EtcdCertFile string `config:"file(must-exist);;local"`
EtcdCaFile string `config:"file(must-exist);;local"`
EtcdEndpoints []string `config:"endpoint-list;;local"`
TyphaAddr string `config:"authority;;"`
TyphaK8sServiceName string `config:"string;"`
TyphaK8sNamespace string `config:"string;kube-system;non-zero"`
TyphaReadTimeout time.Duration `config:"seconds;30"`
TyphaWriteTimeout time.Duration `config:"seconds;10"`
Ipv6Support bool `config:"bool;true"`
IgnoreLooseRPF bool `config:"bool;false"`
RouteRefreshInterval time.Duration `config:"seconds;90"`
IptablesRefreshInterval time.Duration `config:"seconds;90"`
IptablesPostWriteCheckIntervalSecs time.Duration `config:"seconds;1"`
IptablesLockFilePath string `config:"file;/run/xtables.lock"`
IptablesLockTimeoutSecs time.Duration `config:"seconds;0"`
IptablesLockProbeIntervalMillis time.Duration `config:"millis;50"`
IpsetsRefreshInterval time.Duration `config:"seconds;10"`
MaxIpsetSize int `config:"int;1048576;non-zero"`
NetlinkTimeoutSecs time.Duration `config:"seconds;10"`
MetadataAddr string `config:"hostname;127.0.0.1;die-on-fail"`
MetadataPort int `config:"int(0,65535);8775;die-on-fail"`
InterfacePrefix string `config:"iface-list;cali;non-zero,die-on-fail"`
ChainInsertMode string `config:"oneof(insert,append);insert;non-zero,die-on-fail"`
DefaultEndpointToHostAction string `config:"oneof(DROP,RETURN,ACCEPT);DROP;non-zero,die-on-fail"`
IptablesFilterAllowAction string `config:"oneof(ACCEPT,RETURN);ACCEPT;non-zero,die-on-fail"`
IptablesMangleAllowAction string `config:"oneof(ACCEPT,RETURN);ACCEPT;non-zero,die-on-fail"`
LogPrefix string `config:"string;calico-packet"`
LogFilePath string `config:"file;/var/log/calico/felix.log;die-on-fail"`
LogSeverityFile string `config:"oneof(DEBUG,INFO,WARNING,ERROR,FATAL);INFO"`
LogSeverityScreen string `config:"oneof(DEBUG,INFO,WARNING,ERROR,FATAL);INFO"`
LogSeveritySys string `config:"oneof(DEBUG,INFO,WARNING,ERROR,FATAL);INFO"`
IpInIpEnabled bool `config:"bool;false"`
IpInIpMtu int `config:"int;1440;non-zero"`
IpInIpTunnelAddr net.IP `config:"ipv4;"`
ReportingIntervalSecs time.Duration `config:"seconds;30"`
ReportingTTLSecs time.Duration `config:"seconds;90"`
EndpointReportingEnabled bool `config:"bool;false"`
EndpointReportingDelaySecs time.Duration `config:"seconds;1"`
IptablesMarkMask uint32 `config:"mark-bitmask;0xff000000;non-zero,die-on-fail"`
DisableConntrackInvalidCheck bool `config:"bool;false"`
HealthEnabled bool `config:"bool;false"`
HealthPort int `config:"int(0,65535);9099"`
PrometheusMetricsEnabled bool `config:"bool;false"`
PrometheusMetricsPort int `config:"int(0,65535);9091"`
PrometheusGoMetricsEnabled bool `config:"bool;true"`
PrometheusProcessMetricsEnabled bool `config:"bool;true"`
FailsafeInboundHostPorts []ProtoPort `config:"port-list;tcp:22,udp:68;die-on-fail"`
FailsafeOutboundHostPorts []ProtoPort `config:"port-list;tcp:2379,tcp:2380,tcp:4001,tcp:7001,udp:53,udp:67;die-on-fail"`
UsageReportingEnabled bool `config:"bool;true"`
ClusterGUID string `config:"string;baddecaf"`
ClusterType string `config:"string;"`
CalicoVersion string `config:"string;"`
DebugMemoryProfilePath string `config:"file;;"`
DebugDisableLogDropping bool `config:"bool;false"`
DebugSimulateCalcGraphHangAfter time.Duration `config:"seconds;0"`
DebugSimulateDataplaneHangAfter time.Duration `config:"seconds;0"`
// State tracking.
// nameToSource tracks where we loaded each config param from.
sourceToRawConfig map[Source]map[string]string
rawValues map[string]string
Err error
numIptablesBitsAllocated int
}
type ProtoPort struct {
Protocol string
Port uint16
}
// UpdateFrom parses and merges the rawData from one particular source into this config object.
// If there is a config value already loaded from a higher-priority source, then
// the new value will be ignored (after validation).
func (config *Config) UpdateFrom(rawData map[string]string, source Source) (changed bool, err error) {
log.Infof("Merging in config from %v: %v", source, rawData)
// Defensively take a copy of the raw data, in case we've been handed
// a mutable map by mistake.
rawDataCopy := make(map[string]string)
for k, v := range rawData {
if v == "" {
log.WithFields(log.Fields{
"name": k,
"source": source,
}).Info("Ignoring empty configuration parameter. Use value 'none' if " +
"your intention is to explicitly disable the default value.")
continue
}
rawDataCopy[k] = v
}
config.sourceToRawConfig[source] = rawDataCopy
changed, err = config.resolve()
return
}
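// Illustrative sketch, not in the original source: how two sources merge under the
// precedence rules described above. The field and source names are real; the values
// are made-up example data.
//
//	cfg := New()
//	cfg.UpdateFrom(map[string]string{"LogSeverityScreen": "DEBUG"}, DatastoreGlobal)
//	cfg.UpdateFrom(map[string]string{"logseverityscreen": "WARNING"}, EnvironmentVariable)
//	// cfg.LogSeverityScreen is now "WARNING": the environment-variable source outranks
//	// the datastore-global one, and the raw name lookup is case-insensitive.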
func (c *Config) InterfacePrefixes() []string {
return strings.Split(c.InterfacePrefix, ",")
}
func (config *Config) OpenstackActive() bool {
if strings.Contains(strings.ToLower(config.ClusterType), "openstack") {
// OpenStack is explicitly known to be present. Newer versions of the OpenStack plugin
// set this flag.
log.Debug("Cluster type contains OpenStack")
return true
}
// If we get here, either OpenStack isn't present or we're running against an old version
// of the OpenStack plugin, which doesn't set the flag. Use heuristics based on the
// presence of the OpenStack-related parameters.
if config.MetadataAddr != "" && config.MetadataAddr != "127.0.0.1" {
log.Debug("OpenStack metadata IP set to non-default, assuming OpenStack active")
return true
}
if config.MetadataPort != 0 && config.MetadataPort != 8775 {
log.Debug("OpenStack metadata port set to non-default, assuming OpenStack active")
return true
}
for _, prefix := range config.InterfacePrefixes() {
if prefix == "tap" {
log.Debug("Interface prefix list contains 'tap', assuming OpenStack")
return true
}
}
log.Debug("No evidence this is an OpenStack deployment; disabling OpenStack special-cases")
return false
}
func (config *Config) NextIptablesMark() uint32 {
mark := config.NthIPTablesMark(config.numIptablesBitsAllocated)
config.numIptablesBitsAllocated++
return mark
}
func (config *Config) NthIPTablesMark(n int) uint32 {
numBitsFound := 0
for shift := uint(0); shift < 32; shift++ {
candidate := uint32(1) << shift
if config.IptablesMarkMask&candidate > 0 {
if numBitsFound == n {
return candidate
}
numBitsFound += 1
}
}
log.WithFields(log.Fields{
"IptablesMarkMask": config.IptablesMarkMask,
"requestedMark": n,
}).Panic("Not enough iptables mark bits available.")
return 0
}
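// Worked example (illustrative, not in the original source): with the default
// IptablesMarkMask of 0xff000000, bits are handed out from the least-significant
// end of the mask, so NthIPTablesMark(0) == 0x01000000 and
// NthIPTablesMark(1) == 0x02000000; NextIptablesMark() returns these in sequence
// while advancing numIptablesBitsAllocated.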
func (config *Config) resolve() (changed bool, err error) {
newRawValues := make(map[string]string)
nameToSource := make(map[string]Source)
for _, source := range SourcesInDescendingOrder {
valueLoop:
for rawName, rawValue := range config.sourceToRawConfig[source] {
currentSource := nameToSource[rawName]
param, ok := knownParams[strings.ToLower(rawName)]
if !ok {
if source >= currentSource {
// Stash the raw value in case it's useful for
// a plugin. Since we don't know the canonical
// name, use the raw name.
newRawValues[rawName] = rawValue
nameToSource[rawName] = source
}
log.WithField("raw name", rawName).Info(
"Ignoring unknown config param.")
continue valueLoop
}
metadata := param.GetMetadata()
name := metadata.Name
if metadata.Local && !source.Local() {
log.Warningf("Ignoring local-only configuration for %v from %v",
name, source)
continue valueLoop
}
log.Infof("Parsing value for %v: %v (from %v)",
name, rawValue, source)
var value interface{}
if strings.ToLower(rawValue) == "none" {
// Special case: we allow a value of "none" to force the value to
// the zero value for a field. The zero value often differs from
// the default value. Typically, the zero value means "turn off
// the feature".
if metadata.NonZero {
err = errors.New("Non-zero field cannot be set to none")
log.Errorf(
"Failed to parse value for %v: %v from source %v. %v",
name, rawValue, source, err)
config.Err = err
return
}
value = metadata.ZeroValue
log.Infof("Value set to 'none', replacing with zero-value: %#v.",
value)
} else {
value, err = param.Parse(rawValue)
if err != nil {
logCxt := log.WithError(err).WithField("source", source)
if metadata.DieOnParseFailure {
logCxt.Error("Invalid (required) config value.")
config.Err = err
return
} else {
logCxt.WithField("default", metadata.Default).Warn(
"Replacing invalid value with default")
value = metadata.Default
err = nil
}
}
}
log.Infof("Parsed value for %v: %v (from %v)",
name, value, source)
if source < currentSource {
log.Infof("Skipping config value for %v from %v; "+
"already have a value from %v", name,
source, currentSource)
continue
}
field := reflect.ValueOf(config).Elem().FieldByName(name)
field.Set(reflect.ValueOf(value))
newRawValues[name] = rawValue
nameToSource[name] = source
}
}
changed = !reflect.DeepEqual(newRawValues, config.rawValues)
config.rawValues = newRawValues
return
}
func (config *Config) DatastoreConfig() api.CalicoAPIConfig {
// Special case for etcdv2 datastore, where we want to honour established Felix-specific
// config mechanisms.
if config.DatastoreType == "etcdv2" {
// Build a CalicoAPIConfig with the etcd fields filled in from Felix-specific
// config.
var etcdEndpoints string
if len(config.EtcdEndpoints) == 0 {
etcdEndpoints = config.EtcdScheme + "://" + config.EtcdAddr
} else {
etcdEndpoints = strings.Join(config.EtcdEndpoints, ",")
}
etcdCfg := api.EtcdConfig{
EtcdEndpoints: etcdEndpoints,
EtcdKeyFile: config.EtcdKeyFile,
EtcdCertFile: config.EtcdCertFile,
EtcdCACertFile: config.EtcdCaFile,
}
return api.CalicoAPIConfig{
Spec: api.CalicoAPIConfigSpec{
DatastoreType: api.EtcdV2,
EtcdConfig: etcdCfg,
},
}
}
// Build CalicoAPIConfig from the environment. This means that any XxxYyy field in
// CalicoAPIConfigSpec can be set by a corresponding XXX_YYY or CALICO_XXX_YYY environment
// variable, and that the datastore type can be set by a DATASTORE_TYPE or
// CALICO_DATASTORE_TYPE variable. (Except in the etcdv2 case which is handled specially
// above.)
cfg, err := client.LoadClientConfigFromEnvironment()
if err != nil {
log.WithError(err).Panic("Failed to create datastore config")
}
// If that didn't set the datastore type (in which case the field will have been set to its
// default 'etcdv2' value), copy it from the Felix config.
if cfg.Spec.DatastoreType == "etcdv2" {
cfg.Spec.DatastoreType = api.DatastoreType(config.DatastoreType)
}
if !config.IpInIpEnabled {
// Polling k8s for node updates is expensive (because we get many superfluous
// updates) so disable if we don't need it.
log.Info("IPIP disabled, disabling node poll (if KDD is in use).")
cfg.Spec.K8sDisableNodePoll = true
}
return *cfg
}
// Validate() performs cross-field validation.
func (config *Config) Validate() (err error) {
if config.FelixHostname == "" {
err = errors.New("Failed to determine hostname")
}
if config.DatastoreType == "etcdv2" && len(config.EtcdEndpoints) == 0 {
if config.EtcdScheme == "" {
err = errors.New("EtcdEndpoints and EtcdScheme both missing")
}
if config.EtcdAddr == "" {
err = errors.New("EtcdEndpoints and EtcdAddr both missing")
}
}
if err != nil {
config.Err = err
}
return
}
var knownParams map[string]param
func loadParams() {
knownParams = make(map[string]param)
config := Config{}
kind := reflect.TypeOf(config)
metaRegexp := regexp.MustCompile(`^([^;(]+)(?:\(([^)]*)\))?;` +
`([^;]*)(?:;` +
`([^;]*))?$`)
for ii := 0; ii < kind.NumField(); ii++ {
field := kind.Field(ii)
tag := field.Tag.Get("config")
if tag == "" {
continue
}
captures := metaRegexp.FindStringSubmatch(tag)
if len(captures) == 0 {
log.Panicf("Failed to parse metadata for config param %v", field.Name)
}
log.Debugf("%v: metadata captures: %#v", field.Name, captures)
kind := captures[1] // Type: "int|oneof|bool|port-list|..."
kindParams := captures[2] // Parameters for the type: e.g. for oneof "http,https"
defaultStr := captures[3] // Default value e.g "1.0"
flags := captures[4]
var param param
var err error
switch kind {
case "bool":
param = &BoolParam{}
case "int":
min := minInt
max := maxInt
if kindParams != "" {
minAndMax := strings.Split(kindParams, ",")
min, err = strconv.Atoi(minAndMax[0])
if err != nil {
log.Panicf("Failed to parse min value for %v", field.Name)
}
max, err = strconv.Atoi(minAndMax[1])
if err != nil {
log.Panicf("Failed to parse max value for %v", field.Name)
}
}
param = &IntParam{Min: min, Max: max}
case "int32":
param = &Int32Param{}
case "mark-bitmask":
param = &MarkBitmaskParam{}
case "float":
param = &FloatParam{}
case "seconds":
param = &SecondsParam{}
case "millis":
param = &MillisParam{}
case "iface-list":
param = &RegexpParam{Regexp: IfaceListRegexp,
Msg: "invalid Linux interface name"}
case "file":
param = &FileParam{
MustExist: strings.Contains(kindParams, "must-exist"),
Executable: strings.Contains(kindParams, "executable"),
}
case "authority":
param = &RegexpParam{Regexp: AuthorityRegexp,
Msg: "invalid URL authority"}
case "ipv4":
param = &Ipv4Param{}
case "endpoint-list":
param = &EndpointListParam{}
case "port-list":
param = &PortListParam{}
case "hostname":
param = &RegexpParam{Regexp: HostnameRegexp,
Msg: "invalid hostname"}
case "oneof":
options := strings.Split(kindParams, ",")
lowerCaseToCanon := make(map[string]string)
for _, option := range options {
lowerCaseToCanon[strings.ToLower(option)] = option
}
param = &OneofListParam{
lowerCaseOptionsToCanonical: lowerCaseToCanon}
case "string":
param = &RegexpParam{Regexp: StringRegexp,
Msg: "invalid string"}
default:
log.Panicf("Unknown type of parameter: %v", kind)
}
metadata := param.GetMetadata()
metadata.Name = field.Name
metadata.ZeroValue = reflect.ValueOf(config).FieldByName(field.Name).Interface()
if strings.Index(flags, "non-zero") > -1 {
metadata.NonZero = true
}
if strings.Index(flags, "die-on-fail") > -1 {
metadata.DieOnParseFailure = true
}
if strings.Index(flags, "local") > -1 {
metadata.Local = true
}
if defaultStr != "" {
if strings.Index(flags, "skip-default-validation") > -1 {
metadata.Default = defaultStr
} else {
// Parse the default value and save it in the metadata. Doing
// that here ensures that we syntax-check the defaults now.
defaultVal, err := param.Parse(defaultStr)
if err != nil {
log.Panicf("Invalid default value: %v", err)
}
metadata.Default = defaultVal
}
} else {
metadata.Default = metadata.ZeroValue
}
knownParams[strings.ToLower(field.Name)] = param
}
}
func (config *Config) RawValues() map[string]string {
return config.rawValues
}
func New() *Config {
if knownParams == nil {
loadParams()
}
p := &Config{
rawValues: make(map[string]string),
sourceToRawConfig: make(map[Source]map[string]string),
}
for _, param := range knownParams {
param.setDefault(p)
}
hostname, err := os.Hostname()
if err != nil {
log.Warningf("Failed to get hostname from kernel, "+
"trying HOSTNAME variable: %v", err)
hostname = os.Getenv("HOSTNAME")
}
p.FelixHostname = hostname
return p
}
type param interface {
GetMetadata() *Metadata
Parse(raw string) (result interface{}, err error)
setDefault(*Config)
}
| 1 | 15,931 | Does adding things here require us also to extend the FelixConfiguration resource in libcalico-go? | projectcalico-felix | go |
@@ -31,7 +31,11 @@ import (
"github.com/pkg/errors"
)
-const currentCNISpec = "0.3.1"
+const (
+ currentCNISpec = "0.3.1"
+ currentCNIVersion = "2018.08.0"
+ CNIGitHash = "a134a973585b560439ed25ec3857e4789bfeb89f"
+)
// CNIClient defines the method of setting/cleaning up container namespace
type CNIClient interface { | 1 | // Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may
// not use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language governing
// permissions and limitations under the License.
package ecscni
import (
"context"
"encoding/json"
"fmt"
"net"
"os"
"os/exec"
"path/filepath"
"time"
"github.com/aws/amazon-ecs-agent/agent/logger"
"github.com/cihub/seelog"
"github.com/containernetworking/cni/libcni"
cnitypes "github.com/containernetworking/cni/pkg/types"
"github.com/containernetworking/cni/pkg/types/current"
"github.com/pkg/errors"
)
const currentCNISpec = "0.3.1"
// CNIClient defines the method of setting/cleaning up container namespace
type CNIClient interface {
// Version returns the version of the plugin
Version(string) (string, error)
// Capabilities returns the capabilities supported by a plugin
Capabilities(string) ([]string, error)
// SetupNS sets up the namespace of container
SetupNS(context.Context, *Config, time.Duration) (*current.Result, error)
// CleanupNS cleans up the container namespace
CleanupNS(context.Context, *Config, time.Duration) error
// ReleaseIPResource marks the ip available in the ipam db
ReleaseIPResource(*Config) error
}
// cniClient is the client to call plugin and setup the network
type cniClient struct {
pluginsPath string
cniVersion string
subnet string
libcni libcni.CNI
}
// NewClient creates a client of ecscni which is used to invoke the plugin
func NewClient(cfg *Config) CNIClient {
libcniConfig := &libcni.CNIConfig{
Path: []string{cfg.PluginsPath},
}
return &cniClient{
pluginsPath: cfg.PluginsPath,
cniVersion: cfg.MinSupportedCNIVersion,
subnet: ecsSubnet,
libcni: libcniConfig,
}
}
// SetupNS will set up the namespace of container, including create the bridge
// and the veth pair, move the eni to container namespace, setup the routes
func (client *cniClient) SetupNS(ctx context.Context,
cfg *Config,
timeout time.Duration) (*current.Result, error) {
derivedCtx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
type output struct {
result *current.Result
err error
}
response := make(chan output)
go func(response chan output) {
result, err := client.setupNS(cfg)
response <- output{
result: result,
err: err,
}
}(response)
select {
case <-derivedCtx.Done():
return nil, errors.Wrap(derivedCtx.Err(), "cni setup: container namespace setup failed")
case result := <-response:
return result.result, result.err
}
}
func (client *cniClient) setupNS(cfg *Config) (*current.Result, error) {
runtimeConfig := libcni.RuntimeConf{
ContainerID: cfg.ContainerID,
NetNS: fmt.Sprintf(netnsFormat, cfg.ContainerPID),
}
seelog.Debugf("[ECSCNI] Starting ENI (%s) setup in the the container namespace: %s", cfg.ENIID, cfg.ContainerID)
os.Setenv("ECS_CNI_LOGLEVEL", logger.GetLevel())
defer os.Unsetenv("ECS_CNI_LOGLEVEL")
// Invoke eni plugin ADD command
result, err := client.add(runtimeConfig, cfg, client.createENINetworkConfig)
if err != nil {
return nil, errors.Wrap(err, "cni setup: invoke eni plugin failed")
}
seelog.Debugf("[ECSCNI] ENI setup done: %s", result.String())
// Invoke bridge plugin ADD command
result, err = client.add(runtimeConfig, cfg, client.createBridgeNetworkConfigWithIPAM)
if err != nil {
return nil, errors.Wrap(err, "cni setup: invoke bridge plugin failed")
}
seelog.Debugf("[ECSCNI] Set up container namespace done: %s", result.String())
if _, err = result.GetAsVersion(currentCNISpec); err != nil {
seelog.Warnf("[ECSCNI] Unable to convert result to spec version %s; error: %v; result is of version: %s",
currentCNISpec, err, result.Version())
return nil, err
}
var curResult *current.Result
curResult, ok := result.(*current.Result)
if !ok {
return nil, errors.Errorf(
"cni setup: unable to convert result to expected version '%s'",
result.String())
}
return curResult, nil
}
// CleanupNS will clean up the container namespace, including remove the veth
// pair and stop the dhclient
func (client *cniClient) CleanupNS(
ctx context.Context,
cfg *Config,
timeout time.Duration) error {
derivedCtx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
err := make(chan error)
go func(err chan error) {
err <- client.cleanupNS(cfg)
}(err)
select {
case <-derivedCtx.Done():
return errors.Wrap(derivedCtx.Err(), "cni cleanup: container namespace cleanup failed")
case err := <-err:
return err
}
}
func (client *cniClient) cleanupNS(cfg *Config) error {
runtimeConfig := libcni.RuntimeConf{
ContainerID: cfg.ContainerID,
NetNS: fmt.Sprintf(netnsFormat, cfg.ContainerPID),
}
os.Setenv("ECS_CNI_LOGLEVEL", logger.GetLevel())
defer os.Unsetenv("ECS_CNI_LOGLEVEL")
seelog.Debugf("[ECSCNI] Starting clean up the container namespace: %s", cfg.ContainerID)
// clean up the network namespace is separate from releasing the IP from IPAM
err := client.del(runtimeConfig, cfg, client.createBridgeNetworkConfigWithoutIPAM)
if err != nil {
return errors.Wrap(err, "cni cleanup: invoke bridge plugin failed")
}
seelog.Debugf("[ECSCNI] bridge cleanup done: %s", cfg.ContainerID)
err = client.del(runtimeConfig, cfg, client.createENINetworkConfig)
if err != nil {
return errors.Wrap(err, "cni cleanup: invoke eni plugin failed")
}
seelog.Debugf("[ECSCNI] container namespace cleanup done: %s", cfg.ContainerID)
return nil
}
// ReleaseIPResource marks the ip available in the ipam db
func (client *cniClient) ReleaseIPResource(cfg *Config) error {
runtimeConfig := libcni.RuntimeConf{
ContainerID: cfg.ContainerID,
NetNS: fmt.Sprintf(netnsFormat, cfg.ContainerPID),
}
seelog.Debugf("[ECSCNI] Releasing the ip resource from ipam db, id: [%s], ip: [%v]", cfg.ID, cfg.IPAMV4Address)
os.Setenv("ECS_CNI_LOGLEVEL", logger.GetLevel())
defer os.Unsetenv("ECS_CNI_LOGLEVEL")
return client.del(runtimeConfig, cfg, client.createIPAMNetworkConfig)
}
// add invokes the ADD command of the given plugin
func (client *cniClient) add(runtimeConfig libcni.RuntimeConf,
cfg *Config,
pluginConfigFunc func(*Config) (string, *libcni.NetworkConfig, error)) (cnitypes.Result, error) {
deviceName, networkConfig, err := pluginConfigFunc(cfg)
if err != nil {
return nil, err
}
runtimeConfig.IfName = deviceName
return client.libcni.AddNetwork(networkConfig, &runtimeConfig)
}
// del invokes the DEL command of the given plugin
func (client *cniClient) del(runtimeConfig libcni.RuntimeConf,
cfg *Config,
pluginConfigFunc func(*Config) (string, *libcni.NetworkConfig, error)) error {
deviceName, networkConfig, err := pluginConfigFunc(cfg)
if err != nil {
return err
}
runtimeConfig.IfName = deviceName
return client.libcni.DelNetwork(networkConfig, &runtimeConfig)
}
// createBridgeNetworkConfigWithIPAM creates the config of bridge for ADD command, where
// bridge plugin acquires the IP and route information from IPAM
func (client *cniClient) createBridgeNetworkConfigWithIPAM(cfg *Config) (string, *libcni.NetworkConfig, error) {
// Create the bridge config first
bridgeConfig := client.createBridgeConfig(cfg)
// Create the ipam config
ipamConfig, err := client.createIPAMConfig(cfg)
if err != nil {
return "", nil, errors.Wrap(err, "createBridgeNetworkConfigWithIPAM: create ipam configuration failed")
}
bridgeConfig.IPAM = ipamConfig
networkConfig, err := client.constructNetworkConfig(bridgeConfig, ECSBridgePluginName)
if err != nil {
return "", nil, errors.Wrap(err, "createBridgeNetworkConfigWithIPAM: construct bridge and ipam network configuration failed")
}
return defaultVethName, networkConfig, nil
}
// createBridgeNetworkConfigWithoutIPAM creates the config of the bridge for removal
func (client *cniClient) createBridgeNetworkConfigWithoutIPAM(cfg *Config) (string, *libcni.NetworkConfig, error) {
networkConfig, err := client.constructNetworkConfig(client.createBridgeConfig(cfg), ECSBridgePluginName)
if err != nil {
return "", nil, errors.Wrap(err, "createBridgeNetworkConfigWithoutIPAM: construct bridge network configuration failed")
}
return defaultVethName, networkConfig, nil
}
func (client *cniClient) createBridgeConfig(cfg *Config) BridgeConfig {
bridgeName := defaultBridgeName
if len(cfg.BridgeName) != 0 {
bridgeName = cfg.BridgeName
}
bridgeConfig := BridgeConfig{
Type: ECSBridgePluginName,
CNIVersion: client.cniVersion,
BridgeName: bridgeName,
}
return bridgeConfig
}
// constructNetworkConfig takes in the config from agent and construct the configuration
// that's accepted by the libcni
func (client *cniClient) constructNetworkConfig(cfg interface{}, plugin string) (*libcni.NetworkConfig, error) {
configBytes, err := json.Marshal(cfg)
if err != nil {
seelog.Errorf("[ECSCNI] Marshal configuration for plugin %s failed, error: %v", plugin, err)
return nil, err
}
networkConfig := &libcni.NetworkConfig{
Network: &cnitypes.NetConf{
Type: plugin,
},
Bytes: configBytes,
}
return networkConfig, nil
}
func (client *cniClient) createENINetworkConfig(cfg *Config) (string, *libcni.NetworkConfig, error) {
eniConf := ENIConfig{
Type: ECSENIPluginName,
CNIVersion: client.cniVersion,
ENIID: cfg.ENIID,
IPV4Address: cfg.ENIIPV4Address,
IPV6Address: cfg.ENIIPV6Address,
MACAddress: cfg.ENIMACAddress,
BlockInstanceMetdata: cfg.BlockInstanceMetdata,
SubnetGatewayIPV4Address: cfg.SubnetGatewayIPV4Address,
}
networkConfig, err := client.constructNetworkConfig(eniConf, ECSENIPluginName)
if err != nil {
return "", nil, errors.Wrap(err, "createENINetworkConfig: construct the eni network configuration failed")
}
return defaultENIName, networkConfig, nil
}
// createIPAMNetworkConfig constructs the ipam configuration accepted by libcni
func (client *cniClient) createIPAMNetworkConfig(cfg *Config) (string, *libcni.NetworkConfig, error) {
ipamConfig, err := client.createIPAMConfig(cfg)
if err != nil {
return defaultVethName, nil, errors.Wrap(err, "createIPAMNetworkConfig: create ipam network configuration failed")
}
ipamNetworkConfig := IPAMNetworkConfig{
Name: ECSIPAMPluginName,
CNIVersion: client.cniVersion,
IPAM: ipamConfig,
}
networkConfig, err := client.constructNetworkConfig(ipamNetworkConfig, ECSIPAMPluginName)
if err != nil {
return "", nil, errors.Wrap(err, "createIPAMNetworkConfig: construct ipam network configuration failed")
}
return defaultVethName, networkConfig, nil
}
func (client *cniClient) createIPAMConfig(cfg *Config) (IPAMConfig, error) {
_, dst, err := net.ParseCIDR(TaskIAMRoleEndpoint)
if err != nil {
return IPAMConfig{}, err
}
routes := []*cnitypes.Route{
{
Dst: *dst,
},
}
for _, route := range cfg.AdditionalLocalRoutes {
seelog.Debugf("[ECSCNI] Adding an additional route for %s", route)
ipNetRoute := (net.IPNet)(route)
routes = append(routes, &cnitypes.Route{Dst: ipNetRoute})
}
ipamConfig := IPAMConfig{
Type: ECSIPAMPluginName,
CNIVersion: client.cniVersion,
IPV4Subnet: client.subnet,
IPV4Address: cfg.IPAMV4Address,
ID: cfg.ID,
IPV4Routes: routes,
}
return ipamConfig, nil
}
// Version returns the version of the plugin
func (client *cniClient) Version(name string) (string, error) {
file := filepath.Join(client.pluginsPath, name)
// Check if the plugin file exists before executing it
_, err := os.Stat(file)
if err != nil {
return "", err
}
cmd := exec.Command(file, versionCommand)
versionInfo, err := cmd.Output()
if err != nil {
return "", err
}
version := &cniPluginVersion{}
// versionInfo is of the format
// {"version":"2017.06.0","dirty":true,"gitShortHash":"226db36"}
// Unmarshal this
err = json.Unmarshal(versionInfo, version)
if err != nil {
return "", errors.Wrapf(err, "ecscni: unmarshal version from string: %s", versionInfo)
}
return version.str(), nil
}
// cniPluginVersion is used to convert the JSON output of the
// '--version' command into a string
type cniPluginVersion struct {
Version string `json:"version"`
Dirty bool `json:"dirty"`
Hash string `json:"gitShortHash"`
}
// str generates a string version of the CNI plugin version
// Example:
// {"version":"2017.06.0","dirty":true,"gitShortHash":"226db36"} => @226db36-2017.06.0
// {"version":"2017.06.0","dirty":false,"gitShortHash":"326db36"} => 326db36-2017.06.0
func (version *cniPluginVersion) str() string {
ver := ""
if version.Dirty {
ver = "@"
}
return ver + version.Hash + "-" + version.Version
}
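// One more illustrative case (not in the original file), using the plugin version and
// git hash introduced in the patch above, and assuming the plugin reports them in the
// same JSON shape and short-hash form as the examples above:
//   {"version":"2018.08.0","dirty":false,"gitShortHash":"a134a97"} => a134a97-2018.08.0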
// Capabilities returns the capabilities supported by a plugin
func (client *cniClient) Capabilities(name string) ([]string, error) {
file := filepath.Join(client.pluginsPath, name)
// Check if the plugin file exists before executing it
_, err := os.Stat(file)
if err != nil {
return nil, errors.Wrapf(err, "ecscni: unable to describe file info for '%s'", file)
}
cmd := exec.Command(file, capabilitiesCommand)
capabilitiesInfo, err := cmd.Output()
if err != nil {
return nil, errors.Wrapf(err, "ecscni: failed invoking capabilities command for '%s'", name)
}
capabilities := &struct {
Capabilities []string `json:"capabilities"`
}{}
err = json.Unmarshal(capabilitiesInfo, capabilities)
if err != nil {
return nil, errors.Wrapf(err, "ecscni: failed to unmarshal capabilities for '%s' from string: %s", name, capabilitiesInfo)
}
return capabilities.Capabilities, nil
}
| 1 | 20,950 | does this need to be public? | aws-amazon-ecs-agent | go |
@@ -15,6 +15,7 @@
from . import encode
from . import number_types as N
+FILEIDENTIFIER_LENGTH = 4
class Table(object):
"""Table wraps a byte slice and provides read access to its data. | 1 | # Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import encode
from . import number_types as N
class Table(object):
"""Table wraps a byte slice and provides read access to its data.
The variable `Pos` indicates the root of the FlatBuffers object therein."""
__slots__ = ("Bytes", "Pos")
def __init__(self, buf, pos):
N.enforce_number(pos, N.UOffsetTFlags)
self.Bytes = buf
self.Pos = pos
def Offset(self, vtableOffset):
"""Offset provides access into the Table's vtable.
Deprecated fields are ignored by checking the vtable's length."""
vtable = self.Pos - self.Get(N.SOffsetTFlags, self.Pos)
vtableEnd = self.Get(N.VOffsetTFlags, vtable)
if vtableOffset < vtableEnd:
return self.Get(N.VOffsetTFlags, vtable + vtableOffset)
return 0
def Indirect(self, off):
"""Indirect retrieves the relative offset stored at `offset`."""
N.enforce_number(off, N.UOffsetTFlags)
return off + encode.Get(N.UOffsetTFlags.packer_type, self.Bytes, off)
def String(self, off):
"""String gets a string from data stored inside the flatbuffer."""
N.enforce_number(off, N.UOffsetTFlags)
off += encode.Get(N.UOffsetTFlags.packer_type, self.Bytes, off)
start = off + N.UOffsetTFlags.bytewidth
length = encode.Get(N.UOffsetTFlags.packer_type, self.Bytes, off)
return bytes(self.Bytes[start:start+length])
def VectorLen(self, off):
"""VectorLen retrieves the length of the vector whose offset is stored
at "off" in this object."""
N.enforce_number(off, N.UOffsetTFlags)
off += self.Pos
off += encode.Get(N.UOffsetTFlags.packer_type, self.Bytes, off)
ret = encode.Get(N.UOffsetTFlags.packer_type, self.Bytes, off)
return ret
def Vector(self, off):
"""Vector retrieves the start of data of the vector whose offset is
stored at "off" in this object."""
N.enforce_number(off, N.UOffsetTFlags)
off += self.Pos
x = off + self.Get(N.UOffsetTFlags, off)
# data starts after metadata containing the vector length
x += N.UOffsetTFlags.bytewidth
return x
def Union(self, t2, off):
"""Union initializes any Table-derived type to point to the union at
the given offset."""
assert type(t2) is Table
N.enforce_number(off, N.UOffsetTFlags)
off += self.Pos
t2.Pos = off + self.Get(N.UOffsetTFlags, off)
t2.Bytes = self.Bytes
def Get(self, flags, off):
"""
Get retrieves a value of the type specified by `flags` at the
given offset.
"""
N.enforce_number(off, N.UOffsetTFlags)
return flags.py_type(encode.Get(flags.packer_type, self.Bytes, off))
def GetSlot(self, slot, d, validator_flags):
N.enforce_number(slot, N.VOffsetTFlags)
if validator_flags is not None:
N.enforce_number(d, validator_flags)
off = self.Offset(slot)
if off == 0:
return d
return self.Get(validator_flags, self.Pos + off)
def GetVectorAsNumpy(self, flags, off):
"""
GetVectorAsNumpy returns the vector that starts at `Vector(off)`
as a numpy array with the type specified by `flags`. The array is
a `view` into Bytes, so modifying the returned array will
modify Bytes in place.
"""
offset = self.Vector(off)
length = self.VectorLen(off) # TODO: length accounts for bytewidth, right?
numpy_dtype = N.to_numpy_type(flags)
return encode.GetVectorAsNumpy(numpy_dtype, self.Bytes, length, offset)
def GetVOffsetTSlot(self, slot, d):
"""
GetVOffsetTSlot retrieves the VOffsetT that the given vtable location
points to. If the vtable value is zero, the default value `d`
will be returned.
"""
N.enforce_number(slot, N.VOffsetTFlags)
N.enforce_number(d, N.VOffsetTFlags)
off = self.Offset(slot)
if off == 0:
return d
return off
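# Illustrative sketch of the suggestion in this row's review note (not part of the
# file above): keep a single definition of the 4-byte file-identifier length and
# import it where needed, instead of re-declaring it. The module and constant name
# below are assumptions for illustration only.
#
#   # e.g. in number_types.py
#   FILEIDENTIFIER_LENGTH = 4
#
#   # in modules that need it
#   from .number_types import FILEIDENTIFIER_LENGTH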
| 1 | 13,776 | Please import this from a pre-existing definition. | google-flatbuffers | java |
@@ -0,0 +1,18 @@
+class WebOptions:
+ def load(self, loader):
+ loader.add_option(
+ "web_open_browser", bool, True,
+ "Start a browser."
+ )
+ loader.add_option(
+ "web_debug", bool, False,
+ "Mitmweb debugging."
+ )
+ loader.add_option(
+ "web_port", int, 8081,
+ "Mitmweb port."
+ )
+ loader.add_option(
+ "web_iface", str, "127.0.0.1",
+ "Mitmweb interface."
+ ) | 1 | 1 | 13,477 | File name does not represent class name... if we ever cared for such things? | mitmproxy-mitmproxy | py |
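Once this addon is loaded, other mitmproxy code can read the options registered above through the shared options store; a minimal sketch, assuming the usual `mitmproxy.ctx` accessors:

    from mitmproxy import ctx

    def running():
        # web_iface and web_port are the options added by WebOptions above.
        ctx.log.info("web interface on %s:%s" % (ctx.options.web_iface, ctx.options.web_port))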
|
@@ -39,7 +39,7 @@ Rails.application.routes.draw do
get :languages
get :commits_by_project_chart
get :commits_by_language_chart
- post :make_spammer
+ post :label_as_spammer
get 'edit_privacy' => 'privacy#edit', as: :edit_account_privacy
put 'update_privacy' => 'privacy#update', as: :account_privacy
end | 1 | Rails.application.routes.draw do
ActiveAdmin.routes(self)
root 'home#index'
resources :sessions, only: [:new, :create] do
collection do
delete :destroy
end
end
resources :password_reset, only: [:new, :create] do
collection do
get :confirm
post :reset
end
end
resources :activation_resends, only: [:new, :create]
resources :api_keys, only: :index
resources :domain_blacklists, except: :show
resources :reviews, only: :destroy do
resources :helpfuls, only: :create
end
resources :kudos
resources :accounts do
resources :api_keys, constraints: { format: :html }, except: :show
resources :projects, only: [:index]
resources :positions, only: [:index]
resources :stacks, only: [:index]
resources :widgets, only: [:index]
resources :kudos, only: [:index, :show]
resources :edits, only: [:index]
resources :posts, only: [:index]
resources :reviews, only: [:index]
member do
get :disabled
get :settings
get :languages
get :commits_by_project_chart
get :commits_by_language_chart
post :make_spammer
get 'edit_privacy' => 'privacy#edit', as: :edit_account_privacy
put 'update_privacy' => 'privacy#update', as: :account_privacy
end
end
resources :forums do
resources :topics, shallow: true
end
resources :topics, except: [:index, :new, :create] do
resources :posts, except: [:new]
end
resources :posts, only: :index, as: 'all_posts'
get 'markdown_syntax', to: 'abouts#markdown_syntax'
resources :projects, path: :p, only: [:show, :edit] do
member do
get :users
get :map
get :settings
get :estimated_cost
get 'permissions' => 'permissions#show', as: :permissions
put 'permissions' => 'permissions#update', as: :update_permissions
post 'rate' => 'ratings#rate', as: :rate
delete 'unrate' => 'ratings#unrate', as: :unrate
end
collection do
get :compare
get :autocomplete
end
resource :logos, only: [:new, :create, :destroy]
resources :links, except: :show
resources :managers, only: [:index, :new, :create, :edit, :update] do
member do
post :approve
post :reject
end
end
resources :rss_articles, only: :index
resources :widgets, only: :index
resources :similar_projects, only: :index
resources :ratings
resources :reviews, except: :show do
collection { get :summary }
resources :helpfuls, only: :create
end
resources :analyses, only: :index do
member do
get :languages_summary
end
end
resources :commits, only: :index do
collection { get :summary }
end
resources :contributors, only: :index do
collection { get :summary }
end
end
resources :organizations, path: :orgs, only: [:show] do
member do
get :settings
get :projects
end
resource :logos, only: [:new, :create, :destroy]
resources :managers, only: [:index, :new, :create, :edit, :update] do
member do
post :approve
post :reject
end
end
resources :widgets
end
resources :stacks, only: [:show, :create, :update, :destroy] do
member do
get :similar
get :builder
end
resources :stack_entries, only: [:create, :destroy]
resources :stack_ignores, only: [:create] do
collection do
delete :delete_all
end
end
resources :widgets, only: [:index]
end
resources :languages, only: [:show, :index] do
collection { get :compare }
end
resource :compare_repositories
# The priority is based upon order of creation: first created -> highest
# priority.
# See how all your routes lay out with "rake routes".
# You can have the root of your site routed with "root"
# Example of regular route:
# get 'products/:id' => 'catalog#view'
# Example of named route that can be invoked with purchase_url(id: product.id)
# get 'products/:id/purchase' => 'catalog#purchase', as: :purchase
# Example resource route (maps HTTP verbs to controller actions automatically)
# resources :products
# Example resource route with options:
# resources :products do
# member do
# get 'short'
# post 'toggle'
# end
#
# collection do
# get 'sold'
# end
# end
# Example resource route with sub-resources:
# resources :products do
# resources :comments, :sales
# resource :seller
# end
# Example resource route with more complex sub-resources:
# resources :products do
# resources :comments
# resources :sales do
# get 'recent', on: :collection
# end
# end
# Example resource route with concerns:
# concern :toggleable do
# post 'toggle'
# end
# resources :posts, concerns: :toggleable
# resources :photos, concerns: :toggleable
# Example resource route within a namespace:
# namespace :admin do
# # Directs /admin/products/* to Admin::ProductsController
# # (app/controllers/admin/products_controller.rb)
# resources :products
# end
end
| 1 | 7,015 | I don't mind the `make_spammer` route name. It seems to more clearly convey that the state of the account will change. The "label as" route name seems less definitive, as if one is simply adding something to the account -- a new label -- instead of changing the status of the account. | blackducksoftware-ohloh-ui | rb |
@@ -46,7 +46,8 @@ var loggingCmd = &cobra.Command{
Long: `Enable/disable and configure Algorand remote logging`,
Args: validateNoPosArgsFn,
Run: func(cmd *cobra.Command, _ []string) {
- cfg, err := logging.EnsureTelemetryConfig(nil, "")
+ dataDir := ensureSingleDataDir()
+ cfg, err := logging.EnsureTelemetryConfig(&dataDir, "")
// If error loading config, can't disable / no need to disable
if err != nil { | 1 | // Copyright (C) 2019 Algorand, Inc.
// This file is part of go-algorand
//
// go-algorand is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// go-algorand is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
package main
import (
"fmt"
"os"
"path/filepath"
"time"
"github.com/spf13/cobra"
"github.com/algorand/go-algorand/logging"
)
var (
nodeName string
)
func init() {
loggingCmd.AddCommand(enableCmd)
loggingCmd.AddCommand(disableCmd)
loggingCmd.AddCommand(loggingSendCmd)
// Enable Logging : node name
enableCmd.Flags().StringVarP(&nodeName, "name", "n", "", "Friendly-name to use for node")
}
var loggingCmd = &cobra.Command{
Use: "logging",
Short: "Control and manage Algorand logging",
Long: `Enable/disable and configure Algorand remote logging`,
Args: validateNoPosArgsFn,
Run: func(cmd *cobra.Command, _ []string) {
cfg, err := logging.EnsureTelemetryConfig(nil, "")
// If error loading config, can't disable / no need to disable
if err != nil {
fmt.Println(err)
fmt.Println(loggingNotConfigured)
} else if cfg.Enable == false {
fmt.Println(loggingNotEnabled)
} else {
fmt.Printf(loggingEnabled, cfg.Name, cfg.GUID)
}
},
}
var enableCmd = &cobra.Command{
Use: "enable -n nodeName",
Short: "Enable Algorand remote logging",
Long: `This will turn on remote logging. The "friendly name" for the node, used by logging, will be determined by -n nodename.`,
Args: validateNoPosArgsFn,
Run: func(cmd *cobra.Command, _ []string) {
cfg, err := logging.EnsureTelemetryConfig(nil, "")
if err != nil {
fmt.Println(err)
return
}
cfg.Enable = true
if len(nodeName) > 0 {
cfg.Name = nodeName
}
cfg.Save(cfg.FilePath)
fmt.Printf("Logging enabled: Name = %s, Guid = %s\n", cfg.Name, cfg.GUID)
},
}
var disableCmd = &cobra.Command{
Use: "disable",
Short: "Disable Algorand remote logging",
Long: `Disable Algorand remote logging`,
Args: validateNoPosArgsFn,
Run: func(cmd *cobra.Command, _ []string) {
cfg, err := logging.EnsureTelemetryConfig(nil, "")
// If error loading config, can't disable / no need to disable
if err != nil {
return
}
cfg.Enable = false
cfg.Save(cfg.FilePath)
},
}
var loggingSendCmd = &cobra.Command{
Use: "send",
Short: "Upload logs and debugging information for analysis",
Long: `Upload logs and debugging information to Algorand for analysis. Ledger and wallet data are not included.`,
Args: validateNoPosArgsFn,
Run: func(cmd *cobra.Command, _ []string) {
cfg, err := logging.EnsureTelemetryConfig(nil, "")
if err != nil {
fmt.Println(err)
return
}
basename := cfg.Name
if len(basename) > 0 {
basename = basename + "-"
}
timestamp := time.Now().UTC().Format("20060102150405")
modifier := ""
counter := uint(1)
onDataDirs(func(dataDir string) {
dirname := filepath.Base(dataDir)
name := basename + cfg.GUID + "_" + dirname + "-" + timestamp + modifier + ".tar.gz"
for err := range logging.CollectAndUploadData(dataDir, name) {
fmt.Fprintf(os.Stderr, "%v\n", err)
}
modifier = fmt.Sprintf("-%d", counter)
counter++
})
},
}
| 1 | 35,755 | This exits if `-d` not specified which isn't exactly what we want, right? | algorand-go-algorand | go |
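A possible softer variant of the change in the patch above, sketched under the assumption of a hypothetical helper that reports failure instead of exiting when no single data directory can be resolved; `ensureSingleDataDir` itself, as the review note says, exits when `-d` is not given.

    // Sketch only; resolveSingleDataDir is hypothetical.
    var dirPtr *string
    if dataDir, err := resolveSingleDataDir(); err == nil {
        dirPtr = &dataDir
    }
    cfg, err := logging.EnsureTelemetryConfig(dirPtr, "")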
@@ -313,3 +313,10 @@ def sizeof_fmt(num, suffix='B'):
return "%3.1f%s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f%s%s" % (num, 'Yi', suffix)
+
+
+def escape_like(string, escape_char='*'):
+ """Escape the string parameter used in SQL LIKE expressions."""
+ return string.replace(escape_char, escape_char * 2) \
+ .replace('%', escape_char + '%') \
+ .replace('_', escape_char + '_') | 1 | # -------------------------------------------------------------------------
# The CodeChecker Infrastructure
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
# -------------------------------------------------------------------------
"""
Util module.
"""
import datetime
import hashlib
import os
import re
import socket
import subprocess
import psutil
from libcodechecker.logger import LoggerFactory
# WARNING! LOG should be only used in this module.
LOG = LoggerFactory.get_new_logger('UTIL')
def get_free_port():
""" Get a free port from the OS. """
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(('', 0))
free_port = s.getsockname()[1]
s.close()
return free_port
def is_localhost(address):
"""
Check if address is one of the valid values and try to get the
IP-addresses from the system.
"""
valid_values = ['localhost', '0.0.0.0', '*', '::1']
try:
valid_values.append(socket.gethostbyname('localhost'))
except Exception:
# Failed to get ip address for localhost.
pass
try:
valid_values.append(socket.gethostbyname(socket.gethostname()))
except Exception:
# Failed to get ip address for host_name.
pass
return address in valid_values
def get_tmp_dir_hash():
"""Generate a hash based on the current time and process id."""
pid = os.getpid()
time = datetime.datetime.now()
data = str(pid) + str(time)
dir_hash = hashlib.md5()
dir_hash.update(data)
LOG.debug('The generated temporary directory hash is %s.'
% dir_hash.hexdigest())
return dir_hash.hexdigest()
def find_by_regex_in_envpath(pattern, environment):
"""
Searches for files matching the pattern string in the environment's PATH.
"""
regex = re.compile(pattern)
binaries = {}
for path in environment['PATH'].split(os.pathsep):
_, _, filenames = next(os.walk(path), ([], [], []))
for f in filenames:
if re.match(regex, f):
if binaries.get(f) is None:
binaries[f] = [os.path.join(path, f)]
else:
binaries[f].append(os.path.join(path, f))
return binaries
def get_binary_in_path(basename_list, versioning_pattern, env):
"""
Select the most matching binary for the given pattern in the given
environment. Works well for binaries that contain versioning.
"""
binaries = find_by_regex_in_envpath(versioning_pattern, env)
if len(binaries) == 0:
return False
elif len(binaries) == 1:
# Return the first found (earliest in PATH) binary for the only
# found binary name group.
return binaries.values()[0][0]
else:
keys = list(binaries.keys())
keys.sort()
# If one of the base names match, select that version.
files = None
for base_key in basename_list:
            # Cannot use set here as it would destroy precedence.
if base_key in keys:
files = binaries[base_key]
break
if not files:
# Select the "newest" available version if there are multiple and
# none of the base names matched.
files = binaries[keys[-1]]
# Return the one earliest in PATH.
return files[0]
def call_command(command, env=None):
""" Call an external command and return with (output, return_code)."""
try:
LOG.debug('Run ' + ' '.join(command))
out = subprocess.check_output(command,
bufsize=-1,
env=env,
stderr=subprocess.STDOUT)
LOG.debug(out)
return out, 0
except subprocess.CalledProcessError as ex:
LOG.debug('Running command "' + ' '.join(command) + '" Failed.')
LOG.debug(str(ex.returncode))
LOG.debug(ex.output)
return ex.output, ex.returncode
except OSError as oerr:
LOG.warning(oerr.strerror)
return oerr.strerror, oerr.errno
def kill_process_tree(parent_pid):
proc = psutil.Process(parent_pid)
children = proc.children()
# Send a SIGTERM (Ctrl-C) to the main process
proc.terminate()
# If children processes don't stop gracefully in time,
# slaughter them by force.
__, still_alive = psutil.wait_procs(children, timeout=5)
for p in still_alive:
p.kill()
def get_default_workspace():
"""
Default workspace in the users home directory.
"""
workspace = os.path.join(os.path.expanduser("~"), '.codechecker')
return workspace
def split_server_url(url):
"""
Splits the given CodeChecker server URL into its parts.
The format of a valid URL is:
protocol://host:port/
where
* Protocol: HTTP or HTTPS
* Host: The server's host name or IP address
* Port: The server's port number
As a shortcut, the following formats are also valid:
hostname (means: http://hostname:8001)
"""
LOG.debug("Parsing server url '{0}'".format(url))
protocol = 'http'
if url.startswith('http'):
parts = url.split('://', 1)
protocol = parts[0]
url = url.replace(parts[0] + '://', '')
url = url.lstrip('/').rstrip('/')
# A valid server_url looks like this: 'http://localhost:8001/'.
host, port = 'localhost', 8001
try:
parts = url.split('/', 1)
# Something is either a hostname, or a host:port.
server_addr = parts[0].split(":")
if len(server_addr) == 2:
host, port = server_addr[0], int(server_addr[1])
elif len(server_addr) == 1:
host = server_addr[0]
else:
raise ValueError("The server's address is not in a valid "
"'host:port' format!")
except:
LOG.error("The specified server URL is invalid.")
raise
LOG.debug("Result: With '{0}' on server '{1}:{2}'"
.format(protocol, host, port))
return protocol, host, port
def create_product_url(protocol, host, port, endpoint):
return "{0}://{1}:{2}{3}".format(protocol, host, str(port), endpoint)
def split_product_url(url):
"""
Splits the given CodeChecker server's product-specific URL into its parts.
The format of a valid URL is:
protocol://host:port/ProductEndpoint
where
* Protocol: HTTP or HTTPS
* Host: The server's host name or IP address
* Port: The server's port number
* ProductEndpoint: The product's unique endpoint folder under the server.
As a shortcut, the following formats are also valid:
ProductEndpoint (means: http://localhost:8001/ProductEndpoint)
hostname/ProductEndpoint (means: http://hostname:8001/ProductEndpoint)
"""
LOG.debug("Parsing product url '{0}'".format(url))
protocol = 'http'
if url.startswith('http'):
parts = url.split('://', 1)
protocol = parts[0]
url = url.replace(parts[0] + '://', '')
url = url.lstrip('/').rstrip('/')
# A valid product_url looks like this: 'http://localhost:8001/Product'.
host, port, product_name = 'localhost', 8001, 'Default'
try:
parts = url.split("/")
if len(parts) == 1:
# If only one word is given in the URL, consider it as product
# name, but then it cannot begin with a number.
product_name = parts[0]
if product_name[0].isdigit():
raise ValueError("Product name was given in URL, but it "
"cannot begin with a number!")
elif len(parts) == 2:
# URL is at least something/product-name.
product_name = parts[1]
# Something is either a hostname, or a host:port.
server_addr = parts[0].split(":")
if len(server_addr) == 2:
host, port = server_addr[0], int(server_addr[1])
elif len(server_addr) == 1:
# We consider "localhost/product" as "localhost:8001/product".
host = server_addr[0]
else:
raise ValueError("The server's address is not in a valid "
"'host:port' format!")
else:
raise ValueError("Product URL can not contain extra '/' chars.")
except:
LOG.error("The specified product URL is invalid.")
raise
LOG.debug("Result: With '{0}' on server '{1}:{2}', product '{3}'"
.format(protocol, host, port, product_name))
return protocol, host, port, product_name
def arg_match(options, args):
"""Checks and selects the option string specified in 'options'
that are present in parameter 'args'."""
matched_args = []
for option in options:
if any([arg if option.startswith(arg) else None
for arg in args]):
matched_args.append(option)
continue
return matched_args
def sizeof_fmt(num, suffix='B'):
"""
Pretty print storage units.
Source: https://stackoverflow.com/questions/1094841/
reusable-library-to-get-human-readable-version-of-file-size
"""
for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']:
if abs(num) < 1024.0:
return "%3.1f%s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f%s%s" % (num, 'Yi', suffix)
| 1 | 8,157 | So a `*` becomes `**`? How does this affect the query? If I have `csordas*marton` as filter and I have `csordas*marton` and `csordasFOOBARmarton` as run names in the database I get only the first as result? | Ericsson-codechecker | c |
@@ -191,11 +191,14 @@ InterimResult::buildIndex(const std::string &vidColumn) const {
auto name = schema->getFieldName(i);
if (vidColumn == name) {
VLOG(1) << "col name: " << vidColumn << ", col index: " << i;
- if (schema->getFieldType(i).type != SupportedType::VID) {
+ if (schema->getFieldType(i).type != SupportedType::INT &&
+ schema->getFieldType(i).type != SupportedType::VID &&
+ schema->getFieldType(i).type != SupportedType::TIMESTAMP) {
return Status::Error(
- "Build internal index for input data failed. "
- "The specific vid column `%s' is not type of VID, column index: %ul.",
- vidColumn.c_str(), i);
+ "Build internal index for input data failed. "
+ "The specific vid column `%s' is not type of VID, INT or TIMESTAMP, "
+ "column index: %u.",
+ vidColumn.c_str(), i);
}
vidIndex = i;
} | 1 | /* Copyright (c) 2018 vesoft inc. All rights reserved.
*
* This source code is licensed under Apache 2.0 License,
* attached with Common Clause Condition 1.0, found in the LICENSES directory.
*/
#include "base/Base.h"
#include "graph/InterimResult.h"
#include "dataman/RowReader.h"
namespace nebula {
namespace graph {
constexpr char NotSupported[] = "Type not supported yet";
InterimResult::InterimResult(std::vector<VertexID> vids) {
vids_ = std::move(vids);
}
InterimResult::InterimResult(std::vector<std::string> &&colNames) {
colNames_ = std::move(colNames);
}
void InterimResult::setInterim(std::unique_ptr<RowSetWriter> rsWriter) {
rsWriter_ = std::move(rsWriter);
rsReader_ = std::make_unique<RowSetReader>(rsWriter_->schema(), rsWriter_->data());
}
StatusOr<std::vector<VertexID>> InterimResult::getVIDs(const std::string &col) const {
if (!vids_.empty()) {
DCHECK(rsReader_ == nullptr);
return vids_;
}
if (!hasData()) {
return Status::Error("Interim has no data.");
}
std::vector<VertexID> result;
auto iter = rsReader_->begin();
while (iter) {
VertexID vid;
auto rc = iter->getVid(col, vid);
if (rc != ResultType::SUCCEEDED) {
return Status::Error("Column `%s' not found", col.c_str());
}
result.emplace_back(vid);
++iter;
}
return result;
}
StatusOr<std::vector<VertexID>> InterimResult::getDistinctVIDs(const std::string &col) const {
if (!vids_.empty()) {
DCHECK(rsReader_ == nullptr);
return vids_;
}
if (!hasData()) {
return Status::Error("Interim has no data.");
}
std::unordered_set<VertexID> uniq;
auto iter = rsReader_->begin();
while (iter) {
VertexID vid;
auto rc = iter->getVid(col, vid);
if (rc != ResultType::SUCCEEDED) {
return Status::Error("Column `%s' not found", col.c_str());
}
uniq.emplace(vid);
++iter;
}
std::vector<VertexID> result(uniq.begin(), uniq.end());
return result;
}
StatusOr<std::vector<cpp2::RowValue>> InterimResult::getRows() const {
if (!hasData()) {
return Status::Error("Interim has no data.");
}
auto schema = rsReader_->schema();
auto columnCnt = schema->getNumFields();
VLOG(1) << "columnCnt: " << columnCnt;
std::vector<cpp2::RowValue> rows;
folly::StringPiece piece;
using nebula::cpp2::SupportedType;
auto rowIter = rsReader_->begin();
while (rowIter) {
std::vector<cpp2::ColumnValue> row;
row.reserve(columnCnt);
auto fieldIter = schema->begin();
int64_t cnt = 0;
while (fieldIter) {
++cnt;
auto type = fieldIter->getType().type;
auto field = fieldIter->getName();
VLOG(1) << "field: " << field << " type: " << static_cast<int64_t>(type);
row.emplace_back();
switch (type) {
case SupportedType::VID: {
int64_t v;
auto rc = rowIter->getVid(field, v);
if (rc != ResultType::SUCCEEDED) {
return Status::Error(
"Get vid from interim failed, field: %s, index: %ld.",
field, cnt);
}
row.back().set_id(v);
break;
}
case SupportedType::DOUBLE: {
double v;
auto rc = rowIter->getDouble(field, v);
if (rc != ResultType::SUCCEEDED) {
return Status::Error(
"Get double from interim failed, field: %s, index: %ld.",
field, cnt);
}
row.back().set_double_precision(v);
break;
}
case SupportedType::BOOL: {
bool v;
auto rc = rowIter->getBool(field, v);
if (rc != ResultType::SUCCEEDED) {
return Status::Error(
"Get bool from interim failed, field: %s, index: %ld.",
field, cnt);
}
row.back().set_bool_val(v);
break;
}
case SupportedType::STRING: {
auto rc = rowIter->getString(field, piece);
if (rc != ResultType::SUCCEEDED) {
return Status::Error(
"Get string from interim failed, field: %s, index: %ld.",
field, cnt);
}
row.back().set_str(piece.toString());
break;
}
case SupportedType::INT: {
int64_t v;
auto rc = rowIter->getInt(field, v);
if (rc != ResultType::SUCCEEDED) {
return Status::Error(
"Get int from interim failed, field: %s, index: %ld.",
field, cnt);
}
row.back().set_integer(v);
break;
}
case SupportedType::TIMESTAMP: {
int64_t v;
auto rc = rowIter->getInt(field, v);
if (rc != ResultType::SUCCEEDED) {
return Status::Error(
"Get timestamp from interim failed, field: %s, index: %ld.",
field, cnt);
}
row.back().set_timestamp(v);
break;
}
default:
std::string err =
folly::sformat("Unknown Type: %d", static_cast<int32_t>(type));
LOG(ERROR) << err;
return Status::Error(err);
}
++fieldIter;
}
rows.emplace_back();
rows.back().set_columns(std::move(row));
++rowIter;
}
return rows;
}
StatusOr<std::unique_ptr<InterimResult::InterimResultIndex>>
InterimResult::buildIndex(const std::string &vidColumn) const {
using nebula::cpp2::SupportedType;
std::unique_ptr<InterimResultIndex> index;
if (!hasData()) {
return Status::Error("Interim has no data.");
}
auto schema = rsReader_->schema();
auto columnCnt = schema->getNumFields();
uint32_t vidIndex = 0u;
index = std::make_unique<InterimResultIndex>();
for (auto i = 0u; i < columnCnt; i++) {
auto name = schema->getFieldName(i);
if (vidColumn == name) {
VLOG(1) << "col name: " << vidColumn << ", col index: " << i;
if (schema->getFieldType(i).type != SupportedType::VID) {
return Status::Error(
"Build internal index for input data failed. "
"The specific vid column `%s' is not type of VID, column index: %ul.",
vidColumn.c_str(), i);
}
vidIndex = i;
}
index->columnToIndex_[name] = i;
}
auto rowIter = rsReader_->begin();
auto rowIndex = 0u;
while (rowIter) {
Row row;
row.reserve(columnCnt);
for (auto i = 0u; i < columnCnt; i++) {
auto type = schema->getFieldType(i).type;
switch (type) {
case SupportedType::VID: {
int64_t v;
auto rc = rowIter->getVid(i, v);
if (rc != ResultType::SUCCEEDED) {
return Status::Error("Get vid from interim failed.");
}
if (i == vidIndex) {
index->vidToRowIndex_.emplace(v, rowIndex++);
}
row.emplace_back(v);
break;
}
case SupportedType::DOUBLE: {
double v;
auto rc = rowIter->getDouble(i, v);
if (rc != ResultType::SUCCEEDED) {
return Status::Error("Get double from interim failed.");
}
row.emplace_back(v);
break;
}
case SupportedType::BOOL: {
bool v;
auto rc = rowIter->getBool(i, v);
if (rc != ResultType::SUCCEEDED) {
return Status::Error("Get bool from interim failed.");
}
row.emplace_back(v);
break;
}
case SupportedType::STRING: {
folly::StringPiece piece;
auto rc = rowIter->getString(i, piece);
if (rc != ResultType::SUCCEEDED) {
return Status::Error("Get string from interim failed.");
}
row.emplace_back(piece.toString());
break;
}
case SupportedType::INT:
case SupportedType::TIMESTAMP: {
int64_t v;
auto rc = rowIter->getInt(i, v);
if (rc != ResultType::SUCCEEDED) {
return Status::Error("Get int from interim failed.");
}
row.emplace_back(v);
break;
}
default:
std::string err =
folly::sformat("Unknown Type: %d", static_cast<int32_t>(type));
LOG(ERROR) << err;
return Status::Error(err);
}
}
index->rows_.emplace_back(std::move(row));
++rowIter;
}
index->schema_ = schema;
return index;
}
OptVariantType
InterimResult::InterimResultIndex::getColumnWithRow(std::size_t row, const std::string &col) const {
if (row >= rows_.size()) {
return Status::Error("Out of range");
}
uint32_t columnIndex = 0;
{
auto iter = columnToIndex_.find(col);
if (iter == columnToIndex_.end()) {
LOG(ERROR) << "Prop `" << col << "' not found";
return Status::Error("Prop `%s' not found", col.c_str());
}
columnIndex = iter->second;
}
return rows_[row][columnIndex];
}
nebula::cpp2::SupportedType InterimResult::getColumnType(
const std::string &col) const {
auto schema = rsReader_->schema();
if (schema == nullptr) {
return nebula::cpp2::SupportedType::UNKNOWN;
}
auto type = schema->getFieldType(col);
return type.type;
}
Status InterimResult::castTo(cpp2::ColumnValue *col,
const nebula::cpp2::SupportedType &type) {
using nebula::cpp2::SupportedType;
switch (type) {
case SupportedType::VID:
return castToVid(col);
case SupportedType::INT:
return castToInt(col);
case SupportedType::TIMESTAMP:
return castToTimestamp(col);
case SupportedType::DOUBLE:
return castToDouble(col);
case SupportedType::BOOL:
return castToBool(col);
case SupportedType::STRING:
return castToStr(col);
default:
// Notice: if we implement some other type,
// we should update here.
LOG(ERROR) << NotSupported << static_cast<int32_t>(type);
return Status::Error(NotSupported);
}
}
Status InterimResult::castToInt(cpp2::ColumnValue *col) {
switch (col->getType()) {
case cpp2::ColumnValue::Type::integer:
break;
case cpp2::ColumnValue::Type::id:
col->set_integer(col->get_id());
break;
case cpp2::ColumnValue::Type::timestamp:
col->set_integer(col->get_timestamp());
break;
case cpp2::ColumnValue::Type::double_precision: {
auto d2i = static_cast<int64_t>(col->get_double_precision());
col->set_integer(d2i);
break;
}
case cpp2::ColumnValue::Type::bool_val: {
auto b2i = static_cast<int64_t>(col->get_bool_val());
col->set_integer(b2i);
break;
}
case cpp2::ColumnValue::Type::str: {
auto r = folly::tryTo<int64_t>(col->get_str());
if (r.hasValue()) {
col->set_integer(r.value());
break;
} else {
return Status::Error(
"Casting from string `%s' to int failed.", col->get_str().c_str());
}
}
default:
LOG(ERROR) << NotSupported << static_cast<int32_t>(col->getType());
return Status::Error(NotSupported);
}
return Status::OK();
}
Status InterimResult::castToVid(cpp2::ColumnValue *col) {
switch (col->getType()) {
case cpp2::ColumnValue::Type::id:
break;
case cpp2::ColumnValue::Type::integer:
col->set_id(col->get_integer());
break;
case cpp2::ColumnValue::Type::timestamp:
col->set_id(col->get_timestamp());
break;
case cpp2::ColumnValue::Type::double_precision: {
auto d2i = static_cast<int64_t>(col->get_double_precision());
col->set_id(d2i);
break;
}
case cpp2::ColumnValue::Type::bool_val: {
auto b2i = static_cast<int64_t>(col->get_bool_val());
col->set_id(b2i);
break;
}
case cpp2::ColumnValue::Type::str: {
auto r = folly::tryTo<int64_t>(col->get_str());
if (r.hasValue()) {
col->set_id(r.value());
break;
} else {
return Status::Error(
"Casting from string %s to vid failed.", col->get_str().c_str());
}
}
default:
LOG(ERROR) << NotSupported << static_cast<int32_t>(col->getType());
return Status::Error(NotSupported);
}
return Status::OK();
}
Status InterimResult::castToTimestamp(cpp2::ColumnValue *col) {
switch (col->getType()) {
case cpp2::ColumnValue::Type::timestamp:
break;
case cpp2::ColumnValue::Type::integer:
col->set_timestamp(col->get_integer());
break;
case cpp2::ColumnValue::Type::id:
col->set_timestamp(col->get_id());
break;
case cpp2::ColumnValue::Type::double_precision: {
auto d2i = static_cast<int64_t>(col->get_double_precision());
col->set_timestamp(d2i);
break;
}
case cpp2::ColumnValue::Type::bool_val: {
auto b2i = static_cast<int64_t>(col->get_bool_val());
col->set_timestamp(b2i);
break;
}
case cpp2::ColumnValue::Type::str: {
auto r = folly::tryTo<int64_t>(col->get_str());
if (r.hasValue()) {
col->set_timestamp(r.value());
break;
} else {
return Status::Error(
"Casting from string %s to timestamp failed.", col->get_str().c_str());
}
}
default:
LOG(ERROR) << NotSupported << static_cast<int32_t>(col->getType());
return Status::Error(NotSupported);
}
return Status::OK();
}
Status InterimResult::castToDouble(cpp2::ColumnValue *col) {
switch (col->getType()) {
case cpp2::ColumnValue::Type::id: {
auto i2d = static_cast<double>(col->get_id());
col->set_double_precision(i2d);
break;
}
case cpp2::ColumnValue::Type::integer: {
auto i2d = static_cast<double>(col->get_integer());
col->set_double_precision(i2d);
break;
}
case cpp2::ColumnValue::Type::double_precision:
break;
case cpp2::ColumnValue::Type::bool_val: {
auto b2d = static_cast<double>(col->get_bool_val());
col->set_double_precision(b2d);
break;
}
case cpp2::ColumnValue::Type::str: {
auto r = folly::tryTo<double>(col->get_str());
if (r.hasValue()) {
col->set_double_precision(r.value());
break;
} else {
return Status::Error(
"Casting from string %s to double failed.", col->get_str().c_str());
}
}
case cpp2::ColumnValue::Type::timestamp: {
auto i2d = static_cast<double>(col->get_timestamp());
col->set_double_precision(i2d);
break;
}
default:
LOG(ERROR) << NotSupported << static_cast<int32_t>(col->getType());
return Status::Error(NotSupported);
}
return Status::OK();
}
Status InterimResult::castToBool(cpp2::ColumnValue *col) {
switch (col->getType()) {
case cpp2::ColumnValue::Type::id: {
auto i2b = col->get_id() != 0;
col->set_bool_val(i2b);
break;
}
case cpp2::ColumnValue::Type::integer: {
auto i2b = col->get_integer() != 0;
col->set_bool_val(i2b);
break;
}
case cpp2::ColumnValue::Type::double_precision: {
auto d2b = col->get_double_precision() != 0.0;
col->set_bool_val(d2b);
break;
}
case cpp2::ColumnValue::Type::bool_val:
break;
case cpp2::ColumnValue::Type::str: {
auto s2b = col->get_str().empty();
col->set_bool_val(s2b);
break;
}
case cpp2::ColumnValue::Type::timestamp: {
auto i2b = col->get_timestamp() != 0;
col->set_bool_val(i2b);
break;
}
default:
LOG(ERROR) << NotSupported << static_cast<int32_t>(col->getType());
return Status::Error(NotSupported);
}
return Status::OK();
}
Status InterimResult::castToStr(cpp2::ColumnValue *col) {
switch (col->getType()) {
case cpp2::ColumnValue::Type::id: {
auto i2s = folly::to<std::string>(col->get_id());
col->set_str(std::move(i2s));
break;
}
case cpp2::ColumnValue::Type::integer: {
auto i2s = folly::to<std::string>(col->get_integer());
col->set_str(std::move(i2s));
break;
}
case cpp2::ColumnValue::Type::double_precision: {
auto d2s = folly::to<std::string>(col->get_double_precision());
col->set_str(std::move(d2s));
break;
}
case cpp2::ColumnValue::Type::bool_val: {
auto b2s = folly::to<std::string>(col->get_bool_val());
col->set_str(std::move(b2s));
break;
}
case cpp2::ColumnValue::Type::str:
break;
case cpp2::ColumnValue::Type::timestamp: {
auto i2s = folly::to<std::string>(col->get_timestamp());
col->set_str(std::move(i2s));
break;
}
default:
LOG(ERROR) << NotSupported << static_cast<int32_t>(col->getType());
return Status::Error(NotSupported);
}
return Status::OK();
}
StatusOr<std::unique_ptr<InterimResult>>
InterimResult::getInterim(
std::shared_ptr<const meta::SchemaProviderIf> resultSchema,
std::vector<cpp2::RowValue> &rows) {
auto rsWriter = std::make_unique<RowSetWriter>(resultSchema);
for (auto &r : rows) {
RowWriter writer(resultSchema);
auto &cols = r.get_columns();
for (auto &col : cols) {
switch (col.getType()) {
case cpp2::ColumnValue::Type::id:
writer << col.get_id();
break;
case cpp2::ColumnValue::Type::integer:
writer << col.get_integer();
break;
case cpp2::ColumnValue::Type::double_precision:
writer << col.get_double_precision();
break;
case cpp2::ColumnValue::Type::bool_val:
writer << col.get_bool_val();
break;
case cpp2::ColumnValue::Type::str:
writer << col.get_str();
break;
case cpp2::ColumnValue::Type::timestamp:
writer << col.get_timestamp();
break;
default:
LOG(ERROR) << NotSupported << static_cast<int32_t>(col.getType());
return Status::Error(NotSupported);
}
}
rsWriter->addRow(writer);
}
std::vector<std::string> colNames;
auto iter = resultSchema->begin();
while (iter) {
colNames.emplace_back(iter->getName());
++iter;
}
auto result = std::make_unique<InterimResult>(std::move(colNames));
result->setInterim(std::move(rsWriter));
return result;
}
Status InterimResult::applyTo(std::function<Status(const RowReader *reader)> visitor,
int64_t limit) const {
auto status = Status::OK();
auto iter = rsReader_->begin();
while (iter && (limit > 0)) {
status = visitor(&*iter);
if (!status.ok()) {
break;
}
--limit;
++iter;
}
return status;
}
Status InterimResult::getResultWriter(const std::vector<cpp2::RowValue> &rows,
RowSetWriter *rsWriter) {
if (rsWriter == nullptr) {
return Status::Error("rsWriter is nullptr");
}
using Type = cpp2::ColumnValue::Type;
for (auto &row : rows) {
RowWriter writer(rsWriter->schema());
auto columns = row.get_columns();
for (auto &column : columns) {
switch (column.getType()) {
case Type::id:
writer << column.get_id();
break;
case Type::integer:
writer << column.get_integer();
break;
case Type::double_precision:
writer << column.get_double_precision();
break;
case Type::bool_val:
writer << column.get_bool_val();
break;
case Type::str:
writer << column.get_str();
break;
case Type::timestamp:
writer << column.get_timestamp();
break;
default:
LOG(ERROR) << "Not Support: " << column.getType();
return Status::Error("Not Support: %d", column.getType());
}
}
rsWriter->addRow(writer);
}
return Status::OK();
}
} // namespace graph
} // namespace nebula
| 1 | 29,370 | I want to know in which situation you need to traverse from "Timestamp" data..... | vesoft-inc-nebula | cpp |
@@ -687,7 +687,11 @@ func (ctx *signingCtx) buildBodyDigest() error {
if !aws.IsReaderSeekable(ctx.Body) {
return fmt.Errorf("cannot use unseekable request body %T, for signed request with body", ctx.Body)
}
- hash = hex.EncodeToString(makeSha256Reader(ctx.Body))
+ hasBytes, err := makeSha256Reader(ctx.Body)
+ if err != nil {
+ return err
+ }
+ hash = hex.EncodeToString(hasBytes)
}
if includeSHA256Header { | 1 | // Package v4 implements signing for AWS V4 signer
//
// Provides request signing for request that need to be signed with
// AWS V4 Signatures.
//
// Standalone Signer
//
// Generally using the signer outside of the SDK should not require any additional
// logic when using Go v1.5 or higher. The signer does this by taking advantage
// of the URL.EscapedPath method. If your request URI requires additional escaping
// you may need to use the URL.Opaque to define what the raw URI should be sent
// to the service as.
//
// The signer will first check the URL.Opaque field, and use its value if set.
// The signer does require the URL.Opaque field to be set in the form of:
//
// "//<hostname>/<path>"
//
// // e.g.
// "//example.com/some/path"
//
// The leading "//" and hostname are required or the URL.Opaque escaping will
// not work correctly.
//
// If URL.Opaque is not set the signer will fallback to the URL.EscapedPath()
// method and using the returned value. If you're using Go v1.4 you must set
// URL.Opaque if the URI path needs escaping. If URL.Opaque is not set with
// Go v1.5 the signer will fallback to URL.Path.
//
// AWS v4 signature validation requires that the canonical string's URI path
// element must be the URI escaped form of the HTTP request's path.
// http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
//
// The Go HTTP client will perform escaping automatically on the request. Some
// of these escaping may cause signature validation errors because the HTTP
// request differs from the URI path or query that the signature was generated.
// https://golang.org/pkg/net/url/#URL.EscapedPath
//
// Because of this, when using the signer outside of the SDK it is recommended
// to explicitly escape the request prior to signing, which helps prevent
// signature validation errors. This can be done by setting
// the URL.Opaque or URL.RawPath. The SDK will use URL.Opaque first and then
// call URL.EscapedPath() if Opaque is not set.
//
// If signing a request intended for HTTP2 server, and you're using Go 1.6.2
// through 1.7.4 you should use the URL.RawPath as the pre-escaped form of the
// request URL. https://github.com/golang/go/issues/16847 points to a bug in
// Go pre 1.8 that fails to make HTTP2 requests using absolute URL in the HTTP
// message. URL.Opaque generally will force Go to make requests with absolute URL.
// URL.RawPath does not do this, but RawPath must be a valid escaping of Path
// or url.EscapedPath will ignore the RawPath escaping.
//
// Test `TestStandaloneSign` provides a complete example of using the signer
// outside of the SDK and pre-escaping the URI path.
package v4
import (
"crypto/hmac"
"crypto/sha256"
"encoding/hex"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"sort"
"strconv"
"strings"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/internal/sdkio"
"github.com/aws/aws-sdk-go/private/protocol/rest"
)
const (
authHeaderPrefix = "AWS4-HMAC-SHA256"
timeFormat = "20060102T150405Z"
shortTimeFormat = "20060102"
// emptyStringSHA256 is a SHA256 of an empty string
emptyStringSHA256 = `e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855`
)
var ignoredHeaders = rules{
blacklist{
mapRule{
"Authorization": struct{}{},
"User-Agent": struct{}{},
"X-Amzn-Trace-Id": struct{}{},
},
},
}
// requiredSignedHeaders is a whitelist for building canonical headers.
var requiredSignedHeaders = rules{
whitelist{
mapRule{
"Cache-Control": struct{}{},
"Content-Disposition": struct{}{},
"Content-Encoding": struct{}{},
"Content-Language": struct{}{},
"Content-Md5": struct{}{},
"Content-Type": struct{}{},
"Expires": struct{}{},
"If-Match": struct{}{},
"If-Modified-Since": struct{}{},
"If-None-Match": struct{}{},
"If-Unmodified-Since": struct{}{},
"Range": struct{}{},
"X-Amz-Acl": struct{}{},
"X-Amz-Copy-Source": struct{}{},
"X-Amz-Copy-Source-If-Match": struct{}{},
"X-Amz-Copy-Source-If-Modified-Since": struct{}{},
"X-Amz-Copy-Source-If-None-Match": struct{}{},
"X-Amz-Copy-Source-If-Unmodified-Since": struct{}{},
"X-Amz-Copy-Source-Range": struct{}{},
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{},
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": struct{}{},
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": struct{}{},
"X-Amz-Grant-Full-control": struct{}{},
"X-Amz-Grant-Read": struct{}{},
"X-Amz-Grant-Read-Acp": struct{}{},
"X-Amz-Grant-Write": struct{}{},
"X-Amz-Grant-Write-Acp": struct{}{},
"X-Amz-Metadata-Directive": struct{}{},
"X-Amz-Mfa": struct{}{},
"X-Amz-Request-Payer": struct{}{},
"X-Amz-Server-Side-Encryption": struct{}{},
"X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": struct{}{},
"X-Amz-Server-Side-Encryption-Customer-Algorithm": struct{}{},
"X-Amz-Server-Side-Encryption-Customer-Key": struct{}{},
"X-Amz-Server-Side-Encryption-Customer-Key-Md5": struct{}{},
"X-Amz-Storage-Class": struct{}{},
"X-Amz-Tagging": struct{}{},
"X-Amz-Website-Redirect-Location": struct{}{},
"X-Amz-Content-Sha256": struct{}{},
},
},
patterns{"X-Amz-Meta-"},
}
// allowedQueryHoisting is a whitelist for building query headers. The boolean
// value represents whether or not it is a pattern.
var allowedQueryHoisting = inclusiveRules{
blacklist{requiredSignedHeaders},
patterns{"X-Amz-"},
}
// Signer applies AWS v4 signing to given request. Use this to sign requests
// that need to be signed with AWS V4 Signatures.
type Signer struct {
// The authentication credentials the request will be signed against.
// This value must be set to sign requests.
Credentials *credentials.Credentials
// Sets the log level the signer should use when reporting information to
// the logger. If the logger is nil nothing will be logged. See
// aws.LogLevelType for more information on available logging levels
//
// By default nothing will be logged.
Debug aws.LogLevelType
	// The logger that logging information will be written to. If the logger
	// is nil, nothing will be logged.
Logger aws.Logger
// Disables the Signer's moving HTTP header key/value pairs from the HTTP
// request header to the request's query string. This is most commonly used
// with pre-signed requests preventing headers from being added to the
// request's query string.
DisableHeaderHoisting bool
// Disables the automatic escaping of the URI path of the request for the
	// signature's canonical string's path. For services that do not need additional
// escaping then use this to disable the signer escaping the path.
//
// S3 is an example of a service that does not need additional escaping.
//
// http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
DisableURIPathEscaping bool
	// Disables the automatic setting of the HTTP request's Body field with the
// io.ReadSeeker passed in to the signer. This is useful if you're using a
// custom wrapper around the body for the io.ReadSeeker and want to preserve
// the Body value on the Request.Body.
//
// This does run the risk of signing a request with a body that will not be
// sent in the request. Need to ensure that the underlying data of the Body
// values are the same.
DisableRequestBodyOverwrite bool
// currentTimeFn returns the time value which represents the current time.
// This value should only be used for testing. If it is nil the default
// time.Now will be used.
currentTimeFn func() time.Time
// UnsignedPayload will prevent signing of the payload. This will only
// work for services that have support for this.
UnsignedPayload bool
}
// NewSigner returns a Signer pointer configured with the credentials and optional
// option values provided. If no options are provided the Signer will use its
// default configuration.
func NewSigner(credentials *credentials.Credentials, options ...func(*Signer)) *Signer {
v4 := &Signer{
Credentials: credentials,
}
for _, option := range options {
option(v4)
}
return v4
}
type signingCtx struct {
ServiceName string
Region string
Request *http.Request
Body io.ReadSeeker
Query url.Values
Time time.Time
ExpireTime time.Duration
SignedHeaderVals http.Header
DisableURIPathEscaping bool
credValues credentials.Value
isPresign bool
formattedTime string
formattedShortTime string
unsignedPayload bool
bodyDigest string
signedHeaders string
canonicalHeaders string
canonicalString string
credentialString string
stringToSign string
signature string
authorization string
}
// Sign signs AWS v4 requests with the provided body, service name, region the
// request is made to, and time the request is signed at. The signTime allows
// you to specify that a request is signed for the future, and cannot be
// used until then.
//
// Returns a list of HTTP headers that were included in the signature or an
// error if signing the request failed. Generally for signed requests this value
// is not needed as the full request context will be captured by the http.Request
// value. It is included for reference though.
//
// Sign will set the request's Body to be the `body` parameter passed in. If
// the body is not already an io.ReadCloser, it will be wrapped within one. If
// a `nil` body parameter is passed to Sign, the request's Body field will be
// also set to nil. It's important to note that this functionality will not
// change the request's ContentLength.
//
// Sign differs from Presign in that it will sign the request using HTTP
// header values. This type of signing is intended for http.Request values that
// will not be shared, or are shared in a way the header values on the request
// will not be lost.
//
// The request's body is an io.ReadSeeker so the SHA256 of the body can be
// generated. To bypass the signer computing the hash you can set the
// "X-Amz-Content-Sha256" header with a precomputed value. The signer will
// only compute the hash if the request header value is empty.
func (v4 Signer) Sign(r *http.Request, body io.ReadSeeker, service, region string, signTime time.Time) (http.Header, error) {
return v4.signWithBody(r, body, service, region, 0, false, signTime)
}
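// exampleStandaloneSign is an illustrative sketch (not part of the SDK's API)
// of the standalone signing flow described in the package documentation. The
// endpoint, service name, region and credentials below are hypothetical
// placeholders.
func exampleStandaloneSign() error {
	signer := NewSigner(credentials.NewStaticCredentials("AKID", "SECRET", ""))
	body := strings.NewReader("{}")
	req, err := http.NewRequest("POST", "https://service.us-east-1.amazonaws.com/", body)
	if err != nil {
		return err
	}
	// Sign hashes the seekable body for X-Amz-Content-Sha256 (unless that
	// header is already set) and writes the Authorization header on req.
	_, err = signer.Sign(req, body, "service", "us-east-1", time.Now())
	return err
}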
// Presign signs AWS v4 requests with the provided body, service name, region
// the request is made to, and time the request is signed at. The signTime
// allows you to specify that a request is signed for the future, and cannot
// be used until then.
//
// Returns a list of HTTP headers that were included in the signature or an
// error if signing the request failed. For presigned requests these headers
// and their values must be included on the HTTP request when it is made. This
// is helpful to know what header values need to be shared with the party the
// presigned request will be distributed to.
//
// Presign differs from Sign in that it will sign the request using query string
// instead of header values. This allows you to share the Presigned Request's
// URL with third parties, or distribute it throughout your system with minimal
// dependencies.
//
// Presign also takes an exp value which is the duration the
// signed request will be valid after the signing time. This allows you to
// set when the request will expire.
//
// The request's body is an io.ReadSeeker so the SHA256 of the body can be
// generated. To bypass the signer computing the hash you can set the
// "X-Amz-Content-Sha256" header with a precomputed value. The signer will
// only compute the hash if the request header value is empty.
//
// Presigning a S3 request will not compute the body's SHA256 hash by default.
// This is done due to the general use case for S3 presigned URLs is to share
// PUT/GET capabilities. If you would like to include the body's SHA256 in the
// presigned request's signature you can set the "X-Amz-Content-Sha256"
// HTTP header and that will be included in the request's signature.
func (v4 Signer) Presign(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, signTime time.Time) (http.Header, error) {
return v4.signWithBody(r, body, service, region, exp, true, signTime)
}
func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, isPresign bool, signTime time.Time) (http.Header, error) {
currentTimeFn := v4.currentTimeFn
if currentTimeFn == nil {
currentTimeFn = time.Now
}
ctx := &signingCtx{
Request: r,
Body: body,
Query: r.URL.Query(),
Time: signTime,
ExpireTime: exp,
isPresign: isPresign,
ServiceName: service,
Region: region,
DisableURIPathEscaping: v4.DisableURIPathEscaping,
unsignedPayload: v4.UnsignedPayload,
}
for key := range ctx.Query {
sort.Strings(ctx.Query[key])
}
if ctx.isRequestSigned() {
ctx.Time = currentTimeFn()
ctx.handlePresignRemoval()
}
var err error
ctx.credValues, err = v4.Credentials.Get()
if err != nil {
return http.Header{}, err
}
ctx.sanitizeHostForHeader()
ctx.assignAmzQueryValues()
if err := ctx.build(v4.DisableHeaderHoisting); err != nil {
return nil, err
}
// If the request is not presigned the body should be attached to it. This
// prevents the confusion of wanting to send a signed request without
// the body the request was signed for attached.
if !(v4.DisableRequestBodyOverwrite || ctx.isPresign) {
var reader io.ReadCloser
if body != nil {
var ok bool
if reader, ok = body.(io.ReadCloser); !ok {
reader = ioutil.NopCloser(body)
}
}
r.Body = reader
}
if v4.Debug.Matches(aws.LogDebugWithSigning) {
v4.logSigningInfo(ctx)
}
return ctx.SignedHeaderVals, nil
}
func (ctx *signingCtx) sanitizeHostForHeader() {
request.SanitizeHostForHeader(ctx.Request)
}
func (ctx *signingCtx) handlePresignRemoval() {
if !ctx.isPresign {
return
}
// The credentials have expired for this request. The current signing
	// is invalid, and needs to be redone because otherwise the request will fail.
ctx.removePresign()
	// Update the request's query string to ensure the values stay in
// sync in the case retrieving the new credentials fails.
ctx.Request.URL.RawQuery = ctx.Query.Encode()
}
func (ctx *signingCtx) assignAmzQueryValues() {
if ctx.isPresign {
ctx.Query.Set("X-Amz-Algorithm", authHeaderPrefix)
if ctx.credValues.SessionToken != "" {
ctx.Query.Set("X-Amz-Security-Token", ctx.credValues.SessionToken)
} else {
ctx.Query.Del("X-Amz-Security-Token")
}
return
}
if ctx.credValues.SessionToken != "" {
ctx.Request.Header.Set("X-Amz-Security-Token", ctx.credValues.SessionToken)
}
}
// SignRequestHandler is a named request handler the SDK will use to sign
// service client request with using the V4 signature.
var SignRequestHandler = request.NamedHandler{
Name: "v4.SignRequestHandler", Fn: SignSDKRequest,
}
// SignSDKRequest signs an AWS request with the V4 signature. This
// request handler should only be used with the SDK's built in service client's
// API operation requests.
//
// This function should not be used on its own, but in conjunction with
// an AWS service client's API operation call. To sign a standalone request
// not created by a service client's API operation method use the "Sign" or
// "Presign" functions of the "Signer" type.
//
// If the credentials of the request's config are set to
// credentials.AnonymousCredentials the request will not be signed.
func SignSDKRequest(req *request.Request) {
SignSDKRequestWithCurrentTime(req, time.Now)
}
// BuildNamedHandler will build a generic handler for signing.
func BuildNamedHandler(name string, opts ...func(*Signer)) request.NamedHandler {
return request.NamedHandler{
Name: name,
Fn: func(req *request.Request) {
SignSDKRequestWithCurrentTime(req, time.Now, opts...)
},
}
}
// SignSDKRequestWithCurrentTime will sign the SDK's request using the time
// function passed in. Behaves the same as SignSDKRequest with the exception
// the request is signed with the value returned by the current time function.
func SignSDKRequestWithCurrentTime(req *request.Request, curTimeFn func() time.Time, opts ...func(*Signer)) {
	// If the request does not need to be signed, i.e. the AnonymousCredentials
	// object is used, skip signing the request.
if req.Config.Credentials == credentials.AnonymousCredentials {
return
}
region := req.ClientInfo.SigningRegion
if region == "" {
region = aws.StringValue(req.Config.Region)
}
name := req.ClientInfo.SigningName
if name == "" {
name = req.ClientInfo.ServiceName
}
v4 := NewSigner(req.Config.Credentials, func(v4 *Signer) {
v4.Debug = req.Config.LogLevel.Value()
v4.Logger = req.Config.Logger
v4.DisableHeaderHoisting = req.NotHoist
v4.currentTimeFn = curTimeFn
if name == "s3" {
// S3 service should not have any escaping applied
v4.DisableURIPathEscaping = true
}
		// Prevents setting the HTTPRequest's Body, since the Body could be
		// wrapped in a custom io.Closer that we do not want the signer to
		// stomp on.
v4.DisableRequestBodyOverwrite = true
})
for _, opt := range opts {
opt(v4)
}
curTime := curTimeFn()
signedHeaders, err := v4.signWithBody(req.HTTPRequest, req.GetBody(),
name, region, req.ExpireTime, req.ExpireTime > 0, curTime,
)
if err != nil {
req.Error = err
req.SignedHeaderVals = nil
return
}
req.SignedHeaderVals = signedHeaders
req.LastSignedAt = curTime
}
const logSignInfoMsg = `DEBUG: Request Signature:
---[ CANONICAL STRING ]-----------------------------
%s
---[ STRING TO SIGN ]--------------------------------
%s%s
-----------------------------------------------------`
const logSignedURLMsg = `
---[ SIGNED URL ]------------------------------------
%s`
func (v4 *Signer) logSigningInfo(ctx *signingCtx) {
signedURLMsg := ""
if ctx.isPresign {
signedURLMsg = fmt.Sprintf(logSignedURLMsg, ctx.Request.URL.String())
}
msg := fmt.Sprintf(logSignInfoMsg, ctx.canonicalString, ctx.stringToSign, signedURLMsg)
v4.Logger.Log(msg)
}
func (ctx *signingCtx) build(disableHeaderHoisting bool) error {
ctx.buildTime() // no depends
ctx.buildCredentialString() // no depends
if err := ctx.buildBodyDigest(); err != nil {
return err
}
unsignedHeaders := ctx.Request.Header
if ctx.isPresign {
if !disableHeaderHoisting {
urlValues := url.Values{}
urlValues, unsignedHeaders = buildQuery(allowedQueryHoisting, unsignedHeaders) // no depends
for k := range urlValues {
ctx.Query[k] = urlValues[k]
}
}
}
ctx.buildCanonicalHeaders(ignoredHeaders, unsignedHeaders)
ctx.buildCanonicalString() // depends on canon headers / signed headers
ctx.buildStringToSign() // depends on canon string
ctx.buildSignature() // depends on string to sign
if ctx.isPresign {
ctx.Request.URL.RawQuery += "&X-Amz-Signature=" + ctx.signature
} else {
parts := []string{
authHeaderPrefix + " Credential=" + ctx.credValues.AccessKeyID + "/" + ctx.credentialString,
"SignedHeaders=" + ctx.signedHeaders,
"Signature=" + ctx.signature,
}
ctx.Request.Header.Set("Authorization", strings.Join(parts, ", "))
}
return nil
}
func (ctx *signingCtx) buildTime() {
ctx.formattedTime = ctx.Time.UTC().Format(timeFormat)
ctx.formattedShortTime = ctx.Time.UTC().Format(shortTimeFormat)
if ctx.isPresign {
duration := int64(ctx.ExpireTime / time.Second)
ctx.Query.Set("X-Amz-Date", ctx.formattedTime)
ctx.Query.Set("X-Amz-Expires", strconv.FormatInt(duration, 10))
} else {
ctx.Request.Header.Set("X-Amz-Date", ctx.formattedTime)
}
}
func (ctx *signingCtx) buildCredentialString() {
ctx.credentialString = strings.Join([]string{
ctx.formattedShortTime,
ctx.Region,
ctx.ServiceName,
"aws4_request",
}, "/")
if ctx.isPresign {
ctx.Query.Set("X-Amz-Credential", ctx.credValues.AccessKeyID+"/"+ctx.credentialString)
}
}
func buildQuery(r rule, header http.Header) (url.Values, http.Header) {
query := url.Values{}
unsignedHeaders := http.Header{}
for k, h := range header {
if r.IsValid(k) {
query[k] = h
} else {
unsignedHeaders[k] = h
}
}
return query, unsignedHeaders
}
func (ctx *signingCtx) buildCanonicalHeaders(r rule, header http.Header) {
var headers []string
headers = append(headers, "host")
for k, v := range header {
canonicalKey := http.CanonicalHeaderKey(k)
if !r.IsValid(canonicalKey) {
continue // ignored header
}
if ctx.SignedHeaderVals == nil {
ctx.SignedHeaderVals = make(http.Header)
}
lowerCaseKey := strings.ToLower(k)
if _, ok := ctx.SignedHeaderVals[lowerCaseKey]; ok {
// include additional values
ctx.SignedHeaderVals[lowerCaseKey] = append(ctx.SignedHeaderVals[lowerCaseKey], v...)
continue
}
headers = append(headers, lowerCaseKey)
ctx.SignedHeaderVals[lowerCaseKey] = v
}
sort.Strings(headers)
ctx.signedHeaders = strings.Join(headers, ";")
if ctx.isPresign {
ctx.Query.Set("X-Amz-SignedHeaders", ctx.signedHeaders)
}
headerValues := make([]string, len(headers))
for i, k := range headers {
if k == "host" {
if ctx.Request.Host != "" {
headerValues[i] = "host:" + ctx.Request.Host
} else {
headerValues[i] = "host:" + ctx.Request.URL.Host
}
} else {
headerValues[i] = k + ":" +
strings.Join(ctx.SignedHeaderVals[k], ",")
}
}
stripExcessSpaces(headerValues)
ctx.canonicalHeaders = strings.Join(headerValues, "\n")
}
func (ctx *signingCtx) buildCanonicalString() {
ctx.Request.URL.RawQuery = strings.Replace(ctx.Query.Encode(), "+", "%20", -1)
uri := getURIPath(ctx.Request.URL)
if !ctx.DisableURIPathEscaping {
uri = rest.EscapePath(uri, false)
}
ctx.canonicalString = strings.Join([]string{
ctx.Request.Method,
uri,
ctx.Request.URL.RawQuery,
ctx.canonicalHeaders + "\n",
ctx.signedHeaders,
ctx.bodyDigest,
}, "\n")
}
func (ctx *signingCtx) buildStringToSign() {
ctx.stringToSign = strings.Join([]string{
authHeaderPrefix,
ctx.formattedTime,
ctx.credentialString,
hex.EncodeToString(makeSha256([]byte(ctx.canonicalString))),
}, "\n")
}
func (ctx *signingCtx) buildSignature() {
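	// Derive the SigV4 signing key by chaining HMAC-SHA256 over the short
	// date, region, service name and the literal "aws4_request", then use
	// that key to HMAC the string to sign.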
secret := ctx.credValues.SecretAccessKey
date := makeHmac([]byte("AWS4"+secret), []byte(ctx.formattedShortTime))
region := makeHmac(date, []byte(ctx.Region))
service := makeHmac(region, []byte(ctx.ServiceName))
credentials := makeHmac(service, []byte("aws4_request"))
signature := makeHmac(credentials, []byte(ctx.stringToSign))
ctx.signature = hex.EncodeToString(signature)
}
func (ctx *signingCtx) buildBodyDigest() error {
hash := ctx.Request.Header.Get("X-Amz-Content-Sha256")
if hash == "" {
includeSHA256Header := ctx.unsignedPayload ||
ctx.ServiceName == "s3" ||
ctx.ServiceName == "glacier"
s3Presign := ctx.isPresign && ctx.ServiceName == "s3"
if ctx.unsignedPayload || s3Presign {
hash = "UNSIGNED-PAYLOAD"
includeSHA256Header = !s3Presign
} else if ctx.Body == nil {
hash = emptyStringSHA256
} else {
if !aws.IsReaderSeekable(ctx.Body) {
return fmt.Errorf("cannot use unseekable request body %T, for signed request with body", ctx.Body)
}
hash = hex.EncodeToString(makeSha256Reader(ctx.Body))
}
if includeSHA256Header {
ctx.Request.Header.Set("X-Amz-Content-Sha256", hash)
}
}
ctx.bodyDigest = hash
return nil
}
// isRequestSigned returns if the request is currently signed or presigned
func (ctx *signingCtx) isRequestSigned() bool {
if ctx.isPresign && ctx.Query.Get("X-Amz-Signature") != "" {
return true
}
if ctx.Request.Header.Get("Authorization") != "" {
return true
}
return false
}
// unsign removes signing flags for both signed and presigned requests.
func (ctx *signingCtx) removePresign() {
ctx.Query.Del("X-Amz-Algorithm")
ctx.Query.Del("X-Amz-Signature")
ctx.Query.Del("X-Amz-Security-Token")
ctx.Query.Del("X-Amz-Date")
ctx.Query.Del("X-Amz-Expires")
ctx.Query.Del("X-Amz-Credential")
ctx.Query.Del("X-Amz-SignedHeaders")
}
func makeHmac(key []byte, data []byte) []byte {
hash := hmac.New(sha256.New, key)
hash.Write(data)
return hash.Sum(nil)
}
func makeSha256(data []byte) []byte {
hash := sha256.New()
hash.Write(data)
return hash.Sum(nil)
}
func makeSha256Reader(reader io.ReadSeeker) []byte {
hash := sha256.New()
start, _ := reader.Seek(0, sdkio.SeekCurrent)
defer reader.Seek(start, sdkio.SeekStart)
// Use CopyN to avoid allocating the 32KB buffer in io.Copy for bodies
// smaller than 32KB. Fall back to io.Copy if we fail to determine the size.
size, err := aws.SeekerLen(reader)
if err != nil {
io.Copy(hash, reader)
} else {
io.CopyN(hash, reader, size)
}
return hash.Sum(nil)
}
const doubleSpace = " "
// stripExcessSpaces will rewrite the passed in slice's string values to not
// contain multiple side-by-side spaces.
func stripExcessSpaces(vals []string) {
var j, k, l, m, spaces int
for i, str := range vals {
// Trim trailing spaces
for j = len(str) - 1; j >= 0 && str[j] == ' '; j-- {
}
// Trim leading spaces
for k = 0; k < j && str[k] == ' '; k++ {
}
str = str[k : j+1]
// Strip multiple spaces.
j = strings.Index(str, doubleSpace)
if j < 0 {
vals[i] = str
continue
}
buf := []byte(str)
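		// Compact the buffer in place: k reads, m writes, and spaces counts
		// the current run of spaces so only the first space in each run is
		// copied forward.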
for k, m, l = j, j, len(buf); k < l; k++ {
if buf[k] == ' ' {
if spaces == 0 {
// First space.
buf[m] = buf[k]
m++
}
spaces++
} else {
// End of multiple spaces.
spaces = 0
buf[m] = buf[k]
m++
}
}
vals[i] = string(buf[:m])
}
}
| 1 | 9,777 | nit: hasBytes should be hashBytes | aws-aws-sdk-go | go |
@@ -171,6 +171,15 @@ def perform_analysis(args, skip_handler, context, actions, metadata):
config_map[ClangSA.ANALYZER_NAME].set_checker_enabled(
ReturnValueCollector.checker_analyze)
+ # Statistics collector checkers must be explicitly disabled
+ # as they trash the output.
+ if "clangsa" in analyzers:
+ config_map[ClangSA.ANALYZER_NAME].set_checker_enabled(
+ SpecialReturnValueCollector.checker_collect, False)
+
+ config_map[ClangSA.ANALYZER_NAME].set_checker_enabled(
+ ReturnValueCollector.checker_collect, False)
+
# Save some metadata information.
versions = __get_analyzer_version(context, config_map)
metadata['versions'].update(versions) | 1 | # -------------------------------------------------------------------------
# The CodeChecker Infrastructure
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
# -------------------------------------------------------------------------
"""
Prepare and start different analysis types
"""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from multiprocessing.managers import SyncManager
import os
import shlex
import shutil
import signal
import subprocess
import time
from codechecker_common.logger import get_logger
from . import analysis_manager, pre_analysis_manager, env
from .analyzers import analyzer_types
from .analyzers.clangsa.analyzer import ClangSA
from .analyzers.clangsa.statistics_collector import \
SpecialReturnValueCollector
from .analyzers.clangsa.statistics_collector import ReturnValueCollector
LOG = get_logger('analyzer')
def prepare_actions(actions, enabled_analyzers):
"""
Set the analyzer type for each buildaction.
Multiple actions if multiple source analyzers are set.
"""
res = []
for ea in enabled_analyzers:
for action in actions:
res.append(action.with_attr('analyzer_type', ea))
return res
def create_actions_map(actions, manager):
"""
Create a dict for the build actions which is shareable
safely between processes.
Key: (source_file, target)
Value: BuildAction
"""
result = manager.dict()
for act in actions:
key = act.source, act.target
if key in result:
LOG.debug("Multiple entires in compile database "
"with the same (source, target) pair: (%s, %s)",
act.source, act.target)
result[key] = act
return result
def __get_analyzer_version(context, analyzer_config_map):
"""
Get the path and the version of the analyzer binaries.
"""
check_env = env.extend(context.path_env_extra,
context.ld_lib_path_extra)
# Get the analyzer binaries from the config_map which
# contains only the checked and available analyzers.
versions = {}
for _, analyzer_cfg in analyzer_config_map.items():
analyzer_bin = analyzer_cfg.analyzer_binary
version = [analyzer_bin, u' --version']
try:
output = subprocess.check_output(shlex.split(' '.join(version)),
env=check_env,
universal_newlines=True)
versions[analyzer_bin] = output
except (subprocess.CalledProcessError, OSError) as oerr:
LOG.warning("Failed to get analyzer version: %s",
' '.join(version))
LOG.warning(oerr.strerror)
return versions
def __mgr_init():
"""
This function is set for the SyncManager object which handles shared data
structures among the processes of the pool. Ignoring the SIGINT signal is
necessary in the manager object so it doesn't terminate before the
termination of the process pool.
"""
signal.signal(signal.SIGINT, signal.SIG_IGN)
def __get_statistics_data(args, manager):
statistics_data = None
if 'stats_enabled' in args and args.stats_enabled:
statistics_data = manager.dict({
'stats_out_dir': os.path.join(args.output_path, "stats")})
if 'stats_output' in args and args.stats_output:
statistics_data = manager.dict({'stats_out_dir':
args.stats_output})
if 'stats_min_sample_count' in args and statistics_data:
if args.stats_min_sample_count > 1:
statistics_data['stats_min_sample_count'] =\
args.stats_min_sample_count
else:
LOG.error("stats_min_sample_count"
"must be greater than 1.")
return None
if 'stats_relevance_threshold' in args and statistics_data:
if 1 > args.stats_relevance_threshold > 0:
statistics_data['stats_relevance_threshold'] =\
args.stats_relevance_threshold
else:
LOG.error("stats-relevance-threshold must be"
" greater than 0 and smaller than 1.")
return None
return statistics_data
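# Illustrative example (flag spellings assumed from the attribute names above):
# running with `--stats-output /tmp/stats --stats-min-sample-count 10` would
# yield a shared dict roughly equal to
# {'stats_out_dir': '/tmp/stats', 'stats_min_sample_count': 10}.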
def perform_analysis(args, skip_handler, context, actions, metadata):
"""
Perform static analysis via the given (or if not, all) analyzers,
in the given analysis context for the supplied build actions.
Additionally, insert statistical information into the metadata dict.
"""
analyzers = args.analyzers if 'analyzers' in args \
else analyzer_types.supported_analyzers
analyzers, _ = analyzer_types.check_supported_analyzers(
analyzers, context)
ctu_collect = False
ctu_analyze = False
ctu_dir = ''
if 'ctu_phases' in args:
ctu_dir = os.path.join(args.output_path, 'ctu-dir')
args.ctu_dir = ctu_dir
if ClangSA.ANALYZER_NAME not in analyzers:
LOG.error("CTU can only be used with the clang static analyzer.")
return
ctu_collect = args.ctu_phases[0]
ctu_analyze = args.ctu_phases[1]
if 'stats_enabled' in args and args.stats_enabled:
if ClangSA.ANALYZER_NAME not in analyzers:
LOG.debug("Statistics can only be used with "
"the Clang Static Analyzer.")
return
actions = prepare_actions(actions, analyzers)
config_map = analyzer_types.build_config_handlers(args, context, analyzers)
if 'stats_enabled' in args:
config_map[ClangSA.ANALYZER_NAME].set_checker_enabled(
SpecialReturnValueCollector.checker_analyze)
config_map[ClangSA.ANALYZER_NAME].set_checker_enabled(
ReturnValueCollector.checker_analyze)
# Save some metadata information.
versions = __get_analyzer_version(context, config_map)
metadata['versions'].update(versions)
metadata['checkers'] = {}
for analyzer in analyzers:
metadata['checkers'][analyzer] = {}
for check, data in config_map[analyzer].checks().items():
enabled, _ = data
metadata['checkers'][analyzer].update({check: enabled})
if ctu_collect:
shutil.rmtree(ctu_dir, ignore_errors=True)
elif ctu_analyze and not os.path.exists(ctu_dir):
LOG.error("CTU directory: '%s' does not exist.", ctu_dir)
return
start_time = time.time()
# Use Manager to create data objects which can be
# safely shared between processes.
manager = SyncManager()
manager.start(__mgr_init)
config_map = manager.dict(config_map)
actions_map = create_actions_map(actions, manager)
# Setting to not None value will enable statistical analysis features.
statistics_data = __get_statistics_data(args, manager)
if ctu_collect or statistics_data:
ctu_data = None
if ctu_collect or ctu_analyze:
ctu_capability = config_map[ClangSA.ANALYZER_NAME].ctu_capability
ctu_data = manager.dict({'ctu_dir': ctu_dir,
'ctu_func_map_cmd':
ctu_capability.mapping_tool_path,
'ctu_func_map_file':
ctu_capability.mapping_file_name,
'ctu_temp_fnmap_folder':
'tmpExternalFnMaps'})
pre_analyze = [a for a in actions
if a.analyzer_type == ClangSA.ANALYZER_NAME]
pre_analysis_manager.run_pre_analysis(pre_analyze,
context,
config_map,
args.jobs,
skip_handler,
ctu_data,
statistics_data,
manager)
if 'stats_output' in args and args.stats_output:
return
if 'stats_dir' in args and args.stats_dir:
statistics_data = manager.dict({'stats_out_dir': args.stats_dir})
ctu_reanalyze_on_failure = 'ctu_reanalyze_on_failure' in args and \
args.ctu_reanalyze_on_failure
if ctu_analyze or statistics_data or (not ctu_analyze and not ctu_collect):
LOG.info("Starting static analysis ...")
analysis_manager.start_workers(actions_map, actions, context,
config_map, args.jobs,
args.output_path,
skip_handler,
metadata,
'quiet' in args,
'capture_analysis_output' in args,
args.timeout if 'timeout' in args
else None,
ctu_reanalyze_on_failure,
statistics_data,
manager)
LOG.info("Analysis finished.")
LOG.info("To view results in the terminal use the "
"\"CodeChecker parse\" command.")
LOG.info("To store results use the \"CodeChecker store\" command.")
LOG.info("See --help and the user guide for further options about"
" parsing and storing the reports.")
LOG.info("----=================----")
end_time = time.time()
LOG.info("Analysis length: %s sec.", end_time - start_time)
metadata['timestamps'] = {'begin': start_time,
'end': end_time}
if ctu_collect and ctu_analyze:
shutil.rmtree(ctu_dir, ignore_errors=True)
manager.shutdown()
| 1 | 10,610 | If I enable the clangsa analyzer with the `--stats` flag the checkers will be disabled even if they were enabled previously. I think we should check here if stats was enabled. `if 'stats_enabled' in args and args.stats_enabled` we should leave them enabled. | Ericsson-codechecker | c |
@@ -1041,6 +1041,9 @@ func DoEstimateGas(ctx context.Context, b Backend, args CallArgs, blockNrOrHash
// 3. calculate the fee and normalize by the default gas price
fee := core.CalculateRollupFee(*args.Data, uint64(gasUsed), dataPrice, executionPrice).Uint64() / defaultGasPrice
+ if fee < 21000 {
+ fee = 21000
+ }
return (hexutil.Uint64)(fee), nil
}
| 1 | // Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package ethapi
import (
"bytes"
"context"
"errors"
"fmt"
"math/big"
"strings"
"time"
"github.com/davecgh/go-spew/spew"
"github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/accounts/keystore"
"github.com/ethereum/go-ethereum/accounts/scwallet"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/consensus/clique"
"github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/diffdb"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/rpc"
"github.com/tyler-smith/go-bip39"
)
const (
defaultGasPrice = params.GWei
)
// PublicEthereumAPI provides an API to access Ethereum related information.
// It offers only methods that operate on public data that is freely available to anyone.
type PublicEthereumAPI struct {
b Backend
}
// NewPublicEthereumAPI creates a new Ethereum protocol API.
func NewPublicEthereumAPI(b Backend) *PublicEthereumAPI {
return &PublicEthereumAPI{b}
}
// GasPrice always returns 1 gwei. See `DoEstimateGas` below for context.
func (s *PublicEthereumAPI) GasPrice(ctx context.Context) (*hexutil.Big, error) {
return (*hexutil.Big)(big.NewInt(defaultGasPrice)), nil
}
// ProtocolVersion returns the current Ethereum protocol version this node supports
func (s *PublicEthereumAPI) ProtocolVersion() hexutil.Uint {
return hexutil.Uint(s.b.ProtocolVersion())
}
// Syncing returns false in case the node is currently not syncing with the network. It can be up to date or has not
// yet received the latest block headers from its peers. In case it is synchronizing:
// - startingBlock: block number this node started to synchronise from
// - currentBlock: block number this node is currently importing
// - highestBlock: block number of the highest block header this node has received from peers
// - pulledStates: number of state entries processed until now
// - knownStates: number of known state entries that still need to be pulled
func (s *PublicEthereumAPI) Syncing() (interface{}, error) {
progress := s.b.Downloader().Progress()
// Return not syncing if the synchronisation already completed
if progress.CurrentBlock >= progress.HighestBlock {
return false, nil
}
// Otherwise gather the block sync stats
return map[string]interface{}{
"startingBlock": hexutil.Uint64(progress.StartingBlock),
"currentBlock": hexutil.Uint64(progress.CurrentBlock),
"highestBlock": hexutil.Uint64(progress.HighestBlock),
"pulledStates": hexutil.Uint64(progress.PulledStates),
"knownStates": hexutil.Uint64(progress.KnownStates),
}, nil
}
// PublicTxPoolAPI offers and API for the transaction pool. It only operates on data that is non confidential.
type PublicTxPoolAPI struct {
b Backend
}
// NewPublicTxPoolAPI creates a new tx pool service that gives information about the transaction pool.
func NewPublicTxPoolAPI(b Backend) *PublicTxPoolAPI {
return &PublicTxPoolAPI{b}
}
// Content returns the transactions contained within the transaction pool.
func (s *PublicTxPoolAPI) Content() map[string]map[string]map[string]*RPCTransaction {
content := map[string]map[string]map[string]*RPCTransaction{
"pending": make(map[string]map[string]*RPCTransaction),
"queued": make(map[string]map[string]*RPCTransaction),
}
pending, queue := s.b.TxPoolContent()
// Flatten the pending transactions
for account, txs := range pending {
dump := make(map[string]*RPCTransaction)
for _, tx := range txs {
dump[fmt.Sprintf("%d", tx.Nonce())] = newRPCPendingTransaction(tx)
}
content["pending"][account.Hex()] = dump
}
// Flatten the queued transactions
for account, txs := range queue {
dump := make(map[string]*RPCTransaction)
for _, tx := range txs {
dump[fmt.Sprintf("%d", tx.Nonce())] = newRPCPendingTransaction(tx)
}
content["queued"][account.Hex()] = dump
}
return content
}
// Status returns the number of pending and queued transaction in the pool.
func (s *PublicTxPoolAPI) Status() map[string]hexutil.Uint {
pending, queue := s.b.Stats()
return map[string]hexutil.Uint{
"pending": hexutil.Uint(pending),
"queued": hexutil.Uint(queue),
}
}
// Inspect retrieves the content of the transaction pool and flattens it into an
// easily inspectable list.
func (s *PublicTxPoolAPI) Inspect() map[string]map[string]map[string]string {
content := map[string]map[string]map[string]string{
"pending": make(map[string]map[string]string),
"queued": make(map[string]map[string]string),
}
pending, queue := s.b.TxPoolContent()
// Define a formatter to flatten a transaction into a string
var format = func(tx *types.Transaction) string {
if to := tx.To(); to != nil {
return fmt.Sprintf("%s: %v wei + %v gas × %v wei", tx.To().Hex(), tx.Value(), tx.Gas(), tx.GasPrice())
}
return fmt.Sprintf("contract creation: %v wei + %v gas × %v wei", tx.Value(), tx.Gas(), tx.GasPrice())
}
// Flatten the pending transactions
for account, txs := range pending {
dump := make(map[string]string)
for _, tx := range txs {
dump[fmt.Sprintf("%d", tx.Nonce())] = format(tx)
}
content["pending"][account.Hex()] = dump
}
// Flatten the queued transactions
for account, txs := range queue {
dump := make(map[string]string)
for _, tx := range txs {
dump[fmt.Sprintf("%d", tx.Nonce())] = format(tx)
}
content["queued"][account.Hex()] = dump
}
return content
}
// PublicAccountAPI provides an API to access accounts managed by this node.
// It offers only methods that can retrieve accounts.
type PublicAccountAPI struct {
am *accounts.Manager
}
// NewPublicAccountAPI creates a new PublicAccountAPI.
func NewPublicAccountAPI(am *accounts.Manager) *PublicAccountAPI {
return &PublicAccountAPI{am: am}
}
// Accounts returns the collection of accounts this node manages
func (s *PublicAccountAPI) Accounts() []common.Address {
return s.am.Accounts()
}
// PrivateAccountAPI provides an API to access accounts managed by this node.
// It offers methods to create, (un)lock en list accounts. Some methods accept
// passwords and are therefore considered private by default.
type PrivateAccountAPI struct {
am *accounts.Manager
nonceLock *AddrLocker
b Backend
}
// NewPrivateAccountAPI creates a new PrivateAccountAPI.
func NewPrivateAccountAPI(b Backend, nonceLock *AddrLocker) *PrivateAccountAPI {
return &PrivateAccountAPI{
am: b.AccountManager(),
nonceLock: nonceLock,
b: b,
}
}
// ListAccounts will return a list of addresses for accounts this node manages.
func (s *PrivateAccountAPI) ListAccounts() []common.Address {
return s.am.Accounts()
}
// rawWallet is a JSON representation of an accounts.Wallet interface, with its
// data contents extracted into plain fields.
type rawWallet struct {
URL string `json:"url"`
Status string `json:"status"`
Failure string `json:"failure,omitempty"`
Accounts []accounts.Account `json:"accounts,omitempty"`
}
// ListWallets will return a list of wallets this node manages.
func (s *PrivateAccountAPI) ListWallets() []rawWallet {
wallets := make([]rawWallet, 0) // return [] instead of nil if empty
for _, wallet := range s.am.Wallets() {
status, failure := wallet.Status()
raw := rawWallet{
URL: wallet.URL().String(),
Status: status,
Accounts: wallet.Accounts(),
}
if failure != nil {
raw.Failure = failure.Error()
}
wallets = append(wallets, raw)
}
return wallets
}
// OpenWallet initiates a hardware wallet opening procedure, establishing a USB
// connection and attempting to authenticate via the provided passphrase. Note,
// the method may return an extra challenge requiring a second open (e.g. the
// Trezor PIN matrix challenge).
func (s *PrivateAccountAPI) OpenWallet(url string, passphrase *string) error {
wallet, err := s.am.Wallet(url)
if err != nil {
return err
}
pass := ""
if passphrase != nil {
pass = *passphrase
}
return wallet.Open(pass)
}
// DeriveAccount requests an HD wallet to derive a new account, optionally pinning
// it for later reuse.
func (s *PrivateAccountAPI) DeriveAccount(url string, path string, pin *bool) (accounts.Account, error) {
wallet, err := s.am.Wallet(url)
if err != nil {
return accounts.Account{}, err
}
derivPath, err := accounts.ParseDerivationPath(path)
if err != nil {
return accounts.Account{}, err
}
if pin == nil {
pin = new(bool)
}
return wallet.Derive(derivPath, *pin)
}
// NewAccount creates a new account and returns its address.
func (s *PrivateAccountAPI) NewAccount(password string) (common.Address, error) {
acc, err := fetchKeystore(s.am).NewAccount(password)
if err == nil {
log.Info("Your new key was generated", "address", acc.Address)
log.Warn("Please backup your key file!", "path", acc.URL.Path)
log.Warn("Please remember your password!")
return acc.Address, nil
}
return common.Address{}, err
}
// fetchKeystore retrieves the encrypted keystore from the account manager.
func fetchKeystore(am *accounts.Manager) *keystore.KeyStore {
return am.Backends(keystore.KeyStoreType)[0].(*keystore.KeyStore)
}
// ImportRawKey stores the given hex encoded ECDSA key into the key directory,
// encrypting it with the passphrase.
func (s *PrivateAccountAPI) ImportRawKey(privkey string, password string) (common.Address, error) {
key, err := crypto.HexToECDSA(privkey)
if err != nil {
return common.Address{}, err
}
acc, err := fetchKeystore(s.am).ImportECDSA(key, password)
return acc.Address, err
}
// UnlockAccount will unlock the account associated with the given address with
// the given password for duration seconds. If duration is nil it will use a
// default of 300 seconds. It returns an indication of whether the account was unlocked.
func (s *PrivateAccountAPI) UnlockAccount(ctx context.Context, addr common.Address, password string, duration *uint64) (bool, error) {
// When the API is exposed over external RPC (HTTP, WS, etc.), account unlocking
// is disabled unless the user explicitly allows insecure unlocking.
if s.b.ExtRPCEnabled() && !s.b.AccountManager().Config().InsecureUnlockAllowed {
return false, errors.New("account unlock with HTTP access is forbidden")
}
const max = uint64(time.Duration(math.MaxInt64) / time.Second)
var d time.Duration
if duration == nil {
d = 300 * time.Second
} else if *duration > max {
return false, errors.New("unlock duration too large")
} else {
d = time.Duration(*duration) * time.Second
}
err := fetchKeystore(s.am).TimedUnlock(accounts.Account{Address: addr}, password, d)
if err != nil {
log.Warn("Failed account unlock attempt", "address", addr, "err", err)
}
return err == nil, err
}
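// A hypothetical usage sketch for UnlockAccount, reusing the *rpc.Client from the
// sketch above and assuming the personal namespace is enabled and insecure
// unlocking is permitted on this transport:
//
//	var unlocked bool
//	// Unlock addr for 300 seconds with the supplied passphrase.
//	err := client.Call(&unlocked, "personal_unlockAccount", addr, "passphrase", 300)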
// LockAccount will lock the account associated with the given address when it's unlocked.
func (s *PrivateAccountAPI) LockAccount(addr common.Address) bool {
return fetchKeystore(s.am).Lock(addr) == nil
}
// signTransaction sets defaults and signs the given transaction.
// NOTE: the caller needs to ensure that the nonceLock is held, if applicable,
// and released after the transaction has been submitted to the tx pool.
func (s *PrivateAccountAPI) signTransaction(ctx context.Context, args *SendTxArgs, passwd string) (*types.Transaction, error) {
// Look up the wallet containing the requested signer
account := accounts.Account{Address: args.From}
wallet, err := s.am.Find(account)
if err != nil {
return nil, err
}
// Set some sanity defaults and terminate on failure
if err := args.setDefaults(ctx, s.b); err != nil {
return nil, err
}
// Assemble the transaction and sign with the wallet
tx := args.toTransaction()
return wallet.SignTxWithPassphrase(account, passwd, tx, s.b.ChainConfig().ChainID)
}
// SendTransaction will create a transaction from the given arguments and
// tries to sign it with the key associated with args.From. If the given passwd
// cannot decrypt the key, it fails.
func (s *PrivateAccountAPI) SendTransaction(ctx context.Context, args SendTxArgs, passwd string) (common.Hash, error) {
if args.Nonce == nil {
// Hold the address's mutex around signing to prevent concurrent assignment of
// the same nonce to multiple accounts.
s.nonceLock.LockAddr(args.From)
defer s.nonceLock.UnlockAddr(args.From)
}
signed, err := s.signTransaction(ctx, &args, passwd)
if err != nil {
log.Warn("Failed transaction send attempt", "from", args.From, "to", args.To, "value", args.Value.ToInt(), "err", err)
return common.Hash{}, err
}
return SubmitTransaction(ctx, s.b, signed)
}
// SignTransaction will create a transaction from the given arguments and
// tries to sign it with the key associated with args.From. If the given passwd
// cannot decrypt the key, it fails. The transaction is returned in RLP-form, not broadcast
// to other nodes
func (s *PrivateAccountAPI) SignTransaction(ctx context.Context, args SendTxArgs, passwd string) (*SignTransactionResult, error) {
// No need to obtain the noncelock mutex, since we won't be sending this
// tx into the transaction pool, but right back to the user
if args.Gas == nil {
return nil, fmt.Errorf("gas not specified")
}
if args.GasPrice == nil {
return nil, fmt.Errorf("gasPrice not specified")
}
if args.Nonce == nil {
return nil, fmt.Errorf("nonce not specified")
}
signed, err := s.signTransaction(ctx, &args, passwd)
if err != nil {
log.Warn("Failed transaction sign attempt", "from", args.From, "to", args.To, "value", args.Value.ToInt(), "err", err)
return nil, err
}
data, err := rlp.EncodeToBytes(signed)
if err != nil {
return nil, err
}
return &SignTransactionResult{data, signed}, nil
}
// Sign calculates an Ethereum ECDSA signature for:
// keccak256("\x19Ethereum Signed Message:\n" + len(message) + message)
//
// Note, the produced signature conforms to the secp256k1 curve R, S and V values,
// where the V value will be 27 or 28 for legacy reasons.
//
// The key used to calculate the signature is decrypted with the given password.
//
// https://github.com/ethereum/go-ethereum/wiki/Management-APIs#personal_sign
func (s *PrivateAccountAPI) Sign(ctx context.Context, data hexutil.Bytes, addr common.Address, passwd string) (hexutil.Bytes, error) {
// Look up the wallet containing the requested signer
account := accounts.Account{Address: addr}
wallet, err := s.b.AccountManager().Find(account)
if err != nil {
return nil, err
}
// Assemble and sign the data with the wallet
signature, err := wallet.SignTextWithPassphrase(account, passwd, data)
if err != nil {
log.Warn("Failed data sign attempt", "address", addr, "err", err)
return nil, err
}
signature[crypto.RecoveryIDOffset] += 27 // Transform V from 0/1 to 27/28 according to the yellow paper
return signature, nil
}
// EcRecover returns the address for the account that was used to create the signature.
// Note, this function is compatible with eth_sign and personal_sign. As such it recovers
// the address of:
// hash = keccak256("\x19Ethereum Signed Message:\n"${message length}${message})
// addr = ecrecover(hash, signature)
//
// Note, the signature must conform to the secp256k1 curve R, S and V values, where
// the V value must be 27 or 28 for legacy reasons.
//
// https://github.com/ethereum/go-ethereum/wiki/Management-APIs#personal_ecRecover
func (s *PrivateAccountAPI) EcRecover(ctx context.Context, data, sig hexutil.Bytes) (common.Address, error) {
if len(sig) != crypto.SignatureLength {
return common.Address{}, fmt.Errorf("signature must be %d bytes long", crypto.SignatureLength)
}
if sig[crypto.RecoveryIDOffset] != 27 && sig[crypto.RecoveryIDOffset] != 28 {
return common.Address{}, fmt.Errorf("invalid Ethereum signature (V is not 27 or 28)")
}
sig[crypto.RecoveryIDOffset] -= 27 // Transform yellow paper V from 27/28 to 0/1
rpk, err := crypto.SigToPub(accounts.TextHash(data), sig)
if err != nil {
return common.Address{}, err
}
return crypto.PubkeyToAddress(*rpk), nil
}
// SignAndSendTransaction was renamed to SendTransaction. This method is deprecated
// and will be removed in the future. Its primary goal is to give clients time to update.
func (s *PrivateAccountAPI) SignAndSendTransaction(ctx context.Context, args SendTxArgs, passwd string) (common.Hash, error) {
return s.SendTransaction(ctx, args, passwd)
}
// InitializeWallet initializes a new wallet at the provided URL, by generating and returning a new private key.
func (s *PrivateAccountAPI) InitializeWallet(ctx context.Context, url string) (string, error) {
wallet, err := s.am.Wallet(url)
if err != nil {
return "", err
}
entropy, err := bip39.NewEntropy(256)
if err != nil {
return "", err
}
mnemonic, err := bip39.NewMnemonic(entropy)
if err != nil {
return "", err
}
seed := bip39.NewSeed(mnemonic, "")
switch wallet := wallet.(type) {
case *scwallet.Wallet:
return mnemonic, wallet.Initialize(seed)
default:
return "", fmt.Errorf("specified wallet does not support initialization")
}
}
// Unpair deletes a pairing between wallet and geth.
func (s *PrivateAccountAPI) Unpair(ctx context.Context, url string, pin string) error {
wallet, err := s.am.Wallet(url)
if err != nil {
return err
}
switch wallet := wallet.(type) {
case *scwallet.Wallet:
return wallet.Unpair([]byte(pin))
default:
return fmt.Errorf("specified wallet does not support pairing")
}
}
// PublicBlockChainAPI provides an API to access the Ethereum blockchain.
// It offers only methods that operate on public data that is freely available to anyone.
type PublicBlockChainAPI struct {
b Backend
}
// NewPublicBlockChainAPI creates a new Ethereum blockchain API.
func NewPublicBlockChainAPI(b Backend) *PublicBlockChainAPI {
return &PublicBlockChainAPI{b}
}
// ChainId returns the chainID value for transaction replay protection.
func (s *PublicBlockChainAPI) ChainId() *hexutil.Big {
return (*hexutil.Big)(s.b.ChainConfig().ChainID)
}
// BlockNumber returns the block number of the chain head.
func (s *PublicBlockChainAPI) BlockNumber() hexutil.Uint64 {
header, _ := s.b.HeaderByNumber(context.Background(), rpc.LatestBlockNumber) // latest header should always be available
return hexutil.Uint64(header.Number.Uint64())
}
// GetBalance returns the amount of wei for the given address in the state of the
// given block number. The rpc.LatestBlockNumber and rpc.PendingBlockNumber meta
// block numbers are also allowed.
func (s *PublicBlockChainAPI) GetBalance(ctx context.Context, address common.Address, blockNrOrHash rpc.BlockNumberOrHash) (*hexutil.Big, error) {
state, _, err := s.b.StateAndHeaderByNumberOrHash(ctx, blockNrOrHash)
if state == nil || err != nil {
return nil, err
}
return (*hexutil.Big)(state.GetOVMBalance(address)), state.Error()
}
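// An illustrative sketch for GetBalance, assuming it is exposed as eth_getBalance
// (client is an *rpc.Client as in the earlier sketches):
//
//	var balance hexutil.Big
//	addr := common.HexToAddress("0x0000000000000000000000000000000000000000")
//	err := client.Call(&balance, "eth_getBalance", addr, "latest")
//	// balance holds the OVM balance, in wei, at the latest block.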
// Result structs for GetProof
type AccountResult struct {
Address common.Address `json:"address"`
AccountProof []string `json:"accountProof"`
Balance *hexutil.Big `json:"balance"`
CodeHash common.Hash `json:"codeHash"`
Nonce hexutil.Uint64 `json:"nonce"`
StorageHash common.Hash `json:"storageHash"`
StorageProof []StorageResult `json:"storageProof"`
}
type StorageResult struct {
Key string `json:"key"`
Value *hexutil.Big `json:"value"`
Proof []string `json:"proof"`
}
// Result structs for GetStateDiffProof
type StateDiffProof struct {
Header *HeaderMeta `json:"header"`
Accounts []AccountResult `json:"accounts"`
}
type HeaderMeta struct {
Number *big.Int `json:"number"`
Hash common.Hash `json:"hash"`
StateRoot common.Hash `json:"stateRoot"`
Timestamp uint64 `json:"timestamp"`
}
func (s *PublicBlockChainAPI) GetStateDiff(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (diffdb.Diff, error) {
_, header, err := s.b.StateAndHeaderByNumberOrHash(ctx, blockNrOrHash)
if err != nil {
return nil, err
}
return s.b.GetDiff(new(big.Int).Add(header.Number, big.NewInt(1)))
}
// GetStateDiffProof returns the Merkle-proofs corresponding to all the accounts and
// storage slots which were touched for a given block number or hash.
func (s *PublicBlockChainAPI) GetStateDiffProof(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (*StateDiffProof, error) {
state, header, err := s.b.StateAndHeaderByNumberOrHash(ctx, blockNrOrHash)
if state == nil || header == nil || err != nil {
return nil, err
}
// get the changed accounts for this block
diffs, err := s.GetStateDiff(ctx, blockNrOrHash)
if err != nil {
return nil, err
}
// for each changed account, get their proof
var accounts []AccountResult
for address, keys := range diffs {
// we need to convert the hashes to strings; alternatively GetProof could be
// refactored to accept hashes directly
keyStrings := make([]string, len(keys))
for i, key := range keys {
keyStrings[i] = key.Key.String()
}
// get the proofs
res, err := s.GetProof(ctx, address, keyStrings, blockNrOrHash)
if err != nil {
return nil, err
}
accounts = append(accounts, *res)
}
// add some metadata
stateDiffProof := &StateDiffProof{
Header: &HeaderMeta{
Number: header.Number,
Hash: header.Hash(),
StateRoot: header.Root,
Timestamp: header.Time,
},
Accounts: accounts,
}
return stateDiffProof, state.Error()
}
// GetProof returns the Merkle-proof for a given account and optionally some storage keys.
func (s *PublicBlockChainAPI) GetProof(ctx context.Context, address common.Address, storageKeys []string, blockNrOrHash rpc.BlockNumberOrHash) (*AccountResult, error) {
state, _, err := s.b.StateAndHeaderByNumberOrHash(ctx, blockNrOrHash)
if state == nil || err != nil {
return nil, err
}
storageTrie := state.StorageTrie(address)
storageHash := types.EmptyRootHash
codeHash := state.GetCodeHash(address)
storageProof := make([]StorageResult, len(storageKeys))
// if we have a storageTrie (which means the account exists), we can update the storageHash
if storageTrie != nil {
storageHash = storageTrie.Hash()
} else {
// no storageTrie means the account does not exist, so the codeHash is the hash of an empty bytearray.
codeHash = crypto.Keccak256Hash(nil)
}
// create the proof for the storageKeys
for i, key := range storageKeys {
if storageTrie != nil {
proof, storageError := state.GetStorageProof(address, common.HexToHash(key))
if storageError != nil {
return nil, storageError
}
storageProof[i] = StorageResult{key, (*hexutil.Big)(state.GetState(address, common.HexToHash(key)).Big()), common.ToHexArray(proof)}
} else {
storageProof[i] = StorageResult{key, &hexutil.Big{}, []string{}}
}
}
// create the accountProof
accountProof, proofErr := state.GetProof(address)
if proofErr != nil {
return nil, proofErr
}
return &AccountResult{
Address: address,
AccountProof: common.ToHexArray(accountProof),
Balance: (*hexutil.Big)(state.GetBalance(address)),
CodeHash: codeHash,
Nonce: hexutil.Uint64(state.GetNonce(address)),
StorageHash: storageHash,
StorageProof: storageProof,
}, state.Error()
}
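// A hedged usage sketch for GetProof, assuming the conventional eth_getProof
// method name and an *rpc.Client named client:
//
//	keys := []string{"0x0000000000000000000000000000000000000000000000000000000000000000"}
//	var proof AccountResult
//	err := client.Call(&proof, "eth_getProof", addr, keys, "latest")
//	// proof.AccountProof holds the account's Merkle proof nodes;
//	// proof.StorageProof[0] holds the proof for storage slot zero.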
// GetHeaderByNumber returns the requested canonical block header.
// * When blockNr is -1 the chain head is returned.
// * When blockNr is -2 the pending chain head is returned.
func (s *PublicBlockChainAPI) GetHeaderByNumber(ctx context.Context, number rpc.BlockNumber) (map[string]interface{}, error) {
header, err := s.b.HeaderByNumber(ctx, number)
if header != nil && err == nil {
response := s.rpcMarshalHeader(header)
if number == rpc.PendingBlockNumber {
// Pending headers need to nil out a few fields
for _, field := range []string{"hash", "nonce", "miner"} {
response[field] = nil
}
}
return response, err
}
return nil, err
}
// GetHeaderByHash returns the requested header by hash.
func (s *PublicBlockChainAPI) GetHeaderByHash(ctx context.Context, hash common.Hash) map[string]interface{} {
header, _ := s.b.HeaderByHash(ctx, hash)
if header != nil {
return s.rpcMarshalHeader(header)
}
return nil
}
// GetBlockByNumber returns the requested canonical block.
// * When blockNr is -1 the chain head is returned.
// * When blockNr is -2 the pending chain head is returned.
// * When fullTx is true all transactions in the block are returned, otherwise
// only the transaction hash is returned.
func (s *PublicBlockChainAPI) GetBlockByNumber(ctx context.Context, number rpc.BlockNumber, fullTx bool) (map[string]interface{}, error) {
block, err := s.b.BlockByNumber(ctx, number)
if block != nil && err == nil {
response, err := s.rpcMarshalBlock(block, true, fullTx)
if err == nil && number == rpc.PendingBlockNumber {
// Pending blocks need to nil out a few fields
for _, field := range []string{"hash", "nonce", "miner", "number"} {
response[field] = nil
}
}
return response, err
}
return nil, err
}
// GetBlockByHash returns the requested block. When fullTx is true all transactions in the block are returned in full
// detail, otherwise only the transaction hash is returned.
func (s *PublicBlockChainAPI) GetBlockByHash(ctx context.Context, hash common.Hash, fullTx bool) (map[string]interface{}, error) {
block, err := s.b.BlockByHash(ctx, hash)
if block != nil {
return s.rpcMarshalBlock(block, true, fullTx)
}
return nil, err
}
// GetUncleByBlockNumberAndIndex returns the uncle block for the given block number and index. When fullTx is true
// all transactions in the block are returned in full detail, otherwise only the transaction hash is returned.
func (s *PublicBlockChainAPI) GetUncleByBlockNumberAndIndex(ctx context.Context, blockNr rpc.BlockNumber, index hexutil.Uint) (map[string]interface{}, error) {
block, err := s.b.BlockByNumber(ctx, blockNr)
if block != nil {
uncles := block.Uncles()
if index >= hexutil.Uint(len(uncles)) {
log.Debug("Requested uncle not found", "number", blockNr, "hash", block.Hash(), "index", index)
return nil, nil
}
block = types.NewBlockWithHeader(uncles[index])
return s.rpcMarshalBlock(block, false, false)
}
return nil, err
}
// GetUncleByBlockHashAndIndex returns the uncle block for the given block hash and index. When fullTx is true
// all transactions in the block are returned in full detail, otherwise only the transaction hash is returned.
func (s *PublicBlockChainAPI) GetUncleByBlockHashAndIndex(ctx context.Context, blockHash common.Hash, index hexutil.Uint) (map[string]interface{}, error) {
block, err := s.b.BlockByHash(ctx, blockHash)
if block != nil {
uncles := block.Uncles()
if index >= hexutil.Uint(len(uncles)) {
log.Debug("Requested uncle not found", "number", block.Number(), "hash", blockHash, "index", index)
return nil, nil
}
block = types.NewBlockWithHeader(uncles[index])
return s.rpcMarshalBlock(block, false, false)
}
return nil, err
}
// GetUncleCountByBlockNumber returns the number of uncles in the block for the given block number
func (s *PublicBlockChainAPI) GetUncleCountByBlockNumber(ctx context.Context, blockNr rpc.BlockNumber) *hexutil.Uint {
if block, _ := s.b.BlockByNumber(ctx, blockNr); block != nil {
n := hexutil.Uint(len(block.Uncles()))
return &n
}
return nil
}
// GetUncleCountByBlockHash returns the number of uncles in the block for the given block hash
func (s *PublicBlockChainAPI) GetUncleCountByBlockHash(ctx context.Context, blockHash common.Hash) *hexutil.Uint {
if block, _ := s.b.BlockByHash(ctx, blockHash); block != nil {
n := hexutil.Uint(len(block.Uncles()))
return &n
}
return nil
}
// GetCode returns the code stored at the given address in the state for the given block number.
func (s *PublicBlockChainAPI) GetCode(ctx context.Context, address common.Address, blockNrOrHash rpc.BlockNumberOrHash) (hexutil.Bytes, error) {
state, _, err := s.b.StateAndHeaderByNumberOrHash(ctx, blockNrOrHash)
if state == nil || err != nil {
return nil, err
}
code := state.GetCode(address)
return code, state.Error()
}
// GetStorageAt returns the storage from the state at the given address, key and
// block number. The rpc.LatestBlockNumber and rpc.PendingBlockNumber meta block
// numbers are also allowed.
func (s *PublicBlockChainAPI) GetStorageAt(ctx context.Context, address common.Address, key string, blockNrOrHash rpc.BlockNumberOrHash) (hexutil.Bytes, error) {
state, _, err := s.b.StateAndHeaderByNumberOrHash(ctx, blockNrOrHash)
if state == nil || err != nil {
return nil, err
}
res := state.GetState(address, common.HexToHash(key))
return res[:], state.Error()
}
func (s *PublicBlockChainAPI) GetBlockRange(ctx context.Context, startNumber rpc.BlockNumber, endNumber rpc.BlockNumber, fullTx bool) ([]map[string]interface{}, error) {
// Basic assertions about start and end block numbers.
if endNumber < startNumber {
return nil, fmt.Errorf("Start of block range (%d) is greater than end of block range (%d)", startNumber, endNumber)
}
// Assert that the requested range contains at most 1000 blocks (this limit could be made configurable).
if endNumber-startNumber > 1000 {
return nil, fmt.Errorf("Requested block range is too large (max is 1000, requested %d blocks)", endNumber-startNumber)
}
// Make sure the end exists. If start doesn't exist, will be caught immediately below.
if _, err := s.GetBlockByNumber(ctx, endNumber, fullTx); err != nil {
return nil, fmt.Errorf("End of requested block range (%d) does not exist: %w", endNumber, err)
}
// Create an empty output array.
blocks := make([]map[string]interface{}, 0)
// For each block in range, get block and append to array.
for number := startNumber; number <= endNumber; number++ {
block, err := s.GetBlockByNumber(ctx, number, fullTx)
if block == nil || err != nil {
return nil, err
}
blocks = append(blocks, block)
}
return blocks, nil
}
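// A speculative usage sketch for GetBlockRange, assuming it is exposed as
// eth_getBlockRange under the same eth namespace as the other methods of this API:
//
//	var blocks []map[string]interface{}
//	// Fetch blocks 100..110 inclusive, with full transaction bodies.
//	err := client.Call(&blocks, "eth_getBlockRange",
//		hexutil.Uint64(100), hexutil.Uint64(110), true)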
// CallArgs represents the arguments for a call.
type CallArgs struct {
From *common.Address `json:"from"`
To *common.Address `json:"to"`
Gas *hexutil.Uint64 `json:"gas"`
GasPrice *hexutil.Big `json:"gasPrice"`
Value *hexutil.Big `json:"value"`
Data *hexutil.Bytes `json:"data"`
}
// account indicates the fields of an account to be overridden during the
// execution of a message call.
// Note, state and stateDiff can't be specified at the same time. If state is
// set, message execution will only use the data in the given state. Otherwise,
// if stateDiff is set, all diffs will be applied first and then the call
// message will be executed.
type account struct {
Nonce *hexutil.Uint64 `json:"nonce"`
Code *hexutil.Bytes `json:"code"`
Balance **hexutil.Big `json:"balance"`
State *map[common.Hash]common.Hash `json:"state"`
StateDiff *map[common.Hash]common.Hash `json:"stateDiff"`
}
func DoCall(ctx context.Context, b Backend, args CallArgs, blockNrOrHash rpc.BlockNumberOrHash, overrides map[common.Address]account, vmCfg vm.Config, timeout time.Duration, globalGasCap *big.Int) ([]byte, uint64, bool, error) {
defer func(start time.Time) { log.Debug("Executing EVM call finished", "runtime", time.Since(start)) }(time.Now())
state, header, err := b.StateAndHeaderByNumberOrHash(ctx, blockNrOrHash)
if state == nil || err != nil {
return nil, 0, false, err
}
// Set sender address or use a default if none specified
var addr common.Address
if args.From == nil {
if wallets := b.AccountManager().Wallets(); len(wallets) > 0 {
if accounts := wallets[0].Accounts(); len(accounts) > 0 {
addr = accounts[0].Address
}
}
} else {
addr = *args.From
}
// Override the fields of specified contracts before execution.
for addr, account := range overrides {
// Override account nonce.
if account.Nonce != nil {
state.SetNonce(addr, uint64(*account.Nonce))
}
// Override account(contract) code.
if account.Code != nil {
state.SetCode(addr, *account.Code)
}
// Override account balance.
if account.Balance != nil {
state.SetBalance(addr, (*big.Int)(*account.Balance))
}
if account.State != nil && account.StateDiff != nil {
return nil, 0, false, fmt.Errorf("account %s has both 'state' and 'stateDiff'", addr.Hex())
}
// Replace entire state if caller requires.
if account.State != nil {
state.SetStorage(addr, *account.State)
}
// Apply state diff into specified accounts.
if account.StateDiff != nil {
for key, value := range *account.StateDiff {
state.SetState(addr, key, value)
}
}
}
// Set default gas & gas price if none were set
gas := b.GasLimit()
if args.Gas != nil {
gas = uint64(*args.Gas)
}
if globalGasCap != nil && globalGasCap.Uint64() < gas {
log.Warn("Caller gas above allowance, capping", "requested", gas, "cap", globalGasCap)
gas = globalGasCap.Uint64()
}
gasPrice := new(big.Int).SetUint64(defaultGasPrice)
if args.GasPrice != nil {
gasPrice = args.GasPrice.ToInt()
}
value := new(big.Int)
if args.Value != nil {
value = args.Value.ToInt()
}
var data []byte
if args.Data != nil {
data = []byte(*args.Data)
}
blockNumber := header.Number
timestamp := new(big.Int).SetUint64(header.Time)
// Create new call message
var msg core.Message
msg = types.NewMessage(addr, args.To, 0, value, gas, gasPrice, data, false, &addr, nil, types.QueueOriginSequencer, 0)
if vm.UsingOVM {
cfg := b.ChainConfig()
executionManager := cfg.StateDump.Accounts["OVM_ExecutionManager"]
stateManager := cfg.StateDump.Accounts["OVM_StateManager"]
block, err := b.BlockByNumber(ctx, rpc.BlockNumber(header.Number.Uint64()))
if err != nil {
return nil, 0, false, err
}
txs := block.Transactions()
if header.Number.Uint64() != 0 {
if len(txs) != 1 {
return nil, 0, false, fmt.Errorf("block %d has more than 1 transaction", header.Number.Uint64())
}
tx := txs[0]
blockNumber = tx.L1BlockNumber()
timestamp = new(big.Int).SetUint64(tx.L1Timestamp())
}
msg, err = core.EncodeSimulatedMessage(msg, timestamp, blockNumber, executionManager, stateManager)
if err != nil {
return nil, 0, false, err
}
}
// Setup the context so it may be cancelled when the call has completed
// or, in case of unmetered gas, set up a context with a timeout.
var cancel context.CancelFunc
if timeout > 0 {
ctx, cancel = context.WithTimeout(ctx, timeout)
} else {
ctx, cancel = context.WithCancel(ctx)
}
// Make sure the context is cancelled when the call has completed,
// so that resources are cleaned up.
defer cancel()
// Get a new instance of the EVM.
evm, vmError, err := b.GetEVM(ctx, msg, state, header)
if err != nil {
return nil, 0, false, err
}
// Wait for the context to be done and cancel the EVM. Even if the
// EVM has finished, cancelling it (repeatedly) is harmless.
go func() {
<-ctx.Done()
evm.Cancel()
}()
// Setup the gas pool (also for unmetered requests)
// and apply the message.
gp := new(core.GasPool).AddGas(math.MaxUint64)
if vm.UsingOVM {
evm.Context.EthCallSender = &addr
evm.Context.BlockNumber = blockNumber
evm.Context.Time = timestamp
}
res, gas, failed, err := core.ApplyMessage(evm, msg, gp)
if err := vmError(); err != nil {
return nil, 0, false, err
}
// If the timer caused an abort, return an appropriate error message
if evm.Cancelled() {
return nil, 0, false, fmt.Errorf("execution aborted (timeout = %v)", timeout)
}
return res, gas, failed, err
}
// Call executes the given transaction on the state for the given block number.
//
// Additionally, the caller can specify a batch of contracts for field overriding.
//
// Note, this function doesn't make any changes in the state/blockchain and is
// useful to execute and retrieve values.
func (s *PublicBlockChainAPI) Call(ctx context.Context, args CallArgs, blockNrOrHash rpc.BlockNumberOrHash, overrides *map[common.Address]account) (hexutil.Bytes, error) {
var accounts map[common.Address]account
if overrides != nil {
accounts = *overrides
}
result, _, _, err := DoCall(ctx, s.b, args, blockNrOrHash, accounts, vm.Config{}, 5*time.Second, s.b.RPCGasCap())
return (hexutil.Bytes)(result), err
}
// Optimism note: The gasPrice in Optimism is modified to always return 1 gwei. We
// use the gasLimit field to communicate the entire user fee. This is done for
// compatibility reasons with the existing Ethereum toolchain, so that the user
// fees can compensate for the additional costs the sequencer pays for publishing the
// transaction calldata.
func DoEstimateGas(ctx context.Context, b Backend, args CallArgs, blockNrOrHash rpc.BlockNumberOrHash, gasCap *big.Int) (hexutil.Uint64, error) {
if args.Data == nil {
return 0, errors.New("transaction data cannot be nil")
}
// 1. get the gas that would be used by the transaction
gasUsed, err := legacyDoEstimateGas(ctx, b, args, blockNrOrHash, gasCap)
if err != nil {
return 0, err
}
// 2a. fetch the data price, which depends on how the sequencer has chosen to update
// its values based on the L1 gas prices
dataPrice, err := b.SuggestDataPrice(ctx)
if err != nil {
return 0, err
}
// 2b. fetch the execution gas price, determined by the typical mempool dynamics
executionPrice, err := b.SuggestExecutionPrice(ctx)
if err != nil {
return 0, err
}
// 3. calculate the fee and normalize by the default gas price
fee := core.CalculateRollupFee(*args.Data, uint64(gasUsed), dataPrice, executionPrice).Uint64() / defaultGasPrice
return (hexutil.Uint64)(fee), nil
}
func legacyDoEstimateGas(ctx context.Context, b Backend, args CallArgs, blockNrOrHash rpc.BlockNumberOrHash, gasCap *big.Int) (hexutil.Uint64, error) {
// Binary search the gas requirement, as it may be higher than the amount used
var (
lo uint64 = params.TxGas - 1
hi uint64
cap uint64
)
if args.Gas != nil && uint64(*args.Gas) >= params.TxGas {
hi = uint64(*args.Gas)
} else {
// Retrieve the block to act as the gas ceiling
block, err := b.BlockByNumberOrHash(ctx, blockNrOrHash)
if err != nil {
return 0, err
}
hi = block.GasLimit()
}
if gasCap != nil && hi > gasCap.Uint64() {
log.Warn("Caller gas above allowance, capping", "requested", hi, "cap", gasCap)
hi = gasCap.Uint64()
}
cap = hi
// Set sender address or use a default if none specified
if args.From == nil {
if wallets := b.AccountManager().Wallets(); len(wallets) > 0 {
if accounts := wallets[0].Accounts(); len(accounts) > 0 {
args.From = &accounts[0].Address
}
}
}
// Use zero-address if none other is available
if args.From == nil {
args.From = &common.Address{}
}
// Create a helper to check if a gas allowance results in an executable transaction
executable := func(gas uint64) bool {
args.Gas = (*hexutil.Uint64)(&gas)
_, _, failed, err := DoCall(ctx, b, args, blockNrOrHash, nil, vm.Config{}, 0, gasCap)
if err != nil || failed {
return false
}
return true
}
// Execute the binary search and hone in on an executable gas limit
for lo+1 < hi {
mid := (hi + lo) / 2
if !executable(mid) {
lo = mid
} else {
hi = mid
}
}
// Reject the transaction as invalid if it still fails at the highest allowance
if hi == cap {
if !executable(hi) {
return 0, fmt.Errorf("gas required exceeds allowance (%d) or always failing transaction", cap)
}
}
return hexutil.Uint64(hi), nil
}
// EstimateGas returns an estimate of the amount of gas needed to execute the
// given transaction against the current pending block.
func (s *PublicBlockChainAPI) EstimateGas(ctx context.Context, args CallArgs) (hexutil.Uint64, error) {
blockNrOrHash := rpc.BlockNumberOrHashWithNumber(rpc.PendingBlockNumber)
return DoEstimateGas(ctx, s.b, args, blockNrOrHash, s.b.RPCGasCap())
}
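// An illustrative sketch for EstimateGas (assuming the standard eth_estimateGas
// method name). Note that on Optimism the returned quantity is the total user fee
// normalized by the default gas price, not a plain execution gas amount:
//
//	call := map[string]interface{}{
//		"from": from.Hex(),
//		"to":   to.Hex(),
//		"data": "0x", // data must be present; DoEstimateGas rejects a nil payload
//	}
//	var fee hexutil.Uint64
//	err := client.Call(&fee, "eth_estimateGas", call)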
// ExecutionResult groups all structured logs emitted by the EVM
// while replaying a transaction in debug mode as well as transaction
// execution status, the amount of gas used and the return value
type ExecutionResult struct {
Gas uint64 `json:"gas"`
Failed bool `json:"failed"`
ReturnValue string `json:"returnValue"`
StructLogs []StructLogRes `json:"structLogs"`
}
// StructLogRes stores a structured log emitted by the EVM while replaying a
// transaction in debug mode
type StructLogRes struct {
Pc uint64 `json:"pc"`
Op string `json:"op"`
Gas uint64 `json:"gas"`
GasCost uint64 `json:"gasCost"`
Depth int `json:"depth"`
Error error `json:"error,omitempty"`
Stack *[]string `json:"stack,omitempty"`
Memory *[]string `json:"memory,omitempty"`
Storage *map[string]string `json:"storage,omitempty"`
}
// FormatLogs formats EVM returned structured logs for json output
func FormatLogs(logs []vm.StructLog) []StructLogRes {
formatted := make([]StructLogRes, len(logs))
for index, trace := range logs {
formatted[index] = StructLogRes{
Pc: trace.Pc,
Op: trace.Op.String(),
Gas: trace.Gas,
GasCost: trace.GasCost,
Depth: trace.Depth,
Error: trace.Err,
}
if trace.Stack != nil {
stack := make([]string, len(trace.Stack))
for i, stackValue := range trace.Stack {
stack[i] = fmt.Sprintf("%x", math.PaddedBigBytes(stackValue, 32))
}
formatted[index].Stack = &stack
}
if trace.Memory != nil {
memory := make([]string, 0, (len(trace.Memory)+31)/32)
for i := 0; i+32 <= len(trace.Memory); i += 32 {
memory = append(memory, fmt.Sprintf("%x", trace.Memory[i:i+32]))
}
formatted[index].Memory = &memory
}
if trace.Storage != nil {
storage := make(map[string]string)
for i, storageValue := range trace.Storage {
storage[fmt.Sprintf("%x", i)] = fmt.Sprintf("%x", storageValue)
}
formatted[index].Storage = &storage
}
}
return formatted
}
// RPCMarshalHeader converts the given header to the RPC output.
func RPCMarshalHeader(head *types.Header) map[string]interface{} {
return map[string]interface{}{
"number": (*hexutil.Big)(head.Number),
"hash": head.Hash(),
"parentHash": head.ParentHash,
"nonce": head.Nonce,
"mixHash": head.MixDigest,
"sha3Uncles": head.UncleHash,
"logsBloom": head.Bloom,
"stateRoot": head.Root,
"miner": head.Coinbase,
"difficulty": (*hexutil.Big)(head.Difficulty),
"extraData": hexutil.Bytes(head.Extra),
"size": hexutil.Uint64(head.Size()),
"gasLimit": hexutil.Uint64(head.GasLimit),
"gasUsed": hexutil.Uint64(head.GasUsed),
"timestamp": hexutil.Uint64(head.Time),
"transactionsRoot": head.TxHash,
"receiptsRoot": head.ReceiptHash,
}
}
// RPCMarshalBlock converts the given block to the RPC output which depends on fullTx. If inclTx is true transactions are
// returned. When fullTx is true the returned block contains full transaction details, otherwise it will only contain
// transaction hashes.
func RPCMarshalBlock(block *types.Block, inclTx bool, fullTx bool) (map[string]interface{}, error) {
fields := RPCMarshalHeader(block.Header())
fields["size"] = hexutil.Uint64(block.Size())
if inclTx {
formatTx := func(tx *types.Transaction) (interface{}, error) {
return tx.Hash(), nil
}
if fullTx {
formatTx = func(tx *types.Transaction) (interface{}, error) {
return newRPCTransactionFromBlockHash(block, tx.Hash()), nil
}
}
txs := block.Transactions()
transactions := make([]interface{}, len(txs))
var err error
for i, tx := range txs {
if transactions[i], err = formatTx(tx); err != nil {
return nil, err
}
}
fields["transactions"] = transactions
}
uncles := block.Uncles()
uncleHashes := make([]common.Hash, len(uncles))
for i, uncle := range uncles {
uncleHashes[i] = uncle.Hash()
}
fields["uncles"] = uncleHashes
return fields, nil
}
// rpcMarshalHeader uses the generalized output filler, then adds the total difficulty field, which requires
// a `PublicBlockChainAPI`.
func (s *PublicBlockChainAPI) rpcMarshalHeader(header *types.Header) map[string]interface{} {
fields := RPCMarshalHeader(header)
fields["totalDifficulty"] = (*hexutil.Big)(s.b.GetTd(header.Hash()))
return fields
}
// rpcMarshalBlock uses the generalized output filler, then adds the total difficulty field, which requires
// a `PublicBlockChainAPI`.
func (s *PublicBlockChainAPI) rpcMarshalBlock(b *types.Block, inclTx bool, fullTx bool) (map[string]interface{}, error) {
fields, err := RPCMarshalBlock(b, inclTx, fullTx)
if err != nil {
return nil, err
}
if inclTx {
fields["totalDifficulty"] = (*hexutil.Big)(s.b.GetTd(b.Hash()))
}
return fields, err
}
// RPCTransaction represents a transaction that will serialize to the RPC representation of a transaction
type RPCTransaction struct {
BlockHash *common.Hash `json:"blockHash"`
BlockNumber *hexutil.Big `json:"blockNumber"`
From common.Address `json:"from"`
Gas hexutil.Uint64 `json:"gas"`
GasPrice *hexutil.Big `json:"gasPrice"`
Hash common.Hash `json:"hash"`
Input hexutil.Bytes `json:"input"`
Nonce hexutil.Uint64 `json:"nonce"`
To *common.Address `json:"to"`
TransactionIndex *hexutil.Uint64 `json:"transactionIndex"`
Value *hexutil.Big `json:"value"`
V *hexutil.Big `json:"v"`
R *hexutil.Big `json:"r"`
S *hexutil.Big `json:"s"`
QueueOrigin string `json:"queueOrigin"`
TxType string `json:"txType"`
L1TxOrigin *common.Address `json:"l1TxOrigin"`
L1BlockNumber *hexutil.Big `json:"l1BlockNumber"`
L1Timestamp hexutil.Uint64 `json:"l1Timestamp"`
Index *hexutil.Uint64 `json:"index"`
QueueIndex *hexutil.Uint64 `json:"queueIndex"`
RawTransaction hexutil.Bytes `json:"rawTransaction"`
}
// newRPCTransaction returns a transaction that will serialize to the RPC
// representation, with the given location metadata set (if available).
func newRPCTransaction(tx *types.Transaction, blockHash common.Hash, blockNumber uint64, index uint64) *RPCTransaction {
var signer types.Signer = types.NewOVMSigner(tx.ChainId())
from, _ := types.Sender(signer, tx)
v, r, s := tx.RawSignatureValues()
result := &RPCTransaction{
From: from,
Gas: hexutil.Uint64(tx.Gas()),
GasPrice: (*hexutil.Big)(tx.GasPrice()),
Hash: tx.Hash(),
Input: hexutil.Bytes(tx.Data()),
Nonce: hexutil.Uint64(tx.Nonce()),
To: tx.To(),
Value: (*hexutil.Big)(tx.Value()),
V: (*hexutil.Big)(v),
R: (*hexutil.Big)(r),
S: (*hexutil.Big)(s),
}
if blockHash != (common.Hash{}) {
result.BlockHash = &blockHash
result.BlockNumber = (*hexutil.Big)(new(big.Int).SetUint64(blockNumber))
result.TransactionIndex = (*hexutil.Uint64)(&index)
}
if meta := tx.GetMeta(); meta != nil {
result.RawTransaction = meta.RawTransaction
result.L1TxOrigin = meta.L1MessageSender
result.L1Timestamp = (hexutil.Uint64)(meta.L1Timestamp)
if meta.L1BlockNumber != nil {
result.L1BlockNumber = (*hexutil.Big)(meta.L1BlockNumber)
}
if meta.QueueOrigin != nil {
switch meta.QueueOrigin.Uint64() {
case uint64(types.QueueOriginSequencer):
result.QueueOrigin = "sequencer"
case uint64(types.QueueOriginL1ToL2):
result.QueueOrigin = "l1"
}
}
if meta.Index != nil {
index := (hexutil.Uint64)(*meta.Index)
result.Index = &index
}
if meta.QueueIndex != nil {
queueIndex := (hexutil.Uint64)(*meta.QueueIndex)
result.QueueIndex = &queueIndex
}
switch meta.SignatureHashType {
case types.SighashEthSign:
result.TxType = "EthSign"
case types.SighashEIP155:
result.TxType = "EIP155"
case types.CreateEOA:
result.TxType = "CreateEOA"
}
}
return result
}
// newRPCPendingTransaction returns a pending transaction that will serialize to the RPC representation
func newRPCPendingTransaction(tx *types.Transaction) *RPCTransaction {
return newRPCTransaction(tx, common.Hash{}, 0, 0)
}
// newRPCTransactionFromBlockIndex returns a transaction that will serialize to the RPC representation.
func newRPCTransactionFromBlockIndex(b *types.Block, index uint64) *RPCTransaction {
txs := b.Transactions()
if index >= uint64(len(txs)) {
return nil
}
return newRPCTransaction(txs[index], b.Hash(), b.NumberU64(), index)
}
// newRPCRawTransactionFromBlockIndex returns the bytes of a transaction given a block and a transaction index.
func newRPCRawTransactionFromBlockIndex(b *types.Block, index uint64) hexutil.Bytes {
txs := b.Transactions()
if index >= uint64(len(txs)) {
return nil
}
blob, _ := rlp.EncodeToBytes(txs[index])
return blob
}
// newRPCTransactionFromBlockHash returns a transaction that will serialize to the RPC representation.
func newRPCTransactionFromBlockHash(b *types.Block, hash common.Hash) *RPCTransaction {
for idx, tx := range b.Transactions() {
if tx.Hash() == hash {
return newRPCTransactionFromBlockIndex(b, uint64(idx))
}
}
return nil
}
// PublicTransactionPoolAPI exposes methods for the RPC interface
type PublicTransactionPoolAPI struct {
b Backend
nonceLock *AddrLocker
}
// NewPublicTransactionPoolAPI creates a new RPC service with methods specific for the transaction pool.
func NewPublicTransactionPoolAPI(b Backend, nonceLock *AddrLocker) *PublicTransactionPoolAPI {
return &PublicTransactionPoolAPI{b, nonceLock}
}
// GetBlockTransactionCountByNumber returns the number of transactions in the block with the given block number.
func (s *PublicTransactionPoolAPI) GetBlockTransactionCountByNumber(ctx context.Context, blockNr rpc.BlockNumber) *hexutil.Uint {
if block, _ := s.b.BlockByNumber(ctx, blockNr); block != nil {
n := hexutil.Uint(len(block.Transactions()))
return &n
}
return nil
}
// GetBlockTransactionCountByHash returns the number of transactions in the block with the given hash.
func (s *PublicTransactionPoolAPI) GetBlockTransactionCountByHash(ctx context.Context, blockHash common.Hash) *hexutil.Uint {
if block, _ := s.b.BlockByHash(ctx, blockHash); block != nil {
n := hexutil.Uint(len(block.Transactions()))
return &n
}
return nil
}
// GetTransactionByBlockNumberAndIndex returns the transaction for the given block number and index.
func (s *PublicTransactionPoolAPI) GetTransactionByBlockNumberAndIndex(ctx context.Context, blockNr rpc.BlockNumber, index hexutil.Uint) *RPCTransaction {
if block, _ := s.b.BlockByNumber(ctx, blockNr); block != nil {
return newRPCTransactionFromBlockIndex(block, uint64(index))
}
return nil
}
// GetTransactionByBlockHashAndIndex returns the transaction for the given block hash and index.
func (s *PublicTransactionPoolAPI) GetTransactionByBlockHashAndIndex(ctx context.Context, blockHash common.Hash, index hexutil.Uint) *RPCTransaction {
if block, _ := s.b.BlockByHash(ctx, blockHash); block != nil {
return newRPCTransactionFromBlockIndex(block, uint64(index))
}
return nil
}
// GetRawTransactionByBlockNumberAndIndex returns the bytes of the transaction for the given block number and index.
func (s *PublicTransactionPoolAPI) GetRawTransactionByBlockNumberAndIndex(ctx context.Context, blockNr rpc.BlockNumber, index hexutil.Uint) hexutil.Bytes {
if block, _ := s.b.BlockByNumber(ctx, blockNr); block != nil {
return newRPCRawTransactionFromBlockIndex(block, uint64(index))
}
return nil
}
// GetRawTransactionByBlockHashAndIndex returns the bytes of the transaction for the given block hash and index.
func (s *PublicTransactionPoolAPI) GetRawTransactionByBlockHashAndIndex(ctx context.Context, blockHash common.Hash, index hexutil.Uint) hexutil.Bytes {
if block, _ := s.b.BlockByHash(ctx, blockHash); block != nil {
return newRPCRawTransactionFromBlockIndex(block, uint64(index))
}
return nil
}
// GetTransactionCount returns the number of transactions the given address has sent for the given block number
func (s *PublicTransactionPoolAPI) GetTransactionCount(ctx context.Context, address common.Address, blockNrOrHash rpc.BlockNumberOrHash) (*hexutil.Uint64, error) {
// Ask transaction pool for the nonce which includes pending transactions
if blockNr, ok := blockNrOrHash.Number(); ok && blockNr == rpc.PendingBlockNumber {
nonce, err := s.b.GetPoolNonce(ctx, address)
if err != nil {
return nil, err
}
return (*hexutil.Uint64)(&nonce), nil
}
// Resolve block number and use its state to ask for the nonce
state, _, err := s.b.StateAndHeaderByNumberOrHash(ctx, blockNrOrHash)
if state == nil || err != nil {
return nil, err
}
nonce := state.GetNonce(address)
return (*hexutil.Uint64)(&nonce), state.Error()
}
// GetTransactionByHash returns the transaction for the given hash
func (s *PublicTransactionPoolAPI) GetTransactionByHash(ctx context.Context, hash common.Hash) (*RPCTransaction, error) {
// Try to return an already finalized transaction
tx, blockHash, blockNumber, index, err := s.b.GetTransaction(ctx, hash)
if err != nil {
return nil, err
}
if tx != nil {
return newRPCTransaction(tx, blockHash, blockNumber, index), nil
}
// No finalized transaction, try to retrieve it from the pool
if tx := s.b.GetPoolTransaction(hash); tx != nil {
return newRPCPendingTransaction(tx), nil
}
// Transaction unknown, return as such
return nil, nil
}
// GetRawTransactionByHash returns the bytes of the transaction for the given hash.
func (s *PublicTransactionPoolAPI) GetRawTransactionByHash(ctx context.Context, hash common.Hash) (hexutil.Bytes, error) {
// Retrieve a finalized transaction, or a pooled one otherwise
tx, _, _, _, err := s.b.GetTransaction(ctx, hash)
if err != nil {
return nil, err
}
if tx == nil {
if tx = s.b.GetPoolTransaction(hash); tx == nil {
// Transaction not found anywhere, abort
return nil, nil
}
}
// Serialize to RLP and return
return rlp.EncodeToBytes(tx)
}
// GetTransactionReceipt returns the transaction receipt for the given transaction hash.
func (s *PublicTransactionPoolAPI) GetTransactionReceipt(ctx context.Context, hash common.Hash) (map[string]interface{}, error) {
tx, blockHash, blockNumber, index := rawdb.ReadTransaction(s.b.ChainDb(), hash)
if tx == nil {
return nil, nil
}
receipts, err := s.b.GetReceipts(ctx, blockHash)
if err != nil {
return nil, err
}
if len(receipts) <= int(index) {
return nil, nil
}
receipt := receipts[index]
var signer types.Signer = types.FrontierSigner{}
if tx.Protected() {
signer = types.NewOVMSigner(tx.ChainId())
}
from, _ := types.Sender(signer, tx)
fields := map[string]interface{}{
"blockHash": blockHash,
"blockNumber": hexutil.Uint64(blockNumber),
"transactionHash": hash,
"transactionIndex": hexutil.Uint64(index),
"from": from,
"to": tx.To(),
"gasUsed": hexutil.Uint64(receipt.GasUsed),
"cumulativeGasUsed": hexutil.Uint64(receipt.CumulativeGasUsed),
"contractAddress": nil,
"logs": receipt.Logs,
"logsBloom": receipt.Bloom,
}
// Assign receipt status or post state.
if len(receipt.PostState) > 0 {
fields["root"] = hexutil.Bytes(receipt.PostState)
} else {
fields["status"] = hexutil.Uint(receipt.Status)
}
if receipt.Logs == nil {
fields["logs"] = [][]*types.Log{}
}
// If the ContractAddress is 20 0x0 bytes, assume it is not a contract creation
if receipt.ContractAddress != (common.Address{}) {
fields["contractAddress"] = receipt.ContractAddress
}
return fields, nil
}
// sign is a helper function that signs a transaction with the private key of the given address.
func (s *PublicTransactionPoolAPI) sign(addr common.Address, tx *types.Transaction) (*types.Transaction, error) {
// Look up the wallet containing the requested signer
account := accounts.Account{Address: addr}
wallet, err := s.b.AccountManager().Find(account)
if err != nil {
return nil, err
}
// Request the wallet to sign the transaction
return wallet.SignTx(account, tx, s.b.ChainConfig().ChainID)
}
// SendTxArgs represents the arguments to submit a new transaction into the transaction pool.
type SendTxArgs struct {
From common.Address `json:"from"`
To *common.Address `json:"to"`
Gas *hexutil.Uint64 `json:"gas"`
GasPrice *hexutil.Big `json:"gasPrice"`
Value *hexutil.Big `json:"value"`
Nonce *hexutil.Uint64 `json:"nonce"`
// We accept "data" and "input" for backwards-compatibility reasons. "input" is the
// newer name and should be preferred by clients.
Data *hexutil.Bytes `json:"data"`
Input *hexutil.Bytes `json:"input"`
L1BlockNumber *big.Int `json:"l1BlockNumber"`
L1MessageSender *common.Address `json:"l1MessageSender"`
SignatureHashType types.SignatureHashType `json:"signatureHashType"`
}
// setDefaults is a helper function that fills in default values for unspecified tx fields.
func (args *SendTxArgs) setDefaults(ctx context.Context, b Backend) error {
if args.GasPrice == nil {
price, err := b.SuggestPrice(ctx)
if err != nil {
return err
}
args.GasPrice = (*hexutil.Big)(price)
}
if args.Value == nil {
args.Value = new(hexutil.Big)
}
if args.Nonce == nil {
nonce, err := b.GetPoolNonce(ctx, args.From)
if err != nil {
return err
}
args.Nonce = (*hexutil.Uint64)(&nonce)
}
if args.Data != nil && args.Input != nil && !bytes.Equal(*args.Data, *args.Input) {
return errors.New(`both "data" and "input" are set and not equal. Please use "input" to pass transaction call data`)
}
if args.To == nil {
// Contract creation
var input []byte
if args.Data != nil {
input = *args.Data
} else if args.Input != nil {
input = *args.Input
}
if len(input) == 0 {
return errors.New(`contract creation without any data provided`)
}
}
// Estimate the gas usage if necessary.
if args.Gas == nil {
// For backwards-compatibility reasons, we try both input and data
// but input is preferred.
input := args.Input
if input == nil {
input = args.Data
}
callArgs := CallArgs{
From: &args.From, // From shouldn't be nil
To: args.To,
GasPrice: args.GasPrice,
Value: args.Value,
Data: input,
}
pendingBlockNr := rpc.BlockNumberOrHashWithNumber(rpc.PendingBlockNumber)
estimated, err := DoEstimateGas(ctx, b, callArgs, pendingBlockNr, b.RPCGasCap())
if err != nil {
return err
}
args.Gas = &estimated
log.Trace("Estimate gas usage automatically", "gas", args.Gas)
}
return nil
}
func (args *SendTxArgs) toTransaction() *types.Transaction {
var input []byte
if args.Input != nil {
input = *args.Input
} else if args.Data != nil {
input = *args.Data
}
if args.To == nil {
tx := types.NewContractCreation(uint64(*args.Nonce), (*big.Int)(args.Value), uint64(*args.Gas), (*big.Int)(args.GasPrice), input)
txMeta := types.NewTransactionMeta(args.L1BlockNumber, 0, nil, types.SighashEIP155, types.QueueOriginSequencer, nil, nil, nil)
tx.SetTransactionMeta(txMeta)
return tx
}
tx := types.NewTransaction(uint64(*args.Nonce), *args.To, (*big.Int)(args.Value), uint64(*args.Gas), (*big.Int)(args.GasPrice), input)
txMeta := types.NewTransactionMeta(args.L1BlockNumber, 0, args.L1MessageSender, args.SignatureHashType, types.QueueOriginSequencer, nil, nil, nil)
tx.SetTransactionMeta(txMeta)
return tx
}
// SubmitTransaction is a helper function that submits tx to txPool and logs a message.
func SubmitTransaction(ctx context.Context, b Backend, tx *types.Transaction) (common.Hash, error) {
if !tx.Protected() {
return common.Hash{}, errors.New("Cannot submit unprotected transaction")
}
if err := b.SendTx(ctx, tx); err != nil {
return common.Hash{}, err
}
if tx.To() == nil {
signer := types.MakeSigner(b.ChainConfig(), b.CurrentBlock().Number())
from, err := types.Sender(signer, tx)
if err != nil {
return common.Hash{}, err
}
addr := crypto.CreateAddress(from, tx.Nonce())
log.Info("Submitted contract creation", "fullhash", tx.Hash().Hex(), "contract", addr.Hex())
} else {
log.Info("Submitted transaction", "fullhash", tx.Hash().Hex(), "recipient", tx.To())
}
return tx.Hash(), nil
}
// SendTransaction creates a transaction for the given arguments, signs it and submits it to the
// transaction pool.
func (s *PublicTransactionPoolAPI) SendTransaction(ctx context.Context, args SendTxArgs) (common.Hash, error) {
if s.b.IsVerifier() {
return common.Hash{}, errors.New("Cannot send transaction in verifier mode")
}
// Look up the wallet containing the requested signer
account := accounts.Account{Address: args.From}
wallet, err := s.b.AccountManager().Find(account)
if err != nil {
return common.Hash{}, err
}
if args.Nonce == nil {
// Hold the address's mutex around signing to prevent concurrent assignment of
// the same nonce to multiple accounts.
s.nonceLock.LockAddr(args.From)
defer s.nonceLock.UnlockAddr(args.From)
}
// Set some sanity defaults and terminate on failure
if err := args.setDefaults(ctx, s.b); err != nil {
return common.Hash{}, err
}
// Assemble the transaction and sign with the wallet
tx := args.toTransaction()
signed, err := wallet.SignTx(account, tx, s.b.ChainConfig().ChainID)
if err != nil {
return common.Hash{}, err
}
return SubmitTransaction(ctx, s.b, signed)
}
// FillTransaction fills the defaults (nonce, gas, gasPrice) on a given unsigned transaction,
// and returns it to the caller for further processing (signing + broadcast)
func (s *PublicTransactionPoolAPI) FillTransaction(ctx context.Context, args SendTxArgs) (*SignTransactionResult, error) {
// Set some sanity defaults and terminate on failure
if err := args.setDefaults(ctx, s.b); err != nil {
return nil, err
}
// Assemble the transaction and obtain rlp
tx := args.toTransaction()
data, err := rlp.EncodeToBytes(tx)
if err != nil {
return nil, err
}
return &SignTransactionResult{data, tx}, nil
}
// SendRawTransaction will add the signed transaction to the transaction pool.
// The sender is responsible for signing the transaction and using the correct nonce.
func (s *PublicTransactionPoolAPI) SendRawTransaction(ctx context.Context, encodedTx hexutil.Bytes) (common.Hash, error) {
if s.b.IsVerifier() {
return common.Hash{}, errors.New("Cannot send raw transaction in verifier mode")
}
if s.b.IsSyncing() {
return common.Hash{}, errors.New("Cannot send raw transaction while syncing")
}
tx := new(types.Transaction)
if err := rlp.DecodeBytes(encodedTx, tx); err != nil {
return common.Hash{}, err
}
if new(big.Int).Mod(tx.GasPrice(), big.NewInt(1000000)).Cmp(big.NewInt(0)) != 0 {
return common.Hash{}, errors.New("Gas price must be a multiple of 1,000,000 wei")
}
// L1Timestamp and L1BlockNumber will be set by the miner
txMeta := types.NewTransactionMeta(nil, 0, nil, types.SighashEIP155, types.QueueOriginSequencer, nil, nil, nil)
tx.SetTransactionMeta(txMeta)
return SubmitTransaction(ctx, s.b, tx)
}
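// A minimal client-side sketch mirroring the gas price rule enforced above, run
// before submitting a signed transaction (illustrative; signedTx and client are
// assumed to exist):
//
//	// The gas price must be a multiple of 1,000,000 wei or the node rejects the tx.
//	if new(big.Int).Mod(signedTx.GasPrice(), big.NewInt(1000000)).Sign() != 0 {
//		// adjust the gas price before submitting
//	}
//	raw, _ := rlp.EncodeToBytes(signedTx)
//	var txHash common.Hash
//	err := client.Call(&txHash, "eth_sendRawTransaction", hexutil.Encode(raw))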
// SendRawEthSignTransaction will add the signed transaction to the mempool.
// The signature hash was computed with `eth_sign`, meaning that the
// `abi.encodePacked` transaction was prefixed with the string
// "Ethereum Signed Message".
func (s *PublicTransactionPoolAPI) SendRawEthSignTransaction(ctx context.Context, encodedTx hexutil.Bytes) (common.Hash, error) {
if s.b.IsVerifier() {
return common.Hash{}, errors.New("Cannot send raw ethsign transaction in verifier mode")
}
if s.b.IsSyncing() {
return common.Hash{}, errors.New("Cannot send raw transaction while syncing")
}
tx := new(types.Transaction)
if err := rlp.DecodeBytes(encodedTx, tx); err != nil {
return common.Hash{}, err
}
if new(big.Int).Mod(tx.GasPrice(), big.NewInt(1000000)).Cmp(big.NewInt(0)) != 0 {
return common.Hash{}, errors.New("Gas price must be a multiple of 1,000,000 wei")
}
// L1Timestamp and L1BlockNumber will be set by the miner
txMeta := types.NewTransactionMeta(nil, 0, nil, types.SighashEthSign, types.QueueOriginSequencer, nil, nil, nil)
tx.SetTransactionMeta(txMeta)
return SubmitTransaction(ctx, s.b, tx)
}
// Sign calculates an ECDSA signature for:
// keccak256("\x19Ethereum Signed Message:\n" + len(message) + message).
//
// Note, the produced signature conforms to the secp256k1 curve R, S and V values,
// where the V value will be 27 or 28 for legacy reasons.
//
// The account associated with addr must be unlocked.
//
// https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_sign
func (s *PublicTransactionPoolAPI) Sign(addr common.Address, data hexutil.Bytes) (hexutil.Bytes, error) {
// Look up the wallet containing the requested signer
account := accounts.Account{Address: addr}
wallet, err := s.b.AccountManager().Find(account)
if err != nil {
return nil, err
}
// Sign the requested hash with the wallet
signature, err := wallet.SignText(account, data)
if err == nil {
signature[64] += 27 // Transform V from 0/1 to 27/28 according to the yellow paper
}
return signature, err
}
// SignTransactionResult represents an RLP encoded signed transaction.
type SignTransactionResult struct {
Raw hexutil.Bytes `json:"raw"`
Tx *types.Transaction `json:"tx"`
}
// SignTransaction will sign the given transaction with the from account.
// The node needs to have the private key of the account corresponding with
// the given from address and it needs to be unlocked.
func (s *PublicTransactionPoolAPI) SignTransaction(ctx context.Context, args SendTxArgs) (*SignTransactionResult, error) {
if args.Gas == nil {
return nil, fmt.Errorf("gas not specified")
}
if args.GasPrice == nil {
return nil, fmt.Errorf("gasPrice not specified")
}
if args.Nonce == nil {
return nil, fmt.Errorf("nonce not specified")
}
if err := args.setDefaults(ctx, s.b); err != nil {
return nil, err
}
tx, err := s.sign(args.From, args.toTransaction())
if err != nil {
return nil, err
}
data, err := rlp.EncodeToBytes(tx)
if err != nil {
return nil, err
}
return &SignTransactionResult{data, tx}, nil
}
// PendingTransactions returns the transactions that are in the transaction pool
// and have a from address that is one of the accounts this node manages.
func (s *PublicTransactionPoolAPI) PendingTransactions() ([]*RPCTransaction, error) {
pending, err := s.b.GetPoolTransactions()
if err != nil {
return nil, err
}
accounts := make(map[common.Address]struct{})
for _, wallet := range s.b.AccountManager().Wallets() {
for _, account := range wallet.Accounts() {
accounts[account.Address] = struct{}{}
}
}
transactions := make([]*RPCTransaction, 0, len(pending))
for _, tx := range pending {
var signer types.Signer = types.HomesteadSigner{}
if tx.Protected() {
signer = types.NewOVMSigner(tx.ChainId())
}
from, _ := types.Sender(signer, tx)
if _, exists := accounts[from]; exists {
transactions = append(transactions, newRPCPendingTransaction(tx))
}
}
return transactions, nil
}
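// Illustrative sketch only: recovering the sender of a single transaction uses the
// same signer-selection pattern as PendingTransactions above. The helper name is
// hypothetical.
func exampleSender(tx *types.Transaction) (common.Address, error) {
	var signer types.Signer = types.HomesteadSigner{}
	if tx.Protected() {
		signer = types.NewOVMSigner(tx.ChainId())
	}
	return types.Sender(signer, tx)
}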
// Resend accepts an existing transaction and a new gas price and limit. It will remove
// the given transaction from the pool and reinsert it with the new gas price and limit.
func (s *PublicTransactionPoolAPI) Resend(ctx context.Context, sendArgs SendTxArgs, gasPrice *hexutil.Big, gasLimit *hexutil.Uint64) (common.Hash, error) {
if sendArgs.Nonce == nil {
return common.Hash{}, fmt.Errorf("missing transaction nonce in transaction spec")
}
if err := sendArgs.setDefaults(ctx, s.b); err != nil {
return common.Hash{}, err
}
matchTx := sendArgs.toTransaction()
pending, err := s.b.GetPoolTransactions()
if err != nil {
return common.Hash{}, err
}
for _, p := range pending {
var signer types.Signer = types.HomesteadSigner{}
if p.Protected() {
signer = types.NewOVMSigner(p.ChainId())
}
wantSigHash := signer.Hash(matchTx)
if pFrom, err := types.Sender(signer, p); err == nil && pFrom == sendArgs.From && signer.Hash(p) == wantSigHash {
// Match. Re-sign and send the transaction.
if gasPrice != nil && (*big.Int)(gasPrice).Sign() != 0 {
sendArgs.GasPrice = gasPrice
}
if gasLimit != nil && *gasLimit != 0 {
sendArgs.Gas = gasLimit
}
signedTx, err := s.sign(sendArgs.From, sendArgs.toTransaction())
if err != nil {
return common.Hash{}, err
}
if err = s.b.SendTx(ctx, signedTx); err != nil {
return common.Hash{}, err
}
return signedTx.Hash(), nil
}
}
return common.Hash{}, fmt.Errorf("transaction %#x not found", matchTx.Hash())
}
// PublicRollupAPI is the collection of Ethereum APIs specific to the rollup
// functionality.
type PublicRollupAPI struct {
b Backend
}
// NewPublicRollupAPI creates a new API definition for the rollup methods of the
// Ethereum service.
func NewPublicRollupAPI(b Backend) *PublicRollupAPI {
return &PublicRollupAPI{b: b}
}
// TODO: deduplicate this
type EthContext struct {
BlockNumber uint64 `json:"blockNumber"`
Timestamp uint64 `json:"timestamp"`
}
// RollupContext represents the height of the rollup.
// Index is the last processed CanonicalTransactionChain index
// QueueIndex is the last processed `enqueue` index
// VerifiedIndex is the last processed CTC index that was batched
type RollupContext struct {
Index uint64 `json:"index"`
QueueIndex uint64 `json:"queueIndex"`
VerifiedIndex uint64 `json:"verifiedIndex"`
}
type rollupInfo struct {
Mode string `json:"mode"`
Syncing bool `json:"syncing"`
EthContext EthContext `json:"ethContext"`
RollupContext RollupContext `json:"rollupContext"`
}
func (api *PublicRollupAPI) GetInfo(ctx context.Context) rollupInfo {
mode := "sequencer"
if v := api.b.IsVerifier(); v {
mode = "verifier"
}
syncing := api.b.IsSyncing()
bn, ts := api.b.GetEthContext()
index, queueIndex, verifiedIndex := api.b.GetRollupContext()
return rollupInfo{
Mode: mode,
Syncing: syncing,
EthContext: EthContext{
BlockNumber: bn,
Timestamp: ts,
},
RollupContext: RollupContext{
Index: index,
QueueIndex: queueIndex,
VerifiedIndex: verifiedIndex,
},
}
}
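// Illustrative sketch only: a typical GetInfo result built from the structs above
// (all numbers are made up). Over JSON-RPC this serializes roughly as:
//
//	{"mode":"sequencer","syncing":false,
//	 "ethContext":{"blockNumber":1234,"timestamp":1620000000},
//	 "rollupContext":{"index":100,"queueIndex":10,"verifiedIndex":90}}
func exampleRollupInfo() rollupInfo {
	return rollupInfo{
		Mode:          "sequencer",
		Syncing:       false,
		EthContext:    EthContext{BlockNumber: 1234, Timestamp: 1620000000},
		RollupContext: RollupContext{Index: 100, QueueIndex: 10, VerifiedIndex: 90},
	}
}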
// PrivateRollupAPI provides private RPC methods to control the sequencer.
// These methods can be abused by external users and must be considered insecure for use by untrusted users.
type PrivateRollupAPI struct {
b Backend
}
// NewPrivateRollupAPI creates a new API definition for the rollup methods of the
// Ethereum service.
func NewPrivateRollupAPI(b Backend) *PrivateRollupAPI {
return &PrivateRollupAPI{b: b}
}
// SetDataPrice sets the gas price to be used when quoting calldata publishing costs
// to users
func (api *PrivateRollupAPI) SetDataPrice(ctx context.Context, gasPrice hexutil.Big) {
api.b.SetDataPrice(ctx, (*big.Int)(&gasPrice))
}
// SetExecutionPrice sets the gas price to be used when executing transactions on
func (api *PrivateRollupAPI) SetExecutionPrice(ctx context.Context, gasPrice hexutil.Big) {
api.b.SetExecutionPrice(ctx, (*big.Int)(&gasPrice))
}
// PublicDebugAPI is the collection of Ethereum APIs exposed over the public
// debugging endpoint.
type PublicDebugAPI struct {
b Backend
}
// NewPublicDebugAPI creates a new API definition for the public debug methods
// of the Ethereum service.
func NewPublicDebugAPI(b Backend) *PublicDebugAPI {
return &PublicDebugAPI{b: b}
}
// GetBlockRlp retrieves the RLP encoded form of a single block.
func (api *PublicDebugAPI) GetBlockRlp(ctx context.Context, number uint64) (string, error) {
block, _ := api.b.BlockByNumber(ctx, rpc.BlockNumber(number))
if block == nil {
return "", fmt.Errorf("block #%d not found", number)
}
encoded, err := rlp.EncodeToBytes(block)
if err != nil {
return "", err
}
return fmt.Sprintf("%x", encoded), nil
}
// TestSignCliqueBlock fetches the given block number, and attempts to sign it as a clique header with the
// given address, returning the address recovered from the signature
//
// This is a temporary method to debug the externalsigner integration,
// TODO: Remove this method when the integration is mature
func (api *PublicDebugAPI) TestSignCliqueBlock(ctx context.Context, address common.Address, number uint64) (common.Address, error) {
block, _ := api.b.BlockByNumber(ctx, rpc.BlockNumber(number))
if block == nil {
return common.Address{}, fmt.Errorf("block #%d not found", number)
}
header := block.Header()
header.Extra = make([]byte, 32+65)
encoded := clique.CliqueRLP(header)
// Look up the wallet containing the requested signer
account := accounts.Account{Address: address}
wallet, err := api.b.AccountManager().Find(account)
if err != nil {
return common.Address{}, err
}
signature, err := wallet.SignData(account, accounts.MimetypeClique, encoded)
if err != nil {
return common.Address{}, err
}
sealHash := clique.SealHash(header).Bytes()
log.Info("test signing of clique block",
"Sealhash", fmt.Sprintf("%x", sealHash),
"signature", fmt.Sprintf("%x", signature))
pubkey, err := crypto.Ecrecover(sealHash, signature)
if err != nil {
return common.Address{}, err
}
var signer common.Address
copy(signer[:], crypto.Keccak256(pubkey[1:])[12:])
return signer, nil
}
// PrintBlock retrieves a block and returns its pretty printed form.
func (api *PublicDebugAPI) PrintBlock(ctx context.Context, number uint64) (string, error) {
block, _ := api.b.BlockByNumber(ctx, rpc.BlockNumber(number))
if block == nil {
return "", fmt.Errorf("block #%d not found", number)
}
return spew.Sdump(block), nil
}
// SeedHash retrieves the seed hash of a block.
func (api *PublicDebugAPI) SeedHash(ctx context.Context, number uint64) (string, error) {
block, _ := api.b.BlockByNumber(ctx, rpc.BlockNumber(number))
if block == nil {
return "", fmt.Errorf("block #%d not found", number)
}
return fmt.Sprintf("0x%x", ethash.SeedHash(number)), nil
}
// PrivateDebugAPI is the collection of Ethereum APIs exposed over the private
// debugging endpoint.
type PrivateDebugAPI struct {
b Backend
}
// NewPrivateDebugAPI creates a new API definition for the private debug methods
// of the Ethereum service.
func NewPrivateDebugAPI(b Backend) *PrivateDebugAPI {
return &PrivateDebugAPI{b: b}
}
// ChaindbProperty returns leveldb properties of the key-value database.
func (api *PrivateDebugAPI) ChaindbProperty(property string) (string, error) {
if property == "" {
property = "leveldb.stats"
} else if !strings.HasPrefix(property, "leveldb.") {
property = "leveldb." + property
}
return api.b.ChainDb().Stat(property)
}
// ChaindbCompact flattens the entire key-value database into a single level,
// removing all unused slots and merging all keys.
func (api *PrivateDebugAPI) ChaindbCompact() error {
for b := byte(0); b < 255; b++ {
log.Info("Compacting chain database", "range", fmt.Sprintf("0x%0.2X-0x%0.2X", b, b+1))
if err := api.b.ChainDb().Compact([]byte{b}, []byte{b + 1}); err != nil {
log.Error("Database compaction failed", "err", err)
return err
}
}
return nil
}
// SetHead rewinds the head of the blockchain to a previous block.
func (api *PrivateDebugAPI) SetHead(number hexutil.Uint64) {
api.b.SetHead(uint64(number))
}
// PublicNetAPI offers network related RPC methods
type PublicNetAPI struct {
net *p2p.Server
networkVersion uint64
}
// NewPublicNetAPI creates a new net API instance.
func NewPublicNetAPI(net *p2p.Server, networkVersion uint64) *PublicNetAPI {
return &PublicNetAPI{net, networkVersion}
}
// Listening returns an indication if the node is listening for network connections.
func (s *PublicNetAPI) Listening() bool {
return true // always listening
}
// PeerCount returns the number of connected peers
func (s *PublicNetAPI) PeerCount() hexutil.Uint {
return hexutil.Uint(s.net.PeerCount())
}
// Version returns the current ethereum protocol version.
func (s *PublicNetAPI) Version() string {
return fmt.Sprintf("%d", s.networkVersion)
}
| 1 | 15,638 | Note that we probably should set the default gasPrice to `0.1 gwei` considering our minimum gas limit has to be 21k. At 21k the minimum fee would end up being `0.000021 ETH` which is just a little too high | ethereum-optimism-optimism | go |
@@ -132,7 +132,9 @@ func validateTrustedOperators(o *Options) error {
if o.TrustedKeys == nil {
o.TrustedKeys = make([]string, 0, 4)
}
- o.TrustedKeys = append(o.TrustedKeys, opc.Issuer)
+ if !opc.StrictSigningKeyUsage {
+ o.TrustedKeys = append(o.TrustedKeys, opc.Subject)
+ }
o.TrustedKeys = append(o.TrustedKeys, opc.SigningKeys...)
}
for _, key := range o.TrustedKeys { | 1 | // Copyright 2018-2019 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"fmt"
"io/ioutil"
"net"
"regexp"
"strings"
"time"
"github.com/nats-io/jwt/v2"
"github.com/nats-io/nkeys"
)
var nscDecoratedRe = regexp.MustCompile(`\s*(?:(?:[-]{3,}[^\n]*[-]{3,}\n)(.+)(?:\n\s*[-]{3,}[^\n]*[-]{3,}[\n]*))`)
// All JWTs once encoded start with this
const jwtPrefix = "eyJ"
// ReadOperatorJWT will read a jwt file for an operator claim. This can be a decorated file.
func ReadOperatorJWT(jwtfile string) (*jwt.OperatorClaims, error) {
contents, err := ioutil.ReadFile(jwtfile)
if err != nil {
// Check to see if the JWT has been inlined.
if !strings.HasPrefix(jwtfile, jwtPrefix) {
return nil, err
}
// We may have an inline jwt here.
contents = []byte(jwtfile)
}
defer wipeSlice(contents)
var claim string
items := nscDecoratedRe.FindAllSubmatch(contents, -1)
if len(items) == 0 {
claim = string(contents)
} else {
// First result should be the JWT.
// We copy here so that if the file contained a seed file too we wipe appropriately.
raw := items[0][1]
tmp := make([]byte, len(raw))
copy(tmp, raw)
claim = string(tmp)
}
opc, err := jwt.DecodeOperatorClaims(claim)
if err != nil {
return nil, err
}
return opc, nil
}
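// Illustrative sketch only: a "decorated" operator JWT file, as typically produced
// by nsc, looks roughly like the constant below; ReadOperatorJWT uses nscDecoratedRe
// to extract the bare JWT between the dashed separators. The token shown is fake.
const exampleDecoratedOperatorJWT = `-----BEGIN NATS OPERATOR JWT-----
eyJ0eXAiOiJqd3QiLCJhbGciOiJlZDI1NTE5In0.FAKE.PAYLOAD
------END NATS OPERATOR JWT------
`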
// Just wipe the slice with 'x' to clear the contents of an nkey seed file.
func wipeSlice(buf []byte) {
for i := range buf {
buf[i] = 'x'
}
}
// validateTrustedOperators will check that we do not have conflicts with
// assigned trusted keys and trusted operators. If operators are defined we
// will expand the trusted keys in options.
func validateTrustedOperators(o *Options) error {
if len(o.TrustedOperators) == 0 {
return nil
}
if o.AllowNewAccounts {
return fmt.Errorf("operators do not allow dynamic creation of new accounts")
}
if o.AccountResolver == nil {
return fmt.Errorf("operators require an account resolver to be configured")
}
if len(o.Accounts) > 0 {
return fmt.Errorf("operators do not allow Accounts to be configured directly")
}
if len(o.Users) > 0 || len(o.Nkeys) > 0 {
return fmt.Errorf("operators do not allow users to be configured directly")
}
if len(o.TrustedOperators) > 0 && len(o.TrustedKeys) > 0 {
return fmt.Errorf("conflicting options for 'TrustedKeys' and 'TrustedOperators'")
}
if o.SystemAccount != "" {
foundSys := false
foundNonEmpty := false
for _, op := range o.TrustedOperators {
if op.SystemAccount != "" {
foundNonEmpty = true
}
if op.SystemAccount == o.SystemAccount {
foundSys = true
break
}
}
if foundNonEmpty && !foundSys {
return fmt.Errorf("system_account in config and operator JWT must be identical")
}
}
srvMajor, srvMinor, srvUpdate, _ := jwt.ParseServerVersion(strings.Split(VERSION, "-")[0])
for _, opc := range o.TrustedOperators {
if major, minor, update, err := jwt.ParseServerVersion(opc.AssertServerVersion); err != nil {
return fmt.Errorf("operator %s expects version %s got error instead: %s",
opc.Subject, opc.AssertServerVersion, err)
} else if major > srvMajor {
return fmt.Errorf("operator %s expected major version %d > server major version %d",
opc.Subject, major, srvMajor)
} else if srvMajor > major {
} else if minor > srvMinor {
return fmt.Errorf("operator %s expected minor version %d > server minor version %d",
opc.Subject, minor, srvMinor)
} else if srvMinor > minor {
} else if update > srvUpdate {
return fmt.Errorf("operator %s expected update version %d > server update version %d",
opc.Subject, update, srvUpdate)
}
}
// If we have operators, fill in the trusted keys.
// FIXME(dlc) - We had TrustedKeys before TrustedOperators. The jwt.OperatorClaims
// has a DidSign(). Use that longer term. For now we can expand in place.
for _, opc := range o.TrustedOperators {
if o.TrustedKeys == nil {
o.TrustedKeys = make([]string, 0, 4)
}
o.TrustedKeys = append(o.TrustedKeys, opc.Issuer)
o.TrustedKeys = append(o.TrustedKeys, opc.SigningKeys...)
}
for _, key := range o.TrustedKeys {
if !nkeys.IsValidPublicOperatorKey(key) {
return fmt.Errorf("trusted Keys %q are required to be a valid public operator nkey", key)
}
}
return nil
}
func validateSrc(claims *jwt.UserClaims, host string) bool {
if claims == nil {
return false
} else if len(claims.Src) == 0 {
return true
} else if host == "" {
return false
}
ip := net.ParseIP(host)
if ip == nil {
return false
}
for _, cidr := range claims.Src {
if _, net, err := net.ParseCIDR(cidr); err != nil {
return false // should not happen as this jwt is invalid
} else if net.Contains(ip) {
return true
}
}
return false
}
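// Illustrative sketch only: the containment test performed by validateSrc for a
// single CIDR entry boils down to the hypothetical helper below.
func exampleCIDRAllows(cidr, host string) bool {
	ip := net.ParseIP(host)
	if ip == nil {
		return false
	}
	_, ipnet, err := net.ParseCIDR(cidr)
	if err != nil {
		return false
	}
	return ipnet.Contains(ip)
}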
func validateTimes(claims *jwt.UserClaims) (bool, time.Duration) {
if claims == nil {
return false, time.Duration(0)
} else if len(claims.Times) == 0 {
return true, time.Duration(0)
}
now := time.Now()
loc := time.Local
if claims.Locale != "" {
var err error
if loc, err = time.LoadLocation(claims.Locale); err != nil {
return false, time.Duration(0) // parsing not expected to fail at this point
}
now = now.In(loc)
}
for _, timeRange := range claims.Times {
y, m, d := now.Date()
m = m - 1
d = d - 1
start, err := time.ParseInLocation("15:04:05", timeRange.Start, loc)
if err != nil {
return false, time.Duration(0) // parsing not expected to fail at this point
}
end, err := time.ParseInLocation("15:04:05", timeRange.End, loc)
if err != nil {
return false, time.Duration(0) // parsing not expected to fail at this point
}
if start.After(end) {
start = start.AddDate(y, int(m), d)
d++ // the intent is to be the next day
} else {
start = start.AddDate(y, int(m), d)
}
if start.Before(now) {
end = end.AddDate(y, int(m), d)
if end.After(now) {
return true, end.Sub(now)
}
}
}
return false, time.Duration(0)
}
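// Illustrative sketch only: a user claim restricted to business hours in Paris
// would carry roughly the data below. Locale and Times are the fields validateTimes
// reads above; the subject is a fake user public key and the helper is hypothetical.
func exampleBusinessHoursClaims() *jwt.UserClaims {
	uc := jwt.NewUserClaims("UA_FAKE_USER_PUBLIC_KEY")
	uc.Locale = "Europe/Paris"
	uc.Times = []jwt.TimeRange{{Start: "09:00:00", End: "17:00:00"}}
	return uc
}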
| 1 | 12,359 | So during a config reload, this function is invoked, but prior to that the o.TrustedKeys slice is set to nil, so this code would work. However, I am wondering what happens to s.trustedKeys, I am not sure this is handled at all. (same with the new map you are adding). I am not sure if this is supposed to be supported with a config reload, but just wondering if this is not putting things into a bad state. | nats-io-nats-server | go |
@@ -64,15 +64,12 @@ func SecurityHandler(authenticators map[string]auth.Authenticator, next http.Han
}
return func(w http.ResponseWriter, r *http.Request) {
-
tokenHeader := r.Header.Get("Authorization")
tokens := strings.Split(tokenHeader, " ")
if len(tokens) < 2 {
w.WriteHeader(http.StatusUnauthorized)
- json.NewEncoder(w).Encode(&api.ClusterResponse{
- Error: fmt.Sprintf("Access denied, token is malformed"),
- })
+ fmt.Fprintf(w, "Access denied token is empty")
return
}
token := tokens[1] | 1 | package server
import (
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"strings"
"github.com/gorilla/mux"
"github.com/libopenstorage/openstorage/api"
"github.com/libopenstorage/openstorage/pkg/auth"
"github.com/libopenstorage/openstorage/pkg/auth/secrets"
osecrets "github.com/libopenstorage/openstorage/pkg/auth/secrets"
"github.com/libopenstorage/openstorage/pkg/util"
"github.com/libopenstorage/openstorage/volume"
volumedrivers "github.com/libopenstorage/openstorage/volume/drivers"
lsecrets "github.com/libopenstorage/secrets"
"github.com/portworx/sched-ops/k8s/core"
"github.com/sirupsen/logrus"
)
const (
// PVCNameLabelKey is used for kubernetes auth provider indicating the name of PVC
PVCNameLabelKey = "pvc"
// PVCNamespaceLabelKey is used for kubernetes auth provider indicating the namespace of the PVC
PVCNamespaceLabelKey = "namespace"
)
var (
// OverrideSchedDriverName is set by osd program to override the schedule driver
OverrideSchedDriverName = ""
)
// NewAuthMiddleware returns a negroni implementation of an http middleware
// which will intercept the management APIs
func NewAuthMiddleware() *authMiddleware {
return &authMiddleware{}
}
type authMiddleware struct {
}
// newSecurityMiddleware, based on the auth configuration, returns a SecurityHandler wrapper or just the next handler unchanged
func newSecurityMiddleware(authenticators map[string]auth.Authenticator) func(next http.HandlerFunc) http.HandlerFunc {
if auth.Enabled() {
return func(next http.HandlerFunc) http.HandlerFunc {
return SecurityHandler(authenticators, next)
}
}
return func(next http.HandlerFunc) http.HandlerFunc {
return next
}
}
// SecurityHandler implements the authentication and authorization checks in the same handler;
// this functionality was not split into separate functions for the sake of simplicity
func SecurityHandler(authenticators map[string]auth.Authenticator, next http.HandlerFunc) http.HandlerFunc {
if authenticators == nil {
return next
}
return func(w http.ResponseWriter, r *http.Request) {
tokenHeader := r.Header.Get("Authorization")
tokens := strings.Split(tokenHeader, " ")
if len(tokens) < 2 {
w.WriteHeader(http.StatusUnauthorized)
json.NewEncoder(w).Encode(&api.ClusterResponse{
Error: fmt.Sprintf("Access denied, token is malformed"),
})
return
}
token := tokens[1]
// Determine issuer
issuer, err := auth.TokenIssuer(token)
if err != nil {
w.WriteHeader(http.StatusUnauthorized)
json.NewEncoder(w).Encode(&api.ClusterResponse{
Error: fmt.Sprintf("Access denied, %v", err),
})
return
}
// Use http.Request context for cancellation propagation
ctx := r.Context()
// Authenticate user
var claims *auth.Claims
if authenticator, exists := authenticators[issuer]; exists {
claims, err = authenticator.AuthenticateToken(ctx, token)
if err != nil {
w.WriteHeader(http.StatusForbidden)
json.NewEncoder(w).Encode(&api.ClusterResponse{
Error: fmt.Sprintf("Access denied, %s", err.Error()),
})
return
}
if claims == nil {
w.WriteHeader(http.StatusForbidden)
json.NewEncoder(w).Encode(&api.ClusterResponse{
Error: fmt.Sprintf("Access denied, wrong claims provided"),
})
}
} else {
w.WriteHeader(http.StatusUnauthorized)
json.NewEncoder(w).Encode(&api.ClusterResponse{
Error: fmt.Sprintf("Access denied, no authenticator for issuer %s", issuer),
})
return
}
// Check if user has admin role to access that endpoint
isSystemAdmin := false
for _, role := range claims.Roles {
if role == "system.admin" {
isSystemAdmin = true
break
}
}
if !isSystemAdmin {
w.WriteHeader(http.StatusForbidden)
json.NewEncoder(w).Encode(&api.ClusterResponse{
Error: fmt.Sprintf("Access denied, user must have admin access"),
})
return
}
next.ServeHTTP(w, r)
}
}
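// Illustrative sketch only: wiring the security middleware around a management
// endpoint could look like the hypothetical helper below. When auth is disabled,
// newSecurityMiddleware returns the handler unchanged.
func exampleSecureRoute(authenticators map[string]auth.Authenticator, clusterHandler http.HandlerFunc) http.HandlerFunc {
	secure := newSecurityMiddleware(authenticators)
	return secure(clusterHandler)
}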
func (a *authMiddleware) createWithAuth(w http.ResponseWriter, r *http.Request, next http.HandlerFunc) {
fn := "create"
_, authRequired := a.isTokenProcessingRequired(r)
if !authRequired {
next(w, r)
return
}
requestBody := a.getBody(r)
var dcReq api.VolumeCreateRequest
var dcRes api.VolumeCreateResponse
if err := json.NewDecoder(requestBody).Decode(&dcReq); err != nil {
next(w, r)
return
}
spec := dcReq.GetSpec()
locator := dcReq.GetLocator()
tokenSecretContext, err := a.parseSecret(spec.VolumeLabels, locator.VolumeLabels)
if err != nil {
a.log(locator.Name, fn).WithError(err).Error("failed to parse secret")
dcRes.VolumeResponse = &api.VolumeResponse{Error: "failed to parse secret: " + err.Error()}
json.NewEncoder(w).Encode(&dcRes)
return
} else if tokenSecretContext == nil {
tokenSecretContext = &api.TokenSecretContext{}
}
// If no secret is provided, then the caller is accessing publicly
if tokenSecretContext.SecretName != "" {
token, err := osecrets.GetToken(tokenSecretContext)
if err != nil {
a.log(locator.Name, fn).WithError(err).Error("failed to get token")
dcRes.VolumeResponse = &api.VolumeResponse{Error: "failed to get token: " + err.Error()}
json.NewEncoder(w).Encode(&dcRes)
return
}
// Save a reference to the secret
// These values will be stored in the header for the create() server handler
// to take and place in the labels for the volume since we do not want to adjust
// the body of the request in this middleware. When create() gets these values
// from the headers, it will copy them to the labels of the volume so that
// we can track the secret in the rest of the middleware calls.
r.Header.Set(secrets.SecretNameKey, tokenSecretContext.SecretName)
r.Header.Set(secrets.SecretNamespaceKey, tokenSecretContext.SecretNamespace)
// If the source PVC was set, save it for the next layer to store on
// the labels of the volume
if len(tokenSecretContext.PvcName) != 0 && len(tokenSecretContext.PvcNamespace) != 0 {
r.Header.Set(api.KubernetesPvcNameKey, tokenSecretContext.PvcName)
r.Header.Set(api.KubernetesPvcNamespaceKey, tokenSecretContext.PvcNamespace)
}
a.insertToken(r, token)
}
next(w, r)
}
func (a *authMiddleware) setWithAuth(w http.ResponseWriter, r *http.Request, next http.HandlerFunc) {
fn := "set"
d, authRequired := a.isTokenProcessingRequired(r)
if !authRequired {
next(w, r)
return
}
volumeID, err := a.parseID(r)
if err != nil {
a.log(volumeID, fn).WithError(err).Error("Failed to parse volumeID")
next(w, r)
return
}
token, err := a.fetchSecretForVolume(d, volumeID)
if err != nil {
volumeResponse := &api.VolumeResponse{}
a.log(volumeID, fn).WithError(err).Error("Failed to fetch secret")
volumeResponse.Error = err.Error()
json.NewEncoder(w).Encode(volumeResponse)
return
}
if len(token) != 0 {
a.insertToken(r, token)
}
next(w, r)
}
func (a *authMiddleware) deleteWithAuth(w http.ResponseWriter, r *http.Request, next http.HandlerFunc) {
fn := "delete"
d, authRequired := a.isTokenProcessingRequired(r)
if !authRequired {
next(w, r)
return
}
volumeID, err := a.parseID(r)
if err != nil {
a.log(volumeID, fn).WithError(err).Error("Failed to parse volumeID")
next(w, r)
return
}
// Idempotency
vols, err := d.Inspect([]string{volumeID})
if err != nil || len(vols) == 0 || vols[0] == nil {
next(w, r)
return
}
token, err := a.fetchSecretForVolume(d, volumeID)
if err != nil {
volumeResponse := &api.VolumeResponse{}
a.log(volumeID, fn).WithError(err).Error("Failed to fetch secret")
volumeResponse.Error = err.Error()
json.NewEncoder(w).Encode(volumeResponse)
return
}
if len(token) != 0 {
a.insertToken(r, token)
}
next(w, r)
}
func (a *authMiddleware) inspectWithAuth(w http.ResponseWriter, r *http.Request, next http.HandlerFunc) {
fn := "inspect"
d, authRequired := a.isTokenProcessingRequired(r)
if !authRequired {
next(w, r)
return
}
volumeID, err := a.parseID(r)
if err != nil {
a.log(volumeID, fn).WithError(err).Error("Failed to parse volumeID")
next(w, r)
return
}
dk, err := d.Inspect([]string{volumeID})
if err != nil {
a.log(volumeID, fn).WithError(err).Error("Failed to inspect volume")
http.Error(w, err.Error(), http.StatusNotFound)
return
}
json.NewEncoder(w).Encode(dk)
}
func (a *authMiddleware) enumerateWithAuth(w http.ResponseWriter, r *http.Request, next http.HandlerFunc) {
fn := "enumerate"
d, authRequired := a.isTokenProcessingRequired(r)
if !authRequired {
next(w, r)
return
}
volIDs, ok := r.URL.Query()[api.OptVolumeID]
if !ok || len(volIDs[0]) < 1 {
a.log("", fn).Error("Failed to parse VolumeID")
return
}
volumeID := volIDs[0]
token, err := a.fetchSecretForVolume(d, volumeID)
if err != nil {
volumeResponse := &api.VolumeResponse{}
a.log(volumeID, fn).WithError(err).Error("Failed to fetch secret")
volumeResponse.Error = err.Error()
json.NewEncoder(w).Encode(volumeResponse)
return
}
if len(token) != 0 {
a.insertToken(r, token)
}
next(w, r)
}
func (a *authMiddleware) isTokenProcessingRequired(r *http.Request) (volume.VolumeDriver, bool) {
// If a token has been passed, then return here
if len(r.Header.Get("Authorization")) > 0 {
return nil, false
}
// No token has been passed in the request. Determine
// if the request is from Kubernetes
userAgent := r.Header.Get("User-Agent")
if len(userAgent) > 0 {
// Check if the request is coming from a container orchestrator
clientName := strings.Split(userAgent, "/")
if len(clientName) > 0 {
if strings.HasSuffix(clientName[0], schedDriverPostFix) {
driverName := clientName[0]
if len(OverrideSchedDriverName) != 0 {
driverName = OverrideSchedDriverName
}
d, err := volumedrivers.Get(driverName)
if err != nil {
return nil, false
}
return d, true
}
}
}
return nil, false
}
func (a *authMiddleware) insertToken(r *http.Request, token string) {
// Set the token in header
if auth.IsJwtToken(token) {
r.Header.Set("Authorization", "bearer "+token)
} else {
r.Header.Set("Authorization", "Basic "+token)
}
}
func (a *authMiddleware) parseID(r *http.Request) (string, error) {
if id, err := a.parseParam(r, "id"); err == nil {
return id, nil
}
return "", fmt.Errorf("could not parse snap ID")
}
func (a *authMiddleware) parseParam(r *http.Request, param string) (string, error) {
vars := mux.Vars(r)
if id, ok := vars[param]; ok {
return id, nil
}
return "", fmt.Errorf("could not parse %s", param)
}
// This function makes it possible to secure the model of accessing the secret by allowing
// the definition of secret access to come from the storage class, as done by CSI.
func (a *authMiddleware) getSecretInformationInKubernetes(
specLabels, locatorLabels map[string]string,
) (*api.TokenSecretContext, error) {
// Get pvc location and name
// For k8s fetch the actual annotations
pvcName, ok := getVolumeLabel(PVCNameLabelKey, specLabels, locatorLabels)
if !ok {
return nil, fmt.Errorf("Unable to authenticate request due to not able to determine name of the pvc from the volume")
}
pvcNamespace, ok := getVolumeLabel(PVCNamespaceLabelKey, specLabels, locatorLabels)
if !ok {
return nil, fmt.Errorf("Unable to authenticate request due to not able to determine namespace of the pvc from the volume")
}
// Get pvc object
pvc, err := core.Instance().GetPersistentVolumeClaim(pvcName, pvcNamespace)
if err != nil {
return nil, fmt.Errorf("Unable to get PVC information from Kubernetes: %v", err)
}
// Get storageclass for pvc object
sc, err := core.Instance().GetStorageClassForPVC(pvc)
if err != nil {
return nil, fmt.Errorf("Unable to get StorageClass information from Kubernetes: %v", err)
}
// Get secret namespace
secretNamespaceValue := sc.Parameters[osecrets.SecretNamespaceKey]
secretNameValue := sc.Parameters[osecrets.SecretNameKey]
if len(secretNameValue) == 0 && len(secretNamespaceValue) == 0 {
return &api.TokenSecretContext{}, nil
}
// Allow ${pvc.namespace} to be set in the storage class
namespaceParams := map[string]string{"pvc.namespace": pvc.GetNamespace()}
secretNamespace, err := util.ResolveTemplate(secretNamespaceValue, namespaceParams)
if err != nil {
return nil, err
}
// Get secret name
nameParams := make(map[string]string)
// Allow ${pvc.annotations['pvcNameKey']} to be set in the storage class
// See pkg/auth/secrets/secrets.go for more information
for k, v := range pvc.Annotations {
nameParams["pvc.annotations['"+k+"']"] = v
}
secretName, err := util.ResolveTemplate(secretNameValue, nameParams)
if err != nil {
return nil, err
}
return &api.TokenSecretContext{
SecretName: secretName,
SecretNamespace: secretNamespace,
PvcName: pvcName,
PvcNamespace: pvcNamespace,
}, nil
}
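// Illustrative sketch only: a StorageClass that lets each PVC reference its own
// token secret could set parameters like the hypothetical map below; the ${...}
// templates are resolved by util.ResolveTemplate as shown above, and the annotation
// key name is made up.
var exampleStorageClassParams = map[string]string{
	osecrets.SecretNamespaceKey: "${pvc.namespace}",
	osecrets.SecretNameKey:      "${pvc.annotations['openstorage.io/auth-secret-name']}",
}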
func (a *authMiddleware) parseSecret(
specLabels, locatorLabels map[string]string,
) (*api.TokenSecretContext, error) {
// Check if it is Kubernetes
if lsecrets.Instance().String() == lsecrets.TypeK8s {
return a.getSecretInformationInKubernetes(specLabels, locatorLabels)
}
// Not Kubernetes, try to get secret information from labels
return parseSecretFromLabels(specLabels, locatorLabels)
}
func parseSecretFromLabels(specLabels, locatorLabels map[string]string) (*api.TokenSecretContext, error) {
// Locator labels take precedence
secretName := locatorLabels[osecrets.SecretNameKey]
secretNamespace := locatorLabels[osecrets.SecretNamespaceKey]
if secretName == "" {
secretName = specLabels[osecrets.SecretNameKey]
}
if secretName == "" {
return nil, fmt.Errorf("secret name is empty")
}
if secretNamespace == "" {
secretNamespace = specLabels[osecrets.SecretNamespaceKey]
}
return &api.TokenSecretContext{
SecretName: secretName,
SecretNamespace: secretNamespace,
}, nil
}
func (a *authMiddleware) log(id, fn string) *logrus.Entry {
return logrus.WithFields(map[string]interface{}{
"ID": id,
"Component": "auth-middleware",
"Function": fn,
})
}
func (a *authMiddleware) getBody(r *http.Request) io.ReadCloser {
// Make a copy of the reader so that the next handler
// has access to the body
buf, _ := ioutil.ReadAll(r.Body)
rdr1 := ioutil.NopCloser(bytes.NewBuffer(buf))
rdr2 := ioutil.NopCloser(bytes.NewBuffer(buf))
r.Body = rdr2
return rdr1
}
func getVolumeLabel(key string, specLabels, locatorLabels map[string]string) (string, bool) {
if v, ok := locatorLabels[key]; ok {
return v, true
}
v, ok := specLabels[key]
return v, ok
}
func (a *authMiddleware) fetchSecretForVolume(d volume.VolumeDriver, id string) (string, error) {
vols, err := d.Inspect([]string{id})
if err != nil || len(vols) == 0 || vols[0] == nil {
return "", fmt.Errorf("Volume %s does not exist", id)
}
v := vols[0]
if v.GetLocator().GetVolumeLabels() == nil {
return "", nil
}
tokenSecretContext := &api.TokenSecretContext{
SecretName: v.GetLocator().GetVolumeLabels()[secrets.SecretNameKey],
SecretNamespace: v.GetLocator().GetVolumeLabels()[secrets.SecretNamespaceKey],
}
// If no secret is provided, then the caller is accessing publicly
if tokenSecretContext.SecretName == "" || tokenSecretContext.SecretNamespace == "" {
return "", nil
}
// Retrieve secret
token, err := osecrets.GetToken(tokenSecretContext)
if err != nil {
return "", fmt.Errorf("Failed to get token from secret %s/%s: %v",
tokenSecretContext.SecretNamespace,
tokenSecretContext.SecretName,
err)
}
return token, nil
}
| 1 | 8,594 | I think the logic should be to check for token len of 0 then check for token len of less than 2. The way the logic is now, it is hard to determine if it is empty or malformed. | libopenstorage-openstorage | go |
@@ -267,6 +267,15 @@ void wlr_output_set_subpixel(struct wlr_output *output,
wlr_output_schedule_done(output);
}
+void wlr_output_set_format(struct wlr_output *output,
+ enum wl_shm_format format) {
+ if (output->format == format) {
+ return;
+ }
+
+ output->format = format;
+}
+
void wlr_output_set_description(struct wlr_output *output, const char *desc) {
if (output->description != NULL && desc != NULL &&
strcmp(output->description, desc) == 0) { | 1 | #define _POSIX_C_SOURCE 200809L
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <tgmath.h>
#include <time.h>
#include <wayland-server-core.h>
#include <wlr/interfaces/wlr_output.h>
#include <wlr/render/interface.h>
#include <wlr/render/wlr_renderer.h>
#include <wlr/types/wlr_box.h>
#include <wlr/types/wlr_matrix.h>
#include <wlr/types/wlr_output.h>
#include <wlr/types/wlr_seat.h>
#include <wlr/types/wlr_surface.h>
#include <wlr/util/log.h>
#include <wlr/util/region.h>
#include "util/global.h"
#include "util/signal.h"
#define OUTPUT_VERSION 3
static void send_geometry(struct wl_resource *resource) {
struct wlr_output *output = wlr_output_from_resource(resource);
wl_output_send_geometry(resource, 0, 0,
output->phys_width, output->phys_height, output->subpixel,
output->make, output->model, output->transform);
}
static void send_current_mode(struct wl_resource *resource) {
struct wlr_output *output = wlr_output_from_resource(resource);
if (output->current_mode != NULL) {
struct wlr_output_mode *mode = output->current_mode;
wl_output_send_mode(resource, WL_OUTPUT_MODE_CURRENT,
mode->width, mode->height, mode->refresh);
} else {
// Output has no mode
wl_output_send_mode(resource, WL_OUTPUT_MODE_CURRENT, output->width,
output->height, output->refresh);
}
}
static void send_scale(struct wl_resource *resource) {
struct wlr_output *output = wlr_output_from_resource(resource);
uint32_t version = wl_resource_get_version(resource);
if (version >= WL_OUTPUT_SCALE_SINCE_VERSION) {
wl_output_send_scale(resource, (uint32_t)ceil(output->scale));
}
}
static void send_done(struct wl_resource *resource) {
uint32_t version = wl_resource_get_version(resource);
if (version >= WL_OUTPUT_DONE_SINCE_VERSION) {
wl_output_send_done(resource);
}
}
static void output_handle_resource_destroy(struct wl_resource *resource) {
wl_list_remove(wl_resource_get_link(resource));
}
static void output_handle_release(struct wl_client *client,
struct wl_resource *resource) {
wl_resource_destroy(resource);
}
static const struct wl_output_interface output_impl = {
.release = output_handle_release,
};
static void output_bind(struct wl_client *wl_client, void *data,
uint32_t version, uint32_t id) {
// `output` can be NULL if the output global is being destroyed
struct wlr_output *output = data;
struct wl_resource *resource = wl_resource_create(wl_client,
&wl_output_interface, version, id);
if (resource == NULL) {
wl_client_post_no_memory(wl_client);
return;
}
wl_resource_set_implementation(resource, &output_impl, output,
output_handle_resource_destroy);
if (output == NULL) {
wl_list_init(wl_resource_get_link(resource));
return;
}
wl_list_insert(&output->resources, wl_resource_get_link(resource));
send_geometry(resource);
send_current_mode(resource);
send_scale(resource);
send_done(resource);
}
void wlr_output_create_global(struct wlr_output *output) {
if (output->global != NULL) {
return;
}
output->global = wl_global_create(output->display,
&wl_output_interface, OUTPUT_VERSION, output, output_bind);
if (output->global == NULL) {
wlr_log(WLR_ERROR, "Failed to allocate wl_output global");
}
}
void wlr_output_destroy_global(struct wlr_output *output) {
if (output->global == NULL) {
return;
}
// Make all output resources inert
struct wl_resource *resource, *tmp;
wl_resource_for_each_safe(resource, tmp, &output->resources) {
wl_resource_set_user_data(resource, NULL);
wl_list_remove(wl_resource_get_link(resource));
wl_list_init(wl_resource_get_link(resource));
}
wlr_global_destroy_safe(output->global, output->display);
output->global = NULL;
}
void wlr_output_update_enabled(struct wlr_output *output, bool enabled) {
if (output->enabled == enabled) {
return;
}
output->enabled = enabled;
wlr_signal_emit_safe(&output->events.enable, output);
}
static void output_update_matrix(struct wlr_output *output) {
wlr_matrix_projection(output->transform_matrix, output->width,
output->height, output->transform);
}
void wlr_output_enable(struct wlr_output *output, bool enable) {
if (output->enabled == enable) {
output->pending.committed &= ~WLR_OUTPUT_STATE_ENABLED;
return;
}
output->pending.committed |= WLR_OUTPUT_STATE_ENABLED;
output->pending.enabled = enable;
}
static void output_state_clear_mode(struct wlr_output_state *state) {
if (!(state->committed & WLR_OUTPUT_STATE_MODE)) {
return;
}
state->mode = NULL;
state->committed &= ~WLR_OUTPUT_STATE_MODE;
}
void wlr_output_set_mode(struct wlr_output *output,
struct wlr_output_mode *mode) {
output_state_clear_mode(&output->pending);
if (output->current_mode == mode) {
return;
}
output->pending.committed |= WLR_OUTPUT_STATE_MODE;
output->pending.mode_type = WLR_OUTPUT_STATE_MODE_FIXED;
output->pending.mode = mode;
}
void wlr_output_set_custom_mode(struct wlr_output *output, int32_t width,
int32_t height, int32_t refresh) {
output_state_clear_mode(&output->pending);
if (output->width == width && output->height == height &&
output->refresh == refresh) {
return;
}
output->pending.committed |= WLR_OUTPUT_STATE_MODE;
output->pending.mode_type = WLR_OUTPUT_STATE_MODE_CUSTOM;
output->pending.custom_mode.width = width;
output->pending.custom_mode.height = height;
output->pending.custom_mode.refresh = refresh;
}
void wlr_output_update_mode(struct wlr_output *output,
struct wlr_output_mode *mode) {
output->current_mode = mode;
if (mode != NULL) {
wlr_output_update_custom_mode(output, mode->width, mode->height,
mode->refresh);
} else {
wlr_output_update_custom_mode(output, 0, 0, 0);
}
}
void wlr_output_update_custom_mode(struct wlr_output *output, int32_t width,
int32_t height, int32_t refresh) {
if (output->width == width && output->height == height &&
output->refresh == refresh) {
return;
}
output->width = width;
output->height = height;
output_update_matrix(output);
output->refresh = refresh;
struct wl_resource *resource;
wl_resource_for_each(resource, &output->resources) {
send_current_mode(resource);
}
wlr_output_schedule_done(output);
wlr_signal_emit_safe(&output->events.mode, output);
}
void wlr_output_set_transform(struct wlr_output *output,
enum wl_output_transform transform) {
if (output->transform == transform) {
output->pending.committed &= ~WLR_OUTPUT_STATE_TRANSFORM;
return;
}
output->pending.committed |= WLR_OUTPUT_STATE_TRANSFORM;
output->pending.transform = transform;
}
void wlr_output_set_scale(struct wlr_output *output, float scale) {
if (output->scale == scale) {
output->pending.committed &= ~WLR_OUTPUT_STATE_SCALE;
return;
}
output->pending.committed |= WLR_OUTPUT_STATE_SCALE;
output->pending.scale = scale;
}
void wlr_output_enable_adaptive_sync(struct wlr_output *output, bool enabled) {
bool currently_enabled =
output->adaptive_sync_status != WLR_OUTPUT_ADAPTIVE_SYNC_DISABLED;
if (currently_enabled == enabled) {
output->pending.committed &= ~WLR_OUTPUT_STATE_ADAPTIVE_SYNC_ENABLED;
return;
}
output->pending.committed |= WLR_OUTPUT_STATE_ADAPTIVE_SYNC_ENABLED;
output->pending.adaptive_sync_enabled = enabled;
}
void wlr_output_set_subpixel(struct wlr_output *output,
enum wl_output_subpixel subpixel) {
if (output->subpixel == subpixel) {
return;
}
output->subpixel = subpixel;
struct wl_resource *resource;
wl_resource_for_each(resource, &output->resources) {
send_geometry(resource);
}
wlr_output_schedule_done(output);
}
void wlr_output_set_description(struct wlr_output *output, const char *desc) {
if (output->description != NULL && desc != NULL &&
strcmp(output->description, desc) == 0) {
return;
}
free(output->description);
if (desc != NULL) {
output->description = strdup(desc);
} else {
output->description = NULL;
}
wlr_signal_emit_safe(&output->events.description, output);
}
static void schedule_done_handle_idle_timer(void *data) {
struct wlr_output *output = data;
output->idle_done = NULL;
struct wl_resource *resource;
wl_resource_for_each(resource, &output->resources) {
uint32_t version = wl_resource_get_version(resource);
if (version >= WL_OUTPUT_DONE_SINCE_VERSION) {
wl_output_send_done(resource);
}
}
}
void wlr_output_schedule_done(struct wlr_output *output) {
if (output->idle_done != NULL) {
return; // Already scheduled
}
struct wl_event_loop *ev = wl_display_get_event_loop(output->display);
output->idle_done =
wl_event_loop_add_idle(ev, schedule_done_handle_idle_timer, output);
}
static void handle_display_destroy(struct wl_listener *listener, void *data) {
struct wlr_output *output =
wl_container_of(listener, output, display_destroy);
wlr_output_destroy_global(output);
}
void wlr_output_init(struct wlr_output *output, struct wlr_backend *backend,
const struct wlr_output_impl *impl, struct wl_display *display) {
assert(impl->attach_render && impl->rollback_render && impl->commit);
if (impl->set_cursor || impl->move_cursor) {
assert(impl->set_cursor && impl->move_cursor);
}
output->backend = backend;
output->impl = impl;
output->display = display;
wl_list_init(&output->modes);
output->transform = WL_OUTPUT_TRANSFORM_NORMAL;
output->scale = 1;
output->commit_seq = 0;
wl_list_init(&output->cursors);
wl_list_init(&output->resources);
wl_signal_init(&output->events.frame);
wl_signal_init(&output->events.damage);
wl_signal_init(&output->events.needs_frame);
wl_signal_init(&output->events.precommit);
wl_signal_init(&output->events.commit);
wl_signal_init(&output->events.present);
wl_signal_init(&output->events.enable);
wl_signal_init(&output->events.mode);
wl_signal_init(&output->events.scale);
wl_signal_init(&output->events.transform);
wl_signal_init(&output->events.description);
wl_signal_init(&output->events.destroy);
pixman_region32_init(&output->pending.damage);
const char *no_hardware_cursors = getenv("WLR_NO_HARDWARE_CURSORS");
if (no_hardware_cursors != NULL && strcmp(no_hardware_cursors, "1") == 0) {
wlr_log(WLR_DEBUG,
"WLR_NO_HARDWARE_CURSORS set, forcing software cursors");
output->software_cursor_locks = 1;
}
output->display_destroy.notify = handle_display_destroy;
wl_display_add_destroy_listener(display, &output->display_destroy);
output->frame_pending = true;
}
void wlr_output_destroy(struct wlr_output *output) {
if (!output) {
return;
}
wl_list_remove(&output->display_destroy.link);
wlr_output_destroy_global(output);
wlr_signal_emit_safe(&output->events.destroy, output);
// The backend is responsible for freeing the list of modes
struct wlr_output_cursor *cursor, *tmp_cursor;
wl_list_for_each_safe(cursor, tmp_cursor, &output->cursors, link) {
wlr_output_cursor_destroy(cursor);
}
if (output->idle_frame != NULL) {
wl_event_source_remove(output->idle_frame);
}
if (output->idle_done != NULL) {
wl_event_source_remove(output->idle_done);
}
free(output->description);
pixman_region32_fini(&output->pending.damage);
if (output->impl && output->impl->destroy) {
output->impl->destroy(output);
} else {
free(output);
}
}
void wlr_output_transformed_resolution(struct wlr_output *output,
int *width, int *height) {
if (output->transform % 2 == 0) {
*width = output->width;
*height = output->height;
} else {
*width = output->height;
*height = output->width;
}
}
void wlr_output_effective_resolution(struct wlr_output *output,
int *width, int *height) {
wlr_output_transformed_resolution(output, width, height);
*width /= output->scale;
*height /= output->scale;
}
struct wlr_output_mode *wlr_output_preferred_mode(struct wlr_output *output) {
if (wl_list_empty(&output->modes)) {
return NULL;
}
struct wlr_output_mode *mode;
wl_list_for_each(mode, &output->modes, link) {
if (mode->preferred) {
return mode;
}
}
// No preferred mode, choose the last one
return wl_container_of(output->modes.prev, mode, link);
}
static void output_state_clear_buffer(struct wlr_output_state *state) {
if (!(state->committed & WLR_OUTPUT_STATE_BUFFER)) {
return;
}
wlr_buffer_unlock(state->buffer);
state->buffer = NULL;
state->committed &= ~WLR_OUTPUT_STATE_BUFFER;
}
bool wlr_output_attach_render(struct wlr_output *output, int *buffer_age) {
if (!output->impl->attach_render(output, buffer_age)) {
return false;
}
output_state_clear_buffer(&output->pending);
output->pending.committed |= WLR_OUTPUT_STATE_BUFFER;
output->pending.buffer_type = WLR_OUTPUT_STATE_BUFFER_RENDER;
return true;
}
bool wlr_output_preferred_read_format(struct wlr_output *output,
enum wl_shm_format *fmt) {
struct wlr_renderer *renderer = wlr_backend_get_renderer(output->backend);
if (!renderer->impl->preferred_read_format || !renderer->impl->read_pixels) {
return false;
}
if (!output->impl->attach_render(output, NULL)) {
return false;
}
*fmt = renderer->impl->preferred_read_format(renderer);
output->impl->rollback_render(output);
return true;
}
void wlr_output_set_damage(struct wlr_output *output,
pixman_region32_t *damage) {
pixman_region32_intersect_rect(&output->pending.damage, damage,
0, 0, output->width, output->height);
output->pending.committed |= WLR_OUTPUT_STATE_DAMAGE;
}
static void output_state_clear_gamma_lut(struct wlr_output_state *state) {
free(state->gamma_lut);
state->gamma_lut = NULL;
state->committed &= ~WLR_OUTPUT_STATE_GAMMA_LUT;
}
static void output_state_clear(struct wlr_output_state *state) {
output_state_clear_buffer(state);
output_state_clear_gamma_lut(state);
pixman_region32_clear(&state->damage);
state->committed = 0;
}
static void output_pending_resolution(struct wlr_output *output, int *width,
int *height) {
if (output->pending.committed & WLR_OUTPUT_STATE_MODE) {
switch (output->pending.mode_type) {
case WLR_OUTPUT_STATE_MODE_FIXED:
*width = output->pending.mode->width;
*height = output->pending.mode->height;
return;
case WLR_OUTPUT_STATE_MODE_CUSTOM:
*width = output->pending.custom_mode.width;
*height = output->pending.custom_mode.height;
return;
}
abort();
} else {
*width = output->width;
*height = output->height;
}
}
static bool output_basic_test(struct wlr_output *output) {
if (output->pending.committed & WLR_OUTPUT_STATE_BUFFER) {
if (output->frame_pending) {
wlr_log(WLR_DEBUG, "Tried to commit a buffer while a frame is pending");
return false;
}
if (output->pending.buffer_type == WLR_OUTPUT_STATE_BUFFER_SCANOUT) {
if (output->attach_render_locks > 0) {
return false;
}
// If the output has at least one software cursor, refuse to attach the
// buffer
struct wlr_output_cursor *cursor;
wl_list_for_each(cursor, &output->cursors, link) {
if (cursor->enabled && cursor->visible &&
cursor != output->hardware_cursor) {
return false;
}
}
// If the size doesn't match, reject buffer (scaling is not
// supported)
int pending_width, pending_height;
output_pending_resolution(output, &pending_width, &pending_height);
if (output->pending.buffer->width != pending_width ||
output->pending.buffer->height != pending_height) {
return false;
}
}
}
bool enabled = output->enabled;
if (output->pending.committed & WLR_OUTPUT_STATE_ENABLED) {
enabled = output->pending.enabled;
}
if (!enabled && output->pending.committed & WLR_OUTPUT_STATE_BUFFER) {
wlr_log(WLR_DEBUG, "Tried to commit a buffer on a disabled output");
return false;
}
if (!enabled && output->pending.committed & WLR_OUTPUT_STATE_MODE) {
wlr_log(WLR_DEBUG, "Tried to modeset a disabled output");
return false;
}
if (!enabled && output->pending.committed & WLR_OUTPUT_STATE_ADAPTIVE_SYNC_ENABLED) {
wlr_log(WLR_DEBUG, "Tried to enable adaptive sync on a disabled output");
return false;
}
return true;
}
bool wlr_output_test(struct wlr_output *output) {
if (!output_basic_test(output)) {
return false;
}
return output->impl->test(output);
}
bool wlr_output_commit(struct wlr_output *output) {
if (!output_basic_test(output)) {
wlr_log(WLR_ERROR, "Basic output test failed");
return false;
}
if ((output->pending.committed & WLR_OUTPUT_STATE_BUFFER) &&
output->idle_frame != NULL) {
wl_event_source_remove(output->idle_frame);
output->idle_frame = NULL;
}
struct timespec now;
clock_gettime(CLOCK_MONOTONIC, &now);
struct wlr_output_event_precommit event = {
.output = output,
.when = &now,
};
wlr_signal_emit_safe(&output->events.precommit, &event);
if (!output->impl->commit(output)) {
output_state_clear(&output->pending);
return false;
}
if (output->pending.committed & WLR_OUTPUT_STATE_BUFFER) {
struct wlr_output_cursor *cursor;
wl_list_for_each(cursor, &output->cursors, link) {
if (!cursor->enabled || !cursor->visible || cursor->surface == NULL) {
continue;
}
wlr_surface_send_frame_done(cursor->surface, &now);
}
}
output->commit_seq++;
wlr_signal_emit_safe(&output->events.commit, output);
bool scale_updated = output->pending.committed & WLR_OUTPUT_STATE_SCALE;
if (scale_updated) {
output->scale = output->pending.scale;
wlr_signal_emit_safe(&output->events.scale, output);
}
if (output->pending.committed & WLR_OUTPUT_STATE_TRANSFORM) {
output->transform = output->pending.transform;
output_update_matrix(output);
wlr_signal_emit_safe(&output->events.transform, output);
}
bool geometry_updated = output->pending.committed &
(WLR_OUTPUT_STATE_MODE | WLR_OUTPUT_STATE_TRANSFORM);
if (geometry_updated || scale_updated) {
struct wl_resource *resource;
wl_resource_for_each(resource, &output->resources) {
if (geometry_updated) {
send_geometry(resource);
}
if (scale_updated) {
send_scale(resource);
}
}
wlr_output_schedule_done(output);
}
if (output->pending.committed & WLR_OUTPUT_STATE_BUFFER) {
output->frame_pending = true;
output->needs_frame = false;
}
output_state_clear(&output->pending);
return true;
}
void wlr_output_rollback(struct wlr_output *output) {
if (output->impl->rollback_render &&
(output->pending.committed & WLR_OUTPUT_STATE_BUFFER) &&
output->pending.buffer_type == WLR_OUTPUT_STATE_BUFFER_RENDER) {
output->impl->rollback_render(output);
}
output_state_clear(&output->pending);
}
void wlr_output_attach_buffer(struct wlr_output *output,
struct wlr_buffer *buffer) {
output_state_clear_buffer(&output->pending);
output->pending.committed |= WLR_OUTPUT_STATE_BUFFER;
output->pending.buffer_type = WLR_OUTPUT_STATE_BUFFER_SCANOUT;
output->pending.buffer = wlr_buffer_lock(buffer);
}
void wlr_output_send_frame(struct wlr_output *output) {
output->frame_pending = false;
wlr_signal_emit_safe(&output->events.frame, output);
}
static void schedule_frame_handle_idle_timer(void *data) {
struct wlr_output *output = data;
output->idle_frame = NULL;
if (!output->frame_pending) {
wlr_output_send_frame(output);
}
}
void wlr_output_schedule_frame(struct wlr_output *output) {
// Make sure the compositor commits a new frame. This is necessary to make
// clients which ask for frame callbacks without submitting a new buffer
// work.
wlr_output_update_needs_frame(output);
if (output->frame_pending || output->idle_frame != NULL) {
return;
}
// We're using an idle timer here in case a buffer swap happens right after
// this function is called
struct wl_event_loop *ev = wl_display_get_event_loop(output->display);
output->idle_frame =
wl_event_loop_add_idle(ev, schedule_frame_handle_idle_timer, output);
}
void wlr_output_send_present(struct wlr_output *output,
struct wlr_output_event_present *event) {
struct wlr_output_event_present _event = {0};
if (event == NULL) {
event = &_event;
event->commit_seq = output->commit_seq + 1;
}
event->output = output;
struct timespec now;
if (event->when == NULL) {
clockid_t clock = wlr_backend_get_presentation_clock(output->backend);
errno = 0;
if (clock_gettime(clock, &now) != 0) {
wlr_log_errno(WLR_ERROR, "failed to send output present event: "
"failed to read clock");
return;
}
event->when = &now;
}
wlr_signal_emit_safe(&output->events.present, event);
}
void wlr_output_set_gamma(struct wlr_output *output, size_t size,
const uint16_t *r, const uint16_t *g, const uint16_t *b) {
output_state_clear_gamma_lut(&output->pending);
output->pending.gamma_lut_size = size;
output->pending.gamma_lut = malloc(3 * size * sizeof(uint16_t));
if (output->pending.gamma_lut == NULL) {
wlr_log_errno(WLR_ERROR, "Allocation failed");
return;
}
memcpy(output->pending.gamma_lut, r, size * sizeof(uint16_t));
memcpy(output->pending.gamma_lut + size, g, size * sizeof(uint16_t));
memcpy(output->pending.gamma_lut + 2 * size, b, size * sizeof(uint16_t));
output->pending.committed |= WLR_OUTPUT_STATE_GAMMA_LUT;
}
size_t wlr_output_get_gamma_size(struct wlr_output *output) {
if (!output->impl->get_gamma_size) {
return 0;
}
return output->impl->get_gamma_size(output);
}
bool wlr_output_export_dmabuf(struct wlr_output *output,
struct wlr_dmabuf_attributes *attribs) {
if (!output->impl->export_dmabuf) {
return false;
}
return output->impl->export_dmabuf(output, attribs);
}
void wlr_output_update_needs_frame(struct wlr_output *output) {
if (output->needs_frame) {
return;
}
output->needs_frame = true;
wlr_signal_emit_safe(&output->events.needs_frame, output);
}
void wlr_output_damage_whole(struct wlr_output *output) {
int width, height;
wlr_output_transformed_resolution(output, &width, &height);
pixman_region32_t damage;
pixman_region32_init_rect(&damage, 0, 0, width, height);
struct wlr_output_event_damage event = {
.output = output,
.damage = &damage,
};
wlr_signal_emit_safe(&output->events.damage, &event);
pixman_region32_fini(&damage);
}
struct wlr_output *wlr_output_from_resource(struct wl_resource *resource) {
assert(wl_resource_instance_of(resource, &wl_output_interface,
&output_impl));
return wl_resource_get_user_data(resource);
}
void wlr_output_lock_attach_render(struct wlr_output *output, bool lock) {
if (lock) {
++output->attach_render_locks;
} else {
assert(output->attach_render_locks > 0);
--output->attach_render_locks;
}
wlr_log(WLR_DEBUG, "%s direct scan-out on output '%s' (locks: %d)",
lock ? "Disabling" : "Enabling", output->name,
output->attach_render_locks);
}
static void output_cursor_damage_whole(struct wlr_output_cursor *cursor);
void wlr_output_lock_software_cursors(struct wlr_output *output, bool lock) {
if (lock) {
++output->software_cursor_locks;
} else {
assert(output->software_cursor_locks > 0);
--output->software_cursor_locks;
}
wlr_log(WLR_DEBUG, "%s hardware cursors on output '%s' (locks: %d)",
lock ? "Disabling" : "Enabling", output->name,
output->software_cursor_locks);
if (output->software_cursor_locks > 0 && output->hardware_cursor != NULL) {
assert(output->impl->set_cursor);
output->impl->set_cursor(output, NULL, 1,
WL_OUTPUT_TRANSFORM_NORMAL, 0, 0, true);
output_cursor_damage_whole(output->hardware_cursor);
output->hardware_cursor = NULL;
}
// If it's possible to use hardware cursors again, don't switch immediately
// since a recorder is likely to lock software cursors for the next frame
// again.
}
static void output_scissor(struct wlr_output *output, pixman_box32_t *rect) {
struct wlr_renderer *renderer = wlr_backend_get_renderer(output->backend);
assert(renderer);
struct wlr_box box = {
.x = rect->x1,
.y = rect->y1,
.width = rect->x2 - rect->x1,
.height = rect->y2 - rect->y1,
};
int ow, oh;
wlr_output_transformed_resolution(output, &ow, &oh);
enum wl_output_transform transform =
wlr_output_transform_invert(output->transform);
wlr_box_transform(&box, &box, transform, ow, oh);
wlr_renderer_scissor(renderer, &box);
}
static void output_cursor_get_box(struct wlr_output_cursor *cursor,
struct wlr_box *box);
static void output_cursor_render(struct wlr_output_cursor *cursor,
pixman_region32_t *damage) {
struct wlr_renderer *renderer =
wlr_backend_get_renderer(cursor->output->backend);
assert(renderer);
struct wlr_texture *texture = cursor->texture;
if (cursor->surface != NULL) {
texture = wlr_surface_get_texture(cursor->surface);
}
if (texture == NULL) {
return;
}
struct wlr_box box;
output_cursor_get_box(cursor, &box);
pixman_region32_t surface_damage;
pixman_region32_init(&surface_damage);
pixman_region32_union_rect(&surface_damage, &surface_damage, box.x, box.y,
box.width, box.height);
pixman_region32_intersect(&surface_damage, &surface_damage, damage);
if (!pixman_region32_not_empty(&surface_damage)) {
goto surface_damage_finish;
}
float matrix[9];
wlr_matrix_project_box(matrix, &box, WL_OUTPUT_TRANSFORM_NORMAL, 0,
cursor->output->transform_matrix);
int nrects;
pixman_box32_t *rects = pixman_region32_rectangles(&surface_damage, &nrects);
for (int i = 0; i < nrects; ++i) {
output_scissor(cursor->output, &rects[i]);
wlr_render_texture_with_matrix(renderer, texture, matrix, 1.0f);
}
wlr_renderer_scissor(renderer, NULL);
surface_damage_finish:
pixman_region32_fini(&surface_damage);
}
void wlr_output_render_software_cursors(struct wlr_output *output,
pixman_region32_t *damage) {
int width, height;
wlr_output_transformed_resolution(output, &width, &height);
pixman_region32_t render_damage;
pixman_region32_init(&render_damage);
pixman_region32_union_rect(&render_damage, &render_damage, 0, 0,
width, height);
if (damage != NULL) {
// Damage tracking supported
pixman_region32_intersect(&render_damage, &render_damage, damage);
}
if (pixman_region32_not_empty(&render_damage)) {
struct wlr_output_cursor *cursor;
wl_list_for_each(cursor, &output->cursors, link) {
if (!cursor->enabled || !cursor->visible ||
output->hardware_cursor == cursor) {
continue;
}
output_cursor_render(cursor, &render_damage);
}
}
pixman_region32_fini(&render_damage);
}
/**
* Returns the cursor box, scaled for its output.
*/
static void output_cursor_get_box(struct wlr_output_cursor *cursor,
struct wlr_box *box) {
box->x = cursor->x - cursor->hotspot_x;
box->y = cursor->y - cursor->hotspot_y;
box->width = cursor->width;
box->height = cursor->height;
}
static void output_cursor_damage_whole(struct wlr_output_cursor *cursor) {
struct wlr_box box;
output_cursor_get_box(cursor, &box);
pixman_region32_t damage;
pixman_region32_init_rect(&damage, box.x, box.y, box.width, box.height);
struct wlr_output_event_damage event = {
.output = cursor->output,
.damage = &damage,
};
wlr_signal_emit_safe(&cursor->output->events.damage, &event);
pixman_region32_fini(&damage);
}
static void output_cursor_reset(struct wlr_output_cursor *cursor) {
if (cursor->output->hardware_cursor != cursor) {
output_cursor_damage_whole(cursor);
}
if (cursor->surface != NULL) {
wl_list_remove(&cursor->surface_commit.link);
wl_list_remove(&cursor->surface_destroy.link);
if (cursor->visible) {
wlr_surface_send_leave(cursor->surface, cursor->output);
}
cursor->surface = NULL;
}
}
static void output_cursor_update_visible(struct wlr_output_cursor *cursor) {
struct wlr_box output_box;
output_box.x = output_box.y = 0;
wlr_output_transformed_resolution(cursor->output, &output_box.width,
&output_box.height);
struct wlr_box cursor_box;
output_cursor_get_box(cursor, &cursor_box);
struct wlr_box intersection;
bool visible =
wlr_box_intersection(&intersection, &output_box, &cursor_box);
if (cursor->surface != NULL) {
if (cursor->visible && !visible) {
wlr_surface_send_leave(cursor->surface, cursor->output);
}
if (!cursor->visible && visible) {
wlr_surface_send_enter(cursor->surface, cursor->output);
}
}
cursor->visible = visible;
}
static bool output_cursor_attempt_hardware(struct wlr_output_cursor *cursor) {
float scale = cursor->output->scale;
enum wl_output_transform transform = WL_OUTPUT_TRANSFORM_NORMAL;
struct wlr_texture *texture = cursor->texture;
if (cursor->surface != NULL) {
texture = wlr_surface_get_texture(cursor->surface);
scale = cursor->surface->current.scale;
transform = cursor->surface->current.transform;
}
if (cursor->output->software_cursor_locks > 0) {
return false;
}
struct wlr_output_cursor *hwcur = cursor->output->hardware_cursor;
if (cursor->output->impl->set_cursor && (hwcur == NULL || hwcur == cursor)) {
// If the cursor was hidden or was a software cursor, the hardware
// cursor position is outdated
assert(cursor->output->impl->move_cursor);
cursor->output->impl->move_cursor(cursor->output,
(int)cursor->x, (int)cursor->y);
if (cursor->output->impl->set_cursor(cursor->output, texture,
scale, transform, cursor->hotspot_x, cursor->hotspot_y, true)) {
cursor->output->hardware_cursor = cursor;
return true;
}
}
return false;
}
bool wlr_output_cursor_set_image(struct wlr_output_cursor *cursor,
const uint8_t *pixels, int32_t stride, uint32_t width, uint32_t height,
int32_t hotspot_x, int32_t hotspot_y) {
struct wlr_renderer *renderer =
wlr_backend_get_renderer(cursor->output->backend);
if (!renderer) {
// if the backend has no renderer, we can't draw a cursor, but this is
// actually okay, for ex. with the noop backend
return true;
}
output_cursor_reset(cursor);
cursor->width = width;
cursor->height = height;
cursor->hotspot_x = hotspot_x;
cursor->hotspot_y = hotspot_y;
output_cursor_update_visible(cursor);
wlr_texture_destroy(cursor->texture);
cursor->texture = NULL;
cursor->enabled = false;
if (pixels != NULL) {
cursor->texture = wlr_texture_from_pixels(renderer,
WL_SHM_FORMAT_ARGB8888, stride, width, height, pixels);
if (cursor->texture == NULL) {
return false;
}
cursor->enabled = true;
}
if (output_cursor_attempt_hardware(cursor)) {
return true;
}
wlr_log(WLR_DEBUG, "Falling back to software cursor on output '%s'",
cursor->output->name);
output_cursor_damage_whole(cursor);
return true;
}
static void output_cursor_commit(struct wlr_output_cursor *cursor,
bool update_hotspot) {
if (cursor->output->hardware_cursor != cursor) {
output_cursor_damage_whole(cursor);
}
struct wlr_surface *surface = cursor->surface;
assert(surface != NULL);
// Some clients commit a cursor surface with a NULL buffer to hide it.
cursor->enabled = wlr_surface_has_buffer(surface);
cursor->width = surface->current.width * cursor->output->scale;
cursor->height = surface->current.height * cursor->output->scale;
output_cursor_update_visible(cursor);
if (update_hotspot) {
cursor->hotspot_x -= surface->current.dx * cursor->output->scale;
cursor->hotspot_y -= surface->current.dy * cursor->output->scale;
}
if (output_cursor_attempt_hardware(cursor)) {
return;
}
// Fallback to software cursor
output_cursor_damage_whole(cursor);
}
static void output_cursor_handle_commit(struct wl_listener *listener,
void *data) {
struct wlr_output_cursor *cursor =
wl_container_of(listener, cursor, surface_commit);
output_cursor_commit(cursor, true);
}
static void output_cursor_handle_destroy(struct wl_listener *listener,
void *data) {
struct wlr_output_cursor *cursor = wl_container_of(listener, cursor,
surface_destroy);
output_cursor_reset(cursor);
}
void wlr_output_cursor_set_surface(struct wlr_output_cursor *cursor,
struct wlr_surface *surface, int32_t hotspot_x, int32_t hotspot_y) {
hotspot_x *= cursor->output->scale;
hotspot_y *= cursor->output->scale;
if (surface && surface == cursor->surface) {
// Only update the hotspot: surface hasn't changed
if (cursor->output->hardware_cursor != cursor) {
output_cursor_damage_whole(cursor);
}
cursor->hotspot_x = hotspot_x;
cursor->hotspot_y = hotspot_y;
if (cursor->output->hardware_cursor != cursor) {
output_cursor_damage_whole(cursor);
} else {
assert(cursor->output->impl->set_cursor);
cursor->output->impl->set_cursor(cursor->output, NULL,
1, WL_OUTPUT_TRANSFORM_NORMAL, hotspot_x, hotspot_y, false);
}
return;
}
output_cursor_reset(cursor);
cursor->surface = surface;
cursor->hotspot_x = hotspot_x;
cursor->hotspot_y = hotspot_y;
if (surface != NULL) {
wl_signal_add(&surface->events.commit, &cursor->surface_commit);
wl_signal_add(&surface->events.destroy, &cursor->surface_destroy);
cursor->visible = false;
output_cursor_commit(cursor, false);
} else {
cursor->enabled = false;
cursor->width = 0;
cursor->height = 0;
if (cursor->output->hardware_cursor == cursor) {
assert(cursor->output->impl->set_cursor);
cursor->output->impl->set_cursor(cursor->output, NULL, 1,
WL_OUTPUT_TRANSFORM_NORMAL, 0, 0, true);
}
}
}
bool wlr_output_cursor_move(struct wlr_output_cursor *cursor,
double x, double y) {
if (cursor->x == x && cursor->y == y) {
return true;
}
if (cursor->output->hardware_cursor != cursor) {
output_cursor_damage_whole(cursor);
}
bool was_visible = cursor->visible;
x *= cursor->output->scale;
y *= cursor->output->scale;
cursor->x = x;
cursor->y = y;
output_cursor_update_visible(cursor);
if (!was_visible && !cursor->visible) {
// Cursor is still hidden, do nothing
return true;
}
if (cursor->output->hardware_cursor != cursor) {
output_cursor_damage_whole(cursor);
return true;
}
assert(cursor->output->impl->move_cursor);
return cursor->output->impl->move_cursor(cursor->output, (int)x, (int)y);
}
struct wlr_output_cursor *wlr_output_cursor_create(struct wlr_output *output) {
struct wlr_output_cursor *cursor =
calloc(1, sizeof(struct wlr_output_cursor));
if (cursor == NULL) {
return NULL;
}
cursor->output = output;
wl_signal_init(&cursor->events.destroy);
wl_list_init(&cursor->surface_commit.link);
cursor->surface_commit.notify = output_cursor_handle_commit;
wl_list_init(&cursor->surface_destroy.link);
cursor->surface_destroy.notify = output_cursor_handle_destroy;
wl_list_insert(&output->cursors, &cursor->link);
cursor->visible = true; // default position is at (0, 0)
return cursor;
}
void wlr_output_cursor_destroy(struct wlr_output_cursor *cursor) {
if (cursor == NULL) {
return;
}
output_cursor_reset(cursor);
wlr_signal_emit_safe(&cursor->events.destroy, cursor);
if (cursor->output->hardware_cursor == cursor) {
// If this cursor was the hardware cursor, disable it
if (cursor->output->impl->set_cursor) {
cursor->output->impl->set_cursor(cursor->output, NULL, 1,
WL_OUTPUT_TRANSFORM_NORMAL, 0, 0, true);
}
cursor->output->hardware_cursor = NULL;
}
wlr_texture_destroy(cursor->texture);
wl_list_remove(&cursor->link);
free(cursor);
}
enum wl_output_transform wlr_output_transform_invert(
enum wl_output_transform tr) {
if ((tr & WL_OUTPUT_TRANSFORM_90) && !(tr & WL_OUTPUT_TRANSFORM_FLIPPED)) {
tr ^= WL_OUTPUT_TRANSFORM_180;
}
return tr;
}
enum wl_output_transform wlr_output_transform_compose(
enum wl_output_transform tr_a, enum wl_output_transform tr_b) {
uint32_t flipped = (tr_a ^ tr_b) & WL_OUTPUT_TRANSFORM_FLIPPED;
uint32_t rotation_mask = WL_OUTPUT_TRANSFORM_90 | WL_OUTPUT_TRANSFORM_180;
uint32_t rotated;
if (tr_b & WL_OUTPUT_TRANSFORM_FLIPPED) {
// When a rotation of k degrees is followed by a flip, the
// equivalent transform is a flip followed by a rotation of
// -k degrees.
rotated = (tr_b - tr_a) & rotation_mask;
} else {
rotated = (tr_a + tr_b) & rotation_mask;
}
return flipped | rotated;
}
| 1 | 15,769 | Nothing would change here even if this `if` statement were removed. | swaywm-wlroots | c |
@@ -38,10 +38,11 @@ assert_pyspark_version()
from databricks.koalas.frame import DataFrame
from databricks.koalas.indexes import Index, MultiIndex
from databricks.koalas.series import Series
-from databricks.koalas.typedef import Col, pandas_wraps
+from databricks.koalas.typedef import pandas_wraps
+from databricks.koalas.sql import sql
__all__ = ['read_csv', 'read_parquet', 'to_datetime', 'from_pandas',
- 'get_dummies', 'DataFrame', 'Series', 'Index', 'MultiIndex', 'Col', 'pandas_wraps',
+ 'get_dummies', 'DataFrame', 'Series', 'Index', 'MultiIndex', 'pandas_wraps',
'sql', 'range', 'concat', 'melt']
| 1 |
#
# Copyright (C) 2019 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from databricks.koalas.version import __version__
def assert_pyspark_version():
import logging
pyspark_ver = None
try:
import pyspark
except ImportError:
raise ImportError('Unable to import pyspark - consider doing a pip install with [spark] '
'extra to install pyspark with pip')
else:
pyspark_ver = getattr(pyspark, '__version__')
if pyspark_ver is None or pyspark_ver < '2.4':
logging.warning(
'Found pyspark version "{}" installed. pyspark>=2.4.0 is recommended.'
.format(pyspark_ver if pyspark_ver is not None else '<unknown version>'))
assert_pyspark_version()
from databricks.koalas.frame import DataFrame
from databricks.koalas.indexes import Index, MultiIndex
from databricks.koalas.series import Series
from databricks.koalas.typedef import Col, pandas_wraps
__all__ = ['read_csv', 'read_parquet', 'to_datetime', 'from_pandas',
'get_dummies', 'DataFrame', 'Series', 'Index', 'MultiIndex', 'Col', 'pandas_wraps',
'sql', 'range', 'concat', 'melt']
def _auto_patch():
import os
import logging
# Attach a usage logger.
logger_module = os.getenv("KOALAS_USAGE_LOGGER", None)
if logger_module is not None:
try:
from databricks.koalas import usage_logging
usage_logging.attach(logger_module)
except Exception as e:
from pyspark.util import _exception_message
logger = logging.getLogger('databricks.koalas.usage_logger')
logger.warning('Tried to attach usage logger `{}`, but an exception was raised: {}'
.format(logger_module, _exception_message(e)))
# Autopatching is on by default.
x = os.getenv("SPARK_KOALAS_AUTOPATCH", "true")
if x.lower() in ("true", "1", "enabled"):
logger = logging.getLogger('spark')
logger.info("Patching spark automatically. You can disable it by setting "
"SPARK_KOALAS_AUTOPATCH=false in your environment")
from pyspark.sql import dataframe as df
df.DataFrame.to_koalas = DataFrame.to_koalas
_auto_patch()
# Import after the usage logger is attached.
from databricks.koalas.namespace import *
from databricks.koalas.sql import sql
| 1 | 10,052 | The `sql` module is already imported later in this file. | databricks-koalas | py |
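A note on the review comment above: this `__init__.py` already ends with `from databricks.koalas.sql import sql`, placed after `_auto_patch()` and under the comment "# Import after the usage logger is attached.", so the top-level import added by the patch is redundant at best. Below is a minimal, self-contained sketch (hypothetical names, not the Koalas implementation) of why a name bound before the usage logger is attached would bypass it:

import types

calls = []

def attach_usage_logger(module):
    # Stand-in for usage_logging.attach(): wrap public callables so every call is recorded.
    for name, fn in list(vars(module).items()):
        if callable(fn) and not name.startswith("_"):
            def wrapped(*args, _fn=fn, _name=name, **kwargs):
                calls.append(_name)
                return _fn(*args, **kwargs)
            setattr(module, name, wrapped)

pkg = types.ModuleType("pkg")
pkg.sql = lambda query: "result of " + query

early_sql = pkg.sql       # like importing `sql` near the top of __init__.py
attach_usage_logger(pkg)  # like _auto_patch() attaching the usage logger
late_sql = pkg.sql        # like the existing import at the bottom of the file

early_sql("SELECT 1")     # bypasses the wrapper, nothing is recorded
late_sql("SELECT 2")      # recorded by the wrapper
print(calls)              # ['sql']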
@@ -69,6 +69,15 @@ class GARPNHead(RPNTestMixin, GuidedAnchorHead):
cfg,
rescale=False):
cfg = self.test_cfg if cfg is None else cfg
+
+ # refactor the nms cfg
+ # this is used to avoid a breaking change
+ if 'nms' not in cfg:
+ cfg.nms = dict(type='nms', iou_threshold=cfg.nms_thr)
+ cfg.max_per_img = cfg.max_num
+
+ assert cfg.nms.get('type', 'nms') == 'nms', 'GARPNHead only supports ' \
+ 'naive nms.'
mlvl_proposals = []
for idx in range(len(cls_scores)):
rpn_cls_score = cls_scores[idx]
| 1 |
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import normal_init
from mmcv.ops import nms
from ..builder import HEADS
from .guided_anchor_head import GuidedAnchorHead
from .rpn_test_mixin import RPNTestMixin
@HEADS.register_module()
class GARPNHead(RPNTestMixin, GuidedAnchorHead):
"""Guided-Anchor-based RPN head."""
def __init__(self, in_channels, **kwargs):
super(GARPNHead, self).__init__(1, in_channels, **kwargs)
def _init_layers(self):
"""Initialize layers of the head."""
self.rpn_conv = nn.Conv2d(
self.in_channels, self.feat_channels, 3, padding=1)
super(GARPNHead, self)._init_layers()
def init_weights(self):
"""Initialize weights of the head."""
normal_init(self.rpn_conv, std=0.01)
super(GARPNHead, self).init_weights()
def forward_single(self, x):
"""Forward feature of a single scale level."""
x = self.rpn_conv(x)
x = F.relu(x, inplace=True)
(cls_score, bbox_pred, shape_pred,
loc_pred) = super(GARPNHead, self).forward_single(x)
return cls_score, bbox_pred, shape_pred, loc_pred
def loss(self,
cls_scores,
bbox_preds,
shape_preds,
loc_preds,
gt_bboxes,
img_metas,
gt_bboxes_ignore=None):
losses = super(GARPNHead, self).loss(
cls_scores,
bbox_preds,
shape_preds,
loc_preds,
gt_bboxes,
None,
img_metas,
gt_bboxes_ignore=gt_bboxes_ignore)
return dict(
loss_rpn_cls=losses['loss_cls'],
loss_rpn_bbox=losses['loss_bbox'],
loss_anchor_shape=losses['loss_shape'],
loss_anchor_loc=losses['loss_loc'])
def _get_bboxes_single(self,
cls_scores,
bbox_preds,
mlvl_anchors,
mlvl_masks,
img_shape,
scale_factor,
cfg,
rescale=False):
cfg = self.test_cfg if cfg is None else cfg
mlvl_proposals = []
for idx in range(len(cls_scores)):
rpn_cls_score = cls_scores[idx]
rpn_bbox_pred = bbox_preds[idx]
anchors = mlvl_anchors[idx]
mask = mlvl_masks[idx]
assert rpn_cls_score.size()[-2:] == rpn_bbox_pred.size()[-2:]
# if no location is kept, end.
if mask.sum() == 0:
continue
rpn_cls_score = rpn_cls_score.permute(1, 2, 0)
if self.use_sigmoid_cls:
rpn_cls_score = rpn_cls_score.reshape(-1)
scores = rpn_cls_score.sigmoid()
else:
rpn_cls_score = rpn_cls_score.reshape(-1, 2)
# note that we set FG labels to [0, num_class-1]
# since mmdet v2.0
# BG cat_id: num_class
scores = rpn_cls_score.softmax(dim=1)[:, :-1]
# filter scores, bbox_pred w.r.t. mask.
# anchors are filtered in get_anchors() beforehand.
scores = scores[mask]
rpn_bbox_pred = rpn_bbox_pred.permute(1, 2, 0).reshape(-1,
4)[mask, :]
if scores.dim() == 0:
rpn_bbox_pred = rpn_bbox_pred.unsqueeze(0)
anchors = anchors.unsqueeze(0)
scores = scores.unsqueeze(0)
# filter anchors, bbox_pred, scores w.r.t. scores
if cfg.nms_pre > 0 and scores.shape[0] > cfg.nms_pre:
_, topk_inds = scores.topk(cfg.nms_pre)
rpn_bbox_pred = rpn_bbox_pred[topk_inds, :]
anchors = anchors[topk_inds, :]
scores = scores[topk_inds]
# get proposals w.r.t. anchors and rpn_bbox_pred
proposals = self.bbox_coder.decode(
anchors, rpn_bbox_pred, max_shape=img_shape)
# filter out too small bboxes
if cfg.min_bbox_size > 0:
w = proposals[:, 2] - proposals[:, 0]
h = proposals[:, 3] - proposals[:, 1]
valid_inds = torch.nonzero(
(w >= cfg.min_bbox_size) & (h >= cfg.min_bbox_size),
as_tuple=False).squeeze()
proposals = proposals[valid_inds, :]
scores = scores[valid_inds]
# NMS in current level
proposals, _ = nms(proposals, scores, cfg.nms_thr)
proposals = proposals[:cfg.nms_post, :]
mlvl_proposals.append(proposals)
proposals = torch.cat(mlvl_proposals, 0)
if cfg.nms_across_levels:
# NMS across multi levels
proposals, _ = nms(proposals[:, :4], proposals[:, -1], cfg.nms_thr)
proposals = proposals[:cfg.max_num, :]
else:
scores = proposals[:, 4]
num = min(cfg.max_num, proposals.shape[0])
_, topk_inds = scores.topk(num)
proposals = proposals[topk_inds, :]
return proposals
| 1 | 22,598 | I think this is strange; wouldn't `assert cfg.nms.type == 'nms'` be better? | open-mmlab-mmdetection | py |
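On the review comment above: with `cfg.nms.get('type', 'nms')`, a config whose nms block lacks a 'type' key silently satisfies the assert, while direct access surfaces the problem. A minimal sketch of the difference, using a plain dict in place of the mmcv ConfigDict (so `['type']` stands in for the attribute access `cfg.nms.type`):

nms_cfg = dict(iou_threshold=0.7)           # note: no 'type' key at all

# As written in the patch: the default makes the check vacuous for this config.
assert nms_cfg.get('type', 'nms') == 'nms'  # passes even though 'type' is absent

# The reviewer's suggested form fails loudly instead of masking the missing key.
try:
    assert nms_cfg['type'] == 'nms', 'GARPNHead only supports naive nms.'
except KeyError:
    print("nms config has no 'type' key")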
@@ -33,14 +33,7 @@ import globalize from '../scripts/globalize';
});
options.buttons = items;
-
- return dialog.show(options).then(function (result) {
- if (result === 'ok') {
- return Promise.resolve();
- }
-
- return Promise.reject();
- });
+ return dialog.show(options);
}
return Promise.resolve();
| 1 |
import browser from '../scripts/browser';
import dialog from './dialog/dialog';
import globalize from '../scripts/globalize';
/* eslint-disable indent */
function replaceAll(originalString, strReplace, strWith) {
const reg = new RegExp(strReplace, 'ig');
return originalString.replace(reg, strWith);
}
export default function (text, title) {
let options;
if (typeof text === 'string') {
options = {
title: title,
text: text
};
} else {
options = text;
}
if (browser.tv && window.alert) {
alert(replaceAll(options.text || '', '<br/>', '\n'));
} else {
const items = [];
items.push({
name: globalize.translate('ButtonGotIt'),
id: 'ok',
type: 'submit'
});
options.buttons = items;
return dialog.show(options).then(function (result) {
if (result === 'ok') {
return Promise.resolve();
}
return Promise.reject();
});
}
return Promise.resolve();
}
/* eslint-enable indent */
| 1 | 18,667 | Doing a quick search, it looks like there are other places that would be handling this promise rejection. How did you confirm this is not needed? | jellyfin-jellyfin-web | js |
@@ -68,7 +68,7 @@ func TestCreateContract(t *testing.T) {
_, err = accountutil.LoadOrCreateAccount(sm, addr.String())
require.NoError(err)
hu := config.NewHeightUpgrade(&cfg.Genesis)
- stateDB := NewStateDBAdapter(sm, 0, hu.IsPre(config.Aleutian, 0), hash.ZeroHash256)
+ stateDB := NewStateDBAdapter(sm, 0, hu.IsPre(config.Aleutian, 0), hu.IsPost(config.Fairbank, 0), hash.ZeroHash256)
contract := addr.Bytes()
var evmContract common.Address
copy(evmContract[:], contract[:])
| 1 |
// Copyright (c) 2019 IoTeX Foundation
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package evm
import (
"math/big"
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/golang/mock/gomock"
"github.com/pkg/errors"
"github.com/stretchr/testify/require"
"github.com/iotexproject/go-pkgs/hash"
"github.com/iotexproject/iotex-core/action/protocol"
accountutil "github.com/iotexproject/iotex-core/action/protocol/account/util"
"github.com/iotexproject/iotex-core/config"
"github.com/iotexproject/iotex-core/db/batch"
"github.com/iotexproject/iotex-core/state"
"github.com/iotexproject/iotex-core/test/identityset"
"github.com/iotexproject/iotex-core/test/mock/mock_chainmanager"
"github.com/iotexproject/iotex-core/testutil"
)
func TestCreateContract(t *testing.T) {
require := require.New(t)
ctrl := gomock.NewController(t)
defer ctrl.Finish()
testTriePath, err := testutil.PathOfTempFile("trie")
require.NoError(err)
cfg := config.Default
cfg.Chain.TrieDBPath = testTriePath
sm := mock_chainmanager.NewMockStateManager(ctrl)
cb := batch.NewCachedBatch()
sm.EXPECT().State(gomock.Any(), gomock.Any()).DoAndReturn(
func(account interface{}, opts ...protocol.StateOption) (uint64, error) {
cfg, err := protocol.CreateStateConfig(opts...)
if err != nil {
return 0, err
}
val, err := cb.Get("state", cfg.Key)
if err != nil {
return 0, state.ErrStateNotExist
}
return 0, state.Deserialize(account, val)
}).AnyTimes()
sm.EXPECT().PutState(gomock.Any(), gomock.Any()).DoAndReturn(
func(account interface{}, opts ...protocol.StateOption) (uint64, error) {
cfg, err := protocol.CreateStateConfig(opts...)
if err != nil {
return 0, err
}
ss, err := state.Serialize(account)
if err != nil {
return 0, err
}
cb.Put("state", cfg.Key, ss, "failed to put state")
return 0, nil
}).AnyTimes()
addr := identityset.Address(28)
_, err = accountutil.LoadOrCreateAccount(sm, addr.String())
require.NoError(err)
hu := config.NewHeightUpgrade(&cfg.Genesis)
stateDB := NewStateDBAdapter(sm, 0, hu.IsPre(config.Aleutian, 0), hash.ZeroHash256)
contract := addr.Bytes()
var evmContract common.Address
copy(evmContract[:], contract[:])
stateDB.SetCode(evmContract, bytecode)
// contract exists
codeHash := stateDB.GetCodeHash(evmContract)
var emptyEVMHash common.Hash
require.NotEqual(emptyEVMHash, codeHash)
v := stateDB.GetCode(evmContract)
require.Equal(bytecode, v)
// non-existing contract
addr1 := hash.Hash160b([]byte("random"))
var evmAddr1 common.Address
copy(evmAddr1[:], addr1[:])
h := stateDB.GetCodeHash(evmAddr1)
require.Equal(emptyEVMHash, h)
require.Nil(stateDB.GetCode(evmAddr1))
require.NoError(stateDB.CommitContracts())
stateDB.clear()
// reload same contract
contract1, err := accountutil.LoadOrCreateAccount(sm, addr.String())
require.NoError(err)
require.Equal(codeHash[:], contract1.CodeHash)
}
func TestLoadStoreCommit(t *testing.T) {
require := require.New(t)
testLoadStoreCommit := func(cfg config.Config, t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
sm, err := initMockStateManager(ctrl)
require.NoError(err)
cntr1, err := newContract(hash.BytesToHash160(c1[:]), &state.Account{}, sm)
require.NoError(err)
tests := []cntrTest{
{
cntr1,
[]code{
{c1, []byte("2nd contract creation")},
},
[]set{
{k1b, v1b[:], nil},
{k2b, v2b[:], nil},
},
},
{
cntr1,
[]code{
{c2, bytecode},
},
[]set{
{k1b, v4b[:], nil},
{k2b, v3b[:], nil},
{k3b, v2b[:], nil},
{k4b, v1b[:], nil},
},
},
{
cntr1,
nil,
[]set{
{k1b, v2b[:], nil},
{k2b, v1b[:], nil},
{k3b, v4b[:], nil},
{k4b, nil, nil},
},
},
}
for i, test := range tests {
c := test.contract
// set code
for _, e := range test.codes {
c.SetCode(hash.Hash256b(e.v), e.v)
}
// set states
for _, e := range test.states {
require.NoError(c.SetState(e.k, e.v))
if i > 0 {
// committed state == value of previous test's SetState()
committed := tests[i-1].states
for _, e := range committed {
v, err := c.GetCommittedState(e.k)
require.NoError(err)
require.Equal(e.v, v)
}
}
v, err := c.GetState(e.k)
require.NoError(err)
require.Equal(e.v, v)
}
require.NoError(c.Commit())
}
checks := []cntrTest{
{
cntr1,
[]code{
{c1, bytecode},
},
[]set{
{k1b, v2b[:], nil},
{k2b, v1b[:], nil},
{k3b, v4b[:], nil},
{k4b, nil, nil},
},
},
}
for _, test := range checks {
c := test.contract
// check code
for _, e := range test.codes {
v, err := c.GetCode()
require.NoError(err)
require.Equal(e.v, v)
chash := hash.Hash256b(e.v)
require.Equal(chash[:], c.SelfState().CodeHash)
require.NotEqual(hash.ZeroHash256, hash.BytesToHash256(chash[:]))
}
// check states
for _, e := range test.states {
v, err := c.GetState(e.k)
require.Equal(e.v, v)
if err != nil {
require.Equal(e.cause, errors.Cause(err))
}
}
}
}
testTriePath, err := testutil.PathOfTempFile("trie")
require.NoError(err)
defer func() {
testutil.CleanupPath(t, testTriePath)
}()
cfg := config.Default
cfg.Chain.TrieDBPath = testTriePath
t.Run("contract load/store with stateDB", func(t *testing.T) {
testLoadStoreCommit(cfg, t)
})
testTriePath2, err := testutil.PathOfTempFile("trie")
require.NoError(err)
defer func() {
testutil.CleanupPath(t, testTriePath2)
}()
cfg.Chain.EnableTrielessStateDB = false
cfg.Chain.TrieDBPath = testTriePath2
t.Run("contract load/store with trie", func(t *testing.T) {
testLoadStoreCommit(cfg, t)
})
}
func TestSnapshot(t *testing.T) {
require := require.New(t)
ctrl := gomock.NewController(t)
defer ctrl.Finish()
sm, err := initMockStateManager(ctrl)
require.NoError(err)
s := &state.Account{
Balance: big.NewInt(5),
}
c1, err := newContract(
hash.BytesToHash160(identityset.Address(28).Bytes()),
s,
sm,
)
require.NoError(err)
require.NoError(c1.SetState(k2b, v2[:]))
c2 := c1.Snapshot()
require.NoError(c1.SelfState().AddBalance(big.NewInt(7)))
require.NoError(c1.SetState(k1b, v1[:]))
require.Equal(big.NewInt(12), c1.SelfState().Balance)
require.Equal(big.NewInt(5), c2.SelfState().Balance)
require.NotEqual(c1.RootHash(), c2.RootHash())
}
| 1 | 21,945 | Let's use Greenland in the unit tests as well. | iotexproject-iotex-core | go |
@@ -48,8 +48,10 @@ def reportPassThrough(treeInterceptor,onlyIfChanged=True):
nvwave.playWaveFile(sound)
else:
if treeInterceptor.passThrough:
+ # Translators: The mode to interact with controls in documents
ui.message(_("Focus mode"))
else:
+ # Translators: The mode that presents text in a flat representation that can be navigated with the cursor keys like in a text document
ui.message(_("Browse mode"))
reportPassThrough.last = treeInterceptor.passThrough
reportPassThrough.last = False
| 1 |
#browseMode.py
#A part of NonVisual Desktop Access (NVDA)
#Copyright (C) 2007-2017 NV Access Limited, Babbage B.V.
#This file is covered by the GNU General Public License.
#See the file COPYING for more details.
import itertools
import collections
import winsound
import time
import weakref
import wx
from logHandler import log
import review
import scriptHandler
import eventHandler
import nvwave
import queueHandler
import gui
import ui
import cursorManager
from scriptHandler import isScriptWaiting, willSayAllResume
import aria
import controlTypes
import config
import textInfos
import braille
import speech
import sayAllHandler
import treeInterceptorHandler
import inputCore
import api
import gui.guiHelper
from NVDAObjects import NVDAObject
REASON_QUICKNAV = "quickNav"
def reportPassThrough(treeInterceptor,onlyIfChanged=True):
"""Reports the pass through mode if it has changed.
@param treeInterceptor: The current Browse Mode treeInterceptor.
@type treeInterceptor: L{BrowseModeTreeInterceptor}
@param onlyIfChanged: if true reporting will not happen if the last reportPassThrough reported the same thing.
@type onlyIfChanged: bool
"""
if not onlyIfChanged or treeInterceptor.passThrough != reportPassThrough.last:
if config.conf["virtualBuffers"]["passThroughAudioIndication"]:
sound = r"waves\focusMode.wav" if treeInterceptor.passThrough else r"waves\browseMode.wav"
nvwave.playWaveFile(sound)
else:
if treeInterceptor.passThrough:
ui.message(_("Focus mode"))
else:
ui.message(_("Browse mode"))
reportPassThrough.last = treeInterceptor.passThrough
reportPassThrough.last = False
def mergeQuickNavItemIterators(iterators,direction="next"):
"""
Merges multiple iterators that emit L{QuickNavItem} objects, yielding them from first to last.
They are sorted using min or max (__lt__ should be implemented on the L{QuickNavItem} objects).
@param iterators: the iterators you want to merge.
@type iterators: sequence of iterators that emit L{QuickNavItem} objects.
@param direction: the direction these iterators are searching (e.g. next, previous)
@type direction: string
"""
finder=min if direction=="next" else max
curValues=[]
# Populate a list with all iterators and their corresponding first value
for it in iterators:
try:
val=next(it)
except StopIteration:
continue
curValues.append((it,val))
# Until all iterators have been used up,
# Find the first (minimum or maximum) of all the values,
# emit that, and update the list with the next available value for the iterator whose value was emitted.
while len(curValues)>0:
first=finder(curValues,key=lambda x: x[1])
curValues.remove(first)
it,val=first
yield val
try:
newVal=next(it)
except StopIteration:
continue
curValues.append((it,newVal))
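# Illustrative note (not part of the original module): any values implementing __lt__
# merge the same way as QuickNavItem objects, so with plain integers the ordering is
# easy to see:
#   list(mergeQuickNavItemIterators([iter([1, 4, 7]), iter([2, 3, 9])], "next"))
#   yields [1, 2, 3, 4, 7, 9].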
class QuickNavItem(object):
""" Emitted by L{BrowseModeTreeInterceptor._iterNodesByType}, this represents one of many positions in a browse mode document, based on the type of item being searched for (e.g. link, heading, table etc)."""
itemType=None #: The type of items searched for (e.g. link, heading, table etc)
label=None #: The label that should represent this item in the Elements list.
isAfterSelection=False #: Is this item positioned after the caret in the document? Used by the elements list to place its own selection.
def __init__(self,itemType,document):
"""
@param itemType: the type that was searched for (e.g. link, heading, table etc)
@type itemType: string
@param document: the browse mode document this item is a part of.
@type document: L{BrowseModeTreeInterceptor}
"""
self.itemType=itemType
self.document=document
def isChild(self,parent):
"""
Is this item a child of the given parent?
This is used when representing items in a hierarchical tree structure, such as the Elements List.
@param parent: the item of whom this item may be a child of.
@type parent: L{QuickNavItem}
@return: True if this item is a child, false otherwise.
@rtype: bool
"""
raise NotImplementedError
def report(self,readUnit=None):
"""
Reports the contents of this item.
@param readUnit: the optional unit (e.g. line, paragraph) that should be used to announce the item position when moved to. If not given, then the full size of the item is used.
@type readUnit: a L{textInfos}.UNIT_* constant.
"""
raise NotImplementedError
def moveTo(self):
"""
Moves the browse mode caret or focus to this item.
"""
raise NotImplementedError
def activate(self):
"""
Activates this item's position. E.g. follows a link, presses a button etc.
"""
raise NotImplementedError
def rename(self,newName):
"""
Renames this item with the new name.
"""
raise NotImplementedError
@property
def isRenameAllowed(self):
return False
class TextInfoQuickNavItem(QuickNavItem):
""" Represents a quick nav item in a browse mode document who's positions are represented by a L{textInfos.TextInfo}. """
def __init__(self,itemType,document,textInfo):
"""
See L{QuickNavItem.__init__} for itemType and document argument definitions.
@param textInfo: the textInfo position this item represents.
@type textInfo: L{textInfos.TextInfo}
"""
self.textInfo=textInfo
super(TextInfoQuickNavItem,self).__init__(itemType,document)
def __lt__(self,other):
return self.textInfo.compareEndPoints(other.textInfo,"startToStart")<0
@property
def obj(self):
return self.textInfo.basePosition if isinstance(self.textInfo.basePosition,NVDAObject) else None
@property
def label(self):
return self.textInfo.text.strip()
def isChild(self,parent):
if parent.textInfo.isOverlapping(self.textInfo):
return True
return False
def report(self,readUnit=None):
info=self.textInfo
if readUnit:
fieldInfo = info.copy()
info.collapse()
info.move(readUnit, 1, endPoint="end")
if info.compareEndPoints(fieldInfo, "endToEnd") > 0:
# We've expanded past the end of the field, so limit to the end of the field.
info.setEndPoint(fieldInfo, "endToEnd")
speech.speakTextInfo(info, reason=controlTypes.REASON_FOCUS)
def activate(self):
self.textInfo.obj._activatePosition(self.textInfo)
def moveTo(self):
info=self.textInfo.copy()
info.collapse()
self.document._set_selection(info,reason=REASON_QUICKNAV)
@property
def isAfterSelection(self):
caret=self.document.makeTextInfo(textInfos.POSITION_CARET)
return self.textInfo.compareEndPoints(caret, "startToStart") > 0
def _getLabelForProperties(self, labelPropertyGetter):
"""
Fetches required properties for this L{TextInfoQuickNavItem} and constructs a label to be shown in an elements list.
This can be used by subclasses to implement the L{label} property.
@param labelPropertyGetter: A callable taking 1 argument, specifying the property to fetch.
For example, if L{itemType} is landmark, the callable must return the landmark type when "landmark" is passed as the property argument.
Alternative property names might be name or value.
The callable must return None if the property doesn't exist.
An expected callable might be get method on a L{Dict},
or "lambda property: getattr(self.obj, property, None)" for an L{NVDAObject}.
"""
content = self.textInfo.text.strip()
if self.itemType is "heading":
# Output: displayed text of the heading.
return content
labelParts = None
name = labelPropertyGetter("name")
if self.itemType is "landmark":
landmark = aria.landmarkRoles.get(labelPropertyGetter("landmark"))
# Example output: main menu; navigation
labelParts = (name, landmark)
else:
role = labelPropertyGetter("role")
roleText = controlTypes.roleLabels[role]
# Translators: Reported label in the elements list for an element which has no name and value
unlabeled = _("Unlabeled")
realStates = labelPropertyGetter("states")
positiveStates = " ".join(controlTypes.stateLabels[st] for st in controlTypes.processPositiveStates(role, realStates, controlTypes.REASON_FOCUS, realStates))
negativeStates = " ".join(controlTypes.negativeStateLabels[st] for st in controlTypes.processNegativeStates(role, realStates, controlTypes.REASON_FOCUS, realStates))
if self.itemType is "formField":
if role in (controlTypes.ROLE_BUTTON,controlTypes.ROLE_DROPDOWNBUTTON,controlTypes.ROLE_TOGGLEBUTTON,controlTypes.ROLE_SPLITBUTTON,controlTypes.ROLE_MENUBUTTON,controlTypes.ROLE_DROPDOWNBUTTONGRID,controlTypes.ROLE_SPINBUTTON,controlTypes.ROLE_TREEVIEWBUTTON):
# Example output: Mute; toggle button; pressed
labelParts = (content or name or unlabeled, roleText, positiveStates, negativeStates)
else:
# Example output: Find a repository...; edit; has auto complete; NVDA
labelParts = (name or unlabeled, roleText, positiveStates, negativeStates, content)
elif self.itemType in ("link", "button"):
# Example output: You have unread notifications; visited
labelParts = (content or name or unlabeled, positiveStates, negativeStates)
if labelParts:
label = "; ".join(lp for lp in labelParts if lp)
else:
label = content
return label
class BrowseModeTreeInterceptor(treeInterceptorHandler.TreeInterceptor):
scriptCategory = inputCore.SCRCAT_BROWSEMODE
disableAutoPassThrough = False
APPLICATION_ROLES = (controlTypes.ROLE_APPLICATION, controlTypes.ROLE_DIALOG)
def _get_currentNVDAObject(self):
raise NotImplementedError
ALWAYS_SWITCH_TO_PASS_THROUGH_ROLES = frozenset({
controlTypes.ROLE_COMBOBOX,
controlTypes.ROLE_EDITABLETEXT,
controlTypes.ROLE_LIST,
controlTypes.ROLE_SLIDER,
controlTypes.ROLE_TABCONTROL,
controlTypes.ROLE_MENUBAR,
controlTypes.ROLE_POPUPMENU,
controlTypes.ROLE_TREEVIEW,
controlTypes.ROLE_TREEVIEWITEM,
controlTypes.ROLE_SPINBUTTON,
controlTypes.ROLE_TABLEROW,
controlTypes.ROLE_TABLECELL,
controlTypes.ROLE_TABLEROWHEADER,
controlTypes.ROLE_TABLECOLUMNHEADER,
})
SWITCH_TO_PASS_THROUGH_ON_FOCUS_ROLES = frozenset({
controlTypes.ROLE_LISTITEM,
controlTypes.ROLE_RADIOBUTTON,
controlTypes.ROLE_TAB,
controlTypes.ROLE_MENUITEM,
controlTypes.ROLE_RADIOMENUITEM,
controlTypes.ROLE_CHECKMENUITEM,
})
def shouldPassThrough(self, obj, reason=None):
"""Determine whether pass through mode should be enabled (focus mode) or disabled (browse mode) for a given object.
@param obj: The object in question.
@type obj: L{NVDAObjects.NVDAObject}
@param reason: The reason for this query; one of the output reasons, L{REASON_QUICKNAV}, or C{None} for manual pass through mode activation by the user.
@return: C{True} if pass through mode (focus mode) should be enabled, C{False} if it should be disabled (browse mode).
"""
if reason and (
self.disableAutoPassThrough
or (reason == controlTypes.REASON_FOCUS and not config.conf["virtualBuffers"]["autoPassThroughOnFocusChange"])
or (reason == controlTypes.REASON_CARET and not config.conf["virtualBuffers"]["autoPassThroughOnCaretMove"])
):
# This check relates to auto pass through and auto pass through is disabled, so don't change the pass through state.
return self.passThrough
if reason == REASON_QUICKNAV:
return False
states = obj.states
role = obj.role
if controlTypes.STATE_EDITABLE in states and controlTypes.STATE_UNAVAILABLE not in states:
return True
# Menus sometimes get focus due to menuStart events even though they don't report as focused/focusable.
if not obj.isFocusable and controlTypes.STATE_FOCUSED not in states and role != controlTypes.ROLE_POPUPMENU:
return False
# many controls that are read-only should not switch to passThrough.
# However, certain controls such as combo boxes and readonly edits are read-only but still interactive.
# #5118: read-only ARIA grids should also be allowed (focusable table cells, rows and headers).
if controlTypes.STATE_READONLY in states and role not in (controlTypes.ROLE_EDITABLETEXT, controlTypes.ROLE_COMBOBOX, controlTypes.ROLE_TABLEROW, controlTypes.ROLE_TABLECELL, controlTypes.ROLE_TABLEROWHEADER, controlTypes.ROLE_TABLECOLUMNHEADER):
return False
# Any roles or states for which we always switch to passThrough
if role in self.ALWAYS_SWITCH_TO_PASS_THROUGH_ROLES or controlTypes.STATE_EDITABLE in states:
return True
# focus is moving to this control. Perhaps after pressing tab or clicking a button that brings up a menu (via javascript)
if reason == controlTypes.REASON_FOCUS:
if role in self.SWITCH_TO_PASS_THROUGH_ON_FOCUS_ROLES:
return True
# If this is a focus change, pass through should be enabled for certain ancestor containers.
# this is done last for performance considerations. Walking up the through the parents could be costly
while obj and obj != self.rootNVDAObject:
if obj.role == controlTypes.ROLE_TOOLBAR:
return True
obj = obj.parent
return False
def _get_shouldTrapNonCommandGestures(self):
return config.conf['virtualBuffers']['trapNonCommandGestures']
def script_trapNonCommandGesture(self,gesture):
winsound.PlaySound("default",1)
singleLetterNavEnabled=True #: Whether single letter navigation scripts should be active (true) or if these letters should fall to the application.
def getAlternativeScript(self,gesture,script):
if self.passThrough or not gesture.isCharacter:
return script
if not self.singleLetterNavEnabled:
return None
if not script and self.shouldTrapNonCommandGestures:
script=self.script_trapNonCommandGesture
return script
def script_toggleSingleLetterNav(self,gesture):
if self.singleLetterNavEnabled:
self.singleLetterNavEnabled=False
# Translators: Reported when single letter navigation in browse mode is turned off.
ui.message(_("Single letter navigation off"))
else:
self.singleLetterNavEnabled=True
# Translators: Reported when single letter navigation in browse mode is turned on.
ui.message(_("Single letter navigation on"))
# Translators: the description for the toggleSingleLetterNavigation command in browse mode.
script_toggleSingleLetterNav.__doc__=_("Toggles single letter navigation on and off. When on, single letter keys in browse mode jump to various kinds of elements on the page. When off, these keys are passed to the application")
def _get_ElementsListDialog(self):
return ElementsListDialog
def _iterNodesByType(self,itemType,direction="next",pos=None):
"""
Yields L{QuickNavItem} objects representing the ordered positions in this document according to the type being searched for (e.g. link, heading, table etc).
@param itemType: the type being searched for (e.g. link, heading, table etc)
@type itemType: string
@param direction: the direction in which to search (next, previous, up)
@type direction: string
@param pos: the position in the document from where to start the search.
@type pos: Usually an L{textInfos.TextInfo}
@raise NotImplementedError: This type is not supported by this BrowseMode implementation
"""
raise NotImplementedError
def _iterNotLinkBlock(self, direction="next", pos=None):
raise NotImplementedError
def _quickNavScript(self,gesture, itemType, direction, errorMessage, readUnit):
if itemType=="notLinkBlock":
iterFactory=self._iterNotLinkBlock
else:
iterFactory=lambda direction,info: self._iterNodesByType(itemType,direction,info)
info=self.selection
try:
item = next(iterFactory(direction, info))
except NotImplementedError:
# Translators: a message when a particular quick nav command is not supported in the current document.
ui.message(_("Not supported in this document"))
return
except StopIteration:
ui.message(errorMessage)
return
item.moveTo()
if not gesture or not willSayAllResume(gesture):
item.report(readUnit=readUnit)
@classmethod
def addQuickNav(cls, itemType, key, nextDoc, nextError, prevDoc, prevError, readUnit=None):
"""Adds a script for the given quick nav item.
@param itemType: The type of item, I.E. "heading" "Link" ...
@param key: The quick navigation key to bind to the script. Shift is automatically added for the previous item gesture. E.G. h for heading
@param nextDoc: The command description to bind to the script that yields the next quick nav item.
@param nextError: The error message if there are no more quick nav items of type itemType in this direction.
@param prevDoc: The command description to bind to the script that yields the previous quick nav item.
@param prevError: The error message if there are no more quick nav items of type itemType in this direction.
@param readUnit: The unit (one of the textInfos.UNIT_* constants) to announce when moving to this type of item.
For example, only the line is read when moving to tables to avoid reading a potentially massive table.
If None, the entire item will be announced.
"""
scriptSuffix = itemType[0].upper() + itemType[1:]
scriptName = "next%s" % scriptSuffix
funcName = "script_%s" % scriptName
script = lambda self,gesture: self._quickNavScript(gesture, itemType, "next", nextError, readUnit)
script.__doc__ = nextDoc
script.__name__ = funcName
script.resumeSayAllMode=sayAllHandler.CURSOR_CARET
setattr(cls, funcName, script)
cls.__gestures["kb:%s" % key] = scriptName
scriptName = "previous%s" % scriptSuffix
funcName = "script_%s" % scriptName
script = lambda self,gesture: self._quickNavScript(gesture, itemType, "previous", prevError, readUnit)
script.__doc__ = prevDoc
script.__name__ = funcName
script.resumeSayAllMode=sayAllHandler.CURSOR_CARET
setattr(cls, funcName, script)
cls.__gestures["kb:shift+%s" % key] = scriptName
def script_elementsList(self,gesture):
# We need this to be a modal dialog, but it mustn't block this script.
def run():
gui.mainFrame.prePopup()
d = self.ElementsListDialog(self)
d.ShowModal()
d.Destroy()
gui.mainFrame.postPopup()
wx.CallAfter(run)
# Translators: the description for the Elements List command in browse mode.
script_elementsList.__doc__ = _("Lists various types of elements in this document")
def _activateNVDAObject(self, obj):
"""Activate an object in response to a user request.
This should generally perform the default action or click on the object.
@param obj: The object to activate.
@type obj: L{NVDAObjects.NVDAObject}
"""
obj.doAction()
def _activatePosition(self,obj=None):
if not obj:
obj=self.currentNVDAObject
if not obj:
return
if obj.role == controlTypes.ROLE_MATH:
import mathPres
try:
return mathPres.interactWithMathMl(obj.mathMl)
except (NotImplementedError, LookupError):
pass
return
if self.shouldPassThrough(obj):
obj.setFocus()
self.passThrough = True
reportPassThrough(self)
elif obj.role == controlTypes.ROLE_EMBEDDEDOBJECT or obj.role in self.APPLICATION_ROLES:
obj.setFocus()
speech.speakObject(obj, reason=controlTypes.REASON_FOCUS)
else:
self._activateNVDAObject(obj)
def script_activatePosition(self,gesture):
self._activatePosition()
# Translators: the description for the activatePosition script on browseMode documents.
script_activatePosition.__doc__ = _("Activates the current object in the document")
def script_disablePassThrough(self, gesture):
if not self.passThrough or self.disableAutoPassThrough:
return gesture.send()
self.passThrough = False
self.disableAutoPassThrough = False
reportPassThrough(self)
script_disablePassThrough.ignoreTreeInterceptorPassThrough = True
__gestures={
"kb:NVDA+f7": "elementsList",
"kb:enter": "activatePosition",
"kb:numpadEnter": "activatePosition",
"kb:space": "activatePosition",
"kb:NVDA+shift+space":"toggleSingleLetterNav",
"kb:escape": "disablePassThrough",
}
# Add quick navigation scripts.
qn = BrowseModeTreeInterceptor.addQuickNav
qn("heading", key="h",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next heading"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next heading"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous heading"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous heading"))
qn("heading1", key="1",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next heading at level 1"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next heading at level 1"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous heading at level 1"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous heading at level 1"))
qn("heading2", key="2",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next heading at level 2"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next heading at level 2"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous heading at level 2"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous heading at level 2"))
qn("heading3", key="3",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next heading at level 3"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next heading at level 3"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous heading at level 3"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous heading at level 3"))
qn("heading4", key="4",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next heading at level 4"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next heading at level 4"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous heading at level 4"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous heading at level 4"))
qn("heading5", key="5",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next heading at level 5"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next heading at level 5"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous heading at level 5"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous heading at level 5"))
qn("heading6", key="6",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next heading at level 6"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next heading at level 6"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous heading at level 6"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous heading at level 6"))
qn("table", key="t",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next table"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next table"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous table"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous table"),
readUnit=textInfos.UNIT_LINE)
qn("link", key="k",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next link"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next link"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous link"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous link"))
qn("visitedLink", key="v",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next visited link"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next visited link"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous visited link"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous visited link"))
qn("unvisitedLink", key="u",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next unvisited link"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next unvisited link"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous unvisited link"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous unvisited link"))
qn("formField", key="f",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next form field"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next form field"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous form field"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous form field"),
readUnit=textInfos.UNIT_LINE)
qn("list", key="l",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next list"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next list"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous list"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous list"),
readUnit=textInfos.UNIT_LINE)
qn("listItem", key="i",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next list item"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next list item"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous list item"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous list item"))
qn("button", key="b",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next button"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next button"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous button"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous button"))
qn("edit", key="e",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next edit field"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next edit field"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous edit field"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous edit field"),
readUnit=textInfos.UNIT_LINE)
qn("frame", key="m",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next frame"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next frame"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous frame"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous frame"),
readUnit=textInfos.UNIT_LINE)
qn("separator", key="s",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next separator"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next separator"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous separator"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous separator"))
qn("radioButton", key="r",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next radio button"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next radio button"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous radio button"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous radio button"))
qn("comboBox", key="c",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next combo box"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next combo box"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous combo box"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous combo box"))
qn("checkBox", key="x",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next check box"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next check box"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous check box"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous check box"))
qn("graphic", key="g",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next graphic"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next graphic"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous graphic"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous graphic"))
qn("blockQuote", key="q",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next block quote"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next block quote"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous block quote"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous block quote"))
qn("notLinkBlock", key="n",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("skips forward past a block of links"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no more text after a block of links"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("skips backward past a block of links"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no more text before a block of links"),
readUnit=textInfos.UNIT_LINE)
qn("landmark", key="d",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next landmark"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next landmark"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous landmark"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous landmark"),
readUnit=textInfos.UNIT_LINE)
qn("embeddedObject", key="o",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next embedded object"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next embedded object"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous embedded object"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous embedded object"))
qn("annotation", key="a",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next annotation"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next annotation"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous annotation"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous annotation"))
qn("error", key="w",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next error"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next error"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous error"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous error"))
del qn
class ElementsListDialog(wx.Dialog):
ELEMENT_TYPES = (
# Translators: The label of a radio button to select the type of element
# in the browse mode Elements List dialog.
("link", _("Lin&ks")),
# Translators: The label of a radio button to select the type of element
# in the browse mode Elements List dialog.
("heading", _("&Headings")),
# Translators: The label of a radio button to select the type of element
# in the browse mode Elements List dialog.
("formField", _("&Form fields")),
# Translators: The label of a radio button to select the type of element
# in the browse mode Elements List dialog.
("button", _("&Buttons")),
# Translators: The label of a radio button to select the type of element
# in the browse mode Elements List dialog.
("landmark", _("Lan&dmarks")),
)
Element = collections.namedtuple("Element", ("item", "parent"))
lastSelectedElementType=0
def __init__(self, document):
self.document = document
# Translators: The title of the browse mode Elements List dialog.
super(ElementsListDialog, self).__init__(gui.mainFrame, wx.ID_ANY, _("Elements List"))
mainSizer = wx.BoxSizer(wx.VERTICAL)
contentsSizer = wx.BoxSizer(wx.VERTICAL)
# Translators: The label of a group of radio buttons to select the type of element
# in the browse mode Elements List dialog.
child = wx.RadioBox(self, wx.ID_ANY, label=_("Type:"), choices=tuple(et[1] for et in self.ELEMENT_TYPES))
child.SetSelection(self.lastSelectedElementType)
child.Bind(wx.EVT_RADIOBOX, self.onElementTypeChange)
contentsSizer.Add(child, flag=wx.EXPAND)
contentsSizer.AddSpacer(gui.guiHelper.SPACE_BETWEEN_VERTICAL_DIALOG_ITEMS)
self.tree = wx.TreeCtrl(self, size=wx.Size(500, 600), style=wx.TR_HAS_BUTTONS | wx.TR_HIDE_ROOT | wx.TR_LINES_AT_ROOT | wx.TR_SINGLE | wx.TR_EDIT_LABELS)
self.tree.Bind(wx.EVT_SET_FOCUS, self.onTreeSetFocus)
self.tree.Bind(wx.EVT_CHAR, self.onTreeChar)
self.tree.Bind(wx.EVT_TREE_BEGIN_LABEL_EDIT, self.onTreeLabelEditBegin)
self.tree.Bind(wx.EVT_TREE_END_LABEL_EDIT, self.onTreeLabelEditEnd)
self.treeRoot = self.tree.AddRoot("root")
contentsSizer.Add(self.tree,flag=wx.EXPAND)
contentsSizer.AddSpacer(gui.guiHelper.SPACE_BETWEEN_VERTICAL_DIALOG_ITEMS)
# Translators: The label of an editable text field to filter the elements
# in the browse mode Elements List dialog.
filterText = _("&Filter by:")
labeledCtrl = gui.guiHelper.LabeledControlHelper(self, filterText, wx.TextCtrl)
self.filterEdit = labeledCtrl.control
self.filterEdit.Bind(wx.EVT_TEXT, self.onFilterEditTextChange)
contentsSizer.Add(labeledCtrl.sizer)
contentsSizer.AddSpacer(gui.guiHelper.SPACE_BETWEEN_VERTICAL_DIALOG_ITEMS)
bHelper = gui.guiHelper.ButtonHelper(wx.HORIZONTAL)
# Translators: The label of a button to activate an element
# in the browse mode Elements List dialog.
self.activateButton = bHelper.addButton(self, label=_("&Activate"))
self.activateButton.Bind(wx.EVT_BUTTON, lambda evt: self.onAction(True))
# Translators: The label of a button to move to an element
# in the browse mode Elements List dialog.
self.moveButton = bHelper.addButton(self, label=_("&Move to"))
self.moveButton.Bind(wx.EVT_BUTTON, lambda evt: self.onAction(False))
bHelper.addButton(self, id=wx.ID_CANCEL)
contentsSizer.Add(bHelper.sizer, flag=wx.ALIGN_RIGHT)
mainSizer.Add(contentsSizer, border=gui.guiHelper.BORDER_FOR_DIALOGS, flag=wx.ALL)
mainSizer.Fit(self)
self.SetSizer(mainSizer)
self.tree.SetFocus()
self.initElementType(self.ELEMENT_TYPES[self.lastSelectedElementType][0])
self.Center(wx.BOTH | wx.CENTER_ON_SCREEN)
def onElementTypeChange(self, evt):
elementType=evt.GetInt()
# We need to make sure this gets executed after the focus event.
# Otherwise, NVDA doesn't seem to get the event.
queueHandler.queueFunction(queueHandler.eventQueue, self.initElementType, self.ELEMENT_TYPES[elementType][0])
self.lastSelectedElementType=elementType
def initElementType(self, elType):
if elType in ("link","button"):
# Links and buttons can be activated.
self.activateButton.Enable()
self.SetAffirmativeId(self.activateButton.GetId())
else:
# No other element type can be activated.
self.activateButton.Disable()
self.SetAffirmativeId(self.moveButton.GetId())
# Gather the elements of this type.
self._elements = []
self._initialElement = None
parentElements = []
isAfterSelection=False
for item in self.document._iterNodesByType(elType):
# Find the parent element, if any.
for parent in reversed(parentElements):
if item.isChild(parent.item):
break
else:
# We're not a child of this parent, so this parent has no more children and can be removed from the stack.
parentElements.pop()
else:
# No parent found, so we're at the root.
# Note that parentElements will be empty at this point, as all parents are no longer relevant and have thus been removed from the stack.
parent = None
element=self.Element(item,parent)
self._elements.append(element)
if not isAfterSelection:
isAfterSelection=item.isAfterSelection
if not isAfterSelection:
# The element immediately preceding or overlapping the caret should be the initially selected element.
# Since we have not yet passed the selection, use this as the initial element.
try:
self._initialElement = self._elements[-1]
except IndexError:
# No previous element.
pass
# This could be the parent of a subsequent element, so add it to the parents stack.
parentElements.append(element)
# Start with no filtering.
self.filterEdit.ChangeValue("")
self.filter("", newElementType=True)
def filter(self, filterText, newElementType=False):
# If this is a new element type, use the element nearest the cursor.
# Otherwise, use the currently selected element.
defaultElement = self._initialElement if newElementType else self.tree.GetItemPyData(self.tree.GetSelection())
# Clear the tree.
self.tree.DeleteChildren(self.treeRoot)
# Populate the tree with elements matching the filter text.
elementsToTreeItems = {}
defaultItem = None
matched = False
#Do case-insensitive matching by lowering both filterText and each element's text.
filterText=filterText.lower()
for element in self._elements:
label=element.item.label
if filterText and filterText not in label.lower():
continue
matched = True
parent = element.parent
if parent:
parent = elementsToTreeItems.get(parent)
item = self.tree.AppendItem(parent or self.treeRoot, label)
self.tree.SetItemPyData(item, element)
elementsToTreeItems[element] = item
if element == defaultElement:
defaultItem = item
self.tree.ExpandAll()
if not matched:
# No items, so disable the buttons.
self.activateButton.Disable()
self.moveButton.Disable()
return
# If there's no default item, use the first item in the tree.
self.tree.SelectItem(defaultItem or self.tree.GetFirstChild(self.treeRoot)[0])
# Enable the button(s).
# If the activate button isn't the default button, it is disabled for this element type and shouldn't be enabled here.
if self.AffirmativeId == self.activateButton.Id:
self.activateButton.Enable()
self.moveButton.Enable()
def onTreeSetFocus(self, evt):
# Start with no search.
self._searchText = ""
self._searchCallLater = None
evt.Skip()
def onTreeChar(self, evt):
key = evt.KeyCode
if key == wx.WXK_RETURN:
# The enter key should be propagated to the dialog and thus activate the default button,
# but this is broken (wx ticket #3725).
# Therefore, we must catch the enter key here.
# Activate the current default button.
evt = wx.CommandEvent(wx.wxEVT_COMMAND_BUTTON_CLICKED, wx.ID_ANY)
button = self.FindWindowById(self.AffirmativeId)
if button.Enabled:
button.ProcessEvent(evt)
else:
wx.Bell()
elif key == wx.WXK_F2:
item=self.tree.GetSelection()
if item:
selectedItemType=self.tree.GetItemPyData(item).item
self.tree.EditLabel(item)
evt.Skip()
elif key >= wx.WXK_START or key == wx.WXK_BACK:
# Non-printable character.
self._searchText = ""
evt.Skip()
else:
# Search the list.
# We have to implement this ourselves, as tree views don't accept space as a search character.
char = unichr(evt.UnicodeKey).lower()
# If the same character is typed twice, do the same search.
if self._searchText != char:
self._searchText += char
if self._searchCallLater:
self._searchCallLater.Restart()
else:
self._searchCallLater = wx.CallLater(1000, self._clearSearchText)
self.search(self._searchText)
def onTreeLabelEditBegin(self,evt):
item=self.tree.GetSelection()
selectedItemType = self.tree.GetItemPyData(item).item
if not selectedItemType.isRenameAllowed:
evt.Veto()
def onTreeLabelEditEnd(self,evt):
selectedItemNewName=evt.GetLabel()
item=self.tree.GetSelection()
selectedItemType = self.tree.GetItemPyData(item).item
selectedItemType.rename(selectedItemNewName)
def _clearSearchText(self):
self._searchText = ""
def search(self, searchText):
item = self.tree.GetSelection()
if not item:
# No items.
return
# First try searching from the current item.
# Failing that, search from the first item.
items = itertools.chain(self._iterReachableTreeItemsFromItem(item), self._iterReachableTreeItemsFromItem(self.tree.GetFirstChild(self.treeRoot)[0]))
if len(searchText) == 1:
# If only a single character has been entered, skip (search after) the current item.
next(items)
for item in items:
if self.tree.GetItemText(item).lower().startswith(searchText):
self.tree.SelectItem(item)
return
# Not found.
wx.Bell()
def _iterReachableTreeItemsFromItem(self, item):
while item:
yield item
childItem = self.tree.GetFirstChild(item)[0]
if childItem and self.tree.IsExpanded(item):
# Has children and is reachable, so recurse.
for childItem in self._iterReachableTreeItemsFromItem(childItem):
yield childItem
item = self.tree.GetNextSibling(item)
def onFilterEditTextChange(self, evt):
self.filter(self.filterEdit.GetValue())
evt.Skip()
def onAction(self, activate):
self.Close()
# Save off the last selected element type onto the class so it's used in initialization next time.
self.__class__.lastSelectedElementType=self.lastSelectedElementType
item = self.tree.GetSelection()
item = self.tree.GetItemPyData(item).item
if activate:
item.activate()
else:
def move():
speech.cancelSpeech()
item.moveTo()
item.report()
wx.CallLater(100, move)
class BrowseModeDocumentTextInfo(textInfos.TextInfo):
def getControlFieldSpeech(self, attrs, ancestorAttrs, fieldType, formatConfig=None, extraDetail=False, reason=None):
textList = []
landmark = attrs.get("landmark")
if formatConfig["reportLandmarks"] and fieldType == "start_addedToControlFieldStack" and landmark:
try:
textList.append(attrs["name"])
except KeyError:
pass
if landmark == "region":
# The word landmark is superfluous for regions.
textList.append(aria.landmarkRoles[landmark])
else:
textList.append(_("%s landmark") % aria.landmarkRoles[landmark])
textList.append(super(BrowseModeDocumentTextInfo, self).getControlFieldSpeech(attrs, ancestorAttrs, fieldType, formatConfig, extraDetail, reason))
return " ".join(textList)
def getControlFieldBraille(self, field, ancestors, reportStart, formatConfig):
textList = []
landmark = field.get("landmark")
if formatConfig["reportLandmarks"] and reportStart and landmark and field.get("_startOfNode"):
try:
textList.append(field["name"])
except KeyError:
pass
if landmark == "region":
# The word landmark is superfluous for regions.
textList.append(braille.landmarkLabels[landmark])
else:
# Translators: This is brailled to indicate a landmark (example output: lmk main).
textList.append(_("lmk %s") % braille.landmarkLabels[landmark])
text = super(BrowseModeDocumentTextInfo, self).getControlFieldBraille(field, ancestors, reportStart, formatConfig)
if text:
textList.append(text)
return " ".join(textList)
def _get_focusableNVDAObjectAtStart(self):
try:
item = next(self.obj._iterNodesByType("focusable", "up", self))
except StopIteration:
return self.obj.rootNVDAObject
if not item:
return self.obj.rootNVDAObject
return item.obj
class BrowseModeDocumentTreeInterceptor(cursorManager.CursorManager,BrowseModeTreeInterceptor,treeInterceptorHandler.DocumentTreeInterceptor):
programmaticScrollMayFireEvent = False
def __init__(self,obj):
super(BrowseModeDocumentTreeInterceptor,self).__init__(obj)
self._lastProgrammaticScrollTime = None
self.documentConstantIdentifier = self.documentConstantIdentifier
self._lastFocusObj = None
self._hadFirstGainFocus = False
self._enteringFromOutside = True
# We need to cache this because it will be unavailable once the document dies.
if not hasattr(self.rootNVDAObject.appModule, "_browseModeRememberedCaretPositions"):
self.rootNVDAObject.appModule._browseModeRememberedCaretPositions = {}
self._lastCaretPosition = None
#: True if the last caret move was due to a focus change.
self._lastCaretMoveWasFocus = False
def terminate(self):
if self.shouldRememberCaretPositionAcrossLoads and self._lastCaretPosition:
try:
self.rootNVDAObject.appModule._browseModeRememberedCaretPositions[self.documentConstantIdentifier] = self._lastCaretPosition
except AttributeError:
# The app module died.
pass
def _get_currentNVDAObject(self):
return self.makeTextInfo(textInfos.POSITION_CARET).NVDAObjectAtStart
def event_treeInterceptor_gainFocus(self):
"""Triggered when this browse mode document gains focus.
This event is only fired upon entering this treeInterceptor when it was not the current treeInterceptor before.
This is different to L{event_gainFocus}, which is fired when an object inside this treeInterceptor gains focus, even if that object is in the same treeInterceptor.
"""
doSayAll=False
hadFirstGainFocus=self._hadFirstGainFocus
if not hadFirstGainFocus:
# This treeInterceptor is gaining focus for the first time.
# Fake a focus event on the focus object, as the treeInterceptor may have missed the actual focus event.
focus = api.getFocusObject()
self.event_gainFocus(focus, lambda: focus.event_gainFocus())
if not self.passThrough:
# We only set the caret position if in browse mode.
# If in focus mode, the document must have forced the focus somewhere,
# so we don't want to override it.
initialPos = self._getInitialCaretPos()
if initialPos:
self.selection = self.makeTextInfo(initialPos)
reportPassThrough(self)
doSayAll=config.conf['virtualBuffers']['autoSayAllOnPageLoad']
self._hadFirstGainFocus = True
if not self.passThrough:
if doSayAll:
speech.speakObjectProperties(self.rootNVDAObject,name=True,states=True,reason=controlTypes.REASON_FOCUS)
sayAllHandler.readText(sayAllHandler.CURSOR_CARET)
else:
# Speak it like we would speak focus on any other document object.
# This includes when entering the treeInterceptor for the first time:
if not hadFirstGainFocus:
speech.speakObject(self.rootNVDAObject, reason=controlTypes.REASON_FOCUS)
else:
# And when coming in from an outside object
# #4069 But not when coming up from a non-rendered descendant.
ancestors=api.getFocusAncestors()
fdl=api.getFocusDifferenceLevel()
try:
tl=ancestors.index(self.rootNVDAObject)
except ValueError:
tl=len(ancestors)
if fdl<=tl:
speech.speakObject(self.rootNVDAObject, reason=controlTypes.REASON_FOCUS)
info = self.selection
if not info.isCollapsed:
speech.speakSelectionMessage(_("selected %s"), info.text)
else:
info.expand(textInfos.UNIT_LINE)
speech.speakTextInfo(info, reason=controlTypes.REASON_CARET, unit=textInfos.UNIT_LINE)
reportPassThrough(self)
braille.handler.handleGainFocus(self)
def event_caret(self, obj, nextHandler):
if self.passThrough:
nextHandler()
def _activateLongDesc(self,controlField):
"""
Activates (presents) the long description for a particular field (usually a graphic).
@param controlField: the field whose long description should be activated. This field is guaranteed to have states containing the HASLONGDESC state.
@type controlField: dict
"""
raise NotImplementedError
def _activatePosition(self, info=None):
obj=None
if info:
obj=info.NVDAObjectAtStart
if not obj:
return
super(BrowseModeDocumentTreeInterceptor,self)._activatePosition(obj)
def _set_selection(self, info, reason=controlTypes.REASON_CARET):
super(BrowseModeDocumentTreeInterceptor, self)._set_selection(info)
if isScriptWaiting() or not info.isCollapsed:
return
# Save the last caret position for use in terminate().
# This must be done here because the buffer might be cleared just before terminate() is called,
# causing the last caret position to be lost.
caret = info.copy()
caret.collapse()
self._lastCaretPosition = caret.bookmark
review.handleCaretMove(caret)
if reason == controlTypes.REASON_FOCUS:
self._lastCaretMoveWasFocus = True
focusObj = api.getFocusObject()
if focusObj==self.rootNVDAObject:
return
else:
self._lastCaretMoveWasFocus = False
focusObj=info.focusableNVDAObjectAtStart
obj=info.NVDAObjectAtStart
if not obj:
log.debugWarning("Invalid NVDAObjectAtStart")
return
if obj==self.rootNVDAObject:
return
if focusObj and not eventHandler.isPendingEvents("gainFocus") and focusObj!=self.rootNVDAObject and focusObj != api.getFocusObject() and self._shouldSetFocusToObj(focusObj):
focusObj.setFocus()
obj.scrollIntoView()
if self.programmaticScrollMayFireEvent:
self._lastProgrammaticScrollTime = time.time()
self.passThrough=self.shouldPassThrough(focusObj,reason=reason)
# Queue the reporting of pass through mode so that it will be spoken after the actual content.
queueHandler.queueFunction(queueHandler.eventQueue, reportPassThrough, self)
def _shouldSetFocusToObj(self, obj):
"""Determine whether an object should receive focus.
Subclasses may extend or override this method.
@param obj: The object in question.
@type obj: L{NVDAObjects.NVDAObject}
"""
return obj.role not in self.APPLICATION_ROLES and obj.isFocusable and obj.role!=controlTypes.ROLE_EMBEDDEDOBJECT
def script_activateLongDesc(self,gesture):
info=self.makeTextInfo(textInfos.POSITION_CARET)
info.expand("character")
for field in reversed(info.getTextWithFields()):
if isinstance(field,textInfos.FieldCommand) and field.command=="controlStart":
states=field.field.get('states')
if states and controlTypes.STATE_HASLONGDESC in states:
self._activateLongDesc(field.field)
break
else:
# Translators: the message presented when the activateLongDescription script cannot locate a long description to activate.
ui.message(_("No long description"))
# Translators: the description for the activateLongDescription script on browseMode documents.
script_activateLongDesc.__doc__=_("Shows the long description at this position if one is found.")
def event_caretMovementFailed(self, obj, nextHandler, gesture=None):
if not self.passThrough or not gesture or not config.conf["virtualBuffers"]["autoPassThroughOnCaretMove"]:
return nextHandler()
if gesture.mainKeyName in ("home", "end"):
# Home, end, control+home and control+end should not disable pass through.
return nextHandler()
script = self.getScript(gesture)
if not script:
return nextHandler()
# We've hit the edge of the focused control.
# Therefore, move the virtual caret to the same edge of the field.
info = self.makeTextInfo(textInfos.POSITION_CARET)
info.expand(info.UNIT_CONTROLFIELD)
if gesture.mainKeyName in ("leftArrow", "upArrow", "pageUp"):
info.collapse()
else:
info.collapse(end=True)
info.move(textInfos.UNIT_CHARACTER, -1)
info.updateCaret()
scriptHandler.queueScript(script, gesture)
def script_collapseOrExpandControl(self, gesture):
oldFocus = api.getFocusObject()
oldFocusStates = oldFocus.states
gesture.send()
if controlTypes.STATE_COLLAPSED in oldFocusStates:
self.passThrough = True
elif not self.disableAutoPassThrough:
self.passThrough = False
reportPassThrough(self)
script_collapseOrExpandControl.ignoreTreeInterceptorPassThrough = True
def _tabOverride(self, direction):
"""Override the tab order if the virtual caret is not within the currently focused node.
This is done because many nodes are not focusable and it is thus possible for the virtual caret to be unsynchronised with the focus.
In this case, we want tab/shift+tab to move to the next/previous focusable node relative to the virtual caret.
If the virtual caret is within the focused node, the tab/shift+tab key should be passed through to allow normal tab order navigation.
Note that this method does not pass the key through itself if it is not overridden. This should be done by the calling script if C{False} is returned.
@param direction: The direction in which to move.
@type direction: str
@return: C{True} if the tab order was overridden, C{False} if not.
@rtype: bool
"""
if self._lastCaretMoveWasFocus:
# #5227: If the caret was last moved due to a focus change, don't override tab.
# This ensures that tabbing behaves as expected after tabbing hits an iframe document.
return False
focus = api.getFocusObject()
try:
focusInfo = self.makeTextInfo(focus)
except:
return False
# We only want to override the tab order if the caret is not within the focused node.
caretInfo=self.makeTextInfo(textInfos.POSITION_CARET)
#Only check that the caret is within the focus for things that are not documents
#As for documents we should always override
if focus.role!=controlTypes.ROLE_DOCUMENT or controlTypes.STATE_EDITABLE in focus.states:
# Expand to one character, as isOverlapping() doesn't yield the desired results with collapsed ranges.
caretInfo.expand(textInfos.UNIT_CHARACTER)
if focusInfo.isOverlapping(caretInfo):
return False
# If we reach here, we do want to override tab/shift+tab if possible.
# Find the next/previous focusable node.
try:
item = next(self._iterNodesByType("focusable", direction, caretInfo))
except StopIteration:
return False
obj=item.obj
newInfo=item.textInfo
if obj == api.getFocusObject():
# This node is already focused, so we need to move to and speak this node here.
newCaret = newInfo.copy()
newCaret.collapse()
self._set_selection(newCaret,reason=controlTypes.REASON_FOCUS)
if self.passThrough:
obj.event_gainFocus()
else:
speech.speakTextInfo(newInfo,reason=controlTypes.REASON_FOCUS)
else:
# This node doesn't have the focus, so just set focus to it. The gainFocus event will handle the rest.
obj.setFocus()
return True
def script_tab(self, gesture):
if not self._tabOverride("next"):
gesture.send()
def script_shiftTab(self, gesture):
if not self._tabOverride("previous"):
gesture.send()
def event_focusEntered(self,obj,nextHandler):
if obj==self.rootNVDAObject:
self._enteringFromOutside = True
# Even if passThrough is enabled, we still completely drop focusEntered events here.
# In order to get them back when passThrough is enabled, we replay them with the _replayFocusEnteredEvents method in event_gainFocus.
# The reason for this is to ensure that focusEntered events are delayed until a focus event has had a chance to disable passthrough mode.
# As in this case we would not want them.
def _shouldIgnoreFocus(self, obj):
"""Determines whether focus on a given object should be ignored.
@param obj: The object in question.
@type obj: L{NVDAObjects.NVDAObject}
@return: C{True} if focus on L{obj} should be ignored, C{False} otherwise.
@rtype: bool
"""
return False
def _postGainFocus(self, obj):
"""Executed after a gainFocus within the browseMode document.
This will not be executed if L{event_gainFocus} determined that it should abort and call nextHandler.
@param obj: The object that gained focus.
@type obj: L{NVDAObjects.NVDAObject}
"""
def _replayFocusEnteredEvents(self):
# We blocked the focusEntered events because we were in browse mode,
# but now that we've switched to focus mode, we need to fire them.
for parent in api.getFocusAncestors()[api.getFocusDifferenceLevel():]:
try:
parent.event_focusEntered()
except:
log.exception("Error executing focusEntered event: %s" % parent)
def event_gainFocus(self, obj, nextHandler):
enteringFromOutside=self._enteringFromOutside
self._enteringFromOutside=False
if not self.isReady:
if self.passThrough:
self._replayFocusEnteredEvents()
nextHandler()
return
if enteringFromOutside and not self.passThrough and self._lastFocusObj==obj:
# We're entering the document from outside (not returning from an inside object/application; #3145)
# and this was the last non-root node with focus, so ignore this focus event.
# Otherwise, if the user switches away and back to this document, the cursor will jump to this node.
# This is not ideal if the user was positioned over a node which cannot receive focus.
return
if obj==self.rootNVDAObject:
if self.passThrough:
self._replayFocusEnteredEvents()
return nextHandler()
return
if not self.passThrough and self._shouldIgnoreFocus(obj):
return
self._lastFocusObj=obj
try:
focusInfo = self.makeTextInfo(obj)
except:
# This object is not in the treeInterceptor, even though it resides beneath the document.
# Automatic pass through should be enabled in certain circumstances where this occurs.
if not self.passThrough and self.shouldPassThrough(obj,reason=controlTypes.REASON_FOCUS):
self.passThrough=True
reportPassThrough(self)
self._replayFocusEnteredEvents()
return nextHandler()
#We only want to update the caret and speak the field if we're not in the same one as before
caretInfo=self.makeTextInfo(textInfos.POSITION_CARET)
# Expand to one character, as isOverlapping() doesn't treat, for example, (4,4) and (4,5) as overlapping.
caretInfo.expand(textInfos.UNIT_CHARACTER)
if not self._hadFirstGainFocus or not focusInfo.isOverlapping(caretInfo):
# The virtual caret is not within the focus node.
oldPassThrough=self.passThrough
passThrough=self.shouldPassThrough(obj,reason=controlTypes.REASON_FOCUS)
if not oldPassThrough and (passThrough or sayAllHandler.isRunning()):
# If pass-through is disabled, cancel speech, as a focus change should cause page reading to stop.
# This must be done before auto-pass-through occurs, as we want to stop page reading even if pass-through will be automatically enabled by this focus change.
speech.cancelSpeech()
self.passThrough=passThrough
if not self.passThrough:
# We read the info from the browseMode document instead of the control itself.
speech.speakTextInfo(focusInfo,reason=controlTypes.REASON_FOCUS)
# However, we still want to update the speech property cache so that property changes will be spoken properly.
speech.speakObject(obj,controlTypes.REASON_ONLYCACHE)
else:
# Although we are going to speak the object rather than textInfo content, we still need to silently speak the textInfo content so that the textInfo speech cache is updated correctly.
# Not doing this would cause later browseMode speaking to either not speak controlFields it had entered, or speak controlField exits after having already exited.
# See #7435 for a discussion on this.
speech.speakTextInfo(focusInfo,reason=controlTypes.REASON_ONLYCACHE)
self._replayFocusEnteredEvents()
nextHandler()
focusInfo.collapse()
self._set_selection(focusInfo,reason=controlTypes.REASON_FOCUS)
else:
# The virtual caret was already at the focused node.
if not self.passThrough:
# This focus change was caused by a virtual caret movement, so don't speak the focused node to avoid double speaking.
# However, we still want to update the speech property cache so that property changes will be spoken properly.
speech.speakObject(obj,controlTypes.REASON_ONLYCACHE)
else:
self._replayFocusEnteredEvents()
return nextHandler()
self._postGainFocus(obj)
event_gainFocus.ignoreIsReady=True
def _handleScrollTo(self, obj):
"""Handle scrolling the browseMode document to a given object in response to an event.
Subclasses should call this from an event which indicates that the document has scrolled.
@postcondition: The virtual caret is moved to L{obj} and the buffer content for L{obj} is reported.
@param obj: The object to which the document should scroll.
@type obj: L{NVDAObjects.NVDAObject}
@return: C{True} if the document was scrolled, C{False} if not.
@rtype: bool
@note: If C{False} is returned, calling events should probably call their nextHandler.
"""
if self.programmaticScrollMayFireEvent and self._lastProgrammaticScrollTime and time.time() - self._lastProgrammaticScrollTime < 0.4:
# This event was probably caused by this browseMode document's call to scrollIntoView().
# Therefore, ignore it. Otherwise, the cursor may bounce back to the scroll point.
# However, pretend we handled it, as we don't want it to be passed on to the object either.
return True
try:
scrollInfo = self.makeTextInfo(obj)
except:
return False
#We only want to update the caret and speak the field if we're not in the same one as before
caretInfo=self.makeTextInfo(textInfos.POSITION_CARET)
# Expand to one character, as isOverlapping() doesn't treat, for example, (4,4) and (4,5) as overlapping.
caretInfo.expand(textInfos.UNIT_CHARACTER)
if not scrollInfo.isOverlapping(caretInfo):
if scrollInfo.isCollapsed:
scrollInfo.expand(textInfos.UNIT_LINE)
speech.speakTextInfo(scrollInfo,reason=controlTypes.REASON_CARET)
scrollInfo.collapse()
self.selection = scrollInfo
return True
return False
def _isNVDAObjectInApplication(self, obj):
"""Determine whether a given object is within an application.
The object is considered to be within an application if it or one of its ancestors has an application role.
This should only be called on objects beneath the treeInterceptor's root NVDAObject.
@param obj: The object in question.
@type obj: L{NVDAObjects.NVDAObject}
@return: C{True} if L{obj} is within an application, C{False} otherwise.
@rtype: bool
"""
# We cache the result for each object we walk.
# There can be browse mode documents within other documents and the result might be different between these,
# so the cache must be maintained on the TreeInterceptor rather than the object itself.
try:
cache = self._isInAppCache
except AttributeError:
# Create this lazily, as this method isn't used by all browse mode implementations.
cache = self._isInAppCache = weakref.WeakKeyDictionary()
objs = []
def doResult(result):
# Cache this on descendants we've walked over.
for obj in objs:
cache[obj] = result
return result
while obj and obj != self.rootNVDAObject:
inApp = cache.get(obj)
if inApp is not None:
# We found a cached result.
return doResult(inApp)
objs.append(obj)
if obj.role in self.APPLICATION_ROLES:
return doResult(True)
# Cache container.
container = obj.container
obj.container = container
obj = container
return doResult(False)
def _get_documentConstantIdentifier(self):
"""Get the constant identifier for this document.
This identifier should uniquely identify all instances (not just one instance) of a document for at least the current session of the hosting application.
Generally, the document URL should be used.
@return: The constant identifier for this document, C{None} if there is none.
"""
return None
def _get_shouldRememberCaretPositionAcrossLoads(self):
"""Specifies whether the position of the caret should be remembered when this document is loaded again.
This is useful when the browser remembers the scroll position for the document,
but does not communicate this information via APIs.
The remembered caret position is associated with this document using L{documentConstantIdentifier}.
@return: C{True} if the caret position should be remembered, C{False} if not.
@rtype: bool
"""
docConstId = self.documentConstantIdentifier
# Return True if the URL indicates that this is probably a web browser document.
# We do this check because we don't want to remember caret positions for email messages, etc.
return isinstance(docConstId, basestring) and docConstId.split("://", 1)[0] in ("http", "https", "ftp", "ftps", "file")
def _getInitialCaretPos(self):
"""Retrieve the initial position of the caret after the buffer has been loaded.
This position, if any, will be passed to L{makeTextInfo}.
Subclasses should extend this method.
@return: The initial position of the caret, C{None} if there isn't one.
@rtype: TextInfo position
"""
if self.shouldRememberCaretPositionAcrossLoads:
try:
return self.rootNVDAObject.appModule._browseModeRememberedCaretPositions[self.documentConstantIdentifier]
except KeyError:
pass
return None
def getEnclosingContainerRange(self,range):
range=range.copy()
range.collapse()
try:
item = next(self._iterNodesByType("container", "up", range))
except (NotImplementedError,StopIteration):
try:
item = next(self._iterNodesByType("landmark", "up", range))
except (NotImplementedError,StopIteration):
return
return item.textInfo
def script_moveToStartOfContainer(self,gesture):
info=self.makeTextInfo(textInfos.POSITION_CARET)
info.expand(textInfos.UNIT_CHARACTER)
container=self.getEnclosingContainerRange(info)
if not container:
# Translators: Reported when the user attempts to move to the start or end of a container (list, table, etc.)
# But there is no container.
ui.message(_("Not in a container"))
return
container.collapse()
self._set_selection(container, reason=REASON_QUICKNAV)
if not willSayAllResume(gesture):
container.expand(textInfos.UNIT_LINE)
speech.speakTextInfo(container, reason=controlTypes.REASON_FOCUS)
script_moveToStartOfContainer.resumeSayAllMode=sayAllHandler.CURSOR_CARET
# Translators: Description for the Move to start of container command in browse mode.
script_moveToStartOfContainer.__doc__=_("Moves to the start of the container element, such as a list or table")
def script_movePastEndOfContainer(self,gesture):
info=self.makeTextInfo(textInfos.POSITION_CARET)
info.expand(textInfos.UNIT_CHARACTER)
container=self.getEnclosingContainerRange(info)
if not container:
ui.message(_("Not in a container"))
return
container.collapse(end=True)
docEnd=container.obj.makeTextInfo(textInfos.POSITION_LAST)
if container.compareEndPoints(docEnd,"endToEnd")>=0:
container=docEnd
# Translators: a message reported when:
# Review cursor is at the bottom line of the current navigator object.
# Landing at the end of a browse mode document when trying to jump to the end of the current container.
ui.message(_("Bottom"))
self._set_selection(container, reason=REASON_QUICKNAV)
if not willSayAllResume(gesture):
container.expand(textInfos.UNIT_LINE)
speech.speakTextInfo(container, reason=controlTypes.REASON_FOCUS)
script_movePastEndOfContainer.resumeSayAllMode=sayAllHandler.CURSOR_CARET
# Translators: Description for the Move past end of container command in browse mode.
script_movePastEndOfContainer.__doc__=_("Moves past the end of the container element, such as a list or table")
#: The controlField attribute name that should be used as the row number when navigating in a table. By default this is the same as the presentational attribute name
navigationalTableRowNumberAttributeName="table-rownumber"
#: The controlField attribute name that should be used as the column number when navigating in a table. By default this is the same as the presentational attribute name
navigationalTableColumnNumberAttributeName="table-columnnumber"
def _getTableCellCoords(self, info):
"""
Fetches information about the deepest table cell at the given position.
@param info: the position where the table cell should be looked for.
@type info: L{textInfos.TextInfo}
@returns: a tuple of table ID, row number, column number, row span, and column span.
@rtype: tuple
@raises: LookupError if there is no table cell at this position.
"""
if info.isCollapsed:
info = info.copy()
info.expand(textInfos.UNIT_CHARACTER)
fields=list(info.getTextWithFields())
# If layout tables should not be reported, we should first record the IDs of all layout tables so that we can skip them when searching for the deepest table
layoutIDs=set()
if not config.conf["documentFormatting"]["includeLayoutTables"]:
for field in fields:
if isinstance(field, textInfos.FieldCommand) and field.command == "controlStart" and field.field.get('table-layout'):
tableID=field.field.get('table-id')
if tableID is not None:
layoutIDs.add(tableID)
for field in reversed(fields):
if not (isinstance(field, textInfos.FieldCommand) and field.command == "controlStart"):
# Not a control field.
continue
attrs = field.field
tableID=attrs.get('table-id')
if tableID is None or tableID in layoutIDs:
continue
if self.navigationalTableColumnNumberAttributeName in attrs and not attrs.get('table-layout'):
break
else:
raise LookupError("Not in a table cell")
return (attrs["table-id"],
attrs[self.navigationalTableRowNumberAttributeName], attrs[self.navigationalTableColumnNumberAttributeName],
attrs.get("table-rowsspanned", 1), attrs.get("table-columnsspanned", 1))
def _getTableCellAt(self,tableID,startPos,row,column):
"""
Starting from the given start position, locates the table cell with the given row and column coordinates and table ID.
@param startPos: the position to start searching from.
@type startPos: L{textInfos.TextInfo}
@param tableID: the ID of the table.
@param row: the row number of the cell
@type row: int
@param column: the column number of the table cell
@type column: int
@returns: the table cell's position in the document
@rtype: L{textInfos.TextInfo}
@raises: LookupError if the cell does not exist
"""
raise NotImplementedError
_missingTableCellSearchLimit=3 #: The number of missing cells L{_getNearestTableCell} is allowed to skip over to locate the next available cell
def _getNearestTableCell(self, tableID, startPos, origRow, origCol, origRowSpan, origColSpan, movement, axis):
"""
Locates the nearest table cell relative to another table cell in a given direction, given its coordinates.
For example, this is used to move to the cell in the next column, previous row, etc.
This method will skip over missing table cells (where L{_getTableCellAt} raises LookupError), up to the number of times set by _missingTableCellSearchLimit set on this instance.
@param tableID: the ID of the table
@param startPos: the position in the document to start searching from.
@type startPos: L{textInfos.TextInfo}
@param origRow: the row number of the starting cell
@type origRow: int
@param origCol: the column number of the starting cell
@type origCol: int
@param origRowSpan: the row span of the row of the starting cell
@type origRowSpan: int
@param origColSpan: the column span of the column of the starting cell
@type origColSpan: int
@param movement: the direction ("next" or "previous")
@type movement: string
@param axis: the axis of movement ("row" or "column")
@type axis: string
@returns: the position of the nearest table cell
@rtype: L{textInfos.TextInfo}
"""
if not axis:
raise ValueError("Axis must be row or column")
# Determine destination row and column.
destRow = origRow
destCol = origCol
if axis == "row":
destRow += origRowSpan if movement == "next" else -1
elif axis == "column":
destCol += origColSpan if movement == "next" else -1
# Try and fetch the cell at these coordinates, though if a cell is missing, try several more times moving the coordinates on by one cell each time
limit=self._missingTableCellSearchLimit
while limit>0:
limit-=1
if destCol < 1 or destRow<1:
# Optimisation: We're definitely at the edge of the column or row.
raise LookupError
try:
return self._getTableCellAt(tableID,startPos,destRow,destCol)
except LookupError:
pass
if axis=="row":
destRow+=1 if movement=="next" else -1
else:
destCol+=1 if movement=="next" else -1
raise LookupError
def _tableMovementScriptHelper(self, movement="next", axis=None):
if isScriptWaiting():
return
formatConfig=config.conf["documentFormatting"].copy()
formatConfig["reportTables"]=True
try:
tableID, origRow, origCol, origRowSpan, origColSpan = self._getTableCellCoords(self.selection)
except LookupError:
# Translators: The message reported when a user attempts to use a table movement command
# when the cursor is not within a table.
ui.message(_("Not in a table cell"))
return
try:
info = self._getNearestTableCell(tableID, self.selection, origRow, origCol, origRowSpan, origColSpan, movement, axis)
except LookupError:
# Translators: The message reported when a user attempts to use a table movement command
# but the cursor can't be moved in that direction because it is at the edge of the table.
ui.message(_("Edge of table"))
# Retrieve the cell on which we started.
info = self._getTableCellAt(tableID, self.selection,origRow, origCol)
speech.speakTextInfo(info,formatConfig=formatConfig,reason=controlTypes.REASON_CARET)
info.collapse()
self.selection = info
def script_nextRow(self, gesture):
self._tableMovementScriptHelper(axis="row", movement="next")
# Translators: the description for the next table row script on browseMode documents.
script_nextRow.__doc__ = _("moves to the next table row")
def script_previousRow(self, gesture):
self._tableMovementScriptHelper(axis="row", movement="previous")
# Translators: the description for the previous table row script on browseMode documents.
script_previousRow.__doc__ = _("moves to the previous table row")
def script_nextColumn(self, gesture):
self._tableMovementScriptHelper(axis="column", movement="next")
# Translators: the description for the next table column script on browseMode documents.
script_nextColumn.__doc__ = _("moves to the next table column")
def script_previousColumn(self, gesture):
self._tableMovementScriptHelper(axis="column", movement="previous")
# Translators: the description for the previous table column script on browseMode documents.
script_previousColumn.__doc__ = _("moves to the previous table column")
NOT_LINK_BLOCK_MIN_LEN = 30
def _isSuitableNotLinkBlock(self,range):
return len(range.text)>=self.NOT_LINK_BLOCK_MIN_LEN
def _iterNotLinkBlock(self, direction="next", pos=None):
links = self._iterNodesByType("link", direction=direction, pos=pos)
# We want to compare each link against the next link.
item1 = next(links)
while True:
item2 = next(links)
# If the distance between the links is small, this is probably just a piece of non-link text within a block of links; e.g. an inactive link of a nav bar.
if direction=="previous":
range=item1.textInfo.copy()
range.collapse()
range.setEndPoint(item2.textInfo,"startToEnd")
else:
range=item2.textInfo.copy()
range.collapse()
range.setEndPoint(item1.textInfo,"startToEnd")
if self._isSuitableNotLinkBlock(range):
yield TextInfoQuickNavItem("notLinkBlock",self,range)
item1=item2
__gestures={
"kb:NVDA+d": "activateLongDesc",
"kb:alt+upArrow": "collapseOrExpandControl",
"kb:alt+downArrow": "collapseOrExpandControl",
"kb:tab": "tab",
"kb:shift+tab": "shiftTab",
"kb:shift+,": "moveToStartOfContainer",
"kb:,": "movePastEndOfContainer",
"kb:control+alt+downArrow": "nextRow",
"kb:control+alt+upArrow": "previousRow",
"kb:control+alt+rightArrow": "nextColumn",
"kb:control+alt+leftArrow": "previousColumn",
}
| 1 | 20,719 | Please split this into two lines | nvaccess-nvda | py |
@@ -77,6 +77,12 @@ namespace MvvmCross.Platform.Tvos.Views
public virtual bool ShowChildView(UIViewController viewController)
{
+ if (SelectedIndex > 5) // when more menu item is currently visible, selected index has value higher than 5
+ {
+ MoreNavigationController.PushViewController(viewController, true);
+ return true;
+ }
+
var navigationController = SelectedViewController as UINavigationController;
// if the current selected ViewController is not a NavigationController, then a child cannot be shown | 1 | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MS-PL license.
// See the LICENSE file in the project root for more information.
using System;
using System.Collections.Generic;
using System.Linq;
using MvvmCross.Platform.Tvos.Presenters;
using MvvmCross.Platform.Tvos.Presenters.Attributes;
using MvvmCross.ViewModels;
using UIKit;
namespace MvvmCross.Platform.Tvos.Views
{
public class MvxTabBarViewController
: MvxBaseTabBarViewController, IMvxTabBarViewController
{
private int _tabsCount = 0;
protected MvxTabBarViewController()
: base()
{
}
protected MvxTabBarViewController(IntPtr handle)
: base(handle)
{
}
public override void ViewWillDisappear(bool animated)
{
base.ViewWillDisappear(animated);
if (IsMovingFromParentViewController)
{
if (Mvx.TryResolve(out IMvxTvosViewPresenter iPresenter)
&& iPresenter is MvxTvosViewPresenter mvxTvosViewPresenter)
{
mvxTvosViewPresenter.CloseTabBarViewController();
};
}
}
public virtual void ShowTabView(UIViewController viewController, MvxTabPresentationAttribute attribute)
{
if (!string.IsNullOrEmpty(attribute.TabAccessibilityIdentifier))
viewController.View.AccessibilityIdentifier = attribute.TabAccessibilityIdentifier;
// setup Tab
SetTitleAndTabBarItem(viewController, attribute);
// add Tab
var currentTabs = new List<UIViewController>();
if (ViewControllers != null)
{
currentTabs = ViewControllers.ToList();
}
currentTabs.Add(viewController);
// update current Tabs
ViewControllers = currentTabs.ToArray();
}
protected virtual void SetTitleAndTabBarItem(UIViewController viewController, MvxTabPresentationAttribute attribute)
{
_tabsCount++;
viewController.Title = attribute.TabName;
if (!string.IsNullOrEmpty(attribute.TabIconName))
viewController.TabBarItem = new UITabBarItem(attribute.TabName, UIImage.FromBundle(attribute.TabIconName), _tabsCount);
if (!string.IsNullOrEmpty(attribute.TabSelectedIconName))
viewController.TabBarItem.SelectedImage = UIImage.FromBundle(attribute.TabSelectedIconName);
}
public virtual bool ShowChildView(UIViewController viewController)
{
var navigationController = SelectedViewController as UINavigationController;
// if the current selected ViewController is not a NavigationController, then a child cannot be shown
if (navigationController == null)
{
return false;
}
navigationController.PushViewController(viewController, true);
return true;
}
public virtual bool CloseChildViewModel(IMvxViewModel viewModel)
{
if (SelectedViewController is UINavigationController navController
&& navController.ViewControllers != null
&& navController.ViewControllers.Any())
{
// if the ViewModel to close if the last in the stack, close it animated
if (navController.TopViewController.GetIMvxTvosView().ViewModel == viewModel)
{
navController.PopViewController(true);
return true;
}
var controllers = navController.ViewControllers.ToList();
var controllerToClose = controllers.FirstOrDefault(vc => vc.GetIMvxTvosView().ViewModel == viewModel);
if (controllerToClose != null)
{
controllers.Remove(controllerToClose);
navController.ViewControllers = controllers.ToArray();
return true;
}
}
return false;
}
public virtual bool CloseTabViewModel(IMvxViewModel viewModel)
{
if (ViewControllers == null || !ViewControllers.Any())
return false;
// loop through plain Tabs
var plainToClose = ViewControllers.Where(v => !(v is UINavigationController))
.Select(v => v.GetIMvxTvosView())
.FirstOrDefault(mvxView => mvxView.ViewModel == viewModel);
if (plainToClose != null)
{
RemoveTabController((UIViewController)plainToClose);
return true;
}
// loop through nav stack Tabs
UIViewController toClose = null;
foreach (var vc in ViewControllers.Where(v => v is UINavigationController))
{
var root = ((UINavigationController)vc).ViewControllers.FirstOrDefault();
// Only dereference root after the null check, so an empty navigation stack doesn't cause a NullReferenceException.
if (root != null && root.GetIMvxTvosView().ViewModel == viewModel)
{
toClose = root;
break;
}
}
if (toClose != null)
{
RemoveTabController((UIViewController)toClose);
return true;
}
return false;
}
public void PresentViewControllerWithNavigation(UIViewController controller,
bool animated = true,
Action completionHandler = null)
{
PresentViewController(new UINavigationController(controller), animated, completionHandler);
}
public virtual bool CanShowChildView()
{
return SelectedViewController is UINavigationController;
}
protected virtual void RemoveTabController(UIViewController toClose)
{
var newTabs = ViewControllers.Where(v => v != toClose);
ViewControllers = newTabs.ToArray();
}
}
public class MvxTabBarViewController<TViewModel> : MvxTabBarViewController
where TViewModel : IMvxViewModel
{
public new TViewModel ViewModel
{
get { return (TViewModel)base.ViewModel; }
set { base.ViewModel = value; }
}
public virtual UIViewController VisibleUIViewController
{
get
{
var topViewController = (SelectedViewController as UINavigationController)
?.TopViewController ?? SelectedViewController;
if (topViewController.PresentedViewController != null)
{
var presentedNavigationController = topViewController.PresentedViewController as UINavigationController;
if (presentedNavigationController != null)
{
return presentedNavigationController.TopViewController;
}
else
{
return topViewController.PresentedViewController;
}
}
else
{
return topViewController;
}
}
}
protected MvxTabBarViewController()
{
}
protected MvxTabBarViewController(IntPtr handle)
: base(handle)
{
}
}
}
| 1 | 13,767 | Is there any variable available instead of hardcoding 5? | MvvmCross-MvvmCross | .cs |
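One way to address the review question above without the hardcoded 5 is to compare against the More controller that UIKit exposes; the following is a minimal sketch only, assuming the same MvxTabBarViewController members already used in the patch (SelectedViewController, MoreNavigationController), not the project's adopted fix.

public virtual bool ShowChildView(UIViewController viewController)
{
    // UIKit moves overflow tabs into the More navigation controller; when that
    // controller is active, SelectedViewController returns it directly, so no
    // index threshold is needed.
    if (SelectedViewController == MoreNavigationController)
    {
        MoreNavigationController.PushViewController(viewController, true);
        return true;
    }

    var navigationController = SelectedViewController as UINavigationController;
    // if the current selected ViewController is not a NavigationController, then a child cannot be shown
    if (navigationController == null)
    {
        return false;
    }
    navigationController.PushViewController(viewController, true);
    return true;
}

Comparing against MoreNavigationController also keeps working if the number of visible tab slots ever differs from five on a given device.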
@@ -385,9 +385,7 @@ public class LoginActivity extends AccountAuthenticatorActivity
@Override
protected void onActivityResult(int requestCode, int resultCode, Intent data) {
- if (requestCode == PasscodeManager.PASSCODE_REQUEST_CODE && resultCode == Activity.RESULT_OK) {
- webviewHelper.onNewPasscode();
- } else if (requestCode == SPRequestHandler.IDP_REQUEST_CODE) {
+ if (requestCode == SPRequestHandler.IDP_REQUEST_CODE) {
spRequestHandler.handleIDPResponse(resultCode, data);
} else {
super.onActivityResult(requestCode, resultCode, data); | 1 | /*
* Copyright (c) 2011-present, salesforce.com, inc.
* All rights reserved.
* Redistribution and use of this software in source and binary forms, with or
* without modification, are permitted provided that the following conditions
* are met:
* - Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* - Neither the name of salesforce.com, inc. nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission of salesforce.com, inc.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
package com.salesforce.androidsdk.ui;
import android.accounts.AccountAuthenticatorActivity;
import android.app.ActionBar;
import android.app.Activity;
import android.content.BroadcastReceiver;
import android.content.Context;
import android.content.Intent;
import android.content.IntentFilter;
import android.net.Uri;
import android.os.Bundle;
import android.os.Handler;
import android.os.Looper;
import android.security.KeyChain;
import android.text.TextUtils;
import android.view.KeyEvent;
import android.view.Menu;
import android.view.MenuItem;
import android.view.View;
import android.view.WindowManager;
import android.webkit.WebSettings;
import android.webkit.WebSettings.LayoutAlgorithm;
import android.webkit.WebView;
import android.widget.Button;
import android.widget.Toast;
import com.salesforce.androidsdk.R;
import com.salesforce.androidsdk.accounts.UserAccount;
import com.salesforce.androidsdk.accounts.UserAccountManager;
import com.salesforce.androidsdk.analytics.SalesforceAnalyticsManager;
import com.salesforce.androidsdk.app.SalesforceSDKManager;
import com.salesforce.androidsdk.auth.OAuth2;
import com.salesforce.androidsdk.auth.idp.IDPAccountPickerActivity;
import com.salesforce.androidsdk.auth.idp.IDPInititatedLoginReceiver;
import com.salesforce.androidsdk.auth.idp.SPRequestHandler;
import com.salesforce.androidsdk.config.RuntimeConfig;
import com.salesforce.androidsdk.config.RuntimeConfig.ConfigKey;
import com.salesforce.androidsdk.rest.ClientManager.LoginOptions;
import com.salesforce.androidsdk.security.PasscodeManager;
import com.salesforce.androidsdk.ui.OAuthWebviewHelper.OAuthWebviewHelperEvents;
import com.salesforce.androidsdk.util.AuthConfigTask;
import com.salesforce.androidsdk.util.EventsObservable;
import com.salesforce.androidsdk.util.EventsObservable.EventType;
import com.salesforce.androidsdk.util.SalesforceSDKLogger;
import com.salesforce.androidsdk.util.UriFragmentParser;
import java.util.List;
import java.util.Map;
/**
* Login Activity: takes care of authenticating the user.
* Authorization happens inside a web view. Once we get our authorization code,
* we swap it for an access and refresh token and create an account through the
* account manager to store them.
*
* The bulk of the work for this is actually managed by the OAuthWebviewHelper class.
*/
public class LoginActivity extends AccountAuthenticatorActivity
implements OAuthWebviewHelperEvents {
public static final int PICK_SERVER_REQUEST_CODE = 10;
private static final String TAG = "LoginActivity";
private boolean wasBackgrounded;
private OAuthWebviewHelper webviewHelper;
private ChangeServerReceiver changeServerReceiver;
private boolean receiverRegistered;
private SPRequestHandler spRequestHandler;
private SPAuthCallback authCallback;
private String userHint;
private String spActivityName;
private Bundle spActivityExtras;
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
// Getting login options from intent's extras.
final LoginOptions loginOptions = LoginOptions.fromBundle(getIntent().getExtras());
// Protect against screenshots.
getWindow().setFlags(WindowManager.LayoutParams.FLAG_SECURE,
WindowManager.LayoutParams.FLAG_SECURE);
// Fetches auth config if required.
try {
(new AuthConfigTask(null)).execute().get();
} catch (Exception e) {
SalesforceSDKLogger.e(TAG, "Exception occurred while fetching auth config", e);
}
// Setup content view.
setContentView(R.layout.sf__login);
if (SalesforceSDKManager.getInstance().isIDPLoginFlowEnabled()) {
final Button button = findViewById(R.id.sf__idp_login_button);
button.setVisibility(View.VISIBLE);
}
// Setup the WebView.
final WebView webView = findViewById(R.id.sf__oauth_webview);
final WebSettings webSettings = webView.getSettings();
webSettings.setUseWideViewPort(true);
webSettings.setLayoutAlgorithm(LayoutAlgorithm.NORMAL);
webSettings.setJavaScriptEnabled(true);
webSettings.setAllowFileAccessFromFileURLs(true);
webSettings.setJavaScriptCanOpenWindowsAutomatically(true);
webSettings.setDatabaseEnabled(true);
webSettings.setDomStorageEnabled(true);
EventsObservable.get().notifyEvent(EventType.AuthWebViewCreateComplete, webView);
webviewHelper = getOAuthWebviewHelper(this, loginOptions, webView, savedInstanceState);
// Let observers know
EventsObservable.get().notifyEvent(EventType.LoginActivityCreateComplete, this);
certAuthOrLogin();
if (!receiverRegistered) {
changeServerReceiver = new ChangeServerReceiver();
final IntentFilter changeServerFilter = new IntentFilter(ServerPickerActivity.CHANGE_SERVER_INTENT);
registerReceiver(changeServerReceiver, changeServerFilter);
receiverRegistered = true;
}
authCallback = new SPAuthCallback();
}
@Override
protected void onDestroy() {
if (receiverRegistered) {
unregisterReceiver(changeServerReceiver);
receiverRegistered = false;
}
super.onDestroy();
}
@Override
protected void onNewIntent(Intent intent) {
super.onNewIntent(intent);
// If this is a callback from Chrome, processes it and does nothing else.
if (isChromeCallback(intent)) {
completeAuthFlow(intent);
return;
}
// Reloads login page for every new intent to ensure the correct login server is selected.
webviewHelper.loadLoginPage();
        // Launches the IDP login flow directly for an IDP-initiated login.
if (intent != null) {
final Bundle extras = intent.getExtras();
if (extras != null) {
userHint = extras.getString(IDPInititatedLoginReceiver.USER_HINT_KEY);
spActivityName = extras.getString(IDPInititatedLoginReceiver.SP_ACTVITY_NAME_KEY);
spActivityExtras = extras.getBundle(IDPInititatedLoginReceiver.SP_ACTVITY_EXTRAS_KEY);
boolean isIdpInitFlow = extras.getBoolean(IDPInititatedLoginReceiver.IDP_INIT_LOGIN_KEY);
if (isIdpInitFlow) {
onIDPLoginClick(null);
}
}
}
}
protected void certAuthOrLogin() {
if (shouldUseCertBasedAuth()) {
final String alias = RuntimeConfig.getRuntimeConfig(this).getString(ConfigKey.ManagedAppCertAlias);
SalesforceSDKLogger.d(TAG, "Cert based login flow being triggered with alias: " + alias);
KeyChain.choosePrivateKeyAlias(this, webviewHelper, null, null, null, -1, alias);
} else {
SalesforceSDKLogger.d(TAG, "User agent login flow being triggered");
webviewHelper.loadLoginPage();
}
}
private boolean isChromeCallback(Intent intent) {
if (intent == null) {
return false;
}
final Uri uri = intent.getData();
if (uri == null) {
return false;
}
return true;
}
private void completeAuthFlow(Intent intent) {
final Uri uri = intent.getData();
final Map<String, String> params = UriFragmentParser.parse(uri);
final String error = params.get("error");
if (error != null) {
final String errorDesc = params.get("error_description");
webviewHelper.onAuthFlowError(error, errorDesc, null);
} else {
final OAuth2.TokenEndpointResponse tr = new OAuth2.TokenEndpointResponse(params);
webviewHelper.onAuthFlowComplete(tr);
}
}
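    /*
     * Illustrative note (added for clarity, not part of the original SDK source):
     * completeAuthFlow() relies on the token data coming back in the URI fragment of
     * the Chrome/custom-tab callback. A successful callback might look roughly like
     * (all values below are made up):
     *
     *   sfdc://oauth/done#access_token=00Dxx...&refresh_token=5Aep...&instance_url=https%3A%2F%2Fexample.my.salesforce.com
     *
     * whereas a failed authorization carries "error" and "error_description" in the
     * fragment instead, which is why the method only needs to check params.get("error")
     * after UriFragmentParser.parse(uri) has split the fragment into key/value pairs.
     */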
/**
* Returns whether certificate based authentication flow should be used.
*
* @return True - if it should be used, False - otherwise.
*/
protected boolean shouldUseCertBasedAuth() {
return RuntimeConfig.getRuntimeConfig(this).getBoolean(ConfigKey.RequireCertAuth);
}
protected OAuthWebviewHelper getOAuthWebviewHelper(OAuthWebviewHelperEvents callback,
LoginOptions loginOptions, WebView webView, Bundle savedInstanceState) {
return new OAuthWebviewHelper(this, callback, loginOptions, webView, savedInstanceState);
}
@Override
protected void onResume() {
super.onResume();
if (wasBackgrounded) {
webviewHelper.clearView();
webviewHelper.loadLoginPage();
wasBackgrounded = false;
}
}
@Override
public void onSaveInstanceState(Bundle bundle) {
super.onSaveInstanceState(bundle);
webviewHelper.saveState(bundle);
}
@Override
public boolean onKeyDown(int keyCode, KeyEvent event) {
        // This allows subclasses to override the behavior by returning false.
if (fixBackButtonBehavior(keyCode)) {
return true;
}
return super.onKeyDown(keyCode, event);
}
/**
     * A fix for back button behavior.
     *
     * @param keyCode Key code of the key event.
     * @return true if the fix was applied,
     *         false if the key code was not handled.
*/
protected boolean fixBackButtonBehavior(int keyCode) {
if (keyCode == KeyEvent.KEYCODE_BACK) {
/*
* If there are no accounts signed in, we need the login screen
* to go away, and go back to the home screen. However, if the
* login screen has been brought up from the switcher screen,
* the back button should take the user back to the previous screen.
*/
final UserAccountManager accMgr = SalesforceSDKManager.getInstance().getUserAccountManager();
if (accMgr.getAuthenticatedUsers() == null) {
wasBackgrounded = true;
moveTaskToBack(true);
return true;
} else {
wasBackgrounded = true;
finish();
return true;
}
}
return false;
}
/**************************************************************************************************
*
* Actions (Changer server / Clear cookies etc) are available through a menu
*
**************************************************************************************************/
@Override
public boolean onCreateOptionsMenu(Menu menu) {
getMenuInflater().inflate(R.menu.sf__login, menu);
return super.onCreateOptionsMenu(menu);
}
@Override
public boolean onOptionsItemSelected(MenuItem item) {
int itemId = item.getItemId();
if (itemId == R.id.sf__menu_clear_cookies) {
onClearCookiesClick(null);
return true;
} else if (itemId == R.id.sf__menu_pick_server) {
onPickServerClick(null);
return true;
} else if (itemId == R.id.sf__menu_reload) {
onReloadClick(null);
return true;
} else {
return super.onOptionsItemSelected(item);
}
}
/**************************************************************************************************
*
* Callbacks from the OAuthWebviewHelper
*
**************************************************************************************************/
@Override
public void loadingLoginPage(String loginUrl) {
final ActionBar ab = getActionBar();
if (ab != null) {
ab.setTitle(loginUrl);
}
}
@Override
public void onAccountAuthenticatorResult(Bundle authResult) {
setAccountAuthenticatorResult(authResult);
}
/**************************************************************************************************
*
* Buttons click handlers
*
**************************************************************************************************/
/**
* Called when "Clear cookies" button is clicked.
* Clear cookies and reload login page.
     * @param v Clear cookies button.
*/
public void onClearCookiesClick(View v) {
webviewHelper.clearCookies();
webviewHelper.loadLoginPage();
}
/**
* Called when the IDP login button is clicked.
*
* @param v IDP login button.
*/
public void onIDPLoginClick(View v) {
final String loginServer = SalesforceSDKManager.getInstance().getLoginServerManager().getSelectedLoginServer().url.trim();
SalesforceSDKLogger.d(TAG, "Launching IDP app for authentication with login host: " + loginServer);
spRequestHandler = new SPRequestHandler(loginServer, userHint, authCallback);
spRequestHandler.launchIDPApp(this);
}
/**
* Called when "Reload" button is clicked.
* Reloads login page.
     * @param v Reload button.
*/
public void onReloadClick(View v) {
webviewHelper.loadLoginPage();
}
/**
* Called when "Pick server" button is clicked.
     * Starts ServerPickerActivity.
     * @param v Pick server button.
*/
public void onPickServerClick(View v) {
final Intent i = new Intent(this, ServerPickerActivity.class);
startActivityForResult(i, PICK_SERVER_REQUEST_CODE);
}
@Override
protected void onActivityResult(int requestCode, int resultCode, Intent data) {
if (requestCode == PasscodeManager.PASSCODE_REQUEST_CODE && resultCode == Activity.RESULT_OK) {
webviewHelper.onNewPasscode();
} else if (requestCode == SPRequestHandler.IDP_REQUEST_CODE) {
spRequestHandler.handleIDPResponse(resultCode, data);
} else {
super.onActivityResult(requestCode, resultCode, data);
}
}
@Override
public void finish(UserAccount userAccount) {
initAnalyticsManager(userAccount);
final UserAccountManager userAccountManager = SalesforceSDKManager.getInstance().getUserAccountManager();
final List<UserAccount> authenticatedUsers = userAccountManager.getAuthenticatedUsers();
final int numAuthenticatedUsers = authenticatedUsers == null ? 0 : authenticatedUsers.size();
/*
* Sends user switch intents only if this login flow is not a login triggered due
* to an incoming authentication request from an SP app or first user to login on IDP.
* If it is an incoming SP request, we should add the user account but NOT switch to
* the user or send user switch intents unless it's the first user being logged in.
*/
boolean isFirstUserOrNotIDPFlow = !SalesforceSDKManager.getInstance().isIDPAppLoginFlowActive()
|| (numAuthenticatedUsers <= 1);
if (isFirstUserOrNotIDPFlow) {
final int userSwitchType;
if (numAuthenticatedUsers == 1) {
// We've already authenticated the first user, so there should be one.
userSwitchType = UserAccountManager.USER_SWITCH_TYPE_FIRST_LOGIN;
} else if (numAuthenticatedUsers > 1) {
// Otherwise we're logging in with an additional user.
userSwitchType = UserAccountManager.USER_SWITCH_TYPE_LOGIN;
} else {
// This should never happen but if it does, pass in the "unknown" value.
userSwitchType = UserAccountManager.USER_SWITCH_TYPE_DEFAULT;
}
userAccountManager.sendUserSwitchIntent(userSwitchType, null);
}
/*
* Passes back the added user account object if this is a login flow in the IDP app
* initiated by an incoming request for authentication from an SP app.
*/
if (userAccount != null && SalesforceSDKManager.getInstance().isIDPAppLoginFlowActive()) {
final Intent intent = new Intent(IDPAccountPickerActivity.IDP_LOGIN_COMPLETE_ACTION);
intent.putExtra(IDPAccountPickerActivity.USER_ACCOUNT_KEY, userAccount.toBundle());
sendBroadcast(intent);
}
// If the IDP app specified a component to launch after login, launches that component.
if (!TextUtils.isEmpty(spActivityName)) {
try {
final Intent intent = new Intent(this, Class.forName(spActivityName));
intent.addCategory(Intent.CATEGORY_DEFAULT);
intent.putExtra(IDPInititatedLoginReceiver.SP_ACTVITY_EXTRAS_KEY, spActivityExtras);
startActivity(intent);
} catch (Exception e) {
SalesforceSDKLogger.e(TAG, "Could not start activity", e);
}
}
// Cleans up some state before dismissing activity.
userHint = null;
spActivityName = null;
spActivityExtras = null;
finish();
}
private void initAnalyticsManager(UserAccount account) {
final SalesforceAnalyticsManager analyticsManager = SalesforceAnalyticsManager.getInstance(account);
if (analyticsManager != null) {
analyticsManager.updateLoggingPrefs();
}
}
public class ChangeServerReceiver extends BroadcastReceiver {
@Override
public void onReceive(Context context, Intent intent) {
if (intent != null && intent.getAction() != null) {
final String action = intent.getAction();
if (ServerPickerActivity.CHANGE_SERVER_INTENT.equals(action)) {
webviewHelper.loadLoginPage();
}
}
}
}
/**
* Callbacks for SP authentication flow.
*
* @author bhariharan
*/
public class SPAuthCallback {
/**
* Called when the flow was successful and token response is received.
*
* @param tokenResponse Token response.
*/
public void receivedTokenResponse(OAuth2.TokenEndpointResponse tokenResponse) {
webviewHelper.onAuthFlowComplete(tokenResponse);
}
/**
* Called when the flow was not successful.
*
* @param errorMessage Error message.
*/
public void receivedErrorResponse(final String errorMessage) {
final Handler toastHandler = new Handler(Looper.getMainLooper());
toastHandler.post(new Runnable() {
@Override
public void run() {
Toast.makeText(getApplicationContext(), errorMessage, Toast.LENGTH_LONG).show();
}
});
}
}
}
| 1 | 17,195 | The onNewPasscode method was taking care of creating the user account. Now the user account will already have been created. | forcedotcom-SalesforceMobileSDK-Android | java |
@@ -29,7 +29,8 @@ import (
)
const (
- MaxChainNameLength = 28
+ MaxChainNameLength = 28
+ defaultPostWriteInterval = 50 * time.Millisecond
)
var ( | 1 | // Copyright (c) 2016-2017 Tigera, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package iptables
import (
"bytes"
"fmt"
"reflect"
"regexp"
"strings"
"time"
log "github.com/Sirupsen/logrus"
"github.com/prometheus/client_golang/prometheus"
"github.com/projectcalico/felix/set"
)
const (
MaxChainNameLength = 28
)
var (
// List of all the top-level kernel-created chains by iptables table.
tableToKernelChains = map[string][]string{
"filter": []string{"INPUT", "FORWARD", "OUTPUT"},
"nat": []string{"PREROUTING", "INPUT", "OUTPUT", "POSTROUTING"},
"mangle": []string{"PREROUTING", "INPUT", "FORWARD", "OUTPUT", "POSTROUTING"},
"raw": []string{"PREROUTING", "OUTPUT"},
}
	// chainCreateRegexp matches iptables-save output lines that forward-reference a chain.
// It captures the name of the chain.
chainCreateRegexp = regexp.MustCompile(`^:(\S+)`)
// appendRegexp matches an iptables-save output line for an append operation.
appendRegexp = regexp.MustCompile(`^-A (\S+)`)
// Prometheus metrics.
countNumRestoreCalls = prometheus.NewCounter(prometheus.CounterOpts{
Name: "felix_iptables_restore_calls",
Help: "Number of iptables-restore calls.",
})
countNumRestoreErrors = prometheus.NewCounter(prometheus.CounterOpts{
Name: "felix_iptables_restore_errors",
Help: "Number of iptables-restore errors.",
})
countNumSaveCalls = prometheus.NewCounter(prometheus.CounterOpts{
Name: "felix_iptables_save_calls",
Help: "Number of iptables-save calls.",
})
countNumSaveErrors = prometheus.NewCounter(prometheus.CounterOpts{
Name: "felix_iptables_save_errors",
Help: "Number of iptables-save errors.",
})
gaugeNumChains = prometheus.NewGaugeVec(prometheus.GaugeOpts{
Name: "felix_iptables_chains",
Help: "Number of active iptables chains.",
}, []string{"ip_version", "table"})
gaugeNumRules = prometheus.NewGaugeVec(prometheus.GaugeOpts{
Name: "felix_iptables_rules",
Help: "Number of active iptables rules.",
}, []string{"ip_version", "table"})
countNumLinesExecuted = prometheus.NewCounterVec(prometheus.CounterOpts{
Name: "felix_iptables_lines_executed",
Help: "Number of iptables rule updates executed.",
}, []string{"ip_version", "table"})
)
func init() {
prometheus.MustRegister(countNumRestoreCalls)
prometheus.MustRegister(countNumRestoreErrors)
prometheus.MustRegister(countNumSaveCalls)
prometheus.MustRegister(countNumSaveErrors)
prometheus.MustRegister(gaugeNumChains)
prometheus.MustRegister(gaugeNumRules)
prometheus.MustRegister(countNumLinesExecuted)
}
// Table represents a single one of the iptables tables i.e. "raw", "nat", "filter", etc. It
// caches the desired state of that table, then attempts to bring it into sync when Apply() is
// called.
//
// API Model
//
// Table supports two classes of operation: "rule insertions" and "full chain updates".
//
// As the name suggests, rule insertions allow for inserting one or more rules into a pre-existing
// chain. Rule insertions are intended to be used to hook kernel chains (such as "FORWARD") in
// order to direct them to a Felix-owned chain. It is important to minimise the use of rule
// insertions because the top-level chains are shared resources, which can be modified by other
// applications. In addition, rule insertions are harder to clean up after an upgrade to a new
// version of Felix (because we need a way to recognise our rules in a crowded chain).
//
// Full chain updates replace the entire contents of a Felix-owned chain with a new set of rules.
// Limiting the operation to "replace whole chain" in this way significantly simplifies the API.
// Although the API operates on full chains, the dataplane write logic tries to avoid rewriting
// a whole chain if only part of it has changed (this was not the case in Felix 1.4). This
// prevents iptables counters from being reset unnecessarily.
//
// In either case, the actual dataplane updates are deferred until the next call to Apply() so
// chain updates and insertions may occur in any order as long as they are consistent (i.e. there
// are no references to non-existent chains) by the time Apply() is called.
//
// Design
//
// We had several goals in designing the iptables machinery in 2.0.0:
//
// (1) High performance. Felix needs to handle high churn of endpoints and rules.
//
// (2) Ability to restore rules, even if other applications accidentally break them: we found that
// other applications sometimes misuse iptables-save and iptables-restore to do a read, modify,
// write cycle. That behaviour is not safe under concurrent modification.
//
// (3) Avoid rewriting rules that haven't changed so that we don't reset iptables counters.
//
// (4) Avoid parsing iptables commands (for example, the output from iptables/iptables-save).
// This is very hard to do robustly because iptables rules do not necessarily round-trip through
// the kernel in the same form. In addition, the format could easily change due to changes or
// fixes in the iptables/iptables-save command.
//
// (5) Support for graceful restart. I.e. deferring potentially incorrect updates until we're
// in-sync with the datastore. For example, if we have 100 endpoints on a host, after a restart
// we don't want to write a "dispatch" chain when we learn about the first endpoint (possibly
// replacing an existing one that had all 100 endpoints in place and causing traffic to glitch);
// instead, we want to defer until we've seen all 100 and then do the write.
//
// (6) Improved handling of rule inserts vs Felix 1.4.x. Previous versions of Felix sometimes
// inserted special-case rules that were not marked as Calico rules in any sensible way making
// cleanup of those rules after an upgrade difficult.
//
// Implementation
//
// For high performance (goal 1), we use iptables-restore to do bulk updates to iptables. This is
// much faster than individual iptables calls.
//
// To allow us to restore rules after they are clobbered by another process (goal 2), we cache
// them at this layer. This means that we don't need a mechanism to ask the other layers of Felix
// to do a resync. Note: Table doesn't start a thread of its own so it relies on the main event
// loop to trigger any dataplane resync polls.
//
// There is tension between goals 3 and 4. In order to avoid full rewrites (goal 3), we need to
// know what rules are in place, but we also don't want to parse them to find out (goal 4)! As
// a compromise, we deterministically calculate an ID for each rule and store it in an iptables
// comment. Then, when we want to know what rules are in place, we _do_ parse the output from
// iptables-save, but only to read back the rule IDs. That limits the amount of parsing we need
// to do and keeps it manageable/robust.
//
// To support graceful restart (goal 5), we defer updates to the dataplane until Apply() is called,
// then we do an atomic update using iptables-restore. As long as the first Apply() call is
// after we're in sync, the dataplane won't be touched until the right time. Felix 1.4.x had a
// more complex mechanism to support partial updates during the graceful restart period but
// Felix 2.0.0 resyncs so quickly that the added complexity is not justified.
//
// To make it easier to manage rule insertions (goal 6), we add rule IDs to those too. With
// rule IDs in place, we can easily distinguish Calico rules from non-Calico rules without needing
// to know exactly which rules to expect. To deal with cleanup after upgrade from older versions
// that did not write rule IDs, we support special-case regexes to detect our old rules.
//
// Thread safety
//
// Table doesn't do any internal synchronization, its methods should only be called from one
// thread. To avoid conflicts in the dataplane itself, there should only be one instance of
// Table for each iptable table in an application.
type Table struct {
Name string
IPVersion uint8
// chainToInsertedRules maps from chain name to a list of rules to be inserted at the start
// of that chain. Rules are written with rule hash comments. The Table cleans up inserted
// rules with unknown hashes.
chainToInsertedRules map[string][]Rule
dirtyInserts set.Set
// chainToRuleFragments contains the desired state of our iptables chains, indexed by
// chain name. The values are slices of iptables fragments, such as
// "--match foo --jump DROP" (i.e. omitting the action and chain name, which are calculated
// as needed).
chainNameToChain map[string]*Chain
dirtyChains set.Set
inSyncWithDataPlane bool
// chainToDataplaneHashes contains the rule hashes that we think are in the dataplane.
// it is updated when we write to the dataplane but it can also be read back and compared
// to what we calculate from chainToContents.
chainToDataplaneHashes map[string][]string
// hashCommentPrefix holds the prefix that we prepend to our rule-tracking hashes.
hashCommentPrefix string
// hashCommentRegexp matches the rule-tracking comment, capturing the rule hash.
hashCommentRegexp *regexp.Regexp
// ourChainsRegexp matches the names of chains that are "ours", i.e. start with one of our
// prefixes.
ourChainsRegexp *regexp.Regexp
// oldInsertRegexp matches inserted rules from old pre rule-hash versions of felix.
oldInsertRegexp *regexp.Regexp
iptablesRestoreCmd string
iptablesSaveCmd string
// insertMode is either "insert" or "append"; whether we insert our rules or append them
// to top-level chains.
insertMode string
// Record when we did our most recent reads and writes of the table. We use these to
// calculate the next time we should force a refresh.
lastReadTime time.Time
lastWriteTime time.Time
postWriteInterval time.Duration
refreshInterval time.Duration
logCxt *log.Entry
gaugeNumChains prometheus.Gauge
gaugeNumRules prometheus.Gauge
countNumLinesExecuted prometheus.Counter
// Factory for making commands, used by UTs to shim exec.Command().
newCmd cmdFactory
// Shims for time.XXX functions:
timeSleep func(d time.Duration)
timeNow func() time.Time
}
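// Illustrative example (added for clarity, not part of the original file): with the
// rule-tracking scheme described in the comment above, a Calico-owned rule in
// iptables-save output looks roughly like this (chain name and hash are made up):
//
//	-A cali-FORWARD -m comment --comment "cali:abcdEFGH1234_-xy" -j cali-from-ifaces
//
// hashCommentRegexp extracts "abcdEFGH1234_-xy" from that line, while a rule with no
// such comment yields an empty-string hash (or a dummy marker for rules left behind by
// very old Felix versions), which is how the resync logic spots rules that need to be
// rewritten or cleaned up.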
type TableOptions struct {
HistoricChainPrefixes []string
ExtraCleanupRegexPattern string
InsertMode string
RefreshInterval time.Duration
// NewCmdOverride for tests, if non-nil, factory to use instead of the real exec.Command()
NewCmdOverride cmdFactory
// SleepOverride for tests, if non-nil, replacement for time.Sleep()
SleepOverride func(d time.Duration)
// NowOverride for tests, if non-nil, replacement for time.Now()
NowOverride func() time.Time
}
func NewTable(
name string,
ipVersion uint8,
hashPrefix string,
options TableOptions,
) *Table {
// Calculate the regex used to match the hash comment. The comment looks like this:
// --comment "cali:abcd1234_-".
hashCommentRegexp := regexp.MustCompile(`--comment "?` + hashPrefix + `([a-zA-Z0-9_-]+)"?`)
ourChainsPattern := "^(" + strings.Join(options.HistoricChainPrefixes, "|") + ")"
ourChainsRegexp := regexp.MustCompile(ourChainsPattern)
oldInsertRegexpParts := []string{}
for _, prefix := range options.HistoricChainPrefixes {
part := fmt.Sprintf("(?:-j|--jump) %s", prefix)
oldInsertRegexpParts = append(oldInsertRegexpParts, part)
}
if options.ExtraCleanupRegexPattern != "" {
oldInsertRegexpParts = append(oldInsertRegexpParts,
options.ExtraCleanupRegexPattern)
}
oldInsertPattern := strings.Join(oldInsertRegexpParts, "|")
oldInsertRegexp := regexp.MustCompile(oldInsertPattern)
// Pre-populate the insert table with empty lists for each kernel chain. Ensures that we
// clean up any chains that we hooked on a previous run.
inserts := map[string][]Rule{}
dirtyInserts := set.New()
for _, kernelChain := range tableToKernelChains[name] {
inserts[kernelChain] = []Rule{}
dirtyInserts.Add(kernelChain)
}
var insertMode string
switch options.InsertMode {
case "", "insert":
insertMode = "insert"
case "append":
insertMode = "append"
default:
log.WithField("insertMode", options.InsertMode).Panic("Unknown insert mode")
}
// Allow override of exec.Command() and time.Sleep() for test purposes.
newCmd := newRealCmd
if options.NewCmdOverride != nil {
newCmd = options.NewCmdOverride
}
sleep := time.Sleep
if options.SleepOverride != nil {
sleep = options.SleepOverride
}
now := time.Now
if options.NowOverride != nil {
now = options.NowOverride
}
table := &Table{
Name: name,
IPVersion: ipVersion,
chainToInsertedRules: inserts,
dirtyInserts: dirtyInserts,
chainNameToChain: map[string]*Chain{},
dirtyChains: set.New(),
chainToDataplaneHashes: map[string][]string{},
logCxt: log.WithFields(log.Fields{
"ipVersion": ipVersion,
"table": name,
}),
hashCommentPrefix: hashPrefix,
hashCommentRegexp: hashCommentRegexp,
ourChainsRegexp: ourChainsRegexp,
oldInsertRegexp: oldInsertRegexp,
insertMode: insertMode,
// Initialise the write tracking as if we'd just done a write, this will trigger
// us to recheck the dataplane at exponentially increasing intervals at startup.
// Note: if we didn't do this, the calculation logic would need to be modified
// to cope with zero values for these fields.
lastWriteTime: now(),
postWriteInterval: 50 * time.Millisecond,
refreshInterval: options.RefreshInterval,
newCmd: newCmd,
timeSleep: sleep,
timeNow: now,
gaugeNumChains: gaugeNumChains.WithLabelValues(fmt.Sprintf("%d", ipVersion), name),
gaugeNumRules: gaugeNumRules.WithLabelValues(fmt.Sprintf("%d", ipVersion), name),
countNumLinesExecuted: countNumLinesExecuted.WithLabelValues(fmt.Sprintf("%d", ipVersion), name),
}
if ipVersion == 4 {
table.iptablesRestoreCmd = "iptables-restore"
table.iptablesSaveCmd = "iptables-save"
} else {
table.iptablesRestoreCmd = "ip6tables-restore"
table.iptablesSaveCmd = "ip6tables-save"
}
return table
}
func (t *Table) SetRuleInsertions(chainName string, rules []Rule) {
t.logCxt.WithField("chainName", chainName).Debug("Updating rule insertions")
oldRules := t.chainToInsertedRules[chainName]
t.chainToInsertedRules[chainName] = rules
numRulesDelta := len(rules) - len(oldRules)
t.gaugeNumRules.Add(float64(numRulesDelta))
t.dirtyInserts.Add(chainName)
// Defensive: make sure we re-read the dataplane state before we make updates. While the
// code was originally designed not to need this, we found that other users of
	// iptables-restore can still clobber our updates so it's safest to re-read the state before
// each write.
t.InvalidateDataplaneCache("insertion")
}
func (t *Table) UpdateChains(chains []*Chain) {
for _, chain := range chains {
t.UpdateChain(chain)
}
}
func (t *Table) UpdateChain(chain *Chain) {
t.logCxt.WithField("chainName", chain.Name).Info("Queueing update of chain.")
oldNumRules := 0
if oldChain := t.chainNameToChain[chain.Name]; oldChain != nil {
oldNumRules = len(oldChain.Rules)
}
t.chainNameToChain[chain.Name] = chain
numRulesDelta := len(chain.Rules) - oldNumRules
t.gaugeNumRules.Add(float64(numRulesDelta))
t.dirtyChains.Add(chain.Name)
// Defensive: make sure we re-read the dataplane state before we make updates. While the
// code was originally designed not to need this, we found that other users of
	// iptables-restore can still clobber our updates so it's safest to re-read the state before
// each write.
t.InvalidateDataplaneCache("chain update")
}
func (t *Table) RemoveChains(chains []*Chain) {
for _, chain := range chains {
t.RemoveChainByName(chain.Name)
}
}
func (t *Table) RemoveChainByName(name string) {
t.logCxt.WithField("chainName", name).Info("Queing deletion of chain.")
if oldChain, known := t.chainNameToChain[name]; known {
t.gaugeNumRules.Sub(float64(len(oldChain.Rules)))
delete(t.chainNameToChain, name)
t.dirtyChains.Add(name)
}
// Defensive: make sure we re-read the dataplane state before we make updates. While the
// code was originally designed not to need this, we found that other users of
	// iptables-restore can still clobber our updates so it's safest to re-read the state before
// each write.
t.InvalidateDataplaneCache("chain removal")
}
func (t *Table) loadDataplaneState() {
// Load the hashes from the dataplane.
t.logCxt.Info("Loading current iptables state and checking it is correct.")
t.lastReadTime = t.timeNow()
dataplaneHashes := t.getHashesFromDataplane()
// Check that the rules we think we've programmed are still there and mark any inconsistent
// chains for refresh.
for chainName, expectedHashes := range t.chainToDataplaneHashes {
logCxt := t.logCxt.WithField("chainName", chainName)
if t.dirtyChains.Contains(chainName) || t.dirtyInserts.Contains(chainName) {
// Already an update pending for this chain; no point in flagging it as
// out-of-sync.
logCxt.Debug("Skipping known-dirty chain")
continue
}
dpHashes := dataplaneHashes[chainName]
if !t.ourChainsRegexp.MatchString(chainName) {
// Not one of our chains so it may be one that we're inserting rules into.
insertedRules := t.chainToInsertedRules[chainName]
if len(insertedRules) == 0 {
// This chain shouldn't have any inserts, make sure that's the
// case. This case also covers the case where a chain was removed,
// making dpHashes nil.
dataplaneHasInserts := false
for _, hash := range dpHashes {
if hash != "" {
dataplaneHasInserts = true
break
}
}
if dataplaneHasInserts {
logCxt.WithField("actualRuleIDs", dpHashes).Warn(
"Chain had unexpected inserts, marking for resync")
t.dirtyInserts.Add(chainName)
}
continue
}
// Re-calculate the expected rule insertions based on the current length
// of the chain (since other processes may have inserted/removed rules
// from the chain, throwing off the numbers).
expectedHashes, _ = t.expectedHashesForInsertChain(
chainName,
numEmptyStrings(dpHashes),
)
if !reflect.DeepEqual(dpHashes, expectedHashes) {
logCxt.WithFields(log.Fields{
"expectedRuleIDs": expectedHashes,
"actualRuleIDs": dpHashes,
}).Warn("Detected out-of-sync inserts, marking for resync")
t.dirtyInserts.Add(chainName)
}
} else {
// One of our chains, should match exactly.
if !reflect.DeepEqual(dpHashes, expectedHashes) {
logCxt.Warn("Detected out-of-sync Calico chain, marking for resync")
t.dirtyChains.Add(chainName)
}
}
}
// Now scan for chains that shouldn't be there and mark for deletion.
t.logCxt.Debug("Scanning for unexpected iptables chains")
for chainName, dataplaneHashes := range dataplaneHashes {
logCxt := t.logCxt.WithField("chainName", chainName)
if t.dirtyChains.Contains(chainName) || t.dirtyInserts.Contains(chainName) {
// Already an update pending for this chain.
logCxt.Debug("Skipping known-dirty chain")
continue
}
if _, ok := t.chainToDataplaneHashes[chainName]; ok {
// Chain expected, we'll have checked its contents above.
logCxt.Debug("Skipping expected chain")
continue
}
if !t.ourChainsRegexp.MatchString(chainName) {
// Non-calico chain that is not tracked in chainToDataplaneHashes. We
// haven't seen the chain before and we haven't been asked to insert
			// anything into it. Check that it doesn't have any rule insertions in it
// from a previous run of Felix.
for _, hash := range dataplaneHashes {
if hash != "" {
logCxt.Info("Found unexpected insert, marking for cleanup")
t.dirtyInserts.Add(chainName)
break
}
}
continue
}
// Chain exists in dataplane but not in memory, mark as dirty so we'll clean it up.
logCxt.Info("Found unexpected chain, marking for cleanup")
t.dirtyChains.Add(chainName)
}
t.logCxt.Debug("Finished loading iptables state")
t.chainToDataplaneHashes = dataplaneHashes
t.inSyncWithDataPlane = true
}
// expectedHashesForInsertChain calculates the expected hashes for a whole top-level chain
// given our inserts. If we're in append mode, that consists of numNonCalicoRules empty strings
// followed by our hashes; in insert mode, the opposite way round. To avoid recalculation, it
// returns the rule hashes as a second output.
func (t *Table) expectedHashesForInsertChain(
chainName string,
numNonCalicoRules int,
) (allHashes, ourHashes []string) {
insertedRules := t.chainToInsertedRules[chainName]
allHashes = make([]string, len(insertedRules)+numNonCalicoRules)
ourHashes = calculateRuleInsertHashes(chainName, insertedRules)
offset := 0
if t.insertMode == "append" {
log.Debug("In append mode, returning our hashes at end.")
offset = numNonCalicoRules
}
for i, hash := range ourHashes {
allHashes[i+offset] = hash
}
return
}
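// Worked example (added for clarity, not part of the original file): suppose a kernel
// chain already holds two non-Calico rules and we want to insert two rules of our own
// whose hashes are "h1" and "h2". Then expectedHashesForInsertChain returns:
//
//	insert mode: allHashes = ["h1", "h2", "", ""]   (our rules pushed to the top)
//	append mode: allHashes = ["", "", "h1", "h2"]   (our rules after the others)
//
// with ourHashes = ["h1", "h2"] in both cases; the empty strings stand for the
// non-Calico rules, which never carry one of our hash comments.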
// getHashesFromDataplane loads the current state of our table and parses out the hashes that we
// add to rules. It returns a map with an entry for each chain in the table. Each entry is a slice
// containing the hashes for the rules in that chain. Rules with no hashes are represented by
// an empty string.
func (t *Table) getHashesFromDataplane() map[string][]string {
retries := 3
retryDelay := 100 * time.Millisecond
// Retry a few times before we panic. This deals with any transient errors and it prevents
// us from spamming a panic into the log when we're being gracefully shut down by a SIGTERM.
for {
cmd := t.newCmd(t.iptablesSaveCmd, "-t", t.Name)
countNumSaveCalls.Inc()
output, err := cmd.Output()
if err != nil {
countNumSaveErrors.Inc()
t.logCxt.WithError(err).Warnf("%s command failed", t.iptablesSaveCmd)
if retries > 0 {
retries--
t.timeSleep(retryDelay)
retryDelay *= 2
} else {
t.logCxt.Panicf("%s command failed after retries", t.iptablesSaveCmd)
}
continue
}
buf := bytes.NewBuffer(output)
return t.getHashesFromBuffer(buf)
}
}
// getHashesFromBuffer parses a buffer containing iptables-save output for this table, extracting
// our rule hashes. Entries in the returned map are indexed by chain name. For rules that we
// wrote, the hash is extracted from a comment that we added to the rule. For rules written by
// previous versions of Felix, it returns a dummy non-empty value. For rules not written by Felix,
// it returns an empty string. Hence, the lengths of the returned slices equal the lengths of the
// chains, whether the rules were written by Felix or not.
func (t *Table) getHashesFromBuffer(buf *bytes.Buffer) map[string][]string {
newHashes := map[string][]string{}
for {
// Read the next line of the output.
line, err := buf.ReadString('\n')
if err != nil { // EOF
break
}
// Look for lines of the form ":chain-name - [0:0]", which are forward declarations
// for (possibly empty) chains.
logCxt := t.logCxt.WithField("line", line)
logCxt.Debug("Parsing line")
captures := chainCreateRegexp.FindStringSubmatch(line)
if captures != nil {
// Chain forward-reference, make sure the chain exists.
chainName := captures[1]
logCxt.WithField("chainName", chainName).Debug("Found forward-reference")
newHashes[chainName] = []string{}
continue
}
// Look for append lines, such as "-A chain-name -m foo --foo bar"; these are the
// actual rules.
captures = appendRegexp.FindStringSubmatch(line)
if captures == nil {
// Skip any non-append lines.
logCxt.Debug("Not an append, skipping")
continue
}
chainName := captures[1]
// Look for one of our hashes on the rule. We record a zero hash for unknown rules
// so that they get cleaned up. Note: we're implicitly capturing the first match
// of the regex. When writing the rules, we ensure that the hash is written as the
// first comment.
hash := ""
captures = t.hashCommentRegexp.FindStringSubmatch(line)
if captures != nil {
hash = captures[1]
logCxt.WithField("hash", hash).Debug("Found hash in rule")
} else if t.oldInsertRegexp.FindString(line) != "" {
logCxt.WithFields(log.Fields{
"rule": line,
"chainName": chainName,
}).Info("Found inserted rule from previous Felix version, marking for cleanup.")
hash = "OLD INSERT RULE"
}
newHashes[chainName] = append(newHashes[chainName], hash)
}
t.logCxt.Debugf("Read hashes from dataplane: %#v", newHashes)
return newHashes
}
func (t *Table) InvalidateDataplaneCache(reason string) {
logCxt := t.logCxt.WithField("reason", reason)
if !t.inSyncWithDataPlane {
logCxt.Debug("Would invalidate dataplane cache but it was already invalid.")
return
}
logCxt.Info("Invalidating dataplane cache")
t.inSyncWithDataPlane = false
}
func (t *Table) Apply() (rescheduleAfter time.Duration) {
now := t.timeNow()
// We _think_ we're in sync, check if there are any reasons to think we might
// not be in sync.
lastReadToNow := now.Sub(t.lastReadTime)
invalidated := false
if t.refreshInterval > 0 && lastReadToNow > t.refreshInterval {
// Too long since we've forced a refresh.
t.InvalidateDataplaneCache("refresh timer")
invalidated = true
}
	// To work around the possibility of another process clobbering our updates, we refresh the
// dataplane after we do a write at exponentially increasing intervals. We do a refresh
// if the delta from the last write to now is twice the delta from the last read.
for t.postWriteInterval != 0 &&
t.postWriteInterval < time.Hour &&
!now.Before(t.lastWriteTime.Add(t.postWriteInterval)) {
t.postWriteInterval *= 2
t.logCxt.WithField("newPostWriteInterval", t.postWriteInterval).Debug("Updating post-write interval")
if !invalidated {
t.InvalidateDataplaneCache("post update")
invalidated = true
}
}
// Retry until we succeed. There are several reasons that updating iptables may fail:
//
// - A concurrent write may invalidate iptables-restore's compare-and-swap; this manifests
// as a failure on the COMMIT line.
// - Another process may have clobbered some of our state, resulting in inconsistencies
// in what we try to program. This could manifest in a number of ways depending on what
// the other process did.
// - Random transient failure.
//
// It's also possible that we're bugged and trying to write bad data so we give up
// eventually.
retries := 10
backoffTime := 1 * time.Millisecond
failedAtLeastOnce := false
for {
if !t.inSyncWithDataPlane {
// We have reason to believe that our picture of the dataplane is out of
// sync. Refresh it. This may mark more chains as dirty.
t.loadDataplaneState()
}
if err := t.applyUpdates(); err != nil {
if retries > 0 {
retries--
t.logCxt.WithError(err).Warn("Failed to program iptables, will retry")
t.timeSleep(backoffTime)
backoffTime *= 2
t.logCxt.WithError(err).Warn("Retrying...")
failedAtLeastOnce = true
continue
} else {
t.logCxt.WithError(err).Error("Failed to program iptables, loading diags before panic.")
cmd := t.newCmd(t.iptablesSaveCmd, "-t", t.Name)
output, err2 := cmd.Output()
if err2 != nil {
t.logCxt.WithError(err2).Error("Failed to load iptables state")
} else {
t.logCxt.WithField("iptablesState", string(output)).Error("Current state of iptables")
}
t.logCxt.WithError(err).Panic("Failed to program iptables, giving up after retries")
}
}
if failedAtLeastOnce {
t.logCxt.Warn("Succeeded after retry.")
}
break
}
t.gaugeNumChains.Set(float64(len(t.chainNameToChain)))
// Check whether we need to be rescheduled and how soon.
if t.refreshInterval > 0 {
// Refresh interval is set, start with that.
lastReadToNow = now.Sub(t.lastReadTime)
rescheduleAfter = t.refreshInterval - lastReadToNow
}
if t.postWriteInterval < time.Hour {
postWriteReched := t.lastWriteTime.Add(t.postWriteInterval).Sub(now)
if postWriteReched <= 0 {
rescheduleAfter = 1 * time.Millisecond
} else if t.refreshInterval <= 0 || postWriteReched < rescheduleAfter {
rescheduleAfter = postWriteReched
}
}
return
}
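// Worked example (added for clarity, not part of the original file): after a successful
// write, postWriteInterval is reset to 50ms, so subsequent calls to Apply() force a
// re-read of the dataplane roughly 50ms, then 100ms, then 200ms, ... after the write,
// with the interval doubling each time it expires and the re-checks stopping once the
// interval reaches an hour. This is the exponential post-write refresh described in the
// comments inside Apply() above.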
func (t *Table) applyUpdates() error {
var inputBuf bytes.Buffer
// iptables-restore input starts with a line indicating the table name.
tableNameLine := fmt.Sprintf("*%s\n", t.Name)
inputBuf.WriteString(tableNameLine)
// Make a pass over the dirty chains and generate a forward reference for any that need to
// be created or flushed.
t.dirtyChains.Iter(func(item interface{}) error {
chainName := item.(string)
chainNeedsToBeFlushed := false
if _, ok := t.chainNameToChain[chainName]; !ok {
// About to delete this chain, flush it first to sever dependencies.
chainNeedsToBeFlushed = true
} else if _, ok := t.chainToDataplaneHashes[chainName]; !ok {
// Chain doesn't exist in dataplane, mark it for creation.
chainNeedsToBeFlushed = true
}
if chainNeedsToBeFlushed {
inputBuf.WriteString(fmt.Sprintf(":%s - -\n", chainName))
t.countNumLinesExecuted.Inc()
}
return nil
})
// Make a second pass over the dirty chains. This time, we write out the rule changes.
newHashes := map[string][]string{}
t.dirtyChains.Iter(func(item interface{}) error {
chainName := item.(string)
if chain, ok := t.chainNameToChain[chainName]; ok {
// Chain update or creation. Scan the chain against its previous hashes
// and replace/append/delete as appropriate.
previousHashes := t.chainToDataplaneHashes[chainName]
currentHashes := chain.RuleHashes()
newHashes[chainName] = currentHashes
for i := 0; i < len(previousHashes) || i < len(currentHashes); i++ {
var line string
if i < len(previousHashes) && i < len(currentHashes) {
if previousHashes[i] == currentHashes[i] {
continue
}
// Hash doesn't match, replace the rule.
ruleNum := i + 1 // 1-indexed.
prefixFrag := t.commentFrag(currentHashes[i])
line = chain.Rules[i].RenderReplace(chainName, ruleNum, prefixFrag)
} else if i < len(previousHashes) {
// previousHashes was longer, remove the old rules from the end.
ruleNum := len(currentHashes) + 1 // 1-indexed
line = deleteRule(chainName, ruleNum)
} else {
// currentHashes was longer. Append.
prefixFrag := t.commentFrag(currentHashes[i])
line = chain.Rules[i].RenderAppend(chainName, prefixFrag)
}
inputBuf.WriteString(line)
inputBuf.WriteString("\n")
t.countNumLinesExecuted.Inc()
}
}
return nil // Delay clearing the set until we've programmed iptables.
})
// Now calculate iptables updates for our inserted rules, which are used to hook top-level
// chains.
t.dirtyInserts.Iter(func(item interface{}) error {
chainName := item.(string)
previousHashes := t.chainToDataplaneHashes[chainName]
// Calculate the hashes for our inserted rules.
newChainHashes, newRuleHashes := t.expectedHashesForInsertChain(
chainName, numEmptyStrings(previousHashes))
if reflect.DeepEqual(newChainHashes, previousHashes) {
// Chain is in sync, skip to next one.
return nil
}
// For simplicity, if we've discovered that we're out-of-sync, remove all our
// rules from this chain, then re-insert/re-append them below.
//
// Remove in reverse order so that we don't disturb the rule numbers of rules we're
// about to remove.
for i := len(previousHashes) - 1; i >= 0; i-- {
if previousHashes[i] != "" {
ruleNum := i + 1
line := deleteRule(chainName, ruleNum)
inputBuf.WriteString(line)
inputBuf.WriteString("\n")
t.countNumLinesExecuted.Inc()
}
}
rules := t.chainToInsertedRules[chainName]
if t.insertMode == "insert" {
t.logCxt.Debug("Rendering insert rules.")
// Since each insert is pushed onto the top of the chain, do the inserts in
// reverse order so that they end up in the correct order in the final
// state of the chain.
for i := len(rules) - 1; i >= 0; i-- {
prefixFrag := t.commentFrag(newRuleHashes[i])
line := rules[i].RenderInsert(chainName, prefixFrag)
inputBuf.WriteString(line)
inputBuf.WriteString("\n")
t.countNumLinesExecuted.Inc()
}
} else {
t.logCxt.Debug("Rendering append rules.")
for i := 0; i < len(rules); i++ {
prefixFrag := t.commentFrag(newRuleHashes[i])
line := rules[i].RenderAppend(chainName, prefixFrag)
inputBuf.WriteString(line)
inputBuf.WriteString("\n")
t.countNumLinesExecuted.Inc()
}
}
newHashes[chainName] = newChainHashes
return nil // Delay clearing the set until we've programmed iptables.
})
// Do deletions at the end. This ensures that we don't try to delete any chains that
// are still referenced (because we'll have removed the references in the modify pass
// above). Note: if a chain is being deleted at the same time as a chain that it refers to
// then we'll issue a create+flush instruction in the very first pass, which will sever the
// references.
t.dirtyChains.Iter(func(item interface{}) error {
chainName := item.(string)
if _, ok := t.chainNameToChain[chainName]; !ok {
// Chain deletion
inputBuf.WriteString(fmt.Sprintf("--delete-chain %s\n", chainName))
t.countNumLinesExecuted.Inc()
newHashes[chainName] = nil
}
return nil // Delay clearing the set until we've programmed iptables.
})
if inputBuf.Len() > len(tableNameLine) {
// We've figured out that we need to make some changes, finish off the input then
// execute iptables-restore. iptables-restore input ends with a COMMIT.
inputBuf.WriteString("COMMIT\n")
// Annoying to have to copy the buffer here but reading from a buffer is
// destructive so if we want to trace out the contents after a failure, we have to
// take a copy.
input := inputBuf.String()
t.logCxt.WithField("iptablesInput", input).Debug("Writing to iptables")
var outputBuf, errBuf bytes.Buffer
cmd := t.newCmd(t.iptablesRestoreCmd, "--noflush", "--verbose")
cmd.SetStdin(&inputBuf)
cmd.SetStdout(&outputBuf)
cmd.SetStderr(&errBuf)
countNumRestoreCalls.Inc()
err := cmd.Run()
if err != nil {
t.logCxt.WithFields(log.Fields{
"output": outputBuf.String(),
"errorOutput": errBuf.String(),
"error": err,
"input": input,
}).Warn("Failed to execute ip(6)tables-restore command")
t.inSyncWithDataPlane = false
countNumRestoreErrors.Inc()
return err
}
t.lastWriteTime = t.timeNow()
t.postWriteInterval = 50 * time.Millisecond
}
// Now we've successfully updated iptables, clear the dirty sets. We do this even if we
// found there was nothing to do above, since we may have found out that a dirty chain
// was actually a no-op update.
t.dirtyChains = set.New()
t.dirtyInserts = set.New()
// Store off the updates.
for chainName, hashes := range newHashes {
if hashes == nil {
delete(t.chainToDataplaneHashes, chainName)
} else {
t.chainToDataplaneHashes[chainName] = hashes
}
}
return nil
}
func (t *Table) commentFrag(hash string) string {
return fmt.Sprintf(`-m comment --comment "%s%s"`, t.hashCommentPrefix, hash)
}
func deleteRule(chainName string, ruleNum int) string {
return fmt.Sprintf("-D %s %d", chainName, ruleNum)
}
func calculateRuleInsertHashes(chainName string, rules []Rule) []string {
chain := Chain{
Name: chainName,
Rules: rules,
}
return (&chain).RuleHashes()
}
func numEmptyStrings(strs []string) int {
count := 0
for _, s := range strs {
if s == "" {
count++
}
}
return count
}
| 1 | 15,436 | This is really a minimum, isn't it, so `minPostWriteInterval` perhaps? IIUC it never gets used as a default, because the default IptablesPostWriteCheckInterval is 1s, which is larger than this. | projectcalico-felix | go |
@@ -1,11 +1,9 @@
<ul class="bookmarkTools nav nav-pills">
- <li class="cite">
- <%= link_to t('blacklight.tools.cite'), citation_catalog_path(:sort=>params[:sort], :per_page=>params[:per_page], :id => @response.documents.map {|doc| doc.id}), {:id => 'citeLink', :name => 'citation', :class => 'btn btn-default', :data => {:ajax_modal => "trigger"}} %>
- </li>
-
- <li class="email">
- <%= link_to t('blacklight.tools.email'), email_catalog_path(:sort=>params[:sort], :per_page=>params[:per_page], :id => @response.documents.map {|doc| doc.id}), :class=>"btn btn-default", :id => "emailLink", :data => {:ajax_modal => "trigger"} %>
- </li>
+ <% BookmarksController.document_actions.each do |action| %>
+ <li class="#{action}">
+ <%= link_to t("blacklight.tools.#{action}"), self.send("#{action}_bookmarks_path"), {:id => "#{action}Link", :name => action, :class => 'btn btn-default', :data => {:ajax_modal => "trigger"}} %>
+ </li>
+ <% end %>
<%= render_marc_tools %>
</ul> | 1 | <ul class="bookmarkTools nav nav-pills">
<li class="cite">
<%= link_to t('blacklight.tools.cite'), citation_catalog_path(:sort=>params[:sort], :per_page=>params[:per_page], :id => @response.documents.map {|doc| doc.id}), {:id => 'citeLink', :name => 'citation', :class => 'btn btn-default', :data => {:ajax_modal => "trigger"}} %>
</li>
<li class="email">
<%= link_to t('blacklight.tools.email'), email_catalog_path(:sort=>params[:sort], :per_page=>params[:per_page], :id => @response.documents.map {|doc| doc.id}), :class=>"btn btn-default", :id => "emailLink", :data => {:ajax_modal => "trigger"} %>
</li>
<%= render_marc_tools %>
</ul>
| 1 | 5,536 | I wonder if you can use the controller's context instead of using it explicitly? | projectblacklight-blacklight | rb |
@@ -94,6 +94,7 @@ func (e *Executor) ensureCanaryClean(ctx context.Context) model.StageStatus {
resources := strings.Split(value, ",")
if err := e.removeCanaryResources(ctx, resources); err != nil {
+ e.LogPersister.AppendErrorf("Unable to remove canary resources: %v", err)
return model.StageStatus_STAGE_FAILURE
}
return model.StageStatus_STAGE_SUCCESS | 1 | // Copyright 2020 The PipeCD Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package kubernetes
import (
"context"
"fmt"
"strings"
provider "github.com/pipe-cd/pipe/pkg/app/piped/cloudprovider/kubernetes"
"github.com/pipe-cd/pipe/pkg/config"
"github.com/pipe-cd/pipe/pkg/model"
)
const (
canaryVariant = "canary"
addedCanaryResourcesMetadataKey = "canary-resources"
)
func (e *Executor) ensureCanaryRollout(ctx context.Context) model.StageStatus {
var (
commitHash = e.Deployment.Trigger.Commit.Hash
options = e.StageConfig.K8sCanaryRolloutStageOptions
)
if options == nil {
e.LogPersister.AppendErrorf("Malformed configuration for stage %s", e.Stage.Name)
return model.StageStatus_STAGE_FAILURE
}
// Load the manifests at the triggered commit.
e.LogPersister.AppendInfof("Loading manifests at commit %s for handling", commitHash)
manifests, err := e.loadManifests(ctx)
if err != nil {
e.LogPersister.AppendErrorf("Failed while loading manifests (%v)", err)
return model.StageStatus_STAGE_FAILURE
}
e.LogPersister.AppendSuccessf("Successfully loaded %d manifests", len(manifests))
if len(manifests) == 0 {
e.LogPersister.AppendError("This application has no Kubernetes manifests to handle")
return model.StageStatus_STAGE_FAILURE
}
// Find and generate workload & service manifests for CANARY variant.
canaryManifests, err := e.generateCanaryManifests(manifests, *options)
if err != nil {
e.LogPersister.AppendErrorf("Unable to generate manifests for CANARY variant (%v)", err)
return model.StageStatus_STAGE_FAILURE
}
// Add builtin annotations for tracking application live state.
e.addBuiltinAnnontations(canaryManifests, canaryVariant, commitHash)
// Store added resource keys into metadata for cleaning later.
addedResources := make([]string, 0, len(canaryManifests))
for _, m := range canaryManifests {
addedResources = append(addedResources, m.Key.String())
}
metadata := strings.Join(addedResources, ",")
err = e.MetadataStore.Set(ctx, addedCanaryResourcesMetadataKey, metadata)
if err != nil {
e.LogPersister.AppendErrorf("Unable to save deployment metadata (%v)", err)
return model.StageStatus_STAGE_FAILURE
}
// Start rolling out the resources for CANARY variant.
e.LogPersister.AppendInfo("Start rolling out CANARY variant...")
if err := e.applyManifests(ctx, canaryManifests); err != nil {
return model.StageStatus_STAGE_FAILURE
}
e.LogPersister.AppendSuccess("Successfully rolled out CANARY variant")
return model.StageStatus_STAGE_SUCCESS
}
func (e *Executor) ensureCanaryClean(ctx context.Context) model.StageStatus {
value, ok := e.MetadataStore.Get(addedCanaryResourcesMetadataKey)
if !ok {
e.LogPersister.AppendError("Unable to determine the applied CANARY resources")
return model.StageStatus_STAGE_FAILURE
}
resources := strings.Split(value, ",")
if err := e.removeCanaryResources(ctx, resources); err != nil {
return model.StageStatus_STAGE_FAILURE
}
return model.StageStatus_STAGE_SUCCESS
}
func (e *Executor) removeCanaryResources(ctx context.Context, resources []string) error {
if len(resources) == 0 {
return nil
}
var (
workloadKeys = make([]provider.ResourceKey, 0)
serviceKeys = make([]provider.ResourceKey, 0)
)
for _, r := range resources {
key, err := provider.DecodeResourceKey(r)
if err != nil {
e.LogPersister.AppendErrorf("Had an error while decoding CANARY resource key: %s, %v", r, err)
continue
}
if key.IsWorkload() {
workloadKeys = append(workloadKeys, key)
} else {
serviceKeys = append(serviceKeys, key)
}
}
// We delete the service first to close all incoming connections.
e.LogPersister.AppendInfo("Starting finding and deleting service resources of CANARY variant")
if err := e.deleteResources(ctx, serviceKeys); err != nil {
return err
}
// Next, delete all workloads.
e.LogPersister.AppendInfo("Starting finding and deleting workload resources of CANARY variant")
if err := e.deleteResources(ctx, workloadKeys); err != nil {
return err
}
return nil
}
func (e *Executor) generateCanaryManifests(manifests []provider.Manifest, opts config.K8sCanaryRolloutStageOptions) ([]provider.Manifest, error) {
var (
workloadKind, workloadName string
serviceName string
generateService bool
canaryManifests []provider.Manifest
suffix = canaryVariant
)
// Apply the specified configuration if they are present.
if sc := e.config.CanaryVariant; sc != nil {
var ok bool
if sc.Suffix != "" {
suffix = sc.Suffix
}
generateService = sc.Service.Create
workloadKind, workloadName, ok = config.ParseVariantResourceReference(sc.Workload.Reference)
if !ok {
return nil, fmt.Errorf("malformed workload reference: %s", sc.Workload.Reference)
}
_, serviceName, ok = config.ParseVariantResourceReference(sc.Service.Reference)
if !ok {
return nil, fmt.Errorf("malformed service reference: %s", sc.Service.Reference)
}
}
if workloadKind == "" {
workloadKind = provider.KindDeployment
}
workloads := findManifests(workloadKind, workloadName, manifests)
if len(workloads) == 0 {
return nil, fmt.Errorf("unable to find any workload manifests for CANARY variant")
}
// Find service manifests and duplicate them for CANARY variant.
if generateService {
services := findManifests(provider.KindService, serviceName, manifests)
if len(services) == 0 {
return nil, fmt.Errorf("unable to find any service for name=%q", serviceName)
}
		// Because the loaded manifests are read-only,
		// we duplicate them to avoid updating the shared manifest data in the cache.
services = duplicateManifests(services, "")
generatedServices, err := generateVariantServiceManifests(services, canaryVariant, suffix)
if err != nil {
return nil, err
}
canaryManifests = append(canaryManifests, generatedServices...)
}
// Find config map manifests and duplicate them for CANARY variant.
configMaps := findConfigMapManifests(manifests)
configMaps = duplicateManifests(configMaps, suffix)
canaryManifests = append(canaryManifests, configMaps...)
// Find secret manifests and duplicate them for CANARY variant.
secrets := findSecretManifests(manifests)
secrets = duplicateManifests(secrets, suffix)
canaryManifests = append(canaryManifests, secrets...)
// Generate new workload manifests for CANARY variant.
// The generated ones will mount to the new ConfigMaps and Secrets.
replicasCalculator := func(cur *int32) int32 {
if cur == nil {
return 1
}
num := opts.Replicas.Calculate(int(*cur), 1)
return int32(num)
}
// We don't need to duplicate the workload manifests
// because generateVariantWorkloadManifests function is already making a duplicate while decoding.
// workloads = duplicateManifests(workloads, suffix)
generatedWorkloads, err := generateVariantWorkloadManifests(workloads, configMaps, secrets, canaryVariant, suffix, replicasCalculator)
if err != nil {
return nil, err
}
canaryManifests = append(canaryManifests, generatedWorkloads...)
return canaryManifests, nil
}
| 1 | 8,483 | Can you add this log for `baseline` too? | pipe-cd-pipe | go |
@@ -148,6 +148,10 @@ Blockly.Msg.CANNOT_DELETE_VARIABLE_PROCEDURE = 'Can\'t delete the variable "%1"
/// dropdown choice - Delete the currently selected variable.
Blockly.Msg.DELETE_VARIABLE = 'Delete the "%1" variable';
+// Broadcast Message creation
+/// dropdown choice - Create a new message.
+Blockly.Msg.NEW_MESSAGE = 'New message...';
+
// Colour Blocks.
/// url - Information about colour.
Blockly.Msg.COLOUR_PICKER_HELPURL = 'https://en.wikipedia.org/wiki/Color'; | 1 | /**
* @license
* Visual Blocks Language
*
* Copyright 2012 Google Inc.
* https://developers.google.com/blockly/
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @fileoverview English strings.
* @author [email protected] (Neil Fraser)
*
* After modifying this file, either run "build.py" from the parent directory,
* or run (from this directory):
* ../i18n/js_to_json.py
* to regenerate json/{en,qqq,synonyms}.json.
*
* To convert all of the json files to .js files, run:
* ../i18n/create_messages.py json/*.json
*/
'use strict';
goog.provide('Blockly.Msg.en');
goog.require('Blockly.Msg');
/**
* Due to the frequency of long strings, the 80-column wrap rule need not apply
* to message files.
*/
/**
 * Each message is preceded by a triple-slash comment that becomes the
* message descriptor. The build process extracts these descriptors, adds
* them to msg/json/qqq.json, and they show up in the translation console.
*/
/// {{Notranslate}} Hue value for all logic blocks.
Blockly.Msg.LOGIC_HUE = '210';
/// {{Notranslate}} Hue value for all loop blocks.
Blockly.Msg.LOOPS_HUE = '120';
/// {{Notranslate}} Hue value for all math blocks.
Blockly.Msg.MATH_HUE = '230';
/// {{Notranslate}} Hue value for all text blocks.
Blockly.Msg.TEXTS_HUE = '160';
/// {{Notranslate}} Hue value for all list blocks.
Blockly.Msg.LISTS_HUE = '260';
/// {{Notranslate}} Hue value for all colour blocks.
Blockly.Msg.COLOUR_HUE = '20';
/// {{Notranslate}} Hue value for all variable blocks.
Blockly.Msg.VARIABLES_HUE = '330';
/// {{Notranslate}} Hue value for all procedure blocks.
Blockly.Msg.PROCEDURES_HUE = '290';
/// default name - A simple, general default name for a variable, preferably short.
/// For more context, see
/// [[Translating:Blockly#infrequent_message_types]].\n{{Identical|Item}}
Blockly.Msg.VARIABLES_DEFAULT_NAME = 'item';
/// button text - Button that sets a calendar to today's date.\n{{Identical|Today}}
Blockly.Msg.TODAY = 'Today';
// Context menus.
/// context menu - Make a copy of the selected block (and any blocks it contains).\n{{Identical|Duplicate}}
Blockly.Msg.DUPLICATE_BLOCK = 'Duplicate';
/// context menu - Add a descriptive comment to the selected block.
Blockly.Msg.ADD_COMMENT = 'Add Comment';
/// context menu - Remove the descriptive comment from the selected block.
Blockly.Msg.REMOVE_COMMENT = 'Remove Comment';
/// context menu - Change from 'external' to 'inline' mode for displaying blocks used as inputs to the selected block. See [[Translating:Blockly#context_menus]].
Blockly.Msg.EXTERNAL_INPUTS = 'External Inputs';
/// context menu - Change from 'internal' to 'external' mode for displaying blocks used as inputs to the selected block. See [[Translating:Blockly#context_menus]].
Blockly.Msg.INLINE_INPUTS = 'Inline Inputs';
/// context menu - Permanently delete the selected block.
Blockly.Msg.DELETE_BLOCK = 'Delete Block';
/// context menu - Permanently delete the %1 selected blocks.\n\nParameters:\n* %1 - an integer greater than 1.
Blockly.Msg.DELETE_X_BLOCKS = 'Delete %1 Blocks';
/// confirmation prompt - Question the user if they really wanted to permanently delete all %1 blocks.\n\nParameters:\n* %1 - an integer greater than 1.
Blockly.Msg.DELETE_ALL_BLOCKS = 'Delete all %1 blocks?';
/// context menu - Reposition all the blocks so that they form a neat line.
Blockly.Msg.CLEAN_UP = 'Clean up Blocks';
/// context menu - Make the appearance of the selected block smaller by hiding some information about it.
Blockly.Msg.COLLAPSE_BLOCK = 'Collapse Block';
/// context menu - Make the appearance of all blocks smaller by hiding some information about them. Use the same terminology as in the previous message.
Blockly.Msg.COLLAPSE_ALL = 'Collapse Blocks';
/// context menu - Restore the appearance of the selected block by showing information about it that was hidden (collapsed) earlier.
Blockly.Msg.EXPAND_BLOCK = 'Expand Block';
/// context menu - Restore the appearance of all blocks by showing information about them that was hidden (collapsed) earlier. Use the same terminology as in the previous message.
Blockly.Msg.EXPAND_ALL = 'Expand Blocks';
/// context menu - Make the selected block have no effect (unless reenabled).
Blockly.Msg.DISABLE_BLOCK = 'Disable Block';
/// context menu - Make the selected block have effect (after having been disabled earlier).
Blockly.Msg.ENABLE_BLOCK = 'Enable Block';
/// context menu - Provide helpful information about the selected block.\n{{Identical|Help}}
Blockly.Msg.HELP = 'Help';
/// context menu - Undo the previous action.\n{{Identical|Undo}}
Blockly.Msg.UNDO = 'Undo';
/// context menu - Undo the previous undo action.\n{{Identical|Redo}}
Blockly.Msg.REDO = 'Redo';
/// context menu - Edit the currently selected procedure.
Blockly.Msg.EDIT_PROCEDURE = 'Edit';
// TODO(#1136): Pick text for this.
/// context menu - Bring the definition of the procedure into view.
Blockly.Msg.SHOW_PROCEDURE_DEFINITION = 'Go to definition';
// Variable renaming.
/// prompt - This message is only seen in the Opera browser. With most browsers, users can edit numeric values in blocks by just clicking and typing. Opera does not allow this, so we have to open a new window and prompt users with this message to change a value.
Blockly.Msg.CHANGE_VALUE_TITLE = 'Change value:';
/// dropdown choice - When the user clicks on a variable block, this is one of the dropdown menu choices. It is used to rename the current variable. See [https://github.com/google/blockly/wiki/Variables#dropdown-menu https://github.com/google/blockly/wiki/Variables#dropdown-menu].
Blockly.Msg.RENAME_VARIABLE = 'Rename variable...';
/// prompt - Prompts the user to enter the new name for the selected variable. See [https://github.com/google/blockly/wiki/Variables#dropdown-menu https://github.com/google/blockly/wiki/Variables#dropdown-menu].\n\nParameters:\n* %1 - the name of the variable to be renamed.
Blockly.Msg.RENAME_VARIABLE_TITLE = 'Rename all "%1" variables to:';
// Variable creation
/// button text - Text on the button used to launch the variable creation dialogue.
Blockly.Msg.NEW_VARIABLE = 'Create variable...';
/// prompt - Prompts the user to enter the name for a new variable. See [https://github.com/google/blockly/wiki/Variables#dropdown-menu https://github.com/google/blockly/wiki/Variables#dropdown-menu].
Blockly.Msg.NEW_VARIABLE_TITLE = 'New variable name:';
/// alert - Tells the user that the name they entered is already in use.
Blockly.Msg.VARIABLE_ALREADY_EXISTS = 'A variable named "%1" already exists.';
/// alert - Tells the user that the name they entered is already in use for another type.
Blockly.Msg.VARIABLE_ALREADY_EXISTS_FOR_ANOTHER_TYPE = 'A variable named "%1" already exists for another variable of type "%2".';
/// alert - Tells the user that the name they entered is already in use for a procedure.
Blockly.Msg.PROCEDURE_ALREADY_EXISTS = 'A procedure named "%1" already exists.';
// List creation
/// button text - Text on the button used to launch the list creation dialogue.
Blockly.Msg.NEW_LIST = 'Create list...';
// Variable deletion.
/// confirm - Ask the user to confirm their deletion of multiple uses of a variable.
Blockly.Msg.DELETE_VARIABLE_CONFIRMATION = 'Delete %1 uses of the "%2" variable?';
/// alert - Tell the user that they can't delete a variable because it's part of the definition of a function.
Blockly.Msg.CANNOT_DELETE_VARIABLE_PROCEDURE = 'Can\'t delete the variable "%1" because it\'s part of the definition of the function "%2"';
/// dropdown choice - Delete the currently selected variable.
Blockly.Msg.DELETE_VARIABLE = 'Delete the "%1" variable';
// Colour Blocks.
/// url - Information about colour.
Blockly.Msg.COLOUR_PICKER_HELPURL = 'https://en.wikipedia.org/wiki/Color';
/// tooltip - See [https://github.com/google/blockly/wiki/Colour#picking-a-colour-from-a-palette https://github.com/google/blockly/wiki/Colour#picking-a-colour-from-a-palette].
Blockly.Msg.COLOUR_PICKER_TOOLTIP = 'Choose a colour from the palette.';
/// url - A link that displays a random colour each time you visit it.
Blockly.Msg.COLOUR_RANDOM_HELPURL = 'http://randomcolour.com';
/// block text - Title of block that generates a colour at random.
Blockly.Msg.COLOUR_RANDOM_TITLE = 'random colour';
/// tooltip - See [https://github.com/google/blockly/wiki/Colour#generating-a-random-colour https://github.com/google/blockly/wiki/Colour#generating-a-random-colour].
Blockly.Msg.COLOUR_RANDOM_TOOLTIP = 'Choose a colour at random.';
/// url - A link for color codes with percentages (0-100%) for each component, instead of the more common 0-255, which may be more difficult for beginners.
Blockly.Msg.COLOUR_RGB_HELPURL = 'http://www.december.com/html/spec/colorper.html';
/// block text - Title of block for [https://github.com/google/blockly/wiki/Colour#creating-a-colour-from-red-green-and-blue-components https://github.com/google/blockly/wiki/Colour#creating-a-colour-from-red-green-and-blue-components].
Blockly.Msg.COLOUR_RGB_TITLE = 'colour with';
/// block input text - The amount of red (from 0 to 100) to use when [https://github.com/google/blockly/wiki/Colour#creating-a-colour-from-red-green-and-blue-components https://github.com/google/blockly/wiki/Colour#creating-a-colour-from-red-green-and-blue-components].\n{{Identical|Red}}
Blockly.Msg.COLOUR_RGB_RED = 'red';
/// block input text - The amount of green (from 0 to 100) to use when [https://github.com/google/blockly/wiki/Colour#creating-a-colour-from-red-green-and-blue-components https://github.com/google/blockly/wiki/Colour#creating-a-colour-from-red-green-and-blue-components].
Blockly.Msg.COLOUR_RGB_GREEN = 'green';
/// block input text - The amount of blue (from 0 to 100) to use when [https://github.com/google/blockly/wiki/Colour#creating-a-colour-from-red-green-and-blue-components https://github.com/google/blockly/wiki/Colour#creating-a-colour-from-red-green-and-blue-components].\n{{Identical|Blue}}
Blockly.Msg.COLOUR_RGB_BLUE = 'blue';
/// tooltip - See [https://github.com/google/blockly/wiki/Colour#creating-a-colour-from-red-green-and-blue-components https://github.com/google/blockly/wiki/Colour#creating-a-colour-from-red-green-and-blue-components].
Blockly.Msg.COLOUR_RGB_TOOLTIP = 'Create a colour with the specified amount of red, green, and blue. All values must be between 0 and 100.';
/// url - A useful link that displays blending of two colors.
Blockly.Msg.COLOUR_BLEND_HELPURL = 'http://meyerweb.com/eric/tools/color-blend/';
/// block text - A verb for blending two shades of paint.
Blockly.Msg.COLOUR_BLEND_TITLE = 'blend';
/// block input text - The first of two colours to [https://github.com/google/blockly/wiki/Colour#blending-colours blend].
Blockly.Msg.COLOUR_BLEND_COLOUR1 = 'colour 1';
/// block input text - The second of two colours to [https://github.com/google/blockly/wiki/Colour#blending-colours blend].
Blockly.Msg.COLOUR_BLEND_COLOUR2 = 'colour 2';
/// block input text - The proportion of the [https://github.com/google/blockly/wiki/Colour#blending-colours blend] containing the first color; the remaining proportion is of the second colour. For example, if the first colour is red and the second color blue, a ratio of 1 would yield pure red, a ratio of .5 would yield purple (equal amounts of red and blue), and a ratio of 0 would yield pure blue.\n{{Identical|Ratio}}
Blockly.Msg.COLOUR_BLEND_RATIO = 'ratio';
/// tooltip - See [https://github.com/google/blockly/wiki/Colour#blending-colours https://github.com/google/blockly/wiki/Colour#blending-colours].
Blockly.Msg.COLOUR_BLEND_TOOLTIP = 'Blends two colours together with a given ratio (0.0 - 1.0).';
/// dropdown - Label of the "hue" color component slider
Blockly.Msg.COLOUR_HUE_LABEL = 'Color';
/// dropdown - Label of the "saturation" color component slider
Blockly.Msg.COLOUR_SATURATION_LABEL = 'Saturation';
/// dropdown - Label of the "brightness" color component slider
Blockly.Msg.COLOUR_BRIGHTNESS_LABEL = 'Brightness';
// Loop Blocks.
/// url - Describes 'repeat loops' in computer programs; consider using the translation of the page [https://en.wikipedia.org/wiki/Control_flow http://en.wikipedia.org/wiki/Control_flow].
Blockly.Msg.CONTROLS_REPEAT_HELPURL = 'https://en.wikipedia.org/wiki/For_loop';
/// block input text - Title of [https://github.com/google/blockly/wiki/Loops#repeat repeat block].\n\nParameters:\n* %1 - the number of times the body of the loop should be repeated.
Blockly.Msg.CONTROLS_REPEAT_TITLE = 'repeat %1 times';
/// block text - Preceding the blocks in the body of the loop. See [https://github.com/google/blockly/wiki/Loops https://github.com/google/blockly/wiki/Loops].\n{{Identical|Do}}
Blockly.Msg.CONTROLS_REPEAT_INPUT_DO = 'do';
/// tooltip - See [https://github.com/google/blockly/wiki/Loops#repeat https://github.com/google/blockly/wiki/Loops#repeat].
Blockly.Msg.CONTROLS_REPEAT_TOOLTIP = 'Do some statements several times.';
/// url - Describes 'while loops' in computer programs; consider using the translation of [https://en.wikipedia.org/wiki/While_loop https://en.wikipedia.org/wiki/While_loop], if present, or [https://en.wikipedia.org/wiki/Control_flow https://en.wikipedia.org/wiki/Control_flow].
Blockly.Msg.CONTROLS_WHILEUNTIL_HELPURL = 'https://github.com/google/blockly/wiki/Loops#repeat';
Blockly.Msg.CONTROLS_WHILEUNTIL_INPUT_DO = Blockly.Msg.CONTROLS_REPEAT_INPUT_DO;
/// dropdown - Specifies that a loop should [https://github.com/google/blockly/wiki/Loops#repeat-while repeat while] the following condition is true.
Blockly.Msg.CONTROLS_WHILEUNTIL_OPERATOR_WHILE = 'repeat while';
/// dropdown - Specifies that a loop should [https://github.com/google/blockly/wiki/Loops#repeat-until repeat until] the following condition becomes true.
Blockly.Msg.CONTROLS_WHILEUNTIL_OPERATOR_UNTIL = 'repeat until';
/// tooltip - See [https://github.com/google/blockly/wiki/Loops#repeat-while https://github.com/google/blockly/wiki/Loops#repeat-while].
Blockly.Msg.CONTROLS_WHILEUNTIL_TOOLTIP_WHILE = 'While a value is true, then do some statements.';
/// tooltip - See [https://github.com/google/blockly/wiki/Loops#repeat-until https://github.com/google/blockly/wiki/Loops#repeat-until].
Blockly.Msg.CONTROLS_WHILEUNTIL_TOOLTIP_UNTIL = 'While a value is false, then do some statements.';
/// url - Describes 'for loops' in computer programs. Consider using your language's translation of [https://en.wikipedia.org/wiki/For_loop https://en.wikipedia.org/wiki/For_loop], if present.
Blockly.Msg.CONTROLS_FOR_HELPURL = 'https://github.com/google/blockly/wiki/Loops#count-with';
/// tooltip - See [https://github.com/google/blockly/wiki/Loops#count-with https://github.com/google/blockly/wiki/Loops#count-with].\n\nParameters:\n* %1 - the name of the loop variable.
Blockly.Msg.CONTROLS_FOR_TOOLTIP = 'Have the variable "%1" take on the values from the start number to the end number, counting by the specified interval, and do the specified blocks.';
/// block text - Repeatedly counts a variable (%1)
/// starting with a (usually lower) number in a range (%2),
/// ending with a (usually higher) number in a range (%3), and counting the
/// iterations by a number of steps (%4). As in
/// [https://github.com/google/blockly/wiki/Loops#count-with
/// https://github.com/google/blockly/wiki/Loops#count-with].
/// [[File:Blockly-count-with.png]]
Blockly.Msg.CONTROLS_FOR_TITLE = 'count with %1 from %2 to %3 by %4';
Blockly.Msg.CONTROLS_FOR_INPUT_DO = Blockly.Msg.CONTROLS_REPEAT_INPUT_DO;
/// url - Describes 'for-each loops' in computer programs. Consider using your language's translation of [https://en.wikipedia.org/wiki/Foreach https://en.wikipedia.org/wiki/Foreach] if present.
Blockly.Msg.CONTROLS_FOREACH_HELPURL = 'https://github.com/google/blockly/wiki/Loops#for-each';
/// block text - Title of [https://github.com/google/blockly/wiki/Loops#for-each for each block].
/// Sequentially assigns every item in array %2 to the variable %1.
Blockly.Msg.CONTROLS_FOREACH_TITLE = 'for each item %1 in list %2';
Blockly.Msg.CONTROLS_FOREACH_INPUT_DO = Blockly.Msg.CONTROLS_REPEAT_INPUT_DO;
/// block text - Description of [https://github.com/google/blockly/wiki/Loops#for-each for each blocks].\n\nParameters:\n* %1 - the name of the loop variable.
Blockly.Msg.CONTROLS_FOREACH_TOOLTIP = 'For each item in a list, set the variable "%1" to the item, and then do some statements.';
/// url - Describes control flow in computer programs. Consider using your language's translation of [https://en.wikipedia.org/wiki/Control_flow https://en.wikipedia.org/wiki/Control_flow], if it exists.
Blockly.Msg.CONTROLS_FLOW_STATEMENTS_HELPURL = 'https://github.com/google/blockly/wiki/Loops#loop-termination-blocks';
/// dropdown - The current loop should be exited. See [https://github.com/google/blockly/wiki/Loops#break https://github.com/google/blockly/wiki/Loops#break].
Blockly.Msg.CONTROLS_FLOW_STATEMENTS_OPERATOR_BREAK = 'break out of loop';
/// dropdown - The current iteration of the loop should be ended and the next should begin. See [https://github.com/google/blockly/wiki/Loops#continue-with-next-iteration https://github.com/google/blockly/wiki/Loops#continue-with-next-iteration].
Blockly.Msg.CONTROLS_FLOW_STATEMENTS_OPERATOR_CONTINUE = 'continue with next iteration of loop';
/// tooltip - See [https://github.com/google/blockly/wiki/Loops#break-out-of-loop https://github.com/google/blockly/wiki/Loops#break-out-of-loop].
Blockly.Msg.CONTROLS_FLOW_STATEMENTS_TOOLTIP_BREAK = 'Break out of the containing loop.';
/// tooltip - See [https://github.com/google/blockly/wiki/Loops#continue-with-next-iteration https://github.com/google/blockly/wiki/Loops#continue-with-next-iteration].
Blockly.Msg.CONTROLS_FLOW_STATEMENTS_TOOLTIP_CONTINUE = 'Skip the rest of this loop, and continue with the next iteration.';
/// warning - The user has tried placing a block outside of a loop (for each, while, repeat, etc.), but this type of block may only be used within a loop. See [https://github.com/google/blockly/wiki/Loops#loop-termination-blocks https://github.com/google/blockly/wiki/Loops#loop-termination-blocks].
Blockly.Msg.CONTROLS_FLOW_STATEMENTS_WARNING = 'Warning: This block may only be used within a loop.';
// Logic Blocks.
/// url - Describes conditional statements (if-then-else) in computer programs. Consider using your language's translation of [https://en.wikipedia.org/wiki/If_else https://en.wikipedia.org/wiki/If_else], if present.
Blockly.Msg.CONTROLS_IF_HELPURL = 'https://github.com/google/blockly/wiki/IfElse';
/// tooltip - Describes [https://github.com/google/blockly/wiki/IfElse#if-blocks 'if' blocks]. Consider using your language's translation of [https://en.wikipedia.org/wiki/If_statement https://en.wikipedia.org/wiki/If_statement], if present.
Blockly.Msg.CONTROLS_IF_TOOLTIP_1 = 'If a value is true, then do some statements.';
/// tooltip - Describes [https://github.com/google/blockly/wiki/IfElse#if-else-blocks if-else blocks]. Consider using your language's translation of [https://en.wikipedia.org/wiki/If_statement https://en.wikipedia.org/wiki/If_statement], if present.
Blockly.Msg.CONTROLS_IF_TOOLTIP_2 = 'If a value is true, then do the first block of statements. Otherwise, do the second block of statements.';
/// tooltip - Describes [https://github.com/google/blockly/wiki/IfElse#if-else-if-blocks if-else-if blocks]. Consider using your language's translation of [https://en.wikipedia.org/wiki/If_statement https://en.wikipedia.org/wiki/If_statement], if present.
Blockly.Msg.CONTROLS_IF_TOOLTIP_3 = 'If the first value is true, then do the first block of statements. Otherwise, if the second value is true, do the second block of statements.';
/// tooltip - Describes [https://github.com/google/blockly/wiki/IfElse#if-else-if-else-blocks if-else-if-else blocks]. Consider using your language's translation of [https://en.wikipedia.org/wiki/If_statement https://en.wikipedia.org/wiki/If_statement], if present.
Blockly.Msg.CONTROLS_IF_TOOLTIP_4 = 'If the first value is true, then do the first block of statements. Otherwise, if the second value is true, do the second block of statements. If none of the values are true, do the last block of statements.';
/// block text - See [https://github.com/google/blockly/wiki/IfElse https://github.com/google/blockly/wiki/IfElse].
/// It is recommended, but not essential, that this have text in common with the translation of 'else if'\n{{Identical|If}}
Blockly.Msg.CONTROLS_IF_MSG_IF = 'if';
/// block text - See [https://github.com/google/blockly/wiki/IfElse https://github.com/google/blockly/wiki/IfElse]. The English words "otherwise if" would probably be clearer than "else if", but the latter is used because it is traditional and shorter.
Blockly.Msg.CONTROLS_IF_MSG_ELSEIF = 'else if';
/// block text - See [https://github.com/google/blockly/wiki/IfElse https://github.com/google/blockly/wiki/IfElse]. The English word "otherwise" would probably be superior to "else", but the latter is used because it is traditional and shorter.
Blockly.Msg.CONTROLS_IF_MSG_ELSE = 'else';
Blockly.Msg.CONTROLS_IF_MSG_THEN = Blockly.Msg.CONTROLS_REPEAT_INPUT_DO;
Blockly.Msg.CONTROLS_IF_IF_TITLE_IF = Blockly.Msg.CONTROLS_IF_MSG_IF;
/// tooltip - Describes [https://github.com/google/blockly/wiki/IfElse#block-modification if block modification].
Blockly.Msg.CONTROLS_IF_IF_TOOLTIP = 'Add, remove, or reorder sections to reconfigure this if block.';
Blockly.Msg.CONTROLS_IF_ELSEIF_TITLE_ELSEIF = Blockly.Msg.CONTROLS_IF_MSG_ELSEIF;
/// tooltip - Describes the 'else if' subblock during [https://github.com/google/blockly/wiki/IfElse#block-modification if block modification].
Blockly.Msg.CONTROLS_IF_ELSEIF_TOOLTIP = 'Add a condition to the if block.';
Blockly.Msg.CONTROLS_IF_ELSE_TITLE_ELSE = Blockly.Msg.CONTROLS_IF_MSG_ELSE;
/// tooltip - Describes the 'else' subblock during [https://github.com/google/blockly/wiki/IfElse#block-modification if block modification].
Blockly.Msg.CONTROLS_IF_ELSE_TOOLTIP = 'Add a final, catch-all condition to the if block.';
/// button text - Text on a button inside a dialogue window, which will accept or acknowledge the contents of the dialogue when pressed.
Blockly.Msg.IOS_OK = 'OK';
/// button text - Text on a button inside a dialogue window, which will close or cancel the dialogue when pressed.
Blockly.Msg.IOS_CANCEL = 'Cancel';
/// alert - Title text for an error dialogue.
Blockly.Msg.IOS_ERROR = 'Error';
/// header text - Title of a section that displays a list of parameters (aka. "inputs") that have been defined for a procedure. This is used inside a dialogue window to configure a procedure.
Blockly.Msg.IOS_PROCEDURES_INPUTS = 'INPUTS';
/// button text - Text on a button which will add a parameter (aka. "input") to a procedure. This is used inside a dialogue window to configure a procedure. NOTE: The "+" should be preserved at the beginning of the text.
Blockly.Msg.IOS_PROCEDURES_ADD_INPUT = '+ Add Input';
/// option text - Text describing an option to allow statements to be added within a procedure. This is used inside a dialogue window to configure a procedure.
Blockly.Msg.IOS_PROCEDURES_ALLOW_STATEMENTS = 'Allow statements';
/// alert - Error message when duplicate parameters (aka. "inputs") have been defined on a procedure. This is used inside a dialogue window to configure procedure parameters.
Blockly.Msg.IOS_PROCEDURES_DUPLICATE_INPUTS_ERROR = 'This function has duplicate inputs.';
/// button text - Text on a button which will open a variable creation dialogue when pressed. NOTE: The "+" should be preserved at the beginning of the text.
Blockly.Msg.IOS_VARIABLES_ADD_VARIABLE = '+ Add Variable';
/// button text - Text on a button inside a variable creation dialogue, which will add a variable when pressed.
Blockly.Msg.IOS_VARIABLES_ADD_BUTTON = 'Add';
/// button text - Text on a button inside a variable rename dialogue, which will rename a variable when pressed.
Blockly.Msg.IOS_VARIABLES_RENAME_BUTTON = 'Rename';
/// button text - Text on a button inside a variable deletion dialogue, which will delete a variable when pressed.
Blockly.Msg.IOS_VARIABLES_DELETE_BUTTON = 'Delete';
/// placeholder text - Placeholder text used inside a text input, where a variable name should be entered.
Blockly.Msg.IOS_VARIABLES_VARIABLE_NAME = 'Variable name';
/// alert - Error message that is displayed when the user attempts to create a variable without a name.
Blockly.Msg.IOS_VARIABLES_EMPTY_NAME_ERROR = 'You can\'t use an empty variable name.';
/// url - Information about comparisons.
Blockly.Msg.LOGIC_COMPARE_HELPURL = 'https://en.wikipedia.org/wiki/Inequality_(mathematics)';
/// tooltip - Describes the equals (=) block.
Blockly.Msg.LOGIC_COMPARE_TOOLTIP_EQ = 'Return true if both inputs equal each other.';
/// tooltip - Describes the not equals (≠) block.
Blockly.Msg.LOGIC_COMPARE_TOOLTIP_NEQ = 'Return true if both inputs are not equal to each other.';
/// tooltip - Describes the less than (<) block.
Blockly.Msg.LOGIC_COMPARE_TOOLTIP_LT = 'Return true if the first input is smaller than the second input.';
/// tooltip - Describes the less than or equals (≤) block.
Blockly.Msg.LOGIC_COMPARE_TOOLTIP_LTE = 'Return true if the first input is smaller than or equal to the second input.';
/// tooltip - Describes the greater than (>) block.
Blockly.Msg.LOGIC_COMPARE_TOOLTIP_GT = 'Return true if the first input is greater than the second input.';
/// tooltip - Describes the greater than or equals (≥) block.
Blockly.Msg.LOGIC_COMPARE_TOOLTIP_GTE = 'Return true if the first input is greater than or equal to the second input.';
/// url - Information about the Boolean conjunction ("and") and disjunction ("or") operators. Consider using the translation of [https://en.wikipedia.org/wiki/Boolean_logic https://en.wikipedia.org/wiki/Boolean_logic], if it exists in your language.
Blockly.Msg.LOGIC_OPERATION_HELPURL = 'https://github.com/google/blockly/wiki/Logic#logical-operations';
/// tooltip - See [https://en.wikipedia.org/wiki/Logical_conjunction https://en.wikipedia.org/wiki/Logical_conjunction].
Blockly.Msg.LOGIC_OPERATION_TOOLTIP_AND = 'Return true if both inputs are true.';
/// block text - See [https://en.wikipedia.org/wiki/Logical_conjunction https://en.wikipedia.org/wiki/Logical_conjunction].\n{{Identical|And}}
Blockly.Msg.LOGIC_OPERATION_AND = 'and';
/// tooltip - See [https://en.wikipedia.org/wiki/Disjunction https://en.wikipedia.org/wiki/Disjunction].
Blockly.Msg.LOGIC_OPERATION_TOOLTIP_OR = 'Return true if at least one of the inputs is true.';
/// block text - See [https://en.wikipedia.org/wiki/Disjunction https://en.wikipedia.org/wiki/Disjunction].\n{{Identical|Or}}
Blockly.Msg.LOGIC_OPERATION_OR = 'or';
/// url - Information about logical negation. The translation of [https://en.wikipedia.org/wiki/Logical_negation https://en.wikipedia.org/wiki/Logical_negation] is recommended if it exists in the target language.
Blockly.Msg.LOGIC_NEGATE_HELPURL = 'https://github.com/google/blockly/wiki/Logic#not';
/// block text - This is a unary operator that returns ''false'' when the input is ''true'', and ''true'' when the input is ''false''.
/// \n\nParameters:\n* %1 - the input (which should be either the value "true" or "false")
Blockly.Msg.LOGIC_NEGATE_TITLE = 'not %1';
/// tooltip - See [https://en.wikipedia.org/wiki/Logical_negation https://en.wikipedia.org/wiki/Logical_negation].
Blockly.Msg.LOGIC_NEGATE_TOOLTIP = 'Returns true if the input is false. Returns false if the input is true.';
/// url - Information about the logic values ''true'' and ''false''. Consider using the translation of [https://en.wikipedia.org/wiki/Truth_value https://en.wikipedia.org/wiki/Truth_value] if it exists in your language.
Blockly.Msg.LOGIC_BOOLEAN_HELPURL = 'https://github.com/google/blockly/wiki/Logic#values';
/// block text - The word for the [https://en.wikipedia.org/wiki/Truth_value logical value] ''true''.\n{{Identical|True}}
Blockly.Msg.LOGIC_BOOLEAN_TRUE = 'true';
/// block text - The word for the [https://en.wikipedia.org/wiki/Truth_value logical value] ''false''.\n{{Identical|False}}
Blockly.Msg.LOGIC_BOOLEAN_FALSE = 'false';
/// tooltip - Indicates that the block returns either of the two possible [https://en.wikipedia.org/wiki/Truth_value logical values].
Blockly.Msg.LOGIC_BOOLEAN_TOOLTIP = 'Returns either true or false.';
/// url - Provide a link to the translation of [https://en.wikipedia.org/wiki/Nullable_type https://en.wikipedia.org/wiki/Nullable_type], if it exists in your language; otherwise, do not worry about translating this advanced concept.
Blockly.Msg.LOGIC_NULL_HELPURL = 'https://en.wikipedia.org/wiki/Nullable_type';
/// block text - In computer languages, ''null'' is a special value that indicates that no value has been set. You may use your language's word for "nothing" or "invalid".\n{{Identical|Null}}
Blockly.Msg.LOGIC_NULL = 'null';
/// tooltip - This should use the word from the previous message.
Blockly.Msg.LOGIC_NULL_TOOLTIP = 'Returns null.';
/// url - Describes the programming language operator known as the ''ternary'' or ''conditional'' operator. It is recommended that you use the translation of [https://en.wikipedia.org/wiki/%3F: https://en.wikipedia.org/wiki/%3F:] if it exists.
Blockly.Msg.LOGIC_TERNARY_HELPURL = 'https://en.wikipedia.org/wiki/%3F:';
/// block input text - Label for the input whose value determines which of the other two inputs is returned. In some programming languages, this is called a ''predicate''.
Blockly.Msg.LOGIC_TERNARY_CONDITION = 'test';
/// block input text - Indicates that the following input should be returned (used as output) if the test input is true. Remember to try to keep block text terse (short).
Blockly.Msg.LOGIC_TERNARY_IF_TRUE = 'if true';
/// block input text - Indicates that the following input should be returned (used as output) if the test input is false.
Blockly.Msg.LOGIC_TERNARY_IF_FALSE = 'if false';
/// tooltip - See [https://en.wikipedia.org/wiki/%3F: https://en.wikipedia.org/wiki/%3F:].
Blockly.Msg.LOGIC_TERNARY_TOOLTIP = 'Check the condition in "test". If the condition is true, returns the "if true" value; otherwise returns the "if false" value.';
// Math Blocks.
/// url - Information about (real) numbers.
Blockly.Msg.MATH_NUMBER_HELPURL = 'https://en.wikipedia.org/wiki/Number';
/// tooltip - Any positive or negative number, not necessarily an integer.
Blockly.Msg.MATH_NUMBER_TOOLTIP = 'A number.';
/// {{optional}}\nmath - The symbol for the binary operation addition.
Blockly.Msg.MATH_ADDITION_SYMBOL = '+';
/// {{optional}}\nmath - The symbol for the binary operation indicating that the right operand should be
/// subtracted from the left operand.
Blockly.Msg.MATH_SUBTRACTION_SYMBOL = '-';
/// {{optional}}\nmath - The binary operation indicating that the left operand should be divided by
/// the right operand.
Blockly.Msg.MATH_DIVISION_SYMBOL = '÷';
/// {{optional}}\nmath - The symbol for the binary operation multiplication.
Blockly.Msg.MATH_MULTIPLICATION_SYMBOL = '×';
/// {{optional}}\nmath - The symbol for the binary operation exponentiation. Specifically, if the
/// value of the left operand is L and the value of the right operand (the exponent) is
/// R, multiply L by itself R times. (Fractional and negative exponents are also legal.)
Blockly.Msg.MATH_POWER_SYMBOL = '^';
/// math - The short name of the trigonometric function
/// [https://en.wikipedia.org/wiki/Trigonometric_functions#Sine.2C_cosine_and_tangent sine].
Blockly.Msg.MATH_TRIG_SIN = 'sin';
/// math - The short name of the trigonometric function
/// [https://en.wikipedia.org/wiki/Trigonometric_functions#Sine.2C_cosine_and_tangent cosine].
Blockly.Msg.MATH_TRIG_COS = 'cos';
/// math - The short name of the trigonometric function
/// [https://en.wikipedia.org/wiki/Trigonometric_functions#Sine.2C_cosine_and_tangent tangent].
Blockly.Msg.MATH_TRIG_TAN = 'tan';
/// math - The short name of the ''inverse of'' the trigonometric function
/// [https://en.wikipedia.org/wiki/Trigonometric_functions#Sine.2C_cosine_and_tangent sine].
Blockly.Msg.MATH_TRIG_ASIN = 'asin';
/// math - The short name of the ''inverse of'' the trigonometric function
/// [https://en.wikipedia.org/wiki/Trigonometric_functions#Sine.2C_cosine_and_tangent cosine].
Blockly.Msg.MATH_TRIG_ACOS = 'acos';
/// math - The short name of the ''inverse of'' the trigonometric function
/// [https://en.wikipedia.org/wiki/Trigonometric_functions#Sine.2C_cosine_and_tangent tangent].
Blockly.Msg.MATH_TRIG_ATAN = 'atan';
/// url - Information about addition, subtraction, multiplication, division, and exponentiation.
Blockly.Msg.MATH_ARITHMETIC_HELPURL = 'https://en.wikipedia.org/wiki/Arithmetic';
/// tooltip - See [https://en.wikipedia.org/wiki/Addition https://en.wikipedia.org/wiki/Addition].
Blockly.Msg.MATH_ARITHMETIC_TOOLTIP_ADD = 'Return the sum of the two numbers.';
/// tooltip - See [https://en.wikipedia.org/wiki/Subtraction https://en.wikipedia.org/wiki/Subtraction].
Blockly.Msg.MATH_ARITHMETIC_TOOLTIP_MINUS = 'Return the difference of the two numbers.';
/// tooltip - See [https://en.wikipedia.org/wiki/Multiplication https://en.wikipedia.org/wiki/Multiplication].
Blockly.Msg.MATH_ARITHMETIC_TOOLTIP_MULTIPLY = 'Return the product of the two numbers.';
/// tooltip - See [https://en.wikipedia.org/wiki/Division_(mathematics) https://en.wikipedia.org/wiki/Division_(mathematics)].
Blockly.Msg.MATH_ARITHMETIC_TOOLTIP_DIVIDE = 'Return the quotient of the two numbers.';
/// tooltip - See [https://en.wikipedia.org/wiki/Exponentiation https://en.wikipedia.org/wiki/Exponentiation].
Blockly.Msg.MATH_ARITHMETIC_TOOLTIP_POWER = 'Return the first number raised to the power of the second number.';
/// url - Information about the square root operation.
Blockly.Msg.MATH_SINGLE_HELPURL = 'https://en.wikipedia.org/wiki/Square_root';
/// dropdown - This computes the positive [https://en.wikipedia.org/wiki/Square_root square root] of its input. For example, the square root of 16 is 4.
Blockly.Msg.MATH_SINGLE_OP_ROOT = 'square root';
/// tooltip - Please use the same term as in the previous message.
Blockly.Msg.MATH_SINGLE_TOOLTIP_ROOT = 'Return the square root of a number.';
/// dropdown - This leaves positive numeric inputs changed and inverts negative inputs. For example, the absolute value of 5 is 5; the absolute value of -5 is also 5. For more information, see [https://en.wikipedia.org/wiki/Absolute_value https://en.wikipedia.org/wiki/Absolute_value].
Blockly.Msg.MATH_SINGLE_OP_ABSOLUTE = 'absolute';
/// tooltip - Please use the same term as in the previous message.
Blockly.Msg.MATH_SINGLE_TOOLTIP_ABS = 'Return the absolute value of a number.';
/// tooltip - Calculates '''0-n''', where '''n''' is the single numeric input.
Blockly.Msg.MATH_SINGLE_TOOLTIP_NEG = 'Return the negation of a number.';
/// tooltip - Calculates the [https://en.wikipedia.org/wiki/Natural_logarithm natural logarithm] of its single numeric input.
Blockly.Msg.MATH_SINGLE_TOOLTIP_LN = 'Return the natural logarithm of a number.';
/// tooltip - Calculates the [https://en.wikipedia.org/wiki/Common_logarithm common logarithm] of its single numeric input.
Blockly.Msg.MATH_SINGLE_TOOLTIP_LOG10 = 'Return the base 10 logarithm of a number.';
/// tooltip - Multiplies [https://en.wikipedia.org/wiki/E_(mathematical_constant) e] by itself n times, where n is the single numeric input.
Blockly.Msg.MATH_SINGLE_TOOLTIP_EXP = 'Return e to the power of a number.';
/// tooltip - Multiplies 10 by itself n times, where n is the single numeric input.
Blockly.Msg.MATH_SINGLE_TOOLTIP_POW10 = 'Return 10 to the power of a number.';
/// url - Information about the trigonometric functions sine, cosine, tangent, and their inverses (ideally using degrees, not radians).
Blockly.Msg.MATH_TRIG_HELPURL = 'https://en.wikipedia.org/wiki/Trigonometric_functions';
/// tooltip - Return the [https://en.wikipedia.org/wiki/Trigonometric_functions#Sine.2C_cosine_and_tangent sine] of an [https://en.wikipedia.org/wiki/Degree_(angle) angle in degrees], not radians.
Blockly.Msg.MATH_TRIG_TOOLTIP_SIN = 'Return the sine of a degree (not radian).';
/// tooltip - Return the [https://en.wikipedia.org/wiki/Trigonometric_functions#Sine.2C_cosine_and_tangent cosine] of an [https://en.wikipedia.org/wiki/Degree_(angle) angle in degrees], not radians.
Blockly.Msg.MATH_TRIG_TOOLTIP_COS = 'Return the cosine of a degree (not radian).';
/// tooltip - Return the [https://en.wikipedia.org/wiki/Trigonometric_functions#Sine.2C_cosine_and_tangent tangent] of an [https://en.wikipedia.org/wiki/Degree_(angle) angle in degrees], not radians.
Blockly.Msg.MATH_TRIG_TOOLTIP_TAN = 'Return the tangent of a degree (not radian).';
/// tooltip - The [https://en.wikipedia.org/wiki/Inverse_trigonometric_functions inverse] of the [https://en.wikipedia.org/wiki/Cosine#Sine.2C_cosine_and_tangent sine function], using [https://en.wikipedia.org/wiki/Degree_(angle) degrees], not radians.
Blockly.Msg.MATH_TRIG_TOOLTIP_ASIN = 'Return the arcsine of a number.';
/// tooltip - The [https://en.wikipedia.org/wiki/Inverse_trigonometric_functions inverse] of the [https://en.wikipedia.org/wiki/Cosine#Sine.2C_cosine_and_tangent cosine] function, using [https://en.wikipedia.org/wiki/Degree_(angle) degrees], not radians.
Blockly.Msg.MATH_TRIG_TOOLTIP_ACOS = 'Return the arccosine of a number.';
/// tooltip - The [https://en.wikipedia.org/wiki/Inverse_trigonometric_functions inverse] of the [https://en.wikipedia.org/wiki/Cosine#Sine.2C_cosine_and_tangent tangent] function, using [https://en.wikipedia.org/wiki/Degree_(angle) degrees], not radians.
Blockly.Msg.MATH_TRIG_TOOLTIP_ATAN = 'Return the arctangent of a number.';
/// url - Information about the mathematical constants Pi (π), e, the golden ratio (φ), √ 2, √ 1/2, and infinity (∞).
Blockly.Msg.MATH_CONSTANT_HELPURL = 'https://en.wikipedia.org/wiki/Mathematical_constant';
/// tooltip - Provides the specified [https://en.wikipedia.org/wiki/Mathematical_constant mathematical constant].
Blockly.Msg.MATH_CONSTANT_TOOLTIP = 'Return one of the common constants: π (3.141…), e (2.718…), φ (1.618…), sqrt(2) (1.414…), sqrt(½) (0.707…), or ∞ (infinity).';
/// dropdown - A number is '''even''' if it is a multiple of 2. For example, 4 is even (yielding true), but 3 is not (false).
Blockly.Msg.MATH_IS_EVEN = 'is even';
/// dropdown - A number is '''odd''' if it is not a multiple of 2. For example, 3 is odd (yielding true), but 4 is not (false). The opposite of "odd" is "even".
Blockly.Msg.MATH_IS_ODD = 'is odd';
/// dropdown - A number is [https://en.wikipedia.org/wiki/Prime prime] if it cannot be evenly divided by any positive integers except for 1 and itself. For example, 5 is prime, but 6 is not because 2 × 3 = 6.
Blockly.Msg.MATH_IS_PRIME = 'is prime';
/// dropdown - A number is '''whole''' if it is an [https://en.wikipedia.org/wiki/Integer integer]. For example, 5 is whole, but 5.1 is not.
Blockly.Msg.MATH_IS_WHOLE = 'is whole';
/// dropdown - A number is '''positive''' if it is greater than 0. (0 is neither negative nor positive.)
Blockly.Msg.MATH_IS_POSITIVE = 'is positive';
/// dropdown - A number is '''negative''' if it is less than 0. (0 is neither negative nor positive.)
Blockly.Msg.MATH_IS_NEGATIVE = 'is negative';
/// dropdown - A number x is divisible by y if y goes into x evenly. For example, 10 is divisible by 5, but 10 is not divisible by 3.
Blockly.Msg.MATH_IS_DIVISIBLE_BY = 'is divisible by';
/// tooltip - This block lets the user specify via a dropdown menu whether to check if the numeric input is even, odd, prime, whole, positive, negative, or divisible by a given value.
Blockly.Msg.MATH_IS_TOOLTIP = 'Check if a number is an even, odd, prime, whole, positive, negative, or if it is divisible by certain number. Returns true or false.';
/// url - Information about incrementing (increasing the value of) a variable.
/// For other languages, just use the translation of the Wikipedia page about
/// addition ([https://en.wikipedia.org/wiki/Addition https://en.wikipedia.org/wiki/Addition]).
Blockly.Msg.MATH_CHANGE_HELPURL = 'https://en.wikipedia.org/wiki/Programming_idiom#Incrementing_a_counter';
/// - As in: ''change'' [the value of variable] ''item'' ''by'' 1 (e.g., if the variable named 'item' had the value 5, change it to 6).
/// %1 is a variable name.
/// %2 is the amount of change.
Blockly.Msg.MATH_CHANGE_TITLE = 'change %1 by %2';
Blockly.Msg.MATH_CHANGE_TITLE_ITEM = Blockly.Msg.VARIABLES_DEFAULT_NAME;
/// tooltip - This updates the value of the variable by adding to it the following numeric input.\n\nParameters:\n* %1 - the name of the variable whose value should be increased.
Blockly.Msg.MATH_CHANGE_TOOLTIP = 'Add a number to variable "%1".';
/// url - Information about how numbers are rounded to the nearest integer
Blockly.Msg.MATH_ROUND_HELPURL = 'https://en.wikipedia.org/wiki/Rounding';
/// tooltip - See [https://en.wikipedia.org/wiki/Rounding https://en.wikipedia.org/wiki/Rounding].
Blockly.Msg.MATH_ROUND_TOOLTIP = 'Round a number up or down.';
/// dropdown - This rounds its input to the nearest whole number. For example, 3.4 is rounded to 3.
Blockly.Msg.MATH_ROUND_OPERATOR_ROUND = 'round';
/// dropdown - This rounds its input up to the nearest whole number. For example, if the input was 2.2, the result would be 3.
Blockly.Msg.MATH_ROUND_OPERATOR_ROUNDUP = 'round up';
/// dropdown - This rounds its input down to the nearest whole number. For example, if the input was 3.8, the result would be 3.
Blockly.Msg.MATH_ROUND_OPERATOR_ROUNDDOWN = 'round down';
/// url - Information about applying a function to a list of numbers. (We were unable to find such information in English. Feel free to skip this and any other URLs that are difficult.)
Blockly.Msg.MATH_ONLIST_HELPURL = '';
/// dropdown - This computes the sum of the numeric elements in the list. For example, the sum of the list {1, 4} is 5.
Blockly.Msg.MATH_ONLIST_OPERATOR_SUM = 'sum of list';
/// tooltip - Please use the same term for "sum" as in the previous message.
Blockly.Msg.MATH_ONLIST_TOOLTIP_SUM = 'Return the sum of all the numbers in the list.';
/// dropdown - This finds the smallest (minimum) number in a list. For example, the smallest number in the list [-5, 0, 3] is -5.
Blockly.Msg.MATH_ONLIST_OPERATOR_MIN = 'min of list';
/// tooltip - Please use the same term for "min" or "minimum" as in the previous message.
Blockly.Msg.MATH_ONLIST_TOOLTIP_MIN = 'Return the smallest number in the list.';
/// dropdown - This finds the largest (maximum) number in a list. For example, the largest number in the list [-5, 0, 3] is 3.
Blockly.Msg.MATH_ONLIST_OPERATOR_MAX = 'max of list';
/// tooltip - Please use the same term for "max" or "maximum" as in the previous message.
Blockly.Msg.MATH_ONLIST_TOOLTIP_MAX = 'Return the largest number in the list.';
/// dropdown - This adds up all of the numbers in a list and divides the sum by the number of elements in the list. For example, the [https://en.wikipedia.org/wiki/Arithmetic_mean average] of the list [1, 2, 3, 4] is 2.5 (10/4).
Blockly.Msg.MATH_ONLIST_OPERATOR_AVERAGE = 'average of list';
/// tooltip - See [https://en.wikipedia.org/wiki/Arithmetic_mean https://en.wikipedia.org/wiki/Arithmetic_mean] for more information.
Blockly.Msg.MATH_ONLIST_TOOLTIP_AVERAGE = 'Return the average (arithmetic mean) of the numeric values in the list.';
/// dropdown - This finds the [https://en.wikipedia.org/wiki/Median median] of the numeric values in a list. For example, the median of the list {1, 2, 7, 12, 13} is 7.
Blockly.Msg.MATH_ONLIST_OPERATOR_MEDIAN = 'median of list';
/// tooltip - See [https://en.wikipedia.org/wiki/Median https://en.wikipedia.org/wiki/Median] for more information.
Blockly.Msg.MATH_ONLIST_TOOLTIP_MEDIAN = 'Return the median number in the list.';
/// dropdown - This finds the most common numbers ([https://en.wikipedia.org/wiki/Mode_(statistics) modes]) in a list. For example, the modes of the list {1, 3, 9, 3, 9} are {3, 9}.
Blockly.Msg.MATH_ONLIST_OPERATOR_MODE = 'modes of list';
/// tooltip - See [https://en.wikipedia.org/wiki/Mode_(statistics) https://en.wikipedia.org/wiki/Mode_(statistics)] for more information.
Blockly.Msg.MATH_ONLIST_TOOLTIP_MODE = 'Return a list of the most common item(s) in the list.';
/// dropdown - This finds the [https://en.wikipedia.org/wiki/Standard_deviation standard deviation] of the numeric values in a list.
Blockly.Msg.MATH_ONLIST_OPERATOR_STD_DEV = 'standard deviation of list';
/// tooltip - See [https://en.wikipedia.org/wiki/Standard_deviation https://en.wikipedia.org/wiki/Standard_deviation] for more information.
Blockly.Msg.MATH_ONLIST_TOOLTIP_STD_DEV = 'Return the standard deviation of the list.';
/// dropdown - This chooses an element at random from a list. Each element is chosen with equal probability.
Blockly.Msg.MATH_ONLIST_OPERATOR_RANDOM = 'random item of list';
/// tooltip - Please use the same term for 'random' as in the previous entry.
Blockly.Msg.MATH_ONLIST_TOOLTIP_RANDOM = 'Return a random element from the list.';
/// url - information about the modulo (remainder) operation.
Blockly.Msg.MATH_MODULO_HELPURL = 'https://en.wikipedia.org/wiki/Modulo_operation';
/// block text - Title of block providing the remainder when dividing the first numerical input by the second. For example, the remainder of 10 divided by 3 is 1.\n\nParameters:\n* %1 - the dividend (10, in our example)\n* %2 - the divisor (3 in our example).
Blockly.Msg.MATH_MODULO_TITLE = 'remainder of %1 ÷ %2';
/// tooltip - For example, the remainder of 10 divided by 3 is 1.
Blockly.Msg.MATH_MODULO_TOOLTIP = 'Return the remainder from dividing the two numbers.';
/// url - Information about constraining a numeric value to be in a specific range. (The English URL is not ideal. Recall that translating URLs is the lowest priority.)
Blockly.Msg.MATH_CONSTRAIN_HELPURL = 'https://en.wikipedia.org/wiki/Clamping_(graphics)';
/// block text - The title of the block that '''constrain'''s (forces) a number to be in a given range.
///For example, if the number 150 is constrained to be between 5 and 100, the result will be 100.
///\n\nParameters:\n* %1 - the value to constrain (e.g., 150)\n* %2 - the minimum value (e.g., 5)\n* %3 - the maximum value (e.g., 100).
Blockly.Msg.MATH_CONSTRAIN_TITLE = 'constrain %1 low %2 high %3';
/// tooltip - This compares a number ''x'' to a low value ''L'' and a high value ''H''. If ''x'' is less than ''L'', the result is ''L''. If ''x'' is greater than ''H'', the result is ''H''. Otherwise, the result is ''x''.
Blockly.Msg.MATH_CONSTRAIN_TOOLTIP = 'Constrain a number to be between the specified limits (inclusive).';
/// url - Information about how computers generate random numbers.
Blockly.Msg.MATH_RANDOM_INT_HELPURL = 'https://en.wikipedia.org/wiki/Random_number_generation';
/// block text - The title of the block that generates a random integer (whole number) in the specified range. For example, if the range is from 5 to 7, this returns 5, 6, or 7 with equal likelihood. %1 is a placeholder for the lower number, %2 is the placeholder for the larger number.
Blockly.Msg.MATH_RANDOM_INT_TITLE = 'random integer from %1 to %2';
/// tooltip - Return a random integer between two values specified as inputs. For example, if one input was 7 and another 9, any of the numbers 7, 8, or 9 could be produced.
Blockly.Msg.MATH_RANDOM_INT_TOOLTIP = 'Return a random integer between the two specified limits, inclusive.';
/// url - Information about how computers generate random numbers (specifically, numbers in the range from 0 to just below 1).
Blockly.Msg.MATH_RANDOM_FLOAT_HELPURL = 'https://en.wikipedia.org/wiki/Random_number_generation';
/// block text - The title of the block that generates a random number greater than or equal to 0 and less than 1.
Blockly.Msg.MATH_RANDOM_FLOAT_TITLE_RANDOM = 'random fraction';
/// tooltip - Return a random fraction between 0 and 1. The value may be equal to 0 but must be less than 1.
Blockly.Msg.MATH_RANDOM_FLOAT_TOOLTIP = 'Return a random fraction between 0.0 (inclusive) and 1.0 (exclusive).';
// Text Blocks.
/// url - Information about how computers represent text (sometimes referred to as ''string''s).
Blockly.Msg.TEXT_TEXT_HELPURL = 'https://en.wikipedia.org/wiki/String_(computer_science)';
/// tooltip - See [https://github.com/google/blockly/wiki/Text https://github.com/google/blockly/wiki/Text].
Blockly.Msg.TEXT_TEXT_TOOLTIP = 'A letter, word, or line of text.';
/// url - Information on concatenating/appending pieces of text.
Blockly.Msg.TEXT_JOIN_HELPURL = 'https://github.com/google/blockly/wiki/Text#text-creation';
/// block text - See [https://github.com/google/blockly/wiki/Text#text-creation https://github.com/google/blockly/wiki/Text#text-creation].
Blockly.Msg.TEXT_JOIN_TITLE_CREATEWITH = 'create text with';
/// tooltip - See [https://github.com/google/blockly/wiki/Text#text-creation create text with] for more information.
Blockly.Msg.TEXT_JOIN_TOOLTIP = 'Create a piece of text by joining together any number of items.';
/// block text - This is shown when the programmer wants to change the number of pieces of text being joined together. See [https://github.com/google/blockly/wiki/Text#text-creation https://github.com/google/blockly/wiki/Text#text-creation], specifically the last picture in the 'Text creation' section.\n{{Identical|Join}}
Blockly.Msg.TEXT_CREATE_JOIN_TITLE_JOIN = 'join';
/// tooltip - See [https://github.com/google/blockly/wiki/Text#text-creation https://github.com/google/blockly/wiki/Text#text-creation], specifically the last picture in the 'Text creation' section.
Blockly.Msg.TEXT_CREATE_JOIN_TOOLTIP = 'Add, remove, or reorder sections to reconfigure this text block.';
Blockly.Msg.TEXT_CREATE_JOIN_ITEM_TITLE_ITEM = Blockly.Msg.VARIABLES_DEFAULT_NAME;
/// block text - See [https://github.com/google/blockly/wiki/Text#text-creation https://github.com/google/blockly/wiki/Text#text-creation], specifically the last picture in the 'Text creation' section.
Blockly.Msg.TEXT_CREATE_JOIN_ITEM_TOOLTIP = 'Add an item to the text.';
/// url - This and the other text-related URLs are going to be hard to translate. As always, it is okay to leave untranslated or paste in the English-language URL. For these URLs, you might also consider a general URL about how computers represent text (such as the translation of [https://en.wikipedia.org/wiki/String_(computer_science) this Wikipedia page]).
Blockly.Msg.TEXT_APPEND_HELPURL = 'https://github.com/google/blockly/wiki/Text#text-modification';
/// block input text - Message preceding the name of a variable to which text should be appended.
/// [[File:blockly-append-text.png]]
Blockly.Msg.TEXT_APPEND_TO = 'to';
/// block input text - Message following the variable and preceding the piece of text that should
/// be appended, as shown below.
/// [[File:blockly-append-text.png]]
Blockly.Msg.TEXT_APPEND_APPENDTEXT = 'append text';
Blockly.Msg.TEXT_APPEND_VARIABLE = Blockly.Msg.VARIABLES_DEFAULT_NAME;
/// tooltip - See [https://github.com/google/blockly/wiki/Text#text-modification https://github.com/google/blockly/wiki/Text#text-modification] for more information.\n\nParameters:\n* %1 - the name of the variable to which text should be appended
Blockly.Msg.TEXT_APPEND_TOOLTIP = 'Append some text to variable "%1".';
/// url - Information about text on computers (usually referred to as 'strings').
Blockly.Msg.TEXT_LENGTH_HELPURL = 'https://github.com/google/blockly/wiki/Text#text-modification';
/// block text - See [https://github.com/google/blockly/wiki/Text#text-length https://github.com/google/blockly/wiki/Text#text-length].
/// \n\nParameters:\n* %1 - the piece of text to take the length of
Blockly.Msg.TEXT_LENGTH_TITLE = 'length of %1';
/// tooltip - See [https://github.com/google/blockly/wiki/Text#text-length https://github.com/google/blockly/wiki/Text#text-length].
Blockly.Msg.TEXT_LENGTH_TOOLTIP = 'Returns the number of letters (including spaces) in the provided text.';
/// url - Information about empty pieces of text on computers (usually referred to as 'empty strings').
Blockly.Msg.TEXT_ISEMPTY_HELPURL = 'https://github.com/google/blockly/wiki/Text#checking-for-empty-text';
/// block text - See [https://github.com/google/blockly/wiki/Text#checking-for-empty-text https://github.com/google/blockly/wiki/Text#checking-for-empty-text].
/// \n\nParameters:\n* %1 - the piece of text to test for emptiness
Blockly.Msg.TEXT_ISEMPTY_TITLE = '%1 is empty';
/// tooltip - See [https://github.com/google/blockly/wiki/Text#checking-for-empty-text https://github.com/google/blockly/wiki/Text#checking-for-empty-text].
Blockly.Msg.TEXT_ISEMPTY_TOOLTIP = 'Returns true if the provided text is empty.';
/// url - Information about finding a character in a piece of text.
Blockly.Msg.TEXT_INDEXOF_HELPURL = 'https://github.com/google/blockly/wiki/Text#finding-text';
/// tooltip - %1 will be replaced by either the number 0 or -1 depending on the indexing mode. See [https://github.com/google/blockly/wiki/Text#finding-text https://github.com/google/blockly/wiki/Text#finding-text].
Blockly.Msg.TEXT_INDEXOF_TOOLTIP = 'Returns the index of the first/last occurrence of the first text in the second text. Returns %1 if text is not found.';
/// block text - Title of blocks allowing users to find text. See
/// [https://github.com/google/blockly/wiki/Text#finding-text
/// https://github.com/google/blockly/wiki/Text#finding-text].
/// [[File:Blockly-find-text.png]].
Blockly.Msg.TEXT_INDEXOF_INPUT_INTEXT = 'in text';
/// dropdown - See [https://github.com/google/blockly/wiki/Text#finding-text
/// https://github.com/google/blockly/wiki/Text#finding-text].
/// [[File:Blockly-find-text.png]].
Blockly.Msg.TEXT_INDEXOF_OPERATOR_FIRST = 'find first occurrence of text';
/// dropdown - See [https://github.com/google/blockly/wiki/Text#finding-text
/// https://github.com/google/blockly/wiki/Text#finding-text]. This would
/// replace "find first occurrence of text" below. (For more information on
/// how common text is factored out of dropdown menus, see
/// [https://translatewiki.net/wiki/Translating:Blockly#Drop-Down_Menus
/// https://translatewiki.net/wiki/Translating:Blockly#Drop-Down_Menus].)
/// [[File:Blockly-find-text.png]].
Blockly.Msg.TEXT_INDEXOF_OPERATOR_LAST = 'find last occurrence of text';
/// block text - Optional text to follow the rightmost block in a
/// [https://github.com/google/blockly/wiki/Text#finding-text
/// https://github.com/google/blockly/wiki/Text#finding-text in text ... find block]
/// (after the "a" in the below picture). This will be the empty string in most languages.
/// [[File:Blockly-find-text.png]].
Blockly.Msg.TEXT_INDEXOF_TAIL = '';
/// url - Information about extracting characters (letters, number, symbols, etc.) from text.
Blockly.Msg.TEXT_CHARAT_HELPURL = 'https://github.com/google/blockly/wiki/Text#extracting-text';
/// block text - Appears before the piece of text from which a letter (or number,
/// punctuation character, etc.) should be extracted, as shown below. See
/// [https://github.com/google/blockly/wiki/Text#extracting-a-single-character
/// https://github.com/google/blockly/wiki/Text#extracting-a-single-character].
/// [[File:Blockly-text-get.png]]
Blockly.Msg.TEXT_CHARAT_INPUT_INTEXT = 'in text';
/// dropdown - Indicates that the letter (or number, punctuation character, etc.) with the
/// specified index should be obtained from the preceding piece of text. See
/// [https://github.com/google/blockly/wiki/Text#extracting-a-single-character
/// https://github.com/google/blockly/wiki/Text#extracting-a-single-character].
/// [[File:Blockly-text-get.png]]
Blockly.Msg.TEXT_CHARAT_FROM_START = 'get letter #';
/// block text - Indicates that the letter (or number, punctuation character, etc.) with the
/// specified index from the end of a given piece of text should be obtained. See
/// [https://github.com/google/blockly/wiki/Text#extracting-a-single-character
/// https://github.com/google/blockly/wiki/Text#extracting-a-single-character].
/// [[File:Blockly-text-get.png]]
Blockly.Msg.TEXT_CHARAT_FROM_END = 'get letter # from end';
/// block text - Indicates that the first letter of the following piece of text should be
/// retrieved. See [https://github.com/google/blockly/wiki/Text#extracting-a-single-character
/// https://github.com/google/blockly/wiki/Text#extracting-a-single-character].
/// [[File:Blockly-text-get.png]]
Blockly.Msg.TEXT_CHARAT_FIRST = 'get first letter';
/// block text - Indicates that the last letter (or number, punctuation mark, etc.) of the
/// following piece of text should be retrieved. See
/// [https://github.com/google/blockly/wiki/Text#extracting-a-single-character
/// https://github.com/google/blockly/wiki/Text#extracting-a-single-character].
/// [[File:Blockly-text-get.png]]
Blockly.Msg.TEXT_CHARAT_LAST = 'get last letter';
/// block text - Indicates that any letter (or number, punctuation mark, etc.) in the
/// following piece of text should be randomly selected. See
/// [https://github.com/google/blockly/wiki/Text#extracting-a-single-character
/// https://github.com/google/blockly/wiki/Text#extracting-a-single-character].
/// [[File:Blockly-text-get.png]]
Blockly.Msg.TEXT_CHARAT_RANDOM = 'get random letter';
/// block text - Text that goes after the rightmost block/dropdown when getting a single letter from
/// a piece of text, as in [https://blockly-demo.appspot.com/static/apps/code/index.html#3m23km these
/// blocks] or shown below. For most languages, this will be blank.
/// [[File:Blockly-text-get.png]]
Blockly.Msg.TEXT_CHARAT_TAIL = '';
/// tooltip - See [https://github.com/google/blockly/wiki/Text#extracting-a-single-character
/// https://github.com/google/blockly/wiki/Text#extracting-a-single-character].
/// [[File:Blockly-text-get.png]]
Blockly.Msg.TEXT_CHARAT_TOOLTIP = 'Returns the letter at the specified position.';
/// tooltip - See [https://github.com/google/blockly/wiki/Text#extracting-a-region-of-text
/// https://github.com/google/blockly/wiki/Text#extracting-a-region-of-text].
Blockly.Msg.TEXT_GET_SUBSTRING_TOOLTIP = 'Returns a specified portion of the text.';
/// url - Information about extracting characters from text. Reminder: urls are the
/// lowest priority translations. Feel free to skip.
Blockly.Msg.TEXT_GET_SUBSTRING_HELPURL = 'https://github.com/google/blockly/wiki/Text#extracting-a-region-of-text';
/// block text - Precedes a piece of text from which a portion should be extracted.
/// [[File:Blockly-get-substring.png]]
Blockly.Msg.TEXT_GET_SUBSTRING_INPUT_IN_TEXT = 'in text';
/// dropdown - Indicates that the following number specifies the position (relative to the start
/// position) of the beginning of the region of text that should be obtained from the preceding
/// piece of text. See [https://github.com/google/blockly/wiki/Text#extracting-a-region-of-text
/// https://github.com/google/blockly/wiki/Text#extracting-a-region-of-text].
/// [[File:Blockly-get-substring.png]]
Blockly.Msg.TEXT_GET_SUBSTRING_START_FROM_START = 'get substring from letter #';
/// dropdown - Indicates that the following number specifies the position (relative to the end
/// position) of the beginning of the region of text that should be obtained from the preceding
/// piece of text. See [https://github.com/google/blockly/wiki/Text#extracting-a-region-of-text
/// https://github.com/google/blockly/wiki/Text#extracting-a-region-of-text].
/// Note: If {{msg-Blockly|ORDINAL_NUMBER_SUFFIX}} is defined, it will
/// automatically appear ''after'' this and any other
/// [https://translatewiki.net/wiki/Translating:Blockly#Ordinal_numbers ordinal numbers]
/// on this block.
/// [[File:Blockly-get-substring.png]]
Blockly.Msg.TEXT_GET_SUBSTRING_START_FROM_END = 'get substring from letter # from end';
/// block text - Indicates that a region starting with the first letter of the preceding piece
/// of text should be extracted. See
/// [https://github.com/google/blockly/wiki/Text#extracting-a-region-of-text
/// https://github.com/google/blockly/wiki/Text#extracting-a-region-of-text].
/// [[File:Blockly-get-substring.png]]
Blockly.Msg.TEXT_GET_SUBSTRING_START_FIRST = 'get substring from first letter';
/// dropdown - Indicates that the following number specifies the position (relative to
/// the start position) of the end of the region of text that should be obtained from the
/// preceding piece of text. See
/// [https://github.com/google/blockly/wiki/Text#extracting-a-region-of-text
/// https://github.com/google/blockly/wiki/Text#extracting-a-region-of-text].
/// [[File:Blockly-get-substring.png]]
Blockly.Msg.TEXT_GET_SUBSTRING_END_FROM_START = 'to letter #';
/// dropdown - Indicates that the following number specifies the position (relative to the
/// end position) of the end of the region of text that should be obtained from the preceding
/// piece of text. See
/// [https://github.com/google/blockly/wiki/Text#extracting-a-region-of-text
/// https://github.com/google/blockly/wiki/Text#extracting-a-region-of-text].
/// [[File:Blockly-get-substring.png]]
Blockly.Msg.TEXT_GET_SUBSTRING_END_FROM_END = 'to letter # from end';
/// block text - Indicates that a region ending with the last letter of the preceding piece
/// of text should be extracted. See
/// [https://github.com/google/blockly/wiki/Text#extracting-a-region-of-text
/// https://github.com/google/blockly/wiki/Text#extracting-a-region-of-text].
/// [[File:Blockly-get-substring.png]]
Blockly.Msg.TEXT_GET_SUBSTRING_END_LAST = 'to last letter';
/// block text - Text that should go after the rightmost block/dropdown when
/// [https://github.com/google/blockly/wiki/Text#extracting-a-region-of-text
/// extracting a region of text]. In most languages, this will be the empty string.
/// [[File:Blockly-get-substring.png]]
Blockly.Msg.TEXT_GET_SUBSTRING_TAIL = '';
/// url - Information about the case of letters (upper-case and lower-case).
Blockly.Msg.TEXT_CHANGECASE_HELPURL = 'https://github.com/google/blockly/wiki/Text#adjusting-text-case';
/// tooltip - Describes a block to adjust the case of letters. For more information on this block,
/// see [https://github.com/google/blockly/wiki/Text#adjusting-text-case
/// https://github.com/google/blockly/wiki/Text#adjusting-text-case].
Blockly.Msg.TEXT_CHANGECASE_TOOLTIP = 'Return a copy of the text in a different case.';
/// block text - Indicates that all of the letters in the following piece of text should be
/// capitalized. If your language does not use case, you may indicate that this is not
/// applicable to your language. For more information on this block, see
/// [https://github.com/google/blockly/wiki/Text#adjusting-text-case
/// https://github.com/google/blockly/wiki/Text#adjusting-text-case].
Blockly.Msg.TEXT_CHANGECASE_OPERATOR_UPPERCASE = 'to UPPER CASE';
/// block text - Indicates that all of the letters in the following piece of text should be converted to lower-case. If your language does not use case, you may indicate that this is not applicable to your language. For more information on this block, see [https://github.com/google/blockly/wiki/Text#adjusting-text-case https://github.com/google/blockly/wiki/Text#adjusting-text-case].
Blockly.Msg.TEXT_CHANGECASE_OPERATOR_LOWERCASE = 'to lower case';
/// block text - Indicates that the first letter of each of the following words should be capitalized and the rest converted to lower-case. If your language does not use case, you may indicate that this is not applicable to your language. For more information on this block, see [https://github.com/google/blockly/wiki/Text#adjusting-text-case https://github.com/google/blockly/wiki/Text#adjusting-text-case].
Blockly.Msg.TEXT_CHANGECASE_OPERATOR_TITLECASE = 'to Title Case';
/// url - Information about trimming (removing) text off the beginning and ends of pieces of text.
Blockly.Msg.TEXT_TRIM_HELPURL = 'https://github.com/google/blockly/wiki/Text#trimming-removing-spaces';
/// tooltip - See [https://github.com/google/blockly/wiki/Text#trimming-removing-spaces
/// https://github.com/google/blockly/wiki/Text#trimming-removing-spaces].
Blockly.Msg.TEXT_TRIM_TOOLTIP = 'Return a copy of the text with spaces removed from one or both ends.';
/// dropdown - Removes spaces from the beginning and end of a piece of text. See
/// [https://github.com/google/blockly/wiki/Text#trimming-removing-spaces
/// https://github.com/google/blockly/wiki/Text#trimming-removing-spaces]. Note that neither
/// this nor the other options modify the original piece of text (that follows);
/// the block just returns a version of the text without the specified spaces.
Blockly.Msg.TEXT_TRIM_OPERATOR_BOTH = 'trim spaces from both sides of';
/// dropdown - Removes spaces from the beginning of a piece of text. See
/// [https://github.com/google/blockly/wiki/Text#trimming-removing-spaces
/// https://github.com/google/blockly/wiki/Text#trimming-removing-spaces].
/// Note that in right-to-left scripts, this will remove spaces from the right side.
Blockly.Msg.TEXT_TRIM_OPERATOR_LEFT = 'trim spaces from left side of';
/// dropdown - Removes spaces from the end of a piece of text. See
/// [https://github.com/google/blockly/wiki/Text#trimming-removing-spaces
/// https://github.com/google/blockly/wiki/Text#trimming-removing-spaces].
/// Note that in right-to-left scripts, this will remove spaces from the left side.
Blockly.Msg.TEXT_TRIM_OPERATOR_RIGHT = 'trim spaces from right side of';
/// url - Information about displaying text on computers.
Blockly.Msg.TEXT_PRINT_HELPURL = 'https://github.com/google/blockly/wiki/Text#printing-text';
/// block text - Display the input on the screen. See
/// [https://github.com/google/blockly/wiki/Text#printing-text
/// https://github.com/google/blockly/wiki/Text#printing-text].
/// \n\nParameters:\n* %1 - the value to print
Blockly.Msg.TEXT_PRINT_TITLE = 'print %1';
/// tooltip - See [https://github.com/google/blockly/wiki/Text#printing-text
/// https://github.com/google/blockly/wiki/Text#printing-text].
Blockly.Msg.TEXT_PRINT_TOOLTIP = 'Print the specified text, number or other value.';
/// url - Information about getting text from users.
Blockly.Msg.TEXT_PROMPT_HELPURL = 'https://github.com/google/blockly/wiki/Text#getting-input-from-the-user';
/// dropdown - Specifies that a piece of text should be requested from the user with
/// the following message. See [https://github.com/google/blockly/wiki/Text#printing-text
/// https://github.com/google/blockly/wiki/Text#printing-text].
Blockly.Msg.TEXT_PROMPT_TYPE_TEXT = 'prompt for text with message';
/// dropdown - Specifies that a number should be requested from the user with the
/// following message. See [https://github.com/google/blockly/wiki/Text#printing-text
/// https://github.com/google/blockly/wiki/Text#printing-text].
Blockly.Msg.TEXT_PROMPT_TYPE_NUMBER = 'prompt for number with message';
/// dropdown - Precedes the message with which the user should be prompted for
/// a number. See [https://github.com/google/blockly/wiki/Text#printing-text
/// https://github.com/google/blockly/wiki/Text#printing-text].
Blockly.Msg.TEXT_PROMPT_TOOLTIP_NUMBER = 'Prompt for user for a number.';
/// dropdown - Precedes the message with which the user should be prompted for some text.
/// See [https://github.com/google/blockly/wiki/Text#printing-text
/// https://github.com/google/blockly/wiki/Text#printing-text].
Blockly.Msg.TEXT_PROMPT_TOOLTIP_TEXT = 'Prompt for user for some text.';
/// block text - Title of a block that counts the number of instances of
/// a smaller pattern (%1) inside a longer string (%2).
Blockly.Msg.TEXT_COUNT_MESSAGE0 = 'count %1 in %2';
/// url - Information about counting how many times a string appears in another string.
Blockly.Msg.TEXT_COUNT_HELPURL = 'https://github.com/google/blockly/wiki/Text#counting-substrings';
/// tooltip - Short description of a block that counts how many times some text occurs within some other text.
Blockly.Msg.TEXT_COUNT_TOOLTIP = 'Count how many times some text occurs within some other text.';
/// block text - Title of a block that returns a copy of text (%3) with all
/// instances of some smaller text (%1) replaced with other text (%2).
Blockly.Msg.TEXT_REPLACE_MESSAGE0 = 'replace %1 with %2 in %3';
/// url - Information about replacing each copy of some text (or string, in computer lingo) with other text.
Blockly.Msg.TEXT_REPLACE_HELPURL = 'https://github.com/google/blockly/wiki/Text#replacing-substrings';
/// tooltip - Short description of a block that replaces copies of text in a large text with other text.
Blockly.Msg.TEXT_REPLACE_TOOLTIP = 'Replace all occurrences of some text within some other text.';
/// block text - Title of block that returns a copy of text (%1) with the order
/// of letters and characters reversed.
Blockly.Msg.TEXT_REVERSE_MESSAGE0 = 'reverse %1';
/// url - Information about reversing the letters/characters in text.
Blockly.Msg.TEXT_REVERSE_HELPURL = 'https://github.com/google/blockly/wiki/Text#reversing-text';
/// tooltip - See [https://github.com/google/blockly/wiki/Text].
Blockly.Msg.TEXT_REVERSE_TOOLTIP = 'Reverses the order of the characters in the text.';
// Lists Blocks.
/// url - Information on empty lists.
Blockly.Msg.LISTS_CREATE_EMPTY_HELPURL = 'https://github.com/google/blockly/wiki/Lists#create-empty-list';
/// block text - See [https://github.com/google/blockly/wiki/Lists#create-empty-list https://github.com/google/blockly/wiki/Lists#create-empty-list].
Blockly.Msg.LISTS_CREATE_EMPTY_TITLE = 'create empty list';
/// block text - See [https://github.com/google/blockly/wiki/Lists#create-empty-list https://github.com/google/blockly/wiki/Lists#create-empty-list].
Blockly.Msg.LISTS_CREATE_EMPTY_TOOLTIP = 'Returns a list, of length 0, containing no data records';
/// url - Information on building lists.
Blockly.Msg.LISTS_CREATE_WITH_HELPURL = 'https://github.com/google/blockly/wiki/Lists#create-list-with';
/// tooltip - See [https://github.com/google/blockly/wiki/Lists#create-list-with https://github.com/google/blockly/wiki/Lists#create-list-with].
Blockly.Msg.LISTS_CREATE_WITH_TOOLTIP = 'Create a list with any number of items.';
/// block text - See [https://github.com/google/blockly/wiki/Lists#create-list-with https://github.com/google/blockly/wiki/Lists#create-list-with].
Blockly.Msg.LISTS_CREATE_WITH_INPUT_WITH = 'create list with';
/// block text - This appears in a sub-block when [https://github.com/google/blockly/wiki/Lists#changing-number-of-inputs changing the number of inputs in a ''''create list with'''' block].\n{{Identical|List}}
Blockly.Msg.LISTS_CREATE_WITH_CONTAINER_TITLE_ADD = 'list';
/// tooltip - See [https://github.com/google/blockly/wiki/Lists#changing-number-of-inputs https://github.com/google/blockly/wiki/Lists#changing-number-of-inputs].
Blockly.Msg.LISTS_CREATE_WITH_CONTAINER_TOOLTIP = 'Add, remove, or reorder sections to reconfigure this list block.';
Blockly.Msg.LISTS_CREATE_WITH_ITEM_TITLE = Blockly.Msg.VARIABLES_DEFAULT_NAME;
/// tooltip - See [https://github.com/google/blockly/wiki/Lists#changing-number-of-inputs https://github.com/google/blockly/wiki/Lists#changing-number-of-inputs].
Blockly.Msg.LISTS_CREATE_WITH_ITEM_TOOLTIP = 'Add an item to the list.';
/// url - Information about [https://github.com/google/blockly/wiki/Lists#create-list-with creating a list with multiple copies of a single item].
Blockly.Msg.LISTS_REPEAT_HELPURL = 'https://github.com/google/blockly/wiki/Lists#create-list-with';
/// url - See [https://github.com/google/blockly/wiki/Lists#create-list-with creating a list with multiple copies of a single item].
Blockly.Msg.LISTS_REPEAT_TOOLTIP = 'Creates a list consisting of the given value repeated the specified number of times.';
/// block text - See [https://github.com/google/blockly/wiki/Lists#create-list-with
/// https://github.com/google/blockly/wiki/Lists#create-list-with].
///\n\nParameters:\n* %1 - the item (text) to be repeated\n* %2 - the number of times to repeat it
Blockly.Msg.LISTS_REPEAT_TITLE = 'create list with item %1 repeated %2 times';
/// url - Information about how the length of a list is computed (i.e., by the total number of elements, not the number of different elements).
Blockly.Msg.LISTS_LENGTH_HELPURL = 'https://github.com/google/blockly/wiki/Lists#length-of';
/// block text - See [https://github.com/google/blockly/wiki/Lists#length-of https://github.com/google/blockly/wiki/Lists#length-of].
/// \n\nParameters:\n* %1 - the list whose length is desired
Blockly.Msg.LISTS_LENGTH_TITLE = 'length of %1';
/// tooltip - See [https://github.com/google/blockly/wiki/Lists#length-of https://github.com/google/blockly/wiki/Lists#length-of Blockly:Lists:length of].
Blockly.Msg.LISTS_LENGTH_TOOLTIP = 'Returns the length of a list.';
/// url - See [https://github.com/google/blockly/wiki/Lists#is-empty https://github.com/google/blockly/wiki/Lists#is-empty].
Blockly.Msg.LISTS_ISEMPTY_HELPURL = 'https://github.com/google/blockly/wiki/Lists#is-empty';
/// block text - See [https://github.com/google/blockly/wiki/Lists#is-empty
/// https://github.com/google/blockly/wiki/Lists#is-empty].
/// \n\nParameters:\n* %1 - the list to test
Blockly.Msg.LISTS_ISEMPTY_TITLE = '%1 is empty';
/// block tooltip - See [https://github.com/google/blockly/wiki/Lists#is-empty
/// https://github.com/google/blockly/wiki/Lists#is-empty].
Blockly.Msg.LISTS_ISEMPTY_TOOLTIP = 'Returns true if the list is empty.';
/// block text - Title of blocks operating on [https://github.com/google/blockly/wiki/Lists lists].
Blockly.Msg.LISTS_INLIST = 'in list';
/// url - See [https://github.com/google/blockly/wiki/Lists#getting-items-from-a-list
/// https://github.com/google/blockly/wiki/Lists#getting-items-from-a-list].
Blockly.Msg.LISTS_INDEX_OF_HELPURL = 'https://github.com/google/blockly/wiki/Lists#getting-items-from-a-list';
Blockly.Msg.LISTS_INDEX_OF_INPUT_IN_LIST = Blockly.Msg.LISTS_INLIST;
/// dropdown - See [https://github.com/google/blockly/wiki/Lists#finding-items-in-a-list
/// Lists#finding-items-in-a-list].
/// [[File:Blockly-list-find.png]]
Blockly.Msg.LISTS_INDEX_OF_FIRST = 'find first occurrence of item';
/// dropdown - See [https://github.com/google/blockly/wiki/Lists#finding-items-in-a-list
/// https://github.com/google/blockly/wiki/Lists#finding-items-in-a-list].
/// [[File:Blockly-list-find.png]]
Blockly.Msg.LISTS_INDEX_OF_LAST = 'find last occurrence of item';
/// tooltip - %1 will be replaced by either the number 0 or -1 depending on the indexing mode. See [https://github.com/google/blockly/wiki/Lists#finding-items-in-a-list
/// https://github.com/google/blockly/wiki/Lists#finding-items-in-a-list].
/// [[File:Blockly-list-find.png]]
Blockly.Msg.LISTS_INDEX_OF_TOOLTIP = 'Returns the index of the first/last occurrence of the item in the list. Returns %1 if item is not found.';
Blockly.Msg.LISTS_GET_INDEX_HELPURL = Blockly.Msg.LISTS_INDEX_OF_HELPURL;
/// dropdown - Indicates that the user wishes to
/// [https://github.com/google/blockly/wiki/Lists#getting-a-single-item
/// get an item from a list] without removing it from the list.
Blockly.Msg.LISTS_GET_INDEX_GET = 'get';
/// dropdown - Indicates that the user wishes to
/// [https://github.com/google/blockly/wiki/Lists#getting-a-single-item
/// get and remove an item from a list], as opposed to merely getting
/// it without modifying the list.
Blockly.Msg.LISTS_GET_INDEX_GET_REMOVE = 'get and remove';
/// dropdown - Indicates that the user wishes to
/// [https://github.com/google/blockly/wiki/Lists#removing-an-item
/// remove an item from a list].\n{{Identical|Remove}}
Blockly.Msg.LISTS_GET_INDEX_REMOVE = 'remove';
/// dropdown - Indicates that an index relative to the front of the list should be used to
/// [https://github.com/google/blockly/wiki/Lists#getting-a-single-item get and/or remove
/// an item from a list]. Note: If {{msg-Blockly|ORDINAL_NUMBER_SUFFIX}} is defined, it will
/// automatically appear ''after'' this number (and any other ordinal numbers on this block).
/// See [[Translating:Blockly#Ordinal_numbers]] for more information on ordinal numbers in Blockly.
/// [[File:Blockly-list-get-item.png]]
Blockly.Msg.LISTS_GET_INDEX_FROM_START = '#';
/// dropdown - Indicates that an index relative to the end of the list should be used
/// to [https://github.com/google/blockly/wiki/Lists#getting-a-single-item access an item in a list].
/// [[File:Blockly-list-get-item.png]]
Blockly.Msg.LISTS_GET_INDEX_FROM_END = '# from end';
/// dropdown - Indicates that the '''first''' item should be
/// [https://github.com/google/blockly/wiki/Lists#getting-a-single-item accessed in a list].
/// [[File:Blockly-list-get-item.png]]
Blockly.Msg.LISTS_GET_INDEX_FIRST = 'first';
/// dropdown - Indicates that the '''last''' item should be
/// [https://github.com/google/blockly/wiki/Lists#getting-a-single-item accessed in a list].
/// [[File:Blockly-list-get-item.png]]
Blockly.Msg.LISTS_GET_INDEX_LAST = 'last';
/// dropdown - Indicates that a '''random''' item should be
/// [https://github.com/google/blockly/wiki/Lists#getting-a-single-item accessed in a list].
/// [[File:Blockly-list-get-item.png]]
Blockly.Msg.LISTS_GET_INDEX_RANDOM = 'random';
/// block text - Text that should go after the rightmost block/dropdown when
/// [https://github.com/google/blockly/wiki/Lists#getting-a-single-item
/// accessing an item from a list]. In most languages, this will be the empty string.
/// [[File:Blockly-list-get-item.png]]
Blockly.Msg.LISTS_GET_INDEX_TAIL = '';
Blockly.Msg.LISTS_GET_INDEX_INPUT_IN_LIST = Blockly.Msg.LISTS_INLIST;
/// tooltip - Indicates the ordinal number that the first item in a list is referenced by. %1 will be replaced by either "#0" or "#1" depending on the indexing mode.
Blockly.Msg.LISTS_INDEX_FROM_START_TOOLTIP = '%1 is the first item.';
/// tooltip - Indicates the ordinal number that the last item in a list is referenced by. %1 will be replaced by either "#0" or "#1" depending on the indexing mode.
Blockly.Msg.LISTS_INDEX_FROM_END_TOOLTIP = '%1 is the last item.';
/// tooltip - See [https://github.com/google/blockly/wiki/Lists#getting-a-single-item https://github.com/google/blockly/wiki/Lists#getting-a-single-item] for more information.
Blockly.Msg.LISTS_GET_INDEX_TOOLTIP_GET_FROM = 'Returns the item at the specified position in a list.';
/// tooltip - See [https://github.com/google/blockly/wiki/Lists#getting-a-single-item https://github.com/google/blockly/wiki/Lists#getting-a-single-item] for more information.
Blockly.Msg.LISTS_GET_INDEX_TOOLTIP_GET_FIRST = 'Returns the first item in a list.';
/// tooltip - See [https://github.com/google/blockly/wiki/Lists#getting-a-single-item https://github.com/google/blockly/wiki/Lists#getting-a-single-item] for more information.
Blockly.Msg.LISTS_GET_INDEX_TOOLTIP_GET_LAST = 'Returns the last item in a list.';
/// tooltip - See [https://github.com/google/blockly/wiki/Lists#getting-a-single-item https://github.com/google/blockly/wiki/Lists#getting-a-single-item] for more information.
Blockly.Msg.LISTS_GET_INDEX_TOOLTIP_GET_RANDOM = 'Returns a random item in a list.';
/// tooltip - See [https://github.com/google/blockly/wiki/Lists#getting-and-removing-an-item] (for remove and return) and [https://github.com/google/blockly/wiki/Lists#getting-a-single-item] for '#' or '# from end'.
Blockly.Msg.LISTS_GET_INDEX_TOOLTIP_GET_REMOVE_FROM = 'Removes and returns the item at the specified position in a list.';
/// tooltip - See [https://github.com/google/blockly/wiki/Lists#getting-and-removing-an-item] (for remove and return) and [https://github.com/google/blockly/wiki/Lists#getting-a-single-item] for 'first'.
Blockly.Msg.LISTS_GET_INDEX_TOOLTIP_GET_REMOVE_FIRST = 'Removes and returns the first item in a list.';
/// tooltip - See [https://github.com/google/blockly/wiki/Lists#getting-and-removing-an-item] (for remove and return) and [https://github.com/google/blockly/wiki/Lists#getting-a-single-item] for 'last'.
Blockly.Msg.LISTS_GET_INDEX_TOOLTIP_GET_REMOVE_LAST = 'Removes and returns the last item in a list.';
/// tooltip - See [https://github.com/google/blockly/wiki/Lists#getting-and-removing-an-item] (for remove and return) and [https://github.com/google/blockly/wiki/Lists#getting-a-single-item] for 'random'.
Blockly.Msg.LISTS_GET_INDEX_TOOLTIP_GET_REMOVE_RANDOM = 'Removes and returns a random item in a list.';
/// tooltip - See [https://github.com/google/blockly/wiki/Lists#getting-and-removing-an-item] (for remove and return) and [https://github.com/google/blockly/wiki/Lists#getting-a-single-item] for '#' or '# from end'.
Blockly.Msg.LISTS_GET_INDEX_TOOLTIP_REMOVE_FROM = 'Removes the item at the specified position in a list.';
/// tooltip - See [https://github.com/google/blockly/wiki/Lists#getting-and-removing-an-item] (for remove and return) and [https://github.com/google/blockly/wiki/Lists#getting-a-single-item] for 'first'.
Blockly.Msg.LISTS_GET_INDEX_TOOLTIP_REMOVE_FIRST = 'Removes the first item in a list.';
/// tooltip - See [https://github.com/google/blockly/wiki/Lists#getting-and-removing-an-item] (for remove and return) and [https://github.com/google/blockly/wiki/Lists#getting-a-single-item] for 'last'.
Blockly.Msg.LISTS_GET_INDEX_TOOLTIP_REMOVE_LAST = 'Removes the last item in a list.';
/// tooltip - See [https://github.com/google/blockly/wiki/Lists#getting-and-removing-an-item] (for remove and return) and [https://github.com/google/blockly/wiki/Lists#getting-a-single-item] for 'random'.
Blockly.Msg.LISTS_GET_INDEX_TOOLTIP_REMOVE_RANDOM = 'Removes a random item in a list.';
/// url - Information about putting items in lists.
Blockly.Msg.LISTS_SET_INDEX_HELPURL = 'https://github.com/google/blockly/wiki/Lists#in-list--set';
Blockly.Msg.LISTS_SET_INDEX_INPUT_IN_LIST = Blockly.Msg.LISTS_INLIST;
/// block text - [https://github.com/google/blockly/wiki/Lists#in-list--set
/// Replaces an item in a list].
/// [[File:Blockly-in-list-set-insert.png]]
Blockly.Msg.LISTS_SET_INDEX_SET = 'set';
/// block text - [https://github.com/google/blockly/wiki/Lists#in-list--insert-at
/// Inserts an item into a list].
/// [[File:Blockly-in-list-set-insert.png]]
Blockly.Msg.LISTS_SET_INDEX_INSERT = 'insert at';
/// block text - The word(s) after the position in the list and before the item to be set/inserted.
/// [[File:Blockly-in-list-set-insert.png]]
Blockly.Msg.LISTS_SET_INDEX_INPUT_TO = 'as';
/// tooltip - See [https://github.com/google/blockly/wiki/Lists#getting-a-single-item] (even though the page describes the "get" block, the idea is the same for the "set" block).
Blockly.Msg.LISTS_SET_INDEX_TOOLTIP_SET_FROM = 'Sets the item at the specified position in a list.';
/// tooltip - See [https://github.com/google/blockly/wiki/Lists#getting-a-single-item] (even though the page describes the "get" block, the idea is the same for the "set" block).
Blockly.Msg.LISTS_SET_INDEX_TOOLTIP_SET_FIRST = 'Sets the first item in a list.';
/// tooltip - See [https://github.com/google/blockly/wiki/Lists#getting-a-single-item] (even though the page describes the "get" block, the idea is the same for the "set" block).
Blockly.Msg.LISTS_SET_INDEX_TOOLTIP_SET_LAST = 'Sets the last item in a list.';
/// tooltip - See [https://github.com/google/blockly/wiki/Lists#getting-a-single-item] (even though the page describes the "get" block, the idea is the same for the "set" block).
Blockly.Msg.LISTS_SET_INDEX_TOOLTIP_SET_RANDOM = 'Sets a random item in a list.';
/// tooltip - See [https://github.com/google/blockly/wiki/Lists#getting-a-single-item] (even though the page describes the "get" block, the idea is the same for the "insert" block).
Blockly.Msg.LISTS_SET_INDEX_TOOLTIP_INSERT_FROM = 'Inserts the item at the specified position in a list.';
/// tooltip - See [https://github.com/google/blockly/wiki/Lists#getting-a-single-item] (even though the page describes the "get" block, the idea is the same for the "insert" block).
Blockly.Msg.LISTS_SET_INDEX_TOOLTIP_INSERT_FIRST = 'Inserts the item at the start of a list.';
/// tooltip - See [https://github.com/google/blockly/wiki/Lists#getting-a-single-item] (even though the page describes the "get" block, the idea is the same for the "insert" block).
Blockly.Msg.LISTS_SET_INDEX_TOOLTIP_INSERT_LAST = 'Append the item to the end of a list.';
/// tooltip - See [https://github.com/google/blockly/wiki/Lists#getting-a-single-item] (even though the page describes the "get" block, the idea is the same for the "insert" block).
Blockly.Msg.LISTS_SET_INDEX_TOOLTIP_INSERT_RANDOM = 'Inserts the item randomly in a list.';
/// url - Information describing extracting a sublist from an existing list.
Blockly.Msg.LISTS_GET_SUBLIST_HELPURL = 'https://github.com/google/blockly/wiki/Lists#getting-a-sublist';
Blockly.Msg.LISTS_GET_SUBLIST_INPUT_IN_LIST = Blockly.Msg.LISTS_INLIST;
/// dropdown - Indicates that an index relative to the front of the list should be used
/// to specify the beginning of the range from which to
/// [https://github.com/google/blockly/wiki/Lists#getting-a-sublist get a sublist].
/// [[File:Blockly-get-sublist.png]]
/// Note: If {{msg-Blockly|ORDINAL_NUMBER_SUFFIX}} is defined, it will
/// automatically appear ''after'' this number (and any other ordinal numbers on this block).
/// See [[Translating:Blockly#Ordinal_numbers]] for more information on ordinal numbers in Blockly.
Blockly.Msg.LISTS_GET_SUBLIST_START_FROM_START = 'get sub-list from #';
/// dropdown - Indicates that an index relative to the end of the list should be used
/// to specify the beginning of the range from which to
/// [https://github.com/google/blockly/wiki/Lists#getting-a-sublist get a sublist].
Blockly.Msg.LISTS_GET_SUBLIST_START_FROM_END = 'get sub-list from # from end';
/// dropdown - Indicates that the
/// [https://github.com/google/blockly/wiki/Lists#getting-a-sublist sublist to extract]
/// should begin with the list's first item.
Blockly.Msg.LISTS_GET_SUBLIST_START_FIRST = 'get sub-list from first';
/// dropdown - Indicates that an index relative to the front of the list should be
/// used to specify the end of the range from which to
/// [https://github.com/google/blockly/wiki/Lists#getting-a-sublist get a sublist].
/// [[File:Blockly-get-sublist.png]]
Blockly.Msg.LISTS_GET_SUBLIST_END_FROM_START = 'to #';
/// dropdown - Indicates that an index relative to the end of the list should be
/// used to specify the end of the range from which to
/// [https://github.com/google/blockly/wiki/Lists#getting-a-sublist get a sublist].
/// [[File:Blockly-get-sublist.png]]
Blockly.Msg.LISTS_GET_SUBLIST_END_FROM_END = 'to # from end';
/// dropdown - Indicates that the '''last''' item in the given list should be
/// [https://github.com/google/blockly/wiki/Lists#getting-a-sublist the end
/// of the selected sublist].
/// [[File:Blockly-get-sublist.png]]
Blockly.Msg.LISTS_GET_SUBLIST_END_LAST = 'to last';
/// block text - This appears in the rightmost position ("tail") of the
/// sublist block, as described at
/// [https://github.com/google/blockly/wiki/Lists#getting-a-sublist
/// https://github.com/google/blockly/wiki/Lists#getting-a-sublist].
/// In English and most other languages, this is the empty string.
/// [[File:Blockly-get-sublist.png]]
Blockly.Msg.LISTS_GET_SUBLIST_TAIL = '';
/// tooltip - See [https://github.com/google/blockly/wiki/Lists#getting-a-sublist
/// https://github.com/google/blockly/wiki/Lists#getting-a-sublist] for more information.
/// [[File:Blockly-get-sublist.png]]
Blockly.Msg.LISTS_GET_SUBLIST_TOOLTIP = 'Creates a copy of the specified portion of a list.';
/// {{optional}}\nurl - Information describing sorting a list.
Blockly.Msg.LISTS_SORT_HELPURL = 'https://github.com/google/blockly/wiki/Lists#sorting-a-list';
/// Sort as type %1 (numeric or alphabetic) in order %2 (ascending or descending) a list of items %3.\n{{Identical|Sort}}
Blockly.Msg.LISTS_SORT_TITLE = 'sort %1 %2 %3';
/// tooltip - See [https://github.com/google/blockly/wiki/Lists#sorting-a-list].
Blockly.Msg.LISTS_SORT_TOOLTIP = 'Sort a copy of a list.';
/// sorting order or direction from low to high value for numeric, or A-Z for alphabetic.\n{{Identical|Ascending}}
Blockly.Msg.LISTS_SORT_ORDER_ASCENDING = 'ascending';
/// sorting order or direction from high to low value for numeric, or Z-A for alphabetic.\n{{Identical|Descending}}
Blockly.Msg.LISTS_SORT_ORDER_DESCENDING = 'descending';
/// sort by treating each item as a number.
Blockly.Msg.LISTS_SORT_TYPE_NUMERIC = 'numeric';
/// sort by treating each item alphabetically, case-sensitive.
Blockly.Msg.LISTS_SORT_TYPE_TEXT = 'alphabetic';
/// sort by treating each item alphabetically, ignoring differences in case.
Blockly.Msg.LISTS_SORT_TYPE_IGNORECASE = 'alphabetic, ignore case';
/// url - Information describing splitting text into a list, or joining a list into text.
Blockly.Msg.LISTS_SPLIT_HELPURL = 'https://github.com/google/blockly/wiki/Lists#splitting-strings-and-joining-lists';
/// dropdown - Indicates that text will be split up into a list (e.g. "a-b-c" -> ["a", "b", "c"]).
Blockly.Msg.LISTS_SPLIT_LIST_FROM_TEXT = 'make list from text';
/// dropdown - Indicates that a list will be joined together to form text (e.g. ["a", "b", "c"] -> "a-b-c").
Blockly.Msg.LISTS_SPLIT_TEXT_FROM_LIST = 'make text from list';
/// block text - Prompts for a letter to be used as a separator when splitting or joining text.
Blockly.Msg.LISTS_SPLIT_WITH_DELIMITER = 'with delimiter';
/// tooltip - See [https://github.com/google/blockly/wiki/Lists#make-list-from-text
/// https://github.com/google/blockly/wiki/Lists#make-list-from-text] for more information.
Blockly.Msg.LISTS_SPLIT_TOOLTIP_SPLIT = 'Split text into a list of texts, breaking at each delimiter.';
/// tooltip - See [https://github.com/google/blockly/wiki/Lists#make-text-from-list
/// https://github.com/google/blockly/wiki/Lists#make-text-from-list] for more information.
Blockly.Msg.LISTS_SPLIT_TOOLTIP_JOIN = 'Join a list of texts into one text, separated by a delimiter.';
/// url - Information describing reversing a list.
Blockly.Msg.LISTS_REVERSE_HELPURL = 'https://github.com/google/blockly/wiki/Lists#reversing-a-list';
/// block text - Title of block that returns a copy of a list (%1) with the order of items reversed.
Blockly.Msg.LISTS_REVERSE_MESSAGE0 = 'reverse %1';
/// tooltip - Short description for a block that reverses a copy of a list.
Blockly.Msg.LISTS_REVERSE_TOOLTIP = 'Reverse a copy of a list.';
/// grammar - Text that follows an ordinal number (a number that indicates
/// position relative to other numbers). In most languages, such text appears
/// before the number, so this should be blank. An exception is Hungarian.
/// See [[Translating:Blockly#Ordinal_numbers]] for more information.
Blockly.Msg.ORDINAL_NUMBER_SUFFIX = '';
// Variables Blocks.
/// url - Information about ''variables'' in computer programming. Consider using your language's translation of [https://en.wikipedia.org/wiki/Variable_(computer_science) https://en.wikipedia.org/wiki/Variable_(computer_science)], if it exists.
Blockly.Msg.VARIABLES_GET_HELPURL = 'https://github.com/google/blockly/wiki/Variables#get';
/// tooltip - This gets the value of the named variable without modifying it.
Blockly.Msg.VARIABLES_GET_TOOLTIP = 'Returns the value of this variable.';
/// context menu - Selecting this creates a block to set (change) the value of this variable.
/// \n\nParameters:\n* %1 - the name of the variable.
Blockly.Msg.VARIABLES_GET_CREATE_SET = 'Create "set %1"';
/// url - Information about ''variables'' in computer programming. Consider using your language's translation of [https://en.wikipedia.org/wiki/Variable_(computer_science) https://en.wikipedia.org/wiki/Variable_(computer_science)], if it exists.
Blockly.Msg.VARIABLES_SET_HELPURL = 'https://github.com/google/blockly/wiki/Variables#set';
/// block text - Change the value of a mathematical variable: '''set [the value of] x to 7'''.\n\nParameters:\n* %1 - the name of the variable.\n* %2 - the value to be assigned.
Blockly.Msg.VARIABLES_SET = 'set %1 to %2';
/// tooltip - This initializes or changes the value of the named variable.
Blockly.Msg.VARIABLES_SET_TOOLTIP = 'Sets this variable to be equal to the input.';
/// context menu - Selecting this creates a block to get (change) the value of
/// this variable.\n\nParameters:\n* %1 - the name of the variable.
Blockly.Msg.VARIABLES_SET_CREATE_GET = 'Create "get %1"';
// Procedures Blocks.
/// url - Information about defining [https://en.wikipedia.org/wiki/Subroutine functions] that do not have return values.
Blockly.Msg.PROCEDURES_DEFNORETURN_HELPURL = 'https://en.wikipedia.org/wiki/Subroutine';
/// block text - This precedes the name of the function when defining it. See
/// [https://blockly-demo.appspot.com/static/apps/code/index.html?lang=en#c84aoc this sample
/// function definition].
Blockly.Msg.PROCEDURES_DEFNORETURN_TITLE = 'to';
/// default name - This acts as a placeholder for the name of a function on a
/// function definition block, as shown on
/// [https://blockly-demo.appspot.com/static/apps/code/index.html?lang=en#w7cfju this block].
/// The user will replace it with the function's name.
Blockly.Msg.PROCEDURES_DEFNORETURN_PROCEDURE = 'do something';
/// block text - This precedes the list of parameters on a function's definition block. See
/// [https://blockly-demo.appspot.com/static/apps/code/index.html?lang=en#voztpd this sample
/// function with parameters].
Blockly.Msg.PROCEDURES_BEFORE_PARAMS = 'with:';
/// block text - This precedes the list of parameters on a function's caller block. See
/// [https://blockly-demo.appspot.com/static/apps/code/index.html?lang=en#voztpd this sample
/// function with parameters].
Blockly.Msg.PROCEDURES_CALL_BEFORE_PARAMS = 'with:';
/// block text - This appears next to the function's "body", the blocks that should be
/// run when the function is called, as shown in
/// [https://blockly-demo.appspot.com/static/apps/code/index.html?lang=en#voztpd this sample
/// function definition].
Blockly.Msg.PROCEDURES_DEFNORETURN_DO = '';
/// tooltip
Blockly.Msg.PROCEDURES_DEFNORETURN_TOOLTIP = 'Creates a function with no output.';
/// Placeholder text that the user is encouraged to replace with a description of what their function does.
Blockly.Msg.PROCEDURES_DEFNORETURN_COMMENT = 'Describe this function...';
/// url - Information about defining [https://en.wikipedia.org/wiki/Subroutine functions] that have return values.
Blockly.Msg.PROCEDURES_DEFRETURN_HELPURL = 'https://en.wikipedia.org/wiki/Subroutine';
Blockly.Msg.PROCEDURES_DEFRETURN_TITLE = Blockly.Msg.PROCEDURES_DEFNORETURN_TITLE;
Blockly.Msg.PROCEDURES_DEFRETURN_PROCEDURE = Blockly.Msg.PROCEDURES_DEFNORETURN_PROCEDURE;
Blockly.Msg.PROCEDURES_DEFRETURN_DO = Blockly.Msg.PROCEDURES_DEFNORETURN_DO;
Blockly.Msg.PROCEDURES_DEFRETURN_COMMENT = Blockly.Msg.PROCEDURES_DEFNORETURN_COMMENT;
/// block text - This imperative or infinitive verb precedes the value that is used as the return value
/// (output) of this function. See
/// [https://blockly-demo.appspot.com/static/apps/code/index.html?lang=en#6ot5y5 this sample
/// function that returns a value].
Blockly.Msg.PROCEDURES_DEFRETURN_RETURN = 'return';
/// tooltip
Blockly.Msg.PROCEDURES_DEFRETURN_TOOLTIP = 'Creates a function with an output.';
/// Label for a checkbox that controls if statements are allowed in a function.
Blockly.Msg.PROCEDURES_ALLOW_STATEMENTS = 'allow statements';
/// alert - The user has created a function with two parameters that have the same name. Every parameter must have a different name.
Blockly.Msg.PROCEDURES_DEF_DUPLICATE_WARNING = 'Warning: This function has duplicate parameters.';
/// url - Information about calling [https://en.wikipedia.org/wiki/Subroutine functions] that do not return values.
Blockly.Msg.PROCEDURES_CALLNORETURN_HELPURL = 'https://en.wikipedia.org/wiki/Subroutine';
/// tooltip - This block causes the body (blocks inside) of the named function definition to be run.
Blockly.Msg.PROCEDURES_CALLNORETURN_TOOLTIP = 'Run the user-defined function "%1".';
/// url - Information about calling [https://en.wikipedia.org/wiki/Subroutine functions] that return values.
Blockly.Msg.PROCEDURES_CALLRETURN_HELPURL = 'https://en.wikipedia.org/wiki/Subroutine';
/// tooltip - This block causes the body (blocks inside) of the named function definition to be run.\n\nParameters:\n* %1 - the name of the function.
Blockly.Msg.PROCEDURES_CALLRETURN_TOOLTIP = 'Run the user-defined function "%1" and use its output.';
/// block text - This text appears on a block in a window that appears when the user clicks
/// on the plus sign or star on a function definition block. It refers to the set of parameters
/// (referred to by the simpler term "inputs") to the function. See
/// [[Translating:Blockly#function_definitions]].
Blockly.Msg.PROCEDURES_MUTATORCONTAINER_TITLE = 'inputs';
/// tooltip
Blockly.Msg.PROCEDURES_MUTATORCONTAINER_TOOLTIP = 'Add, remove, or reorder inputs to this function.';
/// block text - This text appears on a block in a window that appears when the user clicks
/// on the plus sign or star on a function definition block. It appears on the block for
/// adding an individual parameter (referred to by the simpler term "inputs") to the function.
/// See [[Translating:Blockly#function_definitions]].
Blockly.Msg.PROCEDURES_MUTATORARG_TITLE = 'input name:';
/// tooltip
Blockly.Msg.PROCEDURES_MUTATORARG_TOOLTIP = 'Add an input to the function.';
/// context menu - This appears on the context menu for function calls. Selecting
/// it causes the corresponding function definition to be highlighted (as shown at
/// [[Translating:Blockly#context_menus]]).
Blockly.Msg.PROCEDURES_HIGHLIGHT_DEF = 'Highlight function definition';
/// context menu - This appears on the context menu for function definitions.
/// Selecting it creates a block to call the function.\n\nParameters:\n* %1 - the name of the function.\n{{Identical|Create}}
Blockly.Msg.PROCEDURES_CREATE_DO = 'Create "%1"';
/// tooltip - If the first value is true, this causes the second value to be returned
/// immediately from the enclosing function.
Blockly.Msg.PROCEDURES_IFRETURN_TOOLTIP = 'If a value is true, then return a second value.';
/// {{optional}}\nurl - Information about guard clauses.
Blockly.Msg.PROCEDURES_IFRETURN_HELPURL = 'http://c2.com/cgi/wiki?GuardClause';
/// warning - This appears if the user tries to use this block outside of a function definition.
Blockly.Msg.PROCEDURES_IFRETURN_WARNING = 'Warning: This block may be used only within a function definition.';
| 1 | 8,883 | Let's call this "NEW_BROADCAST_MESSAGE". Msg.NEW_MESSAGE will be confusing down the line. | LLK-scratch-blocks | js |
@@ -128,11 +128,17 @@ type Container struct {
// is handled properly so that the state storage continues to work.
KnownStatusUnsafe ContainerStatus `json:"KnownStatus"`
+ // TransitionDependenciesMap is a map of the dependent container status to other
+ // dependencies that must be satisfied in order for this container to transition.
+ TransitionDependenciesMap map[ContainerStatus]TransitionDependencySet
+
// TransitionDependencySet is a set of dependencies that must be satisfied
// in order for this container to transition. Each transition dependency
// specifies a resource upon which the transition is dependent, a status
// that depends on the resource, and the state of the dependency that
// satisfies.
+ // Deprecated: Use TransitionDependenciesMap instead. TransitionDependencySet is
+ // retained for compatibility with old state files.
TransitionDependencySet TransitionDependencySet `json:"TransitionDependencySet"`
// SteadyStateDependencies is a list of containers that must be in "steady state" before | 1 | // Copyright 2014-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may
// not use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language governing
// permissions and limitations under the License.
package api
import (
"fmt"
"strconv"
"sync"
"time"
"github.com/aws/amazon-ecs-agent/agent/credentials"
"github.com/aws/aws-sdk-go/aws"
)
const (
// defaultContainerSteadyStateStatus defines the container status at
// which the container is assumed to be in steady state. It is set
// to 'ContainerRunning' unless overridden
defaultContainerSteadyStateStatus = ContainerRunning
// awslogsAuthExecutionRole is the string value passed in the task payload
// that specifies that the log driver should be authenticated using the
// execution role
awslogsAuthExecutionRole = "ExecutionRole"
// dockerHealthCheckType is the type of container health check provided by docker
dockerHealthCheckType = "docker"
)
// DockerConfig represents additional metadata about a container to run. It's
// remodeled from the `ecsacs` api model file. Eventually it should not exist
// once this remodeling is refactored out.
type DockerConfig struct {
// Config is the configuration used to create container
Config *string `json:"config"`
// HostConfig is the configuration of container related to host resource
HostConfig *string `json:"hostConfig"`
// Version specifies the docker client API version to use
Version *string `json:"version"`
}
// HealthStatus contains the health check result returned by docker
type HealthStatus struct {
// Status is the container health status
Status ContainerHealthStatus `json:"status,omitempty"`
// Since is the timestamp when container health status changed
Since *time.Time `json:"statusSince,omitempty"`
// ExitCode is the exit code of the health check if it failed
ExitCode int `json:"exitCode,omitempty"`
// Output is the output of health check
Output string `json:"output,omitempty"`
}
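// exampleHealthStatus is an editorial sketch, not part of the original agent
// source: it shows how a HealthStatus value might be populated after a failed
// docker health check. ContainerUnhealthy is assumed to be the unhealthy
// status constant defined elsewhere in this package; the exit code and output
// string below are invented purely for illustration.
func exampleHealthStatus() HealthStatus {
    now := time.Now()
    return HealthStatus{
        Status:   ContainerUnhealthy, // assumed package constant for an unhealthy container
        Since:    &now,
        ExitCode: 1,
        Output:   "health check command exited with a non-zero status",
    }
}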
// Container is the internal representation of a container in the ECS agent
type Container struct {
// Name is the name of the container specified in the task definition
Name string
// Image is the image name specified in the task definition
Image string
// ImageID is the local ID of the image used in the container
ImageID string
// Command is the command to run in the container which is specified in the task definition
Command []string
// CPU is the cpu limitation of the container which is specified in the task definition
CPU uint `json:"Cpu"`
// Memory is the memory limitation of the container which is specified in the task definition
Memory uint
// Links contains a list of containers to link, corresponding to docker option: --link
Links []string
// VolumesFrom contains a list of container's volume to use, corresponding to docker option: --volumes-from
VolumesFrom []VolumeFrom `json:"volumesFrom"`
// MountPoints contains a list of volume mount paths
MountPoints []MountPoint `json:"mountPoints"`
// Ports contains a list of ports binding configuration
Ports []PortBinding `json:"portMappings"`
// Essential denotes whether the container is essential or not
Essential bool
// EntryPoint is entrypoint of the container, corresponding to docker option: --entrypoint
EntryPoint *[]string
// Environment is the environment variable set in the container
Environment map[string]string `json:"environment"`
// Overrides contains the configuration to override of a container
Overrides ContainerOverrides `json:"overrides"`
// DockerConfig is the configuration used to create the container
DockerConfig DockerConfig `json:"dockerConfig"`
// RegistryAuthentication is the auth data used to pull image
RegistryAuthentication *RegistryAuthenticationData `json:"registryAuthentication"`
// HealthCheckType is the mechanism to use for the container health check
// currently it only supports 'DOCKER'
HealthCheckType string `json:"healthCheckType,omitempty"`
// Health contains the health check information of container health check
Health HealthStatus `json:"-"`
// LogsAuthStrategy specifies how the logs driver for the container will be
// authenticated
LogsAuthStrategy string
// lock is used for fields that are accessed and updated concurrently
lock sync.RWMutex
// DesiredStatusUnsafe represents the state where the container should go. Generally,
// the desired status is informed by the ECS backend as a result of either
// API calls made to ECS or decisions made by the ECS service scheduler,
// though the agent may also set the DesiredStatusUnsafe if a different "essential"
// container in the task exits. The DesiredStatus is almost always either
// ContainerRunning or ContainerStopped.
// NOTE: Do not access DesiredStatusUnsafe directly. Instead, use `GetDesiredStatus`
// and `SetDesiredStatus`.
// TODO DesiredStatusUnsafe should probably be private with appropriately written
// setter/getter. When this is done, we need to ensure that the UnmarshalJSON
// is handled properly so that the state storage continues to work.
DesiredStatusUnsafe ContainerStatus `json:"desiredStatus"`
// KnownStatusUnsafe represents the state where the container is.
// NOTE: Do not access `KnownStatusUnsafe` directly. Instead, use `GetKnownStatus`
// and `SetKnownStatus`.
// TODO KnownStatusUnsafe should probably be private with appropriately written
// setter/getter. When this is done, we need to ensure that the UnmarshalJSON
// is handled properly so that the state storage continues to work.
KnownStatusUnsafe ContainerStatus `json:"KnownStatus"`
// TransitionDependencySet is a set of dependencies that must be satisfied
// in order for this container to transition. Each transition dependency
// specifies a resource upon which the transition is dependent, a status
// that depends on the resource, and the state of the dependency that
// satisfies.
TransitionDependencySet TransitionDependencySet `json:"TransitionDependencySet"`
// SteadyStateDependencies is a list of containers that must be in "steady state" before
// this one is created
// Note: Current logic requires that the containers specified here are run
// before this container can even be pulled.
//
// Deprecated: Use TransitionDependencySet instead. SteadyStateDependencies is retained for compatibility with old
// state files.
SteadyStateDependencies []string `json:"RunDependencies"`
// Type specifies the container type. Except the 'Normal' type, all other types
// are not directly specified by task definitions, but created by the agent. The
// JSON tag is retained as this field's previous name 'IsInternal' for maintaining
// backwards compatibility. Please see JSON parsing hooks for this type for more
// details
Type ContainerType `json:"IsInternal"`
// AppliedStatus is the status that has been "applied" (e.g., we've called Pull,
// Create, Start, or Stop) but we don't yet know that the application was successful.
AppliedStatus ContainerStatus
// ApplyingError is an error that occurred trying to transition the container
// to its desired state. It is propagated to the backend in the form
// 'Name: ErrorString' as the 'reason' field.
ApplyingError *DefaultNamedError
// SentStatusUnsafe represents the last KnownStatusUnsafe that was sent to the ECS
// SubmitContainerStateChange API.
// TODO SentStatusUnsafe should probably be private with appropriately written
// setter/getter. When this is done, we need to ensure that the UnmarshalJSON is
// handled properly so that the state storage continues to work.
SentStatusUnsafe ContainerStatus `json:"SentStatus"`
// MetadataFileUpdated is set to true when we have completed updating the
// metadata file
MetadataFileUpdated bool `json:"metadataFileUpdated"`
// KnownExitCodeUnsafe specifies the exit code for the container.
// It is exposed outside of the package so that it's marshalled/unmarshalled in
// the JSON body while saving the state.
// NOTE: Do not access KnownExitCodeUnsafe directly. Instead, use `GetKnownExitCode`
// and `SetKnownExitCode`.
KnownExitCodeUnsafe *int `json:"KnownExitCode"`
// KnownPortBindings is an array of port bindings for the container.
KnownPortBindings []PortBinding
// SteadyStateStatusUnsafe specifies the steady state status for the container
// If uninitialized, it's assumed to be set to 'ContainerRunning'. Even though
// it's not only supposed to be set when the container is being created, it's
// exposed outside of the package so that it's marshalled/unmarshalled in the
// JSON body while saving the state
SteadyStateStatusUnsafe *ContainerStatus `json:"SteadyStateStatus,omitempty"`
createdAt time.Time
startedAt time.Time
finishedAt time.Time
labels map[string]string
}
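// exampleTaskDefinitionContainer is an editorial sketch, not part of the
// original agent source: it shows how a few of the Container fields above map
// to a typical task definition entry. Every literal value here (name, image,
// command, resource limits, environment) is invented for illustration only.
func exampleTaskDefinitionContainer() *Container {
    return &Container{
        Name:      "web",
        Image:     "nginx:latest",
        Command:   []string{"nginx", "-g", "daemon off;"},
        CPU:       256,
        Memory:    512,
        Essential: true,
        Environment: map[string]string{
            "PORT": "80",
        },
    }
}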
// DockerContainer is a mapping between containers-as-docker-knows-them and
// containers-as-we-know-them.
// This is primarily used in DockerState, but lives here such that tasks and
// containers know how to convert themselves into Docker's desired config format
type DockerContainer struct {
DockerID string `json:"DockerId"`
DockerName string // needed for linking
Container *Container
}
// String returns a human readable string representation of DockerContainer
func (dc *DockerContainer) String() string {
if dc == nil {
return "nil"
}
return fmt.Sprintf("Id: %s, Name: %s, Container: %s", dc.DockerID, dc.DockerName, dc.Container.String())
}
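// exampleDockerContainerMapping is an editorial sketch, not part of the
// original agent source: it shows how a DockerContainer ties a docker-assigned
// ID and name back to the agent's own Container representation. The ID and
// name literals are invented for illustration.
func exampleDockerContainerMapping(c *Container) string {
    dc := DockerContainer{
        DockerID:   "0123456789ab",
        DockerName: "ecs-sample-task-1-web",
        Container:  c,
    }
    return dc.String()
}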
// NewContainerWithSteadyState creates a new Container object with the specified
// steady state. Containers that need the non default steady state set will
// use this method instead of setting it directly
func NewContainerWithSteadyState(steadyState ContainerStatus) *Container {
steadyStateStatus := steadyState
return &Container{
SteadyStateStatusUnsafe: &steadyStateStatus,
}
}
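// examplePauseContainer is an editorial sketch, not part of the original agent
// source: it shows how a container with a non-default steady state could be
// created. It assumes ContainerResourcesProvisioned and ContainerCNIPause are
// the status and type constants referenced by comments elsewhere in this file;
// the agent itself may wire this up differently.
func examplePauseContainer() *Container {
    pause := NewContainerWithSteadyState(ContainerResourcesProvisioned)
    pause.Type = ContainerCNIPause // assumed container type constant for the CNI 'pause' container
    return pause
}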
// KnownTerminal returns true if the container's known status is STOPPED
func (c *Container) KnownTerminal() bool {
return c.GetKnownStatus().Terminal()
}
// DesiredTerminal returns true if the container's desired status is STOPPED
func (c *Container) DesiredTerminal() bool {
return c.GetDesiredStatus().Terminal()
}
// GetKnownStatus returns the known status of the container
func (c *Container) GetKnownStatus() ContainerStatus {
c.lock.RLock()
defer c.lock.RUnlock()
return c.KnownStatusUnsafe
}
// SetKnownStatus sets the known status of the container
func (c *Container) SetKnownStatus(status ContainerStatus) {
c.lock.Lock()
defer c.lock.Unlock()
c.KnownStatusUnsafe = status
}
// GetDesiredStatus gets the desired status of the container
func (c *Container) GetDesiredStatus() ContainerStatus {
c.lock.RLock()
defer c.lock.RUnlock()
return c.DesiredStatusUnsafe
}
// SetDesiredStatus sets the desired status of the container
func (c *Container) SetDesiredStatus(status ContainerStatus) {
c.lock.Lock()
defer c.lock.Unlock()
c.DesiredStatusUnsafe = status
}
// GetSentStatus safely returns the SentStatusUnsafe of the container
func (c *Container) GetSentStatus() ContainerStatus {
c.lock.RLock()
defer c.lock.RUnlock()
return c.SentStatusUnsafe
}
// SetSentStatus safely sets the SentStatusUnsafe of the container
func (c *Container) SetSentStatus(status ContainerStatus) {
c.lock.Lock()
defer c.lock.Unlock()
c.SentStatusUnsafe = status
}
// SetKnownExitCode sets exit code field in container struct
func (c *Container) SetKnownExitCode(i *int) {
c.lock.Lock()
defer c.lock.Unlock()
c.KnownExitCodeUnsafe = i
}
// GetKnownExitCode returns the container exit code
func (c *Container) GetKnownExitCode() *int {
c.lock.RLock()
defer c.lock.RUnlock()
return c.KnownExitCodeUnsafe
}
// SetRegistryAuthCredentials sets the credentials for pulling image from ECR
func (c *Container) SetRegistryAuthCredentials(credential credentials.IAMRoleCredentials) {
c.lock.Lock()
defer c.lock.Unlock()
c.RegistryAuthentication.ECRAuthData.SetPullCredentials(credential)
}
// ShouldPullWithExecutionRole returns whether this container has its own ECR credentials
func (c *Container) ShouldPullWithExecutionRole() bool {
c.lock.RLock()
defer c.lock.RUnlock()
return c.RegistryAuthentication != nil &&
c.RegistryAuthentication.Type == "ecr" &&
c.RegistryAuthentication.ECRAuthData != nil &&
c.RegistryAuthentication.ECRAuthData.UseExecutionRole
}
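// examplePullCredentials is an editorial sketch, not part of the original
// agent source: it shows how a caller might refresh ECR pull credentials from
// the task execution role only when the container is configured for it. The
// zero-valued IAMRoleCredentials stands in for credentials obtained elsewhere.
func examplePullCredentials(c *Container) {
    if c.ShouldPullWithExecutionRole() {
        c.SetRegistryAuthCredentials(credentials.IAMRoleCredentials{})
    }
}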
// String returns a human readable string representation of this object
func (c *Container) String() string {
ret := fmt.Sprintf("%s(%s) (%s->%s)", c.Name, c.Image,
c.GetKnownStatus().String(), c.GetDesiredStatus().String())
if c.GetKnownExitCode() != nil {
ret += " - Exit: " + strconv.Itoa(*c.GetKnownExitCode())
}
return ret
}
// GetSteadyStateStatus returns the steady state status for the container. If
// Container.steadyState is not initialized, the default steady state status
// defined by `defaultContainerSteadyStateStatus` is returned. The 'pause'
// container's steady state differs from that of other containers, as the
// 'pause' container can reach its steady state once networking resources
// have been provisioned for it, which is done in the `ContainerResourcesProvisioned`
// state
func (c *Container) GetSteadyStateStatus() ContainerStatus {
if c.SteadyStateStatusUnsafe == nil {
return defaultContainerSteadyStateStatus
}
return *c.SteadyStateStatusUnsafe
}
// IsKnownSteadyState returns true if the `KnownState` of the container equals
// the `steadyState` defined for the container
func (c *Container) IsKnownSteadyState() bool {
knownStatus := c.GetKnownStatus()
return knownStatus == c.GetSteadyStateStatus()
}
// GetNextKnownStateProgression returns the state that the container should
// progress to based on its `KnownState`. The progression is
// incremental until the container reaches its steady state. From then on,
// it transitions to `ContainerStopped`.
//
// For example:
// a. if the steady state of the container is defined as `ContainerRunning`,
// the progression is:
// Container: None -> Pulled -> Created -> Running* -> Stopped -> Zombie
//
// b. if the steady state of the container is defined as `ContainerResourcesProvisioned`,
// the progression is:
// Container: None -> Pulled -> Created -> Running -> Provisioned* -> Stopped -> Zombie
//
// c. if the steady state of the container is defined as `ContainerCreated`,
// the progression is:
// Container: None -> Pulled -> Created* -> Stopped -> Zombie
func (c *Container) GetNextKnownStateProgression() ContainerStatus {
if c.IsKnownSteadyState() {
return ContainerStopped
}
return c.GetKnownStatus() + 1
}
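// exampleStateProgression is an editorial sketch, not part of the original
// agent source: it walks a container's known status forward one step at a
// time until ContainerStopped is reached, mirroring progression (a) in the
// comment above for a container with the default steady state.
func exampleStateProgression(c *Container) []ContainerStatus {
    var steps []ContainerStatus
    for c.GetKnownStatus() != ContainerStopped {
        next := c.GetNextKnownStateProgression()
        c.SetKnownStatus(next)
        steps = append(steps, next)
    }
    return steps
}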
// IsInternal returns true if the container type is either `ContainerEmptyHostVolume`
// or `ContainerCNIPause`. It returns false otherwise
func (c *Container) IsInternal() bool {
if c.Type == ContainerNormal {
return false
}
return true
}
// IsRunning returns true if the container's known status is either RUNNING
// or RESOURCES_PROVISIONED. It returns false otherwise
func (c *Container) IsRunning() bool {
return c.GetKnownStatus().IsRunning()
}
// IsMetadataFileUpdated returns true once the metadata file has been updated,
// i.e. the metadata file is ready and will no longer change
func (c *Container) IsMetadataFileUpdated() bool {
c.lock.RLock()
defer c.lock.RUnlock()
return c.MetadataFileUpdated
}
// SetMetadataFileUpdated sets the container's MetadataFileUpdated status to true
func (c *Container) SetMetadataFileUpdated() {
c.lock.Lock()
defer c.lock.Unlock()
c.MetadataFileUpdated = true
}
// IsEssential returns whether the container is an essential container or not
func (c *Container) IsEssential() bool {
c.lock.RLock()
defer c.lock.RUnlock()
return c.Essential
}
// AWSLogAuthExecutionRole returns true if the auth is by execution role
func (c *Container) AWSLogAuthExecutionRole() bool {
return c.LogsAuthStrategy == awslogsAuthExecutionRole
}
// SetCreatedAt sets the timestamp for container's creation time
func (c *Container) SetCreatedAt(createdAt time.Time) {
if createdAt.IsZero() {
return
}
c.lock.Lock()
defer c.lock.Unlock()
c.createdAt = createdAt
}
// SetStartedAt sets the timestamp for container's start time
func (c *Container) SetStartedAt(startedAt time.Time) {
if startedAt.IsZero() {
return
}
c.lock.Lock()
defer c.lock.Unlock()
c.startedAt = startedAt
}
// SetFinishedAt sets the timestamp for container's stopped time
func (c *Container) SetFinishedAt(finishedAt time.Time) {
if finishedAt.IsZero() {
return
}
c.lock.Lock()
defer c.lock.Unlock()
c.finishedAt = finishedAt
}
// GetCreatedAt returns the timestamp for container's creation time
func (c *Container) GetCreatedAt() time.Time {
c.lock.RLock()
defer c.lock.RUnlock()
return c.createdAt
}
// GetStartedAt returns the timestamp for container's start time
func (c *Container) GetStartedAt() time.Time {
c.lock.RLock()
defer c.lock.RUnlock()
return c.startedAt
}
// GetFinishedAt returns the timestamp for container's stopped time
func (c *Container) GetFinishedAt() time.Time {
c.lock.RLock()
defer c.lock.RUnlock()
return c.finishedAt
}
// SetLabels sets the labels for a container
func (c *Container) SetLabels(labels map[string]string) {
c.lock.Lock()
defer c.lock.Unlock()
c.labels = labels
}
// GetLabels gets the labels for a container
func (c *Container) GetLabels() map[string]string {
c.lock.RLock()
defer c.lock.RUnlock()
return c.labels
}
// HealthStatusShouldBeReported returns true if the health check is defined in
// the task definition
func (c *Container) HealthStatusShouldBeReported() bool {
return c.HealthCheckType == dockerHealthCheckType
}
// SetHealthStatus sets the container health status
func (c *Container) SetHealthStatus(health HealthStatus) {
c.lock.Lock()
defer c.lock.Unlock()
if c.Health.Status == health.Status {
return
}
c.Health.Status = health.Status
c.Health.Since = aws.Time(time.Now())
c.Health.Output = health.Output
// Set the health exit code if the health check failed
if c.Health.Status == ContainerUnhealthy {
c.Health.ExitCode = health.ExitCode
}
}
// GetHealthStatus returns the container health information
func (c *Container) GetHealthStatus() HealthStatus {
c.lock.RLock()
defer c.lock.RUnlock()
	// Copy the struct (and deep-copy the Since pointer below) to avoid a data race
copyHealth := c.Health
if c.Health.Since != nil {
copyHealth.Since = aws.Time(aws.TimeValue(c.Health.Since))
}
return copyHealth
}
| 1 | 19,147 | A better way to do this (in your future PR) would be to: 1. get rid of `TransitionDependencySet` altogether 2. tag `TransitionDependenciesMap` as `"TransitionDependencySet"` in JSON 3. update the JSON marshaler to deal with the `"TransitionDependencySet"` tag. If it's a map, no big deal. Else, construct the `TransitionDependenciesMap` map. Doing that would mean that we don't need duplicate code and names for this. | aws-amazon-ecs-agent | go |
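To make the suggestion above concrete, here is a minimal sketch of a JSON unmarshaler that accepts both the legacy list shape and the new map shape under the single `"TransitionDependencySet"` tag. The `TransitionDependency` type, the `dependencies` wrapper, and the `"RUNNING"` default key are simplified stand-ins introduced only for illustration; the agent's real types, keys, and field names may differ.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// TransitionDependency is a simplified stand-in for the agent's dependency entry.
type TransitionDependency struct {
	ContainerName string `json:"ContainerName"`
}

// dependencies keeps only the map form in memory, but reuses the legacy
// "TransitionDependencySet" JSON name so both payload shapes share one field.
type dependencies struct {
	TransitionDependenciesMap map[string][]TransitionDependency `json:"TransitionDependencySet"`
}

// UnmarshalJSON accepts either the new map form or the legacy flat list,
// folding the latter into the map under a single (hypothetical) default key.
func (d *dependencies) UnmarshalJSON(b []byte) error {
	var asMap struct {
		TransitionDependenciesMap map[string][]TransitionDependency `json:"TransitionDependencySet"`
	}
	if err := json.Unmarshal(b, &asMap); err == nil {
		d.TransitionDependenciesMap = asMap.TransitionDependenciesMap
		return nil
	}
	// Decoding as a map failed, so try the legacy slice shape.
	var asList struct {
		TransitionDependencySet []TransitionDependency `json:"TransitionDependencySet"`
	}
	if err := json.Unmarshal(b, &asList); err != nil {
		return err
	}
	if asList.TransitionDependencySet != nil {
		d.TransitionDependenciesMap = map[string][]TransitionDependency{
			"RUNNING": asList.TransitionDependencySet, // hypothetical default key
		}
	}
	return nil
}

func main() {
	legacy := []byte(`{"TransitionDependencySet":[{"ContainerName":"db"}]}`)
	var d dependencies
	if err := json.Unmarshal(legacy, &d); err != nil {
		panic(err)
	}
	fmt.Println(d.TransitionDependenciesMap) // map[RUNNING:[{db}]]
}
```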
@@ -81,10 +81,10 @@ def file_list_to_folder(df: pd.DataFrame, limit: int, offset: int) -> dict:
axis=1
).to_dict(orient='records')
objects = folder[~folder.logical_key.str.contains('/')].to_dict(orient='records')
- returned_results = len(prefixes) + len(objects)
- except AttributeError as err:
- # Pandas will raise an attribute error if the DataFrame has
- # no rows with a non-null logical_key. We expect that case if
+
+ except (AttributeError, KeyError) as err:
+ # Pandas will raise an AttributeError or KeyError if the DataFrame
+ # has no rows with a non-null logical_key. We expect that case if
# either: (1) the package is empty (has zero package entries)
# or, (2) zero package entries match the prefix filter. The
# choice to allow this to raise the exception instead of | 1 | """
Provide a virtual-file-system view of a package's logical keys.
"""
import json
import os
import boto3
import botocore
import pandas as pd
from t4_lambda_shared.decorator import api, validate
from t4_lambda_shared.utils import (
get_default_origins,
make_json_response,
query_manifest_content,
sql_escape,
)
SCHEMA = {
'type': 'object',
'properties': {
'bucket': {
'type': 'string'
},
'manifest': {
'type': 'string'
},
'access_key': {
'type': 'string'
},
'secret_key': {
'type': 'string'
},
'session_token': {
'type': 'string'
},
'prefix': {
'type': 'string'
},
'logical_key': {
'type': 'string'
},
'offset': {
'type': 'integer'
},
'limit': {
'type': 'integer'
}
},
'required': ['bucket', 'manifest'],
'additionalProperties': False
}
def file_list_to_folder(df: pd.DataFrame, limit: int, offset: int) -> dict:
"""
Post process a set of logical keys to return only the
top-level folder view (a special case of the s3-select
lambda).
"""
try:
groups = df.groupby(df.logical_key.str.extract('([^/]+/?).*')[0], dropna=True)
folder = groups.agg(
size=('size', 'sum'),
physical_key=('physical_key', 'first')
)
folder.reset_index(inplace=True) # move the logical_key from the index to column[0]
folder.rename(columns={0: 'logical_key'}, inplace=True) # name the new column
# Sort to ensure consistent paging
folder.sort_values(by=['logical_key'], inplace=True)
# Page response (folders and files) based on limit & offset
total_results = len(folder.index)
folder = folder.iloc[offset:offset+limit]
# Do not return physical_key for prefixes
prefixes = folder[folder.logical_key.str.contains('/')].drop(
['physical_key'],
axis=1
).to_dict(orient='records')
objects = folder[~folder.logical_key.str.contains('/')].to_dict(orient='records')
returned_results = len(prefixes) + len(objects)
except AttributeError as err:
# Pandas will raise an attribute error if the DataFrame has
# no rows with a non-null logical_key. We expect that case if
# either: (1) the package is empty (has zero package entries)
# or, (2) zero package entries match the prefix filter. The
# choice to allow this to raise the exception instead of
# testing for the empty case ahead of time optimizes the
# case where the result set is large.
prefixes = []
objects = []
return dict(
total=total_results,
returned=returned_results,
prefixes=prefixes,
objects=objects
)
def create_s3_client(
*,
aws_access_key_id: str,
aws_secret_access_key: str,
aws_session_token: str
):
"""
Create an S3 Client using caller-provided credentials.
"""
assert aws_access_key_id and aws_secret_access_key and aws_session_token
session = boto3.Session(
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
aws_session_token=aws_session_token
)
return session.client('s3')
@api(cors_origins=get_default_origins())
@validate(SCHEMA)
def lambda_handler(request):
"""
Parse a manifest to return a folder-like view of its contents (logical keys).
Returns:
JSON response
"""
bucket = request.args['bucket']
key = request.args['manifest']
prefix = request.args.get('prefix')
logical_key = request.args.get('logical_key')
access_key = request.args.get('access_key')
secret_key = request.args.get('secret_key')
session_token = request.args.get('session_token')
limit = request.args.get('limit', 1000)
offset = request.args.get('offset', 0)
allow_anonymous_access = bool(os.getenv('ALLOW_ANONYMOUS_ACCESS'))
# If credentials are passed in, use them
# for the client. If no credentials are supplied, test that
# the manifest object is publicly accessible. If so, create
# an s3 client using the underlying IAM role's permissions.
if access_key and secret_key and session_token:
s3_client = create_s3_client(
aws_access_key_id=access_key,
aws_secret_access_key=secret_key,
aws_session_token=session_token
)
elif (
allow_anonymous_access and
access_key is None and
secret_key is None and
session_token is None
):
# Test to see if the target key is publicly accessible. If not, the call
        # below will raise an exception and return a 403 response
anons3 = boto3.client(
's3',
config=botocore.client.Config(signature_version=botocore.UNSIGNED)
)
try:
anons3.head_object(Bucket=bucket, Key=key)
except botocore.exceptions.ClientError as error:
if error.response.get('Error'):
code = error.response['Error']['Code']
if code == '403':
return make_json_response(
403,
{
'title': 'Access Denied',
'detail': f"Access denied reading manifest: {key}"
}
)
raise error
# Use the default S3 client configuration
s3_client = boto3.client('s3')
else:
return make_json_response(
401,
{
'title': 'Incomplete credentials',
'detail': "access_key, secret_key and session_token are required"
}
)
assert s3_client
# Get details of a single file in the package
if logical_key is not None:
sql_stmt = f"SELECT s.* FROM s3object s WHERE s.logical_key = '{sql_escape(logical_key)}' LIMIT 1"
response_data = json.load(query_manifest_content(
s3_client,
bucket=bucket,
key=key,
sql_stmt=sql_stmt
))
else:
# Call s3 select to fetch only logical keys matching the
# desired prefix (folder path)
prefix_length = len(prefix) if prefix is not None else 0
sql_stmt = (
f"SELECT SUBSTRING(s.logical_key, {prefix_length + 1}) AS logical_key"
", s.\"size\", s.physical_keys[0] as physical_key FROM s3object s"
)
if prefix:
sql_stmt += f" WHERE SUBSTRING(s.logical_key, 1, {prefix_length}) = '{sql_escape(prefix)}'"
result = query_manifest_content(
s3_client,
bucket=bucket,
key=key,
sql_stmt=sql_stmt
)
# Parse the response into a logical folder view
if result is not None:
df = pd.read_json(
result,
lines=True,
dtype=dict(
logical_key='string',
physical_key='string'
)
)
else:
df = pd.DataFrame()
response_data = file_list_to_folder(df, limit, offset)
# Fetch package-level or directory-level metadata
if prefix:
sql_stmt = f"SELECT s.meta FROM s3object s WHERE s.logical_key = '{sql_escape(prefix)}'"
else:
sql_stmt = "SELECT s.* FROM s3object s WHERE s.logical_key is NULL"
result = query_manifest_content(
s3_client,
bucket=bucket,
key=key,
sql_stmt=sql_stmt
)
meta = json.load(result) if result else {}
response_data.update(dict(meta=meta))
ret_val = make_json_response(
200,
{
'contents': response_data
}
)
return ret_val
| 1 | 20,707 | It's very error-prone to have such a large `try` clause with such non-specific exceptions handled. Consider rewriting it with either of these: 1. Check that the expected columns are present in the `DataFrame` (`{'physical_key', 'logical_key', 'size'}.issubset(df.columns)`). 2. Normalize the output of the query by using `COALESCE()` (or normalize the `DataFrame` itself?). Also it looks like the row with package metadata isn't used here, so it might make sense to filter it out with `WHERE logical_key IS NOT MISSING`. | quiltdata-quilt | py |
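As a concrete illustration of option 1 above, the sketch below validates the required columns up front instead of wrapping the whole grouping/paging block in a broad `try`/`except`. The `file_list_to_folder_checked` name is hypothetical and the grouping/paging body is elided; the column names mirror the SELECT used by the lambda above.

```python
import pandas as pd

# Columns the folder view relies on; names mirror the SELECT in the lambda above.
REQUIRED_COLUMNS = {'logical_key', 'physical_key', 'size'}


def file_list_to_folder_checked(df: pd.DataFrame, limit: int, offset: int) -> dict:
    """Sketch only: validate the frame up front instead of wrapping the whole
    grouping/paging block in a broad try/except."""
    if df.empty or not REQUIRED_COLUMNS.issubset(df.columns):
        # Zero package entries, or zero entries matched the prefix filter.
        return dict(total=0, returned=0, prefixes=[], objects=[])
    # The package-metadata row (null logical_key) could also be excluded in the
    # S3 Select query itself, e.g. "... WHERE s.logical_key IS NOT MISSING".
    df = df[df.logical_key.notna()]
    ...  # continue with the existing groupby / paging logic shown above
```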
@@ -116,6 +116,7 @@ var (
MirrorQueueLength int
PullRequestQueueLength int
PreferredLicenses []string
+ DisableHttpGit bool
// Repository editor settings
Editor struct { | 1 | // Copyright 2014 The Gogs Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package setting
import (
"fmt"
"net/url"
"os"
"os/exec"
"path"
"path/filepath"
"runtime"
"strconv"
"strings"
"time"
"github.com/Unknwon/com"
_ "github.com/go-macaron/cache/memcache"
_ "github.com/go-macaron/cache/redis"
"github.com/go-macaron/session"
_ "github.com/go-macaron/session/redis"
"github.com/strk/go-libravatar"
"gopkg.in/ini.v1"
"github.com/gogits/gogs/modules/bindata"
"github.com/gogits/gogs/modules/log"
"github.com/gogits/gogs/modules/user"
)
type Scheme string
const (
HTTP Scheme = "http"
HTTPS Scheme = "https"
FCGI Scheme = "fcgi"
UNIX_SOCKET Scheme = "unix"
)
type LandingPage string
const (
LANDING_PAGE_HOME LandingPage = "/"
LANDING_PAGE_EXPLORE LandingPage = "/explore"
)
var (
// Build information should only be set by -ldflags.
BuildTime string
BuildGitHash string
// App settings
AppVer string
AppName string
AppUrl string
AppSubUrl string
AppSubUrlDepth int // Number of slashes
AppPath string
AppDataPath string
// Server settings
Protocol Scheme
Domain string
HTTPAddr, HTTPPort string
LocalURL string
OfflineMode bool
DisableRouterLog bool
CertFile, KeyFile string
StaticRootPath string
EnableGzip bool
LandingPageURL LandingPage
UnixSocketPermission uint32
SSH struct {
Disabled bool `ini:"DISABLE_SSH"`
StartBuiltinServer bool `ini:"START_SSH_SERVER"`
Domain string `ini:"SSH_DOMAIN"`
Port int `ini:"SSH_PORT"`
ListenPort int `ini:"SSH_LISTEN_PORT"`
RootPath string `ini:"SSH_ROOT_PATH"`
KeyTestPath string `ini:"SSH_KEY_TEST_PATH"`
KeygenPath string `ini:"SSH_KEYGEN_PATH"`
MinimumKeySizeCheck bool `ini:"-"`
MinimumKeySizes map[string]int `ini:"-"`
}
// Security settings
InstallLock bool
SecretKey string
LogInRememberDays int
CookieUserName string
CookieRememberName string
ReverseProxyAuthUser string
// Database settings
UseSQLite3 bool
UseMySQL bool
UsePostgreSQL bool
UseTiDB bool
// Webhook settings
Webhook struct {
QueueLength int
DeliverTimeout int
SkipTLSVerify bool
Types []string
PagingNum int
}
// Repository settings
Repository struct {
AnsiCharset string
ForcePrivate bool
MaxCreationLimit int
MirrorQueueLength int
PullRequestQueueLength int
PreferredLicenses []string
// Repository editor settings
Editor struct {
LineWrapExtensions []string
PreviewableFileModes []string
} `ini:"-"`
// Repository upload settings
Upload struct {
Enabled bool
TempPath string
AllowedTypes []string `delim:"|"`
FileMaxSize int64
MaxFiles int
} `ini:"-"`
}
RepoRootPath string
ScriptType string
// UI settings
UI struct {
ExplorePagingNum int
IssuePagingNum int
FeedMaxCommitNum int
ThemeColorMetaTag string
MaxDisplayFileSize int64
Admin struct {
UserPagingNum int
RepoPagingNum int
NoticePagingNum int
OrgPagingNum int
} `ini:"ui.admin"`
User struct {
RepoPagingNum int
} `ini:"ui.user"`
}
	// Markdown settings
Markdown struct {
EnableHardLineBreak bool
CustomURLSchemes []string `ini:"CUSTOM_URL_SCHEMES"`
FileExtensions []string
}
// Picture settings
AvatarUploadPath string
GravatarSource string
DisableGravatar bool
EnableFederatedAvatar bool
LibravatarService *libravatar.Libravatar
// Log settings
LogRootPath string
LogModes []string
LogConfigs []string
// Attachment settings
AttachmentPath string
AttachmentAllowedTypes string
AttachmentMaxSize int64
AttachmentMaxFiles int
AttachmentEnabled bool
// Time settings
TimeFormat string
// Cache settings
CacheAdapter string
CacheInterval int
CacheConn string
// Session settings
SessionConfig session.Options
CSRFCookieName = "_csrf"
// Cron tasks
Cron struct {
UpdateMirror struct {
Enabled bool
RunAtStart bool
Schedule string
} `ini:"cron.update_mirrors"`
RepoHealthCheck struct {
Enabled bool
RunAtStart bool
Schedule string
Timeout time.Duration
Args []string `delim:" "`
} `ini:"cron.repo_health_check"`
CheckRepoStats struct {
Enabled bool
RunAtStart bool
Schedule string
} `ini:"cron.check_repo_stats"`
}
// Git settings
Git struct {
DisableDiffHighlight bool
MaxGitDiffLines int
MaxGitDiffLineCharacters int
MaxGitDiffFiles int
GCArgs []string `delim:" "`
Timeout struct {
Migrate int
Mirror int
Clone int
Pull int
GC int `ini:"GC"`
} `ini:"git.timeout"`
}
// Mirror settings
Mirror struct {
DefaultInterval int
}
// API settings
API struct {
MaxResponseItems int
}
// I18n settings
Langs, Names []string
dateLangs map[string]string
// Highlight settings are loaded in modules/template/hightlight.go
// Other settings
ShowFooterBranding bool
ShowFooterVersion bool
ShowFooterTemplateLoadTime bool
SupportMiniWinService bool
// Global setting objects
Cfg *ini.File
CustomPath string // Custom directory path
CustomConf string
ProdMode bool
RunUser string
IsWindows bool
HasRobotsTxt bool
)
// DateLang transforms standard language locale name to corresponding value in datetime plugin.
func DateLang(lang string) string {
name, ok := dateLangs[lang]
if ok {
return name
}
return "en"
}
// execPath returns the executable path.
func execPath() (string, error) {
file, err := exec.LookPath(os.Args[0])
if err != nil {
return "", err
}
return filepath.Abs(file)
}
func init() {
IsWindows = runtime.GOOS == "windows"
log.NewLogger(0, "console", `{"level": 0}`)
var err error
if AppPath, err = execPath(); err != nil {
log.Fatal(4, "fail to get app path: %v\n", err)
}
	// Note: we don't use path.Dir here because it does not handle the case
	// in which the path starts with two "/" in Windows: "//psf/Home/..."
AppPath = strings.Replace(AppPath, "\\", "/", -1)
}
// WorkDir returns absolute path of work directory.
func WorkDir() (string, error) {
wd := os.Getenv("GOGS_WORK_DIR")
if len(wd) > 0 {
return wd, nil
}
i := strings.LastIndex(AppPath, "/")
if i == -1 {
return AppPath, nil
}
return AppPath[:i], nil
}
func forcePathSeparator(path string) {
if strings.Contains(path, "\\") {
log.Fatal(4, "Do not use '\\' or '\\\\' in paths, instead, please use '/' in all places")
}
}
// IsRunUserMatchCurrentUser returns false if configured run user does not match
// actual user that runs the app. The first return value is the actual user name.
// This check is ignored under Windows since SSH remote login is not the main
// method to login on Windows.
func IsRunUserMatchCurrentUser(runUser string) (string, bool) {
if IsWindows {
return "", true
}
currentUser := user.CurrentUsername()
return currentUser, runUser == currentUser
}
// NewContext initializes configuration context.
// NOTE: do not print any log except error.
func NewContext() {
workDir, err := WorkDir()
if err != nil {
log.Fatal(4, "Fail to get work directory: %v", err)
}
Cfg, err = ini.Load(bindata.MustAsset("conf/app.ini"))
if err != nil {
log.Fatal(4, "Fail to parse 'conf/app.ini': %v", err)
}
CustomPath = os.Getenv("GOGS_CUSTOM")
if len(CustomPath) == 0 {
CustomPath = workDir + "/custom"
}
if len(CustomConf) == 0 {
CustomConf = CustomPath + "/conf/app.ini"
}
if com.IsFile(CustomConf) {
if err = Cfg.Append(CustomConf); err != nil {
log.Fatal(4, "Fail to load custom conf '%s': %v", CustomConf, err)
}
} else {
log.Warn("Custom config '%s' not found, ignore this if you're running first time", CustomConf)
}
Cfg.NameMapper = ini.AllCapsUnderscore
homeDir, err := com.HomeDir()
if err != nil {
log.Fatal(4, "Fail to get home directory: %v", err)
}
homeDir = strings.Replace(homeDir, "\\", "/", -1)
LogRootPath = Cfg.Section("log").Key("ROOT_PATH").MustString(path.Join(workDir, "log"))
forcePathSeparator(LogRootPath)
sec := Cfg.Section("server")
AppName = Cfg.Section("").Key("APP_NAME").MustString("Gogs: Go Git Service")
AppUrl = sec.Key("ROOT_URL").MustString("http://localhost:3000/")
if AppUrl[len(AppUrl)-1] != '/' {
AppUrl += "/"
}
	// Check if AppUrl has a sub-url path.
url, err := url.Parse(AppUrl)
if err != nil {
log.Fatal(4, "Invalid ROOT_URL '%s': %s", AppUrl, err)
}
// Suburl should start with '/' and end without '/', such as '/{subpath}'.
// This value is empty if site does not have sub-url.
AppSubUrl = strings.TrimSuffix(url.Path, "/")
AppSubUrlDepth = strings.Count(AppSubUrl, "/")
Protocol = HTTP
if sec.Key("PROTOCOL").String() == "https" {
Protocol = HTTPS
CertFile = sec.Key("CERT_FILE").String()
KeyFile = sec.Key("KEY_FILE").String()
} else if sec.Key("PROTOCOL").String() == "fcgi" {
Protocol = FCGI
} else if sec.Key("PROTOCOL").String() == "unix" {
Protocol = UNIX_SOCKET
UnixSocketPermissionRaw := sec.Key("UNIX_SOCKET_PERMISSION").MustString("666")
UnixSocketPermissionParsed, err := strconv.ParseUint(UnixSocketPermissionRaw, 8, 32)
if err != nil || UnixSocketPermissionParsed > 0777 {
log.Fatal(4, "Fail to parse unixSocketPermission: %s", UnixSocketPermissionRaw)
}
UnixSocketPermission = uint32(UnixSocketPermissionParsed)
}
Domain = sec.Key("DOMAIN").MustString("localhost")
HTTPAddr = sec.Key("HTTP_ADDR").MustString("0.0.0.0")
HTTPPort = sec.Key("HTTP_PORT").MustString("3000")
LocalURL = sec.Key("LOCAL_ROOT_URL").MustString(string(Protocol) + "://localhost:" + HTTPPort + "/")
OfflineMode = sec.Key("OFFLINE_MODE").MustBool()
DisableRouterLog = sec.Key("DISABLE_ROUTER_LOG").MustBool()
StaticRootPath = sec.Key("STATIC_ROOT_PATH").MustString(workDir)
AppDataPath = sec.Key("APP_DATA_PATH").MustString("data")
EnableGzip = sec.Key("ENABLE_GZIP").MustBool()
switch sec.Key("LANDING_PAGE").MustString("home") {
case "explore":
LandingPageURL = LANDING_PAGE_EXPLORE
default:
LandingPageURL = LANDING_PAGE_HOME
}
SSH.RootPath = path.Join(homeDir, ".ssh")
SSH.KeyTestPath = os.TempDir()
if err = Cfg.Section("server").MapTo(&SSH); err != nil {
log.Fatal(4, "Fail to map SSH settings: %v", err)
}
	// When SSH is disabled, the start builtin server value is ignored.
if SSH.Disabled {
SSH.StartBuiltinServer = false
}
if !SSH.Disabled && !SSH.StartBuiltinServer {
if err := os.MkdirAll(SSH.RootPath, 0700); err != nil {
log.Fatal(4, "Fail to create '%s': %v", SSH.RootPath, err)
} else if err = os.MkdirAll(SSH.KeyTestPath, 0644); err != nil {
log.Fatal(4, "Fail to create '%s': %v", SSH.KeyTestPath, err)
}
}
SSH.MinimumKeySizeCheck = sec.Key("MINIMUM_KEY_SIZE_CHECK").MustBool()
SSH.MinimumKeySizes = map[string]int{}
minimumKeySizes := Cfg.Section("ssh.minimum_key_sizes").Keys()
for _, key := range minimumKeySizes {
if key.MustInt() != -1 {
SSH.MinimumKeySizes[strings.ToLower(key.Name())] = key.MustInt()
}
}
sec = Cfg.Section("security")
InstallLock = sec.Key("INSTALL_LOCK").MustBool()
SecretKey = sec.Key("SECRET_KEY").String()
LogInRememberDays = sec.Key("LOGIN_REMEMBER_DAYS").MustInt()
CookieUserName = sec.Key("COOKIE_USERNAME").String()
CookieRememberName = sec.Key("COOKIE_REMEMBER_NAME").String()
ReverseProxyAuthUser = sec.Key("REVERSE_PROXY_AUTHENTICATION_USER").MustString("X-WEBAUTH-USER")
sec = Cfg.Section("attachment")
AttachmentPath = sec.Key("PATH").MustString(path.Join(AppDataPath, "attachments"))
if !filepath.IsAbs(AttachmentPath) {
AttachmentPath = path.Join(workDir, AttachmentPath)
}
AttachmentAllowedTypes = strings.Replace(sec.Key("ALLOWED_TYPES").MustString("image/jpeg,image/png"), "|", ",", -1)
AttachmentMaxSize = sec.Key("MAX_SIZE").MustInt64(4)
AttachmentMaxFiles = sec.Key("MAX_FILES").MustInt(5)
AttachmentEnabled = sec.Key("ENABLE").MustBool(true)
TimeFormat = map[string]string{
"ANSIC": time.ANSIC,
"UnixDate": time.UnixDate,
"RubyDate": time.RubyDate,
"RFC822": time.RFC822,
"RFC822Z": time.RFC822Z,
"RFC850": time.RFC850,
"RFC1123": time.RFC1123,
"RFC1123Z": time.RFC1123Z,
"RFC3339": time.RFC3339,
"RFC3339Nano": time.RFC3339Nano,
"Kitchen": time.Kitchen,
"Stamp": time.Stamp,
"StampMilli": time.StampMilli,
"StampMicro": time.StampMicro,
"StampNano": time.StampNano,
}[Cfg.Section("time").Key("FORMAT").MustString("RFC1123")]
RunUser = Cfg.Section("").Key("RUN_USER").String()
// Does not check run user when the install lock is off.
if InstallLock {
currentUser, match := IsRunUserMatchCurrentUser(RunUser)
if !match {
log.Fatal(4, "Expect user '%s' but current user is: %s", RunUser, currentUser)
}
}
// Determine and create root git repository path.
sec = Cfg.Section("repository")
RepoRootPath = sec.Key("ROOT").MustString(path.Join(homeDir, "gogs-repositories"))
forcePathSeparator(RepoRootPath)
if !filepath.IsAbs(RepoRootPath) {
RepoRootPath = path.Join(workDir, RepoRootPath)
} else {
RepoRootPath = path.Clean(RepoRootPath)
}
ScriptType = sec.Key("SCRIPT_TYPE").MustString("bash")
if err = Cfg.Section("repository").MapTo(&Repository); err != nil {
log.Fatal(4, "Fail to map Repository settings: %v", err)
} else if err = Cfg.Section("repository.editor").MapTo(&Repository.Editor); err != nil {
log.Fatal(4, "Fail to map Repository.Editor settings: %v", err)
} else if err = Cfg.Section("repository.upload").MapTo(&Repository.Upload); err != nil {
log.Fatal(4, "Fail to map Repository.Upload settings: %v", err)
}
if !filepath.IsAbs(Repository.Upload.TempPath) {
Repository.Upload.TempPath = path.Join(workDir, Repository.Upload.TempPath)
}
sec = Cfg.Section("picture")
AvatarUploadPath = sec.Key("AVATAR_UPLOAD_PATH").MustString(path.Join(AppDataPath, "avatars"))
forcePathSeparator(AvatarUploadPath)
if !filepath.IsAbs(AvatarUploadPath) {
AvatarUploadPath = path.Join(workDir, AvatarUploadPath)
}
switch source := sec.Key("GRAVATAR_SOURCE").MustString("gravatar"); source {
case "duoshuo":
GravatarSource = "http://gravatar.duoshuo.com/avatar/"
case "gravatar":
GravatarSource = "https://secure.gravatar.com/avatar/"
default:
GravatarSource = source
}
DisableGravatar = sec.Key("DISABLE_GRAVATAR").MustBool()
EnableFederatedAvatar = sec.Key("ENABLE_FEDERATED_AVATAR").MustBool()
if OfflineMode {
DisableGravatar = true
EnableFederatedAvatar = false
}
if DisableGravatar {
EnableFederatedAvatar = false
}
if EnableFederatedAvatar {
LibravatarService = libravatar.New()
parts := strings.Split(GravatarSource, "/")
if len(parts) >= 3 {
if parts[0] == "https:" {
LibravatarService.SetUseHTTPS(true)
LibravatarService.SetSecureFallbackHost(parts[2])
} else {
LibravatarService.SetUseHTTPS(false)
LibravatarService.SetFallbackHost(parts[2])
}
}
}
if err = Cfg.Section("ui").MapTo(&UI); err != nil {
log.Fatal(4, "Fail to map UI settings: %v", err)
} else if err = Cfg.Section("markdown").MapTo(&Markdown); err != nil {
log.Fatal(4, "Fail to map Markdown settings: %v", err)
} else if err = Cfg.Section("cron").MapTo(&Cron); err != nil {
log.Fatal(4, "Fail to map Cron settings: %v", err)
} else if err = Cfg.Section("git").MapTo(&Git); err != nil {
log.Fatal(4, "Fail to map Git settings: %v", err)
} else if err = Cfg.Section("mirror").MapTo(&Mirror); err != nil {
log.Fatal(4, "Fail to map Mirror settings: %v", err)
} else if err = Cfg.Section("api").MapTo(&API); err != nil {
log.Fatal(4, "Fail to map API settings: %v", err)
}
if Mirror.DefaultInterval <= 0 {
Mirror.DefaultInterval = 24
}
Langs = Cfg.Section("i18n").Key("LANGS").Strings(",")
Names = Cfg.Section("i18n").Key("NAMES").Strings(",")
dateLangs = Cfg.Section("i18n.datelang").KeysHash()
ShowFooterBranding = Cfg.Section("other").Key("SHOW_FOOTER_BRANDING").MustBool()
ShowFooterVersion = Cfg.Section("other").Key("SHOW_FOOTER_VERSION").MustBool()
ShowFooterTemplateLoadTime = Cfg.Section("other").Key("SHOW_FOOTER_TEMPLATE_LOAD_TIME").MustBool()
HasRobotsTxt = com.IsFile(path.Join(CustomPath, "robots.txt"))
}
var Service struct {
ActiveCodeLives int
ResetPwdCodeLives int
RegisterEmailConfirm bool
DisableRegistration bool
ShowRegistrationButton bool
RequireSignInView bool
EnableNotifyMail bool
EnableReverseProxyAuth bool
EnableReverseProxyAutoRegister bool
EnableCaptcha bool
}
func newService() {
sec := Cfg.Section("service")
Service.ActiveCodeLives = sec.Key("ACTIVE_CODE_LIVE_MINUTES").MustInt(180)
Service.ResetPwdCodeLives = sec.Key("RESET_PASSWD_CODE_LIVE_MINUTES").MustInt(180)
Service.DisableRegistration = sec.Key("DISABLE_REGISTRATION").MustBool()
Service.ShowRegistrationButton = sec.Key("SHOW_REGISTRATION_BUTTON").MustBool(!Service.DisableRegistration)
Service.RequireSignInView = sec.Key("REQUIRE_SIGNIN_VIEW").MustBool()
Service.EnableReverseProxyAuth = sec.Key("ENABLE_REVERSE_PROXY_AUTHENTICATION").MustBool()
Service.EnableReverseProxyAutoRegister = sec.Key("ENABLE_REVERSE_PROXY_AUTO_REGISTRATION").MustBool()
Service.EnableCaptcha = sec.Key("ENABLE_CAPTCHA").MustBool()
}
var logLevels = map[string]string{
"Trace": "0",
"Debug": "1",
"Info": "2",
"Warn": "3",
"Error": "4",
"Critical": "5",
}
func newLogService() {
log.Info("%s %s", AppName, AppVer)
if len(BuildTime) > 0 {
log.Info("Build Time: %s", BuildTime)
log.Info("Build Git Hash: %s", BuildGitHash)
}
// Get and check log mode.
LogModes = strings.Split(Cfg.Section("log").Key("MODE").MustString("console"), ",")
LogConfigs = make([]string, len(LogModes))
for i, mode := range LogModes {
mode = strings.TrimSpace(mode)
sec, err := Cfg.GetSection("log." + mode)
if err != nil {
log.Fatal(4, "Unknown log mode: %s", mode)
}
validLevels := []string{"Trace", "Debug", "Info", "Warn", "Error", "Critical"}
// Log level.
levelName := Cfg.Section("log."+mode).Key("LEVEL").In(
Cfg.Section("log").Key("LEVEL").In("Trace", validLevels),
validLevels)
level, ok := logLevels[levelName]
if !ok {
log.Fatal(4, "Unknown log level: %s", levelName)
}
// Generate log configuration.
switch mode {
case "console":
LogConfigs[i] = fmt.Sprintf(`{"level":%s}`, level)
case "file":
logPath := sec.Key("FILE_NAME").MustString(path.Join(LogRootPath, "gogs.log"))
if err = os.MkdirAll(path.Dir(logPath), os.ModePerm); err != nil {
panic(err.Error())
}
LogConfigs[i] = fmt.Sprintf(
`{"level":%s,"filename":"%s","rotate":%v,"maxlines":%d,"maxsize":%d,"daily":%v,"maxdays":%d}`, level,
logPath,
sec.Key("LOG_ROTATE").MustBool(true),
sec.Key("MAX_LINES").MustInt(1000000),
1<<uint(sec.Key("MAX_SIZE_SHIFT").MustInt(28)),
sec.Key("DAILY_ROTATE").MustBool(true),
sec.Key("MAX_DAYS").MustInt(7))
case "conn":
LogConfigs[i] = fmt.Sprintf(`{"level":%s,"reconnectOnMsg":%v,"reconnect":%v,"net":"%s","addr":"%s"}`, level,
sec.Key("RECONNECT_ON_MSG").MustBool(),
sec.Key("RECONNECT").MustBool(),
sec.Key("PROTOCOL").In("tcp", []string{"tcp", "unix", "udp"}),
sec.Key("ADDR").MustString(":7020"))
case "smtp":
LogConfigs[i] = fmt.Sprintf(`{"level":%s,"username":"%s","password":"%s","host":"%s","sendTos":"%s","subject":"%s"}`, level,
sec.Key("USER").MustString("[email protected]"),
sec.Key("PASSWD").MustString("******"),
sec.Key("HOST").MustString("127.0.0.1:25"),
sec.Key("RECEIVERS").MustString("[]"),
sec.Key("SUBJECT").MustString("Diagnostic message from serve"))
case "database":
LogConfigs[i] = fmt.Sprintf(`{"level":%s,"driver":"%s","conn":"%s"}`, level,
sec.Key("DRIVER").String(),
sec.Key("CONN").String())
}
log.NewLogger(Cfg.Section("log").Key("BUFFER_LEN").MustInt64(10000), mode, LogConfigs[i])
log.Info("Log Mode: %s(%s)", strings.Title(mode), levelName)
}
}
func newCacheService() {
CacheAdapter = Cfg.Section("cache").Key("ADAPTER").In("memory", []string{"memory", "redis", "memcache"})
switch CacheAdapter {
case "memory":
CacheInterval = Cfg.Section("cache").Key("INTERVAL").MustInt(60)
case "redis", "memcache":
CacheConn = strings.Trim(Cfg.Section("cache").Key("HOST").String(), "\" ")
default:
log.Fatal(4, "Unknown cache adapter: %s", CacheAdapter)
}
log.Info("Cache Service Enabled")
}
func newSessionService() {
SessionConfig.Provider = Cfg.Section("session").Key("PROVIDER").In("memory",
[]string{"memory", "file", "redis", "mysql"})
SessionConfig.ProviderConfig = strings.Trim(Cfg.Section("session").Key("PROVIDER_CONFIG").String(), "\" ")
SessionConfig.CookieName = Cfg.Section("session").Key("COOKIE_NAME").MustString("i_like_gogits")
SessionConfig.CookiePath = AppSubUrl
SessionConfig.Secure = Cfg.Section("session").Key("COOKIE_SECURE").MustBool()
SessionConfig.Gclifetime = Cfg.Section("session").Key("GC_INTERVAL_TIME").MustInt64(86400)
SessionConfig.Maxlifetime = Cfg.Section("session").Key("SESSION_LIFE_TIME").MustInt64(86400)
log.Info("Session Service Enabled")
}
// Mailer represents mail service.
type Mailer struct {
QueueLength int
Name string
Host string
From string
User, Passwd string
DisableHelo bool
HeloHostname string
SkipVerify bool
UseCertificate bool
CertFile, KeyFile string
EnableHTMLAlternative bool
}
var (
MailService *Mailer
)
func newMailService() {
sec := Cfg.Section("mailer")
// Check mailer setting.
if !sec.Key("ENABLED").MustBool() {
return
}
MailService = &Mailer{
QueueLength: sec.Key("SEND_BUFFER_LEN").MustInt(100),
Name: sec.Key("NAME").MustString(AppName),
Host: sec.Key("HOST").String(),
User: sec.Key("USER").String(),
Passwd: sec.Key("PASSWD").String(),
DisableHelo: sec.Key("DISABLE_HELO").MustBool(),
HeloHostname: sec.Key("HELO_HOSTNAME").String(),
SkipVerify: sec.Key("SKIP_VERIFY").MustBool(),
UseCertificate: sec.Key("USE_CERTIFICATE").MustBool(),
CertFile: sec.Key("CERT_FILE").String(),
KeyFile: sec.Key("KEY_FILE").String(),
EnableHTMLAlternative: sec.Key("ENABLE_HTML_ALTERNATIVE").MustBool(),
}
MailService.From = sec.Key("FROM").MustString(MailService.User)
log.Info("Mail Service Enabled")
}
func newRegisterMailService() {
if !Cfg.Section("service").Key("REGISTER_EMAIL_CONFIRM").MustBool() {
return
} else if MailService == nil {
log.Warn("Register Mail Service: Mail Service is not enabled")
return
}
Service.RegisterEmailConfirm = true
log.Info("Register Mail Service Enabled")
}
func newNotifyMailService() {
if !Cfg.Section("service").Key("ENABLE_NOTIFY_MAIL").MustBool() {
return
} else if MailService == nil {
log.Warn("Notify Mail Service: Mail Service is not enabled")
return
}
Service.EnableNotifyMail = true
log.Info("Notify Mail Service Enabled")
}
func newWebhookService() {
sec := Cfg.Section("webhook")
Webhook.QueueLength = sec.Key("QUEUE_LENGTH").MustInt(1000)
Webhook.DeliverTimeout = sec.Key("DELIVER_TIMEOUT").MustInt(5)
Webhook.SkipTLSVerify = sec.Key("SKIP_TLS_VERIFY").MustBool()
Webhook.Types = []string{"gogs", "slack"}
Webhook.PagingNum = sec.Key("PAGING_NUM").MustInt(10)
}
func NewServices() {
newService()
newLogService()
newCacheService()
newSessionService()
newMailService()
newRegisterMailService()
newNotifyMailService()
newWebhookService()
}
| 1 | 12,065 | Change to `DisableHTTPGit` | gogs-gogs | go |
@@ -71,10 +71,9 @@ class UserMailer < ActionMailer::Base
end
def feedback_notification(recipient, plan, requestor)
- return unless @user.org.present? && recipient.active?
+ return unless recipient.active?
@user = requestor
- @org = @user.org
@plan = plan
@recipient = recipient
| 1 | # frozen_string_literal: true
class UserMailer < ActionMailer::Base
prepend_view_path "app/views/branded/"
include MailerHelper
helper MailerHelper
helper FeedbacksHelper
default from: Rails.configuration.x.organisation.email
def welcome_notification(user)
@user = user
FastGettext.with_locale FastGettext.default_locale do
mail(to: @user.email,
subject: _("Welcome to %{tool_name}") % {
tool_name: ApplicationService.application_name
})
end
end
def question_answered(data, user, answer, _options_string)
@user = user
@answer = answer
@data = data
FastGettext.with_locale FastGettext.default_locale do
mail(to: data["email"],
subject: data["subject"])
end
end
def sharing_notification(role, user, inviter:)
@role = role
@user = user
@inviter = inviter
subject = _("A Data Management Plan in %{tool_name} has been shared "\
"with you") % {
tool_name: ApplicationService.application_name
}
FastGettext.with_locale FastGettext.default_locale do
mail(to: @role.user.email, subject: subject)
end
end
def permissions_change_notification(role, user)
return unless user.active?
@role = role
@user = user
FastGettext.with_locale FastGettext.default_locale do
mail(to: @role.user.email,
subject: _("Changed permissions on a Data Management Plan in %{tool_name}") % {
tool_name: ApplicationService.application_name
})
end
end
def plan_access_removed(user, plan, current_user)
return unless user.active?
@user = user
@plan = plan
@current_user = current_user
FastGettext.with_locale FastGettext.default_locale do
mail(to: @user.email,
subject: (_("Permissions removed on a DMP in %{tool_name}") % {
tool_name: ApplicationService.application_name
}).to_s)
end
end
def feedback_notification(recipient, plan, requestor)
return unless @user.org.present? && recipient.active?
@user = requestor
@org = @user.org
@plan = plan
@recipient = recipient
FastGettext.with_locale FastGettext.default_locale do
mail(to: recipient.email,
subject: _("%{application_name}: %{user_name} requested feedback on a plan") % {
application_name: ApplicationService.application_name, user_name: @user.name(false)
})
end
end
def feedback_complete(recipient, plan, requestor)
return unless recipient.active?
@requestor = requestor
@user = recipient
@plan = plan
@phase = plan.phases.first
FastGettext.with_locale FastGettext.default_locale do
sender = Rails.configuration.x.organisation.do_not_reply_email ||
Rails.configuration.x.organisation.email
mail(
to: recipient.email,
from: sender,
subject: _("%{application_name}: Expert feedback has been provided for %{plan_title}") % {
application_name: ApplicationService.application_name, plan_title: @plan.title
}
)
end
end
def feedback_confirmation(recipient, plan, requestor)
return unless user.org.present? && recipient.active?
user = requestor
org = user.org
plan = plan
# Use the generic feedback confirmation message unless the Org has specified one
subject = org.feedback_email_subject || feedback_confirmation_default_subject
message = org.feedback_email_msg || feedback_confirmation_default_message
@body = feedback_constant_to_text(message, user, plan, org)
FastGettext.with_locale FastGettext.default_locale do
mail(to: recipient.email,
subject: feedback_constant_to_text(subject, user, plan, org))
end
end
def plan_visibility(user, plan)
return unless user.active?
@user = user
@plan = plan
FastGettext.with_locale FastGettext.default_locale do
mail(to: @user.email,
subject: _("DMP Visibility Changed: %{plan_title}") % { plan_title: @plan.title })
end
end
# commenter - User who wrote the comment
# plan - Plan for which the comment is associated to
# answer - Answer commented on
def new_comment(commenter, plan, answer)
return unless commenter.is_a?(User) && plan.is_a?(Plan)
owner = plan.owner
return unless owner.present? && owner.active?
@commenter = commenter
@plan = plan
@answer = answer
FastGettext.with_locale FastGettext.default_locale do
mail(to: plan.owner.email, subject:
_("%{tool_name}: A new comment was added to %{plan_title}") % {
tool_name: ApplicationService.application_name, plan_title: plan.title
})
end
end
def admin_privileges(user)
return unless user.active?
@user = user
FastGettext.with_locale FastGettext.default_locale do
mail(to: user.email, subject:
_("Administrator privileges granted in %{tool_name}") % {
tool_name: ApplicationService.application_name
})
end
end
def api_credentials(api_client)
return unless @api_client.contact_email.present?
@api_client = api_client
FastGettext.with_locale FastGettext.default_locale do
mail(to: @api_client.contact_email,
subject: _("%{tool_name} API changes") % {
tool_name: ApplicationService.application_name
})
end
end
end
| 1 | 19,096 | org was not used in the mail content | DMPRoadmap-roadmap | rb |
@@ -128,6 +128,9 @@ export default Service.extend({
return this.lazyLoader.loadStyle('dark', 'assets/ghost-dark.css', true).then(() => {
$('link[title=dark]').prop('disabled', !nightShift);
$('link[title=light]').prop('disabled', nightShift);
+ }).catch((err) => {
+ $('link[title=dark]').prop('disabled', true);
+ $('link[title=light]').prop('disabled', false);
});
}
}); | 1 | import $ from 'jquery';
import Ember from 'ember';
import EmberError from '@ember/error';
import RSVP from 'rsvp';
import Service, {inject as service} from '@ember/service';
import {computed} from '@ember/object';
import {set} from '@ember/object';
export function feature(name, options = {}) {
let {user, onChange} = options;
let watchedProps = user ? [`accessibility.${name}`] : [`config.${name}`, `labs.${name}`];
return computed.apply(Ember, watchedProps.concat({
get() {
let enabled = false;
if (user) {
enabled = this.get(`accessibility.${name}`);
} else if (this.get(`config.${name}`)) {
enabled = this.get(`config.${name}`);
} else {
enabled = this.get(`labs.${name}`) || false;
}
if (options.developer) {
enabled = enabled && this.get('config.enableDeveloperExperiments');
}
return enabled;
},
set(key, value) {
this.update(key, value, options);
if (onChange) {
// value must be passed here because the value isn't set until
// the setter function returns
this.get(onChange).bind(this)(value);
}
return value;
}
}));
}
export default Service.extend({
store: service(),
config: service(),
session: service(),
settings: service(),
notifications: service(),
lazyLoader: service(),
publicAPI: feature('publicAPI'),
subscribers: feature('subscribers'),
members: feature('members'),
nightShift: feature('nightShift', {user: true, onChange: '_setAdminTheme'}),
_user: null,
labs: computed('settings.labs', function () {
let labs = this.get('settings.labs');
try {
return JSON.parse(labs) || {};
} catch (e) {
return {};
}
}),
accessibility: computed('_user.accessibility', function () {
let accessibility = this.get('_user.accessibility');
try {
return JSON.parse(accessibility) || {};
} catch (e) {
return {};
}
}),
fetch() {
return RSVP.hash({
settings: this.settings.fetch(),
user: this.get('session.user')
}).then(({user}) => {
this.set('_user', user);
return this._setAdminTheme().then(() => true);
});
},
update(key, value, options = {}) {
let serviceProperty = options.user ? 'accessibility' : 'labs';
let model = this.get(options.user ? '_user' : 'settings');
let featureObject = this.get(serviceProperty);
// set the new key value for either the labs property or the accessibility property
set(featureObject, key, value);
// update the 'labs' or 'accessibility' key of the model
model.set(serviceProperty, JSON.stringify(featureObject));
return model.save().then(() => {
// return the labs key value that we get from the server
this.notifyPropertyChange(serviceProperty);
return this.get(`${serviceProperty}.${key}`);
}).catch((error) => {
model.rollbackAttributes();
this.notifyPropertyChange(serviceProperty);
// we'll always have an errors object unless we hit a
// validation error
if (!error) {
throw new EmberError(`Validation of the feature service ${options.user ? 'user' : 'settings'} model failed when updating ${serviceProperty}.`);
}
this.notifications.showAPIError(error);
return this.get(`${serviceProperty}.${key}`);
});
},
_setAdminTheme(enabled) {
let nightShift = enabled;
if (typeof nightShift === 'undefined') {
nightShift = enabled || this.nightShift;
}
return this.lazyLoader.loadStyle('dark', 'assets/ghost-dark.css', true).then(() => {
$('link[title=dark]').prop('disabled', !nightShift);
$('link[title=light]').prop('disabled', nightShift);
});
}
});
| 1 | 9,272 | Is this gonna cause a lint issue, because of unused var? Maybe we should log it out? | TryGhost-Admin | js |
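One way to address the question above is sketched below: keep the light-theme fallback added by the patch, but log the caught error so it is neither swallowed nor left as an unused variable (dropping the parameter entirely would be the other way to satisfy the linter). This is only a fragment of the `_setAdminTheme` method shown earlier; the log message and the eslint directive are illustrative choices, not the project's actual fix.

```js
_setAdminTheme(enabled) {
    let nightShift = enabled;
    if (typeof nightShift === 'undefined') {
        nightShift = this.nightShift;
    }

    return this.lazyLoader.loadStyle('dark', 'assets/ghost-dark.css', true).then(() => {
        $('link[title=dark]').prop('disabled', !nightShift);
        $('link[title=light]').prop('disabled', nightShift);
    }).catch((error) => {
        // Surface the load failure instead of swallowing it silently.
        console.error('Failed to load dark stylesheet', error); // eslint-disable-line no-console
        $('link[title=dark]').prop('disabled', true);
        $('link[title=light]').prop('disabled', false);
    });
}
```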
@@ -321,7 +321,11 @@ module Beaker
def hack_etc_hosts hosts, opts
etc_hosts = "127.0.0.1\tlocalhost localhost.localdomain\n"
hosts.each do |host|
- etc_hosts += "#{host['ip'].to_s}\t#{host[:vmhostname] || host.name}\n"
+ if host['hypervisor'] == 'docker'
+ etc_hosts += "#{host['docker_container'].json["NetworkSettings"]["IPAddress"]}\t#{host.name}\n"
+ else
+ etc_hosts += "#{host['ip'].to_s}\t#{host[:vmhostname] || host.name}\n"
+ end
end
hosts.each do |host|
set_etc_hosts(host, etc_hosts) | 1 | require 'pathname'
[ 'command', "dsl/patterns" ].each do |lib|
require "beaker/#{lib}"
end
module Beaker
  #Provides convenience methods for commonly run actions on hosts
module HostPrebuiltSteps
include Beaker::DSL::Patterns
NTPSERVER = 'pool.ntp.org'
SLEEPWAIT = 5
TRIES = 5
UNIX_PACKAGES = ['curl', 'ntpdate']
WINDOWS_PACKAGES = ['curl']
PSWINDOWS_PACKAGES = []
SLES_PACKAGES = ['curl', 'ntp']
DEBIAN_PACKAGES = ['curl', 'ntpdate', 'lsb-release']
CUMULUS_PACKAGES = ['addons', 'ntpdate', 'lsb-release']
ETC_HOSTS_PATH = "/etc/hosts"
ETC_HOSTS_PATH_SOLARIS = "/etc/inet/hosts"
ROOT_KEYS_SCRIPT = "https://raw.githubusercontent.com/puppetlabs/puppetlabs-sshkeys/master/templates/scripts/manage_root_authorized_keys"
ROOT_KEYS_SYNC_CMD = "curl -k -o - -L #{ROOT_KEYS_SCRIPT} | %s"
APT_CFG = %q{ Acquire::http::Proxy "http://proxy.puppetlabs.net:3128/"; }
IPS_PKG_REPO="http://solaris-11-internal-repo.delivery.puppetlabs.net"
#Run timesync on the provided hosts
# @param [Host, Array<Host>] host One or more hosts to act upon
# @param [Hash{Symbol=>String}] opts Options to alter execution.
# @option opts [Beaker::Logger] :logger A {Beaker::Logger} object
def timesync host, opts
logger = opts[:logger]
ntp_server = opts[:ntp_server] ? opts[:ntp_server] : NTPSERVER
block_on host do |host|
logger.notify "Update system time sync for '#{host.name}'"
if host['platform'].include? 'windows'
# The exit code of 5 is for Windows 2008 systems where the w32tm /register command
# is not actually necessary.
host.exec(Command.new("w32tm /register"), :acceptable_exit_codes => [0,5])
host.exec(Command.new("net start w32time"), :acceptable_exit_codes => [0,2])
host.exec(Command.new("w32tm /config /manualpeerlist:#{ntp_server} /syncfromflags:manual /update"))
host.exec(Command.new("w32tm /resync"))
logger.notify "NTP date succeeded on #{host}"
else
case
when host['platform'] =~ /sles-/
ntp_command = "sntp #{ntp_server}"
else
ntp_command = "ntpdate -t 20 #{ntp_server}"
end
success=false
try = 0
until try >= TRIES do
try += 1
if host.exec(Command.new(ntp_command), :acceptable_exit_codes => (0..255)).exit_code == 0
success=true
break
end
sleep SLEEPWAIT
end
if success
logger.notify "NTP date succeeded on #{host} after #{try} tries"
else
raise "NTP date was not successful after #{try} tries"
end
end
end
rescue => e
report_and_raise(logger, e, "timesync (--ntp)")
end
# Validate that hosts are prepared to be used as SUTs, if packages are missing attempt to
# install them.
#
# Verifies the presence of #{HostPrebuiltSteps::UNIX_PACKAGES} on unix platform hosts,
# {HostPrebuiltSteps::SLES_PACKAGES} on SUSE platform hosts,
# {HostPrebuiltSteps::DEBIAN_PACKAGES} on debian platform hosts,
# {HostPrebuiltSteps::CUMULUS_PACKAGES} on cumulus platform hosts,
# {HostPrebuiltSteps::WINDOWS_PACKAGES} on cygwin-installed windows platform hosts,
# and {HostPrebuiltSteps::PSWINDOWS_PACKAGES} on non-cygwin windows platform hosts.
#
# @param [Host, Array<Host>, String, Symbol] host One or more hosts to act upon
# @param [Hash{Symbol=>String}] opts Options to alter execution.
# @option opts [Beaker::Logger] :logger A {Beaker::Logger} object
def validate_host host, opts
logger = opts[:logger]
block_on host do |host|
case
when host['platform'] =~ /sles-/
check_and_install_packages_if_needed(host, SLES_PACKAGES)
when host['platform'] =~ /debian/
check_and_install_packages_if_needed(host, DEBIAN_PACKAGES)
when host['platform'] =~ /cumulus/
check_and_install_packages_if_needed(host, CUMULUS_PACKAGES)
when (host['platform'] =~ /windows/ and host.is_cygwin?)
check_and_install_packages_if_needed(host, WINDOWS_PACKAGES)
when (host['platform'] =~ /windows/ and not host.is_cygwin?)
check_and_install_packages_if_needed(host, PSWINDOWS_PACKAGES)
when host['platform'] !~ /debian|aix|solaris|windows|sles-|osx-|cumulus/
check_and_install_packages_if_needed(host, UNIX_PACKAGES)
end
end
rescue => e
report_and_raise(logger, e, "validate")
end
# Installs the given packages if they aren't already on a host
#
# @param [Host] host Host to act on
# @param [Array<String>] package_list List of package names to install
def check_and_install_packages_if_needed host, package_list
package_list.each do |pkg|
if not host.check_for_package pkg
host.install_package pkg
end
end
end
#Install a set of authorized keys using {HostPrebuiltSteps::ROOT_KEYS_SCRIPT}. This is a
#convenience method to allow for easy login to hosts after they have been provisioned with
#Beaker.
# @param [Host, Array<Host>] host One or more hosts to act upon
# @param [Hash{Symbol=>String}] opts Options to alter execution.
# @option opts [Beaker::Logger] :logger A {Beaker::Logger} object
def sync_root_keys host, opts
# JJM This step runs on every system under test right now. We're anticipating
# issues on Windows and maybe Solaris. We will likely need to filter this step
# but we're deliberately taking the approach of "assume it will work, fix it
# when reality dictates otherwise"
logger = opts[:logger]
block_on host do |host|
logger.notify "Sync root authorized_keys from github on #{host.name}"
# Allow all exit code, as this operation is unlikely to cause problems if it fails.
if host['platform'] =~ /solaris|eos/
host.exec(Command.new(ROOT_KEYS_SYNC_CMD % "bash"), :acceptable_exit_codes => (0..255))
else
host.exec(Command.new(ROOT_KEYS_SYNC_CMD % "env PATH=/usr/gnu/bin:$PATH bash"), :acceptable_exit_codes => (0..255))
end
end
rescue => e
report_and_raise(logger, e, "sync_root_keys")
end
#Determine the Extra Packages for Enterprise Linux URL for the provided Enterprise Linux host.
# @param [Host, Array<Host>] host One host to act on. Will use host epel_url, epel_arch and epel_pkg
# before using defaults provided in opts.
# @return [String, String, String] The URL, arch and package name for EPL for the provided host
# @param [Hash{Symbol=>String}] opts Options to alter execution.
# @option opts [String] :epel_url Link to download
# @option opts [String] :epel_arch Architecture to download (i386, x86_64, etc), defaults to i386
# @option opts [String] :epel_6_pkg Package to download from provided link for el-6
# @option opts [String] :epel_5_pkg Package to download from provided link for el-5
# @raise [Exception] Raises an error if the host provided's platform != /el-(5|6)/
def epel_info_for host, opts
if !el_based?(host)
raise "epel_info_for! not available for #{host.name} on platform #{host['platform']}"
end
version = host['platform'].version
if version == '6'
url = "#{host[:epel_url] || opts[:epel_url]}/#{version}"
pkg = host[:epel_pkg] || opts[:epel_6_pkg]
elsif version == '5'
url = "#{host[:epel_url] || opts[:epel_url]}/#{version}"
pkg = host[:epel_pkg] || opts[:epel_5_pkg]
else
raise "epel_info_for does not support el version #{version}, on #{host.name}"
end
return url, host[:epel_arch] || opts[:epel_arch] || 'i386', pkg
end
# Run 'apt-get update' on the provided host or hosts.
# If the platform of the provided host is not ubuntu, debian or cumulus: do nothing.
#
# @param [Host, Array<Host>] hosts One or more hosts to act upon
def apt_get_update hosts
block_on hosts do |host|
if host[:platform] =~ /ubuntu|debian|cumulus/
host.exec(Command.new("apt-get update"))
end
end
end
#Create a file on host or hosts at the provided file path with the provided file contents.
# @param [Host, Array<Host>] host One or more hosts to act upon
# @param [String] file_path The path at which the new file will be created on the host or hosts.
# @param [String] file_content The contents of the file to be created on the host or hosts.
def copy_file_to_remote(host, file_path, file_content)
block_on host do |host|
Tempfile.open 'beaker' do |tempfile|
File.open(tempfile.path, 'w') {|file| file.puts file_content }
host.do_scp_to(tempfile.path, file_path, @options)
end
end
end
# On ubuntu, debian, or cumulus host or hosts: alter apt configuration to use
# the internal Puppet Labs proxy {HostPrebuiltSteps::APT_CFG} proxy.
# On solaris-11 host or hosts: alter pkg to point to
# the internal Puppet Labs proxy {HostPrebuiltSteps::IPS_PKG_REPO}.
#
# Do nothing for other platform host or hosts.
#
# @param [Host, Array<Host>] host One or more hosts to act upon
# @param [Hash{Symbol=>String}] opts Options to alter execution.
# @option opts [Beaker::Logger] :logger A {Beaker::Logger} object
def proxy_config( host, opts )
logger = opts[:logger]
block_on host do |host|
case
when host['platform'] =~ /ubuntu|debian|cumulus/
host.exec(Command.new("if test -f /etc/apt/apt.conf; then mv /etc/apt/apt.conf /etc/apt/apt.conf.bk; fi"))
copy_file_to_remote(host, '/etc/apt/apt.conf', APT_CFG)
apt_get_update(host)
when host['platform'] =~ /solaris-11/
host.exec(Command.new("/usr/bin/pkg unset-publisher solaris || :"))
host.exec(Command.new("/usr/bin/pkg set-publisher -g %s solaris" % IPS_PKG_REPO))
else
logger.debug "#{host}: repo proxy configuration not modified"
end
end
rescue => e
report_and_raise(logger, e, "proxy_config")
end
#Install EPEL on host or hosts with platform = /el-(5|6)/. Do nothing on host or hosts of other platforms.
# @param [Host, Array<Host>] host One or more hosts to act upon. Will use individual host epel_url, epel_arch
# and epel_pkg before using defaults provided in opts.
# @param [Hash{Symbol=>String}] opts Options to alter execution.
# @option opts [Boolean] :debug If true, print verbose rpm information when installing EPEL
# @option opts [Beaker::Logger] :logger A {Beaker::Logger} object
# @option opts [String] :epel_url Link to download from
# @option opts [String] :epel_arch Architecture of epel to download (i386, x86_64, etc)
# @option opts [String] :epel_6_pkg Package to download from provided link for el-6
# @option opts [String] :epel_5_pkg Package to download from provided link for el-5
def add_el_extras( host, opts )
#add_el_extras
#only supports el-* platforms
logger = opts[:logger]
debug_opt = opts[:debug] ? 'vh' : ''
block_on host do |host|
case
when el_based?(host) && ['5','6'].include?(host['platform'].version)
result = host.exec(Command.new('rpm -qa | grep epel-release'), :acceptable_exit_codes => [0,1])
if result.exit_code == 1
url, arch, pkg = epel_info_for host, opts
host.exec(Command.new("rpm -i#{debug_opt} #{url}/#{arch}/#{pkg}"))
#update /etc/yum.repos.d/epel.repo for new baseurl
host.exec(Command.new("sed -i -e 's;#baseurl.*$;baseurl=#{Regexp.escape(url)}/\$basearch;' /etc/yum.repos.d/epel.repo"))
#remove mirrorlist
host.exec(Command.new("sed -i -e '/mirrorlist/d' /etc/yum.repos.d/epel.repo"))
host.exec(Command.new('yum clean all && yum makecache'))
end
else
logger.debug "#{host}: package repo configuration not modified"
end
end
rescue => e
report_and_raise(logger, e, "add_repos")
end
#Determine the domain name of the provided host from its /etc/resolv.conf
# @param [Host] host the host to act upon
def get_domain_name(host)
domain = nil
search = nil
resolv_conf = host.exec(Command.new("cat /etc/resolv.conf")).stdout
resolv_conf.each_line { |line|
if line =~ /^\s*domain\s+(\S+)/
domain = $1
elsif line =~ /^\s*search\s+(\S+)/
search = $1
end
}
return domain if domain
return search if search
end
#Determine the ip address of the provided host
# @param [Host] host the host to act upon
# @deprecated use {Host#get_ip}
def get_ip(host)
host.get_ip
end
#Append the provided string to the /etc/hosts file of the provided host
# @param [Host] host the host to act upon
# @param [String] etc_hosts The string to append to the /etc/hosts file
def set_etc_hosts(host, etc_hosts)
host.exec(Command.new("echo '#{etc_hosts}' > /etc/hosts"))
end
#Make it possible to log in as root by copying the current users ssh keys to the root account
# @param [Host, Array<Host>] host One or more hosts to act upon
# @param [Hash{Symbol=>String}] opts Options to alter execution.
# @option opts [Beaker::Logger] :logger A {Beaker::Logger} object
def copy_ssh_to_root host, opts
logger = opts[:logger]
block_on host do |host|
logger.debug "Give root a copy of current user's keys, on #{host.name}"
if host['platform'] =~ /windows/ and host.is_cygwin?
host.exec(Command.new('cp -r .ssh /cygdrive/c/Users/Administrator/.'))
host.exec(Command.new('chown -R Administrator /cygdrive/c/Users/Administrator/.ssh'))
elsif host['platform'] =~ /windows/ and not host.is_cygwin?
host.exec(Command.new("if exist .ssh (xcopy .ssh C:\\Users\\Administrator\\.ssh /s /e)"))
elsif host['platform'] =~ /osx/
host.exec(Command.new('sudo cp -r .ssh /var/root/.'), {:pty => true})
else
host.exec(Command.new('sudo su -c "cp -r .ssh /root/."'), {:pty => true})
end
end
end
#Update /etc/hosts to make it possible for each provided host to reach each other host by name.
#Assumes that each provided host has host[:ip] set.
# @param [Host, Array<Host>] hosts An array of hosts to act upon
# @param [Hash{Symbol=>String}] opts Options to alter execution.
# @option opts [Beaker::Logger] :logger A {Beaker::Logger} object
def hack_etc_hosts hosts, opts
etc_hosts = "127.0.0.1\tlocalhost localhost.localdomain\n"
hosts.each do |host|
etc_hosts += "#{host['ip'].to_s}\t#{host[:vmhostname] || host.name}\n"
end
hosts.each do |host|
set_etc_hosts(host, etc_hosts)
end
end
# Update sshd_config on debian, ubuntu, centos, el, redhat, cumulus, and fedora boxes to allow for root login
#
    # Does nothing on other platforms.
#
# @param [Host, Array<Host>] host One or more hosts to act upon
# @param [Hash{Symbol=>String}] opts Options to alter execution.
# @option opts [Beaker::Logger] :logger A {Beaker::Logger} object
def enable_root_login host, opts
logger = opts[:logger]
block_on host do |host|
logger.debug "Update /etc/ssh/sshd_config to allow root login"
# note: this sed command only works on gnu sed
if host['platform'] =~ /osx/
host.exec(Command.new("sudo sed -i '' 's/#PermitRootLogin no/PermitRootLogin Yes/g' /etc/sshd_config"))
host.exec(Command.new("sudo sed -i '' 's/#PermitRootLogin yes/PermitRootLogin Yes/g' /etc/sshd_config"))
else
host.exec(Command.new("sudo su -c \"sed -ri 's/^#?PermitRootLogin no|^#?PermitRootLogin yes/PermitRootLogin yes/' /etc/ssh/sshd_config\""), {:pty => true})
end
#restart sshd
if host['platform'] =~ /debian|ubuntu|cumulus/
host.exec(Command.new("sudo su -c \"service ssh restart\""), {:pty => true})
elsif host['platform'] =~ /centos|el-|redhat|fedora|eos/
host.exec(Command.new("sudo -E /sbin/service sshd reload"), {:pty => true})
else
@logger.warn("Attempting to update ssh on non-supported platform: #{host.name}: #{host['platform']}")
end
end
end
#Disable SELinux on centos, does nothing on other platforms
# @param [Host, Array<Host>] host One or more hosts to act upon
# @param [Hash{Symbol=>String}] opts Options to alter execution.
# @option opts [Beaker::Logger] :logger A {Beaker::Logger} object
def disable_se_linux host, opts
logger = opts[:logger]
block_on host do |host|
if host['platform'] =~ /centos|el-|redhat|fedora|eos/
@logger.debug("Disabling se_linux on #{host.name}")
host.exec(Command.new("sudo su -c \"setenforce 0\""), {:pty => true})
else
@logger.warn("Attempting to disable SELinux on non-supported platform: #{host.name}: #{host['platform']}")
end
end
end
#Disable iptables on centos, does nothing on other platforms
# @param [Host, Array<Host>] host One or more hosts to act upon
# @param [Hash{Symbol=>String}] opts Options to alter execution.
# @option opts [Beaker::Logger] :logger A {Beaker::Logger} object
def disable_iptables host, opts
logger = opts[:logger]
block_on host do |host|
if host['platform'] =~ /centos|el-|redhat|fedora|eos/
logger.debug("Disabling iptables on #{host.name}")
host.exec(Command.new("sudo su -c \"/etc/init.d/iptables stop\""), {:pty => true})
else
logger.warn("Attempting to disable iptables on non-supported platform: #{host.name}: #{host['platform']}")
end
end
end
  # Set up files that allow package manager requests to pass through a proxy server.
  # This works for the APT package manager on debian, ubuntu, and cumulus
  # and the YUM package manager on el, centos, fedora and redhat.
# @param [Host, Array<Host>, String, Symbol] host One or more hosts to act upon
# @param [Hash{Symbol=>String}] opts Options to alter execution.
# @option opts [Beaker::Logger] :logger A {Beaker::Logger} object
def package_proxy host, opts
logger = opts[:logger]
block_on host do |host|
logger.debug("enabling proxy support on #{host.name}")
case host['platform']
when /ubuntu/, /debian/, /cumulus/
host.exec(Command.new("echo 'Acquire::http::Proxy \"#{opts[:package_proxy]}/\";' >> /etc/apt/apt.conf.d/10proxy"))
when /^el-/, /centos/, /fedora/, /redhat/, /eos/
host.exec(Command.new("echo 'proxy=#{opts[:package_proxy]}/' >> /etc/yum.conf"))
else
logger.debug("Attempting to enable package manager proxy support on non-supported platform: #{host.name}: #{host['platform']}")
end
end
end
# Merge the two provided hashes so that an array of values is created from collisions
# @param [Hash] h1 The first hash
# @param [Hash] h2 The second hash
  # @return [Hash] A merged hash with arrays of values where collisions between the two hashes occurred.
# @example
# > h1 = {:PATH=>"/1st/path"}
# > h2 = {:PATH=>"/2nd/path"}
# > additive_hash_merge(h1, h2)
# => {:PATH=>["/1st/path", "/2nd/path"]}
def additive_hash_merge h1, h2
merged_hash = {}
normalized_h2 = h2.inject({}) { |h, (k, v)| h[k.to_s.upcase] = v; h }
h1.each_pair do |key, val|
normalized_key = key.to_s.upcase
if normalized_h2.has_key?(normalized_key)
merged_hash[key] = [h1[key], normalized_h2[normalized_key]]
merged_hash[key] = merged_hash[key].uniq #remove dupes
end
end
merged_hash
end
# 'echo' the provided value on the given host
# @param [Host] host The host to execute the 'echo' on
# @param [String] val The string to 'echo' on the host
def echo_on_host host, val
#val = val.gsub(/"/, "\"").gsub(/\(/, "\(")
if host.is_cygwin?
host.exec(Command.new("echo \"#{val}\"")).stdout.chomp
else
host.exec(Command.new("echo #{val}")).stdout.chomp
end
end
  # Create the hash of default environment variables from the host's :host_env, the global options hash's :host_env and the default PE/FOSS puppet variables
# @param [Host] host The host to construct the environment hash for, host specific environment should be in :host_env in a hash
# @param [Hash] opts Hash of options, including optional global host_env to be applied to each provided host
# @return [Hash] A hash of environment variables for provided host
def construct_env host, opts
env = additive_hash_merge(host[:host_env], opts[:host_env])
#Add PATH
#prepend any PATH already set for this host
env['PATH'] = (%w(puppetbindir facterbindir hierabindir) << env['PATH']).compact.reject(&:empty?)
#get the PATH defaults
env['PATH'].map! { |val| host[val] }
env['PATH'] = env['PATH'].compact.reject(&:empty?)
#run the paths through echo to see if they have any subcommands that need processing
env['PATH'].map! { |val| echo_on_host(host, val) }
env.each_key do |key|
separator = host['pathseparator']
if key == 'PATH' && host.is_cygwin?
separator = ':'
end
env[key] = env[key].join(separator)
end
env
end
# Add a host specific set of env vars to each provided host's ~/.ssh/environment
# @param [Host, Array<Host>] host One or more hosts to act upon
# @param [Hash{Symbol=>String}] opts Options to alter execution.
def set_env host, opts
logger = opts[:logger]
block_on host do |host|
env = construct_env(host, opts)
logger.debug("setting local environment on #{host.name}")
case host['platform']
when /windows/
if host.is_cygwin?
host.exec(Command.new("echo '\nPermitUserEnvironment yes' >> /etc/sshd_config"))
host.exec(Command.new("cygrunsrv -E sshd"))
host.exec(Command.new("cygrunsrv -S sshd"))
env['CYGWIN'] = 'nodosfilewarning'
else
#nothing to do here
end
when /osx/
host.exec(Command.new("echo '\nPermitUserEnvironment yes' >> /etc/sshd_config"))
host.exec(Command.new("launchctl unload /System/Library/LaunchDaemons/ssh.plist"))
host.exec(Command.new("launchctl load /System/Library/LaunchDaemons/ssh.plist"))
when /debian|ubuntu|cumulus/
host.exec(Command.new("echo '\nPermitUserEnvironment yes' >> /etc/ssh/sshd_config"))
host.exec(Command.new("service ssh restart"))
when /el-|centos|fedora|redhat|oracle|scientific|eos/
host.exec(Command.new("echo '\nPermitUserEnvironment yes' >> /etc/ssh/sshd_config"))
host.exec(Command.new("/sbin/service sshd restart"))
when /sles/
host.exec(Command.new("echo '\nPermitUserEnvironment yes' >> /etc/ssh/sshd_config"))
host.exec(Command.new("rcsshd restart"))
when /solaris/
host.exec(Command.new("echo '\nPermitUserEnvironment yes' >> /etc/ssh/sshd_config"))
host.exec(Command.new("svcadm restart svc:/network/ssh:default"))
when /aix/
host.exec(Command.new("echo '\nPermitUserEnvironment yes' >> /etc/ssh/sshd_config"))
host.exec(Command.new("stopsrc -g ssh"))
host.exec(Command.new("startsrc -g ssh"))
end
if host['platform'] !~ /windows/ or (host['platform'] =~ /windows/ and host.is_cygwin?)
#ensure that ~/.ssh/environment exists
host.exec(Command.new("mkdir -p #{Pathname.new(host[:ssh_env_file]).dirname}"))
host.exec(Command.new("chmod 0600 #{Pathname.new(host[:ssh_env_file]).dirname}"))
host.exec(Command.new("touch #{host[:ssh_env_file]}"))
#add the constructed env vars to this host
host.add_env_var('PATH', '$PATH')
end
#add the env var set to this test host
env.each_pair do |var, value|
host.add_env_var(var, value)
end
#close the host to re-establish the connection with the new sshd settings
host.close
# print out the working env
if host.is_cygwin?
host.exec(Command.new("cat #{host[:ssh_env_file]}"))
else
host.exec(Command.new("SET"))
end
end
end
private
# A helper to tell whether a host is el-based
# @param [Host] host the host to act upon
#
  # @return [Boolean] true if the host is el-based, false otherwise
def el_based? host
['centos','redhat','scientific','el','oracle'].include?(host['platform'].variant)
end
end
end
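
The `additive_hash_merge`/`construct_env` pair above is easier to follow with a compact illustration. The snippet below is a minimal sketch written in TypeScript; every name in it is invented for illustration and is not part of beaker's Ruby API. It assumes the behaviour the doc comments describe: keys are compared case-insensitively, only colliding keys are kept, and PATH-style values are later joined with a host-specific separator.

```typescript
// Sketch only: mirrors the merge/join behaviour described in the doc
// comments above, with invented names (not beaker APIs).

type EnvValue = string | string[];

// Keep only keys defined in both hashes (compared case-insensitively),
// collecting both values into a de-duplicated array.
function additiveMerge(
  a: Record<string, string>,
  b: Record<string, string>
): Record<string, EnvValue> {
  const bByKey = new Map<string, string>();
  for (const [k, v] of Object.entries(b)) bByKey.set(k.toUpperCase(), v);

  const merged: Record<string, EnvValue> = {};
  for (const [key, val] of Object.entries(a)) {
    const other = bByKey.get(key.toUpperCase());
    if (other !== undefined) {
      merged[key] = [...new Set([val, other])]; // drop duplicates, like .uniq
    }
  }
  return merged;
}

// Join PATH-style entries with the host's separator, skipping empty values.
function joinPath(entries: EnvValue, separator: string): string {
  const list = Array.isArray(entries) ? entries : [entries];
  return list.filter((e) => e.length > 0).join(separator);
}

const merged = additiveMerge({ PATH: '/1st/path' }, { path: '/2nd/path' });
console.log(joinPath(merged['PATH'] ?? [], ':')); // "/1st/path:/2nd/path"
```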
| 1 | 9,144 | My understanding is that this will always be set to the actual containers IP, and never to the `DOCKER_HOST` IP if that env_var is set. | voxpupuli-beaker | rb |
@@ -49,11 +49,14 @@ function ResetButton( { children } ) {
* the navigate call starting, we will just set a debounce to keep the spinner for 3 seconds.
*/
const debouncedSetInProgress = useDebounce( setInProgress, 3000 );
- const mediatedSetInProgress = ( bool ) => bool ? setInProgress( true ) : debouncedSetInProgress( false );
+ const mediatedSetInProgress = useCallback(
+ ( bool ) => bool ? setInProgress( true ) : debouncedSetInProgress( false ),
+ [ debouncedSetInProgress ]
+ );
useEffect( () => {
mediatedSetInProgress( isDoingReset || isNavigatingToPostResetURL );
- }, [ isDoingReset, isNavigatingToPostResetURL ] );
+ }, [ isDoingReset, isNavigatingToPostResetURL, mediatedSetInProgress ] );
useEffect( () => {
const handleCloseModal = ( event ) => { | 1 | /**
* ResetButton component.
*
* Site Kit by Google, Copyright 2021 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* WordPress dependencies
*/
import { __ } from '@wordpress/i18n';
import { Fragment, useState, useEffect, useCallback, createInterpolateElement } from '@wordpress/element';
import { ESCAPE } from '@wordpress/keycodes';
import { useDebounce } from '../hooks/useDebounce';
/**
* Internal dependencies
*/
import Data from 'googlesitekit-data';
import { clearWebStorage } from '../util';
import Dialog from './Dialog';
import Modal from './Modal';
import Link from './Link';
import { CORE_SITE } from '../googlesitekit/datastore/site/constants';
import { CORE_LOCATION } from '../googlesitekit/datastore/location/constants';
const { useSelect, useDispatch } = Data;
function ResetButton( { children } ) {
const postResetURL = useSelect( ( select ) => select( CORE_SITE ).getAdminURL( 'googlesitekit-splash', { notification: 'reset_success' } ) );
const isDoingReset = useSelect( ( select ) => select( CORE_SITE ).isDoingReset() );
const isNavigatingToPostResetURL = useSelect( ( select ) => select( CORE_LOCATION ).isNavigatingTo( postResetURL || '' ) );
const [ inProgress, setInProgress ] = useState( false );
const [ dialogActive, setDialogActive ] = useState( false );
/*
* Using debounce here because the spinner has to render across two separate calls.
* Rather than risk it flickering on and off in between the reset call completing and
* the navigate call starting, we will just set a debounce to keep the spinner for 3 seconds.
*/
const debouncedSetInProgress = useDebounce( setInProgress, 3000 );
const mediatedSetInProgress = ( bool ) => bool ? setInProgress( true ) : debouncedSetInProgress( false );
useEffect( () => {
mediatedSetInProgress( isDoingReset || isNavigatingToPostResetURL );
}, [ isDoingReset, isNavigatingToPostResetURL ] );
useEffect( () => {
const handleCloseModal = ( event ) => {
if ( ESCAPE === event.keyCode ) {
// Only close the modal if the "Escape" key is pressed.
setDialogActive( false );
}
};
if ( dialogActive ) {
// When the dialogActive changes and it is set to true(has opened), add the event listener.
global.addEventListener( 'keyup', handleCloseModal, false );
}
// Remove the event listener when the dialog is removed; there's no need
// to have it attached when it won't be used.
return () => {
if ( dialogActive ) {
// When the dialogActive is true(is open) and its value changes, remove the event listener.
global.removeEventListener( 'keyup', handleCloseModal );
}
};
}, [ dialogActive ] );
const { reset } = useDispatch( CORE_SITE );
const { navigateTo } = useDispatch( CORE_LOCATION );
const handleUnlinkConfirm = useCallback( async () => {
await reset();
clearWebStorage();
navigateTo( postResetURL );
}, [ reset, postResetURL ] );
const toggleDialogActive = useCallback( () => {
setDialogActive( ! dialogActive );
}, [ dialogActive ] );
const openDialog = useCallback( () => {
setDialogActive( true );
}, [] );
return (
<Fragment>
<Link
className="googlesitekit-reset-button"
onClick={ openDialog }
inherit
>
{ children || __( 'Reset Site Kit', 'google-site-kit' ) }
</Link>
<Modal>
<Dialog
dialogActive={ dialogActive }
handleConfirm={ handleUnlinkConfirm }
handleDialog={ toggleDialogActive }
title={ __( 'Reset Site Kit', 'google-site-kit' ) }
subtitle={ createInterpolateElement(
__( `Resetting will disconnect all users and remove all Site Kit settings and data within WordPress. <br />You and any other users who wish to use Site Kit will need to reconnect to restore access.`, 'google-site-kit' ),
{
br: <br />,
} ) }
confirmButton={ __( 'Reset', 'google-site-kit' ) }
danger
inProgress={ inProgress }
/>
</Modal>
</Fragment>
);
}
export default ResetButton;
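
The hunk at the top of this entry wraps a handler in `useCallback` before listing it in an effect's dependency array. That reflects a general React hooks rule: a function referenced inside `useEffect` and named in its dependency array needs a stable identity, which is what `useCallback` provides. The sketch below restates the pattern with invented names in TypeScript; it is not Site Kit code, just the minimal shape of the rule.

```typescript
// Sketch only: invented names, not Site Kit code.
import { useCallback, useEffect, useState } from 'react';

function useBusyFlag( isSaving: boolean, isNavigating: boolean ) {
	const [ busy, setBusy ] = useState( false );

	// Without useCallback, this function would get a new identity on every
	// render, and listing it in the effect's dependency array would re-run
	// the effect each time. useCallback keeps the reference stable (in real
	// code the dependency array would also list anything it closes over).
	const mediatedSetBusy = useCallback(
		( value: boolean ) => setBusy( value ),
		[]
	);

	useEffect( () => {
		mediatedSetBusy( isSaving || isNavigating );
	}, [ isSaving, isNavigating, mediatedSetBusy ] );

	return busy;
}

export default useBusyFlag;
```

Memoizing the callback and then listing it as a dependency is also what satisfies the exhaustive-deps lint rule the reviewer refers to.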
| 1 | 37,753 | this dependency to a callback also has to be a callback (eslint now warns us) | google-site-kit-wp | js |
@@ -95,7 +95,7 @@ func (c *CStorPoolController) reconcile(key string) error {
return c.updateStatus(csp)
}
-func (c *CStorPoolController) destroy(csp *apis.NewTestCStorPool) error {
+func (c *CStorPoolController) destroy(csp *apis.CStorPoolInstance) error {
var phase apis.CStorPoolPhase
// DeletePool is to delete cstor zpool. | 1 | /*
Copyright 2018 The OpenEBS Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package poolcontroller
import (
"fmt"
"github.com/golang/glog"
"github.com/openebs/maya/cmd/cstor-pool-mgmt/controller/common"
zpool "github.com/openebs/maya/cmd/cstor-pool-mgmt/pool/v1alpha2"
apis "github.com/openebs/maya/pkg/apis/openebs.io/v1alpha1"
corev1 "k8s.io/api/core/v1"
k8serror "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/client-go/tools/cache"
)
// reconcile will ensure that pool for given
// key is created and running
func (c *CStorPoolController) reconcile(key string) error {
var err error
var isImported bool
csp, err := c.getCSPObjFromKey(key)
if err != nil || csp == nil {
return err
}
if IsReconcileDisabled(csp) {
c.recorder.Event(csp,
corev1.EventTypeWarning,
fmt.Sprintf("reconcile is disabled via %q annotation", string(apis.OpenEBSDisableReconcileKey)),
"Skipping reconcile")
return nil
}
if IsDestroyed(csp) {
return c.destroy(csp)
}
// take a lock for common package for updating variables
common.SyncResources.Mux.Lock()
// try to import pool
isImported, err = zpool.Import(csp)
if isImported {
if err != nil {
c.recorder.Event(csp,
corev1.EventTypeWarning,
string(common.FailureImported),
fmt.Sprintf("Failed to import pool due to '%s'", err.Error()))
common.SyncResources.Mux.Unlock()
return err
}
zpool.CheckImportedPoolVolume()
common.SyncResources.Mux.Unlock()
return c.update(csp)
}
if IsEmptyStatus(csp) || IsPendingStatus(csp) {
err = zpool.Create(csp)
if err != nil {
// We will try to create it in next event
_ = zpool.Delete(csp)
c.recorder.Event(csp,
corev1.EventTypeWarning,
string(common.FailureCreate),
fmt.Sprintf("Failed to create pool due to '%s'", err.Error()))
common.SyncResources.Mux.Unlock()
return err
}
c.recorder.Event(csp,
corev1.EventTypeNormal,
string(common.SuccessCreated),
fmt.Sprintf("Pool created successfully"))
}
common.SyncResources.Mux.Unlock()
return c.updateStatus(csp)
}
func (c *CStorPoolController) destroy(csp *apis.NewTestCStorPool) error {
var phase apis.CStorPoolPhase
// DeletePool is to delete cstor zpool.
// It will also clear the label for relevant disk
err := zpool.Delete(csp)
if err != nil {
c.recorder.Event(csp,
corev1.EventTypeWarning,
string(common.FailureDestroy),
fmt.Sprintf("Failed to delete pool due to '%s'", err.Error()))
phase = apis.CStorPoolStatusDeletionFailed
goto updatestatus
}
// removeFinalizer is to remove finalizer of cStorPool resource.
err = c.removeFinalizer(csp)
if err != nil {
// Object will exist. Let's set status as offline
phase = apis.CStorPoolStatusDeletionFailed
goto updatestatus
}
glog.Infof("Pool %s deleted successfully", csp.Name)
return nil
updatestatus:
csp.Status.Phase = phase
_, _ = zpool.OpenEBSClient.
OpenebsV1alpha1().
NewTestCStorPools(csp.Namespace).
Update(csp)
return err
}
func (c *CStorPoolController) update(csp *apis.NewTestCStorPool) error {
err := zpool.Update(csp)
if err != nil {
c.recorder.Event(csp,
corev1.EventTypeWarning,
string(common.FailedSynced),
fmt.Sprintf("Failed to update pool due to '%s'", err.Error()))
return err
}
return c.updateStatus(csp)
}
func (c *CStorPoolController) updateStatus(csp *apis.NewTestCStorPool) error {
var status apis.CStorPoolStatus
var err error
pool := zpool.PoolName(csp)
state, er := zpool.GetPropertyValue(pool, "health")
if er != nil {
err = zpool.ErrorWrapf(err, "Failed to fetch health")
} else {
status.Phase = apis.CStorPoolPhase(state)
}
freeSize, er := zpool.GetPropertyValue(pool, "free")
if er != nil {
err = zpool.ErrorWrapf(err, "Failed to fetch free size")
} else {
status.Capacity.Free = freeSize
}
usedSize, er := zpool.GetPropertyValue(pool, "allocated")
if er != nil {
err = zpool.ErrorWrapf(err, "Failed to fetch used size")
} else {
status.Capacity.Used = usedSize
}
totalSize, er := zpool.GetPropertyValue(pool, "size")
if er != nil {
err = zpool.ErrorWrapf(err, "Failed to fetch total size")
} else {
status.Capacity.Total = totalSize
}
if err != nil {
c.recorder.Event(csp,
corev1.EventTypeWarning,
string(common.FailureStatusSync),
fmt.Sprintf("Failed to sync due to '%s'", err.Error()))
return err
}
if IsStatusChange(csp.Status, status) {
csp.Status = status
_, err = zpool.OpenEBSClient.
OpenebsV1alpha1().
NewTestCStorPools(csp.Namespace).
Update(csp)
return err
}
return nil
}
// getCSPObjFromKey returns object corresponding to the resource key
func (c *CStorPoolController) getCSPObjFromKey(key string) (*apis.NewTestCStorPool, error) {
// Convert the key(namespace/name) string into a distinct name
ns, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
runtime.HandleError(fmt.Errorf("invalid resource key: %s", key))
return nil, nil
}
csp, err := c.clientset.
OpenebsV1alpha1().
NewTestCStorPools(ns).
Get(name, metav1.GetOptions{})
if err != nil {
// The cStorPool resource may no longer exist, in which case we stop
// processing.
if k8serror.IsNotFound(err) {
runtime.HandleError(fmt.Errorf("CSP '%s' in work queue no longer exists", key))
return nil, nil
}
return nil, err
}
return csp, nil
}
// removeFinalizer is to remove finalizer of cstorpool resource.
func (c *CStorPoolController) removeFinalizer(csp *apis.NewTestCStorPool) error {
if len(csp.Finalizers) == 0 {
return nil
}
csp.Finalizers = []string{}
_, err := c.clientset.
OpenebsV1alpha1().
NewTestCStorPools(csp.Namespace).
Update(csp)
if err != nil {
return err
}
glog.Infof("Removed Finalizer: %v, %v",
csp.Name,
string(csp.GetUID()))
return nil
}
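
The hunk at the top of this entry only renames the resource type, but the surrounding controller follows a reconcile shape that is easy to lose in the details: destroy and clear finalizers when the object is marked for deletion, otherwise import-or-create the pool and write the status back only when it changed. The following is a rough TypeScript sketch of that flow with made-up types; it is not the OpenEBS code and glosses over error handling and locking.

```typescript
// Sketch only: made-up types, no error handling; not OpenEBS code.

interface PoolResource {
  deleted: boolean;
  finalizers: string[];
  status: string;
}

interface PoolBackend {
  destroy(r: PoolResource): void;
  importOrCreate(r: PoolResource): void;
  readStatus(r: PoolResource): string;
  update(r: PoolResource): void;
}

function reconcile(resource: PoolResource, backend: PoolBackend): void {
  if (resource.deleted) {
    // Tear down the real pool first, then clear finalizers so the API
    // server can garbage-collect the object.
    backend.destroy(resource);
    resource.finalizers = [];
    backend.update(resource);
    return;
  }

  backend.importOrCreate(resource);

  // Only write the object back when the observed status actually changed,
  // to avoid a pointless update on every sync.
  const observed = backend.readStatus(resource);
  if (observed !== resource.status) {
    resource.status = observed;
    backend.update(resource);
  }
}

// Tiny in-memory stand-in, just to exercise the flow:
const calls: string[] = [];
const backend: PoolBackend = {
  destroy: () => calls.push('destroy'),
  importOrCreate: () => calls.push('importOrCreate'),
  readStatus: () => 'ONLINE',
  update: () => calls.push('update'),
};
reconcile({ deleted: false, finalizers: [], status: '' }, backend);
console.log(calls); // [ 'importOrCreate', 'update' ]
```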
| 1 | 17,143 | directory name as well needs change.. | openebs-maya | go |
@@ -348,7 +348,8 @@ class PluginManager
public function findByIdentifier($identifier)
{
if (!isset($this->plugins[$identifier])) {
- $identifier = $this->normalizeIdentifier($identifier);
+ $code = $this->getIdentifier($identifier);
+ $identifier = $this->normalizeIdentifier($code);
}
if (!isset($this->plugins[$identifier])) { | 1 | <?php namespace System\Classes;
use Db;
use App;
use Str;
use File;
use Lang;
use Log;
use View;
use Config;
use Schema;
use RecursiveIteratorIterator;
use RecursiveDirectoryIterator;
use SystemException;
/**
* Plugin manager
*
* @package october\system
* @author Alexey Bobkov, Samuel Georges
*/
class PluginManager
{
use \October\Rain\Support\Traits\Singleton;
/**
* The application instance, since Plugins are an extension of a Service Provider
*/
protected $app;
/**
* Container object used for storing plugin information objects.
*/
protected $plugins;
/**
* @var array A map of plugins and their directory paths.
*/
protected $pathMap = [];
/**
* @var bool Check if all plugins have had the register() method called.
*/
protected $registered = false;
/**
* @var bool Check if all plugins have had the boot() method called.
*/
protected $booted = false;
/**
* @var string Path to the disarm file.
*/
protected $metaFile;
/**
* @var array Collection of disabled plugins
*/
protected $disabledPlugins = [];
/**
* @var array Cache of registration method results.
*/
protected $registrationMethodCache = [];
/**
* @var boolean Prevent all plugins from registering or booting
*/
public static $noInit = false;
/**
* Initializes the plugin manager
*/
protected function init()
{
$this->bindContainerObjects();
$this->metaFile = storage_path('cms/disabled.json');
$this->loadDisabled();
$this->loadPlugins();
if ($this->app->runningInBackend()) {
$this->loadDependencies();
}
}
/**
* These objects are "soft singletons" and may be lost when
* the IoC container reboots. This provides a way to rebuild
* for the purposes of unit testing.
*/
public function bindContainerObjects()
{
$this->app = App::make('app');
}
/**
* Finds all available plugins and loads them in to the $plugins array.
* @return array
*/
public function loadPlugins()
{
$this->plugins = [];
/**
         * Locates all plugins and binds them to the container
*/
foreach ($this->getPluginNamespaces() as $namespace => $path) {
$this->loadPlugin($namespace, $path);
}
$this->sortDependencies();
return $this->plugins;
}
/**
* Loads a single plugin in to the manager.
* @param string $namespace Eg: Acme\Blog
* @param string $path Eg: plugins_path().'/acme/blog';
* @return void
*/
public function loadPlugin($namespace, $path)
{
$className = $namespace.'\Plugin';
$classPath = $path.'/Plugin.php';
try {
// Autoloader failed?
if (!class_exists($className)) {
include_once $classPath;
}
// Not a valid plugin!
if (!class_exists($className)) {
return;
}
$classObj = new $className($this->app);
} catch (\Throwable $e) {
Log::error('Plugin ' . $className . ' could not be instantiated.', [
'message' => $e->getMessage(),
'file' => $e->getFile(),
'line' => $e->getLine(),
'trace' => $e->getTraceAsString()
]);
return;
}
$classId = $this->getIdentifier($classObj);
/*
* Check for disabled plugins
*/
if ($this->isDisabled($classId)) {
$classObj->disabled = true;
}
$this->plugins[$classId] = $classObj;
$this->pathMap[$classId] = $path;
return $classObj;
}
/**
* Runs the register() method on all plugins. Can only be called once.
* @return void
*/
public function registerAll($force = false)
{
if ($this->registered && !$force) {
return;
}
foreach ($this->plugins as $pluginId => $plugin) {
$this->registerPlugin($plugin, $pluginId);
}
$this->registered = true;
}
/**
* Unregisters all plugins: the negative of registerAll().
* @return void
*/
public function unregisterAll()
{
$this->registered = false;
$this->plugins = [];
}
/**
* Registers a single plugin object.
* @param PluginBase $plugin
* @param string $pluginId
* @return void
*/
public function registerPlugin($plugin, $pluginId = null)
{
if (!$pluginId) {
$pluginId = $this->getIdentifier($plugin);
}
if (!$plugin) {
return;
}
$pluginPath = $this->getPluginPath($plugin);
$pluginNamespace = strtolower($pluginId);
/*
* Register language namespaces
*/
$langPath = $pluginPath . '/lang';
if (File::isDirectory($langPath)) {
Lang::addNamespace($pluginNamespace, $langPath);
}
if ($plugin->disabled) {
return;
}
/*
* Register plugin class autoloaders
*/
$autoloadPath = $pluginPath . '/vendor/autoload.php';
if (File::isFile($autoloadPath)) {
ComposerManager::instance()->autoload($pluginPath . '/vendor');
}
if (!self::$noInit || $plugin->elevated) {
$plugin->register();
}
/*
* Register configuration path
*/
$configPath = $pluginPath . '/config';
if (File::isDirectory($configPath)) {
Config::package($pluginNamespace, $configPath, $pluginNamespace);
}
/*
* Register views path
*/
$viewsPath = $pluginPath . '/views';
if (File::isDirectory($viewsPath)) {
View::addNamespace($pluginNamespace, $viewsPath);
}
/*
* Add init, if available
*/
$initFile = $pluginPath . '/init.php';
if (!self::$noInit && File::exists($initFile)) {
require $initFile;
}
/*
* Add routes, if available
*/
$routesFile = $pluginPath . '/routes.php';
if (File::exists($routesFile)) {
require $routesFile;
}
}
/**
* Runs the boot() method on all plugins. Can only be called once.
*/
public function bootAll($force = false)
{
if ($this->booted && !$force) {
return;
}
foreach ($this->plugins as $plugin) {
$this->bootPlugin($plugin);
}
$this->booted = true;
}
/**
* Registers a single plugin object.
* @param PluginBase $plugin
* @return void
*/
public function bootPlugin($plugin)
{
if (!$plugin || $plugin->disabled) {
return;
}
if (!self::$noInit || $plugin->elevated) {
$plugin->boot();
}
}
/**
* Returns the directory path to a plugin
*/
public function getPluginPath($id)
{
$classId = $this->getIdentifier($id);
if (!isset($this->pathMap[$classId])) {
return null;
}
return File::normalizePath($this->pathMap[$classId]);
}
/**
* Check if a plugin exists and is enabled.
* @param string $id Plugin identifier, eg: Namespace.PluginName
* @return boolean
*/
public function exists($id)
{
return !(!$this->findByIdentifier($id) || $this->isDisabled($id));
}
/**
* Returns an array with all registered plugins
* The index is the plugin namespace, the value is the plugin information object.
*/
public function getPlugins()
{
return array_diff_key($this->plugins, $this->disabledPlugins);
}
/**
* Returns a plugin registration class based on its namespace (Author\Plugin).
*/
public function findByNamespace($namespace)
{
if (!$this->hasPlugin($namespace)) {
return null;
}
$classId = $this->getIdentifier($namespace);
return $this->plugins[$classId];
}
/**
* Returns a plugin registration class based on its identifier (Author.Plugin).
*/
public function findByIdentifier($identifier)
{
if (!isset($this->plugins[$identifier])) {
$identifier = $this->normalizeIdentifier($identifier);
}
if (!isset($this->plugins[$identifier])) {
return null;
}
return $this->plugins[$identifier];
}
/**
* Checks to see if a plugin has been registered.
*/
public function hasPlugin($namespace)
{
$classId = $this->getIdentifier($namespace);
$normalized = $this->normalizeIdentifier($classId);
return isset($this->plugins[$normalized]);
}
/**
* Returns a flat array of vendor plugin namespaces and their paths
*/
public function getPluginNamespaces()
{
$classNames = [];
foreach ($this->getVendorAndPluginNames() as $vendorName => $vendorList) {
foreach ($vendorList as $pluginName => $pluginPath) {
$namespace = '\\'.$vendorName.'\\'.$pluginName;
$namespace = Str::normalizeClassName($namespace);
$classNames[$namespace] = $pluginPath;
}
}
return $classNames;
}
/**
* Returns a 2 dimensional array of vendors and their plugins.
*/
public function getVendorAndPluginNames()
{
$plugins = [];
$dirPath = plugins_path();
if (!File::isDirectory($dirPath)) {
return $plugins;
}
$it = new RecursiveIteratorIterator(
new RecursiveDirectoryIterator($dirPath, RecursiveDirectoryIterator::FOLLOW_SYMLINKS)
);
$it->setMaxDepth(2);
$it->rewind();
while ($it->valid()) {
if (($it->getDepth() > 1) && $it->isFile() && (strtolower($it->getFilename()) == "plugin.php")) {
$filePath = dirname($it->getPathname());
$pluginName = basename($filePath);
$vendorName = basename(dirname($filePath));
$plugins[$vendorName][$pluginName] = $filePath;
}
$it->next();
}
return $plugins;
}
/**
* Resolves a plugin identifier from a plugin class name or object.
* @param mixed Plugin class name or object
* @return string Identifier in format of Vendor.Plugin
*/
public function getIdentifier($namespace)
{
$namespace = Str::normalizeClassName($namespace);
if (strpos($namespace, '\\') === null) {
return $namespace;
}
$parts = explode('\\', $namespace);
$slice = array_slice($parts, 1, 2);
$namespace = implode('.', $slice);
return $namespace;
}
/**
* Takes a human plugin code (acme.blog) and makes it authentic (Acme.Blog)
* @param string $id
* @return string
*/
public function normalizeIdentifier($identifier)
{
foreach ($this->plugins as $id => $object) {
if (strtolower($id) == strtolower($identifier)) {
return $id;
}
}
return $identifier;
}
/**
* Spins over every plugin object and collects the results of a method call.
* @param string $methodName
* @return array
*/
public function getRegistrationMethodValues($methodName)
{
if (isset($this->registrationMethodCache[$methodName])) {
return $this->registrationMethodCache[$methodName];
}
$results = [];
$plugins = $this->getPlugins();
foreach ($plugins as $id => $plugin) {
if (!method_exists($plugin, $methodName)) {
continue;
}
$results[$id] = $plugin->{$methodName}();
}
return $this->registrationMethodCache[$methodName] = $results;
}
//
// Disability
//
public function clearDisabledCache()
{
File::delete($this->metaFile);
$this->disabledPlugins = [];
}
/**
     * Loads all disabled plugins from the meta file.
*/
protected function loadDisabled()
{
$path = $this->metaFile;
if (($configDisabled = Config::get('cms.disablePlugins')) && is_array($configDisabled)) {
foreach ($configDisabled as $disabled) {
$this->disabledPlugins[$disabled] = true;
}
}
if (File::exists($path)) {
$disabled = json_decode(File::get($path), true) ?: [];
$this->disabledPlugins = array_merge($this->disabledPlugins, $disabled);
}
else {
$this->populateDisabledPluginsFromDb();
$this->writeDisabled();
}
}
/**
* Determines if a plugin is disabled by looking at the meta information
* or the application configuration.
* @return boolean
*/
public function isDisabled($id)
{
$code = $this->getIdentifier($id);
if (array_key_exists($code, $this->disabledPlugins)) {
return true;
}
return false;
}
/**
* Write the disabled plugins to a meta file.
*/
protected function writeDisabled()
{
File::put($this->metaFile, json_encode($this->disabledPlugins));
}
/**
* Populates information about disabled plugins from database
* @return void
*/
protected function populateDisabledPluginsFromDb()
{
if (!App::hasDatabase()) {
return;
}
if (!Schema::hasTable('system_plugin_versions')) {
return;
}
$disabled = Db::table('system_plugin_versions')->where('is_disabled', 1)->lists('code');
foreach ($disabled as $code) {
$this->disabledPlugins[$code] = true;
}
}
/**
* Disables a single plugin in the system.
* @param string $id Plugin code/namespace
* @param bool $isUser Set to true if disabled by the user
* @return bool
*/
public function disablePlugin($id, $isUser = false)
{
$code = $this->getIdentifier($id);
if (array_key_exists($code, $this->disabledPlugins)) {
return false;
}
$this->disabledPlugins[$code] = $isUser;
$this->writeDisabled();
if ($pluginObj = $this->findByIdentifier($code)) {
$pluginObj->disabled = true;
}
return true;
}
/**
* Enables a single plugin in the system.
* @param string $id Plugin code/namespace
* @param bool $isUser Set to true if enabled by the user
* @return bool
*/
public function enablePlugin($id, $isUser = false)
{
$code = $this->getIdentifier($id);
if (!array_key_exists($code, $this->disabledPlugins)) {
return false;
}
// Prevent system from enabling plugins disabled by the user
if (!$isUser && $this->disabledPlugins[$code] === true) {
return false;
}
unset($this->disabledPlugins[$code]);
$this->writeDisabled();
if ($pluginObj = $this->findByIdentifier($code)) {
$pluginObj->disabled = false;
}
return true;
}
//
// Dependencies
//
/**
* Scans the system plugins to locate any dependencies that are not currently
* installed. Returns an array of plugin codes that are needed.
*
* PluginManager::instance()->findMissingDependencies();
*
* @return array
*/
public function findMissingDependencies()
{
$missing = [];
foreach ($this->plugins as $id => $plugin) {
if (!$required = $this->getDependencies($plugin)) {
continue;
}
foreach ($required as $require) {
if ($this->hasPlugin($require)) {
continue;
}
if (!in_array($require, $missing)) {
$missing[] = $require;
}
}
}
return $missing;
}
/**
     * Cross-checks all plugins and their dependencies; if the dependencies are
     * not met, the plugins are disabled, and vice versa.
* @return void
*/
protected function loadDependencies()
{
foreach ($this->plugins as $id => $plugin) {
if (!$required = $this->getDependencies($plugin)) {
continue;
}
$disable = false;
foreach ($required as $require) {
if (!$pluginObj = $this->findByIdentifier($require)) {
$disable = true;
}
elseif ($pluginObj->disabled) {
$disable = true;
}
}
if ($disable) {
$this->disablePlugin($id);
}
else {
$this->enablePlugin($id);
}
}
}
/**
* Sorts a collection of plugins, in the order that they should be actioned,
* according to their given dependencies. Least dependent come first.
* @return array Collection of sorted plugin identifiers
*/
protected function sortDependencies()
{
ksort($this->plugins);
/*
* Canvas the dependency tree
*/
$checklist = $this->plugins;
$result = [];
$loopCount = 0;
while (count($checklist)) {
if (++$loopCount > 2048) {
throw new SystemException('Too much recursion! Check for circular dependencies in your plugins.');
}
foreach ($checklist as $code => $plugin) {
/*
* Get dependencies and remove any aliens
*/
$depends = $this->getDependencies($plugin) ?: [];
$depends = array_filter($depends, function ($pluginCode) {
return isset($this->plugins[$pluginCode]);
});
/*
* No dependencies
*/
if (!$depends) {
array_push($result, $code);
unset($checklist[$code]);
continue;
}
/*
* Find dependencies that have not been checked
*/
$depends = array_diff($depends, $result);
if (count($depends) > 0) {
continue;
}
/*
* All dependencies are checked
*/
array_push($result, $code);
unset($checklist[$code]);
}
}
/*
* Reassemble plugin map
*/
$sortedPlugins = [];
foreach ($result as $code) {
$sortedPlugins[$code] = $this->plugins[$code];
}
return $this->plugins = $sortedPlugins;
}
/**
* Returns the plugin identifiers that are required by the supplied plugin.
* @param string $plugin Plugin identifier, object or class
* @return array
*/
public function getDependencies($plugin)
{
if (is_string($plugin) && (!$plugin = $this->findByIdentifier($plugin))) {
return false;
}
if (!isset($plugin->require) || !$plugin->require) {
return null;
}
return is_array($plugin->require) ? $plugin->require : [$plugin->require];
}
/**
* @deprecated Plugins are now sorted by default. See getPlugins()
* Remove if year >= 2022
*/
public function sortByDependencies($plugins = null)
{
traceLog('PluginManager::sortByDependencies is deprecated. Plugins are now sorted by default. Use PluginManager::getPlugins()');
return array_keys($plugins ?: $this->getPlugins());
}
//
// Management
//
/**
* Completely roll back and delete a plugin from the system.
* @param string $id Plugin code/namespace
* @return void
*/
public function deletePlugin($id)
{
/*
* Rollback plugin
*/
UpdateManager::instance()->rollbackPlugin($id);
/*
* Delete from file system
*/
if ($pluginPath = self::instance()->getPluginPath($id)) {
File::deleteDirectory($pluginPath);
}
}
/**
* Tears down a plugin's database tables and rebuilds them.
* @param string $id Plugin code/namespace
* @return void
*/
public function refreshPlugin($id)
{
$manager = UpdateManager::instance();
$manager->rollbackPlugin($id);
$manager->updatePlugin($id);
}
}
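
The hunk at the top of this entry (and the review question attached to it) concerns how a plugin reference is resolved in two steps: first collapse whatever the caller passed, a `Vendor\Plugin` namespace or a `Vendor.Plugin` code, into the dotted identifier, then match it case-insensitively against the registered keys. Below is a rough TypeScript sketch of that two-step lookup, with invented names rather than October CMS APIs.

```typescript
// Sketch only: the registry keys and helper names are made up; they are
// not October CMS APIs.

const plugins = new Map<string, object>([
  ['Acme.Blog', { /* plugin registration object */ }],
]);

// Step 1: collapse either form of reference ("Acme\Blog" or "acme.blog")
// into the dotted Vendor.Plugin shape.
function toIdentifier(ref: string): string {
  if (!ref.includes('\\')) return ref;
  const parts = ref.split('\\').filter((p) => p.length > 0);
  return parts.slice(0, 2).join('.');
}

// Step 2: match the identifier against registered keys case-insensitively,
// returning the canonical casing used at registration time.
function normalizeIdentifier(id: string): string {
  for (const key of plugins.keys()) {
    if (key.toLowerCase() === id.toLowerCase()) return key;
  }
  return id;
}

function findByIdentifier(ref: string): object | undefined {
  return plugins.get(normalizeIdentifier(toIdentifier(ref)));
}

console.log(findByIdentifier('acme.blog') !== undefined);  // true
console.log(findByIdentifier('Acme\\Blog') !== undefined); // true
```

The first step is what lets the lookup accept either form of reference; the second is what makes `acme.blog` and `Acme.Blog` resolve to the same entry.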
| 1 | 17,725 | Why is the getIdentifier() call required here? | octobercms-october | php |
@@ -92,7 +92,11 @@ std::string MakeCamel(const std::string &in, bool first) {
}
void Parser::Message(const std::string &msg) {
+#ifndef FLATBUFFERS_PLATFORM_NO_FILE_SUPPORT
error_ = file_being_parsed_.length() ? AbsolutePath(file_being_parsed_) : "";
+#else // FLATBUFFERS_PLATFORM_NO_FILE_SUPPORT
+ error_ = file_being_parsed_.length() ? file_being_parsed_ : "";
+#endif // FLATBUFFERS_PLATFORM_NO_FILE_SUPPORT
// clang-format off
#ifdef _WIN32
error_ += "(" + NumToString(line_) + ")"; // MSVC alike | 1 | /*
* Copyright 2014 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <algorithm>
#include <list>
#include <string>
#include <math.h>
#include "flatbuffers/idl.h"
#include "flatbuffers/util.h"
namespace flatbuffers {
const double kPi = 3.14159265358979323846;
const char *const kTypeNames[] = {
// clang-format off
#define FLATBUFFERS_TD(ENUM, IDLTYPE, \
CTYPE, JTYPE, GTYPE, NTYPE, PTYPE, RTYPE) \
IDLTYPE,
FLATBUFFERS_GEN_TYPES(FLATBUFFERS_TD)
#undef FLATBUFFERS_TD
// clang-format on
nullptr
};
const char kTypeSizes[] = {
// clang-format off
#define FLATBUFFERS_TD(ENUM, IDLTYPE, \
CTYPE, JTYPE, GTYPE, NTYPE, PTYPE, RTYPE) \
sizeof(CTYPE),
FLATBUFFERS_GEN_TYPES(FLATBUFFERS_TD)
#undef FLATBUFFERS_TD
// clang-format on
};
// The enums in the reflection schema should match the ones we use internally.
// Compare the last element to check if these go out of sync.
static_assert(BASE_TYPE_UNION == static_cast<BaseType>(reflection::Union),
"enums don't match");
// Any parsing calls have to be wrapped in this macro, which automates
// handling of recursive error checking a bit. It will check the received
// CheckedError object, and return straight away on error.
#define ECHECK(call) \
{ \
auto ce = (call); \
if (ce.Check()) return ce; \
}
// These two functions are called hundreds of times below, so define a short
// form:
#define NEXT() ECHECK(Next())
#define EXPECT(tok) ECHECK(Expect(tok))
static bool ValidateUTF8(const std::string &str) {
const char *s = &str[0];
const char *const sEnd = s + str.length();
while (s < sEnd) {
if (FromUTF8(&s) < 0) { return false; }
}
return true;
}
// Convert an underscore_based_identifier into camelCase.
// Also uppercases the first character if first is true.
std::string MakeCamel(const std::string &in, bool first) {
std::string s;
for (size_t i = 0; i < in.length(); i++) {
if (!i && first)
s += static_cast<char>(toupper(in[0]));
else if (in[i] == '_' && i + 1 < in.length())
s += static_cast<char>(toupper(in[++i]));
else
s += in[i];
}
return s;
}
void Parser::Message(const std::string &msg) {
error_ = file_being_parsed_.length() ? AbsolutePath(file_being_parsed_) : "";
// clang-format off
#ifdef _WIN32
error_ += "(" + NumToString(line_) + ")"; // MSVC alike
#else
if (file_being_parsed_.length()) error_ += ":";
error_ += NumToString(line_) + ":0"; // gcc alike
#endif
// clang-format on
error_ += ": " + msg;
}
void Parser::Warning(const std::string &msg) { Message("warning: " + msg); }
CheckedError Parser::Error(const std::string &msg) {
Message("error: " + msg);
return CheckedError(true);
}
inline CheckedError NoError() { return CheckedError(false); }
CheckedError Parser::RecurseError() {
return Error("maximum parsing recursion of " + NumToString(kMaxParsingDepth) +
" reached");
}
inline std::string OutOfRangeErrorMsg(int64_t val, const std::string &op,
int64_t limit) {
const std::string cause = NumToString(val) + op + NumToString(limit);
return "constant does not fit (" + cause + ")";
}
// Ensure that integer values we parse fit inside the declared integer type.
CheckedError Parser::CheckInRange(int64_t val, int64_t min, int64_t max) {
if (val < min)
return Error(OutOfRangeErrorMsg(val, " < ", min));
else if (val > max)
return Error(OutOfRangeErrorMsg(val, " > ", max));
else
return NoError();
}
// atot: templated version of atoi/atof: convert a string to an instance of T.
template<typename T>
inline CheckedError atot(const char *s, Parser &parser, T *val) {
int64_t i = StringToInt(s);
const int64_t min = flatbuffers::numeric_limits<T>::min();
const int64_t max = flatbuffers::numeric_limits<T>::max();
*val = (T)i; // Assign this first to make ASAN happy.
return parser.CheckInRange(i, min, max);
}
template<>
inline CheckedError atot<uint64_t>(const char *s, Parser &parser,
uint64_t *val) {
(void)parser;
*val = StringToUInt(s);
return NoError();
}
template<>
inline CheckedError atot<bool>(const char *s, Parser &parser, bool *val) {
(void)parser;
*val = 0 != atoi(s);
return NoError();
}
template<>
inline CheckedError atot<float>(const char *s, Parser &parser, float *val) {
(void)parser;
*val = static_cast<float>(strtod(s, nullptr));
return NoError();
}
template<>
inline CheckedError atot<double>(const char *s, Parser &parser, double *val) {
(void)parser;
*val = strtod(s, nullptr);
return NoError();
}
template<>
inline CheckedError atot<Offset<void>>(const char *s, Parser &parser,
Offset<void> *val) {
(void)parser;
*val = Offset<void>(atoi(s));
return NoError();
}
std::string Namespace::GetFullyQualifiedName(const std::string &name,
size_t max_components) const {
// Early exit if we don't have a defined namespace.
if (components.empty() || !max_components) { return name; }
std::string stream_str;
for (size_t i = 0; i < std::min(components.size(), max_components); i++) {
if (i) { stream_str += '.'; }
stream_str += std::string(components[i]);
}
if (name.length()) {
stream_str += '.';
stream_str += name;
}
return stream_str;
}
// Declare tokens we'll use. Single character tokens are represented by their
// ascii character code (e.g. '{'), others above 256.
// clang-format off
#define FLATBUFFERS_GEN_TOKENS(TD) \
TD(Eof, 256, "end of file") \
TD(StringConstant, 257, "string constant") \
TD(IntegerConstant, 258, "integer constant") \
TD(FloatConstant, 259, "float constant") \
TD(Identifier, 260, "identifier")
#ifdef __GNUC__
__extension__ // Stop GCC complaining about trailing comma with -Wpendantic.
#endif
enum {
#define FLATBUFFERS_TOKEN(NAME, VALUE, STRING) kToken ## NAME = VALUE,
FLATBUFFERS_GEN_TOKENS(FLATBUFFERS_TOKEN)
#undef FLATBUFFERS_TOKEN
};
static std::string TokenToString(int t) {
static const char * const tokens[] = {
#define FLATBUFFERS_TOKEN(NAME, VALUE, STRING) STRING,
FLATBUFFERS_GEN_TOKENS(FLATBUFFERS_TOKEN)
#undef FLATBUFFERS_TOKEN
#define FLATBUFFERS_TD(ENUM, IDLTYPE, \
CTYPE, JTYPE, GTYPE, NTYPE, PTYPE, RTYPE) \
IDLTYPE,
FLATBUFFERS_GEN_TYPES(FLATBUFFERS_TD)
#undef FLATBUFFERS_TD
};
if (t < 256) { // A single ascii char token.
std::string s;
s.append(1, static_cast<char>(t));
return s;
} else { // Other tokens.
return tokens[t - 256];
}
}
// clang-format on
std::string Parser::TokenToStringId(int t) const {
return t == kTokenIdentifier ? attribute_ : TokenToString(t);
}
// Parses exactly nibbles worth of hex digits into a number, or error.
CheckedError Parser::ParseHexNum(int nibbles, uint64_t *val) {
for (int i = 0; i < nibbles; i++)
if (!isxdigit(static_cast<unsigned char>(cursor_[i])))
return Error("escape code must be followed by " + NumToString(nibbles) +
" hex digits");
std::string target(cursor_, cursor_ + nibbles);
*val = StringToUInt(target.c_str(), nullptr, 16);
cursor_ += nibbles;
return NoError();
}
CheckedError Parser::SkipByteOrderMark() {
if (static_cast<unsigned char>(*cursor_) != 0xef) return NoError();
cursor_++;
if (static_cast<unsigned char>(*cursor_) != 0xbb)
return Error("invalid utf-8 byte order mark");
cursor_++;
if (static_cast<unsigned char>(*cursor_) != 0xbf)
return Error("invalid utf-8 byte order mark");
cursor_++;
return NoError();
}
bool IsIdentifierStart(char c) {
return isalpha(static_cast<unsigned char>(c)) || c == '_';
}
CheckedError Parser::Next() {
doc_comment_.clear();
bool seen_newline = cursor_ == source_;
attribute_.clear();
for (;;) {
char c = *cursor_++;
token_ = c;
switch (c) {
case '\0':
cursor_--;
token_ = kTokenEof;
return NoError();
case ' ':
case '\r':
case '\t': break;
case '\n':
line_++;
seen_newline = true;
break;
case '{':
case '}':
case '(':
case ')':
case '[':
case ']':
case ',':
case ':':
case ';':
case '=': return NoError();
case '.':
if (!isdigit(static_cast<unsigned char>(*cursor_)))
return NoError();
return Error("floating point constant can\'t start with \".\"");
case '\"':
case '\'': {
int unicode_high_surrogate = -1;
while (*cursor_ != c) {
if (*cursor_ < ' ' && static_cast<signed char>(*cursor_) >= 0)
return Error("illegal character in string constant");
if (*cursor_ == '\\') {
cursor_++;
if (unicode_high_surrogate != -1 && *cursor_ != 'u') {
return Error(
"illegal Unicode sequence (unpaired high surrogate)");
}
switch (*cursor_) {
case 'n':
attribute_ += '\n';
cursor_++;
break;
case 't':
attribute_ += '\t';
cursor_++;
break;
case 'r':
attribute_ += '\r';
cursor_++;
break;
case 'b':
attribute_ += '\b';
cursor_++;
break;
case 'f':
attribute_ += '\f';
cursor_++;
break;
case '\"':
attribute_ += '\"';
cursor_++;
break;
case '\'':
attribute_ += '\'';
cursor_++;
break;
case '\\':
attribute_ += '\\';
cursor_++;
break;
case '/':
attribute_ += '/';
cursor_++;
break;
case 'x': { // Not in the JSON standard
cursor_++;
uint64_t val;
ECHECK(ParseHexNum(2, &val));
attribute_ += static_cast<char>(val);
break;
}
case 'u': {
cursor_++;
uint64_t val;
ECHECK(ParseHexNum(4, &val));
if (val >= 0xD800 && val <= 0xDBFF) {
if (unicode_high_surrogate != -1) {
return Error(
"illegal Unicode sequence (multiple high surrogates)");
} else {
unicode_high_surrogate = static_cast<int>(val);
}
} else if (val >= 0xDC00 && val <= 0xDFFF) {
if (unicode_high_surrogate == -1) {
return Error(
"illegal Unicode sequence (unpaired low surrogate)");
} else {
int code_point = 0x10000 +
((unicode_high_surrogate & 0x03FF) << 10) +
(val & 0x03FF);
ToUTF8(code_point, &attribute_);
unicode_high_surrogate = -1;
}
} else {
if (unicode_high_surrogate != -1) {
return Error(
"illegal Unicode sequence (unpaired high surrogate)");
}
ToUTF8(static_cast<int>(val), &attribute_);
}
break;
}
default: return Error("unknown escape code in string constant");
}
} else { // printable chars + UTF-8 bytes
if (unicode_high_surrogate != -1) {
return Error(
"illegal Unicode sequence (unpaired high surrogate)");
}
attribute_ += *cursor_++;
}
}
if (unicode_high_surrogate != -1) {
return Error("illegal Unicode sequence (unpaired high surrogate)");
}
cursor_++;
if (!opts.allow_non_utf8 && !ValidateUTF8(attribute_)) {
return Error("illegal UTF-8 sequence");
}
token_ = kTokenStringConstant;
return NoError();
}
case '/':
if (*cursor_ == '/') {
const char *start = ++cursor_;
while (*cursor_ && *cursor_ != '\n' && *cursor_ != '\r') cursor_++;
if (*start == '/') { // documentation comment
if (!seen_newline)
return Error(
"a documentation comment should be on a line on its own");
doc_comment_.push_back(std::string(start + 1, cursor_));
}
break;
} else if (*cursor_ == '*') {
cursor_++;
// TODO: make nested.
while (*cursor_ != '*' || cursor_[1] != '/') {
if (*cursor_ == '\n') line_++;
if (!*cursor_) return Error("end of file in comment");
cursor_++;
}
cursor_ += 2;
break;
}
// fall thru
default:
if (IsIdentifierStart(c)) {
// Collect all chars of an identifier:
const char *start = cursor_ - 1;
while (isalnum(static_cast<unsigned char>(*cursor_)) || *cursor_ == '_')
cursor_++;
attribute_.append(start, cursor_);
token_ = kTokenIdentifier;
return NoError();
} else if (isdigit(static_cast<unsigned char>(c)) || c == '-') {
const char *start = cursor_ - 1;
if (c == '-' && *cursor_ == '0' &&
(cursor_[1] == 'x' || cursor_[1] == 'X')) {
++start;
++cursor_;
attribute_.append(&c, &c + 1);
c = '0';
}
if (c == '0' && (*cursor_ == 'x' || *cursor_ == 'X')) {
cursor_++;
while (isxdigit(static_cast<unsigned char>(*cursor_))) cursor_++;
attribute_.append(start + 2, cursor_);
attribute_ = NumToString(static_cast<int64_t>(
StringToUInt(attribute_.c_str(), nullptr, 16)));
token_ = kTokenIntegerConstant;
return NoError();
}
while (isdigit(static_cast<unsigned char>(*cursor_))) cursor_++;
if (*cursor_ == '.' || *cursor_ == 'e' || *cursor_ == 'E') {
if (*cursor_ == '.') {
cursor_++;
while (isdigit(static_cast<unsigned char>(*cursor_))) cursor_++;
}
// See if this float has a scientific notation suffix. Both JSON
// and C++ (through strtod() we use) have the same format:
if (*cursor_ == 'e' || *cursor_ == 'E') {
cursor_++;
if (*cursor_ == '+' || *cursor_ == '-') cursor_++;
while (isdigit(static_cast<unsigned char>(*cursor_))) cursor_++;
}
token_ = kTokenFloatConstant;
} else {
token_ = kTokenIntegerConstant;
}
attribute_.append(start, cursor_);
return NoError();
}
std::string ch;
ch = c;
if (c < ' ' || c > '~') ch = "code: " + NumToString(c);
return Error("illegal character: " + ch);
}
}
}
// Check if a given token is next.
bool Parser::Is(int t) const { return t == token_; }
bool Parser::IsIdent(const char *id) const {
return token_ == kTokenIdentifier && attribute_ == id;
}
// Expect a given token to be next, consume it, or error if not present.
CheckedError Parser::Expect(int t) {
if (t != token_) {
return Error("expecting: " + TokenToString(t) +
" instead got: " + TokenToStringId(token_));
}
NEXT();
return NoError();
}
CheckedError Parser::ParseNamespacing(std::string *id, std::string *last) {
while (Is('.')) {
NEXT();
*id += ".";
*id += attribute_;
if (last) *last = attribute_;
EXPECT(kTokenIdentifier);
}
return NoError();
}
EnumDef *Parser::LookupEnum(const std::string &id) {
// Search thru parent namespaces.
for (int components = static_cast<int>(current_namespace_->components.size());
components >= 0; components--) {
auto ed = enums_.Lookup(
current_namespace_->GetFullyQualifiedName(id, components));
if (ed) return ed;
}
return nullptr;
}
StructDef *Parser::LookupStruct(const std::string &id) const {
auto sd = structs_.Lookup(id);
if (sd) sd->refcount++;
return sd;
}
CheckedError Parser::ParseTypeIdent(Type &type) {
std::string id = attribute_;
EXPECT(kTokenIdentifier);
ECHECK(ParseNamespacing(&id, nullptr));
auto enum_def = LookupEnum(id);
if (enum_def) {
type = enum_def->underlying_type;
if (enum_def->is_union) type.base_type = BASE_TYPE_UNION;
} else {
type.base_type = BASE_TYPE_STRUCT;
type.struct_def = LookupCreateStruct(id);
}
return NoError();
}
// Parse any IDL type.
CheckedError Parser::ParseType(Type &type) {
if (token_ == kTokenIdentifier) {
if (IsIdent("bool")) {
type.base_type = BASE_TYPE_BOOL;
NEXT();
} else if (IsIdent("byte") || IsIdent("int8")) {
type.base_type = BASE_TYPE_CHAR;
NEXT();
} else if (IsIdent("ubyte") || IsIdent("uint8")) {
type.base_type = BASE_TYPE_UCHAR;
NEXT();
} else if (IsIdent("short") || IsIdent("int16")) {
type.base_type = BASE_TYPE_SHORT;
NEXT();
} else if (IsIdent("ushort") || IsIdent("uint16")) {
type.base_type = BASE_TYPE_USHORT;
NEXT();
} else if (IsIdent("int") || IsIdent("int32")) {
type.base_type = BASE_TYPE_INT;
NEXT();
} else if (IsIdent("uint") || IsIdent("uint32")) {
type.base_type = BASE_TYPE_UINT;
NEXT();
} else if (IsIdent("long") || IsIdent("int64")) {
type.base_type = BASE_TYPE_LONG;
NEXT();
} else if (IsIdent("ulong") || IsIdent("uint64")) {
type.base_type = BASE_TYPE_ULONG;
NEXT();
} else if (IsIdent("float") || IsIdent("float32")) {
type.base_type = BASE_TYPE_FLOAT;
NEXT();
} else if (IsIdent("double") || IsIdent("float64")) {
type.base_type = BASE_TYPE_DOUBLE;
NEXT();
} else if (IsIdent("string")) {
type.base_type = BASE_TYPE_STRING;
NEXT();
} else {
ECHECK(ParseTypeIdent(type));
}
} else if (token_ == '[') {
NEXT();
Type subtype;
ECHECK(Recurse([&]() { return ParseType(subtype); }));
if (subtype.base_type == BASE_TYPE_VECTOR) {
// We could support this, but it will complicate things, and it's
// easier to work around with a struct around the inner vector.
return Error("nested vector types not supported (wrap in table first).");
}
type = Type(BASE_TYPE_VECTOR, subtype.struct_def, subtype.enum_def);
type.element = subtype.base_type;
EXPECT(']');
} else {
return Error("illegal type syntax");
}
return NoError();
}
CheckedError Parser::AddField(StructDef &struct_def, const std::string &name,
const Type &type, FieldDef **dest) {
auto &field = *new FieldDef();
field.value.offset =
FieldIndexToOffset(static_cast<voffset_t>(struct_def.fields.vec.size()));
field.name = name;
field.file = struct_def.file;
field.value.type = type;
if (struct_def.fixed) { // statically compute the field offset
auto size = InlineSize(type);
auto alignment = InlineAlignment(type);
    // structs need to have a predictable format, so we need to align to
    // the largest scalar
struct_def.minalign = std::max(struct_def.minalign, alignment);
struct_def.PadLastField(alignment);
field.value.offset = static_cast<voffset_t>(struct_def.bytesize);
struct_def.bytesize += size;
}
if (struct_def.fields.Add(name, &field))
return Error("field already exists: " + name);
*dest = &field;
return NoError();
}
CheckedError Parser::ParseField(StructDef &struct_def) {
std::string name = attribute_;
if (LookupStruct(name))
return Error("field name can not be the same as table/struct name");
std::vector<std::string> dc = doc_comment_;
EXPECT(kTokenIdentifier);
EXPECT(':');
Type type;
ECHECK(ParseType(type));
if (struct_def.fixed && !IsScalar(type.base_type) && !IsStruct(type))
    return Error("structs may contain only scalar or struct fields");
FieldDef *typefield = nullptr;
if (type.base_type == BASE_TYPE_UNION) {
// For union fields, add a second auto-generated field to hold the type,
// with a special suffix.
ECHECK(AddField(struct_def, name + UnionTypeFieldSuffix(),
type.enum_def->underlying_type, &typefield));
} else if (type.base_type == BASE_TYPE_VECTOR &&
type.element == BASE_TYPE_UNION) {
// Only cpp, js and ts supports the union vector feature so far.
if (!SupportsVectorOfUnions()) {
return Error(
"Vectors of unions are not yet supported in all "
"the specified programming languages.");
}
// For vector of union fields, add a second auto-generated vector field to
// hold the types, with a special suffix.
Type union_vector(BASE_TYPE_VECTOR, nullptr, type.enum_def);
union_vector.element = BASE_TYPE_UTYPE;
ECHECK(AddField(struct_def, name + UnionTypeFieldSuffix(), union_vector,
&typefield));
}
FieldDef *field;
ECHECK(AddField(struct_def, name, type, &field));
if (token_ == '=') {
NEXT();
if (!IsScalar(type.base_type) ||
(struct_def.fixed && field->value.constant != "0"))
return Error(
"default values currently only supported for scalars in tables");
ECHECK(ParseSingleValue(&field->name, field->value));
}
if (type.enum_def &&
!type.enum_def->is_union &&
!type.enum_def->attributes.Lookup("bit_flags") &&
!type.enum_def->ReverseLookup(StringToInt(
field->value.constant.c_str()))) {
return Error("default value of " + field->value.constant + " for field " +
name + " is not part of enum " + type.enum_def->name);
}
if (IsFloat(type.base_type)) {
if (!strpbrk(field->value.constant.c_str(), ".eE"))
field->value.constant += ".0";
}
if (type.enum_def && IsScalar(type.base_type) && !struct_def.fixed &&
!type.enum_def->attributes.Lookup("bit_flags") &&
!type.enum_def->ReverseLookup(StringToInt(
field->value.constant.c_str())))
Warning("enum " + type.enum_def->name +
" does not have a declaration for this field\'s default of " +
field->value.constant);
field->doc_comment = dc;
ECHECK(ParseMetaData(&field->attributes));
field->deprecated = field->attributes.Lookup("deprecated") != nullptr;
auto hash_name = field->attributes.Lookup("hash");
if (hash_name) {
switch ((type.base_type == BASE_TYPE_VECTOR) ? type.element : type.base_type) {
case BASE_TYPE_SHORT:
case BASE_TYPE_USHORT: {
if (FindHashFunction16(hash_name->constant.c_str()) == nullptr)
return Error("Unknown hashing algorithm for 16 bit types: " +
hash_name->constant);
break;
}
case BASE_TYPE_INT:
case BASE_TYPE_UINT: {
if (FindHashFunction32(hash_name->constant.c_str()) == nullptr)
return Error("Unknown hashing algorithm for 32 bit types: " +
hash_name->constant);
break;
}
case BASE_TYPE_LONG:
case BASE_TYPE_ULONG: {
if (FindHashFunction64(hash_name->constant.c_str()) == nullptr)
return Error("Unknown hashing algorithm for 64 bit types: " +
hash_name->constant);
break;
}
default:
return Error(
"only short, ushort, int, uint, long and ulong data types support hashing.");
}
}
auto cpp_type = field->attributes.Lookup("cpp_type");
if (cpp_type) {
if (!hash_name)
return Error("cpp_type can only be used with a hashed field");
/// forcing cpp_ptr_type to 'naked' if unset
auto cpp_ptr_type = field->attributes.Lookup("cpp_ptr_type");
if (!cpp_ptr_type) {
auto val = new Value();
val->type = cpp_type->type;
val->constant = "naked";
field->attributes.Add("cpp_ptr_type", val);
}
}
if (field->deprecated && struct_def.fixed)
return Error("can't deprecate fields in a struct");
field->required = field->attributes.Lookup("required") != nullptr;
if (field->required &&
(struct_def.fixed || IsScalar(type.base_type)))
return Error("only non-scalar fields in tables may be 'required'");
field->key = field->attributes.Lookup("key") != nullptr;
if (field->key) {
if (struct_def.has_key) return Error("only one field may be set as 'key'");
struct_def.has_key = true;
if (!IsScalar(type.base_type)) {
field->required = true;
if (type.base_type != BASE_TYPE_STRING)
return Error("'key' field must be string or scalar type");
}
}
auto field_native_custom_alloc =
field->attributes.Lookup("native_custom_alloc");
if (field_native_custom_alloc)
return Error(
"native_custom_alloc can only be used with a table or struct "
"definition");
field->native_inline = field->attributes.Lookup("native_inline") != nullptr;
if (field->native_inline && !IsStruct(field->value.type))
return Error("native_inline can only be defined on structs'");
auto nested = field->attributes.Lookup("nested_flatbuffer");
if (nested) {
if (nested->type.base_type != BASE_TYPE_STRING)
return Error(
"nested_flatbuffer attribute must be a string (the root type)");
if (type.base_type != BASE_TYPE_VECTOR || type.element != BASE_TYPE_UCHAR)
return Error(
"nested_flatbuffer attribute may only apply to a vector of ubyte");
// This will cause an error if the root type of the nested flatbuffer
// wasn't defined elsewhere.
LookupCreateStruct(nested->constant);
// Keep a pointer to StructDef in FieldDef to simplify re-use later
auto nested_qualified_name =
current_namespace_->GetFullyQualifiedName(nested->constant);
field->nested_flatbuffer = LookupStruct(nested_qualified_name);
}
if (field->attributes.Lookup("flexbuffer")) {
field->flexbuffer = true;
uses_flexbuffers_ = true;
if (type.base_type != BASE_TYPE_VECTOR ||
type.element != BASE_TYPE_UCHAR)
return Error("flexbuffer attribute may only apply to a vector of ubyte");
}
if (typefield) {
if (!IsScalar(typefield->value.type.base_type)) {
// this is a union vector field
typefield->required = field->required;
}
// If this field is a union, and it has a manually assigned id,
// the automatically added type field should have an id as well (of N - 1).
auto attr = field->attributes.Lookup("id");
if (attr) {
auto id = atoi(attr->constant.c_str());
auto val = new Value();
val->type = attr->type;
val->constant = NumToString(id - 1);
typefield->attributes.Add("id", val);
}
}
EXPECT(';');
return NoError();
}
CheckedError Parser::ParseString(Value &val) {
auto s = attribute_;
EXPECT(kTokenStringConstant);
val.constant = NumToString(builder_.CreateString(s).o);
return NoError();
}
CheckedError Parser::ParseComma() {
if (!opts.protobuf_ascii_alike) EXPECT(',');
return NoError();
}
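// Parses any JSON value according to val.type. For unions this first looks
// for the already-parsed _type field (or scans ahead past the value to find
// it, then rewinds) so the correct union member can be parsed; tables,
// strings and vectors recurse into their own parsers, and scalars fall
// through to ParseSingleValue (or ParseHash when the field has a "hash"
// attribute).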
CheckedError Parser::ParseAnyValue(Value &val, FieldDef *field,
size_t parent_fieldn,
const StructDef *parent_struct_def) {
switch (val.type.base_type) {
case BASE_TYPE_UNION: {
FLATBUFFERS_ASSERT(field);
std::string constant;
// Find corresponding type field we may have already parsed.
for (auto elem = field_stack_.rbegin();
elem != field_stack_.rbegin() + parent_fieldn; ++elem) {
auto &type = elem->second->value.type;
if (type.base_type == BASE_TYPE_UTYPE &&
type.enum_def == val.type.enum_def) {
constant = elem->first.constant;
break;
}
}
if (constant.empty()) {
// We haven't seen the type field yet. Sadly a lot of JSON writers
// output these in alphabetical order, meaning it comes after this
// value. So we scan past the value to find it, then come back here.
auto type_name = field->name + UnionTypeFieldSuffix();
FLATBUFFERS_ASSERT(parent_struct_def);
auto type_field = parent_struct_def->fields.Lookup(type_name);
FLATBUFFERS_ASSERT(type_field); // Guaranteed by ParseField().
// Remember where we are in the source file, so we can come back here.
auto backup = *static_cast<ParserState *>(this);
ECHECK(SkipAnyJsonValue()); // The table.
ECHECK(ParseComma());
auto next_name = attribute_;
if (Is(kTokenStringConstant)) {
NEXT();
} else {
EXPECT(kTokenIdentifier);
}
if (next_name != type_name)
return Error("missing type field after this union value: " +
type_name);
EXPECT(':');
Value type_val = type_field->value;
ECHECK(ParseAnyValue(type_val, type_field, 0, nullptr));
constant = type_val.constant;
// Got the information we needed, now rewind:
*static_cast<ParserState *>(this) = backup;
}
uint8_t enum_idx;
ECHECK(atot(constant.c_str(), *this, &enum_idx));
auto enum_val = val.type.enum_def->ReverseLookup(enum_idx);
if (!enum_val) return Error("illegal type id for: " + field->name);
if (enum_val->union_type.base_type == BASE_TYPE_STRUCT) {
ECHECK(ParseTable(*enum_val->union_type.struct_def, &val.constant,
nullptr));
if (enum_val->union_type.struct_def->fixed) {
// All BASE_TYPE_UNION values are offsets, so turn this into one.
SerializeStruct(*enum_val->union_type.struct_def, val);
builder_.ClearOffsets();
val.constant = NumToString(builder_.GetSize());
}
} else if (enum_val->union_type.base_type == BASE_TYPE_STRING) {
ECHECK(ParseString(val));
} else {
FLATBUFFERS_ASSERT(false);
}
break;
}
case BASE_TYPE_STRUCT:
ECHECK(ParseTable(*val.type.struct_def, &val.constant, nullptr));
break;
case BASE_TYPE_STRING: {
ECHECK(ParseString(val));
break;
}
case BASE_TYPE_VECTOR: {
uoffset_t off;
ECHECK(ParseVector(val.type.VectorType(), &off));
val.constant = NumToString(off);
break;
}
case BASE_TYPE_INT:
case BASE_TYPE_UINT:
case BASE_TYPE_LONG:
case BASE_TYPE_ULONG: {
if (field && field->attributes.Lookup("hash") &&
(token_ == kTokenIdentifier || token_ == kTokenStringConstant)) {
ECHECK(ParseHash(val, field));
} else {
ECHECK(ParseSingleValue(field ? &field->name : nullptr, val));
}
break;
}
default: ECHECK(ParseSingleValue(field ? &field->name : nullptr, val)); break;
}
return NoError();
}
void Parser::SerializeStruct(const StructDef &struct_def, const Value &val) {
FLATBUFFERS_ASSERT(val.constant.length() == struct_def.bytesize);
builder_.Align(struct_def.minalign);
builder_.PushBytes(reinterpret_cast<const uint8_t *>(val.constant.c_str()),
struct_def.bytesize);
builder_.AddStructOffset(val.offset, builder_.GetSize());
}
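// Iterates over the fields of a JSON table, which may be written either as an
// object with named fields or as an array of values in field-declaration
// order. The supplied body callback is invoked once per field.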
CheckedError Parser::ParseTableDelimiters(size_t &fieldn,
const StructDef *struct_def,
ParseTableDelimitersBody body,
void *state) {
// We allow tables both as JSON object{ .. } with field names
// or vector[..] with all fields in order
char terminator = '}';
bool is_nested_vector = struct_def && Is('[');
if (is_nested_vector) {
NEXT();
terminator = ']';
} else {
EXPECT('{');
}
for (;;) {
if ((!opts.strict_json || !fieldn) && Is(terminator)) break;
std::string name;
if (is_nested_vector) {
if (fieldn >= struct_def->fields.vec.size()) {
return Error("too many unnamed fields in nested array");
}
name = struct_def->fields.vec[fieldn]->name;
} else {
name = attribute_;
if (Is(kTokenStringConstant)) {
NEXT();
} else {
EXPECT(opts.strict_json ? kTokenStringConstant : kTokenIdentifier);
}
if (!opts.protobuf_ascii_alike || !(Is('{') || Is('['))) EXPECT(':');
}
ECHECK(body(name, fieldn, struct_def, state));
if (Is(terminator)) break;
ECHECK(ParseComma());
}
NEXT();
if (is_nested_vector && fieldn != struct_def->fields.vec.size()) {
return Error("wrong number of unnamed fields in table vector");
}
return NoError();
}
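// Parses a table or struct from JSON: each field is parsed and kept on
// field_stack_ (insertion-sorted by vtable offset, with a check for duplicate
// fields), required fields are verified, and the collected values are then
// written out backwards through the builder.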
CheckedError Parser::ParseTable(const StructDef &struct_def, std::string *value,
uoffset_t *ovalue) {
size_t fieldn_outer = 0;
auto err = ParseTableDelimiters(
fieldn_outer, &struct_def,
[](const std::string &name, size_t &fieldn,
const StructDef *struct_def_inner, void *state) -> CheckedError {
auto *parser = static_cast<Parser *>(state);
if (name == "$schema") {
ECHECK(parser->Expect(kTokenStringConstant));
return NoError();
}
auto field = struct_def_inner->fields.Lookup(name);
if (!field) {
if (!parser->opts.skip_unexpected_fields_in_json) {
return parser->Error("unknown field: " + name);
} else {
ECHECK(parser->SkipAnyJsonValue());
}
} else {
if (parser->IsIdent("null")) {
ECHECK(parser->Next()); // Ignore this field.
} else {
Value val = field->value;
if (field->flexbuffer) {
flexbuffers::Builder builder(1024,
flexbuffers::BUILDER_FLAG_SHARE_ALL);
ECHECK(parser->ParseFlexBufferValue(&builder));
builder.Finish();
// Force alignment for nested flexbuffer
parser->builder_.ForceVectorAlignment(builder.GetSize(), sizeof(uint8_t),
sizeof(largest_scalar_t));
auto off = parser->builder_.CreateVector(builder.GetBuffer());
val.constant = NumToString(off.o);
} else if (field->nested_flatbuffer) {
ECHECK(parser->ParseNestedFlatbuffer(val, field, fieldn,
struct_def_inner));
} else {
ECHECK(parser->Recurse([&]() {
return parser->ParseAnyValue(val, field, fieldn,
struct_def_inner);
}));
}
// Hardcoded insertion-sort with error-check.
// If fields are specified in order, then this loop exits
// immediately.
auto elem = parser->field_stack_.rbegin();
for (; elem != parser->field_stack_.rbegin() + fieldn; ++elem) {
auto existing_field = elem->second;
if (existing_field == field)
return parser->Error("field set more than once: " +
field->name);
if (existing_field->value.offset < field->value.offset) break;
}
// Note: elem points to before the insertion point, thus .base()
// points to the correct spot.
parser->field_stack_.insert(elem.base(),
std::make_pair(val, field));
fieldn++;
}
}
return NoError();
},
this);
ECHECK(err);
// Check if all required fields are parsed.
for (auto field_it = struct_def.fields.vec.begin();
field_it != struct_def.fields.vec.end(); ++field_it) {
auto required_field = *field_it;
if (!required_field->required) { continue; }
bool found = false;
for (auto pf_it = field_stack_.end() - fieldn_outer;
pf_it != field_stack_.end(); ++pf_it) {
auto parsed_field = pf_it->second;
if (parsed_field == required_field) {
found = true;
break;
}
}
if (!found) {
return Error("required field is missing: " + required_field->name +
" in " + struct_def.name);
}
}
if (struct_def.fixed && fieldn_outer != struct_def.fields.vec.size())
return Error("struct: wrong number of initializers: " + struct_def.name);
auto start = struct_def.fixed ? builder_.StartStruct(struct_def.minalign)
: builder_.StartTable();
for (size_t size = struct_def.sortbysize ? sizeof(largest_scalar_t) : 1; size;
size /= 2) {
// Go through elements in reverse, since we're building the data backwards.
for (auto it = field_stack_.rbegin();
it != field_stack_.rbegin() + fieldn_outer; ++it) {
auto &field_value = it->first;
auto field = it->second;
if (!struct_def.sortbysize ||
size == SizeOf(field_value.type.base_type)) {
switch (field_value.type.base_type) {
// clang-format off
#define FLATBUFFERS_TD(ENUM, IDLTYPE, \
CTYPE, JTYPE, GTYPE, NTYPE, PTYPE, RTYPE) \
case BASE_TYPE_ ## ENUM: \
builder_.Pad(field->padding); \
if (struct_def.fixed) { \
CTYPE val; \
ECHECK(atot(field_value.constant.c_str(), *this, &val)); \
builder_.PushElement(val); \
} else { \
CTYPE val, valdef; \
ECHECK(atot(field_value.constant.c_str(), *this, &val)); \
ECHECK(atot(field->value.constant.c_str(), *this, &valdef)); \
builder_.AddElement(field_value.offset, val, valdef); \
} \
break;
FLATBUFFERS_GEN_TYPES_SCALAR(FLATBUFFERS_TD);
#undef FLATBUFFERS_TD
#define FLATBUFFERS_TD(ENUM, IDLTYPE, \
CTYPE, JTYPE, GTYPE, NTYPE, PTYPE, RTYPE) \
case BASE_TYPE_ ## ENUM: \
builder_.Pad(field->padding); \
if (IsStruct(field->value.type)) { \
SerializeStruct(*field->value.type.struct_def, field_value); \
} else { \
CTYPE val; \
ECHECK(atot(field_value.constant.c_str(), *this, &val)); \
builder_.AddOffset(field_value.offset, val); \
} \
break;
FLATBUFFERS_GEN_TYPES_POINTER(FLATBUFFERS_TD);
#undef FLATBUFFERS_TD
// clang-format on
}
}
}
}
for (size_t i = 0; i < fieldn_outer; i++) field_stack_.pop_back();
if (struct_def.fixed) {
builder_.ClearOffsets();
builder_.EndStruct();
FLATBUFFERS_ASSERT(value);
// Temporarily store this struct in the value string, since it is to
// be serialized in-place elsewhere.
value->assign(
reinterpret_cast<const char *>(builder_.GetCurrentBufferPointer()),
struct_def.bytesize);
builder_.PopBytes(struct_def.bytesize);
FLATBUFFERS_ASSERT(!ovalue);
} else {
auto val = builder_.EndTable(start);
if (ovalue) *ovalue = val;
if (value) *value = NumToString(val);
}
return NoError();
}
CheckedError Parser::ParseVectorDelimiters(size_t &count,
ParseVectorDelimitersBody body,
void *state) {
EXPECT('[');
for (;;) {
if ((!opts.strict_json || !count) && Is(']')) break;
ECHECK(body(count, state));
count++;
if (Is(']')) break;
ECHECK(ParseComma());
}
NEXT();
return NoError();
}
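// Parses a JSON array into a FlatBuffer vector: elements are first collected
// on field_stack_, then pushed into the builder in reverse, since the buffer
// is constructed back to front.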
CheckedError Parser::ParseVector(const Type &type, uoffset_t *ovalue) {
size_t count = 0;
std::pair<Parser *, const Type &> parser_and_type_state(this, type);
auto err = ParseVectorDelimiters(
count,
[](size_t &, void *state) -> CheckedError {
auto *parser_and_type =
static_cast<std::pair<Parser *, const Type &> *>(state);
auto *parser = parser_and_type->first;
Value val;
val.type = parser_and_type->second;
ECHECK(parser->Recurse([&]() {
return parser->ParseAnyValue(val, nullptr, 0, nullptr);
}));
parser->field_stack_.push_back(std::make_pair(val, nullptr));
return NoError();
},
&parser_and_type_state);
ECHECK(err);
builder_.StartVector(count * InlineSize(type) / InlineAlignment(type),
InlineAlignment(type));
for (size_t i = 0; i < count; i++) {
// start at the back, since we're building the data backwards.
auto &val = field_stack_.back().first;
switch (val.type.base_type) {
// clang-format off
#define FLATBUFFERS_TD(ENUM, IDLTYPE, \
CTYPE, JTYPE, GTYPE, NTYPE, PTYPE, RTYPE) \
case BASE_TYPE_ ## ENUM: \
if (IsStruct(val.type)) SerializeStruct(*val.type.struct_def, val); \
else { \
CTYPE elem; \
ECHECK(atot(val.constant.c_str(), *this, &elem)); \
builder_.PushElement(elem); \
} \
break;
FLATBUFFERS_GEN_TYPES(FLATBUFFERS_TD)
#undef FLATBUFFERS_TD
// clang-format on
}
field_stack_.pop_back();
}
builder_.ClearOffsets();
*ovalue = builder_.EndVector(count);
return NoError();
}
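// Handles a field marked with the nested_flatbuffer attribute. Legacy inputs
// may still supply a plain [..] ubyte vector; otherwise the JSON substring is
// parsed by a temporary Parser into its own buffer, which is then embedded as
// an aligned ubyte vector in the outer buffer.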
CheckedError Parser::ParseNestedFlatbuffer(Value &val, FieldDef *field,
size_t fieldn,
const StructDef *parent_struct_def) {
if (token_ == '[') { // backwards compat for 'legacy' ubyte buffers
ECHECK(ParseAnyValue(val, field, fieldn, parent_struct_def));
} else {
auto cursor_at_value_begin = cursor_;
ECHECK(SkipAnyJsonValue());
std::string substring(cursor_at_value_begin - 1, cursor_ - 1);
// Create and initialize new parser
Parser nested_parser;
FLATBUFFERS_ASSERT(field->nested_flatbuffer);
nested_parser.root_struct_def_ = field->nested_flatbuffer;
nested_parser.enums_ = enums_;
nested_parser.opts = opts;
nested_parser.uses_flexbuffers_ = uses_flexbuffers_;
// Parse JSON substring into new flatbuffer builder using nested_parser
if (!nested_parser.Parse(substring.c_str(), nullptr, nullptr)) {
ECHECK(Error(nested_parser.error_));
}
// Force alignment for nested flatbuffer
builder_.ForceVectorAlignment(nested_parser.builder_.GetSize(), sizeof(uint8_t),
nested_parser.builder_.GetBufferMinAlignment());
auto off = builder_.CreateVector(nested_parser.builder_.GetBufferPointer(),
nested_parser.builder_.GetSize());
val.constant = NumToString(off.o);
// Clean nested_parser before destruction to avoid deleting the elements in
// the SymbolTables
nested_parser.enums_.dict.clear();
nested_parser.enums_.vec.clear();
}
return NoError();
}
CheckedError Parser::ParseMetaData(SymbolTable<Value> *attributes) {
if (Is('(')) {
NEXT();
for (;;) {
auto name = attribute_;
      if (!(Is(kTokenIdentifier) || Is(kTokenStringConstant)))
return Error("attribute name must be either identifier or string: " +
name);
if (known_attributes_.find(name) == known_attributes_.end())
return Error("user define attributes must be declared before use: " +
name);
NEXT();
auto e = new Value();
attributes->Add(name, e);
if (Is(':')) {
NEXT();
ECHECK(ParseSingleValue(&name, *e));
}
if (Is(')')) {
NEXT();
break;
}
EXPECT(',');
}
}
return NoError();
}
CheckedError Parser::TryTypedValue(const std::string *name, int dtoken, bool check, Value &e,
BaseType req, bool *destmatch) {
bool match = dtoken == token_;
if (match) {
*destmatch = true;
e.constant = attribute_;
if (!check) {
if (e.type.base_type == BASE_TYPE_NONE) {
e.type.base_type = req;
} else {
return Error(std::string("type mismatch: expecting: ") +
kTypeNames[e.type.base_type] +
", found: " + kTypeNames[req] +
", name: " + (name ? *name : "") +
", value: " + e.constant);
}
}
NEXT();
}
return NoError();
}
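// Parses one or more space-separated enum identifiers (qualified as
// "Enum.Value" when the field itself has no enum type) and ORs their values
// into *result, so multiple flag values can be combined.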
CheckedError Parser::ParseEnumFromString(Type &type, int64_t *result) {
*result = 0;
// Parse one or more enum identifiers, separated by spaces.
const char *next = attribute_.c_str();
do {
const char *divider = strchr(next, ' ');
std::string word;
if (divider) {
word = std::string(next, divider);
next = divider + strspn(divider, " ");
} else {
word = next;
next += word.length();
}
if (type.enum_def) { // The field has an enum type
auto enum_val = type.enum_def->vals.Lookup(word);
if (!enum_val)
return Error("unknown enum value: " + word +
", for enum: " + type.enum_def->name);
*result |= enum_val->value;
} else { // No enum type, probably integral field.
if (!IsInteger(type.base_type))
return Error("not a valid value for this field: " + word);
      // TODO: could check if it's a valid number constant here.
const char *dot = strrchr(word.c_str(), '.');
if (!dot)
return Error("enum values need to be qualified by an enum type");
std::string enum_def_str(word.c_str(), dot);
std::string enum_val_str(dot + 1, word.c_str() + word.length());
auto enum_def = LookupEnum(enum_def_str);
if (!enum_def) return Error("unknown enum: " + enum_def_str);
auto enum_val = enum_def->vals.Lookup(enum_val_str);
if (!enum_val) return Error("unknown enum value: " + enum_val_str);
*result |= enum_val->value;
}
} while (*next);
return NoError();
}
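// Computes the value of a field carrying a "hash" attribute: the identifier
// or string in attribute_ is run through the hash function named by the
// attribute, and the result becomes the field's constant.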
CheckedError Parser::ParseHash(Value &e, FieldDef *field) {
FLATBUFFERS_ASSERT(field);
Value *hash_name = field->attributes.Lookup("hash");
switch (e.type.base_type) {
case BASE_TYPE_SHORT: {
auto hash = FindHashFunction16(hash_name->constant.c_str());
int16_t hashed_value = static_cast<int16_t>(hash(attribute_.c_str()));
e.constant = NumToString(hashed_value);
break;
}
case BASE_TYPE_USHORT: {
auto hash = FindHashFunction16(hash_name->constant.c_str());
uint16_t hashed_value = hash(attribute_.c_str());
e.constant = NumToString(hashed_value);
break;
}
case BASE_TYPE_INT: {
auto hash = FindHashFunction32(hash_name->constant.c_str());
int32_t hashed_value = static_cast<int32_t>(hash(attribute_.c_str()));
e.constant = NumToString(hashed_value);
break;
}
case BASE_TYPE_UINT: {
auto hash = FindHashFunction32(hash_name->constant.c_str());
uint32_t hashed_value = hash(attribute_.c_str());
e.constant = NumToString(hashed_value);
break;
}
case BASE_TYPE_LONG: {
auto hash = FindHashFunction64(hash_name->constant.c_str());
int64_t hashed_value = static_cast<int64_t>(hash(attribute_.c_str()));
e.constant = NumToString(hashed_value);
break;
}
case BASE_TYPE_ULONG: {
auto hash = FindHashFunction64(hash_name->constant.c_str());
uint64_t hashed_value = hash(attribute_.c_str());
e.constant = NumToString(hashed_value);
break;
}
default: FLATBUFFERS_ASSERT(0);
}
NEXT();
return NoError();
}
CheckedError Parser::TokenError() {
return Error("cannot parse value starting with: " + TokenToStringId(token_));
}
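// Parses a single scalar or string value. Also supports conversion functions
// such as deg(x) or sin(x), enum values given as identifiers or strings, and
// numeric constants quoted as strings.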
CheckedError Parser::ParseSingleValue(const std::string *name, Value &e) {
// First see if this could be a conversion function:
if (token_ == kTokenIdentifier && *cursor_ == '(') {
auto functionname = attribute_;
NEXT();
EXPECT('(');
ECHECK(ParseSingleValue(name, e));
EXPECT(')');
// clang-format off
#define FLATBUFFERS_FN_DOUBLE(name, op) \
if (functionname == name) { \
auto x = strtod(e.constant.c_str(), nullptr); \
e.constant = NumToString(op); \
}
FLATBUFFERS_FN_DOUBLE("deg", x / kPi * 180);
FLATBUFFERS_FN_DOUBLE("rad", x * kPi / 180);
FLATBUFFERS_FN_DOUBLE("sin", sin(x));
FLATBUFFERS_FN_DOUBLE("cos", cos(x));
FLATBUFFERS_FN_DOUBLE("tan", tan(x));
FLATBUFFERS_FN_DOUBLE("asin", asin(x));
FLATBUFFERS_FN_DOUBLE("acos", acos(x));
FLATBUFFERS_FN_DOUBLE("atan", atan(x));
// TODO(wvo): add more useful conversion functions here.
#undef FLATBUFFERS_FN_DOUBLE
// clang-format on
// Then check if this could be a string/identifier enum value:
} else if (e.type.base_type != BASE_TYPE_STRING &&
e.type.base_type != BASE_TYPE_BOOL &&
e.type.base_type != BASE_TYPE_NONE &&
(token_ == kTokenIdentifier || token_ == kTokenStringConstant)) {
if (IsIdentifierStart(attribute_[0])) { // Enum value.
int64_t val;
ECHECK(ParseEnumFromString(e.type, &val));
e.constant = NumToString(val);
NEXT();
} else { // Numeric constant in string.
if (IsInteger(e.type.base_type)) {
char *end;
e.constant = NumToString(StringToInt(attribute_.c_str(), &end));
if (*end) return Error("invalid integer: " + attribute_);
} else if (IsFloat(e.type.base_type)) {
char *end;
e.constant = NumToString(strtod(attribute_.c_str(), &end));
if (*end) return Error("invalid float: " + attribute_);
} else {
FLATBUFFERS_ASSERT(0); // Shouldn't happen, we covered all types.
e.constant = "0";
}
NEXT();
}
} else {
bool match = false;
ECHECK(TryTypedValue(name, kTokenIntegerConstant, IsScalar(e.type.base_type), e,
BASE_TYPE_INT, &match));
ECHECK(TryTypedValue(name, kTokenFloatConstant, IsFloat(e.type.base_type), e,
BASE_TYPE_FLOAT, &match));
ECHECK(TryTypedValue(name, kTokenStringConstant,
e.type.base_type == BASE_TYPE_STRING, e,
BASE_TYPE_STRING, &match));
auto istrue = IsIdent("true");
if (istrue || IsIdent("false")) {
attribute_ = NumToString(istrue);
ECHECK(TryTypedValue(name, kTokenIdentifier, IsBool(e.type.base_type), e,
BASE_TYPE_BOOL, &match));
}
if (!match) return TokenError();
}
return NoError();
}
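// Looks up a struct/table by (possibly unqualified) name, optionally creating
// a "pre-declared" placeholder so forward and circular references can be
// resolved at the end of parsing.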
StructDef *Parser::LookupCreateStruct(const std::string &name,
bool create_if_new, bool definition) {
std::string qualified_name = current_namespace_->GetFullyQualifiedName(name);
// See if it exists pre-declared by an unqualified use.
auto struct_def = LookupStruct(name);
if (struct_def && struct_def->predecl) {
if (definition) {
// Make sure it has the current namespace, and is registered under its
// qualified name.
struct_def->defined_namespace = current_namespace_;
structs_.Move(name, qualified_name);
}
return struct_def;
}
  // See if it exists pre-declared by a qualified use.
struct_def = LookupStruct(qualified_name);
if (struct_def && struct_def->predecl) {
if (definition) {
// Make sure it has the current namespace.
struct_def->defined_namespace = current_namespace_;
}
return struct_def;
}
if (!definition) {
    // Search through parent namespaces.
for (size_t components = current_namespace_->components.size();
components && !struct_def; components--) {
struct_def = LookupStruct(
current_namespace_->GetFullyQualifiedName(name, components - 1));
}
}
if (!struct_def && create_if_new) {
struct_def = new StructDef();
if (definition) {
structs_.Add(qualified_name, struct_def);
struct_def->name = name;
struct_def->defined_namespace = current_namespace_;
} else {
// Not a definition.
// Rather than failing, we create a "pre declared" StructDef, due to
// circular references, and check for errors at the end of parsing.
      // It is defined in the current namespace, as the best guess of what
      // the final namespace will be.
structs_.Add(name, struct_def);
struct_def->name = name;
struct_def->defined_namespace = current_namespace_;
struct_def->original_location.reset(
new std::string(file_being_parsed_ + ":" + NumToString(line_)));
}
}
return struct_def;
}
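// Parses an enum or union declaration, including the underlying integer type,
// explicit values (which must be ascending), union member types and the
// bit_flags attribute.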
CheckedError Parser::ParseEnum(bool is_union, EnumDef **dest) {
std::vector<std::string> enum_comment = doc_comment_;
NEXT();
std::string enum_name = attribute_;
EXPECT(kTokenIdentifier);
EnumDef *enum_def;
ECHECK(StartEnum(enum_name, is_union, &enum_def));
enum_def->doc_comment = enum_comment;
if (!is_union && !opts.proto_mode) {
// Give specialized error message, since this type spec used to
// be optional in the first FlatBuffers release.
if (!Is(':')) {
return Error(
"must specify the underlying integer type for this"
" enum (e.g. \': short\', which was the default).");
} else {
NEXT();
}
// Specify the integer type underlying this enum.
ECHECK(ParseType(enum_def->underlying_type));
if (!IsInteger(enum_def->underlying_type.base_type))
return Error("underlying enum type must be integral");
// Make this type refer back to the enum it was derived from.
enum_def->underlying_type.enum_def = enum_def;
}
ECHECK(ParseMetaData(&enum_def->attributes));
EXPECT('{');
if (is_union) enum_def->vals.Add("NONE", new EnumVal("NONE", 0));
for (;;) {
if (opts.proto_mode && attribute_ == "option") {
ECHECK(ParseProtoOption());
} else {
auto value_name = attribute_;
auto full_name = value_name;
std::vector<std::string> value_comment = doc_comment_;
EXPECT(kTokenIdentifier);
if (is_union) {
ECHECK(ParseNamespacing(&full_name, &value_name));
if (opts.union_value_namespacing) {
// Since we can't namespace the actual enum identifiers, turn
// namespace parts into part of the identifier.
value_name = full_name;
std::replace(value_name.begin(), value_name.end(), '.', '_');
}
}
auto prevsize = enum_def->vals.vec.size();
auto value = !enum_def->vals.vec.empty()
? enum_def->vals.vec.back()->value + 1
: 0;
auto &ev = *new EnumVal(value_name, value);
if (enum_def->vals.Add(value_name, &ev))
return Error("enum value already exists: " + value_name);
ev.doc_comment = value_comment;
if (is_union) {
if (Is(':')) {
NEXT();
ECHECK(ParseType(ev.union_type));
if (ev.union_type.base_type != BASE_TYPE_STRUCT &&
ev.union_type.base_type != BASE_TYPE_STRING)
return Error("union value type may only be table/struct/string");
enum_def->uses_type_aliases = true;
} else {
ev.union_type = Type(BASE_TYPE_STRUCT, LookupCreateStruct(full_name));
}
}
if (Is('=')) {
NEXT();
ev.value = StringToInt(attribute_.c_str());
EXPECT(kTokenIntegerConstant);
if (!opts.proto_mode && prevsize &&
enum_def->vals.vec[prevsize - 1]->value >= ev.value)
return Error("enum values must be specified in ascending order");
}
if (is_union) {
if (ev.value < 0 || ev.value >= 256)
return Error("union enum value must fit in a ubyte");
}
if (opts.proto_mode && Is('[')) {
NEXT();
// ignore attributes on enums.
while (token_ != ']') NEXT();
NEXT();
}
}
if (!Is(opts.proto_mode ? ';' : ',')) break;
NEXT();
if (Is('}')) break;
}
EXPECT('}');
if (enum_def->attributes.Lookup("bit_flags")) {
for (auto it = enum_def->vals.vec.begin(); it != enum_def->vals.vec.end();
++it) {
if (static_cast<size_t>((*it)->value) >=
SizeOf(enum_def->underlying_type.base_type) * 8)
return Error("bit flag out of range of underlying integral type");
(*it)->value = 1LL << (*it)->value;
}
}
if (dest) *dest = enum_def;
types_.Add(current_namespace_->GetFullyQualifiedName(enum_def->name),
new Type(BASE_TYPE_UNION, nullptr, enum_def));
return NoError();
}
CheckedError Parser::StartStruct(const std::string &name, StructDef **dest) {
auto &struct_def = *LookupCreateStruct(name, true, true);
if (!struct_def.predecl) return Error("datatype already exists: " + name);
struct_def.predecl = false;
struct_def.name = name;
struct_def.file = file_being_parsed_;
// Move this struct to the back of the vector just in case it was predeclared,
// to preserve declaration order.
*std::remove(structs_.vec.begin(), structs_.vec.end(), &struct_def) =
&struct_def;
*dest = &struct_def;
return NoError();
}
CheckedError Parser::CheckClash(std::vector<FieldDef *> &fields,
StructDef *struct_def, const char *suffix,
BaseType basetype) {
auto len = strlen(suffix);
for (auto it = fields.begin(); it != fields.end(); ++it) {
auto &fname = (*it)->name;
if (fname.length() > len &&
fname.compare(fname.length() - len, len, suffix) == 0 &&
(*it)->value.type.base_type != BASE_TYPE_UTYPE) {
auto field =
struct_def->fields.Lookup(fname.substr(0, fname.length() - len));
if (field && field->value.type.base_type == basetype)
return Error("Field " + fname +
" would clash with generated functions for field " +
field->name);
}
}
return NoError();
}
bool Parser::SupportsVectorOfUnions() const {
return opts.lang_to_generate != 0 &&
(opts.lang_to_generate & ~(IDLOptions::kCpp | IDLOptions::kJs |
IDLOptions::kTs | IDLOptions::kPhp |
IDLOptions::kJava | IDLOptions::kCSharp)) == 0;
}
Namespace *Parser::UniqueNamespace(Namespace *ns) {
for (auto it = namespaces_.begin(); it != namespaces_.end(); ++it) {
if (ns->components == (*it)->components) {
delete ns;
return *it;
}
}
namespaces_.push_back(ns);
return ns;
}
static bool compareFieldDefs(const FieldDef *a, const FieldDef *b) {
auto a_id = atoi(a->attributes.Lookup("id")->constant.c_str());
auto b_id = atoi(b->attributes.Lookup("id")->constant.c_str());
return a_id < b_id;
}
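// Parses a table or struct declaration: fields, attributes such as
// force_align and original_order, manual 'id' assignments (which must cover
// all fields and be consecutive from 0), and name-clash checks against
// generated accessors.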
CheckedError Parser::ParseDecl() {
std::vector<std::string> dc = doc_comment_;
bool fixed = IsIdent("struct");
if (!fixed && !IsIdent("table")) return Error("declaration expected");
NEXT();
std::string name = attribute_;
EXPECT(kTokenIdentifier);
StructDef *struct_def;
ECHECK(StartStruct(name, &struct_def));
struct_def->doc_comment = dc;
struct_def->fixed = fixed;
ECHECK(ParseMetaData(&struct_def->attributes));
struct_def->sortbysize =
struct_def->attributes.Lookup("original_order") == nullptr && !fixed;
EXPECT('{');
while (token_ != '}') ECHECK(ParseField(*struct_def));
auto force_align = struct_def->attributes.Lookup("force_align");
if (fixed && force_align) {
auto align = static_cast<size_t>(atoi(force_align->constant.c_str()));
if (force_align->type.base_type != BASE_TYPE_INT ||
align < struct_def->minalign || align > FLATBUFFERS_MAX_ALIGNMENT ||
align & (align - 1))
return Error(
"force_align must be a power of two integer ranging from the"
"struct\'s natural alignment to " +
NumToString(FLATBUFFERS_MAX_ALIGNMENT));
struct_def->minalign = align;
}
struct_def->PadLastField(struct_def->minalign);
// Check if this is a table that has manual id assignments
auto &fields = struct_def->fields.vec;
if (!struct_def->fixed && fields.size()) {
size_t num_id_fields = 0;
for (auto it = fields.begin(); it != fields.end(); ++it) {
if ((*it)->attributes.Lookup("id")) num_id_fields++;
}
// If any fields have ids..
if (num_id_fields) {
// Then all fields must have them.
if (num_id_fields != fields.size())
return Error(
"either all fields or no fields must have an 'id' attribute");
// Simply sort by id, then the fields are the same as if no ids had
// been specified.
std::sort(fields.begin(), fields.end(), compareFieldDefs);
// Verify we have a contiguous set, and reassign vtable offsets.
for (int i = 0; i < static_cast<int>(fields.size()); i++) {
if (i != atoi(fields[i]->attributes.Lookup("id")->constant.c_str()))
return Error("field id\'s must be consecutive from 0, id " +
NumToString(i) + " missing or set twice");
fields[i]->value.offset = FieldIndexToOffset(static_cast<voffset_t>(i));
}
}
}
ECHECK(
CheckClash(fields, struct_def, UnionTypeFieldSuffix(), BASE_TYPE_UNION));
ECHECK(CheckClash(fields, struct_def, "Type", BASE_TYPE_UNION));
ECHECK(CheckClash(fields, struct_def, "_length", BASE_TYPE_VECTOR));
ECHECK(CheckClash(fields, struct_def, "Length", BASE_TYPE_VECTOR));
ECHECK(CheckClash(fields, struct_def, "_byte_vector", BASE_TYPE_STRING));
ECHECK(CheckClash(fields, struct_def, "ByteVector", BASE_TYPE_STRING));
EXPECT('}');
types_.Add(current_namespace_->GetFullyQualifiedName(struct_def->name),
new Type(BASE_TYPE_STRUCT, struct_def, nullptr));
return NoError();
}
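// Parses an rpc_service declaration; each call maps a request type to a
// response type, and both must be tables (not structs).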
CheckedError Parser::ParseService() {
std::vector<std::string> service_comment = doc_comment_;
NEXT();
auto service_name = attribute_;
EXPECT(kTokenIdentifier);
auto &service_def = *new ServiceDef();
service_def.name = service_name;
service_def.file = file_being_parsed_;
service_def.doc_comment = service_comment;
service_def.defined_namespace = current_namespace_;
if (services_.Add(current_namespace_->GetFullyQualifiedName(service_name),
&service_def))
return Error("service already exists: " + service_name);
ECHECK(ParseMetaData(&service_def.attributes));
EXPECT('{');
do {
std::vector<std::string> doc_comment = doc_comment_;
auto rpc_name = attribute_;
EXPECT(kTokenIdentifier);
EXPECT('(');
Type reqtype, resptype;
ECHECK(ParseTypeIdent(reqtype));
EXPECT(')');
EXPECT(':');
ECHECK(ParseTypeIdent(resptype));
if (reqtype.base_type != BASE_TYPE_STRUCT || reqtype.struct_def->fixed ||
resptype.base_type != BASE_TYPE_STRUCT || resptype.struct_def->fixed)
return Error("rpc request and response types must be tables");
auto &rpc = *new RPCCall();
rpc.name = rpc_name;
rpc.request = reqtype.struct_def;
rpc.response = resptype.struct_def;
rpc.doc_comment = doc_comment;
if (service_def.calls.Add(rpc_name, &rpc))
return Error("rpc already exists: " + rpc_name);
ECHECK(ParseMetaData(&rpc.attributes));
EXPECT(';');
} while (token_ != '}');
NEXT();
return NoError();
}
bool Parser::SetRootType(const char *name) {
root_struct_def_ = LookupStruct(name);
if (!root_struct_def_)
root_struct_def_ =
LookupStruct(current_namespace_->GetFullyQualifiedName(name));
return root_struct_def_ != nullptr;
}
void Parser::MarkGenerated() {
  // This function marks all existing definitions as having already been
  // generated, signaling that no code should be generated for included
  // files.
for (auto it = enums_.vec.begin(); it != enums_.vec.end(); ++it) {
(*it)->generated = true;
}
for (auto it = structs_.vec.begin(); it != structs_.vec.end(); ++it) {
if (!(*it)->predecl) { (*it)->generated = true; }
}
for (auto it = services_.vec.begin(); it != services_.vec.end(); ++it) {
(*it)->generated = true;
}
}
CheckedError Parser::ParseNamespace() {
NEXT();
auto ns = new Namespace();
namespaces_.push_back(ns); // Store it here to not leak upon error.
if (token_ != ';') {
for (;;) {
ns->components.push_back(attribute_);
EXPECT(kTokenIdentifier);
if (Is('.')) NEXT() else break;
}
}
namespaces_.pop_back();
current_namespace_ = UniqueNamespace(ns);
EXPECT(';');
return NoError();
}
static bool compareEnumVals(const EnumVal *a, const EnumVal *b) {
return a->value < b->value;
}
// Best-effort parsing of .proto declarations, with the aim of turning them
// into the closest corresponding FlatBuffers equivalent.
// We parse everything as identifiers instead of keywords, since we don't
// want protobuf keywords to become invalid identifiers in FlatBuffers.
CheckedError Parser::ParseProtoDecl() {
bool isextend = IsIdent("extend");
if (IsIdent("package")) {
// These are identical in syntax to FlatBuffer's namespace decl.
ECHECK(ParseNamespace());
} else if (IsIdent("message") || isextend) {
std::vector<std::string> struct_comment = doc_comment_;
NEXT();
StructDef *struct_def = nullptr;
Namespace *parent_namespace = nullptr;
if (isextend) {
if (Is('.')) NEXT(); // qualified names may start with a . ?
auto id = attribute_;
EXPECT(kTokenIdentifier);
ECHECK(ParseNamespacing(&id, nullptr));
struct_def = LookupCreateStruct(id, false);
if (!struct_def)
return Error("cannot extend unknown message type: " + id);
} else {
std::string name = attribute_;
EXPECT(kTokenIdentifier);
ECHECK(StartStruct(name, &struct_def));
// Since message definitions can be nested, we create a new namespace.
auto ns = new Namespace();
// Copy of current namespace.
*ns = *current_namespace_;
// But with current message name.
ns->components.push_back(name);
ns->from_table++;
parent_namespace = current_namespace_;
current_namespace_ = UniqueNamespace(ns);
}
struct_def->doc_comment = struct_comment;
ECHECK(ParseProtoFields(struct_def, isextend, false));
if (!isextend) { current_namespace_ = parent_namespace; }
if (Is(';')) NEXT();
} else if (IsIdent("enum")) {
// These are almost the same, just with different terminator:
EnumDef *enum_def;
ECHECK(ParseEnum(false, &enum_def));
if (Is(';')) NEXT();
// Protobuf allows them to be specified in any order, so sort afterwards.
auto &v = enum_def->vals.vec;
std::sort(v.begin(), v.end(), compareEnumVals);
// Temp: remove any duplicates, as .fbs files can't handle them.
for (auto it = v.begin(); it != v.end();) {
if (it != v.begin() && it[0]->value == it[-1]->value)
it = v.erase(it);
else
++it;
}
} else if (IsIdent("syntax")) { // Skip these.
NEXT();
EXPECT('=');
EXPECT(kTokenStringConstant);
EXPECT(';');
} else if (IsIdent("option")) { // Skip these.
ECHECK(ParseProtoOption());
EXPECT(';');
} else if (IsIdent("service")) { // Skip these.
NEXT();
EXPECT(kTokenIdentifier);
ECHECK(ParseProtoCurliesOrIdent());
} else {
return Error("don\'t know how to parse .proto declaration starting with " +
TokenToStringId(token_));
}
return NoError();
}
CheckedError Parser::StartEnum(const std::string &enum_name, bool is_union,
EnumDef **dest) {
auto &enum_def = *new EnumDef();
enum_def.name = enum_name;
enum_def.file = file_being_parsed_;
enum_def.doc_comment = doc_comment_;
enum_def.is_union = is_union;
enum_def.defined_namespace = current_namespace_;
if (enums_.Add(current_namespace_->GetFullyQualifiedName(enum_name),
&enum_def))
return Error("enum already exists: " + enum_name);
enum_def.underlying_type.base_type = is_union ? BASE_TYPE_UTYPE
: BASE_TYPE_INT;
enum_def.underlying_type.enum_def = &enum_def;
if (dest) *dest = &enum_def;
return NoError();
}
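// Parses the body of a .proto message: nested declarations, field qualifiers
// (optional/required/repeated/oneof), groups and oneofs (mapped to anonymous
// structs or unions), and field options such as default and deprecated.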
CheckedError Parser::ParseProtoFields(StructDef *struct_def, bool isextend,
bool inside_oneof) {
EXPECT('{');
while (token_ != '}') {
if (IsIdent("message") || IsIdent("extend") || IsIdent("enum")) {
// Nested declarations.
ECHECK(ParseProtoDecl());
} else if (IsIdent("extensions")) { // Skip these.
NEXT();
EXPECT(kTokenIntegerConstant);
if (Is(kTokenIdentifier)) {
NEXT(); // to
NEXT(); // num
}
EXPECT(';');
} else if (IsIdent("option")) { // Skip these.
ECHECK(ParseProtoOption());
EXPECT(';');
} else if (IsIdent("reserved")) { // Skip these.
NEXT();
while (!Is(';')) { NEXT(); } // A variety of formats, just skip.
NEXT();
} else {
std::vector<std::string> field_comment = doc_comment_;
// Parse the qualifier.
bool required = false;
bool repeated = false;
bool oneof = false;
if (!inside_oneof) {
if (IsIdent("optional")) {
// This is the default.
NEXT();
} else if (IsIdent("required")) {
required = true;
NEXT();
} else if (IsIdent("repeated")) {
repeated = true;
NEXT();
} else if (IsIdent("oneof")) {
oneof = true;
NEXT();
} else {
// can't error, proto3 allows decls without any of the above.
}
}
StructDef *anonymous_struct = nullptr;
EnumDef *oneof_union = nullptr;
Type type;
if (IsIdent("group") || oneof) {
if (!oneof) NEXT();
if (oneof && opts.proto_oneof_union) {
auto name = MakeCamel(attribute_, true) + "Union";
ECHECK(StartEnum(name, true, &oneof_union));
type = Type(BASE_TYPE_UNION, nullptr, oneof_union);
} else {
auto name = "Anonymous" + NumToString(anonymous_counter++);
ECHECK(StartStruct(name, &anonymous_struct));
type = Type(BASE_TYPE_STRUCT, anonymous_struct);
}
} else {
ECHECK(ParseTypeFromProtoType(&type));
}
// Repeated elements get mapped to a vector.
if (repeated) {
type.element = type.base_type;
type.base_type = BASE_TYPE_VECTOR;
if (type.element == BASE_TYPE_VECTOR) {
          // We have a vector of vectors, which FlatBuffers doesn't support.
// For now make it a vector of string (since the source is likely
// "repeated bytes").
// TODO(wvo): A better solution would be to wrap this in a table.
type.element = BASE_TYPE_STRING;
}
}
std::string name = attribute_;
EXPECT(kTokenIdentifier);
if (!oneof) {
        // Parse the field id. Since we're just translating schemas, not
        // maintaining any kind of binary compatibility, we can safely ignore
        // these and assign our own.
EXPECT('=');
EXPECT(kTokenIntegerConstant);
}
FieldDef *field = nullptr;
if (isextend) {
// We allow a field to be re-defined when extending.
// TODO: are there situations where that is problematic?
field = struct_def->fields.Lookup(name);
}
if (!field) ECHECK(AddField(*struct_def, name, type, &field));
field->doc_comment = field_comment;
if (!IsScalar(type.base_type)) field->required = required;
// See if there's a default specified.
if (Is('[')) {
NEXT();
for (;;) {
auto key = attribute_;
ECHECK(ParseProtoKey());
EXPECT('=');
auto val = attribute_;
ECHECK(ParseProtoCurliesOrIdent());
if (key == "default") {
// Temp: skip non-numeric defaults (enums).
auto numeric = strpbrk(val.c_str(), "0123456789-+.");
if (IsScalar(type.base_type) && numeric == val.c_str())
field->value.constant = val;
} else if (key == "deprecated") {
field->deprecated = val == "true";
}
if (!Is(',')) break;
NEXT();
}
EXPECT(']');
}
if (anonymous_struct) {
ECHECK(ParseProtoFields(anonymous_struct, false, oneof));
if (Is(';')) NEXT();
} else if (oneof_union) {
// Parse into a temporary StructDef, then transfer fields into an
// EnumDef describing the oneof as a union.
StructDef oneof_struct;
ECHECK(ParseProtoFields(&oneof_struct, false, oneof));
if (Is(';')) NEXT();
for (auto field_it = oneof_struct.fields.vec.begin();
field_it != oneof_struct.fields.vec.end(); ++field_it) {
const auto &oneof_field = **field_it;
const auto &oneof_type = oneof_field.value.type;
if (oneof_type.base_type != BASE_TYPE_STRUCT ||
!oneof_type.struct_def || oneof_type.struct_def->fixed)
return Error("oneof '" + name +
"' cannot be mapped to a union because member '" +
oneof_field.name + "' is not a table type.");
auto enum_val = new EnumVal(oneof_type.struct_def->name,
oneof_union->vals.vec.size());
enum_val->union_type = oneof_type;
enum_val->doc_comment = oneof_field.doc_comment;
oneof_union->vals.Add(oneof_field.name, enum_val);
}
} else {
EXPECT(';');
}
}
}
NEXT();
return NoError();
}
CheckedError Parser::ParseProtoKey() {
if (token_ == '(') {
NEXT();
// Skip "(a.b)" style custom attributes.
while (token_ == '.' || token_ == kTokenIdentifier) NEXT();
EXPECT(')');
while (Is('.')) {
NEXT();
EXPECT(kTokenIdentifier);
}
} else {
EXPECT(kTokenIdentifier);
}
return NoError();
}
CheckedError Parser::ParseProtoCurliesOrIdent() {
if (Is('{')) {
NEXT();
for (int nesting = 1; nesting;) {
if (token_ == '{')
nesting++;
else if (token_ == '}')
nesting--;
NEXT();
}
} else {
NEXT(); // Any single token.
}
return NoError();
}
CheckedError Parser::ParseProtoOption() {
NEXT();
ECHECK(ParseProtoKey());
EXPECT('=');
ECHECK(ParseProtoCurliesOrIdent());
return NoError();
}
// Parse a protobuf type, and map it to the corresponding FlatBuffer one.
CheckedError Parser::ParseTypeFromProtoType(Type *type) {
struct type_lookup {
const char *proto_type;
BaseType fb_type, element;
};
static type_lookup lookup[] = {
{ "float", BASE_TYPE_FLOAT, BASE_TYPE_NONE },
{ "double", BASE_TYPE_DOUBLE, BASE_TYPE_NONE },
{ "int32", BASE_TYPE_INT, BASE_TYPE_NONE },
{ "int64", BASE_TYPE_LONG, BASE_TYPE_NONE },
{ "uint32", BASE_TYPE_UINT, BASE_TYPE_NONE },
{ "uint64", BASE_TYPE_ULONG, BASE_TYPE_NONE },
{ "sint32", BASE_TYPE_INT, BASE_TYPE_NONE },
{ "sint64", BASE_TYPE_LONG, BASE_TYPE_NONE },
{ "fixed32", BASE_TYPE_UINT, BASE_TYPE_NONE },
{ "fixed64", BASE_TYPE_ULONG, BASE_TYPE_NONE },
{ "sfixed32", BASE_TYPE_INT, BASE_TYPE_NONE },
{ "sfixed64", BASE_TYPE_LONG, BASE_TYPE_NONE },
{ "bool", BASE_TYPE_BOOL, BASE_TYPE_NONE },
{ "string", BASE_TYPE_STRING, BASE_TYPE_NONE },
{ "bytes", BASE_TYPE_VECTOR, BASE_TYPE_UCHAR },
{ nullptr, BASE_TYPE_NONE, BASE_TYPE_NONE }
};
for (auto tl = lookup; tl->proto_type; tl++) {
if (attribute_ == tl->proto_type) {
type->base_type = tl->fb_type;
type->element = tl->element;
NEXT();
return NoError();
}
}
if (Is('.')) NEXT(); // qualified names may start with a . ?
ECHECK(ParseTypeIdent(*type));
return NoError();
}
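// Skips over any JSON value (object, array, scalar, string, bool or null)
// without building anything; used, e.g., to scan past a union value to find
// its type field, or to ignore unexpected fields.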
CheckedError Parser::SkipAnyJsonValue() {
switch (token_) {
case '{': {
size_t fieldn_outer = 0;
return ParseTableDelimiters(
fieldn_outer, nullptr,
[](const std::string &, size_t &fieldn, const StructDef *,
void *state) -> CheckedError {
auto *parser = static_cast<Parser *>(state);
ECHECK(parser->Recurse([&]() {
return parser->SkipAnyJsonValue();
}));
fieldn++;
return NoError();
},
this);
}
case '[': {
size_t count = 0;
return ParseVectorDelimiters(
count,
[](size_t &, void *state) -> CheckedError {
auto *parser = static_cast<Parser *>(state);
return parser->Recurse([&]() {
return parser->SkipAnyJsonValue();
});
},
this);
}
case kTokenStringConstant:
case kTokenIntegerConstant:
case kTokenFloatConstant: NEXT(); break;
default:
if (IsIdent("true") || IsIdent("false") || IsIdent("null")) {
NEXT();
} else
return TokenError();
}
return NoError();
}
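// Parses a JSON value directly into a FlexBuffers builder; used for fields
// marked with the flexbuffer attribute.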
CheckedError Parser::ParseFlexBufferValue(flexbuffers::Builder *builder) {
switch (token_) {
case '{': {
std::pair<Parser *, flexbuffers::Builder *> parser_and_builder_state(
this, builder);
auto start = builder->StartMap();
size_t fieldn_outer = 0;
auto err = ParseTableDelimiters(
fieldn_outer, nullptr,
[](const std::string &name, size_t &fieldn, const StructDef *,
void *state) -> CheckedError {
auto *parser_and_builder =
static_cast<std::pair<Parser *, flexbuffers::Builder *> *>(
state);
auto *parser = parser_and_builder->first;
auto *current_builder = parser_and_builder->second;
current_builder->Key(name);
ECHECK(parser->ParseFlexBufferValue(current_builder));
fieldn++;
return NoError();
},
&parser_and_builder_state);
ECHECK(err);
builder->EndMap(start);
break;
}
case '[': {
auto start = builder->StartVector();
size_t count = 0;
std::pair<Parser *, flexbuffers::Builder *> parser_and_builder_state(
this, builder);
ECHECK(ParseVectorDelimiters(
count,
[](size_t &, void *state) -> CheckedError {
auto *parser_and_builder =
static_cast<std::pair<Parser *, flexbuffers::Builder *> *>(
state);
return parser_and_builder->first->ParseFlexBufferValue(
parser_and_builder->second);
},
&parser_and_builder_state));
builder->EndVector(start, false, false);
break;
}
case kTokenStringConstant:
builder->String(attribute_);
EXPECT(kTokenStringConstant);
break;
case kTokenIntegerConstant:
builder->Int(StringToInt(attribute_.c_str()));
EXPECT(kTokenIntegerConstant);
break;
case kTokenFloatConstant:
builder->Double(strtod(attribute_.c_str(), nullptr));
EXPECT(kTokenFloatConstant);
break;
default:
if (IsIdent("true")) {
builder->Bool(true);
NEXT();
} else if (IsIdent("false")) {
builder->Bool(false);
NEXT();
} else if (IsIdent("null")) {
builder->Null();
NEXT();
} else
return TokenError();
}
return NoError();
}
bool Parser::ParseFlexBuffer(const char *source, const char *source_filename,
flexbuffers::Builder *builder) {
auto ok = !StartParseFile(source, source_filename).Check() &&
!ParseFlexBufferValue(builder).Check();
if (ok) builder->Finish();
return ok;
}
bool Parser::Parse(const char *source, const char **include_paths,
const char *source_filename) {
return !ParseRoot(source, include_paths, source_filename).Check();
}
CheckedError Parser::StartParseFile(const char *source,
const char *source_filename) {
file_being_parsed_ = source_filename ? source_filename : "";
source_ = cursor_ = source;
line_ = 1;
error_.clear();
ECHECK(SkipByteOrderMark());
NEXT();
if (Is(kTokenEof)) return Error("input file is empty");
return NoError();
}
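// Top-level entry point: runs DoParse and then performs whole-schema checks,
// such as resolving pre-declared types (mapping them to enums in proto mode)
// and verifying that union members are allowed for the target languages.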
CheckedError Parser::ParseRoot(const char *source, const char **include_paths,
const char *source_filename) {
ECHECK(DoParse(source, include_paths, source_filename, nullptr));
// Check that all types were defined.
for (auto it = structs_.vec.begin(); it != structs_.vec.end();) {
auto &struct_def = **it;
if (struct_def.predecl) {
if (opts.proto_mode) {
// Protos allow enums to be used before declaration, so check if that
// is the case here.
EnumDef *enum_def = nullptr;
for (size_t components =
struct_def.defined_namespace->components.size() + 1;
components && !enum_def; components--) {
auto qualified_name =
struct_def.defined_namespace->GetFullyQualifiedName(
struct_def.name, components - 1);
enum_def = LookupEnum(qualified_name);
}
if (enum_def) {
// This is pretty slow, but a simple solution for now.
auto initial_count = struct_def.refcount;
for (auto struct_it = structs_.vec.begin();
struct_it != structs_.vec.end(); ++struct_it) {
auto &sd = **struct_it;
for (auto field_it = sd.fields.vec.begin();
field_it != sd.fields.vec.end(); ++field_it) {
auto &field = **field_it;
if (field.value.type.struct_def == &struct_def) {
field.value.type.struct_def = nullptr;
field.value.type.enum_def = enum_def;
auto &bt = field.value.type.base_type == BASE_TYPE_VECTOR
? field.value.type.element
: field.value.type.base_type;
FLATBUFFERS_ASSERT(bt == BASE_TYPE_STRUCT);
bt = enum_def->underlying_type.base_type;
struct_def.refcount--;
enum_def->refcount++;
}
}
}
if (struct_def.refcount)
return Error("internal: " + NumToString(struct_def.refcount) + "/" +
NumToString(initial_count) +
" use(s) of pre-declaration enum not accounted for: " +
enum_def->name);
structs_.dict.erase(structs_.dict.find(struct_def.name));
it = structs_.vec.erase(it);
delete &struct_def;
continue; // Skip error.
}
}
auto err = "type referenced but not defined (check namespace): " +
struct_def.name;
if (struct_def.original_location)
err += ", originally at: " + *struct_def.original_location;
return Error(err);
}
++it;
}
// This check has to happen here and not earlier, because only now do we
// know for sure what the type of these are.
for (auto it = enums_.vec.begin(); it != enums_.vec.end(); ++it) {
auto &enum_def = **it;
if (enum_def.is_union) {
for (auto val_it = enum_def.vals.vec.begin();
val_it != enum_def.vals.vec.end(); ++val_it) {
auto &val = **val_it;
if (!SupportsVectorOfUnions() && val.union_type.struct_def &&
val.union_type.struct_def->fixed)
return Error(
"only tables can be union elements in the generated language: " +
val.name);
}
}
}
return NoError();
}
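// Parses a single schema (or JSON) source: processes includes first
// (restarting the file after each newly loaded include), then all other
// declarations such as namespaces, tables, enums, unions, root_type,
// file_identifier, file_extension and attribute declarations.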
CheckedError Parser::DoParse(const char *source, const char **include_paths,
const char *source_filename,
const char *include_filename) {
if (source_filename &&
included_files_.find(source_filename) == included_files_.end()) {
included_files_[source_filename] = include_filename ? include_filename : "";
files_included_per_file_[source_filename] = std::set<std::string>();
}
if (!include_paths) {
static const char *current_directory[] = { "", nullptr };
include_paths = current_directory;
}
field_stack_.clear();
builder_.Clear();
// Start with a blank namespace just in case this file doesn't have one.
current_namespace_ = empty_namespace_;
ECHECK(StartParseFile(source, source_filename));
// Includes must come before type declarations:
for (;;) {
// Parse pre-include proto statements if any:
if (opts.proto_mode && (attribute_ == "option" || attribute_ == "syntax" ||
attribute_ == "package")) {
ECHECK(ParseProtoDecl());
} else if (IsIdent("native_include")) {
NEXT();
vector_emplace_back(&native_included_files_, attribute_);
EXPECT(kTokenStringConstant);
EXPECT(';');
} else if (IsIdent("include") || (opts.proto_mode && IsIdent("import"))) {
NEXT();
if (opts.proto_mode && attribute_ == "public") NEXT();
auto name = flatbuffers::PosixPath(attribute_.c_str());
EXPECT(kTokenStringConstant);
// Look for the file in include_paths.
std::string filepath;
for (auto paths = include_paths; paths && *paths; paths++) {
filepath = flatbuffers::ConCatPathFileName(*paths, name);
if (FileExists(filepath.c_str())) break;
}
if (filepath.empty())
return Error("unable to locate include file: " + name);
if (source_filename)
files_included_per_file_[source_filename].insert(filepath);
if (included_files_.find(filepath) == included_files_.end()) {
// We found an include file that we have not parsed yet.
// Load it and parse it.
std::string contents;
if (!LoadFile(filepath.c_str(), true, &contents))
return Error("unable to load include file: " + name);
ECHECK(DoParse(contents.c_str(), include_paths, filepath.c_str(),
name.c_str()));
// We generally do not want to output code for any included files:
if (!opts.generate_all) MarkGenerated();
// Reset these just in case the included file had them, and the
// parent doesn't.
root_struct_def_ = nullptr;
file_identifier_.clear();
file_extension_.clear();
// This is the easiest way to continue this file after an include:
// instead of saving and restoring all the state, we simply start the
// file anew. This will cause it to encounter the same include
// statement again, but this time it will skip it, because it was
// entered into included_files_.
        // This is recursive, but only goes as deep as the number of include
        // statements.
return DoParse(source, include_paths, source_filename,
include_filename);
}
EXPECT(';');
} else {
break;
}
}
// Now parse all other kinds of declarations:
while (token_ != kTokenEof) {
if (opts.proto_mode) {
ECHECK(ParseProtoDecl());
} else if (IsIdent("namespace")) {
ECHECK(ParseNamespace());
} else if (token_ == '{') {
if (!root_struct_def_)
return Error("no root type set to parse json with");
if (builder_.GetSize()) {
return Error("cannot have more than one json object in a file");
}
uoffset_t toff;
ECHECK(ParseTable(*root_struct_def_, nullptr, &toff));
if (opts.size_prefixed) {
builder_.FinishSizePrefixed(Offset<Table>(toff), file_identifier_.length()
? file_identifier_.c_str()
: nullptr);
} else {
builder_.Finish(Offset<Table>(toff), file_identifier_.length()
? file_identifier_.c_str()
: nullptr);
}
} else if (IsIdent("enum")) {
ECHECK(ParseEnum(false, nullptr));
} else if (IsIdent("union")) {
ECHECK(ParseEnum(true, nullptr));
} else if (IsIdent("root_type")) {
NEXT();
auto root_type = attribute_;
EXPECT(kTokenIdentifier);
ECHECK(ParseNamespacing(&root_type, nullptr));
if (opts.root_type.empty()) {
if (!SetRootType(root_type.c_str()))
return Error("unknown root type: " + root_type);
if (root_struct_def_->fixed)
return Error("root type must be a table");
}
EXPECT(';');
} else if (IsIdent("file_identifier")) {
NEXT();
file_identifier_ = attribute_;
EXPECT(kTokenStringConstant);
if (file_identifier_.length() != FlatBufferBuilder::kFileIdentifierLength)
return Error("file_identifier must be exactly " +
NumToString(FlatBufferBuilder::kFileIdentifierLength) +
" characters");
EXPECT(';');
} else if (IsIdent("file_extension")) {
NEXT();
file_extension_ = attribute_;
EXPECT(kTokenStringConstant);
EXPECT(';');
} else if (IsIdent("include")) {
return Error("includes must come before declarations");
} else if (IsIdent("attribute")) {
NEXT();
auto name = attribute_;
if (Is(kTokenIdentifier)) {
NEXT();
} else {
EXPECT(kTokenStringConstant);
}
EXPECT(';');
known_attributes_[name] = false;
} else if (IsIdent("rpc_service")) {
ECHECK(ParseService());
} else {
ECHECK(ParseDecl());
}
}
return NoError();
}
std::set<std::string> Parser::GetIncludedFilesRecursive(
const std::string &file_name) const {
std::set<std::string> included_files;
std::list<std::string> to_process;
if (file_name.empty()) return included_files;
to_process.push_back(file_name);
while (!to_process.empty()) {
std::string current = to_process.front();
to_process.pop_front();
included_files.insert(current);
// Workaround the lack of const accessor in C++98 maps.
auto &new_files =
(*const_cast<std::map<std::string, std::set<std::string>> *>(
&files_included_per_file_))[current];
for (auto it = new_files.begin(); it != new_files.end(); ++it) {
if (included_files.find(*it) == included_files.end())
to_process.push_back(*it);
}
}
return included_files;
}
// Schema serialization functionality:
template<typename T> bool compareName(const T *a, const T *b) {
return a->defined_namespace->GetFullyQualifiedName(a->name) <
b->defined_namespace->GetFullyQualifiedName(b->name);
}
template<typename T> void AssignIndices(const std::vector<T *> &defvec) {
// Pre-sort these vectors, such that we can set the correct indices for them.
auto vec = defvec;
std::sort(vec.begin(), vec.end(), compareName<T>);
for (int i = 0; i < static_cast<int>(vec.size()); i++) vec[i]->index = i;
}
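// Serializes the parsed schema into a reflection.Schema FlatBuffer, assigning
// stable indices to structs and enums first so type references can point at
// them.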
void Parser::Serialize() {
builder_.Clear();
AssignIndices(structs_.vec);
AssignIndices(enums_.vec);
std::vector<Offset<reflection::Object>> object_offsets;
for (auto it = structs_.vec.begin(); it != structs_.vec.end(); ++it) {
auto offset = (*it)->Serialize(&builder_, *this);
object_offsets.push_back(offset);
(*it)->serialized_location = offset.o;
}
std::vector<Offset<reflection::Enum>> enum_offsets;
for (auto it = enums_.vec.begin(); it != enums_.vec.end(); ++it) {
auto offset = (*it)->Serialize(&builder_, *this);
enum_offsets.push_back(offset);
(*it)->serialized_location = offset.o;
}
std::vector<Offset<reflection::Service>> service_offsets;
for (auto it = services_.vec.begin(); it != services_.vec.end(); ++it) {
auto offset = (*it)->Serialize(&builder_, *this);
service_offsets.push_back(offset);
(*it)->serialized_location = offset.o;
}
auto schema_offset = reflection::CreateSchema(
builder_,
builder_.CreateVectorOfSortedTables(&object_offsets),
builder_.CreateVectorOfSortedTables(&enum_offsets),
builder_.CreateString(file_identifier_),
builder_.CreateString(file_extension_),
(root_struct_def_ ? root_struct_def_->serialized_location : 0),
builder_.CreateVectorOfSortedTables(&service_offsets));
if (opts.size_prefixed) {
builder_.FinishSizePrefixed(schema_offset, reflection::SchemaIdentifier());
} else {
builder_.Finish(schema_offset, reflection::SchemaIdentifier());
}
}
Offset<reflection::Object> StructDef::Serialize(FlatBufferBuilder *builder,
const Parser &parser) const {
std::vector<Offset<reflection::Field>> field_offsets;
for (auto it = fields.vec.begin(); it != fields.vec.end(); ++it) {
field_offsets.push_back((*it)->Serialize(
builder, static_cast<uint16_t>(it - fields.vec.begin()), parser));
}
auto qualified_name = defined_namespace->GetFullyQualifiedName(name);
return reflection::CreateObject(
*builder,
builder->CreateString(qualified_name),
builder->CreateVectorOfSortedTables(&field_offsets),
fixed,
static_cast<int>(minalign),
static_cast<int>(bytesize),
SerializeAttributes(builder, parser),
parser.opts.binary_schema_comments
? builder->CreateVectorOfStrings(doc_comment)
: 0);
}
Offset<reflection::Field> FieldDef::Serialize(FlatBufferBuilder *builder,
uint16_t id,
const Parser &parser) const {
return reflection::CreateField(
*builder, builder->CreateString(name), value.type.Serialize(builder), id,
value.offset,
IsInteger(value.type.base_type) ? StringToInt(value.constant.c_str()) : 0,
IsFloat(value.type.base_type) ? strtod(value.constant.c_str(), nullptr)
: 0.0,
deprecated, required, key, SerializeAttributes(builder, parser),
parser.opts.binary_schema_comments
? builder->CreateVectorOfStrings(doc_comment)
: 0);
// TODO: value.constant is almost always "0", we could save quite a bit of
// space by sharing it. Same for common values of value.type.
}
Offset<reflection::RPCCall> RPCCall::Serialize(FlatBufferBuilder *builder,
const Parser &parser) const {
return reflection::CreateRPCCall(
*builder,
builder->CreateString(name),
request->serialized_location,
response->serialized_location,
SerializeAttributes(builder, parser),
parser.opts.binary_schema_comments
? builder->CreateVectorOfStrings(doc_comment)
: 0);
}
Offset<reflection::Service> ServiceDef::Serialize(FlatBufferBuilder *builder,
const Parser &parser) const {
std::vector<Offset<reflection::RPCCall>> servicecall_offsets;
for (auto it = calls.vec.begin(); it != calls.vec.end(); ++it) {
servicecall_offsets.push_back((*it)->Serialize(builder, parser));
}
auto qualified_name = defined_namespace->GetFullyQualifiedName(name);
return reflection::CreateService(
*builder,
builder->CreateString(qualified_name),
builder->CreateVector(servicecall_offsets),
SerializeAttributes(builder, parser),
parser.opts.binary_schema_comments
? builder->CreateVectorOfStrings(doc_comment)
: 0);
}
Offset<reflection::Enum> EnumDef::Serialize(FlatBufferBuilder *builder,
const Parser &parser) const {
std::vector<Offset<reflection::EnumVal>> enumval_offsets;
for (auto it = vals.vec.begin(); it != vals.vec.end(); ++it) {
enumval_offsets.push_back((*it)->Serialize(builder, parser));
}
auto qualified_name = defined_namespace->GetFullyQualifiedName(name);
return reflection::CreateEnum(
*builder,
builder->CreateString(qualified_name),
builder->CreateVector(enumval_offsets),
is_union,
underlying_type.Serialize(builder),
SerializeAttributes(builder, parser),
parser.opts.binary_schema_comments
? builder->CreateVectorOfStrings(doc_comment)
: 0);
}
Offset<reflection::EnumVal> EnumVal::Serialize(FlatBufferBuilder *builder,
const Parser &parser) const {
return reflection::CreateEnumVal(
*builder,
builder->CreateString(name),
value,
union_type.struct_def ? union_type.struct_def->serialized_location : 0,
union_type.Serialize(builder),
parser.opts.binary_schema_comments
? builder->CreateVectorOfStrings(doc_comment)
: 0);
}
Offset<reflection::Type> Type::Serialize(FlatBufferBuilder *builder) const {
return reflection::CreateType(
*builder,
static_cast<reflection::BaseType>(base_type),
static_cast<reflection::BaseType>(element),
struct_def ? struct_def->index : (enum_def ? enum_def->index : -1));
}
flatbuffers::Offset<
flatbuffers::Vector<flatbuffers::Offset<reflection::KeyValue>>>
Definition::SerializeAttributes(FlatBufferBuilder *builder,
const Parser &parser) const {
std::vector<flatbuffers::Offset<reflection::KeyValue>> attrs;
for (auto kv = attributes.dict.begin(); kv != attributes.dict.end(); ++kv) {
auto it = parser.known_attributes_.find(kv->first);
FLATBUFFERS_ASSERT(it != parser.known_attributes_.end());
if (parser.opts.binary_schema_builtins || !it->second) {
attrs.push_back(reflection::CreateKeyValue(
*builder, builder->CreateString(kv->first),
builder->CreateString(kv->second->constant)));
}
}
if (attrs.size()) {
return builder->CreateVectorOfSortedTables(&attrs);
} else {
return 0;
}
}
std::string Parser::ConformTo(const Parser &base) {
for (auto sit = structs_.vec.begin(); sit != structs_.vec.end(); ++sit) {
auto &struct_def = **sit;
auto qualified_name =
struct_def.defined_namespace->GetFullyQualifiedName(struct_def.name);
auto struct_def_base = base.LookupStruct(qualified_name);
if (!struct_def_base) continue;
for (auto fit = struct_def.fields.vec.begin();
fit != struct_def.fields.vec.end(); ++fit) {
auto &field = **fit;
auto field_base = struct_def_base->fields.Lookup(field.name);
if (field_base) {
if (field.value.offset != field_base->value.offset)
return "offsets differ for field: " + field.name;
if (field.value.constant != field_base->value.constant)
return "defaults differ for field: " + field.name;
if (!EqualByName(field.value.type, field_base->value.type))
return "types differ for field: " + field.name;
} else {
// Doesn't have to exist, deleting fields is fine.
// But we should check if there is a field that has the same offset
// but is incompatible (in the case of field renaming).
for (auto fbit = struct_def_base->fields.vec.begin();
fbit != struct_def_base->fields.vec.end(); ++fbit) {
field_base = *fbit;
if (field.value.offset == field_base->value.offset) {
if (!EqualByName(field.value.type, field_base->value.type))
return "field renamed to different type: " + field.name;
break;
}
}
}
}
}
for (auto eit = enums_.vec.begin(); eit != enums_.vec.end(); ++eit) {
auto &enum_def = **eit;
auto qualified_name =
enum_def.defined_namespace->GetFullyQualifiedName(enum_def.name);
auto enum_def_base = base.enums_.Lookup(qualified_name);
if (!enum_def_base) continue;
for (auto evit = enum_def.vals.vec.begin(); evit != enum_def.vals.vec.end();
++evit) {
auto &enum_val = **evit;
auto enum_val_base = enum_def_base->vals.Lookup(enum_val.name);
if (enum_val_base) {
if (enum_val.value != enum_val_base->value)
return "values differ for enum: " + enum_val.name;
}
}
}
return "";
}
} // namespace flatbuffers
| 1 | 13,499 | Looks more appropriate to stick this in `AbsolutePath`.. actually that already has `FLATBUFFERS_NO_ABSOLUTE_PATH_RESOLUTION` | google-flatbuffers | java |
@@ -1,4 +1,8 @@
-const html = require('yo-yo')
+const { h } = require('preact')
+const hyperx = require('hyperx')
+const html = hyperx(h)
+
+let inputEl
module.exports = (props) => {
const input = html` | 1 | const html = require('yo-yo')
module.exports = (props) => {
const input = html`
<input class="UppyDashboard-input"
hidden="true"
aria-hidden="true"
tabindex="-1"
type="file"
name="files[]"
multiple="true"
onchange=${props.handleInputChange} />`
return html`
<span>
${props.acquirers.length === 0
? props.i18n('dropPaste')
: props.i18n('dropPasteImport')
}
<button type="button"
class="UppyDashboard-browse"
onclick=${(ev) => {
input.click()
}}>${props.i18n('browse')}</button>
${input}
</span>
`
}
| 1 | 10,253 | this might interfere when we have multiple dashboards. maybe ActionBrowseTagline needs to be a Component so we can store the reference to the element on it. | transloadit-uppy | js |
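The uppy review above is about module-level mutable state (the new `let inputEl`) being shared across dashboard instances. A minimal, language-neutral sketch of that interference, written in Go to keep the added examples in one language; the `Dashboard` type and its field names are purely illustrative and not part of uppy:

```go
package main

import "fmt"

// Module-level state (like the `let inputEl` added in the patch above) is
// shared by every dashboard instance, so two instances clobber each other.
var sharedInput string

// Dashboard keeps its own reference, which is what "make ActionBrowseTagline
// a Component and store the reference on it" buys you.
type Dashboard struct {
	input string
}

func (d *Dashboard) render(name string) {
	d.input = "input-for-" + name
	sharedInput = d.input // the shared variable only remembers the last render
}

func main() {
	a, b := &Dashboard{}, &Dashboard{}
	a.render("A")
	b.render("B")
	fmt.Println(a.input, b.input) // input-for-A input-for-B
	fmt.Println(sharedInput)      // input-for-B: dashboard A's reference was lost
}
```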
@@ -376,8 +376,15 @@ func (b *BlockServerRemote) Get(ctx context.Context, tlfID tlf.ID, id kbfsblock.
context kbfsblock.Context) (
buf []byte, serverHalf kbfscrypto.BlockCryptKeyServerHalf, err error) {
ctx = rpc.WithFireNow(ctx)
+ var res keybase1.GetBlockRes
b.log.LazyTrace(ctx, "BServer: Get %s", id)
+
+ // Once the block has been retrieved, cache it.
defer func() {
+ // But don't cache it if it's archived data.
+ if res.Status == keybase1.BlockStatus_ARCHIVED {
+ return
+ }
b.log.LazyTrace(ctx, "BServer: Get %s done (err=%v)", id, err)
if err != nil {
b.deferLog.CWarningf( | 1 | // Copyright 2016 Keybase Inc. All rights reserved.
// Use of this source code is governed by a BSD
// license that can be found in the LICENSE file.
package libkbfs
import (
"sync"
"time"
"github.com/keybase/backoff"
"github.com/keybase/client/go/libkb"
"github.com/keybase/client/go/logger"
"github.com/keybase/client/go/protocol/keybase1"
"github.com/keybase/go-framed-msgpack-rpc/rpc"
"github.com/keybase/kbfs/kbfsblock"
"github.com/keybase/kbfs/kbfscrypto"
"github.com/keybase/kbfs/tlf"
"golang.org/x/net/context"
)
const (
// BServerDefaultPingIntervalSeconds is the default interval on which the
// client should contact the block server.
BServerDefaultPingIntervalSeconds = 10
// BServerPingTimeout is how long to wait for a ping response
// before breaking the connection and trying to reconnect.
BServerPingTimeout = 30 * time.Second
)
// blockServerRemoteClientHandler is a helper struct for refreshing auth
// tokens and managing connections.
type blockServerRemoteClientHandler struct {
name string
log logger.Logger
deferLog logger.Logger
csg CurrentSessionGetter
authToken *kbfscrypto.AuthToken
srvRemote rpc.Remote
connOpts rpc.ConnectionOpts
rpcLogFactory rpc.LogFactory
pinger pinger
connMu sync.RWMutex
conn *rpc.Connection
client keybase1.BlockInterface
}
func newBlockServerRemoteClientHandler(name string, log logger.Logger,
signer kbfscrypto.Signer, csg CurrentSessionGetter, srvRemote rpc.Remote,
rpcLogFactory rpc.LogFactory) *blockServerRemoteClientHandler {
deferLog := log.CloneWithAddedDepth(1)
b := &blockServerRemoteClientHandler{
name: name,
log: log,
deferLog: deferLog,
csg: csg,
srvRemote: srvRemote,
rpcLogFactory: rpcLogFactory,
}
b.pinger = pinger{
name: name,
doPing: b.pingOnce,
timeout: BServerPingTimeout,
log: log,
}
b.authToken = kbfscrypto.NewAuthToken(
signer, kbfsblock.ServerTokenServer, kbfsblock.ServerTokenExpireIn,
"libkbfs_bserver_remote", VersionString(), b)
constBackoff := backoff.NewConstantBackOff(RPCReconnectInterval)
b.connOpts = rpc.ConnectionOpts{
DontConnectNow: true, // connect only on-demand
WrapErrorFunc: libkb.WrapError,
TagsFunc: libkb.LogTagsFromContext,
ReconnectBackoff: func() backoff.BackOff { return constBackoff },
DialerTimeout: dialerTimeout,
InitialReconnectBackoffWindow: func() time.Duration { return bserverReconnectBackoffWindow },
}
b.initNewConnection()
return b
}
func (b *blockServerRemoteClientHandler) initNewConnection() {
b.connMu.Lock()
defer b.connMu.Unlock()
if b.conn != nil {
b.conn.Shutdown()
}
b.conn = rpc.NewTLSConnection(
b.srvRemote, kbfscrypto.GetRootCerts(
b.srvRemote.Peek(), libkb.GetBundledCAsFromHost),
kbfsblock.ServerErrorUnwrapper{}, b, b.rpcLogFactory,
logger.LogOutputWithDepthAdder{Logger: b.log},
rpc.DefaultMaxFrameLength, b.connOpts)
b.client = keybase1.BlockClient{Cli: b.conn.GetClient()}
}
func (b *blockServerRemoteClientHandler) reconnect() error {
b.connMu.Lock()
defer b.connMu.Unlock()
if b.conn != nil {
ctx, cancel := context.WithTimeout(
context.Background(), reconnectTimeout)
defer cancel()
return b.conn.ForceReconnect(ctx)
}
b.initNewConnection()
return nil
}
func (b *blockServerRemoteClientHandler) shutdown() {
if b.authToken != nil {
b.authToken.Shutdown()
}
b.connMu.Lock()
defer b.connMu.Unlock()
if b.conn != nil {
b.conn.Shutdown()
}
// cancel the ping ticker
b.pinger.cancelTicker()
}
func (b *blockServerRemoteClientHandler) getConn() *rpc.Connection {
b.connMu.RLock()
defer b.connMu.RUnlock()
return b.conn
}
func (b *blockServerRemoteClientHandler) getClient() keybase1.BlockInterface {
b.connMu.RLock()
defer b.connMu.RUnlock()
return b.client
}
type ctxBServerResetKeyType int
const (
// ctxBServerResetKey identifies whether the current context has
// already passed through `BServerRemote.resetAuth`.
ctxBServerResetKey ctxBServerResetKeyType = iota
)
// resetAuth is called to reset the authorization on a BlockServer
// connection.
func (b *blockServerRemoteClientHandler) resetAuth(
ctx context.Context, c keybase1.BlockInterface) (err error) {
ctx = context.WithValue(ctx, ctxBServerResetKey, b.name)
defer func() {
b.deferLog.CDebugf(
ctx, "BlockServerRemote: resetAuth called, err: %#v", err)
}()
session, err := b.csg.GetCurrentSession(ctx)
if err != nil {
b.log.CDebugf(
ctx, "%s: User logged out, skipping resetAuth", b.name)
return nil
}
// request a challenge
challenge, err := c.GetSessionChallenge(ctx)
if err != nil {
return err
}
// get a new signature
signature, err := b.authToken.Sign(ctx, session.Name,
session.UID, session.VerifyingKey, challenge)
if err != nil {
return err
}
return c.AuthenticateSession(ctx, signature)
}
// RefreshAuthToken implements the AuthTokenRefreshHandler interface.
func (b *blockServerRemoteClientHandler) RefreshAuthToken(
ctx context.Context) {
if v := ctx.Value(ctxBServerResetKey); v == b.name {
b.log.CDebugf(ctx, "Avoiding resetAuth recursion")
return
}
if err := b.resetAuth(ctx, b.client); err != nil {
b.log.CDebugf(ctx, "%s: error refreshing auth token: %v", b.name, err)
}
}
var _ kbfscrypto.AuthTokenRefreshHandler = (*blockServerRemoteClientHandler)(nil)
// HandlerName implements the ConnectionHandler interface.
func (b *blockServerRemoteClientHandler) HandlerName() string {
return b.name
}
// OnConnect implements the ConnectionHandler interface.
func (b *blockServerRemoteClientHandler) OnConnect(ctx context.Context,
conn *rpc.Connection, client rpc.GenericClient, _ *rpc.Server) error {
// reset auth -- using client here would cause problematic recursion.
c := keybase1.BlockClient{Cli: client}
err := b.resetAuth(ctx, c)
if err != nil {
return err
}
// Start pinging.
b.pinger.resetTicker(BServerDefaultPingIntervalSeconds)
return nil
}
// OnConnectError implements the ConnectionHandler interface.
func (b *blockServerRemoteClientHandler) OnConnectError(err error, wait time.Duration) {
b.log.Warning("%s: connection error: %v; retrying in %s", b.name, err, wait)
if b.authToken != nil {
b.authToken.Shutdown()
}
b.pinger.cancelTicker()
// TODO: it might make sense to show something to the user if this is
// due to authentication, for example.
}
// OnDoCommandError implements the ConnectionHandler interface.
func (b *blockServerRemoteClientHandler) OnDoCommandError(err error, wait time.Duration) {
b.log.Warning("%s: DoCommand error: %v; retrying in %s", b.name, err, wait)
}
// OnDisconnected implements the ConnectionHandler interface.
func (b *blockServerRemoteClientHandler) OnDisconnected(ctx context.Context,
status rpc.DisconnectStatus) {
if status == rpc.StartingNonFirstConnection {
b.log.CWarningf(ctx, "%s: disconnected", b.name)
}
if b.authToken != nil {
b.authToken.Shutdown()
}
b.pinger.cancelTicker()
}
// ShouldRetry implements the ConnectionHandler interface.
func (b *blockServerRemoteClientHandler) ShouldRetry(rpcName string, err error) bool {
// Do not let connection.go's DoCommand retry any batch rpcs
// since batchDowngradeReferences already handles retries.
switch rpcName {
case "keybase.1.block.delReferenceWithCount":
return false
case "keybase.1.block.archiveReferenceWithCount":
return false
}
return kbfsblock.IsThrottleError(err)
}
// ShouldRetryOnConnect implements the ConnectionHandler interface.
func (b *blockServerRemoteClientHandler) ShouldRetryOnConnect(err error) bool {
_, inputCanceled := err.(libkb.InputCanceledError)
return !inputCanceled
}
var _ rpc.ConnectionHandler = (*blockServerRemoteClientHandler)(nil)
func (b *blockServerRemoteClientHandler) pingOnce(ctx context.Context) {
_, err := b.getClient().BlockPing(ctx)
if err == context.DeadlineExceeded {
b.log.CDebugf(
ctx, "%s: Ping timeout -- reinitializing connection", b.name)
if err = b.reconnect(); err != nil {
b.log.CDebugf(ctx, "reconnect error: %v", err)
}
} else if err != nil {
b.log.CDebugf(ctx, "%s: ping error %s", b.name, err)
}
}
type blockServerRemoteConfig interface {
diskBlockCacheGetter
codecGetter
signerGetter
currentSessionGetterGetter
logMaker
}
// BlockServerRemote implements the BlockServer interface and
// represents a remote KBFS block server.
type BlockServerRemote struct {
config blockServerRemoteConfig
shutdownFn func()
log traceLogger
deferLog traceLogger
blkSrvRemote rpc.Remote
putConn *blockServerRemoteClientHandler
getConn *blockServerRemoteClientHandler
}
// Test that BlockServerRemote fully implements the BlockServer interface.
var _ BlockServer = (*BlockServerRemote)(nil)
// NewBlockServerRemote constructs a new BlockServerRemote for the
// given address.
func NewBlockServerRemote(config blockServerRemoteConfig,
blkSrvRemote rpc.Remote, rpcLogFactory rpc.LogFactory) *BlockServerRemote {
log := config.MakeLogger("BSR")
deferLog := log.CloneWithAddedDepth(1)
bs := &BlockServerRemote{
config: config,
log: traceLogger{log},
deferLog: traceLogger{deferLog},
blkSrvRemote: blkSrvRemote,
}
// Use two separate auth clients -- one for writes and one for
// reads. This allows small reads to avoid getting trapped behind
// large asynchronous writes. TODO: use some real network QoS to
// achieve better prioritization within the actual network.
bs.putConn = newBlockServerRemoteClientHandler(
"BlockServerRemotePut", log, config.Signer(),
config.CurrentSessionGetter(), blkSrvRemote, rpcLogFactory)
bs.getConn = newBlockServerRemoteClientHandler(
"BlockServerRemoteGet", log, config.Signer(),
config.CurrentSessionGetter(), blkSrvRemote, rpcLogFactory)
bs.shutdownFn = func() {
bs.putConn.shutdown()
bs.getConn.shutdown()
}
return bs
}
// For testing.
func newBlockServerRemoteWithClient(config blockServerRemoteConfig,
client keybase1.BlockInterface) *BlockServerRemote {
log := config.MakeLogger("BSR")
deferLog := log.CloneWithAddedDepth(1)
bs := &BlockServerRemote{
config: config,
log: traceLogger{log},
deferLog: traceLogger{deferLog},
putConn: &blockServerRemoteClientHandler{
log: log,
deferLog: deferLog,
client: client,
},
getConn: &blockServerRemoteClientHandler{
log: log,
deferLog: deferLog,
client: client,
},
}
return bs
}
// RemoteAddress returns the remote bserver this client is talking to
func (b *BlockServerRemote) RemoteAddress() string {
return b.blkSrvRemote.String()
}
// RefreshAuthToken implements the AuthTokenRefreshHandler interface.
func (b *BlockServerRemote) RefreshAuthToken(ctx context.Context) {
b.putConn.RefreshAuthToken(ctx)
b.getConn.RefreshAuthToken(ctx)
}
// Get implements the BlockServer interface for BlockServerRemote.
func (b *BlockServerRemote) Get(ctx context.Context, tlfID tlf.ID, id kbfsblock.ID,
context kbfsblock.Context) (
buf []byte, serverHalf kbfscrypto.BlockCryptKeyServerHalf, err error) {
ctx = rpc.WithFireNow(ctx)
b.log.LazyTrace(ctx, "BServer: Get %s", id)
defer func() {
b.log.LazyTrace(ctx, "BServer: Get %s done (err=%v)", id, err)
if err != nil {
b.deferLog.CWarningf(
ctx, "Get id=%s tlf=%s context=%s sz=%d err=%v",
id, tlfID, context, len(buf), err)
} else {
b.deferLog.CDebugf(
ctx, "Get id=%s tlf=%s context=%s sz=%d",
id, tlfID, context, len(buf))
dbc := b.config.DiskBlockCache()
if dbc != nil {
// This used to be called in a goroutine to prevent blocking
// the `Get`. But we need this cached synchronously so prefetch
// operations can work correctly.
dbc.Put(ctx, tlfID, id, buf, serverHalf)
}
}
}()
arg := kbfsblock.MakeGetBlockArg(tlfID, id, context)
res, err := b.getConn.getClient().GetBlock(ctx, arg)
return kbfsblock.ParseGetBlockRes(res, err)
}
// GetEncodedSize implements the BlockServer interface for BlockServerRemote.
func (b *BlockServerRemote) GetEncodedSize(
ctx context.Context, tlfID tlf.ID, id kbfsblock.ID,
context kbfsblock.Context) (
size uint32, status keybase1.BlockStatus, err error) {
ctx = rpc.WithFireNow(ctx)
b.log.LazyTrace(ctx, "BServer: GetEncodedSize %s", id)
defer func() {
b.log.LazyTrace(
ctx, "BServer: GetEncodedSize %s done (err=%v)", id, err)
if err != nil {
b.deferLog.CWarningf(
ctx, "GetEncodedSize id=%s tlf=%s context=%s err=%v",
id, tlfID, context, err)
} else {
b.deferLog.CDebugf(
ctx, "GetEncodedSize id=%s tlf=%s context=%s sz=%d status=%s",
id, tlfID, context, size, status)
}
}()
arg := kbfsblock.MakeGetBlockArg(tlfID, id, context)
arg.SizeOnly = true
res, err := b.getConn.getClient().GetBlock(ctx, arg)
if err != nil {
		return 0, 0, err
}
return uint32(res.Size), res.Status, nil
}
// Put implements the BlockServer interface for BlockServerRemote.
func (b *BlockServerRemote) Put(ctx context.Context, tlfID tlf.ID, id kbfsblock.ID,
bContext kbfsblock.Context, buf []byte,
serverHalf kbfscrypto.BlockCryptKeyServerHalf) (err error) {
ctx = rpc.WithFireNow(ctx)
dbc := b.config.DiskBlockCache()
if dbc != nil {
dbc.Put(ctx, tlfID, id, buf, serverHalf)
}
size := len(buf)
b.log.LazyTrace(ctx, "BServer: Put %s", id)
defer func() {
b.log.LazyTrace(ctx, "BServer: Put %s done (err=%v)", id, err)
if err != nil {
b.deferLog.CWarningf(
ctx, "Put id=%s tlf=%s context=%s sz=%d err=%v",
id, tlfID, bContext, size, err)
} else {
b.deferLog.CDebugf(
ctx, "Put id=%s tlf=%s context=%s sz=%d",
id, tlfID, bContext, size)
}
}()
arg := kbfsblock.MakePutBlockArg(tlfID, id, bContext, buf, serverHalf)
// Handle OverQuota errors at the caller
return b.putConn.getClient().PutBlock(ctx, arg)
}
// PutAgain implements the BlockServer interface for BlockServerRemote
func (b *BlockServerRemote) PutAgain(ctx context.Context, tlfID tlf.ID, id kbfsblock.ID,
bContext kbfsblock.Context, buf []byte, serverHalf kbfscrypto.BlockCryptKeyServerHalf) (err error) {
ctx = rpc.WithFireNow(ctx)
dbc := b.config.DiskBlockCache()
if dbc != nil {
dbc.Put(ctx, tlfID, id, buf, serverHalf)
}
size := len(buf)
b.log.LazyTrace(ctx, "BServer: Put %s", id)
defer func() {
b.log.LazyTrace(ctx, "BServer: Put %s done (err=%v)", id, err)
if err != nil {
b.deferLog.CWarningf(
ctx, "Put id=%s tlf=%s context=%s sz=%d err=%v",
id, tlfID, bContext, size, err)
} else {
b.deferLog.CDebugf(
ctx, "Put id=%s tlf=%s context=%s sz=%d",
id, tlfID, bContext, size)
}
}()
arg := kbfsblock.MakePutBlockAgainArg(tlfID, id, bContext, buf, serverHalf)
// Handle OverQuota errors at the caller
return b.putConn.getClient().PutBlockAgain(ctx, arg)
}
// AddBlockReference implements the BlockServer interface for BlockServerRemote
func (b *BlockServerRemote) AddBlockReference(ctx context.Context, tlfID tlf.ID,
id kbfsblock.ID, context kbfsblock.Context) (err error) {
ctx = rpc.WithFireNow(ctx)
b.log.LazyTrace(ctx, "BServer: AddRef %s", id)
defer func() {
b.log.LazyTrace(ctx, "BServer: AddRef %s done (err=%v)", id, err)
if err != nil {
b.deferLog.CWarningf(
ctx, "AddBlockReference id=%s tlf=%s context=%s err=%v",
id, tlfID, context, err)
} else {
b.deferLog.CDebugf(
ctx, "AddBlockReference id=%s tlf=%s context=%s",
id, tlfID, context)
}
}()
arg := kbfsblock.MakeAddReferenceArg(tlfID, id, context)
// Handle OverQuota errors at the caller
return b.putConn.getClient().AddReference(ctx, arg)
}
// RemoveBlockReferences implements the BlockServer interface for
// BlockServerRemote
func (b *BlockServerRemote) RemoveBlockReferences(ctx context.Context,
tlfID tlf.ID, contexts kbfsblock.ContextMap) (liveCounts map[kbfsblock.ID]int, err error) {
ctx = rpc.WithFireNow(ctx)
// TODO: Define a more compact printout of contexts.
b.log.LazyTrace(ctx, "BServer: RemRef %v", contexts)
defer func() {
b.log.LazyTrace(ctx, "BServer: RemRef %v done (err=%v)", contexts, err)
if err != nil {
b.deferLog.CWarningf(ctx, "RemoveBlockReferences batch size=%d err=%v", len(contexts), err)
} else {
b.deferLog.CDebugf(ctx, "RemoveBlockReferences batch size=%d", len(contexts))
}
}()
doneRefs, err := kbfsblock.BatchDowngradeReferences(ctx, b.log, tlfID, contexts, false, b.putConn.getClient())
return kbfsblock.GetLiveCounts(doneRefs), err
}
// ArchiveBlockReferences implements the BlockServer interface for
// BlockServerRemote
func (b *BlockServerRemote) ArchiveBlockReferences(ctx context.Context,
tlfID tlf.ID, contexts kbfsblock.ContextMap) (err error) {
ctx = rpc.WithFireNow(ctx)
b.log.LazyTrace(ctx, "BServer: ArchiveRef %v", contexts)
defer func() {
b.log.LazyTrace(ctx, "BServer: ArchiveRef %v done (err=%v)", contexts, err)
if err != nil {
b.deferLog.CWarningf(ctx, "ArchiveBlockReferences batch size=%d err=%v", len(contexts), err)
} else {
b.deferLog.CDebugf(ctx, "ArchiveBlockReferences batch size=%d", len(contexts))
}
}()
_, err = kbfsblock.BatchDowngradeReferences(ctx, b.log, tlfID, contexts, true, b.putConn.getClient())
return err
}
// IsUnflushed implements the BlockServer interface for BlockServerRemote.
func (b *BlockServerRemote) IsUnflushed(
_ context.Context, _ tlf.ID, _ kbfsblock.ID) (
bool, error) {
return false, nil
}
// GetUserQuotaInfo implements the BlockServer interface for BlockServerRemote
func (b *BlockServerRemote) GetUserQuotaInfo(ctx context.Context) (info *kbfsblock.QuotaInfo, err error) {
ctx = rpc.WithFireNow(ctx)
b.log.LazyTrace(ctx, "BServer: GetUserQuotaInfo")
defer func() {
b.log.LazyTrace(ctx, "BServer: GetUserQuotaInfo done (err=%v)", err)
}()
res, err := b.getConn.getClient().GetUserQuotaInfo(ctx)
return kbfsblock.ParseGetQuotaInfoRes(b.config.Codec(), res, err)
}
// GetTeamQuotaInfo implements the BlockServer interface for BlockServerRemote
func (b *BlockServerRemote) GetTeamQuotaInfo(
ctx context.Context, tid keybase1.TeamID) (
info *kbfsblock.QuotaInfo, err error) {
ctx = rpc.WithFireNow(ctx)
b.log.LazyTrace(ctx, "BServer: GetTeamQuotaInfo")
defer func() {
b.log.LazyTrace(ctx, "BServer: GetTeamQuotaInfo done (err=%v)", err)
}()
res, err := b.getConn.getClient().GetTeamQuotaInfo(ctx, tid)
return kbfsblock.ParseGetQuotaInfoRes(b.config.Codec(), res, err)
}
// Shutdown implements the BlockServer interface for BlockServerRemote.
func (b *BlockServerRemote) Shutdown(ctx context.Context) {
if b.shutdownFn != nil {
b.shutdownFn()
}
b.getConn.shutdown()
b.putConn.shutdown()
}
| 1 | 20,660 | Should this be inside the `else` below and right after `CDebugf` so that we still get the logs? | keybase-kbfs | go |
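The reviewer's question above is an ordering one: skip the disk-cache write for archived blocks, but only after the trace and warning/debug logging have run. A self-contained Go sketch of that control flow, assuming that reading; `fetchBlock`, `getAndCache`, and the status type are stand-ins, not KBFS APIs:

```go
package main

import (
	"errors"
	"log"
)

type status int

const (
	statusLive status = iota
	statusArchived
)

// fetchBlock stands in for the GetBlock RPC; everything here is illustrative.
func fetchBlock(id string) (buf []byte, st status, err error) {
	switch id {
	case "missing":
		return nil, statusLive, errors.New("not found")
	case "old":
		return []byte("archived data"), statusArchived, nil
	default:
		return []byte("data"), statusLive, nil
	}
}

// getAndCache mirrors the shape of BlockServerRemote.Get: the deferred
// logging runs unconditionally, and the "don't cache archived data" decision
// is taken inside the success branch, after the debug log.
func getAndCache(id string, cachePut func(string, []byte)) (buf []byte, err error) {
	var st status
	defer func() {
		log.Printf("Get %s done (err=%v)", id, err) // always traced
		if err != nil {
			log.Printf("Get %s failed: %v", id, err)
			return
		}
		log.Printf("Get %s ok, sz=%d", id, len(buf))
		if st == statusArchived {
			return // skip the cache write, but the logs above already ran
		}
		cachePut(id, buf)
	}()
	buf, st, err = fetchBlock(id)
	return buf, err
}

func main() {
	cache := map[string][]byte{}
	put := func(k string, v []byte) { cache[k] = v }
	getAndCache("abc", put)     // live: logged and cached
	getAndCache("old", put)     // archived: logged, not cached
	getAndCache("missing", put) // error: warning logged, not cached
	log.Printf("cached entries: %d", len(cache))
}
```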
@@ -540,4 +540,5 @@ var errorCodeMap = map[string]gcerrors.ErrorCode{
dyn.ErrCodeTransactionCanceledException: gcerr.FailedPrecondition,
dyn.ErrCodeTransactionInProgressException: gcerr.InvalidArgument,
dyn.ErrCodeIdempotentParameterMismatchException: gcerr.InvalidArgument,
+ "ValidationException": gcerr.InvalidArgument,
} | 1 | // Copyright 2019 The Go Cloud Development Kit Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package dynamodocstore provides a docstore implementation backed by AWS
// DynamoDB.
// Use OpenCollection to construct a *docstore.Collection.
//
// URLs
//
// For docstore.OpenCollection, dynamodocstore registers for the scheme
// "dynamodb". The default URL opener will use an AWS session with the default
// credentials and configuration; see
// https://docs.aws.amazon.com/sdk-for-go/api/aws/session/ for more details.
// To customize the URL opener, or for more details on the URL format, see
// URLOpener.
// See https://godoc.org/gocloud.dev#hdr-URLs for background information.
//
// As
//
// dynamodocstore exposes the following types for As:
// - Query.BeforeQuery: *dynamodb.QueryInput or *dynamodb.ScanInput
// - DocumentIterator: *dynamodb.QueryOutput or *dynamodb.ScanOutput
package dynamodocstore
import (
"context"
"errors"
"fmt"
"net/url"
"strings"
"sync"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/client"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/dynamodb"
dyn "github.com/aws/aws-sdk-go/service/dynamodb"
"github.com/aws/aws-sdk-go/service/dynamodb/expression"
gcaws "gocloud.dev/aws"
"gocloud.dev/gcerrors"
"gocloud.dev/internal/docstore"
"gocloud.dev/internal/docstore/driver"
"gocloud.dev/internal/gcerr"
)
func init() {
docstore.DefaultURLMux().RegisterCollection(Scheme, new(lazySessionOpener))
}
type lazySessionOpener struct {
init sync.Once
opener *URLOpener
err error
}
func (o *lazySessionOpener) OpenCollectionURL(ctx context.Context, u *url.URL) (*docstore.Collection, error) {
o.init.Do(func() {
sess, err := session.NewSessionWithOptions(session.Options{SharedConfigState: session.SharedConfigEnable})
if err != nil {
o.err = err
return
}
o.opener = &URLOpener{
ConfigProvider: sess,
}
})
if o.err != nil {
return nil, fmt.Errorf("open collection %s: %v", u, o.err)
}
return o.opener.OpenCollectionURL(ctx, u)
}
// Scheme is the URL scheme dynamodb registers its URLOpener under on
// docstore.DefaultMux.
const Scheme = "dynamodb"
// URLOpener opens dynamodb URLs like
// "dynamodb://mytable?partition_key=partkey&sort_key=sortkey".
//
// The URL Host is used as the table name. See
// https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.NamingRulesDataTypes.html
// for more details.
//
// The following query parameters are supported:
//
// - partition_key (required): the path to the partition key of a table or an index.
// - sort_key: the path to the sort key of a table or an index.
//
// See https://godoc.org/gocloud.dev/aws#ConfigFromURLParams for supported query
// parameters for overriding the aws.Session from the URL.
type URLOpener struct {
// ConfigProvider must be set to a non-nil value.
ConfigProvider client.ConfigProvider
}
// OpenCollectionURL opens the collection at the URL's path. See the package doc for more details.
func (o *URLOpener) OpenCollectionURL(_ context.Context, u *url.URL) (*docstore.Collection, error) {
db, tableName, partitionKey, sortKey, err := o.processURL(u)
if err != nil {
return nil, err
}
return OpenCollection(db, tableName, partitionKey, sortKey)
}
func (o *URLOpener) processURL(u *url.URL) (db *dyn.DynamoDB, tableName, partitionKey, sortKey string, err error) {
q := u.Query()
partitionKey = q.Get("partition_key")
if partitionKey == "" {
return nil, "", "", "", fmt.Errorf("open collection %s: partition_key is required to open a table", u)
}
q.Del("partition_key")
sortKey = q.Get("sort_key")
q.Del("sort_key")
tableName = u.Host
if tableName == "" {
return nil, "", "", "", fmt.Errorf("open collection %s: URL's host cannot be empty (the table name)", u)
}
if u.Path != "" {
return nil, "", "", "", fmt.Errorf("open collection %s: URL path must be empty, only the host is needed", u)
}
configProvider := &gcaws.ConfigOverrider{
Base: o.ConfigProvider,
}
overrideCfg, err := gcaws.ConfigFromURLParams(q)
if err != nil {
return nil, "", "", "", fmt.Errorf("open collection %s: %v", u, err)
}
configProvider.Configs = append(configProvider.Configs, overrideCfg)
db, err = Dial(configProvider)
if err != nil {
return nil, "", "", "", fmt.Errorf("open collection %s: %v", u, err)
}
return db, tableName, partitionKey, sortKey, nil
}
// Dial gets an AWS DynamoDB service client.
func Dial(p client.ConfigProvider) (*dyn.DynamoDB, error) {
if p == nil {
return nil, errors.New("getting Dynamo service: no AWS session provided")
}
return dyn.New(p), nil
}
type collection struct {
db *dyn.DynamoDB
table string // DynamoDB table name
partitionKey string
sortKey string
description *dyn.TableDescription
}
// OpenCollection creates a *docstore.Collection representing a DynamoDB collection.
func OpenCollection(db *dyn.DynamoDB, tableName, partitionKey, sortKey string) (*docstore.Collection, error) {
c, err := newCollection(db, tableName, partitionKey, sortKey)
if err != nil {
return nil, err
}
return docstore.NewCollection(c), nil
}
func newCollection(db *dyn.DynamoDB, tableName, partitionKey, sortKey string) (*collection, error) {
out, err := db.DescribeTable(&dynamodb.DescribeTableInput{TableName: &tableName})
if err != nil {
return nil, err
}
return &collection{
db: db,
table: tableName,
partitionKey: partitionKey,
sortKey: sortKey,
description: out.Table,
}, nil
}
func (c *collection) KeyFields() []string {
if c.sortKey == "" {
return []string{c.partitionKey}
}
return []string{c.partitionKey, c.sortKey}
}
func (c *collection) RunActions(ctx context.Context, actions []*driver.Action, unordered bool) driver.ActionListError {
if unordered {
panic("unordered unimplemented")
}
groups := c.splitActions(actions)
nRun := 0 // number of actions successfully run
var err error
for _, g := range groups {
if g[0].Kind == driver.Get {
err = c.runGets(ctx, g)
} else {
err = c.runWrites(ctx, g)
}
if err != nil {
return driver.ActionListError{{nRun, err}}
}
nRun += len(g)
}
return nil
}
// splitActions divides the actions slice into sub-slices, each of which can be
// passed to run a dynamo transaction operation.
// splitActions doesn't change the order of the input slice.
func (c *collection) splitActions(actions []*driver.Action) [][]*driver.Action {
var (
groups [][]*driver.Action // the actions, split; the return value
cur []*driver.Action // the group currently being constructed
wm = make(map[[2]interface{}]bool) // writes group cannot contain duplicate items
)
collect := func() { // called when the current group is known to be finished
if len(cur) > 0 {
groups = append(groups, cur)
cur = nil
wm = make(map[[2]interface{}]bool)
}
}
for _, a := range actions {
if len(cur) > 0 && c.shouldSplit(cur[len(cur)-1], a, wm) ||
len(cur) >= 10 { // each transaction can run up to 10 operations.
collect()
}
cur = append(cur, a)
if a.Kind != driver.Get {
if keys := c.primaryKey(a); keys[0] != nil {
wm[keys] = true
}
}
}
collect()
return groups
}
func (c *collection) shouldSplit(curr, next *driver.Action, wm map[[2]interface{}]bool) bool {
if (curr.Kind == driver.Get) != (next.Kind == driver.Get) { // different kind
return true
}
if curr.Kind == driver.Get { // both are Get's
return false
}
keys := c.primaryKey(next)
if keys[0] == nil {
return false
}
_, ok := wm[keys]
return ok // different Write's in one transaction cannot target the same item
}
// primaryKey tries to get the primary key from the doc, which is the partition
// key if there is no sort key, or the combination of both keys. If there is not
// a key, it returns an array with two nil's.
func (c *collection) primaryKey(a *driver.Action) [2]interface{} {
var keys [2]interface{}
var err error
keys[0], err = a.Doc.GetField(c.partitionKey)
if err != nil {
return keys
}
if c.sortKey != "" {
keys[1], _ = a.Doc.GetField(c.sortKey) // ignore error since keys[1] would be nil in that case
}
return keys
}
func (c *collection) runGets(ctx context.Context, actions []*driver.Action) error {
// Assume all actions Kinds are Get's.
tgs := make([]*dyn.TransactGetItem, len(actions))
for i, a := range actions {
tg, err := c.toTransactGet(a.Doc, a.FieldPaths)
if err != nil {
return err
}
tgs[i] = tg
}
out, err := c.db.TransactGetItemsWithContext(ctx, &dyn.TransactGetItemsInput{TransactItems: tgs})
if err != nil {
return err
}
for i, res := range out.Responses {
if err := decodeDoc(&dyn.AttributeValue{M: res.Item}, actions[i].Doc); err != nil {
return err
}
}
return nil
}
func (c *collection) runWrites(ctx context.Context, actions []*driver.Action) error {
tws := make([]*dyn.TransactWriteItem, len(actions))
for i, a := range actions {
var pc *expression.ConditionBuilder
var err error
if a.Kind != driver.Create {
pc, err = revisionPrecondition(a.Doc)
if err != nil {
return err
}
}
var tw *dyn.TransactWriteItem
switch a.Kind {
case driver.Create:
cb := expression.AttributeNotExists(expression.Name(c.partitionKey))
tw, err = c.toTransactPut(ctx, a.Kind, a.Doc, &cb)
case driver.Replace:
if pc == nil {
c := expression.AttributeExists(expression.Name(c.partitionKey))
pc = &c
}
tw, err = c.toTransactPut(ctx, a.Kind, a.Doc, pc)
case driver.Put:
tw, err = c.toTransactPut(ctx, a.Kind, a.Doc, pc)
case driver.Delete:
tw, err = c.toTransactDelete(ctx, a.Doc, pc)
case driver.Update:
cb := expression.AttributeExists(expression.Name(c.partitionKey))
if pc != nil {
cb = cb.And(*pc)
}
tw, err = c.toTransactUpdate(ctx, a.Doc, a.Mods, &cb)
default:
panic("wrong action passed in; writes should be of kind Create, Replace, Put, Delete or Update")
}
if err != nil {
return err
}
tws[i] = tw
}
_, err := c.db.TransactWriteItemsWithContext(ctx, &dyn.TransactWriteItemsInput{
ClientRequestToken: aws.String(driver.UniqueString()),
TransactItems: tws,
})
if err != nil {
return err
}
for i, a := range actions {
if a.Kind == driver.Create {
if _, err := a.Doc.GetField(c.partitionKey); err != nil && gcerrors.Code(err) == gcerrors.NotFound {
actions[i].Doc.SetField(c.partitionKey, *tws[i].Put.Item[c.partitionKey].S)
}
}
}
return nil
}
func (c *collection) missingKeyField(m map[string]*dyn.AttributeValue) string {
if _, ok := m[c.partitionKey]; !ok {
return c.partitionKey
}
if _, ok := m[c.sortKey]; !ok && c.sortKey != "" {
return c.sortKey
}
return ""
}
func (c *collection) toTransactPut(ctx context.Context, k driver.ActionKind, doc driver.Document, condition *expression.ConditionBuilder) (*dyn.TransactWriteItem, error) {
av, err := encodeDoc(doc)
if err != nil {
return nil, err
}
mf := c.missingKeyField(av.M)
if k != driver.Create && mf != "" {
return nil, fmt.Errorf("missing key field %q", mf)
}
if mf == c.partitionKey {
av.M[c.partitionKey] = new(dyn.AttributeValue).SetS(driver.UniqueString())
}
if c.sortKey != "" && mf == c.sortKey {
// It doesn't make sense to generate a random sort key.
return nil, fmt.Errorf("missing sort key %q", c.sortKey)
}
if av.M[docstore.RevisionField], err = encodeValue(driver.UniqueString()); err != nil {
return nil, err
}
put := &dyn.Put{
TableName: &c.table,
Item: av.M,
}
if condition != nil {
ce, err := expression.NewBuilder().WithCondition(*condition).Build()
if err != nil {
return nil, err
}
put.ExpressionAttributeNames = ce.Names()
put.ExpressionAttributeValues = ce.Values()
put.ConditionExpression = ce.Condition()
}
return &dyn.TransactWriteItem{Put: put}, nil
}
func (c *collection) toTransactGet(doc driver.Document, fieldpaths [][]string) (*dyn.TransactGetItem, error) {
av, err := encodeDocKeyFields(doc, c.partitionKey, c.sortKey)
if err != nil {
return nil, err
}
get := &dyn.Get{
TableName: &c.table,
Key: av.M,
}
if len(fieldpaths) > 0 {
// Construct a projection expression for the field paths.
// See https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.ProjectionExpressions.html.
nbs := []expression.NameBuilder{expression.Name(docstore.RevisionField)}
for _, fp := range fieldpaths {
nbs = append(nbs, expression.Name(strings.Join(fp, ".")))
}
expr, err := expression.NewBuilder().
WithProjection(expression.AddNames(expression.ProjectionBuilder{}, nbs...)).
Build()
if err != nil {
return nil, err
}
get.ProjectionExpression = expr.Projection()
get.ExpressionAttributeNames = expr.Names()
}
return &dyn.TransactGetItem{Get: get}, nil
}
func (c *collection) toTransactDelete(ctx context.Context, doc driver.Document, condition *expression.ConditionBuilder) (*dyn.TransactWriteItem, error) {
av, err := encodeDocKeyFields(doc, c.partitionKey, c.sortKey)
if err != nil {
return nil, err
}
del := &dyn.Delete{
TableName: &c.table,
Key: av.M,
}
if condition != nil {
ce, err := expression.NewBuilder().WithCondition(*condition).Build()
if err != nil {
return nil, err
}
del.ExpressionAttributeNames = ce.Names()
del.ExpressionAttributeValues = ce.Values()
del.ConditionExpression = ce.Condition()
}
return &dyn.TransactWriteItem{Delete: del}, nil
}
func (c *collection) toTransactUpdate(ctx context.Context, doc driver.Document, mods []driver.Mod, condition *expression.ConditionBuilder) (*dyn.TransactWriteItem, error) {
if len(mods) == 0 {
return nil, nil
}
av, err := encodeDocKeyFields(doc, c.partitionKey, c.sortKey)
if err != nil {
return nil, err
}
var ub expression.UpdateBuilder
for _, m := range mods {
// TODO(shantuo): check for invalid field paths
fp := strings.Join(m.FieldPath, ".")
if m.Value == nil {
ub = ub.Remove(expression.Name(fp))
} else {
ub = ub.Set(expression.Name(fp), expression.Value(m.Value))
}
}
ub = ub.Set(expression.Name(docstore.RevisionField), expression.Value(driver.UniqueString()))
ce, err := expression.NewBuilder().WithCondition(*condition).WithUpdate(ub).Build()
if err != nil {
return nil, err
}
return &dyn.TransactWriteItem{
Update: &dyn.Update{
TableName: &c.table,
Key: av.M,
ConditionExpression: ce.Condition(),
UpdateExpression: ce.Update(),
ExpressionAttributeNames: ce.Names(),
ExpressionAttributeValues: ce.Values(),
},
}, nil
}
// revisionPrecondition returns a DynamoDB expression that asserts that the
// stored document's revision matches the revision of doc.
func revisionPrecondition(doc driver.Document) (*expression.ConditionBuilder, error) {
v, err := doc.GetField(docstore.RevisionField)
if err != nil { // field not present
return nil, nil
}
if v == nil { // field is present, but nil
return nil, nil
}
rev, ok := v.(string)
if !ok {
return nil, gcerr.Newf(gcerr.InvalidArgument, nil,
"%s field contains wrong type: got %T, want string",
docstore.RevisionField, v)
}
if rev == "" {
return nil, nil
}
// Value encodes rev to an attribute value.
cb := expression.Name(docstore.RevisionField).Equal(expression.Value(rev))
return &cb, nil
}
func (c *collection) ErrorCode(err error) gcerr.ErrorCode {
ae, ok := err.(awserr.Error)
if !ok {
return gcerr.Unknown
}
ec, ok := errorCodeMap[ae.Code()]
if !ok {
return gcerr.Unknown
}
return ec
}
var errorCodeMap = map[string]gcerrors.ErrorCode{
dyn.ErrCodeConditionalCheckFailedException: gcerr.FailedPrecondition,
dyn.ErrCodeProvisionedThroughputExceededException: gcerr.ResourceExhausted,
dyn.ErrCodeResourceNotFoundException: gcerr.NotFound,
dyn.ErrCodeItemCollectionSizeLimitExceededException: gcerr.ResourceExhausted,
dyn.ErrCodeTransactionConflictException: gcerr.Internal,
dyn.ErrCodeRequestLimitExceeded: gcerr.ResourceExhausted,
dyn.ErrCodeInternalServerError: gcerr.Internal,
dyn.ErrCodeTransactionCanceledException: gcerr.FailedPrecondition,
dyn.ErrCodeTransactionInProgressException: gcerr.InvalidArgument,
dyn.ErrCodeIdempotentParameterMismatchException: gcerr.InvalidArgument,
}
| 1 | 17,138 | Weird they don't expose the type for dynamo and this seems a pretty common error. | google-go-cloud | go |
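For the "ValidationException" line added in the patch above: the DynamoDB SDK package apparently doesn't export an ErrCode constant for it, so the driver has to key the map with a string literal. A small self-contained Go sketch of that lookup; the local `coder` interface and error-code constants stand in for `awserr.Error` and `gcerrors` and are not the real types:

```go
package main

import "fmt"

// coder mirrors the part of awserr.Error the driver relies on; in the real
// code this is github.com/aws/aws-sdk-go/aws/awserr.Error.
type coder interface {
	error
	Code() string
}

type awsErr struct{ code, msg string }

func (e awsErr) Error() string { return e.code + ": " + e.msg }
func (e awsErr) Code() string  { return e.code }

type errorCode int

const (
	unknown errorCode = iota
	invalidArgument
	notFound
)

// errorCodeMap is keyed by the AWS error-code string. "ValidationException"
// is written out as a literal because (at least at the time of this review)
// the DynamoDB package does not export an ErrCode constant for it.
var errorCodeMap = map[string]errorCode{
	"ResourceNotFoundException": notFound,
	"ValidationException":       invalidArgument,
}

func toErrorCode(err error) errorCode {
	ae, ok := err.(coder)
	if !ok {
		return unknown
	}
	if ec, ok := errorCodeMap[ae.Code()]; ok {
		return ec
	}
	return unknown
}

func main() {
	err := awsErr{code: "ValidationException", msg: "missing key attribute"}
	fmt.Println(toErrorCode(err) == invalidArgument) // true
}
```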
@@ -33,10 +33,11 @@ namespace OpenTelemetry.Trace.Configuration
/// Enables OpenTelemetry.
/// </summary>
/// <param name="configureOpenTelemetryBuilder">Function that configures OpenTelemetryBuilder.</param>
+ /// <returns><see cref="IDisposable"/> to be disposed on application shutdown.</returns>
/// <remarks>
/// Basic implementation only. Most logic from TracerBuilder will be ported here.
/// </remarks>
- public static void EnableOpenTelemetry(Action<OpenTelemetryBuilder> configureOpenTelemetryBuilder)
+ public static IDisposable EnableOpenTelemetry(Action<OpenTelemetryBuilder> configureOpenTelemetryBuilder)
{
var openTelemetryBuilder = new OpenTelemetryBuilder();
configureOpenTelemetryBuilder(openTelemetryBuilder); | 1 | // <copyright file="OpenTelemetrySdk.cs" company="OpenTelemetry Authors">
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// </copyright>
using System;
using System.Diagnostics;
using OpenTelemetry.Trace.Export;
using OpenTelemetry.Trace.Samplers;
namespace OpenTelemetry.Trace.Configuration
{
public class OpenTelemetrySdk
{
static OpenTelemetrySdk()
{
Activity.DefaultIdFormat = ActivityIdFormat.W3C;
Activity.ForceDefaultIdFormat = true;
}
/// <summary>
/// Enables OpenTelemetry.
/// </summary>
/// <param name="configureOpenTelemetryBuilder">Function that configures OpenTelemetryBuilder.</param>
/// <remarks>
/// Basic implementation only. Most logic from TracerBuilder will be ported here.
/// </remarks>
public static void EnableOpenTelemetry(Action<OpenTelemetryBuilder> configureOpenTelemetryBuilder)
{
var openTelemetryBuilder = new OpenTelemetryBuilder();
configureOpenTelemetryBuilder(openTelemetryBuilder);
ActivitySampler sampler = openTelemetryBuilder.Sampler ?? new AlwaysOnActivitySampler();
ActivityProcessor activityProcessor;
if (openTelemetryBuilder.ProcessingPipeline == null)
{
                // if no pipelines are configured, use the noop processor
activityProcessor = new NoopActivityProcessor();
}
else
{
activityProcessor = openTelemetryBuilder.ProcessingPipeline.Build();
}
// This is what subscribes to Activities.
// Think of this as the replacement for DiagnosticListener.AllListeners.Subscribe(onNext => diagnosticListener.Subscribe(..));
ActivityListener listener = new ActivityListener
{
// Callback when Activity is started.
ActivityStarted = activityProcessor.OnStart,
                // Callback when Activity is stopped.
ActivityStopped = activityProcessor.OnEnd,
// Function which takes ActivitySource and returns true/false to indicate if it should be subscribed to
// or not
ShouldListenTo = (activitySource) => openTelemetryBuilder.ActivitySourceNames.Contains(activitySource.Name.ToUpperInvariant()),
// The following parameter is not used now.
GetRequestedDataUsingParentId = (ref ActivityCreationOptions<string> options) => ActivityDataRequest.AllData,
// This delegate informs ActivitySource about sampling decision.
// Following simple behavior is enabled now:
// If Sampler returns IsSampled as true, returns ActivityDataRequest.AllDataAndRecorded
// This creates Activity and sets its IsAllDataRequested to true.
// Library authors can check activity.IsAllDataRequested and avoid
// doing any additional telemetry population.
// Activity.IsAllDataRequested is the equivalent of Span.IsRecording
//
// If Sampler returns IsSampled as false, returns ActivityDataRequest.None
// This prevents Activity from being created at all.
GetRequestedDataUsingContext = (ref ActivityCreationOptions<ActivityContext> options) =>
{
var shouldSample = sampler.ShouldSample(
options.Parent,
options.Parent.TraceId,
default(ActivitySpanId), // Passing default SpanId here. The actual SpanId is not known before actual Activity creation
options.Name,
options.Kind,
options.Tags,
options.Links);
if (shouldSample.IsSampled)
{
return ActivityDataRequest.AllDataAndRecorded;
}
else
{
return ActivityDataRequest.None;
}
// TODO: Improve this to properly use ActivityDataRequest.AllData, PropagationData as well.
},
};
ActivitySource.AddActivityListener(listener);
}
}
}
| 1 | 14,064 | @cijothomas I couldn't make the unit tests work without a way to destroy the ActivityListener we create internally, so I return it as an IDisposable here. But we'll also need to stop any ActivityProcessors/ActivityExporters on application shutdown, right? | open-telemetry-opentelemetry-dotnet | .cs |
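The comment above is about handing the caller something it can dispose. Sketched below in Go (to stay in one language with the other added examples) rather than C#: an Enable-style initializer returns a single io.Closer that stops both the listener and the processors/exporters, so tests and the application shutdown path have one handle to call. The `listener`/`processor` types and `enableTelemetry` are illustrative only, not part of the OpenTelemetry .NET API.

```go
package main

import (
	"fmt"
	"io"
)

type listener struct{ name string }

func (l *listener) stop() { fmt.Println("listener stopped:", l.name) }

type processor struct{ name string }

func (p *processor) shutdown() { fmt.Println("processor shut down:", p.name) }

// closerFunc adapts a plain func to io.Closer so callers can defer Close().
type closerFunc func() error

func (f closerFunc) Close() error { return f() }

// enableTelemetry wires up a listener and a processor and hands back a single
// io.Closer that tears both down, instead of leaving them in package state.
func enableTelemetry() io.Closer {
	l := &listener{name: "activity-listener"}
	p := &processor{name: "batch-processor"}
	return closerFunc(func() error {
		l.stop()
		p.shutdown() // processors/exporters need flushing too, not just the listener
		return nil
	})
}

func main() {
	shutdown := enableTelemetry()
	defer shutdown.Close()
	fmt.Println("telemetry enabled")
}
```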
@@ -592,7 +592,7 @@ class Commands:
@command('wp')
async def payto(self, destination, amount, fee=None, feerate=None, from_addr=None, from_coins=None, change_addr=None,
- nocheck=False, unsigned=False, rbf=None, password=None, locktime=None, wallet: Abstract_Wallet = None):
+ nocheck=False, unsigned=False, rbf=None, password=None, locktime=None, addtransaction=True, wallet: Abstract_Wallet = None):
"""Create a transaction. """
self.nocheck = nocheck
tx_fee = satoshis(fee) | 1 | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2011 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import datetime
import copy
import argparse
import json
import ast
import base64
import operator
import asyncio
import inspect
from functools import wraps, partial
from itertools import repeat
from decimal import Decimal
from typing import Optional, TYPE_CHECKING, Dict, List
from .import util, ecc
from .util import bfh, bh2u, format_satoshis, json_decode, json_encode, is_hash256_str, is_hex_str, to_bytes, timestamp_to_datetime
from .util import standardize_path
from . import bitcoin
from .bitcoin import is_address, hash_160, COIN
from .bip32 import BIP32Node
from .i18n import _
from .transaction import (Transaction, multisig_script, TxOutput, PartialTransaction, PartialTxOutput,
tx_from_any, PartialTxInput, TxOutpoint)
from .invoices import PR_PAID, PR_UNPAID, PR_UNKNOWN, PR_EXPIRED
from .synchronizer import Notifier
from .wallet import Abstract_Wallet, create_new_wallet, restore_wallet_from_text, Deterministic_Wallet
from .address_synchronizer import TX_HEIGHT_LOCAL
from .mnemonic import Mnemonic
from .lnutil import SENT, RECEIVED
from .lnutil import LnFeatures
from .lnutil import ln_dummy_address
from .lnpeer import channel_id_from_funding_tx
from .plugin import run_hook
from .version import ELECTRUM_VERSION
from .simple_config import SimpleConfig
from .invoices import LNInvoice
from . import submarine_swaps
if TYPE_CHECKING:
from .network import Network
from .daemon import Daemon
known_commands = {} # type: Dict[str, Command]
class NotSynchronizedException(Exception):
pass
def satoshis(amount):
# satoshi conversion must not be performed by the parser
return int(COIN*Decimal(amount)) if amount not in ['!', None] else amount
def format_satoshis(x):
return str(Decimal(x)/COIN) if x is not None else None
def json_normalize(x):
# note: The return value of commands, when going through the JSON-RPC interface,
# is json-encoded. The encoder used there cannot handle some types, e.g. electrum.util.Satoshis.
# note: We should not simply do "json_encode(x)" here, as then later x would get doubly json-encoded.
# see #5868
return json_decode(json_encode(x))
class Command:
def __init__(self, func, s):
self.name = func.__name__
self.requires_network = 'n' in s
self.requires_wallet = 'w' in s
self.requires_password = 'p' in s
self.description = func.__doc__
self.help = self.description.split('.')[0] if self.description else None
varnames = func.__code__.co_varnames[1:func.__code__.co_argcount]
self.defaults = func.__defaults__
if self.defaults:
n = len(self.defaults)
self.params = list(varnames[:-n])
self.options = list(varnames[-n:])
else:
self.params = list(varnames)
self.options = []
self.defaults = []
# sanity checks
if self.requires_password:
assert self.requires_wallet
for varname in ('wallet_path', 'wallet'):
if varname in varnames:
assert varname in self.options
assert not ('wallet_path' in varnames and 'wallet' in varnames)
if self.requires_wallet:
assert 'wallet' in varnames
def command(s):
def decorator(func):
global known_commands
name = func.__name__
known_commands[name] = Command(func, s)
@wraps(func)
async def func_wrapper(*args, **kwargs):
cmd_runner = args[0] # type: Commands
cmd = known_commands[func.__name__] # type: Command
password = kwargs.get('password')
daemon = cmd_runner.daemon
if daemon:
if 'wallet_path' in cmd.options and kwargs.get('wallet_path') is None:
kwargs['wallet_path'] = daemon.config.get_wallet_path()
if cmd.requires_wallet and kwargs.get('wallet') is None:
kwargs['wallet'] = daemon.config.get_wallet_path()
if 'wallet' in cmd.options:
wallet_path = kwargs.get('wallet', None)
if isinstance(wallet_path, str):
wallet = daemon.get_wallet(wallet_path)
if wallet is None:
raise Exception('wallet not loaded')
kwargs['wallet'] = wallet
wallet = kwargs.get('wallet') # type: Optional[Abstract_Wallet]
if cmd.requires_wallet and not wallet:
raise Exception('wallet not loaded')
if cmd.requires_password and password is None and wallet.has_password():
raise Exception('Password required')
return await func(*args, **kwargs)
return func_wrapper
return decorator
class Commands:
def __init__(self, *, config: 'SimpleConfig',
network: 'Network' = None,
daemon: 'Daemon' = None, callback=None):
self.config = config
self.daemon = daemon
self.network = network
self._callback = callback
def _run(self, method, args, password_getter=None, **kwargs):
"""This wrapper is called from unit tests and the Qt python console."""
cmd = known_commands[method]
password = kwargs.get('password', None)
wallet = kwargs.get('wallet', None)
if (cmd.requires_password and wallet and wallet.has_password()
and password is None):
password = password_getter()
if password is None:
return
f = getattr(self, method)
if cmd.requires_password:
kwargs['password'] = password
if 'wallet' in kwargs:
sig = inspect.signature(f)
if 'wallet' not in sig.parameters:
kwargs.pop('wallet')
coro = f(*args, **kwargs)
fut = asyncio.run_coroutine_threadsafe(coro, asyncio.get_event_loop())
result = fut.result()
if self._callback:
self._callback()
return result
@command('')
async def commands(self):
"""List of commands"""
return ' '.join(sorted(known_commands.keys()))
@command('n')
async def getinfo(self):
""" network info """
net_params = self.network.get_parameters()
response = {
'path': self.network.config.path,
'server': net_params.server.host,
'blockchain_height': self.network.get_local_height(),
'server_height': self.network.get_server_height(),
'spv_nodes': len(self.network.get_interfaces()),
'connected': self.network.is_connected(),
'auto_connect': net_params.auto_connect,
'version': ELECTRUM_VERSION,
'default_wallet': self.config.get_wallet_path(),
'fee_per_kb': self.config.fee_per_kb(),
}
return response
@command('n')
async def stop(self):
"""Stop daemon"""
self.daemon.stop()
return "Daemon stopped"
@command('n')
async def list_wallets(self):
"""List wallets open in daemon"""
return [{'path': path, 'synchronized': w.is_up_to_date()}
for path, w in self.daemon.get_wallets().items()]
@command('n')
async def load_wallet(self, wallet_path=None, password=None):
"""Open wallet in daemon"""
wallet = self.daemon.load_wallet(wallet_path, password, manual_upgrades=False)
if wallet is not None:
run_hook('load_wallet', wallet, None)
response = wallet is not None
return response
@command('n')
async def close_wallet(self, wallet_path=None):
"""Close wallet"""
return self.daemon.stop_wallet(wallet_path)
@command('')
async def create(self, passphrase=None, password=None, encrypt_file=True, seed_type=None, wallet_path=None):
"""Create a new wallet.
If you want to be prompted for an argument, type '?' or ':' (concealed)
"""
d = create_new_wallet(path=wallet_path,
passphrase=passphrase,
password=password,
encrypt_file=encrypt_file,
seed_type=seed_type,
config=self.config)
return {
'seed': d['seed'],
'path': d['wallet'].storage.path,
'msg': d['msg'],
}
@command('')
async def restore(self, text, passphrase=None, password=None, encrypt_file=True, wallet_path=None):
"""Restore a wallet from text. Text can be a seed phrase, a master
public key, a master private key, a list of bitcoin addresses
or bitcoin private keys.
If you want to be prompted for an argument, type '?' or ':' (concealed)
"""
# TODO create a separate command that blocks until wallet is synced
d = restore_wallet_from_text(text,
path=wallet_path,
passphrase=passphrase,
password=password,
encrypt_file=encrypt_file,
config=self.config)
return {
'path': d['wallet'].storage.path,
'msg': d['msg'],
}
@command('wp')
async def password(self, password=None, new_password=None, wallet: Abstract_Wallet = None):
"""Change wallet password. """
if wallet.storage.is_encrypted_with_hw_device() and new_password:
raise Exception("Can't change the password of a wallet encrypted with a hw device.")
b = wallet.storage.is_encrypted()
wallet.update_password(password, new_password, encrypt_storage=b)
wallet.save_db()
return {'password':wallet.has_password()}
@command('w')
async def get(self, key, wallet: Abstract_Wallet = None):
"""Return item from wallet storage"""
return wallet.db.get(key)
@command('')
async def getconfig(self, key):
"""Return a configuration variable. """
return self.config.get(key)
@classmethod
def _setconfig_normalize_value(cls, key, value):
if key not in ('rpcuser', 'rpcpassword'):
value = json_decode(value)
try:
value = ast.literal_eval(value)
except:
pass
return value
@command('')
async def setconfig(self, key, value):
"""Set a configuration variable. 'value' may be a string or a Python expression."""
value = self._setconfig_normalize_value(key, value)
self.config.set_key(key, value)
return True
@command('')
async def get_ssl_domain(self):
"""Check and return the SSL domain set in ssl_keyfile and ssl_certfile
"""
return self.config.get_ssl_domain()
@command('')
async def make_seed(self, nbits=132, language=None, seed_type=None):
"""Create a seed"""
from .mnemonic import Mnemonic
s = Mnemonic(language).make_seed(seed_type, num_bits=nbits)
return s
@command('n')
async def getaddresshistory(self, address):
"""Return the transaction history of any address. Note: This is a
walletless server query, results are not checked by SPV.
"""
sh = bitcoin.address_to_scripthash(address)
return await self.network.get_history_for_scripthash(sh)
@command('w')
async def listunspent(self, wallet: Abstract_Wallet = None):
"""List unspent outputs. Returns the list of unspent transaction
outputs in your wallet."""
coins = []
for txin in wallet.get_utxos():
d = txin.to_json()
v = d.pop("value_sats")
d["value"] = str(Decimal(v)/COIN) if v is not None else None
coins.append(d)
return coins
@command('n')
async def getaddressunspent(self, address):
"""Returns the UTXO list of any address. Note: This
is a walletless server query, results are not checked by SPV.
"""
sh = bitcoin.address_to_scripthash(address)
return await self.network.listunspent_for_scripthash(sh)
@command('')
async def serialize(self, jsontx):
"""Create a transaction from json inputs.
Inputs must have a redeemPubkey.
Outputs must be a list of {'address':address, 'value':satoshi_amount}.
"""
keypairs = {}
inputs = [] # type: List[PartialTxInput]
locktime = jsontx.get('lockTime', 0)
for txin_dict in jsontx.get('inputs'):
if txin_dict.get('prevout_hash') is not None and txin_dict.get('prevout_n') is not None:
prevout = TxOutpoint(txid=bfh(txin_dict['prevout_hash']), out_idx=int(txin_dict['prevout_n']))
elif txin_dict.get('output'):
prevout = TxOutpoint.from_str(txin_dict['output'])
else:
raise Exception("missing prevout for txin")
txin = PartialTxInput(prevout=prevout)
txin._trusted_value_sats = int(txin_dict['value'])
nsequence = txin_dict.get('nsequence', None)
if nsequence is not None:
txin.nsequence = nsequence
sec = txin_dict.get('privkey')
if sec:
txin_type, privkey, compressed = bitcoin.deserialize_privkey(sec)
pubkey = ecc.ECPrivkey(privkey).get_public_key_hex(compressed=compressed)
keypairs[pubkey] = privkey, compressed
txin.script_type = txin_type
txin.pubkeys = [bfh(pubkey)]
txin.num_sig = 1
inputs.append(txin)
outputs = [PartialTxOutput.from_address_and_value(txout['address'], int(txout['value']))
for txout in jsontx.get('outputs')]
tx = PartialTransaction.from_io(inputs, outputs, locktime=locktime)
tx.sign(keypairs)
return tx.serialize()
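    # A hypothetical sketch (not taken from the Electrum sources) of the JSON document
    # that `serialize` above accepts, inferred from the parsing code: `lockTime`
    # defaults to 0, each input needs `prevout_hash`/`prevout_n` (or `output`) plus a
    # `value` in satoshis, while `privkey` and `nsequence` are optional. All values
    # below are placeholders.
    #
    #   {
    #     "lockTime": 0,
    #     "inputs": [
    #       {"prevout_hash": "<txid>", "prevout_n": 0,
    #        "value": 100000, "privkey": "<WIF private key>"}
    #     ],
    #     "outputs": [
    #       {"address": "<destination address>", "value": 90000}
    #     ]
    #   }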
@command('wp')
async def signtransaction(self, tx, privkey=None, password=None, wallet: Abstract_Wallet = None):
"""Sign a transaction. The wallet keys will be used unless a private key is provided."""
tx = PartialTransaction(tx)
if privkey:
txin_type, privkey2, compressed = bitcoin.deserialize_privkey(privkey)
pubkey = ecc.ECPrivkey(privkey2).get_public_key_bytes(compressed=compressed).hex()
tx.sign({pubkey:(privkey2, compressed)})
else:
wallet.sign_transaction(tx, password)
return tx.serialize()
@command('')
async def deserialize(self, tx):
"""Deserialize a serialized transaction"""
tx = tx_from_any(tx)
return tx.to_json()
@command('n')
async def broadcast(self, tx):
"""Broadcast a transaction to the network. """
tx = Transaction(tx)
await self.network.broadcast_transaction(tx)
return tx.txid()
@command('')
async def createmultisig(self, num, pubkeys):
"""Create multisig address"""
assert isinstance(pubkeys, list), (type(num), type(pubkeys))
redeem_script = multisig_script(pubkeys, num)
address = bitcoin.hash160_to_p2sh(hash_160(bfh(redeem_script)))
return {'address':address, 'redeemScript':redeem_script}
@command('w')
async def freeze(self, address, wallet: Abstract_Wallet = None):
"""Freeze address. Freeze the funds at one of your wallet\'s addresses"""
return wallet.set_frozen_state_of_addresses([address], True)
@command('w')
async def unfreeze(self, address, wallet: Abstract_Wallet = None):
"""Unfreeze address. Unfreeze the funds at one of your wallet\'s address"""
return wallet.set_frozen_state_of_addresses([address], False)
@command('wp')
async def getprivatekeys(self, address, password=None, wallet: Abstract_Wallet = None):
"""Get private keys of addresses. You may pass a single wallet address, or a list of wallet addresses."""
if isinstance(address, str):
address = address.strip()
if is_address(address):
return wallet.export_private_key(address, password)
domain = address
return [wallet.export_private_key(address, password) for address in domain]
@command('wp')
async def getprivatekeyforpath(self, path, password=None, wallet: Abstract_Wallet = None):
"""Get private key corresponding to derivation path (address index).
'path' can be either a str such as "m/0/50", or a list of ints such as [0, 50].
"""
return wallet.export_private_key_for_path(path, password)
@command('w')
async def ismine(self, address, wallet: Abstract_Wallet = None):
"""Check if address is in wallet. Return true if and only address is in wallet"""
return wallet.is_mine(address)
@command('')
async def dumpprivkeys(self):
"""Deprecated."""
return "This command is deprecated. Use a pipe instead: 'electrum listaddresses | electrum getprivatekeys - '"
@command('')
async def validateaddress(self, address):
"""Check that an address is valid. """
return is_address(address)
@command('w')
async def getpubkeys(self, address, wallet: Abstract_Wallet = None):
"""Return the public keys for a wallet address. """
return wallet.get_public_keys(address)
@command('w')
async def getbalance(self, wallet: Abstract_Wallet = None):
"""Return the balance of your wallet. """
c, u, x = wallet.get_balance()
l = wallet.lnworker.get_balance() if wallet.lnworker else None
out = {"confirmed": str(Decimal(c)/COIN)}
if u:
out["unconfirmed"] = str(Decimal(u)/COIN)
if x:
out["unmatured"] = str(Decimal(x)/COIN)
if l:
out["lightning"] = str(Decimal(l)/COIN)
return out
@command('n')
async def getaddressbalance(self, address):
"""Return the balance of any address. Note: This is a walletless
server query, results are not checked by SPV.
"""
sh = bitcoin.address_to_scripthash(address)
out = await self.network.get_balance_for_scripthash(sh)
out["confirmed"] = str(Decimal(out["confirmed"])/COIN)
out["unconfirmed"] = str(Decimal(out["unconfirmed"])/COIN)
return out
@command('n')
async def getmerkle(self, txid, height):
"""Get Merkle branch of a transaction included in a block. Electrum
uses this to verify transactions (Simple Payment Verification)."""
return await self.network.get_merkle_for_transaction(txid, int(height))
@command('n')
async def getservers(self):
"""Return the list of known servers (candidates for connecting)."""
return self.network.get_servers()
@command('')
async def version(self):
"""Return the version of Electrum."""
from .version import ELECTRUM_VERSION
return ELECTRUM_VERSION
@command('w')
async def getmpk(self, wallet: Abstract_Wallet = None):
"""Get master public key. Return your wallet\'s master public key"""
return wallet.get_master_public_key()
@command('wp')
async def getmasterprivate(self, password=None, wallet: Abstract_Wallet = None):
"""Get master private key. Return your wallet\'s master private key"""
return str(wallet.keystore.get_master_private_key(password))
@command('')
async def convert_xkey(self, xkey, xtype):
"""Convert xtype of a master key. e.g. xpub -> ypub"""
try:
node = BIP32Node.from_xkey(xkey)
except:
raise Exception('xkey should be a master public/private key')
return node._replace(xtype=xtype).to_xkey()
@command('wp')
async def getseed(self, password=None, wallet: Abstract_Wallet = None):
"""Get seed phrase. Print the generation seed of your wallet."""
s = wallet.get_seed(password)
return s
@command('wp')
async def importprivkey(self, privkey, password=None, wallet: Abstract_Wallet = None):
"""Import a private key."""
if not wallet.can_import_privkey():
return "Error: This type of wallet cannot import private keys. Try to create a new wallet with that key."
try:
addr = wallet.import_private_key(privkey, password)
out = "Keypair imported: " + addr
except Exception as e:
out = "Error: " + repr(e)
return out
def _resolver(self, x, wallet):
if x is None:
return None
out = wallet.contacts.resolve(x)
if out.get('type') == 'openalias' and self.nocheck is False and out.get('validated') is False:
raise Exception('cannot verify alias', x)
return out['address']
@command('n')
async def sweep(self, privkey, destination, fee=None, nocheck=False, imax=100):
"""Sweep private keys. Returns a transaction that spends UTXOs from
privkey to a destination address. The transaction is not
broadcasted."""
from .wallet import sweep
tx_fee = satoshis(fee)
privkeys = privkey.split()
self.nocheck = nocheck
#dest = self._resolver(destination)
tx = sweep(privkeys,
network=self.network,
config=self.config,
to_address=destination,
fee=tx_fee,
imax=imax)
return tx.serialize() if tx else None
@command('wp')
async def signmessage(self, address, message, password=None, wallet: Abstract_Wallet = None):
"""Sign a message with a key. Use quotes if your message contains
whitespaces"""
sig = wallet.sign_message(address, message, password)
return base64.b64encode(sig).decode('ascii')
@command('')
async def verifymessage(self, address, signature, message):
"""Verify a signature."""
sig = base64.b64decode(signature)
message = util.to_bytes(message)
return ecc.verify_message_with_address(address, sig, message)
@command('wp')
async def payto(self, destination, amount, fee=None, feerate=None, from_addr=None, from_coins=None, change_addr=None,
nocheck=False, unsigned=False, rbf=None, password=None, locktime=None, wallet: Abstract_Wallet = None):
"""Create a transaction. """
self.nocheck = nocheck
tx_fee = satoshis(fee)
domain_addr = from_addr.split(',') if from_addr else None
domain_coins = from_coins.split(',') if from_coins else None
change_addr = self._resolver(change_addr, wallet)
domain_addr = None if domain_addr is None else map(self._resolver, domain_addr, repeat(wallet))
amount_sat = satoshis(amount)
outputs = [PartialTxOutput.from_address_and_value(destination, amount_sat)]
tx = wallet.create_transaction(
outputs,
fee=tx_fee,
feerate=feerate,
change_addr=change_addr,
domain_addr=domain_addr,
domain_coins=domain_coins,
unsigned=unsigned,
rbf=rbf,
password=password,
locktime=locktime)
return tx.serialize()
@command('wp')
async def paytomany(self, outputs, fee=None, feerate=None, from_addr=None, from_coins=None, change_addr=None,
nocheck=False, unsigned=False, rbf=None, password=None, locktime=None, wallet: Abstract_Wallet = None):
"""Create a multi-output transaction. """
self.nocheck = nocheck
tx_fee = satoshis(fee)
domain_addr = from_addr.split(',') if from_addr else None
domain_coins = from_coins.split(',') if from_coins else None
change_addr = self._resolver(change_addr, wallet)
domain_addr = None if domain_addr is None else map(self._resolver, domain_addr, repeat(wallet))
final_outputs = []
for address, amount in outputs:
address = self._resolver(address, wallet)
amount_sat = satoshis(amount)
final_outputs.append(PartialTxOutput.from_address_and_value(address, amount_sat))
tx = wallet.create_transaction(
final_outputs,
fee=tx_fee,
feerate=feerate,
change_addr=change_addr,
domain_addr=domain_addr,
domain_coins=domain_coins,
unsigned=unsigned,
rbf=rbf,
password=password,
locktime=locktime)
return tx.serialize()
@command('w')
async def onchain_history(self, year=None, show_addresses=False, show_fiat=False, wallet: Abstract_Wallet = None):
"""Wallet onchain history. Returns the transaction history of your wallet."""
kwargs = {
'show_addresses': show_addresses,
}
if year:
import time
start_date = datetime.datetime(year, 1, 1)
end_date = datetime.datetime(year+1, 1, 1)
kwargs['from_timestamp'] = time.mktime(start_date.timetuple())
kwargs['to_timestamp'] = time.mktime(end_date.timetuple())
if show_fiat:
from .exchange_rate import FxThread
fx = FxThread(self.config, None)
kwargs['fx'] = fx
return json_normalize(wallet.get_detailed_history(**kwargs))
@command('w')
async def init_lightning(self, wallet: Abstract_Wallet = None):
"""Enable lightning payments"""
wallet.init_lightning()
return "Lightning keys have been created."
@command('w')
async def remove_lightning(self, wallet: Abstract_Wallet = None):
"""Disable lightning payments"""
wallet.remove_lightning()
@command('w')
async def lightning_history(self, show_fiat=False, wallet: Abstract_Wallet = None):
""" lightning history """
lightning_history = wallet.lnworker.get_history() if wallet.lnworker else []
return json_normalize(lightning_history)
@command('w')
async def setlabel(self, key, label, wallet: Abstract_Wallet = None):
"""Assign a label to an item. Item may be a bitcoin address or a
transaction ID"""
wallet.set_label(key, label)
@command('w')
async def listcontacts(self, wallet: Abstract_Wallet = None):
"""Show your list of contacts"""
return wallet.contacts
@command('w')
async def getalias(self, key, wallet: Abstract_Wallet = None):
"""Retrieve alias. Lookup in your list of contacts, and for an OpenAlias DNS record."""
return wallet.contacts.resolve(key)
@command('w')
async def searchcontacts(self, query, wallet: Abstract_Wallet = None):
"""Search through contacts, return matching entries. """
results = {}
for key, value in wallet.contacts.items():
if query.lower() in key.lower():
results[key] = value
return results
@command('w')
async def listaddresses(self, receiving=False, change=False, labels=False, frozen=False, unused=False, funded=False, balance=False, wallet: Abstract_Wallet = None):
"""List wallet addresses. Returns the list of all addresses in your wallet. Use optional arguments to filter the results."""
out = []
for addr in wallet.get_addresses():
if frozen and not wallet.is_frozen_address(addr):
continue
if receiving and wallet.is_change(addr):
continue
if change and not wallet.is_change(addr):
continue
if unused and wallet.is_used(addr):
continue
if funded and wallet.is_empty(addr):
continue
item = addr
if labels or balance:
item = (item,)
if balance:
item += (format_satoshis(sum(wallet.get_addr_balance(addr))),)
if labels:
item += (repr(wallet.labels.get(addr, '')),)
out.append(item)
return out
@command('n')
async def gettransaction(self, txid, wallet: Abstract_Wallet = None):
"""Retrieve a transaction. """
tx = None
if wallet:
tx = wallet.db.get_transaction(txid)
if tx is None:
raw = await self.network.get_transaction(txid)
if raw:
tx = Transaction(raw)
else:
raise Exception("Unknown transaction")
if tx.txid() != txid:
raise Exception("Mismatching txid")
return tx.serialize()
@command('')
async def encrypt(self, pubkey, message) -> str:
"""Encrypt a message with a public key. Use quotes if the message contains whitespaces."""
if not is_hex_str(pubkey):
raise Exception(f"pubkey must be a hex string instead of {repr(pubkey)}")
try:
message = to_bytes(message)
except TypeError:
raise Exception(f"message must be a string-like object instead of {repr(message)}")
public_key = ecc.ECPubkey(bfh(pubkey))
encrypted = public_key.encrypt_message(message)
return encrypted.decode('utf-8')
@command('wp')
async def decrypt(self, pubkey, encrypted, password=None, wallet: Abstract_Wallet = None) -> str:
"""Decrypt a message encrypted with a public key."""
if not is_hex_str(pubkey):
raise Exception(f"pubkey must be a hex string instead of {repr(pubkey)}")
if not isinstance(encrypted, (str, bytes, bytearray)):
raise Exception(f"encrypted must be a string-like object instead of {repr(encrypted)}")
decrypted = wallet.decrypt_message(pubkey, encrypted, password)
return decrypted.decode('utf-8')
@command('w')
async def getrequest(self, key, wallet: Abstract_Wallet = None):
"""Return a payment request"""
r = wallet.get_request(key)
if not r:
raise Exception("Request not found")
return wallet.export_request(r)
#@command('w')
#async def ackrequest(self, serialized):
# """<Not implemented>"""
# pass
@command('w')
async def list_requests(self, pending=False, expired=False, paid=False, wallet: Abstract_Wallet = None):
"""List the payment requests you made."""
if pending:
f = PR_UNPAID
elif expired:
f = PR_EXPIRED
elif paid:
f = PR_PAID
else:
f = None
out = wallet.get_sorted_requests()
if f is not None:
out = list(filter(lambda x: x.status==f, out))
return [wallet.export_request(x) for x in out]
@command('w')
async def createnewaddress(self, wallet: Abstract_Wallet = None):
"""Create a new receiving address, beyond the gap limit of the wallet"""
return wallet.create_new_address(False)
@command('w')
async def changegaplimit(self, new_limit, iknowwhatimdoing=False, wallet: Abstract_Wallet = None):
"""Change the gap limit of the wallet."""
if not iknowwhatimdoing:
raise Exception("WARNING: Are you SURE you want to change the gap limit?\n"
"It makes recovering your wallet from seed difficult!\n"
"Please do your research and make sure you understand the implications.\n"
"Typically only merchants and power users might want to do this.\n"
"To proceed, try again, with the --iknowwhatimdoing option.")
if not isinstance(wallet, Deterministic_Wallet):
raise Exception("This wallet is not deterministic.")
return wallet.change_gap_limit(new_limit)
@command('wn')
async def getminacceptablegap(self, wallet: Abstract_Wallet = None):
"""Returns the minimum value for gap limit that would be sufficient to discover all
known addresses in the wallet.
"""
if not isinstance(wallet, Deterministic_Wallet):
raise Exception("This wallet is not deterministic.")
if not wallet.is_up_to_date():
raise NotSynchronizedException("Wallet not fully synchronized.")
return wallet.min_acceptable_gap()
@command('w')
async def getunusedaddress(self, wallet: Abstract_Wallet = None):
"""Returns the first unused address of the wallet, or None if all addresses are used.
An address is considered as used if it has received a transaction, or if it is used in a payment request."""
return wallet.get_unused_address()
@command('w')
async def add_request(self, amount, memo='', expiration=3600, force=False, wallet: Abstract_Wallet = None):
"""Create a payment request, using the first unused address of the wallet.
The address will be considered as used after this operation.
If no payment is received, the address will be considered as unused if the payment request is deleted from the wallet."""
addr = wallet.get_unused_address()
if addr is None:
if force:
addr = wallet.create_new_address(False)
else:
return False
amount = satoshis(amount)
expiration = int(expiration) if expiration else None
req = wallet.make_payment_request(addr, amount, memo, expiration)
wallet.add_payment_request(req)
return wallet.export_request(req)
@command('wn')
async def add_lightning_request(self, amount, memo='', expiration=3600, wallet: Abstract_Wallet = None):
amount_sat = int(satoshis(amount))
key = await wallet.lnworker._add_request_coro(amount_sat, memo, expiration)
return wallet.get_formatted_request(key)
@command('w')
async def addtransaction(self, tx, wallet: Abstract_Wallet = None):
""" Add a transaction to the wallet history """
tx = Transaction(tx)
if not wallet.add_transaction(tx):
return False
wallet.save_db()
return tx.txid()
@command('wp')
async def signrequest(self, address, password=None, wallet: Abstract_Wallet = None):
"Sign payment request with an OpenAlias"
alias = self.config.get('alias')
if not alias:
raise Exception('No alias in your configuration')
alias_addr = wallet.contacts.resolve(alias)['address']
wallet.sign_payment_request(address, alias, alias_addr, password)
@command('w')
async def rmrequest(self, address, wallet: Abstract_Wallet = None):
"""Remove a payment request"""
return wallet.remove_payment_request(address)
@command('w')
async def clear_requests(self, wallet: Abstract_Wallet = None):
"""Remove all payment requests"""
wallet.clear_requests()
return True
@command('w')
async def clear_invoices(self, wallet: Abstract_Wallet = None):
"""Remove all invoices"""
wallet.clear_invoices()
return True
@command('n')
async def notify(self, address: str, URL: Optional[str]):
"""Watch an address. Every time the address changes, a http POST is sent to the URL.
Call with an empty URL to stop watching an address.
"""
if not hasattr(self, "_notifier"):
self._notifier = Notifier(self.network)
if URL:
await self._notifier.start_watching_addr(address, URL)
else:
await self._notifier.stop_watching_addr(address)
return True
@command('wn')
async def is_synchronized(self, wallet: Abstract_Wallet = None):
""" return wallet synchronization status """
return wallet.is_up_to_date()
@command('n')
async def getfeerate(self, fee_method=None, fee_level=None):
"""Return current suggested fee rate (in sat/kvByte), according to config
settings or supplied parameters.
"""
if fee_method is None:
dyn, mempool = None, None
elif fee_method.lower() == 'static':
dyn, mempool = False, False
elif fee_method.lower() == 'eta':
dyn, mempool = True, False
elif fee_method.lower() == 'mempool':
dyn, mempool = True, True
else:
raise Exception('Invalid fee estimation method: {}'.format(fee_method))
if fee_level is not None:
fee_level = Decimal(fee_level)
return self.config.fee_per_kb(dyn=dyn, mempool=mempool, fee_level=fee_level)
@command('w')
async def removelocaltx(self, txid, wallet: Abstract_Wallet = None):
"""Remove a 'local' transaction from the wallet, and its dependent
transactions.
"""
if not is_hash256_str(txid):
raise Exception(f"{repr(txid)} is not a txid")
height = wallet.get_tx_height(txid).height
to_delete = {txid}
if height != TX_HEIGHT_LOCAL:
raise Exception(f'Only local transactions can be removed. '
f'This tx has height: {height} != {TX_HEIGHT_LOCAL}')
to_delete |= wallet.get_depending_transactions(txid)
for tx_hash in to_delete:
wallet.remove_transaction(tx_hash)
wallet.save_db()
@command('wn')
async def get_tx_status(self, txid, wallet: Abstract_Wallet = None):
"""Returns some information regarding the tx. For now, only confirmations.
The transaction must be related to the wallet.
"""
if not is_hash256_str(txid):
raise Exception(f"{repr(txid)} is not a txid")
if not wallet.db.get_transaction(txid):
raise Exception("Transaction not in wallet.")
return {
"confirmations": wallet.get_tx_height(txid).conf,
}
@command('')
async def help(self):
# for the python console
return sorted(known_commands.keys())
# lightning network commands
@command('wn')
async def add_peer(self, connection_string, timeout=20, gossip=False, wallet: Abstract_Wallet = None):
lnworker = self.network.lngossip if gossip else wallet.lnworker
await lnworker.add_peer(connection_string)
return True
@command('wn')
async def list_peers(self, gossip=False, wallet: Abstract_Wallet = None):
lnworker = self.network.lngossip if gossip else wallet.lnworker
return [{
'node_id':p.pubkey.hex(),
'address':p.transport.name(),
'initialized':p.is_initialized(),
'features': str(LnFeatures(p.features)),
'channels': [c.funding_outpoint.to_str() for c in p.channels.values()],
} for p in lnworker.peers.values()]
@command('wpn')
async def open_channel(self, connection_string, amount, push_amount=0, password=None, wallet: Abstract_Wallet = None):
funding_sat = satoshis(amount)
push_sat = satoshis(push_amount)
dummy_output = PartialTxOutput.from_address_and_value(ln_dummy_address(), funding_sat)
funding_tx = wallet.mktx(outputs = [dummy_output], rbf=False, sign=False, nonlocal_only=True)
chan, funding_tx = await wallet.lnworker._open_channel_coroutine(connect_str=connection_string,
funding_tx=funding_tx,
funding_sat=funding_sat,
push_sat=push_sat,
password=password)
return chan.funding_outpoint.to_str()
@command('')
async def decode_invoice(self, invoice: str):
invoice = LNInvoice.from_bech32(invoice)
return invoice.to_debug_json()
@command('wn')
async def lnpay(self, invoice, attempts=1, timeout=30, wallet: Abstract_Wallet = None):
lnworker = wallet.lnworker
lnaddr = lnworker._check_invoice(invoice)
payment_hash = lnaddr.paymenthash
wallet.save_invoice(LNInvoice.from_bech32(invoice))
success, log = await lnworker._pay(invoice, attempts=attempts)
return {
'payment_hash': payment_hash.hex(),
'success': success,
'preimage': lnworker.get_preimage(payment_hash).hex() if success else None,
'log': [x.formatted_tuple() for x in log]
}
@command('w')
async def nodeid(self, wallet: Abstract_Wallet = None):
listen_addr = self.config.get('lightning_listen')
return bh2u(wallet.lnworker.node_keypair.pubkey) + (('@' + listen_addr) if listen_addr else '')
@command('w')
async def list_channels(self, wallet: Abstract_Wallet = None):
# we output the funding_outpoint instead of the channel_id because lnd uses channel_point (funding outpoint) to identify channels
from .lnutil import LOCAL, REMOTE, format_short_channel_id
l = list(wallet.lnworker.channels.items())
return [
{
'short_channel_id': format_short_channel_id(chan.short_channel_id) if chan.short_channel_id else None,
'channel_id': bh2u(chan.channel_id),
'channel_point': chan.funding_outpoint.to_str(),
'state': chan.get_state().name,
'peer_state': chan.peer_state.name,
'remote_pubkey': bh2u(chan.node_id),
'local_balance': chan.balance(LOCAL)//1000,
'remote_balance': chan.balance(REMOTE)//1000,
'local_reserve': chan.config[REMOTE].reserve_sat, # their config has our reserve
'remote_reserve': chan.config[LOCAL].reserve_sat,
'local_unsettled_sent': chan.balance_tied_up_in_htlcs_by_direction(LOCAL, direction=SENT) // 1000,
'remote_unsettled_sent': chan.balance_tied_up_in_htlcs_by_direction(REMOTE, direction=SENT) // 1000,
} for channel_id, chan in l
]
@command('wn')
async def dumpgraph(self, wallet: Abstract_Wallet = None):
return list(map(bh2u, wallet.lnworker.channel_db.nodes.keys()))
@command('n')
async def inject_fees(self, fees):
import ast
self.network.config.fee_estimates = ast.literal_eval(fees)
self.network.notify('fee')
@command('wn')
async def enable_htlc_settle(self, b: bool, wallet: Abstract_Wallet = None):
e = wallet.lnworker.enable_htlc_settle
e.set() if b else e.clear()
@command('n')
async def clear_ln_blacklist(self):
self.network.path_finder.blacklist.clear()
@command('w')
async def list_invoices(self, wallet: Abstract_Wallet = None):
l = wallet.get_invoices()
return [wallet.export_invoice(x) for x in l]
@command('wn')
async def close_channel(self, channel_point, force=False, wallet: Abstract_Wallet = None):
txid, index = channel_point.split(':')
chan_id, _ = channel_id_from_funding_tx(txid, int(index))
coro = wallet.lnworker.force_close_channel(chan_id) if force else wallet.lnworker.close_channel(chan_id)
return await coro
@command('w')
async def export_channel_backup(self, channel_point, wallet: Abstract_Wallet = None):
txid, index = channel_point.split(':')
chan_id, _ = channel_id_from_funding_tx(txid, int(index))
return wallet.lnworker.export_channel_backup(chan_id)
@command('w')
async def import_channel_backup(self, encrypted, wallet: Abstract_Wallet = None):
return wallet.lnbackups.import_channel_backup(encrypted)
@command('wn')
async def get_channel_ctx(self, channel_point, iknowwhatimdoing=False, wallet: Abstract_Wallet = None):
""" return the current commitment transaction of a channel """
if not iknowwhatimdoing:
raise Exception("WARNING: this command is potentially unsafe.\n"
"To proceed, try again, with the --iknowwhatimdoing option.")
txid, index = channel_point.split(':')
chan_id, _ = channel_id_from_funding_tx(txid, int(index))
chan = wallet.lnworker.channels[chan_id]
tx = chan.force_close_tx()
return tx.serialize()
@command('wn')
async def get_watchtower_ctn(self, channel_point, wallet: Abstract_Wallet = None):
""" return the local watchtower's ctn of channel. used in regtests """
return await self.network.local_watchtower.sweepstore.get_ctn(channel_point, None)
@command('wnp')
async def normal_swap(self, onchain_amount, lightning_amount, password=None, wallet: Abstract_Wallet = None):
"""
Normal submarine swap: send on-chain BTC, receive on Lightning
Note that your funds will be locked for 24h if you do not have enough incoming capacity.
"""
sm = wallet.lnworker.swap_manager
if lightning_amount == 'dryrun':
await sm.get_pairs()
onchain_amount_sat = satoshis(onchain_amount)
lightning_amount_sat = sm.get_recv_amount(onchain_amount_sat, is_reverse=False)
txid = None
elif onchain_amount == 'dryrun':
await sm.get_pairs()
lightning_amount_sat = satoshis(lightning_amount)
onchain_amount_sat = sm.get_send_amount(lightning_amount_sat, is_reverse=False)
txid = None
else:
lightning_amount_sat = satoshis(lightning_amount)
onchain_amount_sat = satoshis(onchain_amount)
txid = await wallet.lnworker.swap_manager.normal_swap(lightning_amount_sat, onchain_amount_sat, password)
return {
'txid': txid,
'lightning_amount': format_satoshis(lightning_amount_sat),
'onchain_amount': format_satoshis(onchain_amount_sat),
}
@command('wn')
async def reverse_swap(self, lightning_amount, onchain_amount, wallet: Abstract_Wallet = None):
"""Reverse submarine swap: send on Lightning, receive on-chain
"""
sm = wallet.lnworker.swap_manager
if onchain_amount == 'dryrun':
await sm.get_pairs()
lightning_amount_sat = satoshis(lightning_amount)
onchain_amount_sat = sm.get_recv_amount(lightning_amount_sat, is_reverse=True)
success = None
elif lightning_amount == 'dryrun':
await sm.get_pairs()
onchain_amount_sat = satoshis(onchain_amount)
lightning_amount_sat = sm.get_send_amount(onchain_amount_sat, is_reverse=True)
success = None
else:
lightning_amount_sat = satoshis(lightning_amount)
onchain_amount_sat = satoshis(onchain_amount)
success = await wallet.lnworker.swap_manager.reverse_swap(lightning_amount_sat, onchain_amount_sat)
return {
'success': success,
'lightning_amount': format_satoshis(lightning_amount_sat),
'onchain_amount': format_satoshis(onchain_amount_sat),
}
def eval_bool(x: str) -> bool:
if x == 'false': return False
if x == 'true': return True
try:
return bool(ast.literal_eval(x))
except:
return bool(x)
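# Illustrative behaviour of eval_bool above, worked out from the code rather than
# taken from the original sources: 'false' and 'true' are matched explicitly,
# anything ast.literal_eval can parse is passed through bool(), and any other
# non-empty string falls back to bool(str), i.e. True.
#
#   eval_bool('false')  -> False
#   eval_bool('true')   -> True
#   eval_bool('0')      -> False   (literal_eval yields 0)
#   eval_bool('')       -> False   (literal_eval fails, bool('') is False)
#   eval_bool('no')     -> True    (literal_eval fails, bool('no') is True)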
param_descriptions = {
'privkey': 'Private key. Type \'?\' to get a prompt.',
'destination': 'Bitcoin address, contact or alias',
'address': 'Bitcoin address',
'seed': 'Seed phrase',
'txid': 'Transaction ID',
'pos': 'Position',
'height': 'Block height',
'tx': 'Serialized transaction (hexadecimal)',
'key': 'Variable name',
'pubkey': 'Public key',
'message': 'Clear text message. Use quotes if it contains spaces.',
'encrypted': 'Encrypted message',
'amount': 'Amount to be sent (in BTC). Type \'!\' to send the maximum available.',
'requested_amount': 'Requested amount (in BTC).',
'outputs': 'list of ["address", amount]',
'redeem_script': 'redeem script (hexadecimal)',
'lightning_amount': "Amount sent or received in a submarine swap. Set it to 'dryrun' to receive a value",
'onchain_amount': "Amount sent or received in a submarine swap. Set it to 'dryrun' to receive a value",
}
command_options = {
'password': ("-W", "Password"),
'new_password':(None, "New Password"),
'encrypt_file':(None, "Whether the file on disk should be encrypted with the provided password"),
'receiving': (None, "Show only receiving addresses"),
'change': (None, "Show only change addresses"),
'frozen': (None, "Show only frozen addresses"),
'unused': (None, "Show only unused addresses"),
'funded': (None, "Show only funded addresses"),
'balance': ("-b", "Show the balances of listed addresses"),
'labels': ("-l", "Show the labels of listed addresses"),
'nocheck': (None, "Do not verify aliases"),
'imax': (None, "Maximum number of inputs"),
'fee': ("-f", "Transaction fee (absolute, in BTC)"),
'feerate': (None, "Transaction fee rate (in sat/byte)"),
'from_addr': ("-F", "Source address (must be a wallet address; use sweep to spend from non-wallet address)."),
'from_coins': (None, "Source coins (must be in wallet; use sweep to spend from non-wallet address)."),
'change_addr': ("-c", "Change address. Default is a spare address, or the source address if it's not in the wallet"),
'nbits': (None, "Number of bits of entropy"),
'seed_type': (None, "The type of seed to create, e.g. 'standard' or 'segwit'"),
'language': ("-L", "Default language for wordlist"),
'passphrase': (None, "Seed extension"),
'privkey': (None, "Private key. Set to '?' to get a prompt."),
'unsigned': ("-u", "Do not sign transaction"),
'rbf': (None, "Whether to signal opt-in Replace-By-Fee in the transaction (true/false)"),
'locktime': (None, "Set locktime block number"),
'domain': ("-D", "List of addresses"),
'memo': ("-m", "Description of the request"),
'expiration': (None, "Time in seconds"),
'attempts': (None, "Number of payment attempts"),
'timeout': (None, "Timeout in seconds"),
'force': (None, "Create new address beyond gap limit, if no more addresses are available."),
'pending': (None, "Show only pending requests."),
'push_amount': (None, 'Push initial amount (in BTC)'),
'expired': (None, "Show only expired requests."),
'paid': (None, "Show only paid requests."),
'show_addresses': (None, "Show input and output addresses"),
'show_fiat': (None, "Show fiat value of transactions"),
'show_fees': (None, "Show miner fees paid by transactions"),
'year': (None, "Show history for a given year"),
'fee_method': (None, "Fee estimation method to use"),
'fee_level': (None, "Float between 0.0 and 1.0, representing fee slider position"),
'from_height': (None, "Only show transactions that confirmed after given block height"),
'to_height': (None, "Only show transactions that confirmed before given block height"),
'iknowwhatimdoing': (None, "Acknowledge that I understand the full implications of what I am about to do"),
'gossip': (None, "Apply command to gossip node instead of wallet"),
}
# don't use floats because of rounding errors
from .transaction import convert_raw_tx_to_hex
json_loads = lambda x: json.loads(x, parse_float=lambda x: str(Decimal(x)))
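# For example (values purely illustrative): plain json.loads('{"fee": 0.1}') would
# hand the command the binary float 0.1000000000000000055..., whereas the parse_float
# hook above turns the literal "0.1" into str(Decimal('0.1')) == '0.1', preserving
# exactly what the user typed for later Decimal arithmetic.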
arg_types = {
'num': int,
'nbits': int,
'imax': int,
'year': int,
'from_height': int,
'to_height': int,
'tx': convert_raw_tx_to_hex,
'pubkeys': json_loads,
'jsontx': json_loads,
'inputs': json_loads,
'outputs': json_loads,
'fee': lambda x: str(Decimal(x)) if x is not None else None,
'amount': lambda x: str(Decimal(x)) if x != '!' else '!',
'locktime': int,
'fee_method': str,
'fee_level': json_loads,
'encrypt_file': eval_bool,
'rbf': eval_bool,
'timeout': float,
'attempts': int,
}
config_variables = {
'addrequest': {
'ssl_privkey': 'Path to your SSL private key, needed to sign the request.',
'ssl_chain': 'Chain of SSL certificates, needed for signed requests. Put your certificate at the top and the root CA at the end',
'url_rewrite': 'Parameters passed to str.replace(), in order to create the r= part of bitcoin: URIs. Example: \"(\'file:///var/www/\',\'https://electrum.org/\')\"',
},
'listrequests':{
'url_rewrite': 'Parameters passed to str.replace(), in order to create the r= part of bitcoin: URIs. Example: \"(\'file:///var/www/\',\'https://electrum.org/\')\"',
}
}
def set_default_subparser(self, name, args=None):
"""see http://stackoverflow.com/questions/5176691/argparse-how-to-specify-a-default-subcommand"""
subparser_found = False
for arg in sys.argv[1:]:
if arg in ['-h', '--help']: # global help if no subparser
break
else:
for x in self._subparsers._actions:
if not isinstance(x, argparse._SubParsersAction):
continue
for sp_name in x._name_parser_map.keys():
if sp_name in sys.argv[1:]:
subparser_found = True
if not subparser_found:
# insert default in first position, this implies no
# global options without a sub_parsers specified
if args is None:
sys.argv.insert(1, name)
else:
args.insert(0, name)
argparse.ArgumentParser.set_default_subparser = set_default_subparser
# workaround https://bugs.python.org/issue23058
# see https://github.com/nickstenning/honcho/pull/121
def subparser_call(self, parser, namespace, values, option_string=None):
from argparse import ArgumentError, SUPPRESS, _UNRECOGNIZED_ARGS_ATTR
parser_name = values[0]
arg_strings = values[1:]
# set the parser name if requested
if self.dest is not SUPPRESS:
setattr(namespace, self.dest, parser_name)
# select the parser
try:
parser = self._name_parser_map[parser_name]
except KeyError:
tup = parser_name, ', '.join(self._name_parser_map)
msg = _('unknown parser {!r} (choices: {})').format(*tup)
raise ArgumentError(self, msg)
# parse all the remaining options into the namespace
# store any unrecognized options on the object, so that the top
# level parser can decide what to do with them
namespace, arg_strings = parser.parse_known_args(arg_strings, namespace)
if arg_strings:
vars(namespace).setdefault(_UNRECOGNIZED_ARGS_ATTR, [])
getattr(namespace, _UNRECOGNIZED_ARGS_ATTR).extend(arg_strings)
argparse._SubParsersAction.__call__ = subparser_call
def add_network_options(parser):
parser.add_argument("-f", "--serverfingerprint", dest="serverfingerprint", default=None, help="only allow connecting to servers with a matching SSL certificate SHA256 fingerprint." + " " +
"To calculate this yourself: '$ openssl x509 -noout -fingerprint -sha256 -inform pem -in mycertfile.crt'. Enter as 64 hex chars.")
parser.add_argument("-1", "--oneserver", action="store_true", dest="oneserver", default=None, help="connect to one server only")
parser.add_argument("-s", "--server", dest="server", default=None, help="set server host:port:protocol, where protocol is either t (tcp) or s (ssl)")
parser.add_argument("-p", "--proxy", dest="proxy", default=None, help="set proxy [type:]host[:port] (or 'none' to disable proxy), where type is socks4,socks5 or http")
parser.add_argument("--noonion", action="store_true", dest="noonion", default=None, help="do not try to connect to onion servers")
parser.add_argument("--skipmerklecheck", action="store_true", dest="skipmerklecheck", default=None, help="Tolerate invalid merkle proofs from server")
def add_global_options(parser):
group = parser.add_argument_group('global options')
group.add_argument("-v", dest="verbosity", help="Set verbosity (log levels)", default='')
group.add_argument("-V", dest="verbosity_shortcuts", help="Set verbosity (shortcut-filter list)", default='')
group.add_argument("-D", "--dir", dest="electrum_path", help="electrum directory")
group.add_argument("-P", "--portable", action="store_true", dest="portable", default=False, help="Use local 'electrum_data' directory")
group.add_argument("--testnet", action="store_true", dest="testnet", default=False, help="Use Testnet")
group.add_argument("--regtest", action="store_true", dest="regtest", default=False, help="Use Regtest")
group.add_argument("--simnet", action="store_true", dest="simnet", default=False, help="Use Simnet")
group.add_argument("-o", "--offline", action="store_true", dest="offline", default=False, help="Run offline")
def add_wallet_option(parser):
parser.add_argument("-w", "--wallet", dest="wallet_path", help="wallet path")
parser.add_argument("--forgetconfig", action="store_true", dest="forget_config", default=False, help="Forget config on exit")
def get_parser():
# create main parser
parser = argparse.ArgumentParser(
epilog="Run 'electrum help <command>' to see the help for a command")
add_global_options(parser)
subparsers = parser.add_subparsers(dest='cmd', metavar='<command>')
# gui
parser_gui = subparsers.add_parser('gui', description="Run Electrum's Graphical User Interface.", help="Run GUI (default)")
parser_gui.add_argument("url", nargs='?', default=None, help="bitcoin URI (or bip70 file)")
parser_gui.add_argument("-g", "--gui", dest="gui", help="select graphical user interface", choices=['qt', 'kivy', 'text', 'stdio'])
parser_gui.add_argument("-m", action="store_true", dest="hide_gui", default=False, help="hide GUI on startup")
parser_gui.add_argument("-L", "--lang", dest="language", default=None, help="default language used in GUI")
parser_gui.add_argument("--daemon", action="store_true", dest="daemon", default=False, help="keep daemon running after GUI is closed")
add_wallet_option(parser_gui)
add_network_options(parser_gui)
add_global_options(parser_gui)
# daemon
parser_daemon = subparsers.add_parser('daemon', help="Run Daemon")
parser_daemon.add_argument("-d", "--detached", action="store_true", dest="detach", default=False, help="run daemon in detached mode")
add_network_options(parser_daemon)
add_global_options(parser_daemon)
# commands
for cmdname in sorted(known_commands.keys()):
cmd = known_commands[cmdname]
p = subparsers.add_parser(cmdname, help=cmd.help, description=cmd.description)
for optname, default in zip(cmd.options, cmd.defaults):
if optname in ['wallet_path', 'wallet']:
add_wallet_option(p)
continue
a, help = command_options[optname]
b = '--' + optname
action = "store_true" if default is False else 'store'
args = (a, b) if a else (b,)
if action == 'store':
_type = arg_types.get(optname, str)
p.add_argument(*args, dest=optname, action=action, default=default, help=help, type=_type)
else:
p.add_argument(*args, dest=optname, action=action, default=default, help=help)
add_global_options(p)
for param in cmd.params:
if param in ['wallet_path', 'wallet']:
continue
h = param_descriptions.get(param, '')
_type = arg_types.get(param, str)
p.add_argument(param, help=h, type=_type)
cvh = config_variables.get(cmdname)
if cvh:
group = p.add_argument_group('configuration variables', '(set with setconfig/getconfig)')
for k, v in cvh.items():
group.add_argument(k, nargs='?', help=v)
# 'gui' is the default command
parser.set_default_subparser('gui')
return parser
| 1 | 13,791 | The default should be "False", as "True" would be a significant change in behaviour. | spesmilo-electrum | py |
@@ -161,8 +161,16 @@ class Package(object):
# Serialize DataFrame to chosen format
if enumformat is PackageFormat.HDF5:
- with pd.HDFStore(storepath, mode='w') as store:
- store[self.DF_NAME] = df
+ # HACK: Force the use of old pickle to ensure Python 2/3 compatibility.
+ from pandas.compat import cPickle
+ old_protocol = cPickle.HIGHEST_PROTOCOL
+ try:
+ cPickle.HIGHEST_PROTOCOL = 2
+ with pd.HDFStore(storepath, mode='w') as store:
+ store[self.DF_NAME] = df
+ finally:
+ cPickle.HIGHEST_PROTOCOL = old_protocol
+
elif enumformat is PackageFormat.PARQUET:
# switch parquet lib
parqlib = self.get_parquet_lib() | 1 | from enum import Enum
import json
import os
from shutil import copyfile
import tempfile
import zlib
import pandas as pd
import requests
from six import iteritems
try:
import fastparquet
except ImportError:
fastparquet = None
try:
import pyarrow as pa
from pyarrow import parquet
except ImportError:
pa = None
try:
from pyspark.sql import SparkSession
except ImportError:
SparkSession = None
from .const import TargetType
from .core import (decode_node, encode_node, hash_contents,
FileNode, RootNode, GroupNode, TableNode,
PackageFormat)
from .hashing import digest_file
ZLIB_LEVEL = 2
ZLIB_METHOD = zlib.DEFLATED # The only supported one.
ZLIB_WBITS = zlib.MAX_WBITS | 16 # Add a gzip header and checksum.
CHUNK_SIZE = 4096
class ParquetLib(Enum):
ARROW = 'pyarrow'
FASTPARQUET = 'fastparquet'
SPARK = 'pyspark'
class PackageException(Exception):
"""
Exception class for Package handling
"""
pass
class Package(object):
BUILD_DIR = 'build'
OBJ_DIR = 'objs'
TMP_OBJ_DIR = 'objs/tmp'
DF_NAME = 'df'
__parquet_lib = None
@classmethod
def get_parquet_lib(cls):
if not cls.__parquet_lib:
parq_env = os.environ.get('QUILT_PARQUET_LIBRARY')
if parq_env:
cls.__parquet_lib = ParquetLib(parq_env)
else:
if SparkSession is not None:
cls.__parquet_lib = ParquetLib.SPARK
elif pa is not None:
cls.__parquet_lib = ParquetLib.ARROW
elif fastparquet is not None:
cls.__parquet_lib = ParquetLib.FASTPARQUET
else:
msg = "One of the following libraries is requried to read"
msg += " Parquet packages: %s" % [l.value for l in ParquetLib]
raise PackageException(msg)
return cls.__parquet_lib
@classmethod
def reset_parquet_lib(cls):
cls.__parquet_lib = None
def __init__(self, user, package, path, pkg_dir):
self._user = user
self._package = package
self._pkg_dir = pkg_dir
self._path = path
def file(self, hash_list):
"""
Returns the path to an object file that matches the given hash.
"""
assert isinstance(hash_list, list)
assert len(hash_list) == 1, "File objects must be contained in one file."
filehash = hash_list[0]
return self._object_path(filehash)
def _read_hdf5(self, hash_list):
assert len(hash_list) == 1, "Multi-file DFs not supported in HDF5."
filehash = hash_list[0]
with pd.HDFStore(self._object_path(filehash), 'r') as store:
return store.get(self.DF_NAME)
def _read_parquet_arrow(self, hash_list):
if pa is None:
raise PackageException("Module pyarrow is required for ArrowPackage.")
assert len(hash_list) == 1, "Multi-file DFs not supported for Arrow Packages (yet)."
filehash = hash_list[0]
nt = 8
fpath = self._object_path(filehash)
table = parquet.read_table(fpath, nthreads=nt)
df = table.to_pandas()
return df
def _read_parquet_fastparquet(self, hash_list):
assert len(hash_list) == 1, "Multi-file DFs not supported yet."
filehash = hash_list[0]
pfile = fastparquet.ParquetFile(self._object_path(filehash))
return pfile.to_pandas()
def _read_parquet_spark(self, hash_list):
if SparkSession is None:
raise PackageException("Module SparkSession from pyspark.sql is required for " +
"SparkPackage.")
spark = SparkSession.builder.getOrCreate()
assert len(hash_list) == 1, "Multi-file DFs not supported yet."
filehash = hash_list[0]
df = spark.read.parquet(self._object_path(filehash))
return df
def _dataframe(self, hash_list, pkgformat):
"""
Creates a DataFrame from a set of objects (identified by hashes).
"""
enumformat = PackageFormat(pkgformat)
if enumformat is PackageFormat.HDF5:
return self._read_hdf5(hash_list)
elif enumformat is PackageFormat.PARQUET:
parqlib = self.get_parquet_lib()
if parqlib is ParquetLib.SPARK:
return self._read_parquet_spark(hash_list)
elif parqlib is ParquetLib.ARROW:
return self._read_parquet_arrow(hash_list)
elif parqlib is ParquetLib.FASTPARQUET:
return self._read_parquet_fastparquet(hash_list)
else:
assert False, "Unimplemented Parquet Library %s" % parqlib
else:
assert False, "Unimplemented package format: %s" % enumformat
def save_df(self, df, name, path, ext, target):
"""
Save a DataFrame to the store.
"""
enumformat = PackageFormat(self.get_contents().format)
buildfile = name.lstrip('/').replace('/', '.')
storepath = self._temporary_object_path(buildfile)
# Serialize DataFrame to chosen format
if enumformat is PackageFormat.HDF5:
with pd.HDFStore(storepath, mode='w') as store:
store[self.DF_NAME] = df
elif enumformat is PackageFormat.PARQUET:
# switch parquet lib
parqlib = self.get_parquet_lib()
if parqlib is ParquetLib.FASTPARQUET:
fastparquet.write(storepath, df)
elif parqlib is ParquetLib.ARROW:
table = pa.Table.from_pandas(df)
parquet.write_table(table, storepath)
else:
assert False, "Unimplemented ParquetLib %s" % parqlib
else:
assert False, "Unimplemented PackageFormat %s" % enumformat
# Move serialized DataFrame to object store
filehash = digest_file(storepath)
self._add_to_contents(buildfile, filehash, ext, path, target)
os.rename(storepath, self._object_path(filehash))
def save_file(self, srcfile, name, path):
"""
Save a (raw) file to the store.
"""
filehash = digest_file(srcfile)
fullname = name.lstrip('/').replace('/', '.')
self._add_to_contents(fullname, filehash, '', path, 'file')
objpath = self._object_path(filehash)
if not os.path.exists(objpath):
copyfile(srcfile, objpath)
def get_contents(self):
"""
Returns a dictionary with the contents of the package.
"""
try:
with open(self._path, 'r') as contents_file:
contents = json.load(contents_file, object_hook=decode_node)
if not isinstance(contents, RootNode):
contents = RootNode(contents.children, PackageFormat.default.value)
except IOError:
contents = RootNode(dict(), PackageFormat.default)
return contents
def clear_contents(self):
"""
Removes the package's contents file.
"""
os.remove(self._path)
def save_contents(self, contents):
"""
Saves an updated version of the package's contents.
"""
with open(self._path, 'w') as contents_file:
json.dump(contents, contents_file, default=encode_node, indent=2, sort_keys=True)
def init_contents(self, pkgformat):
# Verify the format is recognized
enumformat = PackageFormat(pkgformat)
contents = RootNode(dict(), enumformat.value)
self.save_contents(contents)
def get(self, path):
"""
Read a group or object from the store.
"""
key = path.lstrip('/')
ipath = key.split('/') if key else []
ptr = self.get_contents()
pkgformat = ptr.format
path_so_far = []
for node_name in ipath:
path_so_far += [node_name]
ptr = ptr.children.get(node_name)
if ptr is None:
raise PackageException("Key {path} Not Found in Package {owner}/{pkg}".format(
path="/".join(path_so_far),
owner=self._user,
pkg=self._package))
node = ptr
if isinstance(node, GroupNode):
return node
elif isinstance(node, TableNode):
return self._dataframe(node.hashes, pkgformat)
elif isinstance(node, FileNode):
return self.file(node.hashes)
else:
assert False, "Unhandled Node {node}".format(node=node)
def get_hash(self):
"""
Returns the hash digest of the package data.
"""
return hash_contents(self.get_contents())
def get_path(self):
"""
Returns the path to the package's contents file.
"""
return self._path
def install(self, contents, urls):
"""
Download and install a package locally.
"""
# Download individual object files and store
# in object dir. Verify individual file hashes.
# Verify global hash?
for download_hash, url in iteritems(urls):
# download and install
response = requests.get(url, stream=True)
if not response.ok:
msg = "Download {hash} failed: error {code}"
raise PackageException(msg.format(hash=download_hash, code=response.status_code))
local_filename = self._object_path(download_hash)
with open(local_filename, 'wb') as output_file:
# `requests` will automatically un-gzip the content, as long as
# the 'Content-Encoding: gzip' header is set.
for chunk in response.iter_content(chunk_size=CHUNK_SIZE):
if chunk: # filter out keep-alive new chunks
output_file.write(chunk)
file_hash = digest_file(local_filename)
if file_hash != download_hash:
os.remove(local_filename)
raise PackageException("Mismatched hash! Expected %s, got %s." %
(download_hash, file_hash))
self.save_contents(contents)
class UploadFile(object):
"""
Helper class to manage temporary package files uploaded by push.
"""
def __init__(self, store, objhash):
self._store = store
self._hash = objhash
self._temp_file = None
def __enter__(self):
self._temp_file = tempfile.TemporaryFile()
with open(self._store._object_path(self._hash), 'rb') as input_file:
zlib_obj = zlib.compressobj(ZLIB_LEVEL, ZLIB_METHOD, ZLIB_WBITS)
for chunk in iter(lambda: input_file.read(CHUNK_SIZE), b''):
self._temp_file.write(zlib_obj.compress(chunk))
self._temp_file.write(zlib_obj.flush())
self._temp_file.seek(0)
return self._temp_file
def __exit__(self, type, value, traceback):
self._temp_file.close()
def tempfile(self, hash):
"""
Create and return a temporary file for uploading to a registry.
"""
return self.UploadFile(self, hash)
def _object_path(self, objhash):
"""
Returns the path to an object file based on its hash.
"""
return os.path.join(self._pkg_dir, self.OBJ_DIR, objhash)
def _temporary_object_path(self, name):
"""
Returns the path to a temporary object, before we know its hash.
"""
return os.path.join(self._pkg_dir, self.TMP_OBJ_DIR, name)
def _add_to_contents(self, fullname, objhash, ext, path, target):
"""
Adds an object (name-hash mapping) to the package's contents.
"""
contents = self.get_contents()
ipath = fullname.split('.')
leaf = ipath.pop()
ptr = contents
for node in ipath:
ptr = ptr.children.setdefault(node, GroupNode(dict()))
try:
target_type = TargetType(target)
if target_type is TargetType.PANDAS:
node_cls = TableNode
elif target_type is TargetType.FILE:
node_cls = FileNode
else:
assert False, "Unhandled TargetType {tt}".format(tt=target_type)
except ValueError:
raise PackageException("Unrecognized target {tgt}".format(tgt=target))
ptr.children[leaf] = node_cls(
hashes=[objhash],
metadata=dict(
q_ext=ext,
q_path=path,
q_target=target
)
)
self.save_contents(contents)
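# Editor's sketch, not part of the original quilt module: the HDF5 patch above pins
# pandas.compat.cPickle.HIGHEST_PROTOCOL to 2 around the HDFStore write and restores
# the saved value in a finally block. A slightly more defensive variant of the same
# idea is sketched below; the fallback to the stdlib pickle module is an assumption,
# not something the patch itself does.
from contextlib import contextmanager

@contextmanager
def _pinned_pickle_protocol(protocol=2):
    """Temporarily cap the pickle protocol used while writing HDF5 packages."""
    try:
        from pandas.compat import cPickle as _pickle  # module named in the patch
    except ImportError:
        import pickle as _pickle  # hypothetical fallback if pandas.compat moves
    old_protocol = _pickle.HIGHEST_PROTOCOL
    _pickle.HIGHEST_PROTOCOL = protocol
    try:
        yield
    finally:
        _pickle.HIGHEST_PROTOCOL = old_protocol

# Usage would mirror the patched save_df body, e.g.:
#   with _pinned_pickle_protocol(2):
#       with pd.HDFStore(storepath, mode='w') as store:
#           store[self.DF_NAME] = df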
| 1 | 14,941 | since goal is to restore old_protocol, shouldn't we get it from pandas if possible? e.g. this will break if their source code changes. | quiltdata-quilt | py |
@@ -299,7 +299,7 @@ class ErrorBaseline
"\n" .
' php-version="' .
"\n " .
- implode("\n ", explode(' 	', $matches[2])) .
+ str_replace(' 	', "\n ", $matches[2]).
"\n" .
' "' .
"\n" . | 1 | <?php
namespace Psalm;
use function array_filter;
use function array_intersect;
use function array_map;
use function array_merge;
use function array_reduce;
use function explode;
use function get_loaded_extensions;
use function implode;
use function ksort;
use const LIBXML_NOBLANKS;
use function min;
use const PHP_VERSION;
use function phpversion;
use function preg_replace_callback;
use Psalm\Internal\Analyzer\IssueData;
use Psalm\Internal\Provider\FileProvider;
use RuntimeException;
use function str_replace;
use function strpos;
use function usort;
use function count;
use function array_values;
class ErrorBaseline
{
/**
* @param array<string,array<string,array{o:int, s:array<int, string>}>> $existingIssues
*
*
* @psalm-pure
*/
public static function countTotalIssues(array $existingIssues): int
{
$totalIssues = 0;
foreach ($existingIssues as $existingIssue) {
$totalIssues += array_reduce(
$existingIssue,
/**
* @param array{o:int, s:array<int, string>} $existingIssue
*/
function (int $carry, array $existingIssue): int {
return $carry + $existingIssue['o'];
},
0
);
}
return $totalIssues;
}
/**
* @param array<string, list<IssueData>> $issues
*
*/
public static function create(
FileProvider $fileProvider,
string $baselineFile,
array $issues,
bool $include_php_versions
): void {
$groupedIssues = self::countIssueTypesByFile($issues);
self::writeToFile($fileProvider, $baselineFile, $groupedIssues, $include_php_versions);
}
/**
* @return array<string,array<string,array{o:int, s:array<int, string>}>>
*
* @throws Exception\ConfigException
*/
public static function read(FileProvider $fileProvider, string $baselineFile): array
{
if (!$fileProvider->fileExists($baselineFile)) {
throw new Exception\ConfigException("{$baselineFile} does not exist or is not readable");
}
$xmlSource = $fileProvider->getContents($baselineFile);
$baselineDoc = new \DOMDocument();
$baselineDoc->loadXML($xmlSource, LIBXML_NOBLANKS);
/** @var \DOMNodeList $filesElement */
$filesElement = $baselineDoc->getElementsByTagName('files');
if ($filesElement->length === 0) {
throw new Exception\ConfigException('Baseline file does not contain <files>');
}
$files = [];
/** @var \DOMElement $filesElement */
$filesElement = $filesElement[0];
foreach ($filesElement->getElementsByTagName('file') as $file) {
$fileName = $file->getAttribute('src');
$fileName = str_replace('\\', '/', $fileName);
$files[$fileName] = [];
foreach ($file->childNodes as $issue) {
if (!$issue instanceof \DOMElement) {
continue;
}
$issueType = $issue->tagName;
$files[$fileName][$issueType] = [
'o' => (int)$issue->getAttribute('occurrences'),
's' => [],
];
$codeSamples = $issue->getElementsByTagName('code');
foreach ($codeSamples as $codeSample) {
$files[$fileName][$issueType]['s'][] = $codeSample->textContent;
}
}
}
return $files;
}
/**
* @param array<string, list<IssueData>> $issues
*
* @return array<string,array<string,array{o:int, s:array<int, string>}>>
*
* @throws Exception\ConfigException
*/
public static function update(
FileProvider $fileProvider,
string $baselineFile,
array $issues,
bool $include_php_versions
): array {
$existingIssues = self::read($fileProvider, $baselineFile);
$newIssues = self::countIssueTypesByFile($issues);
foreach ($existingIssues as $file => &$existingIssuesCount) {
if (!isset($newIssues[$file])) {
unset($existingIssues[$file]);
continue;
}
foreach ($existingIssuesCount as $issueType => $existingIssueType) {
if (!isset($newIssues[$file][$issueType])) {
unset($existingIssuesCount[$issueType]);
continue;
}
$existingIssuesCount[$issueType]['o'] = min(
$existingIssueType['o'],
$newIssues[$file][$issueType]['o']
);
$existingIssuesCount[$issueType]['s'] = array_intersect(
$existingIssueType['s'],
$newIssues[$file][$issueType]['s']
);
}
}
$groupedIssues = array_filter($existingIssues);
self::writeToFile($fileProvider, $baselineFile, $groupedIssues, $include_php_versions);
return $groupedIssues;
}
/**
* @param array<string, list<IssueData>> $issues
*
* @return array<string,array<string,array{o:int, s:array<int, string>}>>
*/
private static function countIssueTypesByFile(array $issues): array
{
if ($issues === []) {
return [];
}
$groupedIssues = array_reduce(
array_merge(...array_values($issues)),
/**
* @param array<string,array<string,array{o:int, s:array<int, string>}>> $carry
*
* @return array<string,array<string,array{o:int, s:array<int, string>}>>
*/
function (array $carry, IssueData $issue): array {
if ($issue->severity !== Config::REPORT_ERROR) {
return $carry;
}
$fileName = $issue->file_name;
$fileName = str_replace('\\', '/', $fileName);
$issueType = $issue->type;
if (!isset($carry[$fileName])) {
$carry[$fileName] = [];
}
if (!isset($carry[$fileName][$issueType])) {
$carry[$fileName][$issueType] = ['o' => 0, 's' => []];
}
++$carry[$fileName][$issueType]['o'];
if (!strpos($issue->selected_text, "\n")) {
$carry[$fileName][$issueType]['s'][] = $issue->selected_text;
}
return $carry;
},
[]
);
// Sort files first
ksort($groupedIssues);
foreach ($groupedIssues as &$issues) {
ksort($issues);
}
return $groupedIssues;
}
/**
* @param array<string,array<string,array{o:int, s:array<int, string>}>> $groupedIssues
*
*/
private static function writeToFile(
FileProvider $fileProvider,
string $baselineFile,
array $groupedIssues,
bool $include_php_versions
): void {
$baselineDoc = new \DOMDocument('1.0', 'UTF-8');
$filesNode = $baselineDoc->createElement('files');
$filesNode->setAttribute('psalm-version', PSALM_VERSION);
if ($include_php_versions) {
$extensions = array_merge(get_loaded_extensions(), get_loaded_extensions(true));
usort($extensions, 'strnatcasecmp');
$filesNode->setAttribute('php-version', implode(';' . "\n\t", array_merge(
[
('php:' . PHP_VERSION),
],
array_map(
function (string $extension) : string {
return $extension . ':' . phpversion($extension);
},
$extensions
)
)));
}
foreach ($groupedIssues as $file => $issueTypes) {
$fileNode = $baselineDoc->createElement('file');
$fileNode->setAttribute('src', $file);
foreach ($issueTypes as $issueType => $existingIssueType) {
$issueNode = $baselineDoc->createElement($issueType);
$issueNode->setAttribute('occurrences', (string)$existingIssueType['o']);
\sort($existingIssueType['s']);
foreach ($existingIssueType['s'] as $selection) {
$codeNode = $baselineDoc->createElement('code');
$codeNode->textContent = $selection;
$issueNode->appendChild($codeNode);
}
$fileNode->appendChild($issueNode);
}
$filesNode->appendChild($fileNode);
}
$baselineDoc->appendChild($filesNode);
$baselineDoc->formatOutput = true;
$xml = preg_replace_callback(
'/<files (psalm-version="[^"]+") (?:php-version="(.+)"(\/?>)\n)/',
/**
* @param array<int, string> $matches
*/
function (array $matches) : string {
return
'<files' .
"\n " .
$matches[1] .
"\n" .
' php-version="' .
"\n " .
implode("\n ", explode(' 	', $matches[2])) .
"\n" .
' "' .
"\n" .
$matches[3] .
"\n";
},
$baselineDoc->saveXML()
);
if ($xml === null) {
throw new RuntimeException('Failed to reformat opening attributes!');
}
$fileProvider->setContents($baselineFile, $xml);
}
}
| 1 | 9,155 | This should consume less resources because str_replace don't need to assign arrays with all the values | vimeo-psalm | php |
@@ -240,6 +240,10 @@ static bool check_arg_types(pass_opt_t* opt, ast_t* params, ast_t* positional,
{
errorframe_t frame = NULL;
ast_error_frame(&frame, arg, "argument not a subtype of parameter");
+ ast_error_frame(&frame, arg, "argument type is %s",
+ ast_print_type(a_type));
+ ast_error_frame(&frame, arg, "parameter type is %s",
+ ast_print_type(p_type));
errorframe_append(&frame, &info);
if (ast_childcount(arg) > 1) | 1 | #include "call.h"
#include "postfix.h"
#include "control.h"
#include "literal.h"
#include "reference.h"
#include "../ast/astbuild.h"
#include "../pkg/package.h"
#include "../pass/expr.h"
#include "../pass/sugar.h"
#include "../type/alias.h"
#include "../type/assemble.h"
#include "../type/lookup.h"
#include "../type/reify.h"
#include "../type/safeto.h"
#include "../type/sanitise.h"
#include "../type/subtype.h"
#include "../type/viewpoint.h"
#include "ponyassert.h"
static bool insert_apply(pass_opt_t* opt, ast_t** astp)
{
// Sugar .apply()
ast_t* ast = *astp;
AST_GET_CHILDREN(ast, lhs, positional, namedargs, question);
ast_t* dot = ast_from(ast, TK_DOT);
ast_add(dot, ast_from_string(ast, "apply"));
ast_swap(lhs, dot);
ast_add(dot, lhs);
if(!expr_dot(opt, &dot))
return false;
return expr_call(opt, astp);
}
bool method_check_type_params(pass_opt_t* opt, ast_t** astp)
{
ast_t* lhs = *astp;
ast_t* type = ast_type(lhs);
if(is_typecheck_error(type))
return false;
ast_t* typeparams = ast_childidx(type, 1);
pony_assert(ast_id(type) == TK_FUNTYPE);
if(ast_id(typeparams) == TK_NONE)
return true;
BUILD(typeargs, ast_parent(lhs), NODE(TK_TYPEARGS));
if(!reify_defaults(typeparams, typeargs, true, opt))
{
ast_free_unattached(typeargs);
return false;
}
if(!check_constraints(lhs, typeparams, typeargs, true, opt))
{
ast_free_unattached(typeargs);
return false;
}
type = reify(type, typeparams, typeargs, opt, true);
typeparams = ast_childidx(type, 1);
ast_replace(&typeparams, ast_from(typeparams, TK_NONE));
REPLACE(astp, NODE(ast_id(lhs), TREE(lhs) TREE(typeargs)));
ast_settype(*astp, type);
return true;
}
static bool extend_positional_args(pass_opt_t* opt, ast_t* params,
ast_t* positional)
{
// Fill out the positional args to be as long as the param list.
size_t param_len = ast_childcount(params);
size_t arg_len = ast_childcount(positional);
if(arg_len > param_len)
{
ast_error(opt->check.errors, positional, "too many arguments");
ast_error_continue(opt->check.errors, params, "definition is here");
return false;
}
while(arg_len < param_len)
{
ast_setid(positional, TK_POSITIONALARGS);
ast_append(positional, ast_from(positional, TK_NONE));
arg_len++;
}
return true;
}
static bool apply_named_args(pass_opt_t* opt, ast_t* params, ast_t* positional,
ast_t* namedargs)
{
ast_t* namedarg = ast_pop(namedargs);
while(namedarg != NULL)
{
AST_GET_CHILDREN(namedarg, arg_id, arg);
ast_t* param = ast_child(params);
size_t param_index = 0;
while(param != NULL)
{
AST_GET_CHILDREN(param, param_id);
if(ast_name(arg_id) == ast_name(param_id))
break;
param = ast_sibling(param);
param_index++;
}
if(param == NULL)
{
if(ast_id(namedarg) == TK_UPDATEARG)
{
ast_error(opt->check.errors, arg_id,
"cannot use sugar, update() has no parameter named \"value\"");
return false;
}
ast_error(opt->check.errors, arg_id, "not a parameter name");
return false;
}
ast_t* arg_replace = ast_childidx(positional, param_index);
if(ast_id(arg_replace) != TK_NONE)
{
ast_error(opt->check.errors, arg_id,
"named argument is already supplied");
ast_error_continue(opt->check.errors, arg_replace,
"supplied argument is here");
return false;
}
// Extract named argument expression to avoid copying it
ast_free(ast_pop(namedarg)); // ID
arg = ast_pop(namedarg); // Expression
ast_replace(&arg_replace, arg);
namedarg = ast_pop(namedargs);
}
ast_setid(namedargs, TK_NONE);
return true;
}
static bool apply_default_arg(pass_opt_t* opt, ast_t* param, ast_t** argp)
{
// Pick up a default argument.
AST_GET_CHILDREN(param, id, type, def_arg);
if(ast_id(def_arg) == TK_NONE)
{
ast_error(opt->check.errors, *argp, "not enough arguments");
ast_error_continue(opt->check.errors, param, "definition is here");
return false;
}
pony_assert(ast_id(def_arg) == TK_SEQ);
if(ast_id(ast_child(def_arg)) == TK_LOCATION)
{
// Default argument is __loc. Expand call location.
ast_t* arg = *argp;
ast_t* location = expand_location(arg);
ast_add(arg, location);
ast_setid(arg, TK_SEQ);
if(!ast_passes_subtree(&location, opt, PASS_EXPR))
return false;
}
else
{
// Just use default argument.
ast_replace(argp, def_arg);
}
if(!ast_passes_subtree(argp, opt, PASS_EXPR))
return false;
return true;
}
static bool check_arg_types(pass_opt_t* opt, ast_t* params, ast_t* positional,
bool partial)
{
// Check positional args vs params.
ast_t* param = ast_child(params);
ast_t* arg = ast_child(positional);
while(arg != NULL)
{
if(ast_id(arg) == TK_NONE)
{
if(partial)
{
// Don't check missing arguments for partial application.
arg = ast_sibling(arg);
param = ast_sibling(param);
continue;
} else {
// Pick up a default argument if we can.
if(!apply_default_arg(opt, param, &arg))
return false;
}
}
ast_t* p_type = ast_childidx(param, 1);
if(!coerce_literals(&arg, p_type, opt))
return false;
ast_t* arg_type = ast_type(arg);
if(is_typecheck_error(arg_type))
return false;
if(ast_checkflag(arg, AST_FLAG_JUMPS_AWAY))
{
ast_error(opt->check.errors, arg,
"can't use a control expression in an argument");
return false;
}
ast_t* a_type = alias(arg_type);
errorframe_t info = NULL;
if(!is_subtype(a_type, p_type, &info, opt))
{
errorframe_t frame = NULL;
ast_error_frame(&frame, arg, "argument not a subtype of parameter");
errorframe_append(&frame, &info);
if (ast_childcount(arg) > 1)
ast_error_frame(&frame, arg,
"note that arguments must be separated by a comma");
if(ast_checkflag(ast_type(arg), AST_FLAG_INCOMPLETE))
ast_error_frame(&frame, arg,
"this might be possible if all fields were already defined");
errorframe_report(&frame, opt->check.errors);
ast_free_unattached(a_type);
return false;
}
ast_free_unattached(a_type);
arg = ast_sibling(arg);
param = ast_sibling(param);
}
return true;
}
static bool auto_recover_call(ast_t* ast, ast_t* receiver_type,
ast_t* positional, ast_t* result)
{
switch(ast_id(ast))
{
case TK_FUNREF:
case TK_FUNAPP:
case TK_FUNCHAIN:
break;
default:
pony_assert(0);
break;
}
// We can recover the receiver (ie not alias the receiver type) if all
// arguments are safe and the result is either safe or unused.
// The result of a chained method is always unused.
ast_t* call = ast_parent(ast);
if(is_result_needed(call) && !safe_to_autorecover(receiver_type, result))
return false;
ast_t* arg = ast_child(positional);
while(arg != NULL)
{
if(ast_id(arg) != TK_NONE)
{
ast_t* arg_type = ast_type(arg);
if(is_typecheck_error(arg_type))
return false;
ast_t* a_type = alias(arg_type);
bool ok = safe_to_autorecover(receiver_type, a_type);
ast_free_unattached(a_type);
if(!ok)
return false;
}
arg = ast_sibling(arg);
}
return true;
}
static ast_t* method_receiver(ast_t* method)
{
ast_t* receiver = ast_child(method);
// Dig through function qualification.
if((ast_id(receiver) == TK_FUNREF) || (ast_id(receiver) == TK_FUNAPP) ||
(ast_id(receiver) == TK_FUNCHAIN))
receiver = ast_child(receiver);
return receiver;
}
static ast_t* method_receiver_type(ast_t* method)
{
ast_t* receiver = ast_child(method);
// Dig through function qualification.
if((ast_id(receiver) == TK_FUNREF) || (ast_id(receiver) == TK_FUNAPP) ||
(ast_id(receiver) == TK_FUNCHAIN))
receiver = ast_child(receiver);
ast_t* r_type = ast_type(receiver);
return r_type;
}
static bool check_receiver_cap(pass_opt_t* opt, ast_t* ast, bool* recovered)
{
AST_GET_CHILDREN(ast, lhs, positional, namedargs, question);
ast_t* type = ast_type(lhs);
if(is_typecheck_error(type))
return false;
AST_GET_CHILDREN(type, cap, typeparams, params, result);
// Receiver type, alias of receiver type, and target type.
ast_t* r_type = method_receiver_type(lhs);
if(is_typecheck_error(r_type))
return false;
ast_t* t_type = set_cap_and_ephemeral(r_type, ast_id(cap), TK_NONE);
ast_t* a_type;
// If we can recover the receiver, we don't alias it here.
bool can_recover = auto_recover_call(lhs, r_type, positional, result);
bool cap_recover = false;
switch(ast_id(cap))
{
case TK_ISO:
case TK_TRN:
case TK_VAL:
case TK_TAG:
break;
case TK_REF:
case TK_BOX:
cap_recover = true;
break;
default:
pony_assert(0);
}
if(can_recover && cap_recover)
{
a_type = r_type;
if(recovered != NULL)
*recovered = true;
}
else
{
a_type = alias(r_type);
if(recovered != NULL)
*recovered = false;
}
errorframe_t info = NULL;
bool ok = is_subtype(a_type, t_type, &info, opt);
if(!ok)
{
errorframe_t frame = NULL;
ast_error_frame(&frame, ast,
"receiver type is not a subtype of target type");
ast_error_frame(&frame, ast_child(lhs),
"receiver type: %s", ast_print_type(a_type));
ast_error_frame(&frame, cap,
"target type: %s", ast_print_type(t_type));
errorframe_append(&frame, &info);
if(ast_checkflag(ast_type(method_receiver(lhs)), AST_FLAG_INCOMPLETE))
ast_error_frame(&frame, method_receiver(lhs),
"this might be possible if all fields were already defined");
if(!can_recover && cap_recover && is_subtype(r_type, t_type, NULL, opt))
{
ast_error_frame(&frame, ast,
"this would be possible if the arguments and return value "
"were all sendable");
}
errorframe_report(&frame, opt->check.errors);
}
if(a_type != r_type)
ast_free_unattached(a_type);
ast_free_unattached(r_type);
ast_free_unattached(t_type);
return ok;
}
static bool is_receiver_safe(typecheck_t* t, ast_t* ast)
{
switch(ast_id(ast))
{
case TK_THIS:
case TK_FLETREF:
case TK_FVARREF:
case TK_EMBEDREF:
case TK_PARAMREF:
case TK_TUPLEELEMREF:
{
ast_t* type = ast_type(ast);
return sendable(type);
}
case TK_LETREF:
case TK_VARREF:
{
ast_t* def = (ast_t*)ast_data(ast);
pony_assert(def != NULL);
ast_t* def_recover = ast_nearest(def, TK_RECOVER);
if(t->frame->recover == def_recover)
return true;
ast_t* type = ast_type(ast);
return sendable(type);
}
default:
      // Unsafe receivers inside expressions are caught before we get there.
return true;
}
}
static bool check_nonsendable_recover(pass_opt_t* opt, ast_t* ast)
{
if(opt->check.frame->recover != NULL)
{
AST_GET_CHILDREN(ast, lhs, positional, namedargs, question);
ast_t* type = ast_type(lhs);
AST_GET_CHILDREN(type, cap, typeparams, params, result);
// If the method is tag, the call is always safe.
if(ast_id(cap) == TK_TAG)
return true;
ast_t* receiver = ast_child(lhs);
// Dig through function qualification.
if((ast_id(receiver) == TK_FUNREF) || (ast_id(receiver) == TK_FUNAPP) ||
(ast_id(receiver) == TK_FUNCHAIN))
receiver = ast_child(receiver);
if(!is_receiver_safe(&opt->check, receiver))
{
ast_t* arg = ast_child(positional);
bool args_sendable = true;
while(arg != NULL)
{
if(ast_id(arg) != TK_NONE)
{
// Don't typecheck arg_type, this was already done in
// auto_recover_call.
ast_t* arg_type = ast_type(arg);
if(!sendable(arg_type))
{
args_sendable = false;
break;
}
}
arg = ast_sibling(arg);
}
if(!args_sendable || !sendable(result))
{
ast_error(opt->check.errors, ast, "can't call method on non-sendable "
"object inside of a recover expression");
ast_error_continue(opt->check.errors, ast, "this would be possible if "
"the arguments and return value were all sendable");
return false;
}
}
}
return true;
}
static bool method_application(pass_opt_t* opt, ast_t* ast, bool partial)
{
AST_GET_CHILDREN(ast, lhs, positional, namedargs, question);
if(!method_check_type_params(opt, &lhs))
return false;
ast_t* type = ast_type(lhs);
if(is_typecheck_error(type))
return false;
AST_GET_CHILDREN(type, cap, typeparams, params, result);
if(!extend_positional_args(opt, params, positional))
return false;
if(!apply_named_args(opt, params, positional, namedargs))
return false;
if(!check_arg_types(opt, params, positional, partial))
return false;
switch(ast_id(lhs))
{
case TK_FUNREF:
case TK_FUNAPP:
if(ast_id(ast_child(type)) != TK_AT)
{
if(!check_receiver_cap(opt, ast, NULL))
return false;
if(!check_nonsendable_recover(opt, ast))
return false;
} else {
ast_t* receiver = ast_child(lhs);
// Dig through function qualification.
if((ast_id(receiver) == TK_FUNREF) || (ast_id(receiver) == TK_FUNAPP) ||
(ast_id(receiver) == TK_FUNCHAIN))
receiver = ast_child(receiver);
ast_t* recv_type = ast_type(receiver);
if(!is_known(recv_type) && (ast_id(receiver) == TK_TYPEREF))
{
ast_error(opt->check.errors, lhs, "a bare method cannot be called on "
"an abstract type reference");
return false;
}
}
break;
default: {}
}
return true;
}
static bool method_call(pass_opt_t* opt, ast_t* ast)
{
if(!method_application(opt, ast, false))
return false;
AST_GET_CHILDREN(ast, lhs, positional, namedargs, question);
ast_t* type = ast_type(lhs);
if(is_typecheck_error(type))
return false;
AST_GET_CHILDREN(type, cap, typeparams, params, result);
ast_settype(ast, result);
return true;
}
static token_id partial_application_cap(pass_opt_t* opt, ast_t* ftype,
ast_t* receiver, ast_t* positional)
{
// Check if the apply method in the generated object literal can accept a box
// receiver. If not, it must be a ref receiver. It can accept a box receiver
// if box->receiver <: lhs->receiver and box->arg <: lhs->param.
AST_GET_CHILDREN(ftype, cap, typeparams, params, result);
ast_t* type = ast_type(receiver);
ast_t* view_type = viewpoint_type(ast_from(type, TK_BOX), type);
ast_t* need_type = set_cap_and_ephemeral(type, ast_id(cap), TK_NONE);
bool ok = is_subtype(view_type, need_type, NULL, opt);
ast_free_unattached(view_type);
ast_free_unattached(need_type);
if(!ok)
return TK_REF;
ast_t* param = ast_child(params);
ast_t* arg = ast_child(positional);
while(arg != NULL)
{
if(ast_id(arg) != TK_NONE)
{
type = ast_type(arg);
view_type = viewpoint_type(ast_from(type, TK_BOX), type);
need_type = ast_childidx(param, 1);
ok = is_subtype(view_type, need_type, NULL, opt);
ast_free_unattached(view_type);
ast_free_unattached(need_type);
if(!ok)
return TK_REF;
}
arg = ast_sibling(arg);
param = ast_sibling(param);
}
return TK_BOX;
}
// Sugar for partial application, which we convert to a lambda.
static bool partial_application(pass_opt_t* opt, ast_t** astp)
{
/* Example that we refer to throughout this function.
* ```pony
* class C
* fun f[T](a: A, b: B = b_default): R
*
* let recv: T = ...
* recv~f[T2](foo)
* ```
*
* Partial call is converted to:
* ```pony
* {(b: B = b_default)($0 = recv, a = foo): R => $0.f[T2](a, consume b) }
* ```
*/
ast_t* ast = *astp;
typecheck_t* t = &opt->check;
if(!method_application(opt, ast, true))
return false;
AST_GET_CHILDREN(ast, lhs, positional, namedargs, question);
// LHS must be an application, possibly wrapped in another application
// if the method had type parameters for qualification.
pony_assert(ast_id(lhs) == TK_FUNAPP || ast_id(lhs) == TK_BEAPP ||
ast_id(lhs) == TK_NEWAPP);
AST_GET_CHILDREN(lhs, receiver, method);
ast_t* type_args = NULL;
if(ast_id(receiver) == ast_id(lhs))
{
type_args = method;
AST_GET_CHILDREN_NO_DECL(receiver, receiver, method);
}
// Look up the original method definition for this method call.
deferred_reification_t* method_def = lookup(opt, lhs, ast_type(receiver),
ast_name(method));
ast_t* method_ast = method_def->ast;
// The deferred reification doesn't own the underlying AST so we can free it
// safely.
deferred_reify_free(method_def);
pony_assert(ast_id(method_ast) == TK_FUN || ast_id(method_ast) == TK_BE ||
ast_id(method_ast) == TK_NEW);
// The TK_FUNTYPE of the LHS.
ast_t* type = ast_type(lhs);
pony_assert(ast_id(type) == TK_FUNTYPE);
if(is_typecheck_error(type))
return false;
AST_GET_CHILDREN(type, cap, type_params, target_params, result);
bool bare = ast_id(cap) == TK_AT;
token_id apply_cap = TK_AT;
if(!bare)
apply_cap = partial_application_cap(opt, type, receiver, positional);
token_id can_error = ast_id(ast_childidx(method_ast, 5));
const char* recv_name = package_hygienic_id(t);
// Build lambda expression.
ast_t* call_receiver = NULL;
if(bare)
{
ast_t* arg = ast_child(positional);
while(arg != NULL)
{
if(ast_id(arg) != TK_NONE)
{
ast_error(opt->check.errors, arg, "the partial application of a bare "
"method cannot take arguments");
return false;
}
arg = ast_sibling(arg);
}
ast_t* receiver_type = ast_type(receiver);
if(is_bare(receiver_type))
{
// Partial application on a bare object, simply return the object itself.
ast_replace(astp, receiver);
return true;
}
AST_GET_CHILDREN(receiver_type, recv_type_package, recv_type_name);
const char* recv_package_str = ast_name(recv_type_package);
const char* recv_name_str = ast_name(recv_type_name);
ast_t* module = ast_nearest(ast, TK_MODULE);
ast_t* package = ast_parent(module);
ast_t* pkg_id = package_id(package);
const char* pkg_str = ast_name(pkg_id);
const char* pkg_alias = NULL;
if(recv_package_str != pkg_str)
pkg_alias = package_alias_from_id(module, recv_package_str);
ast_free_unattached(pkg_id);
if(pkg_alias != NULL)
{
// `package.Type.f`
BUILD_NO_DECL(call_receiver, ast,
NODE(TK_DOT,
NODE(TK_DOT,
NODE(TK_REFERENCE, ID(pkg_alias))
ID(recv_name_str))
TREE(method)));
} else {
// `Type.f`
BUILD_NO_DECL(call_receiver, ast,
NODE(TK_DOT,
NODE(TK_REFERENCE, ID(recv_name_str))
TREE(method)));
}
} else {
// `$0.f`
BUILD_NO_DECL(call_receiver, ast,
NODE(TK_DOT,
NODE(TK_REFERENCE, ID(recv_name))
TREE(method)));
}
ast_t* captures = NULL;
if(bare)
{
captures = ast_from(receiver, TK_NONE);
} else {
// Build captures. We always have at least one capture, for receiver.
// Capture: `$0 = recv`
BUILD_NO_DECL(captures, receiver,
NODE(TK_LAMBDACAPTURES,
NODE(TK_LAMBDACAPTURE,
ID(recv_name)
NONE // Infer type.
TREE(receiver))));
}
// Process arguments.
ast_t* target_param = ast_child(target_params);
ast_t* lambda_params = ast_from(target_params, TK_NONE);
ast_t* lambda_call_args = ast_from(positional, TK_NONE);
ast_t* given_arg = ast_child(positional);
while(given_arg != NULL)
{
pony_assert(target_param != NULL);
const char* target_p_name = ast_name(ast_child(target_param));
if(ast_id(given_arg) == TK_NONE)
{
// This argument is not supplied already, must be a lambda parameter.
// Like `b` in example above.
      // Build a new TK_PARAM node rather than copying the target one,
// since the target has already been processed to expr pass, and we need
// a clean one.
AST_GET_CHILDREN(target_param, p_id, p_type, p_default);
// Parameter: `b: B = b_default`
BUILD(lambda_param, target_param,
NODE(TK_PARAM,
TREE(p_id)
TREE(sanitise_type(p_type))
TREE(p_default)));
ast_append(lambda_params, lambda_param);
ast_setid(lambda_params, TK_PARAMS);
// Argument: `consume b`
BUILD(target_arg, lambda_param,
NODE(TK_SEQ,
NODE(TK_CONSUME,
NONE
NODE(TK_REFERENCE, ID(target_p_name)))));
ast_append(lambda_call_args, target_arg);
ast_setid(lambda_call_args, TK_POSITIONALARGS);
}
else
{
// This argument is supplied to the partial, capture it.
// Like `a` in example above.
// Capture: `a = foo`
BUILD(capture, given_arg,
NODE(TK_LAMBDACAPTURE,
ID(target_p_name)
NONE
TREE(given_arg)));
ast_append(captures, capture);
// Argument: `a`
BUILD(target_arg, given_arg,
NODE(TK_SEQ,
NODE(TK_REFERENCE, ID(target_p_name))));
ast_append(lambda_call_args, target_arg);
ast_setid(lambda_call_args, TK_POSITIONALARGS);
}
given_arg = ast_sibling(given_arg);
target_param = ast_sibling(target_param);
}
pony_assert(target_param == NULL);
if(type_args != NULL)
{
// The partial call has type args, add them to the actual call in apply().
// `$0.f[T2]`
BUILD(qualified, type_args,
NODE(TK_QUALIFY,
TREE(call_receiver)
TREE(type_args)));
call_receiver = qualified;
}
REPLACE(astp,
NODE((bare ? TK_BARELAMBDA : TK_LAMBDA),
NODE(apply_cap)
NONE // Lambda function name.
NONE // Lambda type params.
TREE(lambda_params)
TREE(captures)
TREE(sanitise_type(result))
NODE(can_error)
NODE(TK_SEQ,
NODE(TK_CALL,
TREE(call_receiver)
TREE(lambda_call_args)
NONE // Named args.
NODE(can_error)))
NONE)); // Lambda reference capability.
// Need to preserve various lambda children.
ast_setflag(ast_childidx(*astp, 2), AST_FLAG_PRESERVE); // Type params.
ast_setflag(ast_childidx(*astp, 3), AST_FLAG_PRESERVE); // Parameters.
ast_setflag(ast_childidx(*astp, 5), AST_FLAG_PRESERVE); // Return type.
ast_setflag(ast_childidx(*astp, 7), AST_FLAG_PRESERVE); // Body.
// Catch up to this pass.
return ast_passes_subtree(astp, opt, PASS_EXPR);
}
static bool method_chain(pass_opt_t* opt, ast_t* ast)
{
if(!method_application(opt, ast, false))
return false;
AST_GET_CHILDREN(ast, lhs, positional, namedargs, question);
ast_t* type = ast_type(lhs);
if(ast_id(ast_child(type)) == TK_AT)
{
ast_error(opt->check.errors, ast, "a bare method cannot be chained");
return false;
}
// We check the receiver cap now instead of in method_application because
// we need to know whether the receiver was recovered.
ast_t* r_type = method_receiver_type(lhs);
if(ast_id(lhs) == TK_FUNCHAIN)
{
bool recovered;
if(!check_receiver_cap(opt, ast, &recovered))
return false;
if(!check_nonsendable_recover(opt, ast))
return false;
ast_t* f_type = ast_type(lhs);
token_id f_cap = ast_id(ast_child(f_type));
ast_t* c_type = chain_type(r_type, f_cap, recovered);
ast_settype(ast, c_type);
} else {
ast_settype(ast, r_type);
}
return true;
}
bool expr_call(pass_opt_t* opt, ast_t** astp)
{
ast_t* ast = *astp;
if(!literal_call(ast, opt))
return false;
// Type already set by literal handler. Check for infertype, which is a
// marker for typechecking default arguments.
ast_t* type = ast_type(ast);
if((type != NULL) && (ast_id(type) != TK_INFERTYPE))
return true;
AST_GET_CHILDREN(ast, lhs, positional, namedargs, question);
switch(ast_id(lhs))
{
case TK_NEWREF:
case TK_NEWBEREF:
case TK_BEREF:
case TK_FUNREF:
return method_call(opt, ast);
case TK_NEWAPP:
case TK_BEAPP:
case TK_FUNAPP:
return partial_application(opt, astp);
case TK_BECHAIN:
case TK_FUNCHAIN:
return method_chain(opt, ast);
default: {}
}
return insert_apply(opt, astp);
}
| 1 | 12,093 | It would be better to pass `param` as the second argument here, so that the parameter declaration is printed alongside the type. Same thing for the changes in `ffi.c` and `lambda.c`. | ponylang-ponyc | c |
@@ -27,6 +27,7 @@ import org.joda.time.DateTime;
import azkaban.utils.JSONUtils;
+
public class Trigger {
   private static Logger logger = Logger.getLogger(Trigger.class);
| 1 | /*
* Copyright 2012 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.trigger;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.log4j.Logger;
import org.joda.time.DateTime;
import azkaban.utils.JSONUtils;
public class Trigger {
private static Logger logger = Logger.getLogger(Trigger.class);
private int triggerId = -1;
private long lastModifyTime;
private long submitTime;
private String submitUser;
private String source;
private TriggerStatus status = TriggerStatus.READY;
private Condition triggerCondition;
private Condition expireCondition;
private List<TriggerAction> actions;
private List<TriggerAction> expireActions;
private Map<String, Object> info = new HashMap<String, Object>();
private Map<String, Object> context = new HashMap<String, Object>();
private static ActionTypeLoader actionTypeLoader;
private boolean resetOnTrigger = true;
private boolean resetOnExpire = true;
private long nextCheckTime = -1;
@SuppressWarnings("unused")
private Trigger() throws TriggerManagerException {
throw new TriggerManagerException("Triggers should always be specified");
}
public void updateNextCheckTime() {
this.nextCheckTime =
Math.min(triggerCondition.getNextCheckTime(),
expireCondition.getNextCheckTime());
}
public long getNextCheckTime() {
return nextCheckTime;
}
public void setNextCheckTime(long nct) {
this.nextCheckTime = nct;
}
public long getSubmitTime() {
return submitTime;
}
public String getSubmitUser() {
return submitUser;
}
public TriggerStatus getStatus() {
return status;
}
public void setStatus(TriggerStatus status) {
this.status = status;
}
public Condition getTriggerCondition() {
return triggerCondition;
}
public Condition getExpireCondition() {
return expireCondition;
}
public List<TriggerAction> getActions() {
return actions;
}
public List<TriggerAction> getExpireActions() {
return expireActions;
}
public Map<String, Object> getInfo() {
return info;
}
public void setInfo(Map<String, Object> info) {
this.info = info;
}
public Map<String, Object> getContext() {
return context;
}
public void setContext(Map<String, Object> context) {
this.context = context;
}
public Trigger(long lastModifyTime, long submitTime, String submitUser,
String source, Condition triggerCondition, Condition expireCondition,
List<TriggerAction> actions, List<TriggerAction> expireActions,
Map<String, Object> info, Map<String, Object> context) {
this.lastModifyTime = lastModifyTime;
this.submitTime = submitTime;
this.submitUser = submitUser;
this.source = source;
this.triggerCondition = triggerCondition;
this.expireCondition = expireCondition;
this.actions = actions;
this.expireActions = expireActions;
this.info = info;
this.context = context;
}
public Trigger(long lastModifyTime, long submitTime, String submitUser,
String source, Condition triggerCondition, Condition expireCondition,
List<TriggerAction> actions, List<TriggerAction> expireActions) {
this.lastModifyTime = lastModifyTime;
this.submitTime = submitTime;
this.submitUser = submitUser;
this.source = source;
this.triggerCondition = triggerCondition;
this.expireCondition = expireCondition;
this.actions = actions;
this.expireActions = expireActions;
}
public Trigger(String submitUser, String source, Condition triggerCondition,
Condition expireCondition, List<TriggerAction> actions,
List<TriggerAction> expireActions) {
this.lastModifyTime = DateTime.now().getMillis();
this.submitTime = DateTime.now().getMillis();
this.submitUser = submitUser;
this.source = source;
this.triggerCondition = triggerCondition;
this.expireCondition = expireCondition;
this.actions = actions;
this.expireActions = expireActions;
}
public Trigger(String submitUser, String source, Condition triggerCondition,
Condition expireCondition, List<TriggerAction> actions) {
this.lastModifyTime = DateTime.now().getMillis();
this.submitTime = DateTime.now().getMillis();
this.submitUser = submitUser;
this.source = source;
this.triggerCondition = triggerCondition;
this.expireCondition = expireCondition;
this.actions = actions;
this.expireActions = new ArrayList<TriggerAction>();
}
public Trigger(long lastModifyTime, long submitTime, String submitUser,
String source, Condition triggerCondition, Condition expireCondition,
List<TriggerAction> actions) {
this.lastModifyTime = lastModifyTime;
this.submitTime = submitTime;
this.submitUser = submitUser;
this.source = source;
this.triggerCondition = triggerCondition;
this.expireCondition = expireCondition;
this.actions = actions;
this.expireActions = new ArrayList<TriggerAction>();
}
public Trigger(int triggerId, long lastModifyTime, long submitTime,
String submitUser, String source, Condition triggerCondition,
Condition expireCondition, List<TriggerAction> actions,
List<TriggerAction> expireActions, Map<String, Object> info,
Map<String, Object> context) {
this.triggerId = triggerId;
this.lastModifyTime = lastModifyTime;
this.submitTime = submitTime;
this.submitUser = submitUser;
this.source = source;
this.triggerCondition = triggerCondition;
this.expireCondition = expireCondition;
this.actions = actions;
this.expireActions = expireActions;
this.info = info;
this.context = context;
}
public Trigger(int triggerId, long lastModifyTime, long submitTime,
String submitUser, String source, Condition triggerCondition,
Condition expireCondition, List<TriggerAction> actions,
List<TriggerAction> expireActions) {
this.triggerId = triggerId;
this.lastModifyTime = lastModifyTime;
this.submitTime = submitTime;
this.submitUser = submitUser;
this.source = source;
this.triggerCondition = triggerCondition;
this.expireCondition = expireCondition;
this.actions = actions;
this.expireActions = expireActions;
}
public Trigger(int triggerId, long lastModifyTime, long submitTime,
String submitUser, String source, Condition triggerCondition,
Condition expireCondition, List<TriggerAction> actions) {
this.triggerId = triggerId;
this.lastModifyTime = lastModifyTime;
this.submitTime = submitTime;
this.submitUser = submitUser;
this.source = source;
this.triggerCondition = triggerCondition;
this.expireCondition = expireCondition;
this.actions = actions;
this.expireActions = new ArrayList<TriggerAction>();
}
public static synchronized void setActionTypeLoader(ActionTypeLoader loader) {
Trigger.actionTypeLoader = loader;
}
public static ActionTypeLoader getActionTypeLoader() {
return actionTypeLoader;
}
public boolean isResetOnTrigger() {
return resetOnTrigger;
}
public void setResetOnTrigger(boolean resetOnTrigger) {
this.resetOnTrigger = resetOnTrigger;
}
public boolean isResetOnExpire() {
return resetOnExpire;
}
public void setResetOnExpire(boolean resetOnExpire) {
this.resetOnExpire = resetOnExpire;
}
public long getLastModifyTime() {
return lastModifyTime;
}
public void setLastModifyTime(long lastModifyTime) {
this.lastModifyTime = lastModifyTime;
}
public void setTriggerId(int id) {
this.triggerId = id;
}
public int getTriggerId() {
return triggerId;
}
public boolean triggerConditionMet() {
return triggerCondition.isMet();
}
public boolean expireConditionMet() {
return expireCondition.isMet();
}
public void resetTriggerConditions() {
triggerCondition.resetCheckers();
updateNextCheckTime();
}
public void resetExpireCondition() {
expireCondition.resetCheckers();
updateNextCheckTime();
}
public List<TriggerAction> getTriggerActions() {
return actions;
}
public Map<String, Object> toJson() {
Map<String, Object> jsonObj = new HashMap<String, Object>();
jsonObj.put("triggerCondition", triggerCondition.toJson());
jsonObj.put("expireCondition", expireCondition.toJson());
List<Object> actionsJson = new ArrayList<Object>();
for (TriggerAction action : actions) {
Map<String, Object> oneActionJson = new HashMap<String, Object>();
oneActionJson.put("type", action.getType());
oneActionJson.put("actionJson", action.toJson());
actionsJson.add(oneActionJson);
}
jsonObj.put("actions", actionsJson);
List<Object> expireActionsJson = new ArrayList<Object>();
for (TriggerAction expireAction : expireActions) {
Map<String, Object> oneExpireActionJson = new HashMap<String, Object>();
oneExpireActionJson.put("type", expireAction.getType());
oneExpireActionJson.put("actionJson", expireAction.toJson());
expireActionsJson.add(oneExpireActionJson);
}
jsonObj.put("expireActions", expireActionsJson);
jsonObj.put("resetOnTrigger", String.valueOf(resetOnTrigger));
jsonObj.put("resetOnExpire", String.valueOf(resetOnExpire));
jsonObj.put("submitUser", submitUser);
jsonObj.put("source", source);
jsonObj.put("submitTime", String.valueOf(submitTime));
jsonObj.put("lastModifyTime", String.valueOf(lastModifyTime));
jsonObj.put("triggerId", String.valueOf(triggerId));
jsonObj.put("status", status.toString());
jsonObj.put("info", info);
jsonObj.put("context", context);
return jsonObj;
}
public String getSource() {
return source;
}
@SuppressWarnings("unchecked")
public static Trigger fromJson(Object obj) throws Exception {
if (actionTypeLoader == null) {
throw new Exception("Trigger Action Type loader not initialized.");
}
Map<String, Object> jsonObj = (HashMap<String, Object>) obj;
Trigger trigger = null;
try {
logger.info("Decoding for " + JSONUtils.toJSON(obj));
Condition triggerCond =
Condition.fromJson(jsonObj.get("triggerCondition"));
Condition expireCond = Condition.fromJson(jsonObj.get("expireCondition"));
List<TriggerAction> actions = new ArrayList<TriggerAction>();
List<Object> actionsJson = (List<Object>) jsonObj.get("actions");
for (Object actObj : actionsJson) {
Map<String, Object> oneActionJson = (HashMap<String, Object>) actObj;
String type = (String) oneActionJson.get("type");
TriggerAction act =
actionTypeLoader.createActionFromJson(type,
oneActionJson.get("actionJson"));
actions.add(act);
}
List<TriggerAction> expireActions = new ArrayList<TriggerAction>();
List<Object> expireActionsJson =
(List<Object>) jsonObj.get("expireActions");
for (Object expireActObj : expireActionsJson) {
Map<String, Object> oneExpireActionJson =
(HashMap<String, Object>) expireActObj;
String type = (String) oneExpireActionJson.get("type");
TriggerAction expireAct =
actionTypeLoader.createActionFromJson(type,
oneExpireActionJson.get("actionJson"));
expireActions.add(expireAct);
}
boolean resetOnTrigger =
Boolean.valueOf((String) jsonObj.get("resetOnTrigger"));
boolean resetOnExpire =
Boolean.valueOf((String) jsonObj.get("resetOnExpire"));
String submitUser = (String) jsonObj.get("submitUser");
String source = (String) jsonObj.get("source");
long submitTime = Long.valueOf((String) jsonObj.get("submitTime"));
long lastModifyTime =
Long.valueOf((String) jsonObj.get("lastModifyTime"));
int triggerId = Integer.valueOf((String) jsonObj.get("triggerId"));
TriggerStatus status =
TriggerStatus.valueOf((String) jsonObj.get("status"));
Map<String, Object> info = (Map<String, Object>) jsonObj.get("info");
Map<String, Object> context =
(Map<String, Object>) jsonObj.get("context");
if (context == null) {
context = new HashMap<String, Object>();
}
for (ConditionChecker checker : triggerCond.getCheckers().values()) {
checker.setContext(context);
}
for (ConditionChecker checker : expireCond.getCheckers().values()) {
checker.setContext(context);
}
for (TriggerAction action : actions) {
action.setContext(context);
}
for (TriggerAction action : expireActions) {
action.setContext(context);
}
trigger =
new Trigger(triggerId, lastModifyTime, submitTime, submitUser,
source, triggerCond, expireCond, actions, expireActions, info,
context);
trigger.setResetOnExpire(resetOnExpire);
trigger.setResetOnTrigger(resetOnTrigger);
trigger.setStatus(status);
} catch (Exception e) {
e.printStackTrace();
logger.error("Failed to decode the trigger.", e);
throw new Exception("Failed to decode the trigger.", e);
}
return trigger;
}
public String getDescription() {
StringBuffer actionsString = new StringBuffer();
for (TriggerAction act : actions) {
actionsString.append(", ");
actionsString.append(act.getDescription());
}
return "Trigger from " + getSource() + " with trigger condition of "
+ triggerCondition.getExpression() + " and expire condition of "
+ expireCondition.getExpression() + actionsString;
}
public void stopCheckers() {
for (ConditionChecker checker : triggerCondition.getCheckers().values()) {
checker.stopChecker();
}
for (ConditionChecker checker : expireCondition.getCheckers().values()) {
checker.stopChecker();
}
}
}
| 1 | 13,121 | Do we need to set the members to final like submitTime, submitUser ...? They are only used by getter method. | azkaban-azkaban | java |
@@ -13,7 +13,7 @@ class MediaLibraryItem extends SystemMediaLibraryItem
{
public function __construct()
{
- traceLog('Class Cms\Classes\MediaLibraryItem has been deprecated, use System\Classes\MediaLibraryItem instead.');
+ traceLog('Class ' . __CLASS__ . ' has been deprecated, use ' . SystemMediaLibraryItem::class . ' instead.');
parent::__construct(...func_get_args());
}
} | 1 | <?php namespace Cms\Classes;
use System\Classes\MediaLibraryItem as SystemMediaLibraryItem;
/**
* Represents a file or folder in the Media Library.
*
* @package october\cms
* @author Alexey Bobkov, Samuel Georges
* @deprecated Use System\Classes\MediaLibraryItem. Remove if year >= 2020.
*/
class MediaLibraryItem extends SystemMediaLibraryItem
{
public function __construct()
{
traceLog('Class Cms\Classes\MediaLibraryItem has been deprecated, use System\Classes\MediaLibraryItem instead.');
parent::__construct(...func_get_args());
}
}
| 1 | 12,954 | That makes this error message incorrect; if someone has extended Cms\Classes\MediaLibraryItem, then this error message will read "Class My\Classes\CustomMediaLibraryItem has been deprecated". | octobercms-october | php |
@@ -97,6 +97,6 @@ class Factory implements \Zend\ServiceManager\FactoryInterface
$config = $sm->get('VuFind\Config')->get('config');
// Create service:
- return new \VuFind\Mailer\Mailer($this->getTransport($config));
+ return new \VuFind\Mailer\Mailer($this->getTransport($config), $config);
}
} | 1 | <?php
/**
* Factory for instantiating Mailer objects
*
* PHP version 5
*
* Copyright (C) Villanova University 2009.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2,
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
* @category VuFind
* @package Mailer
* @author Demian Katz <[email protected]>
* @license http://opensource.org/licenses/gpl-2.0.php GNU General Public License
* @link https://vufind.org/wiki/development Wiki
*/
namespace VuFind\Mailer;
use Zend\Mail\Transport\InMemory;
use Zend\Mail\Transport\Smtp;
use Zend\Mail\Transport\SmtpOptions;
use Zend\ServiceManager\ServiceLocatorInterface;
/**
* Factory for instantiating Mailer objects
*
* @category VuFind
* @package Mailer
* @author Demian Katz <[email protected]>
* @license http://opensource.org/licenses/gpl-2.0.php GNU General Public License
* @link https://vufind.org/wiki/development Wiki
*
* @codeCoverageIgnore
*/
class Factory implements \Zend\ServiceManager\FactoryInterface
{
/**
* Build the mail transport object.
*
* @param \Zend\Config\Config $config Configuration
*
* @return InMemory|Smtp
*/
protected function getTransport($config)
{
// In test mode? Return fake object:
if (isset($config->Mail->testOnly) && $config->Mail->testOnly) {
return new InMemory();
}
// Create mail transport:
$settings = [
'host' => $config->Mail->host, 'port' => $config->Mail->port
];
if (isset($config->Mail->username) && isset($config->Mail->password)) {
$settings['connection_class'] = 'login';
$settings['connection_config'] = [
'username' => $config->Mail->username,
'password' => $config->Mail->password
];
if (isset($config->Mail->secure)) {
// always set user defined secure connection
$settings['connection_config']['ssl'] = $config->Mail->secure;
} else {
// set default secure connection based on configured port
if ($settings['port'] == '587') {
$settings['connection_config']['ssl'] = 'tls';
} elseif ($settings['port'] == '487') {
$settings['connection_config']['ssl'] = 'ssl';
}
}
}
return new Smtp(new SmtpOptions($settings));
}
/**
* Create service
*
* @param ServiceLocatorInterface $sm Service manager
*
* @return mixed
*/
public function createService(ServiceLocatorInterface $sm)
{
// Load configurations:
$config = $sm->get('VuFind\Config')->get('config');
// Create service:
return new \VuFind\Mailer\Mailer($this->getTransport($config));
}
}
| 1 | 26,083 | Is it worth injecting the whole configuration object for just one setting? Would it be more generic to pass an $options array with the override_from in it? I'm not completely opposed to passing $config, but since we've avoided it so far, it seems like it would be cleaner to maintain that separation. Simply adding a getOptions method to the factory would make this a lot more readable. | vufind-org-vufind | php |
@@ -44,3 +44,14 @@ const (
SDA_PIN = 26
SCL_PIN = 27
)
+
+// SPI pins
+const (
+ SPI0_SCK_PIN = 25
+ SPI0_MOSI_PIN = 23
+ SPI0_MISO_PIN = 24
+
+ SPI1_SCK_PIN = 2
+ SPI1_MOSI_PIN = 3
+ SPI1_MISO_PIN = 4
+) | 1 | // +build nrf,pca10040
package machine
// The PCA10040 has a low-frequency (32kHz) crystal oscillator on board.
const HasLowFrequencyCrystal = true
// LEDs on the PCA10040 (nRF52832 dev board)
const (
LED = LED1
LED1 = 17
LED2 = 18
LED3 = 19
LED4 = 20
)
// Buttons on the PCA10040 (nRF52832 dev board)
const (
BUTTON = BUTTON1
BUTTON1 = 13
BUTTON2 = 14
BUTTON3 = 15
BUTTON4 = 16
)
// UART pins for the PCA10040 (nRF52832 dev board)
const (
UART_TX_PIN = 6
UART_RX_PIN = 8
)
// ADC pins
const (
ADC0 = 3
ADC1 = 4
ADC2 = 28
ADC3 = 29
ADC4 = 30
ADC5 = 31
)
// I2C pins
const (
SDA_PIN = 26
SCL_PIN = 27
)
| 1 | 6,156 | Just curious: where do these pin numbers come from? I see you took the SPI0 pin numbers from the Arduino board layout which is good, but I can't find such a relation for `SPI1`. | tinygo-org-tinygo | go |
@@ -450,10 +450,14 @@ public final class MethodTypeResolution {
Class<?> contextClass = context.getType();
// search the class
- for (Method method : contextClass.getDeclaredMethods()) {
- if (isMethodApplicable(method, methodName, argArity, accessingClass, typeArguments)) {
- result.add(getTypeDefOfMethod(context, method, typeArguments));
+ try {
+ for (Method method : contextClass.getDeclaredMethods()) {
+ if (isMethodApplicable(method, methodName, argArity, accessingClass, typeArguments)) {
+ result.add(getTypeDefOfMethod(context, method, typeArguments));
+ }
}
+ } catch (final LinkageError ignored) {
+ // TODO : This is an incomplete classpath, report the missing class
}
// search it's supertype | 1 | /**
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang.java.typeresolution;
import static net.sourceforge.pmd.lang.java.typeresolution.typeinference.InferenceRuleType.LOOSE_INVOCATION;
import static net.sourceforge.pmd.lang.java.typeresolution.typeinference.InferenceRuleType.SUBTYPE;
import java.lang.reflect.Method;
import java.lang.reflect.Modifier;
import java.lang.reflect.Type;
import java.lang.reflect.TypeVariable;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.logging.Level;
import java.util.logging.Logger;
import net.sourceforge.pmd.lang.ast.Node;
import net.sourceforge.pmd.lang.java.ast.ASTArgumentList;
import net.sourceforge.pmd.lang.java.ast.ASTExpression;
import net.sourceforge.pmd.lang.java.ast.ASTMemberSelector;
import net.sourceforge.pmd.lang.java.ast.ASTTypeArguments;
import net.sourceforge.pmd.lang.java.ast.TypeNode;
import net.sourceforge.pmd.lang.java.typeresolution.typedefinition.JavaTypeDefinition;
import net.sourceforge.pmd.lang.java.typeresolution.typeinference.Bound;
import net.sourceforge.pmd.lang.java.typeresolution.typeinference.Constraint;
import net.sourceforge.pmd.lang.java.typeresolution.typeinference.TypeInferenceResolver;
import net.sourceforge.pmd.lang.java.typeresolution.typeinference.TypeInferenceResolver.ResolutionFailedException;
import net.sourceforge.pmd.lang.java.typeresolution.typeinference.Variable;
public final class MethodTypeResolution {
private MethodTypeResolution() {}
private static final Logger LOG = Logger.getLogger(MethodTypeResolution.class.getName());
private static final List<Class<?>> PRIMITIVE_SUBTYPE_ORDER;
private static final List<Class<?>> BOXED_PRIMITIVE_SUBTYPE_ORDER;
static {
List<Class<?>> primitiveList = new ArrayList<>();
primitiveList.add(double.class);
primitiveList.add(float.class);
primitiveList.add(long.class);
primitiveList.add(int.class);
primitiveList.add(short.class);
primitiveList.add(byte.class);
primitiveList.add(char.class); // this is here for convenience, not really in order
PRIMITIVE_SUBTYPE_ORDER = Collections.unmodifiableList(primitiveList);
List<Class<?>> boxedList = new ArrayList<>();
boxedList.add(Double.class);
boxedList.add(Float.class);
boxedList.add(Long.class);
boxedList.add(Integer.class);
boxedList.add(Short.class);
boxedList.add(Byte.class);
boxedList.add(Character.class);
BOXED_PRIMITIVE_SUBTYPE_ORDER = Collections.unmodifiableList(boxedList);
}
public static boolean checkSubtypeability(MethodType method, MethodType subtypeableMethod) {
List<JavaTypeDefinition> subtypeableParams = subtypeableMethod.getParameterTypes();
List<JavaTypeDefinition> methodParams = method.getParameterTypes();
if (!method.getMethod().isVarArgs() && !subtypeableMethod.getMethod().isVarArgs()) {
for (int index = 0; index < subtypeableParams.size(); ++index) {
if (!isSubtypeable(methodParams.get(index), subtypeableParams.get(index))) {
return false;
}
}
} else if (method.getMethod().isVarArgs() && subtypeableMethod.getMethod().isVarArgs()) {
if (methodParams.size() < subtypeableParams.size()) {
for (int index = 0; index < subtypeableParams.size(); ++index) {
if (!isSubtypeable(method.getArgTypeIncludingVararg(index),
subtypeableMethod.getArgTypeIncludingVararg(index))) {
return false;
}
}
} else {
for (int index = 0; index < methodParams.size(); ++index) {
if (!isSubtypeable(method.getArgTypeIncludingVararg(index),
subtypeableMethod.getArgTypeIncludingVararg(index))) {
return false;
}
}
}
} else {
throw new IllegalStateException("These methods can only be vararg at the same time:\n"
+ method.toString() + "\n" + subtypeableMethod.toString());
}
return true;
}
/**
     * Look for methods by subtypeability.
* https://docs.oracle.com/javase/specs/jls/se7/html/jls-15.html#jls-15.12.2.2
*/
public static List<MethodType> selectMethodsFirstPhase(JavaTypeDefinition context,
List<MethodType> methodsToSearch, ASTArgumentList argList) {
// TODO: check if explicit type arguments are applicable to the type parameter bounds
List<MethodType> selectedMethods = new ArrayList<>();
outter:
for (int methodIndex = 0; methodIndex < methodsToSearch.size(); ++methodIndex) {
MethodType methodType = methodsToSearch.get(methodIndex);
if (argList == null) {
selectedMethods.add(methodType);
// vararg methods are considered fixed arity here - varargs are dealt with in the 3rd phase
} else if (getArity(methodType.getMethod()) == argList.jjtGetNumChildren()) {
if (!methodType.isParameterized()) {
// https://docs.oracle.com/javase/specs/jls/se8/html/jls-18.html#jls-18.5.1
// ...
// To test for applicability by strict invocation:
// ... or if there exists an i (1 ≤ i ≤ n) such that ei is pertinent to applicability
// (§15.12.2.2) and either i) ei is a standalone expression of a primitive type but Fi is a
// reference type, or ii) Fi is a primitive type but ei is not a standalone expression of a
// primitive type; then the method is not applicable and there is no need to proceed with inference.
Class<?>[] methodParameterTypes = methodType.getMethod().getParameterTypes();
for (int argIndex = 0; argIndex < argList.jjtGetNumChildren(); ++argIndex) {
if (((ASTExpression) argList.jjtGetChild(argIndex)).isStandAlonePrimitive()) {
if (!methodParameterTypes[argIndex].isPrimitive()) {
continue outter; // this method is not applicable
}
} else if (methodParameterTypes[argIndex].isPrimitive()) {
continue outter; // this method is not applicable
}
}
methodType = parameterizeInvocation(context, methodType.getMethod(), argList);
}
// check subtypeability of each argument to the corresponding parameter
boolean methodIsApplicable = true;
                // try each argument to see if it's subtypeable
for (int argIndex = 0; argIndex < argList.jjtGetNumChildren(); ++argIndex) {
if (!isSubtypeable(methodType.getParameterTypes().get(argIndex),
(ASTExpression) argList.jjtGetChild(argIndex))) {
methodIsApplicable = false;
break;
}
// TODO: add unchecked conversion in an else if branch
}
if (methodIsApplicable) {
selectedMethods.add(methodType);
}
}
}
return selectedMethods;
}
public static MethodType parameterizeInvocation(JavaTypeDefinition context, Method method,
ASTArgumentList argList) {
// variables are set up by the call to produceInitialBounds
List<Variable> variables = new ArrayList<>();
List<Bound> initialBounds = new ArrayList<>();
produceInitialBounds(method, context, variables, initialBounds);
List<JavaTypeDefinition> resolvedTypeParameters = TypeInferenceResolver
.inferTypes(produceInitialConstraints(method, argList, variables), initialBounds, variables);
return getTypeDefOfMethod(context, method, resolvedTypeParameters);
}
public static List<Constraint> produceInitialConstraints(Method method, ASTArgumentList argList,
List<Variable> variables) {
List<Constraint> result = new ArrayList<>();
Type[] methodParameters = method.getGenericParameterTypes();
TypeVariable<Method>[] methodTypeParameters = method.getTypeParameters();
// TODO: add support for variable arity methods
for (int i = 0; i < methodParameters.length; i++) {
int typeParamIndex = -1;
if (methodParameters[i] instanceof TypeVariable) {
typeParamIndex = JavaTypeDefinition
.getGenericTypeIndex(methodTypeParameters, ((TypeVariable) methodParameters[i]).getName());
}
if (typeParamIndex != -1) {
                // TODO: we are cheating here, it should be a constraint of the form 'var -> expression' not 'var->type'
result.add(new Constraint(((TypeNode) argList.jjtGetChild(i)).getTypeDefinition(),
variables.get(typeParamIndex), LOOSE_INVOCATION));
}
}
return result;
}
public static void produceInitialBounds(Method method, JavaTypeDefinition context,
List<Variable> variables, List<Bound> initialBounds) {
// https://docs.oracle.com/javase/specs/jls/se8/html/jls-18.html#jls-18.1.3
// When inference begins, a bound set is typically generated from a list of type parameter declarations P1,
// ..., Pp and associated inference variables α1, ..., αp. Such a bound set is constructed as follows. For
// each l (1 ≤ l ≤ p):
TypeVariable<Method>[] typeVariables = method.getTypeParameters();
variables.clear();
for (int i = 0; i < typeVariables.length; ++i) {
variables.add(new Variable());
}
for (int currVarIndex = 0; currVarIndex < typeVariables.length; ++currVarIndex) {
Type[] bounds = typeVariables[currVarIndex].getBounds();
boolean currVarHasNoProperUpperBound = true;
for (Type bound : bounds) {
// Otherwise, for each type T delimited by & in the TypeBound, the bound αl <: T[P1:=α1, ..., Pp:=αp]
// appears in the set; if this results in no proper upper bounds for αl (only dependencies), then the
// bound α <: Object also appears in the set.
int boundVarIndex = -1;
if (bound instanceof TypeVariable) {
boundVarIndex =
JavaTypeDefinition.getGenericTypeIndex(typeVariables, ((TypeVariable) bound).getName());
}
if (boundVarIndex != -1) {
initialBounds.add(new Bound(variables.get(currVarIndex), variables.get(boundVarIndex), SUBTYPE));
} else {
currVarHasNoProperUpperBound = false;
initialBounds.add(new Bound(variables.get(currVarIndex), context.resolveTypeDefinition(bound),
SUBTYPE));
}
}
// If Pl has no TypeBound, the bound αl <: Object appears in the set.
if (currVarHasNoProperUpperBound) {
initialBounds.add(new Bound(variables.get(currVarIndex), JavaTypeDefinition.forClass(Object.class),
SUBTYPE));
}
}
}
/**
     * Look for methods by method conversion.
* https://docs.oracle.com/javase/specs/jls/se7/html/jls-15.html#jls-15.12.2.3
*/
public static List<MethodType> selectMethodsSecondPhase(List<MethodType> methodsToSearch, ASTArgumentList argList) {
// TODO: check if explicit type arguments are applicable to the type parameter bounds
List<MethodType> selectedMethods = new ArrayList<>();
for (int methodIndex = 0; methodIndex < methodsToSearch.size(); ++methodIndex) {
MethodType methodType = methodsToSearch.get(methodIndex);
if (!methodType.isParameterized()) {
throw new ResolutionFailedException();
}
if (argList == null) {
selectedMethods.add(methodType);
// vararg methods are considered fixed arity here, see 3rd phase
} else if (getArity(methodType.getMethod()) == argList.jjtGetNumChildren()) {
                // check method convertibility of each argument to the corresponding parameter
boolean methodIsApplicable = true;
                // try each argument to see if it's method convertible
for (int argIndex = 0; argIndex < argList.jjtGetNumChildren(); ++argIndex) {
if (!isMethodConvertible(methodType.getParameterTypes().get(argIndex),
(ASTExpression) argList.jjtGetChild(argIndex))) {
methodIsApplicable = false;
break;
}
// TODO: add unchecked conversion in an else if branch
}
if (methodIsApplicable) {
selectedMethods.add(methodType);
}
}
}
return selectedMethods;
}
/**
* Look for methods considering varargs as well.
* https://docs.oracle.com/javase/specs/jls/se7/html/jls-15.html#jls-15.12.2.4
*/
public static List<MethodType> selectMethodsThirdPhase(List<MethodType> methodsToSearch, ASTArgumentList argList) {
// TODO: check if explicit type arguments are applicable to the type parameter bounds
List<MethodType> selectedMethods = new ArrayList<>();
for (int methodIndex = 0; methodIndex < methodsToSearch.size(); ++methodIndex) {
MethodType methodType = methodsToSearch.get(methodIndex);
if (!methodType.isParameterized()) {
throw new ResolutionFailedException();
}
if (argList == null) {
selectedMethods.add(methodType);
// now we consider varargs as not fixed arity
// if we reach here and the method is not a vararg, then we didn't find a resolution in earlier phases
} else if (methodType.isVararg()) { // check subtypeability of each argument to the corresponding parameter
boolean methodIsApplicable = true;
List<JavaTypeDefinition> methodParameters = methodType.getParameterTypes();
JavaTypeDefinition varargComponentType = methodType.getVarargComponentType();
// try each arguments if it's method convertible
for (int argIndex = 0; argIndex < argList.jjtGetNumChildren(); ++argIndex) {
JavaTypeDefinition parameterType = argIndex < methodParameters.size() - 1
? methodParameters.get(argIndex) : varargComponentType;
if (!isMethodConvertible(parameterType, (ASTExpression) argList.jjtGetChild(argIndex))) {
methodIsApplicable = false;
break;
}
// TODO: If k != n, or if k = n and An cannot be converted by method invocation conversion to
// Sn[], then the type which is the erasure (§4.6) of Sn is accessible at the point of invocation.
// TODO: add unchecked conversion in an else if branch
}
if (methodIsApplicable) {
selectedMethods.add(methodType);
}
} else if (!methodType.isVararg()) {
// TODO: Remove check for vararg here, once we can detect and use return types of method calls
LOG.log(Level.FINE, "Method {0} couldn't be resolved", String.valueOf(methodType));
}
}
return selectedMethods;
}
/**
* Searches a list of methods by trying the three phases of method overload resolution.
* https://docs.oracle.com/javase/specs/jls/se7/html/jls-15.html#jls-15.12.2
*/
public static JavaTypeDefinition getBestMethodReturnType(JavaTypeDefinition context, List<MethodType> methods,
ASTArgumentList arguments) {
try {
List<MethodType> selectedMethods = selectMethodsFirstPhase(context, methods, arguments);
if (!selectedMethods.isEmpty()) {
return selectMostSpecificMethod(selectedMethods).getReturnType();
}
selectedMethods = selectMethodsSecondPhase(methods, arguments);
if (!selectedMethods.isEmpty()) {
return selectMostSpecificMethod(selectedMethods).getReturnType();
}
selectedMethods = selectMethodsThirdPhase(methods, arguments);
if (!selectedMethods.isEmpty()) {
return selectMostSpecificMethod(selectedMethods).getReturnType();
}
return null;
} catch (ResolutionFailedException e) {
return null;
}
}
/**
* Most specific method selection.
* https://docs.oracle.com/javase/specs/jls/se7/html/jls-15.html#jls-15.12.2.5
*/
public static MethodType selectMostSpecificMethod(List<MethodType> selectedMethods) {
MethodType mostSpecific = selectedMethods.get(0);
for (int methodIndex = 1; methodIndex < selectedMethods.size(); ++methodIndex) {
MethodType nextMethod = selectedMethods.get(methodIndex);
if (checkSubtypeability(mostSpecific, nextMethod)) {
if (checkSubtypeability(nextMethod, mostSpecific)) { // both are maximally specific
mostSpecific = selectAmongMaximallySpecific(mostSpecific, nextMethod);
} else {
mostSpecific = nextMethod;
}
}
}
return mostSpecific;
}
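// For illustration of the JLS rule referenced above: if both m(Object) and m(String) are
// applicable to the call m("text"), m(String) is selected, because its parameter types are
// subtypeable to those of m(Object) but not the other way around.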
/**
* Select maximally specific method.
* https://docs.oracle.com/javase/specs/jls/se7/html/jls-15.html#jls-15.12.2.5
*/
public static MethodType selectAmongMaximallySpecific(MethodType first, MethodType second) {
if (first.isAbstract()) {
if (second.isAbstract()) {
// https://docs.oracle.com/javase/specs/jls/se7/html/jls-15.html#jls-15.12.2.5
// the bottom of the section is relevant here, we can't resolve this type
// TODO: resolve this
// we obviously don't know the runtime type. Let's return the first as the most specific
return first;
} else { // second one isn't abstract
return second;
}
} else if (second.isAbstract()) {
return first; // first isn't abstract, second one is
} else {
return first; // TODO: once shadowing and overriding methods is done, add exception back
// throw new IllegalStateException("None of the maximally specific methods are abstract.\n"
// + first.toString() + "\n" + second.toString());
}
}
/**
* Looks for potentially applicable methods in a given type definition.
*/
public static List<MethodType> getApplicableMethods(JavaTypeDefinition context,
String methodName,
List<JavaTypeDefinition> typeArguments,
int argArity,
Class<?> accessingClass) {
List<MethodType> result = new ArrayList<>();
if (context == null) {
return result;
}
// TODO: shadowing, overriding
// TODO: add multiple upper bounds
Class<?> contextClass = context.getType();
// search the class
for (Method method : contextClass.getDeclaredMethods()) {
if (isMethodApplicable(method, methodName, argArity, accessingClass, typeArguments)) {
result.add(getTypeDefOfMethod(context, method, typeArguments));
}
}
// search its supertype
if (!contextClass.equals(Object.class)) {
List<MethodType> inheritedMethods = getApplicableMethods(context.resolveTypeDefinition(contextClass.getGenericSuperclass()),
methodName, typeArguments, argArity, accessingClass);
// but only add the found methods of the supertype, if they have not been overridden
// TODO: verify whether this simplified overriding detection is good enough and at the correct place
for (MethodType inherited : inheritedMethods) {
if (!result.contains(inherited)) {
result.add(inherited);
}
}
}
// search its interfaces
for (Type interfaceType : contextClass.getGenericInterfaces()) {
result.addAll(getApplicableMethods(context.resolveTypeDefinition(interfaceType),
methodName, typeArguments, argArity, accessingClass));
}
return result;
}
public static MethodType getTypeDefOfMethod(JavaTypeDefinition context, Method method,
List<JavaTypeDefinition> typeArguments) {
if (typeArguments.isEmpty() && isGeneric(method)) {
return MethodType.build(method);
}
JavaTypeDefinition returnType = context.resolveTypeDefinition(method.getGenericReturnType(),
method, typeArguments);
List<JavaTypeDefinition> argTypes = new ArrayList<>();
for (Type argType : method.getGenericParameterTypes()) {
argTypes.add(context.resolveTypeDefinition(argType, method, typeArguments));
}
return MethodType.build(returnType, argTypes, method);
}
/**
* https://docs.oracle.com/javase/specs/jls/se7/html/jls-15.html#jls-15.12.2.1
* Potential applicability.
*/
public static boolean isMethodApplicable(Method method, String methodName, int argArity,
Class<?> accessingClass, List<JavaTypeDefinition> typeArguments) {
if (method.getName().equals(methodName) // name matches
// is visible
&& isMemberVisibleFromClass(method.getDeclaringClass(), method.getModifiers(), accessingClass)
// if method is vararg with arity n, then the invocation's arity >= n - 1
&& (!method.isVarArgs() || (argArity >= getArity(method) - 1))
// if the method isn't vararg, then arity matches
&& (method.isVarArgs() || (argArity == getArity(method)))
// isn't generic or arity of type arguments matches that of parameters
&& (!isGeneric(method) || typeArguments.isEmpty()
|| method.getTypeParameters().length == typeArguments.size())) {
return true;
}
return false;
}
/**
* Given a class, the modifiers of one of its members and the class that is trying to access that member,
* returns true if the member is accessible from the accessingClass Class.
*
* @param classWithMember The Class with the member.
* @param modifiers The modifiers of that member.
* @param accessingClass The Class trying to access the member.
* @return True if the member is visible from the accessingClass Class.
*/
public static boolean isMemberVisibleFromClass(Class<?> classWithMember, int modifiers, Class<?> accessingClass) {
if (accessingClass == null) {
return false;
}
// public members
if (Modifier.isPublic(modifiers)) {
return true;
}
boolean areInTheSamePackage = false;
if (accessingClass.getPackage() != null) { // if null, then it's in the default package
// if classWithMember.getPackage() is null, the result will be false
areInTheSamePackage = accessingClass.getPackage().getName().equals(
classWithMember.getPackage().getName());
}
// protected members
if (Modifier.isProtected(modifiers)) {
if (areInTheSamePackage || classWithMember.isAssignableFrom(accessingClass)) {
return true;
}
// private members
} else if (Modifier.isPrivate(modifiers)) {
if (classWithMember.equals(accessingClass)) {
return true;
}
// package private members
} else if (areInTheSamePackage) {
return true;
}
return false;
}
public static boolean isGeneric(Method method) {
return method.getTypeParameters().length != 0;
}
public static boolean isGeneric(Class<?> clazz) {
return clazz.getTypeParameters().length != 0;
}
public static int getArity(Method method) {
return method.getParameterTypes().length;
}
public static boolean isMethodConvertible(JavaTypeDefinition parameter, ASTExpression argument) {
if (argument.getTypeDefinition() == null) {
LOG.log(Level.FINE, "No type information for node {0}", argument.toString());
return true;
}
return isMethodConvertible(parameter, argument.getTypeDefinition());
}
/**
* Method invocation conversion rules.
* https://docs.oracle.com/javase/specs/jls/se7/html/jls-5.html#jls-5.3
*/
public static boolean isMethodConvertible(JavaTypeDefinition parameter, JavaTypeDefinition argument) {
// covers identity conversion, widening primitive conversion, widening reference conversion, null
// covers the case where both are primitives or both are boxed primitives
if (isSubtypeable(parameter, argument)) {
return true;
}
// covers boxing
int indexInPrimitive = PRIMITIVE_SUBTYPE_ORDER.indexOf(argument.getType());
if (indexInPrimitive != -1 // arg is primitive
&& isSubtypeable(parameter,
JavaTypeDefinition.forClass(BOXED_PRIMITIVE_SUBTYPE_ORDER.get(indexInPrimitive)))) {
return true;
}
// covers unboxing
int indexInBoxed = BOXED_PRIMITIVE_SUBTYPE_ORDER.indexOf(argument.getType());
if (indexInBoxed != -1 // arg is boxed primitive
&& isSubtypeable(parameter,
JavaTypeDefinition.forClass(PRIMITIVE_SUBTYPE_ORDER.get(indexInBoxed)))) {
return true;
}
// TODO: add raw unchecked conversion part
return false;
}
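// A few concrete cases of the rules above, written as (parameter type, argument type), for illustration:
//   (long, int)    -> true  via widening primitive conversion (covered by isSubtypeable)
//   (Integer, int) -> true  via boxing, using the PRIMITIVE_SUBTYPE_ORDER lookup above
//   (int, Integer) -> true  via unboxing, using the BOXED_PRIMITIVE_SUBTYPE_ORDER lookup above
//   (String, int)  -> false, no method invocation conversion applies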
public static boolean isSubtypeable(JavaTypeDefinition parameter, ASTExpression argument) {
if (argument.getTypeDefinition() == null) {
LOG.log(Level.FINE, "No type information for node {0}", argument.toString());
return true;
}
return isSubtypeable(parameter, argument.getTypeDefinition());
}
public static boolean isSubtypeable(Class<?> parameter, Class<?> argument) {
return isSubtypeable(JavaTypeDefinition.forClass(parameter), JavaTypeDefinition.forClass(argument));
}
/**
* Subtypeability rules.
* https://docs.oracle.com/javase/specs/jls/se7/html/jls-4.html#jls-4.10
*/
public static boolean isSubtypeable(JavaTypeDefinition parameter, JavaTypeDefinition argument) {
// null types are always applicable
if (argument.getType() == null) {
return true;
}
// this covers arrays, simple class/interface cases
if (parameter.getType().isAssignableFrom(argument.getType())) {
if (!parameter.isGeneric() || parameter.isRawType() || argument.isRawType()) {
return true;
}
// parameter is a non-raw generic type
// argument is a non-generic or a non-raw generic type
// example result: List<String>.getAsSuper(Collection) becomes Collection<String>
JavaTypeDefinition argSuper = argument.getAsSuper(parameter.getType());
// argSuper can't be null because isAssignableFrom check above returned true
// right now we only check if generic arguments are the same
// TODO: add support for wildcard types
// (future note: we can't simply recurse into isSubtypeable here, recursive generic types would never terminate)
//return parameter.equals(argSuper);
// TODO: this ignores the check for generic types!!
return parameter.getType().equals(argSuper.getType());
}
int indexOfParameter = PRIMITIVE_SUBTYPE_ORDER.indexOf(parameter.getType());
if (indexOfParameter != -1) {
if (argument.getType() == char.class) {
if (indexOfParameter <= 3) { // <= 3 because short and byte are not compatible with char
return true;
}
} else {
int indexOfArg = PRIMITIVE_SUBTYPE_ORDER.indexOf(argument.getType());
if (indexOfArg != -1 && indexOfParameter <= indexOfArg) {
return true;
}
}
}
return false;
}
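// The primitive branch above encodes the JLS widening order: a parameter that appears earlier in
// PRIMITIVE_SUBTYPE_ORDER (wider types first: double, float, long, int, short, byte) accepts any
// argument at the same index or later, e.g. a long parameter accepts an int argument. char is
// special-cased because it widens to int, long, float and double but not to short or byte.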
public static JavaTypeDefinition boxPrimitive(JavaTypeDefinition def) {
if (!def.isPrimitive()) {
return null;
}
return JavaTypeDefinition.forClass(BOXED_PRIMITIVE_SUBTYPE_ORDER.get(PRIMITIVE_SUBTYPE_ORDER.indexOf(def.getType())));
}
public static List<JavaTypeDefinition> getMethodExplicitTypeArugments(Node node) {
ASTMemberSelector memberSelector = node.getFirstChildOfType(ASTMemberSelector.class);
if (memberSelector == null) {
return Collections.emptyList();
}
ASTTypeArguments typeArguments = memberSelector.getFirstChildOfType(ASTTypeArguments.class);
if (typeArguments == null) {
return Collections.emptyList();
}
List<JavaTypeDefinition> result = new ArrayList<>();
for (int childIndex = 0; childIndex < typeArguments.jjtGetNumChildren(); ++childIndex) {
result.add(((TypeNode) typeArguments.jjtGetChild(childIndex)).getTypeDefinition());
}
return result;
}
}
| 1 | 13,007 | Isn't that already too late? I'd expect, that the LinkageError occurs the first time, we have the class instance (the classloader tried to load the class) - or do we get a partially linked class back, and we get the exception when retrieving the methods? | pmd-pmd | java |
@@ -31,7 +31,7 @@ import (
const (
supportedVersion = 0
maxVersion = 254
- traceparentHeader = "traceparent"
+ TraceparentHeader = "Traceparent"
)
type httpTraceContextPropagator struct{} | 1 | // Copyright 2019, OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package propagation
import (
"context"
"encoding/hex"
"fmt"
"regexp"
"strconv"
"strings"
"go.opentelemetry.io/api/trace"
"go.opentelemetry.io/api/core"
apipropagation "go.opentelemetry.io/api/propagation"
)
const (
supportedVersion = 0
maxVersion = 254
traceparentHeader = "traceparent"
)
type httpTraceContextPropagator struct{}
var _ apipropagation.TextFormatPropagator = httpTraceContextPropagator{}
var traceCtxRegExp = regexp.MustCompile("^[0-9a-f]{2}-[a-f0-9]{32}-[a-f0-9]{16}-[a-f0-9]{2}-?")
func (hp httpTraceContextPropagator) Inject(ctx context.Context, supplier apipropagation.Supplier) {
sc := trace.CurrentSpan(ctx).SpanContext()
if sc.IsValid() {
h := fmt.Sprintf("%.2x-%.16x%.16x-%.16x-%.2x",
supportedVersion,
sc.TraceID.High,
sc.TraceID.Low,
sc.SpanID,
sc.TraceFlags&core.TraceFlagsSampled)
supplier.Set(traceparentHeader, h)
}
}
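// As an example, a sampled span produces a header value of the form
// "00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01":
// 2 hex digits of version, 32 hex digits of trace id, 16 hex digits of span id
// and 2 hex digits of trace flags, separated by dashes.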
func (hp httpTraceContextPropagator) Extract(ctx context.Context, supplier apipropagation.Supplier) core.SpanContext {
h := supplier.Get(traceparentHeader)
if h == "" {
return core.EmptySpanContext()
}
h = strings.Trim(h, "-")
if !traceCtxRegExp.MatchString(h) {
return core.EmptySpanContext()
}
sections := strings.Split(h, "-")
if len(sections) < 4 {
return core.EmptySpanContext()
}
if len(sections[0]) != 2 {
return core.EmptySpanContext()
}
ver, err := hex.DecodeString(sections[0])
if err != nil {
return core.EmptySpanContext()
}
version := int(ver[0])
if version > maxVersion {
return core.EmptySpanContext()
}
if version == 0 && len(sections) != 4 {
return core.EmptySpanContext()
}
if len(sections[1]) != 32 {
return core.EmptySpanContext()
}
result, err := strconv.ParseUint(sections[1][0:16], 16, 64)
if err != nil {
return core.EmptySpanContext()
}
var sc core.SpanContext
sc.TraceID.High = result
result, err = strconv.ParseUint(sections[1][16:32], 16, 64)
if err != nil {
return core.EmptySpanContext()
}
sc.TraceID.Low = result
if len(sections[2]) != 16 {
return core.EmptySpanContext()
}
result, err = strconv.ParseUint(sections[2][0:], 16, 64)
if err != nil {
return core.EmptySpanContext()
}
sc.SpanID = result
if len(sections[3]) != 2 {
return core.EmptySpanContext()
}
opts, err := hex.DecodeString(sections[3])
if err != nil || len(opts) < 1 || (version == 0 && opts[0] > 2) {
return core.EmptySpanContext()
}
sc.TraceFlags = opts[0] &^ core.TraceFlagsUnused
if !sc.IsValid() {
return core.EmptySpanContext()
}
return sc
}
func (hp httpTraceContextPropagator) GetAllKeys() []string {
return []string{traceparentHeader}
}
// HttpTraceContextPropagator creates a new text format propagator that propagates SpanContext
// in W3C TraceContext format.
func HttpTraceContextPropagator() apipropagation.TextFormatPropagator {
return httpTraceContextPropagator{}
}
| 1 | 9,880 | nit:s/Traceparent/traceparent/ - As per the spec header name is all lowercase. | open-telemetry-opentelemetry-go | go |
@@ -484,3 +484,16 @@ type ErrDockerfileNotFound struct {
func (e *ErrDockerfileNotFound) Error() string {
return fmt.Sprintf("no Dockerfiles found within %s or a sub-directory level below", e.dir)
}
+
+// RelPath returns the path relative to the current working directory.
+func RelPath(fullPath string) (string, error) {
+ wkdir, err := os.Getwd()
+ if err != nil {
+ return "", fmt.Errorf("get working directory: %w", err)
+ }
+ path, err := filepath.Rel(wkdir, fullPath)
+ if err != nil {
+ return "", fmt.Errorf("get relative path of file: %w", err)
+ }
+ return path, nil
+} | 1 | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
// Package workspace contains functionality to manage a user's local workspace. This includes
// creating an application directory, reading and writing a summary file to associate the workspace with the application,
// and managing infrastructure-as-code files. The typical workspace will be structured like:
// .
// ├── copilot (application directory)
// │ ├── .workspace (workspace summary)
// │ └── my-service
// │ │ └── manifest.yml (service manifest)
// │ ├── buildspec.yml (buildspec for the pipeline's build stage)
// │ └── pipeline.yml (pipeline manifest)
// └── my-service-src (customer service code)
package workspace
import (
"encoding"
"errors"
"fmt"
"os"
"path/filepath"
"sort"
"github.com/aws/copilot-cli/internal/pkg/manifest"
"github.com/spf13/afero"
"gopkg.in/yaml.v3"
)
const (
// CopilotDirName is the name of the directory where generated infrastructure code for an application will be stored.
CopilotDirName = "copilot"
// SummaryFileName is the name of the file that is associated with the application.
SummaryFileName = ".workspace"
addonsDirName = "addons"
maximumParentDirsToSearch = 5
pipelineFileName = "pipeline.yml"
manifestFileName = "manifest.yml"
buildspecFileName = "buildspec.yml"
ymlFileExtension = ".yml"
dockerfileName = "Dockerfile"
)
// Summary is a description of what's associated with this workspace.
type Summary struct {
Application string `yaml:"application"` // Name of the application.
}
// Workspace typically represents a Git repository where the user has its infrastructure-as-code files as well as source files.
type Workspace struct {
workingDir string
copilotDir string
fsUtils *afero.Afero
}
// New returns a workspace, used for reading and writing to user's local workspace.
func New() (*Workspace, error) {
fs := afero.NewOsFs()
fsUtils := &afero.Afero{Fs: fs}
workingDir, err := os.Getwd()
if err != nil {
return nil, err
}
ws := Workspace{
workingDir: workingDir,
fsUtils: fsUtils,
}
return &ws, nil
}
// Create creates the copilot directory (if it doesn't already exist) in the current working directory,
// and saves a summary with the application name.
func (ws *Workspace) Create(appName string) error {
// Create an application directory, if one doesn't exist
if err := ws.createCopilotDir(); err != nil {
return err
}
// Grab an existing workspace summary, if one exists.
summary, err := ws.Summary()
if err == nil {
// If a summary exists, but is registered to a different application, throw an error.
if summary.Application != appName {
return &errHasExistingApplication{existingAppName: summary.Application}
}
// Otherwise our work is all done.
return nil
}
// If there isn't an existing workspace summary, create it.
var notFound *errNoAssociatedApplication
if errors.As(err, ¬Found) {
return ws.writeSummary(appName)
}
return err
}
// Summary returns a summary of the workspace - including the application name.
func (ws *Workspace) Summary() (*Summary, error) {
summaryPath, err := ws.summaryPath()
if err != nil {
return nil, err
}
summaryFileExists, _ := ws.fsUtils.Exists(summaryPath) // If an err occurs, return no applications.
if summaryFileExists {
value, err := ws.fsUtils.ReadFile(summaryPath)
if err != nil {
return nil, err
}
wsSummary := Summary{}
return &wsSummary, yaml.Unmarshal(value, &wsSummary)
}
return nil, &errNoAssociatedApplication{}
}
// ServiceNames returns the names of the services in the workspace.
func (ws *Workspace) ServiceNames() ([]string, error) {
return ws.workloadNames(func(wlType string) bool {
for _, t := range manifest.ServiceTypes {
if wlType == t {
return true
}
}
return false
})
}
// JobNames returns the names of all jobs in the workspace.
func (ws *Workspace) JobNames() ([]string, error) {
return ws.workloadNames(func(wlType string) bool {
for _, t := range manifest.JobTypes {
if wlType == t {
return true
}
}
return false
})
}
// workloadNames returns the names of all workloads (either services or jobs) in the workspace.
func (ws *Workspace) workloadNames(match func(string) bool) ([]string, error) {
copilotPath, err := ws.CopilotDirPath()
if err != nil {
return nil, err
}
files, err := ws.fsUtils.ReadDir(copilotPath)
if err != nil {
return nil, fmt.Errorf("read directory %s: %w", copilotPath, err)
}
var names []string
for _, f := range files {
if !f.IsDir() {
continue
}
if exists, _ := ws.fsUtils.Exists(filepath.Join(copilotPath, f.Name(), manifestFileName)); !exists {
// Swallow the error because we don't want to include any services that we don't have permissions to read.
continue
}
manifestBytes, err := ws.readWorkloadManifest(f.Name())
if err != nil {
return nil, fmt.Errorf("read manifest for workload %s: %w", f.Name(), err)
}
wlType, err := ws.readWorkloadType(manifestBytes)
if err != nil {
return nil, err
}
if match(wlType) {
names = append(names, f.Name())
}
}
return names, nil
}
// ReadServiceManifest returns the contents of the service's manifest under copilot/{name}/manifest.yml.
func (ws *Workspace) ReadServiceManifest(name string) ([]byte, error) {
mf, err := ws.readWorkloadManifest(name)
if err != nil {
return nil, fmt.Errorf("read service %s manifest file: %w", name, err)
}
return mf, nil
}
// ReadJobManifest returns the contents of the job's manifest under copilot/{name}/manifest.yml.
func (ws *Workspace) ReadJobManifest(name string) ([]byte, error) {
mf, err := ws.readWorkloadManifest(name)
if err != nil {
return nil, fmt.Errorf("read job %s manifest file: %w", name, err)
}
return mf, nil
}
func (ws *Workspace) readWorkloadManifest(name string) ([]byte, error) {
return ws.read(name, manifestFileName)
}
// ReadPipelineManifest returns the contents of the pipeline manifest under copilot/pipeline.yml.
func (ws *Workspace) ReadPipelineManifest() ([]byte, error) {
pmPath, err := ws.pipelineManifestPath()
if err != nil {
return nil, err
}
manifestExists, err := ws.fsUtils.Exists(pmPath)
if err != nil {
return nil, err
}
if !manifestExists {
return nil, ErrNoPipelineInWorkspace
}
return ws.read(pipelineFileName)
}
// WriteServiceManifest writes the service's manifest under the copilot/{name}/ directory.
func (ws *Workspace) WriteServiceManifest(marshaler encoding.BinaryMarshaler, name string) (string, error) {
data, err := marshaler.MarshalBinary()
if err != nil {
return "", fmt.Errorf("marshal service %s manifest to binary: %w", name, err)
}
return ws.write(data, name, manifestFileName)
}
// WriteJobManifest writes the job's manifest under the copilot/{name}/ directory.
func (ws *Workspace) WriteJobManifest(marshaler encoding.BinaryMarshaler, name string) (string, error) {
data, err := marshaler.MarshalBinary()
if err != nil {
return "", fmt.Errorf("marshal job %s manifest to binary: %w", name, err)
}
return ws.write(data, name, manifestFileName)
}
// WritePipelineBuildspec writes the pipeline buildspec under the copilot/ directory.
// If successful returns the full path of the file, otherwise returns an empty string and the error.
func (ws *Workspace) WritePipelineBuildspec(marshaler encoding.BinaryMarshaler) (string, error) {
data, err := marshaler.MarshalBinary()
if err != nil {
return "", fmt.Errorf("marshal pipeline buildspec to binary: %w", err)
}
return ws.write(data, buildspecFileName)
}
// WritePipelineManifest writes the pipeline manifest under the copilot directory.
// If successful returns the full path of the file, otherwise returns an empty string and the error.
func (ws *Workspace) WritePipelineManifest(marshaler encoding.BinaryMarshaler) (string, error) {
data, err := marshaler.MarshalBinary()
if err != nil {
return "", fmt.Errorf("marshal pipeline manifest to binary: %w", err)
}
return ws.write(data, pipelineFileName)
}
// DeleteWorkspaceFile removes the .workspace file under copilot/ directory.
// This will be called during app delete, we do not want to delete any other generated files.
func (ws *Workspace) DeleteWorkspaceFile() error {
return ws.fsUtils.Remove(filepath.Join(CopilotDirName, SummaryFileName))
}
// ReadAddonsDir returns a list of file names under a service's "addons/" directory.
func (ws *Workspace) ReadAddonsDir(svcName string) ([]string, error) {
copilotPath, err := ws.CopilotDirPath()
if err != nil {
return nil, err
}
var names []string
files, err := ws.fsUtils.ReadDir(filepath.Join(copilotPath, svcName, addonsDirName))
if err != nil {
return nil, err
}
for _, f := range files {
names = append(names, f.Name())
}
return names, nil
}
// ReadAddon returns the contents of a file under the service's "addons/" directory.
func (ws *Workspace) ReadAddon(svc, fname string) ([]byte, error) {
return ws.read(svc, addonsDirName, fname)
}
// WriteAddon writes the content of an addon file under "{svc}/addons/{name}.yml".
// If successful returns the full path of the file, otherwise an empty string and an error.
func (ws *Workspace) WriteAddon(content encoding.BinaryMarshaler, svc, name string) (string, error) {
data, err := content.MarshalBinary()
if err != nil {
return "", fmt.Errorf("marshal binary addon content: %w", err)
}
fname := name + ymlFileExtension
return ws.write(data, svc, addonsDirName, fname)
}
// FileStat wraps the os.Stat function.
type FileStat interface {
Stat(name string) (os.FileInfo, error)
}
// IsInGitRepository returns true if the current working directory is a git repository.
func IsInGitRepository(fs FileStat) bool {
_, err := fs.Stat(".git")
return !os.IsNotExist(err)
}
func (ws *Workspace) writeSummary(appName string) error {
summaryPath, err := ws.summaryPath()
if err != nil {
return err
}
workspaceSummary := Summary{
Application: appName,
}
serializedWorkspaceSummary, err := yaml.Marshal(workspaceSummary)
if err != nil {
return err
}
return ws.fsUtils.WriteFile(summaryPath, serializedWorkspaceSummary, 0644)
}
func (ws *Workspace) pipelineManifestPath() (string, error) {
copilotPath, err := ws.CopilotDirPath()
if err != nil {
return "", err
}
pipelineManifestPath := filepath.Join(copilotPath, pipelineFileName)
return pipelineManifestPath, nil
}
func (ws *Workspace) summaryPath() (string, error) {
copilotPath, err := ws.CopilotDirPath()
if err != nil {
return "", err
}
workspaceSummaryPath := filepath.Join(copilotPath, SummaryFileName)
return workspaceSummaryPath, nil
}
func (ws *Workspace) createCopilotDir() error {
// First check to see if a manifest directory already exists
existingWorkspace, _ := ws.CopilotDirPath()
if existingWorkspace != "" {
return nil
}
return ws.fsUtils.Mkdir(CopilotDirName, 0755)
}
// CopilotDirPath returns the absolute path to the workspace's copilot dir.
func (ws *Workspace) CopilotDirPath() (string, error) {
if ws.copilotDir != "" {
return ws.copilotDir, nil
}
// Are we in the application directory?
inCopilotDir := filepath.Base(ws.workingDir) == CopilotDirName
if inCopilotDir {
ws.copilotDir = ws.workingDir
return ws.copilotDir, nil
}
searchingDir := ws.workingDir
for try := 0; try < maximumParentDirsToSearch; try++ {
currentDirectoryPath := filepath.Join(searchingDir, CopilotDirName)
inCurrentDirPath, err := ws.fsUtils.DirExists(currentDirectoryPath)
if err != nil {
return "", err
}
if inCurrentDirPath {
ws.copilotDir = currentDirectoryPath
return ws.copilotDir, nil
}
searchingDir = filepath.Dir(searchingDir)
}
return "", &errWorkspaceNotFound{
CurrentDirectory: ws.workingDir,
ManifestDirectoryName: CopilotDirName,
NumberOfLevelsChecked: maximumParentDirsToSearch,
}
}
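// As an illustration, if the working directory is /home/user/repo/frontend and the copilot
// directory lives at /home/user/repo/copilot, the loop above finds it on its second iteration;
// once maximumParentDirsToSearch levels have been checked, the search gives up with
// errWorkspaceNotFound.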
func (ws *Workspace) readWorkloadType(dat []byte) (string, error) {
wl := struct {
Type string `yaml:"type"`
}{}
if err := yaml.Unmarshal(dat, &wl); err != nil {
return "", err
}
return wl.Type, nil
}
// write flushes the data to a file under the copilot directory joined by path elements.
func (ws *Workspace) write(data []byte, elem ...string) (string, error) {
copilotPath, err := ws.CopilotDirPath()
if err != nil {
return "", err
}
pathElems := append([]string{copilotPath}, elem...)
filename := filepath.Join(pathElems...)
if err := ws.fsUtils.MkdirAll(filepath.Dir(filename), 0755 /* -rwxr-xr-x */); err != nil {
return "", fmt.Errorf("create directories for file %s: %w", filename, err)
}
exist, err := ws.fsUtils.Exists(filename)
if err != nil {
return "", fmt.Errorf("check if manifest file %s exists: %w", filename, err)
}
if exist {
return "", &ErrFileExists{FileName: filename}
}
if err := ws.fsUtils.WriteFile(filename, data, 0644 /* -rw-r--r-- */); err != nil {
return "", fmt.Errorf("write manifest file: %w", err)
}
return filename, nil
}
// read returns the contents of the file under the copilot directory joined by path elements.
func (ws *Workspace) read(elem ...string) ([]byte, error) {
copilotPath, err := ws.CopilotDirPath()
if err != nil {
return nil, err
}
pathElems := append([]string{copilotPath}, elem...)
return ws.fsUtils.ReadFile(filepath.Join(pathElems...))
}
// ListDockerfiles returns the list of Dockerfiles within the current
// working directory and a sub-directory level below. If an error occurs while
// reading directories, or if no Dockerfiles are found, it returns an error.
func (ws *Workspace) ListDockerfiles() ([]string, error) {
wdFiles, err := ws.fsUtils.ReadDir(ws.workingDir)
if err != nil {
return nil, fmt.Errorf("read directory: %w", err)
}
var directories []string
for _, wdFile := range wdFiles {
// Add current directory if a Dockerfile exists, otherwise continue.
if !wdFile.IsDir() {
if wdFile.Name() == dockerfileName {
directories = append(directories, filepath.Dir(wdFile.Name()))
}
continue
}
// Add sub-directories containing a Dockerfile one level below current directory.
subFiles, err := ws.fsUtils.ReadDir(wdFile.Name())
if err != nil {
return nil, fmt.Errorf("read directory: %w", err)
}
for _, f := range subFiles {
// NOTE: ignore directories in sub-directories.
if f.IsDir() {
continue
}
if f.Name() == dockerfileName {
directories = append(directories, wdFile.Name())
}
}
}
if len(directories) == 0 {
return nil, &ErrDockerfileNotFound{
dir: ws.workingDir,
}
}
sort.Strings(directories)
dockerfiles := make([]string, 0, len(directories))
for _, dir := range directories {
file := dir + "/" + dockerfileName
dockerfiles = append(dockerfiles, file)
}
return dockerfiles, nil
}
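// As a rough example, a repository containing ./Dockerfile and ./backend/Dockerfile yields
// []string{"./Dockerfile", "backend/Dockerfile"}: the directories are sorted first and the
// Dockerfile name is appended to each.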
// ErrDockerfileNotFound is returned when no Dockerfiles could be found in the current
// working directory or in any directories one level down from it.
type ErrDockerfileNotFound struct {
dir string
}
func (e *ErrDockerfileNotFound) Error() string {
return fmt.Sprintf("no Dockerfiles found within %s or a sub-directory level below", e.dir)
}
| 1 | 15,487 | We need to have unit tests for it since it is now a public function. Also I'm not a fan for making this movement since `relPath` doesn't consume any info in this pkg. | aws-copilot-cli | go |
@@ -5,16 +5,11 @@
*/
package edu.harvard.iq.dataverse.datasetutility;
-import edu.harvard.iq.dataverse.DataFile;
+import edu.harvard.iq.dataverse.*;
import edu.harvard.iq.dataverse.DataFile.ChecksumType;
-import edu.harvard.iq.dataverse.DataFileServiceBean;
-import edu.harvard.iq.dataverse.Dataset;
-import edu.harvard.iq.dataverse.DatasetServiceBean;
-import edu.harvard.iq.dataverse.DatasetVersion;
-import edu.harvard.iq.dataverse.EjbDataverseEngine;
-import edu.harvard.iq.dataverse.FileMetadata;
-import edu.harvard.iq.dataverse.PermissionServiceBean;
+import edu.harvard.iq.dataverse.api.Files;
import edu.harvard.iq.dataverse.api.Util;
+import edu.harvard.iq.dataverse.authorization.users.AuthenticatedUser;
import edu.harvard.iq.dataverse.authorization.users.User;
import edu.harvard.iq.dataverse.engine.command.Command;
import edu.harvard.iq.dataverse.engine.command.DataverseRequest; | 1 | /*
* To change this license header, choose License Headers in Project Properties.
* To change this template file, choose Tools | Templates
* and open the template in the editor.
*/
package edu.harvard.iq.dataverse.datasetutility;
import edu.harvard.iq.dataverse.DataFile;
import edu.harvard.iq.dataverse.DataFile.ChecksumType;
import edu.harvard.iq.dataverse.DataFileServiceBean;
import edu.harvard.iq.dataverse.Dataset;
import edu.harvard.iq.dataverse.DatasetServiceBean;
import edu.harvard.iq.dataverse.DatasetVersion;
import edu.harvard.iq.dataverse.EjbDataverseEngine;
import edu.harvard.iq.dataverse.FileMetadata;
import edu.harvard.iq.dataverse.PermissionServiceBean;
import edu.harvard.iq.dataverse.api.Util;
import edu.harvard.iq.dataverse.authorization.users.User;
import edu.harvard.iq.dataverse.engine.command.Command;
import edu.harvard.iq.dataverse.engine.command.DataverseRequest;
import edu.harvard.iq.dataverse.engine.command.exception.CommandException;
import edu.harvard.iq.dataverse.engine.command.impl.AbstractCreateDatasetCommand;
import edu.harvard.iq.dataverse.engine.command.impl.CreateNewDatasetCommand;
import edu.harvard.iq.dataverse.engine.command.impl.DeleteDataFileCommand;
import edu.harvard.iq.dataverse.engine.command.impl.RestrictFileCommand;
import edu.harvard.iq.dataverse.engine.command.impl.UpdateDatasetVersionCommand;
import edu.harvard.iq.dataverse.ingest.IngestServiceBean;
import edu.harvard.iq.dataverse.util.BundleUtil;
import edu.harvard.iq.dataverse.util.FileUtil;
import edu.harvard.iq.dataverse.util.SystemConfig;
import edu.harvard.iq.dataverse.util.json.JsonPrinter;
import java.io.IOException;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.Objects;
import java.util.ResourceBundle;
import java.util.Set;
import java.util.logging.Level;
import java.util.logging.Logger;
import javax.ejb.EJBException;
import javax.json.JsonObjectBuilder;
import javax.validation.ConstraintViolation;
import javax.ws.rs.core.Response;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.io.IOUtils;
import org.ocpsoft.common.util.Strings;
/**
* Methods to add or replace a single file.
*
* Usage example:
*
* // (1) Instantiate the class
*
* AddReplaceFileHelper addFileHelper = new AddReplaceFileHelper(dvRequest2,
* this.ingestService,
* this.datasetService,
* this.fileService,
* this.permissionSvc,
* this.commandEngine);
*
* // (2) Run file "ADD"
*
* addFileHelper.runAddFileByDatasetId(datasetId,
* newFileName,
* newFileContentType,
* newFileInputStream);
* // (2a) Check for errors
* if (addFileHelper.hasError()){
* // get some errors
* System.out.println(addFileHelper.getErrorMessagesAsString("\n"));
* }
*
*
* // OR (3) Run file "REPLACE"
*
* addFileHelper.runReplaceFile(datasetId,
* newFileName,
* newFileContentType,
* newFileInputStream,
* fileToReplaceId);
* // (2a) Check for errors
* if (addFileHelper.hasError()){
* // get some errors
* System.out.println(addFileHelper.getErrorMessagesAsString("\n"));
* }
*
*
*
* @author rmp553
*/
public class AddReplaceFileHelper{
private static final Logger logger = Logger.getLogger(AddReplaceFileHelper.class.getCanonicalName());
public static String FILE_ADD_OPERATION = "FILE_ADD_OPERATION";
public static String FILE_REPLACE_OPERATION = "FILE_REPLACE_OPERATION";
public static String FILE_REPLACE_FORCE_OPERATION = "FILE_REPLACE_FORCE_OPERATION";
private String currentOperation;
// -----------------------------------
// All the needed EJBs, passed to the constructor
// -----------------------------------
private IngestServiceBean ingestService;
private DatasetServiceBean datasetService;
private DataFileServiceBean fileService;
private PermissionServiceBean permissionService;
private EjbDataverseEngine commandEngine;
private SystemConfig systemConfig;
// -----------------------------------
// Instance variables directly added
// -----------------------------------
private Dataset dataset; // constructor (for add, not replace)
private DataverseRequest dvRequest; // constructor
private InputStream newFileInputStream; // step 30
private String newFileName; // step 30
private String newFileContentType; // step 30
private String newStorageIdentifier; // step 30
private String newCheckSum; // step 30
private ChecksumType newCheckSumType; //step 30
// -- Optional
private DataFile fileToReplace; // step 25
// -----------------------------------
// Instance variables derived from other input
// -----------------------------------
private User user;
private DatasetVersion workingVersion;
private DatasetVersion clone;
List<DataFile> initialFileList;
List<DataFile> finalFileList;
// -----------------------------------
// Ingested files
// -----------------------------------
private List<DataFile> newlyAddedFiles;
private List<FileMetadata> newlyAddedFileMetadatas;
// -----------------------------------
// For error handling
// -----------------------------------
private boolean errorFound;
private List<String> errorMessages;
private Response.Status httpErrorCode; // optional
// For Force Replace, this becomes a warning rather than an error
//
private boolean contentTypeWarningFound;
private String contentTypeWarningString;
private boolean duplicateFileErrorFound;
private String duplicateFileErrorString;
private boolean duplicateFileWarningFound;
private String duplicateFileWarningString;
private String duplicateFileComponentMessage;
public String getDuplicateFileComponentMessage() {
return duplicateFileComponentMessage;
}
public void setDuplicateFileComponentMessage(String duplicateFileComponentMessage) {
this.duplicateFileComponentMessage = duplicateFileComponentMessage;
}
public boolean isDuplicateFileErrorFound() {
return duplicateFileErrorFound;
}
public void setDuplicateFileErrorFound(boolean duplicateFileErrorFound) {
this.duplicateFileErrorFound = duplicateFileErrorFound;
}
public String getDuplicateFileErrorString() {
return duplicateFileErrorString;
}
public void setDuplicateFileErrorString(String duplicateFileErrorString) {
this.duplicateFileErrorString = duplicateFileErrorString;
}
public boolean isDuplicateFileWarningFound() {
return duplicateFileWarningFound;
}
public void setDuplicateFileWarningFound(boolean duplicateFileWarningFound) {
this.duplicateFileWarningFound = duplicateFileWarningFound;
}
public String getDuplicateFileWarningString() {
return duplicateFileWarningString;
}
public void setDuplicateFileWarningString(String duplicateFileWarningString) {
this.duplicateFileWarningString = duplicateFileWarningString;
}
public void resetFileHelper(){
initErrorHandling();
// operation
currentOperation = null;
// dataset level
dataset = null;
// file to replace
fileToReplace = null;
newFileInputStream = null;
newFileName = null;
newFileContentType = null;
// file lists
initialFileList = null;
finalFileList = null;
// final files
newlyAddedFiles = null;
newlyAddedFileMetadatas = null;
}
/**
* MAIN CONSTRUCTOR -- minimal requirements
*
* @param dataset
* @param ingestService
* @param datasetService
* @param dvRequest
*/
public AddReplaceFileHelper(DataverseRequest dvRequest,
IngestServiceBean ingestService,
DatasetServiceBean datasetService,
DataFileServiceBean fileService,
PermissionServiceBean permissionService,
EjbDataverseEngine commandEngine,
SystemConfig systemConfig){
// ---------------------------------
// make sure DataverseRequest isn't null and has a user
// ---------------------------------
if (dvRequest == null){
throw new NullPointerException("dvRequest cannot be null");
}
if (dvRequest.getUser() == null){
throw new NullPointerException("dvRequest cannot have a null user");
}
// ---------------------------------
// make sure services aren't null
// ---------------------------------
if (ingestService == null){
throw new NullPointerException("ingestService cannot be null");
}
if (datasetService == null){
throw new NullPointerException("datasetService cannot be null");
}
if (fileService == null){
throw new NullPointerException("fileService cannot be null");
}
if (permissionService == null){
throw new NullPointerException("ingestService cannot be null");
}
if (commandEngine == null){
throw new NullPointerException("commandEngine cannot be null");
}
if (systemConfig == null) {
throw new NullPointerException("systemConfig cannot be null");
}
// ---------------------------------
this.ingestService = ingestService;
this.datasetService = datasetService;
this.fileService = fileService;
this.permissionService = permissionService;
this.commandEngine = commandEngine;
this.systemConfig = systemConfig;
initErrorHandling();
// Initiate instance vars
this.dataset = null;
this.dvRequest = dvRequest;
this.user = dvRequest.getUser();
}
/**
*
* @param chosenDataset
* @param newFileName
* @param newFileContentType
* @param newStorageIdentifier
* @param newFileInputStream
* @param optionalFileParams
* @return
*/
public boolean runAddFileByDataset(Dataset chosenDataset,
String newFileName,
String newFileContentType,
String newStorageIdentifier,
InputStream newFileInputStream,
OptionalFileParams optionalFileParams){
msgt(">> runAddFileByDatasetId");
initErrorHandling();
this.currentOperation = FILE_ADD_OPERATION;
if (!this.step_001_loadDataset(chosenDataset)){
return false;
}
//return this.runAddFile(this.dataset, newFileName, newFileContentType, newFileInputStream, optionalFileParams);
return this.runAddReplaceFile(dataset, newFileName, newFileContentType, newStorageIdentifier, newFileInputStream, optionalFileParams);
}
/**
* After the constructor, this method is called to add a file
*
* @param dataset
* @param newFileName
* @param newFileContentType
* @param newFileInputStream
* @return
*/
/*
public boolean runAddFile(Dataset dataset,
String newFileName,
String newFileContentType,
InputStream newFileInputStream,
OptionalFileParams optionalFileParams){
msgt(">> runAddFile");
initErrorHandling();
if (this.hasError()){
return false;
}
this.currentOperation = FILE_ADD_OPERATION;
return this.runAddReplaceFile(dataset, newFileName, newFileContentType, newFileInputStream, optionalFileParams);
}*/
/**
* After the constructor, this method is called to replace a file
*
* @param oldFileId
* @param newFileName
* @param newFileContentType
* @param newStorageIdentifier
* @param newFileInputStream
* @return
*/
public boolean runForceReplaceFile(Long oldFileId,
String newFileName,
String newFileContentType,
String newStorageIdentifier,
InputStream newFileInputStream,
OptionalFileParams optionalFileParams){
msgt(">> runForceReplaceFile");
initErrorHandling();
this.currentOperation = FILE_REPLACE_FORCE_OPERATION;
if (oldFileId==null){
this.addErrorSevere(getBundleErr("existing_file_to_replace_id_is_null"));
return false;
}
// Loads local variable "fileToReplace"
//
if (!this.step_005_loadFileToReplaceById(oldFileId)){
return false;
}
return this.runAddReplaceFile(fileToReplace.getOwner(), newFileName, newFileContentType, newStorageIdentifier, newFileInputStream, optionalFileParams);
}
public boolean runReplaceFile(Long oldFileId,
String newFileName,
String newFileContentType,
String newStorageIdentifier,
InputStream newFileInputStream,
OptionalFileParams optionalFileParams){
msgt(">> runReplaceFile");
initErrorHandling();
this.currentOperation = FILE_REPLACE_OPERATION;
if (oldFileId==null){
this.addErrorSevere(getBundleErr("existing_file_to_replace_id_is_null"));
return false;
}
// Loads local variable "fileToReplace"
//
if (!this.step_005_loadFileToReplaceById(oldFileId)){
return false;
}
return this.runAddReplaceFile(fileToReplace.getOwner(), newFileName, newFileContentType, newStorageIdentifier, newFileInputStream, optionalFileParams);
}
/**
* Here we're going to run through the steps to ADD or REPLACE a file
*
* The difference between ADD and REPLACE (add/delete) is:
*
* oldFileId - For ADD, set to null
* oldFileId - For REPLACE, set to id of file to replace
*
* This has now been broken into Phase 1 and Phase 2
*
* The APIs will use this method and call Phase 1 & Phase 2 consecutively
*
* The UI will call Phase 1 on initial upload and
* then run Phase 2 if the user chooses to save the changes.
* @param newStorageIdentifier
*
*
* @return
*/
private boolean runAddReplaceFile(Dataset owner,
String newFileName, String newFileContentType,
String newStorageIdentifier, InputStream newFileInputStream,
OptionalFileParams optionalFileParams){
// Run "Phase 1" - Initial ingest of file + error check
// But don't save the dataset version yet
//
boolean phase1Success = runAddReplacePhase1(owner,
newFileName,
newFileContentType,
newStorageIdentifier,
newFileInputStream,
optionalFileParams
);
if (!phase1Success){
return false;
}
return runAddReplacePhase2();
}
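// Rough sketch of how the two phases are typically driven (method names as defined in this class,
// variable names are placeholders):
//
//   // API callers run both phases back to back:
//   helper.runAddFileByDataset(dataset, fileName, contentType, storageId, inputStream, params);
//
//   // UI callers split the work so the user can review the upload in between:
//   helper.runReplaceFromUI_Phase1(fileId, fileName, contentType, inputStream, storageId, params);
//   // ... user reviews / edits metadata ...
//   helper.runReplaceFromUI_Phase2();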
/**
* Note: UI replace is always a "force replace" which means
* the replacement file can have a different content type
*
* @param oldFileId
* @param newFileName
* @param newFileContentType
* @param newFileInputStream
* @param optionalFileParams
* @return
*/
public boolean runReplaceFromUI_Phase1(Long oldFileId,
String newFileName,
String newFileContentType,
InputStream newFileInputStream,
String fullStorageId,
OptionalFileParams optionalFileParams){
initErrorHandling();
this.currentOperation = FILE_REPLACE_FORCE_OPERATION;
if (oldFileId==null){
this.addErrorSevere(getBundleErr("existing_file_to_replace_id_is_null"));
return false;
}
// Loads local variable "fileToReplace"
//
if (!this.step_005_loadFileToReplaceById(oldFileId)){
return false;
}
//Update params to match existing file (except checksum, which should match the new file)
if(fileToReplace != null) {
String checksum = optionalFileParams.getCheckSum();
ChecksumType checkSumType = optionalFileParams.getCheckSumType();
try {
optionalFileParams = new OptionalFileParams(fileToReplace);
optionalFileParams.setCheckSum(checksum, checkSumType);
} catch (DataFileTagException e) {
// Shouldn't happen since fileToReplace should have valid tags
e.printStackTrace();
}
}
return this.runAddReplacePhase1(fileToReplace.getOwner(),
newFileName,
newFileContentType,
fullStorageId,
newFileInputStream,
optionalFileParams);
}
/**
* For the UI: File add/replace has been broken into 2 steps
*
* Phase 1 (here): Add/replace the file and make sure there are no errors
* But don't update the Dataset (yet)
* @param newStorageIdentifier
*
* @return
*/
private boolean runAddReplacePhase1(Dataset owner,
String newFileName,
String newFileContentType,
String newStorageIdentifier, InputStream newFileInputStream,
OptionalFileParams optionalFileParams){
if (this.hasError()){
return false; // possible to have errors already...
}
msgt("step_001_loadDataset");
if (!this.step_001_loadDataset(owner)){
return false;
}
msgt("step_010_VerifyUserAndPermissions");
if (!this.step_010_VerifyUserAndPermissions()){
return false;
}
msgt("step_020_loadNewFile");
if (!this.step_020_loadNewFile(newFileName, newFileContentType, newStorageIdentifier, newFileInputStream)){
return false;
}
if(optionalFileParams != null) {
if(optionalFileParams.hasCheckSum()) {
newCheckSum = optionalFileParams.getCheckSum();
newCheckSumType = optionalFileParams.getCheckSumType();
}
}
msgt("step_030_createNewFilesViaIngest");
if (!this.step_030_createNewFilesViaIngest()){
return false;
}
msgt("step_050_checkForConstraintViolations");
if (!this.step_050_checkForConstraintViolations()){
return false;
}
msgt("step_055_loadOptionalFileParams");
if (!this.step_055_loadOptionalFileParams(optionalFileParams)){
return false;
}
// if the fileToReplace hasn't been released, carry its version lineage and any file PID over to the replacement
if (fileToReplace != null && !fileToReplace.isReleased()) {
DataFile df = finalFileList.get(0); // step_055 uses a loop and assumes only one file
// set the replacement file's previous and root datafileIds to match (unless
// they are the defaults)
if (fileToReplace.getPreviousDataFileId() != null) {
df.setPreviousDataFileId(fileToReplace.getPreviousDataFileId());
df.setRootDataFileId(fileToReplace.getRootDataFileId());
}
// Reuse any file PID during a replace operation (if File PIDs are in use)
if (systemConfig.isFilePIDsEnabled()) {
df.setGlobalId(fileToReplace.getGlobalId());
df.setGlobalIdCreateTime(fileToReplace.getGlobalIdCreateTime());
// Should be true or fileToReplace wouldn't have an identifier (since it's not
// yet released in this if statement)
df.setIdentifierRegistered(fileToReplace.isIdentifierRegistered());
fileToReplace.setGlobalId(null);
}
}
return true;
}
public boolean runReplaceFromUI_Phase2(){
return runAddReplacePhase2();
}
/**
* Called from the UI backing bean
*
* @param categoriesList
* @return
*/
public boolean updateCategoriesFromUI(List<String> categoriesList){
if (hasError()){
logger.severe("Should not be calling this method");
return false;
}
if ((finalFileList==null)||(finalFileList.size()==0)){
throw new NullPointerException("finalFileList needs at least 1 file!!");
}
// don't need to make updates
//
if (categoriesList ==null){
return true;
}
// remove nulls, dupes, etc.
//
categoriesList = Util.removeDuplicatesNullsEmptyStrings(categoriesList);
if (categoriesList.isEmpty()){
return true;
}
for (DataFile df : finalFileList){
df.getFileMetadata().setCategoriesByName(categoriesList);
}
return true;
}
/**
* Called from the UI backing bean
* @param label
* @param description
* @param restricted
* @return
*/
public boolean updateLabelDescriptionRestrictedFromUI(String label, String description, Boolean restricted){
if (hasError()){
logger.severe("Should not be calling this method");
return false;
}
if ((finalFileList==null)||(finalFileList.size()==0)){
throw new NullPointerException("finalFileList needs at least 1 file!!");
}
for (DataFile df : finalFileList){
// update description
if (description != null){
df.getFileMetadata().setDescription(description.trim());
}
// update label
if (label != null){
df.getFileMetadata().setLabel(label.trim());
}
// update restriction
if (restricted == null){
restricted = false;
}
df.getFileMetadata().setRestricted(restricted);
}
return true;
}
/**
* For the UI: File add/replace has been broken into 2 steps
*
* Phase 2 (here): Phase 1 has run ok, Update the Dataset -- issue the commands!
*
* @return
*/
private boolean runAddReplacePhase2(){
if (this.hasError()){
return false; // possible to have errors already...
}
if ((finalFileList == null)||(finalFileList.isEmpty())){
addError(getBundleErr("phase2_called_early_no_new_files"));
return false;
}
msgt("step_060_addFilesViaIngestService");
if (!this.step_060_addFilesViaIngestService()){
return false;
}
if (this.isFileReplaceOperation()){
msgt("step_080_run_update_dataset_command_for_replace");
if (!this.step_080_run_update_dataset_command_for_replace()){
return false;
}
}else{
msgt("step_070_run_update_dataset_command");
if (!this.step_070_run_update_dataset_command()){
return false;
}
}
msgt("step_090_notifyUser");
if (!this.step_090_notifyUser()){
return false;
}
msgt("step_100_startIngestJobs");
if (!this.step_100_startIngestJobs()){
return false;
}
return true;
}
/**
* Get for currentOperation
* @return String
*/
public String getCurrentOperation(){
return this.currentOperation;
}
/**
* Is this a file FORCE replace operation?
*
* Only overrides warnings of content type change
*
* @return
*/
public boolean isForceFileOperation(){
return this.currentOperation.equals(FILE_REPLACE_FORCE_OPERATION);
}
/**
* Is this a file replace operation?
* @return
*/
public boolean isFileReplaceOperation(){
if (this.currentOperation.equals(FILE_REPLACE_OPERATION)){
return true;
}else if (this.currentOperation.equals(FILE_REPLACE_FORCE_OPERATION)){
return true;
}
return false;
}
/**
* Is this a file add operation?
*
* @return
*/
public boolean isFileAddOperation(){
return this.currentOperation.equals(FILE_ADD_OPERATION);
}
/**
* Initialize error handling vars
*/
private void initErrorHandling(){
this.errorFound = false;
this.errorMessages = new ArrayList<>();
this.httpErrorCode = null;
contentTypeWarningFound = false;
contentTypeWarningString = null;
}
/**
* Add error message
*
* @param errMsg
*/
private void addError(String errMsg){
if (errMsg == null){
throw new NullPointerException("errMsg cannot be null");
}
this.errorFound = true;
logger.fine(errMsg);
this.errorMessages.add(errMsg);
}
/**
* Add error message and, if it's known, the HTTP response code
*
* @param badHttpResponse, e.g. Response.Status.FORBIDDEN
* @param errMsg
*/
private void addError(Response.Status badHttpResponse, String errMsg){
if (badHttpResponse == null){
throw new NullPointerException("badHttpResponse cannot be null");
}
if (errMsg == null){
throw new NullPointerException("errMsg cannot be null");
}
this.httpErrorCode = badHttpResponse;
this.addError(errMsg);
}
private void addErrorWarning(String errMsg){
if (errMsg == null){
throw new NullPointerException("errMsg cannot be null");
}
logger.severe(errMsg);
this.setDuplicateFileWarning(errMsg);
this.errorMessages.add(errMsg);
}
private void addErrorSevere(String errMsg){
if (errMsg == null){
throw new NullPointerException("errMsg cannot be null");
}
this.errorFound = true;
logger.severe(errMsg);
this.errorMessages.add(errMsg);
}
/**
* Was an error found?
*
* @return
*/
public boolean hasError(){
return this.errorFound;
}
/**
* get error messages
*
* @return
*/
public List<String> getErrorMessages(){
return this.errorMessages;
}
/**
* get error messages as string
*
* @param joinString
* @return
*/
public String getErrorMessagesAsString(String joinString){
if (joinString==null){
joinString = "\n";
}
return String.join(joinString, this.errorMessages);
}
/**
* For API use, return the HTTP error code
*
* Default is BAD_REQUEST
*
* @return
*/
public Response.Status getHttpErrorCode(){
if (!hasError()){
logger.severe("Do not call this method unless there is an error! check '.hasError()'");
}
if (httpErrorCode == null){
return Response.Status.BAD_REQUEST;
}else{
return httpErrorCode;
}
}
/**
* Convenience method for getting bundle properties
*
* @param msgName
* @return
* @deprecated This method is deprecated because you have to know to search
* only part of a bundle key ("add_file_error") rather than the full bundle
* key ("file.addreplace.error.add.add_file_error") leading you to believe
* that the bundle key is not used.
*/
@Deprecated
private String getBundleMsg(String msgName, boolean isErr){
if (msgName == null){
throw new NullPointerException("msgName cannot be null");
}
if (isErr){
return BundleUtil.getStringFromBundle("file.addreplace.error." + msgName);
}else{
return BundleUtil.getStringFromBundle("file.addreplace.success." + msgName);
}
}
/**
* Convenience method for getting bundle error message
*
* @param msgName
* @return
*/
private String getBundleErr(String msgName){
return this.getBundleMsg(msgName, true);
}
/**
*
*/
private boolean step_001_loadDataset(Dataset selectedDataset){
if (this.hasError()){
return false;
}
if (selectedDataset == null){
this.addErrorSevere(getBundleErr("dataset_is_null"));
return false;
}
dataset = selectedDataset;
return true;
}
/**
* Step 10 Verify User and Permissions
*
*
* @return
*/
private boolean step_010_VerifyUserAndPermissions(){
if (this.hasError()){
return false;
}
return step_015_auto_check_permissions(dataset);
}
private boolean step_015_auto_check_permissions(Dataset datasetToCheck){
if (this.hasError()){
return false;
}
if (datasetToCheck == null){
addError(getBundleErr("dataset_is_null"));
return false;
}
// Make a temp. command
//
Command updateDatasetVersionCommand = new UpdateDatasetVersionCommand(datasetToCheck, dvRequest);
// Can this user run the command?
//
if (!permissionService.isUserAllowedOn(dvRequest.getUser(), updateDatasetVersionCommand, datasetToCheck)) {
addError(Response.Status.FORBIDDEN,getBundleErr("no_edit_dataset_permission"));
return false;
}
return true;
}
private boolean step_020_loadNewFile(String fileName, String fileContentType, String storageIdentifier, InputStream fileInputStream){
if (this.hasError()){
return false;
}
if (fileName == null){
this.addErrorSevere(getBundleErr("filename_undetermined"));
return false;
}
if (fileContentType == null){
this.addErrorSevere(getBundleErr("file_content_type_undetermined"));
return false;
}
if (fileInputStream == null) {
if (storageIdentifier == null) {
this.addErrorSevere(getBundleErr("file_upload_failed"));
return false;
}
}
newFileName = fileName;
newFileContentType = fileContentType;
//One of these will be null
newStorageIdentifier = storageIdentifier;
newFileInputStream = fileInputStream;
return true;
}
/**
* Optional: old file to replace
*
* @param dataFileId
* @return
*/
private boolean step_005_loadFileToReplaceById(Long dataFileId){
if (this.hasError()){
return false;
}
// Check for Null
//
if (dataFileId == null){
this.addErrorSevere(getBundleErr("existing_file_to_replace_id_is_null"));
return false;
}
// Does the file exist?
//
DataFile existingFile = fileService.find(dataFileId);
if (existingFile == null){
this.addError(BundleUtil.getStringFromBundle("file.addreplace.error.existing_file_to_replace_not_found_by_id", Collections.singletonList(dataFileId.toString())));
return false;
}
// Do we have permission to replace this file? e.g. Edit the file's dataset
//
if (!step_015_auto_check_permissions(existingFile.getOwner())){
return false;
};
// Is the file in the latest dataset version?
//
if (!step_007_auto_isReplacementInLatestVersion(existingFile)){
return false;
}
fileToReplace = existingFile;
return true;
}
/**
* Make sure the file to replace is in the workingVersion
* -- e.g. that it wasn't deleted from a previous Version
*
* @return
*/
private boolean step_007_auto_isReplacementInLatestVersion(DataFile existingFile){
if (existingFile == null){
throw new NullPointerException("existingFile cannot be null!");
}
if (this.hasError()){
return false;
}
DatasetVersion latestVersion = existingFile.getOwner().getLatestVersion();
boolean fileInLatestVersion = false;
for (FileMetadata fm : latestVersion.getFileMetadatas()){
if (fm.getDataFile().getId() != null){
if (Objects.equals(existingFile.getId(),fm.getDataFile().getId())){
fileInLatestVersion = true;
}
}
}
if (!fileInLatestVersion){
addError(getBundleErr("existing_file_not_in_latest_published_version"));
return false;
}
return true;
}
private boolean step_030_createNewFilesViaIngest(){
if (this.hasError()){
return false;
}
// Load the working version of the Dataset
workingVersion = dataset.getEditVersion();
clone = workingVersion.cloneDatasetVersion();
try {
initialFileList = FileUtil.createDataFiles(workingVersion,
this.newFileInputStream,
this.newFileName,
this.newFileContentType,
this.newStorageIdentifier,
this.newCheckSum,
this.newCheckSumType,
this.systemConfig);
} catch (IOException ex) {
if (!Strings.isNullOrEmpty(ex.getMessage())) {
this.addErrorSevere(getBundleErr("ingest_create_file_err") + " " + ex.getMessage());
} else {
this.addErrorSevere(getBundleErr("ingest_create_file_err"));
}
logger.severe(ex.toString());
this.runMajorCleanup();
return false;
} finally {
IOUtils.closeQuietly(this.newFileInputStream);
}
/**
* This only happens when:
* (1) the dataset was empty
* (2) the new file (or new file unzipped) did not ingest via "createDataFiles"
*/
if (initialFileList.isEmpty()){
this.addErrorSevere(getBundleErr("initial_file_list_empty"));
this.runMajorCleanup();
return false;
}
/**
* REPLACE: File replacement is limited to a single file!!
*
* ADD: When adding files, some types of individual files
* are broken into several files--which is OK
*/
if (isFileReplaceOperation()){
if (initialFileList.size() > 1){
this.addError(getBundleErr("initial_file_list_more_than_one"));
this.runMajorCleanup();
return false;
}
}
if (this.step_040_auto_checkForDuplicates()){
return true;
}
/*
commenting out. see the comment in the source of the method below.
if (this.step_045_auto_checkForFileReplaceDuplicate()) {
return true;
}*/
return false;
}
/**
* Create a "final file list"
*
* This is always run after step 30 -- the ingest
*
* @return
*/
private boolean step_040_auto_checkForDuplicates(){
this.duplicateFileErrorString = "";
this.duplicateFileErrorFound = false;
msgt("step_040_auto_checkForDuplicates");
if (this.hasError()){
return false;
}
// Double checked -- this check also happens in step 30
//
if (initialFileList.isEmpty()){
this.addErrorSevere(getBundleErr("initial_file_list_empty"));
return false;
}
// Initialize new file list
this.finalFileList = new ArrayList<>();
String warningMessage = null;
if (isFileReplaceOperation() && this.fileToReplace == null){
// This error shouldn't happen if steps called correctly
this.addErrorSevere(getBundleErr("existing_file_to_replace_is_null") + " (This error shouldn't happen if steps called in sequence....checkForFileReplaceDuplicate)");
return false;
}
// -----------------------------------------------------------
// Iterate through the recently ingest files
// -----------------------------------------------------------
for (DataFile df : initialFileList){
msg("Checking file: " + df.getFileMetadata().getLabel());
// -----------------------------------------------------------
// (1) Check for ingest warnings
// -----------------------------------------------------------
if (df.isIngestProblem()) {
if (df.getIngestReportMessage() != null) {
// may collect multiple error messages
this.addError(df.getIngestReportMessage());
}
df.setIngestDone();
}
// -----------------------------------------------------------
// (2) Check for duplicates
// Only a warning now
// -----------------------------------------------------------
if (isFileReplaceOperation() && Objects.equals(df.getChecksumValue(), fileToReplace.getChecksumValue())){
this.addError(getBundleErr("replace.new_file_same_as_replacement"));
this.duplicateFileErrorFound = true;
this.duplicateFileErrorString = getBundleErr("replace.new_file_same_as_replacement");
break;
}
if (DuplicateFileChecker.isDuplicateOriginalWay(workingVersion, df.getFileMetadata())){
String dupeName = df.getFileMetadata().getLabel();
this.duplicateFileWarningFound = true;
this.duplicateFileWarningString = BundleUtil.getStringFromBundle("file.addreplace.warning.duplicate_file",
Arrays.asList(dupeName));
this.addErrorWarning(this.duplicateFileWarningString);
}
finalFileList.add(df);
}
if (this.hasError()){
// We're recovering from the duplicate check.
msg("We're recovering from a duplicate check 1");
runMajorCleanup();
msg("We're recovering from a duplicate check 2");
finalFileList.clear();
return false;
}
/**
* REPLACE: File replacement is limited to a single file!!
*
* ADD: When adding files, some types of individual files
* are broken into several files--which is OK
*/
/**
* Also: check that the file is being replaced with the same content type
* file. Treat this as a fatal error, unless this is a "force replace"
* operation; then it should be treated as merely a warning.
*/
if (isFileReplaceOperation()){
if (finalFileList.size() > 1){
String errMsg = "(This shouldn't happen -- error should have been detected in 030_createNewFilesViaIngest)";
this.addErrorSevere(getBundleErr("initial_file_list_more_than_one") + " " + errMsg);
return false;
}
// Has the content type of the file changed?
//
String fileType = fileToReplace.getOriginalFileFormat() != null ? fileToReplace.getOriginalFileFormat() : fileToReplace.getContentType();
if (!finalFileList.get(0).getContentType().equalsIgnoreCase(fileType)) {
String friendlyType = fileToReplace.getOriginalFormatLabel() != null ? fileToReplace.getOriginalFormatLabel() : fileToReplace.getFriendlyType();
List<String> errParams = Arrays.asList(friendlyType,
finalFileList.get(0).getFriendlyType());
String contentTypeErr = BundleUtil.getStringFromBundle("file.addreplace.error.replace.new_file_has_different_content_type",
errParams);
if (isForceFileOperation()){
// for force replace, just give a warning
this.setContentTypeWarning(contentTypeErr);
}else{
// not a force replace? it's an error
this.addError(contentTypeErr);
runMajorCleanup();
return false;
}
}
}
if (finalFileList.isEmpty()){
this.addErrorSevere("There are no files to add. (This error shouldn't happen if steps called in sequence....step_040_auto_checkForDuplicates)");
return false;
}
return true;
} // end step_040_auto_checkForDuplicates
/**
* This is always checked.
*
* For ADD: If there is no replacement file, then the check is considered a success
* For REPLACE: The checksum is examined against the "finalFileList" list
*
* NOTE: this method was always called AFTER the main duplicate check;
* So we would never detect this condition - of the file being replaced with
* the same file... because it would always be caught as simply an attempt
* to replace a file with a file already in the dataset!
* So I commented it out, instead modifying the method above, step_040_auto_checkForDuplicates()
* to do both - check (first) if a file is being replaced with the exact same file;
* and check if a file, or files being uploaded are duplicates of files already
* in the dataset. AND the replacement content type too. -- L.A. Jan 16 2017
*
*/
/*private boolean step_045_auto_checkForFileReplaceDuplicate(){
if (this.hasError()){
return false;
}
// Not a FILE REPLACE operation -- skip this step!!
//
if (!isFileReplaceOperation()){
return true;
}
if (finalFileList.isEmpty()){
// This error shouldn't happen if steps called in sequence....
this.addErrorSevere("There are no files to add. (This error shouldn't happen if steps called in sequence....checkForFileReplaceDuplicate)");
return false;
}
if (this.fileToReplace == null){
// This error shouldn't happen if steps called correctly
this.addErrorSevere(getBundleErr("existing_file_to_replace_is_null") + " (This error shouldn't happen if steps called in sequence....checkForFileReplaceDuplicate)");
return false;
}
for (DataFile df : finalFileList){
if (Objects.equals(df.getChecksumValue(), fileToReplace.getChecksumValue())){
this.addError(getBundleErr("replace.new_file_same_as_replacement"));
break;
}
// Has the content type of the file changed?
//
if (!df.getContentType().equalsIgnoreCase(fileToReplace.getContentType())){
List<String> errParams = Arrays.asList(fileToReplace.getFriendlyType(),
df.getFriendlyType());
String contentTypeErr = BundleUtil.getStringFromBundle("file.addreplace.error.replace.new_file_has_different_content_type",
errParams);
if (isForceFileOperation()){
// for force replace, just give a warning
this.setContentTypeWarning(contentTypeErr);
}else{
// not a force replace? it's an error
this.addError(contentTypeErr);
}
}
}
if (hasError()){
runMajorCleanup();
return false;
}
return true;
} // end step_045_auto_checkForFileReplaceDuplicate
*/
private boolean step_050_checkForConstraintViolations(){
if (this.hasError()){
return false;
}
if (finalFileList.isEmpty()){
// This error shouldn't happen if steps called in sequence....
this.addErrorSevere(getBundleErr("final_file_list_empty"));
return false;
}
// -----------------------------------------------------------
// Iterate through checking for constraint violations
// Gather all error messages
// -----------------------------------------------------------
Set<ConstraintViolation> constraintViolations = workingVersion.validate();
// -----------------------------------------------------------
// No violations found
// -----------------------------------------------------------
if (constraintViolations.isEmpty()){
return true;
}
// -----------------------------------------------------------
// violations found: gather all error messages
// -----------------------------------------------------------
List<String> errMsgs = new ArrayList<>();
for (ConstraintViolation violation : constraintViolations){
this.addError(violation.getMessage());
}
return this.hasError();
}
/**
* Load optional file params such as description, tags, fileDataTags, etc..
*
* @param optionalFileParams
* @return
*/
private boolean step_055_loadOptionalFileParams(OptionalFileParams optionalFileParams){
if (hasError()){
return false;
}
// --------------------------------------------
// OK, the object may be null
// --------------------------------------------
if (optionalFileParams == null){
return true;
}
// --------------------------------------------
// Iterate through files (should only be 1 for now)
// Add tags, description, etc
// --------------------------------------------
for (DataFile df : finalFileList){
try {
optionalFileParams.addOptionalParams(df);
// call restriction command here
boolean restrict = optionalFileParams.getRestriction();
if (restrict != df.getFileMetadata().isRestricted()) {
commandEngine.submit(new RestrictFileCommand(df, dvRequest, restrict));
}
} catch (DataFileTagException ex) {
Logger.getLogger(AddReplaceFileHelper.class.getName()).log(Level.SEVERE, null, ex);
addError(ex.getMessage());
return false;
} catch (CommandException ex) {
addError(ex.getMessage());
}
}
return true;
}
private boolean step_060_addFilesViaIngestService(){
if (this.hasError()){
return false;
}
if (finalFileList.isEmpty()){
// This error shouldn't happen if steps called in sequence....
this.addErrorSevere(getBundleErr("final_file_list_empty"));
return false;
}
int nFiles = finalFileList.size();
finalFileList = ingestService.saveAndAddFilesToDataset(workingVersion, finalFileList, fileToReplace);
if (nFiles != finalFileList.size()) {
if (nFiles == 1) {
addError("Failed to save the content of the uploaded file.");
} else {
addError("Failed to save the content of at least one of the uploaded files.");
}
return false;
}
return true;
}
/**
* Create and run the update dataset command
*
* @return
*/
private boolean step_070_run_update_dataset_command(){
if (this.hasError()){
return false;
}
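// For a replace operation, the old file's metadata is handed to the update command for deletion;
// if the replaced file was never released, its physical file is cleaned up afterwards as well.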
Command<Dataset> update_cmd;
String deleteStorageLocation = null;
long deleteFileId=-1;
if(isFileReplaceOperation()) {
List<FileMetadata> filesToDelete = new ArrayList<FileMetadata>();
filesToDelete.add(fileToReplace.getFileMetadata());
if(!fileToReplace.isReleased()) {
//If file is only in draft version, also need to delete the physical file
deleteStorageLocation = fileService.getPhysicalFileToDelete(fileToReplace);
deleteFileId=fileToReplace.getId();
}
//Adding the file to the delete list for the command will delete this filemetadata and, if the file hasn't been released, the datafile itself.
update_cmd = new UpdateDatasetVersionCommand(dataset, dvRequest, filesToDelete, clone);
} else {
update_cmd = new UpdateDatasetVersionCommand(dataset, dvRequest, clone);
}
((UpdateDatasetVersionCommand) update_cmd).setValidateLenient(true);
try {
// Submit the update dataset command
// and update the local dataset object
//
dataset = commandEngine.submit(update_cmd);
} catch (CommandException ex) {
/**
* @todo Add a test to exercise this error.
*/
this.addErrorSevere(getBundleErr("add.add_file_error"));
logger.severe(ex.getMessage());
return false;
}catch (EJBException ex) {
/**
* @todo Add a test to exercise this error.
*/
this.addErrorSevere("add.add_file_error (see logs)");
logger.severe(ex.getMessage());
return false;
}
//Sanity check
if(isFileReplaceOperation()) {
if (deleteStorageLocation != null) {
// Finalize the delete of the physical file
// (File service will double-check that the datafile no
// longer exists in the database, before proceeding to
// delete the physical file)
try {
fileService.finalizeFileDelete(deleteFileId, deleteStorageLocation);
} catch (IOException ioex) {
logger.warning("Failed to delete the physical file associated with the deleted datafile id="
+ deleteFileId + ", storage location: " + deleteStorageLocation);
}
}
}
return true;
}
private boolean runMajorCleanup(){
// (1) remove unsaved files from the working version
removeUnSavedFilesFromWorkingVersion();
// ----------------------------------------------------
// (2) if the working version is brand new, delete it
// It doesn't have an "id" so you can't use the DeleteDatasetVersionCommand
// ----------------------------------------------------
// Remove this working version from the dataset
Iterator<DatasetVersion> versionIterator = dataset.getVersions().iterator();
msgt("Clear Files");
while (versionIterator.hasNext()) {
DatasetVersion dsv = versionIterator.next();
if (dsv.getId() == null){
versionIterator.remove();
}
}
return true;
}
/**
* We are outta here! Remove everything unsaved from the edit version!
*
* @return
*/
private boolean removeUnSavedFilesFromWorkingVersion(){
msgt("Clean up: removeUnSavedFilesFromWorkingVersion");
// -----------------------------------------------------------
// (1) Remove all new FileMetadata objects
// -----------------------------------------------------------
//Iterator<FileMetadata> fmIt = dataset.getEditVersion().getFileMetadatas().iterator();//
Iterator<FileMetadata> fmIt = workingVersion.getFileMetadatas().iterator(); //dataset.getEditVersion().getFileMetadatas().iterator();//
while (fmIt.hasNext()) {
FileMetadata fm = fmIt.next();
if (fm.getDataFile().getId() == null){
fmIt.remove();
}
}
// -----------------------------------------------------------
// (2) Remove all new DataFile objects
// -----------------------------------------------------------
Iterator<DataFile> dfIt = dataset.getFiles().iterator();
msgt("Clear Files");
while (dfIt.hasNext()) {
DataFile df = dfIt.next();
if (df.getId() == null){
dfIt.remove();
}
}
return true;
}
private boolean step_080_run_update_dataset_command_for_replace(){
if (!isFileReplaceOperation()){
// Shouldn't happen!
this.addErrorSevere(getBundleErr("only_replace_operation") + " (step_080_run_update_dataset_command_for_replace)");
return false;
}
if (this.hasError()){
return false;
}
// -----------------------------------------------------------
// Set the "root file ids" and "previous file ids"
// THIS IS A KEY STEP - SPLIT IT OUT
// (1) Old file: Set the Root File Id on the original file
// (2) New file: Set the previousFileId to the id of the original file
// (3) New file: Set the rootFileId to the rootFileId of the original file
// -----------------------------------------------------------
if (fileToReplace.isReleased()) {
/*
* Check the root file id on fileToReplace, updating it if necessary
*/
if (fileToReplace.getRootDataFileId().equals(DataFile.ROOT_DATAFILE_ID_DEFAULT)) {
fileToReplace.setRootDataFileId(fileToReplace.getId());
fileToReplace = fileService.save(fileToReplace);
}
/*
* Go through the final file list, setting the rootFileId and previousFileId
*/
for (DataFile df : finalFileList) {
df.setPreviousDataFileId(fileToReplace.getId());
df.setRootDataFileId(fileToReplace.getRootDataFileId());
}
}
// Call the update dataset command which will delete the replaced filemetadata and file if needed (if file is not released)
//
return step_070_run_update_dataset_command();
}
/**
* We want the version of the newly added file that has an id set
*
* TODO: This is inefficient/expensive. Need to redo it in a sane way
* - e.g. Query to find
* (1) latest dataset version in draft
* (2) pick off files that are NOT released
* (3) iterate through only those files
* - or an alternate/better version
*
* @param datafiles
*/
private void setNewlyAddedFiles(List<DataFile> datafiles){
if (hasError()){
return;
}
// Init. newly added file list
newlyAddedFiles = new ArrayList<>();
newlyAddedFileMetadatas = new ArrayList<>();
// Loop of ugliness...but expect 1 to 4 files in final file list
List<FileMetadata> latestFileMetadatas = dataset.getEditVersion().getFileMetadatas();
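// Match each newly ingested file to its persisted counterpart (which now has a database id)
// by comparing checksum values and storage identifiers.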
for (DataFile newlyAddedFile : finalFileList){
for (FileMetadata fm : latestFileMetadatas){
if (newlyAddedFile.getChecksumValue().equals(fm.getDataFile().getChecksumValue())){
if (newlyAddedFile.getStorageIdentifier().equals(fm.getDataFile().getStorageIdentifier())){
newlyAddedFiles.add(fm.getDataFile());
newlyAddedFileMetadatas.add(fm);
}
}
}
}
/*
newlyAddedFile = df;
for (FileMetadata fm : dataset.getEditVersion().getFileMetadatas()){
// Find a file where the checksum value and identifiers are the same..
//
if (newlyAddedFile.getChecksumValue().equals(fm.getDataFile().getChecksumValue())){
if (newlyAddedFile.getStorageIdentifier().equals(fm.getDataFile().getStorageIdentifier())){
newlyAddedFile = fm.getDataFile();
break;
}
}
}
*/
}
/**
* For a successful replace operation, return the first newly added file
* @return
*/
public DataFile getFirstNewlyAddedFile(){
if ((newlyAddedFiles == null)||(newlyAddedFiles.size() == 0)){
return null;
}
return newlyAddedFiles.get(0);
}
public List<DataFile> getNewlyAddedFiles(){
return newlyAddedFiles;
}
public List<FileMetadata> getNewlyAddedFileMetadatas(){
return newlyAddedFileMetadatas;
}
public String getSuccessResult() throws NoFilesException{
if (hasError()){
throw new NoFilesException("Don't call this method if an error exists!! First check 'hasError()'");
}
if (newlyAddedFiles == null){
throw new NullPointerException("newlyAddedFiles is null!");
}
return getSuccessResultAsJsonObjectBuilder().toString();
}
public JsonObjectBuilder getSuccessResultAsJsonObjectBuilder() throws NoFilesException{
if (hasError()){
throw new NoFilesException("Don't call this method if an error exists!! First check 'hasError()'");
}
if (newlyAddedFiles == null){
throw new NullPointerException("newlyAddedFiles is null!");
}
if (newlyAddedFiles.isEmpty()){
throw new NoFilesException("newlyAddedFiles is empty!");
}
return JsonPrinter.jsonDataFileList(newlyAddedFiles);
}
/**
* Currently this is a placeholder in case we decide to send
* user notifications.
*
*/
private boolean step_090_notifyUser(){
if (this.hasError()){
return false;
}
// Create a notification!
// skip for now, may be part of dataset update listening
//
return true;
}
private boolean step_100_startIngestJobs(){
if (this.hasError()){
return false;
}
// Should only be one file in the list
setNewlyAddedFiles(finalFileList);
// clear old file list
//
finalFileList.clear();
// TODO: Need to run ingest async......
//if (true){
//return true;
//}
msg("pre ingest start");
// start the ingest!
//
ingestService.startIngestJobsForDataset(dataset, dvRequest.getAuthenticatedUser());
msg("post ingest start");
return true;
}
private void msg(String m){
logger.fine(m);
//System.out.println(m);
}
private void dashes(){
msg("----------------");
}
private void msgt(String m){
dashes(); msg(m); dashes();
}
/**
* Return file list before saving
*
* Used for UI display
*
* @return
*/
public List<DataFile> getFileListBeforeSave(){
return this.finalFileList;
}
public Boolean isFinalFileListEmpty (){
return this.finalFileList.isEmpty();
}
/**
* Return file list before saving
*
* Used for UI display
*
* @return
*/
public List<FileMetadata> getNewFileMetadatasBeforeSave(){
if (this.finalFileList.size() == 0){
return null;
}
List<FileMetadata> fileMetadatas = new ArrayList<>();
for (DataFile df : finalFileList){
fileMetadatas.add(df.getFileMetadata());
}
return fileMetadatas;
}
public void setContentTypeWarning(String warningString){
if ((warningString == null)||(warningString.isEmpty())){
throw new NullPointerException("warningString cannot be null");
}
contentTypeWarningFound = true;
contentTypeWarningString = warningString;
}
public boolean hasContentTypeWarning(){
return this.contentTypeWarningFound;
}
public String getContentTypeWarningString(){
if (!hasContentTypeWarning()){
// not really a NullPointerException but want to blow up here without adding try/catch everywhere
//
throw new NullPointerException("Don't call this method without checking 'hasContentTypeWarning()'");
}
return contentTypeWarningString;
}
private String duplicateFileWarning;
public String getDuplicateFileWarning() {
return duplicateFileWarning;
}
public void setDuplicateFileWarning(String duplicateFileWarning) {
this.duplicateFileWarning = duplicateFileWarning;
}
} // end class
/*
DatasetPage sequence:
(A) editFilesFragment.xhtml -> EditDataFilesPage.handleFileUpload
(B) EditDataFilesPage.java -> handleFileUpload
(1) UploadedFile uf event.getFile() // UploadedFile
--------
UploadedFile interface:
public String getFileName()
public InputStream getInputstream() throws IOException;
public long getSize();
public byte[] getContents();
public String getContentType();
public void write(String string) throws Exception;
--------
(2) List<DataFile> dFileList = null;
try {
// Note: A single file may be unzipped into multiple files
dFileList = ingestService.createDataFiles(workingVersion, uFile.getInputstream(), uFile.getFileName(), uFile.getContentType());
}
(3) processUploadedFileList(dFileList);
(C) EditDataFilesPage.java -> processUploadedFileList
- iterate through list of DataFile objects -- which COULD happen with a single .zip
- isDuplicate check
- if good:
- newFiles.add(dataFile); // looks good
- fileMetadatas.add(dataFile.getFileMetadata());
- return null; // looks good, return null
(D) save() // in the UI, user clicks the button. API is automatic if no errors
(1) Look for constraintViolations:
// DatasetVersion workingVersion;
Set<ConstraintViolation> constraintViolations = workingVersion.validate();
if (!constraintViolations.isEmpty()) {
//JsfHelper.addFlashMessage(JH.localize("dataset.message.validationError"));
JH.addMessage(FacesMessage.SEVERITY_ERROR, JH.localize("dataset.message.validationError"));
//FacesContext.getCurrentInstance().addMessage(null, new FacesMessage(FacesMessage.SEVERITY_ERROR, "Validation Error", "See below for details."));
return "";
}
(2) Use the ingestService for a final check
// ask Leonid if this is needed for API
// One last check before we save the files - go through the newly-uploaded
// ones and modify their names so that there are no duplicates.
// (but should we really be doing it here? - maybe a better approach to do it
// in the ingest service bean, when the files get uploaded.)
// Finally, save the files permanently:
ingestService.saveAndAddFilesToDataset(workingVersion, newFiles);
(3) Use the API to save the dataset
- make new CreateDatasetCommand
- check if dataset has a template
- creates UserNotification message
*/
// Checks:
// - Does the md5 already exist in the dataset?
// - If it's a replace, has the name and/or extension changed?
// On failure, send back warning
//
// - All looks good
// - Create a DataFile
// - Create a FileMetadata
// - Copy the Dataset version, making a new DRAFT
// - If it's replace, don't copy the file being replaced
// - Add this new file.
// ....
/*
1) Recovery from adding same file and duplicate being found
- draft ok
- published version - nope
*/ | 1 | 44,437 | We generally avoid wildcard imports. If you feel like changing it back, great. Otherwise, no big deal. | IQSS-dataverse | java |
@@ -51,7 +51,7 @@
// dispatcher := yarpc.NewDispatcher(yarpc.Config{
// Name: "myservice",
// Outbounds: yarpc.OUtbounds{
-// {Unary: myserviceOutbound},
+// "outboundservice": {Unary: myserviceOutbound},
// },
// })
// | 1 | // Copyright (c) 2018 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
// Package tchannel implements a YARPC transport based on the TChannel
// protocol. The TChannel transport provides support for Unary RPCs only.
//
// Usage
//
// A ChannelTransport must be constructed to use this transport. You can
// provide an existing TChannel Channel to construct the Channel transport.
//
// ch := getTChannelChannel()
// tchannelTransport, err := tchannel.NewChannelTransport(tchannel.WithChannel(ch))
//
// Alternatively, you can let YARPC own and manage the TChannel Channel for
// you by providing the service name. Note that this is the name of the local
// service, not the name of the service you will be sending requests to.
//
// tchannelTransport, err := tchannel.NewChannelTransport(tchannel.ServiceName("myservice"))
//
// To serve a YARPC application over TChannel, pass a TChannel inbound in your
// yarpc.Config.
//
// myInbound := tchannelTransport.NewInbound()
// dispatcher := yarpc.NewDispatcher(yarpc.Config{
// Name: "myservice",
// Inbounds: yarpc.Inbounds{myInbound},
// })
//
// To make requests to a YARPC application that supports TChannel, pass a
// TChannel outbound in your yarpc.Config.
//
// myserviceOutbound := tchannelTransport.NewOutbound()
// dispatcher := yarpc.NewDispatcher(yarpc.Config{
// Name: "myservice",
// Outbounds: yarpc.OUtbounds{
// {Unary: myserviceOutbound},
// },
// })
//
// Configuration
//
// A TChannel transport may be configured using YARPC's configuration system.
// See TransportConfig, InboundConfig, and OutboundConfig for details on the
// different configuration parameters supported by this transport.
package tchannel
| 1 | 16,223 | We also have a typo on the line above! OUt or Out? | yarpc-yarpc-go | go |
@@ -7,5 +7,7 @@
<%= @step.action_name %>
<%= generate_approve_url(@step) %>
-<%= t("mailer.step_mailer.proposal_notification.cta") %>
-<%= proposal_url(@proposal, anchor: "comments") %>
+<%= t("mailer.view_request_cta") %>
+<%= proposal_url(@proposal) %>
+
+<%= t("mailer.footer", feedback_url: feedback_url) %> | 1 | <%= t("mailer.step_mailer.proposal_notification.header",
requester_name: @proposal.requester.full_name,
step_type_noun: @step.noun) %>
<%= t("mailer.step_mailer.proposal_notification.step_status") %>
<%= @step.action_name %>
<%= generate_approve_url(@step) %>
<%= t("mailer.step_mailer.proposal_notification.cta") %>
<%= proposal_url(@proposal, anchor: "comments") %>
| 1 | 16,848 | OH! These are text.... (Ignore above) | 18F-C2 | rb |
@@ -98,7 +98,6 @@ class Reader implements DataSourceReader, SupportsScanColumnarBatch, SupportsPus
private List<Expression> filterExpressions = null;
private Filter[] pushedFilters = NO_FILTERS;
private final boolean localityPreferred;
- private final int batchSize;
private final boolean readTimestampWithoutZone;
// lazy variables | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg.spark.source;
import java.io.IOException;
import java.io.Serializable;
import java.util.Arrays;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.iceberg.CombinedScanTask;
import org.apache.iceberg.FileFormat;
import org.apache.iceberg.FileScanTask;
import org.apache.iceberg.Schema;
import org.apache.iceberg.SchemaParser;
import org.apache.iceberg.SerializableTable;
import org.apache.iceberg.SnapshotSummary;
import org.apache.iceberg.Table;
import org.apache.iceberg.TableProperties;
import org.apache.iceberg.TableScan;
import org.apache.iceberg.exceptions.RuntimeIOException;
import org.apache.iceberg.exceptions.ValidationException;
import org.apache.iceberg.expressions.Expression;
import org.apache.iceberg.expressions.Expressions;
import org.apache.iceberg.hadoop.HadoopFileIO;
import org.apache.iceberg.hadoop.Util;
import org.apache.iceberg.io.CloseableIterable;
import org.apache.iceberg.relocated.com.google.common.base.Preconditions;
import org.apache.iceberg.relocated.com.google.common.collect.ImmutableSet;
import org.apache.iceberg.relocated.com.google.common.collect.Lists;
import org.apache.iceberg.spark.SparkFilters;
import org.apache.iceberg.spark.SparkReadOptions;
import org.apache.iceberg.spark.SparkSchemaUtil;
import org.apache.iceberg.spark.SparkUtil;
import org.apache.iceberg.util.PropertyUtil;
import org.apache.iceberg.util.TableScanUtil;
import org.apache.iceberg.util.Tasks;
import org.apache.iceberg.util.ThreadPools;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.broadcast.Broadcast;
import org.apache.spark.sql.RuntimeConfig;
import org.apache.spark.sql.SparkSession;
import org.apache.spark.sql.catalyst.InternalRow;
import org.apache.spark.sql.sources.Filter;
import org.apache.spark.sql.sources.v2.DataSourceOptions;
import org.apache.spark.sql.sources.v2.reader.DataSourceReader;
import org.apache.spark.sql.sources.v2.reader.InputPartition;
import org.apache.spark.sql.sources.v2.reader.InputPartitionReader;
import org.apache.spark.sql.sources.v2.reader.Statistics;
import org.apache.spark.sql.sources.v2.reader.SupportsPushDownFilters;
import org.apache.spark.sql.sources.v2.reader.SupportsPushDownRequiredColumns;
import org.apache.spark.sql.sources.v2.reader.SupportsReportStatistics;
import org.apache.spark.sql.sources.v2.reader.SupportsScanColumnarBatch;
import org.apache.spark.sql.types.StructType;
import org.apache.spark.sql.vectorized.ColumnarBatch;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
class Reader implements DataSourceReader, SupportsScanColumnarBatch, SupportsPushDownFilters,
SupportsPushDownRequiredColumns, SupportsReportStatistics {
private static final Logger LOG = LoggerFactory.getLogger(Reader.class);
private static final Filter[] NO_FILTERS = new Filter[0];
private static final ImmutableSet<String> LOCALITY_WHITELIST_FS = ImmutableSet.of("hdfs");
private final JavaSparkContext sparkContext;
private final Table table;
private final DataSourceOptions options;
private final Long snapshotId;
private final Long startSnapshotId;
private final Long endSnapshotId;
private final Long asOfTimestamp;
private final Long splitSize;
private final Integer splitLookback;
private final Long splitOpenFileCost;
private final boolean caseSensitive;
private StructType requestedSchema = null;
private List<Expression> filterExpressions = null;
private Filter[] pushedFilters = NO_FILTERS;
private final boolean localityPreferred;
private final int batchSize;
private final boolean readTimestampWithoutZone;
// lazy variables
private Schema schema = null;
private StructType type = null; // cached because Spark accesses it multiple times
private List<CombinedScanTask> tasks = null; // lazy cache of tasks
private Boolean readUsingBatch = null;
Reader(SparkSession spark, Table table, boolean caseSensitive, DataSourceOptions options) {
this.sparkContext = JavaSparkContext.fromSparkContext(spark.sparkContext());
this.table = table;
this.options = options;
this.snapshotId = options.get(SparkReadOptions.SNAPSHOT_ID).map(Long::parseLong).orElse(null);
this.asOfTimestamp = options.get(SparkReadOptions.AS_OF_TIMESTAMP).map(Long::parseLong).orElse(null);
if (snapshotId != null && asOfTimestamp != null) {
throw new IllegalArgumentException(
"Cannot scan using both snapshot-id and as-of-timestamp to select the table snapshot");
}
this.startSnapshotId = options.get("start-snapshot-id").map(Long::parseLong).orElse(null);
this.endSnapshotId = options.get("end-snapshot-id").map(Long::parseLong).orElse(null);
if (snapshotId != null || asOfTimestamp != null) {
if (startSnapshotId != null || endSnapshotId != null) {
throw new IllegalArgumentException(
"Cannot specify start-snapshot-id and end-snapshot-id to do incremental scan when either snapshot-id or " +
"as-of-timestamp is specified");
}
} else {
if (startSnapshotId == null && endSnapshotId != null) {
throw new IllegalArgumentException("Cannot only specify option end-snapshot-id to do incremental scan");
}
}
// look for split behavior overrides in options
this.splitSize = options.get(SparkReadOptions.SPLIT_SIZE).map(Long::parseLong).orElse(null);
this.splitLookback = options.get(SparkReadOptions.LOOKBACK).map(Integer::parseInt).orElse(null);
this.splitOpenFileCost = options.get(SparkReadOptions.FILE_OPEN_COST).map(Long::parseLong).orElse(null);
if (table.io() instanceof HadoopFileIO) {
String fsscheme = "no_exist";
try {
Configuration conf = SparkSession.active().sessionState().newHadoopConf();
// merge hadoop config set on table
mergeIcebergHadoopConfs(conf, table.properties());
// merge hadoop config passed as options and overwrite the one on table
mergeIcebergHadoopConfs(conf, options.asMap());
FileSystem fs = new Path(table.location()).getFileSystem(conf);
fsscheme = fs.getScheme().toLowerCase(Locale.ENGLISH);
} catch (IOException ioe) {
LOG.warn("Failed to get Hadoop Filesystem", ioe);
}
String scheme = fsscheme; // Makes an effectively final version of scheme
this.localityPreferred = options.get("locality").map(Boolean::parseBoolean)
.orElseGet(() -> LOCALITY_WHITELIST_FS.contains(scheme));
} else {
this.localityPreferred = false;
}
this.schema = table.schema();
this.caseSensitive = caseSensitive;
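// Batch size for vectorized reads: the read option takes precedence over the table property default.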
this.batchSize = options.get(SparkReadOptions.VECTORIZATION_BATCH_SIZE).map(Integer::parseInt).orElseGet(() ->
PropertyUtil.propertyAsInt(table.properties(),
TableProperties.PARQUET_BATCH_SIZE, TableProperties.PARQUET_BATCH_SIZE_DEFAULT));
RuntimeConfig sessionConf = SparkSession.active().conf();
this.readTimestampWithoutZone = SparkUtil.canHandleTimestampWithoutZone(options.asMap(), sessionConf);
}
private Schema lazySchema() {
if (schema == null) {
if (requestedSchema != null) {
// the projection should include all columns that will be returned, including those only used in filters
this.schema = SparkSchemaUtil.prune(table.schema(), requestedSchema, filterExpression(), caseSensitive);
} else {
this.schema = table.schema();
}
}
return schema;
}
private Expression filterExpression() {
if (filterExpressions != null) {
return filterExpressions.stream().reduce(Expressions.alwaysTrue(), Expressions::and);
}
return Expressions.alwaysTrue();
}
private StructType lazyType() {
if (type == null) {
Preconditions.checkArgument(readTimestampWithoutZone || !SparkUtil.hasTimestampWithoutZone(lazySchema()),
SparkUtil.TIMESTAMP_WITHOUT_TIMEZONE_ERROR);
this.type = SparkSchemaUtil.convert(lazySchema());
}
return type;
}
@Override
public StructType readSchema() {
return lazyType();
}
/**
* This is called in the Spark Driver when data is to be materialized into {@link ColumnarBatch}
*/
@Override
public List<InputPartition<ColumnarBatch>> planBatchInputPartitions() {
Preconditions.checkState(enableBatchRead(), "Batched reads not enabled");
Preconditions.checkState(batchSize > 0, "Invalid batch size");
String expectedSchemaString = SchemaParser.toJson(lazySchema());
ValidationException.check(tasks().stream().noneMatch(TableScanUtil::hasDeletes),
"Cannot scan table %s: cannot apply required delete files", table);
// broadcast the table metadata as input partitions will be sent to executors
Broadcast<Table> tableBroadcast = sparkContext.broadcast(SerializableTable.copyOf(table));
List<CombinedScanTask> scanTasks = tasks();
InputPartition<ColumnarBatch>[] readTasks = new InputPartition[scanTasks.size()];
Tasks.range(readTasks.length)
.stopOnFailure()
.executeWith(localityPreferred ? ThreadPools.getWorkerPool() : null)
.run(index -> readTasks[index] = new ReadTask<>(
scanTasks.get(index), tableBroadcast, expectedSchemaString, caseSensitive,
localityPreferred, new BatchReaderFactory(batchSize)));
LOG.info("Batching input partitions with {} tasks.", readTasks.length);
return Arrays.asList(readTasks);
}
/**
* This is called in the Spark Driver when data is to be materialized into {@link InternalRow}
*/
@Override
public List<InputPartition<InternalRow>> planInputPartitions() {
String expectedSchemaString = SchemaParser.toJson(lazySchema());
// broadcast the table metadata as input partitions will be sent to executors
Broadcast<Table> tableBroadcast = sparkContext.broadcast(SerializableTable.copyOf(table));
List<CombinedScanTask> scanTasks = tasks();
InputPartition<InternalRow>[] readTasks = new InputPartition[scanTasks.size()];
Tasks.range(readTasks.length)
.stopOnFailure()
.executeWith(localityPreferred ? ThreadPools.getWorkerPool() : null)
.run(index -> readTasks[index] = new ReadTask<>(
scanTasks.get(index), tableBroadcast, expectedSchemaString, caseSensitive,
localityPreferred, InternalRowReaderFactory.INSTANCE));
return Arrays.asList(readTasks);
}
@Override
public Filter[] pushFilters(Filter[] filters) {
this.tasks = null; // invalidate cached tasks, if present
List<Expression> expressions = Lists.newArrayListWithExpectedSize(filters.length);
List<Filter> pushed = Lists.newArrayListWithExpectedSize(filters.length);
for (Filter filter : filters) {
Expression expr = SparkFilters.convert(filter);
if (expr != null) {
expressions.add(expr);
pushed.add(filter);
}
}
this.filterExpressions = expressions;
this.pushedFilters = pushed.toArray(new Filter[0]);
// invalidate the schema that will be projected
this.schema = null;
this.type = null;
// Spark doesn't support residuals per task, so return all filters
// to get Spark to handle record-level filtering
return filters;
}
@Override
public Filter[] pushedFilters() {
return pushedFilters;
}
@Override
public void pruneColumns(StructType newRequestedSchema) {
this.requestedSchema = newRequestedSchema;
// invalidate the schema that will be projected
this.schema = null;
this.type = null;
}
@Override
public Statistics estimateStatistics() {
// it's a fresh table, no data
if (table.currentSnapshot() == null) {
return new Stats(0L, 0L);
}
// estimate stats using snapshot summary only for partitioned tables (metadata tables are unpartitioned)
if (!table.spec().isUnpartitioned() && filterExpression() == Expressions.alwaysTrue()) {
long totalRecords = PropertyUtil.propertyAsLong(table.currentSnapshot().summary(),
SnapshotSummary.TOTAL_RECORDS_PROP, Long.MAX_VALUE);
return new Stats(SparkSchemaUtil.estimateSize(lazyType(), totalRecords), totalRecords);
}
long sizeInBytes = 0L;
long numRows = 0L;
for (CombinedScanTask task : tasks()) {
for (FileScanTask file : task.files()) {
sizeInBytes += file.length();
numRows += file.file().recordCount();
}
}
return new Stats(sizeInBytes, numRows);
}
@Override
public boolean enableBatchRead() {
if (readUsingBatch == null) {
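// Batch (vectorized) reads are used only when every scan task reads a single format
// (all Parquet or all ORC), vectorization is enabled for that format, there are no
// delete files to apply, and, for Parquet, the projected schema is non-empty and
// contains only primitive types.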
boolean allParquetFileScanTasks =
tasks().stream()
.allMatch(combinedScanTask -> !combinedScanTask.isDataTask() && combinedScanTask.files()
.stream()
.allMatch(fileScanTask -> fileScanTask.file().format().equals(
FileFormat.PARQUET)));
boolean allOrcFileScanTasks =
tasks().stream()
.allMatch(combinedScanTask -> !combinedScanTask.isDataTask() && combinedScanTask.files()
.stream()
.allMatch(fileScanTask -> fileScanTask.file().format().equals(
FileFormat.ORC)));
boolean atLeastOneColumn = lazySchema().columns().size() > 0;
boolean onlyPrimitives = lazySchema().columns().stream().allMatch(c -> c.type().isPrimitiveType());
boolean hasNoDeleteFiles = tasks().stream().noneMatch(TableScanUtil::hasDeletes);
boolean batchReadsEnabled = batchReadsEnabled(allParquetFileScanTasks, allOrcFileScanTasks);
this.readUsingBatch = batchReadsEnabled && hasNoDeleteFiles && (allOrcFileScanTasks ||
(allParquetFileScanTasks && atLeastOneColumn && onlyPrimitives));
}
return readUsingBatch;
}
private boolean batchReadsEnabled(boolean isParquetOnly, boolean isOrcOnly) {
if (isParquetOnly) {
return isVectorizationEnabled(FileFormat.PARQUET);
} else if (isOrcOnly) {
return isVectorizationEnabled(FileFormat.ORC);
} else {
return false;
}
}
public boolean isVectorizationEnabled(FileFormat fileFormat) {
String readOptionValue = options.get(SparkReadOptions.VECTORIZATION_ENABLED).orElse(null);
if (readOptionValue != null) {
return Boolean.parseBoolean(readOptionValue);
}
RuntimeConfig sessionConf = SparkSession.active().conf();
String sessionConfValue = sessionConf.get("spark.sql.iceberg.vectorization.enabled", null);
if (sessionConfValue != null) {
return Boolean.parseBoolean(sessionConfValue);
}
switch (fileFormat) {
case PARQUET:
return PropertyUtil.propertyAsBoolean(
table.properties(),
TableProperties.PARQUET_VECTORIZATION_ENABLED,
TableProperties.PARQUET_VECTORIZATION_ENABLED_DEFAULT);
case ORC:
return PropertyUtil.propertyAsBoolean(
table.properties(),
TableProperties.ORC_VECTORIZATION_ENABLED,
TableProperties.ORC_VECTORIZATION_ENABLED_DEFAULT);
default:
return false;
}
}
private static void mergeIcebergHadoopConfs(
Configuration baseConf, Map<String, String> options) {
options.keySet().stream()
.filter(key -> key.startsWith("hadoop."))
.forEach(key -> baseConf.set(key.replaceFirst("hadoop.", ""), options.get(key)));
}
private List<CombinedScanTask> tasks() {
if (tasks == null) {
TableScan scan = table
.newScan()
.caseSensitive(caseSensitive)
.project(lazySchema());
if (snapshotId != null) {
scan = scan.useSnapshot(snapshotId);
}
if (asOfTimestamp != null) {
scan = scan.asOfTime(asOfTimestamp);
}
if (startSnapshotId != null) {
if (endSnapshotId != null) {
scan = scan.appendsBetween(startSnapshotId, endSnapshotId);
} else {
scan = scan.appendsAfter(startSnapshotId);
}
}
if (splitSize != null) {
scan = scan.option(TableProperties.SPLIT_SIZE, splitSize.toString());
}
if (splitLookback != null) {
scan = scan.option(TableProperties.SPLIT_LOOKBACK, splitLookback.toString());
}
if (splitOpenFileCost != null) {
scan = scan.option(TableProperties.SPLIT_OPEN_FILE_COST, splitOpenFileCost.toString());
}
if (filterExpressions != null) {
for (Expression filter : filterExpressions) {
scan = scan.filter(filter);
}
}
try (CloseableIterable<CombinedScanTask> tasksIterable = scan.planTasks()) {
this.tasks = Lists.newArrayList(tasksIterable);
} catch (IOException e) {
throw new RuntimeIOException(e, "Failed to close table scan: %s", scan);
}
}
return tasks;
}
@Override
public String toString() {
return String.format(
"IcebergScan(table=%s, type=%s, filters=%s, caseSensitive=%s, batchedReads=%s)",
table, lazySchema().asStruct(), filterExpressions, caseSensitive, enableBatchRead());
}
private static class ReadTask<T> implements Serializable, InputPartition<T> {
private final CombinedScanTask task;
private final Broadcast<Table> tableBroadcast;
private final String expectedSchemaString;
private final boolean caseSensitive;
private final boolean localityPreferred;
private final ReaderFactory<T> readerFactory;
private transient Schema expectedSchema = null;
private transient String[] preferredLocations = null;
private ReadTask(CombinedScanTask task, Broadcast<Table> tableBroadcast, String expectedSchemaString,
boolean caseSensitive, boolean localityPreferred, ReaderFactory<T> readerFactory) {
this.task = task;
this.tableBroadcast = tableBroadcast;
this.expectedSchemaString = expectedSchemaString;
this.caseSensitive = caseSensitive;
this.localityPreferred = localityPreferred;
this.preferredLocations = getPreferredLocations();
this.readerFactory = readerFactory;
}
@Override
public InputPartitionReader<T> createPartitionReader() {
Table table = tableBroadcast.value();
return readerFactory.create(task, table, lazyExpectedSchema(), caseSensitive);
}
@Override
public String[] preferredLocations() {
return preferredLocations;
}
private Schema lazyExpectedSchema() {
if (expectedSchema == null) {
this.expectedSchema = SchemaParser.fromJson(expectedSchemaString);
}
return expectedSchema;
}
@SuppressWarnings("checkstyle:RegexpSingleline")
private String[] getPreferredLocations() {
if (!localityPreferred) {
return new String[0];
}
Configuration conf = SparkSession.active().sparkContext().hadoopConfiguration();
return Util.blockLocations(task, conf);
}
}
private interface ReaderFactory<T> extends Serializable {
InputPartitionReader<T> create(CombinedScanTask task, Table table, Schema expectedSchema, boolean caseSensitive);
}
private static class InternalRowReaderFactory implements ReaderFactory<InternalRow> {
private static final InternalRowReaderFactory INSTANCE = new InternalRowReaderFactory();
private InternalRowReaderFactory() {
}
@Override
public InputPartitionReader<InternalRow> create(CombinedScanTask task, Table table,
Schema expectedSchema, boolean caseSensitive) {
return new RowReader(task, table, expectedSchema, caseSensitive);
}
}
private static class BatchReaderFactory implements ReaderFactory<ColumnarBatch> {
private final int batchSize;
BatchReaderFactory(int batchSize) {
this.batchSize = batchSize;
}
@Override
public InputPartitionReader<ColumnarBatch> create(CombinedScanTask task, Table table,
Schema expectedSchema, boolean caseSensitive) {
return new BatchReader(task, table, expectedSchema, caseSensitive, batchSize);
}
}
private static class RowReader extends RowDataReader implements InputPartitionReader<InternalRow> {
RowReader(CombinedScanTask task, Table table, Schema expectedSchema, boolean caseSensitive) {
super(task, table, expectedSchema, caseSensitive);
}
}
private static class BatchReader extends BatchDataReader implements InputPartitionReader<ColumnarBatch> {
BatchReader(CombinedScanTask task, Table table, Schema expectedSchema, boolean caseSensitive, int size) {
super(task, table, expectedSchema, caseSensitive, size);
}
}
}
| 1 | 42,105 | How was this set before? Was it just ignored? | apache-iceberg | java |
@@ -0,0 +1,12 @@
+class CreditCardsController < ApplicationController
+ def update
+ customer = Stripe::Customer.retrieve(current_user.stripe_customer_id)
+ customer.card = params['stripe_token']
+ begin
+ customer.save
+ redirect_to my_account_path, notice: I18n.t('subscriptions.flashes.update.success')
+ rescue Stripe::CardError => error
+ redirect_to my_account_path, notice: error.message
+ end
+ end
+end | 1 | 1 | 8,049 | Should we `save!` here? | thoughtbot-upcase | rb |
|
@@ -284,7 +284,7 @@ void TServerSocket::listen() {
hints.ai_family = PF_UNSPEC;
hints.ai_socktype = SOCK_STREAM;
hints.ai_flags = AI_PASSIVE | AI_ADDRCONFIG;
- sprintf(port, "%d", port_);
+ snprintf(port, sizeof("65535"), "%d", port_);
// If address is not specified use wildcard address (NULL)
TGetAddrInfoWrapper info(address_.empty() ? NULL : &address_[0], port, &hints); | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
#include <thrift/thrift-config.h>
#include <cstring>
#include <stdexcept>
#include <sys/types.h>
#ifdef HAVE_SYS_SOCKET_H
#include <sys/socket.h>
#endif
#ifdef HAVE_SYS_UN_H
#include <sys/un.h>
#endif
#ifdef HAVE_SYS_POLL_H
#include <sys/poll.h>
#endif
#ifdef HAVE_NETINET_IN_H
#include <netinet/in.h>
#include <netinet/tcp.h>
#endif
#ifdef HAVE_NETDB_H
#include <netdb.h>
#endif
#include <fcntl.h>
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#include <thrift/transport/TSocket.h>
#include <thrift/transport/TServerSocket.h>
#include <thrift/transport/PlatformSocket.h>
#include <boost/shared_ptr.hpp>
#ifndef AF_LOCAL
#define AF_LOCAL AF_UNIX
#endif
#ifndef SOCKOPT_CAST_T
#ifndef _WIN32
#define SOCKOPT_CAST_T void
#else
#define SOCKOPT_CAST_T char
#endif // _WIN32
#endif
#if defined(_WIN32) && (_WIN32_WINNT < 0x0600)
#define AI_ADDRCONFIG 0x0400
#endif
template <class T>
inline const SOCKOPT_CAST_T* const_cast_sockopt(const T* v) {
return reinterpret_cast<const SOCKOPT_CAST_T*>(v);
}
template <class T>
inline SOCKOPT_CAST_T* cast_sockopt(T* v) {
return reinterpret_cast<SOCKOPT_CAST_T*>(v);
}
void destroyer_of_fine_sockets(THRIFT_SOCKET* ssock) {
::THRIFT_CLOSESOCKET(*ssock);
delete ssock;
}
class TGetAddrInfoWrapper {
public:
TGetAddrInfoWrapper(const char* node, const char* service, const struct addrinfo* hints);
virtual ~TGetAddrInfoWrapper();
int init();
const struct addrinfo* res();
private:
const char* node_;
const char* service_;
const struct addrinfo* hints_;
struct addrinfo* res_;
};
TGetAddrInfoWrapper::TGetAddrInfoWrapper(const char* node,
const char* service,
const struct addrinfo* hints)
: node_(node), service_(service), hints_(hints), res_(NULL) {}
TGetAddrInfoWrapper::~TGetAddrInfoWrapper() {
if (this->res_ != NULL)
freeaddrinfo(this->res_);
}
int TGetAddrInfoWrapper::init() {
if (this->res_ == NULL)
return getaddrinfo(this->node_, this->service_, this->hints_, &(this->res_));
return 0;
}
const struct addrinfo* TGetAddrInfoWrapper::res() {
return this->res_;
}
namespace apache {
namespace thrift {
namespace transport {
using namespace std;
using boost::shared_ptr;
TServerSocket::TServerSocket(int port)
: interruptableChildren_(true),
port_(port),
serverSocket_(THRIFT_INVALID_SOCKET),
acceptBacklog_(DEFAULT_BACKLOG),
sendTimeout_(0),
recvTimeout_(0),
accTimeout_(-1),
retryLimit_(0),
retryDelay_(0),
tcpSendBuffer_(0),
tcpRecvBuffer_(0),
keepAlive_(false),
listening_(false),
interruptSockWriter_(THRIFT_INVALID_SOCKET),
interruptSockReader_(THRIFT_INVALID_SOCKET),
childInterruptSockWriter_(THRIFT_INVALID_SOCKET) {
}
TServerSocket::TServerSocket(int port, int sendTimeout, int recvTimeout)
: interruptableChildren_(true),
port_(port),
serverSocket_(THRIFT_INVALID_SOCKET),
acceptBacklog_(DEFAULT_BACKLOG),
sendTimeout_(sendTimeout),
recvTimeout_(recvTimeout),
accTimeout_(-1),
retryLimit_(0),
retryDelay_(0),
tcpSendBuffer_(0),
tcpRecvBuffer_(0),
keepAlive_(false),
listening_(false),
interruptSockWriter_(THRIFT_INVALID_SOCKET),
interruptSockReader_(THRIFT_INVALID_SOCKET),
childInterruptSockWriter_(THRIFT_INVALID_SOCKET) {
}
TServerSocket::TServerSocket(const string& address, int port)
: interruptableChildren_(true),
port_(port),
address_(address),
serverSocket_(THRIFT_INVALID_SOCKET),
acceptBacklog_(DEFAULT_BACKLOG),
sendTimeout_(0),
recvTimeout_(0),
accTimeout_(-1),
retryLimit_(0),
retryDelay_(0),
tcpSendBuffer_(0),
tcpRecvBuffer_(0),
keepAlive_(false),
listening_(false),
interruptSockWriter_(THRIFT_INVALID_SOCKET),
interruptSockReader_(THRIFT_INVALID_SOCKET),
childInterruptSockWriter_(THRIFT_INVALID_SOCKET) {
}
TServerSocket::TServerSocket(const string& path)
: interruptableChildren_(true),
port_(0),
path_(path),
serverSocket_(THRIFT_INVALID_SOCKET),
acceptBacklog_(DEFAULT_BACKLOG),
sendTimeout_(0),
recvTimeout_(0),
accTimeout_(-1),
retryLimit_(0),
retryDelay_(0),
tcpSendBuffer_(0),
tcpRecvBuffer_(0),
keepAlive_(false),
listening_(false),
interruptSockWriter_(THRIFT_INVALID_SOCKET),
interruptSockReader_(THRIFT_INVALID_SOCKET),
childInterruptSockWriter_(THRIFT_INVALID_SOCKET) {
}
TServerSocket::~TServerSocket() {
close();
}
void TServerSocket::setSendTimeout(int sendTimeout) {
sendTimeout_ = sendTimeout;
}
void TServerSocket::setRecvTimeout(int recvTimeout) {
recvTimeout_ = recvTimeout;
}
void TServerSocket::setAcceptTimeout(int accTimeout) {
accTimeout_ = accTimeout;
}
void TServerSocket::setAcceptBacklog(int accBacklog) {
acceptBacklog_ = accBacklog;
}
void TServerSocket::setRetryLimit(int retryLimit) {
retryLimit_ = retryLimit;
}
void TServerSocket::setRetryDelay(int retryDelay) {
retryDelay_ = retryDelay;
}
void TServerSocket::setTcpSendBuffer(int tcpSendBuffer) {
tcpSendBuffer_ = tcpSendBuffer;
}
void TServerSocket::setTcpRecvBuffer(int tcpRecvBuffer) {
tcpRecvBuffer_ = tcpRecvBuffer;
}
void TServerSocket::setInterruptableChildren(bool enable) {
if (listening_) {
throw std::logic_error("setInterruptableChildren cannot be called after listen()");
}
interruptableChildren_ = enable;
}
void TServerSocket::listen() {
listening_ = true;
#ifdef _WIN32
TWinsockSingleton::create();
#endif // _WIN32
THRIFT_SOCKET sv[2];
// Create the socket pair used to interrupt
if (-1 == THRIFT_SOCKETPAIR(AF_LOCAL, SOCK_STREAM, 0, sv)) {
GlobalOutput.perror("TServerSocket::listen() socketpair() interrupt", THRIFT_GET_SOCKET_ERROR);
interruptSockWriter_ = THRIFT_INVALID_SOCKET;
interruptSockReader_ = THRIFT_INVALID_SOCKET;
} else {
interruptSockWriter_ = sv[1];
interruptSockReader_ = sv[0];
}
// Create the socket pair used to interrupt all clients
if (-1 == THRIFT_SOCKETPAIR(AF_LOCAL, SOCK_STREAM, 0, sv)) {
GlobalOutput.perror("TServerSocket::listen() socketpair() childInterrupt",
THRIFT_GET_SOCKET_ERROR);
childInterruptSockWriter_ = THRIFT_INVALID_SOCKET;
pChildInterruptSockReader_.reset();
} else {
childInterruptSockWriter_ = sv[1];
pChildInterruptSockReader_
= boost::shared_ptr<THRIFT_SOCKET>(new THRIFT_SOCKET(sv[0]), destroyer_of_fine_sockets);
}
// Validate port number
if (port_ < 0 || port_ > 0xFFFF) {
throw TTransportException(TTransportException::BAD_ARGS, "Specified port is invalid");
}
struct addrinfo hints;
const struct addrinfo *res;
int error;
char port[sizeof("65535")];
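// sizeof("65535") is 6: large enough for any valid port number plus the terminating NUL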
std::memset(&hints, 0, sizeof(hints));
hints.ai_family = PF_UNSPEC;
hints.ai_socktype = SOCK_STREAM;
hints.ai_flags = AI_PASSIVE | AI_ADDRCONFIG;
sprintf(port, "%d", port_);
// If address is not specified use wildcard address (NULL)
TGetAddrInfoWrapper info(address_.empty() ? NULL : &address_[0], port, &hints);
error = info.init();
if (error) {
GlobalOutput.printf("getaddrinfo %d: %s", error, THRIFT_GAI_STRERROR(error));
close();
throw TTransportException(TTransportException::NOT_OPEN,
"Could not resolve host for server socket.");
}
// Pick the ipv6 address first since ipv4 addresses can be mapped
// into ipv6 space.
for (res = info.res(); res; res = res->ai_next) {
if (res->ai_family == AF_INET6 || res->ai_next == NULL)
break;
}
if (!path_.empty()) {
serverSocket_ = socket(PF_UNIX, SOCK_STREAM, IPPROTO_IP);
} else {
serverSocket_ = socket(res->ai_family, res->ai_socktype, res->ai_protocol);
}
if (serverSocket_ == THRIFT_INVALID_SOCKET) {
int errno_copy = THRIFT_GET_SOCKET_ERROR;
GlobalOutput.perror("TServerSocket::listen() socket() ", errno_copy);
close();
throw TTransportException(TTransportException::NOT_OPEN,
"Could not create server socket.",
errno_copy);
}
// Set THRIFT_NO_SOCKET_CACHING to prevent 2MSL delay on accept
int one = 1;
if (-1 == setsockopt(serverSocket_,
SOL_SOCKET,
THRIFT_NO_SOCKET_CACHING,
cast_sockopt(&one),
sizeof(one))) {
// ignore errors coming out of this setsockopt on Windows. This is because
// SO_EXCLUSIVEADDRUSE requires admin privileges on WinXP, but we don't
// want to force servers to be an admin.
#ifndef _WIN32
int errno_copy = THRIFT_GET_SOCKET_ERROR;
GlobalOutput.perror("TServerSocket::listen() setsockopt() THRIFT_NO_SOCKET_CACHING ",
errno_copy);
close();
throw TTransportException(TTransportException::NOT_OPEN,
"Could not set THRIFT_NO_SOCKET_CACHING",
errno_copy);
#endif
}
// Set TCP buffer sizes
if (tcpSendBuffer_ > 0) {
if (-1 == setsockopt(serverSocket_,
SOL_SOCKET,
SO_SNDBUF,
cast_sockopt(&tcpSendBuffer_),
sizeof(tcpSendBuffer_))) {
int errno_copy = THRIFT_GET_SOCKET_ERROR;
GlobalOutput.perror("TServerSocket::listen() setsockopt() SO_SNDBUF ", errno_copy);
close();
throw TTransportException(TTransportException::NOT_OPEN,
"Could not set SO_SNDBUF",
errno_copy);
}
}
if (tcpRecvBuffer_ > 0) {
if (-1 == setsockopt(serverSocket_,
SOL_SOCKET,
SO_RCVBUF,
cast_sockopt(&tcpRecvBuffer_),
sizeof(tcpRecvBuffer_))) {
int errno_copy = THRIFT_GET_SOCKET_ERROR;
GlobalOutput.perror("TServerSocket::listen() setsockopt() SO_RCVBUF ", errno_copy);
close();
throw TTransportException(TTransportException::NOT_OPEN,
"Could not set SO_RCVBUF",
errno_copy);
}
}
// Defer accept
#ifdef TCP_DEFER_ACCEPT
if (path_.empty()) {
if (-1 == setsockopt(serverSocket_, IPPROTO_TCP, TCP_DEFER_ACCEPT, &one, sizeof(one))) {
int errno_copy = THRIFT_GET_SOCKET_ERROR;
GlobalOutput.perror("TServerSocket::listen() setsockopt() TCP_DEFER_ACCEPT ", errno_copy);
close();
throw TTransportException(TTransportException::NOT_OPEN,
"Could not set TCP_DEFER_ACCEPT",
errno_copy);
}
}
#endif // #ifdef TCP_DEFER_ACCEPT
#ifdef IPV6_V6ONLY
if (res->ai_family == AF_INET6 && path_.empty()) {
int zero = 0;
if (-1 == setsockopt(serverSocket_,
IPPROTO_IPV6,
IPV6_V6ONLY,
cast_sockopt(&zero),
sizeof(zero))) {
GlobalOutput.perror("TServerSocket::listen() IPV6_V6ONLY ", THRIFT_GET_SOCKET_ERROR);
}
}
#endif // #ifdef IPV6_V6ONLY
// Turn linger off, don't want to block on calls to close
struct linger ling = {0, 0};
if (-1 == setsockopt(serverSocket_, SOL_SOCKET, SO_LINGER, cast_sockopt(&ling), sizeof(ling))) {
int errno_copy = THRIFT_GET_SOCKET_ERROR;
GlobalOutput.perror("TServerSocket::listen() setsockopt() SO_LINGER ", errno_copy);
close();
throw TTransportException(TTransportException::NOT_OPEN, "Could not set SO_LINGER", errno_copy);
}
// Unix Sockets do not need that
if (path_.empty()) {
// TCP Nodelay, speed over bandwidth
if (-1
== setsockopt(serverSocket_, IPPROTO_TCP, TCP_NODELAY, cast_sockopt(&one), sizeof(one))) {
int errno_copy = THRIFT_GET_SOCKET_ERROR;
GlobalOutput.perror("TServerSocket::listen() setsockopt() TCP_NODELAY ", errno_copy);
close();
throw TTransportException(TTransportException::NOT_OPEN,
"Could not set TCP_NODELAY",
errno_copy);
}
}
// Set NONBLOCK on the accept socket
int flags = THRIFT_FCNTL(serverSocket_, THRIFT_F_GETFL, 0);
if (flags == -1) {
int errno_copy = THRIFT_GET_SOCKET_ERROR;
GlobalOutput.perror("TServerSocket::listen() THRIFT_FCNTL() THRIFT_F_GETFL ", errno_copy);
close();
throw TTransportException(TTransportException::NOT_OPEN,
"THRIFT_FCNTL() THRIFT_F_GETFL failed",
errno_copy);
}
if (-1 == THRIFT_FCNTL(serverSocket_, THRIFT_F_SETFL, flags | THRIFT_O_NONBLOCK)) {
int errno_copy = THRIFT_GET_SOCKET_ERROR;
GlobalOutput.perror("TServerSocket::listen() THRIFT_FCNTL() THRIFT_O_NONBLOCK ", errno_copy);
close();
throw TTransportException(TTransportException::NOT_OPEN,
"THRIFT_FCNTL() THRIFT_F_SETFL THRIFT_O_NONBLOCK failed",
errno_copy);
}
// prepare the port information
// we may want to try to bind more than once, since THRIFT_NO_SOCKET_CACHING doesn't
// always seem to work. The client can configure the retry variables.
int retries = 0;
if (!path_.empty()) {
#ifndef _WIN32
// Unix Domain Socket
size_t len = path_.size() + 1;
if (len > sizeof(((sockaddr_un*)NULL)->sun_path)) {
int errno_copy = THRIFT_GET_SOCKET_ERROR;
GlobalOutput.perror("TSocket::listen() Unix Domain socket path too long", errno_copy);
throw TTransportException(TTransportException::NOT_OPEN,
"Unix Domain socket path too long",
errno_copy);
}
struct sockaddr_un address;
address.sun_family = AF_UNIX;
memcpy(address.sun_path, path_.c_str(), len);
socklen_t structlen = static_cast<socklen_t>(sizeof(address));
if (!address.sun_path[0]) { // abstract namespace socket
#ifdef __linux__
// sun_path is not null-terminated in this case and structlen determines its length
structlen -= sizeof(address.sun_path) - len;
#else
GlobalOutput.perror("TSocket::open() Abstract Namespace Domain sockets only supported on linux: ", -99);
throw TTransportException(TTransportException::NOT_OPEN,
" Abstract Namespace Domain socket path not supported");
#endif
}
do {
if (0 == ::bind(serverSocket_, (struct sockaddr*)&address, structlen)) {
break;
}
// use short circuit evaluation here to only sleep if we need to
} while ((retries++ < retryLimit_) && (THRIFT_SLEEP_SEC(retryDelay_) == 0));
#else
GlobalOutput.perror("TSocket::open() Unix Domain socket path not supported on windows", -99);
throw TTransportException(TTransportException::NOT_OPEN,
" Unix Domain socket path not supported");
#endif
} else {
do {
if (0 == ::bind(serverSocket_, res->ai_addr, static_cast<int>(res->ai_addrlen))) {
break;
}
// use short circuit evaluation here to only sleep if we need to
} while ((retries++ < retryLimit_) && (THRIFT_SLEEP_SEC(retryDelay_) == 0));
// retrieve bind info
if (port_ == 0 && retries <= retryLimit_) {
struct sockaddr_storage sa;
socklen_t len = sizeof(sa);
std::memset(&sa, 0, len);
if (::getsockname(serverSocket_, reinterpret_cast<struct sockaddr*>(&sa), &len) < 0) {
int errno_copy = errno;
GlobalOutput.perror("TServerSocket::getPort() getsockname() ", errno_copy);
} else {
if (sa.ss_family == AF_INET6) {
const struct sockaddr_in6* sin = reinterpret_cast<const struct sockaddr_in6*>(&sa);
port_ = ntohs(sin->sin6_port);
} else {
const struct sockaddr_in* sin = reinterpret_cast<const struct sockaddr_in*>(&sa);
port_ = ntohs(sin->sin_port);
}
}
}
}
// throw an error if we failed to bind properly
if (retries > retryLimit_) {
char errbuf[1024];
if (!path_.empty()) {
sprintf(errbuf, "TServerSocket::listen() PATH %s", path_.c_str());
} else {
sprintf(errbuf, "TServerSocket::listen() BIND %d", port_);
}
GlobalOutput(errbuf);
close();
throw TTransportException(TTransportException::NOT_OPEN,
"Could not bind",
THRIFT_GET_SOCKET_ERROR);
}
if (listenCallback_)
listenCallback_(serverSocket_);
// Call listen
if (-1 == ::listen(serverSocket_, acceptBacklog_)) {
int errno_copy = THRIFT_GET_SOCKET_ERROR;
GlobalOutput.perror("TServerSocket::listen() listen() ", errno_copy);
close();
throw TTransportException(TTransportException::NOT_OPEN, "Could not listen", errno_copy);
}
// The socket is now listening!
}
int TServerSocket::getPort() {
return port_;
}
shared_ptr<TTransport> TServerSocket::acceptImpl() {
if (serverSocket_ == THRIFT_INVALID_SOCKET) {
throw TTransportException(TTransportException::NOT_OPEN, "TServerSocket not listening");
}
struct THRIFT_POLLFD fds[2];
int maxEintrs = 5;
int numEintrs = 0;
while (true) {
std::memset(fds, 0, sizeof(fds));
fds[0].fd = serverSocket_;
fds[0].events = THRIFT_POLLIN;
if (interruptSockReader_ != THRIFT_INVALID_SOCKET) {
fds[1].fd = interruptSockReader_;
fds[1].events = THRIFT_POLLIN;
}
/*
TODO: if THRIFT_EINTR is received, we'll restart the timeout.
To be accurate, we need to fix this in the future.
*/
int ret = THRIFT_POLL(fds, 2, accTimeout_);
if (ret < 0) {
// error cases
if (THRIFT_GET_SOCKET_ERROR == THRIFT_EINTR && (numEintrs++ < maxEintrs)) {
// THRIFT_EINTR needs to be handled manually and we can tolerate
// a certain number
continue;
}
int errno_copy = THRIFT_GET_SOCKET_ERROR;
GlobalOutput.perror("TServerSocket::acceptImpl() THRIFT_POLL() ", errno_copy);
throw TTransportException(TTransportException::UNKNOWN, "Unknown", errno_copy);
} else if (ret > 0) {
// Check for an interrupt signal
if (interruptSockReader_ != THRIFT_INVALID_SOCKET && (fds[1].revents & THRIFT_POLLIN)) {
int8_t buf;
if (-1 == recv(interruptSockReader_, cast_sockopt(&buf), sizeof(int8_t), 0)) {
GlobalOutput.perror("TServerSocket::acceptImpl() recv() interrupt ",
THRIFT_GET_SOCKET_ERROR);
}
throw TTransportException(TTransportException::INTERRUPTED);
}
// Check for the actual server socket being ready
if (fds[0].revents & THRIFT_POLLIN) {
break;
}
} else {
GlobalOutput("TServerSocket::acceptImpl() THRIFT_POLL 0");
throw TTransportException(TTransportException::UNKNOWN);
}
}
struct sockaddr_storage clientAddress;
int size = sizeof(clientAddress);
THRIFT_SOCKET clientSocket
= ::accept(serverSocket_, (struct sockaddr*)&clientAddress, (socklen_t*)&size);
if (clientSocket == -1) {
int errno_copy = THRIFT_GET_SOCKET_ERROR;
GlobalOutput.perror("TServerSocket::acceptImpl() ::accept() ", errno_copy);
throw TTransportException(TTransportException::UNKNOWN, "accept()", errno_copy);
}
// Make sure client socket is blocking
int flags = THRIFT_FCNTL(clientSocket, THRIFT_F_GETFL, 0);
if (flags == -1) {
int errno_copy = THRIFT_GET_SOCKET_ERROR;
::THRIFT_CLOSESOCKET(clientSocket);
GlobalOutput.perror("TServerSocket::acceptImpl() THRIFT_FCNTL() THRIFT_F_GETFL ", errno_copy);
throw TTransportException(TTransportException::UNKNOWN,
"THRIFT_FCNTL(THRIFT_F_GETFL)",
errno_copy);
}
if (-1 == THRIFT_FCNTL(clientSocket, THRIFT_F_SETFL, flags & ~THRIFT_O_NONBLOCK)) {
int errno_copy = THRIFT_GET_SOCKET_ERROR;
::THRIFT_CLOSESOCKET(clientSocket);
GlobalOutput
.perror("TServerSocket::acceptImpl() THRIFT_FCNTL() THRIFT_F_SETFL ~THRIFT_O_NONBLOCK ",
errno_copy);
throw TTransportException(TTransportException::UNKNOWN,
"THRIFT_FCNTL(THRIFT_F_SETFL)",
errno_copy);
}
shared_ptr<TSocket> client = createSocket(clientSocket);
if (sendTimeout_ > 0) {
client->setSendTimeout(sendTimeout_);
}
if (recvTimeout_ > 0) {
client->setRecvTimeout(recvTimeout_);
}
if (keepAlive_) {
client->setKeepAlive(keepAlive_);
}
client->setCachedAddress((sockaddr*)&clientAddress, size);
if (acceptCallback_)
acceptCallback_(clientSocket);
return client;
}
shared_ptr<TSocket> TServerSocket::createSocket(THRIFT_SOCKET clientSocket) {
if (interruptableChildren_) {
return shared_ptr<TSocket>(new TSocket(clientSocket, pChildInterruptSockReader_));
} else {
return shared_ptr<TSocket>(new TSocket(clientSocket));
}
}
void TServerSocket::notify(THRIFT_SOCKET notifySocket) {
if (notifySocket != THRIFT_INVALID_SOCKET) {
int8_t byte = 0;
if (-1 == send(notifySocket, cast_sockopt(&byte), sizeof(int8_t), 0)) {
GlobalOutput.perror("TServerSocket::notify() send() ", THRIFT_GET_SOCKET_ERROR);
}
}
}
void TServerSocket::interrupt() {
notify(interruptSockWriter_);
}
void TServerSocket::interruptChildren() {
notify(childInterruptSockWriter_);
}
void TServerSocket::close() {
if (serverSocket_ != THRIFT_INVALID_SOCKET) {
shutdown(serverSocket_, THRIFT_SHUT_RDWR);
::THRIFT_CLOSESOCKET(serverSocket_);
}
if (interruptSockWriter_ != THRIFT_INVALID_SOCKET) {
::THRIFT_CLOSESOCKET(interruptSockWriter_);
}
if (interruptSockReader_ != THRIFT_INVALID_SOCKET) {
::THRIFT_CLOSESOCKET(interruptSockReader_);
}
if (childInterruptSockWriter_ != THRIFT_INVALID_SOCKET) {
::THRIFT_CLOSESOCKET(childInterruptSockWriter_);
}
serverSocket_ = THRIFT_INVALID_SOCKET;
interruptSockWriter_ = THRIFT_INVALID_SOCKET;
interruptSockReader_ = THRIFT_INVALID_SOCKET;
childInterruptSockWriter_ = THRIFT_INVALID_SOCKET;
pChildInterruptSockReader_.reset();
listening_ = false;
}
}
}
} // apache::thrift::transport
| 1 | 12,095 | sizeof(port) seems ever-so-slightly clearer. | apache-thrift | c |
@@ -116,6 +116,12 @@ type Rule struct {
// conjunction with NetworkPolicySpec/ClusterNetworkPolicySpec.AppliedTo.
// +optional
AppliedTo []NetworkPolicyPeer `json:"appliedTo,omitempty"`
+ // SourceGroups is a list of ClusterGroup names which must be set as the
+ // source for this rule.
+ SourceGroups []string `json:"sourceGroups,omitempty"`
+ // DestinationGroups is a list of ClusterGroup names which must be set as the
+ // destination for this rule.
+ DestinationGroups []string `json:"destinationGroups,omitempty"`
}
// NetworkPolicyPeer describes the grouping selector of workloads. | 1 | // Copyright 2020 Antrea Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v1alpha1
import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
)
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type NetworkPolicy struct {
metav1.TypeMeta `json:",inline"`
// Standard metadata of the object.
metav1.ObjectMeta `json:"metadata,omitempty"`
// Specification of the desired behavior of NetworkPolicy.
Spec NetworkPolicySpec `json:"spec"`
// Most recently observed status of the NetworkPolicy.
Status NetworkPolicyStatus `json:"status"`
}
// NetworkPolicySpec defines the desired state for NetworkPolicy.
type NetworkPolicySpec struct {
// Tier specifies the tier to which this NetworkPolicy belongs to.
// The NetworkPolicy order will be determined based on the combination of the
// Tier's Priority and the NetworkPolicy's own Priority. If not specified,
// this policy will be created in the Application Tier right above the K8s
// NetworkPolicy which resides at the bottom.
Tier string `json:"tier,omitempty"`
	// Priority specifies the order of the NetworkPolicy relative to other
// NetworkPolicies.
Priority float64 `json:"priority"`
	// Select workloads on which the rules will be applied. Cannot be set in
// conjunction with AppliedTo in each rule.
// +optional
AppliedTo []NetworkPolicyPeer `json:"appliedTo,omitempty"`
// Set of ingress rules evaluated based on the order in which they are set.
// Currently Ingress rule supports setting the `From` field but not the `To`
// field within a Rule.
// +optional
Ingress []Rule `json:"ingress"`
// Set of egress rules evaluated based on the order in which they are set.
// Currently Egress rule supports setting the `To` field but not the `From`
// field within a Rule.
// +optional
Egress []Rule `json:"egress"`
}
// NetworkPolicyPhase defines the phase in which a NetworkPolicy is.
type NetworkPolicyPhase string
// These are the valid values for NetworkPolicyPhase.
const (
// NetworkPolicyPending means the NetworkPolicy has been accepted by the system, but it has not been processed by Antrea.
NetworkPolicyPending NetworkPolicyPhase = "Pending"
// NetworkPolicyRealizing means the NetworkPolicy has been observed by Antrea and is being realized.
NetworkPolicyRealizing NetworkPolicyPhase = "Realizing"
// NetworkPolicyRealized means the NetworkPolicy has been enforced to all Pods on all Nodes it applies to.
NetworkPolicyRealized NetworkPolicyPhase = "Realized"
)
// NetworkPolicyStatus represents information about the status of a NetworkPolicy.
type NetworkPolicyStatus struct {
// The phase of a NetworkPolicy is a simple, high-level summary of the NetworkPolicy's status.
Phase NetworkPolicyPhase `json:"phase"`
// The generation observed by Antrea.
ObservedGeneration int64 `json:"observedGeneration"`
// The number of nodes that have realized the NetworkPolicy.
CurrentNodesRealized int32 `json:"currentNodesRealized"`
// The total number of nodes that should realize the NetworkPolicy.
DesiredNodesRealized int32 `json:"desiredNodesRealized"`
}
// Rule describes the traffic allowed to/from the workloads selected by
// Spec.AppliedTo. Based on the action specified in the rule, traffic that
// exactly matches the specified ports and protocol is either allowed or denied.
type Rule struct {
// Action specifies the action to be applied on the rule.
Action *RuleAction `json:"action"`
// Set of port and protocol allowed/denied by the rule. If this field is unset
// or empty, this rule matches all ports.
// +optional
Ports []NetworkPolicyPort `json:"ports,omitempty"`
// Rule is matched if traffic originates from workloads selected by
// this field. If this field is empty, this rule matches all sources.
// +optional
From []NetworkPolicyPeer `json:"from"`
// Rule is matched if traffic is intended for workloads selected by
// this field. If this field is empty or missing, this rule matches all
// destinations.
// +optional
To []NetworkPolicyPeer `json:"to"`
// Name describes the intention of this rule.
// Name should be unique within the policy.
// +optional
Name string `json:"name"`
	// EnableLogging is used to indicate if the agent should generate logs
	// when rules are matched. Defaults to false.
EnableLogging bool `json:"enableLogging"`
	// Select workloads on which this rule will be applied. Cannot be set in
// conjunction with NetworkPolicySpec/ClusterNetworkPolicySpec.AppliedTo.
// +optional
AppliedTo []NetworkPolicyPeer `json:"appliedTo,omitempty"`
}
// NetworkPolicyPeer describes the grouping selector of workloads.
type NetworkPolicyPeer struct {
// IPBlock describes the IPAddresses/IPBlocks that is matched in to/from.
// IPBlock cannot be set as part of the AppliedTo field.
// Cannot be set with any other selector.
// +optional
IPBlock *IPBlock `json:"ipBlock,omitempty"`
// Select Pods from NetworkPolicy's Namespace as workloads in
// AppliedTo/To/From fields. If set with NamespaceSelector, Pods are
// matched from Namespaces matched by the NamespaceSelector.
// Cannot be set with any other selector except NamespaceSelector.
// +optional
PodSelector *metav1.LabelSelector `json:"podSelector,omitempty"`
// Select all Pods from Namespaces matched by this selector, as
// workloads in To/From fields. If set with PodSelector,
// Pods are matched from Namespaces matched by the NamespaceSelector.
// Cannot be set with any other selector except PodSelector or
// ExternalEntitySelector.
// +optional
NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty"`
// Select ExternalEntities from NetworkPolicy's Namespace as workloads
// in AppliedTo/To/From fields. If set with NamespaceSelector,
// ExternalEntities are matched from Namespaces matched by the
// NamespaceSelector.
// Cannot be set with any other selector except NamespaceSelector.
// +optional
ExternalEntitySelector *metav1.LabelSelector `json:"externalEntitySelector,omitempty"`
}
// IPBlock describes a particular CIDR (Ex. "192.168.1.1/24") that is allowed
// or denied to/from the workloads matched by a Spec.AppliedTo.
type IPBlock struct {
// CIDR is a string representing the IP Block
// Valid examples are "192.168.1.1/24".
CIDR string `json:"cidr"`
}
// NetworkPolicyPort describes the port and protocol to match in a rule.
type NetworkPolicyPort struct {
// The protocol (TCP, UDP, or SCTP) which traffic must match.
// If not specified, this field defaults to TCP.
// +optional
Protocol *v1.Protocol `json:"protocol,omitempty"`
// The port on the given protocol. This can be either a numerical
// or named port on a Pod. If this field is not provided, this
// matches all port names and numbers.
// +optional
Port *intstr.IntOrString `json:"port,omitempty"`
	// EndPort defines the end of the port range (inclusive).
// It can only be specified when a numerical `port` is specified.
// +optional
EndPort *int32 `json:"endPort,omitempty"`
}
// RuleAction describes the action to be applied on traffic matching a rule.
type RuleAction string
const (
// RuleActionAllow describes that rule matching traffic must be allowed.
RuleActionAllow RuleAction = "Allow"
// RuleActionDrop describes that rule matching traffic must be dropped.
RuleActionDrop RuleAction = "Drop"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type NetworkPolicyList struct {
metav1.TypeMeta `json:",inline"`
// +optional
metav1.ListMeta `json:"metadata,omitempty"`
Items []NetworkPolicy `json:"items"`
}
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type ClusterNetworkPolicy struct {
metav1.TypeMeta `json:",inline"`
// Standard metadata of the object.
metav1.ObjectMeta `json:"metadata,omitempty"`
// Specification of the desired behavior of ClusterNetworkPolicy.
Spec ClusterNetworkPolicySpec `json:"spec"`
// Most recently observed status of the NetworkPolicy.
Status NetworkPolicyStatus `json:"status"`
}
// ClusterNetworkPolicySpec defines the desired state for ClusterNetworkPolicy.
type ClusterNetworkPolicySpec struct {
// Tier specifies the tier to which this ClusterNetworkPolicy belongs to.
// The ClusterNetworkPolicy order will be determined based on the
// combination of the Tier's Priority and the ClusterNetworkPolicy's own
// Priority. If not specified, this policy will be created in the Application
// Tier right above the K8s NetworkPolicy which resides at the bottom.
Tier string `json:"tier,omitempty"`
	// Priority specifies the order of the ClusterNetworkPolicy relative to
// other AntreaClusterNetworkPolicies.
Priority float64 `json:"priority"`
	// Select workloads on which the rules will be applied. Cannot be set in
// conjunction with AppliedTo in each rule.
// +optional
AppliedTo []NetworkPolicyPeer `json:"appliedTo,omitempty"`
// Set of ingress rules evaluated based on the order in which they are set.
// Currently Ingress rule supports setting the `From` field but not the `To`
// field within a Rule.
// +optional
Ingress []Rule `json:"ingress"`
// Set of egress rules evaluated based on the order in which they are set.
// Currently Egress rule supports setting the `To` field but not the `From`
// field within a Rule.
// +optional
Egress []Rule `json:"egress"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type ClusterNetworkPolicyList struct {
metav1.TypeMeta `json:",inline"`
// +optional
metav1.ListMeta `json:"metadata,omitempty"`
Items []ClusterNetworkPolicy `json:"items"`
}
// +genclient
// +genclient:nonNamespaced
// +genclient:noStatus
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type Tier struct {
metav1.TypeMeta `json:",inline"`
// Standard metadata of the object.
metav1.ObjectMeta `json:"metadata,omitempty"`
// Specification of the desired behavior of Tier.
Spec TierSpec `json:"spec"`
}
// TierSpec defines the desired state for Tier.
type TierSpec struct {
	// Priority specifies the order of the Tier relative to other Tiers.
Priority int32 `json:"priority"`
// Description is an optional field to add more information regarding
// the purpose of this Tier.
Description string `json:"description,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type TierList struct {
metav1.TypeMeta `json:",inline"`
// +optional
metav1.ListMeta `json:"metadata,omitempty"`
Items []Tier `json:"items"`
}
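
The patch at the top of this row adds SourceGroups and DestinationGroups to Rule as flat string slices, parallel to the existing From/To peers. For comparison, here is a minimal sketch of an alternative shape — referencing a ClusterGroup from NetworkPolicyPeer itself — using a hypothetical Group field name; it mirrors the NetworkPolicyPeer type defined above (same package and imports) and is only an illustration, not Antrea's actual API:

```go
package v1alpha1

import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

// Sketch only: one possible way to reference a ClusterGroup from the peer
// itself instead of adding SourceGroups/DestinationGroups slices on Rule.
// The "Group" field name and its semantics are hypothetical.
type NetworkPolicyPeer struct {
	IPBlock                *IPBlock              `json:"ipBlock,omitempty"`
	PodSelector            *metav1.LabelSelector `json:"podSelector,omitempty"`
	NamespaceSelector      *metav1.LabelSelector `json:"namespaceSelector,omitempty"`
	ExternalEntitySelector *metav1.LabelSelector `json:"externalEntitySelector,omitempty"`
	// Group selects all members of the named ClusterGroup as the source
	// (when used in From) or the destination (when used in To) of the rule.
	// In this sketch it cannot be set together with any selector above.
	// +optional
	Group string `json:"group,omitempty"`
}
```

With this shape the group reference travels with the peer, so the same field covers both ingress sources and egress destinations without two extra slices on Rule.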
| 1 | 31,043 | Not sure if this has been discussed. I wonder why SourceGroups and DestinationGroups are not in From and To NetworkPolicyPeer as NetworkPolicyPeer already describes the source/destination of the traffic. | antrea-io-antrea | go |
@@ -151,6 +151,19 @@ class Completer(QObject):
except KeyError:
# No completion model for this section/option.
model = None
+ elif completion == usertypes.Completion.keybinding:
+ # look for --mode to provide mode-specific binding completions
+ # since flags are ignored by most completers, we need to re-parse
+ parts, cursor_part = self._filter_cmdline_parts(self.split(),
+ cursor_part,
+ ignore_flags=False)
+ if parts[cursor_part - 1].startswith("--mode="):
+ mode = parts[cursor_part - 1][7:] # strip --mode=
+ key = parts[cursor_part]
+ else:
+ mode = 'normal'
+ key = parts[cursor_part - 1]
+ model = instances.get(completion)[mode][key]
else:
model = instances.get(completion)
| 1 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2016 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Completer attached to a CompletionView."""
from PyQt5.QtCore import pyqtSignal, pyqtSlot, QObject, QTimer
from qutebrowser.config import config
from qutebrowser.commands import cmdexc, cmdutils, runners
from qutebrowser.utils import usertypes, log, objreg, utils
from qutebrowser.completion.models import instances, sortfilter
class Completer(QObject):
"""Completer which manages completions in a CompletionView.
Attributes:
_cmd: The statusbar Command object this completer belongs to.
_ignore_change: Whether to ignore the next completion update.
_win_id: The window ID this completer is in.
_timer: The timer used to trigger the completion update.
_cursor_part: The cursor part index for the next completion update.
_last_cursor_pos: The old cursor position so we avoid double completion
updates.
_last_text: The old command text so we avoid double completion updates.
_signals_connected: Whether the signals are connected to update the
completion when the command widget requests that.
Signals:
next_prev_item: Emitted to select the next/previous item in the
completion.
arg0: True for the previous item, False for the next.
"""
next_prev_item = pyqtSignal(bool)
def __init__(self, cmd, win_id, parent=None):
super().__init__(parent)
self._win_id = win_id
self._cmd = cmd
self._signals_connected = False
self._ignore_change = False
self._empty_item_idx = None
self._timer = QTimer()
self._timer.setSingleShot(True)
self._timer.setInterval(0)
self._timer.timeout.connect(self.update_completion)
self._cursor_part = None
self._last_cursor_pos = None
self._last_text = None
objreg.get('config').changed.connect(self.on_auto_open_changed)
self.handle_signal_connections()
self._cmd.clear_completion_selection.connect(
self.handle_signal_connections)
def __repr__(self):
return utils.get_repr(self)
@config.change_filter('completion', 'auto-open')
def on_auto_open_changed(self):
self.handle_signal_connections()
@pyqtSlot()
def handle_signal_connections(self):
self._connect_signals(config.get('completion', 'auto-open'))
def _connect_signals(self, connect=True):
"""Connect or disconnect the completion signals.
Args:
connect: Whether to connect (True) or disconnect (False) the
signals.
Return:
True if the signals were connected (connect=True and aren't
connected yet) - otherwise False.
"""
connections = [
(self._cmd.update_completion, self.schedule_completion_update),
(self._cmd.textChanged, self.on_text_edited),
]
if connect and not self._signals_connected:
for sender, receiver in connections:
sender.connect(receiver)
self._signals_connected = True
return True
elif not connect:
for sender, receiver in connections:
try:
sender.disconnect(receiver)
except TypeError:
# Don't fail if not connected
pass
self._signals_connected = False
return False
def _open_completion_if_needed(self):
"""If auto-open is false, temporarily connect signals.
Also opens the completion.
"""
if not config.get('completion', 'auto-open'):
connected = self._connect_signals(True)
if connected:
self.update_completion()
def _model(self):
"""Convenience method to get the current completion model."""
completion = objreg.get('completion', scope='window',
window=self._win_id)
return completion.model()
def _get_completion_model(self, completion, parts, cursor_part):
"""Get a completion model based on an enum member.
Args:
completion: A usertypes.Completion member.
parts: The parts currently in the commandline.
cursor_part: The part the cursor is in.
Return:
A completion model or None.
"""
if completion == usertypes.Completion.option:
section = parts[cursor_part - 1]
model = instances.get(completion).get(section)
elif completion == usertypes.Completion.value:
section = parts[cursor_part - 2]
option = parts[cursor_part - 1]
try:
model = instances.get(completion)[section][option]
except KeyError:
# No completion model for this section/option.
model = None
else:
model = instances.get(completion)
if model is None:
return None
else:
return sortfilter.CompletionFilterModel(source=model, parent=self)
def _filter_cmdline_parts(self, parts, cursor_part):
"""Filter a list of commandline parts to exclude flags.
Args:
parts: A list of parts.
cursor_part: The index of the part the cursor is over.
Return:
A (parts, cursor_part) tuple with the modified values.
"""
if parts == ['']:
# Empty commandline, i.e. only :.
return [''], 0
filtered_parts = []
for i, part in enumerate(parts):
if part == '--':
break
elif part.startswith('-'):
if cursor_part >= i:
cursor_part -= 1
else:
filtered_parts.append(part)
return filtered_parts, cursor_part
def _get_new_completion(self, parts, cursor_part):
"""Get a new completion.
Args:
parts: The command chunks to get a completion for.
cursor_part: The part the cursor is over currently.
Return:
A completion model.
"""
try:
if parts[cursor_part].startswith('-'):
# cursor on a flag
return
except IndexError:
pass
log.completion.debug("Before filtering flags: parts {}, cursor_part "
"{}".format(parts, cursor_part))
parts, cursor_part = self._filter_cmdline_parts(parts, cursor_part)
log.completion.debug("After filtering flags: parts {}, cursor_part "
"{}".format(parts, cursor_part))
if cursor_part == 0:
# '|' or 'set|'
model = instances.get(usertypes.Completion.command)
return sortfilter.CompletionFilterModel(source=model, parent=self)
# delegate completion to command
try:
completions = cmdutils.cmd_dict[parts[0]].completion
except KeyError:
# entering an unknown command
return None
if completions is None:
# command without any available completions
return None
dbg_completions = [c.name for c in completions]
try:
idx = cursor_part - 1
completion = completions[idx]
except IndexError:
# More arguments than completions
log.completion.debug("completions: {}".format(
', '.join(dbg_completions)))
return None
dbg_completions[idx] = '*' + dbg_completions[idx] + '*'
log.completion.debug("completions: {}".format(
', '.join(dbg_completions)))
model = self._get_completion_model(completion, parts, cursor_part)
return model
def _quote(self, s):
"""Quote s if it needs quoting for the commandline.
Note we don't use shlex.quote because that quotes a lot of shell
metachars we don't need to have quoted.
"""
if not s:
return "''"
elif any(c in s for c in ' \'\t\n\\'):
# use single quotes, and put single quotes into double quotes
# the string $'b is then quoted as '$'"'"'b'
return "'" + s.replace("'", "'\"'\"'") + "'"
else:
return s
def selection_changed(self, selected, _deselected):
"""Change the completed part if a new item was selected.
        Called from the view's selectionChanged method.
Args:
selected: New selection.
_deselected: Previous selection.
"""
indexes = selected.indexes()
if not indexes:
return
model = self._model()
data = model.data(indexes[0])
if data is None:
return
parts = self.split()
try:
needs_quoting = cmdutils.cmd_dict[parts[0]].maxsplit is None
except KeyError:
needs_quoting = True
if needs_quoting:
data = self._quote(data)
if model.count() == 1 and config.get('completion', 'quick-complete'):
# If we only have one item, we want to apply it immediately
# and go on to the next part.
self.change_completed_part(data, immediate=True)
else:
log.completion.debug("Will ignore next completion update.")
self._ignore_change = True
self.change_completed_part(data)
@pyqtSlot()
def schedule_completion_update(self):
"""Schedule updating/enabling completion.
        For performance reasons we don't want to block here; instead we do this
in the background.
"""
if (self._cmd.cursorPosition() == self._last_cursor_pos and
self._cmd.text() == self._last_text):
log.completion.debug("Ignoring update because there were no "
"changes.")
else:
log.completion.debug("Scheduling completion update.")
self._timer.start()
self._last_cursor_pos = self._cmd.cursorPosition()
self._last_text = self._cmd.text()
@pyqtSlot()
def update_completion(self):
"""Check if completions are available and activate them."""
self.update_cursor_part()
parts = self.split()
log.completion.debug(
"Updating completion - prefix {}, parts {}, cursor_part {}".format(
self._cmd.prefix(), parts, self._cursor_part))
if self._ignore_change:
log.completion.debug("Ignoring completion update because "
"ignore_change is True.")
self._ignore_change = False
return
completion = objreg.get('completion', scope='window',
window=self._win_id)
if self._cmd.prefix() != ':':
# This is a search or gibberish, so we don't need to complete
# anything (yet)
# FIXME complete searches
# https://github.com/The-Compiler/qutebrowser/issues/32
completion.hide()
return
model = self._get_new_completion(parts, self._cursor_part)
if model != self._model():
if model is None:
completion.hide()
else:
completion.set_model(model)
if model is None:
log.completion.debug("No completion model for {}.".format(parts))
return
try:
pattern = parts[self._cursor_part].strip()
except IndexError:
pattern = ''
completion.set_pattern(pattern)
log.completion.debug(
"New completion for {}: {}, with pattern '{}'".format(
parts, model.srcmodel.__class__.__name__, pattern))
if self._model().count() == 0:
completion.hide()
return
if completion.enabled:
completion.show()
def split(self, keep=False, aliases=False):
"""Get the text split up in parts.
Args:
keep: Whether to keep special chars and whitespace.
aliases: Whether to resolve aliases.
"""
text = self._cmd.text()[len(self._cmd.prefix()):]
if not text:
# When only ":" is entered, we already have one imaginary part,
# which just is empty at the moment.
return ['']
if not text.strip():
# Text is only whitespace so we treat this as a single element with
# the whitespace.
return [text]
runner = runners.CommandRunner(self._win_id)
result = runner.parse(text, fallback=True, aliases=aliases, keep=keep)
parts = result.cmdline
if self._empty_item_idx is not None:
log.completion.debug("Empty element queued at {}, "
"inserting.".format(self._empty_item_idx))
parts.insert(self._empty_item_idx, '')
#log.completion.debug("Splitting '{}' -> {}".format(text, parts))
return parts
@pyqtSlot()
def update_cursor_part(self):
"""Get the part index of the commandline where the cursor is over."""
cursor_pos = self._cmd.cursorPosition()
snippet = slice(cursor_pos - 1, cursor_pos + 1)
spaces = self._cmd.text()[snippet] == ' '
cursor_pos -= len(self._cmd.prefix())
parts = self.split(keep=True)
log.completion.vdebug(
"text: {}, parts: {}, cursor_pos after removing prefix '{}': "
"{}".format(self._cmd.text(), parts, self._cmd.prefix(),
cursor_pos))
skip = 0
for i, part in enumerate(parts):
log.completion.vdebug("Checking part {}: {!r}".format(i, parts[i]))
if not part:
skip += 1
continue
if cursor_pos <= len(part):
# foo| bar
self._cursor_part = i - skip
if spaces:
self._empty_item_idx = i - skip
else:
self._empty_item_idx = None
log.completion.vdebug("cursor_pos {} <= len(part) {}, "
"setting cursor_part {} - {} (skip), "
"empty_item_idx {}".format(
cursor_pos, len(part), i, skip,
self._empty_item_idx))
break
cursor_pos -= len(part)
log.completion.vdebug(
"Removing len({!r}) -> {} from cursor_pos -> {}".format(
part, len(part), cursor_pos))
else:
if i == 0:
# Initial `:` press without any text.
self._cursor_part = 0
else:
self._cursor_part = i - skip
if spaces:
self._empty_item_idx = i - skip
else:
self._empty_item_idx = None
log.completion.debug("cursor_part {}, spaces {}".format(
self._cursor_part, spaces))
return
def change_completed_part(self, newtext, immediate=False):
"""Change the part we're currently completing in the commandline.
Args:
            newtext: The text to set (string).
immediate: True if the text should be completed immediately
including a trailing space and we shouldn't continue
completing the current item.
"""
parts = self.split()
log.completion.debug("changing part {} to '{}'".format(
self._cursor_part, newtext))
try:
parts[self._cursor_part] = newtext
except IndexError:
parts.append(newtext)
# We want to place the cursor directly after the part we just changed.
cursor_str = self._cmd.prefix() + ' '.join(
parts[:self._cursor_part + 1])
if immediate:
# If we should complete immediately, we want to move the cursor by
# one more char, to get to the next field.
cursor_str += ' '
text = self._cmd.prefix() + ' '.join(parts)
if immediate and self._cursor_part == len(parts) - 1:
# If we should complete immediately and we're completing the last
# part in the commandline, we automatically add a space.
text += ' '
self._cmd.setText(text)
log.completion.debug("Placing cursor after '{}'".format(cursor_str))
log.modes.debug("Completion triggered, focusing {!r}".format(self))
self._cmd.setCursorPosition(len(cursor_str))
self._cmd.setFocus()
self._cmd.show_cmd.emit()
@pyqtSlot()
def on_text_edited(self):
"""Reset _empty_item_idx if text was edited."""
self._empty_item_idx = None
# We also want to update the cursor part and emit update_completion
# here, but that's already done for us by cursorPositionChanged
# anyways, so we don't need to do it twice.
@cmdutils.register(instance='completer', hide=True,
modes=[usertypes.KeyMode.command], scope='window')
def completion_item_prev(self):
"""Select the previous completion item."""
self._open_completion_if_needed()
self.next_prev_item.emit(True)
@cmdutils.register(instance='completer', hide=True,
modes=[usertypes.KeyMode.command], scope='window')
def completion_item_next(self):
"""Select the next completion item."""
self._open_completion_if_needed()
self.next_prev_item.emit(False)
@cmdutils.register(instance='completion', hide=True,
modes=[usertypes.KeyMode.command], scope='window')
def completion_item_del(self):
"""Delete the current completion item."""
completion = objreg.get('completion', scope='window',
window=self._win_id)
if not completion.currentIndex().isValid():
raise cmdexc.CommandError("No item selected!")
try:
self.model().srcmodel.delete_cur_item(completion)
except NotImplementedError:
raise cmdexc.CommandError("Cannot delete this item.")
| 1 | 14,623 | This doesn't account for `:bind --mode caret ...` (i.e. `--mode caret` instead of `--mode=caret`; both work). Do you see an easy way to make that work? | qutebrowser-qutebrowser | py |
@@ -41,7 +41,8 @@ var (
errorPriority = map[reflect.Type]int{
reflect.TypeOf(&p.ShardOwnershipLostError{}): 0,
reflect.TypeOf(&p.CurrentWorkflowConditionFailedError{}): 1,
- reflect.TypeOf(&p.ConditionFailedError{}): 2,
+ reflect.TypeOf(&p.WorkflowConditionFailedError{}): 2,
+ reflect.TypeOf(&p.ConditionFailedError{}): 3,
}
errorDefaultPriority = math.MaxInt64 | 1 | // The MIT License
//
// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved.
//
// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package cassandra
import (
"encoding/json"
"fmt"
"math"
"reflect"
"sort"
persistencespb "go.temporal.io/server/api/persistence/v1"
p "go.temporal.io/server/common/persistence"
"go.temporal.io/server/common/persistence/nosql/nosqlplugin/cassandra/gocql"
"go.temporal.io/server/common/persistence/serialization"
)
var (
errorPriority = map[reflect.Type]int{
reflect.TypeOf(&p.ShardOwnershipLostError{}): 0,
reflect.TypeOf(&p.CurrentWorkflowConditionFailedError{}): 1,
reflect.TypeOf(&p.ConditionFailedError{}): 2,
}
errorDefaultPriority = math.MaxInt64
)
type (
executionCASCondition struct {
runID string
dbVersion int64
nextEventID int64 // TODO deprecate this variable once DB version comparison is the default
}
)
func convertErrors(
record map[string]interface{},
iter gocql.Iter,
requestShardID int32,
requestRangeID int64,
requestCurrentRunID string,
requestExecutionCASConditions []executionCASCondition,
) error {
records := []map[string]interface{}{record}
errors := extractErrors(
record,
requestShardID,
requestRangeID,
requestCurrentRunID,
requestExecutionCASConditions,
)
record = make(map[string]interface{})
for iter.MapScan(record) {
records = append(records, record)
errors = append(errors, extractErrors(
record,
requestShardID,
requestRangeID,
requestCurrentRunID,
requestExecutionCASConditions,
)...)
record = make(map[string]interface{})
}
errors = sortErrors(errors)
if len(errors) == 0 {
return &p.ConditionFailedError{
Msg: fmt.Sprintf("Encounter unknown error: shard ID: %v, range ID: %v, error: %v",
requestShardID,
requestRangeID,
printRecords(records),
),
}
}
return errors[0]
}
func extractErrors(
record map[string]interface{},
requestShardID int32,
requestRangeID int64,
requestCurrentRunID string,
requestExecutionCASConditions []executionCASCondition,
) []error {
var errors []error
if err := extractShardOwnershipLostError(
record,
requestShardID,
requestRangeID,
); err != nil {
errors = append(errors, err)
}
if err := extractCurrentWorkflowConflictError(
record,
requestCurrentRunID,
); err != nil {
errors = append(errors, err)
}
for _, condition := range requestExecutionCASConditions {
if err := extractWorkflowVersionConflictError(
record,
condition.runID,
condition.dbVersion,
condition.nextEventID,
); err != nil {
errors = append(errors, err)
}
}
return errors
}
func sortErrors(
errors []error,
) []error {
sort.Slice(errors, func(i int, j int) bool {
leftPriority, ok := errorPriority[reflect.TypeOf(errors[i])]
if !ok {
leftPriority = errorDefaultPriority
}
rightPriority, ok := errorPriority[reflect.TypeOf(errors[j])]
if !ok {
rightPriority = errorDefaultPriority
}
return leftPriority < rightPriority
})
return errors
}
func extractShardOwnershipLostError(
record map[string]interface{},
requestShardID int32,
requestRangeID int64,
) error {
rowType, ok := record["type"].(int)
if !ok {
// this case should not happen, maybe panic?
return nil
}
if rowType != rowTypeShard {
return nil
}
actualRangeID := record["range_id"].(int64)
if actualRangeID != requestRangeID {
return &p.ShardOwnershipLostError{
ShardID: requestShardID,
Msg: fmt.Sprintf("Encounter shard ownership lost, request range ID: %v, actual range ID: %v",
requestRangeID,
actualRangeID,
),
}
}
return nil
}
func extractCurrentWorkflowConflictError(
record map[string]interface{},
requestCurrentRunID string,
) error {
rowType, ok := record["type"].(int)
if !ok {
// this case should not happen, maybe panic?
return nil
}
if rowType != rowTypeExecution {
return nil
}
if runID := gocql.UUIDToString(record["run_id"]); runID != permanentRunID {
return nil
}
actualCurrentRunID := gocql.UUIDToString(record["current_run_id"])
if actualCurrentRunID != requestCurrentRunID {
binary, _ := record["execution_state"].([]byte)
encoding, _ := record["execution_state_encoding"].(string)
executionState := &persistencespb.WorkflowExecutionState{}
if state, err := serialization.WorkflowExecutionStateFromBlob(
binary,
encoding,
); err == nil {
executionState = state
}
// if err != nil, this means execution state cannot be parsed, just use default values
lastWriteVersion, _ := record["workflow_last_write_version"].(int64)
// TODO maybe assert actualCurrentRunID == executionState.RunId ?
return &p.CurrentWorkflowConditionFailedError{
Msg: fmt.Sprintf("Encounter concurrent workflow error, request run ID: %v, actual run ID: %v",
requestCurrentRunID,
actualCurrentRunID,
),
RequestID: executionState.CreateRequestId,
RunID: executionState.RunId,
State: executionState.State,
LastWriteVersion: lastWriteVersion,
}
}
return nil
}
func extractWorkflowVersionConflictError(
record map[string]interface{},
requestRunID string,
requestDBVersion int64,
requestNextEventID int64, // TODO deprecate this variable once DB version comparison is the default
) error {
rowType, ok := record["type"].(int)
if !ok {
// this case should not happen, maybe panic?
return nil
}
if rowType != rowTypeExecution {
return nil
}
if runID := gocql.UUIDToString(record["run_id"]); runID != requestRunID {
return nil
}
actualNextEventID, _ := record["next_event_id"].(int64)
actualDBVersion, _ := record["db_version"].(int64)
// TODO remove this block once DB version comparison is the default
if requestDBVersion == 0 {
if actualNextEventID != requestNextEventID {
return &p.ConditionFailedError{
Msg: fmt.Sprintf("Encounter workflow next event ID mismatch, request next event ID: %v, actual next event ID: %v",
requestNextEventID,
actualNextEventID,
),
}
}
return nil
}
if actualDBVersion != requestDBVersion {
return &p.ConditionFailedError{
Msg: fmt.Sprintf("Encounter workflow db version mismatch, request db version ID: %v, actual db version ID: %v",
requestDBVersion,
actualDBVersion,
),
}
}
return nil
}
func printRecords(
records []map[string]interface{},
) string {
binary, _ := json.MarshalIndent(records, "", " ")
return string(binary)
}
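
The patch above inserts WorkflowConditionFailedError between CurrentWorkflowConditionFailedError and the generic ConditionFailedError, bumping the latter from priority 2 to 3. A small self-contained sketch of how that table drives which error convertErrors surfaces first — the stand-in types below are hypothetical placeholders, not the real persistence errors:

```go
package main

import (
	"fmt"
	"reflect"
	"sort"
)

// Stand-ins for the persistence error types (the real ones live in
// go.temporal.io/server/common/persistence). Embedding error keeps them
// assignable to []error without writing Error() methods.
type shardOwnershipLost struct{ error }
type currentWorkflowConditionFailed struct{ error }
type workflowConditionFailed struct{ error }
type conditionFailed struct{ error }

// Mirrors the patched priority table: the smaller the number, the earlier
// the error sorts, so errors[0] is what convertErrors ultimately returns.
var errorPriority = map[reflect.Type]int{
	reflect.TypeOf(&shardOwnershipLost{}):             0,
	reflect.TypeOf(&currentWorkflowConditionFailed{}): 1,
	reflect.TypeOf(&workflowConditionFailed{}):        2,
	reflect.TypeOf(&conditionFailed{}):                3,
}

func main() {
	errs := []error{&conditionFailed{}, &workflowConditionFailed{}}
	sort.Slice(errs, func(i, j int) bool {
		return errorPriority[reflect.TypeOf(errs[i])] < errorPriority[reflect.TypeOf(errs[j])]
	})
	// The more specific workflow-condition error sorts ahead of the generic one.
	fmt.Println(reflect.TypeOf(errs[0])) // *main.workflowConditionFailed
}
```

Because sortErrors picks the lowest number first, the more specific workflow-condition error wins over the generic condition failure whenever both are extracted from the same batch of records.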
| 1 | 12,170 | Is this map used for durable state anywhere? If so we probably want to keep ConditionFailedError as 2 | temporalio-temporal | go |
@@ -25,6 +25,7 @@ type roundTripperOpts struct {
}
var dialAddr = quic.DialAddr
+var methodHEAD = "HEAD"
// client is a HTTP2 client doing QUIC requests
type client struct { | 1 | package h2quic
import (
"crypto/tls"
"errors"
"fmt"
"io"
"net"
"net/http"
"strings"
"sync"
"golang.org/x/net/http2"
"golang.org/x/net/http2/hpack"
"golang.org/x/net/idna"
quic "github.com/lucas-clemente/quic-go"
"github.com/lucas-clemente/quic-go/internal/protocol"
"github.com/lucas-clemente/quic-go/internal/utils"
"github.com/lucas-clemente/quic-go/qerr"
)
type roundTripperOpts struct {
DisableCompression bool
}
var dialAddr = quic.DialAddr
// client is a HTTP2 client doing QUIC requests
type client struct {
mutex sync.RWMutex
tlsConf *tls.Config
config *quic.Config
opts *roundTripperOpts
hostname string
handshakeErr error
dialOnce sync.Once
session quic.Session
headerStream quic.Stream
headerErr *qerr.QuicError
headerErrored chan struct{} // this channel is closed if an error occurs on the header stream
requestWriter *requestWriter
responses map[protocol.StreamID]chan *http.Response
}
var _ http.RoundTripper = &client{}
var defaultQuicConfig = &quic.Config{
RequestConnectionIDOmission: true,
KeepAlive: true,
}
// newClient creates a new client
func newClient(
hostname string,
tlsConfig *tls.Config,
opts *roundTripperOpts,
quicConfig *quic.Config,
) *client {
config := defaultQuicConfig
if quicConfig != nil {
config = quicConfig
}
return &client{
hostname: authorityAddr("https", hostname),
responses: make(map[protocol.StreamID]chan *http.Response),
tlsConf: tlsConfig,
config: config,
opts: opts,
headerErrored: make(chan struct{}),
}
}
// dial dials the connection
func (c *client) dial() error {
var err error
c.session, err = dialAddr(c.hostname, c.tlsConf, c.config)
if err != nil {
return err
}
// once the version has been negotiated, open the header stream
c.headerStream, err = c.session.OpenStream()
if err != nil {
return err
}
c.requestWriter = newRequestWriter(c.headerStream)
go c.handleHeaderStream()
return nil
}
func (c *client) handleHeaderStream() {
decoder := hpack.NewDecoder(4096, func(hf hpack.HeaderField) {})
h2framer := http2.NewFramer(nil, c.headerStream)
var err error
for err == nil {
err = c.readResponse(h2framer, decoder)
}
utils.Debugf("Error handling header stream: %s", err)
c.headerErr = qerr.Error(qerr.InvalidHeadersStreamData, err.Error())
// stop all running request
close(c.headerErrored)
}
func (c *client) readResponse(h2framer *http2.Framer, decoder *hpack.Decoder) error {
frame, err := h2framer.ReadFrame()
if err != nil {
return err
}
hframe, ok := frame.(*http2.HeadersFrame)
if !ok {
return errors.New("not a headers frame")
}
mhframe := &http2.MetaHeadersFrame{HeadersFrame: hframe}
mhframe.Fields, err = decoder.DecodeFull(hframe.HeaderBlockFragment())
if err != nil {
return fmt.Errorf("cannot read header fields: %s", err.Error())
}
c.mutex.RLock()
responseChan, ok := c.responses[protocol.StreamID(hframe.StreamID)]
c.mutex.RUnlock()
if !ok {
return fmt.Errorf("response channel for stream %d not found", hframe.StreamID)
}
rsp, err := responseFromHeaders(mhframe)
if err != nil {
return err
}
responseChan <- rsp
return nil
}
// Roundtrip executes a request and returns a response
func (c *client) RoundTrip(req *http.Request) (*http.Response, error) {
// TODO: add port to address, if it doesn't have one
if req.URL.Scheme != "https" {
return nil, errors.New("quic http2: unsupported scheme")
}
if authorityAddr("https", hostnameFromRequest(req)) != c.hostname {
return nil, fmt.Errorf("h2quic Client BUG: RoundTrip called for the wrong client (expected %s, got %s)", c.hostname, req.Host)
}
c.dialOnce.Do(func() {
c.handshakeErr = c.dial()
})
if c.handshakeErr != nil {
return nil, c.handshakeErr
}
hasBody := (req.Body != nil)
responseChan := make(chan *http.Response)
dataStream, err := c.session.OpenStreamSync()
if err != nil {
_ = c.CloseWithError(err)
return nil, err
}
c.mutex.Lock()
c.responses[dataStream.StreamID()] = responseChan
c.mutex.Unlock()
var requestedGzip bool
if !c.opts.DisableCompression && req.Header.Get("Accept-Encoding") == "" && req.Header.Get("Range") == "" && req.Method != "HEAD" {
requestedGzip = true
}
// TODO: add support for trailers
endStream := !hasBody
err = c.requestWriter.WriteRequest(req, dataStream.StreamID(), endStream, requestedGzip)
if err != nil {
_ = c.CloseWithError(err)
return nil, err
}
resc := make(chan error, 1)
if hasBody {
go func() {
resc <- c.writeRequestBody(dataStream, req.Body)
}()
}
var res *http.Response
var receivedResponse bool
var bodySent bool
if !hasBody {
bodySent = true
}
for !(bodySent && receivedResponse) {
select {
case res = <-responseChan:
receivedResponse = true
c.mutex.Lock()
delete(c.responses, dataStream.StreamID())
c.mutex.Unlock()
case err := <-resc:
bodySent = true
if err != nil {
return nil, err
}
case <-c.headerErrored:
			// an error occurred on the header stream
_ = c.CloseWithError(c.headerErr)
return nil, c.headerErr
}
}
// TODO: correctly set this variable
var streamEnded bool
isHead := (req.Method == "HEAD")
res = setLength(res, isHead, streamEnded)
if streamEnded || isHead {
res.Body = noBody
} else {
res.Body = dataStream
if requestedGzip && res.Header.Get("Content-Encoding") == "gzip" {
res.Header.Del("Content-Encoding")
res.Header.Del("Content-Length")
res.ContentLength = -1
res.Body = &gzipReader{body: res.Body}
res.Uncompressed = true
}
}
res.Request = req
return res, nil
}
func (c *client) writeRequestBody(dataStream quic.Stream, body io.ReadCloser) (err error) {
defer func() {
cerr := body.Close()
if err == nil {
// TODO: what to do with dataStream here? Maybe reset it?
err = cerr
}
}()
_, err = io.Copy(dataStream, body)
if err != nil {
// TODO: what to do with dataStream here? Maybe reset it?
return err
}
return dataStream.Close()
}
// CloseWithError closes the client with the given error
func (c *client) CloseWithError(e error) error {
if c.session == nil {
return nil
}
return c.session.Close(e)
}
func (c *client) Close() error {
return c.CloseWithError(nil)
}
// copied from net/transport.go
// authorityAddr returns a given authority (a host/IP, or host:port / ip:port)
// and returns a host:port. The port 443 is added if needed.
func authorityAddr(scheme string, authority string) (addr string) {
host, port, err := net.SplitHostPort(authority)
if err != nil { // authority didn't have a port
port = "443"
if scheme == "http" {
port = "80"
}
host = authority
}
if a, err := idna.ToASCII(host); err == nil {
host = a
}
// IPv6 address literal, without a port:
if strings.HasPrefix(host, "[") && strings.HasSuffix(host, "]") {
return host + ":" + port
}
return net.JoinHostPort(host, port)
}
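
The patch above introduces a package-level methodHEAD variable for the "HEAD" literal that RoundTrip compares against. For reference, a minimal sketch using the method constant that net/http already exports, which avoids both the string literal and the extra variable:

```go
package main

import (
	"fmt"
	"net/http"
)

// net/http exports the method names as constants, so the comparison can be
// written against http.MethodHead directly.
func isHead(req *http.Request) bool {
	return req.Method == http.MethodHead
}

func main() {
	req, err := http.NewRequest(http.MethodHead, "https://example.com", nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(isHead(req)) // true
}
```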
| 1 | 6,408 | You can use `http.MethodHead` here. | lucas-clemente-quic-go | go |
@@ -0,0 +1,5 @@
+package org.openqa.selenium.grid.session.remote;
+
+public class SessionTest {
+
+} | 1 | 1 | 16,858 | And another empty test. Please remove until there's content. | SeleniumHQ-selenium | py |
|
@@ -2,9 +2,9 @@ define(["localassetmanager"], function(localassetmanager) {
"use strict";
function processDownloadStatus(apiClient, serverInfo, options) {
- return console.log("[mediasync] Begin processDownloadStatus"), localassetmanager.resyncTransfers().then(function() {
+ return console.debug("mediasync: begin processDownloadStatus"), localassetmanager.resyncTransfers().then(function() {
return localassetmanager.getServerItems(serverInfo.Id).then(function(items) {
- console.log("[mediasync] Begin processDownloadStatus getServerItems completed");
+ console.debug("mediasync: begin processDownloadStatus getServerItems completed");
var p = Promise.resolve(),
cnt = 0;
return items.filter(function(item) { | 1 | define(["localassetmanager"], function(localassetmanager) {
"use strict";
function processDownloadStatus(apiClient, serverInfo, options) {
return console.log("[mediasync] Begin processDownloadStatus"), localassetmanager.resyncTransfers().then(function() {
return localassetmanager.getServerItems(serverInfo.Id).then(function(items) {
console.log("[mediasync] Begin processDownloadStatus getServerItems completed");
var p = Promise.resolve(),
cnt = 0;
return items.filter(function(item) {
return "transferring" === item.SyncStatus || "queued" === item.SyncStatus
}).forEach(function(item) {
p = p.then(function() {
return reportTransfer(apiClient, item)
}), cnt++
}), p.then(function() {
return console.log("[mediasync] Exit processDownloadStatus. Items reported: " + cnt.toString()), Promise.resolve()
})
})
})
}
function reportTransfer(apiClient, item) {
return localassetmanager.getItemFileSize(item.LocalPath).then(function(size) {
return size > 0 ? apiClient.reportSyncJobItemTransferred(item.SyncJobItemId).then(function() {
return item.SyncStatus = "synced", console.log("[mediasync] reportSyncJobItemTransferred called for " + item.LocalPath), localassetmanager.addOrUpdateLocalItem(item)
}, function(error) {
return console.error("[mediasync] Mediasync error on reportSyncJobItemTransferred", error), item.SyncStatus = "error", localassetmanager.addOrUpdateLocalItem(item)
}) : localassetmanager.isDownloadFileInQueue(item.LocalPath).then(function(result) {
return result ? Promise.resolve() : (console.log("[mediasync] reportTransfer: Size is 0 and download no longer in queue. Deleting item."), localassetmanager.removeLocalItem(item).then(function() {
return console.log("[mediasync] reportTransfer: Item deleted."), Promise.resolve()
}, function(err2) {
return console.log("[mediasync] reportTransfer: Failed to delete item.", err2), Promise.resolve()
}))
})
}, function(error) {
return console.error("[mediasync] reportTransfer: error on getItemFileSize. Deleting item.", error), localassetmanager.removeLocalItem(item).then(function() {
return console.log("[mediasync] reportTransfer: Item deleted."), Promise.resolve()
}, function(err2) {
return console.log("[mediasync] reportTransfer: Failed to delete item.", err2), Promise.resolve()
})
})
}
function reportOfflineActions(apiClient, serverInfo) {
return console.log("[mediasync] Begin reportOfflineActions"), localassetmanager.getUserActions(serverInfo.Id).then(function(actions) {
return actions.length ? apiClient.reportOfflineActions(actions).then(function() {
return localassetmanager.deleteUserActions(actions).then(function() {
return console.log("[mediasync] Exit reportOfflineActions (actions reported and deleted.)"), Promise.resolve()
})
}, function(err) {
return console.error("[mediasync] error on apiClient.reportOfflineActions: " + err.toString()), localassetmanager.deleteUserActions(actions)
}) : (console.log("[mediasync] Exit reportOfflineActions (no actions)"), Promise.resolve())
})
}
function syncData(apiClient, serverInfo) {
return console.log("[mediasync] Begin syncData"), localassetmanager.getServerItems(serverInfo.Id).then(function(items) {
var completedItems = items.filter(function(item) {
return item && ("synced" === item.SyncStatus || "error" === item.SyncStatus)
}),
request = {
TargetId: apiClient.deviceId(),
LocalItemIds: completedItems.map(function(xitem) {
return xitem.ItemId
})
};
return apiClient.syncData(request).then(function(result) {
return afterSyncData(apiClient, serverInfo, result).then(function() {
return console.log("[mediasync] Exit syncData"), Promise.resolve()
}, function(err) {
return console.error("[mediasync] Error in syncData: " + err.toString()), Promise.resolve()
})
})
})
}
function afterSyncData(apiClient, serverInfo, syncDataResult) {
console.log("[mediasync] Begin afterSyncData");
var p = Promise.resolve();
return syncDataResult.ItemIdsToRemove && syncDataResult.ItemIdsToRemove.length > 0 && syncDataResult.ItemIdsToRemove.forEach(function(itemId) {
p = p.then(function() {
return removeLocalItem(itemId, serverInfo.Id)
})
}), p = p.then(function() {
return removeObsoleteContainerItems(serverInfo.Id)
}), p.then(function() {
return console.log("[mediasync] Exit afterSyncData"), Promise.resolve()
})
}
function removeObsoleteContainerItems(serverId) {
return console.log("[mediasync] Begin removeObsoleteContainerItems"), localassetmanager.removeObsoleteContainerItems(serverId)
}
function removeLocalItem(itemId, serverId) {
return console.log("[mediasync] Begin removeLocalItem"), localassetmanager.getLocalItem(serverId, itemId).then(function(item) {
return item ? localassetmanager.removeLocalItem(item) : Promise.resolve()
}, function(err2) {
return console.error("[mediasync] removeLocalItem: Failed: ", err2), Promise.resolve()
})
}
function getNewMedia(apiClient, downloadCount) {
return console.log("[mediasync] Begin getNewMedia"), apiClient.getReadySyncItems(apiClient.deviceId()).then(function(jobItems) {
console.log("[mediasync] getReadySyncItems returned " + jobItems.length + " items");
var p = Promise.resolve(),
currentCount = downloadCount;
return jobItems.forEach(function(jobItem) {
currentCount++ <= 10 && (p = p.then(function() {
return getNewItem(jobItem, apiClient)
}))
}), p.then(function() {
return console.log("[mediasync] Exit getNewMedia"), Promise.resolve()
})
}, function(err) {
return console.error("[mediasync] getReadySyncItems: Failed: ", err), Promise.resolve()
})
}
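    // afterMediaDownloaded: once the media file is on disk, fetch its artwork, then any parent
    // items (series, season, album), and finally external subtitle files.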
function afterMediaDownloaded(apiClient, jobItem, localItem) {
return console.log("[mediasync] Begin afterMediaDownloaded"), getImages(apiClient, jobItem, localItem).then(function() {
var libraryItem = jobItem.Item;
return downloadParentItems(apiClient, jobItem, libraryItem).then(function() {
return getSubtitles(apiClient, jobItem, localItem)
})
})
}
function createLocalItem(libraryItem, jobItem) {
console.log("[localassetmanager] Begin createLocalItem");
var item = {
Item: libraryItem,
ItemId: libraryItem.Id,
ServerId: libraryItem.ServerId,
Id: libraryItem.Id
};
return jobItem && (item.SyncJobItemId = jobItem.SyncJobItemId), console.log("[localassetmanager] End createLocalItem"), item
}
function getNewItem(jobItem, apiClient) {
console.log("[mediasync] Begin getNewItem");
var libraryItem = jobItem.Item;
return localassetmanager.getLocalItem(libraryItem.ServerId, libraryItem.Id).then(function(existingItem) {
if (existingItem && ("queued" === existingItem.SyncStatus || "transferring" === existingItem.SyncStatus || "synced" === existingItem.SyncStatus) && (console.log("[mediasync] getNewItem: getLocalItem found existing item"), localassetmanager.enableBackgroundCompletion())) return Promise.resolve();
libraryItem.CanDelete = !1, libraryItem.CanDownload = !1, libraryItem.SupportsSync = !1, libraryItem.People = [], libraryItem.Chapters = [], libraryItem.Studios = [], libraryItem.SpecialFeatureCount = null, libraryItem.LocalTrailerCount = null, libraryItem.RemoteTrailers = [];
var localItem = createLocalItem(libraryItem, jobItem);
return localItem.SyncStatus = "queued", downloadMedia(apiClient, jobItem, localItem)
})
}
function downloadParentItems(apiClient, jobItem, libraryItem) {
var p = Promise.resolve();
return libraryItem.SeriesId && (p = p.then(function() {
return downloadItem(apiClient, libraryItem.SeriesId)
})), libraryItem.SeasonId && (p = p.then(function() {
return downloadItem(apiClient, libraryItem.SeasonId).then(function(seasonItem) {
return libraryItem.SeasonPrimaryImageTag = (seasonItem.Item.ImageTags || {}).Primary, Promise.resolve()
})
})), libraryItem.AlbumId && (p = p.then(function() {
return downloadItem(apiClient, libraryItem.AlbumId)
})), p
}
function downloadItem(apiClient, itemId) {
return apiClient.getItem(apiClient.getCurrentUserId(), itemId).then(function(downloadedItem) {
downloadedItem.CanDelete = !1, downloadedItem.CanDownload = !1, downloadedItem.SupportsSync = !1, downloadedItem.People = [], downloadedItem.SpecialFeatureCount = null, downloadedItem.BackdropImageTags = null, downloadedItem.ParentBackdropImageTags = null, downloadedItem.ParentArtImageTag = null, downloadedItem.ParentLogoImageTag = null;
var localItem = createLocalItem(downloadedItem, null);
return localassetmanager.addOrUpdateLocalItem(localItem).then(function() {
return Promise.resolve(localItem)
}, function(err) {
return console.error("[mediasync] downloadItem failed: " + err.toString()), Promise.resolve(null)
})
})
}
function ensureLocalPathParts(localItem, jobItem) {
if (!localItem.LocalPathParts) {
var libraryItem = localItem.Item,
parts = localassetmanager.getDirectoryPath(libraryItem);
parts.push(localassetmanager.getLocalFileName(libraryItem, jobItem.OriginalFileName)), localItem.LocalPathParts = parts
}
}
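    // downloadMedia: downloads the media file from the Sync/JobItems/{id}/File endpoint, rewrites
    // the item's MediaSources to the local path (Protocol "File"), and saves the item with
    // SyncStatus "transferring" before artwork and subtitles are fetched.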
function downloadMedia(apiClient, jobItem, localItem) {
console.log("[mediasync] downloadMedia: start.");
var url = apiClient.getUrl("Sync/JobItems/" + jobItem.SyncJobItemId + "/File", {
api_key: apiClient.accessToken()
});
return ensureLocalPathParts(localItem, jobItem), localassetmanager.downloadFile(url, localItem).then(function(result) {
console.log("[mediasync] downloadMedia-downloadFile returned path: " + result.path);
var localPath = result.path,
libraryItem = localItem.Item;
if (localPath && libraryItem.MediaSources)
for (var i = 0; i < libraryItem.MediaSources.length; i++) {
var mediaSource = libraryItem.MediaSources[i];
mediaSource.Path = localPath, mediaSource.Protocol = "File"
}
return localItem.LocalPath = localPath, localItem.SyncStatus = "transferring", localassetmanager.addOrUpdateLocalItem(localItem).then(function() {
return afterMediaDownloaded(apiClient, jobItem, localItem).then(function() {
return result.isComplete ? (localItem.SyncStatus = "synced", reportTransfer(apiClient, localItem)) : Promise.resolve()
}, function(err) {
return console.log("[mediasync] downloadMedia: afterMediaDownloaded failed: " + err), Promise.reject(err)
})
}, function(err) {
return console.log("[mediasync] downloadMedia: addOrUpdateLocalItem failed: " + err), Promise.reject(err)
})
}, function(err) {
return console.log("[mediasync] downloadMedia: localassetmanager.downloadFile failed: " + err), Promise.reject(err)
})
}
function getImages(apiClient, jobItem, localItem) {
console.log("[mediasync] Begin getImages");
var p = Promise.resolve(),
libraryItem = localItem.Item,
serverId = libraryItem.ServerId,
mainImageTag = (libraryItem.ImageTags || {}).Primary;
libraryItem.Id && mainImageTag && (p = p.then(function() {
return downloadImage(localItem, apiClient, serverId, libraryItem.Id, mainImageTag, "Primary")
}));
var logoImageTag = (libraryItem.ImageTags || {}).Logo;
libraryItem.Id && logoImageTag && (p = p.then(function() {
return downloadImage(localItem, apiClient, serverId, libraryItem.Id, logoImageTag, "Logo")
}));
var artImageTag = (libraryItem.ImageTags || {}).Art;
libraryItem.Id && artImageTag && (p = p.then(function() {
return downloadImage(localItem, apiClient, serverId, libraryItem.Id, artImageTag, "Art")
}));
var bannerImageTag = (libraryItem.ImageTags || {}).Banner;
libraryItem.Id && bannerImageTag && (p = p.then(function() {
return downloadImage(localItem, apiClient, serverId, libraryItem.Id, bannerImageTag, "Banner")
}));
var thumbImageTag = (libraryItem.ImageTags || {}).Thumb;
if (libraryItem.Id && thumbImageTag && (p = p.then(function() {
return downloadImage(localItem, apiClient, serverId, libraryItem.Id, thumbImageTag, "Thumb")
})), libraryItem.Id && libraryItem.BackdropImageTags)
for (var i = 0; i < libraryItem.BackdropImageTags.length; i++);
return libraryItem.SeriesId && libraryItem.SeriesPrimaryImageTag && (p = p.then(function() {
return downloadImage(localItem, apiClient, serverId, libraryItem.SeriesId, libraryItem.SeriesPrimaryImageTag, "Primary")
})), libraryItem.SeriesId && libraryItem.SeriesThumbImageTag && (p = p.then(function() {
return downloadImage(localItem, apiClient, serverId, libraryItem.SeriesId, libraryItem.SeriesThumbImageTag, "Thumb")
})), libraryItem.SeasonId && libraryItem.SeasonPrimaryImageTag && (p = p.then(function() {
return downloadImage(localItem, apiClient, serverId, libraryItem.SeasonId, libraryItem.SeasonPrimaryImageTag, "Primary")
})), libraryItem.AlbumId && libraryItem.AlbumPrimaryImageTag && (p = p.then(function() {
return downloadImage(localItem, apiClient, serverId, libraryItem.AlbumId, libraryItem.AlbumPrimaryImageTag, "Primary")
})), libraryItem.ParentThumbItemId && libraryItem.ParentThumbImageTag && (p = p.then(function() {
return downloadImage(localItem, apiClient, serverId, libraryItem.ParentThumbItemId, libraryItem.ParentThumbImageTag, "Thumb")
})), libraryItem.ParentPrimaryImageItemId && libraryItem.ParentPrimaryImageTag && (p = p.then(function() {
return downloadImage(localItem, apiClient, serverId, libraryItem.ParentPrimaryImageItemId, libraryItem.ParentPrimaryImageTag, "Primary")
})), p.then(function() {
return console.log("[mediasync] Finished getImages"), localassetmanager.addOrUpdateLocalItem(localItem)
}, function(err) {
return console.log("[mediasync] Error getImages: " + err.toString()), Promise.resolve()
})
}
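    // downloadImage: skips images that are already cached locally; requests are scaled to a
    // maximum width of 400 pixels (no limit for the "backdrop" type).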
function downloadImage(localItem, apiClient, serverId, itemId, imageTag, imageType, index) {
return index = index || 0, localassetmanager.hasImage(serverId, itemId, imageType, index).then(function(hasImage) {
if (hasImage) return console.log("[mediasync] downloadImage - skip existing: " + itemId + " " + imageType + "_" + index.toString()), Promise.resolve();
var maxWidth = 400;
"backdrop" === imageType && (maxWidth = null);
var imageUrl = apiClient.getScaledImageUrl(itemId, {
tag: imageTag,
type: imageType,
maxWidth: maxWidth,
api_key: apiClient.accessToken()
});
return console.log("[mediasync] downloadImage " + itemId + " " + imageType + "_" + index.toString()), localassetmanager.downloadImage(localItem, imageUrl, serverId, itemId, imageType, index).then(function(result) {
return Promise.resolve(result)
}, function(err) {
return console.log("[mediasync] Error downloadImage: " + err.toString()), Promise.resolve()
})
}, function(err) {
return console.log("[mediasync] Error downloadImage: " + err.toString()), Promise.resolve()
})
}
function getSubtitles(apiClient, jobItem, localItem) {
if (console.log("[mediasync] Begin getSubtitles"), !jobItem.Item.MediaSources.length) return console.log("[mediasync] Cannot download subtitles because video has no media source info."), Promise.resolve();
var files = jobItem.AdditionalFiles.filter(function(f) {
return "Subtitles" === f.Type
}),
mediaSource = jobItem.Item.MediaSources[0],
p = Promise.resolve();
return files.forEach(function(file) {
p = p.then(function() {
return getItemSubtitle(file, apiClient, jobItem, localItem, mediaSource)
})
}), p.then(function() {
return console.log("[mediasync] Exit getSubtitles"), Promise.resolve()
})
}
function getItemSubtitle(file, apiClient, jobItem, localItem, mediaSource) {
console.log("[mediasync] Begin getItemSubtitle");
var subtitleStream = mediaSource.MediaStreams.filter(function(m) {
return "Subtitle" === m.Type && m.Index === file.Index
})[0];
if (!subtitleStream) return console.log("[mediasync] Cannot download subtitles because matching stream info was not found."), Promise.resolve();
var url = apiClient.getUrl("Sync/JobItems/" + jobItem.SyncJobItemId + "/AdditionalFiles", {
Name: file.Name,
api_key: apiClient.accessToken()
}),
fileName = localassetmanager.getSubtitleSaveFileName(localItem, jobItem.OriginalFileName, subtitleStream.Language, subtitleStream.IsForced, subtitleStream.Codec);
return localassetmanager.downloadSubtitles(url, fileName).then(function(subtitleResult) {
return localItem.AdditionalFiles && localItem.AdditionalFiles.forEach(function(item) {
item.Name === file.Name && (item.Path = subtitleResult.path)
}), subtitleStream.Path = subtitleResult.path, subtitleStream.DeliveryMethod = "External", localassetmanager.addOrUpdateLocalItem(localItem)
})
}
function checkLocalFileExistence(apiClient, serverInfo, options) {
return options.checkFileExistence ? (console.log("[mediasync] Begin checkLocalFileExistence"), localassetmanager.getServerItems(serverInfo.Id).then(function(items) {
var completedItems = items.filter(function(item) {
return item && ("synced" === item.SyncStatus || "error" === item.SyncStatus)
}),
p = Promise.resolve();
return completedItems.forEach(function(completedItem) {
p = p.then(function() {
return localassetmanager.fileExists(completedItem.LocalPath).then(function(exists) {
return exists ? Promise.resolve() : localassetmanager.removeLocalItem(completedItem).then(function() {
return Promise.resolve()
}, function() {
return Promise.resolve()
})
})
})
}), p
})) : Promise.resolve()
}
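    // Exported sync() entry point: when the web worker base URL is an ms-appx:// URI, download
    // status is processed before the first syncData call; on other platforms syncData runs first.
    // Both variants stop early when options.syncCheckProgressOnly is set and more than two
    // downloads are still pending.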
return function() {
var self = this;
"string" == typeof webWorkerBaseUrl && -1 !== webWorkerBaseUrl.indexOf("ms-appx://") ? self.sync = function(apiClient, serverInfo, options) {
return console.log("[mediasync]************************************* Start sync"), checkLocalFileExistence(apiClient, serverInfo, options).then(function() {
return processDownloadStatus(apiClient, serverInfo, options).then(function() {
return localassetmanager.getDownloadItemCount().then(function(downloadCount) {
return !0 === options.syncCheckProgressOnly && downloadCount > 2 ? Promise.resolve() : reportOfflineActions(apiClient, serverInfo).then(function() {
return getNewMedia(apiClient, downloadCount).then(function() {
return syncData(apiClient, serverInfo).then(function() {
return console.log("[mediasync]************************************* Exit sync"), Promise.resolve()
})
})
})
})
})
}, function(err) {
console.error(err.toString())
})
} : self.sync = function(apiClient, serverInfo, options) {
return console.log("[mediasync]************************************* Start sync"), checkLocalFileExistence(apiClient, serverInfo, options).then(function() {
return syncData(apiClient, serverInfo).then(function() {
return processDownloadStatus(apiClient, serverInfo, options).then(function() {
return localassetmanager.getDownloadItemCount().then(function(downloadCount) {
return !0 === options.syncCheckProgressOnly && downloadCount > 2 ? Promise.resolve() : reportOfflineActions(apiClient, serverInfo).then(function() {
return getNewMedia(apiClient, downloadCount).then(function() {
return syncData(apiClient, serverInfo)
})
})
})
})
})
}, function(err) {
console.error(err.toString())
})
}
}
}); | 1 | 13,293 | You really didn't need to do any of these, we need to remove the API client from source eventually anyway. | jellyfin-jellyfin-web | js |
@@ -58,6 +58,9 @@ import org.tikv.kvproto.Pdpb.TsoResponse;
public class PDClient extends AbstractGRPCClient<PDBlockingStub, PDStub>
implements ReadOnlyPDClient {
+
+ private TiSession session;
+
private RequestHeader header;
private TsoRequest tsoReq;
private volatile LeaderWrapper leaderWrapper; | 1 | /*
* Copyright 2017 PingCAP, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pingcap.tikv;
import static com.google.common.base.Preconditions.checkNotNull;
import static com.pingcap.tikv.operation.PDErrorHandler.getRegionResponseErrorExtractor;
import static com.pingcap.tikv.pd.PDError.buildFromPdpbError;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import com.google.protobuf.ByteString;
import com.pingcap.tikv.codec.Codec.BytesCodec;
import com.pingcap.tikv.codec.CodecDataOutput;
import com.pingcap.tikv.exception.GrpcException;
import com.pingcap.tikv.exception.TiClientInternalException;
import com.pingcap.tikv.meta.TiTimestamp;
import com.pingcap.tikv.operation.PDErrorHandler;
import com.pingcap.tikv.pd.PDUtils;
import com.pingcap.tikv.region.TiRegion;
import com.pingcap.tikv.util.BackOffer;
import com.pingcap.tikv.util.FutureObserver;
import io.grpc.ManagedChannel;
import java.net.URI;
import java.util.List;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.function.Supplier;
import org.tikv.kvproto.Metapb.Store;
import org.tikv.kvproto.PDGrpc;
import org.tikv.kvproto.PDGrpc.PDBlockingStub;
import org.tikv.kvproto.PDGrpc.PDStub;
import org.tikv.kvproto.Pdpb.GetMembersRequest;
import org.tikv.kvproto.Pdpb.GetMembersResponse;
import org.tikv.kvproto.Pdpb.GetRegionByIDRequest;
import org.tikv.kvproto.Pdpb.GetRegionRequest;
import org.tikv.kvproto.Pdpb.GetRegionResponse;
import org.tikv.kvproto.Pdpb.GetStoreRequest;
import org.tikv.kvproto.Pdpb.GetStoreResponse;
import org.tikv.kvproto.Pdpb.RequestHeader;
import org.tikv.kvproto.Pdpb.Timestamp;
import org.tikv.kvproto.Pdpb.TsoRequest;
import org.tikv.kvproto.Pdpb.TsoResponse;
public class PDClient extends AbstractGRPCClient<PDBlockingStub, PDStub>
implements ReadOnlyPDClient {
private RequestHeader header;
private TsoRequest tsoReq;
private volatile LeaderWrapper leaderWrapper;
private ScheduledExecutorService service;
private List<URI> pdAddrs;
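
  /*
   * Illustrative read-only usage (a sketch only; ConcreteBackOffer is assumed to be the BackOffer
   * factory shipped with this client):
   *
   *   ReadOnlyPDClient pd = PDClient.create(session);
   *   TiTimestamp startTs = pd.getTimestamp(ConcreteBackOffer.newTsoBackOff());
   *   TiRegion region = pd.getRegionByKey(ConcreteBackOffer.newGetBackOff(), key);
   */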
@Override
public TiTimestamp getTimestamp(BackOffer backOffer) {
Supplier<TsoRequest> request = () -> tsoReq;
PDErrorHandler<TsoResponse> handler =
new PDErrorHandler<>(
r -> r.getHeader().hasError() ? buildFromPdpbError(r.getHeader().getError()) : null,
this);
TsoResponse resp = callWithRetry(backOffer, PDGrpc.METHOD_TSO, request, handler);
Timestamp timestamp = resp.getTimestamp();
return new TiTimestamp(timestamp.getPhysical(), timestamp.getLogical());
}
@Override
public TiRegion getRegionByKey(BackOffer backOffer, ByteString key) {
CodecDataOutput cdo = new CodecDataOutput();
BytesCodec.writeBytes(cdo, key.toByteArray());
ByteString encodedKey = cdo.toByteString();
Supplier<GetRegionRequest> request =
() -> GetRegionRequest.newBuilder().setHeader(header).setRegionKey(encodedKey).build();
PDErrorHandler<GetRegionResponse> handler =
new PDErrorHandler<>(getRegionResponseErrorExtractor, this);
GetRegionResponse resp = callWithRetry(backOffer, PDGrpc.METHOD_GET_REGION, request, handler);
return new TiRegion(
resp.getRegion(), resp.getLeader(), conf.getIsolationLevel(), conf.getCommandPriority());
}
@Override
public Future<TiRegion> getRegionByKeyAsync(BackOffer backOffer, ByteString key) {
FutureObserver<TiRegion, GetRegionResponse> responseObserver =
new FutureObserver<>(
resp ->
new TiRegion(
resp.getRegion(),
resp.getLeader(),
conf.getIsolationLevel(),
conf.getCommandPriority()));
Supplier<GetRegionRequest> request =
() -> GetRegionRequest.newBuilder().setHeader(header).setRegionKey(key).build();
PDErrorHandler<GetRegionResponse> handler =
new PDErrorHandler<>(getRegionResponseErrorExtractor, this);
callAsyncWithRetry(backOffer, PDGrpc.METHOD_GET_REGION, request, responseObserver, handler);
return responseObserver.getFuture();
}
@Override
public TiRegion getRegionByID(BackOffer backOffer, long id) {
Supplier<GetRegionByIDRequest> request =
() -> GetRegionByIDRequest.newBuilder().setHeader(header).setRegionId(id).build();
PDErrorHandler<GetRegionResponse> handler =
new PDErrorHandler<>(getRegionResponseErrorExtractor, this);
GetRegionResponse resp =
callWithRetry(backOffer, PDGrpc.METHOD_GET_REGION_BY_ID, request, handler);
// Instead of using default leader instance, explicitly set no leader to null
return new TiRegion(
resp.getRegion(), resp.getLeader(), conf.getIsolationLevel(), conf.getCommandPriority());
}
@Override
public Future<TiRegion> getRegionByIDAsync(BackOffer backOffer, long id) {
FutureObserver<TiRegion, GetRegionResponse> responseObserver =
new FutureObserver<>(
resp ->
new TiRegion(
resp.getRegion(),
resp.getLeader(),
conf.getIsolationLevel(),
conf.getCommandPriority()));
Supplier<GetRegionByIDRequest> request =
() -> GetRegionByIDRequest.newBuilder().setHeader(header).setRegionId(id).build();
PDErrorHandler<GetRegionResponse> handler =
new PDErrorHandler<>(getRegionResponseErrorExtractor, this);
callAsyncWithRetry(
backOffer, PDGrpc.METHOD_GET_REGION_BY_ID, request, responseObserver, handler);
return responseObserver.getFuture();
}
  private Supplier<GetStoreRequest> buildGetStoreReq(long storeId) {
return () -> GetStoreRequest.newBuilder().setHeader(header).setStoreId(storeId).build();
}
private PDErrorHandler<GetStoreResponse> buildPDErrorHandler() {
return new PDErrorHandler<>(
r -> r.getHeader().hasError() ? buildFromPdpbError(r.getHeader().getError()) : null, this);
}
@Override
public Store getStore(BackOffer backOffer, long storeId) {
return callWithRetry(
        backOffer, PDGrpc.METHOD_GET_STORE, buildGetStoreReq(storeId), buildPDErrorHandler())
.getStore();
}
@Override
public Future<Store> getStoreAsync(BackOffer backOffer, long storeId) {
FutureObserver<Store, GetStoreResponse> responseObserver =
new FutureObserver<>(GetStoreResponse::getStore);
callAsyncWithRetry(
backOffer,
PDGrpc.METHOD_GET_STORE,
        buildGetStoreReq(storeId),
responseObserver,
buildPDErrorHandler());
return responseObserver.getFuture();
}
@Override
public void close() throws InterruptedException {
if (service != null) {
service.shutdownNow();
}
}
public static ReadOnlyPDClient create(TiSession session) {
return createRaw(session);
}
@VisibleForTesting
RequestHeader getHeader() {
return header;
}
@VisibleForTesting
LeaderWrapper getLeaderWrapper() {
return leaderWrapper;
}
class LeaderWrapper {
private final String leaderInfo;
private final PDBlockingStub blockingStub;
private final PDStub asyncStub;
private final long createTime;
LeaderWrapper(
String leaderInfo,
PDGrpc.PDBlockingStub blockingStub,
PDGrpc.PDStub asyncStub,
long createTime) {
this.leaderInfo = leaderInfo;
this.blockingStub = blockingStub;
this.asyncStub = asyncStub;
this.createTime = createTime;
}
String getLeaderInfo() {
return leaderInfo;
}
PDBlockingStub getBlockingStub() {
return blockingStub;
}
PDStub getAsyncStub() {
return asyncStub;
}
long getCreateTime() {
return createTime;
}
@Override
public String toString() {
return "[leaderInfo: " + leaderInfo + "]";
}
}
private GetMembersResponse getMembers(URI url) {
try {
ManagedChannel probChan = session.getChannel(url.getHost() + ":" + url.getPort());
PDGrpc.PDBlockingStub stub = PDGrpc.newBlockingStub(probChan);
GetMembersRequest request =
GetMembersRequest.newBuilder().setHeader(RequestHeader.getDefaultInstance()).build();
return stub.getMembers(request);
} catch (Exception e) {
logger.warn("failed to get member from pd server.", e);
}
return null;
}
synchronized boolean switchLeader(List<String> leaderURLs) {
if (leaderURLs.isEmpty()) return false;
String leaderUrlStr = leaderURLs.get(0);
// TODO: Why not strip protocol info on server side since grpc does not need it
if (leaderWrapper != null && leaderUrlStr.equals(leaderWrapper.getLeaderInfo())) {
return true;
}
// switch leader
return createLeaderWrapper(leaderUrlStr);
}
private boolean createLeaderWrapper(String leaderUrlStr) {
try {
URI newLeader = PDUtils.addrToUrl(leaderUrlStr);
leaderUrlStr = newLeader.getHost() + ":" + newLeader.getPort();
if (leaderWrapper != null && leaderUrlStr.equals(leaderWrapper.getLeaderInfo())) {
return true;
}
// create new Leader
ManagedChannel clientChannel = session.getChannel(leaderUrlStr);
leaderWrapper =
new LeaderWrapper(
leaderUrlStr,
PDGrpc.newBlockingStub(clientChannel),
PDGrpc.newStub(clientChannel),
System.nanoTime());
} catch (IllegalArgumentException e) {
logger.error("Error updating leader. " + leaderUrlStr, e);
return false;
}
logger.info(String.format("Switched to new leader: %s", leaderWrapper));
return true;
}
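
  // Leader discovery: updateLeader() asks every configured PD address for the cluster members and
  // switches the gRPC stubs to the first reported leader; initCluster() schedules this check to
  // run once a minute so the client follows PD leader changes.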
public void updateLeader() {
for (URI url : this.pdAddrs) {
      // ask each PD server for its member list so we can update the leader's address
GetMembersResponse resp = getMembers(url);
if (resp == null) {
continue;
}
// if leader is switched, just return.
if (switchLeader(resp.getLeader().getClientUrlsList())) {
return;
}
}
throw new TiClientInternalException(
"already tried all address on file, but not leader found yet.");
}
@Override
protected PDBlockingStub getBlockingStub() {
if (leaderWrapper == null) {
throw new GrpcException("PDClient may not be initialized");
}
return leaderWrapper
.getBlockingStub()
.withDeadlineAfter(getConf().getTimeout(), getConf().getTimeoutUnit());
}
@Override
protected PDStub getAsyncStub() {
if (leaderWrapper == null) {
throw new GrpcException("PDClient may not be initialized");
}
return leaderWrapper
.getAsyncStub()
.withDeadlineAfter(getConf().getTimeout(), getConf().getTimeoutUnit());
}
private PDClient(TiSession session) {
super(session);
}
private void initCluster() {
GetMembersResponse resp = null;
List<URI> pdAddrs = getSession().getConf().getPdAddrs();
for (URI u : pdAddrs) {
resp = getMembers(u);
if (resp != null) {
break;
}
}
checkNotNull(resp, "Failed to init client for PD cluster.");
long clusterId = resp.getHeader().getClusterId();
header = RequestHeader.newBuilder().setClusterId(clusterId).build();
tsoReq = TsoRequest.newBuilder().setHeader(header).setCount(1).build();
this.pdAddrs = pdAddrs;
createLeaderWrapper(resp.getLeader().getClientUrls(0));
service =
Executors.newSingleThreadScheduledExecutor(
new ThreadFactoryBuilder().setDaemon(true).build());
service.scheduleAtFixedRate(
() -> {
// Wrap this with a try catch block in case schedule update fails
try {
updateLeader();
} catch (Exception e) {
logger.warn("Update leader failed", e);
}
},
1,
1,
TimeUnit.MINUTES);
}
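
  // createRaw() constructs the client and loads cluster metadata; if initialization fails the
  // half-constructed client is closed before the exception is rethrown.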
static PDClient createRaw(TiSession session) {
PDClient client = null;
try {
client = new PDClient(session);
client.initCluster();
} catch (Exception e) {
if (client != null) {
try {
client.close();
} catch (InterruptedException ignore) {
}
}
throw e;
}
return client;
}
}
| 1 | 9,181 | seems pretty strange here because TiSession contains PDClient. | pingcap-tispark | java |
@@ -73,4 +73,11 @@ class NotesControllerTest < ActionDispatch::IntegrationTest
assert_response :success
assert_select "table.note_list tr", :count => 11
end
+
+ def test_empty_page
+ user = create(:user)
+ get user_notes_path(:display_name => user.display_name)
+ assert_response :success
+ assert_select "h4", :html => "No notes"
+ end
end | 1 | require "test_helper"
class NotesControllerTest < ActionDispatch::IntegrationTest
def setup
super
# Stub nominatim response for note locations
stub_request(:get, %r{^https://nominatim\.openstreetmap\.org/reverse\?})
.to_return(:status => 404)
end
##
# test all routes which lead to this controller
def test_routes
assert_routing(
{ :path => "/user/username/notes", :method => :get },
{ :controller => "notes", :action => "index", :display_name => "username" }
)
end
def test_index_success
first_user = create(:user)
second_user = create(:user)
moderator_user = create(:moderator_user)
create(:note) do |note|
create(:note_comment, :note => note, :author => first_user)
end
create(:note) do |note|
create(:note_comment, :note => note, :author => second_user)
end
create(:note, :status => "hidden") do |note|
create(:note_comment, :note => note, :author => second_user)
end
# Note that the table rows include a header row
get user_notes_path(:display_name => first_user.display_name)
assert_response :success
assert_select "table.note_list tr", :count => 2
get user_notes_path(:display_name => second_user.display_name)
assert_response :success
assert_select "table.note_list tr", :count => 2
get user_notes_path(:display_name => "non-existent")
assert_response :not_found
session_for(moderator_user)
get user_notes_path(:display_name => first_user.display_name)
assert_response :success
assert_select "table.note_list tr", :count => 2
get user_notes_path(:display_name => second_user.display_name)
assert_response :success
assert_select "table.note_list tr", :count => 3
get user_notes_path(:display_name => "non-existent")
assert_response :not_found
end
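
  # Each page is expected to render 11 table rows: one header row plus ten of the 50 notes
  # created in the test.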
def test_index_paged
user = create(:user)
create_list(:note, 50) do |note|
create(:note_comment, :note => note, :author => user)
end
get user_notes_path(:display_name => user.display_name)
assert_response :success
assert_select "table.note_list tr", :count => 11
get user_notes_path(:display_name => user.display_name, :page => 2)
assert_response :success
assert_select "table.note_list tr", :count => 11
end
end
| 1 | 13,306 | I created a test method for this scenario. Would you normally split it out like that or bundle this into one of the existing test methods? I'm more accustomed to creating lots of separate `it` blocks in nested `context` blocks in rspec | openstreetmap-openstreetmap-website | rb |
@@ -76,6 +76,8 @@ public abstract class NewSessionQueuer implements HasReadyState, Routable {
.with(requiresSecret),
get("/se/grid/newsessionqueuer/queue/size")
.to(() -> new GetNewSessionQueueSize(tracer, this)),
+ get("/se/grid/newsessionqueue")
+ .to(() -> new GetSessionQueue(tracer, this)),
delete("/se/grid/newsessionqueuer/queue")
.to(() -> new ClearSessionQueue(tracer, this))
.with(requiresSecret)); | 1 | // Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.openqa.selenium.grid.sessionqueue;
import static org.openqa.selenium.remote.http.Contents.reader;
import static org.openqa.selenium.remote.http.Route.combine;
import static org.openqa.selenium.remote.http.Route.delete;
import static org.openqa.selenium.remote.http.Route.get;
import static org.openqa.selenium.remote.http.Route.post;
import static org.openqa.selenium.remote.tracing.Tags.EXCEPTION;
import org.openqa.selenium.Capabilities;
import org.openqa.selenium.SessionNotCreatedException;
import org.openqa.selenium.grid.data.RequestId;
import org.openqa.selenium.grid.security.RequiresSecretFilter;
import org.openqa.selenium.grid.security.Secret;
import org.openqa.selenium.internal.Require;
import org.openqa.selenium.remote.NewSessionPayload;
import org.openqa.selenium.remote.http.HttpRequest;
import org.openqa.selenium.remote.http.HttpResponse;
import org.openqa.selenium.remote.http.Routable;
import org.openqa.selenium.remote.http.Route;
import org.openqa.selenium.remote.tracing.AttributeKey;
import org.openqa.selenium.remote.tracing.EventAttribute;
import org.openqa.selenium.remote.tracing.EventAttributeValue;
import org.openqa.selenium.remote.tracing.Span;
import org.openqa.selenium.remote.tracing.Tracer;
import org.openqa.selenium.status.HasReadyState;
import java.io.IOException;
import java.io.Reader;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.UUID;
public abstract class NewSessionQueuer implements HasReadyState, Routable {
private final Route routes;
protected final Tracer tracer;
protected NewSessionQueuer(Tracer tracer, Secret registrationSecret) {
this.tracer = Require.nonNull("Tracer", tracer);
Require.nonNull("Registration secret", registrationSecret);
RequiresSecretFilter requiresSecret = new RequiresSecretFilter(registrationSecret);
routes = combine(
post("/session")
.to(() -> this::addToQueue),
post("/se/grid/newsessionqueuer/session")
.to(() -> new AddToSessionQueue(tracer, this)),
post("/se/grid/newsessionqueuer/session/retry/{requestId}")
.to(params -> new AddBackToSessionQueue(tracer, this, requestIdFrom(params)))
.with(requiresSecret),
get("/se/grid/newsessionqueuer/session/{requestId}")
.to(params -> new RemoveFromSessionQueue(tracer, this, requestIdFrom(params)))
.with(requiresSecret),
get("/se/grid/newsessionqueuer/queue/size")
.to(() -> new GetNewSessionQueueSize(tracer, this)),
delete("/se/grid/newsessionqueuer/queue")
.to(() -> new ClearSessionQueue(tracer, this))
.with(requiresSecret));
}
private RequestId requestIdFrom(Map<String, String> params) {
return new RequestId(UUID.fromString(params.get("requestId")));
}
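
  // Parses the request body as a new-session payload and throws SessionNotCreatedException when no
  // capabilities are present or the payload cannot be read; failures are also recorded as events on
  // the active tracing span.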
public void validateSessionRequest(HttpRequest request) {
try (Span span = tracer.getCurrentContext().createSpan("newsession_queuer.validate")) {
Map<String, EventAttributeValue> attributeMap = new HashMap<>();
try (
Reader reader = reader(request);
NewSessionPayload payload = NewSessionPayload.create(reader)) {
Objects.requireNonNull(payload, "Requests to process must be set.");
attributeMap.put("request.payload", EventAttribute.setValue(payload.toString()));
Iterator<Capabilities> iterator = payload.stream().iterator();
if (!iterator.hasNext()) {
SessionNotCreatedException exception =
new SessionNotCreatedException("No capabilities found");
EXCEPTION.accept(attributeMap, exception);
attributeMap.put(
AttributeKey.EXCEPTION_MESSAGE.getKey(), EventAttribute.setValue(exception.getMessage()));
span.addEvent(AttributeKey.EXCEPTION_EVENT.getKey(), attributeMap);
throw exception;
}
} catch (IOException e) {
SessionNotCreatedException exception = new SessionNotCreatedException(e.getMessage(), e);
EXCEPTION.accept(attributeMap, exception);
String errorMessage = "IOException while reading the request payload. " +
exception.getMessage();
attributeMap.put(
AttributeKey.EXCEPTION_MESSAGE.getKey(), EventAttribute.setValue(errorMessage));
span.addEvent(AttributeKey.EXCEPTION_EVENT.getKey(), attributeMap);
throw exception;
}
}
}
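
  // addToQueue also backs the public "/session" route registered in the constructor; the remaining
  // queue operations are supplied by the concrete queuer implementations.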
public abstract HttpResponse addToQueue(HttpRequest request);
public abstract boolean retryAddToQueue(HttpRequest request, RequestId reqId);
public abstract Optional<HttpRequest> remove(RequestId reqId);
public abstract int clearQueue();
public abstract int getQueueSize();
@Override
public boolean matches(HttpRequest req) {
return routes.matches(req);
}
@Override
public HttpResponse execute(HttpRequest req) {
return routes.execute(req);
}
}
| 1 | 18,614 | should it be `newsessionqueue` or `newsessionqueuer`? In case we'd like to be consistent | SeleniumHQ-selenium | js |
@@ -5463,6 +5463,13 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable,
throw new IllegalArgumentException("number of documents in the index cannot exceed " + actualMaxDocs + " (current document count is " + pendingNumDocs.get() + "; added numDocs is " + addedNumDocs + ")");
}
+ /**
+   * Returns the number of documents in the index, including documents that are being added (i.e., reserved).
+ */
+ public long getPendingNumDocs() {
+ return pendingNumDocs.get();
+ }
+
/** Returns the highest <a href="#sequence_number">sequence number</a> across
* all completed operations, or 0 if no operations have finished yet. Still
* in-flight operations (in other threads) are not counted until they finish. | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.lucene.index;
import java.io.Closeable;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Date;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Queue;
import java.util.Set;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
import java.util.concurrent.locks.ReentrantLock;
import java.util.function.BooleanSupplier;
import java.util.function.IntPredicate;
import java.util.stream.Collectors;
import java.util.stream.StreamSupport;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.codecs.Codec;
import org.apache.lucene.codecs.FieldInfosFormat;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.DocValuesUpdate.BinaryDocValuesUpdate;
import org.apache.lucene.index.DocValuesUpdate.NumericDocValuesUpdate;
import org.apache.lucene.index.FieldInfos.FieldNumbers;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.DocValuesFieldExistsQuery;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.SortField;
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FlushInfo;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.Lock;
import org.apache.lucene.store.LockObtainFailedException;
import org.apache.lucene.store.LockValidatingDirectoryWrapper;
import org.apache.lucene.store.MMapDirectory;
import org.apache.lucene.store.MergeInfo;
import org.apache.lucene.store.TrackingDirectoryWrapper;
import org.apache.lucene.util.Accountable;
import org.apache.lucene.util.ArrayUtil;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.Constants;
import org.apache.lucene.util.Counter;
import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.InfoStream;
import org.apache.lucene.util.StringHelper;
import org.apache.lucene.util.ThreadInterruptedException;
import org.apache.lucene.util.UnicodeUtil;
import org.apache.lucene.util.Version;
import static org.apache.lucene.search.DocIdSetIterator.NO_MORE_DOCS;
import static org.apache.lucene.util.ByteBlockPool.BYTE_BLOCK_SIZE;
/**
An <code>IndexWriter</code> creates and maintains an index.
<p>The {@link OpenMode} option on
{@link IndexWriterConfig#setOpenMode(OpenMode)} determines
whether a new index is created, or whether an existing index is
opened. Note that you can open an index with {@link OpenMode#CREATE}
even while readers are using the index. The old readers will
continue to search the "point in time" snapshot they had opened,
and won't see the newly created index until they re-open. If
{@link OpenMode#CREATE_OR_APPEND} is used IndexWriter will create a
new index if there is not already an index at the provided path
and otherwise open the existing index.</p>
<p>In either case, documents are added with {@link #addDocument(Iterable)
addDocument} and removed with {@link #deleteDocuments(Term...)} or {@link
#deleteDocuments(Query...)}. A document can be updated with {@link
#updateDocument(Term, Iterable) updateDocument} (which just deletes
and then adds the entire document). When finished adding, deleting
and updating documents, {@link #close() close} should be called.</p>
<a id="sequence_numbers"></a>
<p>Each method that changes the index returns a {@code long} sequence number, which
expresses the effective order in which each change was applied.
{@link #commit} also returns a sequence number, describing which
changes are in the commit point and which are not. Sequence numbers
are transient (not saved into the index in any way) and only valid
within a single {@code IndexWriter} instance.</p>
<a id="flush"></a>
<p>These changes are buffered in memory and periodically
flushed to the {@link Directory} (during the above method
calls). A flush is triggered when there are enough added documents
since the last flush. Flushing is triggered either by RAM usage of the
documents (see {@link IndexWriterConfig#setRAMBufferSizeMB}) or the
number of added documents (see {@link IndexWriterConfig#setMaxBufferedDocs(int)}).
The default is to flush when RAM usage hits
{@link IndexWriterConfig#DEFAULT_RAM_BUFFER_SIZE_MB} MB. For
best indexing speed you should flush by RAM usage with a
large RAM buffer.
In contrast to the other flush options {@link IndexWriterConfig#setRAMBufferSizeMB} and
{@link IndexWriterConfig#setMaxBufferedDocs(int)}, deleted terms
won't trigger a segment flush. Note that flushing just moves the
internal buffered state in IndexWriter into the index, but
these changes are not visible to IndexReader until either
{@link #commit()} or {@link #close} is called. A flush may
also trigger one or more segment merges which by default
run with a background thread so as not to block the
addDocument calls (see <a href="#mergePolicy">below</a>
for changing the {@link MergeScheduler}).</p>
<p>Opening an <code>IndexWriter</code> creates a lock file for the directory in use. Trying to open
another <code>IndexWriter</code> on the same directory will lead to a
{@link LockObtainFailedException}.</p>
<a id="deletionPolicy"></a>
<p>Expert: <code>IndexWriter</code> allows an optional
{@link IndexDeletionPolicy} implementation to be specified. You
can use this to control when prior commits are deleted from
the index. The default policy is {@link KeepOnlyLastCommitDeletionPolicy}
which removes all prior commits as soon as a new commit is
done. Creating your own policy can allow you to explicitly
keep previous "point in time" commits alive in the index for
some time, either because this is useful for your application,
or to give readers enough time to refresh to the new commit
without having the old commit deleted out from under them.
The latter is necessary when multiple computers take turns opening
their own {@code IndexWriter} and {@code IndexReader}s
against a single shared index mounted via remote filesystems
like NFS which do not support "delete on last close" semantics.
A single computer accessing an index via NFS is fine with the
default deletion policy since NFS clients emulate "delete on
last close" locally. That said, accessing an index via NFS
will likely result in poor performance compared to a local IO
device. </p>
<a id="mergePolicy"></a> <p>Expert:
<code>IndexWriter</code> allows you to separately change
the {@link MergePolicy} and the {@link MergeScheduler}.
The {@link MergePolicy} is invoked whenever there are
changes to the segments in the index. Its role is to
select which merges to do, if any, and return a {@link
MergePolicy.MergeSpecification} describing the merges.
The default is {@link LogByteSizeMergePolicy}. Then, the {@link
MergeScheduler} is invoked with the requested merges and
it decides when and how to run the merges. The default is
{@link ConcurrentMergeScheduler}. </p>
<a id="OOME"></a><p><b>NOTE</b>: if you hit a
VirtualMachineError, or disaster strikes during a checkpoint
then IndexWriter will close itself. This is a
defensive measure in case any internal state (buffered
documents, deletions, reference counts) were corrupted.
Any subsequent calls will throw an AlreadyClosedException.</p>
<a id="thread-safety"></a><p><b>NOTE</b>: {@link
IndexWriter} instances are completely thread
safe, meaning multiple threads can call any of its
methods, concurrently. If your application requires
external synchronization, you should <b>not</b>
synchronize on the <code>IndexWriter</code> instance as
this may cause deadlock; use your own (non-Lucene) objects
instead. </p>
<p><b>NOTE</b>: If you call
<code>Thread.interrupt()</code> on a thread that's within
IndexWriter, IndexWriter will try to catch this (eg, if
it's in a wait() or Thread.sleep()), and will then throw
the unchecked exception {@link ThreadInterruptedException}
and <b>clear</b> the interrupt status on the thread.</p>
*/
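
/*
 * Minimal lifecycle sketch for the description above (illustrative only; StandardAnalyzer,
 * FSDirectory, Document and TextField are the usual Lucene analysis/store/document classes):
 *
 *   try (Directory dir = FSDirectory.open(Paths.get("/tmp/example-index"));
 *        IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new StandardAnalyzer()))) {
 *     Document doc = new Document();
 *     doc.add(new TextField("body", "hello world", Field.Store.YES));
 *     writer.addDocument(doc);                          // buffered in RAM until a flush
 *     writer.updateDocument(new Term("id", "42"), doc); // delete-then-add in one operation
 *     writer.commit();                                  // make the changes durable and searchable
 *   }
 */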
/*
* Clarification: Check Points (and commits)
* IndexWriter writes new index files to the directory without writing a new segments_N
* file which references these new files. It also means that the state of
* the in memory SegmentInfos object is different than the most recent
* segments_N file written to the directory.
*
* Each time the SegmentInfos is changed, and matches the (possibly
* modified) directory files, we have a new "check point".
* If the modified/new SegmentInfos is written to disk - as a new
* (generation of) segments_N file - this check point is also an
* IndexCommit.
*
* A new checkpoint always replaces the previous checkpoint and
* becomes the new "front" of the index. This allows the IndexFileDeleter
* to delete files that are referenced only by stale checkpoints.
* (files that were created since the last commit, but are no longer
* referenced by the "front" of the index). For this, IndexFileDeleter
* keeps track of the last non commit checkpoint.
*/
public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable,
MergePolicy.MergeContext {
/** Hard limit on maximum number of documents that may be added to the
* index. If you try to add more than this you'll hit {@code IllegalArgumentException}. */
// We defensively subtract 128 to be well below the lowest
// ArrayUtil.MAX_ARRAY_LENGTH on "typical" JVMs. We don't just use
// ArrayUtil.MAX_ARRAY_LENGTH here because this can vary across JVMs:
public static final int MAX_DOCS = Integer.MAX_VALUE - 128;
/** Maximum value of the token position in an indexed field. */
public static final int MAX_POSITION = Integer.MAX_VALUE - 128;
// Use package-private instance var to enforce the limit so testing
// can use less electricity:
private static int actualMaxDocs = MAX_DOCS;
/** Used only for testing. */
static void setMaxDocs(int maxDocs) {
if (maxDocs > MAX_DOCS) {
// Cannot go higher than the hard max:
throw new IllegalArgumentException("maxDocs must be <= IndexWriter.MAX_DOCS=" + MAX_DOCS + "; got: " + maxDocs);
}
IndexWriter.actualMaxDocs = maxDocs;
}
static int getActualMaxDocs() {
return IndexWriter.actualMaxDocs;
}
/** Used only for testing. */
private final boolean enableTestPoints;
private static final int UNBOUNDED_MAX_MERGE_SEGMENTS = -1;
/**
* Name of the write lock in the index.
*/
public static final String WRITE_LOCK_NAME = "write.lock";
/** Key for the source of a segment in the {@link SegmentInfo#getDiagnostics() diagnostics}. */
public static final String SOURCE = "source";
/** Source of a segment which results from a merge of other segments. */
public static final String SOURCE_MERGE = "merge";
/** Source of a segment which results from a flush. */
public static final String SOURCE_FLUSH = "flush";
/** Source of a segment which results from a call to {@link #addIndexes(CodecReader...)}. */
public static final String SOURCE_ADDINDEXES_READERS = "addIndexes(CodecReader...)";
/**
* Absolute hard maximum length for a term, in bytes once
* encoded as UTF8. If a term arrives from the analyzer
* longer than this length, an
* <code>IllegalArgumentException</code> is thrown
* and a message is printed to infoStream, if set (see {@link
* IndexWriterConfig#setInfoStream(InfoStream)}).
*/
public final static int MAX_TERM_LENGTH = BYTE_BLOCK_SIZE-2;
/**
* Maximum length string for a stored field.
*/
public final static int MAX_STORED_STRING_LENGTH = ArrayUtil.MAX_ARRAY_LENGTH / UnicodeUtil.MAX_UTF8_BYTES_PER_CHAR;
// when unrecoverable disaster strikes, we populate this with the reason that we had to close IndexWriter
private final AtomicReference<Throwable> tragedy = new AtomicReference<>(null);
private final Directory directoryOrig; // original user directory
private final Directory directory; // wrapped with additional checks
private final AtomicLong changeCount = new AtomicLong(); // increments every time a change is completed
private volatile long lastCommitChangeCount; // last changeCount that was committed
private List<SegmentCommitInfo> rollbackSegments; // list of segmentInfo we will fallback to if the commit fails
private volatile SegmentInfos pendingCommit; // set when a commit is pending (after prepareCommit() & before commit())
private volatile long pendingSeqNo;
private volatile long pendingCommitChangeCount;
private Collection<String> filesToCommit;
private final SegmentInfos segmentInfos;
final FieldNumbers globalFieldNumberMap;
final DocumentsWriter docWriter;
private final EventQueue eventQueue = new EventQueue(this);
private final MergeScheduler.MergeSource mergeSource = new IndexWriterMergeSource(this);
private final ReentrantLock writeDocValuesLock = new ReentrantLock();
static final class EventQueue implements Closeable {
private volatile boolean closed;
// we use a semaphore here instead of simply synced methods to allow
// events to be processed concurrently by multiple threads such that all events
// for a certain thread are processed once the thread returns from IW
private final Semaphore permits = new Semaphore(Integer.MAX_VALUE);
private final Queue<Event> queue = new ConcurrentLinkedQueue<>();
private final IndexWriter writer;
EventQueue(IndexWriter writer) {
this.writer = writer;
}
private void acquire() {
if (permits.tryAcquire() == false) {
throw new AlreadyClosedException("queue is closed");
}
if (closed) {
permits.release();
throw new AlreadyClosedException("queue is closed");
}
}
boolean add(Event event) {
acquire();
try {
return queue.add(event);
} finally {
permits.release();
}
}
void processEvents() throws IOException {
acquire();
try {
processEventsInternal();
} finally {
permits.release();
}
}
private void processEventsInternal() throws IOException {
assert Integer.MAX_VALUE - permits.availablePermits() > 0 : "must acquire a permit before processing events";
Event event;
while ((event = queue.poll()) != null) {
event.process(writer);
}
}
@Override
public synchronized void close() throws IOException { // synced to prevent double closing
assert closed == false : "we should never close this twice";
closed = true;
// it's possible that we close this queue while we are in a processEvents call
if (writer.getTragicException() != null) {
// we are already handling a tragic exception let's drop it all on the floor and return
queue.clear();
} else {
// now we acquire all the permits to ensure we are the only one processing the queue
try {
permits.acquire(Integer.MAX_VALUE);
} catch (InterruptedException e) {
throw new ThreadInterruptedException(e);
}
try {
processEventsInternal();
} finally {
permits.release(Integer.MAX_VALUE);
}
}
}
}
private final IndexFileDeleter deleter;
// used by forceMerge to note those needing merging
private final Map<SegmentCommitInfo,Boolean> segmentsToMerge = new HashMap<>();
private int mergeMaxNumSegments;
private Lock writeLock;
private volatile boolean closed;
private volatile boolean closing;
private final AtomicBoolean maybeMerge = new AtomicBoolean();
private Iterable<Map.Entry<String,String>> commitUserData;
// Holds all SegmentInfo instances currently involved in
// merges
private final HashSet<SegmentCommitInfo> mergingSegments = new HashSet<>();
private final MergeScheduler mergeScheduler;
private final Set<SegmentMerger> runningAddIndexesMerges = new HashSet<>();
private final LinkedList<MergePolicy.OneMerge> pendingMerges = new LinkedList<>();
private final Set<MergePolicy.OneMerge> runningMerges = new HashSet<>();
private final List<MergePolicy.OneMerge> mergeExceptions = new ArrayList<>();
private long mergeGen;
private Merges merges = new Merges();
private boolean didMessageState;
private final AtomicInteger flushCount = new AtomicInteger();
private final AtomicInteger flushDeletesCount = new AtomicInteger();
private final ReaderPool readerPool;
private final BufferedUpdatesStream bufferedUpdatesStream;
/** Counts how many merges have completed; this is used by {@link #forceApply(FrozenBufferedUpdates)}
* to handle concurrently apply deletes/updates with merges completing. */
private final AtomicLong mergeFinishedGen = new AtomicLong();
// The instance that was passed to the constructor. It is saved only in order
// to allow users to query an IndexWriter settings.
private final LiveIndexWriterConfig config;
/** System.nanoTime() when commit started; used to write
* an infoStream message about how long commit took. */
private long startCommitTime;
/** How many documents are in the index, or are in the process of being
* added (reserved). E.g., operations like addIndexes will first reserve
* the right to add N docs, before they actually change the index,
* much like how hotels place an "authorization hold" on your credit
* card to make sure they can later charge you when you check out. */
private final AtomicLong pendingNumDocs = new AtomicLong();
private final boolean softDeletesEnabled;
private final DocumentsWriter.FlushNotifications flushNotifications = new DocumentsWriter.FlushNotifications() {
@Override
public void deleteUnusedFiles(Collection<String> files) {
eventQueue.add(w -> w.deleteNewFiles(files));
}
@Override
public void flushFailed(SegmentInfo info) {
eventQueue.add(w -> w.flushFailed(info));
}
@Override
public void afterSegmentsFlushed() throws IOException {
publishFlushedSegments(false);
}
@Override
public void onTragicEvent(Throwable event, String message) {
IndexWriter.this.onTragicEvent(event, message);
}
@Override
public void onDeletesApplied() {
eventQueue.add(w -> {
try {
w.publishFlushedSegments(true);
} finally {
flushCount.incrementAndGet();
}
}
);
}
@Override
public void onTicketBacklog() {
eventQueue.add(w -> w.publishFlushedSegments(true));
}
};
DirectoryReader getReader() throws IOException {
return getReader(true, false);
}
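
  /*
   * Near-real-time usage sketch for the reader this method produces (illustrative only;
   * DirectoryReader.open(IndexWriter) is the public entry point that ends up here):
   *
   *   DirectoryReader reader = DirectoryReader.open(writer);     // sees uncommitted changes
   *   IndexSearcher searcher = new IndexSearcher(reader);
   *   // later, refresh cheaply instead of reopening from scratch:
   *   DirectoryReader newer = DirectoryReader.openIfChanged(reader);
   *   if (newer != null) { reader.close(); reader = newer; }
   */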
/**
* Expert: returns a readonly reader, covering all
* committed as well as un-committed changes to the index.
* This provides "near real-time" searching, in that
* changes made during an IndexWriter session can be
* quickly made available for searching without closing
* the writer nor calling {@link #commit}.
*
* <p>Note that this is functionally equivalent to calling
   * {@link #flush} and then opening a new reader. But the turnaround time of this
* method should be faster since it avoids the potentially
* costly {@link #commit}.</p>
*
* <p>You must close the {@link IndexReader} returned by
* this method once you are done using it.</p>
*
* <p>It's <i>near</i> real-time because there is no hard
* guarantee on how quickly you can get a new reader after
* making changes with IndexWriter. You'll have to
* experiment in your situation to determine if it's
* fast enough. As this is a new and experimental
* feature, please report back on your findings so we can
* learn, improve and iterate.</p>
*
* <p>The resulting reader supports {@link
* DirectoryReader#openIfChanged}, but that call will simply forward
* back to this method (though this may change in the
* future).</p>
*
* <p>The very first time this method is called, this
* writer instance will make every effort to pool the
* readers that it opens for doing merges, applying
* deletes, etc. This means additional resources (RAM,
* file descriptors, CPU time) will be consumed.</p>
*
* <p>For lower latency on reopening a reader, you should
* call {@link IndexWriterConfig#setMergedSegmentWarmer} to
* pre-warm a newly merged segment before it's committed
* to the index. This is important for minimizing
* index-to-search delay after a large merge. </p>
*
* <p>If an addIndexes* call is running in another thread,
* then this reader will only search those segments from
* the foreign index that have been successfully copied
   * over, so far.</p>
*
* <p><b>NOTE</b>: Once the writer is closed, any
* outstanding readers may continue to be used. However,
* if you attempt to reopen any of those readers, you'll
* hit an {@link AlreadyClosedException}.</p>
*
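   * <p>A minimal sketch of the public near-real-time pattern this method
   * backs (the variable names are placeholders):
   * <pre>
   * DirectoryReader reader = DirectoryReader.open(writer);   // NRT reader
   * // ... index more documents with writer ...
   * DirectoryReader newReader = DirectoryReader.openIfChanged(reader);
   * if (newReader != null) {
   *   reader.close();
   *   reader = newReader;
   * }
   * </pre>
   *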
* @lucene.experimental
*
* @return IndexReader that covers entire index plus all
* changes made so far by this IndexWriter instance
*
* @throws IOException If there is a low-level I/O error
*/
DirectoryReader getReader(boolean applyAllDeletes, boolean writeAllDeletes) throws IOException {
ensureOpen();
if (writeAllDeletes && applyAllDeletes == false) {
throw new IllegalArgumentException("applyAllDeletes must be true when writeAllDeletes=true");
}
final long tStart = System.currentTimeMillis();
if (infoStream.isEnabled("IW")) {
infoStream.message("IW", "flush at getReader");
}
// Do this up front before flushing so that the readers
// obtained during this flush are pooled, the first time
// this method is called:
readerPool.enableReaderPooling();
StandardDirectoryReader r = null;
doBeforeFlush();
boolean anyChanges;
final long maxFullFlushMergeWaitMillis = config.getMaxFullFlushMergeWaitMillis();
/*
* for releasing a NRT reader we must ensure that
* DW doesn't add any segments or deletes until we are
* done with creating the NRT DirectoryReader.
* We release the two stage full flush after we are done opening the
* directory reader!
*/
MergePolicy.MergeSpecification onGetReaderMerges = null;
final AtomicBoolean stopCollectingMergedReaders = new AtomicBoolean(false);
final Map<String, SegmentReader> mergedReaders = new HashMap<>();
final Map<String, SegmentReader> openedReadOnlyClones = new HashMap<>();
    // this function is used to control which SRs are opened in order to keep track of them
    // and to reuse them in case we wait for merges in this getReader call.
IOUtils.IOFunction<SegmentCommitInfo, SegmentReader> readerFactory = sci -> {
final ReadersAndUpdates rld = getPooledInstance(sci, true);
try {
assert Thread.holdsLock(IndexWriter.this);
SegmentReader segmentReader = rld.getReadOnlyClone(IOContext.READ);
if (maxFullFlushMergeWaitMillis > 0) { // only track this if we actually do fullFlush merges
openedReadOnlyClones.put(sci.info.name, segmentReader);
}
return segmentReader;
} finally {
release(rld);
}
};
Closeable onGetReaderMergeResources = null;
SegmentInfos openingSegmentInfos = null;
boolean success2 = false;
try {
/* this is the essential part of the getReader method. We need to take care of the following things:
* - flush all currently in-memory DWPTs to disk
* - apply all deletes & updates to new and to the existing DWPTs
* - prevent flushes and applying deletes of concurrently indexing DWPTs to be applied
* - open a SDR on the updated SIS
*
* in order to prevent concurrent flushes we call DocumentsWriter#flushAllThreads that swaps out the deleteQueue
* (this enforces a happens before relationship between this and the subsequent full flush) and informs the
       * FlushControl (#markForFullFlush()) that it should prevent any new DWPTs from flushing until we are
* done (DocumentsWriter#finishFullFlush(boolean)). All this is guarded by the fullFlushLock to prevent multiple
* full flushes from happening concurrently. Once the DocWriter has initiated a full flush we can sequentially flush
* and apply deletes & updates to the written segments without worrying about concurrently indexing DWPTs. The important
       * aspect is that it all happens between DocumentsWriter#flushAllThreads() and DocumentsWriter#finishFullFlush(boolean)
* since once the flush is marked as done deletes start to be applied to the segments on disk without guarantees that
* the corresponding added documents (in the update case) are flushed and visible when opening a SDR.
*
*/
boolean success = false;
synchronized (fullFlushLock) {
try {
// TODO: should we somehow make the seqNo available in the returned NRT reader?
anyChanges = docWriter.flushAllThreads() < 0;
if (anyChanges == false) {
// prevent double increment since docWriter#doFlush increments the flushcount
// if we flushed anything.
flushCount.incrementAndGet();
}
publishFlushedSegments(true);
processEvents(false);
if (applyAllDeletes) {
applyAllDeletesAndUpdates();
}
synchronized(this) {
// NOTE: we cannot carry doc values updates in memory yet, so we always must write them through to disk and re-open each
// SegmentReader:
// TODO: we could instead just clone SIS and pull/incref readers in sync'd block, and then do this w/o IW's lock?
// Must do this sync'd on IW to prevent a merge from completing at the last second and failing to write its DV updates:
writeReaderPool(writeAllDeletes);
// Prevent segmentInfos from changing while opening the
// reader; in theory we could instead do similar retry logic,
// just like we do when loading segments_N
r = StandardDirectoryReader.open(this, readerFactory, segmentInfos, applyAllDeletes, writeAllDeletes);
if (infoStream.isEnabled("IW")) {
infoStream.message("IW", "return reader version=" + r.getVersion() + " reader=" + r);
}
if (maxFullFlushMergeWaitMillis > 0) {
// we take the SIS from the reader which has already pruned away fully deleted readers
// this makes pulling the readers below after the merge simpler since we can be safe that
// they are not closed. Every segment has a corresponding SR in the SDR we opened if we use
// this SIS
// we need to do this rather complicated management of SRs and infos since we can't wait for merges
// while we hold the fullFlushLock since the merge might hit a tragic event and that must not be reported
// while holding that lock. Merging outside of the lock ie. after calling docWriter.finishFullFlush(boolean) would
// yield wrong results because deletes might sneak in during the merge
openingSegmentInfos = r.getSegmentInfos().clone();
onGetReaderMerges = preparePointInTimeMerge(openingSegmentInfos, stopCollectingMergedReaders::get, MergeTrigger.GET_READER,
sci -> {
assert stopCollectingMergedReaders.get() == false : "illegal state merge reader must be not pulled since we already stopped waiting for merges";
SegmentReader apply = readerFactory.apply(sci);
mergedReaders.put(sci.info.name, apply);
// we need to incRef the files of the opened SR otherwise it's possible that another merge
// removes the segment before we pass it on to the SDR
deleter.incRef(sci.files());
});
onGetReaderMergeResources = () -> {
// this needs to be closed once after we are done. In the case of an exception it releases
// all resources, closes the merged readers and decrements the files references.
              // this only happens for readers that haven't been removed from the mergedReaders and released elsewhere
synchronized (this) {
stopCollectingMergedReaders.set(true);
IOUtils.close(mergedReaders.values().stream().map(sr -> (Closeable) () -> {
try {
deleter.decRef(sr.getSegmentInfo().files());
} finally {
sr.close();
}
}).collect(Collectors.toList()));
}
};
}
}
success = true;
} finally {
// Done: finish the full flush!
assert Thread.holdsLock(fullFlushLock);
docWriter.finishFullFlush(success);
if (success) {
processEvents(false);
doAfterFlush();
} else {
if (infoStream.isEnabled("IW")) {
infoStream.message("IW", "hit exception during NRT reader");
}
}
}
}
if (onGetReaderMerges != null) { // only relevant if we do merge on getReader
StandardDirectoryReader mergedReader = finishGetReaderMerge(stopCollectingMergedReaders, mergedReaders,
openedReadOnlyClones, openingSegmentInfos, applyAllDeletes,
writeAllDeletes, onGetReaderMerges, maxFullFlushMergeWaitMillis);
if (mergedReader != null) {
try {
r.close();
} finally {
r = mergedReader;
}
}
}
anyChanges |= maybeMerge.getAndSet(false);
if (anyChanges) {
maybeMerge(config.getMergePolicy(), MergeTrigger.FULL_FLUSH, UNBOUNDED_MAX_MERGE_SEGMENTS);
}
if (infoStream.isEnabled("IW")) {
infoStream.message("IW", "getReader took " + (System.currentTimeMillis() - tStart) + " msec");
}
success2 = true;
} catch (VirtualMachineError tragedy) {
tragicEvent(tragedy, "getReader");
throw tragedy;
} finally {
if (!success2) {
try {
IOUtils.closeWhileHandlingException(r, onGetReaderMergeResources);
} finally {
maybeCloseOnTragicEvent();
}
} else {
IOUtils.close(onGetReaderMergeResources);
}
}
return r;
}
private StandardDirectoryReader finishGetReaderMerge(AtomicBoolean stopCollectingMergedReaders, Map<String, SegmentReader> mergedReaders,
Map<String, SegmentReader> openedReadOnlyClones, SegmentInfos openingSegmentInfos,
boolean applyAllDeletes, boolean writeAllDeletes,
MergePolicy.MergeSpecification pointInTimeMerges, long maxCommitMergeWaitMillis) throws IOException {
assert openingSegmentInfos != null;
mergeScheduler.merge(mergeSource, MergeTrigger.GET_READER);
pointInTimeMerges.await(maxCommitMergeWaitMillis, TimeUnit.MILLISECONDS);
synchronized (this) {
stopCollectingMergedReaders.set(true);
StandardDirectoryReader reader = maybeReopenMergedNRTReader(mergedReaders, openedReadOnlyClones, openingSegmentInfos,
applyAllDeletes, writeAllDeletes);
IOUtils.close(mergedReaders.values());
mergedReaders.clear();
return reader;
}
}
private StandardDirectoryReader maybeReopenMergedNRTReader(Map<String, SegmentReader> mergedReaders,
Map<String, SegmentReader> openedReadOnlyClones, SegmentInfos openingSegmentInfos,
boolean applyAllDeletes, boolean writeAllDeletes) throws IOException {
assert Thread.holdsLock(this);
if (mergedReaders.isEmpty() == false) {
Collection<String> files = new ArrayList<>();
try {
return StandardDirectoryReader.open(this,
sci -> {
// as soon as we remove the reader and return it the StandardDirectoryReader#open
// will take care of closing it. We only need to handle the readers that remain in the
// mergedReaders map and close them.
SegmentReader remove = mergedReaders.remove(sci.info.name);
if (remove == null) {
remove = openedReadOnlyClones.remove(sci.info.name);
assert remove != null;
// each of the readers we reuse from the previous reader needs to be incRef'd
// since we reuse them but don't have an implicit incRef in the SDR:open call
remove.incRef();
} else {
files.addAll(remove.getSegmentInfo().files());
}
return remove;
}, openingSegmentInfos, applyAllDeletes, writeAllDeletes);
} finally {
// now the SDR#open call has incRef'd the files so we can let them go
deleter.decRef(files);
}
}
return null;
}
@Override
public final long ramBytesUsed() {
ensureOpen();
return docWriter.ramBytesUsed();
}
/**
* Returns the number of bytes currently being flushed
*/
public final long getFlushingBytes() {
ensureOpen();
return docWriter.getFlushingBytes();
}
final void writeSomeDocValuesUpdates() throws IOException {
if (writeDocValuesLock.tryLock()) {
try {
final double ramBufferSizeMB = config.getRAMBufferSizeMB();
// If the reader pool is > 50% of our IW buffer, then write the updates:
if (ramBufferSizeMB != IndexWriterConfig.DISABLE_AUTO_FLUSH) {
long startNS = System.nanoTime();
long ramBytesUsed = readerPool.ramBytesUsed();
if (ramBytesUsed > 0.5 * ramBufferSizeMB * 1024 * 1024) {
if (infoStream.isEnabled("BD")) {
infoStream.message("BD", String.format(Locale.ROOT, "now write some pending DV updates: %.2f MB used vs IWC Buffer %.2f MB",
ramBytesUsed/1024./1024., ramBufferSizeMB));
}
// Sort by largest ramBytesUsed:
final List<ReadersAndUpdates> list = readerPool.getReadersByRam();
int count = 0;
for (ReadersAndUpdates rld : list) {
if (ramBytesUsed <= 0.5 * ramBufferSizeMB * 1024 * 1024) {
break;
}
// We need to do before/after because not all RAM in this RAU is used by DV updates, and
// not all of those bytes can be written here:
long bytesUsedBefore = rld.ramBytesUsed.get();
if (bytesUsedBefore == 0) {
continue; // nothing to do here - lets not acquire the lock
}
// Only acquire IW lock on each write, since this is a time consuming operation. This way
// other threads get a chance to run in between our writes.
synchronized (this) {
// It's possible that the segment of a reader returned by readerPool#getReadersByRam
// is dropped before being processed here. If it happens, we need to skip that reader.
// this is also best effort to free ram, there might be some other thread writing this rld concurrently
// which wins and then if readerPooling is off this rld will be dropped.
if (readerPool.get(rld.info, false) == null) {
continue;
}
if (rld.writeFieldUpdates(directory, globalFieldNumberMap, bufferedUpdatesStream.getCompletedDelGen(), infoStream)) {
checkpointNoSIS();
}
}
long bytesUsedAfter = rld.ramBytesUsed.get();
ramBytesUsed -= bytesUsedBefore - bytesUsedAfter;
count++;
}
if (infoStream.isEnabled("BD")) {
infoStream.message("BD", String.format(Locale.ROOT, "done write some DV updates for %d segments: now %.2f MB used vs IWC Buffer %.2f MB; took %.2f sec",
count, readerPool.ramBytesUsed()/1024./1024., ramBufferSizeMB, ((System.nanoTime() - startNS)/1000000000.)));
}
}
}
} finally {
writeDocValuesLock.unlock();
}
}
}
/**
* Obtain the number of deleted docs for a pooled reader.
* If the reader isn't being pooled, the segmentInfo's
* delCount is returned.
*/
@Override
public int numDeletedDocs(SegmentCommitInfo info) {
ensureOpen(false);
validate(info);
final ReadersAndUpdates rld = getPooledInstance(info, false);
if (rld != null) {
return rld.getDelCount(); // get the full count from here since SCI might change concurrently
} else {
final int delCount = info.getDelCount(softDeletesEnabled);
assert delCount <= info.info.maxDoc(): "delCount: " + delCount + " maxDoc: " + info.info.maxDoc();
return delCount;
}
}
/**
* Used internally to throw an {@link AlreadyClosedException} if this
* IndexWriter has been closed or is in the process of closing.
*
* @param failIfClosing
* if true, also fail when {@code IndexWriter} is in the process of
* closing ({@code closing=true}) but not yet done closing (
* {@code closed=false})
* @throws AlreadyClosedException
* if this IndexWriter is closed or in the process of closing
*/
protected final void ensureOpen(boolean failIfClosing) throws AlreadyClosedException {
if (closed || (failIfClosing && closing)) {
throw new AlreadyClosedException("this IndexWriter is closed", tragedy.get());
}
}
/**
* Used internally to throw an {@link
* AlreadyClosedException} if this IndexWriter has been
* closed ({@code closed=true}) or is in the process of
* closing ({@code closing=true}).
* <p>
* Calls {@link #ensureOpen(boolean) ensureOpen(true)}.
* @throws AlreadyClosedException if this IndexWriter is closed
*/
protected final void ensureOpen() throws AlreadyClosedException {
ensureOpen(true);
}
/**
* Constructs a new IndexWriter per the settings given in <code>conf</code>.
* If you want to make "live" changes to this writer instance, use
* {@link #getConfig()}.
*
* <p>
   * <b>NOTE:</b> after this writer is created, the given configuration instance
* cannot be passed to another writer.
*
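   * <p>
   * A minimal usage sketch (the directory path and analyzer are placeholder
   * choices, not requirements of this constructor):
   * <pre>
   * Directory dir = FSDirectory.open(Paths.get("/tmp/index"));
   * IndexWriterConfig iwc = new IndexWriterConfig(new StandardAnalyzer());
   * try (IndexWriter writer = new IndexWriter(dir, iwc)) {
   *   // add, update or delete documents here
   * }
   * </pre>
   *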
* @param d
* the index directory. The index is either created or appended
   *          according to <code>conf.getOpenMode()</code>.
* @param conf
* the configuration settings according to which IndexWriter should
* be initialized.
* @throws IOException
* if the directory cannot be read/written to, or if it does not
* exist and <code>conf.getOpenMode()</code> is
* <code>OpenMode.APPEND</code> or if there is any other low-level
* IO error
*/
public IndexWriter(Directory d, IndexWriterConfig conf) throws IOException {
enableTestPoints = isEnableTestPoints();
conf.setIndexWriter(this); // prevent reuse by other instances
config = conf;
infoStream = config.getInfoStream();
softDeletesEnabled = config.getSoftDeletesField() != null;
// obtain the write.lock. If the user configured a timeout,
// we wrap with a sleeper and this might take some time.
writeLock = d.obtainLock(WRITE_LOCK_NAME);
boolean success = false;
try {
directoryOrig = d;
directory = new LockValidatingDirectoryWrapper(d, writeLock);
mergeScheduler = config.getMergeScheduler();
mergeScheduler.initialize(infoStream, directoryOrig);
OpenMode mode = config.getOpenMode();
final boolean indexExists;
final boolean create;
if (mode == OpenMode.CREATE) {
indexExists = DirectoryReader.indexExists(directory);
create = true;
} else if (mode == OpenMode.APPEND) {
indexExists = true;
create = false;
} else {
// CREATE_OR_APPEND - create only if an index does not exist
indexExists = DirectoryReader.indexExists(directory);
create = !indexExists;
}
// If index is too old, reading the segments will throw
// IndexFormatTooOldException.
String[] files = directory.listAll();
// Set up our initial SegmentInfos:
IndexCommit commit = config.getIndexCommit();
// Set up our initial SegmentInfos:
StandardDirectoryReader reader;
if (commit == null) {
reader = null;
} else {
reader = commit.getReader();
}
if (create) {
if (config.getIndexCommit() != null) {
// We cannot both open from a commit point and create:
if (mode == OpenMode.CREATE) {
throw new IllegalArgumentException("cannot use IndexWriterConfig.setIndexCommit() with OpenMode.CREATE");
} else {
throw new IllegalArgumentException("cannot use IndexWriterConfig.setIndexCommit() when index has no commit");
}
}
// Try to read first. This is to allow create
// against an index that's currently open for
// searching. In this case we write the next
// segments_N file with no segments:
final SegmentInfos sis = new SegmentInfos(config.getIndexCreatedVersionMajor());
if (indexExists) {
final SegmentInfos previous = SegmentInfos.readLatestCommit(directory);
sis.updateGenerationVersionAndCounter(previous);
}
segmentInfos = sis;
rollbackSegments = segmentInfos.createBackupSegmentInfos();
// Record that we have a change (zero out all
// segments) pending:
changed();
} else if (reader != null) {
// Init from an existing already opened NRT or non-NRT reader:
if (reader.directory() != commit.getDirectory()) {
throw new IllegalArgumentException("IndexCommit's reader must have the same directory as the IndexCommit");
}
if (reader.directory() != directoryOrig) {
throw new IllegalArgumentException("IndexCommit's reader must have the same directory passed to IndexWriter");
}
if (reader.segmentInfos.getLastGeneration() == 0) {
// TODO: maybe we could allow this? It's tricky...
throw new IllegalArgumentException("index must already have an initial commit to open from reader");
}
// Must clone because we don't want the incoming NRT reader to "see" any changes this writer now makes:
segmentInfos = reader.segmentInfos.clone();
SegmentInfos lastCommit;
try {
lastCommit = SegmentInfos.readCommit(directoryOrig, segmentInfos.getSegmentsFileName());
} catch (IOException ioe) {
throw new IllegalArgumentException("the provided reader is stale: its prior commit file \"" + segmentInfos.getSegmentsFileName() + "\" is missing from index");
}
if (reader.writer != null) {
// The old writer better be closed (we have the write lock now!):
assert reader.writer.closed;
// In case the old writer wrote further segments (which we are now dropping),
// update SIS metadata so we remain write-once:
segmentInfos.updateGenerationVersionAndCounter(reader.writer.segmentInfos);
lastCommit.updateGenerationVersionAndCounter(reader.writer.segmentInfos);
}
rollbackSegments = lastCommit.createBackupSegmentInfos();
} else {
// Init from either the latest commit point, or an explicit prior commit point:
String lastSegmentsFile = SegmentInfos.getLastCommitSegmentsFileName(files);
if (lastSegmentsFile == null) {
throw new IndexNotFoundException("no segments* file found in " + directory + ": files: " + Arrays.toString(files));
}
// Do not use SegmentInfos.read(Directory) since the spooky
// retrying it does is not necessary here (we hold the write lock):
segmentInfos = SegmentInfos.readCommit(directoryOrig, lastSegmentsFile);
if (commit != null) {
// Swap out all segments, but, keep metadata in
// SegmentInfos, like version & generation, to
// preserve write-once. This is important if
// readers are open against the future commit
// points.
if (commit.getDirectory() != directoryOrig) {
throw new IllegalArgumentException("IndexCommit's directory doesn't match my directory, expected=" + directoryOrig + ", got=" + commit.getDirectory());
}
SegmentInfos oldInfos = SegmentInfos.readCommit(directoryOrig, commit.getSegmentsFileName());
segmentInfos.replace(oldInfos);
changed();
if (infoStream.isEnabled("IW")) {
infoStream.message("IW", "init: loaded commit \"" + commit.getSegmentsFileName() + "\"");
}
}
rollbackSegments = segmentInfos.createBackupSegmentInfos();
}
commitUserData = new HashMap<>(segmentInfos.getUserData()).entrySet();
pendingNumDocs.set(segmentInfos.totalMaxDoc());
// start with previous field numbers, but new FieldInfos
// NOTE: this is correct even for an NRT reader because we'll pull FieldInfos even for the un-committed segments:
globalFieldNumberMap = getFieldNumberMap();
validateIndexSort();
config.getFlushPolicy().init(config);
bufferedUpdatesStream = new BufferedUpdatesStream(infoStream);
docWriter = new DocumentsWriter(flushNotifications, segmentInfos.getIndexCreatedVersionMajor(), pendingNumDocs,
enableTestPoints, this::newSegmentName,
config, directoryOrig, directory, globalFieldNumberMap);
readerPool = new ReaderPool(directory, directoryOrig, segmentInfos, globalFieldNumberMap,
bufferedUpdatesStream::getCompletedDelGen, infoStream, conf.getSoftDeletesField(), reader);
if (config.getReaderPooling()) {
readerPool.enableReaderPooling();
}
// Default deleter (for backwards compatibility) is
// KeepOnlyLastCommitDeleter:
// Sync'd is silly here, but IFD asserts we sync'd on the IW instance:
synchronized(this) {
deleter = new IndexFileDeleter(files, directoryOrig, directory,
config.getIndexDeletionPolicy(),
segmentInfos, infoStream, this,
indexExists, reader != null);
// We incRef all files when we return an NRT reader from IW, so all files must exist even in the NRT case:
assert create || filesExist(segmentInfos);
}
if (deleter.startingCommitDeleted) {
// Deletion policy deleted the "head" commit point.
// We have to mark ourself as changed so that if we
// are closed w/o any further changes we write a new
// segments_N file.
changed();
}
if (reader != null) {
// We always assume we are carrying over incoming changes when opening from reader:
segmentInfos.changed();
changed();
}
if (infoStream.isEnabled("IW")) {
infoStream.message("IW", "init: create=" + create + " reader=" + reader);
messageState();
}
success = true;
} finally {
if (!success) {
if (infoStream.isEnabled("IW")) {
infoStream.message("IW", "init: hit exception on init; releasing write lock");
}
IOUtils.closeWhileHandlingException(writeLock);
writeLock = null;
}
}
}
/** Confirms that the incoming index sort (if any) matches the existing index sort (if any). */
private void validateIndexSort() {
Sort indexSort = config.getIndexSort();
if (indexSort != null) {
for(SegmentCommitInfo info : segmentInfos) {
Sort segmentIndexSort = info.info.getIndexSort();
if (segmentIndexSort == null || isCongruentSort(indexSort, segmentIndexSort) == false) {
throw new IllegalArgumentException("cannot change previous indexSort=" + segmentIndexSort + " (from segment=" + info + ") to new indexSort=" + indexSort);
}
}
}
}
/**
* Returns true if <code>indexSort</code> is a prefix of <code>otherSort</code>.
**/
static boolean isCongruentSort(Sort indexSort, Sort otherSort) {
final SortField[] fields1 = indexSort.getSort();
final SortField[] fields2 = otherSort.getSort();
if (fields1.length > fields2.length) {
return false;
}
return Arrays.asList(fields1).equals(Arrays.asList(fields2).subList(0, fields1.length));
}
// reads latest field infos for the commit
// this is used on IW init and addIndexes(Dir) to create/update the global field map.
// TODO: fix tests abusing this method!
static FieldInfos readFieldInfos(SegmentCommitInfo si) throws IOException {
Codec codec = si.info.getCodec();
FieldInfosFormat reader = codec.fieldInfosFormat();
if (si.hasFieldUpdates()) {
// there are updates, we read latest (always outside of CFS)
final String segmentSuffix = Long.toString(si.getFieldInfosGen(), Character.MAX_RADIX);
return reader.read(si.info.dir, si.info, segmentSuffix, IOContext.READONCE);
} else if (si.info.getUseCompoundFile()) {
// cfs
try (Directory cfs = codec.compoundFormat().getCompoundReader(si.info.dir, si.info, IOContext.DEFAULT)) {
return reader.read(cfs, si.info, "", IOContext.READONCE);
}
} else {
// no cfs
return reader.read(si.info.dir, si.info, "", IOContext.READONCE);
}
}
/**
   * Loads or returns the already loaded global field number map for this {@link SegmentInfos}.
   * If this {@link SegmentInfos} has no global field number map the returned instance is empty.
*/
private FieldNumbers getFieldNumberMap() throws IOException {
final FieldNumbers map = new FieldNumbers(config.softDeletesField);
for(SegmentCommitInfo info : segmentInfos) {
FieldInfos fis = readFieldInfos(info);
for(FieldInfo fi : fis) {
map.addOrGet(fi.name, fi.number, fi.getIndexOptions(), fi.getDocValuesType(), fi.getPointDimensionCount(), fi.getPointIndexDimensionCount(), fi.getPointNumBytes(), fi.isSoftDeletesField());
}
}
return map;
}
/**
* Returns a {@link LiveIndexWriterConfig}, which can be used to query the IndexWriter
* current settings, as well as modify "live" ones.
*/
public LiveIndexWriterConfig getConfig() {
ensureOpen(false);
return config;
}
private void messageState() {
if (infoStream.isEnabled("IW") && didMessageState == false) {
didMessageState = true;
infoStream.message("IW", "\ndir=" + directoryOrig + "\n" +
"index=" + segString() + "\n" +
"version=" + Version.LATEST.toString() + "\n" +
config.toString());
final StringBuilder unmapInfo = new StringBuilder(Boolean.toString(MMapDirectory.UNMAP_SUPPORTED));
if (!MMapDirectory.UNMAP_SUPPORTED) {
unmapInfo.append(" (").append(MMapDirectory.UNMAP_NOT_SUPPORTED_REASON).append(")");
}
infoStream.message("IW", "MMapDirectory.UNMAP_SUPPORTED=" + unmapInfo);
}
}
/**
* Gracefully closes (commits, waits for merges), but calls rollback
* if there's an exc so the IndexWriter is always closed. This is called
* from {@link #close} when {@link IndexWriterConfig#commitOnClose} is
* {@code true}.
*/
private void shutdown() throws IOException {
if (pendingCommit != null) {
throw new IllegalStateException("cannot close: prepareCommit was already called with no corresponding call to commit");
}
// Ensure that only one thread actually gets to do the
// closing
if (shouldClose(true)) {
try {
if (infoStream.isEnabled("IW")) {
infoStream.message("IW", "now flush at close");
}
flush(true, true);
waitForMerges();
commitInternal(config.getMergePolicy());
} catch (Throwable t) {
// Be certain to close the index on any exception
try {
rollbackInternal();
} catch (Throwable t1) {
t.addSuppressed(t1);
}
throw t;
}
rollbackInternal(); // if we got that far lets rollback and close
}
}
/**
* Closes all open resources and releases the write lock.
*
* If {@link IndexWriterConfig#commitOnClose} is <code>true</code>,
* this will attempt to gracefully shut down by writing any
* changes, waiting for any running merges, committing, and closing.
* In this case, note that:
* <ul>
* <li>If you called prepareCommit but failed to call commit, this
* method will throw {@code IllegalStateException} and the {@code IndexWriter}
* will not be closed.</li>
* <li>If this method throws any other exception, the {@code IndexWriter}
* will be closed, but changes may have been lost.</li>
* </ul>
*
* <p>
* Note that this may be a costly
   * operation, so try to re-use a single writer instead of
* closing and opening a new one. See {@link #commit()} for
* caveats about write caching done by some IO devices.
*
* <p><b>NOTE</b>: You must ensure no other threads are still making
* changes at the same time that this method is invoked.</p>
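   *
   * <p>A common pattern is try-with-resources, which invokes this method
   * automatically (a sketch; {@code dir} and {@code iwc} are assumed to be
   * set up elsewhere):
   * <pre>
   * try (IndexWriter writer = new IndexWriter(dir, iwc)) {
   *   writer.addDocument(doc);
   * } // close() runs here; commits first if commitOnClose is true
   * </pre>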
*/
@Override
public void close() throws IOException {
if (config.getCommitOnClose()) {
shutdown();
} else {
rollback();
}
}
// Returns true if this thread should attempt to close, or
// false if IndexWriter is now closed; else,
// waits until another thread finishes closing
synchronized private boolean shouldClose(boolean waitForClose) {
while (true) {
if (closed == false) {
if (closing == false) {
// We get to close
closing = true;
return true;
} else if (waitForClose == false) {
return false;
} else {
// Another thread is presently trying to close;
// wait until it finishes one way (closes
// successfully) or another (fails to close)
doWait();
}
} else {
return false;
}
}
}
/** Returns the Directory used by this index. */
public Directory getDirectory() {
// return the original directory the user supplied, unwrapped.
return directoryOrig;
}
@Override
public InfoStream getInfoStream() {
return infoStream;
}
/** Returns the analyzer used by this index. */
public Analyzer getAnalyzer() {
ensureOpen();
return config.getAnalyzer();
}
/** If {@link SegmentInfos#getVersion} is below {@code newVersion} then update it to this value.
*
* @lucene.internal */
public synchronized void advanceSegmentInfosVersion(long newVersion) {
ensureOpen();
if (segmentInfos.getVersion() < newVersion) {
segmentInfos.setVersion(newVersion);
}
changed();
}
/**
* Returns true if this index has deletions (including
* buffered deletions). Note that this will return true
* if there are buffered Term/Query deletions, even if it
* turns out those buffered deletions don't match any
* documents.
*/
public synchronized boolean hasDeletions() {
ensureOpen();
if (bufferedUpdatesStream.any()
|| docWriter.anyDeletions()
|| readerPool.anyDeletions()) {
return true;
}
for (final SegmentCommitInfo info : segmentInfos) {
if (info.hasDeletions()) {
return true;
}
}
return false;
}
/**
* Adds a document to this index.
*
* <p> Note that if an Exception is hit (for example disk full)
* then the index will be consistent, but this document
* may not have been added. Furthermore, it's possible
* the index will have one segment in non-compound format
* even when using compound files (when a merge has
* partially succeeded).</p>
*
* <p> This method periodically flushes pending documents
* to the Directory (see <a href="#flush">above</a>), and
* also periodically triggers segment merges in the index
* according to the {@link MergePolicy} in use.</p>
*
* <p>Merges temporarily consume space in the
* directory. The amount of space required is up to 1X the
* size of all segments being merged, when no
* readers/searchers are open against the index, and up to
* 2X the size of all segments being merged when
* readers/searchers are open against the index (see
* {@link #forceMerge(int)} for details). The sequence of
* primitive merge operations performed is governed by the
* merge policy.
*
* <p>Note that each term in the document can be no longer
* than {@link #MAX_TERM_LENGTH} in bytes, otherwise an
* IllegalArgumentException will be thrown.</p>
*
* <p>Note that it's possible to create an invalid Unicode
* string in java if a UTF16 surrogate pair is malformed.
* In this case, the invalid characters are silently
* replaced with the Unicode replacement character
* U+FFFD.</p>
*
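   * <p>Illustrative sketch; the field names and values are placeholders:
   * <pre>
   * Document doc = new Document();
   * doc.add(new StringField("id", "42", Field.Store.YES));
   * doc.add(new TextField("body", "some text to index", Field.Store.NO));
   * writer.addDocument(doc);
   * </pre>
   *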
* @return The <a href="#sequence_number">sequence number</a>
* for this operation
*
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
*/
public long addDocument(Iterable<? extends IndexableField> doc) throws IOException {
return updateDocument(null, doc);
}
/**
* Atomically adds a block of documents with sequentially
* assigned document IDs, such that an external reader
* will see all or none of the documents.
*
* <p><b>WARNING</b>: the index does not currently record
* which documents were added as a block. Today this is
* fine, because merging will preserve a block. The order of
* documents within a segment will be preserved, even when child
* documents within a block are deleted. Most search features
* (like result grouping and block joining) require you to
* mark documents; when these documents are deleted these
* search features will not work as expected. Obviously adding
* documents to an existing block will require you the reindex
* the entire block.
*
* <p>However it's possible that in the future Lucene may
   * more aggressively re-order documents (for example,
* perhaps to obtain better index compression), in which case
* you may need to fully re-index your documents at that time.
*
* <p>See {@link #addDocument(Iterable)} for details on
* index and IndexWriter state after an Exception, and
* flushing/merging temporary free space requirements.</p>
*
* <p><b>NOTE</b>: tools that do offline splitting of an index
* (for example, IndexSplitter in contrib) or
* re-sorting of documents (for example, IndexSorter in
* contrib) are not aware of these atomically added documents
* and will likely break them up. Use such tools at your
* own risk!
*
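   * <p>Sketch of adding one parent/child block (the documents themselves are
   * assumed to be built elsewhere; block-join search expects the parent to be
   * the last document of the block):
   * <pre>
   * List&lt;Document&gt; block = new ArrayList&lt;&gt;();
   * block.add(childDoc1);
   * block.add(childDoc2);
   * block.add(parentDoc);
   * writer.addDocuments(block);
   * </pre>
   *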
* @return The <a href="#sequence_number">sequence number</a>
* for this operation
*
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
*
* @lucene.experimental
*/
public long addDocuments(Iterable<? extends Iterable<? extends IndexableField>> docs) throws IOException {
return updateDocuments((DocumentsWriterDeleteQueue.Node<?>) null, docs);
}
/**
* Atomically deletes documents matching the provided
* delTerm and adds a block of documents with sequentially
* assigned document IDs, such that an external reader
* will see all or none of the documents.
*
* See {@link #addDocuments(Iterable)}.
*
* @return The <a href="#sequence_number">sequence number</a>
* for this operation
*
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
*
* @lucene.experimental
*/
public long updateDocuments(Term delTerm, Iterable<? extends Iterable<? extends IndexableField>> docs) throws IOException {
return updateDocuments(delTerm == null ? null : DocumentsWriterDeleteQueue.newNode(delTerm), docs);
}
private long updateDocuments(final DocumentsWriterDeleteQueue.Node<?> delNode, Iterable<? extends Iterable<? extends IndexableField>> docs) throws IOException {
ensureOpen();
boolean success = false;
try {
final long seqNo = maybeProcessEvents(docWriter.updateDocuments(docs, delNode));
success = true;
return seqNo;
} catch (VirtualMachineError tragedy) {
tragicEvent(tragedy, "updateDocuments");
throw tragedy;
} finally {
if (success == false) {
if (infoStream.isEnabled("IW")) {
infoStream.message("IW", "hit exception updating document");
}
maybeCloseOnTragicEvent();
}
}
}
/**
* Expert:
* Atomically updates documents matching the provided
* term with the given doc-values fields
* and adds a block of documents with sequentially
* assigned document IDs, such that an external reader
* will see all or none of the documents.
*
* One use of this API is to retain older versions of
* documents instead of replacing them. The existing
* documents can be updated to reflect they are no
* longer current while atomically adding new documents
* at the same time.
*
* In contrast to {@link #updateDocuments(Term, Iterable)}
* this method will not delete documents in the index
* matching the given term but instead update them with
* the given doc-values fields which can be used as a
* soft-delete mechanism.
*
* See {@link #addDocuments(Iterable)}
* and {@link #updateDocuments(Term, Iterable)}.
*
*
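   * <p>Sketch, assuming the writer was configured with
   * {@link IndexWriterConfig#setSoftDeletesField} using the field name
   * {@code "soft_delete"} (the names are illustrative):
   * <pre>
   * writer.softUpdateDocuments(new Term("id", "1"), newVersions,
   *     new NumericDocValuesField("soft_delete", 1));
   * </pre>
   *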
* @return The <a href="#sequence_number">sequence number</a>
* for this operation
*
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
*
* @lucene.experimental
*/
public long softUpdateDocuments(Term term, Iterable<? extends Iterable<? extends IndexableField>> docs, Field... softDeletes) throws IOException {
if (term == null) {
throw new IllegalArgumentException("term must not be null");
}
if (softDeletes == null || softDeletes.length == 0) {
throw new IllegalArgumentException("at least one soft delete must be present");
}
return updateDocuments(DocumentsWriterDeleteQueue.newNode(buildDocValuesUpdate(term, softDeletes)), docs);
}
/** Expert: attempts to delete by document ID, as long as
* the provided reader is a near-real-time reader (from {@link
* DirectoryReader#open(IndexWriter)}). If the
* provided reader is an NRT reader obtained from this
* writer, and its segment has not been merged away, then
* the delete succeeds and this method returns a valid (> 0) sequence
* number; else, it returns -1 and the caller must then
* separately delete by Term or Query.
*
* <b>NOTE</b>: this method can only delete documents
* visible to the currently open NRT reader. If you need
* to delete documents indexed after opening the NRT
   * reader you must use {@link #deleteDocuments(Term...)}. */
public synchronized long tryDeleteDocument(IndexReader readerIn, int docID) throws IOException {
// NOTE: DON'T use docID inside the closure
return tryModifyDocument(readerIn, docID, (leafDocId, rld) -> {
if (rld.delete(leafDocId)) {
if (isFullyDeleted(rld)) {
dropDeletedSegment(rld.info);
checkpoint();
}
// Must bump changeCount so if no other changes
// happened, we still commit this change:
changed();
}
});
}
/** Expert: attempts to update doc values by document ID, as long as
* the provided reader is a near-real-time reader (from {@link
* DirectoryReader#open(IndexWriter)}). If the
* provided reader is an NRT reader obtained from this
* writer, and its segment has not been merged away, then
* the update succeeds and this method returns a valid (> 0) sequence
   * number; else, it returns -1 and the caller must then
   * retry the update after resolving the document again.
   * If a doc-values field's data is <code>null</code> the existing
* value is removed from all documents matching the term. This can be used
* to un-delete a soft-deleted document since this method will apply the
* field update even if the document is marked as deleted.
*
   * <b>NOTE</b>: this method can only update documents
* visible to the currently open NRT reader. If you need
* to update documents indexed after opening the NRT
* reader you must use {@link #updateDocValues(Term, Field...)}. */
public synchronized long tryUpdateDocValue(IndexReader readerIn, int docID, Field... fields) throws IOException {
// NOTE: DON'T use docID inside the closure
final DocValuesUpdate[] dvUpdates = buildDocValuesUpdate(null, fields);
return tryModifyDocument(readerIn, docID, (leafDocId, rld) -> {
long nextGen = bufferedUpdatesStream.getNextGen();
try {
Map<String, DocValuesFieldUpdates> fieldUpdatesMap = new HashMap<>();
for (DocValuesUpdate update : dvUpdates) {
DocValuesFieldUpdates docValuesFieldUpdates = fieldUpdatesMap.computeIfAbsent(update.field, k -> {
switch (update.type) {
case NUMERIC:
return new NumericDocValuesFieldUpdates(nextGen, k, rld.info.info.maxDoc());
case BINARY:
return new BinaryDocValuesFieldUpdates(nextGen, k, rld.info.info.maxDoc());
default:
throw new AssertionError("type: " + update.type + " is not supported");
}
});
if (update.hasValue()) {
switch (update.type) {
case NUMERIC:
docValuesFieldUpdates.add(leafDocId, ((NumericDocValuesUpdate) update).getValue());
break;
case BINARY:
docValuesFieldUpdates.add(leafDocId, ((BinaryDocValuesUpdate) update).getValue());
break;
default:
throw new AssertionError("type: " + update.type + " is not supported");
}
} else {
docValuesFieldUpdates.reset(leafDocId);
}
}
for (DocValuesFieldUpdates updates : fieldUpdatesMap.values()) {
updates.finish();
rld.addDVUpdate(updates);
}
} finally {
bufferedUpdatesStream.finishedSegment(nextGen);
}
// Must bump changeCount so if no other changes
// happened, we still commit this change:
changed();
});
}
@FunctionalInterface
private interface DocModifier {
void run(int docId, ReadersAndUpdates readersAndUpdates) throws IOException;
}
private synchronized long tryModifyDocument(IndexReader readerIn, int docID, DocModifier toApply) throws IOException {
final LeafReader reader;
if (readerIn instanceof LeafReader) {
// Reader is already atomic: use the incoming docID:
reader = (LeafReader) readerIn;
} else {
// Composite reader: lookup sub-reader and re-base docID:
List<LeafReaderContext> leaves = readerIn.leaves();
int subIndex = ReaderUtil.subIndex(docID, leaves);
reader = leaves.get(subIndex).reader();
docID -= leaves.get(subIndex).docBase;
assert docID >= 0;
assert docID < reader.maxDoc();
}
if (!(reader instanceof SegmentReader)) {
throw new IllegalArgumentException("the reader must be a SegmentReader or composite reader containing only SegmentReaders");
}
final SegmentCommitInfo info = ((SegmentReader) reader).getOriginalSegmentInfo();
// TODO: this is a slow linear search, but, number of
// segments should be contained unless something is
// seriously wrong w/ the index, so it should be a minor
// cost:
if (segmentInfos.indexOf(info) != -1) {
ReadersAndUpdates rld = getPooledInstance(info, false);
if (rld != null) {
synchronized(bufferedUpdatesStream) {
toApply.run(docID, rld);
return docWriter.getNextSequenceNumber();
}
}
}
return -1;
}
/** Drops a segment that has 100% deleted documents. */
private synchronized void dropDeletedSegment(SegmentCommitInfo info) throws IOException {
// If a merge has already registered for this
// segment, we leave it in the readerPool; the
// merge will skip merging it and will then drop
// it once it's done:
if (mergingSegments.contains(info) == false) {
// it's possible that we invoke this method more than once for the same SCI
// we must only remove the docs once!
boolean dropPendingDocs = segmentInfos.remove(info);
try {
// this is sneaky - we might hit an exception while dropping a reader but then we have already
// removed the segment for the segmentInfo and we lost the pendingDocs update due to that.
// therefore we execute the adjustPendingNumDocs in a finally block to account for that.
dropPendingDocs |= readerPool.drop(info);
} finally {
if (dropPendingDocs) {
adjustPendingNumDocs(-info.info.maxDoc());
}
}
}
}
/**
* Deletes the document(s) containing any of the
* terms. All given deletes are applied and flushed atomically
* at the same time.
*
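   * <p>Sketch (the term field and values are placeholders):
   * <pre>
   * writer.deleteDocuments(new Term("id", "1"), new Term("id", "2"));
   * </pre>
   *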
* @return The <a href="#sequence_number">sequence number</a>
* for this operation
*
* @param terms array of terms to identify the documents
* to be deleted
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
*/
public long deleteDocuments(Term... terms) throws IOException {
ensureOpen();
try {
return maybeProcessEvents(docWriter.deleteTerms(terms));
} catch (VirtualMachineError tragedy) {
tragicEvent(tragedy, "deleteDocuments(Term..)");
throw tragedy;
}
}
/**
* Deletes the document(s) matching any of the provided queries.
* All given deletes are applied and flushed atomically at the same time.
*
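   * <p>Sketch (the field name is a placeholder; any query type works):
   * <pre>
   * writer.deleteDocuments(IntPoint.newRangeQuery("year", 1990, 1999));
   * </pre>
   *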
* @return The <a href="#sequence_number">sequence number</a>
* for this operation
*
* @param queries array of queries to identify the documents
* to be deleted
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
*/
public long deleteDocuments(Query... queries) throws IOException {
ensureOpen();
// LUCENE-6379: Specialize MatchAllDocsQuery
for(Query query : queries) {
if (query.getClass() == MatchAllDocsQuery.class) {
return deleteAll();
}
}
try {
return maybeProcessEvents(docWriter.deleteQueries(queries));
} catch (VirtualMachineError tragedy) {
tragicEvent(tragedy, "deleteDocuments(Query..)");
throw tragedy;
}
}
/**
* Updates a document by first deleting the document(s)
* containing <code>term</code> and then adding the new
* document. The delete and then add are atomic as seen
* by a reader on the same index (flush may happen only after
* the add).
*
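   * <p>Sketch (the term and replacement document are placeholders):
   * <pre>
   * writer.updateDocument(new Term("id", "1"), newDoc);
   * </pre>
   *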
* @return The <a href="#sequence_number">sequence number</a>
* for this operation
*
* @param term the term to identify the document(s) to be
* deleted
* @param doc the document to be added
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
*/
public long updateDocument(Term term, Iterable<? extends IndexableField> doc) throws IOException {
return updateDocuments(term == null ? null : DocumentsWriterDeleteQueue.newNode(term), List.of(doc));
}
/**
* Expert:
* Updates a document by first updating the document(s)
* containing <code>term</code> with the given doc-values fields
* and then adding the new document. The doc-values update and
* then add are atomic as seen by a reader on the same index
* (flush may happen only after the add).
*
* One use of this API is to retain older versions of
* documents instead of replacing them. The existing
* documents can be updated to reflect they are no
* longer current while atomically adding new documents
* at the same time.
*
* In contrast to {@link #updateDocument(Term, Iterable)}
* this method will not delete documents in the index
* matching the given term but instead update them with
* the given doc-values fields which can be used as a
* soft-delete mechanism.
*
* See {@link #addDocuments(Iterable)}
* and {@link #updateDocuments(Term, Iterable)}.
*
*
* @return The <a href="#sequence_number">sequence number</a>
* for this operation
*
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
*
* @lucene.experimental
*/
public long softUpdateDocument(Term term, Iterable<? extends IndexableField> doc, Field... softDeletes) throws IOException {
if (term == null) {
throw new IllegalArgumentException("term must not be null");
}
if (softDeletes == null || softDeletes.length == 0) {
throw new IllegalArgumentException("at least one soft delete must be present");
}
return updateDocuments(DocumentsWriterDeleteQueue.newNode(buildDocValuesUpdate(term, softDeletes)), List.of(doc));
}
/**
* Updates a document's {@link NumericDocValues} for <code>field</code> to the
* given <code>value</code>. You can only update fields that already exist in
* the index, not add new fields through this method.
*
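   * <p>Sketch; {@code "views"} stands in for an existing numeric
   * doc-values field:
   * <pre>
   * writer.updateNumericDocValue(new Term("id", "1"), "views", 42L);
   * </pre>
   *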
* @param term
* the term to identify the document(s) to be updated
* @param field
* field name of the {@link NumericDocValues} field
* @param value
* new value for the field
*
* @return The <a href="#sequence_number">sequence number</a>
* for this operation
*
* @throws CorruptIndexException
* if the index is corrupt
* @throws IOException
* if there is a low-level IO error
*/
public long updateNumericDocValue(Term term, String field, long value) throws IOException {
ensureOpen();
if (!globalFieldNumberMap.contains(field, DocValuesType.NUMERIC)) {
throw new IllegalArgumentException("can only update existing numeric-docvalues fields!");
}
if (config.getIndexSortFields().contains(field)) {
throw new IllegalArgumentException("cannot update docvalues field involved in the index sort, field=" + field + ", sort=" + config.getIndexSort());
}
try {
return maybeProcessEvents(docWriter.updateDocValues(new NumericDocValuesUpdate(term, field, value)));
} catch (VirtualMachineError tragedy) {
tragicEvent(tragedy, "updateNumericDocValue");
throw tragedy;
}
}
/**
* Updates a document's {@link BinaryDocValues} for <code>field</code> to the
* given <code>value</code>. You can only update fields that already exist in
* the index, not add new fields through this method.
*
* <p>
* <b>NOTE:</b> this method currently replaces the existing value of all
* affected documents with the new value.
*
* @param term
* the term to identify the document(s) to be updated
* @param field
* field name of the {@link BinaryDocValues} field
* @param value
* new value for the field
*
* @return The <a href="#sequence_number">sequence number</a>
* for this operation
*
* @throws CorruptIndexException
* if the index is corrupt
* @throws IOException
* if there is a low-level IO error
*/
public long updateBinaryDocValue(Term term, String field, BytesRef value) throws IOException {
ensureOpen();
if (value == null) {
throw new IllegalArgumentException("cannot update a field to a null value: " + field);
}
if (!globalFieldNumberMap.contains(field, DocValuesType.BINARY)) {
throw new IllegalArgumentException("can only update existing binary-docvalues fields!");
}
try {
return maybeProcessEvents(docWriter.updateDocValues(new BinaryDocValuesUpdate(term, field, value)));
} catch (VirtualMachineError tragedy) {
tragicEvent(tragedy, "updateBinaryDocValue");
throw tragedy;
}
}
/**
* Updates documents' DocValues fields to the given values. Each field update
   * is applied, with the same value, to the set of documents that are
   * associated with the {@link Term}. All updates are atomically applied and
   * flushed together. If a doc-values field's data is <code>null</code> the existing
* value is removed from all documents matching the term.
*
*
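   * <p>Sketch; the field names are placeholders for doc-values fields that
   * already exist in the index:
   * <pre>
   * writer.updateDocValues(new Term("id", "1"),
   *     new NumericDocValuesField("views", 42L),
   *     new BinaryDocValuesField("payload", new BytesRef("x")));
   * </pre>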
* @param updates
* the updates to apply
*
* @return The <a href="#sequence_number">sequence number</a>
* for this operation
*
* @throws CorruptIndexException
* if the index is corrupt
* @throws IOException
* if there is a low-level IO error
*/
public long updateDocValues(Term term, Field... updates) throws IOException {
ensureOpen();
DocValuesUpdate[] dvUpdates = buildDocValuesUpdate(term, updates);
try {
return maybeProcessEvents(docWriter.updateDocValues(dvUpdates));
} catch (VirtualMachineError tragedy) {
tragicEvent(tragedy, "updateDocValues");
throw tragedy;
}
}
private DocValuesUpdate[] buildDocValuesUpdate(Term term, Field[] updates) {
DocValuesUpdate[] dvUpdates = new DocValuesUpdate[updates.length];
for (int i = 0; i < updates.length; i++) {
final Field f = updates[i];
final DocValuesType dvType = f.fieldType().docValuesType();
if (dvType == null) {
throw new NullPointerException("DocValuesType must not be null (field: \"" + f.name() + "\")");
}
if (dvType == DocValuesType.NONE) {
throw new IllegalArgumentException("can only update NUMERIC or BINARY fields! field=" + f.name());
}
if (globalFieldNumberMap.contains(f.name(), dvType) == false) {
// if this field doesn't exists we try to add it. if it exists and the DV type doesn't match we
// get a consistent error message as if you try to do that during an indexing operation.
globalFieldNumberMap.addOrGet(f.name(), -1, IndexOptions.NONE, dvType, 0, 0, 0, f.name().equals(config.softDeletesField));
assert globalFieldNumberMap.contains(f.name(), dvType);
}
if (config.getIndexSortFields().contains(f.name())) {
throw new IllegalArgumentException("cannot update docvalues field involved in the index sort, field=" + f.name() + ", sort=" + config.getIndexSort());
}
switch (dvType) {
case NUMERIC:
Long value = (Long)f.numericValue();
dvUpdates[i] = new NumericDocValuesUpdate(term, f.name(), value);
break;
case BINARY:
dvUpdates[i] = new BinaryDocValuesUpdate(term, f.name(), f.binaryValue());
break;
default:
throw new IllegalArgumentException("can only update NUMERIC or BINARY fields: field=" + f.name() + ", type=" + dvType);
}
}
return dvUpdates;
}
// for test purpose
final synchronized int getSegmentCount(){
return segmentInfos.size();
}
// for test purpose
final synchronized int getNumBufferedDocuments(){
return docWriter.getNumDocs();
}
// for test purpose
final synchronized int maxDoc(int i) {
if (i >= 0 && i < segmentInfos.size()) {
return segmentInfos.info(i).info.maxDoc();
} else {
return -1;
}
}
// for test purpose
final int getFlushCount() {
return flushCount.get();
}
// for test purpose
final int getFlushDeletesCount() {
return flushDeletesCount.get();
}
private final String newSegmentName() {
// Cannot synchronize on IndexWriter because that causes
// deadlock
synchronized(segmentInfos) {
// Important to increment changeCount so that the
// segmentInfos is written on close. Otherwise we
// could close, re-open and re-return the same segment
// name that was previously returned which can cause
// problems at least with ConcurrentMergeScheduler.
changeCount.incrementAndGet();
segmentInfos.changed();
return "_" + Long.toString(segmentInfos.counter++, Character.MAX_RADIX);
}
}
/** If enabled, information about merges will be printed to this.
*/
private final InfoStream infoStream;
/**
* Forces merge policy to merge segments until there are
* {@code <= maxNumSegments}. The actual merges to be
* executed are determined by the {@link MergePolicy}.
*
* <p>This is a horribly costly operation, especially when
* you pass a small {@code maxNumSegments}; usually you
* should only call this if the index is static (will no
* longer be changed).</p>
*
* <p>Note that this requires free space that is proportional
* to the size of the index in your Directory: 2X if you are
* not using compound file format, and 3X if you are.
* For example, if your index size is 10 MB then you need
* an additional 20 MB free for this to complete (30 MB if
* you're using compound file format). This is also affected
* by the {@link Codec} that is used to execute the merge,
* and may result in even a bigger index. Also, it's best
* to call {@link #commit()} afterwards, to allow IndexWriter
* to free up disk space.</p>
*
* <p>If some but not all readers re-open while merging
* is underway, this will cause {@code > 2X} temporary
* space to be consumed as those new readers will then
* hold open the temporary segments at that time. It is
* best not to re-open readers while merging is running.</p>
*
* <p>The actual temporary usage could be much less than
* these figures (it depends on many factors).</p>
*
* <p>In general, once this completes, the total size of the
* index will be less than the size of the starting index.
* It could be quite a bit smaller (if there were many
* pending deletes) or just slightly smaller.</p>
*
* <p>If an Exception is hit, for example
* due to disk full, the index will not be corrupted and no
* documents will be lost. However, it may have
* been partially merged (some segments were merged but
* not all), and it's possible that one of the segments in
* the index will be in non-compound format even when
* using compound file format. This will occur when the
* Exception is hit during conversion of the segment into
* compound format.</p>
*
* <p>This call will merge those segments present in
* the index when the call started. If other threads are
* still adding documents and flushing segments, those
* newly created segments will not be merged unless you
* call forceMerge again.</p>
*
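   * <p>Typical call for an index that will no longer change (a sketch;
   * forcing down to a single segment is the common but most costly case):
   * <pre>
   * writer.forceMerge(1);
   * writer.commit();
   * </pre>
   *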
* @param maxNumSegments maximum number of segments left
* in the index after merging finishes
*
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
* @see MergePolicy#findMerges
*
*/
public void forceMerge(int maxNumSegments) throws IOException {
forceMerge(maxNumSegments, true);
}
/** Just like {@link #forceMerge(int)}, except you can
* specify whether the call should block until
* all merging completes. This is only meaningful with a
* {@link MergeScheduler} that is able to run merges in
* background threads.
*/
public void forceMerge(int maxNumSegments, boolean doWait) throws IOException {
ensureOpen();
if (maxNumSegments < 1) {
throw new IllegalArgumentException("maxNumSegments must be >= 1; got " + maxNumSegments);
}
if (infoStream.isEnabled("IW")) {
infoStream.message("IW", "forceMerge: index now " + segString());
infoStream.message("IW", "now flush at forceMerge");
}
flush(true, true);
synchronized(this) {
resetMergeExceptions();
segmentsToMerge.clear();
for(SegmentCommitInfo info : segmentInfos) {
assert info != null;
segmentsToMerge.put(info, Boolean.TRUE);
}
mergeMaxNumSegments = maxNumSegments;
// Now mark all pending & running merges for forced
// merge:
for(final MergePolicy.OneMerge merge : pendingMerges) {
merge.maxNumSegments = maxNumSegments;
if (merge.info != null) {
// this can be null since we register the merge under lock before we then do the actual merge and
// set the merge.info in _mergeInit
segmentsToMerge.put(merge.info, Boolean.TRUE);
}
}
for (final MergePolicy.OneMerge merge: runningMerges) {
merge.maxNumSegments = maxNumSegments;
if (merge.info != null) {
// this can be null since we put the merge on runningMerges before we do the actual merge and
// set the merge.info in _mergeInit
segmentsToMerge.put(merge.info, Boolean.TRUE);
}
}
}
maybeMerge(config.getMergePolicy(), MergeTrigger.EXPLICIT, maxNumSegments);
if (doWait) {
synchronized(this) {
while(true) {
if (tragedy.get() != null) {
throw new IllegalStateException("this writer hit an unrecoverable error; cannot complete forceMerge", tragedy.get());
}
if (mergeExceptions.size() > 0) {
// Forward any exceptions in background merge
// threads to the current thread:
final int size = mergeExceptions.size();
for(int i=0;i<size;i++) {
final MergePolicy.OneMerge merge = mergeExceptions.get(i);
if (merge.maxNumSegments != UNBOUNDED_MAX_MERGE_SEGMENTS) {
throw new IOException("background merge hit exception: " + merge.segString(), merge.getException());
}
}
}
if (maxNumSegmentsMergesPending()) {
testPoint("forceMergeBeforeWait");
doWait();
} else {
break;
}
}
}
// If close is called while we are still
// running, throw an exception so the calling
// thread will know merging did not
// complete
ensureOpen();
}
// NOTE: in the ConcurrentMergeScheduler case, when
// doWait is false, we can return immediately while
// background threads accomplish the merging
}
/** Returns true if any merges in pendingMerges or
* runningMerges are maxNumSegments merges. */
private synchronized boolean maxNumSegmentsMergesPending() {
for (final MergePolicy.OneMerge merge : pendingMerges) {
if (merge.maxNumSegments != UNBOUNDED_MAX_MERGE_SEGMENTS)
return true;
}
for (final MergePolicy.OneMerge merge : runningMerges) {
if (merge.maxNumSegments != UNBOUNDED_MAX_MERGE_SEGMENTS)
return true;
}
return false;
}
/** Just like {@link #forceMergeDeletes()}, except you can
* specify whether the call should block until the
* operation completes. This is only meaningful with a
* {@link MergeScheduler} that is able to run merges in
* background threads. */
public void forceMergeDeletes(boolean doWait)
throws IOException {
ensureOpen();
flush(true, true);
if (infoStream.isEnabled("IW")) {
infoStream.message("IW", "forceMergeDeletes: index now " + segString());
}
final MergePolicy mergePolicy = config.getMergePolicy();
MergePolicy.MergeSpecification spec;
boolean newMergesFound = false;
synchronized(this) {
spec = mergePolicy.findForcedDeletesMerges(segmentInfos, this);
newMergesFound = spec != null;
if (newMergesFound) {
final int numMerges = spec.merges.size();
for(int i=0;i<numMerges;i++)
registerMerge(spec.merges.get(i));
}
}
mergeScheduler.merge(mergeSource, MergeTrigger.EXPLICIT);
if (spec != null && doWait) {
final int numMerges = spec.merges.size();
synchronized(this) {
boolean running = true;
while(running) {
if (tragedy.get() != null) {
throw new IllegalStateException("this writer hit an unrecoverable error; cannot complete forceMergeDeletes", tragedy.get());
}
// Check each merge that MergePolicy asked us to
// do, to see if any of them are still running and
// if any of them have hit an exception.
running = false;
for(int i=0;i<numMerges;i++) {
final MergePolicy.OneMerge merge = spec.merges.get(i);
if (pendingMerges.contains(merge) || runningMerges.contains(merge)) {
running = true;
}
Throwable t = merge.getException();
if (t != null) {
throw new IOException("background merge hit exception: " + merge.segString(), t);
}
}
// If any of our merges are still running, wait:
if (running)
doWait();
}
}
}
// NOTE: in the ConcurrentMergeScheduler case, when
// doWait is false, we can return immediately while
// background threads accomplish the merging
}
/**
* Forces merging of all segments that have deleted
* documents. The actual merges to be executed are
* determined by the {@link MergePolicy}. For example,
* the default {@link TieredMergePolicy} will only
* pick a segment if the percentage of
* deleted docs is over 10%.
*
* <p>This is often a horribly costly operation; rarely
* is it warranted.</p>
*
* <p>To see how
* many deletions you have pending in your index, call
* {@link IndexReader#numDeletedDocs}.</p>
*
* <p><b>NOTE</b>: this method first flushes a new
* segment (if there are indexed documents), and applies
* all buffered deletes.
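   *
   * <p>A minimal, illustrative sketch (the {@code writer} and {@code dir}
   * variables below are assumed to exist and are not part of this method):
   * <pre>{@code
   * try (DirectoryReader reader = DirectoryReader.open(dir)) {
   *   if (reader.numDeletedDocs() > 0) {   // how many deletes are pending
   *     writer.forceMergeDeletes();        // blocks until the selected merges finish
   *     writer.commit();
   *   }
   * }
   * }</pre>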
*/
public void forceMergeDeletes() throws IOException {
forceMergeDeletes(true);
}
/**
* Expert: asks the mergePolicy whether any merges are
* necessary now and if so, runs the requested merges and
   * then iterates (tests again if merges are needed) until no
* more merges are returned by the mergePolicy.
*
* Explicit calls to maybeMerge() are usually not
* necessary. The most common case is when merge policy
* parameters have changed.
*
* This method will call the {@link MergePolicy} with
* {@link MergeTrigger#EXPLICIT}.
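   *
   * <p>Illustrative sketch of the "parameters changed" case; {@code tieredPolicy}
   * is assumed to be the {@link TieredMergePolicy} previously installed on this
   * writer's config:
   * <pre>{@code
   * tieredPolicy.setSegmentsPerTier(5.0);  // tighten the policy so more merges become eligible
   * writer.maybeMerge();                   // ask the policy to re-evaluate and schedule any new merges
   * }</pre>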
*/
public final void maybeMerge() throws IOException {
maybeMerge(config.getMergePolicy(), MergeTrigger.EXPLICIT, UNBOUNDED_MAX_MERGE_SEGMENTS);
}
private final void maybeMerge(MergePolicy mergePolicy, MergeTrigger trigger, int maxNumSegments) throws IOException {
ensureOpen(false);
if (updatePendingMerges(mergePolicy, trigger, maxNumSegments) != null) {
executeMerge(trigger);
}
}
final void executeMerge(MergeTrigger trigger) throws IOException {
mergeScheduler.merge(mergeSource, trigger);
}
private synchronized MergePolicy.MergeSpecification updatePendingMerges(MergePolicy mergePolicy, MergeTrigger trigger, int maxNumSegments)
throws IOException {
// In case infoStream was disabled on init, but then enabled at some
// point, try again to log the config here:
messageState();
assert maxNumSegments == UNBOUNDED_MAX_MERGE_SEGMENTS || maxNumSegments > 0;
assert trigger != null;
if (merges.areEnabled() == false) {
return null;
}
// Do not start new merges if disaster struck
if (tragedy.get() != null) {
return null;
}
final MergePolicy.MergeSpecification spec;
if (maxNumSegments != UNBOUNDED_MAX_MERGE_SEGMENTS) {
assert trigger == MergeTrigger.EXPLICIT || trigger == MergeTrigger.MERGE_FINISHED :
"Expected EXPLICT or MERGE_FINISHED as trigger even with maxNumSegments set but was: " + trigger.name();
spec = mergePolicy.findForcedMerges(segmentInfos, maxNumSegments, Collections.unmodifiableMap(segmentsToMerge), this);
if (spec != null) {
final int numMerges = spec.merges.size();
for(int i=0;i<numMerges;i++) {
final MergePolicy.OneMerge merge = spec.merges.get(i);
merge.maxNumSegments = maxNumSegments;
}
}
} else {
switch (trigger) {
case GET_READER:
case COMMIT:
spec = mergePolicy.findFullFlushMerges(trigger, segmentInfos, this);
break;
default:
spec = mergePolicy.findMerges(trigger, segmentInfos, this);
}
}
if (spec != null) {
final int numMerges = spec.merges.size();
for(int i=0;i<numMerges;i++) {
registerMerge(spec.merges.get(i));
}
}
return spec;
}
/** Expert: to be used by a {@link MergePolicy} to avoid
* selecting merges for segments already being merged.
* The returned collection is not cloned, and thus is
* only safe to access if you hold IndexWriter's lock
* (which you do when IndexWriter invokes the
* MergePolicy).
*
* <p>The Set is unmodifiable. */
public synchronized Set<SegmentCommitInfo> getMergingSegments() {
return Collections.unmodifiableSet(mergingSegments);
}
/**
* Expert: the {@link MergeScheduler} calls this method to retrieve the next
* merge requested by the MergePolicy
*
* @lucene.experimental
*/
private synchronized MergePolicy.OneMerge getNextMerge() {
if (pendingMerges.size() == 0) {
return null;
} else {
// Advance the merge from pending to running
MergePolicy.OneMerge merge = pendingMerges.removeFirst();
runningMerges.add(merge);
return merge;
}
}
/**
* Expert: returns true if there are merges waiting to be scheduled.
*
* @lucene.experimental
*/
public synchronized boolean hasPendingMerges() {
return pendingMerges.size() != 0;
}
/**
* Close the <code>IndexWriter</code> without committing
* any changes that have occurred since the last commit
* (or since it was opened, if commit hasn't been called).
* This removes any temporary files that had been created,
* after which the state of the index will be the same as
* it was when commit() was last called or when this
* writer was first opened. This also clears a previous
* call to {@link #prepareCommit}.
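   *
   * <p>Illustrative sketch (the {@code writer} and {@code docs} variables are
   * assumptions for the example):
   * <pre>{@code
   * try {
   *   for (Document doc : docs) {
   *     writer.addDocument(doc);
   *   }
   *   writer.commit();    // keep the batch
   * } catch (IOException e) {
   *   writer.rollback();  // discard everything since the last commit and close the writer
   *   throw e;
   * }
   * }</pre>
   *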
* @throws IOException if there is a low-level IO error
*/
@Override
public void rollback() throws IOException {
// don't call ensureOpen here: this acts like "close()" in closeable.
// Ensure that only one thread actually gets to do the
// closing, and make sure no commit is also in progress:
if (shouldClose(true)) {
rollbackInternal();
}
}
private void rollbackInternal() throws IOException {
// Make sure no commit is running, else e.g. we can close while another thread is still fsync'ing:
synchronized(commitLock) {
rollbackInternalNoCommit();
assert pendingNumDocs.get() == segmentInfos.totalMaxDoc()
: "pendingNumDocs " + pendingNumDocs.get() + " != " + segmentInfos.totalMaxDoc() + " totalMaxDoc";
}
}
private void rollbackInternalNoCommit() throws IOException {
if (infoStream.isEnabled("IW")) {
infoStream.message("IW", "rollback");
}
try {
synchronized (this) {
        // must be synced otherwise registerMerge might throw an exception if merges
// changes concurrently, abortMerges is synced as well
abortMerges(); // this disables merges forever since we are closing and can't reenable them
assert mergingSegments.isEmpty() : "we aborted all merges but still have merging segments: " + mergingSegments;
}
if (infoStream.isEnabled("IW")) {
infoStream.message("IW", "rollback: done finish merges");
}
// Must pre-close in case it increments changeCount so that we can then
// set it to false before calling rollbackInternal
mergeScheduler.close();
docWriter.close(); // mark it as closed first to prevent subsequent indexing actions/flushes
      assert !Thread.holdsLock(this) : "IndexWriter lock should never be held when aborting";
docWriter.abort(); // don't sync on IW here
docWriter.flushControl.waitForFlush(); // wait for all concurrently running flushes
publishFlushedSegments(true); // empty the flush ticket queue otherwise we might not have cleaned up all resources
eventQueue.close();
synchronized (this) {
if (pendingCommit != null) {
pendingCommit.rollbackCommit(directory);
try {
deleter.decRef(pendingCommit);
} finally {
pendingCommit = null;
notifyAll();
}
}
final int totalMaxDoc = segmentInfos.totalMaxDoc();
// Keep the same segmentInfos instance but replace all
// of its SegmentInfo instances so IFD below will remove
// any segments we flushed since the last commit:
segmentInfos.rollbackSegmentInfos(rollbackSegments);
int rollbackMaxDoc = segmentInfos.totalMaxDoc();
// now we need to adjust this back to the rolled back SI but don't set it to the absolute value
        // otherwise we might hide internal bugs
adjustPendingNumDocs(-(totalMaxDoc - rollbackMaxDoc));
if (infoStream.isEnabled("IW")) {
infoStream.message("IW", "rollback: infos=" + segString(segmentInfos));
}
testPoint("rollback before checkpoint");
// Ask deleter to locate unreferenced files & remove
// them ... only when we are not experiencing a tragedy, else
// these methods throw ACE:
if (tragedy.get() == null) {
deleter.checkpoint(segmentInfos, false);
deleter.refresh();
deleter.close();
}
lastCommitChangeCount = changeCount.get();
// Don't bother saving any changes in our segmentInfos
readerPool.close();
// Must set closed while inside same sync block where we call deleter.refresh, else concurrent threads may try to sneak a flush in,
// after we leave this sync block and before we enter the sync block in the finally clause below that sets closed:
closed = true;
IOUtils.close(writeLock); // release write lock
writeLock = null;
closed = true;
closing = false;
// So any "concurrently closing" threads wake up and see that the close has now completed:
notifyAll();
}
} catch (Throwable throwable) {
try {
// Must not hold IW's lock while closing
// mergeScheduler: this can lead to deadlock,
// e.g. TestIW.testThreadInterruptDeadlock
IOUtils.closeWhileHandlingException(mergeScheduler);
synchronized (this) {
// we tried to be nice about it: do the minimum
// don't leak a segments_N file if there is a pending commit
if (pendingCommit != null) {
try {
pendingCommit.rollbackCommit(directory);
deleter.decRef(pendingCommit);
} catch (Throwable t) {
throwable.addSuppressed(t);
}
pendingCommit = null;
}
// close all the closeables we can (but important is readerPool and writeLock to prevent leaks)
IOUtils.closeWhileHandlingException(readerPool, deleter, writeLock);
writeLock = null;
closed = true;
closing = false;
// So any "concurrently closing" threads wake up and see that the close has now completed:
notifyAll();
}
} catch (Throwable t) {
throwable.addSuppressed(t);
} finally {
if (throwable instanceof VirtualMachineError) {
try {
tragicEvent(throwable, "rollbackInternal");
} catch (Throwable t1){
throwable.addSuppressed(t1);
}
}
}
throw throwable;
}
}
/**
* Delete all documents in the index.
*
* <p>
* This method will drop all buffered documents and will remove all segments
* from the index. This change will not be visible until a {@link #commit()}
* has been called. This method can be rolled back using {@link #rollback()}.
* </p>
*
* <p>
   * NOTE: this method is much faster than using deleteDocuments( new
   * MatchAllDocsQuery() ). Yet, this method also has different semantics
   * compared to {@link #deleteDocuments(Query...)}: internal
   * data-structures are cleared, all segment information is
   * forcefully dropped, anti-viral semantics like omitting norms are reset, and
   * doc value types are cleared. Essentially, a call to {@link #deleteAll()} is
   * equivalent to creating a new {@link IndexWriter} with
   * {@link OpenMode#CREATE}, whereas a delete query only marks documents as
   * deleted.
* </p>
*
* <p>
* NOTE: this method will forcefully abort all merges in progress. If other
* threads are running {@link #forceMerge}, {@link #addIndexes(CodecReader[])}
* or {@link #forceMergeDeletes} methods, they may receive
* {@link MergePolicy.MergeAbortedException}s.
*
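   * <p>Illustrative sketch ({@code writer} is assumed to be an open writer):
   * <pre>{@code
   * writer.deleteAll();   // drops buffered docs and all segments; not yet visible
   * writer.commit();      // publish the now-empty index (rollback() would undo the deleteAll instead)
   * }</pre>
   *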
* @return The <a href="#sequence_number">sequence number</a>
* for this operation
*/
@SuppressWarnings("try")
public long deleteAll() throws IOException {
ensureOpen();
// Remove any buffered docs
boolean success = false;
    /* hold the full flush lock to prevent concurrent commits / NRT reopens from
     * getting in our way and doing unnecessary work. -- if we don't lock this here we might
     * get in trouble. */
/*
* We first abort and trash everything we have in-memory
* and keep the thread-states locked, the lockAndAbortAll operation
     * also guarantees "point in time semantics", i.e. the checkpoint that we need in terms
     * of logical happens-before relationship in the DW. So we do
     * abort all in-memory structures.
     * We also drop global field numbering during abort to make
* sure it's just like a fresh index.
*/
try {
synchronized (fullFlushLock) {
try (Closeable finalizer = docWriter.lockAndAbortAll()) {
processEvents(false);
synchronized (this) {
try {
// Abort any running merges
try {
abortMerges();
assert merges.areEnabled() == false : "merges should be disabled - who enabled them?";
assert mergingSegments.isEmpty() : "found merging segments but merges are disabled: " + mergingSegments;
} finally {
// abortMerges disables all merges and we need to re-enable them here to make sure
// IW can function properly. An exception in abortMerges() might be fatal for IW but just to be sure
// lets re-enable merges anyway.
merges.enable();
}
adjustPendingNumDocs(-segmentInfos.totalMaxDoc());
// Remove all segments
segmentInfos.clear();
// Ask deleter to locate unreferenced files & remove them:
deleter.checkpoint(segmentInfos, false);
/* don't refresh the deleter here since there might
* be concurrent indexing requests coming in opening
* files on the directory after we called DW#abort()
* if we do so these indexing requests might hit FNF exceptions.
* We will remove the files incrementally as we go...
*/
// Don't bother saving any changes in our segmentInfos
readerPool.dropAll();
// Mark that the index has changed
changeCount.incrementAndGet();
segmentInfos.changed();
globalFieldNumberMap.clear();
success = true;
long seqNo = docWriter.getNextSequenceNumber();
return seqNo;
} finally {
if (success == false) {
if (infoStream.isEnabled("IW")) {
infoStream.message("IW", "hit exception during deleteAll");
}
}
}
}
}
}
} catch (VirtualMachineError tragedy) {
tragicEvent(tragedy, "deleteAll");
throw tragedy;
}
}
/** Aborts running merges. Be careful when using this
* method: when you abort a long-running merge, you lose
* a lot of work that must later be redone. */
private synchronized void abortMerges() throws IOException {
merges.disable();
// Abort all pending & running merges:
IOUtils.applyToAll(pendingMerges, merge -> {
if (infoStream.isEnabled("IW")) {
infoStream.message("IW", "now abort pending merge " + segString(merge.segments));
}
abortOneMerge(merge);
mergeFinish(merge);
});
pendingMerges.clear();
for (final MergePolicy.OneMerge merge : runningMerges) {
if (infoStream.isEnabled("IW")) {
infoStream.message("IW", "now abort running merge " + segString(merge.segments));
}
merge.setAborted();
}
// We wait here to make all merges stop. It should not
// take very long because they periodically check if
// they are aborted.
while (runningMerges.size() + runningAddIndexesMerges.size() != 0) {
if (infoStream.isEnabled("IW")) {
infoStream.message("IW", "now wait for " + runningMerges.size()
+ " running merge/s to abort; currently running addIndexes: " + runningAddIndexesMerges.size());
}
doWait();
}
notifyAll();
if (infoStream.isEnabled("IW")) {
infoStream.message("IW", "all running merges have aborted");
}
}
/**
* Wait for any currently outstanding merges to finish.
*
* <p>It is guaranteed that any merges started prior to calling this method
* will have completed once this method completes.</p>
*/
void waitForMerges() throws IOException {
// Give merge scheduler last chance to run, in case
// any pending merges are waiting. We can't hold IW's lock
// when going into merge because it can lead to deadlock.
mergeScheduler.merge(mergeSource, MergeTrigger.CLOSING);
synchronized (this) {
ensureOpen(false);
if (infoStream.isEnabled("IW")) {
infoStream.message("IW", "waitForMerges");
}
while (pendingMerges.size() > 0 || runningMerges.size() > 0) {
doWait();
}
// sanity check
assert 0 == mergingSegments.size();
if (infoStream.isEnabled("IW")) {
infoStream.message("IW", "waitForMerges done");
}
}
}
/**
* Called whenever the SegmentInfos has been updated and
* the index files referenced exist (correctly) in the
* index directory.
*/
private synchronized void checkpoint() throws IOException {
changed();
deleter.checkpoint(segmentInfos, false);
}
/** Checkpoints with IndexFileDeleter, so it's aware of
* new files, and increments changeCount, so on
* close/commit we will write a new segments file, but
* does NOT bump segmentInfos.version. */
private synchronized void checkpointNoSIS() throws IOException {
changeCount.incrementAndGet();
deleter.checkpoint(segmentInfos, false);
}
/** Called internally if any index state has changed. */
private synchronized void changed() {
changeCount.incrementAndGet();
segmentInfos.changed();
}
private synchronized long publishFrozenUpdates(FrozenBufferedUpdates packet) {
assert packet != null && packet.any();
long nextGen = bufferedUpdatesStream.push(packet);
// Do this as an event so it applies higher in the stack when we are not holding DocumentsWriterFlushQueue.purgeLock:
eventQueue.add(w -> {
try {
// we call tryApply here since we don't want to block if a refresh or a flush is already applying the
// packet. The flush will retry this packet anyway to ensure all of them are applied
tryApply(packet);
} catch (Throwable t) {
try {
w.onTragicEvent(t, "applyUpdatesPacket");
} catch (Throwable t1) {
t.addSuppressed(t1);
}
throw t;
}
w.flushDeletesCount.incrementAndGet();
});
return nextGen;
}
/**
* Atomically adds the segment private delete packet and publishes the flushed
* segments SegmentInfo to the index writer.
*/
private synchronized void publishFlushedSegment(SegmentCommitInfo newSegment, FieldInfos fieldInfos,
FrozenBufferedUpdates packet, FrozenBufferedUpdates globalPacket,
Sorter.DocMap sortMap) throws IOException {
boolean published = false;
try {
// Lock order IW -> BDS
ensureOpen(false);
if (infoStream.isEnabled("IW")) {
infoStream.message("IW", "publishFlushedSegment " + newSegment);
}
if (globalPacket != null && globalPacket.any()) {
publishFrozenUpdates(globalPacket);
}
      // Publishing the segment must be sync'd on IW -> BDS to make sure
// that no merge prunes away the seg. private delete packet
final long nextGen;
if (packet != null && packet.any()) {
nextGen = publishFrozenUpdates(packet);
} else {
// Since we don't have a delete packet to apply we can get a new
// generation right away
nextGen = bufferedUpdatesStream.getNextGen();
// No deletes/updates here, so marked finished immediately:
bufferedUpdatesStream.finishedSegment(nextGen);
}
if (infoStream.isEnabled("IW")) {
infoStream.message("IW", "publish sets newSegment delGen=" + nextGen + " seg=" + segString(newSegment));
}
newSegment.setBufferedDeletesGen(nextGen);
segmentInfos.add(newSegment);
published = true;
checkpoint();
if (packet != null && packet.any() && sortMap != null) {
// TODO: not great we do this heavyish op while holding IW's monitor lock,
// but it only applies if you are using sorted indices and updating doc values:
ReadersAndUpdates rld = getPooledInstance(newSegment, true);
rld.sortMap = sortMap;
// DON't release this ReadersAndUpdates we need to stick with that sortMap
}
FieldInfo fieldInfo = fieldInfos.fieldInfo(config.softDeletesField); // will return null if no soft deletes are present
      // this is a corner case where documents delete themselves with soft deletes. This is used to
// build delete tombstones etc. in this case we haven't seen any updates to the DV in this fresh flushed segment.
// if we have seen updates the update code checks if the segment is fully deleted.
boolean hasInitialSoftDeleted = (fieldInfo != null
&& fieldInfo.getDocValuesGen() == -1
&& fieldInfo.getDocValuesType() != DocValuesType.NONE);
final boolean isFullyHardDeleted = newSegment.getDelCount() == newSegment.info.maxDoc();
// we either have a fully hard-deleted segment or one or more docs are soft-deleted. In both cases we need
// to go and check if they are fully deleted. This has the nice side-effect that we now have accurate numbers
// for the soft delete right after we flushed to disk.
if (hasInitialSoftDeleted || isFullyHardDeleted){
      // this operation is only really executed if needed and, if soft-deletes are not configured, it will only be executed
// if we deleted all docs in this newly flushed segment.
ReadersAndUpdates rld = getPooledInstance(newSegment, true);
try {
if (isFullyDeleted(rld)) {
dropDeletedSegment(newSegment);
checkpoint();
}
} finally {
release(rld);
}
}
} finally {
if (published == false) {
adjustPendingNumDocs(-newSegment.info.maxDoc());
}
flushCount.incrementAndGet();
doAfterFlush();
}
}
private synchronized void resetMergeExceptions() {
mergeExceptions.clear();
mergeGen++;
}
private void noDupDirs(Directory... dirs) {
HashSet<Directory> dups = new HashSet<>();
for(int i=0;i<dirs.length;i++) {
if (dups.contains(dirs[i]))
throw new IllegalArgumentException("Directory " + dirs[i] + " appears more than once");
if (dirs[i] == directoryOrig)
throw new IllegalArgumentException("Cannot add directory to itself");
dups.add(dirs[i]);
}
}
/** Acquires write locks on all the directories; be sure
* to match with a call to {@link IOUtils#close} in a
* finally clause. */
private List<Lock> acquireWriteLocks(Directory... dirs) throws IOException {
List<Lock> locks = new ArrayList<>(dirs.length);
for(int i=0;i<dirs.length;i++) {
boolean success = false;
try {
Lock lock = dirs[i].obtainLock(WRITE_LOCK_NAME);
locks.add(lock);
success = true;
} finally {
if (success == false) {
// Release all previously acquired locks:
// TODO: addSuppressed? it could be many...
IOUtils.closeWhileHandlingException(locks);
}
}
}
return locks;
}
/**
* Adds all segments from an array of indexes into this index.
*
* <p>This may be used to parallelize batch indexing. A large document
* collection can be broken into sub-collections. Each sub-collection can be
* indexed in parallel, on a different thread, process or machine. The
* complete index can then be created by merging sub-collection indexes
* with this method.
*
* <p>
* <b>NOTE:</b> this method acquires the write lock in
* each directory, to ensure that no {@code IndexWriter}
* is currently open or tries to open while this is
* running.
*
* <p>This method is transactional in how Exceptions are
* handled: it does not commit a new segments_N file until
* all indexes are added. This means if an Exception
* occurs (for example disk full), then either no indexes
* will have been added or they all will have been.
*
* <p>Note that this requires temporary free space in the
* {@link Directory} up to 2X the sum of all input indexes
* (including the starting index). If readers/searchers
* are open against the starting index, then temporary
* free space required will be higher by the size of the
* starting index (see {@link #forceMerge(int)} for details).
*
* <p>This requires this index not be among those to be added.
*
* <p>All added indexes must have been created by the same
* Lucene version as this index.
*
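   * <p>Illustrative sketch ({@code mainDir}, {@code shardDir1} and {@code shardDir2}
   * are assumed {@link Directory} instances built elsewhere):
   * <pre>{@code
   * try (IndexWriter writer = new IndexWriter(mainDir, new IndexWriterConfig())) {
   *   writer.addIndexes(shardDir1, shardDir2);  // acquires the write lock on each source directory
   *   writer.commit();
   * }
   * }</pre>
   *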
* @return The <a href="#sequence_number">sequence number</a>
* for this operation
*
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
* @throws IllegalArgumentException if addIndexes would cause
   *   the index to exceed {@link #MAX_DOCS}, or if the incoming
* index sort does not match this index's index sort
*/
public long addIndexes(Directory... dirs) throws IOException {
ensureOpen();
noDupDirs(dirs);
List<Lock> locks = acquireWriteLocks(dirs);
Sort indexSort = config.getIndexSort();
boolean successTop = false;
long seqNo;
try {
if (infoStream.isEnabled("IW")) {
infoStream.message("IW", "flush at addIndexes(Directory...)");
}
flush(false, true);
List<SegmentCommitInfo> infos = new ArrayList<>();
// long so we can detect int overflow:
long totalMaxDoc = 0;
List<SegmentInfos> commits = new ArrayList<>(dirs.length);
for (Directory dir : dirs) {
if (infoStream.isEnabled("IW")) {
infoStream.message("IW", "addIndexes: process directory " + dir);
}
SegmentInfos sis = SegmentInfos.readLatestCommit(dir); // read infos from dir
if (segmentInfos.getIndexCreatedVersionMajor() != sis.getIndexCreatedVersionMajor()) {
throw new IllegalArgumentException("Cannot use addIndexes(Directory) with indexes that have been created "
+ "by a different Lucene version. The current index was generated by Lucene "
+ segmentInfos.getIndexCreatedVersionMajor()
+ " while one of the directories contains an index that was generated with Lucene "
+ sis.getIndexCreatedVersionMajor());
}
totalMaxDoc += sis.totalMaxDoc();
commits.add(sis);
}
// Best-effort up front check:
testReserveDocs(totalMaxDoc);
boolean success = false;
try {
for (SegmentInfos sis : commits) {
for (SegmentCommitInfo info : sis) {
assert !infos.contains(info): "dup info dir=" + info.info.dir + " name=" + info.info.name;
Sort segmentIndexSort = info.info.getIndexSort();
if (indexSort != null && (segmentIndexSort == null || isCongruentSort(indexSort, segmentIndexSort) == false)) {
throw new IllegalArgumentException("cannot change index sort from " + segmentIndexSort + " to " + indexSort);
}
String newSegName = newSegmentName();
if (infoStream.isEnabled("IW")) {
infoStream.message("IW", "addIndexes: process segment origName=" + info.info.name + " newName=" + newSegName + " info=" + info);
}
IOContext context = new IOContext(new FlushInfo(info.info.maxDoc(), info.sizeInBytes()));
FieldInfos fis = readFieldInfos(info);
for(FieldInfo fi : fis) {
// This will throw exceptions if any of the incoming fields have an illegal schema change:
globalFieldNumberMap.addOrGet(fi.name, fi.number, fi.getIndexOptions(), fi.getDocValuesType(), fi.getPointDimensionCount(), fi.getPointIndexDimensionCount(), fi.getPointNumBytes(), fi.isSoftDeletesField());
}
infos.add(copySegmentAsIs(info, newSegName, context));
}
}
success = true;
} finally {
if (!success) {
for(SegmentCommitInfo sipc : infos) {
// Safe: these files must exist
deleteNewFiles(sipc.files());
}
}
}
synchronized (this) {
success = false;
try {
ensureOpen();
// Now reserve the docs, just before we update SIS:
reserveDocs(totalMaxDoc);
seqNo = docWriter.getNextSequenceNumber();
success = true;
} finally {
if (!success) {
for(SegmentCommitInfo sipc : infos) {
// Safe: these files must exist
deleteNewFiles(sipc.files());
}
}
}
segmentInfos.addAll(infos);
checkpoint();
}
successTop = true;
} catch (VirtualMachineError tragedy) {
tragicEvent(tragedy, "addIndexes(Directory...)");
throw tragedy;
} finally {
if (successTop) {
IOUtils.close(locks);
} else {
IOUtils.closeWhileHandlingException(locks);
}
}
maybeMerge();
return seqNo;
}
private void validateMergeReader(CodecReader leaf) {
LeafMetaData segmentMeta = leaf.getMetaData();
if (segmentInfos.getIndexCreatedVersionMajor() != segmentMeta.getCreatedVersionMajor()) {
throw new IllegalArgumentException("Cannot merge a segment that has been created with major version "
+ segmentMeta.getCreatedVersionMajor() + " into this index which has been created by major version "
+ segmentInfos.getIndexCreatedVersionMajor());
}
if (segmentInfos.getIndexCreatedVersionMajor() >= 7 && segmentMeta.getMinVersion() == null) {
throw new IllegalStateException("Indexes created on or after Lucene 7 must record the created version major, but " + leaf + " hides it");
}
Sort leafIndexSort = segmentMeta.getSort();
if (config.getIndexSort() != null &&
(leafIndexSort == null || isCongruentSort(config.getIndexSort(), leafIndexSort) == false)) {
throw new IllegalArgumentException("cannot change index sort from " + leafIndexSort + " to " + config.getIndexSort());
}
}
/**
* Merges the provided indexes into this index.
*
* <p>
* The provided IndexReaders are not closed.
*
* <p>
* See {@link #addIndexes} for details on transactional semantics, temporary
* free space required in the Directory, and non-CFS segments on an Exception.
*
* <p>
* <b>NOTE:</b> empty segments are dropped by this method and not added to this
* index.
*
* <p>
* <b>NOTE:</b> this merges all given {@link LeafReader}s in one
* merge. If you intend to merge a large number of readers, it may be better
* to call this method multiple times, each time with a small set of readers.
* In principle, if you use a merge policy with a {@code mergeFactor} or
* {@code maxMergeAtOnce} parameter, you should pass that many readers in one
* call.
*
* <p>
* <b>NOTE:</b> this method does not call or make use of the {@link MergeScheduler},
* so any custom bandwidth throttling is at the moment ignored.
*
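   * <p>Illustrative sketch; {@code otherDir} is an assumed source index, and the
   * standard {@code SlowCodecReaderWrapper} helper is used to view each leaf as a
   * {@link CodecReader}:
   * <pre>{@code
   * try (DirectoryReader reader = DirectoryReader.open(otherDir)) {
   *   CodecReader[] codecReaders = new CodecReader[reader.leaves().size()];
   *   for (int i = 0; i < codecReaders.length; i++) {
   *     codecReaders[i] = SlowCodecReaderWrapper.wrap(reader.leaves().get(i).reader());
   *   }
   *   writer.addIndexes(codecReaders);  // merges all given readers into one new segment
   * }
   * }</pre>
   *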
* @return The <a href="#sequence_number">sequence number</a>
* for this operation
*
* @throws CorruptIndexException
* if the index is corrupt
* @throws IOException
* if there is a low-level IO error
* @throws IllegalArgumentException
* if addIndexes would cause the index to exceed {@link #MAX_DOCS}
*/
public long addIndexes(CodecReader... readers) throws IOException {
ensureOpen();
// long so we can detect int overflow:
long numDocs = 0;
long seqNo;
try {
if (infoStream.isEnabled("IW")) {
infoStream.message("IW", "flush at addIndexes(CodecReader...)");
}
flush(false, true);
String mergedName = newSegmentName();
int numSoftDeleted = 0;
for (CodecReader leaf : readers) {
numDocs += leaf.numDocs();
validateMergeReader(leaf);
if (softDeletesEnabled) {
Bits liveDocs = leaf.getLiveDocs();
numSoftDeleted += PendingSoftDeletes.countSoftDeletes(
DocValuesFieldExistsQuery.getDocValuesDocIdSetIterator(config.getSoftDeletesField(), leaf), liveDocs);
}
}
// Best-effort up front check:
testReserveDocs(numDocs);
final IOContext context = new IOContext(new MergeInfo(Math.toIntExact(numDocs), -1, false, UNBOUNDED_MAX_MERGE_SEGMENTS));
// TODO: somehow we should fix this merge so it's
// abortable so that IW.close(false) is able to stop it
TrackingDirectoryWrapper trackingDir = new TrackingDirectoryWrapper(directory);
Codec codec = config.getCodec();
// We set the min version to null for now, it will be set later by SegmentMerger
SegmentInfo info = new SegmentInfo(directoryOrig, Version.LATEST, null, mergedName, -1,
false, codec, Collections.emptyMap(), StringHelper.randomId(), Collections.emptyMap(), config.getIndexSort());
SegmentMerger merger = new SegmentMerger(Arrays.asList(readers), info, infoStream, trackingDir,
globalFieldNumberMap,
context);
if (!merger.shouldMerge()) {
return docWriter.getNextSequenceNumber();
}
synchronized (this) {
ensureOpen();
assert merges.areEnabled();
runningAddIndexesMerges.add(merger);
}
try {
merger.merge(); // merge 'em
} finally {
synchronized (this) {
runningAddIndexesMerges.remove(merger);
notifyAll();
}
}
SegmentCommitInfo infoPerCommit = new SegmentCommitInfo(info, 0, numSoftDeleted, -1L, -1L, -1L, StringHelper.randomId());
info.setFiles(new HashSet<>(trackingDir.getCreatedFiles()));
trackingDir.clearCreatedFiles();
setDiagnostics(info, SOURCE_ADDINDEXES_READERS);
final MergePolicy mergePolicy = config.getMergePolicy();
boolean useCompoundFile;
synchronized(this) { // Guard segmentInfos
if (merges.areEnabled() == false) {
// Safe: these files must exist
deleteNewFiles(infoPerCommit.files());
return docWriter.getNextSequenceNumber();
}
ensureOpen();
useCompoundFile = mergePolicy.useCompoundFile(segmentInfos, infoPerCommit, this);
}
// Now create the compound file if needed
if (useCompoundFile) {
Collection<String> filesToDelete = infoPerCommit.files();
TrackingDirectoryWrapper trackingCFSDir = new TrackingDirectoryWrapper(directory);
      // TODO: unlike merge, on exception we aren't sniping any trash cfs files here?
// createCompoundFile tries to cleanup, but it might not always be able to...
try {
createCompoundFile(infoStream, trackingCFSDir, info, context, this::deleteNewFiles);
} finally {
// delete new non cfs files directly: they were never
// registered with IFD
deleteNewFiles(filesToDelete);
}
info.setUseCompoundFile(true);
}
// Have codec write SegmentInfo. Must do this after
// creating CFS so that 1) .si isn't slurped into CFS,
// and 2) .si reflects useCompoundFile=true change
// above:
codec.segmentInfoFormat().write(trackingDir, info, context);
info.addFiles(trackingDir.getCreatedFiles());
// Register the new segment
synchronized(this) {
if (merges.areEnabled() == false) {
// Safe: these files must exist
deleteNewFiles(infoPerCommit.files());
return docWriter.getNextSequenceNumber();
}
ensureOpen();
// Now reserve the docs, just before we update SIS:
reserveDocs(numDocs);
segmentInfos.add(infoPerCommit);
seqNo = docWriter.getNextSequenceNumber();
checkpoint();
}
} catch (VirtualMachineError tragedy) {
tragicEvent(tragedy, "addIndexes(CodecReader...)");
throw tragedy;
}
maybeMerge();
return seqNo;
}
/** Copies the segment files as-is into the IndexWriter's directory. */
private SegmentCommitInfo copySegmentAsIs(SegmentCommitInfo info, String segName, IOContext context) throws IOException {
// Same SI as before but we change directory and name
SegmentInfo newInfo = new SegmentInfo(directoryOrig, info.info.getVersion(), info.info.getMinVersion(), segName, info.info.maxDoc(),
info.info.getUseCompoundFile(), info.info.getCodec(),
info.info.getDiagnostics(), info.info.getId(), info.info.getAttributes(), info.info.getIndexSort());
SegmentCommitInfo newInfoPerCommit = new SegmentCommitInfo(newInfo, info.getDelCount(), info.getSoftDelCount(), info.getDelGen(),
info.getFieldInfosGen(), info.getDocValuesGen(), info.getId());
newInfo.setFiles(info.info.files());
newInfoPerCommit.setFieldInfosFiles(info.getFieldInfosFiles());
newInfoPerCommit.setDocValuesUpdatesFiles(info.getDocValuesUpdatesFiles());
boolean success = false;
Set<String> copiedFiles = new HashSet<>();
try {
// Copy the segment's files
for (String file: info.files()) {
final String newFileName = newInfo.namedForThisSegment(file);
directory.copyFrom(info.info.dir, file, newFileName, context);
copiedFiles.add(newFileName);
}
success = true;
} finally {
if (!success) {
// Safe: these files must exist
deleteNewFiles(copiedFiles);
}
}
assert copiedFiles.equals(newInfoPerCommit.files()): "copiedFiles=" + copiedFiles + " vs " + newInfoPerCommit.files();
return newInfoPerCommit;
}
/**
* A hook for extending classes to execute operations after pending added and
* deleted documents have been flushed to the Directory but before the change
* is committed (new segments_N file written).
*/
protected void doAfterFlush() throws IOException {}
/**
* A hook for extending classes to execute operations before pending added and
* deleted documents are flushed to the Directory.
*/
protected void doBeforeFlush() throws IOException {}
/** <p>Expert: prepare for commit. This does the
* first phase of 2-phase commit. This method does all
* steps necessary to commit changes since this writer
* was opened: flushes pending added and deleted docs,
* syncs the index files, writes most of next segments_N
* file. After calling this you must call either {@link
* #commit()} to finish the commit, or {@link
* #rollback()} to revert the commit and undo all changes
* done since the writer was opened.</p>
*
* <p>You can also just call {@link #commit()} directly
* without prepareCommit first in which case that method
* will internally call prepareCommit.
*
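   * <p>Illustrative two-phase-commit sketch; {@code txLog} is a hypothetical
   * second participant and not part of this API:
   * <pre>{@code
   * long seqNo = writer.prepareCommit();  // phase one: flush, fsync, stage the next segments_N
   * try {
   *   txLog.prepare();                    // hypothetical other resource prepares as well
   *   writer.commit();                    // phase two: publish the prepared commit
   * } catch (Exception e) {
   *   writer.rollback();                  // abandon the prepared commit
   *   throw e;
   * }
   * }</pre>
   *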
* @return The <a href="#sequence_number">sequence number</a>
   * of the last operation in the commit. All sequence numbers &lt;= this value
* will be reflected in the commit, and all others will not.
*/
@Override
public final long prepareCommit() throws IOException {
ensureOpen();
pendingSeqNo = prepareCommitInternal();
// we must do this outside of the commitLock else we can deadlock:
if (maybeMerge.getAndSet(false)) {
maybeMerge(config.getMergePolicy(), MergeTrigger.FULL_FLUSH, UNBOUNDED_MAX_MERGE_SEGMENTS);
}
return pendingSeqNo;
}
/**
* <p>Expert: Flushes the next pending writer per thread buffer if available or the largest active
* non-pending writer per thread buffer in the calling thread.
* This can be used to flush documents to disk outside of an indexing thread. In contrast to {@link #flush()}
* this won't mark all currently active indexing buffers as flush-pending.
*
* Note: this method is best-effort and might not flush any segments to disk. If there is a full flush happening
   * concurrently, multiple segments might have been flushed.
   * Users of this API can access the IndexWriter's current memory consumption via {@link #ramBytesUsed()}
   * </p>
   * @return <code>true</code> iff this method flushed at least one segment to disk.
* @lucene.experimental
*/
public final boolean flushNextBuffer() throws IOException {
try {
if (docWriter.flushOneDWPT()) {
processEvents(true);
return true; // we wrote a segment
}
return false;
} catch (VirtualMachineError tragedy) {
tragicEvent(tragedy, "flushNextBuffer");
throw tragedy;
} finally {
maybeCloseOnTragicEvent();
}
}
private long prepareCommitInternal() throws IOException {
startCommitTime = System.nanoTime();
synchronized(commitLock) {
ensureOpen(false);
if (infoStream.isEnabled("IW")) {
infoStream.message("IW", "prepareCommit: flush");
infoStream.message("IW", " index before flush " + segString());
}
if (tragedy.get() != null) {
throw new IllegalStateException("this writer hit an unrecoverable error; cannot commit", tragedy.get());
}
if (pendingCommit != null) {
throw new IllegalStateException("prepareCommit was already called with no corresponding call to commit");
}
doBeforeFlush();
testPoint("startDoFlush");
SegmentInfos toCommit = null;
boolean anyChanges = false;
long seqNo;
MergePolicy.MergeSpecification pointInTimeMerges = null;
AtomicBoolean stopAddingMergedSegments = new AtomicBoolean(false);
final long maxCommitMergeWaitMillis = config.getMaxFullFlushMergeWaitMillis();
// This is copied from doFlush, except it's modified to
// clone & incRef the flushed SegmentInfos inside the
// sync block:
try {
synchronized (fullFlushLock) {
boolean flushSuccess = false;
boolean success = false;
try {
seqNo = docWriter.flushAllThreads();
if (seqNo < 0) {
anyChanges = true;
seqNo = -seqNo;
}
if (anyChanges == false) {
// prevent double increment since docWriter#doFlush increments the flushcount
// if we flushed anything.
flushCount.incrementAndGet();
}
publishFlushedSegments(true);
// cannot pass triggerMerges=true here else it can lead to deadlock:
processEvents(false);
flushSuccess = true;
applyAllDeletesAndUpdates();
synchronized(this) {
writeReaderPool(true);
if (changeCount.get() != lastCommitChangeCount) {
// There are changes to commit, so we will write a new segments_N in startCommit.
// The act of committing is itself an NRT-visible change (an NRT reader that was
// just opened before this should see it on reopen) so we increment changeCount
// and segments version so a future NRT reopen will see the change:
changeCount.incrementAndGet();
segmentInfos.changed();
}
if (commitUserData != null) {
Map<String,String> userData = new HashMap<>();
for(Map.Entry<String,String> ent : commitUserData) {
userData.put(ent.getKey(), ent.getValue());
}
segmentInfos.setUserData(userData, false);
}
// Must clone the segmentInfos while we still
// hold fullFlushLock and while sync'd so that
// no partial changes (eg a delete w/o
// corresponding add from an updateDocument) can
// sneak into the commit point:
toCommit = segmentInfos.clone();
pendingCommitChangeCount = changeCount.get();
// This protects the segmentInfos we are now going
// to commit. This is important in case, eg, while
// we are trying to sync all referenced files, a
// merge completes which would otherwise have
// removed the files we are now syncing.
deleter.incRef(toCommit.files(false));
if (anyChanges && maxCommitMergeWaitMillis > 0) {
// we can safely call preparePointInTimeMerge since writeReaderPool(true) above wrote all
// necessary files to disk and checkpointed them.
pointInTimeMerges = preparePointInTimeMerge(toCommit, stopAddingMergedSegments::get, MergeTrigger.COMMIT, sci->{});
}
}
success = true;
} finally {
if (!success) {
if (infoStream.isEnabled("IW")) {
infoStream.message("IW", "hit exception during prepareCommit");
}
}
assert Thread.holdsLock(fullFlushLock);
// Done: finish the full flush!
docWriter.finishFullFlush(flushSuccess);
doAfterFlush();
}
}
} catch (VirtualMachineError tragedy) {
tragicEvent(tragedy, "prepareCommit");
throw tragedy;
} finally {
maybeCloseOnTragicEvent();
}
if (pointInTimeMerges != null) {
if (infoStream.isEnabled("IW")) {
infoStream.message("IW", "now run merges during commit: " + pointInTimeMerges.segString(directory));
}
mergeScheduler.merge(mergeSource, MergeTrigger.COMMIT);
pointInTimeMerges.await(maxCommitMergeWaitMillis, TimeUnit.MILLISECONDS);
if (infoStream.isEnabled("IW")) {
infoStream.message("IW", "done waiting for merges during commit");
}
synchronized (this) {
// we need to call this under lock since mergeFinished above is also called under the IW lock
stopAddingMergedSegments.set(true);
}
}
// do this after handling any pointInTimeMerges since the files will have changed if any merges
// did complete
filesToCommit = toCommit.files(false);
try {
if (anyChanges) {
maybeMerge.set(true);
}
startCommit(toCommit);
if (pendingCommit == null) {
return -1;
} else {
return seqNo;
}
} catch (Throwable t) {
synchronized (this) {
if (filesToCommit != null) {
try {
deleter.decRef(filesToCommit);
} catch (Throwable t1) {
t.addSuppressed(t1);
} finally {
filesToCommit = null;
}
}
}
throw t;
}
}
}
/**
* This optimization allows a commit/getReader to wait for merges on smallish segments to
* reduce the eventual number of tiny segments in the commit point / NRT Reader. We wrap a {@code OneMerge} to
* update the {@code mergingSegmentInfos} once the merge has finished. We replace the source segments
* in the SIS that we are going to commit / open the reader on with the freshly merged segment, but ignore all deletions and updates
* that are made to documents in the merged segment while it was merging. The updates that are made do not belong to
* the point-in-time commit point / NRT READER and should therefore not be included. See the clone call in {@code onMergeComplete}
* below. We also ensure that we pull the merge readers while holding {@code IndexWriter}'s lock. Otherwise
* we could see concurrent deletions/updates applied that do not belong to the segment.
*/
private MergePolicy.MergeSpecification preparePointInTimeMerge(SegmentInfos mergingSegmentInfos, BooleanSupplier stopCollectingMergeResults,
MergeTrigger trigger,
IOUtils.IOConsumer<SegmentCommitInfo> mergeFinished) throws IOException {
assert Thread.holdsLock(this);
assert trigger == MergeTrigger.GET_READER || trigger == MergeTrigger.COMMIT : "illegal trigger: " + trigger;
MergePolicy.MergeSpecification pointInTimeMerges = updatePendingMerges(new OneMergeWrappingMergePolicy(config.getMergePolicy(), toWrap ->
new MergePolicy.OneMerge(toWrap.segments) {
SegmentCommitInfo origInfo;
final AtomicBoolean onlyOnce = new AtomicBoolean(false);
@Override
public void mergeFinished(boolean committed, boolean segmentDropped) throws IOException {
assert Thread.holdsLock(IndexWriter.this);
            // stopCollectingMergeResults will be set (above, by our caller) to true if the allowed max wall clock
            // time (IWC.getMaxFullFlushMergeWaitMillis()) has elapsed, which means we did not make the timeout
// and will not commit our merge to the to-be-committed SegmentInfos
if (segmentDropped == false
&& committed
&& stopCollectingMergeResults.getAsBoolean() == false) {
// make sure onMergeComplete really was called:
assert origInfo != null;
if (infoStream.isEnabled("IW")) {
infoStream.message("IW", "now apply merge during commit: " + toWrap.segString());
}
if (trigger == MergeTrigger.COMMIT) {
// if we do this in a getReader call here this is obsolete since we already hold a reader that has
// incRef'd these files
deleter.incRef(origInfo.files());
}
Set<String> mergedSegmentNames = new HashSet<>();
for (SegmentCommitInfo sci : segments) {
mergedSegmentNames.add(sci.info.name);
}
List<SegmentCommitInfo> toCommitMergedAwaySegments = new ArrayList<>();
for (SegmentCommitInfo sci : mergingSegmentInfos) {
if (mergedSegmentNames.contains(sci.info.name)) {
toCommitMergedAwaySegments.add(sci);
if (trigger == MergeTrigger.COMMIT) {
// if we do this in a getReader call here this is obsolete since we already hold a reader that has
// incRef'd these files and will decRef them when it's closed
deleter.decRef(sci.files());
}
}
}
// Construct a OneMerge that applies to toCommit
MergePolicy.OneMerge applicableMerge = new MergePolicy.OneMerge(toCommitMergedAwaySegments);
applicableMerge.info = origInfo;
long segmentCounter = Long.parseLong(origInfo.info.name.substring(1), Character.MAX_RADIX);
mergingSegmentInfos.counter = Math.max(mergingSegmentInfos.counter, segmentCounter + 1);
mergingSegmentInfos.applyMergeChanges(applicableMerge, false);
} else {
if (infoStream.isEnabled("IW")) {
infoStream.message("IW", "skip apply merge during commit: " + toWrap.segString());
}
}
toWrap.mergeFinished(committed, segmentDropped);
super.mergeFinished(committed, segmentDropped);
}
@Override
void onMergeComplete() throws IOException {
assert Thread.holdsLock(IndexWriter.this);
if (stopCollectingMergeResults.getAsBoolean() == false
&& isAborted() == false
                && info.info.maxDoc() > 0 /* never do this if the segment is dropped / empty */) {
mergeFinished.accept(info);
// clone the target info to make sure we have the original info without the updated del and update gens
origInfo = info.clone();
}
toWrap.onMergeComplete();
super.onMergeComplete();
}
@Override
void initMergeReaders(IOUtils.IOFunction<SegmentCommitInfo, MergePolicy.MergeReader> readerFactory) throws IOException {
if (onlyOnce.compareAndSet(false, true)) {
// we do this only once below to pull readers as point in time readers with respect to the commit point
// we try to update
super.initMergeReaders(readerFactory);
}
}
@Override
public CodecReader wrapForMerge(CodecReader reader) throws IOException {
return toWrap.wrapForMerge(reader); // must delegate
}
}
), trigger, UNBOUNDED_MAX_MERGE_SEGMENTS);
if (pointInTimeMerges != null) {
boolean closeReaders = true;
try {
for (MergePolicy.OneMerge merge : pointInTimeMerges.merges) {
IOContext context = new IOContext(merge.getStoreMergeInfo());
merge.initMergeReaders(
sci -> {
final ReadersAndUpdates rld = getPooledInstance(sci, true);
// calling setIsMerging is important since it causes the RaU to record all DV updates
// in a separate map in order to be applied to the merged segment after it's done
rld.setIsMerging();
return rld.getReaderForMerge(context);
});
}
closeReaders = false;
} finally {
if (closeReaders) {
IOUtils.applyToAll(pointInTimeMerges.merges, merge -> {
// that merge is broken we need to clean up after it - it's fine we still have the IW lock to do this
boolean removed = pendingMerges.remove(merge);
assert removed: "merge should be pending but isn't: " + merge.segString();
try {
abortOneMerge(merge);
} finally {
mergeFinish(merge);
}
});
}
}
}
return pointInTimeMerges;
}
/**
* Ensures that all changes in the reader-pool are written to disk.
   * @param writeDeletes if <code>true</code>, deletes should be written to disk too.
*/
private void writeReaderPool(boolean writeDeletes) throws IOException {
assert Thread.holdsLock(this);
if (writeDeletes) {
if (readerPool.commit(segmentInfos)) {
checkpointNoSIS();
}
} else { // only write the docValues
if (readerPool.writeAllDocValuesUpdates()) {
checkpoint();
}
}
// now do some best effort to check if a segment is fully deleted
List<SegmentCommitInfo> toDrop = new ArrayList<>(); // don't modify segmentInfos in-place
for (SegmentCommitInfo info : segmentInfos) {
ReadersAndUpdates readersAndUpdates = readerPool.get(info, false);
if (readersAndUpdates != null) {
if (isFullyDeleted(readersAndUpdates)) {
toDrop.add(info);
}
}
}
for (SegmentCommitInfo info : toDrop) {
dropDeletedSegment(info);
}
if (toDrop.isEmpty() == false) {
checkpoint();
}
}
/**
* Sets the iterator to provide the commit user data map at commit time. Calling this method
* is considered a committable change and will be {@link #commit() committed} even if
   * there are no other changes to this writer. Note that you must call this method
* before {@link #prepareCommit()}. Otherwise it won't be included in the
* follow-on {@link #commit()}.
* <p>
* <b>NOTE:</b> the iterator is late-binding: it is only visited once all documents for the
* commit have been written to their segments, before the next segments_N file is written
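   *
   * <p>Illustrative sketch (the key and value below are arbitrary examples):
   * <pre>{@code
   * Map<String, String> commitData = new HashMap<>();
   * commitData.put("sourceVersion", "42");
   * writer.setLiveCommitData(commitData.entrySet());
   * writer.commit();  // the entries above are recorded in the new commit point
   * }</pre>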
*/
public final synchronized void setLiveCommitData(Iterable<Map.Entry<String,String>> commitUserData) {
setLiveCommitData(commitUserData, true);
}
/**
* Sets the commit user data iterator, controlling whether to advance the {@link SegmentInfos#getVersion}.
*
* @see #setLiveCommitData(Iterable)
*
* @lucene.internal */
public final synchronized void setLiveCommitData(Iterable<Map.Entry<String,String>> commitUserData, boolean doIncrementVersion) {
this.commitUserData = commitUserData;
if (doIncrementVersion) {
segmentInfos.changed();
}
changeCount.incrementAndGet();
}
/**
* Returns the commit user data iterable previously set with {@link #setLiveCommitData(Iterable)}, or null if nothing has been set yet.
*/
public final synchronized Iterable<Map.Entry<String,String>> getLiveCommitData() {
return commitUserData;
}
// Used only by commit and prepareCommit, below; lock
// order is commitLock -> IW
private final Object commitLock = new Object();
/**
* <p>Commits all pending changes (added and deleted
* documents, segment merges, added
* indexes, etc.) to the index, and syncs all referenced
* index files, such that a reader will see the changes
* and the index updates will survive an OS or machine
* crash or power loss. Note that this does not wait for
* any running background merges to finish. This may be a
* costly operation, so you should test the cost in your
* application and do it only when really necessary.</p>
*
* <p> Note that this operation calls Directory.sync on
* the index files. That call should not return until the
* file contents and metadata are on stable storage. For
* FSDirectory, this calls the OS's fsync. But, beware:
* some hardware devices may in fact cache writes even
* during fsync, and return before the bits are actually
* on stable storage, to give the appearance of faster
* performance. If you have such a device, and it does
* not have a battery backup (for example) then on power
* loss it may still lose data. Lucene cannot guarantee
* consistency on such devices. </p>
*
* <p> If nothing was committed, because there were no
* pending changes, this returns -1. Otherwise, it returns
* the sequence number such that all indexing operations
* prior to this sequence will be included in the commit
* point, and all other operations will not. </p>
*
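   * <p>Illustrative sketch ({@code writer} is assumed to be an open writer):
   * <pre>{@code
   * long seqNo = writer.commit();
   * if (seqNo == -1) {
   *   // nothing was pending; no new commit point was written
   * }
   * }</pre>
   *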
* @see #prepareCommit
*
* @return The <a href="#sequence_number">sequence number</a>
   * of the last operation in the commit. All sequence numbers &lt;= this value
* will be reflected in the commit, and all others will not.
*/
@Override
public final long commit() throws IOException {
ensureOpen();
return commitInternal(config.getMergePolicy());
}
/** Returns true if there may be changes that have not been
* committed. There are cases where this may return true
* when there are no actual "real" changes to the index,
* for example if you've deleted by Term or Query but
* that Term or Query does not match any documents.
* Also, if a merge kicked off as a result of flushing a
* new segment during {@link #commit}, or a concurrent
   * merge finished, this method may return true right
* after you had just called {@link #commit}. */
public final boolean hasUncommittedChanges() {
return changeCount.get() != lastCommitChangeCount || hasChangesInRam();
}
/**
* Returns true if there are any changes or deletes that are not flushed or applied.
*/
boolean hasChangesInRam() {
return docWriter.anyChanges() || bufferedUpdatesStream.any();
}
private long commitInternal(MergePolicy mergePolicy) throws IOException {
if (infoStream.isEnabled("IW")) {
infoStream.message("IW", "commit: start");
}
long seqNo;
synchronized(commitLock) {
ensureOpen(false);
if (infoStream.isEnabled("IW")) {
infoStream.message("IW", "commit: enter lock");
}
if (pendingCommit == null) {
if (infoStream.isEnabled("IW")) {
infoStream.message("IW", "commit: now prepare");
}
seqNo = prepareCommitInternal();
} else {
if (infoStream.isEnabled("IW")) {
infoStream.message("IW", "commit: already prepared");
}
seqNo = pendingSeqNo;
}
finishCommit();
}
// we must do this outside of the commitLock else we can deadlock:
if (maybeMerge.getAndSet(false)) {
maybeMerge(mergePolicy, MergeTrigger.FULL_FLUSH, UNBOUNDED_MAX_MERGE_SEGMENTS);
}
return seqNo;
}
@SuppressWarnings("try")
private void finishCommit() throws IOException {
boolean commitCompleted = false;
String committedSegmentsFileName = null;
try {
synchronized(this) {
ensureOpen(false);
if (tragedy.get() != null) {
throw new IllegalStateException("this writer hit an unrecoverable error; cannot complete commit", tragedy.get());
}
if (pendingCommit != null) {
final Collection<String> commitFiles = this.filesToCommit;
try (Closeable finalizer = () -> deleter.decRef(commitFiles)) {
if (infoStream.isEnabled("IW")) {
infoStream.message("IW", "commit: pendingCommit != null");
}
committedSegmentsFileName = pendingCommit.finishCommit(directory);
// we committed, if anything goes wrong after this, we are screwed and it's a tragedy:
commitCompleted = true;
if (infoStream.isEnabled("IW")) {
infoStream.message("IW", "commit: done writing segments file \"" + committedSegmentsFileName + "\"");
}
// NOTE: don't use this.checkpoint() here, because
// we do not want to increment changeCount:
deleter.checkpoint(pendingCommit, true);
// Carry over generation to our master SegmentInfos:
segmentInfos.updateGeneration(pendingCommit);
lastCommitChangeCount = pendingCommitChangeCount;
rollbackSegments = pendingCommit.createBackupSegmentInfos();
} finally {
notifyAll();
pendingCommit = null;
this.filesToCommit = null;
}
} else {
assert filesToCommit == null;
if (infoStream.isEnabled("IW")) {
infoStream.message("IW", "commit: pendingCommit == null; skip");
}
}
}
} catch (Throwable t) {
if (infoStream.isEnabled("IW")) {
infoStream.message("IW", "hit exception during finishCommit: " + t.getMessage());
}
if (commitCompleted) {
tragicEvent(t, "finishCommit");
}
throw t;
}
if (infoStream.isEnabled("IW")) {
infoStream.message("IW", String.format(Locale.ROOT, "commit: took %.1f msec", (System.nanoTime()-startCommitTime)/1000000.0));
infoStream.message("IW", "commit: done");
}
}
// Ensures only one flush() is actually flushing segments
// at a time:
private final Object fullFlushLock = new Object();
/** Moves all in-memory segments to the {@link Directory}, but does not commit
* (fsync) them (call {@link #commit} for that). */
public final void flush() throws IOException {
flush(true, true);
}
/**
* Flush all in-memory buffered updates (adds and deletes)
* to the Directory.
* @param triggerMerge if true, we may merge segments (if
* deletes or docs were flushed) if necessary
   * @param applyAllDeletes whether pending deletes should also be applied
*/
final void flush(boolean triggerMerge, boolean applyAllDeletes) throws IOException {
// NOTE: this method cannot be sync'd because
// maybeMerge() in turn calls mergeScheduler.merge which
// in turn can take a long time to run and we don't want
// to hold the lock for that. In the case of
// ConcurrentMergeScheduler this can lead to deadlock
// when it stalls due to too many running merges.
// We can be called during close, when closing==true, so we must pass false to ensureOpen:
ensureOpen(false);
if (doFlush(applyAllDeletes) && triggerMerge) {
maybeMerge(config.getMergePolicy(), MergeTrigger.FULL_FLUSH, UNBOUNDED_MAX_MERGE_SEGMENTS);
}
}
  /** Returns true if a segment was flushed or deletes were applied. */
private boolean doFlush(boolean applyAllDeletes) throws IOException {
if (tragedy.get() != null) {
throw new IllegalStateException("this writer hit an unrecoverable error; cannot flush", tragedy.get());
}
doBeforeFlush();
testPoint("startDoFlush");
boolean success = false;
try {
if (infoStream.isEnabled("IW")) {
infoStream.message("IW", " start flush: applyAllDeletes=" + applyAllDeletes);
infoStream.message("IW", " index before flush " + segString());
}
boolean anyChanges;
synchronized (fullFlushLock) {
boolean flushSuccess = false;
try {
long seqNo = docWriter.flushAllThreads();
if (seqNo < 0) {
seqNo = -seqNo;
anyChanges = true;
} else {
anyChanges = false;
}
if (!anyChanges) {
// flushCount is incremented in flushAllThreads
flushCount.incrementAndGet();
}
publishFlushedSegments(true);
flushSuccess = true;
} finally {
assert Thread.holdsLock(fullFlushLock);
docWriter.finishFullFlush(flushSuccess);
processEvents(false);
}
}
if (applyAllDeletes) {
applyAllDeletesAndUpdates();
}
anyChanges |= maybeMerge.getAndSet(false);
synchronized(this) {
writeReaderPool(applyAllDeletes);
doAfterFlush();
success = true;
return anyChanges;
}
} catch (VirtualMachineError tragedy) {
tragicEvent(tragedy, "doFlush");
throw tragedy;
} finally {
if (!success) {
if (infoStream.isEnabled("IW")) {
infoStream.message("IW", "hit exception during flush");
}
maybeCloseOnTragicEvent();
}
}
}
private void applyAllDeletesAndUpdates() throws IOException {
assert Thread.holdsLock(this) == false;
flushDeletesCount.incrementAndGet();
if (infoStream.isEnabled("IW")) {
infoStream.message("IW", "now apply all deletes for all segments buffered updates bytesUsed=" + bufferedUpdatesStream.ramBytesUsed() + " reader pool bytesUsed=" + readerPool.ramBytesUsed());
}
bufferedUpdatesStream.waitApplyAll(this);
}
// for testing only
DocumentsWriter getDocsWriter() {
return docWriter;
}
/** Expert: Return the number of documents currently
* buffered in RAM. */
public final synchronized int numRamDocs() {
ensureOpen();
return docWriter.getNumDocs();
}
private synchronized void ensureValidMerge(MergePolicy.OneMerge merge) {
for(SegmentCommitInfo info : merge.segments) {
if (!segmentInfos.contains(info)) {
throw new MergePolicy.MergeException("MergePolicy selected a segment (" + info.info.name + ") that is not in the current index " + segString());
}
}
}
/**
* Carefully merges deletes and updates for the segments we just merged. This
* is tricky because, although merging will clear all deletes (compacts the
* documents) and compact all the updates, new deletes and updates may have
* been flushed to the segments since the merge was started. This method
* "carries over" such new deletes and updates onto the newly merged segment,
* and saves the resulting deletes and updates files (incrementing the delete
* and DV generations for merge.info). If no deletes were flushed, no new
* deletes file is saved.
*/
private synchronized ReadersAndUpdates commitMergedDeletesAndUpdates(MergePolicy.OneMerge merge, MergeState mergeState) throws IOException {
mergeFinishedGen.incrementAndGet();
testPoint("startCommitMergeDeletes");
final List<SegmentCommitInfo> sourceSegments = merge.segments;
if (infoStream.isEnabled("IW")) {
infoStream.message("IW", "commitMergeDeletes " + segString(merge.segments));
}
// Carefully merge deletes that occurred after we
// started merging:
long minGen = Long.MAX_VALUE;
// Lazy init (only when we find a delete or update to carry over):
final ReadersAndUpdates mergedDeletesAndUpdates = getPooledInstance(merge.info, true);
int numDeletesBefore = mergedDeletesAndUpdates.getDelCount();
// field -> delGen -> dv field updates
Map<String,Map<Long,DocValuesFieldUpdates>> mappedDVUpdates = new HashMap<>();
boolean anyDVUpdates = false;
assert sourceSegments.size() == mergeState.docMaps.length;
for (int i = 0; i < sourceSegments.size(); i++) {
SegmentCommitInfo info = sourceSegments.get(i);
minGen = Math.min(info.getBufferedDeletesGen(), minGen);
final int maxDoc = info.info.maxDoc();
final ReadersAndUpdates rld = getPooledInstance(info, false);
// We hold a ref, from when we opened the readers during mergeInit, so it better still be in the pool:
assert rld != null: "seg=" + info.info.name;
MergeState.DocMap segDocMap = mergeState.docMaps[i];
MergeState.DocMap segLeafDocMap = mergeState.leafDocMaps[i];
carryOverHardDeletes(mergedDeletesAndUpdates, maxDoc, mergeState.liveDocs[i], merge.getMergeReader().get(i).hardLiveDocs, rld.getHardLiveDocs(),
segDocMap, segLeafDocMap);
// Now carry over all doc values updates that were resolved while we were merging, remapping the docIDs to the newly merged docIDs.
// We only carry over packets that finished resolving; if any are still running (concurrently) they will detect that our merge completed
// and re-resolve against the newly merged segment:
Map<String,List<DocValuesFieldUpdates>> mergingDVUpdates = rld.getMergingDVUpdates();
for (Map.Entry<String,List<DocValuesFieldUpdates>> ent : mergingDVUpdates.entrySet()) {
String field = ent.getKey();
Map<Long,DocValuesFieldUpdates> mappedField = mappedDVUpdates.get(field);
if (mappedField == null) {
mappedField = new HashMap<>();
mappedDVUpdates.put(field, mappedField);
}
for (DocValuesFieldUpdates updates : ent.getValue()) {
if (bufferedUpdatesStream.stillRunning(updates.delGen)) {
continue;
}
// sanity check:
assert field.equals(updates.field);
DocValuesFieldUpdates mappedUpdates = mappedField.get(updates.delGen);
if (mappedUpdates == null) {
switch (updates.type) {
case NUMERIC:
mappedUpdates = new NumericDocValuesFieldUpdates(updates.delGen, updates.field, merge.info.info.maxDoc());
break;
case BINARY:
mappedUpdates = new BinaryDocValuesFieldUpdates(updates.delGen, updates.field, merge.info.info.maxDoc());
break;
default:
throw new AssertionError();
}
mappedField.put(updates.delGen, mappedUpdates);
}
DocValuesFieldUpdates.Iterator it = updates.iterator();
int doc;
while ((doc = it.nextDoc()) != NO_MORE_DOCS) {
int mappedDoc = segDocMap.get(segLeafDocMap.get(doc));
if (mappedDoc != -1) {
if (it.hasValue()) {
// not deleted
mappedUpdates.add(mappedDoc, it);
} else {
mappedUpdates.reset(mappedDoc);
}
anyDVUpdates = true;
}
}
}
}
}
if (anyDVUpdates) {
// Persist the merged DV updates onto the RAU for the merged segment:
for(Map<Long,DocValuesFieldUpdates> d : mappedDVUpdates.values()) {
for (DocValuesFieldUpdates updates : d.values()) {
updates.finish();
mergedDeletesAndUpdates.addDVUpdate(updates);
}
}
}
if (infoStream.isEnabled("IW")) {
if (mergedDeletesAndUpdates == null) {
infoStream.message("IW", "no new deletes or field updates since merge started");
} else {
String msg = mergedDeletesAndUpdates.getDelCount() - numDeletesBefore + " new deletes";
if (anyDVUpdates) {
msg += " and " + mergedDeletesAndUpdates.getNumDVUpdates() + " new field updates";
msg += " (" + mergedDeletesAndUpdates.ramBytesUsed.get() + ") bytes";
}
msg += " since merge started";
infoStream.message("IW", msg);
}
}
merge.info.setBufferedDeletesGen(minGen);
return mergedDeletesAndUpdates;
}
/**
* This method carries over hard-deleted documents that are applied to the source segment during a merge.
*/
private static void carryOverHardDeletes(ReadersAndUpdates mergedReadersAndUpdates, int maxDoc,
Bits mergeLiveDocs, // the liveDocs used to build the segDocMaps
Bits prevHardLiveDocs, // the hard deletes when the merge reader was pulled
Bits currentHardLiveDocs, // the current hard deletes
MergeState.DocMap segDocMap, MergeState.DocMap segLeafDocMap) throws IOException {
assert mergeLiveDocs == null || mergeLiveDocs.length() == maxDoc;
// if we mix soft and hard deletes we need to make sure that we only carry over deletes
// that were not deleted before. Otherwise the segDocMap doesn't contain a mapping.
// Yet this is also required if any MergePolicy modifies the liveDocs, since this is
// what the segDocMap is built on.
final IntPredicate carryOverDelete = mergeLiveDocs == null || mergeLiveDocs == prevHardLiveDocs
? docId -> currentHardLiveDocs.get(docId) == false
: docId -> mergeLiveDocs.get(docId) && currentHardLiveDocs.get(docId) == false;
if (prevHardLiveDocs != null) {
// If we had deletions on starting the merge we must
// still have deletions now:
assert currentHardLiveDocs != null;
assert mergeLiveDocs != null;
assert prevHardLiveDocs.length() == maxDoc;
assert currentHardLiveDocs.length() == maxDoc;
// There were deletes on this segment when the merge
// started. The merge has collapsed away those
// deletes, but, if new deletes were flushed since
// the merge started, we must now carefully keep any
// newly flushed deletes but mapping them to the new
// docIDs.
// Since we copy-on-write, if any new deletes were
// applied after merging has started, we can just
// check if the before/after liveDocs have changed.
// If so, we must carefully merge the liveDocs one
// doc at a time:
if (currentHardLiveDocs != prevHardLiveDocs) {
// This means this segment received new deletes
// since we started the merge, so we
// must merge them:
for (int j = 0; j < maxDoc; j++) {
if (prevHardLiveDocs.get(j) == false) {
// if the document was deleted before, it better still be deleted!
assert currentHardLiveDocs.get(j) == false;
} else if (carryOverDelete.test(j)) {
// the document was deleted while we were merging:
mergedReadersAndUpdates.delete(segDocMap.get(segLeafDocMap.get(j)));
}
}
}
} else if (currentHardLiveDocs != null) {
assert currentHardLiveDocs.length() == maxDoc;
// This segment had no deletes before but now it
// does:
for (int j = 0; j < maxDoc; j++) {
if (carryOverDelete.test(j)) {
mergedReadersAndUpdates.delete(segDocMap.get(segLeafDocMap.get(j)));
}
}
}
}
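// Worked example (illustrative, assuming mergeLiveDocs == prevHardLiveDocs): with maxDoc=4,
// prevHardLiveDocs={1,1,0,1} at merge start and currentHardLiveDocs={1,0,0,1} now, doc 2 was
// already deleted before the merge (and collapsed away by it), so only the newly deleted
// doc 1 is carried over, remapped through segLeafDocMap and then segDocMap into the merged
// segment's docID space.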
@SuppressWarnings("try")
private synchronized boolean commitMerge(MergePolicy.OneMerge merge, MergeState mergeState) throws IOException {
merge.onMergeComplete();
testPoint("startCommitMerge");
if (tragedy.get() != null) {
throw new IllegalStateException("this writer hit an unrecoverable error; cannot complete merge", tragedy.get());
}
if (infoStream.isEnabled("IW")) {
infoStream.message("IW", "commitMerge: " + segString(merge.segments) + " index=" + segString());
}
assert merge.registerDone;
// If merge was explicitly aborted, or, if rollback() or
// rollbackTransaction() had been called since our merge
// started (which results in an unqualified
// deleter.refresh() call that will remove any index
// file that current segments does not reference), we
// abort this merge
if (merge.isAborted()) {
if (infoStream.isEnabled("IW")) {
infoStream.message("IW", "commitMerge: skip: it was aborted");
}
// In case we opened and pooled a reader for this
// segment, drop it now. This ensures that we close
// the reader before trying to delete any of its
// files. This is not a very big deal, since this
// reader will never be used by any NRT reader, and
// another thread is currently running close(false)
// so it will be dropped shortly anyway, but not
// doing this makes MockDirWrapper angry in
// TestNRTThreads (LUCENE-5434):
readerPool.drop(merge.info);
// Safe: these files must exist:
deleteNewFiles(merge.info.files());
return false;
}
final ReadersAndUpdates mergedUpdates = merge.info.info.maxDoc() == 0 ? null : commitMergedDeletesAndUpdates(merge, mergeState);
// If the doc store we are using has been closed and
// is now in compound format (but wasn't when we
// started), then we will switch to the compound
// format as well:
assert !segmentInfos.contains(merge.info);
final boolean allDeleted = merge.segments.size() == 0 ||
merge.info.info.maxDoc() == 0 ||
(mergedUpdates != null && isFullyDeleted(mergedUpdates));
if (infoStream.isEnabled("IW")) {
if (allDeleted) {
infoStream.message("IW", "merged segment " + merge.info + " is 100% deleted; skipping insert");
}
}
final boolean dropSegment = allDeleted;
// If we merged no segments then we better be dropping
// the new segment:
assert merge.segments.size() > 0 || dropSegment;
assert merge.info.info.maxDoc() != 0 || dropSegment;
if (mergedUpdates != null) {
boolean success = false;
try {
if (dropSegment) {
mergedUpdates.dropChanges();
}
// Pass false for assertInfoLive because the merged
// segment is not yet live (only below do we commit it
// to the segmentInfos):
release(mergedUpdates, false);
success = true;
} finally {
if (!success) {
mergedUpdates.dropChanges();
readerPool.drop(merge.info);
}
}
}
// Must do this after readerPool.release, in case an
// exception is hit e.g. writing the live docs for the
// merge segment, in which case we need to abort the
// merge:
segmentInfos.applyMergeChanges(merge, dropSegment);
// Now deduct the deleted docs that we just reclaimed from this
// merge:
int delDocCount;
if (dropSegment) {
// if we drop the segment we have to reduce the pendingNumDocs by merge.totalMaxDoc since we never drop
// the docs when we apply deletes while the segment is being merged.
delDocCount = merge.totalMaxDoc;
} else {
delDocCount = merge.totalMaxDoc - merge.info.info.maxDoc();
}
assert delDocCount >= 0;
adjustPendingNumDocs(-delDocCount);
if (dropSegment) {
assert !segmentInfos.contains(merge.info);
readerPool.drop(merge.info);
// Safe: these files must exist
deleteNewFiles(merge.info.files());
}
try (Closeable finalizer = this::checkpoint) {
// Must close before checkpoint, otherwise IFD won't be
// able to delete the held-open files from the merge
// readers:
closeMergeReaders(merge, false, dropSegment);
}
if (infoStream.isEnabled("IW")) {
infoStream.message("IW", "after commitMerge: " + segString());
}
if (merge.maxNumSegments != UNBOUNDED_MAX_MERGE_SEGMENTS && !dropSegment) {
// cascade the forceMerge:
if (!segmentsToMerge.containsKey(merge.info)) {
segmentsToMerge.put(merge.info, Boolean.FALSE);
}
}
return true;
}
private void handleMergeException(Throwable t, MergePolicy.OneMerge merge) throws IOException {
if (infoStream.isEnabled("IW")) {
infoStream.message("IW", "handleMergeException: merge=" + segString(merge.segments) + " exc=" + t);
}
// Set the exception on the merge, so if
// forceMerge is waiting on us it sees the root
// cause exception:
merge.setException(t);
addMergeException(merge);
if (t instanceof MergePolicy.MergeAbortedException) {
// We can ignore this exception (it happens when
// deleteAll or rollback is called), unless the
// merge involves segments from external directories,
// in which case we must throw it so, for example, the
// rollbackTransaction code in addIndexes* is
// executed.
if (merge.isExternal) { // TODO can we simplify this and just throw all the time? this would simplify this a lot
throw (MergePolicy.MergeAbortedException) t;
}
} else {
assert t != null;
throw IOUtils.rethrowAlways(t);
}
}
/**
* Merges the indicated segments, replacing them in the stack with a
* single segment.
*
* @lucene.experimental
*/
protected void merge(MergePolicy.OneMerge merge) throws IOException {
boolean success = false;
final long t0 = System.currentTimeMillis();
final MergePolicy mergePolicy = config.getMergePolicy();
try {
try {
try {
mergeInit(merge);
if (infoStream.isEnabled("IW")) {
infoStream.message("IW", "now merge\n merge=" + segString(merge.segments) + "\n index=" + segString());
}
mergeMiddle(merge, mergePolicy);
mergeSuccess(merge);
success = true;
} catch (Throwable t) {
handleMergeException(t, merge);
}
} finally {
synchronized(this) {
// Readers are already closed in commitMerge if we didn't hit
// an exc:
if (success == false) {
closeMergeReaders(merge, true, false);
}
mergeFinish(merge);
if (success == false) {
if (infoStream.isEnabled("IW")) {
infoStream.message("IW", "hit exception during merge");
}
} else if (!merge.isAborted() && (merge.maxNumSegments != UNBOUNDED_MAX_MERGE_SEGMENTS || (!closed && !closing))) {
// This merge (and, generally, any change to the
// segments) may now enable new merges, so we call
// merge policy & update pending merges.
updatePendingMerges(mergePolicy, MergeTrigger.MERGE_FINISHED, merge.maxNumSegments);
}
}
}
} catch (Throwable t) {
// Important that tragicEvent is called after mergeFinish, else we hang
// waiting for our merge thread to be removed from runningMerges:
tragicEvent(t, "merge");
throw t;
}
if (merge.info != null && merge.isAborted() == false) {
if (infoStream.isEnabled("IW")) {
infoStream.message("IW", "merge time " + (System.currentTimeMillis()-t0) + " msec for " + merge.info.info.maxDoc() + " docs");
}
}
}
/** Hook that's called when the specified merge is complete. */
protected void mergeSuccess(MergePolicy.OneMerge merge) {}
private void abortOneMerge(MergePolicy.OneMerge merge) throws IOException {
merge.setAborted();
closeMergeReaders(merge, true, false);
}
/** Checks whether this merge involves any segments
* already participating in a merge. If not, this merge
* is "registered", meaning we record that its segments
* are now participating in a merge, and true is
* returned. Else (the merge conflicts) false is
* returned. */
private synchronized boolean registerMerge(MergePolicy.OneMerge merge) throws IOException {
if (merge.registerDone) {
return true;
}
assert merge.segments.size() > 0;
if (merges.areEnabled() == false) {
abortOneMerge(merge);
throw new MergePolicy.MergeAbortedException("merge is aborted: " + segString(merge.segments));
}
boolean isExternal = false;
for(SegmentCommitInfo info : merge.segments) {
if (mergingSegments.contains(info)) {
if (infoStream.isEnabled("IW")) {
infoStream.message("IW", "reject merge " + segString(merge.segments) + ": segment " + segString(info) + " is already marked for merge");
}
return false;
}
if (!segmentInfos.contains(info)) {
if (infoStream.isEnabled("IW")) {
infoStream.message("IW", "reject merge " + segString(merge.segments) + ": segment " + segString(info) + " does not exist in live infos");
}
return false;
}
if (info.info.dir != directoryOrig) {
isExternal = true;
}
if (segmentsToMerge.containsKey(info)) {
merge.maxNumSegments = mergeMaxNumSegments;
}
}
ensureValidMerge(merge);
pendingMerges.add(merge);
if (infoStream.isEnabled("IW")) {
infoStream.message("IW", "add merge to pendingMerges: " + segString(merge.segments) + " [total " + pendingMerges.size() + " pending]");
}
merge.mergeGen = mergeGen;
merge.isExternal = isExternal;
// OK it does not conflict; now record that this merge
// is running (while synchronized) to avoid race
// condition where two conflicting merges from different
// threads start
if (infoStream.isEnabled("IW")) {
StringBuilder builder = new StringBuilder("registerMerge merging= [");
for (SegmentCommitInfo info : mergingSegments) {
builder.append(info.info.name).append(", ");
}
builder.append("]");
// don't call mergingSegments.toString() here: it could lead to a ConcurrentModificationException
// since merge updates the segment's FieldInfos
if (infoStream.isEnabled("IW")) {
infoStream.message("IW", builder.toString());
}
}
for(SegmentCommitInfo info : merge.segments) {
if (infoStream.isEnabled("IW")) {
infoStream.message("IW", "registerMerge info=" + segString(info));
}
mergingSegments.add(info);
}
assert merge.estimatedMergeBytes == 0;
assert merge.totalMergeBytes == 0;
for(SegmentCommitInfo info : merge.segments) {
if (info.info.maxDoc() > 0) {
final int delCount = numDeletedDocs(info);
assert delCount <= info.info.maxDoc();
final double delRatio = ((double) delCount)/info.info.maxDoc();
merge.estimatedMergeBytes += (long) (info.sizeInBytes() * (1.0 - delRatio));
merge.totalMergeBytes += info.sizeInBytes();
}
}
// Merge is now registered
merge.registerDone = true;
return true;
}
/** Does initial setup for a merge, which is fast but holds
* the synchronized lock on IndexWriter instance. */
final void mergeInit(MergePolicy.OneMerge merge) throws IOException {
assert Thread.holdsLock(this) == false;
// Make sure any deletes that must be resolved before we commit the merge are complete:
bufferedUpdatesStream.waitApplyForMerge(merge.segments, this);
boolean success = false;
try {
_mergeInit(merge);
success = true;
} finally {
if (!success) {
if (infoStream.isEnabled("IW")) {
infoStream.message("IW", "hit exception in mergeInit");
}
mergeFinish(merge);
}
}
}
private synchronized void _mergeInit(MergePolicy.OneMerge merge) throws IOException {
testPoint("startMergeInit");
assert merge.registerDone;
assert merge.maxNumSegments == UNBOUNDED_MAX_MERGE_SEGMENTS || merge.maxNumSegments > 0;
if (tragedy.get() != null) {
throw new IllegalStateException("this writer hit an unrecoverable error; cannot merge", tragedy.get());
}
if (merge.info != null) {
// mergeInit already done
return;
}
merge.mergeInit();
if (merge.isAborted()) {
return;
}
// TODO: in the non-pool'd case this is somewhat
// wasteful, because we open these readers, close them,
// and then open them again for merging. Maybe we
// could pre-pool them somehow in that case...
if (infoStream.isEnabled("IW")) {
infoStream.message("IW", "now apply deletes for " + merge.segments.size() + " merging segments");
}
// Must move the pending doc values updates to disk now, else the newly merged segment will not see them:
// TODO: we could fix merging to pull the merged DV iterator so we don't have to move these updates to disk first, i.e. just carry them
// in memory:
if (readerPool.writeDocValuesUpdatesForMerge(merge.segments)) {
checkpoint();
}
// Bind a new segment name here so even with
// ConcurrentMergePolicy we keep deterministic segment
// names.
final String mergeSegmentName = newSegmentName();
// We set the min version to null for now, it will be set later by SegmentMerger
SegmentInfo si = new SegmentInfo(directoryOrig, Version.LATEST, null, mergeSegmentName, -1, false, config.getCodec(),
Collections.emptyMap(), StringHelper.randomId(), Collections.emptyMap(), config.getIndexSort());
Map<String,String> details = new HashMap<>();
details.put("mergeMaxNumSegments", "" + merge.maxNumSegments);
details.put("mergeFactor", Integer.toString(merge.segments.size()));
setDiagnostics(si, SOURCE_MERGE, details);
merge.setMergeInfo(new SegmentCommitInfo(si, 0, 0, -1L, -1L, -1L, StringHelper.randomId()));
if (infoStream.isEnabled("IW")) {
infoStream.message("IW", "merge seg=" + merge.info.info.name + " " + segString(merge.segments));
}
}
static void setDiagnostics(SegmentInfo info, String source) {
setDiagnostics(info, source, null);
}
private static void setDiagnostics(SegmentInfo info, String source, Map<String,String> details) {
Map<String,String> diagnostics = new HashMap<>();
diagnostics.put("source", source);
diagnostics.put("lucene.version", Version.LATEST.toString());
diagnostics.put("os", Constants.OS_NAME);
diagnostics.put("os.arch", Constants.OS_ARCH);
diagnostics.put("os.version", Constants.OS_VERSION);
diagnostics.put("java.version", Constants.JAVA_VERSION);
diagnostics.put("java.vendor", Constants.JAVA_VENDOR);
// On IBM J9 JVM this is better than java.version which is just 1.7.0 (no update level):
diagnostics.put("java.runtime.version", System.getProperty("java.runtime.version", "undefined"));
// Hotspot version, e.g. 2.8 for J9:
diagnostics.put("java.vm.version", System.getProperty("java.vm.version", "undefined"));
diagnostics.put("timestamp", Long.toString(new Date().getTime()));
if (details != null) {
diagnostics.putAll(details);
}
info.setDiagnostics(diagnostics);
}
/** Does finishing for a merge, which is fast but holds
* the synchronized lock on IndexWriter instance. */
private synchronized void mergeFinish(MergePolicy.OneMerge merge) {
// forceMerge, addIndexes or waitForMerges may be waiting
// on merges to finish.
notifyAll();
// It's possible we are called twice, eg if there was an
// exception inside mergeInit
if (merge.registerDone) {
final List<SegmentCommitInfo> sourceSegments = merge.segments;
for (SegmentCommitInfo info : sourceSegments) {
mergingSegments.remove(info);
}
merge.registerDone = false;
}
runningMerges.remove(merge);
}
@SuppressWarnings("try")
private synchronized void closeMergeReaders(MergePolicy.OneMerge merge, boolean suppressExceptions, boolean droppedSegment) throws IOException {
if (merge.hasFinished() == false) {
final boolean drop = suppressExceptions == false;
// first call mergeFinished before we potentially drop the reader and the last reference.
merge.close(suppressExceptions == false, droppedSegment, mr -> {
final SegmentReader sr = mr.reader;
final ReadersAndUpdates rld = getPooledInstance(sr.getOriginalSegmentInfo(), false);
// We still hold a ref so it should not have been removed:
assert rld != null;
if (drop) {
rld.dropChanges();
} else {
rld.dropMergingUpdates();
}
rld.release(sr);
release(rld);
if (drop) {
readerPool.drop(rld.info);
}
});
} else {
assert merge.getMergeReader().isEmpty() : "we are done but still have readers: " + merge.getMergeReader();
assert suppressExceptions : "can't be done and not suppressing exceptions";
}
}
private void countSoftDeletes(CodecReader reader, Bits wrappedLiveDocs, Bits hardLiveDocs, Counter softDeleteCounter,
Counter hardDeleteCounter) throws IOException {
int hardDeleteCount = 0;
int softDeletesCount = 0;
DocIdSetIterator softDeletedDocs = DocValuesFieldExistsQuery.getDocValuesDocIdSetIterator(config.getSoftDeletesField(), reader);
if (softDeletedDocs != null) {
int docId;
while ((docId = softDeletedDocs.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
if (wrappedLiveDocs == null || wrappedLiveDocs.get(docId)) {
if (hardLiveDocs == null || hardLiveDocs.get(docId)) {
softDeletesCount++;
} else {
hardDeleteCount++;
}
}
}
}
softDeleteCounter.addAndGet(softDeletesCount);
hardDeleteCounter.addAndGet(hardDeleteCount);
}
private boolean assertSoftDeletesCount(CodecReader reader, int expectedCount) throws IOException {
Counter count = Counter.newCounter(false);
Counter hardDeletes = Counter.newCounter(false);
countSoftDeletes(reader, reader.getLiveDocs(), null, count, hardDeletes);
assert count.get() == expectedCount : "soft-deletes count mismatch expected: "
+ expectedCount + " but actual: " + count.get() ;
return true;
}
/** Does the actual (time-consuming) work of the merge,
* but without holding synchronized lock on IndexWriter
* instance */
private int mergeMiddle(MergePolicy.OneMerge merge, MergePolicy mergePolicy) throws IOException {
testPoint("mergeMiddleStart");
merge.checkAborted();
Directory mergeDirectory = mergeScheduler.wrapForMerge(merge, directory);
IOContext context = new IOContext(merge.getStoreMergeInfo());
final TrackingDirectoryWrapper dirWrapper = new TrackingDirectoryWrapper(mergeDirectory);
if (infoStream.isEnabled("IW")) {
infoStream.message("IW", "merging " + segString(merge.segments));
}
// This is try/finally to make sure merger's readers are
// closed:
boolean success = false;
try {
merge.initMergeReaders(sci -> {
final ReadersAndUpdates rld = getPooledInstance(sci, true);
rld.setIsMerging();
return rld.getReaderForMerge(context);
});
// Let the merge wrap readers
List<CodecReader> mergeReaders = new ArrayList<>();
Counter softDeleteCount = Counter.newCounter(false);
for (MergePolicy.MergeReader mergeReader : merge.getMergeReader()) {
SegmentReader reader = mergeReader.reader;
CodecReader wrappedReader = merge.wrapForMerge(reader);
validateMergeReader(wrappedReader);
if (softDeletesEnabled) {
if (reader != wrappedReader) { // if we don't have a wrapped reader we won't preserve any soft-deletes
Bits hardLiveDocs = mergeReader.hardLiveDocs;
if (hardLiveDocs != null) { // we only need to do this accounting if we have mixed deletes
Bits wrappedLiveDocs = wrappedReader.getLiveDocs();
Counter hardDeleteCounter = Counter.newCounter(false);
countSoftDeletes(wrappedReader, wrappedLiveDocs, hardLiveDocs, softDeleteCount, hardDeleteCounter);
int hardDeleteCount = Math.toIntExact(hardDeleteCounter.get());
// Wrap the wrapped reader again if we have excluded some hard-deleted docs
if (hardDeleteCount > 0) {
Bits liveDocs = wrappedLiveDocs == null ? hardLiveDocs : new Bits() {
@Override
public boolean get(int index) {
return hardLiveDocs.get(index) && wrappedLiveDocs.get(index);
}
@Override
public int length() {
return hardLiveDocs.length();
}
};
wrappedReader = FilterCodecReader.wrapLiveDocs(wrappedReader, liveDocs, wrappedReader.numDocs() - hardDeleteCount);
}
} else {
final int carryOverSoftDeletes = reader.getSegmentInfo().getSoftDelCount() - wrappedReader.numDeletedDocs();
assert carryOverSoftDeletes >= 0 : "carry-over soft-deletes must be non-negative";
assert assertSoftDeletesCount(wrappedReader, carryOverSoftDeletes);
softDeleteCount.addAndGet(carryOverSoftDeletes);
}
}
}
mergeReaders.add(wrappedReader);
}
final SegmentMerger merger = new SegmentMerger(mergeReaders,
merge.info.info, infoStream, dirWrapper,
globalFieldNumberMap,
context);
merge.info.setSoftDelCount(Math.toIntExact(softDeleteCount.get()));
merge.checkAborted();
merge.mergeStartNS = System.nanoTime();
// This is where all the work happens:
if (merger.shouldMerge()) {
merger.merge();
}
MergeState mergeState = merger.mergeState;
assert mergeState.segmentInfo == merge.info.info;
merge.info.info.setFiles(new HashSet<>(dirWrapper.getCreatedFiles()));
Codec codec = config.getCodec();
if (infoStream.isEnabled("IW")) {
if (merger.shouldMerge()) {
String pauseInfo = merge.getMergeProgress().getPauseTimes().entrySet()
.stream()
.filter((e) -> e.getValue() > 0)
.map((e) -> String.format(Locale.ROOT, "%.1f sec %s",
e.getValue() / 1000000000.,
e.getKey().name().toLowerCase(Locale.ROOT)))
.collect(Collectors.joining(", "));
if (!pauseInfo.isEmpty()) {
pauseInfo = " (" + pauseInfo + ")";
}
long t1 = System.nanoTime();
double sec = (t1-merge.mergeStartNS)/1000000000.;
double segmentMB = (merge.info.sizeInBytes()/1024./1024.);
infoStream.message("IW", "merge codec=" + codec + " maxDoc=" + merge.info.info.maxDoc() + "; merged segment has " +
(mergeState.mergeFieldInfos.hasVectors() ? "vectors" : "no vectors") + "; " +
(mergeState.mergeFieldInfos.hasNorms() ? "norms" : "no norms") + "; " +
(mergeState.mergeFieldInfos.hasDocValues() ? "docValues" : "no docValues") + "; " +
(mergeState.mergeFieldInfos.hasProx() ? "prox" : "no prox") + "; " +
(mergeState.mergeFieldInfos.hasProx() ? "freqs" : "no freqs") + "; " +
(mergeState.mergeFieldInfos.hasPointValues() ? "points" : "no points") + "; " +
String.format(Locale.ROOT,
"%.1f sec%s to merge segment [%.2f MB, %.2f MB/sec]",
sec,
pauseInfo,
segmentMB,
segmentMB / sec));
} else {
infoStream.message("IW", "skip merging fully deleted segments");
}
}
if (merger.shouldMerge() == false) {
// Merge would produce a 0-doc segment, so we do nothing except commit the merge to remove all the 0-doc segments that we "merged":
assert merge.info.info.maxDoc() == 0;
commitMerge(merge, mergeState);
success = true;
return 0;
}
assert merge.info.info.maxDoc() > 0;
// Very important to do this before opening the reader
// because codec must know if prox was written for
// this segment:
boolean useCompoundFile;
synchronized (this) { // Guard segmentInfos
useCompoundFile = mergePolicy.useCompoundFile(segmentInfos, merge.info, this);
}
if (useCompoundFile) {
success = false;
Collection<String> filesToRemove = merge.info.files();
TrackingDirectoryWrapper trackingCFSDir = new TrackingDirectoryWrapper(mergeDirectory);
try {
createCompoundFile(infoStream, trackingCFSDir, merge.info.info, context, this::deleteNewFiles);
success = true;
} catch (Throwable t) {
synchronized(this) {
if (merge.isAborted()) {
// This can happen if rollback is called while we were building
// our CFS -- fall through to logic below to remove the non-CFS
// merged files:
if (infoStream.isEnabled("IW")) {
infoStream.message("IW", "hit merge abort exception creating compound file during merge");
}
return 0;
} else {
handleMergeException(t, merge);
}
}
} finally {
if (success == false) {
if (infoStream.isEnabled("IW")) {
infoStream.message("IW", "hit exception creating compound file during merge");
}
// Safe: these files must exist
deleteNewFiles(merge.info.files());
}
}
// So that, if we hit exc in deleteNewFiles (next)
// or in commitMerge (later), we close the
// per-segment readers in the finally clause below:
success = false;
synchronized(this) {
// delete new non cfs files directly: they were never
// registered with IFD
deleteNewFiles(filesToRemove);
if (merge.isAborted()) {
if (infoStream.isEnabled("IW")) {
infoStream.message("IW", "abort merge after building CFS");
}
// Safe: these files must exist
deleteNewFiles(merge.info.files());
return 0;
}
}
merge.info.info.setUseCompoundFile(true);
} else {
// So that, if we hit exc in commitMerge (later),
// we close the per-segment readers in the finally
// clause below:
success = false;
}
// Have codec write SegmentInfo. Must do this after
// creating CFS so that 1) .si isn't slurped into CFS,
// and 2) .si reflects useCompoundFile=true change
// above:
boolean success2 = false;
try {
codec.segmentInfoFormat().write(directory, merge.info.info, context);
success2 = true;
} finally {
if (!success2) {
// Safe: these files must exist
deleteNewFiles(merge.info.files());
}
}
// TODO: ideally we would freeze merge.info here!!
// because any changes after writing the .si will be
// lost...
if (infoStream.isEnabled("IW")) {
infoStream.message("IW", String.format(Locale.ROOT, "merged segment size=%.3f MB vs estimate=%.3f MB", merge.info.sizeInBytes()/1024./1024., merge.estimatedMergeBytes/1024/1024.));
}
final IndexReaderWarmer mergedSegmentWarmer = config.getMergedSegmentWarmer();
if (readerPool.isReaderPoolingEnabled() && mergedSegmentWarmer != null) {
final ReadersAndUpdates rld = getPooledInstance(merge.info, true);
final SegmentReader sr = rld.getReader(IOContext.READ);
try {
mergedSegmentWarmer.warm(sr);
} finally {
synchronized(this) {
rld.release(sr);
release(rld);
}
}
}
if (!commitMerge(merge, mergeState)) {
// commitMerge will return false if this merge was
// aborted
return 0;
}
success = true;
} finally {
// Readers are already closed in commitMerge if we didn't hit
// an exc:
if (success == false) {
closeMergeReaders(merge, true, false);
}
}
return merge.info.info.maxDoc();
}
private synchronized void addMergeException(MergePolicy.OneMerge merge) {
assert merge.getException() != null;
if (!mergeExceptions.contains(merge) && mergeGen == merge.mergeGen) {
mergeExceptions.add(merge);
}
}
// For test purposes.
final int getBufferedDeleteTermsSize() {
return docWriter.getBufferedDeleteTermsSize();
}
// For test purposes.
final int getNumBufferedDeleteTerms() {
return docWriter.getNumBufferedDeleteTerms();
}
// utility routines for tests
synchronized SegmentCommitInfo newestSegment() {
return segmentInfos.size() > 0 ? segmentInfos.info(segmentInfos.size()-1) : null;
}
/** Returns a string description of all segments, for
* debugging.
*
* @lucene.internal */
synchronized String segString() {
return segString(segmentInfos);
}
synchronized String segString(Iterable<SegmentCommitInfo> infos) {
return StreamSupport.stream(infos.spliterator(), false)
.map(this::segString).collect(Collectors.joining(" "));
}
/** Returns a string description of the specified
* segment, for debugging.
*
* @lucene.internal */
private synchronized String segString(SegmentCommitInfo info) {
return info.toString(numDeletedDocs(info) - info.getDelCount(softDeletesEnabled));
}
private synchronized void doWait() {
// NOTE: the callers of this method should in theory
// be able to simply call wait(), but, as a defense
// against thread timing hazards where notifyAll()
// fails to be called, we wait for at most 1 second
// and then return so caller can check if wait
// conditions are satisfied:
try {
wait(1000);
} catch (InterruptedException ie) {
throw new ThreadInterruptedException(ie);
}
}
// called only from assert
private boolean filesExist(SegmentInfos toSync) throws IOException {
Collection<String> files = toSync.files(false);
for(final String fileName: files) {
// If this trips it means we are missing a call to
// .checkpoint somewhere, because by the time we
// are called, deleter should know about every
// file referenced by the current head
// segmentInfos:
assert deleter.exists(fileName): "IndexFileDeleter doesn't know about file " + fileName;
}
return true;
}
// For infoStream output
synchronized SegmentInfos toLiveInfos(SegmentInfos sis) {
final SegmentInfos newSIS = new SegmentInfos(sis.getIndexCreatedVersionMajor());
final Map<SegmentCommitInfo,SegmentCommitInfo> liveSIS = new HashMap<>();
for(SegmentCommitInfo info : segmentInfos) {
liveSIS.put(info, info);
}
for(SegmentCommitInfo info : sis) {
SegmentCommitInfo liveInfo = liveSIS.get(info);
if (liveInfo != null) {
info = liveInfo;
}
newSIS.add(info);
}
return newSIS;
}
/** Walk through all files referenced by the current
* segmentInfos and ask the Directory to sync each file,
* if it wasn't already. If that succeeds, then we
* prepare a new segments_N file but do not fully commit
* it. */
private void startCommit(final SegmentInfos toSync) throws IOException {
testPoint("startStartCommit");
assert pendingCommit == null;
if (tragedy.get() != null) {
throw new IllegalStateException("this writer hit an unrecoverable error; cannot commit", tragedy.get());
}
try {
if (infoStream.isEnabled("IW")) {
infoStream.message("IW", "startCommit(): start");
}
synchronized(this) {
if (lastCommitChangeCount > changeCount.get()) {
throw new IllegalStateException("lastCommitChangeCount=" + lastCommitChangeCount + ",changeCount=" + changeCount);
}
if (pendingCommitChangeCount == lastCommitChangeCount) {
if (infoStream.isEnabled("IW")) {
infoStream.message("IW", " skip startCommit(): no changes pending");
}
try {
deleter.decRef(filesToCommit);
} finally {
filesToCommit = null;
}
return;
}
if (infoStream.isEnabled("IW")) {
infoStream.message("IW", "startCommit index=" + segString(toLiveInfos(toSync)) + " changeCount=" + changeCount);
}
assert filesExist(toSync);
}
testPoint("midStartCommit");
boolean pendingCommitSet = false;
try {
testPoint("midStartCommit2");
synchronized (this) {
assert pendingCommit == null;
assert segmentInfos.getGeneration() == toSync.getGeneration();
// Exception here means nothing is prepared
// (this method unwinds everything it did on
// an exception)
toSync.prepareCommit(directory);
if (infoStream.isEnabled("IW")) {
infoStream.message("IW", "startCommit: wrote pending segments file \"" + IndexFileNames.fileNameFromGeneration(IndexFileNames.PENDING_SEGMENTS, "", toSync.getGeneration()) + "\"");
}
pendingCommitSet = true;
pendingCommit = toSync;
}
// This call can take a long time -- 10s of seconds
// or more. We do it without syncing on this:
boolean success = false;
final Collection<String> filesToSync;
try {
filesToSync = toSync.files(false);
directory.sync(filesToSync);
success = true;
} finally {
if (!success) {
pendingCommitSet = false;
pendingCommit = null;
toSync.rollbackCommit(directory);
}
}
if (infoStream.isEnabled("IW")) {
infoStream.message("IW", "done all syncs: " + filesToSync);
}
testPoint("midStartCommitSuccess");
} catch (Throwable t) {
synchronized(this) {
if (!pendingCommitSet) {
if (infoStream.isEnabled("IW")) {
infoStream.message("IW", "hit exception committing segments file");
}
try {
// Hit exception
deleter.decRef(filesToCommit);
} catch (Throwable t1) {
t.addSuppressed(t1);
} finally {
filesToCommit = null;
}
}
}
throw t;
} finally {
synchronized(this) {
// Have our master segmentInfos record the
// generations we just prepared. We do this
// on error or success so we don't
// double-write a segments_N file.
segmentInfos.updateGeneration(toSync);
}
}
} catch (VirtualMachineError tragedy) {
tragicEvent(tragedy, "startCommit");
throw tragedy;
}
testPoint("finishStartCommit");
}
/** If {@link DirectoryReader#open(IndexWriter)} has
* been called (i.e., this writer is in near real-time
* mode), then after a merge completes, this class can be
* invoked to warm the reader on the newly merged
* segment, before the merge commits. This is not
* required for near real-time search, but will reduce
* search latency on opening a new near real-time reader
* after a merge completes.
*
* @lucene.experimental
*
* <p><b>NOTE</b>: {@link #warm(LeafReader)} is called before any
* deletes have been carried over to the merged segment. */
@FunctionalInterface
public interface IndexReaderWarmer {
/**
* Invoked on the {@link LeafReader} for the newly
* merged segment, before that segment is made visible
* to near-real-time readers.
*/
void warm(LeafReader reader) throws IOException;
}
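// Illustrative sketch (names like analyzer and the "id" field are examples): a warmer is
// typically installed on the config before the writer is created:
//   IndexWriterConfig iwc = new IndexWriterConfig(analyzer);
//   iwc.setMergedSegmentWarmer(reader -> reader.terms("id"));  // touch a hot field up front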
/**
* This method should be called on a tragic event, i.e. if a downstream class of the writer
* hits an unrecoverable exception. This method does not rethrow the tragic event exception.
* Note: This method will not close the writer but can be called from any location without respecting any lock order
*/
private void onTragicEvent(Throwable tragedy, String location) {
// This is not supposed to be tragic: IW is supposed to catch this and
// ignore, because it means we asked the merge to abort:
assert tragedy instanceof MergePolicy.MergeAbortedException == false;
// How can it be a tragedy when nothing happened?
assert tragedy != null;
if (infoStream.isEnabled("IW")) {
infoStream.message("IW", "hit tragic " + tragedy.getClass().getSimpleName() + " inside " + location);
}
this.tragedy.compareAndSet(null, tragedy); // only set it once
}
/**
* This method sets the tragic exception unless it's already set, and closes the writer
* if necessary. Note this method will not rethrow the throwable passed to it.
*/
private void tragicEvent(Throwable tragedy, String location) throws IOException {
try {
onTragicEvent(tragedy, location);
} finally {
maybeCloseOnTragicEvent();
}
}
private void maybeCloseOnTragicEvent() throws IOException {
// We cannot hold IW's lock here else it can lead to deadlock:
assert Thread.holdsLock(this) == false;
assert Thread.holdsLock(fullFlushLock) == false;
// if we are already closed (e.g. called by rollback), this will be a no-op.
if (this.tragedy.get() != null && shouldClose(false)) {
rollbackInternal();
}
}
/** If this {@code IndexWriter} was closed as a side-effect of a tragic exception,
* e.g. disk full while flushing a new segment, this returns the root cause exception.
* Otherwise (no tragic exception has occurred) it returns null. */
public Throwable getTragicException() {
return tragedy.get();
}
/** Returns {@code true} if this {@code IndexWriter} is still open. */
public boolean isOpen() {
return closing == false && closed == false;
}
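// Illustrative sketch: a caller that finds the writer unexpectedly closed can check for a
// tragic root cause before deciding how to react:
//   if (writer.isOpen() == false) {
//     Throwable cause = writer.getTragicException();  // null if the writer closed normally
//     if (cause != null) { /* e.g. disk full during flush; surface the error or recover */ }
//   }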
// Used for testing. Current points:
// startDoFlush
// startCommitMerge
// startStartCommit
// midStartCommit
// midStartCommit2
// midStartCommitSuccess
// finishStartCommit
// startCommitMergeDeletes
// startMergeInit
// DocumentsWriterPerThread addDocuments start
private void testPoint(String message) {
if (enableTestPoints) {
assert infoStream.isEnabled("TP"); // don't enable unless you need them.
infoStream.message("TP", message);
}
}
synchronized boolean nrtIsCurrent(SegmentInfos infos) {
ensureOpen();
boolean isCurrent = infos.getVersion() == segmentInfos.getVersion()
&& docWriter.anyChanges() == false
&& bufferedUpdatesStream.any() == false
&& readerPool.anyDocValuesChanges() == false;
if (infoStream.isEnabled("IW")) {
if (isCurrent == false) {
infoStream.message("IW", "nrtIsCurrent: infoVersion matches: " + (infos.getVersion() == segmentInfos.getVersion()) + "; DW changes: " + docWriter.anyChanges() + "; BD changes: "+ bufferedUpdatesStream.any());
}
}
return isCurrent;
}
synchronized boolean isClosed() {
return closed;
}
boolean isDeleterClosed() {
return deleter.isClosed();
}
/** Expert: remove any index files that are no longer
* used.
*
* <p> IndexWriter normally deletes unused files itself,
* during indexing. However, on Windows, which disallows
* deletion of open files, if there is a reader open on
* the index then those files cannot be deleted. This is
* fine, because IndexWriter will periodically retry
* the deletion.</p>
*
* <p> However, IndexWriter doesn't try that often: only
* on open, close, flushing a new segment, and finishing
* a merge. If you don't do any of these actions with your
* IndexWriter, you'll see the unused files linger. If
* that's a problem, call this method to delete them
* (once you've closed the open readers that were
* preventing their deletion).
*
* <p> In addition, you can call this method to delete
* unreferenced index commits. This might be useful if you
* are using an {@link IndexDeletionPolicy} which holds
* onto index commits until some criteria are met, but those
* commits are no longer needed. Otherwise, those commits will
* be deleted the next time commit() is called.
*/
public synchronized void deleteUnusedFiles() throws IOException {
// TODO: should we remove this method now that it's the Directory's job to retry deletions? Except, for the super expert IDP use case
// it's still needed?
ensureOpen(false);
deleter.revisitPolicy();
}
/**
* NOTE: this method creates a compound file for all files returned by
* info.files(). While, generally, this may include separate norms and
* deletion files, this SegmentInfo must not reference such files when this
* method is called, because they are not allowed within a compound file.
*/
static void createCompoundFile(InfoStream infoStream, TrackingDirectoryWrapper directory, final SegmentInfo info, IOContext context, IOUtils.IOConsumer<Collection<String>> deleteFiles) throws IOException {
// maybe this check is not needed, but why take the risk?
if (!directory.getCreatedFiles().isEmpty()) {
throw new IllegalStateException("pass a clean trackingdir for CFS creation");
}
if (infoStream.isEnabled("IW")) {
infoStream.message("IW", "create compound file");
}
// Now merge all added files
boolean success = false;
try {
info.getCodec().compoundFormat().write(directory, info, context);
success = true;
} finally {
if (!success) {
// Safe: these files must exist
deleteFiles.accept(directory.getCreatedFiles());
}
}
// Replace all previous files with the CFS/CFE files:
info.setFiles(new HashSet<>(directory.getCreatedFiles()));
}
/**
* Tries to delete the given files if unreferenced
* @param files the files to delete
* @throws IOException if an {@link IOException} occurs
* @see IndexFileDeleter#deleteNewFiles(Collection)
*/
private synchronized void deleteNewFiles(Collection<String> files) throws IOException {
deleter.deleteNewFiles(files);
}
/**
* Cleans up residuals from a segment that could not be entirely flushed due to an error
*/
private synchronized void flushFailed(SegmentInfo info) throws IOException {
// TODO: this really should be a tragic
Collection<String> files;
try {
files = info.files();
} catch (IllegalStateException ise) {
// OK
files = null;
}
if (files != null) {
deleter.deleteNewFiles(files);
}
}
/**
* Publishes the flushed segment, segment-private deletes (if any) and its
* associated global delete (if present) to IndexWriter. The actual
* publishing operation is synced on {@code IW -> BDS} so that the {@link SegmentInfo}'s
* delete generation is always GlobalPacket_deleteGeneration + 1
* @param forced if <code>true</code> this call will block on the ticket queue if the lock is held by another thread.
* if <code>false</code> the call will try to acquire the queue lock and exits if it's held by another thread.
*
*/
private void publishFlushedSegments(boolean forced) throws IOException {
docWriter.purgeFlushTickets(forced, ticket -> {
DocumentsWriterPerThread.FlushedSegment newSegment = ticket.getFlushedSegment();
FrozenBufferedUpdates bufferedUpdates = ticket.getFrozenUpdates();
ticket.markPublished();
if (newSegment == null) { // this is a flushed global deletes package - not a segment
if (bufferedUpdates != null && bufferedUpdates.any()) { // TODO why can this be null?
publishFrozenUpdates(bufferedUpdates);
if (infoStream.isEnabled("IW")) {
infoStream.message("IW", "flush: push buffered updates: " + bufferedUpdates);
}
}
} else {
assert newSegment.segmentInfo != null;
if (infoStream.isEnabled("IW")) {
infoStream.message("IW", "publishFlushedSegment seg-private updates=" + newSegment.segmentUpdates);
}
if (newSegment.segmentUpdates != null && infoStream.isEnabled("DW")) {
infoStream.message("IW", "flush: push buffered seg private updates: " + newSegment.segmentUpdates);
}
// now publish!
publishFlushedSegment(newSegment.segmentInfo, newSegment.fieldInfos, newSegment.segmentUpdates,
bufferedUpdates, newSegment.sortMap);
}
});
}
/** Record that the files referenced by this {@link SegmentInfos} are still in use.
*
* @lucene.internal */
public synchronized void incRefDeleter(SegmentInfos segmentInfos) throws IOException {
ensureOpen();
deleter.incRef(segmentInfos, false);
if (infoStream.isEnabled("IW")) {
infoStream.message("IW", "incRefDeleter for NRT reader version=" + segmentInfos.getVersion() + " segments=" + segString(segmentInfos));
}
}
/** Record that the files referenced by this {@link SegmentInfos} are no longer in use. Only call this if you are sure you previously
* called {@link #incRefDeleter}.
*
* @lucene.internal */
public synchronized void decRefDeleter(SegmentInfos segmentInfos) throws IOException {
ensureOpen();
deleter.decRef(segmentInfos);
if (infoStream.isEnabled("IW")) {
infoStream.message("IW", "decRefDeleter for NRT reader version=" + segmentInfos.getVersion() + " segments=" + segString(segmentInfos));
}
}
/**
* Processes all events and might trigger a merge if the given seqNo is negative
* @param seqNo if the seqNo is less than 0 this method will process events, otherwise it's a no-op.
* @return the given seqNo, negated if it was negative.
*/
private long maybeProcessEvents(long seqNo) throws IOException {
if (seqNo < 0) {
seqNo = -seqNo;
processEvents(true);
}
return seqNo;
}
private void processEvents(boolean triggerMerge) throws IOException {
if (tragedy.get() == null) {
eventQueue.processEvents();
}
if (triggerMerge) {
maybeMerge(getConfig().getMergePolicy(), MergeTrigger.SEGMENT_FLUSH, UNBOUNDED_MAX_MERGE_SEGMENTS);
}
}
/**
* Interface for internal atomic events. See {@link DocumentsWriter} for details. Events are executed concurrently and no order is guaranteed.
* Each event should only rely on the serializability within its process method. All actions that must happen before or after a certain action must be
* encoded inside the {@link #process(IndexWriter)} method.
*
*/
@FunctionalInterface
interface Event {
/**
* Processes the event. This method is called by the {@link IndexWriter}
* passed as the first argument.
*
* @param writer
* the {@link IndexWriter} that executes the event.
* @throws IOException
* if an {@link IOException} occurs
*/
void process(IndexWriter writer) throws IOException;
}
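// Illustrative sketch: since Event is a functional interface, internal callers can express
// deferred work as lambdas, e.g.:
//   Event e = w -> w.publishFlushedSegments(true);
// such events are later drained via processEvents() with no ordering guarantee.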
/** Anything that will add N docs to the index should reserve first to
* make sure it's allowed. This will throw {@code
* IllegalArgumentException} if it's not allowed. */
private void reserveDocs(long addedNumDocs) {
assert addedNumDocs >= 0;
if (adjustPendingNumDocs(addedNumDocs) > actualMaxDocs) {
// Reserve failed: put the docs back and throw exc:
adjustPendingNumDocs(-addedNumDocs);
tooManyDocs(addedNumDocs);
}
}
/** Does a best-effort check, that the current index would accept this many additional docs, but does not actually reserve them.
*
* @throws IllegalArgumentException if there would be too many docs */
private void testReserveDocs(long addedNumDocs) {
assert addedNumDocs >= 0;
if (pendingNumDocs.get() + addedNumDocs > actualMaxDocs) {
tooManyDocs(addedNumDocs);
}
}
private void tooManyDocs(long addedNumDocs) {
assert addedNumDocs >= 0;
throw new IllegalArgumentException("number of documents in the index cannot exceed " + actualMaxDocs + " (current document count is " + pendingNumDocs.get() + "; added numDocs is " + addedNumDocs + ")");
}
/** Returns the highest <a href="#sequence_number">sequence number</a> across
* all completed operations, or 0 if no operations have finished yet. Still
* in-flight operations (in other threads) are not counted until they finish.
*
* @lucene.experimental */
public long getMaxCompletedSequenceNumber() {
ensureOpen();
return docWriter.getMaxCompletedSequenceNumber();
}
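// Illustrative sketch (doc is an example; single-threaded for simplicity): the returned value
// only moves forward as operations complete:
//   long before = writer.getMaxCompletedSequenceNumber();
//   long opSeqNo = writer.addDocument(doc);
//   assert opSeqNo > before && writer.getMaxCompletedSequenceNumber() >= opSeqNo;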
private long adjustPendingNumDocs(long numDocs) {
long count = pendingNumDocs.addAndGet(numDocs);
assert count >= 0 : "pendingNumDocs is negative: " + count;
return count;
}
final boolean isFullyDeleted(ReadersAndUpdates readersAndUpdates) throws IOException {
if (readersAndUpdates.isFullyDeleted()) {
assert Thread.holdsLock(this);
return readersAndUpdates.keepFullyDeletedSegment(config.getMergePolicy()) == false;
}
return false;
}
/**
* Returns the number of deletes a merge would claim back if the given segment is merged.
* @see MergePolicy#numDeletesToMerge(SegmentCommitInfo, int, org.apache.lucene.util.IOSupplier)
* @param info the segment to get the number of deletes for
* @lucene.experimental
*/
@Override
public final int numDeletesToMerge(SegmentCommitInfo info) throws IOException {
ensureOpen(false);
validate(info);
MergePolicy mergePolicy = config.getMergePolicy();
final ReadersAndUpdates rld = getPooledInstance(info, false);
int numDeletesToMerge;
if (rld != null) {
numDeletesToMerge = rld.numDeletesToMerge(mergePolicy);
} else {
// if we don't have a pooled instance lets just return the hard deletes, this is safe!
numDeletesToMerge = info.getDelCount();
}
assert numDeletesToMerge <= info.info.maxDoc() :
"numDeletesToMerge: " + numDeletesToMerge + " > maxDoc: " + info.info.maxDoc();
return numDeletesToMerge;
}
void release(ReadersAndUpdates readersAndUpdates) throws IOException {
release(readersAndUpdates, true);
}
private void release(ReadersAndUpdates readersAndUpdates, boolean assertLiveInfo) throws IOException {
assert Thread.holdsLock(this);
if (readerPool.release(readersAndUpdates, assertLiveInfo)) {
// if we write anything here we have to hold the lock otherwise IDF will delete files underneath us
assert Thread.holdsLock(this);
checkpointNoSIS();
}
}
ReadersAndUpdates getPooledInstance(SegmentCommitInfo info, boolean create) {
ensureOpen(false);
return readerPool.get(info, create);
}
// FrozenBufferedUpdates
/**
* Translates a frozen packet of delete term/query, or doc values
* updates, into their actual docIDs in the index, and applies the change. This is a heavy
* operation and is done concurrently by incoming indexing threads.
* This method will return immediately without blocking if another thread is currently
* applying the package. In order to ensure the packet has been applied,
* {@link IndexWriter#forceApply(FrozenBufferedUpdates)} must be called.
*/
@SuppressWarnings("try")
final boolean tryApply(FrozenBufferedUpdates updates) throws IOException {
if (updates.tryLock()) {
try {
forceApply(updates);
return true;
} finally {
updates.unlock();
}
}
return false;
}
/**
* Translates a frozen packet of delete term/query, or doc values
* updates, into their actual docIDs in the index, and applies the change. This is a heavy
* operation and is done concurrently by incoming indexing threads.
*/
final void forceApply(FrozenBufferedUpdates updates) throws IOException {
updates.lock();
try {
if (updates.isApplied()) {
// already done
return;
}
long startNS = System.nanoTime();
assert updates.any();
Set<SegmentCommitInfo> seenSegments = new HashSet<>();
int iter = 0;
int totalSegmentCount = 0;
long totalDelCount = 0;
boolean finished = false;
// Optimistic concurrency: assume we are free to resolve the deletes against all current segments in the index, despite that
// concurrent merges are running. Once we are done, we check to see if a merge completed while we were running. If so, we must retry
// resolving against the newly merged segment(s). Eventually no merge finishes while we were running and we are done.
while (true) {
String messagePrefix;
if (iter == 0) {
messagePrefix = "";
} else {
messagePrefix = "iter " + iter;
}
long iterStartNS = System.nanoTime();
long mergeGenStart = mergeFinishedGen.get();
Set<String> delFiles = new HashSet<>();
BufferedUpdatesStream.SegmentState[] segStates;
synchronized (this) {
List<SegmentCommitInfo> infos = getInfosToApply(updates);
if (infos == null) {
break;
}
for (SegmentCommitInfo info : infos) {
delFiles.addAll(info.files());
}
// Must open while holding IW lock so that e.g. segments are not merged
// away, dropped from 100% deletions, etc., before we can open the readers
segStates = openSegmentStates(infos, seenSegments, updates.delGen());
if (segStates.length == 0) {
if (infoStream.isEnabled("BD")) {
infoStream.message("BD", "packet matches no segments");
}
break;
}
if (infoStream.isEnabled("BD")) {
infoStream.message("BD", String.format(Locale.ROOT,
messagePrefix + "now apply del packet (%s) to %d segments, mergeGen %d",
this, segStates.length, mergeGenStart));
}
totalSegmentCount += segStates.length;
// Important, else IFD may try to delete our files while we are still using them,
// if e.g. a merge finishes on some of the segments we are resolving on:
deleter.incRef(delFiles);
}
AtomicBoolean success = new AtomicBoolean();
long delCount;
try (Closeable finalizer = () -> finishApply(segStates, success.get(), delFiles)) {
assert finalizer != null; // access the finalizer to prevent a warning
// don't hold IW monitor lock here so threads are free concurrently resolve deletes/updates:
delCount = updates.apply(segStates);
success.set(true);
}
// Since we just resolved some more deletes/updates, now is a good time to write them:
writeSomeDocValuesUpdates();
// It's OK to add this here, even if the while loop retries, because delCount only includes newly
// deleted documents, on the segments we didn't already do in previous iterations:
totalDelCount += delCount;
if (infoStream.isEnabled("BD")) {
infoStream.message("BD", String.format(Locale.ROOT,
messagePrefix + "done inner apply del packet (%s) to %d segments; %d new deletes/updates; took %.3f sec",
this, segStates.length, delCount, (System.nanoTime() - iterStartNS) / 1000000000.));
}
if (updates.privateSegment != null) {
// No need to retry for a segment-private packet: the merge that folds in our private segment already waits for all deletes to
// be applied before it kicks off, so this private segment must already not be in the set of merging segments
break;
}
// Must sync on writer here so that IW.mergeCommit is not running concurrently, so that if we exit, we know mergeCommit will succeed
// in pulling all our delGens into a merge:
synchronized (this) {
long mergeGenCur = mergeFinishedGen.get();
if (mergeGenCur == mergeGenStart) {
// Must do this while still holding IW lock else a merge could finish and skip carrying over our updates:
// Record that this packet is finished:
bufferedUpdatesStream.finished(updates);
finished = true;
// No merge finished while we were applying, so we are done!
break;
}
}
if (infoStream.isEnabled("BD")) {
infoStream.message("BD", messagePrefix + "concurrent merges finished; move to next iter");
}
// A merge completed while we were running. In this case, that merge may have picked up some of the updates we did, but not
// necessarily all of them, so we cycle again, re-applying all our updates to the newly merged segment.
iter++;
}
if (finished == false) {
// Record that this packet is finished:
bufferedUpdatesStream.finished(updates);
}
if (infoStream.isEnabled("BD")) {
String message = String.format(Locale.ROOT,
"done apply del packet (%s) to %d segments; %d new deletes/updates; took %.3f sec",
this, totalSegmentCount, totalDelCount, (System.nanoTime() - startNS) / 1000000000.);
if (iter > 0) {
message += "; " + (iter + 1) + " iters due to concurrent merges";
}
message += "; " + bufferedUpdatesStream.getPendingUpdatesCount() + " packets remain";
infoStream.message("BD", message);
}
} finally {
updates.unlock();
}
}
/** Returns the {@link SegmentCommitInfo} that this packet is supposed to apply its deletes to, or null
* if the private segment was already merged away. */
private synchronized List<SegmentCommitInfo> getInfosToApply(FrozenBufferedUpdates updates) {
final List<SegmentCommitInfo> infos;
if (updates.privateSegment != null) {
if (segmentInfos.contains(updates.privateSegment)) {
infos = Collections.singletonList(updates.privateSegment);
}else {
if (infoStream.isEnabled("BD")) {
infoStream.message("BD", "private segment already gone; skip processing updates");
}
infos = null;
}
} else {
infos = segmentInfos.asList();
}
return infos;
}
private void finishApply(BufferedUpdatesStream.SegmentState[] segStates,
boolean success, Set<String> delFiles) throws IOException {
synchronized (this) {
BufferedUpdatesStream.ApplyDeletesResult result;
try {
result = closeSegmentStates(segStates, success);
} finally {
// Matches the incRef we did above, but we must do the decRef after closing segment states else
// IFD can't delete still-open files
deleter.decRef(delFiles);
}
if (result.anyDeletes) {
maybeMerge.set(true);
checkpoint();
}
if (result.allDeleted != null) {
if (infoStream.isEnabled("IW")) {
infoStream.message("IW", "drop 100% deleted segments: " + segString(result.allDeleted));
}
for (SegmentCommitInfo info : result.allDeleted) {
dropDeletedSegment(info);
}
checkpoint();
}
}
}
/** Close segment states previously opened with openSegmentStates. */
private BufferedUpdatesStream.ApplyDeletesResult closeSegmentStates(BufferedUpdatesStream.SegmentState[] segStates, boolean success) throws IOException {
List<SegmentCommitInfo> allDeleted = null;
long totDelCount = 0;
try {
for (BufferedUpdatesStream.SegmentState segState : segStates) {
if (success) {
totDelCount += segState.rld.getDelCount() - segState.startDelCount;
int fullDelCount = segState.rld.getDelCount();
assert fullDelCount <= segState.rld.info.info.maxDoc() : fullDelCount + " > " + segState.rld.info.info.maxDoc();
if (segState.rld.isFullyDeleted() && getConfig().getMergePolicy().keepFullyDeletedSegment(() -> segState.reader) == false) {
if (allDeleted == null) {
allDeleted = new ArrayList<>();
}
allDeleted.add(segState.reader.getOriginalSegmentInfo());
}
}
}
} finally {
IOUtils.close(segStates);
}
if (infoStream.isEnabled("BD")) {
infoStream.message("BD", "closeSegmentStates: " + totDelCount + " new deleted documents; pool " + bufferedUpdatesStream.getPendingUpdatesCount() + " packets; bytesUsed=" + readerPool.ramBytesUsed());
}
return new BufferedUpdatesStream.ApplyDeletesResult(totDelCount > 0, allDeleted);
}
/** Opens SegmentReader and inits SegmentState for each segment. */
private BufferedUpdatesStream.SegmentState[] openSegmentStates(List<SegmentCommitInfo> infos,
Set<SegmentCommitInfo> alreadySeenSegments, long delGen) throws IOException {
List<BufferedUpdatesStream.SegmentState> segStates = new ArrayList<>();
try {
for (SegmentCommitInfo info : infos) {
if (info.getBufferedDeletesGen() <= delGen && alreadySeenSegments.contains(info) == false) {
segStates.add(new BufferedUpdatesStream.SegmentState(getPooledInstance(info, true), this::release, info));
alreadySeenSegments.add(info);
}
}
} catch (Throwable t) {
try {
IOUtils.close(segStates);
} catch (Throwable t1) {
t.addSuppressed(t1);
}
throw t;
}
return segStates.toArray(new BufferedUpdatesStream.SegmentState[0]);
}
/**
* Tests should override this to enable test points. Default is <code>false</code>.
*/
protected boolean isEnableTestPoints() {
return false;
}
private void validate(SegmentCommitInfo info) {
if (info.info.dir != directoryOrig) {
throw new IllegalArgumentException("SegmentCommitInfo must be from the same directory");
}
}
/** Tests should use this method to snapshot the current segmentInfos to have a consistent view */
final synchronized SegmentInfos cloneSegmentInfos() {
return segmentInfos.clone();
}
/**
   * Returns accurate {@link DocStats} from this writer. The numDoc for instance can change after maxDoc is fetched
* that causes numDocs to be greater than maxDoc which makes it hard to get accurate document stats from IndexWriter.
*/
public synchronized DocStats getDocStats() {
ensureOpen();
int numDocs = docWriter.getNumDocs();
int maxDoc = numDocs;
for (final SegmentCommitInfo info : segmentInfos) {
maxDoc += info.info.maxDoc();
numDocs += info.info.maxDoc() - numDeletedDocs(info);
}
assert maxDoc >= numDocs : "maxDoc is less than numDocs: " + maxDoc + " < " + numDocs;
return new DocStats(maxDoc, numDocs);
}
/**
* DocStats for this index
*/
public static final class DocStats {
/**
* The total number of docs in this index, including
* docs not yet flushed (still in the RAM buffer),
* not counting deletions.
*/
public final int maxDoc;
/**
* The total number of docs in this index, including
* docs not yet flushed (still in the RAM buffer), and
* including deletions. <b>NOTE:</b> buffered deletions
* are not counted. If you really need these to be
* counted you should call {@link IndexWriter#commit()} first.
*/
public final int numDocs;
private DocStats(int maxDoc, int numDocs) {
this.maxDoc = maxDoc;
this.numDocs = numDocs;
}
}
private static class IndexWriterMergeSource implements MergeScheduler.MergeSource {
private final IndexWriter writer;
private IndexWriterMergeSource(IndexWriter writer) {
this.writer = writer;
}
@Override
public MergePolicy.OneMerge getNextMerge() {
MergePolicy.OneMerge nextMerge = writer.getNextMerge();
if (nextMerge != null) {
if (writer.mergeScheduler.verbose()) {
writer.mergeScheduler.message(" checked out merge " + writer.segString(nextMerge.segments));
}
}
return nextMerge;
}
@Override
public void onMergeFinished(MergePolicy.OneMerge merge) {
writer.mergeFinish(merge);
}
@Override
public boolean hasPendingMerges() {
return writer.hasPendingMerges();
}
@Override
public void merge(MergePolicy.OneMerge merge) throws IOException {
assert Thread.holdsLock(writer) == false;
writer.merge(merge);
}
public String toString() {
return writer.segString();
}
}
private class Merges {
private boolean mergesEnabled = true;
boolean areEnabled() {
assert Thread.holdsLock(IndexWriter.this);
return mergesEnabled;
}
void disable() {
assert Thread.holdsLock(IndexWriter.this);
mergesEnabled = false;
}
void enable() {
ensureOpen();
assert Thread.holdsLock(IndexWriter.this);
mergesEnabled = true;
}
}
}
| 1 | 37,480 | Maybe add `@lucene.experimental`? We are exposing (slightly) internal details about `IndexWriter` so maybe we need to reserve the right to change this API in the future ... | apache-lucene-solr | java |
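As a side note on the review comment above: `@lucene.experimental` is the custom javadoc tag Lucene uses to flag APIs that may change incompatibly between releases. A minimal, hypothetical illustration of where such a tag would sit on the new `DocStats` API (this is a sketch only, not the actual IndexWriter source):

// Hypothetical sketch; the class and field names simply mirror the DocStats shown above.
public final class DocStatsExample {

  /**
   * Document counts for an index, including docs not yet flushed.
   *
   * @lucene.experimental
   */
  public static final class DocStats {
    /** Total docs, not counting deletions. */
    public final int maxDoc;
    /** Total docs, counting deletions. */
    public final int numDocs;

    DocStats(int maxDoc, int numDocs) {
      this.maxDoc = maxDoc;
      this.numDocs = numDocs;
    }
  }
}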
@@ -676,7 +676,10 @@ module Beaker
def sleep_until_puppetdb_started(host, nonssl_port = nil, ssl_port = nil)
nonssl_port = options[:puppetdb_port_nonssl] if nonssl_port.nil?
ssl_port = options[:puppetdb_port_ssl] if ssl_port.nil?
- curl_with_retries("start puppetdb", host, "http://localhost:#{nonssl_port}", 0, 120)
+ endpoint = 'status/v1/services/puppetdb-status'
+ retry_on(host,
+ "curl -m 1 http://localhost:#{nonssl_port}/#{endpoint} | grep '\"state\":\"running\"'",
+ {:max_retries => 120})
curl_with_retries("start puppetdb (ssl)",
host, "https://#{host.node_name}:#{ssl_port}", [35, 60])
end | 1 | require 'timeout'
require 'inifile'
require 'resolv'
module Beaker
module DSL
module Helpers
# Methods that help you interact with your puppet installation, puppet must be installed
# for these methods to execute correctly
module PuppetHelpers
# Return the regular expression pattern for an IPv4 address
def ipv4_regex
return /(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)/
end
# Return the IP address that given hostname returns when resolved on
# the given host.
#
      # @param [Host] host One object that acts like a Beaker::Host
      # @param [String] hostname The hostname to perform a DNS resolution on
#
# @return [String, nil] An IP address, or nil.
def resolve_hostname_on(host, hostname)
match = curl_on(host, "--verbose #{hostname}", :accept_all_exit_codes => true).stderr.match(ipv4_regex)
return match ? match[0] : nil
end
# @!macro [new] common_opts
# @param [Hash{Symbol=>String}] opts Options to alter execution.
# @option opts [Boolean] :silent (false) Do not produce log output
# @option opts [Array<Fixnum>] :acceptable_exit_codes ([0]) An array
# (or range) of integer exit codes that should be considered
# acceptable. An error will be thrown if the exit code does not
# match one of the values in this list.
# @option opts [Boolean] :accept_all_exit_codes (false) Consider all
# exit codes as passing.
# @option opts [Boolean] :dry_run (false) Do not actually execute any
# commands on the SUT
# @option opts [String] :stdin (nil) Input to be provided during command
# execution on the SUT.
# @option opts [Boolean] :pty (false) Execute this command in a pseudoterminal.
# @option opts [Boolean] :expect_connection_failure (false) Expect this command
# to result in a connection failure, reconnect and continue execution.
# @option opts [Hash{String=>String}] :environment ({}) These will be
# treated as extra environment variables that should be set before
# running the command.
#
# Return the name of the puppet user.
#
# @param [Host] host One object that acts like a Beaker::Host
#
# @note This method assumes puppet is installed on the host.
#
def puppet_user(host)
return host.puppet('master')['user']
end
# Return the name of the puppet group.
#
# @param [Host] host One object that acts like a Beaker::Host
#
# @note This method assumes puppet is installed on the host.
#
def puppet_group(host)
return host.puppet('master')['group']
end
# Test Puppet running in a certain run mode with specific options.
# This ensures the following steps are performed:
# 1. The pre-test Puppet configuration is backed up
# 2. A new Puppet configuraton file is layed down
# 3. Puppet is started or restarted in the specified run mode
# 4. Ensure Puppet has started correctly
# 5. Further tests are yielded to
# 6. Revert Puppet to the pre-test state
# 7. Testing artifacts are saved in a folder named for the test
#
# @note Whether Puppet is started or restarted depends on what kind of
# server you're running. Passenger and puppetserver are restarted before.
# Webrick is started before and stopped after yielding, unless you're using
# service scripts, then it'll behave like passenger & puppetserver.
# Passenger and puppetserver (or webrick using service scripts)
# restart after yielding by default. You can stop this from happening
# by setting the :restart_when_done flag of the conf_opts argument.
#
# @param [Host] host One object that act like Host
#
# @param [Hash{Symbol=>String}] conf_opts Represents puppet settings.
# Sections of the puppet.conf may be
# specified, if no section is specified the
# a puppet.conf file will be written with the
# options put in a section named after [mode]
# @option conf_opts [String] :__commandline_args__ A special setting for
# command_line arguments such as --debug or
# --logdest, which cannot be set in
# puppet.conf. For example:
#
# :__commandline_args__ => '--logdest /tmp/a.log'
#
# These will only be applied when starting a FOSS
# master, as a pe master is just bounced.
# @option conf_opts [Hash] :__service_args__ A special setting of options
# for controlling how the puppet master service is
# handled. The only setting currently is
# :bypass_service_script, which if set true will
# force stopping and starting a webrick master
# using the start_puppet_from_source_* methods,
# even if it seems the host has passenger.
# This is needed in FOSS tests to initialize
# SSL.
# @option conf_opts [Boolean] :restart_when_done determines whether a restart
# should be run after the test has been yielded to.
# Will stop puppet if false. Default behavior
# is to restart, but you can override this on the
# host or with this option.
# (Note: only works for passenger & puppetserver
# masters (or webrick using the service scripts))
# @param [File] testdir The temporary directory which will hold backup
# configuration, and other test artifacts.
#
# @param [Block] block The point of this method, yields so
# tests may be ran. After the block is finished
# puppet will revert to a previous state.
#
# @example A simple use case to ensure a master is running
# with_puppet_running_on( master ) do
# ...tests that require a master...
# end
#
# @example Fully utilizing the possiblities of config options
# with_puppet_running_on( master,
# :main => {:logdest => '/var/blah'},
# :master => {:masterlog => '/elswhere'},
# :agent => {:server => 'localhost'} ) do
#
# ...tests to be ran...
# end
#
def with_puppet_running_on host, conf_opts, testdir = host.tmpdir(File.basename(@path)), &block
raise(ArgumentError, "with_puppet_running_on's conf_opts must be a Hash. You provided a #{conf_opts.class}: '#{conf_opts}'") if !conf_opts.kind_of?(Hash)
cmdline_args = conf_opts[:__commandline_args__]
service_args = conf_opts[:__service_args__] || {}
restart_when_done = true
restart_when_done = host[:restart_when_done] if host.has_key?(:restart_when_done)
restart_when_done = conf_opts.fetch(:restart_when_done, restart_when_done)
conf_opts = conf_opts.reject { |k,v| [:__commandline_args__, :__service_args__, :restart_when_done].include?(k) }
curl_retries = host['master-start-curl-retries'] || options['master-start-curl-retries']
logger.debug "Setting curl retries to #{curl_retries}"
if options[:is_puppetserver] || host[:is_puppetserver]
confdir = host.puppet('master')['confdir']
vardir = host.puppet('master')['vardir']
if cmdline_args
split_args = cmdline_args.split()
split_args.each do |arg|
case arg
when /--confdir=(.*)/
confdir = $1
when /--vardir=(.*)/
vardir = $1
end
end
end
puppetserver_opts = { "jruby-puppet" => {
"master-conf-dir" => confdir,
"master-var-dir" => vardir,
}}
puppetserver_conf = File.join("#{host['puppetserver-confdir']}", "puppetserver.conf")
modify_tk_config(host, puppetserver_conf, puppetserver_opts)
end
begin
backup_file = backup_the_file(host, host.puppet('master')['confdir'], testdir, 'puppet.conf')
lay_down_new_puppet_conf host, conf_opts, testdir
if host.use_service_scripts? && !service_args[:bypass_service_script]
bounce_service( host, host['puppetservice'], curl_retries )
else
puppet_master_started = start_puppet_from_source_on!( host, cmdline_args )
end
yield self if block_given?
# FIXME: these test-flow-control exceptions should be using throw
        # they can be caught in test_case. current layout does not allow it
rescue Beaker::DSL::Outcomes::PassTest => early_assertion
pass_test(early_assertion)
rescue Beaker::DSL::Outcomes::FailTest => early_assertion
fail_test(early_assertion)
rescue Beaker::DSL::Outcomes::PendingTest => early_assertion
pending_test(early_assertion)
rescue Beaker::DSL::Outcomes::SkipTest => early_assertion
skip_test(early_assertion)
rescue Beaker::DSL::Assertions, Minitest::Assertion => early_assertion
fail_test(early_assertion)
rescue Exception => early_exception
original_exception = RuntimeError.new("PuppetAcceptance::DSL::Helpers.with_puppet_running_on failed (check backtrace for location) because: #{early_exception}\n#{early_exception.backtrace.join("\n")}\n")
raise(original_exception)
ensure
begin
if host.use_service_scripts? && !service_args[:bypass_service_script]
restore_puppet_conf_from_backup( host, backup_file )
if restart_when_done
bounce_service( host, host['puppetservice'], curl_retries )
else
host.exec puppet_resource('service', host['puppetservice'], 'ensure=stopped')
end
else
if puppet_master_started
stop_puppet_from_source_on( host )
else
dump_puppet_log(host)
end
restore_puppet_conf_from_backup( host, backup_file )
end
rescue Exception => teardown_exception
begin
if !host.is_pe?
dump_puppet_log(host)
end
rescue Exception => dumping_exception
logger.error("Raised during attempt to dump puppet logs: #{dumping_exception}")
end
if original_exception
logger.error("Raised during attempt to teardown with_puppet_running_on: #{teardown_exception}\n---\n")
raise original_exception
else
raise teardown_exception
end
end
end
end
# Test Puppet running in a certain run mode with specific options,
# on the default host
# @see #with_puppet_running_on
def with_puppet_running conf_opts, testdir = host.tmpdir(File.basename(@path)), &block
with_puppet_running_on(default, conf_opts, testdir, &block)
end
# @!visibility private
def restore_puppet_conf_from_backup( host, backup_file )
puppet_conf = host.puppet('master')['config']
if backup_file
host.exec( Command.new( "if [ -f '#{backup_file}' ]; then " +
"cat '#{backup_file}' > " +
"'#{puppet_conf}'; " +
"rm -f '#{backup_file}'; " +
"fi" ) )
else
host.exec( Command.new( "rm -f '#{puppet_conf}'" ))
end
end
# @!visibility private
def start_puppet_from_source_on! host, args = ''
host.exec( puppet( 'master', args ) )
logger.debug 'Waiting for the puppet master to start'
unless port_open_within?( host, 8140, 10 )
raise Beaker::DSL::FailTest, 'Puppet master did not start in a timely fashion'
end
logger.debug 'The puppet master has started'
return true
end
# @!visibility private
def stop_puppet_from_source_on( host )
pid = host.exec( Command.new('cat `puppet master --configprint pidfile`') ).stdout.chomp
host.exec( Command.new( "kill #{pid}" ) )
Timeout.timeout(10) do
while host.exec( Command.new( "kill -0 #{pid}"), :acceptable_exit_codes => [0,1] ).exit_code == 0 do
# until kill -0 finds no process and we know that puppet has finished cleaning up
sleep 1
end
end
end
# @!visibility private
def dump_puppet_log(host)
syslogfile = case host['platform']
when /fedora|centos|el|redhat|scientific/ then '/var/log/messages'
when /ubuntu|debian|cumulus/ then '/var/log/syslog'
else return
end
logger.notify "\n*************************"
logger.notify "* Dumping master log *"
logger.notify "*************************"
host.exec( Command.new( "tail -n 100 #{syslogfile}" ), :acceptable_exit_codes => [0,1])
logger.notify "*************************\n"
end
# @!visibility private
def lay_down_new_puppet_conf( host, configuration_options, testdir )
puppetconf_main = host.puppet('master')['config']
puppetconf_filename = File.basename(puppetconf_main)
puppetconf_test = File.join(testdir, puppetconf_filename)
new_conf = puppet_conf_for( host, configuration_options )
create_remote_file host, puppetconf_test, new_conf.to_s
host.exec(
Command.new( "cat #{puppetconf_test} > #{puppetconf_main}" ),
:silent => true
)
host.exec( Command.new( "cat #{puppetconf_main}" ) )
end
# @!visibility private
def puppet_conf_for host, conf_opts
puppetconf = host.exec( Command.new( "cat #{host.puppet('master')['config']}" ) ).stdout
new_conf = IniFile.new( puppetconf ).merge( conf_opts )
new_conf
end
# Restarts the named puppet service
#
# @param [Host] host Host the service runs on
# @param [String] service Name of the service to restart
# @param [Fixnum] curl_retries Number of seconds to wait for the restart to complete before failing
# @param [Fixnum] port Port to check status at
#
# @return [Result] Result of last status check
# @!visibility private
def bounce_service host, service, curl_retries = nil, port = nil
curl_retries = 120 if curl_retries.nil?
port = options[:puppetserver_port] if port.nil?
if host.graceful_restarts?
service = host.check_for_command('apache2ctl') ? 'apache2ctl' : 'apachectl'
apachectl_path = host.is_pe? ? "#{host['puppetsbindir']}/#{service}" : service
host.exec(Command.new("#{apachectl_path} graceful"))
else
result = host.exec(Command.new("service #{service} reload"),
:acceptable_exit_codes => [0,1,3])
if result.exit_code == 0
return result
else
host.exec puppet_resource('service', service, 'ensure=stopped')
host.exec puppet_resource('service', service, 'ensure=running')
end
end
curl_with_retries(" #{service} ", host, "https://localhost:#{port}", [35, 60], curl_retries)
end
# Runs 'puppet apply' on a remote host, piping manifest through stdin
#
# @param [Host] host The host that this command should be run on
#
# @param [String] manifest The puppet manifest to apply
#
# @!macro common_opts
# @option opts [Boolean] :parseonly (false) If this key is true, the
# "--parseonly" command line parameter will
# be passed to the 'puppet apply' command.
#
# @option opts [Boolean] :trace (false) If this key exists in the Hash,
# the "--trace" command line parameter will be
# passed to the 'puppet apply' command.
#
# @option opts [Array<Integer>] :acceptable_exit_codes ([0]) The list of exit
# codes that will NOT raise an error when found upon
# command completion. If provided, these values will
# be combined with those used in :catch_failures and
# :expect_failures to create the full list of
# passing exit codes.
#
# @option opts [Hash] :environment Additional environment variables to be
# passed to the 'puppet apply' command
#
# @option opts [Boolean] :catch_failures (false) By default `puppet
# --apply` will exit with 0, which does not count
# as a test failure, even if there were errors or
# changes when applying the manifest. This option
# enables detailed exit codes and causes a test
# failure if `puppet --apply` indicates there was
# a failure during its execution.
#
# @option opts [Boolean] :catch_changes (false) This option enables
# detailed exit codes and causes a test failure
# if `puppet --apply` indicates that there were
# changes or failures during its execution.
#
# @option opts [Boolean] :expect_changes (false) This option enables
# detailed exit codes and causes a test failure
# if `puppet --apply` indicates that there were
# no resource changes during its execution.
#
# @option opts [Boolean] :expect_failures (false) This option enables
# detailed exit codes and causes a test failure
# if `puppet --apply` indicates there were no
# failure during its execution.
#
# @option opts [Boolean] :future_parser (false) This option enables
# the future parser option that is available
# from Puppet verion 3.2
# By default it will use the 'current' parser.
#
# @option opts [Boolean] :noop (false) If this option exists, the
# the "--noop" command line parameter will be
# passed to the 'puppet apply' command.
#
# @option opts [String] :modulepath The search path for modules, as
# a list of directories separated by the system
# path separator character. (The POSIX path separator
# is ‘:’, and the Windows path separator is ‘;’.)
#
# @option opts [String] :debug (false) If this option exists,
# the "--debug" command line parameter
# will be passed to the 'puppet apply' command.
# @option opts [Boolean] :run_in_parallel Whether to run on each host in parallel.
#
# @param [Block] block This method will yield to a block of code passed
# by the caller; this can be used for additional
# validation, etc.
#
# @return [Array<Result>, Result, nil] An array of results, a result object,
# or nil. Check {#run_block_on} for more details on this.
def apply_manifest_on(host, manifest, opts = {}, &block)
block_on host, opts do | host |
on_options = {}
on_options[:acceptable_exit_codes] = Array(opts[:acceptable_exit_codes])
puppet_apply_opts = {}
if opts[:debug]
puppet_apply_opts[:debug] = nil
else
puppet_apply_opts[:verbose] = nil
end
puppet_apply_opts[:parseonly] = nil if opts[:parseonly]
puppet_apply_opts[:trace] = nil if opts[:trace]
puppet_apply_opts[:parser] = 'future' if opts[:future_parser]
puppet_apply_opts[:modulepath] = opts[:modulepath] if opts[:modulepath]
puppet_apply_opts[:noop] = nil if opts[:noop]
# From puppet help:
# "... an exit code of '2' means there were changes, an exit code of
# '4' means there were failures during the transaction, and an exit
# code of '6' means there were both changes and failures."
if [opts[:catch_changes],opts[:catch_failures],opts[:expect_failures],opts[:expect_changes]].compact.length > 1
raise(ArgumentError,
'Cannot specify more than one of `catch_failures`, ' +
'`catch_changes`, `expect_failures`, or `expect_changes` ' +
'for a single manifest')
end
if opts[:catch_changes]
puppet_apply_opts['detailed-exitcodes'] = nil
# We're after idempotency so allow exit code 0 only.
on_options[:acceptable_exit_codes] |= [0]
elsif opts[:catch_failures]
puppet_apply_opts['detailed-exitcodes'] = nil
# We're after only complete success so allow exit codes 0 and 2 only.
on_options[:acceptable_exit_codes] |= [0, 2]
elsif opts[:expect_failures]
puppet_apply_opts['detailed-exitcodes'] = nil
# We're after failures specifically so allow exit codes 1, 4, and 6 only.
on_options[:acceptable_exit_codes] |= [1, 4, 6]
elsif opts[:expect_changes]
puppet_apply_opts['detailed-exitcodes'] = nil
# We're after changes specifically so allow exit code 2 only.
on_options[:acceptable_exit_codes] |= [2]
else
# Either use the provided acceptable_exit_codes or default to [0]
on_options[:acceptable_exit_codes] |= [0]
end
# Not really thrilled with this implementation, might want to improve it
# later. Basically, there is a magic trick in the constructor of
# PuppetCommand which allows you to pass in a Hash for the last value in
# the *args Array; if you do so, it will be treated specially. So, here
# we check to see if our caller passed us a hash of environment variables
# that they want to set for the puppet command. If so, we set the final
# value of *args to a new hash with just one entry (the value of which
# is our environment variables hash)
if opts.has_key?(:environment)
puppet_apply_opts['ENV'] = opts[:environment]
end
file_path = host.tmpfile('apply_manifest.pp')
create_remote_file(host, file_path, manifest + "\n")
if host[:default_apply_opts].respond_to? :merge
puppet_apply_opts = host[:default_apply_opts].merge( puppet_apply_opts )
end
on host, puppet('apply', file_path, puppet_apply_opts), on_options, &block
end
end
# Runs 'puppet apply' on default host, piping manifest through stdin
# @see #apply_manifest_on
def apply_manifest(manifest, opts = {}, &block)
apply_manifest_on(default, manifest, opts, &block)
end
# @deprecated
def run_agent_on(host, arg='--no-daemonize --verbose --onetime --test',
options={}, &block)
block_on host do | host |
on host, puppet_agent(arg), options, &block
end
end
# This method using the puppet resource 'host' will setup host aliases
# and register the remove of host aliases via Beaker::TestCase#teardown
#
# A teardown step is also added to make sure unstubbing of the host is
# removed always.
#
# @param [Host, Array<Host>, String, Symbol] machine One or more hosts to act upon,
# or a role (String or Symbol) that identifies one or more hosts.
# @param ip_spec [Hash{String=>String}] a hash containing the host to ip
# mappings
# @param alias_spec [Hash{String=>Array[String]] an hash containing the host to alias(es) mappings to apply
# @example Stub puppetlabs.com on the master to 127.0.0.1 with an alias example.com
# stub_hosts_on(master, {'puppetlabs.com' => '127.0.0.1'}, {'puppetlabs.com' => ['example.com']})
def stub_hosts_on(machine, ip_spec, alias_spec={})
block_on machine do | host |
ip_spec.each do |address, ip|
aliases = alias_spec[address] || []
manifest =<<-EOS.gsub /^\s+/, ""
host { '#{address}':
\tensure => present,
\tip => '#{ip}',
\thost_aliases => #{aliases},
}
EOS
logger.notify("Stubbing address #{address} to IP #{ip} on machine #{host}")
apply_manifest_on( host, manifest )
end
teardown do
ip_spec.each do |address, ip|
logger.notify("Unstubbing address #{address} to IP #{ip} on machine #{host}")
on( host, puppet('resource', 'host', address, 'ensure=absent') )
end
end
end
end
# This method accepts a block and using the puppet resource 'host' will
# setup host aliases before and after that block.
#
# @param [Host, Array<Host>, String, Symbol] host One or more hosts to act upon,
# or a role (String or Symbol) that identifies one or more hosts.
# @param ip_spec [Hash{String=>String}] a hash containing the host to ip
# mappings
# @param alias_spec [Hash{String=>Array[String]] an hash containing the host to alias(es) mappings to apply
# @example Stub forgeapi.puppetlabs.com on the master to 127.0.0.1 with an alias forgeapi.example.com
# with_host_stubbed_on(master, {'forgeapi.puppetlabs.com' => '127.0.0.1'}, {'forgeapi.puppetlabs.com' => ['forgeapi.example.com']}) do
# puppet( "module install puppetlabs-stdlib" )
# end
def with_host_stubbed_on(host, ip_spec, alias_spec={}, &block)
begin
block_on host do |host|
# this code is duplicated from the `stub_hosts_on` method. The
# `stub_hosts_on` method itself is not used here because this
# method is used by modules tests using `beaker-rspec`. Since
# the `stub_hosts_on` method contains a `teardown` step, it is
# incompatible with `beaker_rspec`.
ip_spec.each do |address, ip|
aliases = alias_spec[address] || []
manifest =<<-EOS.gsub /^\s+/, ""
host { '#{address}':
\tensure => present,
\tip => '#{ip}',
\thost_aliases => #{aliases},
}
EOS
logger.notify("Stubbing address #{address} to IP #{ip} on machine #{host}")
apply_manifest_on( host, manifest )
end
end
block.call
ensure
ip_spec.each do |address, ip|
logger.notify("Unstubbing address #{address} to IP #{ip} on machine #{host}")
on( host, puppet('resource', 'host', address, 'ensure=absent') )
end
end
end
# This method accepts a block and using the puppet resource 'host' will
# setup host aliases before and after that block on the default host
#
# @example Stub puppetlabs.com on the default host to 127.0.0.1
# stub_hosts('puppetlabs.com' => '127.0.0.1')
# @see #stub_hosts_on
def stub_hosts(ip_spec)
stub_hosts_on(default, ip_spec)
end
# This wraps the method `stub_hosts_on` and makes the stub specific to
# the forge alias.
#
# forge api v1 canonical source is forge.puppetlabs.com
# forge api v3 canonical source is forgeapi.puppetlabs.com
#
# @param machine [String] the host to perform the stub on
# @param forge_host [String] The URL to use as the forge alias, will default to using :forge_host in the
# global options hash
def stub_forge_on(machine, forge_host = nil)
#use global options hash
primary_forge_name = 'forge.puppetlabs.com'
forge_host ||= options[:forge_host]
forge_ip = resolve_hostname_on(machine, forge_host)
raise "Failed to resolve forge host '#{forge_host}'" unless forge_ip
@forge_ip ||= forge_ip
block_on machine do | host |
stub_hosts_on(host, {primary_forge_name => @forge_ip}, {primary_forge_name => ['forge.puppet.com','forgeapi.puppetlabs.com','forgeapi.puppet.com']})
end
end
# This wraps the method `with_host_stubbed_on` and makes the stub specific to
# the forge alias.
#
# forge api v1 canonical source is forge.puppetlabs.com
# forge api v3 canonical source is forgeapi.puppetlabs.com
#
# @param host [String] the host to perform the stub on
# @param forge_host [String] The URL to use as the forge alias, will default to using :forge_host in the
# global options hash
def with_forge_stubbed_on( host, forge_host = nil, &block )
#use global options hash
primary_forge_name = 'forge.puppetlabs.com'
forge_host ||= options[:forge_host]
forge_ip = resolve_hostname_on(host, forge_host)
raise "Failed to resolve forge host '#{forge_host}'" unless forge_ip
@forge_ip ||= forge_ip
with_host_stubbed_on( host, {primary_forge_name => @forge_ip}, {primary_forge_name => ['forge.puppet.com','forgeapi.puppetlabs.com','forgeapi.puppet.com']}, &block )
end
# This wraps `with_forge_stubbed_on` and provides it the default host
# @see with_forge_stubbed_on
def with_forge_stubbed( forge_host = nil, &block )
with_forge_stubbed_on( default, forge_host, &block )
end
# This wraps the method `stub_hosts` and makes the stub specific to
# the forge alias.
#
# @see #stub_forge_on
def stub_forge(forge_host = nil)
#use global options hash
forge_host ||= options[:forge_host]
stub_forge_on(default, forge_host)
end
# Waits until a successful curl check has happened against puppetdb
#
# @param [Host] host Host puppetdb is on
# @param [Fixnum] nonssl_port Port to make the HTTP status check over
# @param [Fixnum] ssl_port Port to make the HTTPS status check over
#
# @return [Result] Result of the last HTTPS status check
def sleep_until_puppetdb_started(host, nonssl_port = nil, ssl_port = nil)
nonssl_port = options[:puppetdb_port_nonssl] if nonssl_port.nil?
ssl_port = options[:puppetdb_port_ssl] if ssl_port.nil?
curl_with_retries("start puppetdb", host, "http://localhost:#{nonssl_port}", 0, 120)
curl_with_retries("start puppetdb (ssl)",
host, "https://#{host.node_name}:#{ssl_port}", [35, 60])
end
# Waits until a successful curl check has happened against puppetserver
#
# @param [Host] host Host puppetserver is on
# @param [Fixnum] port Port to make the HTTPS status check over
#
# @return [Result] Result of the last HTTPS status check
def sleep_until_puppetserver_started(host, port = nil)
port = options[:puppetserver_port] if port.nil?
curl_with_retries("start puppetserver (ssl)",
host, "https://#{host.node_name}:#{port}", [35, 60])
end
      # Waits until a successful curl check has happened against node classifier
#
# @param [Host] host Host node classifier is on
# @param [Fixnum] port Port to make the HTTPS status check over
#
# @return [Result] Result of the last HTTPS status check
def sleep_until_nc_started(host, port = nil)
port = options[:nodeclassifier_port] if port.nil?
curl_with_retries("start nodeclassifier (ssl)",
host, "https://#{host.node_name}:#{port}", [35, 60])
end
#stops the puppet agent running on the host
# @param [Host, Array<Host>, String, Symbol] agent One or more hosts to act upon,
# or a role (String or Symbol) that identifies one or more hosts.
# @param [Hash{Symbol=>String}] opts Options to alter execution.
# @option opts [Boolean] :run_in_parallel Whether to run on each host in parallel.
def stop_agent_on(agent, opts = {})
block_on agent, opts do | host |
vardir = host.puppet_configprint['vardir']
agent_running = true
while agent_running
agent_running = host.file_exist?("#{vardir}/state/agent_catalog_run.lock")
if agent_running
sleep 2
end
end
# In 4.0 this was changed to just be `puppet`
agent_service = 'puppet'
if !aio_version?(host)
# The agent service is `pe-puppet` everywhere EXCEPT certain linux distros on PE 2.8
# In all the case that it is different, this init script will exist. So we can assume
# that if the script doesn't exist, we should just use `pe-puppet`
agent_service = 'pe-puppet-agent'
agent_service = 'pe-puppet' unless host.file_exist?('/etc/init.d/pe-puppet-agent')
end
# Under a number of stupid circumstances, we can't stop the
# agent using puppet. This is usually because of issues with
# the init script or system on that particular configuration.
avoid_puppet_at_all_costs = false
avoid_puppet_at_all_costs ||= host['platform'] =~ /el-4/
avoid_puppet_at_all_costs ||= host['pe_ver'] && version_is_less(host['pe_ver'], '3.2') && host['platform'] =~ /sles/
if avoid_puppet_at_all_costs
# When upgrading, puppet is already stopped. On EL4, this causes an exit code of '1'
on host, "/etc/init.d/#{agent_service} stop", :acceptable_exit_codes => [0, 1]
else
on host, puppet_resource('service', agent_service, 'ensure=stopped')
end
end
end
#stops the puppet agent running on the default host
# @see #stop_agent_on
def stop_agent
stop_agent_on(default)
end
#wait for a given host to appear in the dashboard
def wait_for_host_in_dashboard(host)
hostname = host.node_name
if host['platform'] =~ /aix/ then
curl_opts = '--tlsv1 -I'
else
curl_opts = '--tlsv1 -k -I'
end
retry_on(dashboard, "! curl #{curl_opts} https://#{dashboard}/nodes/#{hostname} | grep '404 Not Found'")
end
# Ensure the host has requested a cert, then sign it
#
# @param [Host, Array<Host>, String, Symbol] host One or more hosts, or a role (String or Symbol)
# that identifies one or more hosts to validate certificate signing.
# No argument, or an empty array means no validation of success
# for specific hosts will be performed. This will always execute
# 'cert --sign --all --allow-dns-alt-names' even for a single host.
#
# @return nil
# @raise [FailTest] if process times out
def sign_certificate_for(host = [])
hostnames = []
hosts = host.is_a?(Array) ? host : [host]
hosts.each{ |current_host|
if [master, dashboard, database].include? current_host
on current_host, puppet( 'agent -t' ), :acceptable_exit_codes => [0,1,2]
on master, puppet( "cert --allow-dns-alt-names sign #{current_host}" ), :acceptable_exit_codes => [0,24]
else
hostnames << Regexp.escape( current_host.node_name )
end
}
if hostnames.size < 1
on master, puppet("cert --sign --all --allow-dns-alt-names"),
:acceptable_exit_codes => [0,24]
return
end
while hostnames.size > 0
last_sleep = 0
next_sleep = 1
(0..10).each do |i|
if i == 10
fail_test("Failed to sign cert for #{hostnames}")
hostnames.clear
end
on master, puppet("cert --sign --all --allow-dns-alt-names"), :acceptable_exit_codes => [0,24]
out = on(master, puppet("cert --list --all")).stdout
if hostnames.all? { |hostname| out =~ /\+ "?#{hostname}"?/ }
hostnames.clear
break
end
sleep next_sleep
(last_sleep, next_sleep) = next_sleep, last_sleep+next_sleep
end
end
host
end
#prompt the master to sign certs then check to confirm the cert for the default host is signed
#@see #sign_certificate_for
def sign_certificate
sign_certificate_for(default)
end
# Create a temp directory on remote host with a user. Default user
# is puppet master user.
#
# @param [Host] host A single remote host on which to create and adjust
# the ownership of a temp directory.
# @param [String] name A remote path prefix for the new temp
# directory. Default value is '/tmp/beaker'
# @param [String] user The name of user that should own the temp
# directory. If no username is specified, use `puppet master
# --configprint user` to obtain username from master. Raise RuntimeError
# if this puppet command returns a non-zero exit code.
#
# @return [String] Returns the name of the newly-created dir.
def create_tmpdir_for_user(host, name='/tmp/beaker', user=nil)
if not user
result = on host, puppet("master --configprint user")
if not result.exit_code == 0
raise "`puppet master --configprint` failed, check that puppet is installed on #{host} or explicitly pass in a user name."
end
user = result.stdout.strip
end
create_tmpdir_on(host, name, user)
end
end
end
end
end
| 1 | 14,337 | @johnduarte I know this isn't part of your code change, but it seems to me that this `curl_with_retries` is not necessary. If the `retry_on` host command is successful, isn't puppetdb ostensibly listening to the ssl port as well? What do you think about removing that `curl_with_retries` after the `retry_on`? | voxpupuli-beaker | rb |
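For context, the suggestion above would roughly amount to keeping only the status-endpoint probe introduced by the patch and dropping the trailing SSL `curl_with_retries`. A hedged sketch of that shape, assuming the status endpoint alone is an adequate readiness signal (this is not a change that has actually been made here):

      def sleep_until_puppetdb_started(host, nonssl_port = nil, ssl_port = nil)
        nonssl_port = options[:puppetdb_port_nonssl] if nonssl_port.nil?
        # ssl_port is kept for call-site compatibility but would be unused without the SSL probe.
        endpoint = 'status/v1/services/puppetdb-status'
        # Poll the puppetdb status endpoint until it reports itself as running.
        retry_on(host,
                 "curl -m 1 http://localhost:#{nonssl_port}/#{endpoint} | grep '\"state\":\"running\"'",
                 {:max_retries => 120})
      end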
@@ -17,13 +17,15 @@ package labelindex
import (
log "github.com/Sirupsen/logrus"
+ "github.com/projectcalico/felix/set"
"github.com/projectcalico/libcalico-go/lib/selector"
+ "github.com/projectcalico/libcalico-go/lib/selector/parser"
)
type Index interface {
UpdateSelector(id interface{}, sel selector.Selector)
DeleteSelector(id interface{})
- UpdateLabels(id interface{}, labels map[string]string)
+ UpdateLabels(id interface{}, labels parser.Labels)
DeleteLabels(id interface{})
}
| 1 | // Copyright (c) 2016-2017 Tigera, Inc. All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package labelindex
import (
log "github.com/Sirupsen/logrus"
"github.com/projectcalico/libcalico-go/lib/selector"
)
type Index interface {
UpdateSelector(id interface{}, sel selector.Selector)
DeleteSelector(id interface{})
UpdateLabels(id interface{}, labels map[string]string)
DeleteLabels(id interface{})
}
type MatchCallback func(selId, labelId interface{})
type linearScanIndex struct {
// All known labels and selectors.
labelsById map[interface{}]map[string]string
selectorsById map[interface{}]selector.Selector
// Current matches.
selIdsByLabelId map[interface{}]map[interface{}]bool
labelIdsBySelId map[interface{}]map[interface{}]bool
// Callback functions
OnMatchStarted MatchCallback
OnMatchStopped MatchCallback
}
func NewIndex(onMatchStarted, onMatchStopped MatchCallback) Index {
return &linearScanIndex{
labelsById: make(map[interface{}]map[string]string),
selectorsById: make(map[interface{}]selector.Selector),
selIdsByLabelId: make(map[interface{}]map[interface{}]bool),
labelIdsBySelId: make(map[interface{}]map[interface{}]bool),
OnMatchStarted: onMatchStarted,
OnMatchStopped: onMatchStopped,
}
}
func (idx *linearScanIndex) UpdateSelector(id interface{}, sel selector.Selector) {
log.Infof("Updating selector %v", id)
if sel == nil {
panic("Selector should not be nil")
}
idx.scanAllLabels(id, sel)
idx.selectorsById[id] = sel
}
func (idx *linearScanIndex) DeleteSelector(id interface{}) {
log.Infof("Deleting selector %v", id)
matchSet := idx.labelIdsBySelId[id]
matchSlice := make([]interface{}, 0, len(matchSet))
for labelId, _ := range matchSet {
matchSlice = append(matchSlice, labelId)
}
for _, labelId := range matchSlice {
idx.deleteMatch(id, labelId)
}
delete(idx.selectorsById, id)
}
func (idx *linearScanIndex) UpdateLabels(id interface{}, labels map[string]string) {
log.Debugf("Updating labels for ID %v", id)
idx.scanAllSelectors(id, labels)
idx.labelsById[id] = labels
}
func (idx *linearScanIndex) DeleteLabels(id interface{}) {
log.Debugf("Deleting labels for %v", id)
matchSet := idx.selIdsByLabelId[id]
matchSlice := make([]interface{}, 0, len(matchSet))
for selId, _ := range matchSet {
matchSlice = append(matchSlice, selId)
}
for _, selId := range matchSlice {
idx.deleteMatch(selId, id)
}
delete(idx.labelsById, id)
}
func (idx *linearScanIndex) scanAllLabels(selId interface{}, sel selector.Selector) {
log.Debugf("Scanning all (%v) labels against selector %v",
len(idx.labelsById), selId)
for labelId, labels := range idx.labelsById {
idx.updateMatches(selId, sel, labelId, labels)
}
}
func (idx *linearScanIndex) scanAllSelectors(labelId interface{}, labels map[string]string) {
log.Debugf("Scanning all (%v) selectors against labels %v",
len(idx.selectorsById), labelId)
for selId, sel := range idx.selectorsById {
idx.updateMatches(selId, sel, labelId, labels)
}
}
func (idx *linearScanIndex) updateMatches(selId interface{}, sel selector.Selector,
labelId interface{}, labels map[string]string) {
nowMatches := sel.Evaluate(labels)
if nowMatches {
idx.storeMatch(selId, labelId)
} else {
idx.deleteMatch(selId, labelId)
}
}
func (idx *linearScanIndex) storeMatch(selId, labelId interface{}) {
previouslyMatched := idx.labelIdsBySelId[selId][labelId]
if !previouslyMatched {
log.Debugf("Selector %v now matches labels %v", selId, labelId)
labelIds, ok := idx.labelIdsBySelId[selId]
if !ok {
labelIds = make(map[interface{}]bool)
idx.labelIdsBySelId[selId] = labelIds
}
labelIds[labelId] = true
selIDs, ok := idx.selIdsByLabelId[labelId]
if !ok {
selIDs = make(map[interface{}]bool)
idx.selIdsByLabelId[labelId] = selIDs
}
selIDs[selId] = true
idx.OnMatchStarted(selId, labelId)
}
}
func (idx *linearScanIndex) deleteMatch(selId, labelId interface{}) {
previouslyMatched := idx.labelIdsBySelId[selId][labelId]
if previouslyMatched {
log.Debugf("Selector %v no longer matches labels %v",
selId, labelId)
delete(idx.labelIdsBySelId[selId], labelId)
if len(idx.labelIdsBySelId[selId]) == 0 {
delete(idx.labelIdsBySelId, selId)
}
delete(idx.selIdsByLabelId[labelId], selId)
if len(idx.selIdsByLabelId[labelId]) == 0 {
delete(idx.selIdsByLabelId, labelId)
}
idx.OnMatchStopped(selId, labelId)
}
}
| 1 | 15,217 | Does an empty struct really take less storage than a bool? I guess it must be, or else you wouldn't have changed this, but I'm curious about the detail. | projectcalico-felix | go |
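On the question above: yes -- `struct{}` is a zero-sized type in Go, while `bool` occupies one byte per entry, which is why sets are commonly modelled as `map[T]struct{}`. A small standalone sketch (not part of the Felix code) demonstrating the difference:

package main

import (
	"fmt"
	"unsafe"
)

func main() {
	// struct{} has zero size; bool takes one byte.
	fmt.Println(unsafe.Sizeof(struct{}{})) // prints 0
	fmt.Println(unsafe.Sizeof(true))       // prints 1

	// A set built on map[string]struct{} stores no per-entry value,
	// whereas map[string]bool stores a byte per entry (before bucket padding).
	set := map[string]struct{}{}
	set["felix"] = struct{}{}
	_, present := set["felix"]
	fmt.Println(present) // prints true
}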
@@ -259,6 +259,12 @@ class EditableTextWithoutAutoSelectDetection(EditableText):
This should be used when an object does not notify of selection changes.
"""
+ def waitForAndSpeakSelectionChange(self, oldTextInfo):
+ api.processPendingEvents(processEventQueue=False)
+ newInfo=self.makeTextInfo(textInfos.POSITION_SELECTION)
+ speech.speakSelectionChange(oldTextInfo,newInfo)
+ braille.handler.handleCaretMove(self)
+
def script_caret_changeSelection(self,gesture):
try:
oldInfo=self.makeTextInfo(textInfos.POSITION_SELECTION) | 1 | #editableText.py
#A part of NonVisual Desktop Access (NVDA)
#This file is covered by the GNU General Public License.
#See the file COPYING for more details.
#Copyright (C) 2006-2012 NV Access Limited
"""Common support for editable text.
@note: If you want editable text functionality for an NVDAObject,
you should use the EditableText classes in L{NVDAObjects.behaviors}.
"""
import time
import sayAllHandler
import api
import review
from baseObject import ScriptableObject
import braille
import speech
import config
import eventHandler
from scriptHandler import isScriptWaiting, willSayAllResume
import textInfos
import controlTypes
class EditableText(ScriptableObject):
"""Provides scripts to report appropriately when moving the caret in editable text fields.
This does not handle the selection change keys.
To have selection changes reported, the object must notify of selection changes.
If the object supports selection but does not notify of selection changes, L{EditableTextWithoutAutoSelectDetection} should be used instead.
If the object notifies of selection changes, the following should be done:
* When the object gains focus, L{initAutoSelectDetection} must be called.
* When the object notifies of a possible selection change, L{detectPossibleSelectionChange} must be called.
* Optionally, if the object notifies of changes to its content, L{hasContentChangedSinceLastSelection} should be set to C{True}.
@ivar hasContentChangedSinceLastSelection: Whether the content has changed since the last selection occurred.
@type hasContentChangedSinceLastSelection: bool
"""
#: Whether to fire caretMovementFailed events when the caret doesn't move in response to a caret movement key.
shouldFireCaretMovementFailedEvents = False
#: Whether or not to announce text found before the caret on a new line (e.g. auto numbering)
announceNewLineText=True
def _hasCaretMoved(self, bookmark, retryInterval=0.01, timeout=0.03):
"""
Waits for the caret to move, for a timeout to elapse, or for a new focus event or script to be queued.
@param bookmark: a bookmark representing the position of the caret before it was instructed to move
@type bookmark: bookmark
@param retryInterval: the interval of time in seconds this method should wait before checking the caret each time.
@type retryInterval: float
@param timeout: the over all amount of time in seconds the method should wait before giving up completely.
@type timeout: float
@return: a tuple containing a boolean denoting whether this method timed out, and a TextInfo representing the old or updated caret position or None if interupted by a script or focus event.
@rtype: tuple
"""
elapsed = 0
newInfo=None
while elapsed < timeout:
if isScriptWaiting():
return (False,None)
api.processPendingEvents(processEventQueue=False)
if eventHandler.isPendingEvents("gainFocus"):
return (True,None)
#The caret may stop working as the focus jumps, we want to stay in the while loop though
try:
newInfo = self.makeTextInfo(textInfos.POSITION_CARET)
newBookmark = newInfo.bookmark
except (RuntimeError,NotImplementedError):
newInfo=None
else:
if newBookmark!=bookmark:
return (True,newInfo)
time.sleep(retryInterval)
elapsed += retryInterval
return (False,newInfo)
def _caretScriptPostMovedHelper(self, speakUnit, gesture, info=None):
if isScriptWaiting():
return
if not info:
try:
info = self.makeTextInfo(textInfos.POSITION_CARET)
except:
return
review.handleCaretMove(info)
if speakUnit and not willSayAllResume(gesture):
info.expand(speakUnit)
speech.speakTextInfo(info, unit=speakUnit, reason=controlTypes.REASON_CARET)
braille.handler.handleCaretMove(self)
def _caretMovementScriptHelper(self, gesture, unit):
try:
info=self.makeTextInfo(textInfos.POSITION_CARET)
except:
gesture.send()
return
bookmark=info.bookmark
gesture.send()
caretMoved,newInfo=self._hasCaretMoved(bookmark)
if not caretMoved and self.shouldFireCaretMovementFailedEvents:
eventHandler.executeEvent("caretMovementFailed", self, gesture=gesture)
self._caretScriptPostMovedHelper(unit,gesture,newInfo)
def script_caret_newLine(self,gesture):
try:
info=self.makeTextInfo(textInfos.POSITION_CARET)
except:
gesture.send()
return
bookmark=info.bookmark
gesture.send()
caretMoved,newInfo=self._hasCaretMoved(bookmark)
if not caretMoved or not newInfo:
return
# newInfo.copy should be good enough here, but in MS Word we get strange results.
try:
lineInfo=self.makeTextInfo(textInfos.POSITION_CARET)
except (RuntimeError,NotImplementedError):
return
lineInfo.expand(textInfos.UNIT_LINE)
lineInfo.setEndPoint(newInfo,"endToStart")
if lineInfo.isCollapsed:
lineInfo.expand(textInfos.UNIT_CHARACTER)
onlyInitial=True
else:
onlyInitial=False
speech.speakTextInfo(lineInfo,unit=textInfos.UNIT_LINE,reason=controlTypes.REASON_CARET,onlyInitialFields=onlyInitial,suppressBlanks=True)
def _caretMoveBySentenceHelper(self, gesture, direction):
if isScriptWaiting():
return
try:
info=self.makeTextInfo(textInfos.POSITION_CARET)
info.move(textInfos.UNIT_SENTENCE, direction)
info.updateCaret()
self._caretScriptPostMovedHelper(textInfos.UNIT_SENTENCE,gesture,info)
except:
gesture.send()
return
def script_caret_moveByLine(self,gesture):
self._caretMovementScriptHelper(gesture, textInfos.UNIT_LINE)
script_caret_moveByLine.resumeSayAllMode=sayAllHandler.CURSOR_CARET
def script_caret_moveByCharacter(self,gesture):
self._caretMovementScriptHelper(gesture, textInfos.UNIT_CHARACTER)
def script_caret_moveByWord(self,gesture):
self._caretMovementScriptHelper(gesture, textInfos.UNIT_WORD)
def script_caret_moveByParagraph(self,gesture):
self._caretMovementScriptHelper(gesture, textInfos.UNIT_PARAGRAPH)
script_caret_moveByParagraph.resumeSayAllMode=sayAllHandler.CURSOR_CARET
def script_caret_previousSentence(self,gesture):
self._caretMoveBySentenceHelper(gesture, -1)
script_caret_previousSentence.resumeSayAllMode=sayAllHandler.CURSOR_CARET
def script_caret_nextSentence(self,gesture):
self._caretMoveBySentenceHelper(gesture, 1)
script_caret_nextSentence.resumeSayAllMode=sayAllHandler.CURSOR_CARET
def _backspaceScriptHelper(self,unit,gesture):
try:
oldInfo=self.makeTextInfo(textInfos.POSITION_CARET)
except:
gesture.send()
return
oldBookmark=oldInfo.bookmark
testInfo=oldInfo.copy()
res=testInfo.move(textInfos.UNIT_CHARACTER,-1)
if res<0:
testInfo.expand(unit)
delChunk=testInfo.text
else:
delChunk=""
gesture.send()
caretMoved,newInfo=self._hasCaretMoved(oldBookmark)
if not caretMoved:
return
if len(delChunk)>1:
speech.speakMessage(delChunk)
else:
speech.speakSpelling(delChunk)
self._caretScriptPostMovedHelper(None,gesture,newInfo)
def script_caret_backspaceCharacter(self,gesture):
self._backspaceScriptHelper(textInfos.UNIT_CHARACTER,gesture)
def script_caret_backspaceWord(self,gesture):
self._backspaceScriptHelper(textInfos.UNIT_WORD,gesture)
def script_caret_delete(self,gesture):
try:
info=self.makeTextInfo(textInfos.POSITION_CARET)
except:
gesture.send()
return
bookmark=info.bookmark
gesture.send()
# We'll try waiting for the caret to move, but we don't care if it doesn't.
caretMoved,newInfo=self._hasCaretMoved(bookmark)
self._caretScriptPostMovedHelper(textInfos.UNIT_CHARACTER,gesture,newInfo)
braille.handler.handleCaretMove(self)
__gestures = {
"kb:upArrow": "caret_moveByLine",
"kb:downArrow": "caret_moveByLine",
"kb:leftArrow": "caret_moveByCharacter",
"kb:rightArrow": "caret_moveByCharacter",
"kb:pageUp": "caret_moveByLine",
"kb:pageDown": "caret_moveByLine",
"kb:control+leftArrow": "caret_moveByWord",
"kb:control+rightArrow": "caret_moveByWord",
"kb:control+upArrow": "caret_moveByParagraph",
"kb:control+downArrow": "caret_moveByParagraph",
"kb:alt+upArrow": "caret_previousSentence",
"kb:alt+downArrow": "caret_nextSentence",
"kb:home": "caret_moveByCharacter",
"kb:end": "caret_moveByCharacter",
"kb:control+home": "caret_moveByLine",
"kb:control+end": "caret_moveByLine",
"kb:delete": "caret_delete",
"kb:numpadDelete": "caret_delete",
"kb:backspace": "caret_backspaceCharacter",
"kb:control+backspace": "caret_backspaceWord",
}
def initAutoSelectDetection(self):
"""Initialise automatic detection of selection changes.
This should be called when the object gains focus.
"""
try:
self._lastSelectionPos=self.makeTextInfo(textInfos.POSITION_SELECTION)
except:
self._lastSelectionPos=None
self.hasContentChangedSinceLastSelection=False
def detectPossibleSelectionChange(self):
"""Detects if the selection has been changed, and if so it speaks the change.
"""
try:
newInfo=self.makeTextInfo(textInfos.POSITION_SELECTION)
except:
# Just leave the old selection, which is usually better than nothing.
return
oldInfo=getattr(self,'_lastSelectionPos',None)
self._lastSelectionPos=newInfo.copy()
if not oldInfo:
# There's nothing we can do, but at least the last selection will be right next time.
return
hasContentChanged=getattr(self,'hasContentChangedSinceLastSelection',False)
self.hasContentChangedSinceLastSelection=False
speech.speakSelectionChange(oldInfo,newInfo,generalize=hasContentChanged)
class EditableTextWithoutAutoSelectDetection(EditableText):
"""In addition to L{EditableText}, provides scripts to report appropriately when the selection changes.
This should be used when an object does not notify of selection changes.
"""
def script_caret_changeSelection(self,gesture):
try:
oldInfo=self.makeTextInfo(textInfos.POSITION_SELECTION)
except:
gesture.send()
return
gesture.send()
if isScriptWaiting() or eventHandler.isPendingEvents("gainFocus"):
return
api.processPendingEvents(processEventQueue=False)
try:
newInfo=self.makeTextInfo(textInfos.POSITION_SELECTION)
except:
return
speech.speakSelectionChange(oldInfo,newInfo)
braille.handler.handleCaretMove(self)
__changeSelectionGestures = (
"kb:shift+upArrow",
"kb:shift+downArrow",
"kb:shift+leftArrow",
"kb:shift+rightArrow",
"kb:shift+pageUp",
"kb:shift+pageDown",
"kb:shift+control+leftArrow",
"kb:shift+control+rightArrow",
"kb:shift+control+upArrow",
"kb:shift+control+downArrow",
"kb:shift+home",
"kb:shift+end",
"kb:shift+control+home",
"kb:shift+control+end",
"kb:control+a",
)
def initClass(self):
for gesture in self.__changeSelectionGestures:
self.bindGesture(gesture, "caret_changeSelection")
| 1 | 17,956 | nit: This should perhaps be called `waitForAndReportSelectionChange`, as it affects braille as well, not just speech. | nvaccess-nvda | py |
@@ -127,10 +127,14 @@ class ProxyListenerSNS(ProxyListener):
elif req_action == 'CreateTopic':
topic_arn = aws_stack.sns_topic_arn(req_data['Name'][0])
- self._extract_tags(topic_arn, req_data)
+ tag_error_response = self._extract_tags(topic_arn, req_data, True)
+                # If there is an error, return an error response; otherwise continue as expected.
+ if not tag_error_response:
+ return make_error(code=400, code_string='InvalidParameter',
+ message='Topic already exists with different tags')
elif req_action == 'TagResource':
- self._extract_tags(topic_arn, req_data)
+ self._extract_tags(topic_arn, req_data, False)
return make_response(req_action)
elif req_action == 'UntagResource': | 1 | import ast
import json
import uuid
import logging
import traceback
import six
import requests
import xmltodict
from flask import Response as FlaskResponse
from requests.models import Response, Request
from six.moves.urllib import parse as urlparse
from localstack.config import external_service_url
from localstack.constants import TEST_AWS_ACCOUNT_ID, MOTO_ACCOUNT_ID
from localstack.services.awslambda import lambda_api
from localstack.services.generic_proxy import ProxyListener
from localstack.utils.analytics import event_publisher
from localstack.utils.aws import aws_stack
from localstack.utils.aws.aws_responses import response_regex_replace
from localstack.utils.aws.dead_letter_queue import sns_error_to_dead_letter_queue
from localstack.utils.common import timestamp_millis, short_uid, to_str
# set up logger
LOG = logging.getLogger(__name__)
# mappings for SNS topic subscriptions
SNS_SUBSCRIPTIONS = {}
# mappings for subscription status
SUBSCRIPTION_STATUS = {}
# mappings for SNS tags
SNS_TAGS = {}
class ProxyListenerSNS(ProxyListener):
def forward_request(self, method, path, data, headers):
if method == 'OPTIONS':
return 200
# check region
try:
aws_stack.check_valid_region(headers)
aws_stack.set_default_region_in_headers(headers)
except Exception as e:
return make_error(message=str(e), code=400)
if method == 'POST' and path == '/':
# parse payload and extract fields
req_data = urlparse.parse_qs(to_str(data), keep_blank_values=True)
req_action = req_data['Action'][0]
topic_arn = req_data.get('TargetArn') or req_data.get('TopicArn') or req_data.get('ResourceArn')
if topic_arn:
topic_arn = topic_arn[0]
topic_arn = aws_stack.fix_account_id_in_arns(topic_arn)
if req_action == 'SetSubscriptionAttributes':
sub = get_subscription_by_arn(req_data['SubscriptionArn'][0])
if not sub:
return make_error(message='Unable to find subscription for given ARN', code=400)
attr_name = req_data['AttributeName'][0]
attr_value = req_data['AttributeValue'][0]
sub[attr_name] = attr_value
return make_response(req_action)
elif req_action == 'GetSubscriptionAttributes':
sub = get_subscription_by_arn(req_data['SubscriptionArn'][0])
if not sub:
return make_error(message='Unable to find subscription for given ARN', code=400)
content = '<Attributes>'
for key, value in sub.items():
content += '<entry><key>%s</key><value>%s</value></entry>\n' % (key, value)
content += '</Attributes>'
return make_response(req_action, content=content)
elif req_action == 'Subscribe':
if 'Endpoint' not in req_data:
return make_error(message='Endpoint not specified in subscription', code=400)
elif req_action == 'ConfirmSubscription':
if 'TopicArn' not in req_data:
return make_error(message='TopicArn not specified in confirm subscription request', code=400)
if 'Token' not in req_data:
return make_error(message='Token not specified in confirm subscription request', code=400)
do_confirm_subscription(req_data.get('TopicArn')[0], req_data.get('Token')[0])
elif req_action == 'Unsubscribe':
if 'SubscriptionArn' not in req_data:
return make_error(message='SubscriptionArn not specified in unsubscribe request', code=400)
do_unsubscribe(req_data.get('SubscriptionArn')[0])
elif req_action == 'DeleteTopic':
do_delete_topic(topic_arn)
elif req_action == 'Publish':
if req_data.get('Subject') == ['']:
return make_error(code=400, code_string='InvalidParameter', message='Subject')
# No need to create a topic to send SMS or single push notifications with SNS
# but we can't mock a sending so we only return that it went well
if 'PhoneNumber' not in req_data and 'TargetArn' not in req_data:
if topic_arn not in SNS_SUBSCRIPTIONS.keys():
return make_error(code=404, code_string='NotFound', message='Topic does not exist')
publish_message(topic_arn, req_data)
# return response here because we do not want the request to be forwarded to SNS backend
return make_response(req_action)
elif req_action == 'ListTagsForResource':
tags = do_list_tags_for_resource(topic_arn)
content = '<Tags/>'
if len(tags) > 0:
content = '<Tags>'
for tag in tags:
content += '<member>'
content += '<Key>%s</Key>' % tag['Key']
content += '<Value>%s</Value>' % tag['Value']
content += '</member>'
content += '</Tags>'
return make_response(req_action, content=content)
elif req_action == 'CreateTopic':
topic_arn = aws_stack.sns_topic_arn(req_data['Name'][0])
self._extract_tags(topic_arn, req_data)
elif req_action == 'TagResource':
self._extract_tags(topic_arn, req_data)
return make_response(req_action)
elif req_action == 'UntagResource':
tags_to_remove = []
req_tags = {k: v for k, v in req_data.items() if k.startswith('TagKeys.member.')}
req_tags = req_tags.values()
for tag in req_tags:
tags_to_remove.append(tag[0])
do_untag_resource(topic_arn, tags_to_remove)
return make_response(req_action)
data = self._reset_account_id(data)
return Request(data=data, headers=headers, method=method)
return True
@staticmethod
def _extract_tags(topic_arn, req_data):
tags = []
req_tags = {k: v for k, v in req_data.items() if k.startswith('Tags.member.')}
for i in range(int(len(req_tags.keys()) / 2)):
key = req_tags['Tags.member.' + str(i + 1) + '.Key'][0]
value = req_tags['Tags.member.' + str(i + 1) + '.Value'][0]
tags.append({'Key': key, 'Value': value})
do_tag_resource(topic_arn, tags)
@staticmethod
def _reset_account_id(data):
""" Fix account ID in request payload. All external-facing responses contain our
predefined account ID (defaults to 000000000000), whereas the backend endpoint
from moto expects a different hardcoded account ID (123456789012). """
return aws_stack.fix_account_id_in_arns(
data, colon_delimiter='%3A', existing=TEST_AWS_ACCOUNT_ID, replace=MOTO_ACCOUNT_ID)
def return_response(self, method, path, data, headers, response):
if method == 'POST' and path == '/':
# convert account IDs in ARNs
data = aws_stack.fix_account_id_in_arns(data, colon_delimiter='%3A')
aws_stack.fix_account_id_in_arns(response)
# remove "None" strings from result
search = r'<entry><key>[^<]+</key>\s*<value>\s*None\s*</[^>]+>\s*</entry>'
response_regex_replace(response, search, '')
# parse request and extract data
req_data = urlparse.parse_qs(to_str(data))
req_action = req_data['Action'][0]
if req_action == 'Subscribe' and response.status_code < 400:
response_data = xmltodict.parse(response.content)
topic_arn = (req_data.get('TargetArn') or req_data.get('TopicArn'))[0]
filter_policy = (req_data.get('FilterPolicy') or [None])[0]
attributes = get_subscribe_attributes(req_data)
sub_arn = response_data['SubscribeResponse']['SubscribeResult']['SubscriptionArn']
do_subscribe(
topic_arn,
req_data['Endpoint'][0],
req_data['Protocol'][0],
sub_arn,
attributes,
filter_policy
)
if req_action == 'CreateTopic' and response.status_code < 400:
response_data = xmltodict.parse(response.content)
topic_arn = response_data['CreateTopicResponse']['CreateTopicResult']['TopicArn']
do_create_topic(topic_arn)
# publish event
event_publisher.fire_event(
event_publisher.EVENT_SNS_CREATE_TOPIC,
payload={'t': event_publisher.get_hash(topic_arn)}
)
if req_action == 'DeleteTopic' and response.status_code < 400:
# publish event
topic_arn = (req_data.get('TargetArn') or req_data.get('TopicArn'))[0]
event_publisher.fire_event(
event_publisher.EVENT_SNS_DELETE_TOPIC,
payload={'t': event_publisher.get_hash(topic_arn)}
)
# instantiate listener
UPDATE_SNS = ProxyListenerSNS()
def unsubscribe_sqs_queue(queue_url):
""" Called upon deletion of an SQS queue, to remove the queue from subscriptions """
for topic_arn, subscriptions in SNS_SUBSCRIPTIONS.items():
subscriptions = SNS_SUBSCRIPTIONS.get(topic_arn, [])
for subscriber in list(subscriptions):
sub_url = subscriber.get('sqs_queue_url') or subscriber['Endpoint']
if queue_url == sub_url:
subscriptions.remove(subscriber)
def publish_message(topic_arn, req_data, subscription_arn=None):
message = req_data['Message'][0]
sqs_client = aws_stack.connect_to_service('sqs')
LOG.debug('Publishing message to TopicArn: %s | Message: %s' % (topic_arn, message))
subscriptions = SNS_SUBSCRIPTIONS.get(topic_arn, [])
for subscriber in list(subscriptions):
if subscription_arn not in [None, subscriber['SubscriptionArn']]:
continue
filter_policy = json.loads(subscriber.get('FilterPolicy') or '{}')
message_attributes = get_message_attributes(req_data)
if not check_filter_policy(filter_policy, message_attributes):
continue
if subscriber['Protocol'] == 'sqs':
queue_url = None
try:
endpoint = subscriber['Endpoint']
if 'sqs_queue_url' in subscriber:
queue_url = subscriber.get('sqs_queue_url')
elif '://' in endpoint:
queue_url = endpoint
else:
queue_name = endpoint.split(':')[5]
queue_url = aws_stack.get_sqs_queue_url(queue_name)
subscriber['sqs_queue_url'] = queue_url
sqs_client.send_message(
QueueUrl=queue_url,
MessageBody=create_sns_message_body(subscriber, req_data),
MessageAttributes=create_sqs_message_attributes(subscriber, message_attributes)
)
except Exception as exc:
sns_error_to_dead_letter_queue(subscriber['SubscriptionArn'], req_data, str(exc))
if 'NonExistentQueue' in str(exc):
LOG.info('Removing non-existent queue "%s" subscribed to topic "%s"' % (queue_url, topic_arn))
subscriptions.remove(subscriber)
elif subscriber['Protocol'] == 'lambda':
try:
external_url = external_service_url('sns')
unsubscribe_url = '%s/?Action=Unsubscribe&SubscriptionArn=%s' % (external_url,
subscriber['SubscriptionArn'])
response = lambda_api.process_sns_notification(
subscriber['Endpoint'],
topic_arn,
subscriber['SubscriptionArn'],
message,
message_attributes,
unsubscribe_url,
subject=req_data.get('Subject', [None])[0]
)
if isinstance(response, FlaskResponse):
response.raise_for_status()
except Exception as exc:
LOG.warning('Unable to run Lambda function on SNS message: %s %s' % (exc, traceback.format_exc()))
sns_error_to_dead_letter_queue(subscriber['SubscriptionArn'], req_data, str(exc))
elif subscriber['Protocol'] in ['http', 'https']:
msg_type = (req_data.get('Type') or ['Notification'])[0]
try:
message_body = create_sns_message_body(subscriber, req_data)
except Exception:
continue
try:
response = requests.post(
subscriber['Endpoint'],
headers={
'Content-Type': 'text/plain',
# AWS headers according to
# https://docs.aws.amazon.com/sns/latest/dg/sns-message-and-json-formats.html#http-header
'x-amz-sns-message-type': msg_type,
'x-amz-sns-topic-arn': subscriber['TopicArn'],
'x-amz-sns-subscription-arn': subscriber['SubscriptionArn'],
'User-Agent': 'Amazon Simple Notification Service Agent'
},
data=message_body,
verify=False
)
response.raise_for_status()
except Exception as exc:
sns_error_to_dead_letter_queue(subscriber['SubscriptionArn'], req_data, str(exc))
else:
LOG.warning('Unexpected protocol "%s" for SNS subscription' % subscriber['Protocol'])
def do_create_topic(topic_arn):
if topic_arn not in SNS_SUBSCRIPTIONS:
SNS_SUBSCRIPTIONS[topic_arn] = []
def do_delete_topic(topic_arn):
SNS_SUBSCRIPTIONS.pop(topic_arn, None)
def do_confirm_subscription(topic_arn, token):
for k, v in SUBSCRIPTION_STATUS.items():
if v['Token'] == token and v['TopicArn'] == topic_arn:
v['Status'] = 'Subscribed'
def do_subscribe(topic_arn, endpoint, protocol, subscription_arn, attributes, filter_policy=None):
# An endpoint may only be subscribed to a topic once. Subsequent
# subscribe calls do nothing (subscribe is idempotent).
for existing_topic_subscription in SNS_SUBSCRIPTIONS.get(topic_arn, []):
if existing_topic_subscription.get('Endpoint') == endpoint:
return
subscription = {
# http://docs.aws.amazon.com/cli/latest/reference/sns/get-subscription-attributes.html
'TopicArn': topic_arn,
'Endpoint': endpoint,
'Protocol': protocol,
'SubscriptionArn': subscription_arn,
'FilterPolicy': filter_policy
}
subscription.update(attributes)
SNS_SUBSCRIPTIONS[topic_arn].append(subscription)
if subscription_arn not in SUBSCRIPTION_STATUS.keys():
SUBSCRIPTION_STATUS[subscription_arn] = {}
SUBSCRIPTION_STATUS[subscription_arn].update(
{
'TopicArn': topic_arn,
'Token': short_uid(),
'Status': 'Not Subscribed'
}
)
# Send out confirmation message for HTTP(S), fix for https://github.com/localstack/localstack/issues/881
if protocol in ['http', 'https']:
token = short_uid()
external_url = external_service_url('sns')
confirmation = {
'Type': ['SubscriptionConfirmation'],
'Token': [token],
'Message': [('You have chosen to subscribe to the topic %s.\n' % topic_arn) +
'To confirm the subscription, visit the SubscribeURL included in this message.'],
'SubscribeURL': ['%s/?Action=ConfirmSubscription&TopicArn=%s&Token=%s' % (external_url, topic_arn, token)]
}
publish_message(topic_arn, confirmation, subscription_arn)
def do_unsubscribe(subscription_arn):
for topic_arn in SNS_SUBSCRIPTIONS:
SNS_SUBSCRIPTIONS[topic_arn] = [
sub for sub in SNS_SUBSCRIPTIONS[topic_arn]
if sub['SubscriptionArn'] != subscription_arn
]
def _get_tags(topic_arn):
if topic_arn not in SNS_TAGS:
SNS_TAGS[topic_arn] = []
return SNS_TAGS[topic_arn]
def do_list_tags_for_resource(topic_arn):
return _get_tags(topic_arn)
def do_tag_resource(topic_arn, tags):
existing_tags = SNS_TAGS.get(topic_arn, [])
tags = [
tag for idx, tag in enumerate(tags)
if tag not in tags[:idx]
]
def existing_tag_index(item):
for idx, tag in enumerate(existing_tags):
if item['Key'] == tag['Key']:
return idx
return None
for item in tags:
existing_index = existing_tag_index(item)
if existing_index is None:
existing_tags.append(item)
else:
existing_tags[existing_index] = item
SNS_TAGS[topic_arn] = existing_tags
def do_untag_resource(topic_arn, tag_keys):
SNS_TAGS[topic_arn] = [t for t in _get_tags(topic_arn) if t['Key'] not in tag_keys]
# ---------------
# HELPER METHODS
# ---------------
def get_topic_by_arn(topic_arn):
return SNS_SUBSCRIPTIONS.get(topic_arn)
def get_subscription_by_arn(sub_arn):
# TODO maintain separate map instead of traversing all items
for key, subscriptions in SNS_SUBSCRIPTIONS.items():
for sub in subscriptions:
if sub['SubscriptionArn'] == sub_arn:
return sub
def make_response(op_name, content=''):
response = Response()
if not content:
content = '<MessageId>%s</MessageId>' % short_uid()
response._content = """<{op_name}Response xmlns="http://sns.amazonaws.com/doc/2010-03-31/">
<{op_name}Result>
{content}
</{op_name}Result>
<ResponseMetadata><RequestId>{req_id}</RequestId></ResponseMetadata>
</{op_name}Response>""".format(op_name=op_name, content=content, req_id=short_uid())
response.status_code = 200
return response
# TODO move to utils!
def make_error(message, code=400, code_string='InvalidParameter'):
response = Response()
response._content = """<ErrorResponse xmlns="http://sns.amazonaws.com/doc/2010-03-31/"><Error>
<Type>Sender</Type>
<Code>{code_string}</Code>
<Message>{message}</Message>
</Error><RequestId>{req_id}</RequestId>
</ErrorResponse>""".format(message=message, code_string=code_string, req_id=short_uid())
response.status_code = code
return response
def create_sns_message_body(subscriber, req_data):
message = req_data['Message'][0]
subject = req_data.get('Subject', [None])[0]
protocol = subscriber['Protocol']
if six.PY2 and type(message).__name__ == 'unicode':
# fix non-ascii unicode characters under Python 2
message = message.encode('raw-unicode-escape')
if is_raw_message_delivery(subscriber):
return message
if req_data.get('MessageStructure') == ['json']:
message = json.loads(message)
try:
message = message.get(protocol, message['default'])
except KeyError:
raise Exception("Unable to find 'default' key in message payload")
data = {
'Type': req_data.get('Type', ['Notification'])[0],
'MessageId': str(uuid.uuid4()),
'Token': req_data.get('Token', [None])[0],
'TopicArn': subscriber['TopicArn'],
'Message': message,
'SubscribeURL': req_data.get('SubscribeURL', [None])[0],
'Timestamp': timestamp_millis(),
'SignatureVersion': '1',
# TODO Add a more sophisticated solution with an actual signature
# Hardcoded
'Signature': 'EXAMPLEpH+..',
'SigningCertURL': 'https://sns.us-east-1.amazonaws.com/SimpleNotificationService-0000000000000000000000.pem'
}
if subject is not None:
data['Subject'] = subject
attributes = get_message_attributes(req_data)
if attributes:
data['MessageAttributes'] = attributes
return json.dumps(data)
def create_sqs_message_attributes(subscriber, attributes):
if not is_raw_message_delivery(subscriber):
return {}
message_attributes = {}
for key, value in attributes.items():
attribute = {
'DataType': value['Type']
}
if value['Type'] == 'Binary':
attribute['BinaryValue'] = value['Value']
else:
attribute['StringValue'] = str(value['Value'])
message_attributes[key] = attribute
return message_attributes
def get_message_attributes(req_data):
attributes = {}
x = 1
while True:
name = req_data.get('MessageAttributes.entry.' + str(x) + '.Name', [None])[0]
if name is not None:
attribute = {
'Type': req_data.get('MessageAttributes.entry.' + str(x) + '.Value.DataType', [None])[0]
}
string_value = req_data.get('MessageAttributes.entry.' + str(x) + '.Value.StringValue', [None])[0]
binary_value = req_data.get('MessageAttributes.entry.' + str(x) + '.Value.BinaryValue', [None])[0]
if string_value is not None:
attribute['Value'] = string_value
elif binary_value is not None:
attribute['Value'] = binary_value
attributes[name] = attribute
x += 1
else:
break
return attributes
def get_subscribe_attributes(req_data):
attributes = {}
for key in req_data.keys():
if '.key' in key:
attributes[req_data[key][0]] = req_data[key.replace('key', 'value')][0]
return attributes
def is_number(x):
try:
float(x)
return True
except ValueError:
return False
def evaluate_numeric_condition(conditions, value):
if not is_number(value):
return False
for i in range(0, len(conditions), 2):
value = float(value)
operator = conditions[i]
operand = float(conditions[i + 1])
if operator == '=':
if value != operand:
return False
elif operator == '>':
if value <= operand:
return False
elif operator == '<':
if value >= operand:
return False
elif operator == '>=':
if value < operand:
return False
elif operator == '<=':
if value > operand:
return False
return True
def evaluate_exists_condition(conditions, message_attributes, criteria):
    # Filtering should not match any messages if 'exists' is set to false, as per AWS docs:
# https://docs.aws.amazon.com/sns/latest/dg/sns-subscription-filter-policies.html
if conditions:
return bool(message_attributes.get(criteria))
return False
def evaluate_condition(value, condition, message_attributes, criteria):
if type(condition) is not dict:
return value == condition
elif condition.get('anything-but'):
return value not in condition.get('anything-but')
elif condition.get('prefix'):
prefix = condition.get('prefix')
return value.startswith(prefix)
elif condition.get('numeric'):
return evaluate_numeric_condition(condition.get('numeric'), value)
elif condition.get('exists'):
return evaluate_exists_condition(condition.get('exists'), message_attributes, criteria)
return False
def evaluate_filter_policy_conditions(conditions, attribute, message_attributes, criteria):
if type(conditions) is not list:
conditions = [conditions]
if attribute['Type'] == 'String.Array':
values = ast.literal_eval(attribute['Value'])
for value in values:
for condition in conditions:
if evaluate_condition(value, condition, message_attributes, criteria):
return True
else:
for condition in conditions:
if evaluate_condition(attribute['Value'], condition, message_attributes, criteria):
return True
return False
def check_filter_policy(filter_policy, message_attributes):
if not filter_policy:
return True
for criteria in filter_policy:
conditions = filter_policy.get(criteria)
attribute = message_attributes.get(criteria)
if attribute is None:
return False
if evaluate_filter_policy_conditions(conditions, attribute, message_attributes, criteria) is False:
return False
return True
def is_raw_message_delivery(subscriber):
    return subscriber.get('RawMessageDelivery') in ('true', True, 'True')
| 1 | 11,061 | nit: I suggest that we rename `tag_error_response` to `tag_resource_success`, as later in line 132 we're returning an error if this value is *not* truthy. | localstack-localstack | py |
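A sketch of the rename the reviewer suggests (illustrative, not the actual change): the value returned by _extract_tags indicates whether tagging succeeded, so calling it tag_resource_success makes the `if not ...` check read naturally.
            elif req_action == 'CreateTopic':
                topic_arn = aws_stack.sns_topic_arn(req_data['Name'][0])
                tag_resource_success = self._extract_tags(topic_arn, req_data, True)
                # Tagging failed, e.g. the topic already exists with different tags: return an error.
                if not tag_resource_success:
                    return make_error(code=400, code_string='InvalidParameter',
                                      message='Topic already exists with different tags')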
@@ -0,0 +1,15 @@
+<?php
+
+declare(strict_types=1);
+
+namespace Shopsys\ShopBundle\Controller\Styleguide;
+
+use Shopsys\ShopBundle\Controller\Front\FrontBaseController;
+
+class StyleguideController extends FrontBaseController
+{
+ public function styleguideAction()
+ {
+ return $this->render('@ShopsysShop/Styleguide/styleguide.html.twig');
+ }
+} | 1 | 1 | 19,677 | I am missing return typehint | shopsys-shopsys | php |
|
@@ -0,0 +1,8 @@
+_base_ = [
+ 'retinanet_pvt-t_fpn_1x_coco.py',
+]
+model = dict(
+ backbone=dict(
+ num_layers=[3, 4, 6, 3],
+ init_cfg=dict(checkpoint='https://github.com/whai362/PVT/'
+ 'releases/download/v2/pvt_small.pth'))) | 1 | 1 | 25,037 | _base_ = 'retinanet_pvt-t_fpn_1x_coco.py' | open-mmlab-mmdetection | py |
|
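The reviewer's one-line suggestion written out against the patch above (illustrative): with a single base config, _base_ can be the string itself rather than a one-element list.
_base_ = 'retinanet_pvt-t_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        num_layers=[3, 4, 6, 3],
        init_cfg=dict(checkpoint='https://github.com/whai362/PVT/'
                                 'releases/download/v2/pvt_small.pth')))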
@@ -394,11 +394,12 @@ public class OAuthWebviewHelper implements KeyChainAliasCallback {
private class SwapJWTForAccessTokenTask extends BaseFinishAuthFlowTask<LoginOptions> {
@Override
- protected TokenEndpointResponse performRequest(LoginOptions options) {
+ protected TokenEndpointResponse performRequest(LoginOptions options) throws Exception {
try {
return OAuth2.swapJWTForTokens(HttpAccess.DEFAULT, new URI(options.loginUrl), options.jwt);
} catch (Exception e) {
- Log.w("OAuth.SwapJWT", e);
+ onAuthFlowError("jwt_oauth_error", e.getLocalizedMessage());
+ callback.finish();
}
return null;
} | 1 | /*
* Copyright (c) 2011-2015, salesforce.com, inc.
* All rights reserved.
* Redistribution and use of this software in source and binary forms, with or
* without modification, are permitted provided that the following conditions
* are met:
* - Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* - Neither the name of salesforce.com, inc. nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission of salesforce.com, inc.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
package com.salesforce.androidsdk.ui;
import java.net.URI;
import java.net.URISyntaxException;
import java.security.PrivateKey;
import java.security.cert.X509Certificate;
import java.util.Locale;
import java.util.Map;
import android.annotation.TargetApi;
import android.app.Activity;
import android.content.Context;
import android.net.Uri;
import android.net.http.SslError;
import android.os.AsyncTask;
import android.os.Build;
import android.os.Bundle;
import android.security.KeyChain;
import android.security.KeyChainAliasCallback;
import android.security.KeyChainException;
import android.text.TextUtils;
import android.util.Log;
import android.webkit.ClientCertRequest;
import android.webkit.SslErrorHandler;
import android.webkit.WebChromeClient;
import android.webkit.WebView;
import android.webkit.WebViewClient;
import android.widget.Toast;
import com.salesforce.androidsdk.R;
import com.salesforce.androidsdk.accounts.UserAccount;
import com.salesforce.androidsdk.app.SalesforceSDKManager;
import com.salesforce.androidsdk.auth.HttpAccess;
import com.salesforce.androidsdk.auth.OAuth2;
import com.salesforce.androidsdk.auth.OAuth2.IdServiceResponse;
import com.salesforce.androidsdk.auth.OAuth2.TokenEndpointResponse;
import com.salesforce.androidsdk.config.BootConfig;
import com.salesforce.androidsdk.config.RuntimeConfig;
import com.salesforce.androidsdk.push.PushMessaging;
import com.salesforce.androidsdk.rest.ClientManager;
import com.salesforce.androidsdk.rest.ClientManager.LoginOptions;
import com.salesforce.androidsdk.security.PasscodeManager;
import com.salesforce.androidsdk.util.EventsObservable;
import com.salesforce.androidsdk.util.EventsObservable.EventType;
import com.salesforce.androidsdk.util.UriFragmentParser;
/**
* Helper class to manage a WebView instance that is going through the OAuth login process.
* Basic flow is
* a) load and show the login page to the user
 * b) user logs in and authorizes app
* c) we see the navigation to the auth complete Url, and grab the tokens
* d) we call the Id service to obtain additional info about the user
* e) we create a local account, and return an authentication result bundle.
* f) done!
*
*/
public class OAuthWebviewHelper implements KeyChainAliasCallback {
// Set a custom permission on your connected application with that name if you want
// the application to be restricted to managed devices
public static final String MUST_BE_MANAGED_APP_PERM = "must_be_managed_app";
private static final String ACCOUNT_OPTIONS = "accountOptions";
/**
* the host activity/fragment should pass in an implementation of this
* interface so that it can notify it of things it needs to do as part of
* the oauth process.
*/
public interface OAuthWebviewHelperEvents {
/** we're starting to load this login page into the webview */
void loadingLoginPage(String loginUrl);
/**
* progress update of loading the webview, totalProgress will go from
* 0..10000 (you can pass this directly to the activity progressbar)
*/
void onLoadingProgress(int totalProgress);
/** We're doing something that takes some unknown amount of time */
void onIndeterminateProgress(boolean show);
/** We've completed the auth process and here's the resulting Authentication Result bundle to return to the Authenticator */
void onAccountAuthenticatorResult(Bundle authResult);
/** we're in some end state and requesting that the host activity be finished/closed. */
void finish();
}
/**
* Construct a new OAuthWebviewHelper and perform the initial configuration of the Webview.
*/
@Deprecated
public OAuthWebviewHelper(OAuthWebviewHelperEvents callback,
LoginOptions options, WebView webview, Bundle savedInstanceState) {
this(new LoginActivity(), callback, options, webview, savedInstanceState);
}
/**
* Construct a new OAuthWebviewHelper and perform the initial configuration of the Webview.
*/
public OAuthWebviewHelper(Activity activity, OAuthWebviewHelperEvents callback,
LoginOptions options, WebView webview, Bundle savedInstanceState) {
assert options != null && callback != null && webview != null && activity != null;
this.activity = activity;
this.callback = callback;
this.loginOptions = options;
this.webview = webview;
webview.getSettings().setJavaScriptEnabled(true);
webview.setWebViewClient(makeWebViewClient());
webview.setWebChromeClient(makeWebChromeClient());
// Restore webview's state if available.
// This ensures the user is not forced to type in credentials again
// once the auth process has been kicked off.
if (savedInstanceState != null) {
webview.restoreState(savedInstanceState);
accountOptions = AccountOptions.fromBundle(savedInstanceState.getBundle(ACCOUNT_OPTIONS));
} else {
clearCookies();
}
}
private final OAuthWebviewHelperEvents callback;
protected final LoginOptions loginOptions;
private final WebView webview;
private AccountOptions accountOptions;
private Activity activity;
private PrivateKey key;
private X509Certificate[] certChain;
public void saveState(Bundle outState) {
webview.saveState(outState);
if (accountOptions != null) {
// we have completed the auth flow but not created the account, because we need to create a pin
outState.putBundle(ACCOUNT_OPTIONS, accountOptions.asBundle());
}
}
public WebView getWebView() {
return webview;
}
public void clearCookies() {
SalesforceSDKManager.getInstance().removeAllCookies();
}
public void clearView() {
webview.loadUrl("about:blank");
}
/**
* Method called by login activity when it resumes after the passcode activity
*
* When the server has a mobile policy requiring a passcode, we start the passcode activity after completing the
* auth flow (see onAuthFlowComplete).
* When the passcode activity completes, the login activity's onActivityResult gets invoked, and it calls this method
* to finalize the account creation.
*/
public void onNewPasscode() {
/*
* Re-encryption of existing accounts with the new passcode is taken
* care of in the 'Confirm Passcode' step in PasscodeActivity.
*/
if (accountOptions != null) {
loginOptions.passcodeHash = SalesforceSDKManager.getInstance().getPasscodeHash();
addAccount();
callback.finish();
}
}
/** Factory method for the WebViewClient, you can replace this with something else if you need to */
protected WebViewClient makeWebViewClient() {
return new AuthWebViewClient();
}
/** Factory method for the WebChromeClient, you can replace this with something else if you need to */
protected WebChromeClient makeWebChromeClient() {
return new AuthWebChromeClient();
}
protected Context getContext() {
return webview.getContext();
}
/**
* Called when the user facing part of the auth flow completed with an error.
* We show the user an error and end the activity.
*/
protected void onAuthFlowError(String error, String errorDesc) {
Log.w("LoginActivity:onAuthFlowError", error + ":" + errorDesc);
// look for deny. kick them back to login, so clear cookies and repoint browser
if ("access_denied".equals(error)
&& "end-user denied authorization".equals(errorDesc)) {
webview.post(new Runnable() {
@Override
public void run() {
clearCookies();
loadLoginPage();
}
});
} else {
Toast t = Toast.makeText(webview.getContext(), error + " : " + errorDesc,
Toast.LENGTH_LONG);
webview.postDelayed(new Runnable() {
@Override
public void run() {
callback.finish();
}
}, t.getDuration());
t.show();
}
}
protected void showError(Exception exception) {
Toast.makeText(getContext(),
getContext().getString(SalesforceSDKManager.getInstance().getSalesforceR().stringGenericError(), exception.toString()),
Toast.LENGTH_LONG).show();
}
/**
* Tells the webview to load the authorization page.
     * We also update the window title, so it's easier to
* see which system you're logging in to
*/
public void loadLoginPage() {
if (TextUtils.isEmpty(loginOptions.jwt)) {
loginOptions.loginUrl = getLoginUrl();
doLoadPage(false);
} else {
new SwapJWTForAccessTokenTask().execute(loginOptions);
}
}
private void doLoadPage(boolean jwtFlow) {
try {
URI uri = getAuthorizationUrl(jwtFlow);
callback.loadingLoginPage(loginOptions.loginUrl);
webview.loadUrl(uri.toString());
} catch (URISyntaxException ex) {
showError(ex);
}
}
protected String getOAuthClientId() {
return loginOptions.oauthClientId;
}
protected URI getAuthorizationUrl(Boolean jwtFlow) throws URISyntaxException {
if (jwtFlow) {
return OAuth2.getAuthorizationUrl(
new URI(loginOptions.loginUrl),
getOAuthClientId(),
loginOptions.oauthCallbackUrl,
loginOptions.oauthScopes,
null,
getAuthorizationDisplayType(), loginOptions.jwt, loginOptions.loginUrl);
}
return OAuth2.getAuthorizationUrl(
new URI(loginOptions.loginUrl),
getOAuthClientId(),
loginOptions.oauthCallbackUrl,
loginOptions.oauthScopes,
null,
getAuthorizationDisplayType());
}
protected URI getAuthorizationUrl() throws URISyntaxException {
return getAuthorizationUrl(false);
}
/**
* Override this to replace the default login webview's display param with
* your custom display param. You can override this by either subclassing this class,
* or adding "<string name="sf__oauth_display_type">desiredDisplayParam</string>"
* to your app's resource so that it overrides the default value in the SDK library.
*
* @return the OAuth login display type, e.g. 'mobile', 'touch',
* see the OAuth docs for the complete list of valid values.
*/
protected String getAuthorizationDisplayType() {
return this.getContext().getString(R.string.oauth_display_type);
}
/**
* Override this method to customize the login url.
* @return login url
*/
protected String getLoginUrl() {
return SalesforceSDKManager.getInstance().getLoginServerManager().getSelectedLoginServer().url.trim();
}
/**
* WebViewClient which intercepts the redirect to the oauth callback url.
* That redirect marks the end of the user facing portion of the authentication flow.
*
*/
protected class AuthWebViewClient extends WebViewClient {
@Override
public void onPageFinished(WebView view, String url) {
EventsObservable.get().notifyEvent(EventType.AuthWebViewPageFinished, url);
super.onPageFinished(view, url);
}
@Override
public boolean shouldOverrideUrlLoading(WebView view, String url) {
boolean isDone = url.replace("///", "/").toLowerCase(Locale.US).startsWith(loginOptions.oauthCallbackUrl.replace("///", "/").toLowerCase(Locale.US));
if (isDone) {
Uri callbackUri = Uri.parse(url);
Map<String, String> params = UriFragmentParser.parse(callbackUri);
String error = params.get("error");
// Did we fail?
if (error != null) {
String errorDesc = params.get("error_description");
onAuthFlowError(error, errorDesc);
}
// Or succeed?
else {
TokenEndpointResponse tr = new TokenEndpointResponse(params);
onAuthFlowComplete(tr);
}
}
return isDone;
}
@Override
public void onReceivedSslError(WebView view, SslErrorHandler handler, SslError error) {
int primError = error.getPrimaryError();
// Figuring out string resource id
SalesforceR r = SalesforceSDKManager.getInstance().getSalesforceR();
int primErrorStringId = r.stringSSLUnknownError();
switch (primError) {
case SslError.SSL_EXPIRED: primErrorStringId = r.stringSSLExpired(); break;
case SslError.SSL_IDMISMATCH: primErrorStringId = r.stringSSLIdMismatch(); break;
case SslError.SSL_NOTYETVALID: primErrorStringId = r.stringSSLNotYetValid(); break;
case SslError.SSL_UNTRUSTED: primErrorStringId = r.stringSSLUntrusted(); break;
}
// Building text message to show
String text = getContext().getString(r.stringSSLError(), getContext().getString(primErrorStringId));
// Bringing up toast
Toast.makeText(getContext(), text, Toast.LENGTH_LONG).show();
handler.cancel();
}
@TargetApi(Build.VERSION_CODES.LOLLIPOP)
@Override
public void onReceivedClientCertRequest(WebView view, ClientCertRequest request) {
request.proceed(key, certChain);
}
}
/**
* Called when the user facing part of the auth flow completed successfully.
* The last step is to call the identity service to get the username.
*/
protected void onAuthFlowComplete(TokenEndpointResponse tr) {
FinishAuthTask t = new FinishAuthTask();
t.execute(tr);
}
private class SwapJWTForAccessTokenTask extends BaseFinishAuthFlowTask<LoginOptions> {
@Override
protected TokenEndpointResponse performRequest(LoginOptions options) {
try {
return OAuth2.swapJWTForTokens(HttpAccess.DEFAULT, new URI(options.loginUrl), options.jwt);
} catch (Exception e) {
Log.w("OAuth.SwapJWT", e);
}
return null;
}
@Override
protected void onPostExecute(TokenEndpointResponse tr) {
if (tr != null && tr.authToken != null) {
loginOptions.setJwt(tr.authToken);
doLoadPage(true);
}
else {
doLoadPage(false);
}
loginOptions.setJwt(null);
}
}
// base class with common code for the background task that finishes off the auth process
protected abstract class BaseFinishAuthFlowTask<RequestType> extends AsyncTask<RequestType, Boolean, TokenEndpointResponse> {
protected volatile Exception backgroundException;
protected volatile IdServiceResponse id = null;
public BaseFinishAuthFlowTask() {
}
@SafeVarargs
@Override
protected final TokenEndpointResponse doInBackground(RequestType ... params) {
try {
publishProgress(true);
return performRequest(params[0]);
} catch (Exception ex) {
handleException(ex);
}
return null;
}
protected abstract TokenEndpointResponse performRequest(RequestType param) throws Exception;
@Override
protected void onPostExecute(OAuth2.TokenEndpointResponse tr) {
final SalesforceSDKManager mgr = SalesforceSDKManager.getInstance();
//
// Failure cases.
//
if (backgroundException != null) {
                Log.w("LoginActivity.onAuthFlowComplete", backgroundException);
// Error
onAuthFlowError(getContext().getString(mgr.getSalesforceR().stringGenericAuthenticationErrorTitle()),
getContext().getString(mgr.getSalesforceR().stringGenericAuthenticationErrorBody()));
callback.finish();
return;
}
if (id.customPermissions != null) {
final boolean mustBeManagedApp = id.customPermissions.optBoolean(MUST_BE_MANAGED_APP_PERM);
if (mustBeManagedApp && !RuntimeConfig.getRuntimeConfig(getContext()).isManagedApp()) {
onAuthFlowError(getContext().getString(mgr.getSalesforceR().stringGenericAuthenticationErrorTitle()),
getContext().getString(mgr.getSalesforceR().stringManagedAppError()));
callback.finish();
return;
}
}
//
// Putting together all the information needed to create the new account.
//
accountOptions = new AccountOptions(id.username, tr.refreshToken,
tr.authToken, tr.idUrl, tr.instanceUrl, tr.orgId, tr.userId,
tr.communityId, tr.communityUrl, id.firstName, id.lastName,
id.displayName, id.email, id.pictureUrl, id.thumbnailUrl);
// Sets additional admin prefs, if they exist.
final UserAccount account = new UserAccount(accountOptions.authToken,
accountOptions.refreshToken, loginOptions.loginUrl,
accountOptions.identityUrl, accountOptions.instanceUrl,
accountOptions.orgId, accountOptions.userId,
accountOptions.username, buildAccountName(accountOptions.username,
accountOptions.instanceUrl), loginOptions.clientSecret,
accountOptions.communityId, accountOptions.communityUrl,
accountOptions.firstName, accountOptions.lastName, accountOptions.displayName,
accountOptions.email, accountOptions.photoUrl,
accountOptions.thumbnailUrl);
if (id.customAttributes != null) {
mgr.getAdminSettingsManager().setPrefs(id.customAttributes, account);
}
if (id.customPermissions != null) {
mgr.getAdminPermsManager().setPrefs(id.customPermissions, account);
}
// Screen lock required by mobile policy
if (id.screenLockTimeout > 0) {
// Stores the mobile policy for the org.
final PasscodeManager passcodeManager = mgr.getPasscodeManager();
passcodeManager.storeMobilePolicyForOrg(account, id.screenLockTimeout * 1000 * 60, id.pinLength);
passcodeManager.setTimeoutMs(id.screenLockTimeout * 1000 * 60);
passcodeManager.setMinPasscodeLength(id.pinLength);
/*
* Checks if a passcode already exists. If a passcode has NOT
* been created yet, the user is taken through the passcode
* creation flow, at the end of which account data is encrypted
* with a hash of the passcode. Other existing accounts are
* also re-encrypted behind the scenes at this point. If a
* passcode already exists, the existing hash is used and the
* account is added at this point.
*/
if (!passcodeManager.hasStoredPasscode(mgr.getAppContext())) {
// This will bring up the create passcode screen - we will create the account in onResume
mgr.getPasscodeManager().setEnabled(true);
mgr.getPasscodeManager().lockIfNeeded((Activity) getContext(), true);
} else {
loginOptions.passcodeHash = mgr.getPasscodeHash();
addAccount();
callback.finish();
}
}
// No screen lock required or no mobile policy specified
else {
final PasscodeManager passcodeManager = mgr.getPasscodeManager();
passcodeManager.storeMobilePolicyForOrg(account, 0, PasscodeManager.MIN_PASSCODE_LENGTH);
loginOptions.passcodeHash = mgr.getPasscodeHash();
addAccount();
callback.finish();
}
}
protected void handleException(Exception ex) {
if (ex.getMessage() != null)
Log.w("BaseFinishAuthFlowTask", "handleException", ex);
backgroundException = ex;
}
@Override
protected void onProgressUpdate(Boolean... values) {
callback.onIndeterminateProgress(values[0]);
}
}
/**
* This is a background process that will call the identity service to get the info we need from
* the Identity service, and finally wrap up and create account.
*/
private class FinishAuthTask extends BaseFinishAuthFlowTask<TokenEndpointResponse> {
@Override
protected TokenEndpointResponse performRequest(TokenEndpointResponse tr) throws Exception {
try {
id = OAuth2.callIdentityService(
HttpAccess.DEFAULT, tr.idUrlWithInstance, tr.authToken);
} catch(Exception e) {
backgroundException = e;
}
return tr;
}
}
protected void addAccount() {
ClientManager clientManager = new ClientManager(getContext(),
SalesforceSDKManager.getInstance().getAccountType(),
loginOptions, SalesforceSDKManager.getInstance().shouldLogoutWhenTokenRevoked());
// Create account name (shown in Settings -> Accounts & sync)
String accountName = buildAccountName(accountOptions.username,
accountOptions.instanceUrl);
// New account
Bundle extras = clientManager.createNewAccount(accountName,
accountOptions.username,
accountOptions.refreshToken,
accountOptions.authToken,
accountOptions.instanceUrl,
loginOptions.loginUrl,
accountOptions.identityUrl,
getOAuthClientId(),
accountOptions.orgId,
accountOptions.userId,
loginOptions.passcodeHash,
loginOptions.clientSecret,
accountOptions.communityId,
accountOptions.communityUrl,
accountOptions.firstName,
accountOptions.lastName,
accountOptions.displayName,
accountOptions.email,
accountOptions.photoUrl,
accountOptions.thumbnailUrl);
/*
* Registers for push notifications, if push notification client ID is present.
* This step needs to happen after the account has been added by client
* manager, so that the push service has all the account info it needs.
*/
final Context appContext = SalesforceSDKManager.getInstance().getAppContext();
final String pushNotificationId = BootConfig.getBootConfig(appContext).getPushNotificationClientId();
if (!TextUtils.isEmpty(pushNotificationId)) {
final UserAccount account = new UserAccount(accountOptions.authToken,
accountOptions.refreshToken, loginOptions.loginUrl,
accountOptions.identityUrl, accountOptions.instanceUrl,
accountOptions.orgId, accountOptions.userId,
accountOptions.username, accountName,
loginOptions.clientSecret, accountOptions.communityId,
accountOptions.communityUrl, accountOptions.firstName,
accountOptions.lastName, accountOptions.displayName, accountOptions.email,
accountOptions.photoUrl, accountOptions.thumbnailUrl);
PushMessaging.register(appContext, account);
}
callback.onAccountAuthenticatorResult(extras);
}
/**
* @return name to be shown for account in Settings -> Accounts & Sync
*/
protected String buildAccountName(String username, String instanceServer) {
return String.format("%s (%s) (%s)", username, instanceServer,
SalesforceSDKManager.getInstance().getApplicationName());
}
/**
* WebChromeClient used to report back loading progress.
*/
protected class AuthWebChromeClient extends WebChromeClient {
@Override
public void onProgressChanged(WebView view, int newProgress) {
callback.onLoadingProgress(newProgress * 100);
}
}
/**
* Class encapsulating the parameters required to create a new account
*/
public static class AccountOptions {
private static final String USER_ID = "userId";
private static final String ORG_ID = "orgId";
private static final String IDENTITY_URL = "identityUrl";
private static final String INSTANCE_URL = "instanceUrl";
private static final String AUTH_TOKEN = "authToken";
private static final String REFRESH_TOKEN = "refreshToken";
private static final String USERNAME = "username";
private static final String COMMUNITY_ID = "communityId";
private static final String COMMUNITY_URL = "communityUrl";
private static final String FIRST_NAME = "firstName";
private static final String LAST_NAME = "lastName";
private static final String DISPLAY_NAME = "displayName";
private static final String EMAIL = "email";
private static final String PHOTO_URL = "photoUrl";
private static final String THUMBNAIL_URL = "thumbnailUrl";
public final String username;
public final String refreshToken;
public final String authToken;
public final String identityUrl;
public final String instanceUrl;
public final String orgId;
public final String userId;
public final String communityId;
public final String communityUrl;
public final String firstName;
public final String lastName;
public final String displayName;
public final String email;
public final String photoUrl;
public final String thumbnailUrl;
private final Bundle bundle;
public AccountOptions(String username, String refreshToken,
String authToken, String identityUrl, String instanceUrl,
String orgId, String userId, String communityId, String communityUrl,
String firstName, String lastName, String displayName, String email, String photoUrl, String thumbnailUrl) {
super();
this.username = username;
this.refreshToken = refreshToken;
this.authToken = authToken;
this.identityUrl = identityUrl;
this.instanceUrl = instanceUrl;
this.orgId = orgId;
this.userId = userId;
this.communityId = communityId;
this.communityUrl = communityUrl;
this.firstName = firstName;
this.lastName = lastName;
this.displayName = displayName;
this.email = email;
this.photoUrl = photoUrl;
this.thumbnailUrl = thumbnailUrl;
bundle = new Bundle();
bundle.putString(USERNAME, username);
bundle.putString(REFRESH_TOKEN, refreshToken);
bundle.putString(AUTH_TOKEN, authToken);
bundle.putString(IDENTITY_URL, identityUrl);
bundle.putString(INSTANCE_URL, instanceUrl);
bundle.putString(ORG_ID, orgId);
bundle.putString(USER_ID, userId);
bundle.putString(COMMUNITY_ID, communityId);
bundle.putString(COMMUNITY_URL, communityUrl);
bundle.putString(FIRST_NAME, firstName);
bundle.putString(LAST_NAME, lastName);
bundle.putString(DISPLAY_NAME, displayName);
bundle.putString(EMAIL, email);
bundle.putString(PHOTO_URL, photoUrl);
bundle.putString(THUMBNAIL_URL, thumbnailUrl);
}
public Bundle asBundle() {
return bundle;
}
public static AccountOptions fromBundle(Bundle options) {
if (options == null) return null;
return new AccountOptions(
options.getString(USERNAME),
options.getString(REFRESH_TOKEN),
options.getString(AUTH_TOKEN),
options.getString(IDENTITY_URL),
options.getString(INSTANCE_URL),
options.getString(ORG_ID),
options.getString(USER_ID),
options.getString(COMMUNITY_ID),
options.getString(COMMUNITY_URL),
options.getString(FIRST_NAME),
options.getString(LAST_NAME),
options.getString(DISPLAY_NAME),
options.getString(EMAIL),
options.getString(PHOTO_URL),
options.getString(THUMBNAIL_URL)
);
}
}
@Override
public void alias(String alias) {
try {
certChain = KeyChain.getCertificateChain(activity, alias);
key = KeyChain.getPrivateKey(activity, alias);
activity.runOnUiThread(new Runnable() {
@Override
public void run() {
loadLoginPage();
}
});
} catch (KeyChainException e) {
e.printStackTrace();
} catch (InterruptedException e) {
e.printStackTrace();
}
}
}
| 1 | 15,517 | We should remove `throws Exception` here, since we're catching it anyway and triggering `onAuthFlowError`. | forcedotcom-SalesforceMobileSDK-Android | java |
@@ -0,0 +1,18 @@
+package ps
+
+import "gx/ipfs/QmepvmmYNM6q4RaUiwEikQFhgMFHXg2PLhx2E9iaRd3jmS/go-libp2p-pubsub"
+
+// Publisher publishes to pubsub topics
+type Publisher struct {
+ pubsub *pubsub.PubSub
+}
+
+// NewPublisher builds a new publisher
+func NewPublisher(sub *pubsub.PubSub) *Publisher {
+ return &Publisher{pubsub: sub}
+}
+
+// Publish publishes to a pubsub topic
+func (s *Publisher) Publish(topic string, data []byte) error {
+ return s.pubsub.Publish(topic, data)
+} | 1 | 1 | 17,039 | a super thin wrapper like this makes me think we should consider not having it, instead just defining in the plumbing.api the publisher and subscriber interfaces we expect a ps dependency passed in to support and then to pass calls directly to that thing rather than through this wrapper | filecoin-project-venus | go |
|
@@ -93,7 +93,7 @@ public class Docker {
throw new WebDriverException("Unable to pull container: " + name);
}
- LOG.info(String.format("Pull of %s:%s complete", name, tag));
+ LOG.fine(String.format("Pull of %s:%s complete", name, tag));
return findImage(new ImageNamePredicate(name, tag))
.orElseThrow(() -> new DockerException( | 1 | // Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.openqa.selenium.docker;
import com.google.common.reflect.TypeToken;
import org.openqa.selenium.WebDriverException;
import org.openqa.selenium.json.Json;
import org.openqa.selenium.json.JsonException;
import org.openqa.selenium.json.JsonOutput;
import org.openqa.selenium.remote.http.HttpHandler;
import org.openqa.selenium.remote.http.HttpRequest;
import org.openqa.selenium.remote.http.HttpResponse;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.function.Predicate;
import java.util.logging.Logger;
import static com.google.common.collect.ImmutableList.toImmutableList;
import static java.net.HttpURLConnection.HTTP_OK;
import static org.openqa.selenium.json.Json.MAP_TYPE;
import static org.openqa.selenium.remote.http.Contents.string;
import static org.openqa.selenium.remote.http.Contents.utf8String;
import static org.openqa.selenium.remote.http.HttpMethod.GET;
import static org.openqa.selenium.remote.http.HttpMethod.POST;
public class Docker {
private static final Logger LOG = Logger.getLogger(Docker.class.getName());
private static final Json JSON = new Json();
private final HttpHandler client;
public Docker(HttpHandler client) {
Objects.requireNonNull(client, "Docker HTTP client must be set.");
this.client = req -> {
HttpResponse resp = client.execute(req);
if (resp.getStatus() < 200 && resp.getStatus() > 200) {
String value = string(resp);
try {
Object obj = JSON.toType(value, Object.class);
if (obj instanceof Map) {
Map<?, ?> map = (Map<?, ?>) obj;
String message = map.get("message") instanceof String ?
(String) map.get("message") :
value;
throw new RuntimeException(message);
}
throw new RuntimeException(value);
} catch (JsonException e) {
throw new RuntimeException(value);
}
}
return resp;
};
}
public Image pull(String name, String tag) {
Objects.requireNonNull(name);
Objects.requireNonNull(tag);
findImage(new ImageNamePredicate(name, tag));
LOG.info(String.format("Pulling %s:%s", name, tag));
HttpRequest request = new HttpRequest(POST, "/images/create")
.addQueryParameter("fromImage", name)
.addQueryParameter("tag", tag);
HttpResponse res = client.execute(request);
if (res.getStatus() != HTTP_OK) {
throw new WebDriverException("Unable to pull container: " + name);
}
LOG.info(String.format("Pull of %s:%s complete", name, tag));
return findImage(new ImageNamePredicate(name, tag))
.orElseThrow(() -> new DockerException(
String.format("Cannot find image matching: %s:%s", name, tag)));
}
public List<Image> listImages() {
LOG.fine("Listing images");
HttpResponse response = client.execute(new HttpRequest(GET, "/images/json"));
List<ImageSummary> images =
JSON.toType(string(response), new TypeToken<List<ImageSummary>>() {}.getType());
return images.stream()
.map(Image::new)
.collect(toImmutableList());
}
public Optional<Image> findImage(Predicate<Image> filter) {
Objects.requireNonNull(filter);
LOG.fine("Finding image: " + filter);
return listImages().stream()
.filter(filter)
.findFirst();
}
public Container create(ContainerInfo info) {
StringBuilder json = new StringBuilder();
try (JsonOutput output = JSON.newOutput(json)) {
output.setPrettyPrint(false);
output.write(info);
}
LOG.info("Creating container: " + json);
HttpRequest request = new HttpRequest(POST, "/containers/create");
request.setContent(utf8String(json));
HttpResponse response = client.execute(request);
Map<String, Object> toRead = JSON.toType(string(response), MAP_TYPE);
return new Container(client, new ContainerId((String) toRead.get("Id")));
}
}
| 1 | 17,123 | Waiting for the pull takes a long time. This message informs the user that at least one of the images being pulled is available. Please leave. | SeleniumHQ-selenium | py |
@@ -67,10 +67,12 @@ setup(
"configargparse>=0.10, <0.11",
"construct>=2.5.2, <2.6",
"cryptography>=1.3, <1.5",
+ "cssutils>=1.0.1, <1.1",
"Flask>=0.10.1, <0.12",
"h2>=2.4.0, <3",
"html2text>=2016.1.8, <=2016.5.29",
"hyperframe>=4.0.1, <5",
+ "jsbeautifier>=1.6.3"
"lxml>=3.5.0, <=3.6.0", # no wheels for 3.6.1 yet.
"Pillow>=3.2, <3.4",
"passlib>=1.6.5, <1.7", | 1 | from setuptools import setup, find_packages
from codecs import open
import os
from netlib import version
# Based on https://github.com/pypa/sampleproject/blob/master/setup.py
# and https://python-packaging-user-guide.readthedocs.org/
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name="mitmproxy",
version=version.VERSION,
description="An interactive, SSL-capable, man-in-the-middle HTTP proxy for penetration testers and software developers.",
long_description=long_description,
url="http://mitmproxy.org",
author="Aldo Cortesi",
author_email="[email protected]",
license="MIT",
classifiers=[
"License :: OSI Approved :: MIT License",
"Development Status :: 5 - Production/Stable",
"Environment :: Console",
"Environment :: Console :: Curses",
"Operating System :: MacOS :: MacOS X",
"Operating System :: POSIX",
"Operating System :: Microsoft :: Windows",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
"Topic :: Security",
"Topic :: Internet",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Internet :: Proxy Servers",
"Topic :: Software Development :: Testing"
],
packages=find_packages(include=[
"mitmproxy", "mitmproxy.*",
"pathod", "pathod.*",
"netlib", "netlib.*"
]),
include_package_data=True,
entry_points={
'console_scripts': [
"mitmproxy = mitmproxy.main:mitmproxy",
"mitmdump = mitmproxy.main:mitmdump",
"mitmweb = mitmproxy.main:mitmweb",
"pathod = pathod.pathod_cmdline:go_pathod",
"pathoc = pathod.pathoc_cmdline:go_pathoc"
]
},
# https://packaging.python.org/en/latest/requirements/#install-requires
# It is not considered best practice to use install_requires to pin dependencies to specific versions.
install_requires=[
"backports.ssl_match_hostname>=3.5.0.1, <3.6",
"blinker>=1.4, <1.5",
"click>=6.2, <7.0",
"certifi>=2015.11.20.1", # no semver here - this should always be on the last release!
"configargparse>=0.10, <0.11",
"construct>=2.5.2, <2.6",
"cryptography>=1.3, <1.5",
"Flask>=0.10.1, <0.12",
"h2>=2.4.0, <3",
"html2text>=2016.1.8, <=2016.5.29",
"hyperframe>=4.0.1, <5",
"lxml>=3.5.0, <=3.6.0", # no wheels for 3.6.1 yet.
"Pillow>=3.2, <3.4",
"passlib>=1.6.5, <1.7",
"pyasn1>=0.1.9, <0.2",
"pyOpenSSL>=16.0, <17.0",
"pyparsing>=2.1.3, <2.2",
"pyperclip>=1.5.22, <1.6",
"requests>=2.9.1, <2.11",
"six>=1.10, <1.11",
"tornado>=4.3, <4.5",
"urwid>=1.3.1, <1.4",
"watchdog>=0.8.3, <0.9",
],
extras_require={
':sys_platform == "win32"': [
"pydivert>=0.0.7, <0.1",
],
':sys_platform != "win32"': [
],
# Do not use a range operator here: https://bitbucket.org/pypa/setuptools/issues/380
# Ubuntu Trusty and other still ship with setuptools < 17.1
':python_version == "2.7"': [
"enum34>=1.0.4, <2",
"ipaddress>=1.0.15, <1.1",
"typing==3.5.2.2",
],
'dev': [
"tox>=2.3, <3",
"mock>=2.0, <2.1",
"pytest>=2.8.7, <3",
"pytest-cov>=2.2.1, <3",
"pytest-timeout>=1.0.0, <2",
"pytest-xdist>=1.14, <2",
"sphinx>=1.3.5, <1.5",
"sphinx-autobuild>=0.5.2, <0.7",
"sphinxcontrib-documentedlist>=0.4.0, <0.5",
"sphinx_rtd_theme>=0.1.9, <0.2",
],
'contentviews': [
"cssutils>=1.0.1, <1.1",
# TODO: Find Python 3 replacements
# "protobuf>=2.6.1, <2.7",
# "pyamf>=0.8.0, <0.9",
],
'examples': [
"beautifulsoup4>=4.4.1, <4.6",
"harparser>=0.2, <0.3",
"pytz>=2015.07.0, <=2016.6.1",
]
}
)
| 1 | 12,006 | We should set a 1.7 upper bound for this as well. :smiley: | mitmproxy-mitmproxy | py |
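A minimal sketch of what the reviewer appears to be asking for: the newly added jsbeautifier entry pinned with an upper bound, following the same ">=lower, <next-minor" convention as the neighbouring requirements (the exact ceiling is an assumption, not taken from the PR).

    # Hypothetical revision of the added install_requires entry; "<1.7" mirrors
    # how passlib and the other dependencies above are pinned.
    install_requires = [
        # ...
        "jsbeautifier>=1.6.3, <1.7",
        # ...
    ]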
@@ -39,11 +39,14 @@ import Spinner from 'GoogleComponents/spinner';
import SettingsOverlay from 'GoogleComponents/settings/settings-overlay';
import GenericError from 'GoogleComponents/notifications/generic-error';
-const { Component, Fragment } = wp.element;
-const { __, sprintf } = wp.i18n;
-const { filter, map } = lodash;
-const { applyFilters } = wp.hooks;
-const { withFilters } = wp.components;
+/**
+ * WordPress dependencies
+ */
+import { Component, Fragment } from '@wordpress/element';
+import { __, sprintf } from '@wordpress/i18n';
+import { filter, map } from 'lodash';
+import { applyFilters } from '@wordpress/hooks';
+import { withFilters } from '@wordpress/components';
/**
* A single module. Keeps track of its own active state and settings. | 1 | /**
* SettingsModule component.
*
* Site Kit by Google, Copyright 2019 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* External dependencies
*/
import PropTypes from 'prop-types';
import Link from 'GoogleComponents/link';
import Button from 'GoogleComponents/button';
import data, { TYPE_MODULES } from 'GoogleComponents/data';
import SvgIcon from 'GoogleUtil/svg-icon';
import SetupModule from 'GoogleComponents/setup-module';
import Dialog from 'GoogleComponents/dialog';
import ModuleSettingsDetails from 'GoogleComponents/settings/module-settings-details';
import ModuleSetupIncomplete from 'GoogleComponents/settings/module-setup-incomplete';
import {
activateOrDeactivateModule,
refreshAuthentication,
getReAuthURL,
moduleIcon,
showErrorNotification,
} from 'GoogleUtil';
import Spinner from 'GoogleComponents/spinner';
import SettingsOverlay from 'GoogleComponents/settings/settings-overlay';
import GenericError from 'GoogleComponents/notifications/generic-error';
const { Component, Fragment } = wp.element;
const { __, sprintf } = wp.i18n;
const { filter, map } = lodash;
const { applyFilters } = wp.hooks;
const { withFilters } = wp.components;
/**
* A single module. Keeps track of its own active state and settings.
*/
class SettingsModule extends Component {
constructor( props ) {
super( props );
const { slug } = props;
const { setupComplete } = googlesitekit.modules[ slug ];
this.state = {
isSaving: false,
active: props.active,
setupComplete,
dialogActive: false,
};
this.deactivate = this.deactivate.bind( this );
this.activateOrDeactivate = this.activateOrDeactivate.bind( this );
this.handleDialog = this.handleDialog.bind( this );
this.handleCloseModal = this.handleCloseModal.bind( this );
this.handleConfirmRemoveModule = this.handleConfirmRemoveModule.bind( this );
}
componentDidMount() {
window.addEventListener( 'keyup', this.handleCloseModal, false );
}
componentWillUnmount() {
window.removeEventListener( 'keyup', this.handleCloseModal );
}
async activateOrDeactivate() {
try {
const { active } = this.state;
const newActiveState = ! active;
this.setState( { isSaving: true } );
await activateOrDeactivateModule(
data,
this.props.slug,
newActiveState
);
await refreshAuthentication();
if ( false === newActiveState ) {
data.invalidateCacheGroup( TYPE_MODULES, this.props.slug );
}
this.setState( {
isSaving: false,
active: newActiveState,
} );
window.location = getReAuthURL( this.props.slug, false );
} catch ( err ) {
showErrorNotification( GenericError, {
id: 'activate-module-error',
title: __( 'Internal Server Error', 'google-site-kit' ),
description: err.message,
format: 'small',
type: 'win-error',
} );
this.setState( { isSaving: false } );
}
}
deactivate() {
if ( this.props.autoActivate ) {
return;
}
this.activateOrDeactivate();
}
handleDialog() {
this.setState( ( prevState ) => {
return {
dialogActive: ! prevState.dialogActive,
};
} );
}
// Handle user click on the confirm removal button.
handleConfirmRemoveModule() {
this.deactivate();
}
handleCloseModal( e ) {
if ( 27 === e.keyCode ) {
this.setState( {
dialogActive: false,
} );
}
}
// Find modules that depend on a module.
getDependentModules() {
const { slug } = this.props;
const { modules } = googlesitekit;
const dependants = {};
if ( modules[ slug ].dependants ) {
modules[ slug ].dependants.forEach( ( dependantSlug ) => {
if ( modules[ dependantSlug ] ) {
dependants[ dependantSlug ] = modules[ dependantSlug ];
}
} );
}
return dependants;
}
render() {
const {
active,
setupComplete,
dialogActive,
} = this.state;
const {
name,
slug,
homepage,
isEditing,
isOpen,
handleAccordion,
handleEdit,
description,
hasSettings,
autoActivate,
provides,
isSaving,
screenID,
error,
} = this.props;
const moduleKey = `${ slug }-module`;
const isConnected = applyFilters( `googlesitekit.Connected-${ slug }`, setupComplete );
const connectedClassName = isConnected ?
'googlesitekit-settings-module__status-icon--connected' :
'googlesitekit-settings-module__status-icon--not-connected';
const subtitle = sprintf( __( 'By disconnecting the %s module from Site Kit, you will no longer have access to:', 'google-site-kit' ), name );
const isSavingModule = isSaving === `${ slug }-module`;
// Disabled because this rule doesn't acknowledge our use of the variable
// as a component in JSX.
// eslint-disable-next-line @wordpress/no-unused-vars-before-return
const FilteredModuleSettingsDetails = withFilters( `googlesitekit.ModuleSettingsDetails-${ slug }` )( ModuleSettingsDetails );
// Disable other modules during editing
const modulesBeingEdited = filter( isEditing, ( module ) => module );
const editActive = 0 < modulesBeingEdited.length;
const dependentModules = map( this.getDependentModules(), 'name' ).join( ', ' );
const nothingToSave = 'pagespeed-insights' === slug;
// Set button text based on state.
let buttonText = __( 'Close', 'google-site-kit' );
if ( hasSettings && setupComplete ) {
if ( isSavingModule ) {
buttonText = __( 'Saving...', 'google-site-kit' );
} else if ( nothingToSave === false ) {
buttonText = __( 'Confirm Changes', 'google-site-kit' );
}
}
return (
<Fragment>
{ active ? (
<div
className={ `
googlesitekit-settings-module
googlesitekit-settings-module--${ slug }
googlesitekit-settings-module--active
${ error && editActive && isEditing[ moduleKey ] ? 'googlesitekit-settings-module--error' : '' }
` }
key={ moduleKey }
>
{ editActive && ! isEditing[ moduleKey ] && <SettingsOverlay compress={ ! isOpen } /> }
<button
className={ `
googlesitekit-settings-module__header
${ isOpen ? 'googlesitekit-settings-module__header--open' : '' }
` }
id={ `googlesitekit-settings-module__header--${ slug }` }
type="button"
role="tab"
aria-selected={ !! isOpen }
aria-expanded={ !! isOpen }
aria-controls={ `googlesitekit-settings-module__content--${ slug }` }
onClick={ handleAccordion.bind( null, slug ) }
>
{ error && editActive && isEditing[ moduleKey ] &&
<div className="googlesitekit-settings-module__error">
<div className="mdc-layout-grid">
<div className="mdc-layout-grid__inner">
<div className="
mdc-layout-grid__cell
mdc-layout-grid__cell--span-12
">
{ __( 'Error:', 'google-site-kit' ) } { error.errorMsg }
</div>
</div>
</div>
</div>
}
<div className="mdc-layout-grid">
<div className="mdc-layout-grid__inner">
<div className="
mdc-layout-grid__cell
mdc-layout-grid__cell--span-6-desktop
mdc-layout-grid__cell--span-4-tablet
mdc-layout-grid__cell--span-4-phone
">
<h3 className="
googlesitekit-heading-4
googlesitekit-settings-module__title
">
{ moduleIcon( slug, false, '24', '26', 'googlesitekit-settings-module__title-icon' ) }
{ name }
</h3>
</div>
<div className="
mdc-layout-grid__cell
mdc-layout-grid__cell--span-6-desktop
mdc-layout-grid__cell--span-4-tablet
mdc-layout-grid__cell--span-4-phone
mdc-layout-grid__cell--align-middle
mdc-layout-grid__cell--align-right-tablet
">
<p className="googlesitekit-settings-module__status">
{
isConnected ?
sprintf( __( '%s is connected', 'google-site-kit' ), name ) :
sprintf( __( '%s is not connected', 'google-site-kit' ), name )
}
<span className={ `googlesitekit-settings-module__status-icon ${ connectedClassName } ` }>
<span className="screen-reader-text">
{ isConnected ?
__( 'Connected', 'google-site-kit' ) :
__( 'Not Connected', 'google-site-kit' )
}
</span>
</span>
</p>
</div>
</div>
</div>
</button>
<div
className={ `
googlesitekit-settings-module__content
${ isOpen ? 'googlesitekit-settings-module__content--open' : '' }
` }
id={ `googlesitekit-settings-module__content--${ slug }` }
role="tabpanel"
aria-hidden={ ! isOpen }
aria-labelledby={ `googlesitekit-settings-module__header--${ slug }` }
>
<div className="mdc-layout-grid">
<div className="mdc-layout-grid__inner">
{ setupComplete &&
<Fragment>
<div className="
mdc-layout-grid__cell
mdc-layout-grid__cell--span-12
">
<FilteredModuleSettingsDetails module={ moduleKey } isEditing={ isEditing[ moduleKey ] } />
</div>
</Fragment>
}
{
hasSettings && ! setupComplete &&
<ModuleSetupIncomplete
screenID={ screenID }
slug={ slug }
/>
}
</div>
</div>
<footer className="googlesitekit-settings-module__footer">
<div className="mdc-layout-grid">
<div className="mdc-layout-grid__inner">
<div className="
mdc-layout-grid__cell
mdc-layout-grid__cell--span-6-desktop
mdc-layout-grid__cell--span-8-tablet
mdc-layout-grid__cell--span-4-phone
">
{ isEditing[ moduleKey ] || isSavingModule ? (
<Fragment>
<Button
onClick={ () => handleEdit( moduleKey, setupComplete ? 'confirm' : 'cancel', nothingToSave ) }
disabled={ isSavingModule }
id={ hasSettings && setupComplete ? `confirm-changes-${ slug }` : `close-${ slug }` }
>
{ buttonText }
</Button>
<Spinner isSaving={ isSavingModule } />
{ hasSettings &&
<Link
className="googlesitekit-settings-module__footer-cancel"
onClick={ () => handleEdit( moduleKey, 'cancel' ) }
inherit
>
{ __( 'Cancel', 'google-site-kit' ) }
</Link>
}
</Fragment>
) : ( hasSettings &&
<Link
className="googlesitekit-settings-module__edit-button"
onClick={ () => {
handleEdit( moduleKey, 'edit' );
} }
inherit
>
{ __( 'Edit', 'google-site-kit' ) }
<SvgIcon
className="googlesitekit-settings-module__edit-button-icon"
id="pencil"
width="10"
height="10"
/>
</Link>
) }
</div>
<div className="
mdc-layout-grid__cell
mdc-layout-grid__cell--span-6-desktop
mdc-layout-grid__cell--span-8-tablet
mdc-layout-grid__cell--span-4-phone
mdc-layout-grid__cell--align-middle
mdc-layout-grid__cell--align-right-desktop
">
{ isEditing[ moduleKey ] && ! autoActivate && (
<Link
className="googlesitekit-settings-module__remove-button"
onClick={ this.handleDialog }
inherit
danger
>
{ sprintf( __( 'Disconnect %s from Site Kit', 'google-site-kit' ), name ) }
<SvgIcon
className="googlesitekit-settings-module__remove-button-icon"
id="trash"
width="13"
height="13"
/>
</Link>
) }
{ ! isEditing[ moduleKey ] && (
<Link
href={ homepage }
className="googlesitekit-settings-module__cta-button"
inherit
external
>
{ sprintf( __( 'See full details in %s', 'google-site-kit' ), name ) }
</Link>
) }
</div>
</div>
</div>
</footer>
</div>
<Dialog
dialogActive={ dialogActive }
handleDialog={ this.handleDialog }
title={ sprintf( __( 'Disconnect %s from Site Kit?', 'google-site-kit' ), name ) }
subtitle={ subtitle }
onKeyPress={ this.handleCloseModal }
provides={ provides }
handleConfirm={ this.handleConfirmRemoveModule }
dependentModules={ dependentModules ?
sprintf(
__( 'these active modules depend on %s and will also be disconnected: ', 'google-site-kit' ),
name
) + dependentModules : false
}
/>
</div>
) : (
<Fragment>
<SetupModule
key={ `${ slug }-module` }
slug={ slug }
name={ name }
description={ description }
active={ active }
showLink
/>
</Fragment>
)
}
</Fragment>
);
}
}
SettingsModule.propTypes = {
name: PropTypes.string,
slug: PropTypes.string,
homepage: PropTypes.string,
isEditing: PropTypes.object,
handleEdit: PropTypes.func,
handleDialog: PropTypes.func,
autoActivate: PropTypes.bool,
hasSettings: PropTypes.bool,
required: PropTypes.array,
active: PropTypes.bool,
};
SettingsModule.defaultProps = {
name: '',
slug: '',
homepage: '',
isEditing: {},
handleEdit: null,
handleDialog: null,
active: false,
};
export default SettingsModule;
| 1 | 24,794 | Lodash mixed in with WordPress dependencies | google-site-kit-wp | js |
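A sketch of one way to address this comment, assuming the intent is to move the lodash import under the file's existing "External dependencies" header instead of grouping it with the WordPress packages.

    /**
     * External dependencies
     */
    import { filter, map } from 'lodash';

    /**
     * WordPress dependencies
     */
    import { Component, Fragment } from '@wordpress/element';
    import { __, sprintf } from '@wordpress/i18n';
    import { applyFilters } from '@wordpress/hooks';
    import { withFilters } from '@wordpress/components';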
@@ -22,10 +22,10 @@ class StripeEvents
if subscription = Subscription.find_by(stripe_id: stripe_subscription.id)
subscription
else
- Airbrake.notify_or_ignore(
+ Honeybadger.notify(
error_message: "No subscription found for #{stripe_subscription.id}",
error_class: "StripeEvents",
- parameters: @event.to_hash
+ context: @event.to_hash,
)
nil
end | 1 | class StripeEvents
def initialize(event)
@event = event
end
def customer_subscription_deleted
if subscription
Cancellation.new(subscription: subscription).process
end
end
def customer_subscription_updated
if subscription
subscription.write_plan(sku: stripe_subscription.plan.id)
SubscriptionUpcomingInvoiceUpdater.new([subscription]).process
end
end
private
def subscription
if subscription = Subscription.find_by(stripe_id: stripe_subscription.id)
subscription
else
Airbrake.notify_or_ignore(
error_message: "No subscription found for #{stripe_subscription.id}",
error_class: "StripeEvents",
parameters: @event.to_hash
)
nil
end
end
def stripe_subscription
@event.data.object
end
end
| 1 | 16,925 | Put a comma after the last parameter of a multiline method call. | thoughtbot-upcase | rb |
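A short illustration of the style rule being cited, applied to the call shown in the diff above: a trailing comma after the final keyword argument of a multiline call keeps future additions to one-line diffs.

    Honeybadger.notify(
      error_message: "No subscription found for #{stripe_subscription.id}",
      error_class: "StripeEvents",
      context: @event.to_hash,  # trailing comma after the last parameter
    )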
@@ -0,0 +1,11 @@
+describe "User can manage their own profile" do
+ it "allows editing of name" do
+ user = create(:user)
+ login_as(user)
+ visit "/me"
+ fill_in :first_name, with: "Some"
+ fill_in :last_name, with: "Body"
+ click_button "Update profile"
+ expect(page).to have_content("Welcome, Some Body <#{user.email_address}>")
+ end
+end | 1 | 1 | 15,454 | perhaps we should test that users cannot edit the profiles of others? | 18F-C2 | rb |
|
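A hypothetical companion spec sketching the reviewer's suggestion; the route and the expected response text are assumptions, since the application's authorization behaviour is not shown in this record.

    describe "User cannot manage another user's profile" do
      it "does not allow editing someone else's name" do
        user = create(:user)
        other_user = create(:user)
        login_as(user)
        visit "/users/#{other_user.id}/edit"          # assumed route
        expect(page).to have_content("not authorized")  # assumed error copy
      end
    end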
@@ -43,7 +43,7 @@ class TestMasterSecretLogger(tservers.ServerTestBase):
tls.log_master_secret.close()
with open(logfile, "rb") as f:
- assert f.read().count(b"CLIENT_RANDOM") >= 2
+ assert f.read().count(b"SERVER_HANDSHAKE_TRAFFIC_SECRET") >= 2
tls.log_master_secret = _logfun
| 1 | import io
import pytest
from mitmproxy import exceptions
from mitmproxy.net import tls
from mitmproxy.net.tcp import TCPClient
from test.mitmproxy.net.test_tcp import EchoHandler
from . import tservers
CLIENT_HELLO_NO_EXTENSIONS = bytes.fromhex(
"03015658a756ab2c2bff55f636814deac086b7ca56b65058c7893ffc6074f5245f70205658a75475103a152637"
"78e1bb6d22e8bbd5b6b0a3a59760ad354e91ba20d353001a0035002f000a000500040009000300060008006000"
"61006200640100"
)
FULL_CLIENT_HELLO_NO_EXTENSIONS = (
b"\x16\x03\x03\x00\x65" # record layer
b"\x01\x00\x00\x61" + # handshake header
CLIENT_HELLO_NO_EXTENSIONS
)
class TestMasterSecretLogger(tservers.ServerTestBase):
handler = EchoHandler
ssl = dict(
cipher_list="AES256-SHA"
)
def test_log(self, tmpdir):
testval = b"echo!\n"
_logfun = tls.log_master_secret
logfile = str(tmpdir.join("foo", "bar", "logfile"))
tls.log_master_secret = tls.MasterSecretLogger(logfile)
c = TCPClient(("127.0.0.1", self.port))
with c.connect():
c.convert_to_tls()
c.wfile.write(testval)
c.wfile.flush()
assert c.rfile.readline() == testval
c.finish()
tls.log_master_secret.close()
with open(logfile, "rb") as f:
assert f.read().count(b"CLIENT_RANDOM") >= 2
tls.log_master_secret = _logfun
def test_create_logfun(self):
assert isinstance(
tls.MasterSecretLogger.create_logfun("test"),
tls.MasterSecretLogger)
assert not tls.MasterSecretLogger.create_logfun(False)
class TestTLSInvalid:
def test_invalid_ssl_method_should_fail(self):
fake_ssl_method = 100500
with pytest.raises(exceptions.TlsException):
tls.create_client_context(method=fake_ssl_method)
def test_alpn_error(self):
with pytest.raises(exceptions.TlsException, match="must be a function"):
tls.create_client_context(alpn_select_callback="foo")
with pytest.raises(exceptions.TlsException, match="ALPN error"):
tls.create_client_context(alpn_select="foo", alpn_select_callback="bar")
def test_is_record_magic():
assert not tls.is_tls_record_magic(b"POST /")
assert not tls.is_tls_record_magic(b"\x16\x03")
assert not tls.is_tls_record_magic(b"\x16\x03\x04")
assert tls.is_tls_record_magic(b"\x16\x03\x00")
assert tls.is_tls_record_magic(b"\x16\x03\x01")
assert tls.is_tls_record_magic(b"\x16\x03\x02")
assert tls.is_tls_record_magic(b"\x16\x03\x03")
def test_get_client_hello():
rfile = io.BufferedReader(io.BytesIO(
FULL_CLIENT_HELLO_NO_EXTENSIONS
))
assert tls.get_client_hello(rfile)
rfile = io.BufferedReader(io.BytesIO(
FULL_CLIENT_HELLO_NO_EXTENSIONS[:30]
))
with pytest.raises(exceptions.TlsProtocolException, match="Unexpected EOF"):
tls.get_client_hello(rfile)
rfile = io.BufferedReader(io.BytesIO(
b"GET /"
))
with pytest.raises(exceptions.TlsProtocolException, match="Expected TLS record"):
tls.get_client_hello(rfile)
class TestClientHello:
def test_no_extensions(self):
c = tls.ClientHello(CLIENT_HELLO_NO_EXTENSIONS)
assert repr(c)
assert c.sni is None
assert c.cipher_suites == [53, 47, 10, 5, 4, 9, 3, 6, 8, 96, 97, 98, 100]
assert c.alpn_protocols == []
assert c.extensions == []
def test_extensions(self):
data = bytes.fromhex(
"03033b70638d2523e1cba15f8364868295305e9c52aceabda4b5147210abc783e6e1000022c02bc02fc02cc030"
"cca9cca8cc14cc13c009c013c00ac014009c009d002f0035000a0100006cff0100010000000010000e00000b65"
"78616d706c652e636f6d0017000000230000000d00120010060106030501050304010403020102030005000501"
"00000000001200000010000e000c02683208687474702f312e3175500000000b00020100000a00080006001d00"
"170018"
)
c = tls.ClientHello(data)
assert repr(c)
assert c.sni == b'example.com'
assert c.cipher_suites == [
49195, 49199, 49196, 49200, 52393, 52392, 52244, 52243, 49161,
49171, 49162, 49172, 156, 157, 47, 53, 10
]
assert c.alpn_protocols == [b'h2', b'http/1.1']
assert c.extensions == [
(65281, b'\x00'),
(0, b'\x00\x0e\x00\x00\x0bexample.com'),
(23, b''),
(35, b''),
(13, b'\x00\x10\x06\x01\x06\x03\x05\x01\x05\x03\x04\x01\x04\x03\x02\x01\x02\x03'),
(5, b'\x01\x00\x00\x00\x00'),
(18, b''),
(16, b'\x00\x0c\x02h2\x08http/1.1'),
(30032, b''),
(11, b'\x01\x00'),
(10, b'\x00\x06\x00\x1d\x00\x17\x00\x18')
]
def test_from_file(self):
rfile = io.BufferedReader(io.BytesIO(
FULL_CLIENT_HELLO_NO_EXTENSIONS
))
assert tls.ClientHello.from_file(rfile)
rfile = io.BufferedReader(io.BytesIO(
b""
))
with pytest.raises(exceptions.TlsProtocolException):
tls.ClientHello.from_file(rfile)
rfile = io.BufferedReader(io.BytesIO(
b"\x16\x03\x03\x00\x07" # record layer
b"\x01\x00\x00\x03" + # handshake header
b"foo"
))
with pytest.raises(exceptions.TlsProtocolException, match='Cannot parse Client Hello'):
tls.ClientHello.from_file(rfile)
| 1 | 15,275 | Does this mean the content changed with the new pyopenssl version? Is Wireshark already compatible with this new format? | mitmproxy-mitmproxy | py |
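For context on this question: the NSS key log format uses "CLIENT_RANDOM" lines for TLS 1.2 sessions and per-secret labels such as "SERVER_HANDSHAKE_TRAFFIC_SECRET" for TLS 1.3, so a version-agnostic assertion could count both labels. A sketch under that assumption, not the project's actual test:

    with open(logfile, "rb") as f:
        contents = f.read()
    tls12_entries = contents.count(b"CLIENT_RANDOM")
    tls13_entries = contents.count(b"SERVER_HANDSHAKE_TRAFFIC_SECRET")
    # Accept either key-log flavour, depending on the negotiated TLS version.
    assert tls12_entries + tls13_entries >= 2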
@@ -618,7 +618,7 @@ func TestCachedBatch(t *testing.T) {
}
func TestSTXCachedBatch(t *testing.T) {
- ws := newStateTX(0, db.NewMemKVStore(), []protocol.ActionHandler{account.NewProtocol(config.NewHeightUpgrade(config.Default))})
+ ws := newStateTX(0, db.NewMemKVStore(), []protocol.ActionHandler{account.NewProtocol(config.NewHeightUpgrade(config.Default))}, config.Default.DB)
testCachedBatch(ws, t, true)
}
| 1 | // Copyright (c) 2019 IoTeX Foundation
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package factory
import (
"context"
"io/ioutil"
"math/big"
"math/rand"
"os"
"path/filepath"
"testing"
"time"
"github.com/iotexproject/go-pkgs/crypto"
"github.com/iotexproject/go-pkgs/hash"
"github.com/iotexproject/iotex-address/address"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/iotexproject/iotex-core/action"
"github.com/iotexproject/iotex-core/action/protocol"
"github.com/iotexproject/iotex-core/action/protocol/account"
accountutil "github.com/iotexproject/iotex-core/action/protocol/account/util"
"github.com/iotexproject/iotex-core/action/protocol/vote/candidatesutil"
"github.com/iotexproject/iotex-core/config"
"github.com/iotexproject/iotex-core/db"
"github.com/iotexproject/iotex-core/pkg/enc"
"github.com/iotexproject/iotex-core/pkg/util/byteutil"
"github.com/iotexproject/iotex-core/pkg/util/fileutil"
"github.com/iotexproject/iotex-core/state"
"github.com/iotexproject/iotex-core/test/identityset"
"github.com/iotexproject/iotex-core/testutil"
)
const (
triePath = "trie.test"
stateDBPath = "stateDB.test"
)
var letterRunes = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
func randStringRunes(n int) string {
b := make([]rune, n)
for i := range b {
b[i] = letterRunes[rand.Intn(len(letterRunes))]
}
return string(b)
}
func TestSnapshot(t *testing.T) {
require := require.New(t)
testTrieFile, _ := ioutil.TempFile(os.TempDir(), triePath)
testTriePath := testTrieFile.Name()
cfg := config.Default
cfg.DB.DbPath = testTriePath
sf, err := NewFactory(cfg, PrecreatedTrieDBOption(db.NewBoltDB(cfg.DB)))
require.NoError(err)
require.NoError(sf.Start(context.Background()))
defer func() {
require.NoError(sf.Stop(context.Background()))
}()
ws, err := sf.NewWorkingSet()
require.NoError(err)
testSnapshot(ws, t)
testRevert(ws, t)
}
func TestSDBSnapshot(t *testing.T) {
require := require.New(t)
testTrieFile, _ := ioutil.TempFile(os.TempDir(), stateDBPath)
testStateDBPath := testTrieFile.Name()
cfg := config.Default
cfg.Chain.TrieDBPath = testStateDBPath
sdb, err := NewStateDB(cfg, DefaultStateDBOption())
require.NoError(err)
require.NoError(sdb.Start(context.Background()))
ws, err := sdb.NewWorkingSet()
require.NoError(err)
testSnapshot(ws, t)
testSDBRevert(ws, t)
}
func testRevert(ws WorkingSet, t *testing.T) {
require := require.New(t)
addr := identityset.Address(28).String()
_, err := accountutil.LoadOrCreateAccount(ws, addr, big.NewInt(5))
require.NoError(err)
sHash := hash.BytesToHash160(identityset.Address(28).Bytes())
s, err := accountutil.LoadAccount(ws, sHash)
require.NoError(err)
require.Equal(big.NewInt(5), s.Balance)
s0 := ws.Snapshot()
require.Equal(1, s0)
h0 := ws.RootHash()
require.NotEqual(h0, hash.ZeroHash256)
s.Balance.Add(s.Balance, big.NewInt(5))
require.Equal(big.NewInt(10), s.Balance)
require.NoError(ws.PutState(sHash, s))
h1 := ws.RootHash()
require.NotEqual(h1, h0)
require.NoError(ws.Revert(s0))
require.NoError(ws.State(sHash, s))
require.Equal(big.NewInt(5), s.Balance)
h2 := ws.RootHash()
require.Equal(h0, h2)
}
func testSDBRevert(ws WorkingSet, t *testing.T) {
require := require.New(t)
addr := identityset.Address(28).String()
_, err := accountutil.LoadOrCreateAccount(ws, addr, big.NewInt(5))
require.NoError(err)
sHash := hash.BytesToHash160(identityset.Address(28).Bytes())
s, err := accountutil.LoadAccount(ws, sHash)
require.NoError(err)
require.Equal(big.NewInt(5), s.Balance)
s0 := ws.Snapshot()
require.Equal(1, s0)
h0 := ws.Digest()
require.NotEqual(h0, hash.ZeroHash256)
s.Balance.Add(s.Balance, big.NewInt(5))
require.Equal(big.NewInt(10), s.Balance)
require.NoError(ws.PutState(sHash, s))
h1 := ws.Digest()
require.NotEqual(h1, h0)
require.NoError(ws.Revert(s0))
require.NoError(ws.State(sHash, s))
require.Equal(big.NewInt(5), s.Balance)
h2 := ws.Digest()
require.Equal(h0, h2)
}
func testSnapshot(ws WorkingSet, t *testing.T) {
require := require.New(t)
addr := identityset.Address(28).String()
_, err := accountutil.LoadOrCreateAccount(ws, addr, big.NewInt(5))
require.NoError(err)
sHash := hash.BytesToHash160(identityset.Address(28).Bytes())
s, err := accountutil.LoadAccount(ws, sHash)
require.NoError(err)
require.Equal(big.NewInt(5), s.Balance)
s0 := ws.Snapshot()
require.Zero(s0)
s.Balance.Add(s.Balance, big.NewInt(5))
require.Equal(big.NewInt(10), s.Balance)
require.NoError(ws.PutState(sHash, s))
s1 := ws.Snapshot()
require.Equal(1, s1)
s.Balance.Add(s.Balance, big.NewInt(5))
require.Equal(big.NewInt(15), s.Balance)
require.NoError(ws.PutState(sHash, s))
// add another account
addr = identityset.Address(29).String()
_, err = accountutil.LoadOrCreateAccount(ws, addr, big.NewInt(7))
require.NoError(err)
tHash := hash.BytesToHash160(identityset.Address(29).Bytes())
s, err = accountutil.LoadAccount(ws, tHash)
require.NoError(err)
require.Equal(big.NewInt(7), s.Balance)
s2 := ws.Snapshot()
require.Equal(2, s2)
require.NoError(s.AddBalance(big.NewInt(6)))
require.Equal(big.NewInt(13), s.Balance)
require.NoError(ws.PutState(tHash, s))
require.NoError(ws.Revert(s2))
require.NoError(ws.State(sHash, s))
require.Equal(big.NewInt(15), s.Balance)
require.NoError(ws.State(tHash, s))
require.Equal(big.NewInt(7), s.Balance)
require.NoError(ws.Revert(s1))
require.NoError(ws.State(sHash, s))
require.Equal(big.NewInt(10), s.Balance)
require.Equal(state.ErrStateNotExist, errors.Cause(ws.State(tHash, s)))
require.NoError(ws.Revert(s0))
require.NoError(ws.State(sHash, s))
require.Equal(big.NewInt(5), s.Balance)
require.Equal(state.ErrStateNotExist, errors.Cause(ws.State(tHash, s)))
}
func TestCandidates(t *testing.T) {
cfg := config.Default
sf, err := NewFactory(cfg, InMemTrieOption())
require.NoError(t, err)
testCandidates(sf, t)
}
func TestSDBCandidates(t *testing.T) {
cfg := config.Default
sdb, err := NewStateDB(cfg, InMemStateDBOption())
require.NoError(t, err)
testCandidates(sdb, t)
}
func testCandidates(sf Factory, t *testing.T) {
ws, err := sf.NewWorkingSet()
require.NoError(t, err)
require.NoError(t, candidatesutil.LoadAndAddCandidates(ws, 1, identityset.Address(0).String()))
require.NoError(t, candidatesutil.LoadAndUpdateCandidates(ws, 1, identityset.Address(0).String(), big.NewInt(0)))
require.NoError(t, candidatesutil.LoadAndAddCandidates(ws, 1, identityset.Address(1).String()))
require.NoError(t, candidatesutil.LoadAndUpdateCandidates(ws, 1, identityset.Address(1).String(), big.NewInt(1)))
require.NoError(t, sf.Commit(ws))
candidates, err := sf.CandidatesByHeight(1)
require.NoError(t, err)
require.Equal(t, 2, len(candidates))
assert.Equal(t, candidates[0].Address, identityset.Address(1).String())
assert.Equal(t, candidates[0].Votes, big.NewInt(1))
assert.Equal(t, candidates[1].Address, identityset.Address(0).String())
assert.Equal(t, candidates[1].Votes, big.NewInt(0))
}
func TestState(t *testing.T) {
testTrieFile, _ := ioutil.TempFile(os.TempDir(), triePath)
testTriePath := testTrieFile.Name()
cfg := config.Default
cfg.DB.DbPath = testTriePath
sf, err := NewFactory(cfg, PrecreatedTrieDBOption(db.NewBoltDB(cfg.DB)))
require.NoError(t, err)
testState(sf, t)
}
func TestSDBState(t *testing.T) {
testDBFile, _ := ioutil.TempFile(os.TempDir(), stateDBPath)
testDBPath := testDBFile.Name()
cfg := config.Default
cfg.Chain.TrieDBPath = testDBPath
sdb, err := NewStateDB(cfg, DefaultStateDBOption())
require.NoError(t, err)
testState(sdb, t)
}
func testState(sf Factory, t *testing.T) {
// Create a dummy iotex address
a := identityset.Address(28).String()
priKeyA := identityset.PrivateKey(28)
sf.AddActionHandlers(account.NewProtocol(config.NewHeightUpgrade(config.Default)))
require.NoError(t, sf.Start(context.Background()))
defer func() {
require.NoError(t, sf.Stop(context.Background()))
}()
ws, err := sf.NewWorkingSet()
require.NoError(t, err)
_, err = accountutil.LoadOrCreateAccount(ws, a, big.NewInt(100))
require.NoError(t, err)
tsf, err := action.NewTransfer(1, big.NewInt(10), identityset.Address(31).String(), nil, uint64(20000), big.NewInt(0))
require.NoError(t, err)
bd := &action.EnvelopeBuilder{}
elp := bd.SetAction(tsf).SetGasLimit(20000).Build()
selp, err := action.Sign(elp, priKeyA)
require.NoError(t, err)
gasLimit := uint64(1000000)
raCtx := protocol.RunActionsCtx{
Producer: identityset.Address(27),
GasLimit: gasLimit,
}
_, err = ws.RunAction(raCtx, selp)
require.NoError(t, err)
_ = ws.UpdateBlockLevelInfo(0)
require.NoError(t, sf.Commit(ws))
//test AccountState() & State()
var testAccount state.Account
accountA, err := sf.AccountState(a)
require.NoError(t, err)
sHash := hash.BytesToHash160(identityset.Address(28).Bytes())
err = sf.State(sHash, &testAccount)
require.NoError(t, err)
require.Equal(t, accountA, &testAccount)
require.Equal(t, big.NewInt(90), accountA.Balance)
}
func TestNonce(t *testing.T) {
testTrieFile, _ := ioutil.TempFile(os.TempDir(), triePath)
testTriePath := testTrieFile.Name()
cfg := config.Default
cfg.DB.DbPath = testTriePath
sf, err := NewFactory(cfg, PrecreatedTrieDBOption(db.NewBoltDB(cfg.DB)))
require.NoError(t, err)
testNonce(sf, t)
}
func TestSDBNonce(t *testing.T) {
testDBFile, _ := ioutil.TempFile(os.TempDir(), stateDBPath)
testDBPath := testDBFile.Name()
cfg := config.Default
cfg.Chain.TrieDBPath = testDBPath
sdb, err := NewStateDB(cfg, DefaultStateDBOption())
require.NoError(t, err)
testNonce(sdb, t)
}
func testNonce(sf Factory, t *testing.T) {
// Create two dummy iotex address
a := identityset.Address(28).String()
priKeyA := identityset.PrivateKey(28)
b := identityset.Address(29).String()
sf.AddActionHandlers(account.NewProtocol(config.NewHeightUpgrade(config.Default)))
require.NoError(t, sf.Start(context.Background()))
defer func() {
require.NoError(t, sf.Stop(context.Background()))
}()
ws, err := sf.NewWorkingSet()
require.NoError(t, err)
_, err = accountutil.LoadOrCreateAccount(ws, a, big.NewInt(100))
require.NoError(t, err)
tx, err := action.NewTransfer(0, big.NewInt(2), b, nil, uint64(20000), big.NewInt(0))
require.NoError(t, err)
bd := &action.EnvelopeBuilder{}
elp := bd.SetAction(tx).SetNonce(0).SetGasLimit(20000).Build()
selp, err := action.Sign(elp, priKeyA)
require.NoError(t, err)
gasLimit := uint64(1000000)
raCtx := protocol.RunActionsCtx{
Producer: identityset.Address(27),
GasLimit: gasLimit,
}
_, err = ws.RunAction(raCtx, selp)
require.NoError(t, err)
nonce, err := sf.Nonce(a)
require.NoError(t, err)
require.Equal(t, uint64(0), nonce)
tx, err = action.NewTransfer(1, big.NewInt(2), b, nil, uint64(20000), big.NewInt(0))
require.NoError(t, err)
bd = &action.EnvelopeBuilder{}
elp = bd.SetAction(tx).SetNonce(1).SetGasLimit(20000).Build()
selp, err = action.Sign(elp, priKeyA)
require.NoError(t, err)
_, err = ws.RunAction(raCtx, selp)
require.NoError(t, err)
_ = ws.UpdateBlockLevelInfo(0)
require.NoError(t, sf.Commit(ws))
nonce, err = sf.Nonce(a)
require.NoError(t, err)
require.Equal(t, uint64(1), nonce)
}
func TestLoadStoreHeight(t *testing.T) {
require := require.New(t)
testTrieFile, _ := ioutil.TempFile(os.TempDir(), triePath)
testTriePath := testTrieFile.Name()
cfg := config.Default
cfg.Chain.TrieDBPath = testTriePath
statefactory, err := NewFactory(cfg, DefaultTrieOption())
require.NoError(err)
testLoadStoreHeight(statefactory, t)
}
func TestLoadStoreHeightInMem(t *testing.T) {
require := require.New(t)
testTrieFile, _ := ioutil.TempFile(os.TempDir(), triePath)
testTriePath := testTrieFile.Name()
cfg := config.Default
cfg.Chain.TrieDBPath = testTriePath
statefactory, err := NewFactory(cfg, InMemTrieOption())
require.NoError(err)
testLoadStoreHeight(statefactory, t)
}
func TestSDBLoadStoreHeight(t *testing.T) {
require := require.New(t)
testDBFile, _ := ioutil.TempFile(os.TempDir(), stateDBPath)
testDBPath := testDBFile.Name()
cfg := config.Default
cfg.Chain.TrieDBPath = testDBPath
db, err := NewStateDB(cfg, DefaultStateDBOption())
require.NoError(err)
testLoadStoreHeight(db, t)
}
func TestSDBLoadStoreHeightInMem(t *testing.T) {
require := require.New(t)
testDBFile, _ := ioutil.TempFile(os.TempDir(), stateDBPath)
testDBPath := testDBFile.Name()
cfg := config.Default
cfg.Chain.TrieDBPath = testDBPath
db, err := NewStateDB(cfg, InMemStateDBOption())
require.NoError(err)
testLoadStoreHeight(db, t)
}
func testLoadStoreHeight(sf Factory, t *testing.T) {
require := require.New(t)
require.NoError(sf.Start(context.Background()))
defer func() {
require.NoError(sf.Stop(context.Background()))
}()
ws, err := sf.NewWorkingSet()
require.NoError(err)
dao := ws.GetDB()
require.NoError(dao.Put(AccountKVNameSpace, []byte(CurrentHeightKey), byteutil.Uint64ToBytes(0)))
height, err := sf.Height()
require.NoError(err)
require.Equal(uint64(0), height)
require.NoError(dao.Put(AccountKVNameSpace, []byte(CurrentHeightKey), byteutil.Uint64ToBytes(10)))
height, err = sf.Height()
require.NoError(err)
require.Equal(uint64(10), height)
}
func TestFactory_RootHashByHeight(t *testing.T) {
cfg := config.Default
ctx := context.Background()
sf, err := NewFactory(cfg, InMemTrieOption())
require.NoError(t, err)
require.NoError(t, sf.Start(ctx))
defer func() {
require.NoError(t, sf.Stop(ctx))
}()
ws, err := sf.NewWorkingSet()
require.NoError(t, err)
_, err = ws.RunActions(context.Background(), 1, nil)
require.NoError(t, err)
require.NoError(t, sf.Commit(ws))
rootHash, err := sf.RootHashByHeight(1)
require.NoError(t, err)
require.NotEqual(t, hash.ZeroHash256, rootHash)
}
func TestRunActions(t *testing.T) {
require := require.New(t)
testTrieFile, _ := ioutil.TempFile(os.TempDir(), triePath)
testTriePath := testTrieFile.Name()
cfg := config.Default
cfg.DB.DbPath = testTriePath
sf, err := NewFactory(cfg, PrecreatedTrieDBOption(db.NewBoltDB(cfg.DB)))
require.NoError(err)
sf.AddActionHandlers(account.NewProtocol(config.NewHeightUpgrade(cfg)))
require.NoError(sf.Start(context.Background()))
defer func() {
require.NoError(sf.Stop(context.Background()))
}()
ws, err := sf.NewWorkingSet()
require.NoError(err)
testRunActions(ws, t)
}
func TestSTXRunActions(t *testing.T) {
require := require.New(t)
testTrieFile, _ := ioutil.TempFile(os.TempDir(), stateDBPath)
testStateDBPath := testTrieFile.Name()
cfg := config.Default
cfg.Chain.TrieDBPath = testStateDBPath
sdb, err := NewStateDB(cfg, DefaultStateDBOption())
require.NoError(err)
sdb.AddActionHandlers(account.NewProtocol(config.NewHeightUpgrade(cfg)))
require.NoError(sdb.Start(context.Background()))
defer func() {
require.NoError(sdb.Stop(context.Background()))
}()
ws, err := sdb.NewWorkingSet()
require.NoError(err)
testSTXRunActions(ws, t)
}
func testRunActions(ws WorkingSet, t *testing.T) {
require := require.New(t)
require.Equal(uint64(0), ws.Version())
a := identityset.Address(28).String()
priKeyA := identityset.PrivateKey(28)
b := identityset.Address(29).String()
priKeyB := identityset.PrivateKey(29)
_, err := accountutil.LoadOrCreateAccount(ws, a, big.NewInt(100))
require.NoError(err)
_, err = accountutil.LoadOrCreateAccount(ws, b, big.NewInt(200))
require.NoError(err)
tx1, err := action.NewTransfer(uint64(1), big.NewInt(10), b, nil, uint64(100000), big.NewInt(0))
require.NoError(err)
bd := &action.EnvelopeBuilder{}
elp := bd.SetNonce(1).SetAction(tx1).Build()
selp1, err := action.Sign(elp, priKeyA)
require.NoError(err)
tx2, err := action.NewTransfer(uint64(1), big.NewInt(20), a, nil, uint64(100000), big.NewInt(0))
require.NoError(err)
bd = &action.EnvelopeBuilder{}
elp = bd.SetNonce(1).SetAction(tx2).Build()
selp2, err := action.Sign(elp, priKeyB)
require.NoError(err)
gasLimit := uint64(1000000)
ctx := protocol.WithRunActionsCtx(context.Background(),
protocol.RunActionsCtx{
Producer: identityset.Address(27),
GasLimit: gasLimit,
})
s0 := ws.Snapshot()
rootHash0 := ws.RootHash()
_, err = ws.RunActions(ctx, 1, []action.SealedEnvelope{selp1, selp2})
require.NoError(err)
rootHash1 := ws.UpdateBlockLevelInfo(1)
rootHash2 := ws.RootHash()
require.Equal(rootHash1, rootHash2)
h := ws.Height()
require.Equal(uint64(1), h)
require.NoError(ws.Revert(s0))
require.Equal(rootHash0, ws.RootHash())
_, err = ws.RunActions(ctx, 1, []action.SealedEnvelope{selp2, selp1})
require.NoError(err)
rootHash1 = ws.UpdateBlockLevelInfo(1)
require.NoError(ws.Commit())
rootHash3 := ws.RootHash()
require.Equal(rootHash1, rootHash3)
h = ws.Height()
require.Equal(uint64(1), h)
require.Equal(rootHash3, rootHash2)
}
func testSTXRunActions(ws WorkingSet, t *testing.T) {
require := require.New(t)
require.Equal(uint64(0), ws.Version())
a := identityset.Address(28).String()
priKeyA := identityset.PrivateKey(28)
b := identityset.Address(29).String()
priKeyB := identityset.PrivateKey(29)
_, err := accountutil.LoadOrCreateAccount(ws, a, big.NewInt(100))
require.NoError(err)
_, err = accountutil.LoadOrCreateAccount(ws, b, big.NewInt(200))
require.NoError(err)
tx1, err := action.NewTransfer(uint64(1), big.NewInt(10), b, nil, uint64(0), big.NewInt(0))
require.NoError(err)
bd := &action.EnvelopeBuilder{}
elp := bd.SetNonce(1).SetAction(tx1).Build()
selp1, err := action.Sign(elp, priKeyA)
require.NoError(err)
tx2, err := action.NewTransfer(uint64(1), big.NewInt(20), a, nil, uint64(0), big.NewInt(0))
require.NoError(err)
bd = &action.EnvelopeBuilder{}
elp = bd.SetNonce(1).SetAction(tx2).Build()
selp2, err := action.Sign(elp, priKeyB)
require.NoError(err)
gasLimit := uint64(1000000)
ctx := protocol.WithRunActionsCtx(context.Background(),
protocol.RunActionsCtx{
Producer: identityset.Address(27),
GasLimit: gasLimit,
})
s0 := ws.Snapshot()
rootHash0 := ws.Digest()
_, err = ws.RunActions(ctx, 1, []action.SealedEnvelope{selp1, selp2})
require.NoError(err)
ws.UpdateBlockLevelInfo(1)
rootHash2 := ws.Digest()
h := ws.Height()
require.Equal(uint64(1), h)
require.NoError(ws.Revert(s0))
require.Equal(rootHash0, ws.Digest())
_, err = ws.RunActions(ctx, 1, []action.SealedEnvelope{selp2, selp1})
require.NoError(err)
ws.UpdateBlockLevelInfo(1)
require.NoError(ws.Commit())
rootHash3 := ws.Digest()
h = ws.Height()
require.Equal(uint64(1), h)
require.NotEqual(rootHash2, rootHash3)
}
func TestCachedBatch(t *testing.T) {
sf, err := NewFactory(config.Default, InMemTrieOption())
require.NoError(t, err)
ws, err := sf.NewWorkingSet()
require.NoError(t, err)
testCachedBatch(ws, t, false)
}
func TestSTXCachedBatch(t *testing.T) {
ws := newStateTX(0, db.NewMemKVStore(), []protocol.ActionHandler{account.NewProtocol(config.NewHeightUpgrade(config.Default))})
testCachedBatch(ws, t, true)
}
func testCachedBatch(ws WorkingSet, t *testing.T, checkCachedBatchHash bool) {
require := require.New(t)
hash1 := ws.Digest()
	if checkCachedBatchHash {
require.NotEqual(hash.ZeroHash256, hash1)
}
// test PutState()
hashA := hash.BytesToHash160(identityset.Address(28).Bytes())
accountA := state.EmptyAccount()
accountA.Balance = big.NewInt(70)
err := ws.PutState(hashA, accountA)
require.NoError(err)
hash2 := ws.Digest()
	if checkCachedBatchHash {
require.NotEqual(hash1, hash2)
}
// test State()
testAccount := state.EmptyAccount()
err = ws.State(hashA, &testAccount)
require.NoError(err)
require.Equal(accountA, testAccount)
// test DelState()
err = ws.DelState(hashA)
require.NoError(err)
hash3 := ws.Digest()
	if checkCachedBatchHash {
require.NotEqual(hash2, hash3)
}
// can't state account "alfa" anymore
err = ws.State(hashA, &testAccount)
require.Error(err)
}
func TestGetDB(t *testing.T) {
sf, err := NewFactory(config.Default, InMemTrieOption())
require.NoError(t, err)
ws, err := sf.NewWorkingSet()
require.NoError(t, err)
testGetDB(ws, t)
}
func TestSTXGetDB(t *testing.T) {
ws := newStateTX(0, db.NewMemKVStore(), []protocol.ActionHandler{account.NewProtocol(config.NewHeightUpgrade(config.Default))})
testGetDB(ws, t)
}
func testGetDB(ws WorkingSet, t *testing.T) {
require := require.New(t)
memDB := db.NewMemKVStore()
require.Equal(uint64(0), ws.Version())
require.NoError(ws.GetDB().Start(context.Background()))
require.Equal(memDB, ws.GetDB())
}
func TestDeleteAndPutSameKey(t *testing.T) {
testDeleteAndPutSameKey := func(t *testing.T, ws WorkingSet) {
key := hash.Hash160b([]byte("test"))
acc := state.Account{
Nonce: 1,
}
require.NoError(t, ws.PutState(key, acc))
require.NoError(t, ws.DelState(key))
require.Equal(t, state.ErrStateNotExist, errors.Cause(ws.State(key, &acc)))
require.Equal(t, state.ErrStateNotExist, errors.Cause(ws.State(hash.Hash160b([]byte("other")), &acc)))
}
t.Run("workingSet", func(t *testing.T) {
sf, err := NewFactory(config.Default, InMemTrieOption())
require.NoError(t, err)
ws, err := sf.NewWorkingSet()
require.NoError(t, err)
testDeleteAndPutSameKey(t, ws)
})
t.Run("stateTx", func(t *testing.T) {
ws := newStateTX(0, db.NewMemKVStore(), nil)
testDeleteAndPutSameKey(t, ws)
})
}
func BenchmarkInMemRunAction(b *testing.B) {
cfg := config.Default
sf, err := NewFactory(cfg, InMemTrieOption())
if err != nil {
b.Fatal(err)
}
benchRunAction(sf, b)
}
func BenchmarkDBRunAction(b *testing.B) {
tp := filepath.Join(os.TempDir(), triePath)
if fileutil.FileExists(tp) && os.RemoveAll(tp) != nil {
b.Error("Fail to remove testDB file")
}
cfg := config.Default
cfg.DB.DbPath = tp
sf, err := NewFactory(cfg, PrecreatedTrieDBOption(db.NewBoltDB(cfg.DB)))
if err != nil {
b.Fatal(err)
}
benchRunAction(sf, b)
if fileutil.FileExists(tp) && os.RemoveAll(tp) != nil {
b.Error("Fail to remove testDB file")
}
}
func BenchmarkSDBInMemRunAction(b *testing.B) {
cfg := config.Default
sdb, err := NewStateDB(cfg, InMemStateDBOption())
if err != nil {
b.Fatal(err)
}
benchRunAction(sdb, b)
}
func BenchmarkSDBRunAction(b *testing.B) {
tp := filepath.Join(os.TempDir(), stateDBPath)
if fileutil.FileExists(tp) && os.RemoveAll(tp) != nil {
b.Error("Fail to remove testDB file")
}
cfg := config.Default
cfg.Chain.TrieDBPath = tp
sdb, err := NewStateDB(cfg, DefaultStateDBOption())
if err != nil {
b.Fatal(err)
}
benchRunAction(sdb, b)
if fileutil.FileExists(tp) && os.RemoveAll(tp) != nil {
b.Error("Fail to remove testDB file")
}
}
func benchRunAction(sf Factory, b *testing.B) {
// set up
accounts := []string{
identityset.Address(28).String(),
identityset.Address(29).String(),
identityset.Address(30).String(),
identityset.Address(31).String(),
identityset.Address(32).String(),
identityset.Address(33).String(),
}
pubKeys := []crypto.PublicKey{
identityset.PrivateKey(28).PublicKey(),
identityset.PrivateKey(29).PublicKey(),
identityset.PrivateKey(30).PublicKey(),
identityset.PrivateKey(31).PublicKey(),
identityset.PrivateKey(32).PublicKey(),
identityset.PrivateKey(33).PublicKey(),
}
nonces := make([]uint64, len(accounts))
sf.AddActionHandlers(account.NewProtocol(config.NewHeightUpgrade(config.Default)))
if err := sf.Start(context.Background()); err != nil {
b.Fatal(err)
}
defer func() {
defer func() {
if err := sf.Stop(context.Background()); err != nil {
b.Fatal(err)
}
}()
}()
ws, err := sf.NewWorkingSet()
if err != nil {
b.Fatal(err)
}
for _, acc := range accounts {
_, err = accountutil.LoadOrCreateAccount(ws, acc, big.NewInt(int64(b.N*100)))
if err != nil {
b.Fatal(err)
}
}
if err := sf.Commit(ws); err != nil {
b.Fatal(err)
}
gasLimit := testutil.TestGasLimit * 100000
for n := 0; n < b.N; n++ {
ws, err := sf.NewWorkingSet()
if err != nil {
b.Fatal(err)
}
// put 500 actions together to run
b.StopTimer()
total := 500
acts := make([]action.SealedEnvelope, 0, total)
for numActs := 0; numActs < total; numActs++ {
senderIdx := rand.Int() % len(accounts)
var chainIDBytes [4]byte
enc.MachineEndian.PutUint32(chainIDBytes[:], 1)
payload := []byte(randStringRunes(20))
receiverAddr, err := address.FromBytes(payload)
if err != nil {
b.Fatal(err)
}
receiver := receiverAddr.String()
nonces[senderIdx] += nonces[senderIdx]
tx, err := action.NewTransfer(nonces[senderIdx], big.NewInt(1), receiver, nil, uint64(0), big.NewInt(0))
if err != nil {
b.Fatal(err)
}
bd := &action.EnvelopeBuilder{}
elp := bd.SetNonce(nonces[senderIdx]).SetAction(tx).Build()
selp := action.FakeSeal(elp, pubKeys[senderIdx])
acts = append(acts, selp)
}
b.StartTimer()
zctx := protocol.WithRunActionsCtx(context.Background(),
protocol.RunActionsCtx{
Producer: identityset.Address(27),
GasLimit: gasLimit,
})
_, err = ws.RunActions(zctx, uint64(n), acts)
if err != nil {
b.Fatal(err)
}
b.StopTimer()
if err := sf.Commit(ws); err != nil {
b.Fatal(err)
}
b.StartTimer()
}
}
func init() {
rand.Seed(time.Now().UnixNano())
}
| 1 | 19,224 | line is 147 characters (from `lll`) | iotexproject-iotex-core | go |
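A sketch of one way to satisfy the lll line-length check flagged here: breaking the long newStateTX call from the patch across several lines (formatting only, the arguments are unchanged).

    ws := newStateTX(
        0,
        db.NewMemKVStore(),
        []protocol.ActionHandler{account.NewProtocol(config.NewHeightUpgrade(config.Default))},
        config.Default.DB,
    )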
@@ -65,6 +65,7 @@ confspec = ConfigObj(StringIO(
#possible log levels are DEBUG, IO, DEBUGWARNING, INFO
loggingLevel = string(default="INFO")
showWelcomeDialogAtStartup = boolean(default=true)
+ showSpeechViewerAtStartup = boolean(default=false)
# Speech settings
[speech] | 1 | """Manages NVDA configuration.
"""
import globalVars
import _winreg
import ctypes
import ctypes.wintypes
import os
import sys
from cStringIO import StringIO
import itertools
import contextlib
from collections import OrderedDict
from configobj import ConfigObj, ConfigObjError
from validate import Validator
from logHandler import log
import shlobj
import baseObject
import easeOfAccess
import winKernel
def validateConfig(configObj,validator,validationResult=None,keyList=None):
"""
@deprecated: Add-ons which need this should provide their own implementation.
"""
import warnings
warnings.warn("config.validateConfig deprecated. Callers should provide their own implementation.",
DeprecationWarning, 2)
if validationResult is None:
validationResult=configObj.validate(validator,preserve_errors=True)
if validationResult is True:
return None #No errors
if validationResult is False:
return "Badly formed configuration file"
errorStrings=[]
for k,v in validationResult.iteritems():
if v is True:
continue
newKeyList=list(keyList) if keyList is not None else []
newKeyList.append(k)
if isinstance(v,dict):
errorStrings.extend(validateConfig(configObj[k],validator,v,newKeyList))
else:
#If a key is invalid configObj does not record its default, thus we need to get and set the default manually
defaultValue=validator.get_default_value(configObj.configspec[k])
configObj[k]=defaultValue
if k not in configObj.defaults:
configObj.defaults.append(k)
errorStrings.append("%s: %s, defaulting to %s"%(k,v,defaultValue))
return errorStrings
#: @deprecated: Use C{conf.validator} instead.
val = Validator()
#: The configuration specification
#: @type: ConfigObj
confspec = ConfigObj(StringIO(
"""# NVDA Configuration File
[general]
language = string(default="Windows")
saveConfigurationOnExit = boolean(default=True)
askToExit = boolean(default=true)
playStartAndExitSounds = boolean(default=true)
#possible log levels are DEBUG, IO, DEBUGWARNING, INFO
loggingLevel = string(default="INFO")
showWelcomeDialogAtStartup = boolean(default=true)
# Speech settings
[speech]
# The synthesiser to use
synth = string(default=auto)
symbolLevel = integer(default=100)
trustVoiceLanguage = boolean(default=true)
beepSpeechModePitch = integer(default=10000,min=50,max=11025)
outputDevice = string(default=default)
autoLanguageSwitching = boolean(default=true)
autoDialectSwitching = boolean(default=false)
[[__many__]]
capPitchChange = integer(default=30,min=-100,max=100)
sayCapForCapitals = boolean(default=false)
beepForCapitals = boolean(default=false)
useSpellingFunctionality = boolean(default=true)
# Audio settings
[audio]
audioDuckingMode = integer(default=0)
# Braille settings
[braille]
display = string(default=noBraille)
translationTable = string(default=en-us-comp8.ctb)
inputTable = string(default=en-us-comp8.ctb)
expandAtCursor = boolean(default=true)
showCursor = boolean(default=true)
cursorBlinkRate = integer(default=500,min=0,max=2000)
cursorShape = integer(default=192,min=1,max=255)
messageTimeout = integer(default=4,min=0,max=20)
tetherTo = string(default="focus")
readByParagraph = boolean(default=false)
wordWrap = boolean(default=true)
# Braille display driver settings
[[__many__]]
port = string(default="")
# Presentation settings
[presentation]
reportKeyboardShortcuts = boolean(default=true)
reportObjectPositionInformation = boolean(default=true)
guessObjectPositionInformationWhenUnavailable = boolean(default=false)
reportTooltips = boolean(default=false)
reportHelpBalloons = boolean(default=true)
reportObjectDescriptions = boolean(default=True)
reportDynamicContentChanges = boolean(default=True)
[[progressBarUpdates]]
reportBackgroundProgressBars = boolean(default=false)
#output modes are beep, speak, both, or off
progressBarOutputMode = string(default="beep")
speechPercentageInterval = integer(default=10)
beepPercentageInterval = integer(default=1)
beepMinHZ = integer(default=110)
[mouse]
enableMouseTracking = boolean(default=True) #must be true for any of the other settings to work
mouseTextUnit = string(default="paragraph")
reportObjectRoleOnMouseEnter = boolean(default=False)
audioCoordinatesOnMouseMove = boolean(default=False)
audioCoordinates_detectBrightness = boolean(default=False)
audioCoordinates_blurFactor = integer(default=3)
audioCoordinates_minVolume = float(default=0.1)
audioCoordinates_maxVolume = float(default=1.0)
audioCoordinates_minPitch = integer(default=220)
audioCoordinates_maxPitch = integer(default=880)
reportMouseShapeChanges = boolean(default=false)
#Keyboard settings
[keyboard]
useCapsLockAsNVDAModifierKey = boolean(default=false)
useNumpadInsertAsNVDAModifierKey = boolean(default=true)
useExtendedInsertAsNVDAModifierKey = boolean(default=true)
keyboardLayout = string(default="desktop")
speakTypedCharacters = boolean(default=true)
speakTypedWords = boolean(default=false)
beepForLowercaseWithCapslock = boolean(default=true)
speakCommandKeys = boolean(default=false)
speechInterruptForCharacters = boolean(default=true)
speechInterruptForEnter = boolean(default=true)
allowSkimReadingInSayAll = boolean(default=False)
alertForSpellingErrors = boolean(default=True)
handleInjectedKeys= boolean(default=true)
[virtualBuffers]
maxLineLength = integer(default=100)
linesPerPage = integer(default=25)
useScreenLayout = boolean(default=True)
autoPassThroughOnFocusChange = boolean(default=true)
autoPassThroughOnCaretMove = boolean(default=false)
passThroughAudioIndication = boolean(default=true)
autoSayAllOnPageLoad = boolean(default=true)
trapNonCommandGestures = boolean(default=true)
#Settings for document reading (such as MS Word and wordpad)
[documentFormatting]
#These settings affect what information is reported when you navigate to text where the formatting or placement has changed
detectFormatAfterCursor = boolean(default=false)
reportFontName = boolean(default=false)
reportFontSize = boolean(default=false)
reportFontAttributes = boolean(default=false)
reportRevisions = boolean(default=true)
reportEmphasis = boolean(default=false)
reportColor = boolean(default=False)
reportAlignment = boolean(default=false)
reportLineSpacing = boolean(default=false)
reportStyle = boolean(default=false)
reportSpellingErrors = boolean(default=true)
reportPage = boolean(default=true)
reportLineNumber = boolean(default=False)
reportLineIndentation = boolean(default=False)
reportParagraphIndentation = boolean(default=False)
reportTables = boolean(default=true)
includeLayoutTables = boolean(default=False)
reportTableHeaders = boolean(default=True)
reportTableCellCoords = boolean(default=True)
reportLinks = boolean(default=true)
reportComments = boolean(default=true)
reportLists = boolean(default=true)
reportHeadings = boolean(default=true)
reportBlockQuotes = boolean(default=true)
reportLandmarks = boolean(default=true)
reportFrames = boolean(default=true)
reportClickable = boolean(default=true)
[reviewCursor]
simpleReviewMode = boolean(default=True)
followFocus = boolean(default=True)
followCaret = boolean(default=True)
followMouse = boolean(default=False)
[UIA]
minWindowsVersion = float(default=6.1)
enabled = boolean(default=true)
[update]
autoCheck = boolean(default=true)
[inputComposition]
autoReportAllCandidates = boolean(default=True)
announceSelectedCandidate = boolean(default=True)
alwaysIncludeShortCharacterDescriptionInCandidateName = boolean(default=True)
reportReadingStringChanges = boolean(default=True)
reportCompositionStringChanges = boolean(default=True)
[debugLog]
hwIo = boolean(default=false)
audioDucking = boolean(default=false)
[upgrade]
newLaptopKeyboardLayout = boolean(default=false)
"""
), list_values=False, encoding="UTF-8")
confspec.newlines = "\r\n"
#: The active configuration, C{None} if it has not yet been loaded.
#: @type: ConfigObj
conf = None
def initialize():
global conf
conf = ConfigManager()
def save():
"""
@deprecated: Use C{conf.save} instead.
"""
import warnings
warnings.warn("config.save deprecated. Use config.conf.save instead.",
DeprecationWarning, 2)
conf.save()
def saveOnExit():
"""Save the configuration if configured to save on exit.
This should only be called if NVDA is about to exit.
Errors are ignored.
"""
if conf["general"]["saveConfigurationOnExit"]:
try:
conf.save()
except:
pass
def isInstalledCopy():
"""Checks to see if this running copy of NVDA is installed on the system"""
try:
k=_winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE,"SOFTWARE\Microsoft\Windows\CurrentVersion\Uninstall\NVDA")
instDir=_winreg.QueryValueEx(k,"UninstallDirectory")[0]
except WindowsError:
return False
_winreg.CloseKey(k)
try:
return os.stat(instDir)==os.stat(os.getcwdu())
except WindowsError:
return False
def getInstalledUserConfigPath():
try:
return os.path.join(shlobj.SHGetFolderPath(0, shlobj.CSIDL_APPDATA), "nvda")
except WindowsError:
return None
def getUserDefaultConfigPath(useInstalledPathIfExists=False):
"""Get the default path for the user configuration directory.
This is the default path and doesn't reflect overriding from the command line,
which includes temporary copies.
Most callers will want the C{globalVars.appArgs.configPath variable} instead.
"""
installedUserConfigPath=getInstalledUserConfigPath()
if installedUserConfigPath and (isInstalledCopy() or (useInstalledPathIfExists and os.path.isdir(installedUserConfigPath))):
return installedUserConfigPath
return u'.\\userConfig\\'
def getSystemConfigPath():
if isInstalledCopy():
try:
return os.path.join(shlobj.SHGetFolderPath(0, shlobj.CSIDL_COMMON_APPDATA), "nvda")
except WindowsError:
pass
return None
def initConfigPath(configPath=None):
"""
Creates the current configuration path if it doesn't exist. Also makes sure that various sub directories also exist.
@param configPath: an optional path which should be used instead (only useful when being called from outside of NVDA)
@type configPath: basestring
"""
if not configPath:
configPath=globalVars.appArgs.configPath
if not os.path.isdir(configPath):
os.makedirs(configPath)
for subdir in ("addons", "appModules","brailleDisplayDrivers","speechDicts","synthDrivers","globalPlugins","profiles"):
subdir=os.path.join(configPath,subdir)
if not os.path.isdir(subdir):
os.makedirs(subdir)
RUN_REGKEY = ur"SOFTWARE\Microsoft\Windows\CurrentVersion\Run"
def getStartAfterLogon():
if (easeOfAccess.isSupported and easeOfAccess.canConfigTerminateOnDesktopSwitch
and easeOfAccess.willAutoStart(_winreg.HKEY_CURRENT_USER)):
return True
try:
k = _winreg.OpenKey(_winreg.HKEY_CURRENT_USER, RUN_REGKEY)
val = _winreg.QueryValueEx(k, u"nvda")[0]
return os.stat(val) == os.stat(sys.argv[0])
except (WindowsError, OSError):
return False
def setStartAfterLogon(enable):
if getStartAfterLogon() == enable:
return
if easeOfAccess.isSupported and easeOfAccess.canConfigTerminateOnDesktopSwitch:
easeOfAccess.setAutoStart(_winreg.HKEY_CURRENT_USER, enable)
if enable:
return
# We're disabling, so ensure the run key is cleared,
# as it might have been set by an old version.
run = False
else:
run = enable
k = _winreg.OpenKey(_winreg.HKEY_CURRENT_USER, RUN_REGKEY, 0, _winreg.KEY_WRITE)
if run:
_winreg.SetValueEx(k, u"nvda", None, _winreg.REG_SZ, sys.argv[0])
else:
try:
_winreg.DeleteValue(k, u"nvda")
except WindowsError:
pass
SERVICE_FILENAME = u"nvda_service.exe"
def isServiceInstalled():
if not os.path.isfile(SERVICE_FILENAME):
return False
try:
k = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, ur"SYSTEM\CurrentControlSet\Services\nvda")
val = _winreg.QueryValueEx(k, u"ImagePath")[0].replace(u'"', u'')
return os.stat(val) == os.stat(SERVICE_FILENAME)
except (WindowsError, OSError):
return False
def canStartOnSecureScreens():
return isInstalledCopy() and (easeOfAccess.isSupported or isServiceInstalled())
def execElevated(path, params=None, wait=False,handleAlreadyElevated=False):
import subprocess
import shellapi
import winUser
if params is not None:
params = subprocess.list2cmdline(params)
sei = shellapi.SHELLEXECUTEINFO(lpFile=os.path.abspath(path), lpParameters=params, nShow=winUser.SW_HIDE)
#IsUserAnAdmin is apparently deprecated so may not work above Windows 8
if not handleAlreadyElevated or not ctypes.windll.shell32.IsUserAnAdmin():
sei.lpVerb=u"runas"
if wait:
sei.fMask = shellapi.SEE_MASK_NOCLOSEPROCESS
shellapi.ShellExecuteEx(sei)
if wait:
try:
h=ctypes.wintypes.HANDLE(sei.hProcess)
msg=ctypes.wintypes.MSG()
while ctypes.windll.user32.MsgWaitForMultipleObjects(1,ctypes.byref(h),False,-1,255)==1:
while ctypes.windll.user32.PeekMessageW(ctypes.byref(msg),None,0,0,1):
ctypes.windll.user32.TranslateMessage(ctypes.byref(msg))
ctypes.windll.user32.DispatchMessageW(ctypes.byref(msg))
return winKernel.GetExitCodeProcess(sei.hProcess)
finally:
winKernel.closeHandle(sei.hProcess)
SLAVE_FILENAME = u"nvda_slave.exe"
NVDA_REGKEY = ur"SOFTWARE\NVDA"
def getStartOnLogonScreen():
if easeOfAccess.isSupported and easeOfAccess.willAutoStart(_winreg.HKEY_LOCAL_MACHINE):
return True
try:
k = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, NVDA_REGKEY)
return bool(_winreg.QueryValueEx(k, u"startOnLogonScreen")[0])
except WindowsError:
return False
def _setStartOnLogonScreen(enable):
if easeOfAccess.isSupported:
# The installer will have migrated service config to EoA if appropriate,
# so we only need to deal with EoA here.
easeOfAccess.setAutoStart(_winreg.HKEY_LOCAL_MACHINE, enable)
else:
k = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, NVDA_REGKEY, 0, _winreg.KEY_WRITE)
_winreg.SetValueEx(k, u"startOnLogonScreen", None, _winreg.REG_DWORD, int(enable))
def setSystemConfigToCurrentConfig():
fromPath=os.path.abspath(globalVars.appArgs.configPath)
if ctypes.windll.shell32.IsUserAnAdmin():
_setSystemConfig(fromPath)
else:
res=execElevated(SLAVE_FILENAME, (u"setNvdaSystemConfig", fromPath), wait=True)
if res==2:
raise installer.RetriableFailure
elif res!=0:
raise RuntimeError("Slave failure")
def _setSystemConfig(fromPath):
import installer
toPath=os.path.join(sys.prefix.decode('mbcs'),'systemConfig')
if os.path.isdir(toPath):
installer.tryRemoveFile(toPath)
for curSourceDir,subDirs,files in os.walk(fromPath):
if curSourceDir==fromPath:
curDestDir=toPath
else:
curDestDir=os.path.join(toPath,os.path.relpath(curSourceDir,fromPath))
if not os.path.isdir(curDestDir):
os.makedirs(curDestDir)
for f in files:
sourceFilePath=os.path.join(curSourceDir,f)
destFilePath=os.path.join(curDestDir,f)
installer.tryCopyFile(sourceFilePath,destFilePath)
def setStartOnLogonScreen(enable):
if getStartOnLogonScreen() == enable:
return
try:
# Try setting it directly.
_setStartOnLogonScreen(enable)
except WindowsError:
# We probably don't have admin privs, so we need to elevate to do this using the slave.
if execElevated(SLAVE_FILENAME, (u"config_setStartOnLogonScreen", u"%d" % enable), wait=True) != 0:
raise RuntimeError("Slave failed to set startOnLogonScreen")
def getConfigDirs(subpath=None):
"""Retrieve all directories that should be used when searching for configuration.
If C{subpath} is provided, it will be added to each directory returned.
@param subpath: The path to be added to each directory, C{None} for none.
@type subpath: str
@return: The configuration directories in the order in which they should be searched.
@rtype: list of str
"""
return [os.path.join(dir, subpath) if subpath else dir
for dir in (globalVars.appArgs.configPath,)
]
def addConfigDirsToPythonPackagePath(module, subdir=None):
"""Add the configuration directories to the module search path (__path__) of a Python package.
C{subdir} is added to each configuration directory. It defaults to the name of the Python package.
@param module: The root module of the package.
@type module: module
@param subdir: The subdirectory to be used, C{None} for the name of C{module}.
@type subdir: str
"""
if globalVars.appArgs.disableAddons:
return
if not subdir:
subdir = module.__name__
# Python 2.x doesn't properly handle unicode import paths, so convert them.
dirs = [dir.encode("mbcs") for dir in getConfigDirs(subdir)]
dirs.extend(module.__path__ )
module.__path__ = dirs
# FIXME: this should not be coupled to the config module....
import addonHandler
for addon in addonHandler.getRunningAddons():
addon.addToPackagePath(module)
class ConfigManager(object):
"""Manages and provides access to configuration.
In addition to the base configuration, there can be multiple active configuration profiles.
Settings in more recently activated profiles take precedence,
with the base configuration being consulted last.
This allows a profile to override settings in profiles activated earlier and the base configuration.
A profile need only include a subset of the available settings.
Changed settings are written to the most recently activated profile.
"""
#: Sections that only apply to the base configuration;
#: i.e. they cannot be overridden in profiles.
BASE_ONLY_SECTIONS = {"general", "update", "upgrade"}
def __init__(self):
self.spec = confspec
#: All loaded profiles by name.
self._profileCache = {}
#: The active profiles.
self.profiles = []
#: Whether profile triggers are enabled (read-only).
#: @type: bool
self.profileTriggersEnabled = True
self.validator = val
self.rootSection = None
self._shouldHandleProfileSwitch = True
self._pendingHandleProfileSwitch = False
self._suspendedTriggers = None
self._initBaseConf()
#: Maps triggers to profiles.
self.triggersToProfiles = None
self._loadProfileTriggers()
#: The names of all profiles that have been modified since they were last saved.
self._dirtyProfiles = set()
def _handleProfileSwitch(self):
if not self._shouldHandleProfileSwitch:
self._pendingHandleProfileSwitch = True
return
init = self.rootSection is None
# Reset the cache.
self.rootSection = AggregatedSection(self, (), self.spec, self.profiles)
if init:
# We're still initialising, so don't notify anyone about this change.
return
import synthDriverHandler
synthDriverHandler.handleConfigProfileSwitch()
import braille
braille.handler.handleConfigProfileSwitch()
import audioDucking
audioDucking.handleConfigProfileSwitch()
def _initBaseConf(self, factoryDefaults=False):
fn = os.path.join(globalVars.appArgs.configPath, "nvda.ini")
if factoryDefaults:
profile = ConfigObj(None, indent_type="\t", encoding="UTF-8")
profile.filename = fn
else:
try:
profile = ConfigObj(fn, indent_type="\t", encoding="UTF-8")
self.baseConfigError = False
except:
log.error("Error loading base configuration", exc_info=True)
self.baseConfigError = True
return self._initBaseConf(factoryDefaults=True)
# Python converts \r\n to \n when reading files in Windows, so ConfigObj can't determine the true line ending.
profile.newlines = "\r\n"
for key in self.BASE_ONLY_SECTIONS:
# These sections are returned directly from the base config, so validate them here.
try:
sect = profile[key]
except KeyError:
profile[key] = {}
# ConfigObj mutates this into a configobj.Section.
sect = profile[key]
sect.configspec = self.spec[key]
profile.validate(self.validator, section=sect)
self._profileCache[None] = profile
self.profiles.append(profile)
self._handleProfileSwitch()
def __getitem__(self, key):
if key in self.BASE_ONLY_SECTIONS:
# Return these directly from the base configuration.
return self.profiles[0][key]
return self.rootSection[key]
def __contains__(self, key):
return key in self.rootSection
def get(self, key, default=None):
return self.rootSection.get(key, default)
def __setitem__(self, key, val):
self.rootSection[key] = val
def listProfiles(self):
for name in os.listdir(os.path.join(globalVars.appArgs.configPath, "profiles")):
name, ext = os.path.splitext(name)
if ext == ".ini":
yield name
def _getProfileFn(self, name):
return os.path.join(globalVars.appArgs.configPath, "profiles", name + ".ini")
def _getProfile(self, name, load=True):
try:
return self._profileCache[name]
except KeyError:
if not load:
raise KeyError(name)
# Load the profile.
fn = self._getProfileFn(name)
profile = ConfigObj(fn, indent_type="\t", encoding="UTF-8", file_error=True)
# Python converts \r\n to \n when reading files in Windows, so ConfigObj can't determine the true line ending.
profile.newlines = "\r\n"
profile.name = name
profile.manual = False
profile.triggered = False
self._profileCache[name] = profile
return profile
def getProfile(self, name):
"""Get a profile given its name.
This is useful for checking whether a profile has been manually activated or triggered.
@param name: The name of the profile.
@type name: basestring
@return: The profile object.
@raise KeyError: If the profile is not loaded.
"""
return self._getProfile(name, load=False)
def manualActivateProfile(self, name):
"""Manually activate a profile.
Only one profile can be manually active at a time.
If another profile was manually activated, deactivate it first.
If C{name} is C{None}, a profile will not be activated.
@param name: The name of the profile or C{None} for no profile.
@type name: basestring
"""
if len(self.profiles) > 1:
profile = self.profiles[-1]
if profile.manual:
del self.profiles[-1]
profile.manual = False
if name:
profile = self._getProfile(name)
profile.manual = True
self.profiles.append(profile)
self._handleProfileSwitch()
def _markWriteProfileDirty(self):
if len(self.profiles) == 1:
# There's nothing other than the base config, which is always saved anyway.
return
self._dirtyProfiles.add(self.profiles[-1].name)
def save(self):
"""Save all modified profiles and the base configuration to disk.
"""
if globalVars.appArgs.secure:
# Never save the config if running securely.
return
try:
self.profiles[0].write()
log.info("Base configuration saved")
for name in self._dirtyProfiles:
self._profileCache[name].write()
log.info("Saved configuration profile %s" % name)
self._dirtyProfiles.clear()
except Exception as e:
log.warning("Error saving configuration; probably read only file system")
log.debugWarning("", exc_info=True)
raise e
def reset(self, factoryDefaults=False):
"""Reset the configuration to saved settings or factory defaults.
@param factoryDefaults: C{True} to reset to factory defaults, C{False} to reset to saved configuration.
@type factoryDefaults: bool
"""
self.profiles = []
self._profileCache.clear()
# Signal that we're initialising.
self.rootSection = None
self._initBaseConf(factoryDefaults=factoryDefaults)
def createProfile(self, name):
"""Create a profile.
@param name: The name of the profile to create.
@type name: basestring
@raise ValueError: If a profile with this name already exists.
"""
if globalVars.appArgs.secure:
return
fn = self._getProfileFn(name)
if os.path.isfile(fn):
raise ValueError("A profile with the same name already exists: %s" % name)
# Just create an empty file to make sure we can.
file(fn, "w")
def deleteProfile(self, name):
"""Delete a profile.
@param name: The name of the profile to delete.
@type name: basestring
@raise LookupError: If the profile doesn't exist.
"""
if globalVars.appArgs.secure:
return
fn = self._getProfileFn(name)
if not os.path.isfile(fn):
raise LookupError("No such profile: %s" % name)
os.remove(fn)
try:
del self._profileCache[name]
except KeyError:
pass
# Remove any triggers associated with this profile.
allTriggers = self.triggersToProfiles
# You can't delete from a dict while iterating through it.
delTrigs = [trigSpec for trigSpec, trigProfile in allTriggers.iteritems()
if trigProfile == name]
if delTrigs:
for trigSpec in delTrigs:
del allTriggers[trigSpec]
self.saveProfileTriggers()
# Check if this profile was active.
delProfile = None
for index in xrange(len(self.profiles) - 1, -1, -1):
profile = self.profiles[index]
if profile.name == name:
# Deactivate it.
del self.profiles[index]
delProfile = profile
if not delProfile:
return
self._handleProfileSwitch()
if self._suspendedTriggers:
# Remove any suspended triggers referring to this profile.
for trigger in self._suspendedTriggers.keys():
if trigger._profile == delProfile:
del self._suspendedTriggers[trigger]
def renameProfile(self, oldName, newName):
"""Rename a profile.
@param oldName: The current name of the profile.
@type oldName: basestring
@param newName: The new name for the profile.
@type newName: basestring
@raise LookupError: If the profile doesn't exist.
@raise ValueError: If a profile with the new name already exists.
"""
if globalVars.appArgs.secure:
return
if newName == oldName:
return
oldFn = self._getProfileFn(oldName)
newFn = self._getProfileFn(newName)
if not os.path.isfile(oldFn):
raise LookupError("No such profile: %s" % oldName)
# Windows file names are case insensitive,
# so only test for file existence if the names don't match case insensitively.
if oldName.lower() != newName.lower() and os.path.isfile(newFn):
raise ValueError("A profile with the same name already exists: %s" % newName)
os.rename(oldFn, newFn)
# Update any associated triggers.
allTriggers = self.triggersToProfiles
saveTrigs = False
for trigSpec, trigProfile in allTriggers.iteritems():
if trigProfile == oldName:
allTriggers[trigSpec] = newName
saveTrigs = True
if saveTrigs:
self.saveProfileTriggers()
try:
profile = self._profileCache.pop(oldName)
except KeyError:
# The profile hasn't been loaded, so there's nothing more to do.
return
profile.name = newName
self._profileCache[newName] = profile
try:
self._dirtyProfiles.remove(oldName)
except KeyError:
# The profile wasn't dirty.
return
self._dirtyProfiles.add(newName)
def _triggerProfileEnter(self, trigger):
"""Called by L{ProfileTrigger.enter}}}.
"""
if not self.profileTriggersEnabled:
return
if self._suspendedTriggers is not None:
self._suspendedTriggers[trigger] = "enter"
return
try:
profile = trigger._profile = self._getProfile(trigger.profileName)
except:
trigger._profile = None
raise
profile.triggered = True
if len(self.profiles) > 1 and self.profiles[-1].manual:
# There's a manually activated profile.
# Manually activated profiles must be at the top of the stack, so insert this one below.
self.profiles.insert(-1, profile)
else:
self.profiles.append(profile)
self._handleProfileSwitch()
def _triggerProfileExit(self, trigger):
"""Called by L{ProfileTrigger.exit}}}.
"""
if not self.profileTriggersEnabled:
return
if self._suspendedTriggers is not None:
if trigger in self._suspendedTriggers:
# This trigger was entered and is now being exited.
# These cancel each other out.
del self._suspendedTriggers[trigger]
else:
self._suspendedTriggers[trigger] = "exit"
return
profile = trigger._profile
if profile is None:
return
profile.triggered = False
try:
self.profiles.remove(profile)
except ValueError:
# This is probably due to the user resetting the configuration.
log.debugWarning("Profile not active when exiting trigger")
return
self._handleProfileSwitch()
@contextlib.contextmanager
def atomicProfileSwitch(self):
"""Indicate that multiple profile switches should be treated as one.
This is useful when multiple triggers may be exited/entered at once;
e.g. when switching applications.
While multiple switches aren't harmful, they might take longer;
e.g. unnecessarily switching speech synthesizers or braille displays.
This is a context manager to be used with the C{with} statement.
"""
self._shouldHandleProfileSwitch = False
try:
yield
finally:
self._shouldHandleProfileSwitch = True
if self._pendingHandleProfileSwitch:
self._handleProfileSwitch()
self._pendingHandleProfileSwitch = False
def suspendProfileTriggers(self):
"""Suspend handling of profile triggers.
Any triggers that currently apply will continue to apply.
Subsequent enters or exits will not apply until triggers are resumed.
@see: L{resumeProfileTriggers}
"""
if self._suspendedTriggers is not None:
return
self._suspendedTriggers = OrderedDict()
def resumeProfileTriggers(self):
"""Resume handling of profile triggers after previous suspension.
Any trigger enters or exits that occurred while triggers were suspended will be applied.
Trigger handling will then return to normal.
@see: L{suspendProfileTriggers}
"""
if self._suspendedTriggers is None:
return
triggers = self._suspendedTriggers
self._suspendedTriggers = None
with self.atomicProfileSwitch():
for trigger, action in triggers.iteritems():
trigger.enter() if action == "enter" else trigger.exit()
def disableProfileTriggers(self):
"""Temporarily disable all profile triggers.
Any triggered profiles will be deactivated and subsequent triggers will not apply.
Call L{enableProfileTriggers} to re-enable triggers.
"""
if not self.profileTriggersEnabled:
return
self.profileTriggersEnabled = False
for profile in self.profiles[1:]:
profile.triggered = False
if len(self.profiles) > 1 and self.profiles[-1].manual:
del self.profiles[1:-1]
else:
del self.profiles[1:]
self._suspendedTriggers = None
self._handleProfileSwitch()
def enableProfileTriggers(self):
"""Re-enable profile triggers after they were previously disabled.
"""
self.profileTriggersEnabled = True
def _loadProfileTriggers(self):
fn = os.path.join(globalVars.appArgs.configPath, "profileTriggers.ini")
try:
cobj = ConfigObj(fn, indent_type="\t", encoding="UTF-8")
except:
log.error("Error loading profile triggers", exc_info=True)
cobj = ConfigObj(None, indent_type="\t", encoding="UTF-8")
cobj.filename = fn
# Python converts \r\n to \n when reading files in Windows, so ConfigObj can't determine the true line ending.
cobj.newlines = "\r\n"
try:
self.triggersToProfiles = cobj["triggersToProfiles"]
except KeyError:
cobj["triggersToProfiles"] = {}
# ConfigObj will have mutated this into a configobj.Section.
self.triggersToProfiles = cobj["triggersToProfiles"]
def saveProfileTriggers(self):
"""Save profile trigger information to disk.
This should be called whenever L{triggersToProfiles} is modified.
"""
if globalVars.appArgs.secure:
# Never save if running securely.
return
self.triggersToProfiles.parent.write()
log.info("Profile triggers saved")
class AggregatedSection(object):
"""A view of a section of configuration which aggregates settings from all active profiles.
"""
def __init__(self, manager, path, spec, profiles):
self.manager = manager
self.path = path
self._spec = spec
#: The relevant section in all of the profiles.
self.profiles = profiles
self._cache = {}
def __getitem__(self, key):
# Try the cache first.
try:
val = self._cache[key]
except KeyError:
pass
else:
if val is KeyError:
# We know there's no such setting.
raise KeyError(key)
return val
spec = self._spec.get(key)
foundSection = False
if isinstance(spec, dict):
foundSection = True
# Walk through the profiles looking for the key.
# If it's a section, collect that section from all profiles.
subProfiles = []
for profile in reversed(self.profiles):
try:
val = profile[key]
except (KeyError, TypeError):
# Indicate that this key doesn't exist in this profile.
subProfiles.append(None)
continue
if isinstance(val, dict):
foundSection = True
subProfiles.append(val)
else:
# This is a setting.
return self._cacheLeaf(key, spec, val)
subProfiles.reverse()
if not foundSection and spec:
# This might have a default.
try:
val = self.manager.validator.get_default_value(spec)
except KeyError:
pass
else:
self._cache[key] = val
return val
if not foundSection:
# The key doesn't exist, so cache this fact.
self._cache[key] = KeyError
raise KeyError(key)
if spec is None:
# Create this section in the config spec.
self._spec[key] = {}
# ConfigObj might have mutated this into a configobj.Section.
spec = self._spec[key]
sect = self._cache[key] = AggregatedSection(self.manager, self.path + (key,), spec, subProfiles)
return sect
def __contains__(self, key):
try:
self[key]
return True
except KeyError:
return False
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
def isSet(self, key):
"""Check whether a given key has been explicitly set.
This is sometimes useful because it can return C{False} even if there is a default for the key.
@return: C{True} if the key has been explicitly set, C{False} if not.
@rtype: bool
"""
for profile in self.profiles:
if not profile:
continue
if key in profile:
return True
return False
def _cacheLeaf(self, key, spec, val):
if spec:
# Validate and convert the value.
val = self.manager.validator.check(spec, val)
self._cache[key] = val
return val
def iteritems(self):
keys = set()
# Start with the cached items.
for key, val in self._cache.iteritems():
keys.add(key)
if val is not KeyError:
yield key, val
# Walk through the profiles and spec looking for items not yet cached.
for profile in itertools.chain(reversed(self.profiles), (self._spec,)):
if not profile:
continue
for key in profile:
if key in keys:
continue
keys.add(key)
# Use __getitem__ so caching, AggregatedSections, etc. are handled.
try:
yield key, self[key]
except KeyError:
# This could happen if the item is in the spec but there's no default.
pass
def copy(self):
return dict(self.iteritems())
def __setitem__(self, key, val):
spec = self._spec.get(key) if self.spec else None
if isinstance(spec, dict) and not isinstance(val, dict):
raise ValueError("Value must be a section")
if isinstance(spec, dict) or isinstance(val, dict):
# The value is a section.
# Update the profile.
updateSect = self._getUpdateSection()
updateSect[key] = val
self.manager._markWriteProfileDirty()
# ConfigObj will have mutated this into a configobj.Section.
val = updateSect[key]
cache = self._cache.get(key)
if cache and cache is not KeyError:
# An AggregatedSection has already been cached, so update it.
cache = self._cache[key]
cache.profiles[-1] = val
cache._cache.clear()
elif cache is KeyError:
# This key now exists, so remove the cached non-existence.
del self._cache[key]
# If an AggregatedSection isn't already cached,
# an appropriate AggregatedSection will be created the next time this section is fetched.
return
if spec:
# Validate and convert the value.
val = self.manager.validator.check(spec, val)
try:
curVal = self[key]
except KeyError:
pass
else:
if val == curVal:
# The value isn't different, so there's nothing to do.
return
# Set this value in the most recently activated profile.
self._getUpdateSection()[key] = val
self.manager._markWriteProfileDirty()
self._cache[key] = val
def _getUpdateSection(self):
profile = self.profiles[-1]
if profile is not None:
# This section already exists in the profile.
return profile
section = self.manager.rootSection
profile = section.profiles[-1]
for part in self.path:
parentProfile = profile
section = section[part]
profile = section.profiles[-1]
if profile is None:
# This section doesn't exist in the profile yet.
# Create it and update the AggregatedSection.
parentProfile[part] = {}
# ConfigObj might have mutated this into a configobj.Section.
profile = section.profiles[-1] = parentProfile[part]
return profile
@property
def spec(self):
return self._spec
@spec.setter
def spec(self, val):
# This section is being replaced.
# Clear it and replace the content so it remains linked to the main spec.
self._spec.clear()
self._spec.update(val)
class ProfileTrigger(object):
"""A trigger for automatic activation/deactivation of a configuration profile.
The user can associate a profile with a trigger.
When the trigger applies, the associated profile is activated.
When the trigger no longer applies, the profile is deactivated.
L{spec} is a string used to search for this trigger and must be implemented.
To signal that this trigger applies, call L{enter}.
To signal that it no longer applies, call L{exit}.
Alternatively, you can use this object as a context manager via the with statement;
i.e. this trigger will apply only inside the with block.
"""
@baseObject.Getter
def spec(self):
"""The trigger specification.
This is a string used to search for this trigger in the user's configuration.
@rtype: basestring
"""
raise NotImplementedError
def enter(self):
"""Signal that this trigger applies.
The associated profile (if any) will be activated.
"""
try:
self.profileName = conf.triggersToProfiles[self.spec]
except KeyError:
self.profileName = None
return
try:
conf._triggerProfileEnter(self)
except:
log.error("Error entering trigger %s, profile %s"
% (self.spec, self.profileName), exc_info=True)
__enter__ = enter
def exit(self):
"""Signal that this trigger no longer applies.
The associated profile (if any) will be deactivated.
"""
if not self.profileName:
return
try:
conf._triggerProfileExit(self)
except:
log.error("Error exiting trigger %s, profile %s"
% (self.spec, self.profileName), exc_info=True)
def __exit__(self, excType, excVal, traceback):
self.exit()
TokenUIAccess = 26
def hasUiAccess():
token = ctypes.wintypes.HANDLE()
ctypes.windll.advapi32.OpenProcessToken(ctypes.windll.kernel32.GetCurrentProcess(),
winKernel.MAXIMUM_ALLOWED, ctypes.byref(token))
try:
val = ctypes.wintypes.DWORD()
ctypes.windll.advapi32.GetTokenInformation(token, TokenUIAccess,
ctypes.byref(val), ctypes.sizeof(ctypes.wintypes.DWORD),
ctypes.byref(ctypes.wintypes.DWORD()))
return bool(val.value)
finally:
ctypes.windll.kernel32.CloseHandle(token)
| 1 | 18,201 | Perhaps move this option into a separate speechViewer section, as we are planning to also save position information as well. | nvaccess-nvda | py |
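The review comment above suggests keeping this option in a dedicated speechViewer section alongside future window-position settings. Below is a minimal sketch of what such a ConfigObj confspec section could look like; the option names and defaults are assumptions for illustration only and are not taken from the actual patch.

# Hypothetical sketch only: the option names and defaults below are
# assumptions, not the actual NVDA confspec entries from this patch.
from io import StringIO
from configobj import ConfigObj

speechViewerSpec = StringIO("""
[speechViewer]
	showSpeechViewerAtStartup = boolean(default=false)
	autoPositionWindow = boolean(default=true)
	x = integer(default=0)
	y = integer(default=0)
	width = integer(default=500)
	height = integer(default=500)
""")

# Parse the section the same way a confspec fragment would be handled.
spec = ConfigObj(speechViewerSpec, list_values=False)
print(spec["speechViewer"])

Grouping the startup option with the position keys means the section can later be written back as a unit when the window geometry is saved.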
@@ -22,4 +22,9 @@ interface SegmentProductsQueryInterface
* @return array
*/
public function getProductsByType(SegmentId $segmentId, string $type): array;
+
+ /**
+ * @return array
+ */
+ public function getAllEditedProducts(SegmentId $segmentId, ?\DateTime $dateTime = null): array;
} | 1 | <?php
/**
* Copyright © Ergonode Sp. z o.o. All rights reserved.
* See LICENSE.txt for license details.
*/
declare(strict_types=1);
namespace Ergonode\Segment\Domain\Query;
use Ergonode\SharedKernel\Domain\Aggregate\SegmentId;
interface SegmentProductsQueryInterface
{
/**
* @return string[]
*/
public function getProducts(SegmentId $segmentId): array;
/**
* @return array
*/
public function getProductsByType(SegmentId $segmentId, string $type): array;
}
| 1 | 9,742 | This is not a segment responsibility this query should be in exporter-File module | ergonode-backend | php |
@@ -174,6 +174,8 @@ class ClangTidy(analyzer_base.SourceAnalyzer):
analyzer_cmd.append(self.source_file)
+ analyzer_cmd.extend(['--export-fixes', result_handler.fixit_file])
+
analyzer_cmd.append("--")
analyzer_cmd.append('-Qunused-arguments') | 1 | # -------------------------------------------------------------------------
#
# Part of the CodeChecker project, under the Apache License v2.0 with
# LLVM Exceptions. See LICENSE for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
# -------------------------------------------------------------------------
"""
"""
import json
import os
import re
import shlex
import subprocess
from codechecker_common.logger import get_logger
from codechecker_analyzer import host_check
from codechecker_analyzer import env
from .. import analyzer_base
from ..config_handler import CheckerState
from ..flag import has_flag
from ..flag import prepend_all
from ..clangsa.analyzer import ClangSA
from . import config_handler
from . import result_handler
LOG = get_logger('analyzer')
def parse_checkers(tidy_output):
"""
Parse clang tidy checkers list.
Skip clang static analyzer checkers.
Store them to checkers.
"""
checkers = []
pattern = re.compile(r'^\S+$')
for line in tidy_output.splitlines():
line = line.strip()
if line.startswith('Enabled checks:') or line == '':
continue
elif line.startswith('clang-analyzer-'):
continue
match = pattern.match(line)
if match:
checkers.append((match.group(0), ''))
return checkers
def parse_checker_config(config_dump):
"""
Return the parsed clang-tidy config options as a list of
(flag, default_value) tuples.
config_dump -- clang-tidy config options YAML dump.
"""
reg = re.compile(r'key:\s+(\S+)\s+value:\s+([^\n]+)')
return re.findall(reg, config_dump)
def parse_analyzer_config(config_dump):
"""
Return the parsed clang-tidy analyzer options as a list of
(flag, default_value) tuples.
config_dump -- clang-tidy config options YAML dump.
"""
return re.findall(r'^(\S+):\s+(\S+)$', config_dump, re.MULTILINE)
class ClangTidy(analyzer_base.SourceAnalyzer):
"""
Constructs the clang tidy analyzer commands.
"""
ANALYZER_NAME = 'clang-tidy'
def add_checker_config(self, checker_cfg):
LOG.error("Not implemented yet")
@classmethod
def get_analyzer_checkers(cls, cfg_handler, environ):
"""
Return the list of the supported checkers.
"""
try:
result = subprocess.check_output(
[cfg_handler.analyzer_binary, "-list-checks", "-checks=*"],
env=environ,
universal_newlines=True,
encoding="utf-8",
errors="ignore")
return parse_checkers(result)
except (subprocess.CalledProcessError, OSError):
return []
@classmethod
def get_checker_config(cls, cfg_handler, environ):
try:
result = subprocess.check_output(
[cfg_handler.analyzer_binary, "-dump-config"],
env=environ,
universal_newlines=True,
encoding="utf-8",
errors="ignore")
return parse_checker_config(result)
except (subprocess.CalledProcessError, OSError):
return []
@classmethod
def get_analyzer_config(cls, cfg_handler, environ):
try:
result = subprocess.check_output(
[cfg_handler.analyzer_binary, "-dump-config"],
env=environ,
universal_newlines=True,
encoding="utf-8",
errors="ignore")
return parse_analyzer_config(result)
except (subprocess.CalledProcessError, OSError):
return []
def construct_analyzer_cmd(self, result_handler):
"""
"""
try:
config = self.config_handler
analyzer_cmd = [config.analyzer_binary]
# Do not disable any clang-tidy checks explicitly, but don't run
# ClangSA checkers. ClangSA checkers are driven by an other
# analyzer in CodeChecker.
# For clang compiler warnings a corresponding
# clang-diagnostic error is generated by Clang tidy.
# They can be disabled by this glob -clang-diagnostic-*
checkers_cmdline = ['-clang-analyzer-*', 'clang-diagnostic-*']
compiler_warnings = []
# Config handler stores which checkers are enabled or disabled.
for checker_name, value in config.checks().items():
state, _ = value
# Checker name is a compiler warning.
if checker_name.startswith('W'):
warning_name = checker_name[4:] if \
checker_name.startswith('Wno-') else checker_name[1:]
if state == CheckerState.enabled:
compiler_warnings.append('-W' + warning_name)
elif state == CheckerState.disabled:
compiler_warnings.append('-Wno-' + warning_name)
continue
if state == CheckerState.enabled:
checkers_cmdline.append(checker_name)
elif state == CheckerState.disabled:
checkers_cmdline.append('-' + checker_name)
# The invocation should end in a Popen call with shell=False, so
# no globbing should occur even if the checks argument contains
# characters that would trigger globbing in the shell.
analyzer_cmd.append("-checks=%s" % ','.join(checkers_cmdline))
analyzer_cmd.extend(config.analyzer_extra_arguments)
if config.checker_config and config.checker_config != '{}':
analyzer_cmd.append('-config=' + config.checker_config)
analyzer_cmd.append(self.source_file)
analyzer_cmd.append("--")
analyzer_cmd.append('-Qunused-arguments')
# Enable these compiler warnings by default.
analyzer_cmd.extend(['-Wall', '-Wextra'])
compile_lang = self.buildaction.lang
if not has_flag('-x', analyzer_cmd):
analyzer_cmd.extend(['-x', compile_lang])
if not has_flag('--target', analyzer_cmd) and \
self.buildaction.target.get(compile_lang, "") != "":
analyzer_cmd.append(
"--target=" + self.buildaction.target.get(compile_lang,
""))
analyzer_cmd.extend(self.buildaction.analyzer_options)
analyzer_cmd.extend(prepend_all(
'-isystem',
self.buildaction.compiler_includes[compile_lang]))
if not has_flag('-std', analyzer_cmd) and not \
has_flag('--std', analyzer_cmd):
analyzer_cmd.append(
self.buildaction.compiler_standard.get(compile_lang, ""))
analyzer_cmd.extend(compiler_warnings)
return analyzer_cmd
except Exception as ex:
LOG.error(ex)
return []
def get_analyzer_mentioned_files(self, output):
"""
Parse Clang-Tidy's output to generate a list of files that were
mentioned in the standard output or standard error.
"""
if not output:
return set()
# A line mentioning a file in Clang-Tidy's output looks like this:
# /home/.../.cpp:L:C: warning: foobar.
regex = re.compile(
# File path followed by a ':'.
r'^(?P<path>[\S ]+?):'
# Line number followed by a ':'.
r'(?P<line>\d+?):'
# Column number followed by a ':' and a space.
r'(?P<column>\d+?): ')
paths = []
for line in output.splitlines():
match = re.match(regex, line)
if match:
paths.append(match.group('path'))
return set(paths)
@classmethod
def resolve_missing_binary(cls, configured_binary, environ):
"""
In case of the configured binary for the analyzer is not found in the
PATH, this method is used to find a callable binary.
"""
LOG.debug("%s not found in path for ClangTidy!", configured_binary)
if os.path.isabs(configured_binary):
# Do not autoresolve if the path is an absolute path as there
# is nothing we could auto-resolve that way.
return False
# clang-tidy, clang-tidy-5.0, ...
clangtidy = env.get_binary_in_path(['clang-tidy'],
r'^clang-tidy(-\d+(\.\d+){0,2})?$',
environ)
if clangtidy:
LOG.debug("Using '%s' for Clang-tidy!", clangtidy)
return clangtidy
def construct_result_handler(self, buildaction, report_output,
severity_map, skiplist_handler):
"""
See base class for docs.
"""
report_hash = self.config_handler.report_hash
res_handler = result_handler.ClangTidyPlistToFile(buildaction,
report_output,
report_hash)
res_handler.severity_map = severity_map
res_handler.skiplist_handler = skiplist_handler
return res_handler
@classmethod
def construct_config_handler(cls, args, context):
handler = config_handler.ClangTidyConfigHandler()
handler.analyzer_binary = context.analyzer_binaries.get(
cls.ANALYZER_NAME)
handler.report_hash = args.report_hash \
if 'report_hash' in args else None
# FIXME We cannot get the resource dir from the clang-tidy binary,
# therefore we use a sibling clang binary located next to clang-tidy.
# TODO Support "clang-tidy -print-resource-dir" .
check_env = env.extend(context.path_env_extra,
context.ld_lib_path_extra)
# Overwrite PATH to contain only the parent of the clang binary.
if os.path.isabs(handler.analyzer_binary):
check_env['PATH'] = os.path.dirname(handler.analyzer_binary)
clang_bin = ClangSA.resolve_missing_binary('clang',
check_env)
handler.compiler_resource_dir = \
host_check.get_resource_dir(clang_bin, context)
try:
with open(args.tidy_args_cfg_file, 'r', encoding='utf-8',
errors='ignore') as tidy_cfg:
handler.analyzer_extra_arguments = \
re.sub(r'\$\((.*?)\)', env.replace_env_var,
tidy_cfg.read().strip())
handler.analyzer_extra_arguments = \
shlex.split(handler.analyzer_extra_arguments)
except IOError as ioerr:
LOG.debug_analyzer(ioerr)
except AttributeError as aerr:
# No clang tidy arguments file was given in the command line.
LOG.debug_analyzer(aerr)
analyzer_config = {}
# TODO: This extra "isinsrance" check is needed for
# CodeChecker analyzers --analyzer-config. This command also
# runs this function in order to construct a config handler.
if 'analyzer_config' in args and \
isinstance(args.analyzer_config, list):
r = re.compile(r'(?P<analyzer>.+?):(?P<key>.+?)=(?P<value>.+)')
for cfg in args.analyzer_config:
m = re.search(r, cfg)
if m.group('analyzer') == cls.ANALYZER_NAME:
analyzer_config[m.group('key')] = m.group('value')
# TODO: This extra "isinsrance" check is needed for
# CodeChecker checkers --checker-config. This command also
# runs this function in order to construct a config handler.
if 'checker_config' in args and \
isinstance(args.checker_config, list):
r = re.compile(r'(?P<analyzer>.+?):(?P<key>.+?)=(?P<value>.+)')
check_options = []
for cfg in args.checker_config:
m = re.search(r, cfg)
if m.group('analyzer') == cls.ANALYZER_NAME:
check_options.append({'key': m.group('key'),
'value': m.group('value')})
analyzer_config['CheckOptions'] = check_options
else:
try:
with open(args.tidy_config, 'r',
encoding='utf-8', errors='ignore') as tidy_config:
handler.checker_config = tidy_config.read()
except IOError as ioerr:
LOG.debug_analyzer(ioerr)
except AttributeError as aerr:
# No clang tidy config file was given in the command line.
LOG.debug_analyzer(aerr)
# 'take-config-from-directory' is a special option which lets the user
# use the '.clang-tidy' config files. It will disable analyzer and
# checker configuration options.
if not handler.checker_config and \
analyzer_config.get('take-config-from-directory') != 'true':
handler.checker_config = json.dumps(analyzer_config)
check_env = env.extend(context.path_env_extra,
context.ld_lib_path_extra)
checkers = ClangTidy.get_analyzer_checkers(handler, check_env)
# Read clang-tidy checkers from the config file.
clang_tidy_checkers = context.checker_config.get(cls.ANALYZER_NAME +
'_checkers')
try:
cmdline_checkers = args.ordered_checkers
except AttributeError:
LOG.debug_analyzer('No checkers were defined in '
'the command line for %s',
cls.ANALYZER_NAME)
cmdline_checkers = None
handler.initialize_checkers(
context.available_profiles,
context.package_root,
checkers,
clang_tidy_checkers,
cmdline_checkers,
'enable_all' in args and args.enable_all)
return handler
| 1 | 11,990 | Did you measure the performance with and without this option. Do we want to export fixits even if the `clang-apply-replacements` tool is not available in the user's PATH? | Ericsson-codechecker | c |
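The review comment above questions whether fixits should be exported when the clang-apply-replacements tool is unavailable. Below is a minimal sketch of gating the '--export-fixes' flag on a PATH lookup; the helper name is hypothetical and this is not the actual CodeChecker implementation — only the flag and the fixit file argument come from the diff.

# Illustrative sketch only -- not the actual CodeChecker change.
import shutil

def extend_with_export_fixes(analyzer_cmd, fixit_file):
    """Append --export-fixes only when clang-apply-replacements is on PATH."""
    if shutil.which('clang-apply-replacements'):
        analyzer_cmd.extend(['--export-fixes', fixit_file])
    return analyzer_cmd

# Example usage with a dummy command line.
cmd = extend_with_export_fixes(['clang-tidy', 'main.cpp'], '/tmp/fixits.yaml')
print(cmd)

Skipping the flag when the tool is missing avoids writing YAML files that nothing downstream can apply, at the cost of one extra PATH lookup per analysis.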
@@ -19,12 +19,12 @@ package v1alpha1
import (
apis "github.com/openebs/maya/pkg/apis/openebs.io/ndm/v1alpha1"
"github.com/pkg/errors"
- "k8s.io/apimachinery/pkg/apis/meta/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// Get is spc client implementation to get disk.
func (s *SpcObjectClient) Get(name string) (*Disk, error) {
- spcDiskList := s.Spc.Spec.Disks.DiskList
+ spcDiskList := s.Spc.Spec.BlockDevices.BlockDeviceList
var diskName string
for _, disk := range spcDiskList {
if name == disk { | 1 | /*
Copyright 2019 The OpenEBS Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
apis "github.com/openebs/maya/pkg/apis/openebs.io/ndm/v1alpha1"
"github.com/pkg/errors"
"k8s.io/apimachinery/pkg/apis/meta/v1"
)
// Get is spc client implementation to get disk.
func (s *SpcObjectClient) Get(name string) (*Disk, error) {
spcDiskList := s.Spc.Spec.Disks.DiskList
var diskName string
for _, disk := range spcDiskList {
if name == disk {
diskName = name
}
}
if diskName == "" {
return nil, errors.Errorf("Disk %s not found in the given SPC %s", diskName, s.Spc.Name)
}
d, err := s.NDMClientset.OpenebsV1alpha1().Disks().Get(diskName, v1.GetOptions{})
return &Disk{d, nil}, err
}
// List is spc client implementation to list disk.
func (s *SpcObjectClient) List(opts v1.ListOptions) (*DiskList, error) {
diskL := &DiskList{
DiskList: &apis.DiskList{},
errs: nil,
}
var err error
spcDiskList := s.Spc.Spec.Disks.DiskList
if len(spcDiskList) == 0 {
return nil, errors.Errorf("No disk found in the given SPC %s", s.Spc.Name)
}
spcDiskMap := make(map[string]int)
for _, diskName := range spcDiskList {
spcDiskMap[diskName]++
}
getAllDisk, err := s.NDMClientset.OpenebsV1alpha1().Disks().List(opts)
if getAllDisk.Items == nil {
return nil, errors.Wrapf(err, "Could not get disk from kube apiserver")
}
for _, disk := range getAllDisk.Items {
if spcDiskMap[disk.Name] > 0 {
diskL.DiskList.Items = append(diskL.DiskList.Items, disk)
}
}
return diskL, err
}
// Create is kubernetes client implementation to create disk.
func (s *SpcObjectClient) Create(diskObj *apis.Disk) (*Disk, error) {
return nil, errors.New("Disk object creation is not supported through spc client")
}
| 1 | 15,892 | What if we do not change this file ? Are we using this `pkg/disk/v1alpha1` elsewhere for block device requirement? | openebs-maya | go |
@@ -27,7 +27,7 @@ module Bolt
non_interactive: true
}
- options[:port] = @port if @port
+ options[:port] = port if port
options[:password] = @password if @password
options[:keys] = @key if @key
options[:verify_host_key] = if @insecure | 1 | require 'json'
require 'shellwords'
require 'logging'
require 'net/ssh'
require 'net/scp'
require 'bolt/node/output'
module Bolt
class SSH < Node
def self.initialize_transport(logger)
require 'net/ssh/krb'
rescue LoadError
logger.debug {
"Authentication method 'gssapi-with-mic' is not available"
}
end
def protocol
'ssh'
end
def connect
transport_logger = Logging.logger[Net::SSH]
transport_logger.level = :warn
options = {
logger: transport_logger,
non_interactive: true
}
options[:port] = @port if @port
options[:password] = @password if @password
options[:keys] = @key if @key
options[:verify_host_key] = if @insecure
Net::SSH::Verifiers::Lenient.new
else
Net::SSH::Verifiers::Secure.new
end
options[:timeout] = @connect_timeout if @connect_timeout
@session = Net::SSH.start(@host, @user, options)
@logger.debug { "Opened session" }
rescue Net::SSH::AuthenticationFailed => e
raise Bolt::Node::ConnectError.new(
e.message,
'AUTH_ERROR'
)
rescue Net::SSH::HostKeyError => e
raise Bolt::Node::ConnectError.new(
"Host key verification failed for #{@uri}: #{e.message}",
'HOST_KEY_ERROR'
)
rescue Net::SSH::ConnectionTimeout
raise Bolt::Node::ConnectError.new(
"Timeout after #{@connect_timeout} seconds connecting to #{@uri}",
'CONNECT_ERROR'
)
rescue StandardError => e
raise Bolt::Node::ConnectError.new(
"Failed to connect to #{@uri}: #{e.message}",
'CONNECT_ERROR'
)
end
def disconnect
if @session && [email protected]?
@session.close
@logger.debug { "Closed session" }
end
end
def sudo_prompt
'[sudo] Bolt needs to run as another user, password: '
end
def handled_sudo(channel, data)
if data == sudo_prompt
if @sudo_password
channel.send_data "#{@sudo_password}\n"
channel.wait
return true
else
raise Bolt::Node::EscalateError.new(
"Sudo password for user #{@user} was not provided for #{@uri}",
'NO_PASSWORD'
)
end
elsif data =~ /^#{@user} is not in the sudoers file\./
@logger.debug { data }
raise Bolt::Node::EscalateError.new(
"User #{@user} does not have sudo permission on #{@uri}",
'SUDO_DENIED'
)
elsif data =~ /^Sorry, try again\./
@logger.debug { data }
raise Bolt::Node::EscalateError.new(
"Sudo password for user #{@user} not recognized on #{@uri}",
'BAD_PASSWORD'
)
end
false
end
def execute(command, sudoable: false, **options)
result_output = Bolt::Node::Output.new
use_sudo = sudoable && @run_as
if use_sudo
user_clause = if @run_as
"-u #{@run_as}"
else
''
end
command = "sudo -S #{user_clause} -p '#{sudo_prompt}' #{command}"
end
@logger.debug { "Executing: #{command}" }
session_channel = @session.open_channel do |channel|
# Request a pseudo tty
channel.request_pty if @tty
channel.exec(command) do |_, success|
unless success
raise Bolt::Node::ConnectError.new(
"Could not execute command: #{command.inspect}",
'EXEC_ERROR'
)
end
channel.on_data do |_, data|
unless use_sudo && handled_sudo(channel, data)
result_output.stdout << data
end
@logger.debug { "stdout: #{data}" }
end
channel.on_extended_data do |_, _, data|
unless use_sudo && handled_sudo(channel, data)
result_output.stderr << data
end
@logger.debug { "stderr: #{data}" }
end
channel.on_request("exit-status") do |_, data|
result_output.exit_code = data.read_long
end
if options[:stdin]
channel.send_data(options[:stdin])
channel.eof!
end
end
end
session_channel.wait
if result_output.exit_code == 0
@logger.debug { "Command returned successfully" }
else
@logger.info { "Command failed with exit code #{result_output.exit_code}" }
end
result_output
end
def _upload(source, destination)
write_remote_file(source, destination)
Bolt::Result.new
rescue StandardError => e
Bolt::Result.from_exception(e)
end
def write_remote_file(source, destination)
@session.scp.upload!(source, destination)
rescue StandardError => e
raise FileError.new(e.message, 'WRITE_ERROR')
end
def make_tempdir
tmppath = nil
if @tmpdir
tmppath = "#{@tmpdir}/#{SecureRandom.uuid}"
command = "mkdir -m 700 #{tmppath}"
else
command = 'mktemp -d'
end
result = execute(command)
if result.exit_code != 0
raise FileError.new("Could not make tempdir: #{result.stderr.string}", 'TEMPDIR_ERROR')
end
tmppath || result.stdout.string.chomp
end
def with_remote_tempdir
dir = make_tempdir
begin
yield dir
ensure
output = execute("rm -rf '#{dir}'")
if output.exit_code != 0
logger.warn("Failed to clean up tempdir '#{dir}': #{output.stderr.string}")
end
end
end
def with_remote_script(dir, file)
remote_path = "#{dir}/#{File.basename(file)}"
write_remote_file(file, remote_path)
make_executable(remote_path)
yield remote_path
end
def with_remote_file(file)
with_remote_tempdir do |dir|
with_remote_script(dir, file) do |remote_path|
yield remote_path
end
end
end
def make_wrapper_stringio(task_path, stdin)
StringIO.new(<<-SCRIPT)
#!/bin/sh
'#{task_path}' <<EOF
#{stdin}
EOF
SCRIPT
end
def make_executable(path)
result = execute("chmod u+x '#{path}'")
if result.exit_code != 0
raise FileError.new("Could not make file '#{path}' executable: #{result.stderr.string}", 'CHMOD_ERROR')
end
end
def with_task_wrapper(remote_task, dir, stdin)
wrapper = make_wrapper_stringio(remote_task, stdin)
command = "#{dir}/wrapper.sh"
write_remote_file(wrapper, command)
make_executable(command)
yield command
end
def with_remote_task(task_file, stdin)
with_remote_tempdir do |dir|
with_remote_script(dir, task_file) do |remote_task|
if stdin
with_task_wrapper(remote_task, dir, stdin) do |command|
yield command
end
else
yield remote_task
end
end
end
end
def _run_command(command)
output = execute(command, sudoable: true)
Bolt::CommandResult.from_output(output)
# TODO: We should be able to rely on the executor for this but it will mean
# a test refactor
rescue StandardError => e
Bolt::Result.from_exception(e)
end
def _run_script(script, arguments)
with_remote_file(script) do |remote_path|
output = execute("'#{remote_path}' #{Shellwords.join(arguments)}",
sudoable: true)
Bolt::CommandResult.from_output(output)
end
# TODO: We should be able to rely on the executor for this but it will mean
# a test refactor
rescue StandardError => e
Bolt::Result.from_exception(e)
end
def _run_task(task, input_method, arguments)
export_args = {}
stdin, output = nil
if STDIN_METHODS.include?(input_method)
stdin = JSON.dump(arguments)
end
if ENVIRONMENT_METHODS.include?(input_method)
export_args = arguments.map do |env, val|
"PT_#{env}='#{val}'"
end.join(' ')
end
command = export_args.empty? ? '' : "#{export_args} "
if @run_as
with_remote_task(task, stdin) do |remote_path|
command += "'#{remote_path}'"
output = execute(command, sudoable: true)
end
else
with_remote_file(task) do |remote_path|
command += "'#{remote_path}'"
output = execute(command, stdin: stdin)
end
end
Bolt::TaskResult.from_output(output)
# TODO: We should be able to rely on the executor for this but it will mean
# a test refactor
rescue StandardError => e
Bolt::Result.from_exception(e)
end
end
end
| 1 | 7,470 | regarding dropping the port/host/uri methods on Nod: this would `options[:port] = target.port if target.port` | puppetlabs-bolt | rb |